blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f75f2fa41423788a5642e7b498beb646c096f34a | eef659a707d87e979741cc11ad59344c911790f5 | /cc3/rules/migrations/0001_initial.py | 31a6bb660c67cf36a33595c6a36d7bfd85cde79a | [] | no_license | qoin-open-source/samen-doen-cc3 | 1e5e40a9b677886aa78f980670df130cbbb95629 | 8b7806177e1e245af33b5112c551438b8c0af5d2 | refs/heads/master | 2020-05-04T02:26:07.039872 | 2019-04-02T21:19:54 | 2019-04-02T21:19:54 | 178,926,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,110 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the rules app.

    Creates ActionStatus, Condition, Rule, RuleSet and RuleStatus, plus the
    foreign keys wiring them together. Conditions belong to a Rule; Rules
    optionally belong to a RuleSet and point at a ContentType whose instances
    they evaluate; RuleStatus records which rule fired for which object, and
    ActionStatus records the action a RuleStatus triggered.
    """

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
    ]

    operations = [
        # Audit record of an executed action (what ran, with what, result).
        migrations.CreateModel(
            name='ActionStatus',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('action', models.CharField(help_text='Which action was performed', max_length=255)),
                ('params', models.CharField(help_text='What parameters were passed', max_length=255)),
                ('performed', models.CharField(help_text='What did the action return', max_length=255)),
            ],
            options={
                'verbose_name_plural': 'Action status',
            },
        ),
        # A single boolean clause of a rule: <field> <operator> <expression>.
        migrations.CreateModel(
            name='Condition',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('evaluates_field', models.CharField(help_text='Name of (heading for) field in rule process_model where to get value for condition', max_length=255, verbose_name='Field')),
                ('field_expression', models.TextField(default=b'', help_text="If the field needs to be a parameter of an expression, use expression({0}) where {0} will be replaced with the field value, ie dateparse.parse_datetime('{0}').month", verbose_name='Field Expression', blank=True)),
                ('evaluate_operator', models.CharField(default=b'==', help_text='Operator to use for condition, field operator expression, ie price > (10*1.04)+4', max_length=10, verbose_name='Operator', choices=[(b'==', 'equals'), (b'>', 'greater than'), (b'<', 'less than'), (b'>=', 'greater than or equal to'), (b'<=', 'less than or equal to'), (b'!=', 'not equal to'), (b'is', 'Use when comparing anything with (python) None (Python null)')])),
                ('evaluate_expression', models.TextField(help_text='(Python) expression for condition to evaluate and compare with, ie (125*1.05) or timezone.now().month', verbose_name='Expression')),
                ('join_condition', models.CharField(default=b'AND', max_length=3, choices=[(b'AND', b'AND'), (b'OR', b'OR')])),
            ],
        ),
        # A rule: which model it watches, what action to run, and chain-control flags.
        migrations.CreateModel(
            name='Rule',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(help_text='Name of rule', max_length=150, blank=True)),
                ('description', models.CharField(help_text='Description of rule', max_length=255, blank=True)),
                ('action_class', models.CharField(default=b'', help_text='Action to be run (if any), ie: cc3.rules.actions.Pay', max_length=255, blank=True)),
                ('parameter_names', models.CharField(default=b'', help_text='Amount', max_length=255, blank=True)),
                ('parameter_values', models.CharField(default=b'', help_text='123', max_length=255, blank=True)),
                ('instance_identifier', models.CharField(help_text='Instance identifier, which model instance field passed to run_evaluate should be handed over to action', max_length=255)),
                ('instance_qualifier', models.CharField(default=b'', help_text="Extra kwargs as qualifier in case of multiple rows, for example {'parent': None}", max_length=255, blank=True)),
                ('exit_on_match', models.BooleanField(default=False, help_text='If checked, exit rule chain if it evaluates true')),
                ('exit_on_fail', models.BooleanField(default=False, help_text='If checked, exit rule chain if it evaluates false')),
                ('perform_action_once', models.BooleanField(default=False, help_text='If checked, only ever perform the action once')),
                ('sequence', models.IntegerField(help_text='Sequence in which to run rules')),
                ('active', models.BooleanField(default=True, help_text='Run rule as part of ruleset')),
                ('process_model', models.ForeignKey(help_text='Process model of instance passed to run_evaluate. Also used to identify related rules', to='contenttypes.ContentType')),
            ],
            options={
                'ordering': ['sequence'],
            },
        ),
        # A named grouping of rules.
        migrations.CreateModel(
            name='RuleSet',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        # Per-object record of a rule/condition evaluation (generic FK via content_type + object_id).
        migrations.CreateModel(
            name='RuleStatus',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('object_id', models.PositiveIntegerField()),
                ('identity', models.CharField(default=b'-1', help_text='What value was in the instance_identifier field, ie which persoonnummer?', max_length=255, blank=True)),
                ('condition', models.ForeignKey(to='rules.Condition')),
                ('content_type', models.ForeignKey(to='contenttypes.ContentType')),
                ('rule', models.ForeignKey(to='rules.Rule')),
            ],
            options={
                'verbose_name_plural': 'Rule status',
            },
        ),
        migrations.AddField(
            model_name='rule',
            name='ruleset',
            field=models.ForeignKey(blank=True, to='rules.RuleSet', help_text='Which set of rules does this belong to', null=True),
        ),
        migrations.AddField(
            model_name='condition',
            name='rule',
            field=models.ForeignKey(to='rules.Rule'),
        ),
        migrations.AddField(
            model_name='actionstatus',
            name='rule_status',
            field=models.ForeignKey(help_text='Which RuleStatus caused this action to be run', to='rules.RuleStatus'),
        ),
    ]
| [
"stephen.wolff@qoin.com"
] | stephen.wolff@qoin.com |
564bf1210eb6fed846bfaec9f7411d65251e2fbb | 9e335834e7be81068f001d5451781d5c1530ebbf | /python_lxf/20151013/BMI2.py | bc7b34e90fce186744773af6ba182711d43ffe89 | [] | no_license | jtr109/SelfLearning | c1dbffa5485d0cd2f444ea510da62a8e3d269dbc | cc920ed507647762b9855385be76869adac89e7c | refs/heads/master | 2020-04-06T04:11:31.143688 | 2016-07-22T02:19:39 | 2016-07-22T02:19:39 | 58,049,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
# Read height (metres) and weight (kilograms), compute BMI and print the
# category the value falls into.
h = input('请输入您的身高(米):')
w = input('请输入您的体重(千克):')
bmi = float(w) / float(h) ** 2
# (exclusive upper bound, label) pairs, checked in ascending order.
categories = ((18.5, '过轻'), (25, '正常'), (28, '过重'), (32, '肥胖'))
for upper, label in categories:
    if bmi < upper:
        print(label)
        break
else:
    # BMI >= 32 falls through every bound.
    print('严重肥胖')
| [
"lyp_login@outlook.com"
] | lyp_login@outlook.com |
a405913e9b1934afadaf8c347e41153c946f033e | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R2/benchmark/startQiskit238.py | 2f98d4110f6710b2fa7c4e284c1409c0065a845a | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,455 | py | # qubit number=3
# total number=45
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """Return the bitwise XOR of two bit strings, with the result reversed.

    Iterates over the length of *s*; the output string is the XOR bits in
    reverse order, matching the original register-ordering convention.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Return the GF(2) inner product of two bit strings, as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the bit-flip oracle O_f on n control qubits plus one target.

    For every n-bit string rep with f(rep) == "1", a multi-controlled-X
    flips the target; X gates temporarily negate the 0-bits of rep so the
    MCT fires on exactly that bit pattern, then undo the negation.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build a Bernstein-Vazirani circuit for oracle *f* on n input qubits.

    Layout: n input qubits + 1 target qubit (register "qc"), n classical
    bits (register "qm"). The long run of numbered single/two-qubit gates
    below appears to be an auto-generated mutation of the base circuit
    (this file is a generated benchmark) -- the `# number=NN` tags are
    preserved from the generator.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1]) # number=1
    prog.h(input_qubit[2]) # number=38
    prog.cz(input_qubit[0],input_qubit[2]) # number=39
    prog.h(input_qubit[2]) # number=40
    prog.cx(input_qubit[0],input_qubit[2]) # number=31
    prog.cx(input_qubit[0],input_qubit[2]) # number=35
    prog.x(input_qubit[2]) # number=36
    prog.cx(input_qubit[0],input_qubit[2]) # number=37
    prog.h(input_qubit[2]) # number=42
    prog.cz(input_qubit[0],input_qubit[2]) # number=43
    prog.h(input_qubit[2]) # number=44
    prog.h(input_qubit[2]) # number=25
    prog.cz(input_qubit[0],input_qubit[2]) # number=26
    prog.h(input_qubit[2]) # number=27
    prog.h(input_qubit[1]) # number=7
    prog.cz(input_qubit[2],input_qubit[1]) # number=8
    prog.rx(0.17592918860102857,input_qubit[2]) # number=34
    prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
    prog.h(input_qubit[1]) # number=9
    prog.h(input_qubit[1]) # number=18
    prog.cz(input_qubit[2],input_qubit[1]) # number=19
    prog.h(input_qubit[1]) # number=20
    prog.y(input_qubit[1]) # number=14
    prog.h(input_qubit[1]) # number=22
    prog.cz(input_qubit[2],input_qubit[1]) # number=23
    prog.h(input_qubit[1]) # number=24
    prog.z(input_qubit[2]) # number=3
    prog.z(input_qubit[1]) # number=41
    prog.x(input_qubit[1]) # number=17
    prog.y(input_qubit[2]) # number=5
    prog.x(input_qubit[2]) # number=21
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate *prog* exactly and return {'|bits>': amplitude} for every basis state."""
    state_backend = Aer.get_backend('statevector_simulator')
    statevec = execute(prog, state_backend).result()
    quantum_state = statevec.get_statevector()
    # Qubit count is log2 of the state-vector length.
    qubits = round(log2(len(quantum_state)))
    quantum_state = {
        "|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
        for i in range(2 ** qubits)
    }
    return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Run *prog* on the named local simulator and summarize the outcome.

    Returns a dict with the raw measurement counts, the ideal state vector,
    'a' = the most frequent measured bitstring reversed into register order,
    and the caller-supplied expected constant *b*.
    """
    # Q: which backend should we use?
    # get state vector
    quantum_state = get_statevector(prog)
    # get simulate results
    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()
    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    # Counter.most_common(1) picks the modal bitstring; [::-1] reverses it.
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
# Reference oracles f(x) = (a . x) XOR b for three (a, b) instances; each
# returns '0' or '1' for an n-bit input string.
def bernstein_test_1(rep: str):
    """011 . x + 1"""
    a = "011"
    b = "1"
    return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
    """000 . x + 0"""
    a = "000"
    b = "0"
    return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
    """111 . x + 1"""
    a = "111"
    b = "1"
    return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
    # Bernstein-Vazirani instance: hidden string a = "11", constant b = "1".
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot =4000
    writefile = open("../data/startQiskit238.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = BasicAer.get_backend('qasm_simulator')
    # Transpile against a fake 5-qubit device, then append extra gates before
    # measuring all qubits (presumably part of the generated benchmark --
    # purpose not evident from this file).
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
    # Dump counts, circuit depth and the circuit diagram to the CSV file.
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
997f02e6559b4e90b8fe5769bbda3cf138b8bfcb | d7016f69993570a1c55974582cda899ff70907ec | /sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2021_07_01/operations/_operations.py | 88aeae1f2c96dcb662d71e9fdd57fe09951740e9 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 6,706 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
    """Build the GET request for listing all Microsoft.Devices REST operations.

    Honors caller-supplied ``headers``/``params`` overrides from *kwargs* and
    pins the api-version query parameter (default "2021-07-01").
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2021-07-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop("template_url", "/providers/Microsoft.Devices/operations")

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.iothub.v2021_07_01.IotHubClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Populated positionally by the generated client, or by keyword when
        # constructed directly by other generated code.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
        """Lists all of the available IoT Hub REST API operations.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Operation or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2021_07_01.models.Operation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2021-07-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-07-01"))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        # Builds either the first-page request or a follow-up request from the
        # service-provided nextLink (re-applying the client's api-version).
        def prepare_request(next_link=None):
            if not next_link:

                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        # Deserializes one page into (next_link, items) for ItemPaged.
        def extract_data(pipeline_response):
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        # Fetches one page over the wire and raises on any non-200 status.
        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list.metadata = {"url": "/providers/Microsoft.Devices/operations"}
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
4ed4ff0299695f10ce9f869a060b8d95d932daeb | fd84c4c97bf33b99b063ec9d41fc00752ad45b86 | /tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_softmax_grad_rule.py | 2727ef641d89ae379cba9ec87d5e63e2bdd43a9a | [
"Apache-2.0",
"AGPL-3.0-only",
"BSD-3-Clause-Open-MPI",
"MPL-1.1",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"MPL-2.0",
"LGPL-2.1-only",
"GPL-2.0-only",
"Libpng",
"BSL-1.0",
"MIT",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Z... | permissive | zjd1988/mindspore | d9c283416bee6e18b6ca2b04ff0d9fd8f1473c2e | 10481470df2dd0c9713ce45e41b7c37a4050f643 | refs/heads/master | 2021-05-23T00:19:47.869207 | 2020-04-04T09:31:23 | 2020-04-04T09:31:23 | 253,152,063 | 1 | 0 | Apache-2.0 | 2020-04-05T04:01:54 | 2020-04-05T04:01:53 | null | UTF-8 | Python | false | false | 1,639 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.ops import operations as P
from mindspore.ops import Primitive
mul = P.Mul()
reduce_sum = P.ReduceSum()
sub = P.Sub()
confusion_softmax_grad = Primitive('ConfusionSoftmaxGrad')
make_tuple = Primitive('make_tuple')
tuple_getitem = Primitive('tuple_getitem')
axis = 2
class FnDict:
    """Name-keyed registry of functions.

    An instance is used as a decorator: applying it records the function
    under its ``__name__``. It deliberately returns None, because decorated
    names are only ever retrieved back through ``registry[name]``.
    """

    def __init__(self):
        self.fnDict = {}

    def __call__(self, fn):
        # Register under the function's own name; intentionally no return.
        name = fn.__name__
        self.fnDict[name] = fn

    def __getitem__(self, name):
        return self.fnDict[name]
def test_confusion_softmax_grad_rule(tag):
    """ test_confusion_softmax_grad_rule """
    # Returns the graph-builder registered under *tag*: 'before' is the raw
    # mul -> reduce_sum -> sub pattern; 'after' is the fused
    # ConfusionSoftmaxGrad form the fusion pass is expected to produce.
    fns = FnDict()
    @fns
    def before(input0, input1):
        res = mul(input0, input1)
        # input axis will be convert to attr in ConstructKernelGraph step
        res = reduce_sum(res, axis)
        res = sub(input0, res)
        return res
    @fns
    def after(input0, input1):
        res = confusion_softmax_grad(input0, input1)
        return make_tuple(res)
    return fns[tag]
| [
"leon.wanghui@huawei.com"
] | leon.wanghui@huawei.com |
05c9c6a9568fb9710d016411fc2ee17d7974abfc | 6bb433db849f55913d8be6b5aa6527951788090f | /c4-iter_gen/it_on_items_sep_containers.py | ff4281a10b89e900db0421991f72b139a11cd748 | [] | no_license | pranavchandran/redtheme_v13b | 7fe7404cd6f8e5b832824c4b2db7ff23db69b7d8 | e4927e296c24b4861a5e8b8ebe26e13c66c2ece4 | refs/heads/main | 2023-04-03T10:12:46.750224 | 2021-04-09T09:16:01 | 2021-04-09T09:16:01 | 344,432,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | from itertools import chain
# Demonstrate itertools.chain: walk several containers back to back
# without building a concatenated list.
a = [1, 2, 3, 4]
b = ['x', 'y', 'z']
for element in chain(a, b):
    print(element)

active_items = set()
inactive_items = set()
# Iterate over all items in both sets (both empty here, so nothing prints).
for item in chain(active_items, inactive_items):
    print(item)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
0f40b0fc2046c023f44323b588a6dbb9b763c267 | e10fe03d808fc46e09e010653f3fa6a1fa7f0559 | /api/serializers/basic.py | 598971069123027bb37deda16afa2fad82f93b66 | [] | no_license | erics1996/pythoneers-api | 57259f9817ea36f47a556e681630b41c1c7bc37f | f188cbfb79b0774472b3f88213f87081ccb49c3b | refs/heads/master | 2023-01-24T15:58:29.081045 | 2020-10-30T08:12:33 | 2020-10-30T08:12:33 | 297,828,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | from rest_framework import serializers
from .. import models
class NoticeSerializer(serializers.ModelSerializer):
    """Serializes Notice instances, exposing every model field."""
    # Format the timestamp as 'YYYY-MM-DD HH:MM:SS' on output.
    add_time = serializers.DateTimeField(format='%Y-%m-%d %X')

    class Meta:
        model = models.Notice
        fields = '__all__'
class BannerSerializer(serializers.ModelSerializer):
    """Serializes Banner instances with only the fields the frontend needs."""

    class Meta:
        model = models.Banner
        fields = (
            'id',
            'image',
            'resource',
            'title'
        )
class BookSerializer(serializers.ModelSerializer):
    """Serializes Book listings: pricing, coupon info and store links."""

    class Meta:
        model = models.Book
        fields = (
            'title',
            'image',
            'order_address',
            'coupon_address',
            'is_coupon',
            'current_price',
            'current_face_price',
            'monthly_sale',
            'book_store'
        )
| [
"erics1996@yeah.net"
] | erics1996@yeah.net |
21d7e5fb8ef2b6210a97afa76f050d8f28564c78 | 6351221d588668804e2df01936732eede4d96ed0 | /leetcode-cn/Python/61.旋转链表.py | 2d7e6a911832422910418fb0ac8c3fb7c844beab | [] | no_license | LogicJake/code-for-interview | 8e4ec9e24ec661a443ad42aa2496d78a1fbc8a3f | 5990b09866696c2f3e845047c755fa72553dd421 | refs/heads/master | 2021-09-20T20:19:17.118333 | 2021-09-14T13:46:30 | 2021-09-14T13:46:30 | 102,202,212 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | #
# @lc app=leetcode.cn id=61 lang=python3
#
# [61] 旋转链表
#
# @lc code=start
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node."""
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def rotateRight(self, head: ListNode, k: int) -> ListNode:
        """Rotate the list to the right by k places and return the new head."""
        if head is None:
            return None
        # One pass to measure the length and locate the tail.
        length = 1
        tail = head
        while tail.next:
            tail = tail.next
            length += 1
        # Close the list into a ring, then cut it just before the new head:
        # the node at index length - (k % length) - 1 becomes the new tail.
        tail.next = head
        steps_to_new_tail = length - k % length - 1
        new_tail = head
        for _ in range(steps_to_new_tail):
            new_tail = new_tail.next
        new_head = new_tail.next
        new_tail.next = None
        return new_head
# @lc code=end
| [
"835410808@qq.com"
] | 835410808@qq.com |
0f31d691c1dad98f960558e830452740485963a8 | 97cd0270b1490fe296a12ffa224bb14aa2af6f2d | /voter/tests.py | dd5c27ad3158f29beadd046e3ba2f137fa2e9a2c | [] | no_license | Brian23-eng/Project-voter | 685b0b41e6480b2e7b7460b21776a86d43bd4807 | 4e8b935d960a8c2f36142c0066b3514947825f0c | refs/heads/master | 2022-12-15T06:03:08.989626 | 2019-11-25T13:21:15 | 2019-11-25T13:21:15 | 223,181,923 | 0 | 0 | null | 2022-12-08T06:58:14 | 2019-11-21T13:35:53 | Python | UTF-8 | Python | false | false | 2,264 | py | from django.test import TestCase
from . models import *
class TestProfile(TestCase):
    """Tests for the User model.

    NOTE(review): save_user and delete_user lack the ``test_`` prefix, so
    the unittest runner never executes them -- they behave as manual helpers
    only; confirm whether they were meant to be tests.
    """

    def setUp(self):
        self.user = User(id = 1, username = 'Brian', password = 'beta')
        self.user.save()

    def test_instance(self):
        self.assertTrue(isinstance(self.user, User))

    def save_user(self):
        self.user.save()

    def delete_user(self):
        self.user.delete()
class TestPost(TestCase):
    """Tests for the Post model.

    NOTE(review): save_post and get_post lack the ``test_`` prefix, so the
    runner never executes them. The assertions assume Post defines
    save_post/all_posts/search_project/delete_post -- verify against models.
    """

    def setUp(self):
        self.user = User.objects.create(id = 1, username = 'Brian')
        self.post = Post.objects.create(id = 1, title = 'test post', photo = 'https://img.com/345', description = 'this is a test image', user = self.user, url = 'https://url.com')

    def test_instance(self):
        self.assertTrue(isinstance(self.post, Post))

    def save_post(self):
        self.post.save_post()
        post = Post.objects.all()
        self.assertTrue(len(post)>0)

    def get_post(self):
        self.post.save()
        posts = Post.all_posts()
        self.assertTrue(len(posts)>0)

    def test_search_post(self):
        self.post.save()
        post = Post.search_project('test')
        self.assertTrue(len(post) > 0)

    def test_delete_post(self):
        self.post.delete_post()
        post = Post.search_project('test')
        self.assertTrue(len(post) < 1)
class RatingTest(TestCase):
    """Tests for the Rating model and its per-post lookup."""

    def setUp(self):
        self.user = User.objects.create(id=1, username='Brian')
        self.post = Post.objects.create(id=1, title='test post', photo='https://img.com/345', description='this is a test image',
                                        user=self.user, url='https://url.com')
        self.rating = Rating.objects.create(id=1, design=6, usability=7, content=9, user=self.user, post=self.post)

    def test_instance(self):
        self.assertTrue(isinstance(self.rating, Rating))

    def test_save_rating(self):
        self.rating.save_rating()
        rating = Rating.objects.all()
        self.assertTrue(len(rating) > 0)

    def test_get_post_rating(self):
        # Fix: the original signature was test_get_post_rating(self, id) --
        # unittest invokes test methods with no arguments, so it always
        # failed with a TypeError. Use the post created in setUp instead.
        self.rating.save()
        rating = Rating.get_ratings(post_id=self.post.id)
        self.assertTrue(len(rating) == 1)
| [
"b.odhiambo.bo@gmail.com"
] | b.odhiambo.bo@gmail.com |
b541d1cba0235891290f9afc47211cb2eee93e5c | cb4cfcece4bc14f591b038adbc7fadccaf447a1d | /ZUBREACH.py | 3caf3538e4052bcb757be41819f42f11a02bdce4 | [] | no_license | psycho-pomp/CodeChef | ba88cc8e15b3e87d39ad0c4665c6892620c09d22 | 881edddded0bc8820d22f42b94b9959fd6912c88 | refs/heads/master | 2023-03-21T06:46:14.455055 | 2021-03-11T12:07:48 | 2021-03-11T12:07:48 | 275,214,989 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | # cook your dish here
from sys import stdin,stdout
def get_ints():return map(int,stdin.readline().strip().split())
def get_array():return list(map(int,stdin.readline().strip().split()))
def get_string():return stdin.readline().strip()
def get_int():return int(stdin.readline().strip()+"\n")
def op(c):return stdout.write(c)
for _ in range(1,get_int()+1):
m,n=get_ints()
rx,ry=get_ints()
l=get_int()
s=get_string()
mov={'L':0,'R':0,'D':0,'U':0}
for i in s:
mov[i]+=1
currx=mov['R']-mov['L']
curry=mov['U']-mov['D']
if currx<0 or curry<0 or currx>m or curry>n:
print("Case "+str(_)+": "+"DANGER")
elif currx==rx and curry==ry:
print("Case "+str(_)+": "+"REACHED")
else:
print("Case "+str(_)+": "+"SOMEWHERE")
| [
"noreply@github.com"
] | psycho-pomp.noreply@github.com |
b4a33e3b653670da6ff412ebfd66a2319ad74ef6 | 27965822587ca914e65f097f334da1a7d54293bb | /codeChef/test.py | a0e63a454e210effde06b6cfce59f95595ac8620 | [] | no_license | paras1810/Competitive-Programming | 89a772106edad9765a0141042816431b75a33011 | 01a66213dd481cbc9fb160d83024e0e6a9cf165f | refs/heads/master | 2020-04-02T10:08:10.496142 | 2018-10-21T06:08:24 | 2018-10-21T06:08:24 | 154,325,876 | 0 | 0 | null | 2018-10-23T12:40:42 | 2018-10-23T12:40:42 | null | UTF-8 | Python | false | false | 1,024 | py | import random
def getArray(array_length, element_lower_bound, element_upper_bound):
    """Return a list of *array_length* random ints in [lower, upper], inclusive."""
    return [
        random.randint(element_lower_bound, element_upper_bound)
        for _ in range(array_length)
    ]
def writeArray(array, file):
    """Write the elements space-separated (each followed by a space), then a newline."""
    file.write(''.join(str(item) + ' ' for item in array))
    file.write('\n')
def getNumber(element_lower_bound, element_upper_bound):
    """Return one random int in [lower, upper], inclusive.

    randrange(a, b + 1) is exactly randint(a, b).
    """
    return random.randrange(element_lower_bound, element_upper_bound + 1)
def writeNumber(number, file):
    """Write *number* followed by a space and a newline."""
    file.write(str(number) + ' \n')
def getString(arr, length):
    """Return a random string of *length* characters, each drawn uniformly from *arr*.

    Fix: the original looped len(arr) times and ignored the *length*
    parameter entirely, so the requested length was never honored.
    """
    chars = []
    for _ in range(length):
        chars.append(arr[random.randint(0, len(arr) - 1)])
    return "".join(chars)
def writeString(S, file):
    """Write *S* (stringified) followed by a newline."""
    file.write('%s\n' % (S,))
# Character pools used to build random strings.
capLetters = [chr(code) for code in range(65, 91)]     # 'A'..'Z'
smallLetters = [chr(code) for code in range(97, 123)]  # 'a'..'z'
digits = [str(value) for value in range(10)]           # '0'..'9'
fopen = open('input.txt','w')
period = rando | [
"mahajan.ayush2306@gmail.com"
] | mahajan.ayush2306@gmail.com |
2979dcdca3788c252aa0c674c8bab6254dea1e50 | 46ae8264edb9098c9875d2a0a508bc071201ec8b | /res/scripts/client_common/shared_utils__init__.py | dfd2e8db242d42f2588610a3ef1e2b018cf761f0 | [] | no_license | Difrex/wotsdk | 1fc6156e07e3a5302e6f78eafdea9bec4c897cfb | 510a34c67b8f4c02168a9830d23f5b00068d155b | refs/heads/master | 2021-01-01T19:12:03.592888 | 2016-10-08T12:06:04 | 2016-10-08T12:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,436 | py | # Embedded file name: scripts/client_common/shared_utils/__init__.py
import weakref
import itertools
import types
import BigWorld
from debug_utils import LOG_ERROR, LOG_WARNING
ScalarTypes = (types.IntType,
types.LongType,
types.FloatType,
types.BooleanType) + types.StringTypes
IntegralTypes = (types.IntType, types.LongType)
def makeTupleByDict(ntClass, data):
    """Build a namedtuple *ntClass* from dict *data*, dropping unknown keys.

    Keys of *data* that are not in ntClass._fields are silently discarded;
    missing fields still raise TypeError from the namedtuple constructor.
    Fix: uses dict.items() (dict.iteritems() is Python-2-only) and filters
    in a single pass instead of building an intermediate set difference.
    """
    supported = dict((key, value) for key, value in data.items() if key in ntClass._fields)
    return ntClass(**supported)
class BoundMethodWeakref(object):
    """Callable holding a weak reference to a bound method's instance.

    Stores the method name plus a weakref to the instance, so the wrapper
    does not keep the instance alive. Calling it invokes the method if the
    instance still exists, otherwise returns None.
    """

    def __init__(self, func):
        self.methodName = func.__name__
        # Fix: the original line was a decompilation artifact
        # ``raise not ... or AssertionError(...)`` which raises
        # ``TypeError: exceptions must derive from BaseException`` on every
        # construction; the intended check was this assert.
        assert not self.methodName.startswith('__'), 'BoundMethodWeakref: private methods are not supported'
        self.wrefCls = weakref.ref(func.__self__)

    def __call__(self, *args, **kwargs):
        ref = self.wrefCls()
        if ref is not None:
            return getattr(ref, self.methodName)(*args, **kwargs)
        else:
            # Instance was garbage-collected.
            return
def forEach(function, sequence):
    """Invoke `function` on every item of `sequence` (for side effects only;
    results are discarded)."""
    for item in sequence:
        function(item)
def isEmpty(sequence):
    """Return True if `sequence` yields no items.

    Generalized: works for both iterators and plain iterables (the original
    called next() directly and raised TypeError on lists/strings).
    NOTE: when given an iterator, this consumes its first element.
    """
    try:
        next(iter(sequence))
    except StopIteration:
        return True
    return False
def safeCancelCallback(callbackID):
    """Cancel a BigWorld callback, swallowing the error for stale/unknown IDs.

    BigWorld.cancelCallback is expected to raise ValueError for an invalid
    ID (e.g. the callback already fired); that case is logged rather than
    propagated so callers can cancel unconditionally.
    """
    try:
        BigWorld.cancelCallback(callbackID)
    except ValueError:
        LOG_ERROR('Cannot cancel BigWorld callback: incorrect callback ID.')
def prettyPrint(dict, sort_keys = True, indent = 4):
    """Serialize `dict` to a JSON string, sorted and indented by default.

    (The parameter keeps its original, builtin-shadowing name `dict` because
    it is part of the public signature.)
    """
    import json
    rendered = json.dumps(dict, sort_keys=sort_keys, indent=indent)
    return rendered
def findFirst(function_or_None, sequence, default = None):
    """Return the first item of `sequence` accepted by the predicate, or
    `default` when nothing matches.

    When `function_or_None` is None the first *truthy* item is returned,
    mirroring filter(None, ...) semantics.
    """
    # Plain loop replaces the Python-2-only itertools.ifilter().
    if function_or_None is None:
        function_or_None = bool
    for item in sequence:
        if function_or_None(item):
            return item
    return default
def first(sequence, default = None):
    """Return the first *truthy* element of `sequence`, or `default` if none."""
    for element in sequence:
        if element:
            return element
    return default
class CONST_CONTAINER(object):
    """Base class that turns scalar class attributes into a lightweight enum.

    Subclasses declare constants as plain (public, scalar) class attributes;
    this base adds name/value lookup helpers. The value -> name table is
    built lazily on first use.
    """
    __keyByValue = None  # lazy reverse-lookup cache: value -> attribute name

    @classmethod
    def getIterator(cls):
        """Yield (name, value) for every public scalar constant of the class."""
        # dict.items() replaces the Python-2-only iteritems().
        for name, value in cls.__dict__.items():
            if not name.startswith('_') and type(value) in ScalarTypes:
                yield (name, value)

    @classmethod
    def getKeyByValue(cls, value):
        """Return the constant name holding `value`, or None if unknown."""
        cls.__doInit()
        return cls.__keyByValue.get(value)

    @classmethod
    def hasKey(cls, key):
        """True if a constant named `key` is declared on this class."""
        return key in cls.__dict__

    @classmethod
    def hasValue(cls, value):
        """True if some constant of this class holds `value`."""
        cls.__doInit()
        return value in cls.__keyByValue

    @classmethod
    def ALL(cls):
        """Return a tuple of all constant values."""
        return tuple(value for _, value in cls.getIterator())

    @classmethod
    def __doInit(cls):
        # Build the reverse lookup table once per class.
        if cls.__keyByValue is None:
            cls.__keyByValue = dict((value, name) for name, value in cls.getIterator())
        return
def _getBitIndexesMap(capacity):
map = {}
for index in range(1, capacity + 1):
key = (1 << index) - 1
map[key] = index - 1
return map
_INT64_SET_BITS_INDEXES_MAP = _getBitIndexesMap(64)
class BitmaskHelper(object):
    """Static helpers for manipulating integer bit masks.

    Convention: the mutating helpers return the NEW mask; `add`/`remove`
    signal a no-op with -1, while `addIfNot`/`removeIfHas` signal it with an
    implicit None.
    """

    @classmethod
    def add(cls, mask, flag):
        """Set `flag` in `mask`; return the new mask, or -1 if already set."""
        if not mask & flag:
            mask |= flag
            return mask
        return -1

    @classmethod
    def addIfNot(cls, mask, flag):
        """Set `flag` in `mask`; return the new mask, or None if already set."""
        if not mask & flag:
            mask |= flag
            return mask

    @classmethod
    def remove(cls, mask, flag):
        """Clear `flag` from `mask`; return the new mask, or -1 if not set."""
        if mask & flag > 0:
            mask ^= flag
            return mask
        return -1

    @classmethod
    def removeIfHas(cls, mask, flag):
        """Clear `flag` from `mask`; return the new mask, or None if not set."""
        if mask & flag > 0:
            mask ^= flag
            return mask

    @classmethod
    def hasAllBitsSet(cls, number, mask):
        """True if every bit of `mask` is set in `number`."""
        return number & mask == mask

    @classmethod
    def hasAnyBitSet(cls, number, mask):
        """True if at least one bit of `mask` is set in `number`."""
        return number & mask > 0

    @classmethod
    def isBitSet(cls, number, bitIndex):
        """True if bit `bitIndex` (0-based) is set in `number`.

        First parameter renamed from the misleading `self` to `cls` - this
        is a classmethod; callers are unaffected.
        """
        return number & 1 << bitIndex > 0

    @classmethod
    def getSetBitsCount(cls, mask):
        """Count set bits using Brian Kernighan's algorithm.

        Goes through as many iterations as there are set bits: each
        `mask &= mask - 1` clears the lowest set bit.

        :param mask: Bit mask
        :return: Count of set bits
        """
        count = 0
        while mask:
            count += 1
            mask &= mask - 1
        return count

    @classmethod
    def getSetBitIndexes(cls, mask):
        """Return a list of the 0-based indexes of all set bits in `mask`."""
        return list(cls.iterateSetBitsIndexes(mask))

    @classmethod
    def iterateSetBitsIndexes(cls, number):
        """Yield indexes of set bits of `number`; works for any bit width.

        NOTE: if the value is known to fit in 64 bits, prefer
        iterateInt64SetBitsIndexes, which skips unset bits entirely.

        :param number: Bit mask
        :return: Indexes of set bits starting from 0
        """
        counter = 0
        while number:
            if number & 1:
                yield counter
            counter += 1
            number >>= 1

    @classmethod
    def iterateInt64SetBitsIndexes(cls, number):
        """Yield indexes of set bits of an INT64; one iteration per set bit.

        :param number: Bit mask (must fit in 64 bits)
        :return: Indexes of set bits starting from 0
        """
        while number:
            submask = number - 1
            # number ^ (number - 1) == (1 << (k + 1)) - 1 where k is the
            # lowest set bit; the precomputed table maps that mask to k.
            yield _INT64_SET_BITS_INDEXES_MAP[number ^ submask]
            number &= submask
class AlwaysValidObject(object):
    """Null-object that tolerates any attribute access or call.

    Unknown attribute lookups return a new AlwaysValidObject whose name
    records the access path ('parent/child'); calling an instance returns a
    fresh, unnamed one. Useful as a safe stub for optional dependencies.
    """

    def __init__(self, name = ''):
        self.__name = name

    def __getattr__(self, item):
        try:
            return self.__dict__[item]
        except KeyError:
            return AlwaysValidObject(self._makeName(self.__name, item))

    def __call__(self, *args, **kwargs):
        return AlwaysValidObject()

    def getName(self):
        """Return the accumulated access-path name of this node."""
        return self.__name

    @classmethod
    def _makeName(cls, parentName, nodeName):
        return '{}/{}'.format(parentName, nodeName)
def isDefaultDict(sourceDict, defaultDict):
    """Return True if every key of `defaultDict` is present in `sourceDict`
    with an equal value (i.e. `sourceDict` still carries all the defaults).
    """
    # dict.items() + all() replace the Python-2-only iteritems() loop.
    return all(
        key in sourceDict and sourceDict[key] == value
        for key, value in defaultDict.items()
    )
def nextTick(func):
    """
    Moves function calling to the next frame.

    Decorator: the wrapped callable schedules `func` via BigWorld.callback
    with a ~0.01 s delay instead of invoking it synchronously, so the
    wrapper always returns None.
    """
    import functools

    @functools.wraps(func)  # preserve __name__/__doc__ of the decorated function
    def wrapper(*args, **kwargs):
        BigWorld.callback(0.01, lambda: func(*args, **kwargs))
    return wrapper
"m4rtijn@gmail.com"
] | m4rtijn@gmail.com |
1eb1c4279ddd2ed7031dd62e0e077306308bacf3 | 0d16155ef6288829e3e36177580845e2fdac7d3a | /tutorial/snippets/urls_viewSet_router.py | a5ac3f254fadd4b7768dade6ff4e6c3b65116928 | [
"MIT"
] | permissive | BennyJane/django-demo | 71681cfadedff13ffa9631f3ddc6f9c45f77b031 | 7d697bd959e94f89cf3ea1b782143d0794efc1fb | refs/heads/main | 2023-02-10T00:24:35.299176 | 2021-01-14T14:06:43 | 2021-01-14T14:06:43 | 324,715,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,104 | py | # !/usr/bin/env python
# -*-coding:utf-8 -*-
# PROJECT : tutorial
# Time :2020/12/27 12:33
# Warning :The Hard Way Is Easier
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from snippets import views
"""
因为我们使用的是ViewSet类而不是View类,所以实际上我们不需要自己设计URL conf。
使用Router类可以自动处理将资源连接到视图和url的约定。我们需要做的就是向路由器注册适当的视图集,
然后让其余的工作完成。
向路由器注册视图集类似于提供urlpattern。我们包含两个参数-视图的URL前缀和视图集本身。
DefaultRouter我们正在使用的类还会自动为我们创建API根视图,
因此我们现在可以api_root从views模块中删除方法。
"""
# Create a router and register our viewsets with it.
router = DefaultRouter()
router.register(r'snippets', views.SnippetViewSet)
router.register(r'users', views.UserViewSet)
# The API URLs are now determined automatically by the router.
urlpatterns = [
path('', include(router.urls)),
]
| [
"3355817143@qq.com"
] | 3355817143@qq.com |
94da85797138607175d013b490d479267ed174cb | 24cffa6c6c6145958bfc527b202261cf59b9462c | /detection_algorithms/ipython_profile.py | f1af5dd4233ccd1b7b6d7e687b99b521db403dc2 | [
"MIT"
] | permissive | hanahs-deepfake-detection/detection-algorithms | 015aeed20de418e9be4cbe60197bb7633a86bd8c | 6d7ec53eaf333adb10a1aba448f80fceaf7722be | refs/heads/master | 2023-01-29T07:41:51.939768 | 2020-12-13T23:55:56 | 2020-12-13T23:55:56 | 284,211,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | from tensorflow import keras
from sys import path
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
# Absolute directory containing this file; used as the import root.
project_root = os.path.dirname(os.path.realpath(__file__))
# Make the project root and its sibling packages importable by bare module
# name (the project imports below depend on these entries being present).
path.append(project_root)
path.append(os.path.join(project_root, 'jitter-detection'))
path.append(os.path.join(project_root, 'common'))
import dataset_util
import spatial_transformer
import data_pipeline
| [
"loop.infinitely@gmail.com"
] | loop.infinitely@gmail.com |
96af681b63de66aaa9a753340fbfe7e4b071794c | 1a1a36fb99cf25d2522d5dadc9b5d816bb81ffff | /objects/tags/Layer.py | 6649cf63ba14b4f40e004b90e763b0f34f1407f6 | [] | no_license | arkimede/pysynfig | cae74eeca153f7c412cf6642a2092a9ee6948692 | 3bc46a34c275d44a69521afd63af2884e2187c51 | refs/heads/master | 2021-05-04T11:03:03.412344 | 2018-07-29T14:00:12 | 2018-07-29T14:00:12 | 52,307,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,030 | py | from treedict import TreeDict
class Layer:
    def __init__(self):
        """Create an empty layer; attributes are filled in later via setters."""
        self.type = None
        self.active = None
        self.exclude_from_rendering = None
        self.version = None
        self.desc = None
        self.HashParam = {}  # hash of all the params of the layer
        self.treeObj = TreeDict()
        self.zMax = None
        self.filenameParamImportLayer = None
        self.node = None
    def setNode(self,node):
        """Attach the backing node; the setters below mirror values into it."""
        self.node = node
    def getNode(self):
        return self.node
    def setZmax(self,zMax):
        self.zMax = zMax
    def getZmax(self):
        return self.zMax
    # Each setter below stores the value on the layer and, when a node is
    # attached, mirrors it into the node attribute of the same name via
    # node.set(...).
    def setType(self,type):
        self.type = type
        if self.node is not None:
            self.node.set('type', self.type)
    def getType(self):
        return self.type
    def setActive(self,active):
        self.active = active
        if self.node is not None:
            self.node.set('active', self.active)
    def getActive(self):
        return self.active
    def setExcludeFromRendering(self,efr):
        self.exclude_from_rendering = efr
        if self.node is not None:
            self.node.set('exclude_from_rendering', self.exclude_from_rendering)
    def getExcludeFromRendering(self):
        return self.exclude_from_rendering
    def setVersion(self, version):
        self.version = version
        if self.node is not None:
            self.node.set('version', self.version)
    def getVersion(self):
        return self.version
    def setDesc(self, desc):
        self.desc = desc
        if self.node is not None:
            self.node.set('desc', self.desc)
    def getDesc(self):
        return self.desc
    def getHashParam(self):
        """Return the whole name -> Param mapping."""
        return self.HashParam
    def addParam(self,param):
        # Params are keyed by their own .name attribute.
        self.HashParam[param.name] = param
    def getParam(self,paramName):
        """Return the Param named `paramName` (KeyError if absent)."""
        return self.HashParam[paramName]
    def getFilenameParamImportLayer(self):
        return self.filenameParamImportLayer
    def setFilenameParamImportLayer(self, filenameParamImportLayer):
        self.filenameParamImportLayer = filenameParamImportLayer
def getParamFilename(self):
tmpFilename = self.HashParam.get('filename')
if tmpFilename is not None:
return tmpFilename
else:
return self.filenameParamImportLayer
    def get(self, name):
        """Convenience lookup for a handful of well-known parameters.

        For "offset"/"origin"/"scale": return the animated waypoints when an
        animation is present, else the static vector. For "z_depth"/"amount":
        likewise, but with the static value. Implicitly returns None for any
        other name, or when the parameter has neither representation.
        """
        if name == "offset":
            # Offset lives inside the composite transformation param.
            transformation = self.getParam("transformation")
            offset = transformation.getComposite().getOffset()
            if offset.getAnimated() is not None:
                return offset.getAnimated().getWaypoint()
            elif offset.getVector() is not None:
                return offset.getVector()
        if name == "origin":
            origin = self.getParam("origin")
            if origin.getAnimated() is not None:
                return origin.getAnimated().getWaypoint()
            elif origin.getVector() is not None:
                return origin.getVector()
        if name == "scale":
            # Scale also lives inside the composite transformation param.
            transformation = self.getParam("transformation")
            scale = transformation.getComposite().getScale()
            if scale.getAnimated() is not None:
                return scale.getAnimated().getWaypoint()
            elif scale.getVector() is not None:
                return scale.getVector()
        if name == "z_depth":
            z_depth = self.getParam("z_depth")
            if z_depth.getAnimated() is not None:
                return z_depth.getAnimated().getWaypoint()
            elif z_depth.getValue() is not None:
                return z_depth.getValue()
        if name == "amount":
            amount = self.getParam("amount")
            if amount.getAnimated() is not None:
                return amount.getAnimated().getWaypoint()
            elif amount.getValue() is not None:
                return amount.getValue()
def getTagParam(self, pathTag):
tmp = pathTag.split(".",1)
paramRoot = tmp[0]
tags = tmp[1].split(".")
if paramRoot == "z_depth":
pass
elif paramRoot == "amount":
pass
elif paramRoot == "blend_method":
pass
elif paramRoot == "tranformation":
if len(tags) == 1:
if tags[0] == "composite":
return self.HashParam["transformation"].getComposite();
elif len(tags) == 2:
if tags[1] == "offset":
return self.HashParam["transformation"].getComposite().getOffset()
elif tags[1] == "angle":
return self.HashParam["transformation"].getComposite().getAngle()
elif tags[1] == "skew_angle":
return self.HashParam["transformation"].getComposite().getSkewAngle()
elif tags[1] == "scale":
return self.HashParam["transformation"].getComposite().getScale()
elif len(tags) == 3:
if tags[1] == "offset" and tags[2] == "animated":
return self.HashParam["transformation"].getComposite().getOffset().getAnimated()
| [
"a.caristia@gmail.com"
] | a.caristia@gmail.com |
d9ed52014372234569a2fdb6826d8be2dd77e58c | ac34cad5e20b8f46c0b0aa67df829f55ed90dcb6 | /tests/test_scene_v1/test_imports.py | 62072bc9b8c08ebf0cb9ec4e281a01b5d7abe925 | [
"MIT"
] | permissive | sudo-logic/ballistica | fd3bf54a043717f874b71f4b2ccd551d61c65008 | 9aa73cd20941655e96b0e626017a7395ccb40062 | refs/heads/master | 2023-07-26T19:52:06.113981 | 2023-07-12T21:32:56 | 2023-07-12T21:37:46 | 262,056,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | # Released under the MIT License. See LICENSE for details.
#
"""Testing asset manager functionality."""
from __future__ import annotations
import pytest
from batools import apprun
@pytest.mark.skipif(
    apprun.test_runs_disabled(), reason=apprun.test_runs_disabled_reason()
)
def test_imports() -> None:
    """Test imports for our featureset."""
    # Make sure our package and binary module can be cleanly imported by
    # themselves.
    # NOTE(review): apprun.python_command presumably executes the statement
    # in a separate Python process, so each import is exercised in isolation
    # - confirm against batools.apprun.
    apprun.python_command('import bascenev1', purpose='import testing')
    apprun.python_command('import _bascenev1', purpose='import testing')
| [
"ericfroemling@gmail.com"
] | ericfroemling@gmail.com |
ccaf7a8803c329684dc72d1d27bf2e6afda30f13 | 08441713096d3c0b68b96294e43278c04a086a96 | /models/__init__.py | 3139cc74fb630c167749d7eaf33cdf1e31cf6702 | [] | no_license | mir-of/oneflow-cifar | 15e7946650f2dd8cc6d9c2112cf0fadad851c3e8 | c50993f85e984daee25418e09bbd5eee832e39a1 | refs/heads/main | 2023-08-05T04:45:33.448384 | 2021-09-26T06:47:50 | 2021-09-26T06:47:50 | 410,485,038 | 0 | 0 | null | 2021-09-26T07:46:58 | 2021-09-26T07:43:14 | null | UTF-8 | Python | false | false | 450 | py | from .vgg import *
from .dpn import *
from .lenet import *
from .senet import *
from .pnasnet import *
from .densenet import *
from .googlenet import *
from .shufflenet import *
from .shufflenetv2 import *
from .resnet import *
from .resnext import *
from .preact_resnet import *
from .mobilenet import *
from .mobilenetv2 import *
from .efficientnet import *
from .regnet import *
from .dla_simple import *
from .dla import *
from .alexnet import *
| [
"1182563586@qq.com"
] | 1182563586@qq.com |
97fa5d4f14e9cdd298538a01c0c98d394fbc04dc | f56e4bb2d3a91b068292d698388ac5e82a40f078 | /inkshop/apps/website/migrations/0004_auto_20190506_1537.py | 2cb4eada120719587c7220cde82dba6bf8baa9c9 | [] | no_license | inkandfeet/inkshop | 979064eb902c86dc95a6399e79ac753efbe547d1 | 691187b3eb4435782f8054e6404f1203e7d0c383 | refs/heads/master | 2022-12-13T01:26:02.361970 | 2021-11-18T23:01:50 | 2021-11-18T23:01:50 | 175,481,726 | 1 | 1 | null | 2022-12-08T04:59:16 | 2019-03-13T18:59:17 | Python | UTF-8 | Python | false | false | 398 | py | # Generated by Django 2.2 on 2019-05-06 15:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: (re)declare Template.name as a unique
    CharField (max_length unchanged at 254)."""

    dependencies = [
        ('website', '0003_auto_20190506_1533'),
    ]
    operations = [
        migrations.AlterField(
            model_name='template',
            name='name',
            field=models.CharField(max_length=254, unique=True),
        ),
    ]
| [
"steven@quantumimagery.com"
] | steven@quantumimagery.com |
73c5d38830af8b2e003b8c6697313eacca30557d | 95761ba9ca92c9bf68f3fb88524ee01ddba9b314 | /admin-web/src/www/notasquare/urad_web/page_contexts/standard.py | 7fc6a3293fbb3dfc5d0d78499a8c1e9250bf93c3 | [] | no_license | duytran92-cse/nas-workboard | 918adf4b976f04a13dc756f8dc32aecf397c6258 | bebe7674a7c6e8a3776264f18a3b7ca6b417dc7e | refs/heads/master | 2022-10-23T01:02:39.583449 | 2020-06-14T19:25:01 | 2020-06-14T19:25:01 | 272,268,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,914 | py | from notasquare.urad_web.page_contexts import BasePageContext
class FullPageContext(BasePageContext):
    """Page context composed of three menu levels, a breadcrumb trail and a
    list of widgets; built on top of BasePageContext."""

    class Breadcrumb(object):
        """Ordered trail of navigation entries."""

        def __init__(self):
            self.entries = []

        def build(self):
            # Rebuilding simply restarts from an empty trail.
            self.entries = []

        def add_entry(self, name, title, url = ''):
            entry = {
                'name': name,
                'title': title,
                'url': url
            }
            self.entries.append(entry)

    class Menu(object):
        """A menu: a list of groups plus the currently selected group/entry."""

        class MenuGroup(object):
            """A named group of menu entries."""

            def __init__(self, name, title, url, iclass):
                self.name = name
                self.title = title
                self.url = url
                self.iclass = iclass
                self.entries = []

            def add_menu_entry(self, name, title, url):
                entry = {
                    'name': name,
                    'title': title,
                    'url': url
                }
                self.entries.append(entry)

        def __init__(self):
            self.groups = []
            self.group_selected = ''
            self.entry_selected = ''

        def build(self):
            pass

        def create_menu_group(self, name, title, url = '', iclass = ''):
            group = self.MenuGroup(name, title, url, iclass)
            self.groups.append(group)
            return group

        def set_group_selected(self, group):
            self.group_selected = group

        def set_entry_selected(self, entry):
            self.entry_selected = entry

    def __init__(self):
        super(FullPageContext, self).__init__()
        self.app_title = ''
        self.page_title = ''
        self.menu = self.Menu()
        self.submenu = self.Menu()
        self.subsubmenu = self.Menu()
        self.breadcrumb = self.Breadcrumb()
        self.widgets = []
        self.renderer = None

    def add_widget(self, widget):
        self.widgets.append(widget)
| [
"thanh.tran@etudiant.univ-lr.fr"
] | thanh.tran@etudiant.univ-lr.fr |
b3c27b4fb74b537090e0267cc2f1fb764bcf44fd | 51d0377511a5da902033fb9d80184db0e096fe2c | /06-importing-data-in-python-2/1-importing-data-from-the-internet/01-importing-flat-files-from-the-web.py | 1d8dcfe7ec9099bec27b509004633b3c95c02f18 | [] | no_license | sashakrasnov/datacamp | c28c6bda178163337baed646220b2f7dcc36047d | 759f4cec297883907e21118f24a3449d84c80761 | refs/heads/master | 2021-12-07T02:54:51.190672 | 2021-09-17T21:05:29 | 2021-09-17T21:05:29 | 157,093,632 | 6 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | '''
Importing flat files from the web: your turn!
You are about to import your first file from the web! The flat file you will import will be 'winequality-red.csv' from the University of California, Irvine's Machine Learning repository. The flat file contains tabular data of physiochemical properties of red wine, such as pH, alcohol content and citric acid content, along with wine quality rating.
The URL of the file is
| 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv'
After you import it, you'll check your working directory to confirm that it is there and then you'll load it into a pandas DataFrame.
Instructions
* Import the function urlretrieve from the subpackage urllib.request.
* Assign the URL of the file to the variable url.
* Use the function urlretrieve() to save the file locally as 'winequality-red.csv'.
* Execute the remaining code to load 'winequality-red.csv' in a pandas DataFrame and to print its head to the shell.
'''
# Import urlretrieve for downloading the remote file
from urllib.request import urlretrieve
# Import pandas
import pandas as pd
# Assign url of file: url
url = 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv'
# Save file locally (downloads over the network at import/run time)
urlretrieve(url, 'winequality-red.csv')
# Read file into a DataFrame and print its head
# (the dataset is semicolon-separated, hence sep=';')
df = pd.read_csv('winequality-red.csv', sep=';')
print(df.head())
| [
"a@skrasnov.com"
] | a@skrasnov.com |
4619ac7fea2889de98c92414a462e16a3068f021 | d21071464bef4f3fd51e554f280418d06975a77e | /leetcode/120 Triangle.py | 0c97ed50a118a07318564f5524b5511fecabc64a | [] | no_license | DeshErBojhaa/sports_programming | ec106dcc24e96231d447cdcac494d76a94868b2d | 96e086d4ee6169c0f83fff3819f38f32b8f17c98 | refs/heads/master | 2021-06-13T19:43:40.782021 | 2021-03-27T14:21:49 | 2021-03-27T14:21:49 | 164,201,394 | 1 | 0 | null | 2019-08-27T22:21:26 | 2019-01-05T09:39:41 | C++ | UTF-8 | Python | false | false | 493 | py | # 120. Triangle
class Solution:
    def minimumTotal(self, triangle: "List[List[int]]") -> int:
        """Return the minimal top-to-bottom path sum of `triangle`
        (LeetCode 120), moving to adjacent indexes on the row below.

        In-place dynamic programming: row i accumulates the cheapest path
        sum reaching each cell, so `triangle` IS MODIFIED by this call.

        Note: the annotation is quoted because the file never imports
        typing.List; the bare name raised NameError at import time.
        """
        for i in range(1, len(triangle)):
            for j in range(i + 1):
                best_parent = float('inf')
                if j < i:
                    # Parent directly above (exists unless j is the last cell).
                    best_parent = min(best_parent, triangle[i - 1][j])
                if j - 1 >= 0:
                    # Parent up-and-left (exists unless j == 0).
                    best_parent = min(best_parent, triangle[i - 1][j - 1])
                triangle[i][j] += best_parent
        # The answer is the cheapest accumulated sum on the last row.
        return min(triangle[-1])
| [
"noreply@github.com"
] | DeshErBojhaa.noreply@github.com |
0f1869a302c439f98e592e8158873b627110019a | 7491ceb405287660538e876317d3f69328757651 | /aydin/util/fast_uniform_filter/numba_cpu_uf.py | a6d5d1922fdb5f790dd69ce97387143a7f4ca625 | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] | permissive | royerlab/aydin | 4d0bd5cb1a3786cf32f1d8661d3a3aa13ec7cab1 | 9312f227605be26fce960373c1f29a71323da914 | refs/heads/master | 2023-04-29T20:45:42.515226 | 2023-02-16T22:21:07 | 2023-02-16T22:21:07 | 188,953,977 | 125 | 14 | BSD-3-Clause | 2023-03-15T01:04:16 | 2019-05-28T04:30:19 | Python | UTF-8 | Python | false | false | 7,380 | py | from math import ceil
import numba
import numpy
from numba import jit, prange
# Fast-math flags and error model shared by every @jit kernel in this module
# ('numpy' error model makes arithmetic errors behave like numpy's).
__fastmath = {'contract', 'afn', 'reassoc'}
__error_model = 'numpy'
def numba_cpu_uniform_filter(
    image, size=3, output=None, mode="nearest", cval=0.0, origin=0
):
    """Uniform (box) filter over every axis of `image`, numba-accelerated.

    Separable implementation: a 1-D running-mean pass is applied along each
    axis in turn, ping-ponging between two scratch buffers.

    NOTE(review): `output`, `mode`, `cval` and `origin` are accepted
    (presumably for scipy.ndimage.uniform_filter signature parity) but are
    effectively ignored - `output` is overwritten with None below and the
    others are never read; edge handling is always nearest/clamp (see
    _cpu_line_filter).
    """
    # Save original image dtype:
    original_dtype = image.dtype
    # Numba does not support float16 yet:
    dtype = numpy.float32 if original_dtype == numpy.float16 else original_dtype
    image = image.astype(dtype=dtype, copy=False)
    # Instantiates working images:
    image_a_0 = numpy.empty(image.shape, dtype=dtype)
    image_b_0 = numpy.empty(image.shape, dtype=dtype)
    # Current output (just a ref)
    output = None
    axes = list(range(image.ndim))
    if len(axes) == 1:
        # 1-D input: single pass, no buffer bookkeeping needed.
        _cpu_line_filter(image, image_a_0, size)
        output = image_a_0
    elif len(axes) > 0:
        for axis in axes:
            if axis == 0:
                # First pass reads the (converted) input and writes scratch A.
                image_a = image
                image_b = image_a_0
            # Per-axis filter size when `size` is a tuple, scalar otherwise.
            filter_size = size[axis] if isinstance(size, tuple) else size
            # lprint(f"axis: {axis}, filter_size: {filter_size}")
            # set the parallelism:
            parallelism = numba.get_num_threads()
            # lprint(f"Number of threads: {parallelism}")
            # max(1, int(0.9*multiprocessing.cpu_count()))
            uniform_filter1d_with_conditionals(
                image_a, image_b, filter_size, axis, parallelism=parallelism
            )
            output = image_b
            # Swap buffers: after the first pass the original input must not
            # be written to, so later passes ping-pong between A and B.
            if axis == 0:
                image_a = image_a_0
                image_b = image_b_0
            else:
                image_b, image_a = image_a, image_b
    else:
        # 0-dimensional input: nothing to filter.
        output = image.copy()
    # Make sure that output image has correct dtype:
    output = output.astype(dtype=original_dtype, copy=False)
    return output
def uniform_filter1d_with_conditionals(image, output, filter_size, axis, parallelism=8):
    """Dispatch a 1-D uniform filter along `axis` to the right numba kernel.

    The kernels always filter along the LAST axis, so the requested axis is
    swapped into last position first (numpy swapaxes returns views, so the
    kernels still write into `output` in place).

    NOTE(review): only ndim 1-4 is handled; higher-dimensional input falls
    through silently. Also, the ndim == 1 branch forwards a 1-D array to a
    kernel that slices rows with image[i, :] - looks inconsistent with the
    other branches; verify the 1-D path before relying on it.
    """
    if image.ndim == 1:
        _cpu_line_uniform_filter_without_loops(image, output, filter_size, parallelism)
    elif image.ndim == 2:
        prepared_image = image.swapaxes(0, 1) if axis == 0 else image
        prepared_output = output.swapaxes(0, 1) if axis == 0 else output
        _cpu_line_uniform_filter_without_loops(
            prepared_image, prepared_output, filter_size, parallelism
        )
    elif image.ndim == 3:
        prepared_image = image.swapaxes(axis, 2) if axis != 2 else image
        prepared_output = output.swapaxes(axis, 2) if axis != 2 else output
        _cpu_line_uniform_filter_with_2d_loop(
            prepared_image, prepared_output, filter_size, parallelism
        )
        # cpu_line_uniform_filter_with_2d_loop.parallel_diagnostics(level=4)
        # for key, value in cpu_line_filter.inspect_asm().items():
        #     print(f"{key} -> {value}")
    elif image.ndim == 4:
        prepared_image = image.swapaxes(axis, 3) if axis != 3 else image
        prepared_output = output.swapaxes(axis, 3) if axis != 3 else output
        _cpu_line_uniform_filter_with_3d_loop(
            prepared_image, prepared_output, filter_size, parallelism
        )
        # cpu_line_uniform_filter_with_3d_loop.parallel_diagnostics(level=4)
@jit(nopython=True, parallel=True, error_model=__error_model, fastmath=__fastmath)
def _cpu_line_uniform_filter_without_loops(image, output, filter_size, parallelism=8):
    """
    Numba jitted and parallelized method to apply uniform filter across
    last axis of the image provided. Doesn't return anything, output array
    should be provided as an argument.

    Rows are split into `parallelism` contiguous chunks, one per prange
    worker; the `i < length` guard handles the final (possibly short) chunk.

    Parameters
    ----------
    image
    output
    filter_size
    parallelism
    """
    length = image.shape[0]
    chunk_length = int(ceil(length / parallelism))
    for c in prange(parallelism):
        for k in range(chunk_length):
            i = k + c * chunk_length
            if i < length:
                input_line = image[i, :]
                output_line = output[i, :]
                _cpu_line_filter(input_line, output_line, filter_size)
    # print(cpu_line_filter.inspect_llvm())
@jit(nopython=True, parallel=True, error_model=__error_model, fastmath=__fastmath)
def _cpu_line_uniform_filter_with_2d_loop(image, output, filter_size, parallelism=8):
    """
    Numba jitted and parallelized method to apply uniform filter across the
    last axis of a 3-D image. Doesn't return anything, the output array must
    be provided as an argument.

    The first axis is split into `parallelism` contiguous chunks, one per
    prange worker; each worker filters every line of its rows.

    Parameters
    ----------
    image
    output
    filter_size
    parallelism
    """
    height = image.shape[0]
    width = image.shape[1]
    chunk_height = int(ceil(height / parallelism))
    for c in prange(parallelism):
        for k in range(chunk_height):
            y = k + c * chunk_height
            # Bounds-check BEFORE indexing: when height is not divisible by
            # parallelism the last chunk yields y >= height, and image[y]
            # must not be evaluated for those rows. (Previously the rows were
            # indexed before the guard, unlike the 3-D variant below.)
            if y < height:
                image_y = image[y]
                output_y = output[y]
                for x in range(width):
                    input_line = image_y[x]
                    output_line = output_y[x]
                    _cpu_line_filter(input_line, output_line, filter_size)
@jit(nopython=True, parallel=True, error_model=__error_model, fastmath=__fastmath)
def _cpu_line_uniform_filter_with_3d_loop(image, output, filter_size, parallelism=8):
    """
    Numba jitted and parallelized method to apply uniform filter across
    last axis of the image provided. Doesn't return anything, output array
    should be provided as an argument.

    The first (depth) axis is split into `parallelism` contiguous chunks,
    one per prange worker; the `z < depth` guard handles the final short
    chunk before any indexing happens.

    Parameters
    ----------
    image
    output
    filter_size
    """
    depth = image.shape[0]
    height = image.shape[1]
    width = image.shape[2]
    chunk_depth = int(ceil(depth / parallelism))
    for c in prange(parallelism):
        for k in range(chunk_depth):
            z = k + c * chunk_depth
            if z < depth:
                image_z = image[z]
                output_z = output[z]
                for y in range(height):
                    image_y = image_z[y]
                    output_y = output_z[y]
                    for x in range(width):
                        input_line = image_y[x]
                        output_line = output_y[x]
                        _cpu_line_filter(input_line, output_line, filter_size)
@jit(nopython=True, error_model=__error_model, fastmath=__fastmath)
def _cpu_line_filter(input_line, output_line, filter_size):
    """
    Numba jitted line filter implementation. Doesn't return anything,
    output array should be provided as an argument.

    Running-sum (O(n)) box filter: the window sum for element 0 is built
    explicitly, then each following element adds the sample entering the
    window and subtracts the one leaving it. Out-of-range indexes are
    clamped to the nearest edge ('nearest' boundary mode).

    Parameters
    ----------
    input_line
        1D input array
    output_line
        1D output array
    filter_size
        Size of the uniform filter
    """
    def safe_index(index, size):
        # Clamp `index` into [0, size - 1] (nearest-edge extension).
        if index < 0:
            return 0
        elif index >= size:
            return size - 1
        else:
            return index
    array_size = len(input_line)
    # Window spans [ind - left_offset, ind + right_offset] around each sample.
    left_offset, right_offset = (filter_size // 2, filter_size - (filter_size // 2) - 1)
    # Accumulate in float64 to limit drift of the running sum.
    tmp = numpy.float64(0.0)
    for ind in range(-left_offset, right_offset + 1):
        tmp += input_line[safe_index(ind, array_size)]
    output_line[0] = tmp / filter_size
    for ind in range(1, array_size):
        element_to_add_index = safe_index(ind + right_offset, array_size)
        element_to_sub_index = safe_index(ind - left_offset - 1, array_size)
        tmp += input_line[element_to_add_index]
        tmp -= input_line[element_to_sub_index]
        output_line[ind] = tmp / filter_size
| [
"noreply@github.com"
] | royerlab.noreply@github.com |
c3b80b4b8e6a8031d6fa9c18d7ecbfeb391996f9 | c22e52875567db55ee0e589a2720f4270e4c8d8b | /lsystem/Tree3psy.py | a5e921d9d56af8f2ba4e20453435ea096e905472 | [
"MIT"
] | permissive | Hiestaa/3D-Lsystem | fd42693921d25c14945a445a7555ab7c66fe3669 | f5ea9e7bb5b979a4b324c2b1794e99ebad82e117 | refs/heads/master | 2021-01-25T08:54:52.132486 | 2019-05-28T01:01:49 | 2019-05-28T01:01:49 | 14,755,648 | 8 | 6 | null | null | null | null | UTF-8 | Python | false | false | 2,191 | py | from lsystem.LSystem import LSystem
from Conf import Conf
import math
import time
class Tree3psy(LSystem):
    """Tree-shaped fractal, v3, psychedelic version.

    (Translated from French: "Fractale en forme d'arbre, v3, psyche
    version".) Defines the L-system alphabet, growth rule and the GLSL
    shaders that cycle the hue of the rendered tree over time.
    """
    def defineParams(self):
        """Set global L-system parameters (name, branch angle, depth, ...)."""
        self.LSName = "Tree 3"
        self.LSAngle = math.pi / 8
        self.LSSegment = 1
        self.LSSteps = 6
        self.LSStartingString = "F"
        self.LSStochastic = False
        self.LSStochRange = 0.1
    def createVars(self):
        """Map each alphabet symbol to a turtle action and its argument."""
        self.LSVars = {
            'F': self.turtle.forward,
            '+': self.turtle.rotZ,
            '-': self.turtle.irotZ,
            '^': self.turtle.rotY,
            '&': self.turtle.irotY,
            '<': self.turtle.rotX,
            '>': self.turtle.irotX,
            '|': self.turtle.rotX,
            '[': self.turtle.push,
            ']': self.turtle.pop,
            'N': self.turtle.nextColor,
        }
        self.LSParams = {
            'F': self.LSSegment,
            '+': self.LSAngle,
            '-': self.LSAngle,
            '&': self.LSAngle,
            '^': self.LSAngle,
            '<': self.LSAngle,
            '>': self.LSAngle,
            '|': self.LSAngle * 2,  # '|' rotates by twice the base angle
            '[': None,
            ']': None,
            'N': 0.01
        }
    def createRules(self):
        """Single production rule: each segment branches in four directions."""
        self.LSRules = {
            'F': "NFF[-FF][+FF][<FF][>FF]"
        }
    def createShaders(self):
        """GLSL shaders: the vertex shader shifts each vertex colour's hue
        with time (RGB -> HSV -> shifted hue -> RGB); the fragment shader
        passes the interpolated colour through."""
        self.LSVertexShader = """
        in vec4 gl_Vertex;
        in vec4 gl_Color;
        uniform float time;
        varying vec4 vertex_color;
        vec3 rgb2hsv(vec3 c)
        {
            vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0);
            vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g));
            vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r));
            float d = q.x - min(q.w, q.y);
            float e = 1.0e-10;
            return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x);
        }
        vec3 hsv2rgb(vec3 c)
        {
            vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
            vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
            return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
        }
        void main() {
            vec3 col = rgb2hsv(gl_Color.xyz);
            gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
            col.x += - (time * 0.1);
            while (col.x > 1) {
                col.x -= 1;
            }
            vertex_color = vec4(hsv2rgb(col), 1);
        }
        """
        self.LSPixelShader = """
        varying vec4 vertex_color;
        void main() {
            gl_FragColor = vertex_color;
        }
        """
        self.LSUniforms = {'time': lambda: time.time() - Conf.LAUNCH_TIME}
| [
"rom1guyot@gmail.com"
] | rom1guyot@gmail.com |
11b169f08af0908228373a5cdd8fdf3f2f22155b | a550aece79bda789826b463280b91abffbf2d372 | /books/python-3-oop-packt/Chapter9/9_06_for_loop_converter.py | ea4e69ec09f61a3e205acc2d8977f8e1db44f4e8 | [
"MIT"
] | permissive | phiratio/learn_python | 20376470eaa292c157fd01f52b3077e3a983cd5a | a32240d4355fb331805d515f96e1d009914e5c47 | refs/heads/master | 2022-11-27T07:07:45.712373 | 2020-12-03T22:04:31 | 2020-12-03T22:04:31 | 189,397,679 | 1 | 0 | MIT | 2022-11-22T04:40:27 | 2019-05-30T10:56:10 | Python | UTF-8 | Python | false | false | 130 | py | input_strings = ['1', '5', '28', '131', '3']
# Convert every numeric string to an int; comprehension replaces the manual
# append loop (same result, and no leaked loop variable).
output_integers = [int(num) for num in input_strings]
| [
"phiratio161@gmail.com"
] | phiratio161@gmail.com |
54e5cbb62f96000c6538f0a466271ab6364f22da | 0da9d2a15305421e224795cdf078838bd97eccc8 | /Algorithms/Implementation/ServiceLane.py | 43aba35eaec78d9f1fcda2e309591df45ad54718 | [] | no_license | LysanderGG/HackerRank | ac1300eea2f4e00f7d4e5084b5d570aa6fae0cfb | 039ec4414612cff84a941a7e7538fb36e10d427f | refs/heads/master | 2021-01-21T16:09:59.174131 | 2017-07-09T12:33:32 | 2017-07-09T12:33:32 | 91,877,258 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | #!/bin/python3
import sys
# First line: road length n (unused) and number of test cases t.
n, t = [int(token) for token in input().strip().split(' ')]
# Second line: the width of each lane segment.
width = [int(token) for token in input().strip().split(' ')]
for _ in range(t):
    i, j = [int(token) for token in input().strip().split(' ')]
    # The widest vehicle that fits is the narrowest segment in [i, j].
    print(min(width[i:j + 1]))
"lysandergc@gmail.com"
] | lysandergc@gmail.com |
ccb7910d0fc54211cda5bc1ceef754dd2cbff7a5 | 9099ed0407521ac40b88f3b92872307f66c57bf9 | /codes/contest/leetcode/nth-digit.py | f8a29836185512d9cf8730c8195521c4ace2bb80 | [] | no_license | jiluhu/dirtysalt.github.io | 0cea3f52d2c4adf2bbf5c23b74f4cb1070025816 | c026f2969c784827fac702b34b07a9268b70b62a | refs/heads/master | 2020-08-31T09:32:05.273168 | 2019-10-29T01:53:45 | 2019-10-29T01:53:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | #!/usr/bin/env python
# coding:utf-8
# Copyright (C) dirlt
class Solution:
    """
    @param n: a positive integer
    @return: the nth digit of the infinite integer sequence
    """
    def findNthDigit(self, n):
        """Return the n-th digit (1-based) of the sequence '123456789101112...'.

        Walk blocks of equal-width numbers (9 one-digit, 90 two-digit, ...)
        until the block containing position n is found, then index into the
        exact number.
        """
        width = 1            # digits per number in the current block
        block_count = 9      # how many numbers have this width
        cumulative = 9       # total digits up to and including this block
        previous_total = 0   # total digits before this block
        while n > cumulative:
            previous_total = cumulative
            width += 1
            block_count *= 10
            cumulative += width * block_count
        index = n - 1 - previous_total
        number = 10 ** (width - 1) + index // width
        return int(str(number)[index % width])
# Smoke test: print digits at a few known positions when run as a script.
if __name__ == '__main__':
    s = Solution()
    print(s.findNthDigit(5))
    print(s.findNthDigit(9))
    print(s.findNthDigit(10))
    print(s.findNthDigit(11))
    print(s.findNthDigit(12))
    print(s.findNthDigit(189))
    print(s.findNthDigit(190))
| [
"dirtysalt1987@gmail.com"
] | dirtysalt1987@gmail.com |
3c1eaa647307e482b6bac6a4dc2d4355e10fef59 | dac6227800cb7bbcc934ac94a158cc76535c76e2 | /day9/samochod2.py | 46cc0370d3fdcf9009e42b770d4ad830d8ecd84c | [] | no_license | marcinpgit/Python_days | b11fd5322d47f06e3292189fe8d1578171fc293c | 052993759cf6c79de237b9d391ee7bb5ec202ffc | refs/heads/master | 2021-01-24T02:39:29.058793 | 2018-02-28T20:16:56 | 2018-02-28T20:16:56 | 87,470,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | class Samochod(object):
    def __init__(self, marka, model, kolor):
        """Create a car with brand (marka), model and colour (kolor)."""
        self.marka = marka
        self.model = model
        self.kolor = kolor
        self.czy_jedzie = None  # "is it driving" flag; None until a command runs
        self.silnik = None      # engine; never assigned elsewhere in this class
    def jedz(self):
        """Drive: print '<brand> : Jadę' ("I'm driving") and set the flag."""
        print(self.marka, ": Jadę")
        self.czy_jedzie = True
    def zatrzymaj(self):
        """Stop: clear the driving flag (sets it to False, not None)."""
        self.czy_jedzie = False
| [
"marcinp2012@gmail.com"
] | marcinp2012@gmail.com |
8c6542ffe63e9431b97397a55b0fb74c081b825d | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/bar/_hoverinfo.py | db2a85fac06d1c15a1d5436cb0b08b73f46cefc0 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 523 | py | import _plotly_utils.basevalidators
class HoverinfoValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Validator for the 'hoverinfo' flaglist attribute of bar traces."""
    def __init__(self, plotly_name='hoverinfo', parent_name='bar', **kwargs):
        # Delegate to the generic flaglist validator with bar.hoverinfo's
        # schema: per-point arrays allowed, combinable flags x/y/z/text/name,
        # plus the standalone extras 'all'/'none'/'skip'. kwargs is expanded
        # last so a caller passing a duplicate keyword raises TypeError
        # rather than silently overriding a fixed value.
        super(HoverinfoValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=True,
            edit_type='none',
            extras=['all', 'none', 'skip'],
            flags=['x', 'y', 'z', 'text', 'name'],
            role='info',
            **kwargs
        )
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
8ec8c3845ea91a7c5096d3fa8666cfb38d67e4db | a3eccc652f83815318bdb033a33573c5b1e073e9 | /nac/caravans/migrations/0011_auto_20210315_1936.py | 18519a7391d65e423a74c982692365f5b16234f9 | [] | no_license | jsvelu/coms-dev--old | 8139fa511e2985b4d71550f1c59402069d09edf3 | de300ad6ef947d29380972a6efe809f4ef05d7e1 | refs/heads/main | 2023-07-17T20:44:36.101738 | 2021-09-04T21:56:38 | 2021-09-04T21:56:38 | 403,158,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,768 | py | # Generated by Django 2.2.7 on 2021-03-15 08:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration for 'caravans' (2021-03-15).

    Every generated operation was commented out before this migration was
    committed, so it is deliberately a no-op: it only records its position
    in the migration graph after 0010_auto_20210315_1911.
    """

    dependencies = [
        ('caravans', '0010_auto_20210315_1911'),
    ]

    # NOTE: the disabled operations were: CreateModel('SKUPrice');
    # replacing the Series imperial dimension fields (feet/inches pairs for
    # length, width, height, a-frame and bumper lengths, plus min/max
    # ball/tare weights) with single millimetre / kilogram IntegerFields;
    # adding Series.series_type; changing Series.production_unit to a
    # choices IntegerField; and DeleteModel('ProductionUnit'). They were
    # all commented out, leaving this operations list empty on purpose.
    operations = []
| [
"velu@qrsolutions.in"
] | velu@qrsolutions.in |
feaffe36d86cd27a0ee01c95ab7eafe90316f53b | 3b9b4049a8e7d38b49e07bb752780b2f1d792851 | /src/components/cronet/tools/cr_cronet.py | 229154bf340936ba576a2d1dcb8a00f17714cd03 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | webosce/chromium53 | f8e745e91363586aee9620c609aacf15b3261540 | 9171447efcf0bb393d41d1dc877c7c13c46d8e38 | refs/heads/webosce | 2020-03-26T23:08:14.416858 | 2018-08-23T08:35:17 | 2018-09-20T14:25:18 | 145,513,343 | 0 | 2 | Apache-2.0 | 2019-08-21T22:44:55 | 2018-08-21T05:52:31 | null | UTF-8 | Python | false | false | 5,329 | py | #!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
cr_cronet.py - cr - like helper tool for cronet developers
"""
import argparse
import os
import sys
def run(command, extra_options=''):
  """Echo *command* (with *extra_options* appended) and execute it in a shell.

  Returns the raw os.system() wait status: 0 on success, non-zero (an
  encoded exit status) on failure.
  """
  command = command + ' ' + extra_options
  # Bug fix: 'print command' is a Python-2-only statement. print(command)
  # behaves identically under Python 2 (grouping parens) and Python 3.
  print(command)
  return os.system(command)
def build(out_dir, test_target, extra_options=''):
  """Invoke ninja in *out_dir* to build *test_target*."""
  ninja_cmd = 'ninja -C {0} {1}'.format(out_dir, test_target)
  return run(ninja_cmd, extra_options)
def install(out_dir, release_arg):
  """Install the Cronet test APKs via adb; stop at the first failure.

  Returns 0 if both installs succeed, otherwise the first non-zero status
  (same short-circuit behaviour as `run(a) or run(b)`).
  """
  cmd = 'BUILDTYPE={0} build/android/adb_install_apk.py {1} --apk={2}'
  build_dir = out_dir.split('/', 1)[1]  # the 'Foo' part of 'out/Foo'
  status = run(cmd.format(build_dir, release_arg, 'CronetTest.apk'))
  if status:
    return status
  return run(cmd.format(build_dir, release_arg, 'ChromiumNetTestSupport.apk'))
def test(out_dir, extra_options):
  """Run the on-device Cronet instrumentation tests."""
  runner = out_dir + '/bin/run_cronet_test_instrumentation_apk '
  return run(runner + extra_options)
def test_ios(out_dir, extra_options):
  """Run the Cronet test app inside the iOS simulator."""
  app_path = out_dir + '/cronet_test.app'
  return run(out_dir + '/iossim ' + app_path, extra_options)
def debug(extra_options):
  """Attach adb_gdb to the CronetTest activity on a connected device."""
  gdb_cmd = ('build/android/adb_gdb --start '
             '--activity=.CronetTestActivity '
             '--program-name=CronetTest '
             '--package-name=org.chromium.net')
  return run(gdb_cmd, extra_options)
def stack(out_dir):
  """Symbolize the device's current logcat output with the stack script."""
  symbolizer = ' third_party/android_platform/development/scripts/stack'
  return run('adb logcat -d | CHROMIUM_OUTPUT_DIR=' + out_dir + symbolizer)
def main():
  """Parse the cr_cronet CLI and dispatch to the requested helper.

  Returns the command's exit status, or 1 after printing help when the
  command is not valid for the current platform.
  NOTE(review): the bare 'print options' statements below are Python-2-only
  syntax, so this module cannot run under Python 3 as-is.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('command',
                      choices=['gyp',
                               'gn',
                               'sync',
                               'build',
                               'install',
                               'proguard',
                               'test',
                               'build-test',
                               'stack',
                               'debug',
                               'build-debug'])
  parser.add_argument('-g', '--gn', action='store_true',
                      help='use gn output directory suffix')
  parser.add_argument('-d', '--out_dir', action='store',
                      help='name of the build directory')
  parser.add_argument('-i', '--iphoneos', action='store_true',
                      help='build for physical iphone')
  parser.add_argument('-r', '--release', action='store_true',
                      help='use release configuration')
  options, extra_options_list = parser.parse_known_args()
  print options
  print extra_options_list
  # 'is_os' really means "host is macOS", which selects the iOS target;
  # any other host builds for Android.
  is_os = (sys.platform == 'darwin')
  if is_os:
    target_os = 'ios'
    test_target = 'cronet_test'
    gn_args = 'target_cpu = "x64" '
    out_dir_suffix = '-iphonesimulator'
    if options.iphoneos:
      gn_args = 'target_cpu = "arm64" '
      out_dir_suffix = '-iphoneos'
  else:
    target_os = 'android'
    test_target = 'cronet_test_instrumentation_apk'
    gn_args = 'use_errorprone_java_compiler=true '
    out_dir_suffix = ''
  # Shared build-system arguments for GYP and GN respectively.
  gyp_defines = 'GYP_DEFINES="OS=' + target_os + ' enable_websockets=0 '+ \
      'disable_file_support=1 disable_ftp_support=1 '+ \
      'enable_errorprone=1 use_platform_icu_alternatives=1 ' + \
      'disable_brotli_filter=1"'
  gn_args += 'target_os="' + target_os + '" enable_websockets=false '+ \
      'disable_file_support=true disable_ftp_support=true '+ \
      'use_platform_icu_alternatives=true '+ \
      'disable_brotli_filter=true'
  extra_options = ' '.join(extra_options_list)
  if options.gn:
    out_dir_suffix += "-gn"
  if options.release:
    out_dir = 'out/Release' + out_dir_suffix
    release_arg = ' --release'
    gn_args += ' is_debug=false '
  else:
    out_dir = 'out/Debug' + out_dir_suffix
    release_arg = ''
  # An explicit -d/--out_dir overrides the derived directory name.
  if options.out_dir:
    out_dir = options.out_dir
  # Platform-independent commands.
  if (options.command=='gyp'):
    return run (gyp_defines + ' gclient runhooks')
  if (options.command=='gn'):
    return run ('gn gen ' + out_dir + ' --args=\'' + gn_args + '\'')
  if (options.command=='sync'):
    return run ('git pull --rebase && ' + gyp_defines + ' gclient sync')
  if (options.command=='build'):
    return build(out_dir, test_target, extra_options)
  # Android-only commands (device install/test/debug/symbolize).
  if (not is_os):
    if (options.command=='install'):
      return install(out_dir, release_arg)
    if (options.command=='proguard'):
      return run ('ninja -C ' + out_dir + ' cronet_sample_proguard_apk')
    if (options.command=='test'):
      return install(out_dir, release_arg) or test(out_dir, extra_options)
    if (options.command=='build-test'):
      return build(out_dir, test_target) or install(out_dir, release_arg) or \
          test(out_dir, extra_options)
    if (options.command=='stack'):
      return stack(out_dir)
    if (options.command=='debug'):
      return install(out_dir, release_arg) or debug(extra_options)
    if (options.command=='build-debug'):
      return build(out_dir, test_target) or install(out_dir, release_arg) or \
          debug(extra_options)
  else:
    # iOS supports only the simulator test commands.
    if (options.command=='test'):
      return test_ios(out_dir, extra_options)
    if (options.command=='build-test'):
      return build(out_dir, test_target) or test_ios(out_dir, extra_options)
  parser.print_help()
  return 1
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit status.
  exit_code = main()
  sys.exit(exit_code)
| [
"changhyeok.bae@lge.com"
] | changhyeok.bae@lge.com |
class Solution:
    """
    @param nums: A list of integers
    @return: An integer denote the sum of maximum subarray
    """
    def maxSubArray(self, nums):
        """Kadane's algorithm: one O(n) pass tracking the best running sum.

        Returns None for an empty or missing input; the subarray is
        otherwise required to be non-empty.
        """
        if not nums:
            return None
        best = -float('inf')
        running = 0
        for value in nums:
            running += value
            if running > best:
                best = running
            # A negative running sum can never help a later subarray.
            if running < 0:
                running = 0
        return best
"umnstao@gmail.com"
] | umnstao@gmail.com |
e89da4771364c8b06503c64cdd3b8a833395b2eb | bb7712c8fab2380ffd37e53136097d8d322a73e7 | /accounts/views.py | f755f090239a0ce3783c5338cfb0221261aefc51 | [] | no_license | nitin1011/Daily-Kart | 5dfaad06c4ab7ea236a8f1b0e29aaea4baba0b81 | 59859bd2dc66563ff1ab0649591e4b19b6b4a85b | refs/heads/master | 2020-08-15T09:52:22.826037 | 2019-10-15T14:56:28 | 2019-10-15T14:56:28 | 215,320,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,552 | py | from django.contrib import messages, auth
from django.contrib.auth.forms import PasswordChangeForm
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from .models import Account
# Create your views here.
def register(request):
    """Handle account sign-up.

    POST: validate the form (terms accepted, passwords match, unique
    username/email, 10-character mobile), then create the auth User and the
    matching Account profile; delivery accounts also record a shop choice.
    Every failure path flashes a message and redirects back to 'register'.
    GET: render the form with the shopkeeper list for the shop selector.
    NOTE(review): mobile uniqueness is not checked here even though
    edit_account enforces it — confirm whether that is intentional.
    """
    if request.method == 'POST':
        username = request.POST['username']
        firstname = request.POST['firstname']
        lastname = request.POST['lastname']
        email = request.POST['email']
        mobile = request.POST['mobile']
        state = request.POST['state']
        city = request.POST['city']
        area = request.POST['area']
        address = request.POST['address']
        password = request.POST['password']
        password2 = request.POST['password2']
        category = request.POST['category']
        if 'terms' in request.POST:
            if password == password2:
                if User.objects.filter(username=username).exists():
                    messages.error(request, 'That username is taken')
                    return redirect('register')
                else:
                    if User.objects.filter(email=email).exists():
                        messages.error(request, 'That email is already being used ')
                        return redirect('register')
                    else:
                        # Only the length is validated, not that the value
                        # is numeric.
                        if len(mobile) != 10:
                            messages.error(request, 'Mobile no. is incorrect')
                            return redirect('register')
                        else:
                            user = User.objects.create_user(username=username, password=password, email=email,
                                                            first_name=firstname, last_name=lastname)
                            user.save()
                            account = Account(user=user, username=username, firstname=firstname, lastname=lastname,
                                              email=email, mobile=mobile, state=state, city=city, area=area, address=address,
                                              category=category)
                            # Delivery agents are attached to a shop chosen
                            # on the form.
                            if category == 'delivery':
                                account.shop = request.POST['shop']
                            account.save()
                            messages.success(request, "Your account has been created successfully")
                            return redirect('login')
            else:
                messages.error(request, 'Password do not match')
                return redirect('register')
        else:
            messages.error(request, 'you have to accept the Terms and Conditions')
            return redirect('register')
    else:
        accounts = Account.objects.filter(category='shopkeeper')
        # NOTE(review): accounts[0] raises IndexError when no shopkeeper
        # accounts exist yet — confirm a guard is not needed.
        context = {'shop1': accounts[0], 'shops': accounts[1:]}
        return render(request, 'accounts/register.html', context)
def login(request):
    """Authenticate a user, requiring the chosen role to match the account."""
    if request.method != 'POST':
        return render(request, 'accounts/login.html')

    username = request.POST['username']
    password = request.POST['password']
    category = request.POST['category']

    user = auth.authenticate(username=username, password=password)
    if user is None:
        messages.error(request, 'Username/Password is incorrect')
        return redirect('login')

    account = Account.objects.get(user=user)
    if account.category != category:
        # The credentials are valid but the selected role does not match.
        s = "you are not " + str(category)
        messages.error(request, s)
        return redirect('login')

    auth.login(request, user)
    return redirect('account')
@login_required
def logout(request):
    """End the current session and return the visitor to the login page."""
    auth.logout(request)
    return redirect('login')
@login_required
def user_account(request):
    """Render the dashboard for the logged-in user's account profile."""
    account = Account.objects.get(user=request.user)
    context = {'account': account}
    # Flag the template context for the role-specific sections.
    if account.category == 'shopkeeper':
        context['shop'] = True
    elif account.category == 'delivery':
        context['delivery'] = True
    return render(request, 'accounts/user_account.html', context)
@login_required
def edit_account(request):
    """Update the logged-in user's Account profile.

    POST: reject an email or mobile already used by someone else, otherwise
    copy the submitted fields onto the Account and redirect to 'account'.
    GET: render the edit form pre-filled with the current profile.
    NOTE(review): only Account.email is updated — the auth User.email is
    left unchanged, so the two can drift apart; confirm this is intended.
    """
    if request.method == 'POST':
        firstname = request.POST['firstname']
        lastname = request.POST['lastname']
        email = request.POST['email']
        mobile = request.POST['mobile']
        state = request.POST['state']
        city = request.POST['city']
        area = request.POST['area']
        address = request.POST['address']
        account = Account.objects.get(user=request.user)
        # 'exists and not my own current value' = taken by another user.
        if User.objects.filter(email=email).exists() and account.email != email:
            messages.error(request, 'That email is already being used ')
            return redirect('edit-account')
        else:
            if Account.objects.filter(mobile=mobile).exists() and account.mobile != mobile:
                messages.error(request, 'That mobile is already being used ')
                return redirect('edit-account')
            else:
                account.firstname = firstname
                account.lastname = lastname
                account.email = email
                account.mobile = mobile
                account.state = state
                account.city = city
                account.area = area
                account.address = address
                account.save()
                return redirect('account')
    else:
        account = Account.objects.get(user=request.user)
        context = {'account': account}
        return render(request, 'accounts/edit_account.html', context)
@login_required
def change_password(request):
    """Let the logged-in user change their password.

    The old password is re-verified via authenticate(); on success the new
    password is saved and the user is sent to 'login' (the session auth
    hash is now stale, so a fresh login is required).
    """
    if request.method != 'POST':
        return render(request, 'accounts/change_password.html')

    password = request.POST['old']
    password1 = request.POST['password1']
    password2 = request.POST['password2']
    user = auth.authenticate(username=request.user.username, password=password)
    if user != request.user:
        # authenticate() returns None for a wrong old password.
        messages.error(request, 'Wrong password')
        return redirect('change-password')
    if password1 != password2:
        # Bug fix: this branch previously redirected silently with no
        # feedback; report the mismatch using the same wording as the
        # registration form.
        messages.error(request, 'Password do not match')
        return redirect('change-password')
    user.set_password(password1)
    user.save()
    return redirect('login')
@login_required
def delete_account(request):
    """Permanently delete the current user's auth record (and, via the
    FK cascade, their Account profile), then go back to the home page."""
    request.user.delete()
    return redirect('home')
| [
"nitinjethwani10@gmail.com"
] | nitinjethwani10@gmail.com |
d34c7fbf8bdddb35c1186c63218255db184aa210 | 375a278d49f346695b15761bfa81783c619c08cb | /hope/settings.py | 56302193290ae45be91d6d1b59314e476e97e31a | [
"MIT"
] | permissive | totalgood/hope | 03b4388e430d704a1766fdae9ba5cbb7d0a73789 | fa898f286c62d6ff39263a51a8fff2267cd16779 | refs/heads/master | 2021-01-18T02:49:13.045272 | 2016-10-20T05:36:42 | 2016-10-20T05:36:42 | 63,300,461 | 1 | 0 | null | 2016-07-14T03:53:13 | 2016-07-14T03:53:13 | null | UTF-8 | Python | false | false | 5,679 | py | """
Django settings for hope project.
Generated by 'django-admin startproject' using Django 1.9.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from .constants import DB_PATH, PROJECT_ROOT, MODULE_ROOT
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project paths and core flags.
BASE_DIR = PROJECT_ROOT
APP_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): STATIC_URL and STATICFILES_DIRS are assigned again further
# down in this file; these first assignments are dead (the later ones win).
STATIC_URL = '/static/'
STATIC_ROOT = os.path.normpath(os.path.join(BASE_DIR, 'static'))
STATICFILES_DIRS = ()
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = '-ljr#li4@^_3zch&+&f4m+o9tfc4yp61#t36=6v@z5nw52s*gi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
SITE_ROOT = os.path.dirname(BASE_DIR)
SITE_NAME = os.path.basename(BASE_DIR)
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
    'pipeline',
    'rest_framework',
    'chatterbot.ext.django_chatterbot',
    'chatterbot_app',
    # 'predict',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hope.urls'
# App-dir template loading only; no project-level template directories.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'hope.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# SQLite database at the path computed in .constants.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': DB_PATH,
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# These assignments override the earlier STATIC_URL / STATICFILES_DIRS
# definitions near the top of the file.
STATIC_URL = '/static/'
# STATIC_ROOT = os.path.normpath(os.path.join(BASE_DIR, 'static'))
STATICFILES_DIRS = (
    os.path.normpath(os.path.join(BASE_DIR, 'static')),
    os.path.normpath(os.path.join(BASE_DIR, APP_NAME, 'static')),
    os.path.normpath(os.path.join(BASE_DIR, 'chatterbot_app', 'static')),
)
# Django Pipeline (and browserify)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'pipeline.finders.PipelineFinder',
)
# browserify-specific
PIPELINE_COMPILERS = (
    'pipeline_browserify.compiler.BrowserifyCompiler',
)
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.NoopCompressor'
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.uglifyjs.UglifyJSCompressor'
# In DEBUG, run browserify with the babelify transform (for JSX/ES6).
if DEBUG:
    PIPELINE_BROWSERIFY_ARGUMENTS = '-t babelify'
# NOTE(review): both the legacy PIPELINE_CSS/PIPELINE_JS settings and the
# newer single PIPELINE dict are defined here; which one is honoured
# depends on the installed django-pipeline version — confirm and drop the
# unused set.
PIPELINE_CSS = {
    'hope_css': {
        'source_filenames': (
            'css/style.css',
        ),
        'output_filename': 'css/hope_css.css',
    },
}
PIPELINE_JS = {
    'hope_js': {
        'source_filenames': (
            'js/bower_components/jquery/dist/jquery.min.js',
            'js/bower_components/react/JSXTransformer.js',
            'js/bower_components/react/react-with-addons.js',
            'js/app.browserify.js',
        ),
        'output_filename': 'js/hope_js.js',
    }
}
PIPELINE = {
    'PIPELINE_ENABLED': True,
    'STYLESHEETS': {
        'hopestyle': {
            'source_filenames': (
                'css/bootstrap.css',
                # 'css/colors/*.css',
                # 'css/layers.css'
            ),
            'output_filename': 'css/compressed_hopestyle.css',
            'extra_context': {
                'media': 'screen,projection',
            },
        },
    },
    'JAVASCRIPT': {
        'hope': {
            'source_filenames': (
                'js/jquery.js',
                'js/bootstrap.js',
                # 'js/d3.js',
                # 'js/collections/*.js',
                # 'js/application.js',
            ),
            'output_filename': 'js/compresed_hope.js',
        }
    }
}
| [
"github@totalgood.com"
] | github@totalgood.com |
93d617436c5e5dc0c175b9a98b88b208e4b47e12 | a3a741f27db3a8669bc49a6eb928d8b34d1ea872 | /api/accounts/admin.py | 5a66a4c4555970ff2031b2bfe70511bf34e5a1e3 | [
"MIT"
] | permissive | ectvalenz/virtual-queue | 2f77a007d4a925e9db1a1f27fbe68c5f514ed250 | b49d4bfb6f45b177b5744273316716effe306161 | refs/heads/main | 2023-06-18T17:26:14.922319 | 2021-07-05T08:55:14 | 2021-07-05T08:55:14 | 370,253,647 | 0 | 0 | MIT | 2021-05-24T06:41:43 | 2021-05-24T06:41:42 | null | UTF-8 | Python | false | false | 1,484 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import Account, AccountBarcode
# Shared admin fieldset tuples, reused by both the change form (fieldsets)
# and the add form (add_fieldsets) of AccountAdmin below.
PERSONAL_INFO_FIELDS = (
    'Personal info', {
        'fields': (
            'first_name',
            'last_name',
            'preferred_name',
            'secondary_email',
            'region',
            'country',
            'longitude',
            'latitude',
        )
    }
)
PERMISSIONS_FIELDS = (
    'Permissions', {
        'fields': (
            'is_active',
            'is_staff',
            'is_superuser',
            'groups',
            'user_permissions',
        )
    }
)
class AccountAdmin(UserAdmin):
    """Admin configuration for the email-based Account user model."""
    model = Account
    # Columns shown on the account list page.
    list_display = (
        'email',
        'preferred_name',
        'is_active',
    )
    list_filter = (
        'is_active',
    )
    # Change-form layout: credentials, then the shared personal-info and
    # permission groups, then the read-mostly timestamps.
    fieldsets = (
        (None, {
            'fields': (
                'email',
                'password',
            )
        }),
        PERSONAL_INFO_FIELDS,
        PERMISSIONS_FIELDS,
        ('Important dates', {
            'fields': (
                'last_login',
                'date_joined',
            )
        }),
    )
    # Add-form layout: email plus the two password-confirmation fields.
    add_fieldsets = (
        (None, {
            'fields': (
                'email',
                'password1',
                'password2',
            )
        }),
        PERSONAL_INFO_FIELDS,
        PERMISSIONS_FIELDS,
    )
# Register the custom admin for Account; AccountBarcode uses the default.
admin.site.register(Account, AccountAdmin)
admin.site.register(AccountBarcode)
"jayragaile@gmail.com"
] | jayragaile@gmail.com |
982e419da1ff1dff2d79c91530aea1e32e474040 | e4bc9d7f2e177bcd9850ffa12e5b2ddabb3f98ab | /03. AI/03. Deep Learning/2. Tensorflow/1. perception/3_variable_initialize.py | bc899875d85558ad556152f7ec990b90b56d43e3 | [] | no_license | yameenjavaid/bigdata2019 | 94b80b570b76cb174fcacddf8c8c1711a72e77b4 | 3565a1f10162d0ad147a9f189536b858ebe04ffa | refs/heads/master | 2022-01-13T15:39:09.793982 | 2019-04-29T00:14:07 | 2019-04-29T00:14:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | import tensorflow as tf
# Constant tensor: a 3x3 matrix with every element equal to 0.
W1 = tf.zeros((3,3))
# Variable tensor: a 2x2 matrix with every element equal to 0.
W2 = tf.Variable(tf.zeros((2,2)), name='weights')

# TF1 graph mode: an InteractiveSession becomes the default session, so
# Tensor.eval() below works without passing a session explicitly.
# Variables must be initialized before they can be read.
session = tf.InteractiveSession()
session.run(tf.global_variables_initializer())
print(W1.eval())
print(W2.eval())
session.close()
"bgd0706@hanmail.net"
] | bgd0706@hanmail.net |
f98a5bab80293aea31b98ba8f01b2fadc74d4950 | 9f7b78421cb2eb84ce438bfe50e8c613591616d2 | /solutions/helper/getInput/getInput.py | 2fa9a353935aeb193d8d3ed01cbf716d72cd0803 | [] | no_license | iCodeIN/aoc2020-2 | 7074c8c2ad20c077fa615f78fecf5fa25177a70d | c441865ecf44d29dbde734d5bd933412a4b67757 | refs/heads/master | 2023-04-02T07:18:54.475024 | 2020-12-18T05:38:59 | 2020-12-18T05:38:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,860 | py | import os
import sys
from dotenv import load_dotenv
from requests import get
YEAR = 2020
def get_cookie():
    """Return the Advent of Code session cookie, loading it from .env."""
    load_dotenv()
    cookie = os.getenv("COOKIE")
    return cookie
def download(url: str, file_name: str):
    """Fetch *url* (authenticated with the session cookie) into *file_name*."""
    jar = {"session": get_cookie()}
    # The file is opened before the request, matching a create-then-fetch
    # order, and the body is written in binary mode.
    with open(file_name, "wb") as out:
        out.write(get(url, cookies=jar).content)
def create_source_file(d: int):
    """Write a boilerplate solution file ``day<d>.py`` for puzzle day *d*.

    The scaffold imports the shared helpers, binds DAY and the puzzle
    input lines, and stubs part1/part2 followed by submit() calls.
    """
    # Bug fix: removed a leftover debug statement, print('here', d), that
    # polluted stdout on every scaffold.
    with open(f"day{d}.py", "w") as file:
        lines = [
            "from typing import List, Dict, Set\n",
            "from collections import *\n",
            "from functools import lru_cache\n",
            "from pprint import pprint as pp\n",
            "from math import *\n",
            "from helper.submit.submit import *\n",
            "from utils import *\n",
            "\n\n",
            f"DAY = {d}\n",
            "setup(DAY)\n",
            "lines = read_file(DAY)\n",
            "\n\n",
            "# Part 1\n",
            "#"*50,
            "\ndef part1(lines: List[str]) -> int:",
            "\n    return float(\"inf\")",
            "\n"*2,
            "submit(1, part1(lines))\n"
            "\n\n",
            "# Part 2\n",
            "#"*50,
            "\ndef part2(lines: List[str]) -> int:",
            "\n    return float(\"inf\")",
            "\n"*2,
            "submit(2, part2(lines))\n"
            "\n\n",
        ]
        file.writelines(lines)
def download_input_for_day(day: int):
    """Download the puzzle input for *day*.

    Returns False when the input is already cached on disk, True after a
    fresh download.
    """
    url = f"https://adventofcode.com/{YEAR}/day/{day}/input"
    target = f"./inputs/input{day}.txt"
    if os.path.exists(target):
        return False
    download(url, target)
    return True
def get_day_from_args():
    """Parse the puzzle day number from the first CLI argument."""
    day_arg = sys.argv[1]
    return int(day_arg)
# Script entry: fetch the requested day's input, and scaffold a solution
# file only on first download.
day = get_day_from_args()
if not download_input_for_day(day):
    print("input already exists")
else:
    create_source_file(day)
| [
"10962267+arora-aditya@users.noreply.github.com"
] | 10962267+arora-aditya@users.noreply.github.com |
6fc5add1f3c19ca1e8d343dff0a13cdb8d66f219 | 2293c76c3d18e2fcd44ded90bd40113d26285663 | /pyeccodes/defs/grib2/tables/0/3_15_table.py | 78202cfa9827d549542f8f260d35fba8f0c0f7dc | [
"Apache-2.0"
def load(h):
    """Return GRIB2 code table 3.15 (vertical coordinate types).

    Yields a tuple of dicts with keys 'abbr' (abbreviation — usually the
    numeric code, 'pt'/'pv' for the named levels, None for Missing),
    'code' (the table value) and 'title' (description). *h* is accepted
    for interface compatibility and is not used.
    """
    rows = (
        (20, 20, 'Temperature K'),
        (100, 100, 'Pressure Pa'),
        (101, 101, 'Pressure deviation from mean sea level Pa'),
        (102, 102, 'Altitude above mean sea level m'),
        (103, 103, 'Height above ground m'),
        (104, 104, 'Sigma coordinate'),
        (105, 105, 'Hybrid coordinate'),
        (106, 106, 'Depth below land surface m'),
        ('pt', 107, 'Potential temperature (theta) K'),
        (108, 108, 'Pressure deviation from ground to level Pa'),
        ('pv', 109, 'Potential vorticity K m-2 kg-1 s-1'),
        (110, 110, 'Geometrical height m'),
        (111, 111, 'Eta coordinate'),
        (112, 112, 'Geopotential height gpm'),
        (160, 160, 'Depth below sea level m'),
        (None, 255, 'Missing'),
    )
    return tuple({'abbr': abbr, 'code': code, 'title': title}
                 for abbr, code, title in rows)
| [
"baudouin.raoult@ecmwf.int"
] | baudouin.raoult@ecmwf.int |
19b56d589b90c261f7c606e1b5c1df2d93de92fd | 5e45ba79976ba805f6744f0bcb4ddfbde3e0a7a4 | /alibi_detect/saving/_typing.py | 22d9327b1352959d5832a30b371ffc0d217e1699 | [
"Apache-2.0"
] | permissive | SeldonIO/alibi-detect | e3293baa0603acace6f79bfb14953cf953943b10 | 4a1b4f74a8590117965421e86c2295bff0f33e89 | refs/heads/master | 2023-08-25T05:47:14.038826 | 2023-08-08T14:12:47 | 2023-08-08T14:12:47 | 213,390,927 | 1,922 | 195 | Apache-2.0 | 2023-09-12T07:07:13 | 2019-10-07T13:29:13 | Python | UTF-8 | Python | false | false | 951 | py | """Typing constructs for saving and loading functionality
List of detectors that are valid for saving and loading either via the legacy methods or the new config driven
functionality"""
VALID_DETECTORS = [
'AdversarialAE',
'ChiSquareDrift',
'ClassifierDrift',
'IForest',
'KSDrift',
'LLR',
'Mahalanobis',
'MMDDrift',
'LSDDDrift',
'ModelDistillation',
'OutlierAE',
'OutlierAEGMM',
'OutlierProphet',
'OutlierSeq2Seq',
'OutlierVAE',
'OutlierVAEGMM',
'SpectralResidual',
'TabularDrift',
'CVMDrift',
'FETDrift',
'SpotTheDiffDrift',
'ClassifierUncertaintyDrift',
'RegressorUncertaintyDrift',
'LearnedKernelDrift',
'ContextMMDDrift',
'MMDDriftTF', # TODO - remove when legacy loading removed
'ClassifierDriftTF', # TODO - remove when legacy loading removed
'MMDDriftOnline',
'LSDDDriftOnline',
'CVMDriftOnline',
'FETDriftOnline'
]
| [
"noreply@github.com"
] | SeldonIO.noreply@github.com |
a50193d599aa612de4358266cf2f2032f4896bb7 | 1882ba2b04e2230692e7da0b963f20ccf859ce34 | /Collect/SoilGrids/Nitrogen.py | dd14149c2a26cc50b2dda34dab315f69bbc67466 | [
"Apache-2.0"
] | permissive | TimHessels/watertools | 908230ae0f45de5379e6808fec827c55245c1cc2 | 2fc3680bfc6ad34bd2a11fba4cf302c5b84e5d78 | refs/heads/master | 2023-08-16T16:18:47.003632 | 2023-08-06T15:35:49 | 2023-08-06T15:35:49 | 158,684,796 | 4 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,920 | py | # -*- coding: utf-8 -*-
"""
WaterSat
author: Tim Martijn Hessels
Created on Sat Sep 28 14:15:13 2019
"""
import os
from watertools.Collect.SoilGrids.DataAccess import DownloadData
import sys
def main(Dir, latlim, lonlim, level = 'sl1', Waitbar = 1):
    """
    Downloads data from SoilGrids (www.soilgrids.org)
    The following keyword arguments are needed:
    Dir -- 'C:/file/to/path/'
    latlim -- [ymin, ymax]
    lonlim -- [xmin, xmax]
    level -- 'sl1' (Default)
             'sl2'
             'sl3'
             'sl4'
             'sl5'
             'sl6'
             'sl7'
    Waitbar -- '1' if you want a waitbar (Default = 1)
    """
    # Create directory if not exists for the output
    output_folder = os.path.join(Dir, 'SoilGrids', 'Nitrogen')
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    # Define the output map and create this if not exists
    nameEnd = os.path.join(output_folder,'Nitrogen_%s_SoilGrids_g_kg-1.tif' %level)
    if not os.path.exists(nameEnd):
        # Create Waitbar
        # WaitbarConsole is imported lazily so it is only required when a
        # progress bar is actually requested.
        if Waitbar == 1:
            print('\nDownload Soil nitrogen map of %s from SoilGrids.org' %level)
            import watertools.Functions.Random.WaitbarConsole as WaitbarConsole
            total_amount = 1
            amount = 0
            WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
        # Download and process the data
        DownloadData(output_folder, latlim, lonlim, "NITROGEN", level)
        if Waitbar == 1:
            amount = 1
            WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
    else:
        # Output already cached on disk: skip the download entirely.
        if Waitbar == 1:
            print("\nDownload Soil nitrogen map of %s from SoilGrids.org already exists in output folder" %level)
if __name__ == '__main__':
    # NOTE(review): main() requires (Dir, latlim, lonlim, ...) but is
    # invoked here with the whole argv list as the single 'Dir' argument,
    # so running this module as a script raises TypeError — confirm the
    # intended command-line usage and parse the arguments accordingly.
    main(sys.argv)
| [
"timhessels@hotmail.com"
] | timhessels@hotmail.com |
d75203b1f812059549ddd3811d42d6df16adc7cb | 31014bf4464a5fae77ff86241ae15cfdd71ccb9e | /gnomics/objects/disease_files/disgenet.py | 35d80aeb2915826df69c255ac9d3eacb5bea239a | [
"BSD-3-Clause",
"BSD-2-Clause-Views"
] | permissive | izhangcd/Gnomics | 14de8f90960f88d3eb2f2a49c94fa3a0f8048a2d | bd0fb4e7be009b2afe1c2667f2890c712ae0ad9d | refs/heads/master | 2021-09-09T03:42:40.953105 | 2018-03-13T16:05:17 | 2018-03-13T16:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,463 | py | #!/usr/bin/env python
#
#
#
#
#
#
# IMPORT SOURCES:
# PYMEDTERMINO
# http://pythonhosted.org/PyMedTermino/
#
#
# DisGeNET functions.
#
# PRE-CODE
import faulthandler
faulthandler.enable()
# IMPORTS
# Imports for recognizing modules.
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
# Import modules.
from gnomics.objects.user import User
import gnomics.objects.disease
import gnomics.objects.gene
import gnomics.objects.pathway
# Other imports.
from bioservices import *
from pymedtermino import *
from pymedtermino.icd10 import *
from pymedtermino.umls import *
from SPARQLWrapper import SPARQLWrapper
from SPARQLWrapper import JSON
# MAIN
def main():
    """Run the module's smoke tests with a sample DOID and UMLS CUI."""
    disgenet_unit_tests("2394", "C0010674")
# Get gene-disease associations (GDAs).
def get_gdas(dis, gen):
    """Get gene-disease associations (GDAs). Placeholder: not implemented yet."""
    print("NOT FUNCTIONAL.")
# Get GDA evidence.
def get_gda_evidence():
    """Get GDA evidence. Placeholder: not implemented yet."""
    print("NOT FUNCTIONAL.")
# Get genes associated with a disease.
def get_genes(dis):
    """Get genes associated with a disease. Placeholder: not implemented yet."""
    print("NOT FUNCTIONAL.")
# Get DisGeNET disease ID.
def get_disgenet(dis):
    """Query DisGeNET's SPARQL endpoint for UMLS disease concepts whose
    linked Disease Ontology term is a subclass of this disease's DO class.

    Parameters:
        dis: gnomics disease object; only identifiers whose
            ``identifier_type`` is a DOID variant are used.

    Returns:
        dict: maps each queried DOID identifier to a list of SPARQL result
        bindings (keys: umls, umlsTerm, doid, doTerm). Identifiers whose
        query or result parsing fails are skipped (best effort).
    """
    id_array = []  # DOID identifiers already queried (avoid duplicate queries)
    results_array = {}
    for ident in dis.identifiers:
        if ident["identifier_type"].lower() in ["doid", "disease ontology id", "disease ontology identifier"]:
            if ident["identifier"] not in id_array:
                sparql = SPARQLWrapper("http://rdf.disgenet.org/sparql/")
                # NOTE(review): several PREFIX IRIs below (wi, eco, prov, pav)
                # contain a duplicated "http://"; they are unused by the query
                # body, so they are left untouched here.
                sparql.setQuery("""
                    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
                    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
                    PREFIX owl: <http://www.w3.org/2002/07/owl#>
                    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
                    PREFIX dcterms: <http://purl.org/dc/terms/>
                    PREFIX foaf: <http://xmlns.com/foaf/0.1/>
                    PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
                    PREFIX void: <http://rdfs.org/ns/void#>
                    PREFIX sio: <http://semanticscience.org/resource/>
                    PREFIX ncit: <http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#>
                    PREFIX up: <http://purl.uniprot.org/core/>
                    PREFIX dcat: <http://www.w3.org/ns/dcat#>
                    PREFIX dctypes: <http://purl.org/dc/dcmitype/>
                    PREFIX wi: <http://http://purl.org/ontology/wi/core#>
                    PREFIX eco: <http://http://purl.obolibrary.org/obo/eco.owl#>
                    PREFIX prov: <http://http://http://www.w3.org/ns/prov#>
                    PREFIX pav: <http://http://http://purl.org/pav/>
                    PREFIX obo: <http://purl.obolibrary.org/obo/>
                    SELECT DISTINCT ?umls
                        ?umlsTerm
                        ?doid
                        ?doTerm
                    WHERE {
                        ?gda sio:SIO_000628 ?umls .
                        ?umls dcterms:title ?umlsTerm ;
                            skos:exactMatch ?doid .
                        ?doid rdfs:label ?doTerm ;
                            rdfs:subClassOf+ <%s> .
                        FILTER regex(?umls, "umls/id")
                    }
                """ % doid_obolibrary_url(dis))
                sparql.setReturnFormat(JSON)
                try:
                    results = sparql.query().convert()
                    formatted_results = []
                    for result in results["results"]["bindings"]:
                        formatted_results.append(result)
                    id_array.append(ident["identifier"])
                    results_array[ident["identifier"]] = formatted_results
                except Exception:
                    # Was a bare ``except:``, which would also swallow
                    # KeyboardInterrupt/SystemExit. Keep the best-effort skip,
                    # but only for ordinary errors (network, malformed result).
                    continue
    return results_array
# DOID URL (Ontobee).
def doid_obolibrary_url(dis):
    """Build the OBO Library PURL for this disease's first DOID identifier.

    Returns the URL string, or None when the disease carries no
    Disease Ontology identifier.
    """
    doid_types = ("doid", "disease ontology id", "disease ontology identifier")
    candidate_urls = (
        "http://purl.obolibrary.org/obo/DOID_" + str(entry["identifier"])
        for entry in dis.identifiers
        if entry["identifier_type"].lower() in doid_types
    )
    return next(candidate_urls, None)
# Get genes related to a UMLS ID.
def umls_genes(dis):
    """Query DisGeNET for gene-disease associations of this disease's UMLS CUIs.

    For each UMLS identifier attached to *dis*, run a SPARQL query against
    the DisGeNET endpoint returning each association (gene, score, source,
    association type, and — when present — PubMed id and evidence sentence).

    Returns:
        dict: maps each UMLS identifier to a list of SPARQL result bindings.
    """
    id_array = []  # UMLS identifiers already queried (avoid duplicate queries)
    results_array = {}
    for ident in dis.identifiers:
        if ident["identifier_type"].lower() in ["umls", "umls id", "umls identifier"]:
            if ident["identifier"] not in id_array:
                sparql = SPARQLWrapper("http://rdf.disgenet.org/sparql/")
                sparql.setQuery("""
                    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
                    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
                    PREFIX owl: <http://www.w3.org/2002/07/owl#>
                    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
                    PREFIX dcterms: <http://purl.org/dc/terms/>
                    PREFIX foaf: <http://xmlns.com/foaf/0.1/>
                    PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
                    PREFIX void: <http://rdfs.org/ns/void#>
                    PREFIX sio: <http://semanticscience.org/resource/>
                    PREFIX ncit: <http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#>
                    PREFIX up: <http://purl.uniprot.org/core/>
                    PREFIX dcat: <http://www.w3.org/ns/dcat#>
                    PREFIX dctypes: <http://purl.org/dc/dcmitype/>
                    PREFIX wi: <http://http://purl.org/ontology/wi/core#>
                    PREFIX eco: <http://http://purl.obolibrary.org/obo/eco.owl#>
                    PREFIX prov: <http://http://http://www.w3.org/ns/prov#>
                    PREFIX pav: <http://http://http://purl.org/pav/>
                    PREFIX obo: <http://purl.obolibrary.org/obo/>
                    SELECT DISTINCT ?gda
                        <%s> as ?disease
                        ?gene
                        ?score
                        ?source
                        ?associationType
                        ?pmid
                        ?sentence
                    WHERE {
                        ?gda sio:SIO_000628 <%s> ;
                            rdf:type ?associationType ;
                            sio:SIO_000628 ?gene ;
                            sio:SIO_000216 ?scoreIRI ;
                            sio:SIO_000253 ?source .
                        ?scoreIRI sio:SIO_000300 ?score .
                        OPTIONAL {
                            ?gda sio:SIO_000772 ?pmid .
                            ?gda dcterms:description ?sentence .
                        }
                    }
                """ % (linked_life_data_url(dis), linked_life_data_url(dis)))
                sparql.setReturnFormat(JSON)
                # NOTE(review): unlike get_disgenet(), a failing query here
                # propagates the exception to the caller.
                results = sparql.query().convert()
                formatted_results = []
                for result in results["results"]["bindings"]:
                    formatted_results.append(result)
                id_array.append(ident["identifier"])
                results_array[ident["identifier"]] = formatted_results
    return results_array
# Get Linked Life Data UMLS URL.
def linked_life_data_url(dis):
    """Return the Linked Life Data URL for this disease's first UMLS CUI.

    Returns None when no UMLS identifier is present.
    """
    umls_types = ("umls", "umls id", "umls identifier")
    for entry in dis.identifiers:
        if entry["identifier_type"].lower() in umls_types:
            return "http://linkedlifedata.com/resource/umls/id/" + str(entry["identifier"])
    return None
# UNIT TESTS
def disgenet_unit_tests(doid, umls_id):
    """Smoke-test the DOID and UMLS query paths with the given identifiers."""
    doid_dis = gnomics.objects.disease.Disease(identifier = str(doid), identifier_type = "DOID", source = "Disease Ontology")
    get_disgenet(doid_dis)
    umls_dis = gnomics.objects.disease.Disease(identifier = str(umls_id), identifier_type = "UMLS ID", source = "UMLS")
    umls_genes(umls_dis)
# MAIN
if __name__ == "__main__": main() | [
"charles.kronk@gmail.com"
] | charles.kronk@gmail.com |
10f55c23b2bf6884d05412fd17885057fb1b2143 | 20b2cbc7ea90285a9ba9347684057f5735728338 | /shopapp/migrations/0020_auto_20180718_1330.py | 1c10bd9b28e5ee964328401a8898d668e751562e | [] | no_license | gruzdevasch/shop-django | 0f4ab6a8cf2fc655daf9629f486cfb24c7d7b4c1 | 35365b0e17ecfefc6e71875f77d7904d05cf824d | refs/heads/master | 2020-03-23T10:16:37.089690 | 2018-07-19T10:43:12 | 2018-07-19T10:43:12 | 141,434,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # Generated by Django 2.0.7 on 2018-07-18 06:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.7. Alters ProductInCart.item so the cart
    # row is deleted with its Product and the reverse accessor on Product is
    # exposed as "cartproducts".

    dependencies = [
        ('shopapp', '0019_auto_20180718_1157'),
    ]

    operations = [
        migrations.AlterField(
            model_name='productincart',
            name='item',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cartproducts', to='shopapp.Product'),
        ),
    ]
| [
"gruzdevasch@gmail.com"
] | gruzdevasch@gmail.com |
bc69e3ed150b2098d6c22b3b5f4e7d982f7f9389 | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-ci/connectors/metadata_service/lib/tests/test_transform.py | 8222e5e12e892f1c914634e65d4e291db54015fc | [
"LicenseRef-scancode-free-unknown",
"MIT",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 1,993 | py | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import pathlib
import yaml
from metadata_service.models.generated.ConnectorMetadataDefinitionV0 import ConnectorMetadataDefinitionV0
from metadata_service.models import transform
def get_all_dict_key_paths(dict_to_traverse, key_path=""):
    """Collect dotted paths to every leaf key of a (possibly nested) dict.

    Args:
        dict_to_traverse (dict): The dict to walk; any non-dict value is
            treated as a leaf.
        key_path (str): Dotted prefix accumulated so far (internal use).

    Returns:
        list: Dotted key paths, e.g. ["data.name", "data.version", "data.meta.url"].
    """
    if not isinstance(dict_to_traverse, dict):
        return [key_path]
    return [
        leaf_path
        for key, value in dict_to_traverse.items()
        for leaf_path in get_all_dict_key_paths(
            value, f"{key_path}.{key}" if key_path else key
        )
    ]
def have_same_keys(dict1, dict2):
    """Check whether two dicts expose exactly the same dotted key paths.

    Args:
        dict1 (dict): A dict.
        dict2 (dict): A dict.

    Returns:
        bool: True when both dicts have the same set of key paths.
    """
    paths_left = set(get_all_dict_key_paths(dict1))
    paths_right = set(get_all_dict_key_paths(dict2))
    return paths_left == paths_right
def test_transform_to_json_does_not_mutate_keys(valid_metadata_upload_files, valid_metadata_yaml_files):
    """Round-trip every valid metadata file through the pydantic model and the
    JSON-sanitizing transform, asserting no keys are added or removed."""
    all_valid_metadata_files = valid_metadata_upload_files + valid_metadata_yaml_files
    for file_path in all_valid_metadata_files:
        metadata_file_path = pathlib.Path(file_path)
        original_yaml_text = metadata_file_path.read_text()
        metadata_yaml_dict = yaml.safe_load(original_yaml_text)
        # Parse into the model, then sanitize back to a plain JSON-able dict.
        metadata = ConnectorMetadataDefinitionV0.parse_obj(metadata_yaml_dict)
        metadata_json_dict = transform.to_json_sanitized_dict(metadata)
        # Re-serialize/parse via YAML so both sides are plain dicts.
        new_yaml_text = yaml.safe_dump(metadata_json_dict, sort_keys=False)
        new_yaml_dict = yaml.safe_load(new_yaml_text)
        # assert same keys in both dicts, deep compare, and that the values are the same
        assert have_same_keys(metadata_yaml_dict, new_yaml_dict)
| [
"noreply@github.com"
] | thomas-vl.noreply@github.com |
8c2db9d26313acda0eaf0d6feda5c3cadb41e6eb | dc60fbae177523b1c1d6c6317388f18a96aa9c6e | /code/switchblade_users/urls.py | 274bba5c2ce3924479600443d372f9c5ee6b5d5d | [] | no_license | lucheol/prjwn_example | b611298995d13adc42ee6c7185eb47ba9fdad468 | 5154e8f12df7a2c4dd0f1f552829c8df67e6ef3d | refs/heads/main | 2023-07-10T02:46:53.646317 | 2021-08-20T23:48:56 | 2021-08-20T23:48:56 | 398,425,150 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,741 | py | from django.contrib.auth.decorators import login_required
from django.urls import path, include, reverse_lazy
from django.contrib.auth import views as auth_views
from django.views.generic import TemplateView
from django.utils.translation import gettext as _
from switchblade_dashboard.forms import FormSetHelper
from switchblade_dashboard.decorators import register_resource
from switchblade_dashboard.views import DashboardIndexView
from .apis import UserUpdateAvatarAPI
from .autocomplete import UserAutocomplete, UserResourceAutocomplete, RoleAutocomplete
from .views import UserNotification, UserListView, UserCreateView, UserDetailView, \
UserUpdateView, UserDeleteView, UserUpdateAvatarView, RoleList, RoleCreate, RoleDetail, \
RoleUpdate, RoleDelete, UserChangePassword, UserProfileChangeView
# URL routes for the users app: public auth flows, the logged-in user's
# profile pages, admin configuration (users/roles), and a small JSON API.
urlpatterns = [
    # Public authentication flows (login/logout, password reset by email).
    path('auth/', include([
        path('login/', auth_views.LoginView.as_view(template_name='auth/login.html', redirect_authenticated_user=True),
             name='login'),
        path('logout/', auth_views.LogoutView.as_view(), name='logout'),
        path('reset-password/', auth_views.PasswordResetView.as_view(template_name='auth/password_reset.html',
                                                                     extra_context={'helper': FormSetHelper}),
             name='password_reset'),
        path('reset-password-done/', auth_views.PasswordResetDoneView.as_view(template_name='auth/password_reset_done.html'),
             name='password_reset_done'),
        path('reset-password-confirm/<uidb64>/<token>', auth_views.PasswordResetConfirmView.as_view(
            template_name='auth/password_reset_confirm.html',
            extra_context={'helper': FormSetHelper}),
             name='password_reset_confirm'),
        path('reset-password-complete/', auth_views.PasswordResetCompleteView.as_view(
            template_name='auth/password_reset_complete.html'),
             name='password_reset_complete'),
        path('notifications', login_required(UserNotification.as_view()), name='user-notifications'),
        path('autocomplete/', login_required(UserAutocomplete.as_view()), name='user-autocomplete'),
    ])),
    # Logged-in user's own profile (info, avatar, password change).
    path('profile/', include([
        path('', register_resource(
            DashboardIndexView.as_view(page_title='Profile', header='Profile', columns=[3, 3, 3, 3])),
            name='dashboard-profile'),
        path('personal-info/', register_resource(UserProfileChangeView), name='profile-personal-info'),
        path('avatar/', register_resource(UserUpdateAvatarView), name='profile-avatar-update'),
        path('change-password/', auth_views.PasswordChangeView.as_view(success_url=reverse_lazy('profile-password-done'), template_name='auth/password_change.html',
                                                                       extra_context={'helper': FormSetHelper, 'PageTitle': _('Change Password')}),
             name='profile-password-change'),
        path('change-password-done/',
             auth_views.PasswordChangeDoneView.as_view(template_name='auth/password_change_done.html'),
             name='profile-password-done'),
    ])),
    # Administrative configuration: CRUD for users and roles.
    path('config/', include([
        path('change-password/', login_required(UserChangePassword.as_view()), name='users-change-password'),
        path('users/', include([
            path('', register_resource(UserListView), name='users-list'),
            path('create/', register_resource(UserCreateView), name='users-create'),
            path('detail/<int:pk>', register_resource(UserDetailView), name='users-detail'),
            path('update/<int:pk>', register_resource(UserUpdateView), name='users-update'),
            path('delete/<int:pk>', register_resource(UserDeleteView), name='users-delete'),
        ])),
        path('roles/', include([
            path('', register_resource(RoleList), name='users-role-list'),
            path('create/', register_resource(RoleCreate), name='users-role-create'),
            path('detail/<int:pk>', register_resource(RoleDetail), name='users-role-detail'),
            path('update/<int:pk>', register_resource(RoleUpdate.as_view(), as_view=False, resource='role.update', description="Update Roles"), name='users-role-update'),
            path('delete/<int:pk>', register_resource(RoleDelete), name='users-role-delete'),
            path('autocomplete/', login_required(RoleAutocomplete.as_view()), name='role-autocomplete'),
            path('resources/autocomplete/', login_required(UserResourceAutocomplete.as_view()),
                 name='userresource-autocomplete'),
        ])),
    ])),
    # JSON API endpoints.
    path('api/', include([
        path('users/avatar/update/', UserUpdateAvatarAPI.as_view(), name='api-users-avatar-update'),
    ])),
]
| [
"lucheol@gmail.com"
] | lucheol@gmail.com |
46486da752e52f31380a9777fba5704140c77aa0 | f71f44d5ddc17e3c30e2bfd7988e5111a55a8b9a | /diplom/source/src/app/endpoints_api.py | 987f920ca901c0280006fc62121f864c72c58f48 | [] | no_license | Yashchuk/diplom | 5ed1998d4b3d1fe568599973ec134f7ca13e8417 | 4029ed91ce93a41af44f03bcce365fdaecb64a37 | refs/heads/master | 2021-01-15T17:02:03.723007 | 2014-01-21T13:42:48 | 2014-01-21T13:42:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | # -*- coding: utf8 -*-
import sys
sys.path.insert(0, 'lib/')
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from utils.utils import SolutionRenderer, get_translation
# Top-level Cloud Endpoints API descriptor; service classes attach to it
# via @an_api.api_class.
an_api = endpoints.api(
    name='libvertix',
    version='v1.0',
    description="Vertix API"
)
#Definig the API default types of messages
#testEcho
class EchoRequest(messages.Message):
    """Request payload for the echo endpoint."""
    message = messages.StringField(1)
class EchoResponse(messages.Message):
    """Response payload for the echo endpoint."""
    message = messages.StringField(1)
@an_api.api_class(
    resource_name="echo",
    path="echo"
)
class Echo(remote.Service):
    """Trivial service that echoes the request message back to the caller."""

    @endpoints.method(
        EchoRequest,
        EchoResponse
    )
    def echo(self, request):
        """Return a response carrying the same message as the request."""
        return EchoResponse(message=request.message)
__author__ = 'andrew.vasyltsiv' | [
"andrew.freelance@i.ua"
] | andrew.freelance@i.ua |
28d78821e117ac0e77d7e7441cc80593e183d017 | c8ef7ec1df13c60daea0172a608e6cec6af6ab11 | /examples/spinning-cube2.py | 4b32eda9332d2e8a29fa2a33f58358ef79cdb852 | [
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] | permissive | joe311/vispy | 0356354ca04fd3e121cd292af5abfbc2c8cf5c66 | 89cffb5da27715c0267651db811792cf3b3c9cb7 | refs/heads/master | 2021-01-18T21:04:30.513517 | 2013-09-09T18:05:45 | 2013-09-09T18:05:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,039 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Show spinning cube using VBO's, and transforms, and texturing.
"""
import numpy as np
from vispy import app, gl, oogl, io
from vispy.util.transforms import perspective, translate, rotate
VERT_CODE = """
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
attribute vec3 a_position;
attribute vec2 a_texcoord;
varying vec2 v_texcoord;
void main()
{
v_texcoord = a_texcoord;
gl_Position = u_projection * u_view * u_model * vec4(a_position,1.0);
//gl_Position = vec4(a_position,1.0);
}
"""
FRAG_CODE = """
uniform sampler2D u_texture;
varying vec2 v_texcoord;
void main()
{
float ty = v_texcoord.y;
float tx = sin(ty*50.0)*0.01 + v_texcoord.x;
gl_FragColor = texture2D(u_texture, vec2(tx, ty));
}
"""
# Read cube data (vertex positions, triangle faces, normals, texture coords).
positions, faces, normals, texcoords = io.read_mesh('cube.obj')
# NOTE(review): ``colors`` is computed but never used in this example.
colors = np.random.uniform(0, 1, positions.shape).astype('float32')
# Element buffer of triangle indices shared by all draw calls below.
faces_buffer = oogl.ElementBuffer(faces.astype(np.uint16))
class Canvas(app.Canvas):
    """Window that draws a textured cube spinning via a 60 FPS timer."""

    def __init__(self, **kwargs):
        app.Canvas.__init__(self, **kwargs)
        self.geometry = 0, 0, 400, 400
        self.program = oogl.Program(VERT_CODE, FRAG_CODE)
        # Set attributes
        self.program['a_position'] = oogl.VertexBuffer(positions)
        self.program['a_texcoord'] = oogl.VertexBuffer(texcoords)
        self.program['u_texture'] = oogl.Texture2D(io.crate())
        # Handle transformations
        self.init_transforms()
        # Re-orient the cube ~60 times per second.
        self.timer = app.Timer(1.0 / 60)
        self.timer.connect(self.update_transforms)
        self.timer.start()

    def on_initialize(self, event):
        # White background; depth testing so nearer faces occlude farther ones.
        gl.glClearColor(1, 1, 1, 1)
        gl.glEnable(gl.GL_DEPTH_TEST)

    def on_resize(self, event):
        # Keep the viewport and the projection's aspect ratio in sync.
        width, height = event.size
        gl.glViewport(0, 0, width, height)
        self.projection = perspective(45.0, width / float(height), 2.0, 10.0)
        self.program['u_projection'] = self.projection

    def on_paint(self, event):
        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        with self.program as prog:
            prog.draw_elements(gl.GL_TRIANGLES, faces_buffer)

    def init_transforms(self):
        """Reset model/view/projection matrices and rotation angles."""
        self.view = np.eye(4, dtype=np.float32)
        self.model = np.eye(4, dtype=np.float32)
        self.projection = np.eye(4, dtype=np.float32)
        self.theta = 0
        self.phi = 0
        # Move the camera back so the cube is in view.
        translate(self.view, 0, 0, -5)
        self.program['u_model'] = self.model
        self.program['u_view'] = self.view

    def update_transforms(self, event):
        """Timer callback: advance the rotation and request a repaint."""
        self.theta += .5
        self.phi += .5
        self.model = np.eye(4, dtype=np.float32)
        rotate(self.model, self.theta, 0, 0, 1)
        rotate(self.model, self.phi, 0, 1, 0)
        self.program['u_model'] = self.model
        self.update()
if __name__ == '__main__':
    # Create the canvas, show the window and enter the vispy event loop.
    c = Canvas()
    c.show()
    app.run()
| [
"almar.klein@gmail.com"
] | almar.klein@gmail.com |
ca6c346c4461e562d4bddaade2d654e6530ed503 | 7afa5820b809b8c8cf5443bf53d048cc7abc867a | /accounts/passwords/urls.py | cfa71a13545b8b24a3fc6c844a4a96caebb55410 | [] | no_license | Brucehaha/ecommerce | 037fb25608e848f5c0fd4ed78f42028d21872e39 | 8f873c7557e2554977dd0a23349c4d51063e2ed3 | refs/heads/master | 2023-01-03T19:35:13.894572 | 2018-09-05T07:14:40 | 2018-09-05T07:14:40 | 124,492,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | from django.urls import path, re_path
from django.contrib.auth import views as auth_views
# Password management routes built on Django's built-in auth views.
urlpatterns = [
    # Authenticated password change.
    path('password/change/', auth_views.PasswordChangeView.as_view(), name='change'),
    path('password/change/done/', auth_views.PasswordChangeDoneView.as_view(), name='password_change_done'),
    # Email-based password reset flow.
    path('password/reset/', auth_views.PasswordResetView.as_view(), name='password_reset'),
    re_path(r'^password/reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
    path('password/reset/done/', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),
    path('password/reset/complete/', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
]
#
# accounts[name='login']
# accounts/logout/ [name='logout']
# accounts/password_change/ [name='password_change']
# accounts/password_change/done/ [name='password_change_done']
# accounts/password_reset/ [name='password_reset']
# accounts/password_reset/done/ [name='password_reset_done']
# accounts/reset/<uidb64>/<token>/ [name='password_reset_confirm']
# accounts/reset/done/ [name='password_reset_complete']
| [
"henninglee2013@gmail.com"
] | henninglee2013@gmail.com |
487ccd619eb605da561903ecb9fd098c6b8e72b2 | ed0dd577f03a804cdc274f6c7558fafaac574dff | /python/pyre/db/Time.py | b4c602876d7b8874044e981b09d74cb8b6f19038 | [
"Apache-2.0"
] | permissive | leandromoreira/vmaf | fd26e2859136126ecc8e9feeebe38a51d14db3de | a4cf599444701ea168f966162194f608b4e68697 | refs/heads/master | 2021-01-19T03:43:15.677322 | 2016-10-08T18:02:22 | 2016-10-08T18:02:22 | 70,248,500 | 3 | 0 | null | 2016-10-07T13:21:28 | 2016-10-07T13:21:27 | null | UTF-8 | Python | false | false | 1,472 | py | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from Column import Column
class Time(Column):
    """Database column descriptor for SQL ``time`` values.

    Values are held as seconds since the epoch and formatted/parsed using
    asctime-style strings. NOTE(review): this is Python-2-era code
    (``basestring`` in _cast); it has not been ported to Python 3.
    """

    def type(self):
        # SQL type name for DDL generation.
        if not self.tz:
            return "time without time zone"
        return "time"

    def __init__(self, name, tz=True, **kwds):
        Column.__init__(self, name, **kwds)
        # tz: whether the SQL column carries a time zone.
        self.tz = tz
        return

    def __get__(self, instance, cls=None):
        # Descriptor read: an unset value defaults to the current local time
        # string (asctime format), otherwise normalize the stored value.
        ret = Column.__get__(self, instance, cls = cls)
        if ret is None:
            import time
            return time.ctime()
        return self._cast(ret)

    def _cast(self, value):
        """Normalize *value* to seconds since the epoch (UTC)."""
        format = '%a %b %d %H:%M:%S %Y'
        if isinstance(value, basestring):
            # Parse an asctime-style string as UTC.
            return calendar.timegm(time.strptime(value, format))
        if isinstance(value, time.struct_time):
            return calendar.timegm(value)
        if isinstance(value, float) or isinstance(value, int):
            return value
        raise NotImplementedError

    def _format(self, value):
        # Render epoch seconds as an asctime-style UTC string.
        return time.asctime(time.gmtime(value))
return time.asctime(time.gmtime(value))
import time, calendar
# version
__id__ = "$Id: Time.py,v 1.1.1.1 2006-11-27 00:09:55 aivazis Exp $"
# End of file
| [
"zli@netflix.com"
] | zli@netflix.com |
dbaaa53331f0a0f026ad26980051621b2c7b9a22 | 9e3e8f1befeeb31150b27650ce5f6460b9c17445 | /quest/inference.py | d5707e8dce57159c0fc8bc51a94658cfb69ad9b7 | [
"MIT"
] | permissive | ceshine/kaggle-quest | d5ee108e0b7a7c2daa5469abad0c443f31e00029 | a25b9d964c28d97769bc040da05f2e39987433c4 | refs/heads/master | 2023-06-23T07:18:52.293776 | 2020-03-31T07:09:17 | 2020-03-31T07:09:17 | 240,172,051 | 8 | 0 | MIT | 2023-06-12T21:27:41 | 2020-02-13T03:53:23 | Python | UTF-8 | Python | false | false | 4,571 | py | import glob
import logging
from pathlib import Path
import fire
import joblib
import numpy as np
import pandas as pd
from tqdm import tqdm
import tensorflow as tf
from scipy.special import expit
from transformers import AutoTokenizer
from transformers import RobertaConfig
from .models import DualRobertaModel
from .prepare_tfrecords import Preprocessor, INPUT_COLUMNS, OUTPUT_COLUMNS
from .post_processing import prevent_nan
ROBERTA_CONFIG = {
"architectures": [
"RobertaForMaskedLM"
],
"attention_probs_dropout_prob": 0.1,
"finetuning_task": None,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"initializer_range": 0.02,
"intermediate_size": 3072,
"is_decoder": False,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"layer_norm_eps": 1e-05,
"max_position_embeddings": 514,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"num_labels": 30,
"output_attentions": False,
"output_hidden_states": False,
"output_past": True,
"pruned_heads": {},
"torchscript": False,
"type_vocab_size": 1,
"use_bfloat16": False,
"vocab_size": 50265
}
def get_batch(input_dicts):
    """Stack per-example feature dicts into a single batch of TF tensors.

    Maps each model input name to the corresponding (differently named)
    key of the preprocessed example dicts.
    """
    field_map = {
        "input_ids_question": "input_ids_question",
        "attention_mask_question": "input_mask_question",
        "input_ids_answer": "input_ids_answer",
        "attention_mask_answer": "input_mask_answer",
    }
    return {
        model_key: tf.convert_to_tensor(
            np.stack([example[source_key] for example in input_dicts], axis=0)
        )
        for model_key, source_key in field_map.items()
    }
def main(
    input_path: str = "data/",
    tokenizer_path: str = "cache/tfrecords/tokenizer_roberta-base/",
    model_path_pattern: str = "cache/roberta-base-fold-*",
    best_bins_path: str = "cache/best_bins.jl",
    batch_size: int = 8, progress_bar: bool = True,
    add_sigmoid: bool = False, rank: bool = False
):
    """Run fold-ensemble inference on ``test.csv`` and write ``submission.csv``.

    Each model matching *model_path_pattern* predicts the whole test set; the
    per-fold predictions are averaged (optionally after sigmoid or per-column
    rank normalization) and written with the test set's qa_ids.
    """
    df_valid = pd.read_csv(input_path + 'test.csv')
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
    processor = Preprocessor(tokenizer)
    # Tokenize every (title, body, answer) row up front.
    inputs = df_valid.loc[:, INPUT_COLUMNS].values
    tmp = []
    for i in tqdm(range(inputs.shape[0]), ncols=100, disable=not progress_bar):
        tmp.append(processor.process_one_example(
            inputs[i, 0],
            inputs[i, 1],
            inputs[i, 2])
        )
    processed_inputs = np.array(tmp)
    del tmp, inputs
    buffer = []  # one prediction matrix per fold/model
    for model_path in glob.glob(model_path_pattern):
        model_name = Path(model_path).name
        print(model_path, model_name)
        if model_name.lower().startswith("roberta-base"):
            config = RobertaConfig.from_dict(
                ROBERTA_CONFIG)
            model = DualRobertaModel(
                model_name="roberta-base", config=config, pretrained=False)
            # build the model's variables with a dummy forward pass,
            # then load the fold's weights
            model(get_batch(processed_inputs[:2]), training=False)
            model.load_weights(model_path)
        else:
            raise ValueError("Unknown model.")

        @tf.function
        def predict_batch(inputs):
            return model(inputs, training=False)[0]

        preds = []
        for i in tqdm(range(
            0, len(processed_inputs), batch_size
        ), ncols=100, disable=not progress_bar):
            input_dicts = processed_inputs[i:i+batch_size]
            preds.append(predict_batch(get_batch(input_dicts)).numpy())
        if add_sigmoid and not rank:
            # Convert logits to probabilities.
            buffer.append(expit(np.concatenate(preds)))
        elif rank:
            # Per-column rank normalization to [0, 1).
            tmp = np.concatenate(preds)
            buffer.append(
                tmp.argsort(axis=0).argsort(axis=0) / tmp.shape[0]
            )
        else:
            buffer.append(np.concatenate(preds))
    final_preds = np.mean(buffer, axis=0)
    if add_sigmoid and not rank:
        # Snap averaged probabilities to the precomputed per-column bins
        # (Spearman-rho post-processing); presumably best_bins was fitted on
        # out-of-fold predictions — see post_processing module.
        best_bins, scaler = joblib.load(best_bins_path)
        best_bins = np.array(best_bins)[None, :]
        # post-process
        final_preds = np.clip(scaler.transform(final_preds), 0., 1.)
        final_preds = prevent_nan(
            np.round(final_preds * best_bins) / best_bins
        )
    df_sub = pd.DataFrame(final_preds, columns=OUTPUT_COLUMNS)
    df_sub["qa_id"] = df_valid["qa_id"].values
    df_sub.to_csv("submission.csv", index=False)
if __name__ == '__main__':
    # Expose main()'s keyword arguments as a CLI via python-fire.
    fire.Fire(main)
| [
"shuanck@gmail.com"
] | shuanck@gmail.com |
6ebce452a9616c0da47d9d865a6cd5f02ae0ad96 | 0eeeb78637ada50becb64ee73eeb6e872300818d | /tests/core/tags_manager_test.py | b2e94ada28b6e30309fa6b2084f273477fc7a39a | [
"MIT",
"CC-BY-4.0"
] | permissive | data-mermaid/golem | 4eabed7181a22220d09c4bc1b7fd0f4197e9dace | 06bbb33275a9f79cd7cc30a0e264b9bafdad3073 | refs/heads/master | 2022-11-20T11:28:57.891744 | 2020-07-21T18:12:18 | 2020-07-21T18:12:18 | 281,468,772 | 0 | 0 | MIT | 2020-07-21T18:02:12 | 2020-07-21T18:02:11 | null | UTF-8 | Python | false | false | 7,567 | py | import os
import json
import time
from types import SimpleNamespace
import pytest
from golem.core import tags_manager
class TestFilterTestsByTags:
    """Tests for tags_manager.filter_tests_by_tags (tag-expression queries)."""

    @pytest.fixture(scope="class")
    def _project_with_tags(self, project_class, test_utils):
        """A fixture of a project with tests that contain tags"""
        _, project = project_class.activate()
        tests = SimpleNamespace()
        tests.test_alfa_bravo = 'test_alfa_bravo'
        content = 'tags = ["alfa", "bravo"]'
        test_utils.create_test(project, tests.test_alfa_bravo, content=content)
        tests.test_bravo_charlie = 'test_bravo_charlie'
        content = 'tags = ["bravo", "charlie"]'
        test_utils.create_test(project, tests.test_bravo_charlie, content=content)
        tests.test_delta_echo_foxtrot = 'test_delta_echo_foxtrot'
        content = 'tags = ["delta", "echo", "fox trot"]'
        test_utils.create_test(project, tests.test_delta_echo_foxtrot, content=content)
        tests.test_empty_tags = 'test_empty_tags'
        content = 'tags = []'
        test_utils.create_test(project, tests.test_empty_tags, content=content)
        tests.test_no_tags = 'test_no_tags'
        content = 'def test(data):\n pass'
        test_utils.create_test(project, tests.test_no_tags, content=content)
        # Expose both the list of test names and the namespace of names.
        project_class.tests = list(tests.__dict__)
        project_class.t = tests
        return project_class

    def test_filter_tests_by_tags(self, _project_with_tags):
        _, project = _project_with_tags.activate()
        tests = _project_with_tags.tests
        t = _project_with_tags.t
        # Plain tag lists are AND-ed together.
        filtered = tags_manager.filter_tests_by_tags(project, tests, ['alfa', 'bravo'])
        assert filtered == [t.test_alfa_bravo]
        filtered = tags_manager.filter_tests_by_tags(project, tests, ['bravo'])
        assert sorted(filtered) == sorted([t.test_alfa_bravo, t.test_bravo_charlie])
        # Boolean expressions: and / or / not, with optional quoting and parens.
        filtered = tags_manager.filter_tests_by_tags(project, tests, ['alfa and bravo'])
        assert filtered == [t.test_alfa_bravo]
        filtered = tags_manager.filter_tests_by_tags(project, tests, ['"alfa" and "bravo"'])
        assert filtered == [t.test_alfa_bravo]
        filtered = tags_manager.filter_tests_by_tags(project, tests, ['alfa or bravo'])
        assert sorted(filtered) == sorted([t.test_alfa_bravo, t.test_bravo_charlie])
        filtered = tags_manager.filter_tests_by_tags(project, tests, ['bravo and not alfa'])
        assert filtered == [t.test_bravo_charlie]
        filtered = tags_manager.filter_tests_by_tags(project, tests, ['(alfa or bravo) and charlie'])
        assert filtered == [t.test_bravo_charlie]
        filtered = tags_manager.filter_tests_by_tags(project, tests, ['bravo or delta and not charlie'])
        assert sorted(filtered) == sorted([t.test_alfa_bravo, t.test_bravo_charlie, t.test_delta_echo_foxtrot])
        filtered = tags_manager.filter_tests_by_tags(project, tests, ['(bravo or delta) and not charlie'])
        assert sorted(filtered) == sorted([t.test_alfa_bravo, t.test_delta_echo_foxtrot])
        # Tags containing spaces.
        filtered = tags_manager.filter_tests_by_tags(project, tests, ["fox trot"])
        assert sorted(filtered) == sorted([t.test_delta_echo_foxtrot])
        filtered = tags_manager.filter_tests_by_tags(project, tests, ["delta", "fox trot"])
        assert sorted(filtered) == sorted([t.test_delta_echo_foxtrot])
        filtered = tags_manager.filter_tests_by_tags(project, tests, ['"delta" or "fox trot"'])
        assert sorted(filtered) == sorted([t.test_delta_echo_foxtrot])
        # No test carries both tags.
        filtered = tags_manager.filter_tests_by_tags(project, tests, ['bravo and echo'])
        assert filtered == []

    def test_filter_tests_by_tags_empty_list(self, _project_with_tags):
        # An empty tag list matches nothing.
        _, project = _project_with_tags.activate()
        filtered = tags_manager.filter_tests_by_tags(project, _project_with_tags.tests,
                                                     tags=[])
        assert filtered == []

    def test_filter_tests_by_tags_invalid_query(self, _project_with_tags):
        # Operators other than and/or/not are rejected with a clear message.
        _, project = _project_with_tags.activate()
        with pytest.raises(tags_manager.InvalidTagExpression) as excinfo:
            tags_manager.filter_tests_by_tags(project, _project_with_tags.tests, tags=['foo = 2'])
        expected = ("unknown expression <class '_ast.Assign'>, the only valid "
                    "operators for tag expressions are: 'and', 'or' & 'not'")
        assert expected in str(excinfo.value)
class TestGetTestsTags:
    """Tests for tags_manager.get_tests_tags and its on-disk cache."""

    def test_get_tests_tags(self, project_session, test_utils):
        _, project = project_session.activate()
        # empty test list
        tags = tags_manager.get_tests_tags(project, [])
        assert tags == {}
        content = 'tags = ["foo", "bar"]'
        test_one = test_utils.random_string()
        test_utils.create_test(project, name=test_one, content=content)
        # test tags for one test
        tags = tags_manager.get_tests_tags(project, [test_one])
        assert tags == {test_one: ['foo', 'bar']}
        # test without tags returns empty list
        test_two = test_utils.create_random_test(project)
        tags = tags_manager.get_tests_tags(project, [test_one, test_two])
        assert tags[test_one] == ['foo', 'bar']
        assert tags[test_two] == []

    @pytest.mark.slow
    def test_get_tests_tags_verify_cache(self, project_function, test_utils):
        testdir, project = project_function.activate()
        test_name = 'test_tags_003'
        content = 'tags = ["foo", "bar"]'
        test_path = test_utils.create_test(project, test_name, content=content)
        # verify cache file does not exist and is created afterwards
        cache_path = os.path.join(testdir, 'projects', project, '.tags')
        assert not os.path.isfile(cache_path)
        tags = tags_manager.get_tests_tags(project, [test_name])
        assert os.path.isfile(cache_path)
        assert tags[test_name] == ["foo", "bar"]
        # verify that when a test is updated, the cache is updated as well
        time.sleep(0.3)  # give it some time!
        content = 'tags = ["baz"]'
        with open(test_path, 'w') as f:
            f.write(content)
        tags = tags_manager.get_tests_tags(project, [test_name])
        with open(cache_path) as f:
            cache = json.load(f)
        assert cache[test_name]['tags'] == ['baz']
        assert tags[test_name] == ['baz']
class TestGetAllProjectTestsTags:
    """Tests for tags_manager.get_all_project_tests_tags."""

    def test_get_all_project_tests_tags(self, project_function, test_utils):
        _, project = project_function.activate()
        # no tests
        tags = tags_manager.get_all_project_tests_tags(project)
        assert tags == {}
        # with tests
        content = 'tags = ["foo", "bar"]'
        test_utils.create_test(project, 'test001', content=content)
        content = 'tags = ["001", "002"]'
        test_utils.create_test(project, 'test002', content=content)
        tags = tags_manager.get_all_project_tests_tags(project)
        assert tags == {'test001': ['foo', 'bar'], 'test002': ['001', '002']}
class TestGetProjectUniqueTags:
def test_get_project_unique_tags(self, project_function, test_utils):
_, project = project_function.activate()
content = 'tags = ["foo", "bar"]'
test_utils.create_test(project, name='test001', content=content)
content = 'tags = ["bar", "baz"]'
test_utils.create_test(project, 'test002', content=content)
tags = tags_manager.get_project_unique_tags(project)
assert sorted(tags) == sorted(['foo', 'bar', 'baz'])
| [
"luciano@lucianorenzi.com"
] | luciano@lucianorenzi.com |
fdc4d85a1747b9528ab39eab9c261b0b2f9f2959 | 1ab5c39319564d20babedfb823307829cf489695 | /example_site/users/migrations/0004_update_uuid.py | cb907a7e3fcab135de1ccdad118b32dfc45ab678 | [
"MIT"
] | permissive | bihealth/sodar-core | 1c64368544d20b0934688a81d9cff7b27135a5ac | 6156b6409af01828fca506796a736b4f4669f33e | refs/heads/main | 2023-08-16T22:52:43.749729 | 2023-06-01T12:15:30 | 2023-06-01T12:15:30 | 165,220,058 | 7 | 0 | MIT | 2021-05-03T13:11:57 | 2019-01-11T09:50:04 | Python | UTF-8 | Python | false | false | 504 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-20 15:29
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('users', '0003_rename_uuid'),
]
operations = [
migrations.AlterField(
model_name='user',
name='sodar_uuid',
field=models.UUIDField(default=uuid.uuid4, help_text='User SODAR UUID', unique=True),
),
]
| [
"mikko.nieminen@bihealth.de"
] | mikko.nieminen@bihealth.de |
e71dbdeb83246f2c32513b1e07b6a33041c643de | 208560a564cc79822d5c6258ddd16e0e0e26362e | /Chapter-04-Strings-AssociativeArrays/Invert-Hash/Invert-Hash.py | 28a18f05d8e63e2ebd93f9ed364740506873b0f4 | [] | no_license | vishnuap/Algorithms | c778984e1afd6b8d160ce868f6ad4408da84855f | fa6c3022616a958bce86f0b1218372d47fe8bf7e | refs/heads/master | 2020-09-15T19:00:11.552634 | 2017-06-25T19:32:11 | 2017-06-25T19:32:11 | 67,612,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | # Given an object, write a function to convert the keys to values and values to keys. given {"name": "Zaphod", "charm": "high", "morals": "dicey"} return {"Zaphod": "name", "high": "charm", "dicey": "morals"}
def invert(hash):
result = {}
for key in hash:
result[hash[key]] = key
return result
myHash = {"name": "Zaphod", "charm": "high", "morals": "dicey"}
print(invert(myHash))
| [
"vishnusak@gmail.com"
] | vishnusak@gmail.com |
3cb5ea47932b8fd2beba13c91b4b21f445c3f635 | 747755833862b8e9d0f58ebc62879d6ef47c23c8 | /python-master (4)/python-master/content/ex52/gothonweb/tests/app_tests.py | 242115a3a71d5b981e8fb0f11bc9f85167654cb6 | [] | no_license | tangsong41/stu_py | 98a06730dbca6e158cf81c18d98fe1317c1ae512 | d41507cd8dd9e8a54084872dfa15c36da443c02b | refs/heads/master | 2022-12-11T23:53:57.530946 | 2019-01-15T18:29:19 | 2019-01-15T18:29:19 | 163,953,100 | 3 | 0 | null | 2022-12-07T23:24:01 | 2019-01-03T09:41:29 | Jupyter Notebook | UTF-8 | Python | false | false | 1,009 | py | from nose.tools import *
from bin.app import app
from tests.tools import assert_response
def test_index():
# 这个是原来的test,有问题
#test Get request to /hello
resp = app.request("/hello")
assert_response(resp)
#make sure default values work for the form
resp = app.request("/hello", method="POST")
assert_response(resp, contains="Nobody")
#test that we get expected values
data = {'name':'Zed', 'greet': 'Hola'}
resp = app.request("/hello", method="POST", data=data)
assert_response(resp, contains="Zed")
# 笔记
#
# 这个web游戏我还没有完全看懂,等我慢慢消化了,在回头看
# 而且我觉得这个学这个框架还不如学flash,我下一步要学这个
#
# 但是有一点我承认我偷懒了:我应该把这个测试写好的,
# 可是我没弄懂这个程序,写测试有点难。算了,以后再说吧。
# 没准有一天在学习其他的过程中,对这个测试就突然懂了呢!
#
| [
"369223985@qq.com"
] | 369223985@qq.com |
efb64bee0ef472dc2f267101c73e037d18856bb7 | 6e68584f2819351abe628b659c01184f51fec976 | /Centre_College/CSC_339_SP2015/vindiniumAI/pybrain/tests/unittests/_test_equivalence_to_ctypes.py | 1ceb6722362bc62f6f93ecd2a4f3ddcc841fac3f | [
"WTFPL"
] | permissive | DanSGraham/code | 0a16a2bfe51cebb62819cd510c7717ae24b12d1b | fc54b6d50360ae12f207385b5d25adf72bfa8121 | refs/heads/master | 2020-03-29T21:09:18.974467 | 2017-06-14T04:04:48 | 2017-06-14T04:04:48 | 36,774,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,076 | py | """
>>> from pybrain.tools.shortcuts import buildNetwork
>>> from test_recurrent_network import buildRecurrentNetwork
>>> from test_peephole_lstm import buildMinimalLSTMNetwork
>>> from test_peephole_mdlstm import buildMinimalMDLSTMNetwork
>>> from test_nested_network import buildNestedNetwork
>>> from test_simple_lstm_network import buildSimpleLSTMNetwork
>>> from test_simple_mdlstm import buildSimpleMDLSTMNetwork
>>> from test_swiping_network import buildSwipingNetwork
>>> from test_shared_connections import buildSharedCrossedNetwork
>>> from test_sliced_connections import buildSlicedNetwork
>>> from test_borderswipingnetwork import buildSimpleBorderSwipingNet
Test a number of network architectures, and compare if they produce the same output,
whether the Python implementation is used, or CTYPES.
Use the network construction scripts in other test files to build a number of networks,
and then test the equivalence of each.
Simple net
>>> testEquivalence(buildNetwork(2,2))
True
A lot of layers
>>> net = buildNetwork(2,3,4,3,2,3,4,3,2)
>>> testEquivalence(net)
True
Nonstandard components
>>> from pybrain.structure import TanhLayer
>>> net = buildNetwork(2,3,2, bias = True, outclass = TanhLayer)
>>> testEquivalence(net)
True
Shared connections
>>> net = buildSharedCrossedNetwork()
>>> testEquivalence(net)
True
Sliced connections
>>> net = buildSlicedNetwork()
>>> testEquivalence(net)
True
Nested networks (not supposed to work yet!)
>>> net = buildNestedNetwork()
>>> testEquivalence(net)
Network cannot be converted.
Recurrent networks
>>> net = buildRecurrentNetwork()
>>> net.name = '22'
>>> net.params[:] = [1,1,0.5]
>>> testEquivalence(net)
True
Swiping networks
>>> net = buildSwipingNetwork()
>>> testEquivalence(net)
True
Border-swiping networks
>>> net = buildSimpleBorderSwipingNet()
>>> testEquivalence(net)
True
Lstm
>>> net = buildSimpleLSTMNetwork()
>>> testEquivalence(net)
True
Mdlstm
>>> net = buildSimpleMDLSTMNetwork()
>>> testEquivalence(net)
True
Lstm with peepholes
>>> net = buildMinimalLSTMNetwork(True)
>>> testEquivalence(net)
True
Mdlstm with peepholes
>>> net = buildMinimalMDLSTMNetwork(True)
>>> testEquivalence(net)
True
TODO:
- heavily nested
- exotic module use
"""
__author__ = 'Tom Schaul, tom@idsia.ch'
_dependencies = ['arac']
from pybrain.tests.helpers import buildAppropriateDataset, epsilonCheck
from pybrain.tests import runModuleTestSuite
def testEquivalence(net):
cnet = net.convertToFastNetwork()
if cnet == None:
return None
ds = buildAppropriateDataset(net)
if net.sequential:
for seq in ds:
net.reset()
cnet.reset()
for input, _ in seq:
res = net.activate(input)
cres = cnet.activate(input)
if net.name == '22':
h = net['hidden0']
ch = cnet['hidden0']
print 'ni', input, net.inputbuffer.T
print 'ci', input, cnet.inputbuffer.T
print 'hni', h.inputbuffer.T[0]
print 'hci', ch.inputbuffer.T[0]
print 'hnout', h.outputbuffer.T[0]
print 'hcout', ch.outputbuffer.T[0]
print
else:
for input, _ in ds:
res = net.activate(input)
cres = cnet.activate(input)
if epsilonCheck(sum(res - cres), 0.001):
return True
else:
print 'in-net', net.inputbuffer.T
print 'in-arac', cnet.inputbuffer.T
print 'out-net', net.outputbuffer.T
print 'out-arac', cnet.outputbuffer.T
return (res, cres)
if __name__ == "__main__":
runModuleTestSuite(__import__('__main__'))
| [
"dan.s.graham@gmail.com"
] | dan.s.graham@gmail.com |
6e6fc45fe50c57f123aaf85bdfdd9e9b69a2a0d7 | e827e848562c990570cd105183087173073f10e3 | /chapters.py | e79fb0f3fa38035b114d2fb8bc9b072a56cb3aa9 | [] | no_license | srobo/sr2021-comp | 5e24d1964dc59a8a31a66d57009285121decfe03 | 44810406083f58d9cfa92f5f4839f93f3e2dbe78 | refs/heads/master | 2023-04-18T19:47:52.874826 | 2021-05-01T18:49:37 | 2021-05-01T18:49:37 | 322,952,147 | 0 | 3 | null | 2021-04-30T23:04:36 | 2020-12-19T22:50:12 | Python | UTF-8 | Python | false | false | 2,310 | py | #!/usr/bin/env python3
import argparse
import datetime
import sys
from pathlib import Path
from sr.comp.comp import SRComp
def format_time(delta: datetime.timedelta) -> str:
seconds = int(delta.total_seconds() // 1 % 60)
minutes = int((delta.total_seconds() // 60) % 60)
hours = int(delta.total_seconds() // (60 * 60))
return f'{hours}:{minutes:0>2}:{seconds:0>2}'
def main(args: argparse.Namespace) -> None:
offset = datetime.timedelta(seconds=args.offset_seconds)
match_number: int = args.match_number
comp = SRComp(Path(__file__).parent)
if len(comp.arenas) != 1:
raise ValueError("Multiple arenas not supported")
if len(comp.corners) != 2:
raise ValueError("More than two corners not supported")
arena, = comp.arenas.keys()
slots = comp.schedule.matches[match_number:]
matches = [x[arena] for x in slots]
# Yes, this doesn't account for the game not aligning within the slot.
# Happily we don't need to account for that explicitly as it's a fixed
# offset which affects all matches equally and thus drops out.
stream_start = matches[0].start_time - offset
print(f"{format_time(datetime.timedelta())} Introduction")
for match in matches:
if None in match.teams:
print(
f"Match {match.display_name} contains unknown teams. Stopping.",
file=sys.stderr,
)
break
match_steam_time = format_time(match.start_time - stream_start)
teams = " vs ".join(match.teams)
print(f"{match_steam_time} {match.display_name}: {teams}")
print("Note: also add the outtro/wrapup!", file=sys.stderr)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
'offset_seconds',
type=int,
help=(
"The YouTube url for the start of the first match. Hint: pause at "
"the start of the match, then use 'Copy video URL at current time' "
"and extract the value of the 't' argument from the query string."
),
)
parser.add_argument(
'match_number',
type=int,
help="The match number to start at.",
)
return parser.parse_args()
if __name__ == '__main__':
main(parse_args())
| [
"PeterJCLaw@gmail.com"
] | PeterJCLaw@gmail.com |
af6c67df1202840f3eeb4de69bb270e3a39ad3ae | be6853182634afda24c1bef9247d0d99442e980d | /ml_project/prediction_process.py | e8cc4560e14bb4c3519a7ff45bdacdbfc61c4505 | [] | no_license | wavelike/speech_commands | 84c98f69d467537cd23101378f46f75609686ecd | 60ef48fd295abcb58194a3a180f53cd992921422 | refs/heads/main | 2023-05-31T15:23:17.781113 | 2021-07-06T14:01:02 | 2021-07-06T14:06:22 | 383,488,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | from typing import Any, Tuple, Dict
import pandas as pd
import requests
from ml_project.config import Config
from ml_project.data_validation import ProductionData
import numpy as np
def get_predictions(config: Config, model: Any, data: np.array) -> np.array:
predictions = np.argmax(model.predict(data), axis=1)
return predictions
def get_server_predictions(config: Config, data: Dict) -> Tuple[pd.Series, pd.Series]:
# validate and parse data
data_dict = vars(ProductionData(**data))
response = requests.post(config.prediction_service_url, json=data_dict)
response_dict = response.json()
predictions = response_dict['label']
return predictions | [
"you@example.com"
] | you@example.com |
08280c22792c5e7f7332e300c80ac664b807c56b | df690ac0484ff04cb63f71f528a9d0a0e557d6a3 | /.history/ws_20210608160434.py | 2c43fa2782d678290a5e26e91d8e86df4c39eea6 | [] | no_license | khanhdk0000/Mqtt-Web-Socket | 437777c740c68d4197353e334f6fe6a629094afd | 4f9e49a3817baa9ebc4e4f8dcffc21b6ea9d0134 | refs/heads/master | 2023-06-20T17:08:09.447381 | 2021-06-08T17:42:37 | 2021-06-08T17:42:37 | 375,090,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,864 | py | from flask import Flask, jsonify, request
from flask_sock import Sock
import time
import random
app = Flask(__name__)
sock = Sock(app)
import threading
BROKER = 'io.adafruit.com'
USER = 'khanhdk0000'
PASSWORD = 'aio_FfID10QWNVSKUC2j15nLtOSeckin'
TOPIC = 'khanhdk0000/feeds/'
LIGHT = 'light'
SOUND = 'sound'
TEMP = 'temp'
LCD = 'iot_led'
BUZZER = 'buzzer'
########
# USER = 'CSE_BBC'
# PASSWORD = 'aio_FfID10QWNVSKUC2j15nLtOSeckin'
# TOPIC = 'CSE_BBC/feeds/'
# USER1 = 'CSE_BBC1'
# PASSWORD1 = 'aio_FfID10QWNVSKUC2j15nLtOSeckin'
# TOPIC1 = 'CSE_BBC1/feeds/'
# LIGHT = 'bk-iot-light'
# SOUND = 'bk-iot-sound'
# TEMP = 'bk-iot-temp-humid' ## CSE_BBC
# LCD = 'bk-iot-lcd' ## CSE_BBC
# BUZZER = 'bk-iot-speaker' ## CSE_BBC
resLight = '"id":"13","name":"LIGHT","data":"0","unit":""'
prevLight = resLight
resTemp = '"id":"7","name":"SOUND","data":"0","unit":""'
prevTemp = resTemp
resSound = '"id":"12","name":"TEMP-HUMID","data":"0","unit":""'
prevSound = resSound
resBuzzer = '"id":"2","name":"SPEAKER","data":"0","unit":""'
prevBuzzer = resBuzzer
resLCD = '"id":"3","name":"LCD","data":"0","unit":""'
prevLCD= resLCD
def mqttGet(user, password,topic,device):
import paho.mqtt.client as mqtt
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
if rc == 0:
print('good')
else:
print('no good')
def on_disconnect(client, userdata, flags, rc=0):
print("Disconnected result code " + str(rc))
def on_message(client, userdata, message):
if device == LIGHT:
global resLight
message = str(message.payload.decode("utf-8"))
print(message)
resLight = message
elif device == TEMP:
global resTemp
message = str(message.payload.decode("utf-8"))
print(message)
resTemp = message
elif device == SOUND:
global resSound
message = str(message.payload.decode("utf-8"))
print(message)
resSound = message
client = mqtt.Client(client_id=str(random.randint(0,1000)))
client.username_pw_set(username=user,password=password)
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_message = on_message
client.connect(BROKER, 1883, 60)
client.subscribe(topic)
client.loop_forever()
t1 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + LIGHT, LIGHT))
t1.start()
t2 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + TEMP, TEMP))
t2.start()
t3 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + SOUND, SOUND))
t3.start()
t4 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + BUZZER, BUZZER))
t4.start()
t5 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + LCD, LCD))
t5.start()
def mqttPost(topic, user, password,payload):
import paho.mqtt.publish as publish
publish.single(topic,hostname="io.adafruit.com",auth={"username":user, "password":password},payload = payload)
@sock.route('/light')
def light(ws):
global resLight, prevLight
while True:
if prevLight == resLight:
continue
else:
ws.send(resLight)
prevLight = resLight
@sock.route('/sound')
def sound(ws):
global resSound, prevSound
while True:
if prevSound == resSound:
continue
else:
ws.send(resSound)
prevSound = resSound
@sock.route('/temp')
def temp(ws):
global resTemp, prevTemp
while True:
if prevTemp == resTemp:
continue
else:
ws.send(resTemp)
prevTemp = resTemp
@sock.route('/buzzer')
def buzzer(ws):
global resTemp, prevTemp
while True:
if prevTemp == resTemp:
continue
else:
ws.send(resTemp)
prevTemp = resTemp
@sock.route('/lcd')
def lcd(ws):
global resLCD, prevLCD
while True:
if prevTemp == resLCD:
continue
else:
ws.send(resLCD)
prevTemp = resLCD
@app.route('/postlcd', methods=["POST"])
def postlcd():
input_json = request.get_json(force=True)
data = input_json['data']
print('receive data', data)
mqttPost(TOPIC+LCD, USER, PASSWORD, f'{{"id":"3", "name":"LCD", "data":"{data}", "unit":""}}')
return 'yea: ' + data
@app.route('/postbuzzer', methods=["POST"])
def postbuzzer():
input_json = request.get_json(force=True)
data = input_json['data']
print('receive data', data)
mqttPost(TOPIC+BUZZER, USER, PASSWORD, f'{{"id":"2", "name":"SPEAKER", "data":"{data}", "unit":""}}')
return 'yea: ' + data
if __name__ == '__main__':
app.run(debug=True) | [
"khanhtran28092000@gmail.com"
] | khanhtran28092000@gmail.com |
58801548e31e001b4c9be47337fc8e7db69b516e | 8ed80561e1b3c0bcdb6201cae8af845d5da23edc | /guppe/funcoes_com_parametro.py | 28aac69c5647374a936b29cb5b6358a8088bf445 | [] | no_license | Fulvio7/curso-python-guppe | 42d5a1ecd80c1f3b27dc3f5dad074a51c9b774eb | 98966963f698eb33e65ed58a84f96e28f675848a | refs/heads/main | 2023-08-28T13:31:12.916407 | 2021-10-09T19:03:17 | 2021-10-09T19:03:17 | 415,393,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,964 | py | """
Funções com parâmetro (de entrada)
São aquelas que recebem dados para serem processadas dentro da mesma.
entrada -> processamento -> saída
# Exemplo 1
def quadrado(numero):
return numero**2
print(quadrado(7))
print(quadrado(2))
ret = quadrado(3)
print(ret)
# Exemplo 2
def cantar_parabens(aniversariante):
print('Parabéns pra você')
print('Nesta data querida')
print('Muitas felicidades')
print('Muitos anos de vida!')
print(f'Viva o/a {aniversariante}!')
cantar_parabens('Marcos')
# Exemplo 3
def soma(a, b): # a e b são parâmetros da função
return a + b
def multiplica(a, b):
return a * b
def outra(num1, b, msg):
return (num1 + b) * msg
print(soma(2, 5)) # 2 e 5 são argumentos da função
print(soma(10, 20))
print(multiplica(4, 5))
print(multiplica(2, 8))
print(outra(3, 2, 'Beep '))
print(outra(4, 5, 'Python '))
# Obs: ocorre TypeError se informarmos um número de parâmetros diferente
# do correto
# Nomeando parâmetros
def nome_completo(nome, sobrenome):
# nome e sobrenome são parâmetros que explicitam o seu valor
return f'Seu nome completo é {nome} {sobrenome}'
print(nome_completo('Fúlvio', 'Barichello '))
# Parâmetros são variáveis declaradas na definição de uma função
# Argumentos são dados passados durante a execução de uma função
# A ordem dos parâmetros importa
# Argumentos Nomeados (Keyword Arguments)
print(nome_completo(nome='Fúlvio', sobrenome='Barichello'))
print(nome_completo(sobrenome='Barichello', nome='Fúlvio'))
# Se nomearmos os argumentos, eles podem ser passados em qualquer ordem
# Devemos sempre tomar cuidado com a posição so return dentro da função
def soma_impares(numeros):
total = 0
for i in numeros:
if i % 2 != 0:
total += i
return total
lista = [1, 2, 3, 4, 5, 6, 7]
print(soma_impares(lista))
tupla = 1, 2, 3, 4, 5, 6, 7
print(soma_impares(tupla))
"""
| [
"fulvio.barichello@gmail.com"
] | fulvio.barichello@gmail.com |
84289ad87a468bb3e5f22dcc94e76cda81645a68 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03578/s809546981.py | 11d6988c2dbdb25345af027c05f9f76b7b316ced | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | import sys
readline = sys.stdin.readline
sys.setrecursionlimit(10**8)
mod = 10**9+7
#mod = 998244353
INF = 10**18
eps = 10**-7
N = int(readline())
D = list(map(int,readline().split()))
M = int(readline())
T = list(map(int,readline().split()))
from collections import defaultdict
d_dict = defaultdict(int)
for t in T:
d_dict[t] += 1
for d in D:
if d_dict[d] > 0:
d_dict[d] -= 1
print('YES' if sum(v for v in d_dict.values()) == 0 else 'NO')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f0cf34ac61beb77be7bc230cfd5e12f6e2f7152d | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/logic/v20190501/list_integration_account_schema_content_callback_url.py | 40987ac55a9920e16d68be4a4ec402f29b3a0ec9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,817 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = [
'ListIntegrationAccountSchemaContentCallbackUrlResult',
'AwaitableListIntegrationAccountSchemaContentCallbackUrlResult',
'list_integration_account_schema_content_callback_url',
]
@pulumi.output_type
class ListIntegrationAccountSchemaContentCallbackUrlResult:
"""
The workflow trigger callback URL.
"""
def __init__(__self__, base_path=None, method=None, queries=None, relative_path=None, relative_path_parameters=None, value=None):
if base_path and not isinstance(base_path, str):
raise TypeError("Expected argument 'base_path' to be a str")
pulumi.set(__self__, "base_path", base_path)
if method and not isinstance(method, str):
raise TypeError("Expected argument 'method' to be a str")
pulumi.set(__self__, "method", method)
if queries and not isinstance(queries, dict):
raise TypeError("Expected argument 'queries' to be a dict")
pulumi.set(__self__, "queries", queries)
if relative_path and not isinstance(relative_path, str):
raise TypeError("Expected argument 'relative_path' to be a str")
pulumi.set(__self__, "relative_path", relative_path)
if relative_path_parameters and not isinstance(relative_path_parameters, list):
raise TypeError("Expected argument 'relative_path_parameters' to be a list")
pulumi.set(__self__, "relative_path_parameters", relative_path_parameters)
if value and not isinstance(value, str):
raise TypeError("Expected argument 'value' to be a str")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="basePath")
def base_path(self) -> str:
"""
Gets the workflow trigger callback URL base path.
"""
return pulumi.get(self, "base_path")
@property
@pulumi.getter
def method(self) -> str:
"""
Gets the workflow trigger callback URL HTTP method.
"""
return pulumi.get(self, "method")
@property
@pulumi.getter
def queries(self) -> Optional['outputs.WorkflowTriggerListCallbackUrlQueriesResponseResult']:
"""
Gets the workflow trigger callback URL query parameters.
"""
return pulumi.get(self, "queries")
@property
@pulumi.getter(name="relativePath")
def relative_path(self) -> str:
"""
Gets the workflow trigger callback URL relative path.
"""
return pulumi.get(self, "relative_path")
@property
@pulumi.getter(name="relativePathParameters")
def relative_path_parameters(self) -> Optional[Sequence[str]]:
"""
Gets the workflow trigger callback URL relative path parameters.
"""
return pulumi.get(self, "relative_path_parameters")
@property
@pulumi.getter
def value(self) -> str:
"""
Gets the workflow trigger callback URL.
"""
return pulumi.get(self, "value")
class AwaitableListIntegrationAccountSchemaContentCallbackUrlResult(ListIntegrationAccountSchemaContentCallbackUrlResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListIntegrationAccountSchemaContentCallbackUrlResult(
base_path=self.base_path,
method=self.method,
queries=self.queries,
relative_path=self.relative_path,
relative_path_parameters=self.relative_path_parameters,
value=self.value)
def list_integration_account_schema_content_callback_url(integration_account_name: Optional[str] = None,
key_type: Optional[Union[str, 'KeyType']] = None,
not_after: Optional[str] = None,
resource_group_name: Optional[str] = None,
schema_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListIntegrationAccountSchemaContentCallbackUrlResult:
"""
The workflow trigger callback URL.
:param str integration_account_name: The integration account name.
:param Union[str, 'KeyType'] key_type: The key type.
:param str not_after: The expiry time.
:param str resource_group_name: The resource group name.
:param str schema_name: The integration account schema name.
"""
__args__ = dict()
__args__['integrationAccountName'] = integration_account_name
__args__['keyType'] = key_type
__args__['notAfter'] = not_after
__args__['resourceGroupName'] = resource_group_name
__args__['schemaName'] = schema_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:logic/v20190501:listIntegrationAccountSchemaContentCallbackUrl', __args__, opts=opts, typ=ListIntegrationAccountSchemaContentCallbackUrlResult).value
return AwaitableListIntegrationAccountSchemaContentCallbackUrlResult(
base_path=__ret__.base_path,
method=__ret__.method,
queries=__ret__.queries,
relative_path=__ret__.relative_path,
relative_path_parameters=__ret__.relative_path_parameters,
value=__ret__.value)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
a21a66bd24462f6e156cb4d444aadf2677ff1aaf | e01c5d1ee81cc4104b248be375e93ae29c4b3572 | /CLRS/Heapsort/priorityqueue.py | 653094269a47725b5d54ae867519f23870b97913 | [] | no_license | lalitzz/DS | 7de54281a34814601f26ee826c722d123ee8bd99 | 66272a7a8c20c0c3e85aa5f9d19f29e0a3e11db1 | refs/heads/master | 2021-10-14T09:47:08.754570 | 2018-12-29T11:00:25 | 2018-12-29T11:00:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | from heapsort import MaxHeap
from ctypes import py_object
import sys
class PriorityQueue(MaxHeap):
def __init__(self, size = 16):
self.size = size
self.heap_size = -1
self.heap = (size * py_object)()
def heap_maximum(self):
return self.heap[0]
def extract_max(self):
if self.heap_size < 0:
return Exception("No Element")
mx = self.heap[0]
self.heap[0] = self.heap[self.heap_size]
self.heap_size -= 1
self.max_heapify(self.heap, 0)
return mx
def inrease_key(self, i, key):
print(self.heap[i], key)
if key < self.heap[i]:
return Exception("Smaller Element")
self.heap[i] = key
while i > 0 and self.heap[self.parent(i)] < self.heap[i]:
self.heap[i], self.heap[self.parent(i)] = self.heap[self.parent(i)], self.heap[i]
i = self.parent(i)
def insert(self, key):
self.heap_size += 1
self.heap[self.heap_size] = -99999
self.inrease_key(self.heap_size, key)
q = PriorityQueue()
q.insert(1)
print(q.extract_max()) | [
"lalit.slg007@gmail.com"
] | lalit.slg007@gmail.com |
b1a75529371c00a2e917344ea7d00bc767b0a4c3 | 78db4e6170ba9325e6f601e62b583f01e7fa3940 | /Array/t-s-II.py | c6b67d664829a96686d0a237be20fd703b5afdc8 | [] | no_license | parambole/LeetCode | 3d3061f6240939085633441bf2e173f0d335975f | 1cfcf8da6983fffebe6d037ee134aeff3ca96320 | refs/heads/master | 2023-05-01T19:45:14.279187 | 2021-05-22T16:04:07 | 2021-05-22T16:04:07 | 244,571,425 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | class Solution:
def twoSum(self, numbers: List[int], target: int) -> List[int]:
low = 0
high = len(numbers) - 1
while low <= high:
a = numbers[low]
b = numbers[high]
two_sum = a + b
if two_sum > target:
high -= 1
elif two_sum < target:
low += 1
else:
return [low + 1, high + 1]
return []
| [
"noreply@github.com"
] | parambole.noreply@github.com |
b4b0eaabd5679d22dd8673390a43fe92367d60b5 | d2876c5d80f03336418a243c36a080fc3a65d629 | /build/learning_ros/Part_2/xform_utils/catkin_generated/pkg.installspace.context.pc.py | 229397ae9364950fdee98bc49179c6d638ab7d13 | [] | no_license | brucemingxinliu/ARIAC_Inventory_control | 302b8648451c38a461d07a57e8c12702538abd23 | 8084b095822d7ad8ccab6dbf98d85348c8a9fb8c | refs/heads/master | 2021-01-19T18:39:13.649891 | 2017-04-18T15:22:13 | 2017-04-18T15:22:13 | 88,370,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/brucemingxinliu/ros_ws/install/include".split(';') if "/home/brucemingxinliu/ros_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;tf;geometry_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lxform_utils".split(';') if "-lxform_utils" != "" else []
PROJECT_NAME = "xform_utils"
PROJECT_SPACE_DIR = "/home/brucemingxinliu/ros_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"mxl592@case.edu"
] | mxl592@case.edu |
b1599ccb7b5e3984fd561f3a47cfbb17b7b469d0 | de392462a549be77e5b3372fbd9ea6d7556f0282 | /accounts/migrations/0023_auto_20200806_1620.py | ec72bee9248b1b318199b9ff696b441de57f5366 | [] | no_license | amutebe/AMMS_General | 2830770b276e995eca97e37f50a7c51f482b2405 | 57b9b85ea2bdd272b44c59f222da8202d3173382 | refs/heads/main | 2023-07-17T02:06:36.862081 | 2021-08-28T19:07:17 | 2021-08-28T19:07:17 | 400,064,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | # Generated by Django 3.0.2 on 2020-08-06 13:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0022_auto_20200806_1620'),
]
operations = [
migrations.AlterField(
model_name='car',
name='car_number',
field=models.CharField(default='TEGA06082020800', max_length=200, primary_key=True, serialize=False, verbose_name='Corrective action no.:'),
),
migrations.AlterField(
model_name='employees',
name='employeeID',
field=models.CharField(default='TEGA750', max_length=10, primary_key=True, serialize=False, verbose_name='Employee ID'),
),
]
| [
"mutebe2@gmail.com"
] | mutebe2@gmail.com |
be15b63fc2f58d92b6c66e5cb35c2e73a94d2a0d | 8d9318a33afc2c3b5ca8ac99fce0d8544478c94a | /Books/Casandra DB/opscenter-5.1.0/lib/py/libcloud/common/cloudstack.py | 771c04d41d58e120c1538c9a44e23d75dd009111 | [] | no_license | tushar239/git-large-repo | e30aa7b1894454bf00546312a3fb595f6dad0ed6 | 9ee51112596e5fc3a7ab2ea97a86ec6adc677162 | refs/heads/master | 2021-01-12T13:48:43.280111 | 2016-11-01T22:14:51 | 2016-11-01T22:14:51 | 69,609,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:afe55538b95a252958994246ec811790d8191877ce3448932aefbca47915e43e
size 4540
| [
"tushar239@gmail.com"
] | tushar239@gmail.com |
2950e14f12615a7207ba565bf3a5717afda0d752 | 1b2609f093eed8e893593b75621c7b8495caa2ef | /PythonCode/custom_class1.py | 7f06196ead22f81bcc4d0f3aba4af1a82d8ad039 | [] | no_license | Frostmoune/My-Note | 06a2f0bd268bbdf07a68f1bc8408bceffb2ab927 | 7896a06e0531c0720d02850babe59155c7d5e51f | refs/heads/master | 2021-09-06T12:43:32.427911 | 2018-02-06T15:52:46 | 2018-02-06T15:52:46 | 107,769,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,200 | py | class Student(object):
__slots__=('_score','__name','__age','grade','className')
def __init__(self,name="",score=60,age=17):
self._score=score
self.__name=name
self.__age=age
@property
def age(self):
return self.__age
@age.setter
def age(self,age):
if not isinstance(age,int):
return TypeError("The age must be an integer")
if age<0:
return ValueError("The age must be positive")
self.__age=age
return "Done"
@property
def name(self):
return self.__name
def __getattr__(self,attr):
if attr=='finish':# 如果属性名为'finish'
return 'True'
if attr=='getscore':# 如果属性名为'getscore'
return lambda:self._score# 返回一个函数
return AttributeError("Student object has no attribute %s"%attr)# 否则输出没有这个属性
# __getattr__方法使得类的实例可以动态返回一个未被定义的属性或方法
print(Student('Bill'))
# 打印出<__main__.Student object at 0x0345EB10>
print(Student('Sam',70).finish)
print(Student('Sam',70).getscore())
class NewStudent(Student):
__slots__=()
def __init__(self,name="",score=60,age=17):
super(NewStudent,self).__init__(name,score,age)
def __str__(self):
return "Hello,My name is %s.\nI'm %d years old.\nI'm from class %d grade %d"%(self.name,self.age,self.className,self.grade)
# 为类定义好__str__()方法,当print该类的一个对象的时候会调用__str__()
__repr__=__str__
# 加上这条语句,在终端直接输入对象名,也能够按照自己想要的字符串输出对象
def __call__(self,string):
return string+str(self._score)
# 定义__call__方法后,可以将对象作为函数来调用
who=NewStudent("Bob",80,19)
who.grade,who.className=6,5
print(who.finish)
print(who.getscore())
# 说明子类可以直接继承父类的__getattr__方法
print(who)
# 根据__str__方法打印出相应的内容
print(who("My score is "))
s=Student()
print(callable(max)) #函数可以被调用
# callable用于判断一个对象是否可以被调用
print(callable([1,2,3]))
print(callable("abcde"))# list和str类型均不可被调用
print(callable(s))# 没有定义__call__()方法的类实例不可以被调用
print(callable(who))# 定义了__call__()方法的类实例可以被调用
class MoreStudent(NewStudent):
__slots__=()
def __init__(self,name="",score=60,age=17):
super(MoreStudent,self).__init__(name,score,age)
more=MoreStudent("Sam")
print(callable(more)) #True
print(more("My score is "))
# 子类可以继承父类的__call__()方法
more.grade,more.className=6,5
print(more)
# 子类同样可以继承父类的__str__()和__repr__()方法
class Chain(object):
def __init__(self, path=''):
self._path = path
def __getattr__(self, path):
return Chain('%s/%s' % (self._path, path))
# 返回一个新chain
def __str__(self):
return self._path
__repr__ = __str__
print(Chain('Student').father.wallet.money.cost.buy)
# 这可以看做一个调用链,输出Student/father/wallet/money/cost/buy | [
"810343087@qq.com"
] | 810343087@qq.com |
a0cb46cb66cc8241e6fba18549b2bce75fde32c9 | 69bf012ca88897cd87535701369f2b87c6522d57 | /modules/templates/historic/RLP/tools/poolsplit.py | da9345cf9ea10146a69e6ccb9490c1b82a80b0e8 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sahana/eden | e2cc73f6b34a2ab6579094da09367a9f0be10fd1 | 1cb5a76f36fb45fa636577e2ee5a9aa39f35b391 | refs/heads/master | 2023-08-20T20:56:57.404752 | 2023-02-24T17:16:47 | 2023-02-24T17:16:47 | 3,021,325 | 227 | 253 | NOASSERTION | 2023-01-10T10:32:33 | 2011-12-20T17:49:16 | Python | UTF-8 | Python | false | false | 2,507 | py | # -*- coding: utf-8 -*-
#
# Helper Script to split a pool retaining POOLREADER assignments
#
# RLP Template Version 1.5.0
#
# - Adjust POOLNAME and ADDPOOL below, then
# - Execute in web2py folder like:
# python web2py.py -S eden -M -R applications/eden/modules/templates/RLP/tools/poolsplit.py
#
# The name of the pool to split
POOLNAME = "Weitere Freiwillige"
# The name of the new pool containing a subset of the former
ADDPOOL = "Impf-ÄrztInnen"
import sys
# Override auth (disables all permission checks)
auth.override = True
# Failed-flag
failed = False
# Info
def info(msg):
sys.stderr.write("%s" % msg)
def infoln(msg):
sys.stderr.write("%s\n" % msg)
# Load models for tables
gtable = s3db.pr_group
rtable = auth.settings.table_group
mtable = auth.settings.table_membership
IMPORT_XSLT_FOLDER = os.path.join(request.folder, "static", "formats", "s3csv")
TEMPLATE_FOLDER = os.path.join(request.folder, "modules", "templates", "RLP")
# -----------------------------------------------------------------------------
# Upgrade user role assignments
#
if not failed:
info("Identify pools")
# Look up the PE-ID of either pool
query = (gtable.name.belongs([POOLNAME, ADDPOOL])) & \
(gtable.deleted == False)
pools = db(query).select(gtable.pe_id,
gtable.name,
)
pool_ids = {pool.name: pool.pe_id for pool in pools}
for name in (POOLNAME, ADDPOOL):
if name not in pool_ids:
infoln("...failed (%s pool not found)" % name)
failed = True
break
if not failed:
infoln("...found")
if not failed:
info("Assign POOLREADER role for %s pool" % ADDPOOL)
join = rtable.on(rtable.id == mtable.group_id)
query = (rtable.uuid == "POOLREADER") & \
(mtable.pe_id == pool_ids[POOLNAME]) & \
(mtable.deleted == False)
rows = db(query).select(mtable.group_id,
mtable.user_id,
join=join,
)
updated = 0
for row in rows:
auth.s3_assign_role(row.user_id, row.group_id, for_pe=pool_ids[ADDPOOL])
info(".")
updated += 1
infoln("...done (%s users updated)" % updated)
# -----------------------------------------------------------------------------
# Finishing up
#
if failed:
db.rollback()
infoln("UPGRADE FAILED - Action rolled back.")
else:
db.commit()
infoln("UPGRADE SUCCESSFUL.")
| [
"dominic@nursix.org"
] | dominic@nursix.org |
9ca6f9c9e78298ac662b69254bbd3621a2801e6d | 92754bb891a128687f3fbc48a312aded752b6bcd | /Algorithms/Python3.x/1028-Recover_a_Tree_From_Preorder_Traversal.py | ee9f4273b0813042aa8e53d9b4946cc541af0d3c | [] | no_license | daidai21/Leetcode | ddecaf0ffbc66604a464c3c9751f35f3abe5e7e5 | eb726b3411ed11e2bd00fee02dc41b77f35f2632 | refs/heads/master | 2023-03-24T21:13:31.128127 | 2023-03-08T16:11:43 | 2023-03-08T16:11:43 | 167,968,602 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | # Runtime: 56 ms, faster than 99.45% of Python3 online submissions for Recover a Tree From Preorder Traversal.
# Memory Usage: 14.5 MB, less than 25.00% of Python3 online submissions for Recover a Tree From Preorder Traversal.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def recoverFromPreorder(self, S: str) -> TreeNode:
self.values = [(len(s[1]), int(s[2])) for s in re.findall("((-*)(\d+))", S)][::-1]
print(self.values)
return self.recursion(0)
def recursion(self, level):
if not self.values or level != self.values[-1][0]:
return None
node = TreeNode(self.values.pop()[1])
node.left = self.recursion(level + 1)
node.right = self.recursion(level + 1)
return node
| [
"daidai4269@aliyun.com"
] | daidai4269@aliyun.com |
dec09ad760f0ee1f0e968dfe985298c3edece58a | f473f44ef230ecd8a3f8ececf613f06a6e4bc82b | /Array/medium-Find Peak Element.py | 4cab604540cb87acf4bfcd798061079bfb60c822 | [] | no_license | Wdiii/LeetCode-exercise-start-from-easy | 878e3e5789906803e02dee47429bcbe2fdfa0ada | d6e87c40445adafc297cbc22815e2f1882681596 | refs/heads/master | 2023-01-01T14:14:24.301141 | 2020-10-20T06:02:38 | 2020-10-20T06:02:38 | 254,024,036 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
class Solution:
def findPeakElement(self, nums):
#nums: List[int]) -> int:
left,right=0,len(nums)-1
while left<right:
mid=(left+right)//2
if nums[mid]>nums[mid+1]:
right=mid
else:
left=mid+1
return left
| [
"noreply@github.com"
] | Wdiii.noreply@github.com |
2c59b22ae05193dc6806df4b6d50e18c1f426821 | 732c0303ecfe8e915548846144e2a257d0ba0bd0 | /prob433.py | 49c4b49b51cd3fa8e7b11d6dd5d2c27b3ff62be4 | [] | no_license | mercurium/proj_euler | e2c041d833b80369f0e7b7aa493a9ff5c1e22d91 | a8326af80cac040fa515350cf9972dca6f116f82 | refs/heads/master | 2020-04-06T05:24:31.185785 | 2017-05-06T23:50:56 | 2017-05-06T23:50:56 | 9,540,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | #NOTE TODO need to solve it
import time
start = time.time()
count = 0
size = 100
vals = dict()
def euclid(a, b): #returns c,d such that ac+bd =1
if b == 0:
return 0
count = 0
while b != 0:
count, a,b = count+1,b, a%b
return count
ext_gcd = euclid
for i in xrange(1,size+ 1):
for j in xrange(1,i):
count += 2*ext_gcd(i,j)+1
count += size # This is for i = j
print count
print "Time Taken", time.time() - start
"""
OH YEAHHHH.... I forgot. I wrote this one using C++ since it was taking so long with python.... :D;;;
"""
| [
"jerrychen434@gmail.com"
] | jerrychen434@gmail.com |
06b8cfbd8e4db0312b13cb701fb8e3d3e4d2f5fe | e3cb14336e3199a6313ca4dc146faa23e537c139 | /apps/ice2o/migrations/0006_auto_20170413_2208.py | 5de72b245e6ef6adf7b4f55dae7c6a53744b3566 | [] | no_license | Ice2Ocean/ice2oceansDjango | c9456b01db0defc710d4a45b401cb5e10b3de4cb | fe3b06ce76fb0a4a9432944b79e532ebcbebeb46 | refs/heads/master | 2021-01-20T03:09:24.468150 | 2017-05-05T20:56:48 | 2017-05-05T20:56:48 | 89,504,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-13 22:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ice2o', '0005_delete_arendtregions'),
]
operations = [
migrations.DeleteModel(
name='Burgessregions',
),
migrations.DeleteModel(
name='Ergi',
),
migrations.DeleteModel(
name='Ergibins',
),
migrations.DeleteModel(
name='Icesat',
),
migrations.DeleteModel(
name='Lamb',
),
migrations.DeleteModel(
name='MasconFit',
),
migrations.DeleteModel(
name='MasconSolution',
),
migrations.DeleteModel(
name='Prism',
),
migrations.DeleteModel(
name='SnowradarOld',
),
migrations.DeleteModel(
name='SweingestLines',
),
migrations.DeleteModel(
name='SweingestMetadata',
),
]
| [
"landungs@uw.edu"
] | landungs@uw.edu |
3cfefd58945efdda1147317172355d4153234dee | af4eb8204923b5848fce3158c6f8a89a480ea1d8 | /script_config/CEP/BF_FieldTest_Car_0001.py | 052278808a159c4d2ac7ac929a7cb525106f9668 | [] | no_license | wanghaoplus/gatog | 4ab0c77d4f9eb93da136ad3933a68cbf0b5c5bca | 8935e20a426638462cd1cc7bc048a16751287a2f | refs/heads/master | 2022-04-10T21:36:20.395304 | 2020-03-26T10:09:28 | 2020-03-26T10:09:28 | 248,264,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | # -*- coding: utf-8 -*-
class BF_FieldTest_Car_0001(object):
LoopTimes = 1
sceneId = 'Car_BeiJing_HaiDian_Park_20191219143930'
class BF_FieldTest_Car_0001():
LoopTimes = 1 | [
"418816179@qq.com"
] | 418816179@qq.com |
00266d059d05c03c752b995e61d513c6057b08fe | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02715/s393667996.py | 407a3c9c7183463400b25788523525c49a568b6d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | MOD = 10 ** 9 + 7
N, K = map(int, input().split())
# cnt(n): gcd = n となる数列の個数
cnt = [0] * (K + 1)
# A1,...,ANのすべてがxの倍数である数列の個数 = floor(k/x)^N個
for n in range(1, K + 1):
cnt[n] = pow(K // n, N, MOD)
res = 0
for k in range(K, 0, -1):
p = 2
while p * k <= K:
cnt[k] -= cnt[p * k]
cnt[k] %= MOD
p += 1
res += cnt[k] * k
res %= MOD
print(res)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
040384d45f45d4820d0204220d039ccc0248148e | 50106d3b4996c56cd625ae5cebbd9e8175d76e4f | /jobadvisor/urls.py | 0310a61b3fcabdceeee7897d809699968f1883d9 | [] | no_license | ewgen19892/jobadvisor | 07231a6245024468a33cc6ff734a6276f9b6ec95 | 713b9d84ac70d964d46f189ab1f9c7b944b9684b | refs/heads/master | 2023-05-27T00:15:35.170865 | 2021-06-20T16:31:32 | 2021-06-20T16:31:32 | 378,689,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | """JobAdvisor URL Configuration."""
from django.contrib import admin
from django.urls import include, path
from django_prometheus.exports import ExportToDjangoView
from .views import index, schema_view
urlpatterns: list = [
path("admin/", admin.site.urls),
path("metrics/", ExportToDjangoView, name="metrics"),
path("docs/", schema_view.with_ui("redoc", cache_timeout=0), name="documentation"),
path("", index, name="index"),
path("", include("jobadvisor.authentication.urls")),
path("", include("jobadvisor.users.urls")),
path("", include("jobadvisor.companies.urls")),
path("", include("jobadvisor.reviews.urls")),
path("", include("jobadvisor.polls.urls")),
path("", include("jobadvisor.landing.urls")),
path("", include("jobadvisor.notifications.urls")),
]
| [
"e.bohovchuk@admitad.com"
] | e.bohovchuk@admitad.com |
6d26362466675d24c200997d89a9da62ba5bcd1d | 0a11a15cf64e25585d28f484bb2118e8f858cfeb | /programmers/0803_다리를 지나는 트럭.py | 16b2b3cd64d94336381fd955952083b0da2e50d8 | [] | no_license | seoul-ssafy-class-2-studyclub/GaYoung_SSAFY | 7d9a44afd0dff13fe2ba21f76d0d99c082972116 | 23e0b491d95ffd9c7a74b7f3f74436fe71ed987d | refs/heads/master | 2021-06-30T09:09:00.646827 | 2020-11-30T14:09:03 | 2020-11-30T14:09:03 | 197,476,649 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | from collections import deque
def solution(bridge_length, weight, truck_weights):
answer = 0
check = deque()
for j in range(bridge_length):
check.append(0)
trucks = deque()
for i in range(len(truck_weights)):
trucks.append(truck_weights[i])
now_weight = 0
while trucks:
answer += 1
x = check.popleft() # 나갈것 내보내고
now_weight -= x
if trucks[0] + now_weight > weight: # 못 들어가면
check.append(0)
else: # 들어가면
y = trucks.popleft()
check.append(y)
now_weight += y
while len(check) != 0:
answer += 1
check.popleft()
return answer | [
"gyyoon4u@naver.com"
] | gyyoon4u@naver.com |
1dcd64782b91ebecc3d591d3acd36c37b77e79be | 1483486fae7cc9adf9493dadb1e60c087e2d6af9 | /env/bin/dynamodb_dump | 4a3e5b487927eb479a9001c976a1f299c80ec854 | [] | no_license | e-TPO/server | a8cf711ddef443ccb1008fe2fbcc6ac45369f189 | bf735321a4d3e52ea0b65cee8a9533a8250e5985 | refs/heads/master | 2022-12-14T22:49:30.776005 | 2018-01-27T21:33:53 | 2018-01-27T21:33:53 | 119,088,886 | 1 | 1 | null | 2022-12-08T00:36:21 | 2018-01-26T18:37:09 | CSS | UTF-8 | Python | false | false | 2,171 | #!/home/kuldeep/Projects/etpo/e-tpo-server/env/bin/python3
import argparse
import errno
import os
import boto
from boto.compat import json
from boto.compat import six
DESCRIPTION = """Dump the contents of one or more DynamoDB tables to the local filesystem.
Each table is dumped into two files:
- {table_name}.metadata stores the table's name, schema and provisioned
throughput.
- {table_name}.data stores the table's actual contents.
Both files are created in the current directory. To write them somewhere else,
use the --out-dir parameter (the target directory will be created if needed).
"""
def dump_table(table, out_dir):
metadata_file = os.path.join(out_dir, "%s.metadata" % table.name)
data_file = os.path.join(out_dir, "%s.data" % table.name)
with open(metadata_file, "w") as metadata_fd:
json.dump(
{
"name": table.name,
"schema": table.schema.dict,
"read_units": table.read_units,
"write_units": table.write_units,
},
metadata_fd
)
with open(data_file, "w") as data_fd:
for item in table.scan():
# JSON can't serialize sets -- convert those to lists.
data = {}
for k, v in six.iteritems(item):
if isinstance(v, (set, frozenset)):
data[k] = list(v)
else:
data[k] = v
data_fd.write(json.dumps(data))
data_fd.write("\n")
def dynamodb_dump(tables, out_dir):
try:
os.makedirs(out_dir)
except OSError as e:
# We don't care if the dir already exists.
if e.errno != errno.EEXIST:
raise
conn = boto.connect_dynamodb()
for t in tables:
dump_table(conn.get_table(t), out_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog="dynamodb_dump",
description=DESCRIPTION
)
parser.add_argument("--out-dir", default=".")
parser.add_argument("tables", metavar="TABLES", nargs="+")
namespace = parser.parse_args()
dynamodb_dump(namespace.tables, namespace.out_dir)
| [
"pisdak79@gmail.com"
] | pisdak79@gmail.com | |
c7ea5cb5391ba09bec694be21d86fb43d7632139 | 403d6edb643431a4e2b1fc5d1d47f3ecc1cd4766 | /parse.py | f7178c17b3ea57784029df6b797bbff38c29c6ce | [] | no_license | elaineo/clscrapper | d49b965414322a3fa8e516eed37e5158ab7b7f77 | dc63a6cb5579830d41346b0e0b91882fd07def8e | refs/heads/master | 2020-12-24T20:43:14.449097 | 2015-03-09T05:14:38 | 2015-03-09T05:14:38 | 59,610,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | import csv
m=[]
with open("rideshare_new.tsv") as tsv:
for line in csv.reader(tsv, dialect="excel-tab", quoting=csv.QUOTE_NONE):
m.append(line)
f = open('./cldata.js', 'w+')
print >> f, 'cldata = '
print >> f, m
print >> f, ';'
m=[]
with open("zimride_new.tsv") as tsv:
last = ""
for line in csv.reader(tsv, dialect="excel-tab", quoting=csv.QUOTE_NONE):
if line[-2]+line[-1] == last:
last = ""
continue
else:
last = line[-2]+line[-1]
m.append(line)
f = open('./zdata.js', 'w+')
print >> f, 'zdata = '
print >> f, m
print >> f, ';' | [
"elaine.ou@gmail.com"
] | elaine.ou@gmail.com |
0857f9280e719896dcaa06a9cad718bb8e6b73b5 | d5fea299b1cc8b917a5da1f8bc6288c1ee4f87a5 | /django/practice/modelvideo/exemple/migrations/0001_initial.py | 24709f5cc9181929411b0d67f579bae3325e99e5 | [] | no_license | sashavinocurov/python-practice | 57bcadbaa24ceb5c41bf2fa9f6bc143e503a0cbd | f292cf39cc41fa40fbbc3b290b8d2974a179b9b4 | refs/heads/master | 2022-11-26T12:27:18.368684 | 2020-08-04T19:04:17 | 2020-08-04T19:04:17 | 268,667,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # Generated by Django 2.2.6 on 2019-10-25 00:15
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
],
),
]
| [
"vinocurovxandr@gmail.com"
] | vinocurovxandr@gmail.com |
89cc0a00d255af430b9ad537aeb098671caddcab | dfc86f29ca0f80b182acbd8e9cf5df57c695ebe2 | /app/tests/test_models.py | ef25ee59c8f2a7c2a589293f998e273708b4dcda | [
"MIT"
] | permissive | badmark/memegen | c17daa2ea4e115b3a482118f5b370d9488bb8ac2 | bef48c5d07a8cf1822fb3f5090e911f0af350ef5 | refs/heads/main | 2022-12-12T09:17:27.327806 | 2020-09-07T21:32:13 | 2020-09-07T21:32:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | from pathlib import Path
import log
import pytest
from .. import settings
from ..models import Template, Text
def describe_text():
def describe_stylize():
@pytest.mark.parametrize(
("style", "before", "after"),
[
("none", "Hello, world!", "Hello, world!"),
("upper", "Hello, world!", "HELLO, WORLD!"),
("lower", "Hello, world!", "hello, world!"),
("title", "these are words", "These Are Words"),
("capitalize", "these are words", "These are words"),
("mock", "these are words", "ThEsE aRe WorDs"),
("<unknown>", "Hello, world!", "Hello, world!"),
],
)
def it_applies_style(expect, style, before, after):
text = Text()
text.style = style
expect(text.stylize(before)) == after
def it_defaults_to_upper(expect):
text = Text()
text.style = ""
expect(text.stylize("Foobar")) == "FOOBAR"
def describe_template():
def describe_text():
def it_defaults_to_two_lines(expect):
template = Template.objects.get("_test")
expect(template.text) == [Text(), Text(anchor_x=0.0, anchor_y=0.8)]
def describe_image():
def it_has_generic_extension_when_absent(expect):
template = Template.objects.get("_test")
expect(template.image) == Path.cwd() / "templates" / "_test" / "default.img"
def it_creates_template_directory_automatically(expect):
template = Template.objects.get_or_create("_custom-empty")
template.datafile.path.unlink()
template.datafile.path.parent.rmdir()
log.info(template.image)
expect(template.datafile.path.parent.exists()) == True
def describe_create():
@pytest.mark.asyncio
async def it_downloads_the_image(expect, monkeypatch):
monkeypatch.setattr(settings, "DEBUG", True)
url = "https://www.gstatic.com/webp/gallery/1.jpg"
path = (
Path.cwd()
/ "templates"
/ "_custom-2d3c91e23b91d6387050e85efc1f3acb39b5a95d"
/ "default.img"
)
template = await Template.create(url)
expect(template.image) == path
expect(template.image.exists()) == True
@pytest.mark.asyncio
async def it_handles_invalid_urls(expect):
url = "http://example.com/does_not_exist.png"
template = await Template.create(url)
expect(template.image.exists()) == False
| [
"jacebrowning@gmail.com"
] | jacebrowning@gmail.com |
efb6911552a1abbddf0e7b85e16c5978556d0a55 | 7aecab27c231c5207f26a1682543b0d6c5093c06 | /server/dancedeets/nlp/styles/vogue.py | d295bd8e46c06f6f087f15a4c19e5c6e2d0da041 | [] | no_license | mikelambert/dancedeets-monorepo | 685ed9a0258ea2f9439ae4ed47ebf68bb5f89256 | 4eff1034b9afd3417d168750ea3acfaecd20adc6 | refs/heads/master | 2022-08-10T07:16:32.427913 | 2018-04-15T22:05:58 | 2018-04-15T22:05:58 | 75,126,334 | 24 | 2 | null | 2022-07-29T22:28:45 | 2016-11-29T22:04:44 | Python | UTF-8 | Python | false | false | 1,925 | py | # -*-*- encoding: utf-8 -*-*-
from dancedeets.nlp import base_auto_classifier
from dancedeets.nlp import event_types
from dancedeets.nlp import grammar
from dancedeets.nlp import style_base
from dancedeets.nlp.street import keywords
Any = grammar.Any
Name = grammar.Name
connected = grammar.connected
commutative_connected = grammar.commutative_connected
class Classifier(base_auto_classifier.DanceStyleEventClassifier):
COMBINED_KEYWORDS = Any(
keywords.VOGUE,
keywords.VOGUE_EVENT,
keywords.EASY_VOGUE,
keywords.TOO_EASY_VOGUE,
)
@base_auto_classifier.log_to_bucket('has_any_relevant_keywords')
def _has_any_relevant_keywords(self):
# Override this here.
# Don't use the other_bad_regex and GOOD/AMBIGUOUS keywords
return self._has(self.COMBINED_KEYWORDS)
@base_auto_classifier.log_to_bucket('is_dance_event')
def is_dance_event(self):
self._log('Starting %s classifier', self.vertical)
if not self._has_any_relevant_keywords():
self._log('does not have any relevant keywords for this style')
return False
from dancedeets.nlp.street import classifier
is_vogue = classifier.is_vogue_event(self._classified_event)
self._log(is_vogue[1])
if is_vogue[0]:
return 'has vogue keywords'
return False
class Style(style_base.Style):
@classmethod
def get_name(cls):
return 'VOGUE'
@classmethod
def get_rare_search_keywords(cls):
return [
'mini ball',
'vogue ball',
]
@classmethod
def get_popular_search_keywords(cls):
return [
'vogue',
'vogue dance',
]
@classmethod
def get_search_keyword_event_types(cls):
return event_types.STREET_EVENT_TYPES
@classmethod
def _get_classifier(cls):
return Classifier
| [
"mlambert@gmail.com"
] | mlambert@gmail.com |
d02db149919936f9a7568a6abefb7a3cbf9c93d5 | d2845579ea6aa51a2e150f0ffe6ccfda85d035ce | /common/python/utils/store_type.py | cc86ffe8b266b5710d17012d06aaf53f0a9326fd | [
"Apache-2.0"
] | permissive | as23187/WeFe | d8de9ff626f9f3e5d98e0850b0b717a80fd73e72 | ba92871d4b1d2eef6c606c34795f4575e84703bd | refs/heads/main | 2023-08-22T12:01:06.718246 | 2021-10-28T01:54:05 | 2021-10-28T01:54:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | # Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class StoreTypes(object):
STORE_TYPE_PROCESS = 'PROCESS'
STORE_TYPE_PERSISTENCE = 'PERSISTENCE'
class DBTypes(object):
LMDB = "LMDB"
CLICKHOUSE = "CLICKHOUSE"
SQLITE = "SQLITE"
| [
"winter.zou@welab-inc.com"
] | winter.zou@welab-inc.com |
1761ffff31d2444dc73980c649d50c88e4d096aa | e3adbec6cd8d0b50880b3b606352a1c751d4ac79 | /Code/105.py | 2896a9fcb7a11b6d6c6309904569945a030cc858 | [] | no_license | ZiyaoGeng/LeetCode | 3cc5b553df5eac2e5bbb3ccd0f0ed4229574fa2f | c4c60b289c0bd9d9f228d04abe948d6287e70ea8 | refs/heads/master | 2022-04-07T08:19:58.647408 | 2020-03-12T08:56:13 | 2020-03-12T08:56:13 | 218,981,503 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
sys.path.append('../functions/')
from tree import TreeNode
from typing import List
class Solution:
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
i = 0
while i < len(inorder) and preorder[0] != inorder[i]:
i += 1
if i < len(inorder):
p = TreeNode(inorder[i])
p.left = self.buildTree(preorder[1:i+1], inorder[:i-1])
p.right = self.buildTree(preorder[i+1:], inorder[i+1:])
return p
return None
| [
"593947521@qq.com"
] | 593947521@qq.com |
e0f54357b44e0c7f73bce3d2ac8d0d07b031ff62 | 6550cc368f029b3955261085eebbddcfee0547e1 | /第9部分-flask+智能玩具(火龙果)/day129/今日代码/TuXingSun/ws_serv.py | 6cd381d5dc52943efb15f990584883c4baacfd99 | [] | no_license | vividyellow/oldboyeduPython14qi | d00c8f45326e16464c3d4e8df200d93779f68bd3 | de1e9f6efafa2846c068b3fe5ad6e1ca19f74a11 | refs/heads/master | 2022-09-17T21:03:17.898472 | 2020-01-31T10:55:01 | 2020-01-31T10:55:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,648 | py | from flask import Flask,request
from geventwebsocket.handler import WebSocketHandler
from gevent.pywsgi import WSGIServer
from geventwebsocket.websocket import WebSocket
from baidu_aip import speech
import json
ws = Flask(__name__)
user_socket_dict = {}
@ws.route("/toy/<toy_id>")
def toy(toy_id):
user_socket = request.environ.get("wsgi.websocket") # type:WebSocket
if user_socket:
user_socket_dict[toy_id] = user_socket
print(user_socket_dict)
while 1:
msg = user_socket.receive()
# b'{to_user:"toy123",from_user:"123123123",chat:"11111.mp3","user_type":toy}'
msg_dict = json.loads(msg)
if msg_dict.get("user_type") == "toy":
xxtx = speech.get_remark(msg_dict.get("to_user"),msg_dict.get("from_user"))
msg_dict["chat"] = xxtx
usocket = user_socket_dict.get(msg_dict.get("to_user"))
usocket.send(json.dumps(msg_dict))
@ws.route("/app/<app_id>")
def app(app_id):
user_socket = request.environ.get("wsgi.websocket") # type:WebSocket
if user_socket:
user_socket_dict[app_id] = user_socket
print(user_socket_dict)
while 1:
msg = user_socket.receive()
msg_dict = json.loads(msg)
print(msg_dict)
xxtx = speech.get_remark(msg_dict.get("to_user"),msg_dict.get("from_user"))
usocket = user_socket_dict.get(msg_dict.get("to_user"))
msg_dict["chat"] = xxtx
usocket.send(json.dumps(msg_dict))
if __name__ == '__main__':
http_serv = WSGIServer(("0.0.0.0",9528),ws,handler_class=WebSocketHandler)
http_serv.serve_forever() | [
"524991368@qq.com"
] | 524991368@qq.com |
c81627bb65c4bed639b9cef6d34f184227b3b1e2 | af3ec207381de315f4cb6dddba727d16d42d6c57 | /dialogue-engine/test/programytest/parser/template/node_tests/test_select.py | 52aef102d9fcfb14ee0fe79167d86ff8f461482a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mcf-yuichi/cotoba-agent-oss | 02a5554fe81ce21517f33229101013b6487f5404 | ce60833915f484c4cbdc54b4b8222d64be4b6c0d | refs/heads/master | 2023-01-12T20:07:34.364188 | 2020-11-11T00:55:16 | 2020-11-11T00:55:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,921 | py | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.select import TemplateSelectNode, Query, NotQuery
from programytest.parser.base import ParserTestsBaseClass
class MockTemplateSelectNode(TemplateSelectNode):
def __init__(self):
TemplateSelectNode.__init__(self)
def resolve_to_string(self, context):
raise Exception("This is an error")
class TemplateSelectNodeTests(ParserTestsBaseClass):
def test_to_string(self):
root = TemplateSelectNode()
self.assertIsNotNone(root)
self.assertEqual("[SELECT]", root.to_string())
def test_to_xml(self):
root = TemplateNode()
node = TemplateSelectNode()
root.append(node)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual("<template><select /></template>", xml_str)
def test_to_xml_vars(self):
root = TemplateNode()
node = TemplateSelectNode(vars=["?x", "?y"])
root.append(node)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual("<template><select><vars>?x ?y</vars></select></template>", xml_str)
def test_to_xml_query(self):
root = TemplateNode()
query = Query("subject", "predicate", "object")
node = TemplateSelectNode(queries=[query])
root.append(node)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual("<template><select><q><subj>subject</subj><pred>predicate</pred><obj>object</obj></q></select></template>", xml_str)
def test_to_xml_not_query(self):
root = TemplateNode()
not_query = NotQuery("subject", "predicate", "object")
node = TemplateSelectNode(queries=[not_query])
root.append(node)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual("<template><select><notq><subj>subject</subj><pred>predicate</pred><obj>object</obj></notq></select></template>", xml_str)
def test_node_default(self):
root = TemplateNode()
node = TemplateSelectNode()
root.append(node)
result = root.resolve(self._client_context)
self.assertIsNotNone(result)
self.assertEqual("", result)
def test_node_exception_handling(self):
root = TemplateNode()
node = MockTemplateSelectNode()
root.append(node)
with self.assertRaises(Exception):
root.resolve(self._client_context)
| [
"cliff@cotobadesign.com"
] | cliff@cotobadesign.com |
279495290df39e9f9c8b707c65247810ddb1af5b | f791bd4e916d7531bf26d2064e9699118ba788a8 | /src/cldfbench/commands/readme.py | 089ce4504f52604f28b200108215c16d82dd5635 | [
"Apache-2.0"
] | permissive | cldf/cldfbench | 7732b0c59ad417fd195a0c460bd4bc55d047f2fa | f78799decd24b27704dca69e88ba84821191599d | refs/heads/master | 2023-08-17T21:20:17.893901 | 2023-08-02T10:32:48 | 2023-08-02T10:32:48 | 181,492,879 | 8 | 5 | Apache-2.0 | 2023-01-21T15:42:44 | 2019-04-15T13:30:04 | Python | UTF-8 | Python | false | false | 256 | py | """
Write dataset metadata to a README.md in the dataset's directory.
"""
from cldfbench.cli_util import add_dataset_spec, with_datasets
def register(parser):
add_dataset_spec(parser, multiple=True)
def run(args):
with_datasets(args, 'readme')
| [
"xrotwang@googlemail.com"
] | xrotwang@googlemail.com |
da3f296ef2402fb29412fb1923133007eb85463c | c7d7dfa5ac23b940e852a67155364439d9069486 | /pos_create_purchase_order/__manifest__.py | 3e94e76ff3d8fbabd844ed0253cb05b1fd2b9fac | [] | no_license | shurshilov/odoo | d163f6c939bcbfb36bdf83eeeeffca368f0a4722 | 8099e62254b7f1e113be7b522585dbc352aea5a8 | refs/heads/16.0 | 2023-09-04T03:02:31.427240 | 2023-09-03T16:25:28 | 2023-09-03T16:25:28 | 89,852,559 | 20 | 43 | null | 2023-09-03T06:30:22 | 2017-04-30T13:32:08 | JavaScript | UTF-8 | Python | false | false | 1,032 | py | # Copyright (C) 2021 Artem Shurshilov <shurshilov.a@yandex.ru>
# License OPL-1.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "POS create purchase order",
"summary": """Pos create purchase order quick create sales order in POS point of sale""",
"author": "Shurshilov Artem",
"website": "https://eurodoo.com",
"live_test_url": "https://youtu.be/h1fH0Bh_J0c",
# Categories can be used to filter modules in modules listing
"category": "Point of sale",
"version": "14.0.0.0",
# any module necessary for this one to work correctly
"depends": ["web", "point_of_sale", "purchase"],
"license": "OPL-1",
"price": 20,
"currency": "EUR",
"images": [
"static/description/preview.gif",
],
"data": [
"views/pos.xml",
],
"qweb": [
"static/src/xml/pos.xml",
],
"installable": True,
"application": False,
# If it's True, the modules will be auto-installed when all dependencies
# are installed
"auto_install": False,
}
| [
"shurshilov.a@yandex.ru"
] | shurshilov.a@yandex.ru |
6fe8a7cf0ab384b9423e44b5ea41632843e24eda | e8df354a756710f6af4f7e747b762136c016bb56 | /src/sentry/relay/query.py | 329f1c3e67c81dc3616715ded5f7b47f69858461 | [
"BSD-2-Clause"
] | permissive | thomas-mcdonald/sentry | 65ca34b87616b293d9055a7090b1ca2a41a8e971 | 81df3fd1ce2326314eb19abb8560bf1f7a48f045 | refs/heads/master | 2020-05-29T21:56:03.052651 | 2019-05-30T01:07:58 | 2019-05-30T01:07:58 | 189,395,177 | 1 | 0 | BSD-3-Clause | 2019-05-30T10:37:25 | 2019-05-30T10:37:25 | null | UTF-8 | Python | false | false | 1,247 | py | from __future__ import absolute_import
import six
from sentry.relay.queries.base import InvalidQuery
from sentry.relay.utils import type_to_class_name
def execute_queries(relay, queries):
from django.utils.importlib import import_module
query_results = {}
for query_id, query in six.iteritems(queries):
try:
relay_query = import_module('sentry.relay.queries.%s' % query.get('type', None))
except ImportError:
result = {
'status': 'error',
'error': 'unknown query'
}
else:
query_class = getattr(relay_query, type_to_class_name(query.get('type', None)))
query_inst = query_class(relay)
try:
query_inst.preprocess(query)
except InvalidQuery as exc:
result = {
'status': 'error',
'error': six.binary_type(exc),
}
else:
# TODO(mitsuhiko): support for pending or failing queries
result = {
'status': 'ok',
'result': query_inst.execute(),
}
query_results[query_id] = result
return query_results
| [
"armin.ronacher@active-4.com"
] | armin.ronacher@active-4.com |
8f8fb39d461e48b2b511b608e9ea13755c0c1b9a | 16003e4ba3b5ce007c6582887030758acc102ba9 | /MoneyTracker/userpreferences/migrations/0003_auto_20210128_1647.py | e35a9fb632e5bca0baf95f09dc2a3fcb85fc05ff | [] | no_license | coderj001/MoneyTracker | ea9d66673b778aa45d94b7fb04b5b5517b70240e | ea89068bf79d4459b26716204e54187ffd31a8f0 | refs/heads/master | 2023-04-16T17:34:54.960018 | 2021-04-14T04:17:33 | 2021-04-14T04:17:33 | 332,488,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | # Generated by Django 3.1.5 on 2021-01-28 16:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('userpreferences', '0002_auto_20210128_1338'),
]
operations = [
migrations.AlterField(
model_name='userpreference',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='preferences', to=settings.AUTH_USER_MODEL),
),
]
| [
"amirajubolchi001@gmail.com"
] | amirajubolchi001@gmail.com |
c7aa8be640357b2f744e51fa3a6aadc22e03efc5 | 698e8f5b85e82392c68472291aa774ab65e86ccd | /tests/mysite/myapp/migrations/0001_initial.py | 376c9b5401636a98afadbeb019a5a2df59a8257d | [
"MIT"
] | permissive | alexmojaki/friendly_states | 6b7880ed0e3c730b6714c836fc592c7ae8abf003 | 22fc338e8b56e25635e456f0370cd216022db304 | refs/heads/master | 2020-07-27T22:05:26.490777 | 2019-10-05T12:59:34 | 2019-10-05T12:59:34 | 209,228,885 | 20 | 3 | MIT | 2019-10-02T05:40:49 | 2019-09-18T05:55:16 | Python | UTF-8 | Python | false | false | 831 | py | # Generated by Django 2.2.5 on 2019-10-01 17:52
from django.db import migrations, models
import friendly_states.django
import myapp.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MyModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('state', friendly_states.django.StateField(myapp.models.TrafficLightMachine)),
('nullable_state', friendly_states.django.StateField(myapp.models.NullableMachine, null=True)),
('defaultable_state', friendly_states.django.StateField(myapp.models.DefaultableMachine, default=myapp.models.DefaultableState)),
],
),
]
| [
"alex.mojaki@gmail.com"
] | alex.mojaki@gmail.com |
a452b4a13e1ad462a06f93f0ee0803bae74faef0 | 67d70b30abf1a64c53997dc4e43d5593d411bfc2 | /feather/rad_onewire.py | c748c8be88ad2f31807f9974e403889e5718a2ee | [] | no_license | edgecollective/knuth-soil-remote | 4d095ba5e3f8381c06ad25b77e0c42e076f25649 | 4b8df9136d63e95794c98c54a8f4fb090eb29afd | refs/heads/master | 2020-06-11T13:44:22.572872 | 2019-08-11T22:45:24 | 2019-08-11T22:45:24 | 193,985,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | import board
import busio
import digitalio
import time
import adafruit_rfm9x
import gc
from adafruit_onewire.bus import OneWireBus
from adafruit_ds18x20 import DS18X20
SLEEP_TIME=120
# Initialize one-wire bus on board pin D5.
ow_bus = OneWireBus(board.D5)
# Scan for sensors and grab the first one found.
ds18_bus=ow_bus.scan()
print(ds18_bus)
ds18=[]
for probe in ds18_bus:
print(probe)
ds18.append(DS18X20(ow_bus, probe))
# lora radio
spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
cs = digitalio.DigitalInOut(board.RFM9X_CS)
reset = digitalio.DigitalInOut(board.RFM9X_RST)
rfm9x = adafruit_rfm9x.RFM9x(spi, cs, reset, 915.0)
# led
led = digitalio.DigitalInOut(board.D13)
led.direction = digitalio.Direction.OUTPUT
# default values if probe doesn't work
temp=30
moisture=20
while True:
if (len(ds18)==1):
sensor=ds18[0]
temp=float(sensor.temperature)
#print("read temp:"+temp)
#print('{0:0.3f}C'.format(sensor.temperature))
sendstr=str(temp)+","+str(moisture)
rfm9x.send(sendstr)
led.value=True
time.sleep(.1)
led.value = False
time.sleep(.1)
print(sendstr)
time.sleep(1)
gc.collect()
time.sleep(SLEEP_TIME)
| [
"donblair@gmail.com"
] | donblair@gmail.com |
6a9eb927017cca25592e6d0b797e82897ef6449f | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/input/aristotle-mdr/aristotle-metadata-registry/aristotle_mdr/contrib/help/templatetags/aristotle_help.py | 405a0ae05dd3862b40bce20141b41f78398956e7 | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 2,077 | py | from django import template
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse, resolve
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from aristotle_mdr import perms
import aristotle_mdr.models as MDR
from aristotle_mdr.contrib.help.models import ConceptHelp
from aristotle_mdr.templatetags.aristotle_tags import doc
register = template.Library()
@register.simple_tag
def help_doc(item, field='brief', request=None):
"""Gets the appropriate help text for a model.
"""
from aristotle_mdr.utils.doc_parse import parse_rst, parse_docstring
app_label = item._meta.app_label
model_name = item._meta.model_name
_help = ConceptHelp.objects.filter(app_label=app_label, concept_type=model_name).first()
if _help:
help_text = getattr(_help, field)
if help_text:
return relink(_help, field)
return doc(item)
@register.simple_tag
def relink(help_item, field):
text = getattr(help_item, field)
if not text:
return ""
import re
def make_link(match):
from django.core.urlresolvers import reverse_lazy
try:
m = match.group(1).lower().replace(' ', '').split('.', 1)
flags = match.group(2) or ""
if len(m) == 1:
app = help_item.app_label
model = m[0]
else:
app, model = m
ct = ContentType.objects.get(app_label=app, model=model)
if 's' not in flags:
name = ct.model_class().get_verbose_name()
else:
name=ct.model_class().get_verbose_name_plural()
return "<a href='{url}'>{name}</a>".format(
name=name,
url=reverse_lazy("concept_help", args=[app, model])
)
except:
return "unknown model - %s" % match.group(0)
text = re.sub(
r"\[\[([[a-zA-Z _.]+)(\|[a-z]+)?\]\]",
make_link, text
)
return text
| [
"rares.begu@gmail.com"
] | rares.begu@gmail.com |
4fdb2fc60ffe8db86858e3930323151bb0b1032b | 61ef3a8928caa88e874aa79d46203871d0e5bbfb | /p/dist-packages/kitchen/versioning/__init__.py | 69bc21ca04bb80149763f9e6a7da628fcf4f5d79 | [] | no_license | joshg111/craigslist_kbb | cd941f237821fa7d2a9a8140b2744294948ba56c | a473d0d389612e53a2ad6fe8a18d984474c44623 | refs/heads/master | 2021-01-10T09:35:04.334601 | 2018-05-26T16:05:20 | 2018-05-26T16:05:20 | 44,120,875 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,447 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2011 Red Hat, Inc
#
# kitchen is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# kitchen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with kitchen; if not, see <http://www.gnu.org/licenses/>
#
# Authors:
# Toshio Kuratomi <toshio@fedoraproject.org>
'''
----------------------------
PEP-386 compliant versioning
----------------------------
:pep:`386` defines a standard format for version strings. This module
contains a function for creating strings in that format.
'''
__version_info__ = ((1, 0, 0),)
import itertools
def version_tuple_to_string(version_info):
'''Return a :pep:`386` version string from a :pep:`386` style version tuple
:arg version_info: Nested set of tuples that describes the version. See
below for an example.
:returns: a version string
This function implements just enough of :pep:`386` to satisfy our needs.
:pep:`386` defines a standard format for version strings and refers to
a function that will be merged into the |stdlib|_ that transforms a tuple
of version information into a standard version string. This function is
an implementation of that function. Once that function becomes available
in the |stdlib|_ we will start using it and deprecate this function.
:attr:`version_info` takes the form that :pep:`386`'s
:func:`NormalizedVersion.from_parts` uses::
((Major, Minor, [Micros]), [(Alpha/Beta/rc marker, version)],
[(post/dev marker, version)])
Ex: ((1, 0, 0), ('a', 2), ('dev', 3456))
It generates a :pep:`386` compliant version string::
N.N[.N]+[{a|b|c|rc}N[.N]+][.postN][.devN]
Ex: 1.0.0a2.dev3456
.. warning:: This function does next to no error checking. It's up to the
person defining the version tuple to make sure that the values make
sense. If the :pep:`386` compliant version parser doesn't get
released soon we'll look at making this function check that the
version tuple makes sense before transforming it into a string.
It's recommended that you use this function to keep
a :data:`__version_info__` tuple and :data:`__version__` string in your
modules. Why do we need both a tuple and a string? The string is often
useful for putting into human readable locations like release
announcements, version strings in tarballs, etc. Meanwhile the tuple is
very easy for a computer to compare. For example, kitchen sets up its
version information like this::
from kitchen.versioning import version_tuple_to_string
__version_info__ = ((0, 2, 1),)
__version__ = version_tuple_to_string(__version_info__)
Other programs that depend on a kitchen version between 0.2.1 and 0.3.0
can find whether the present version is okay with code like this::
from kitchen import __version_info__, __version__
if __version_info__ < ((0, 2, 1),) or __version_info__ >= ((0, 3, 0),):
print 'kitchen is present but not at the right version.'
print 'We need at least version 0.2.1 and less than 0.3.0'
print 'Currently found: kitchen-%s' % __version__
'''
ver_components = []
for values in version_info:
if isinstance(values[0], int):
ver_components.append('.'.join(itertools.imap(str, values)))
else:
if isinstance(values[0], unicode):
modifier = values[0].encode('ascii')
else:
modifier = values[0]
if modifier in ('a', 'b', 'c', 'rc'):
ver_components.append('%s%s' % (modifier,
'.'.join(itertools.imap(str, values[1:])) or '0'))
else:
ver_components.append('.%s%s' % (modifier,
str(values[1])))
return unicode(''.join(ver_components), 'ascii')
__version__ = version_tuple_to_string(__version_info__)
__all__ = ('version_tuple_to_string',)
| [
"jagreenf111@gmail.com"
] | jagreenf111@gmail.com |
6976c67ae9bc1f5fff7808159fd2f0e8512ea6db | e00186e71a1f52b394315a0cbc27162254cfffb9 | /machine_learning_practice/test.py | 3511448c275c80959b0b5d9798e240cf52d11b51 | [] | no_license | anilkumar0470/git_practice | cf132eb7970c40d0d032520d43e6d4a1aca90742 | 588e7f654f158e974f9893e5018d3367a0d88eeb | refs/heads/master | 2023-04-27T04:50:14.688534 | 2023-04-22T05:54:21 | 2023-04-22T05:54:21 | 100,364,712 | 0 | 1 | null | 2021-12-08T19:44:58 | 2017-08-15T10:02:33 | Python | UTF-8 | Python | false | false | 250 | py | # import settings
# settings.l.append("junk")
# print(settings.l)
import logging
# def sample():
# return True, "sdfdsd"
#
# a,b = sample()
# print(a,b)
import logging
# logger = logging.getLogger("test")
logging.debug("sddrtrfsdf", "gfh") | [
"anilkumar.0466@gmail.com"
] | anilkumar.0466@gmail.com |
a47d6a561e569660c9bd139b61ac64236dbca507 | d9fb6c246965cbf290186268298859ddb913ee6e | /190820/반복문자 지우기.py | af4ab695f6bdb1d1e5f1b506c15e12da2022ddf3 | [] | no_license | 91hongppie/algorithm | 1ca6d54de6eab252c708bf83835ace8a109d73fc | 4c2fa8178e0ef7afbf0b736387f05cbada72f95d | refs/heads/master | 2020-07-20T22:17:40.700366 | 2020-06-29T00:06:11 | 2020-06-29T00:06:11 | 206,717,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | import sys
sys.stdin = open('sample_input_04.txt', 'r')
N = int(input())
for i in range(1, N+1):
words = input()
word_list = []
for idx, word in enumerate(words):
if idx == 0 or len(word_list) == 0:
word_list.append(word)
elif word_list[-1] == word:
word_list.pop()
else:
word_list.append(word)
print('#{} {}'.format(i, len(word_list))) | [
"91hongppie@gmail.com"
] | 91hongppie@gmail.com |
156a3482b2252e4ea5d3065636b5d7e8c3f1a5a3 | b4c740714b29ea325562377e117c9e0a181c6d5e | /env/bin/pyrsa-keygen | 253e283e8d9846e1d625b8385ff27067321d1a14 | [] | no_license | mazharoddin/keyword-pos-SEO-updated | b232d304e22dfa0a9076d7892a035d0407bba4b3 | e322e31b0db0d9bd9d79574fcfdaf4946b203ec4 | refs/heads/master | 2023-03-02T10:26:27.299213 | 2021-02-11T10:55:55 | 2021-02-11T10:55:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | #!/home/anand/Desktop/seotool/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import keygen
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(keygen())
| [
"anand98.ar@gmail.com"
] | anand98.ar@gmail.com | |
65ba85aa2a39a33e07ec80b6f5b898df37322d3a | f4882a337b5513555719755a008ced6efc7d8605 | /api/serializers.py | 4eafd9eddf4be886e258b4b1b9d534ef4fd07d73 | [] | no_license | SamirIngley/shopping-website | b7577970c995fbb91023828f1b327005692a8dd1 | 1aa9930efd575288195e75857750779a1f8335f3 | refs/heads/master | 2021-02-06T11:43:12.722733 | 2020-03-08T06:54:15 | 2020-03-08T06:54:15 | 243,911,445 | 1 | 0 | null | 2020-06-05T21:24:16 | 2020-02-29T05:37:28 | Python | UTF-8 | Python | false | false | 189 | py | from rest_framework.serializers import ModelSerializer
from post.models import Post
class PostSerializer(ModelSerializer):
class Meta:
model = Post
fields = '__all__'
| [
"samir.ingle7@gmail.com"
] | samir.ingle7@gmail.com |
9f9077620a8201390d7229d56699c1dd504e5426 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5658282861527040_0/Python/caethan/new_lottery_game_small.py | 98a932ba11cf3a3fb10667447b87778ca9bcf0f2 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,487 | py | #Template code developed by Brett Olsen (brett.olsen@gmail.com), 2013
#for the Google Code Jam programming contest
###############################################################################
# Imports go here
###############################################################################
#Do proper division
from __future__ import division
#For faster numerical analysis
import numpy as np
import sys
#Needed for the memoization decorator
import collections
import functools
###############################################################################
# Global variables (for caching, etc.) go here
###############################################################################
###############################################################################
# Decorators (taken from http://wiki.python.org/moin/PythonDecoratorLibrary)
###############################################################################
class memoize(object):
"""Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
###############################################################################
# Functions
###############################################################################
def precalculate():
"""Perform any calculations that need to be performed before the main path
(e.g., preparing lookup tables, etc.)
N.B. Make sure you make any important variables global so that other
functions can access them.
"""
pass
def read_input(infile):
"""This function should take an open input file, load in all of the
relevant information for a single case of the problem, and output it
as a single object.
"""
#Some utility functions to read in particular types of input
def read_int():
return int(infile.readline().strip())
def read_ints():
return np.array(infile.readline().split(), dtype=int)
def read_bigints(): #For ints that won't fit directly in an int32 array
line = infile.readline().split()
return np.array(map(lambda x: int(x), line))
def read_float():
return float(infile.readline().strip())
def read_floats():
return np.array(infile.readline().split(), dtype=float)
def read_string():
return infile.readline().strip()
def read_strings():
return np.array(infile.readline().split(), dtype=object) #N.B. general dtype
A, B, K = read_ints()
return A, B, K
def solve_case(case):
A, B, K = case
count = 0
for i in range(A):
for j in range(B):
if i & j < K:
count += 1
return count
###############################################################################
# Main execution path
###############################################################################
if __name__ == "__main__":
#Do any pre-calculations required
precalculate()
#Open up the input & output files based on the provided input file
assert len(sys.argv) == 2 #only one argument
assert sys.argv[1][-3:] == ".in" #input must end with .in
infile = open("%s" % sys.argv[1], 'r')
outfile = open("%s.out" % sys.argv[1][:-3], 'w')
#Read in the number of cases (the first input line) to iterate through
cases = int(infile.readline().strip('\n'))
for i in range(cases):
#Read in the input data for this case
case = read_input(infile)
#Solve the problem for this case
output = solve_case(case)
#Write out the output of this case
outfile.write('Case #%i: %s\n' % (i+1, output))
print 'Case #%i: %s\n' % (i+1, output)
#Close files
infile.close()
outfile.close()
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
fa9190bdb85cff19f7737a8efaaf6c2f20035450 | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /chrome/browser/resources/unpack_pak_test.py | ca5b87a33cf7c88c770696f293dbfbe8894813ee | [
"BSD-3-Clause"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 594 | py | #!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unpack_pak
import unittest
class UnpackPakTest(unittest.TestCase):
def testMapFileLine(self):
self.assertTrue(unpack_pak.ParseLine(' {"path.js", IDR_PATH}'))
def testGzippedMapFileLine(self):
self.assertTrue(unpack_pak.ParseLine(' {"path.js", IDR_PATH, false}'))
self.assertTrue(unpack_pak.ParseLine(' {"path.js", IDR_PATH, true}'))
if __name__ == '__main__':
unittest.main()
| [
"sunny.nam@samsung.com"
] | sunny.nam@samsung.com |
f0392c185c2f8547adf9ac1709b72fc4515a181b | e8b5fde61cda3752507c3d8309c9d7075047a8d0 | /desihub/desisim/py/desisim/scripts/py/qa_zfind.py | 3985b5edc84006bf58d7302b934b7dd6294818a0 | [
"BSD-3-Clause"
] | permissive | michaelJwilson/LBGCMB | f83af41d80d1991644607d473cd00246432fe601 | fca7d0cd515b756233dfd530e9f779c637730bc4 | refs/heads/master | 2021-08-15T19:40:43.151033 | 2020-04-22T17:42:11 | 2020-04-22T17:42:11 | 163,872,551 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,662 | py | #!/usr/bin/env python
#
# See top-level LICENSE.rst file for Copyright information
#
# -*- coding: utf-8 -*-
"""
Read fibermaps and zbest files to generate QA related to redshifts
and compare against the 'true' values
"""
import argparse
def parse(options=None):
parser = argparse.ArgumentParser(description="Generate QA on redshift for a production [v1.1]", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--verbose', action = 'store_true',
help = 'Provide verbose reporting of progress.')
parser.add_argument('--load_simz_table', type = str, default = None, required=False,
help = 'Load an existing simz Table to remake figures')
parser.add_argument('--reduxdir', type = str, default = None, metavar = 'PATH',
help = 'Override default path ($DESI_SPECTRO_REDUX/$SPECPROD) to processed data.')
parser.add_argument('--rawdir', type = str, default = None, metavar = 'PATH',
help = 'Override default path ($DESI_SPECTRO_REDUX/$SPECPROD) to processed data.')
parser.add_argument('--qafile', type = str, default = None, required=False,
help = 'path of QA file.')
parser.add_argument('--qafig_root', type=str, default=None, help = 'Root name (and path) of QA figure files')
parser.add_argument('--write_simz_table', type=str, default=None, help = 'Write simz to this filename')
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def main(args):
import os.path
import sys
import yaml
import numpy as np
import pdb
import matplotlib
matplotlib.use('agg')
import desispec.io
from desiutil.log import get_logger
from desisim.spec_qa import redshifts as dsqa_z
from desiutil.io import yamlify
import desiutil.depend
from desimodel.footprint import radec2pix
log = get_logger()
if args.load_simz_table is not None:
from astropy.table import Table
log.info("Loading simz info from {:s}".format(args.load_simz_table))
simz_tab = Table.read(args.load_simz_table)
else:
# Grab list of fibermap files
fibermap_files = []
zbest_files = []
nights = desispec.io.get_nights()
for night in nights:
for exposure in desispec.io.get_exposures(night, raw=True, rawdata_dir=args.rawdir):
# Ignore exposures with no fibermap, assuming they are calibration data.
fibermap_path = desispec.io.findfile(filetype='fibermap', night=night,
expid=exposure, rawdata_dir=args.rawdir)
if not os.path.exists(fibermap_path):
log.debug('Skipping exposure %08d with no fibermap.' % exposure)
continue
'''
# Search for zbest files
fibermap_data = desispec.io.read_fibermap(fibermap_path)
flavor = fibermap_data.meta['FLAVOR']
if flavor.lower() in ('arc', 'flat', 'bias'):
log.debug('Skipping calibration {} exposure {:08d}'.format(flavor, exposure))
continue
brick_names = set(fibermap_data['BRICKNAME'])
import pdb; pdb.set_trace()
for brick in brick_names:
zbest_path=desispec.io.findfile('zbest', groupname=brick, specprod_dir=args.reduxdir)
if os.path.exists(zbest_path):
log.debug('Found {}'.format(os.path.basename(zbest_path)))
zbest_files.append(zbest_path)
else:
log.warn('Missing {}'.format(os.path.basename(zbest_path)))
#pdb.set_trace()
'''
# Load data
fibermap_data = desispec.io.read_fibermap(fibermap_path)
# Skip calib
if fibermap_data['OBJTYPE'][0] in ['FLAT','ARC','BIAS']:
continue
elif fibermap_data['OBJTYPE'][0] in ['SKY','STD','SCIENCE']:
pass
else:
pdb.set_trace()
# Append fibermap file
fibermap_files.append(fibermap_path)
# Search for zbest files with healpy
ra_targ = fibermap_data['RA_TARGET'].data
dec_targ = fibermap_data['DEC_TARGET'].data
# Getting some NAN in RA/DEC
good = np.isfinite(ra_targ) & np.isfinite(dec_targ)
pixels = radec2pix(64, ra_targ[good], dec_targ[good])
uni_pixels = np.unique(pixels)
for uni_pix in uni_pixels:
zbest_files.append(desispec.io.findfile('zbest', groupname=uni_pix, nside=64))
# Cut down zbest_files to unique ones
zbest_files = list(set(zbest_files))
if len(zbest_files) == 0:
log.fatal('No zbest files found')
sys.exit(1)
# Write? Table
simz_tab = dsqa_z.load_z(fibermap_files, zbest_files)
if args.write_simz_table is not None:
simz_tab.write(args.write_simz_table, overwrite=True)
# Meta data
meta = dict(
DESISIM = desiutil.depend.getdep(simz_tab.meta, 'desisim'),
SPECTER = desiutil.depend.getdep(simz_tab.meta, 'specter'),
SPECPROD = os.getenv('SPECPROD', 'unknown'),
PIXPROD = os.getenv('PIXPROD', 'unknown'),
)
# Run stats
log.info("Running stats..")
summ_dict = dsqa_z.summ_stats(simz_tab)
if args.qafile is not None:
log.info("Generating yaml file: {:s}".format(args.qafile))
# yamlify
# Write yaml
with open(args.qafile, 'w') as outfile:
outfile.write(yaml.dump(yamlify(meta), default_flow_style=False))
outfile.write(yaml.dump(yamlify(summ_dict), default_flow_style=False))
if args.qafig_root is not None:
log.info("Generating QA files")
# Summary for dz of all types
outfile = args.qafig_root+'_dzsumm.png'
#dsqa_z.dz_summ(simz_tab, outfile=outfile)
# Summary of individual types
#outfile = args.qafig_root+'_summ_fig.png'
#dsqa_z.summ_fig(simz_tab, summ_dict, meta, outfile=outfile)
for objtype in ['BGS', 'MWS', 'ELG','LRG', 'QSO_T', 'QSO_L']:
outfile = args.qafig_root+'_zfind_{:s}.png'.format(objtype)
dsqa_z.obj_fig(simz_tab, objtype, summ_dict, outfile=outfile)
if __name__ == '__main__':
main()
| [
"mjw@roe.ac.uk"
] | mjw@roe.ac.uk |
22579972a40c1b60cdc54eb3ebb4de4cac0531d8 | be02a1c01b3372c76b82e706b2b5c28ec9a22a9a | /guillotina_cms/interfaces/versioning.py | 052c2103411595330144e5b081a7a9405537041c | [
"BSD-2-Clause"
] | permissive | twaapo/guillotina_cms | b5399a69207cae81ad713c9269e11f63c576b937 | b42b3b91e0b1bd6807c3dd53277b3418d066e382 | refs/heads/master | 2020-04-09T08:22:39.063907 | 2018-11-21T18:36:39 | 2018-11-21T18:36:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | from zope.interface import Interface
from guillotina.fields.annotation import BucketListField
class IVersioning(Interface):
diffs = BucketListField(
readonly=True,
annotation_prefix='diffs-',
bucket_len=5
)
class IVersioningMarker(Interface):
"""Marker interface an object is Versioning"""
class IDiffCalculator(Interface):
"""Interface for an adapter to look for diffs"""
| [
"ramon.nb@gmail.com"
] | ramon.nb@gmail.com |
bc9f20100d01d4d02d8243509f5296b2c18bc191 | 21dda956404fa5e19a42f031c34b64367281ebee | /024.py | b95d401c458d5d4e8251f7be26e39730c1dbb62f | [] | no_license | caudate-julie/ProjectEuler | e9bb7b6ab503a7a622be8f5599402ce07bc68121 | 80cd002ac8c452faac4da6f251275e4f90309e79 | refs/heads/master | 2020-12-30T00:16:50.961389 | 2020-02-06T21:50:10 | 2020-02-06T21:50:10 | 238,792,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | # What is the millionth lexicographic permutation
# of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?
from math import factorial
from time import time
def Permutations(a, ordnum):
seq = list(range(a))
n = len(seq)
pnum = [0]*(n-1)
seq.sort()
for i in range(1, n):
pnum[i-1] = factorial(n-i)
trans = [0]*(n-1)
t = 0
for i in range(1, n):
j = 0
while t <= ordnum-1 and (i == 1 or j < pnum[i-2]):
t += pnum[i-1]
j += 1
t -= pnum[i-1]
j -= 1
trans[i-1] = j
for i in range(n-1):
t = seq[i]
seq[i] = seq[i+trans[i]]
seq[i+trans[i]] = t
seq[(i+1):] = sorted(seq[(i+1):])
return seq
LEN = 10
ORD = 1000000
ts = time()
print Permutations(LEN, ORD)
print "Time:", round(time() - ts, 4)
| [
"kyra-mensk@yandex.ru"
] | kyra-mensk@yandex.ru |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.