blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ffe67684bd72535366fe4685e80555f0e7c0ac0b | 32aa592fc3b7376b8fb36c0ac2245e6571fb7bdd | /datbase4.py | 0b901173cb7de7c8fc6d558bb3f096c56f52cca7 | [] | no_license | 1234567890boo/ywviktor | 00063a1c58b392cb4230791a9cffced6d2864889 | 12b18887243e9b64fb08db4ad440c7144bdf8cbb | refs/heads/master | 2022-05-14T12:43:43.422329 | 2022-04-30T04:24:05 | 2022-04-30T04:24:05 | 57,740,866 | 0 | 0 | null | 2020-06-29T00:22:12 | 2016-05-01T18:48:27 | Python | UTF-8 | Python | false | false | 129 | py | import sqlite3
# Open (or create) the sample database and grab a cursor.
conn = sqlite3.connect('sample4.db')
c = conn.cursor()
# The SQL text must be passed to the cursor; as a bare line it was a Python
# syntax error and the script could not run at all.
c.execute("SELECT COUNT(studentid) FROM STUDENTS")
student_count = c.fetchone()[0]
print(student_count)
conn.commit()
c.close()
conn.close()  # close the connection as well, not just the cursor
| [
"you@example.com"
] | you@example.com |
ce596f62fcc6f217f7ff9a62753caafb1dedf83c | e423c84898f3fbb1e43c49475aedad0d6547c22f | /tensorflow/python/keras/distribute/dataset_creator_model_fit_test_base.py | 371b154447c68c0073caf1758a97f3daf509fc5f | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | yash982000/tensorflow | 9a9cfd1433bae84a5f197c20f5d3526883f781a2 | 0d09ce8fc82d8eaf9734bd50ea5f0bf739be766c | refs/heads/master | 2023-05-09T07:35:24.800146 | 2021-05-31T16:15:35 | 2021-05-31T16:15:35 | 358,957,873 | 1 | 0 | Apache-2.0 | 2021-05-31T16:15:36 | 2021-04-17T18:50:16 | null | UTF-8 | Python | false | false | 7,420 | py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DatasetCreator` with `Model.fit` across usages and strategies."""
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.keras import callbacks as callbacks_lib
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.layers import core as core_layers
from tensorflow.python.keras.layers.preprocessing import string_lookup
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.utils import dataset_creator
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class DatasetCreatorModelFitTestBase(test.TestCase, parameterized.TestCase):
  """The base class for DatasetCreator with Model.fit tests."""
  def _get_dataset_fn(self, use_lookup_layer):
    """Returns a `dataset_fn` suitable for `dataset_creator.DatasetCreator`.

    Args:
      use_lookup_layer: If True, the returned dataset feeds string features
        through a `StringLookup` layer backed by a vocabulary file written to
        the test's temp dir; otherwise it yields random numeric data.
    """
    if use_lookup_layer:
      filepath = os.path.join(self.get_temp_dir(), "vocab")
      with open(filepath, "w") as f:
        f.write("\n".join(["earth", "wind", "and", "fire"]))
      def dataset_fn(input_context):
        # The dataset is not manually sharded, so the context is unused.
        del input_context
        lookup_layer = string_lookup.StringLookup(
            num_oov_indices=1, vocabulary=filepath)
        # "michigan" is not in the vocabulary, exercising the OOV path.
        x = np.array([["earth", "wind", "and", "fire"],
                      ["fire", "and", "earth", "michigan"]])
        y = np.array([0, 1])
        map_fn = lambda x, y: (lookup_layer(x), y)
        return dataset_ops.DatasetV2.from_tensor_slices(
            (x, y)).shuffle(10).repeat().batch(2).map(map_fn)
    else:
      def dataset_fn(input_context):
        del input_context
        x = random_ops.random_uniform((10, 10))
        y = random_ops.random_uniform((10,))
        return dataset_ops.DatasetV2.from_tensor_slices(
            (x, y)).shuffle(10).repeat().batch(2)
    return dataset_fn
  def _model_compile(self,
                     strategy,
                     steps_per_execution=1,
                     run_eagerly=False,
                     with_normalization_layer=False,
                     use_lookup_layer=False):
    """Builds and compiles a small `Sequential` model under `strategy`.

    Returns:
      A `(model, callbacks)` tuple; `callbacks` holds a callback that asserts
      epochs arrive in order and that a float `loss` is present in the logs.
    """
    class ResultAssertingCallback(callbacks_lib.Callback):
      """A callback that asserts the result of the tests."""
      def __init__(self):
        self._prev_epoch = -1
      def on_epoch_end(self, epoch, logs=None):
        logging.info("testModelFit: epoch=%r, logs=%r", epoch, logs)
        if epoch <= self._prev_epoch:
          raise RuntimeError("Epoch is supposed to be larger than previous.")
        self._prev_epoch = epoch
        is_loss_float = (
            logs.get("loss", None) is not None and
            isinstance(logs["loss"], (float, np.floating)))
        if not is_loss_float:
          raise RuntimeError("loss is supposed to be in the logs and float.")
      def on_train_end(self, logs=None):
        # `_model_fit` always trains for 10 epochs, so the last one is 9.
        if self._prev_epoch != 9:
          raise RuntimeError("Unexpected last epoch: {}".format(
              self._prev_epoch))
    with strategy.scope():
      model = sequential.Sequential([core_layers.Dense(10)])
      if with_normalization_layer:
        norm = keras.layers.BatchNormalization(
            axis=-1, input_shape=(4, 4, 3), momentum=0.8)
        model.add(norm)
      model.add(core_layers.Dense(1, activation="sigmoid"))
      self._accuracy_metric = keras.metrics.Accuracy()
    model.compile(
        gradient_descent.SGD(),
        loss="binary_crossentropy",
        metrics=[self._accuracy_metric],
        steps_per_execution=steps_per_execution,
        run_eagerly=run_eagerly)
    return model, [ResultAssertingCallback()]
  def _model_fit(self,
                 strategy,
                 steps_per_execution=1,
                 validation_data=None,
                 x=None,
                 steps_per_epoch=10,
                 run_eagerly=False,
                 with_normalization_layer=False,
                 callbacks=None,
                 use_lookup_layer=False):
    """Compiles a model and runs `Model.fit` for 10 epochs; returns the model.

    `x` and `validation_data` default to fresh `DatasetCreator`s built from
    `_get_dataset_fn(use_lookup_layer)`.
    """
    if callbacks is None:
      callbacks = []
    model, default_callbacks = self._model_compile(strategy,
                                                   steps_per_execution,
                                                   run_eagerly,
                                                   with_normalization_layer,
                                                   use_lookup_layer)
    callbacks += default_callbacks
    x = x or dataset_creator.DatasetCreator(
        self._get_dataset_fn(use_lookup_layer))
    validation_data = (
        validation_data or
        dataset_creator.DatasetCreator(self._get_dataset_fn(use_lookup_layer)))
    model.fit(
        x,
        epochs=10,
        steps_per_epoch=steps_per_epoch,
        callbacks=callbacks,
        validation_data=validation_data,
        validation_steps=steps_per_epoch)
    return model
  def _model_evaluate(self,
                      strategy,
                      steps_per_execution=1,
                      validation_data=None,
                      steps=10,
                      run_eagerly=False,
                      with_normalization_layer=False,
                      callbacks=None):
    """Compiles a model and runs `Model.evaluate`; returns the model.

    If `validation_data` is None, a random-data `DatasetCreator` is used.
    """
    if callbacks is None:
      callbacks = []
    model, default_callbacks = self._model_compile(
        strategy,
        steps_per_execution,
        run_eagerly,
        with_normalization_layer,
    )
    callbacks += default_callbacks
    def dataset_fn(input_context):
      del input_context
      x = random_ops.random_uniform((10, 10))
      y = random_ops.random_uniform((10, 1))
      return dataset_ops.DatasetV2.from_tensor_slices(
          (x, y)).shuffle(10).repeat().batch(8)
    validation_data = (
        validation_data or dataset_creator.DatasetCreator(dataset_fn))
    model.evaluate(x=validation_data, steps=steps, callbacks=callbacks)
    return model
  def _model_predict(
      self,
      strategy,
      model=None,
      steps_per_execution=1,
      test_data=None,
      steps=10,
      with_normalization_layer=False,
  ):
    """Runs `Model.predict`, compiling a model first if none is given.

    Returns:
      A `(model, predictions)` tuple with predictions rounded to 4 decimals
      so tests can compare them deterministically.
    """
    callbacks = []
    if model is None:
      model, default_callbacks = self._model_compile(
          strategy,
          steps_per_execution,
          with_normalization_layer=with_normalization_layer,
      )
      callbacks += default_callbacks
    def create_test_data():
      x = constant_op.constant([1., 2., 3., 1., 5., 1.])
      return dataset_ops.DatasetV2.from_tensor_slices(x).repeat().batch(2)
    test_data = test_data or create_test_data()
    predictions = model.predict(x=test_data, steps=steps, callbacks=callbacks)
    predictions = np.around(predictions, 4)
    return model, predictions
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
5201444f1f49e41e337b827110bb30274cc8a6e4 | 633f28bc4f1aa3954e33ccea95a935d46e103024 | /src/tests/plugins/test_ticketoutputpdf.py | 088a5ac545801e6893c3aaafb10e4ae06bf82aff | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | sha2017/pretix | 442ebbf33938ccc9d03e89bb2bbd67b576a0d4f6 | dcd6f2d8d12f69d68f18fb9b1b6fe8927d60bfe9 | refs/heads/master | 2020-12-24T09:53:31.542612 | 2016-10-21T08:49:09 | 2016-10-21T08:49:09 | 68,192,732 | 0 | 0 | null | 2016-09-14T09:25:45 | 2016-09-14T09:25:44 | null | UTF-8 | Python | false | false | 1,808 | py | from datetime import timedelta
from decimal import Decimal
from io import BytesIO
import pytest
from django.utils.timezone import now
from PyPDF2 import PdfFileReader
from pretix.base.models import (
Event, Item, ItemVariation, Order, OrderPosition, Organizer,
)
from pretix.plugins.ticketoutputpdf.ticketoutput import PdfTicketOutput
@pytest.fixture
def env():
    """Builds a minimal pretix fixture: one event with one pending order.

    Returns:
        A ``(event, order)`` tuple.  The order holds two positions of the
        same red T-Shirt variation, so a rendered ticket PDF has two pages.
    """
    o = Organizer.objects.create(name='Dummy', slug='dummy')
    event = Event.objects.create(
        organizer=o, name='Dummy', slug='dummy',
        date_from=now(), live=True
    )
    o1 = Order.objects.create(
        code='FOOBAR', event=event, email='dummy@dummy.test',
        status=Order.STATUS_PENDING,
        datetime=now(), expires=now() + timedelta(days=10),
        total=Decimal('13.37'), payment_provider='banktransfer'
    )
    shirt = Item.objects.create(event=event, name='T-Shirt', default_price=12)
    shirt_red = ItemVariation.objects.create(item=shirt, default_price=14, value="Red")
    OrderPosition.objects.create(
        order=o1, item=shirt, variation=shirt_red,
        price=12, attendee_name=None, secret='1234'
    )
    OrderPosition.objects.create(
        order=o1, item=shirt, variation=shirt_red,
        price=12, attendee_name=None, secret='5678'
    )
    return event, o1
@pytest.mark.django_db
def test_generate_pdf(env, mocker):
    """PdfTicketOutput renders one PDF page per order position."""
    # Patch out text drawing so no font rendering is needed; we only check
    # that drawString was reached and that the PDF structure is right.
    mocked = mocker.patch('reportlab.pdfgen.canvas.Canvas.drawString')
    event, order = env
    event.settings.set('ticketoutput_pdf_code_x', 30)
    event.settings.set('ticketoutput_pdf_code_y', 50)
    event.settings.set('ticketoutput_pdf_code_s', 2)
    o = PdfTicketOutput(event)
    fname, ftype, buf = o.generate(order)
    assert ftype == 'application/pdf'
    pdf = PdfFileReader(BytesIO(buf))
    # The fixture order has two positions, hence two ticket pages.
    assert pdf.numPages == 2
    assert mocked.called
| [
"mail@raphaelmichel.de"
] | mail@raphaelmichel.de |
6bb7485728d62675725bc6326bf68cdb4cdef2ad | 2d0bada349646b801a69c542407279cc7bc25013 | /src/vai_optimizer/pytorch_binding/pytorch_nndct/nn/modules/interpolate.py | 5b30027d57596d14e2a282f4d1a6439e43baedca | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] | permissive | Xilinx/Vitis-AI | 31e664f7adff0958bb7d149883ab9c231efb3541 | f74ddc6ed086ba949b791626638717e21505dba2 | refs/heads/master | 2023-08-31T02:44:51.029166 | 2023-07-27T06:50:28 | 2023-07-27T06:50:28 | 215,649,623 | 1,283 | 683 | Apache-2.0 | 2023-08-17T09:24:55 | 2019-10-16T21:41:54 | Python | UTF-8 | Python | false | false | 1,564 | py |
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from nndct_shared.quantization import maybe_get_quantizer
from nndct_shared.quantization import quantize_tensors
import pytorch_nndct.utils as py_utils
__all__ = ['Interpolate']
class deephi_Interpolate(torch.nn.Module):
  """Quantization-aware wrapper around ``torch.nn.functional.interpolate``.

  Quantizes the input before and the output after the resize, using the
  quantizer obtained from the active NNDCT quantization context.
  ``self.node`` is attached later by the framework (None until then).
  """
  def __init__(self, *args, **kwards):
    super(deephi_Interpolate, self).__init__(*args, **kwards)
    self.node = None
    self.quant_mode, self.quantizer = maybe_get_quantizer()
  def forward(self,
              input,
              size=None,
              scale_factor=None,
              mode='nearest',
              align_corners=None):
    """Quantize input, interpolate, then quantize the result."""
    qinput = quantize_tensors([input], self.node, tensor_type='input')[0]
    output = torch.nn.functional.interpolate(qinput, size, scale_factor, mode,
                                             align_corners)
    output = quantize_tensors([output], self.node)[0]
    return output
@py_utils.register_quant_op
def Interpolate(*args, **kwargs):
  """Factory registered as a quantized op; builds a deephi_Interpolate."""
  return deephi_Interpolate(*args, **kwargs)
| [
"do-not-reply@gitenterprise.xilinx.com"
] | do-not-reply@gitenterprise.xilinx.com |
30d407573895e37be8c589a7c6732422960405a5 | 93ad56acf9e48d590768c2c2500d76ec2c4b6bb1 | /app/main/errors.py | 7403c733b99cdc878224c88e068d40eec4dc92b1 | [] | no_license | nanohaikaros/learn_flask | ce87657cdddc6fb12bb09db625732ced370f0434 | cf3fa1ebd968da226d8f71dadbf24d6656f0ab4d | refs/heads/master | 2021-01-19T22:46:38.087567 | 2017-05-22T06:33:32 | 2017-05-22T06:33:32 | 88,866,035 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | from flask import render_template
from . import main
@main.app_errorhandler(403)
def forbidden(e):
    """Render the custom 403 page, preserving the 403 status code."""
    return render_template('403.html'), 403
@main.app_errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page, preserving the 404 status code."""
    return render_template('404.html'), 404
@main.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500 | [
"1371689491@qq.com"
] | 1371689491@qq.com |
4d07559abce454e6988074e172fc27bcd712241c | a2f6e449e6ec6bf54dda5e4bef82ba75e7af262c | /venv/Lib/site-packages/pandas/tests/io/test_stata.py | 5708459bbeee5168c52eb369a9e89caf6e5cf5be | [] | no_license | mylonabusiness28/Final-Year-Project- | e4b79ccce6c19a371cac63c7a4ff431d6e26e38f | 68455795be7902b4032ee1f145258232212cc639 | refs/heads/main | 2023-07-08T21:43:49.300370 | 2021-06-05T12:34:16 | 2021-06-05T12:34:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:ac3c82a633dbd93b8dd3e42a92c364056f31c17d5aab641a1531a16c854100ce
size 80351
| [
"chuksajeh1@gmail.com"
] | chuksajeh1@gmail.com |
183474e30fec8665fd8abb76258ae4b4c206c2f2 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/security/azure-mgmt-security/azure/mgmt/security/aio/operations/_regulatory_compliance_standards_operations.py | 8a7c10f30b23bf8e13a849a86fea24e6ef2a177e | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 8,189 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RegulatoryComplianceStandardsOperations:
"""RegulatoryComplianceStandardsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.security.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
filter: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.RegulatoryComplianceStandardList"]:
"""Supported regulatory compliance standards details and state.
:param filter: OData filter. Optional.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RegulatoryComplianceStandardList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.security.models.RegulatoryComplianceStandardList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegulatoryComplianceStandardList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RegulatoryComplianceStandardList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/regulatoryComplianceStandards'} # type: ignore
async def get(
self,
regulatory_compliance_standard_name: str,
**kwargs
) -> "_models.RegulatoryComplianceStandard":
"""Supported regulatory compliance details state for selected standard.
:param regulatory_compliance_standard_name: Name of the regulatory compliance standard object.
:type regulatory_compliance_standard_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RegulatoryComplianceStandard, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.RegulatoryComplianceStandard
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegulatoryComplianceStandard"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-01-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'regulatoryComplianceStandardName': self._serialize.url("regulatory_compliance_standard_name", regulatory_compliance_standard_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RegulatoryComplianceStandard', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/regulatoryComplianceStandards/{regulatoryComplianceStandardName}'} # type: ignore
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
94e898b82eb56429ee14e655db9f586c9926c987 | d852a39939776a2374320f4a1371e307ec5722a8 | /evphoto/urls.py | a2d81337a3a03304700da1a3d0a130bb7dc7b6f1 | [
"MIT"
] | permissive | raikel/evphoto | 5dfd4ecb5a3516ecd636260a0ba75d4ba1970e55 | 2c3702b2adcd53b6e03a2d596bd79e174e1a93a8 | refs/heads/main | 2023-02-13T05:07:30.618069 | 2021-01-14T19:21:13 | 2021-01-14T19:21:13 | 329,567,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | """evphoto URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic import TemplateView
from .apidoc import swagger_view, redoc_view
# Root URL map: SPA index, Django admin, and the photos API.
urlpatterns = [
    path('', TemplateView.as_view(template_name='photos/index.html')),
    path('admin/', admin.site.urls),
    path('api/', include('photos.urls', 'photos',)),
] + (
    # Serve media and static files through Django (static() is a no-op
    # outside DEBUG, so this only takes effect in development).
    static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) +
    static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
)
# API documentation endpoints are opt-in via the API_DOCS setting.
if settings.API_DOCS:
    urlpatterns.extend([
        path('api/docs/swagger/', swagger_view, name='docs-swagger'),
        path('api/docs/redoc/', redoc_view, name='docs-redoc'),
    ])
| [
"raikelbl@gmail.com"
] | raikelbl@gmail.com |
7c27410170786c5d93f67b9ebcfa51fd96f37230 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02266/s420424562.py | 880d6c89e7de0c5785e81f9f9077c6c0f5b05e62 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | from queue import LifoQueue
MAP = input()
que = LifoQueue()
res = LifoQueue()
for i, m in enumerate(MAP):
if m=='\\':
que.put(i)
elif m=='/':
if not que.empty():
j = que.get(False)
v = i - j
t = (j, v)
while not res.empty():
pre = res.get(False)
if (pre[0] > j):
t = (t[0], t[1] + pre[1])
else:
res.put(pre)
res.put(t)
break
else:
res.put(t)
summaly = 0
lakes = []
while not res.empty():
v = res.get()
lakes.append(v[1])
summaly += v[1]
print(summaly)
print(len(lakes), *(reversed(lakes)))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4601aa9f22da55cb30b966de539a25b4e9f6c58d | 55628a9a08a6b6646b4a8aa74bedbf2e3fd7d850 | /.history/master_20200119193629.py | 2db10fab9ef6d872c3dc3a5b8b648d2a52490261 | [] | no_license | StRobertCHSCS/final-project-team | c115dc11b318f7ac782c94860a8801bb558bd107 | 48907e72813c4dd3b48ff36f794f6fce04533219 | refs/heads/master | 2020-12-03T22:35:37.833893 | 2020-01-31T04:05:38 | 2020-01-31T04:05:38 | 231,506,873 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,480 | py | '''
-**make snake longer when eaten
- FIGURE OUT HOW TO KNOW WHERE TO ADD THE NEXT BLOCK (MOVE LAST LOCATION TO BACK)
-fix player_location lists, so that the list only has the location of the current snake location, not infinite list (done)
- fix apple so it disappears when you go over it (done)
'''
import arcade
import random
# Set how many rows and columns we will have
ROW_COUNT = 29
COLUMN_COUNT = 51
# This sets the WIDTH and HEIGHT of each grid location
WIDTH = 20
HEIGHT = 20
# This sets the margin between each cell
# and on the edges of the screen.
MARGIN = 5
# Do the math to figure out our screen dimensions
SCREEN_WIDTH = (WIDTH + MARGIN) * COLUMN_COUNT + MARGIN
SCREEN_HEIGHT = (HEIGHT + MARGIN) * ROW_COUNT + MARGIN
# Movement flags: at most one is True while the snake is moving.
up = False
down = False
left = False
right = False

# Snake head position, in grid cells.
player_x_column = 5
player_y_row = 5

# Current snake length (in segments) and the history of head positions.
body = 1
snake_pos = []

# Apple spawn cell.  random.randint is inclusive at both ends and valid
# cells run 0 .. COUNT - 1 (the same bound snake_move() checks), so the
# upper bound must be COUNT - 1 or the apple can spawn outside the grid.
apple_x = random.randint(0, COLUMN_COUNT - 1)
apple_y = random.randint(0, ROW_COUNT - 1)
apple_display = True

grid_texture = arcade.load_texture("29x51_grid.jpg")
def on_update(delta_time):
    """Advance the game one tick; delta_time is required by arcade but unused."""
    snake_move()
def on_draw():
    """Redraw the whole frame: background grid, snake, then apple."""
    arcade.start_render()
    grid_background()
    snake()
    apple()
def grid_background():
    """Draw the pre-rendered grid texture centered on the screen."""
    arcade.draw_texture_rectangle(SCREEN_WIDTH//2, SCREEN_HEIGHT//2, grid_texture.width, grid_texture.height, grid_texture, 0)
def snake_move():
    """Move the snake head one cell in the latched direction.

    While the head is inside the grid, step it; otherwise call restart().
    NOTE(review): the bounds are checked *before* the step, so the head can
    move one cell past the edge and the restart only triggers on the next
    tick — confirm whether that off-screen frame is intended.
    """
    global player_x, player_y, player_x_column, player_y_row
    if (0 <= player_x_column < COLUMN_COUNT) and (0 <= player_y_row < ROW_COUNT):
        if up:
            player_y_row += 1
        elif down:
            player_y_row -= 1
        elif right:
            player_x_column += 1
        elif left:
            player_x_column -= 1
        # for i in range (1):
        #     player_loaction_x = player_loaction_x(player_x_column)
        #     player_loaction_y.append(player_y_row)
    else:
        restart()
    # Convert the grid cell to pixel coordinates (cell center).
    player_x = (MARGIN + WIDTH) * player_x_column + MARGIN + WIDTH // 2
    player_y = (MARGIN + HEIGHT) * player_y_row + MARGIN + HEIGHT // 2
def restart():
    """Reset all game state after the snake dies.

    Moves the head back to the starting cell, clears the recorded body
    positions, shrinks the snake back to one segment and stops movement.
    """
    global player_x_column, player_y_row
    global snake_pos, body
    global up, down, left, right
    player_x_column = 5
    player_y_row = 5
    # The original assigned `snake_len = []` and `body = 1` without declaring
    # them global, so the reset had no effect and the snake kept its length
    # across deaths.  Resetting the real globals fixes that.
    snake_pos = []
    body = 1
    up = False
    down = False
    left = False
    right = False
    print ("You died")
def snake():
    """Draw the snake and maintain the head-position history.

    NOTE(review): body segments beyond the head are appended at the fixed
    placeholder cells (10 + num, 10) rather than at the previous head
    positions stored in snake_pos — the trailing-segment logic is clearly
    unfinished (see the commented-out attempts below and the TODO list in
    the module docstring).
    """
    global player_x_column, player_y_row, apple_x, apple_y, snake_len, body
    arcade.draw_rectangle_filled(player_x , player_y, WIDTH, HEIGHT, arcade.color.BLUE)
    snake_len = [[player_x_column, player_y_row]]
    if (body > 1):
        for num in range (1, body):
            snake_len.append([10 + num, 10])
        #     snake_len[i]= snake_len[i-1]
    # Record the head position and cap the history at the snake's length.
    snake_pos.append([player_x_column, player_y_row])
    if body < len(snake_pos):
        snake_pos.pop(0)
    print(snake_len, "body", body, len(snake_pos), snake_pos)
    # for index in range (body - 1, 0, -1):
    #     player_x_column = snake_len[index - 1][0]
    #     player_y_row = snake_len[index - 1][1]
    #     snake_len[index]
    for i in range (body):
        arcade.draw_rectangle_filled(
            (MARGIN + WIDTH) * snake_len[i][0] + MARGIN + WIDTH // 2,
            (MARGIN + HEIGHT) * snake_len[i][1] + MARGIN + HEIGHT // 2 ,
            WIDTH, HEIGHT, arcade.color.BLUE)
def apple():
    """Draw the apple and grow the snake when the head reaches it.

    When the head occupies the apple's cell the snake gains one segment and
    the apple respawns at a random cell inside the grid.
    """
    # NOTE: the original also declared `global SPEED`, but SPEED is never
    # defined or used anywhere (the speed experiment in setup() is commented
    # out), so the dead declaration was dropped.
    global apple_x, apple_y, apple_x_coordinate, apple_y_coordinate, body, snake_len
    apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
    apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
    if (player_x_column == apple_x) and (player_y_row == apple_y):
        apple_display = False
        body += 1
        print ("hit")
    else:
        apple_display = True
    if apple_display is True:
        arcade.draw_rectangle_filled(apple_x_coordinate, apple_y_coordinate, WIDTH, HEIGHT, arcade.color.RED)
    elif apple_display is False:
        # random.randint is inclusive at both ends and valid cells run
        # 0 .. COUNT - 1, so the upper bound must be COUNT - 1 or the apple
        # could respawn one cell outside the playable grid.
        apple_x = random.randint(0, COLUMN_COUNT - 1)
        apple_y = random.randint(0, ROW_COUNT - 1)
        apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
        apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
        # The original line compared (==) instead of assigning — a no-op;
        # the intent was to mark the apple visible again.
        apple_display = True
def on_key_press(key, modifiers):
    """Latch the snake's travel direction when a WASD key is pressed.

    Exactly one of the four direction flags ends up True for a recognised
    key; any other key leaves the current direction untouched.
    """
    global up, down, left, right
    if key in (arcade.key.W, arcade.key.A, arcade.key.S, arcade.key.D):
        up = key == arcade.key.W
        down = key == arcade.key.S
        left = key == arcade.key.A
        right = key == arcade.key.D
def on_key_release(key, modifiers):
    """Ignored: the snake keeps moving in the last latched direction."""
    pass
def on_mouse_press(x, y, button, modifiers):
    """Mouse input is not used by this game."""
    pass
def setup():
    """Create the arcade window, hook up callbacks and start the game loop."""
    # NOTE(review): `grid` is declared global but never assigned or read in
    # this function — looks like leftover from an earlier revision.
    global grid
    # global player_x_column, apple_x, player_y_row, apple_y, SPEED
    # SPEED = 10
    # if (player_x_column == apple_x) and (player_y_row == apple_y):
    #     SPEED += 5
    arcade.open_window(SCREEN_WIDTH, SCREEN_HEIGHT, "snake")
    arcade.set_background_color(arcade.color.BLACK)
    # Tick the game logic 10 times per second.
    arcade.schedule(on_update, 1/10)
    # Override arcade window methods
    window = arcade.get_window()
    window.on_draw = on_draw
    window.on_key_press = on_key_press
    window.on_key_release = on_key_release
    window.on_mouse_press = on_mouse_press
    arcade.run()
if __name__ == '__main__':
setup() | [
"clementina1023@gmail.com"
] | clementina1023@gmail.com |
b04b76458382bec38d6c2113df7825f5c942223b | d125c002a6447c3f14022b786b07712a7f5b4974 | /tests/functional/arno/indices/test_lower_bound_desc_02_segments_01.py | 690211e08ed9cda8673a99c976031908ca55972f | [
"MIT"
] | permissive | FirebirdSQL/firebird-qa | 89d5b0035071f9f69d1c869997afff60c005fca9 | cae18186f8c31511a7f68248b20f03be2f0b97c6 | refs/heads/master | 2023-08-03T02:14:36.302876 | 2023-07-31T23:02:56 | 2023-07-31T23:02:56 | 295,681,819 | 3 | 2 | MIT | 2023-06-16T10:05:55 | 2020-09-15T09:41:22 | Python | UTF-8 | Python | false | false | 2,148 | py | #coding:utf-8
"""
ID: index.lower-bound-desc-2-segments
TITLE: DESC 2-segment index lower bound
DESCRIPTION:
Check if all 5 values are fetched with "equals" operator over first segment and
"lower than or equal" operator on second segment. 2 values are bound to the lower
segments and 1 value is bound to the upper segment.
FBTEST: functional.arno.indices.lower_bound_desc_02_segments_01
"""
import pytest
from firebird.qa import *
init_script = """CREATE TABLE Table_2_10 (
F1 INTEGER,
F2 INTEGER
);
COMMIT;
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 1);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 2);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 3);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 4);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 5);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 6);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 7);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 8);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 9);
INSERT INTO Table_2_10 (F1, F2) VALUES (1, 10);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 1);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 2);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 3);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 4);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 5);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 6);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 7);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 8);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 9);
INSERT INTO Table_2_10 (F1, F2) VALUES (2, 10);
COMMIT;
CREATE DESC INDEX I_Table_2_10_DESC ON Table_2_10 (F1, F2);
COMMIT;
"""
db = db_factory(init=init_script)
test_script = """SET PLAN ON;
SELECT
t.F1,
t.F2
FROM
Table_2_10 t
WHERE
t.F1 = 2 and t.F2 <= 5;"""
act = isql_act('db', test_script)
expected_stdout = """PLAN (T INDEX (I_TABLE_2_10_DESC))
F1 F2
============ ============
2 1
2 2
2 3
2 4
2 5"""
@pytest.mark.version('>=3')
def test_1(act: Action):
    # Run the query through isql and compare the normalised output (access
    # plan plus result set) against the expected text defined above.
    act.expected_stdout = expected_stdout
    act.execute()
    assert act.clean_stdout == act.clean_expected_stdout
| [
"pcisar@ibphoenix.cz"
] | pcisar@ibphoenix.cz |
3c39fb15eff05bb0b750c408fe4b51bb2ece9eb1 | 01733042e84a768b77f64ec24118d0242b2f13b8 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/globals/protocolstack/radiusglobals/radiusglobals.py | 732fa15ba936da62be6717259bf7eb06313f5074 | [
"MIT"
] | permissive | slieberth/ixnetwork_restpy | e95673905854bc57e56177911cb3853c7e4c5e26 | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | refs/heads/master | 2023-01-04T06:57:17.513612 | 2020-10-16T22:30:55 | 2020-10-16T22:30:55 | 311,959,027 | 0 | 0 | NOASSERTION | 2020-11-11T12:15:34 | 2020-11-11T12:06:00 | null | UTF-8 | Python | false | false | 5,069 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class RadiusGlobals(Base):
"""Global settings for the RADIUS extension.
The RadiusGlobals class encapsulates a list of radiusGlobals resources that are managed by the user.
A list of resources can be retrieved from the server using the RadiusGlobals.find() method.
The list can be managed by using the RadiusGlobals.add() and RadiusGlobals.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'radiusGlobals'
_SDM_ATT_MAP = {
'ObjectId': 'objectId',
}
def __init__(self, parent):
super(RadiusGlobals, self).__init__(parent)
@property
def DhcpOptionSet(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.dhcpglobals.dhcpoptionset.dhcpoptionset.DhcpOptionSet): An instance of the DhcpOptionSet class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.dhcpglobals.dhcpoptionset.dhcpoptionset import DhcpOptionSet
return DhcpOptionSet(self)
@property
def ObjectId(self):
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
def add(self):
"""Adds a new radiusGlobals resource on the server and adds it to the container.
Returns
-------
- self: This instance with all currently retrieved radiusGlobals resources using find and the newly added radiusGlobals resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained radiusGlobals resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ObjectId=None):
"""Finds and retrieves radiusGlobals resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve radiusGlobals resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all radiusGlobals resources from the server.
Args
----
- ObjectId (str): Unique identifier for this object
Returns
-------
- self: This instance with matching radiusGlobals resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of radiusGlobals data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the radiusGlobals resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
13f9ee52c3ae3189d989e10eec846b988ed1b723 | e85e846960750dd498431ac8412d9967646ff98d | /certificates/migrations/0007_auto_20170222_1005.py | 09bd66c793fc3f42e6ff4fa3b92692a45a7f4742 | [] | no_license | onosaburo/clublink_django | 19368b4a59b3aed3632883ceffe3326bfc7a61a6 | d2f6024b6224ea7f47595481b3382b8d0670584f | refs/heads/master | 2022-03-30T05:30:12.288354 | 2020-01-27T18:09:11 | 2020-01-27T18:09:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-02-22 10:05
# flake8: noqa
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: replaces the Certificate model's Meta
    # options with custom object-level permissions used elsewhere in the app.
    dependencies = [
        ('certificates', '0006_auto_20170222_0827'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='certificate',
            options={'permissions': (('can_create', 'Can create certificates'), ('can_view', 'Can view certificates'))},
        ),
    ]
| [
"bestwork888@outlook.com"
] | bestwork888@outlook.com |
06784a03823b09ab1c522efa2cce9f839dc4592b | 99b92de16867f4846b9119e94d13377a58c182f4 | /doped_cath/tst/migrations/tp/0006_load_nDOPE.py | a6bd4c6354b2c256866c882e3299a25ce42abbd8 | [] | no_license | CATH-summer-2017/domchop | 8ef37f8e218ff99e7f5e1c7a75c406327ad22ff0 | d0c717ebc0541eba0d196a3c5885e4edf83a0ecb | refs/heads/master | 2020-12-14T07:20:43.062710 | 2017-09-25T23:21:05 | 2017-09-25T23:21:05 | 95,545,543 | 2 | 3 | null | 2017-06-30T11:16:10 | 2017-06-27T10:13:23 | null | UTF-8 | Python | false | false | 1,506 | py |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
# def load_classes_sql():
# from coffeehouse.settings import PROJECT_DIR
# import os
# sql_statements = open(os.path.join(PROJECT_DIR,'tst/sql/load_classes.sql'), 'r').read()
# return sql_statements
'''
INSERT INTO DJANGO_CATH.tst_classification (id,Class,arch,topo,homsf,s35,s60,s95,s100,version_id,level_id)
select * from CATH.temp_class;
'''
def file_len(fname):
    """Return the number of lines in the file *fname*.

    Fixes two defects of the enumerate-based original: it no longer raises
    a NameError when the file is empty (returns 0 instead), and the file
    handle is closed deterministically via the context manager.
    """
    with open(fname) as f:
        return sum(1 for _ in f)
def load_nDOPE(apps, schema_editor):
    """Data migration: load per-domain nDOPE scores from the bundled CSV.

    Each CSV row is (domain_id, nDOPE); the matching Domain row is fetched
    via the historical model registry and updated.  Progress is reported
    every 100 rows.  (Python 2 code: note the `print >>` statements.)
    """
    import sys
    # classification = apps.get_model("tst", "classification")
    domain = apps.get_model("tst", "domain")
    import os
    cwd = os.getcwd()
    # NOTE(review): the CSV path below is relative, so this migration only
    # works when run from the project root -- the cwd is printed as a hint.
    print >>sys.stdout, '\n\n\n %s'%cwd
    dope_file=('tst/migrations/bak/nDOPE-s35-v410.csv');
    import csv
    cnt = 0;
    cmax = file_len(dope_file);
    with open(dope_file,'r') as f:
        c = csv.reader(f);
        for row in c:
            dom = domain.objects.get(domain_id=row[0])
            dom.nDOPE = float(row[1]);
            dom.save()
            cnt += 1
            # Periodic progress report so long runs show signs of life.
            if not cnt%100:
                print >>sys.stdout, '%d of %d lines loaded'%(cnt,cmax);
    # f.readlines()
    # for d in domain.objects.
def do_nothing(apps, schema_editor):
    # Reverse operation for RunPython: on rollback the loaded nDOPE values
    # are simply left in place.
    pass
class Migration(migrations.Migration):
    # Data migration: populates Domain.nDOPE from the CSV snapshot above.
    dependencies = [
        ('tst', '0005_make_parents'),
    ]
    operations = [
        migrations.RunPython(load_nDOPE,do_nothing),
    ]
| [
"shouldsee.gem@gmail.com"
] | shouldsee.gem@gmail.com |
34d116201474c7e62a2d9adcc6b8d3a85318c2d2 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /zqMREZ2MQd9M5jNfM_9.py | d93270c362002418e307c213ec315c4133aacb07 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | """
Create a function that takes a number as an argument and returns `True` or
`False` depending on whether the number is symmetrical or not. A number is
symmetrical when it is the same as its reverse.
### Examples
is_symmetrical(7227) ➞ True
is_symmetrical(12567) ➞ False
is_symmetrical(44444444) ➞ True
is_symmetrical(9939) ➞ False
is_symmetrical(1112111) ➞ True
### Notes
N/A
"""
def is_symmetrical(num):
    """Return True if *num* reads the same forwards and backwards.

    Examples: 7227 and 1112111 are symmetrical; 12567 and 9939 are not.
    """
    digits = str(num)
    # A number is symmetrical exactly when its digit string is a palindrome;
    # this replaces the original's convoluted bool(1)/bool(0) branches.
    return digits == digits[::-1]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
d9d7d3810f2986d67be89873e3fb282685427dc5 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /J9fCHDa3yYJWnK3A7_17.py | a1830f2d014b3eddfc13778136564b2dc07fd3af | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | """
A **happy number** is a number which yields a `1` by repeatedly summing up the
square of its digit. If such a process results in an endless cycle of numbers
containing `4`, the number is said to be an **unhappy number**.
Create a function that accepts a number and determines whether the number is a
_happy number_ or not. Return `True` if so, `False` otherwise.
### Examples
is_happy(67) ➞ False
is_happy(89) ➞ False
is_happy(139) ➞ True
is_happy(1327) ➞ False
is_happy(2871) ➞ False
is_happy(3970) ➞ True
### Notes
* You are expected to solve this challenge via recursion.
* You can check on the **Resources** tab for more details about recursion.
* A non-recursive version of this challenge can be found in [here](https://edabit.com/challenge/rGAcibgZ6u9MtasfW).
"""
def f(n):
    """Sum of the squares of the decimal digits of *n*."""
    total = 0
    while n:
        n, digit = divmod(n, 10)
        total += digit * digit
    return total


def is_happy(n):
    """Recursively decide happiness.

    The digit-square-sum chain of a happy number reaches 1, while an
    unhappy number's chain falls into the cycle that contains 4.
    """
    if n == 1:
        return True
    if n == 4:
        return False
    return is_happy(f(n))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
f476eb1935ab5f29eb6380a213b3e4fd835af68f | bc8509d57a162fb685da06a98c67dc8130d96316 | /src/nninst/utils/numpy.py | 9607ffcb70f0a6dc19d1c7f583b44e22f2c0c57c | [
"Apache-2.0"
] | permissive | Ptolemy-DL/Ptolemy | 2065e2d157d641010567062410bee4608691d059 | f72a531286d17c69e0e2e84d0ad8a5b0587e2e08 | refs/heads/master | 2023-05-29T08:58:18.328258 | 2021-06-15T09:28:16 | 2021-06-15T09:28:16 | 284,590,756 | 115 | 5 | NOASSERTION | 2020-10-24T04:18:51 | 2020-08-03T03:06:35 | Python | UTF-8 | Python | false | false | 2,781 | py | from typing import Union
import numpy as np
__all__ = ["argtopk", "arg_approx", "arg_approx_signed", "repeat", "concatenate"]
def get_int_k(array: np.ndarray, k: Union[int, float]) -> int:
    """Normalise *k* into an element count for *array*.

    A float k strictly between 0 and 1 means "this fraction of the array",
    rounded and then clamped into [1, array.size - 1]; any other float is
    rejected with ValueError.  Non-floats are returned unchanged.
    """
    if type(k) is not float:
        # Exact-type check kept from the original contract: only plain
        # Python floats are treated as fractions.
        return k
    if not (0.0 < k < 1.0):
        raise ValueError()
    count = round(array.size * k)
    if count == array.size:
        return array.size - 1
    if count == 0:
        return 1
    return count


def argtopk(array: np.ndarray, k: Union[int, float]) -> np.ndarray:
    """Flattened indices of the k largest entries (unordered for k > 1)."""
    k = get_int_k(array, k)
    if k == 1:
        # A partition is wasted work for a single element; argmax suffices.
        return np.array([np.argmax(array)])
    return np.argpartition(array, -k, axis=None)[-k:]


def arg_sorted_topk(array: np.ndarray, k: Union[int, float]) -> np.ndarray:
    """Indices of the k largest entries, ordered largest first."""
    k = get_int_k(array, k)
    return np.argsort(array)[::-1][:k]


def arg_approx(array: np.ndarray, precision: float) -> np.ndarray:
    """Indices of a smallest set of entries summing to precision * array.sum().

    Falls back to the single argmax when one element's share already covers
    the requested fraction, or when the total is non-positive.  If even the
    full cumulative sum never reaches the threshold, every strictly positive
    entry is returned instead.
    """
    if 1 / array.size >= precision:
        return np.array([np.argmax(array)])
    total = array.sum()
    if total <= 0:
        return np.array([np.argmax(array)])
    flat = array.flatten()
    threshold = total * precision
    # Cumulative sum over the values sorted in descending order tells how
    # many of the largest entries are needed to reach the threshold.
    descending = np.sort(flat)[::-1]
    topk = descending.cumsum().searchsorted(threshold)
    if topk == len(flat):
        return np.where(flat > 0)[0]
    return argtopk(flat, topk + 1)


def arg_approx_signed(array: np.ndarray, precision: float) -> np.ndarray:
    """arg_approx applied separately to the positive part and to the negated
    negative part of *array*; the two index sets are concatenated (indices
    may therefore repeat)."""
    parts = []
    for half in (array.copy(), -array):
        half[half < 0] = 0
        parts.append(arg_approx(half, precision))
    return np.concatenate(parts)
def repeat(a: int, repeats: int) -> np.ndarray:
    """Thin wrapper over np.repeat: *a* repeated *repeats* times.

    np.repeat already handles repeats of 1 and 0 correctly (returning a
    one-element and an empty array respectively), so no special-casing is
    needed.
    """
    return np.repeat(a, repeats)
def concatenate(a_tuple, axis=0, out=None, dtype=np.int64) -> np.ndarray:
    """np.concatenate that tolerates an empty input sequence.

    np.concatenate raises on an empty tuple; this wrapper returns an empty
    array of *dtype* in that case instead.
    """
    if len(a_tuple):
        return np.concatenate(a_tuple, axis, out)
    return np.array([], dtype=dtype)
| [
"ygan10@ur.rochester.edu"
] | ygan10@ur.rochester.edu |
3ecf11e01e3b723c2b1431b7b60c91b9a4447008 | 4749d3cf395522d90cb74d1842087d2f5671fa87 | /alice/LC406.py | ebd41c6469e26d50cf2299deca09ce35e1e6efae | [] | no_license | AliceTTXu/LeetCode | c1ad763c3fa229362350ce3227498dfb1f022ab0 | ed15eb27936b39980d4cb5fb61cd937ec7ddcb6a | refs/heads/master | 2021-01-23T11:49:49.903285 | 2018-08-03T06:00:16 | 2018-08-03T06:00:16 | 33,470,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | class Solution(object):
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
people.sort(key = lambda x: [-x[0], x[1]])
out = []
for x in people:
out.insert(x[1], x)
return out | [
"aliceadelice@gmail.com"
] | aliceadelice@gmail.com |
c1e70ef156cf238a0729e59e883d766bdc7079de | 0be8fcf11032836c40d6cf80404130bfeeb4f147 | /formulas/constants.py | 07ac7d5860ed343c693632960b394553b6992c42 | [] | no_license | granttremblay/formulas | 7b13661b8248deeb84a42ce9fa8baaa5fc7d8667 | a56dbfa92fbcaef194d6735a0d5fb38d48308a36 | refs/heads/master | 2021-01-11T22:26:17.165819 | 2016-11-25T04:19:10 | 2016-11-25T04:19:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,044 | py | from sympy import symbols
from sympy import pi as sym_pi
from formulas.base import Formula
try:
import yt.utilities.physical_constants as yt_pc
except ImportError:
yt_pc = None
try:
import astropy.constants as astropy_pc
except ImportError:
astropy_pc = None
try:
from pint import UnitRegistry
pint_pc = UnitRegistry(system='cgs')
except ImportError:
pint_pc = None
yt_map = {"m_e": "me",
"m_p": "mp",
"m_h": "mh",
"k_B": "kboltz"}
astropy_map = {}
pint_map = {"G": "newtonian_constant_of_gravitation",
"k_B": "k"}
class FormulaConstant(Formula):
    """A Formula wrapping a single fixed value bound to a symbolic name.

    Unlike a generic Formula, the wrapped parameter is immutable: the
    setter/clearer overrides below refuse to change it once constructed.
    """
    def __init__(self, name, value):
        # Represent the constant as a one-symbol formula whose single
        # parameter is pre-bound to *value*.
        name = symbols(name)
        super(FormulaConstant, self).__init__(name, [], [name])
        self.param_values[str(name)] = value
        self._value = value
    def set_param_values(self, **kwargs):
        """
        Overridden to enforce immutability: raises RuntimeError instead of
        setting parameter values.
        """
        if self.num_params > 0:
            raise RuntimeError("Can't change the value of a constant!")
    def clear_param_values(self):
        """
        Overridden to enforce immutability: raises RuntimeError instead of
        clearing parameter values.
        """
        if self.num_params > 0:
            raise RuntimeError("Can't change the value of a constant!")
    @property
    def value(self):
        # The (possibly unitful) value supplied at construction time.
        return self._value
class FormulaPi(FormulaConstant):
    """The constant pi, backed by SymPy's symbolic pi."""
    def __init__(self):
        # Deliberately bypasses FormulaConstant.__init__: pi needs no
        # parameter bookkeeping because its formula IS the SymPy constant.
        Formula.__init__(self, sym_pi, [], [])
    @property
    def value(self):
        # Evaluated lazily from the symbolic formula (a SymPy Float).
        return self.formula.evalf()
class PhysicalConstants(object):
    """Attribute-style access to one backend's physical constants.

    *map* translates this package's canonical names (e.g. "k_B") into the
    backend-specific attribute names; unmapped names pass through as-is.
    """
    def __init__(self, constants, map):
        self.constants = constants
        self.map = map
    def __getattr__(self, item):
        # Fall back to the requested name when no mapping exists, then wrap
        # the backend quantity as an immutable FormulaConstant.  The 1.0*
        # factor presumably coerces the backend constant into an arithmetic
        # quantity -- TODO confirm against the yt/astropy/pint backends.
        const = self.map.get(item, item)
        return FormulaConstant(item, 1.0*getattr(self.constants, const))
# Shared symbolic constant for pi.
pi = FormulaPi()
# Expose a constants namespace only for each optional backend that imported
# successfully at module load time.
if yt_pc is not None:
    yt_constants = PhysicalConstants(yt_pc, yt_map)
if astropy_pc is not None:
    astropy_constants = PhysicalConstants(astropy_pc, astropy_map)
if pint_pc is not None:
pint_constants = PhysicalConstants(pint_pc, pint_map) | [
"jzuhone@gmail.com"
] | jzuhone@gmail.com |
68dc9fbe7e29ec923121a4526fc054557776fd07 | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201019123819.py | 9808ed21b1d2b74118bd70924d30f80e541f48fd | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
class TitleBlock(blocks.StructBlock):
    """Centred title text block for a StreamField."""
    text = blocks.CharBlock(
        required = True,
        # Bug fix: was `elp_text=`, a typo for CharBlock's `help_text`
        # keyword, so the help text never reached the form field.
        help_text='Tekst do wyświetlenia',
    )

    class Meta:
        template = 'streams/title_block.html'
        icon = 'edycja'
        label = 'Tytuł'
        help_text = 'Wyśrodkowany tekst do wyświetlenia na stronie.'
class CardsBlock(blocks.StructBlock):
    # A repeating list of cards, each a struct of title / text / image.
    cards = blocks.ListBlock(
        blocks.StructBlock(
            [
                ('title', blocks.CharBlock(max_length=100, help_text = 'Pogrubiony tytuł tej karty. Maksymalnie 100 znaków.')),
                ('text', blocks.TextBlock(max_length=255, help_text='Opcjonalny tekst tej karty. Maksymalnie 255 znaków.')),
                ('image', ImageChooserBlock(help_text = 'Obraz zostanie automatycznie przycięty o 570 na 370 pikseli')),
                # NOTE(review): the bare string below looks like an unfinished
                # ('link_...', <block>) child entry from this editor snapshot;
                # StructBlock expects (name, block) pairs -- confirm/complete.
                ('link_')
            ]
        )
    )

    class Meta:
        template = 'streams/card_block.html'
        icon = 'image'
        label = 'Karty standardowe'
| [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
55ee6222654cb56bc64aa38ff54959a133fa43b6 | f8ad6963bfc851657ea50c6a036cfad29cdd7f60 | /Books/GodOfPython/P00_OriginalSource/ch14/exercise/idpw.py | 21c3d7198abd9ed03005393d1a2ab537906e0965 | [] | no_license | foru120/PythonRepository | e1ab0265c0f50ef2e9acdf7447237c913560692b | db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98 | refs/heads/master | 2021-01-01T06:53:11.728109 | 2019-04-25T13:52:50 | 2019-04-25T13:52:50 | 97,541,222 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | import tkinter
def reset():
    # Clear both login form fields (ID and password) back to empty strings.
    text_id.set('')
    text_pw.set('')
def login():
    # Each line of idpw.txt holds one "id/password" credential pair.
    f = open ("C:/gop/ch14/exercise/idpw.txt", 'r')
    try:
        for s in f:
            idpw = s.split('/')
            if (idpw[0] == text_id.get()) and (idpw[1].strip()==text_pw.get()):
                # Credentials match: tear down the form and show a greeting.
                frame.destroy()
                label = tkinter.Label(text = "{} login..".format(text_id.get()))
                label.pack()
                return 0
        # No line matched: wipe the fields so the user can retry.
        reset()
    except:
        # NOTE(review): bare except hides real errors (missing file, short
        # line); consider catching OSError/IndexError explicitly.
        print("exception occured")
    finally:
        f.close()
# --- module-level GUI wiring (runs on import) ---
root = tkinter.Tk()
root.title("login")
root.geometry('180x120')
# Shared StringVars backing the two entry fields; read by reset()/login().
text_id = tkinter.StringVar(value='')
text_pw = tkinter.StringVar(value='')
frame = tkinter.Frame(root)
frame.pack()
button = tkinter.Button(frame, text = 'reset', command = reset)
button.grid(row=0, column=0, columnspan = 2)
button = tkinter.Button(frame, text = 'login', command = login)
button.grid(row=3, column=0, columnspan = 2)
label = tkinter.Label(frame, text = 'ID')
label.grid(row = 1, column = 0)
entry_id = tkinter.Entry(frame, textvariable = text_id)
entry_id.grid(row = 1, column = 1)
label = tkinter.Label(frame, text = 'PW')
label.grid(row = 2, column = 0)
# The password entry masks typed characters with '*'.
entry_pw = tkinter.Entry(frame, textvariable = text_pw, show='*')
entry_pw.grid(row = 2, column = 1)
# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
| [
"broodsky1122@hanmail.net"
] | broodsky1122@hanmail.net |
543f648806301c64f30a4f1e315ba24aba1a0b57 | d0d21c4e00d2674dcd0e0e07c47576f15706a064 | /munge/03-random-forest-on-tfidf.py | 6b63bd838c012ebb5e9e59757fd64060f5985289 | [] | no_license | paulhendricks/kaggle-home-depot-product-search-relevance | 1312d874c91b5f0eb6f420d23fe5813db222868a | cd2e68976ab4a6eb224c55f80a0b851bb37a9fde | refs/heads/master | 2021-01-10T05:25:31.073869 | 2016-01-19T15:14:47 | 2016-01-19T15:14:47 | 49,902,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,453 | py | import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.metrics import mean_squared_error
attributes = pd.read_csv("./data/attributes.csv")
product_descriptions = pd.read_csv("./data/product_descriptions.csv")
train = pd.read_csv("./data/train.csv")
test = pd.read_csv("./data/test.csv")
sample_submission = pd.read_csv("./data/sample_submission.csv")
count = CountVectorizer()
docs = np.array(['The sun is shining',
'The weather is sweet',
'The sun is shining and the weather is sweet'])
bag = count.fit_transform(docs)
tfidf = TfidfTransformer()
np.set_printoptions(precision=2)
print(tfidf.fit_transform(count.fit_transform(docs)).toarray())
data = 1
from nltk.stem.porter import PorterStemmer
porter = PorterStemmer()
def tokenizer_porter(text):
return [porter.stem(word) for word in text.split()]
tokenizer_porter('runners like running and thus they run')
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stop = stopwords.words('english')
[w for w in tokenizer_porter('a runner likes running and runs a lot')[-10:] if w not in stop]
tfidf = TfidfVectorizer(strip_accents=None,
lowercase=False,
preprocessor=None)
rf_rfidf = Pipeline([('vect', tfidf), ('rf', RandomForestRegressor(random_state=0))])
X_train = train['search_term'].values
y_train = train['relevance'].values
rf_rfidf.fit(X_train, y_train)
X_test =test['search_term'].values
y_hat = rf_rfidf.predict(rf_rfidf.transform(X_test))
X_train, X_validation, y_train, y_validation = train_test_split(train['search_term'].values,
train['relevance'].values, test_size=0.4,
random_state=0)
rf_rfidf = Pipeline([('vect', tfidf), ('rf', RandomForestRegressor(random_state=0))])
rf_rfidf.fit(X_train, y_train)
y_hat = rf_rfidf.predict(X_validation)
mean_squared_error(y_true=y_validation, y_pred=y_hat)
X_test = test['search_term'].values
sample_submission['relevance'] = rf_rfidf.predict(X_test)
sample_submission.to_csv('./data/random_forest_submission.csv', index=False)
| [
"paul.hendricks.2013@owu.edu"
] | paul.hendricks.2013@owu.edu |
66a7413029ca2510d8617ddeb21324b7203e0da2 | c1373f7b6956468b45db6c664b119322e667ac5e | /Study/baekjoon/hw/17609.py | 6c5dcf2987055f70bac2678287a966e9c3484626 | [] | no_license | JaeLinJoo/Python-Team-Notes | e01f2010852d0c4365cdc9d087c305e876c90f9a | 869dae62e489686eab56bc58bdea8c391ffbca19 | refs/heads/master | 2023-09-02T14:10:02.032521 | 2021-10-06T16:20:04 | 2021-10-06T16:20:04 | 339,348,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | #회문
### ↓↓↓↓↓↓↓↓↓↓↓↓↓solution↓↓↓↓↓↓↓↓↓↓↓↓↓↓
def pseudo(x, left, right):
    """True iff x[left:right+1] reads the same in both directions."""
    while left < right and x[left] == x[right]:
        left += 1
        right -= 1
    # The pointers crossed without a mismatch exactly when it is a palindrome.
    return left >= right


def palindrome(x, left, right):
    """Classify x[left:right+1].

    Returns 0 for a palindrome, 1 for a 'pseudo-palindrome' (palindrome
    after deleting a single character), and 2 otherwise.
    """
    while left < right:
        if x[left] != x[right]:
            # First mismatch: the string can only be rescued by skipping
            # exactly one of the two mismatching ends.
            if pseudo(x, left + 1, right) or pseudo(x, left, right - 1):
                return 1
            return 2
        left += 1
        right -= 1
    return 0
t = int(input())
for i in range(t):
x = input()
res = palindrome(x,0,len(x)-1)
print(res)
############시간초과############
# 회문
def palindrome(x):
    # First, abandoned attempt (marked "time limit exceeded" above): check
    # the whole string, then try deleting each character value in turn.
    ps = False
    # Exact palindrome case.
    if x == x[::-1]:
        return 0
    # Pseudo-palindrome case.
    else:
        for s in x:
            pseudo = x.replace(s,'')
            # NOTE(review): replace() removes EVERY occurrence of s, not a
            # single character, so this is not true one-deletion checking --
            # one reason this O(n^2) version was abandoned.
            if pseudo == pseudo[::-1]:
                ps = True
                continue
        if ps:
            return 1
        else:
            return 2
t = int(input())
res = []
for i in range(t):
x = input()
res.append(palindrome(x))
for r in res:
print(r)
| [
"replituser@example.com"
] | replituser@example.com |
750217b5c01d3385971f10c801f303fa66cf729a | 98743080e28537d635364a998aa1988551dffcdf | /cride/cride/users/urls.py | 1cd703d1b08c9189fbc30cfb0e5b5957ed36c4c6 | [
"MIT"
] | permissive | INFINITY-RUBER/Curso_Django-avanzado_proyect | 75561cbc54a8057d1d2b536f34b2506233f40f80 | 1850a73a3648cfca17fdbf48b40dbd3eadd9f7fd | refs/heads/main | 2023-06-11T05:49:43.330422 | 2021-06-29T17:11:26 | 2021-06-29T17:11:26 | 374,808,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | """ Users URLs."""
# Django
from django.urls import path, include
# Django REST Framework
from rest_framework.routers import DefaultRouter
# Views
from .views import users as user_views
# from cride.users.views import (UserLoginAPIView, UserSignUpAPIView,
# AccountVerificationAPIView)
# urlpatterns = [
# path('users/login/', UserLoginAPIView.as_view(), name='login'),
# path('users/signup/', UserSignUpAPIView.as_view(), name='signup'),
# path('users/verify/', AccountVerificationAPIView.as_view(), name='verify'),
# ]
router = DefaultRouter()
router.register(r'users', user_views.UserViewSet, basename='users')
urlpatterns = [path('', include(router.urls))] | [
"ruberhenandez@gmail.com"
] | ruberhenandez@gmail.com |
bcc862b5a9cd861cbe6356c4fb7bc1316aeb2392 | 8bda8911512f1c454f5e75ef36f3d828661b1479 | /dfs_bfs/test08.py | 6bfdb60652a5ad808b25d0d77843e9f27c78d695 | [] | no_license | choijaehoon1/backjoon | 0f5909a1e1d416f8f431d6b986754af7eb6a3396 | a0411dba08c057a312733e38683246162256e61d | refs/heads/master | 2023-02-26T12:28:33.733297 | 2021-02-05T13:28:33 | 2021-02-05T13:28:33 | 280,430,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | from collections import deque
N, M = map(int, input().split())
visit = [0 for _ in range(100001)]
queue = deque()
queue.append([N,0])
# print(queue)
while queue:
pos = queue[0][0]
depth = queue[0][1]
if pos == M: # 기준점이 됫을때의 depth를 찍으면 최단거리임
break
queue.popleft()
visit[pos] = 1
if pos-1 >= 0 and visit[pos-1] == 0:
queue.append([pos-1, depth+1])
if pos+1 <= 100000 and visit[pos+1] == 0:
queue.append([pos+1, depth+1])
if pos*2 <= 100000 and visit[pos*2] == 0:
queue.append([pos*2, depth+1])
print(queue[0][1])
| [
"wogns_20@naver.com"
] | wogns_20@naver.com |
b230197181bdab76dc21bbefbc4970e62ae9d85b | 746bf62ae3599f0d2dcd620ae37cd11370733cc3 | /leetcode/minimumpathsum.py | 0b3383cd94fdea6db18babd6a9663eaa701785c1 | [] | no_license | wanglinjie/coding | ec0e614343b39dc02191455165eb1a5c9e6747ce | 350f28cad5ec384df476f6403cb7a7db419de329 | refs/heads/master | 2021-04-22T14:00:48.825959 | 2017-05-02T12:49:05 | 2017-05-02T12:49:05 | 48,011,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,417 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# date:20160712
class Solution(object):
    def minPathSum(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        Given a m x n grid filled with non-negative numbers,
        find a path from top left to bottom right which minimizes the sum of all numbers along its path.
        Note: You can only move either down or right at any point in time.
        Approach:
            Dynamic programming: an auxiliary matrix stores the minimal
            path sum needed to reach each cell.
        (Python 2 code: xrange and the print statement below.)
        """
        m = len(grid)
        n = len(grid[0])
        paths = []
        for i in xrange(m):
            paths.append([0] * n)
        for i in xrange(m):
            for j in xrange(n):
                if i:
                    if j:
                        # Interior cell: add the cheaper of the cell above
                        # and the cell to the left.
                        paths[i][j] = grid[i][j] + min(paths[i-1][j], paths[i][j-1])
                    else:
                        # Leftmost column: only reachable from above.
                        paths[i][j] = grid[i][j] + paths[i-1][j]
                else:
                    if j:
                        # Top row: only reachable from the left.
                        paths[i][j] = grid[i][j] + paths[i][j-1]
                    else:
                        # Origin cell.
                        paths[i][j] = grid[i][j]
        print paths
        return paths[m-1][n-1]
grid = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
so = Solution()
print so.minPathSum(grid) | [
"hitwhwlj@163.com"
] | hitwhwlj@163.com |
35decafd0911bc653eafe7e1928f9aafc9d264c6 | 4aa3e91f50443894de3d2d5339df7cbcb2c88bfd | /t_08ALFARO.CAJO/CAJO/concatenacion/ejercicio7.py | 8e68e8fce9f5780988f0b1da353beaf938a9ee63 | [] | no_license | Piero942/T08_Alfaro.Cajo | d7164ab2c22116037739b32680cd82eafef0cfa1 | 8e1d05459d15093d0a8ecc46c7caf7e225b53e5f | refs/heads/master | 2020-10-02T07:43:03.482372 | 2019-12-13T02:01:09 | 2019-12-13T02:01:09 | 227,733,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | # 2 3 4 5 6 7
# 0123456789012345678901234567890123456789012345678901234567890123  (column ruler)
msg="El ser humano nace con inteligencia, pero debe aprender a pensar"
# NOTE(review): `msg` is defined but never used; the print below emits a
# literal concatenation instead of slicing words out of `msg`.
print( "aprende" + " " + "con" + " " + "inteligencia" )
| [
"palfaro@unprg.edu.pe"
] | palfaro@unprg.edu.pe |
40e95cb6d1ecd25ec81f2e9f679e937390f9151c | f68d246ea82f980706bfa574da91d99797c29b38 | /code/sequentialSearch.py | 3f21c9fcc1154eb22d72f6c04f1a8470444e9477 | [] | no_license | nicolas4d/Problem-Solving-with-Algorithms-and-Data-Structures-using-Python | 40684370ab0c8a22894aa58c0479da6697ea0a13 | 5c7595cab3c5501e4b4177b700708a2609c74e30 | refs/heads/master | 2020-12-02T13:43:49.547926 | 2020-02-01T14:19:08 | 2020-02-01T14:19:08 | 231,025,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | def sequentialSearch(alist, item):
def sequentialSearch(alist, item):
    """Linear scan of *alist*; return True as soon as *item* is found,
    otherwise False once the whole list has been examined."""
    # Idiomatic early-return loop replaces the while/flag bookkeeping
    # (pos counter plus `found` boolean) of the original.
    for candidate in alist:
        if candidate == item:
            return True
    return False
print(sequentialSearch(testlist, 3))
print(sequentialSearch(testlist, 13))
'''
False
True
'''
| [
"nicolas4d@foxmail.com"
] | nicolas4d@foxmail.com |
6bdd4cb44ebe52649aa072e82ce0f6cf397c91c6 | 7bd15f37ffd26f9f0470cae2b4c1ef491c35c5c1 | /python/dirigible/feedback/urls.py | 64c3810ceefb7a6b75b23e864325bb941af66522 | [
"MIT"
] | permissive | bwhmather/dirigible-spreadsheet | 0063aba1cec7df1dc4fc0d5dbbcfaeeb1dad932f | ff0414912110553a5d0f317495cdba39a077a044 | refs/heads/master | 2020-12-28T22:53:10.312409 | 2014-09-14T16:15:59 | 2014-09-14T16:15:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | # Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
from django.conf.urls.defaults import *
from dirigible.feedback.views import submit
urlpatterns = patterns('',
url(
r'^submit/$',
submit,
name="feedback_submit"
),
)
| [
"hjwp2@cantab.net"
] | hjwp2@cantab.net |
c7c95faecb272485b632e0ee5d8c8ae918aabe5c | a39a8e8d1192ca475c6ed348b79e10108c1fa6ba | /WebApp/admin.py | aa3bf10d1d4fce749276c043cb777099722ae42a | [] | no_license | gitNikhilsahu/SQLiteProject | 2d7bac75456d793e97424ed0a939a4a4b87cf5d0 | bfa49909059789f6e3ba47cd274782de6b6414c5 | refs/heads/master | 2020-09-14T09:44:57.481471 | 2019-11-21T05:21:22 | 2019-11-21T05:21:22 | 223,093,719 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | from django.contrib import admin
from WebApp.models import Emp
# A customised admin (explicit list columns) was sketched but never enabled:
# class EmpAdmin(admin.ModelAdmin):
#     list_display = ['EmpID', 'EmpName', 'EmpSal', 'EmpAdd']
# Expose the Emp model in the Django admin with the default ModelAdmin.
admin.site.register(Emp)
"Nikhilsahu.in@gmail.com"
] | Nikhilsahu.in@gmail.com |
5167277aeef9cb935f9bc676a1027f0ba41df2c9 | 8d15427c02f7ea265985159c7bf67df2f9731991 | /ApiTest/case/test_db.py | dd65ca78ebdaac25d0806ad61e9f962a355c61d7 | [] | no_license | yolotester/learngit | aa862b7f0ecbeb5c056b8ca576c8a00efc55f62c | d885b520757097c1d984d1cdda5d242ee5c6a5d6 | refs/heads/master | 2023-04-07T19:48:23.153638 | 2023-03-17T02:03:05 | 2023-03-17T02:03:05 | 248,145,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | '''
目标:在unittest中使用读取数据库封装类
'''
# 导包 unittest read_database
import unittest
from tools.read_database import ReadDB
import logging
# Configure root logging once for the whole test module.
logging.basicConfig(level=logging.INFO, format= '%(asctime)s-%(levelname)s-%(name)s-%(message)s')
logger = logging.getLogger(__name__)
# Test class for the ReadDB database wrapper.
# NOTE: the original subclassed ``object`` even though this file's own
# comment says "inherit unittest.TestCase" -- without TestCase,
# ``self.assertEqual`` does not exist and ``unittest.main()`` below
# discovers no tests.  Fixed by inheriting unittest.TestCase.
class TestDB(unittest.TestCase):
    # Test method: run a SELECT through ReadDB and check the first column.
    def test_db(self):
        # SQL statement to execute.
        sql = 'select * from goods'
        # Invoke the database wrapper's main method and collect the row.
        data = ReadDB().get_sql_one(sql)
        # First column of the returned row is expected to be 1.
        self.assertEqual(1, data[0])
if __name__ == '__main__':
    unittest.main()
| [
"yolo@ying31.com"
] | yolo@ying31.com |
b082be539c1caf0875da89e785561da6f00009ef | 25d8bac5635ac1cc3577a3593a4512e042ea7ecd | /scripts/simplehttpserver-example-1.py | d1f9965dbfb74211c5c5666c8a39df5691eef1e4 | [] | no_license | mtslong/demo | 2333fa571d6d9def7bdffc90f7bcb623b15e6e4b | a78b74e0eea7f84df489f5c70969b9b4797a4873 | refs/heads/master | 2020-05-18T18:28:48.237100 | 2013-11-11T16:10:11 | 2013-11-11T16:10:11 | 4,136,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | import SimpleHTTPServer
import SocketServer
# minimal web server. serves files relative to the
# current directory.
# Port to listen on; "" in the address tuple binds all local interfaces.
PORT = 8000
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
# Python 2 print statement -- this script targets Python 2 only.
print "serving at port", PORT
# Block forever, serving files from the current directory.
httpd.serve_forever()
## serving at port 8000
## localhost - - [11/Oct/1999 15:07:44] code 403, message Directory listing not sup
## ported
## localhost - - [11/Oct/1999 15:07:44] "GET / HTTP/1.1" 403 -
## localhost - - [11/Oct/1999 15:07:56] "GET /samples/sample.htm HTTP/1.1" 200 -
| [
"mofeng@netease.com"
] | mofeng@netease.com |
5d23db06cd5064470408bdb43c1240099438ca90 | 2eca1ddd6016499dec09459d9981fdb11ad21ec3 | /doc/vcs.py | 144dfd8fc7abceda8b62b6458f327734b0bf1875 | [
"BSD-2-Clause"
] | permissive | AWhetter/plac | 85eb9a09879a06ae33ba0760d69517f06de61157 | c5e786d08600fe0ac2158bc50a7d83cf25a59c7f | refs/heads/master | 2020-10-02T06:44:54.314142 | 2019-10-27T07:41:16 | 2019-10-27T07:41:16 | 227,724,805 | 0 | 0 | BSD-2-Clause | 2019-12-13T00:52:05 | 2019-12-13T00:52:04 | null | UTF-8 | Python | false | false | 963 | py | "A Fake Version Control System"
import plac # this implementation also works with Python 2.4
# Sub-commands plac exposes from this module (one function each below).
commands = 'checkout', 'commit', 'status'
@plac.annotations(url='url of the source code')
def checkout(url):
    "A fake checkout command"
    # Return the command name followed by its argument as output parts.
    parts = ('checkout ', url)
    return parts
@plac.annotations(message=('commit message', 'option'))
def commit(message):
    "A fake commit command"
    # Echo the command name and the (possibly None) commit message.
    parts = ('commit ', message)
    return parts
@plac.annotations(quiet=('summary information', 'flag', 'q'))
def status(quiet):
    "A fake status command"
    # Echo the flag value alongside the command name.
    parts = ('status ', quiet)
    return parts
def __missing__(name):
return ('Command %r does not exist' % name,)
def __exit__(etype, exc, tb):
"Will be called automatically at the end of the intepreter loop"
if etype in (None, GeneratorExit): # success
print('ok')
main = __import__(__name__) # the module imports itself!
# When run directly, plac dispatches over this module's ``commands`` tuple,
# using ``__missing__`` for unknown names and ``__exit__`` on shutdown.
if __name__ == '__main__':
    import plac
    for out in plac.call(main, version='0.1.0'):
        print(out)
| [
"michele.simionato@gmail.com"
] | michele.simionato@gmail.com |
8ff7174f22a50a8d9d3d834f9c24f2e797fb4c06 | e0980f704a573894350e285f66f4cf390837238e | /.history/home/models_20201030104943.py | e2b8486c5ac363b0730c2b639a493695b4a0b0c1 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,533 | py | from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import StreamField
from wagtail.admin.edit_handlers import FieldPanel, PageChooserPanel, StreamFieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.snippets.blocks import SnippetChooserBlock
from streams import blocks
# Handsontable options for the Wagtail TableBlock used by the pricing table
# below: fixed 4x4 grid, row headers only, trimmed context menu.
new_table_options = {
    'minSpareRows': 0,
    'startRows': 4,
    'startCols': 4,
    'colHeaders': False,
    'rowHeaders': True,
    'contextMenu': [
        'row_above',
        'row_below',
        '---------',
        'col_left',
        'col_right',
        '---------',
        'remove_row',
        'remove_col',
        '---------',
        'undo',
        'redo'
    ],
    'editor': 'text',
    'stretchH': 'all',
    'renderer': 'text',
    'autoColumnSize': False,
}
class HomePage(Page):
    """Singleton landing page with a hero banner and a StreamField body."""
    # Only allowed directly under the site root.
    parent_page_types = ['wagtailcore.Page']
    subpage_types = ['flex.FlexPage', 'services.Service']
    # At most one home page may exist.
    max_count = 1
    # Subtitle shown under the banner title (help_text is Polish UI copy).
    lead_text = models.CharField(
        max_length = 140,
        blank = True,
        help_text = 'Podtytuł pod tytułem banera'
    )
    # Optional page the banner button links to.
    button = models.ForeignKey(
        'wagtailcore.Page',
        blank = True,
        null = True,
        related_name = '+',
        help_text = 'Wybierz opcjonalną stronę, do której chcesz utworzyć łącze',
        on_delete = models.SET_NULL,
    )
    # Caption of the banner button.
    button_text = models.CharField(
        max_length = 50,
        default = 'Czytaj więcej',
        blank = False,
        help_text = 'Przycisk tekstowy'
    )
    # Banner background image; required in forms (blank=False) but nullable
    # at the DB level so the image can be deleted without deleting the page.
    banner_background_image = models.ForeignKey(
        'wagtailimages.Image',
        blank = False,
        null =True,
        related_name = '+',
        help_text = 'Obraz tła baneru',
        on_delete = models.SET_NULL,
    )
    # Flexible page body assembled from the project's stream blocks.
    body = StreamField([
        ('title', blocks.TitleBlock()),
        ('cards', blocks.CardsBlock()),
        ('image_and_text', blocks.ImageAndTextBlock()),
        ('cta', blocks.CallToActionBlock()),
        ('testimonial', SnippetChooserBlock(
            target_model='testimonials.Testimonial',
            template = 'streams/testimonial_block.html'
        )),
        ('pricing_table', blocks.PricingTableBlock(table_options=new_table_options)),
    ], null=True, blank=True)
    # Panels shown to editors in the Wagtail admin.
    content_panels = Page.content_panels + [
        FieldPanel('lead_text'),
        PageChooserPanel('button'),
        FieldPanel('button_text'),
        ImageChooserPanel('banner_background_image'),
        StreamFieldPanel('body'),
    ]
| [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
f714c0d4977db7e7c55232f29b48fe1670ee1204 | f620403443b2c0affaed53505c002f35dc68020c | /Prediction/GetEntropy.py | 8e857882fcbac924009a016effa2b606dbd3965e | [] | no_license | ZhuJiahui/CTMTS | c552b3026deb47879f9aa5bde4b002cf6283858d | 9f8981f6e61900a68a38ae0392e01771beee9651 | refs/heads/master | 2021-01-12T10:18:27.579697 | 2016-12-14T02:23:29 | 2016-12-14T02:23:29 | 76,416,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,562 | py | # -*- coding: utf-8 -*-
'''
Created on 2014年8月3日
@author: ZhuJiahui506
'''
import os
import numpy as np
from TextToolkit import quick_write_list_to_text
import time
def get_topic_entropy(read_directory, write_filename):
    """Average Shannon entropy (bits) of the topic distributions per file.

    Reads ``1.txt`` .. ``N.txt`` from *read_directory* (one probability
    distribution per row), computes the entropy of each row, and writes one
    averaged value per file via ``quick_write_list_to_text``.
    """
    # Number of files to process; files are named 1.txt .. N.txt.
    file_number = sum([len(files) for root, dirs, files in os.walk(read_directory)])
    all_e = []
    for i in range(file_number):
        PHAI = np.loadtxt(read_directory + '/' + str(i + 1) + '.txt')
        # A file holding a single distribution loads as a 1-D array; promote
        # it to 2-D so the row loop below works.  The original test was
        # ``len(PHAI) >= 300`` -- a magic row-length threshold that breaks
        # for 2-D matrices with >= 300 rows and for 1-D rows shorter than
        # 300; checking the rank directly is shape-independent.
        if PHAI.ndim == 1:
            PHAI = np.array([PHAI])
        this_e_list = []
        for j in range(len(PHAI)):
            temp_e = 0.0
            for k in range(len(PHAI[j])):
                # Skip near-zero probabilities to avoid log2(0).
                if PHAI[j][k] > 0.00001:
                    temp_e += (-1.0 * PHAI[j][k] * np.log2(PHAI[j][k]))
            this_e_list.append(temp_e)
        all_e.append(str(np.average(this_e_list)))
    quick_write_list_to_text(all_e, write_filename)
if __name__ == '__main__':
    # Python 2 script (print statements below); time.clock() measures the
    # run (deprecated and removed in Python 3.8+).
    start = time.clock()
    now_directory = os.getcwd()
    # Project root is assumed to be the parent of the current directory.
    root_directory = os.path.dirname(now_directory) + '/'
    write_directory1 = root_directory + u'dataset/prediction'
    write_filename = write_directory1 + u'/topic_entropy.txt'
    read_directory = root_directory + u'dataset/DCTM/mctrwctm_ct_word'
    # Create the output directory on first run.
    if (not(os.path.exists(write_directory1))):
        os.mkdir(write_directory1)
    get_topic_entropy(read_directory, write_filename)
    print 'Total time %f seconds' % (time.clock() - start)
    print 'Complete !!!'
| [
"zhujiahui@outlook.com"
] | zhujiahui@outlook.com |
06a5d62253a4c5307e3f07e5b4263d38c33c0137 | 17c7bace346c9f49318becd1f9769c9aee3fa650 | /credit_calc/config.py | cde448593ebe35abcf9a8f3e754ebf53b07607a5 | [] | no_license | KonishchevDmitry/credit-calculator | 789a06bad7dfe27ed5284afcb5d8b620d5ba5ae1 | 4c4dd811416e968eb68bbd490966c285de99c2c5 | refs/heads/master | 2021-01-10T16:46:22.273992 | 2013-11-18T08:17:11 | 2013-11-18T08:17:11 | 51,710,627 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,806 | py | from decimal import Decimal, DecimalException
from object_validator import validate
from object_validator import InvalidValueError
from object_validator import String, List, Dict, DictScheme
import python_config
from credit_calc.util import InvalidDateError
from credit_calc.util import get_date
class _Date(String):
    """String validator that additionally parses the value into a date."""

    def validate(self, obj):
        # Base class enforces that the value is a string.
        super(_Date, self).validate(obj)
        try:
            parsed = get_date(obj)
        except InvalidDateError:
            raise InvalidValueError(obj)
        return parsed
class _Amount(String):
    """String validator for a strictly positive Decimal amount."""

    def validate(self, obj):
        super(_Amount, self).validate(obj)
        try:
            parsed = Decimal(obj)
        except DecimalException:
            raise InvalidValueError(obj)
        if parsed > 0:
            return parsed
        # Zero or negative -> invalid.
        raise InvalidValueError(obj)
class _Interest(String):
    """String validator for an interest rate strictly between 0 and 100."""

    def validate(self, obj):
        super(_Interest, self).validate(obj)
        try:
            rate = Decimal(obj)
        except DecimalException:
            raise InvalidValueError(obj)
        if 0 < rate < 100:
            return rate
        # Out of the open interval (0, 100) -> invalid.
        raise InvalidValueError(obj)
def get_credits(config_path):
    """Parse the configuration file and return its list of credit entries."""
    config = _get_config(config_path)
    return config["credits"]
def _get_config(config_path):
    """Load the configuration file and validate it against the scheme."""
    raw = python_config.load(config_path)
    try:
        # Scheme of a single credit entry; "payments" maps dates to amounts.
        credit_scheme = DictScheme({
            "amount": _Amount(),
            "interest": _Interest(),
            "start_date": _Date(),
            "end_date": _Date(),
            "payments": Dict(_Date(), _Amount(), optional=True),
        })
        return validate("config", raw, DictScheme({"credits": List(credit_scheme)}))
    except Exception as e:
        raise Exception("Error while parsing '{}' configuration file: {}".format(config_path, e))
| [
"konishchev@gmail.com"
] | konishchev@gmail.com |
f0649e568131fbf007f89191ec6673659501d413 | 328397a6ff6e109069e02fa8fe42255910939cb8 | /venvs/edxapp/lib/python2.7/site-packages/xblock/__init__.py | 27a1cc562b2dbacb168d5c5618273230d192cfb3 | [] | no_license | UOMx/CITeS-VM-edxapp | fd1294367d090314dc46d8517027845fe178a2e7 | de3d1b297fa99d61cf32addb981cdfc55aec9891 | refs/heads/master | 2022-12-03T08:45:26.284451 | 2017-02-13T12:44:17 | 2017-02-13T12:44:17 | 81,821,365 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | """
XBlock Courseware Components
"""
import os
import warnings
import xblock.core
import xblock.fields
class XBlockMixin(xblock.core.XBlockMixin):
"""
A wrapper around xblock.core.XBlockMixin that provides backwards compatibility for the old location.
Deprecated.
"""
def __init__(self, *args, **kwargs):
warnings.warn("Please use xblock.core.XBlockMixin", DeprecationWarning, stacklevel=2)
super(XBlockMixin, self).__init__(*args, **kwargs)
# For backwards compatability, provide the XBlockMixin in xblock.fields
# without causing a circular import
xblock.fields.XBlockMixin = XBlockMixin
VERSION_FILE = os.path.join(os.path.dirname(__file__), 'VERSION.txt')
__version__ = open(VERSION_FILE).read().strip()
| [
"menuka.14@cse.mrt.ac.lk"
] | menuka.14@cse.mrt.ac.lk |
9703e11091f4c627c362b31dd90d4d91c92160f8 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/3/usersdata/107/321/submittedfiles/ex1.py | 652ffcb7e07cb3de6ae9c7173519b76c1e3e40b5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | # -*- coding: utf-8 -*-
from __future__ import division
# Read the coefficients of a*x**2 + b*x + c = 0 (Python 2 input()).
a=input('digite o valor de a:')
b=input('digite o valor de b:')
c=input('digite o valor de c:')
# Discriminant.
d=(b**2)-(4*a*c)
if d>=0:
    # Quadratic formula.  The original computed ``d**1/2`` (== d/2, not the
    # square root) and divided with the wrong precedence (``x/2*a`` means
    # (x/2)*a, not x/(2*a)); both are fixed here.
    # NOTE(review): a == 0 still raises ZeroDivisionError -- confirm desired.
    x1 = (-b + d**0.5) / (2*a)
    x2 = (-b - d**0.5) / (2*a)
    # The original wrote ``print x1('%.2f' % x1)``, i.e. *called* the float
    # -- a TypeError.  Print the formatted roots instead.
    print('%.2f' % x1)
    print('%.2f' % x2)
else:
    print('não existe raízes reais:')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d40b3e7ce7ced0c63212ada2f3f056a7575618c6 | be36a3b4aec92734e7b8562eff411310d4d7ba78 | /core/utils.py | c2dea9e22447aca033fd7a4e75bacc37692c0dcf | [] | no_license | dpitkevics/Jooglin.Crawler | 9d409c7960557dbaaab3e925fc5de0c762185186 | 1a3b7c7fa5a1132413dff1ca73c655caf8e2a637 | refs/heads/master | 2021-01-22T06:54:37.682214 | 2015-04-22T14:51:18 | 2015-04-22T14:51:18 | 34,395,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | import types
import defer
def iterator(var):
    """Coerce *var* into an iterator.

    Generators pass through untouched, lists/tuples are iterated, and any
    other value becomes a one-element iterator.
    """
    if isinstance(var, types.GeneratorType):
        # Already an iterator; hand it back as-is.
        return var
    if isinstance(var, (list, tuple)):
        return iter(var)
    # Scalar fallback: wrap in a single-element tuple.
    return iter((var,))
def isstring(obj):
    """Return True when *obj* is a string.

    The original wrapped the check in ``try/except NameError`` with an
    *identical* fallback -- a leftover of a Python 2 ``basestring``
    compatibility shim.  ``str`` is always defined, so the except branch was
    unreachable and the construct collapses to a single isinstance check.
    """
    return isinstance(obj, str)
class DeferredList(defer.Deferred):
    """A Deferred that fires once every deferred in a list has fired.

    Collects each result in the original list order and fires itself with
    the complete result list when the last one arrives.
    """
    def __init__(self, deferredList):
        """Initialize a DeferredList"""
        # Pre-size the slot list so results can be stored by index.
        self.result_list = [None] * len(deferredList)
        super(DeferredList, self).__init__()
        self.finished_count = 0
        for index, deferred in enumerate(deferredList):
            # The same handler serves success and failure.  NOTE(review):
            # failures are therefore stored in result_list like plain
            # results -- confirm that is intended.
            deferred.add_callbacks(
                self._cb_deferred,
                self._cb_deferred,
                callback_args=(index,),
                errback_args=(index,)
            )
    def _cb_deferred(self, result, index):
        """(internal) Callback for when one of my deferreds fires.
        """
        # Record the result in its slot; fire once all slots are filled.
        self.result_list[index] = result
        self.finished_count += 1
        if self.finished_count == len(self.result_list):
            self.callback(self.result_list)
        return result
"daniels.pitkevics@gmail.com"
] | daniels.pitkevics@gmail.com |
7643eccc000d560f997e82014987c8affdaa2a73 | 7c13186da7fba1e4da5f6a9c85b8ef00b68797e0 | /utils/log.py | 497d7836db9badc3768d5809ec3eee654474f021 | [] | no_license | rohitsuratekar/FeedbackScan | 02d54949c293f1ce956b62b5597697db3bfe07ac | b7598a85cb9c92946edb96713f51baae73b22f0d | refs/heads/master | 2021-05-04T17:52:12.655790 | 2019-11-28T10:43:24 | 2019-11-28T10:43:24 | 120,281,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | """
Copyright © 2017 Rohit Suratekar
Code from this file is released under MIT Licence 2017.
use "Log" for logging information and "OUTPUT" for saving information
"""
import logging
import os
from settings import *
from utils.functions import get_uid
# Creates UID for current job
CURRENT_JOB = get_uid()
# Ensure the output directory exists before any handler opens a file there.
# NOTE(review): exists/makedirs is racy if two jobs start simultaneously --
# confirm single-process usage.
if not os.path.exists(OUTPUT_FOLDER):
    os.makedirs(OUTPUT_FOLDER)
class AppFilter(logging.Filter):
    """Logging filter that stamps every record with the current job UID.

    Attaches the module-level ``CURRENT_JOB`` identifier as ``record.uid``
    so formatters can reference ``%(uid)s``.
    """

    def filter(self, record):
        # Inject the job id; always keep the record (never filter it out).
        setattr(record, 'uid', CURRENT_JOB)
        return True
# Script logger: progress / diagnostic messages.
LOG = logging.getLogger('log')
LOG.setLevel(logging.INFO)
LOG.addFilter(AppFilter())
if STORE_SCRIPT_LOG:
    # Persist the script log alongside the other outputs.
    log_file = logging.FileHandler(
        OUTPUT_FOLDER + "/" + NAME_OF_SCRIPT_LOG_FILE)
    log_file.setFormatter(
        logging.Formatter('%(uid)s %(asctime)s %(filename)s : %(message)s'))
    LOG.addHandler(log_file)
if PRINT_TO_CONSOLE:
    # Mirror the same format to the console for interactive runs.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(uid)s %(asctime)s %(filename)s : %(message)s')
    console.setFormatter(formatter)
    LOG.addHandler(console)
# Output logger: result lines, always written to the output file.
OUTPUT = logging.getLogger('output')
OUTPUT.setLevel(logging.INFO)
OUTPUT.addFilter(AppFilter())
output_file = logging.FileHandler(OUTPUT_FOLDER + "/" + NAME_OF_OUTPUT_FILE)
output_file.setFormatter(logging.Formatter('%(uid)s: %(message)s'))
OUTPUT.addHandler(output_file)
| [
"rohitsuratekar@gmail.com"
] | rohitsuratekar@gmail.com |
59cc24e786dee4a7056482c5231c640cfdb53a44 | aa853a9094fff4b6e9b0ddc7469be29ad5f0f811 | /poi_sale_discounts/__manifest__.py | 3d2b68437c88ed4c757428677067f4c312115397 | [] | no_license | blue-connect/illuminati | 40a13e1ebeaceee39f17caa360f79e8deeaebf58 | 6682e60630064641474ddb2d8cbc520e30f64832 | refs/heads/master | 2022-01-06T00:55:58.465611 | 2018-11-24T04:30:03 | 2018-11-24T04:30:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | ##############################################################################
#
# Poiesis Consulting, OpenERP Partner
# Copyright (C) 2013 Poiesis Consulting (<http://www.poiesisconsulting.com>).
# Developed by: Grover Menacho
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Discounts',
'version': '11.0.0.1',
'category': 'Sales',
'summary': 'Descuento por monto y porcentaje',
'description': """
Sale Discounts
===================================
This module adds discounts per amount and percentage to sale orders
""",
'author': 'Poiesis Consulting',
'website': 'http://www.poiesisconsulting.com',
'depends': ['sale'],
'data': [
'views/sale.xml',
],
'installable': True,
'active': False,
'application': True,
# 'certificate': 'certificate',
}
| [
"yori.quisbert@poiesisconsulting.com"
] | yori.quisbert@poiesisconsulting.com |
db9bcc7f0c51bda307511eeacac685e0fe2d1bf7 | 27034876dc372d24d0c659b35e2da394e8d633ca | /tests/test_server/test_base.py | 0b404657eba53018b803f44636f686ccffdf6812 | [
"MIT"
] | permissive | gc-ss/idom | 48458aeb1084889accd8026feb8e58900cff23b4 | bf2a6beb09700ebd81cb569af1ef22fcc46ffcb6 | refs/heads/main | 2023-04-19T13:07:27.925291 | 2021-05-08T18:26:11 | 2021-05-08T18:26:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | import pytest
import sanic
import idom
from idom.server.sanic import PerClientStateServer
from idom.server.utils import find_builtin_server_type
@idom.component
def AnyComponent():
    """Minimal no-op component: the tests below only need something the
    server constructors accept, not any rendering behavior."""
    pass
def test_no_application_until_running():
    """Accessing ``.application`` before the server runs must raise."""
    server_cls = find_builtin_server_type("PerClientStateServer")
    srv = server_cls(AnyComponent)
    with pytest.raises(RuntimeError, match="No application"):
        srv.application
def test_cannot_register_app_twice():
    """Registering a server against a second app must fail."""
    srv = PerClientStateServer(AnyComponent)
    srv.register(sanic.Sanic())
    # The second registration is rejected.
    with pytest.raises(RuntimeError, match="Already registered"):
        srv.register(sanic.Sanic())
| [
"ryan.morshead@gmail.com"
] | ryan.morshead@gmail.com |
3c1cd7d7c6d73f40e800dbe2d10e8ddd73c6f16b | 1503cbcd6b50354523e8a720c880de0adb8dd678 | /bot | 9db29a7b760381bb5f060082443a1a84c747f54b | [] | no_license | caphrim007/botcheck | d7c1ef913c1cd38fcf1bee93fa3984a7d681c881 | 1408b1ea9a8ffc1f8c8358df41d077d59b3b9495 | refs/heads/master | 2016-09-11T05:13:00.556531 | 2013-02-05T17:47:41 | 2013-02-05T17:47:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,934 | #! /usr/bin/env python
import os, sys, getopt, glob
import signal
import createDaemon
import botcheck
import botsqlite
from time import sleep, strftime, localtime
# signal handler function
def handle_sig(signum, frame):
    """Record receipt of SIGTERM/SIGHUP in module-level flags."""
    global sigterm_received, sighup_received, debug_lines
    # Keep a human-readable trace of every signal that arrives.
    debug_lines.append("Signal handler called with signal %s\n" % signum)
    if signum == signal.SIGTERM:
        sigterm_received = True
    elif signum == signal.SIGHUP:
        sighup_received = True
daemon = False
debug = False
server = None
port = None
# main process, parse command arguments
try:
opts, args = getopt.getopt(sys.argv[1:], '', ["daemon","debug","server=","port="] )
except getopt.GetoptError, msg:
sys.exit(1)
for o, a in opts:
if o in ('--daemon'):
daemon = True
if o in ('--debug'):
debug = True
if o in ('--server'):
server = a
if o in ('--port'):
port = int(a)
if daemon:
# go into daemon mode
retCode = createDaemon.createDaemon()
sqldb = botsqlite.botsqlite('bot.db')
nick = sqldb.random_nick()
nick_id = sqldb.get_nick_id(nick)
is_exempt = sqldb.nick_is_exempt(nick)
if (not is_exempt):
sqldb.add_exemption(nick, "all")
debug_lines = []
out_file_name = 'log/botcheck_%s_%s.out' % (strftime('%m-%d_%H:%M', localtime()),server)
# save our pid for signals
pid = os.getpid()
# open a log file for stdout and stderr
log = open(out_file_name, 'w')
if debug:
sys.stdout = log
sys.stderr = log
sys.stdout.flush()
# signal flag
sigterm_received = False
sighup_received = False
signal.signal( signal.SIGTERM, handle_sig )
signal.signal( signal.SIGHUP, handle_sig )
# report file stat results as floats
os.stat_float_times( True )
if server is None and port is None:
print "You must specify both an IRC server and port to connect to"
sys.exit(1)
server_id = sqldb.get_server_id(server,port)
sqldb.add_connection(pid,nick_id,server_id)
bot = botcheck.botcheck(nick, server, port)
bot.start()
| [
"caphrim007@gmail.com"
] | caphrim007@gmail.com | |
71579406432d7fe7285db017b4e9a222908657e8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02646/s083923096.py | 1d20b8ebcdffa787072d93b40af0fe261e1c510f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | A,V = map(int,input().split())
B,W = map(int,input().split())
T = int(input())
# Gap between the runaway (position A, speed V -- read on the previous
# line) and the chaser (position B, speed W).  A catch happens iff the
# speed advantage closes the gap within T time units.
dist = abs(A-B)
diff = V-W
if dist > diff * T:
    print("NO")
else:
    print("YES")
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4aa330b332c7e104942f09ca0c4c39a76b550364 | cda52154e416a8d9d629221391609c3b84e408fd | /bairanalysis/workflows/temporalpatterns.py | 31d147c56633d079e42d1315049cf31f9bffb8f1 | [] | no_license | gpiantoni/bairanalysis | b7b72bf696389a08dc0d4e03d2bd41917cb5e641 | fd22be962eb863384017e98fc6b5ac9a3e527971 | refs/heads/master | 2022-12-09T01:01:06.864491 | 2020-08-27T14:13:27 | 2020-08-27T14:13:27 | 183,632,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,467 | py | from nipype import MapNode, Node, Workflow
from nipype.interfaces.utility import IdentityInterface
from nipype.interfaces.fsl import FLIRT
from .preproc.preproc import create_workflow_preproc_spm
from .preproc.mri_realign import create_workflow_coreg_epi2t1w
from .glm.temporalpatterns import (
create_workflow_temporalpatterns_fsl,
)
# NOTE(review): the statements below execute at import time but reference
# ``input_node`` and ``SUBJECTS_DIR``, neither of which is defined at module
# scope (``input_node`` only exists inside the workflow builder below) --
# importing this module raises NameError.  Looks like leftover interactive
# debug code; confirm and remove or move it inside the builder.
input_node.inputs.t1w = str(
    SUBJECTS_DIR
    / 'sub-beilen/ses-UMCU7Tdaym13/anat/sub-beilen_ses-UMCU7Tdaym13_acq-wholebrain_T1w.nii')
input_node.inputs.t2star_fov = str(
    SUBJECTS_DIR
    / 'sub-beilen/ses-UMCU7Tdaym13/T2star/sub-beilen_ses-UMCU7Tdaym13_acq-visualcortex_T2star.nii.gz')
input_node.inputs.t2star_whole = str(
    SUBJECTS_DIR
    / 'sub-beilen/ses-UMCU7Tdaym13/T2star/sub-beilen_ses-UMCU7Tdaym13_acq-wholebrain_T2star.nii.gz')
input_node.inputs.bold = str(
    SUBJECTS_DIR
    / 'sub-beilen/ses-UMCU7Tdaym13/func/sub-beilen_ses-UMCU7Tdaym13_task-bairtemporalpattern_run-1_bold.nii')
input_node.inputs.events = str(
    SUBJECTS_DIR
    / 'sub-beilen/ses-UMCU7Tdaym13/func/sub-beilen_ses-UMCU7Tdaym13_task-bairtemporalpattern_run-1_events.tsv')
def create_workflow_temporalpatterns_7T(subjects, runs):
    """Assemble the 7T temporal-patterns pipeline as a Nipype Workflow.

    Wires the SPM preprocessing, the FSL temporal-patterns GLM and the
    EPI-to-T1w coregistration sub-workflows together, then maps the
    resulting T-statistic images into T1w space with FLIRT.

    NOTE(review): the ``subjects`` and ``runs`` parameters are unused in
    this body -- confirm whether they should drive iterables here.
    """
    # Entry point for all external inputs of the combined workflow.
    input_node = Node(IdentityInterface(fields=[
        'bold',
        'events',
        't2star_fov',
        't2star_whole',
        't1w',
    ]), name='input')
    # FLIRT MapNode: applies a precomputed transform (apply_xfm=True) to
    # each T-stat image, using the T1w as reference.
    coreg_tstat = MapNode(
        interface=FLIRT(), name='realign_result_to_anat',
        iterfield=['in_file', ])
    coreg_tstat.inputs.apply_xfm = True
    w = Workflow('temporalpatterns_7T')
    # Sub-workflows.
    w_preproc = create_workflow_preproc_spm()
    w_spatialobject = create_workflow_temporalpatterns_fsl()
    w_coreg = create_workflow_coreg_epi2t1w()
    # Fan the raw inputs out to the sub-workflows.
    w.connect(input_node, 'bold', w_preproc, 'input.bold')
    w.connect(input_node, 'events', w_spatialobject, 'input.events')
    w.connect(input_node, 't2star_fov', w_coreg, 'input.t2star_fov')
    w.connect(input_node, 't2star_whole', w_coreg, 'input.t2star_whole')
    w.connect(input_node, 't1w', w_coreg, 'input.t1w')
    w.connect(input_node, 't1w', coreg_tstat, 'reference')
    # Realigned BOLD feeds the GLM; mean BOLD feeds the coregistration.
    w.connect(w_preproc, 'realign.realigned_files', w_spatialobject, 'input.bold')
    w.connect(w_preproc, 'realign.mean_image', w_coreg, 'input.bold_mean')
    # Apply the EPI->T1w matrix to the GLM T-maps.
    w.connect(w_spatialobject, 'output.T_image', coreg_tstat, 'in_file')
    w.connect(w_coreg, 'output.mat_epi2t1w', coreg_tstat, 'in_matrix_file')
    return w
| [
"github@gpiantoni.com"
] | github@gpiantoni.com |
d5a861fcdbbe4ab79ae9c3c44757e51ad51d78aa | cbd240e3b02113707e9dc9e7b6e4f956ecaf1ea8 | /mains/main_dgm_dessins_auxiliary_convnet_convvae.py | 60f7350a509f9c2a7bfcbb232acdba2ec3f4565c | [] | no_license | spell00/NeuralNetworksZoo | 3feb100886c22ecc8ff45987c68b40b56096434f | 2502dc3f835afd2111339800e5a8ae42f60e7359 | refs/heads/master | 2020-05-19T10:17:43.829983 | 2019-05-18T17:45:25 | 2019-05-18T17:45:25 | 184,967,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,784 | py | from numpy import genfromtxt
from torchvision import transforms, datasets
import torch
import os
import numpy as np
import pandas as pd
from sklearn import preprocessing
def __main__():
    """Configure and run an auxiliary deep generative model (A-DGM) on the
    "dessins" (kaggle drawings) dataset.

    The body is one long experiment configuration: paths, hyperparameters,
    network layout, data loading, then training via ``dgm.run``.
    NOTE(review): the path block ("Files destinations") is defined twice
    with identical values -- likely copy/paste residue; confirm.
    """
    from data_preparation.GeoParser import GeoParser
    from dimension_reduction.ordination import ordination2d
    from sklearn.decomposition import PCA
    from IPython.display import Image
    import pandas as pd
    import numpy as np
    from models.semi_supervised.deep_generative_models.models.auxiliary_dgm import AuxiliaryDeepGenerativeModel
    from utils.utils import dict_of_int_highest_elements, plot_evaluation
    # files_destinations
    home_path = "/home/simon/"
    destination_folder = "annleukemia"
    data_folder = "data"
    results_folder = "results"
    meta_destination_folder = "pandas_meta_df"
    plots_folder_path = "/".join([home_path, destination_folder, results_folder, "plots/"])
    #dataset_name = "gse33000_and_GSE24335_GSE44768_GSE44771_GSE44770"
    dataset_name = "dessins"
    activation = "relu"
    #nrep = 3
    betas=(0.9, 0.999)
    vae_flavour = "o-sylvester"
    early_stopping = 200
    labels_per_class = 10000
    n_epochs = 1000
    warmup = 100
    gt_input = 10000
    # if ladder is yes builds a ladder vae. Do not combine with auxiliary (yet; might be possible and relatively
    # not too hard to implement, but might be overkill. Might be interesting too)
    translate = "n"
    # Types of deep generative model
    # Convolution neural network (convolutional VAE and convolutional classifier)
    use_conv_ae = True #Not applicable if not sequence (images, videos, sentences, DNA...)
    use_convnet = True
    # Ladder VAE (L-VAE)
    ladder = False
    # Auxiliary Variational Auto-Encoder (A-VAE)
    auxiliary = True
    # Load pre-computed vae (unsupervised learning)
    load_vae = False
    lr = 1e-3
    l1 = 0.
    l2 = 0.
    batch_size = 16
    mc = 1 # seems to be a problem when mc > 1 for display only, results seem good
    iw = 1 # seems to be a problem when iw > 1 for display only, results seem good
    # Neurons layers
    a_dim = 20
    h_dims_classifier = [256]
    h_dims = [256]
    z_dims = [20]
    # number of flows
    number_of_flows = 3
    num_elements = 1
    # Files destinations
    load_from_disk = True
    load_merge = False
    home_path = "/home/simon/"
    destination_folder = "annleukemia"
    data_folder = "data"
    results_folder = "results"
    meta_destination_folder = "pandas_meta_df"
    plots_folder_path = "/".join([home_path, destination_folder,
                                  results_folder, "plots/"])
    # Build the model, point it at the result folders and load the dataset.
    dgm = AuxiliaryDeepGenerativeModel(vae_flavour, z_dims, h_dims, n_flows=number_of_flows, a_dim=a_dim,
                                       num_elements=num_elements, is_hebb_layers=True,
                                       gt_input=gt_input)
    dgm.set_configs(home_path=home_path, results_folder=results_folder, data_folder=data_folder,
                    destination_folder=destination_folder, dataset_name=dataset_name, lr=lr,
                    meta_destination_folder="meta_pandas_dataframes", csv_filename="csv_loggers",
                    is_unlabelled=True)
    dgm.load_local_dataset(root_train="/home/simon/annleukemia/data/kaggle_dessins/train",
                           root_valid="/home/simon/annleukemia/data/kaggle_dessins/valid",
                           root_test="/home/simon/annleukemia/data/kaggle_dessins/test", n_classes=31,
                           batch_size=batch_size, labels_per_class=labels_per_class,
                           extra_class=True, unlabelled_train_ds=True, normalize=True, mu=0.5, var=0.5)
    is_example = False
    # GET ordination from this!
    train = np.vstack([x[0].data.numpy() for x in dgm.x_train])
    # unlabelled_train = np.vstack([x[0].data.numpy() for x in dgm.unlabelled_x_train])
    targets = np.vstack([x[1].data.numpy() for x in dgm.x_train])
    labels = [x.tolist().index(1) for x in targets]
    dgm.define_configurations(early_stopping=early_stopping, warmup=warmup, flavour=vae_flavour)
    dgm.set_data(labels_per_class=labels_per_class, is_example=True, extra_class=True)
    # Convolutional layer layout for classifier and auto-encoder.
    planes_classifier = [1, 8, 16, 32, 64, 128, 256]
    classifier_kernels = [3, 3, 3, 3, 3, 3, 3]
    classifier_pooling_layers = [True, True, True, True, True, True, False, False]
    planes_ae = [1, 8, 16, 32, 64, 128, 256]
    kernels_ae = [3, 3, 3, 3, 3, 3, 3]
    padding_ae = [1, 1, 1, 1, 1, 1]
    pooling_layers_ae = [1, 1, 1, 1, 1, 1]
    dgm.set_conv_adgm_layers(h_dims=h_dims_classifier, input_shape=[1, 100, 100], hs_ae=h_dims,
                             use_conv_classifier=use_convnet, planes_classifier=planes_classifier,
                             classifier_kernels=classifier_kernels, classifier_pooling_layers=classifier_pooling_layers,
                             planes_ae=planes_ae, padding_ae=padding_ae, pooling_layers_ae=pooling_layers_ae,
                             kernels_ae=kernels_ae)
    #dgm.set_dgm_layers()
    # import the M1 in the M1+M2 model (Kingma et al, 2014). Not sure if it still works...
    if load_vae:
        print("Importing the model: ", dgm.model_file_name)
        if use_conv_ae:
            dgm.import_cvae()
        else:
            dgm.load_model()
    # dgm.set_dgm_layers_pretrained()
    dgm.cuda()
    # dgm.vae.generate_random(False, batch_size, z1_size, [1, 28, 28])
    dgm.run(n_epochs, auxiliary, mc, iw, lambda1=l1, lambda2=l2, verbose=1,
            show_progress=10, show_pca_train=10, show_lda_train=10, show_pca_generated=10, clip_grad=0.1,
            is_input_pruning=False, start_pruning=10000, show_lda_generated=10, warmup_n=-1, alpha_rate=0.1, t_max=.1)
if __name__ == "__main__":
    __main__()
| [
"simonjpelletier@gmail.com"
] | simonjpelletier@gmail.com |
07317e3c804cedcc539ee97d5e4c6b5126832b5f | c97b9ae1bf06757ba61f90905e4d9b9dd6498700 | /venv/Lib/site-packages/tensorflow/python/keras/api/_v1/keras/preprocessing/__init__.py | 15ae238ba1d8576e0c3cd99c440aa03f8374c1f0 | [] | no_license | Rahulk1p/image-processor | f7ceee2e3f66d10b2889b937cdfd66a118df8b5d | 385f172f7444bdbf361901108552a54979318a2d | refs/heads/main | 2023-03-27T10:09:46.080935 | 2021-03-16T13:04:02 | 2021-03-16T13:04:02 | 348,115,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:3fa8cd64112c86a97ce1ef382a86af5034fac9d3e73f7de7c6fc2ce0c50db3e3
size 670
| [
"rksc.k1p@gmail.com"
] | rksc.k1p@gmail.com |
137aee76bca4f5d313b8daa2a18995728dc70147 | 644bcdabf35261e07c2abed75986d70f736cb414 | /python-project/Defis/Euler_84_Test.py | 34fe0128eb2eacdea1c5bbf7d41d1defe839bb75 | [] | no_license | matcianfa/playground-X1rXTswJ | f967ab2c2cf3905becafb6d77e89a31414d014de | 67859b496e407200afb2b1d2b32bba5ed0fcc3f0 | refs/heads/master | 2023-04-03T11:56:15.878757 | 2023-03-24T15:52:37 | 2023-03-24T15:52:37 | 122,226,979 | 5 | 20 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | #Ne pas oublier de changer le module à importer
# The exercise module whose console output is under test.
module="Defis/Euler_84"
import sys
import io
# Capture everything the user's solution prints while it is imported.
sauvegarde_stdout=sys.stdout
sys.stdout=io.StringIO()
from Euler_84 import *
# Captured output minus the trailing newline.
count1 = sys.stdout.getvalue()[:-1]
sys.stdout=sauvegarde_stdout
from ma_bao import *
# The expected answer.
reponse=101524
# Hint shown on failure.  NOTE(review): ``help`` shadows the builtin.
help="N'oublie pas d'utiliser print pour afficher le resultat"
def send_msg(channel, msg):
    """Emit a Tech.io protocol line displaying *msg* on *channel*."""
    # Build the exact protocol line, then print it.
    line = "TECHIO> message --channel \"{}\" \"{}\"".format(channel, msg)
    print(line)
def success():
    # Congratulate, reveal the reference solution, then signal success to
    # the Tech.io runner.
    send_msg("Tests validés","Bravo !")
    afficher_correction(module)
    print("TECHIO> success true")
def fail():
    """Report failure to the Tech.io runner."""
    message = "TECHIO> success false"
    print(message)
def test():
try:
assert str(count1) == str(reponse), "Le résultat obtenu est {} mais ce n'est pas le bon.".format(str(count1))
send_msg("Tests validés","Le résultat cherché est bien {}".format(str(count1)))
success()
except AssertionError as e:
fail()
send_msg("Oops! ", e)
if help:
send_msg("Aide 💡", help)
if __name__ == "__main__": test()
| [
"noreply@github.com"
] | matcianfa.noreply@github.com |
28274fb58547ee1ebcf33d9a281abab79d4ab29a | e7280a7cd9e6a03e5ef129e8f82167e6213eef9c | /website/webapps/django/myproject/myproject/sitemaps.py | 0e164fc2f818121b1e613a6b225c193f6cdbc9a8 | [
"MIT"
] | permissive | jensontham/pythonsingapore | da7b872880e7a9a3805ca4991bce889b558cf9d0 | 8a64afcf3e17d5d42e494f6f67b83f93861a63e4 | refs/heads/master | 2020-12-25T05:29:04.474391 | 2012-11-12T12:06:57 | 2012-11-12T12:06:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | """Sitemaps that are global to the project (i.e. the django-cms pages)."""
from django.contrib.sitemaps import Sitemap
from cms.utils.moderator import get_page_queryset
from cmsplugin_blog.models import EntryTitle
class BlogSitemap(Sitemap):
    """Sitemap section listing every published blog entry."""

    changefreq = "weekly"
    priority = 0.7

    def items(self):
        """Return the titles whose underlying entry is published."""
        return EntryTitle.objects.filter(entry__is_published=True)

    def lastmod(self, obj):
        """Use the entry's publication date as the last-modified date."""
        return obj.entry.pub_date

    def location(self, obj):
        """Return the canonical URL of the entry title."""
        return obj.get_absolute_url()
class PagesSitemap(Sitemap):
    """Sitemap section listing public (no login required) CMS pages."""

    changefreq = "weekly"
    priority = 0.4

    def items(self):
        """Return all published pages that are visible without logging in."""
        return get_page_queryset(None).published().filter(login_required=False)

    def lastmod(self, page):
        """Last-modified date is not tracked for CMS pages."""
        return None

    def location(self, obj):
        """Return the canonical URL of the page."""
        return obj.get_absolute_url()
| [
"mbrochh@gmail.com"
] | mbrochh@gmail.com |
21d450387a2b29adafb5273f746a5d0cd2bc4a9c | 7437ad1203ff272a482e4a7c7266afdbc7a0e619 | /lra/models/gpu_16g/linear_transformer_exp_convspe_k128_shr/listops/r3/config.py | 7cccd5f68123865cd3aeeef76d176371cd488305 | [] | no_license | maximzubkov/spe | 4ccc59d538a2cb4e5f9b0118ef79933eed0b8d95 | d877feb0f6b935152e5431ce374606ba72c08d65 | refs/heads/main | 2023-08-23T02:08:14.253693 | 2021-10-05T17:25:36 | 2021-10-05T17:25:36 | 385,636,912 | 0 | 0 | null | 2021-10-05T17:25:37 | 2021-07-13T14:42:19 | Jupyter Notebook | UTF-8 | Python | false | false | 1,739 | py | # Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and hyperparameter sweeps."""
import functools
from fast_self_attention import fast_self_attention as favor
import jax
import jax_spe as spe
from lra_benchmarks.models.layers.spe import make_spe_transform_fn
from lra_benchmarks.listops.configs import base_listops_config
def get_config():
    """Get the default hyperparameter configuration.

    Builds on the base ListOps config: a linear (Performer-style)
    transformer with an exp kernel and shared ConvSPE positional encoding.
    """
    config = base_listops_config.get_config()
    config.random_seed = 2
    config.model_type = "transformer"
    # Deterministic generalized attention; qkv_dim is per attention head.
    config.attention_fn = favor.make_fast_generalized_attention(
        qkv_dim=config.qkv_dim // config.num_heads,
        features_type='deterministic',
        kernel_fn=jax.lax.exp,
        lax_scan_unroll=16)
    # Positional information comes from a shared ConvSPE transform applied
    # to queries/keys instead of additive position embeddings.
    config.model_kwargs = dict(
        add_pos_emb=False,
        qk_transform_fn_factory=functools.partial(
            make_spe_transform_fn,
            spe_cls=spe.ConvSPE,
            spe_kwargs=dict(
                num_realizations=64,
                kernel_size=128
            ),
            shared=True
        )
    )
    # Smaller batch than the base config (32 -> 8); scale the learning
    # rate linearly with the batch size to compensate.
    config.batch_size = 8
    config.learning_rate = config.learning_rate / 32 * 8
    config.num_train_steps = 10000
    config.eval_frequency = config.eval_frequency * 4
    return config
def get_hyper(hyper):
    """Return an empty hyperparameter sweep (a single default run)."""
    return hyper.product([])
| [
"zubkov.md@phystech.edu"
] | zubkov.md@phystech.edu |
55c3bc5b3dec8357a238c4cd264073832ffac18c | 492693d325dad3adcb09601c54a5b7b0d00cfdef | /drf_admin/apps/system/views/users.py | 0927063cdb15a3fb458c58aa591fd13a7613fb6a | [
"MIT"
] | permissive | HHHyuming/drf_admin | c682e7c284a9747175a81833aacb5e3fc67a2e42 | 956ab1a96964a8af06b0697e228a3d4238dce109 | refs/heads/master | 2023-03-19T23:52:06.521389 | 2021-03-10T15:28:50 | 2021-03-10T15:28:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,152 | py | # -*- coding: utf-8 -*-
"""
@author : Wang Meng
@github : https://github.com/tianpangji
@software : PyCharm
@file : users.py
@create : 2020/6/27 17:55
"""
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import mixins
from rest_framework.exceptions import ValidationError
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from drf_admin.apps.system.serializers.users import UsersSerializer, UsersPartialSerializer, ResetPasswordSerializer
from drf_admin.utils.views import AdminViewSet
from oauth.models import Users
from system.filters.users import UsersFilter
from system.models import Permissions
class UsersViewSet(AdminViewSet):
    """
    create:
    Users -- create

    Create a user. status: 201 (success), return: the new user's data.

    destroy:
    Users -- delete

    Delete a user. status: 204 (success), return: None.

    multiple_delete:
    Users -- bulk delete

    Delete several users at once. status: 204 (success), return: None.

    update:
    Users -- update

    Update a user. status: 200 (success), return: the updated user's data.

    partial_update:
    Users -- partial update

    Partially update a user (activate/lock). status: 200 (success),
    return: the updated user's data.

    list:
    Users -- list

    List users. status: 200 (success), return: the list of users.

    retrieve:
    Users -- detail

    Retrieve one user. status: 200 (success), return: the user's details.
    """
    queryset = Users.objects.all()
    serializer_class = UsersSerializer
    # Enable field filtering, keyword search and ordering on the list endpoint.
    filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
    filter_class = UsersFilter
    search_fields = ('username', 'name', 'mobile', 'email')
    ordering_fields = ('id',)

    def get_serializer_class(self):
        # partial_update only toggles activation/lock, so it gets a
        # restricted serializer; all other actions use the full one.
        if self.action == 'partial_update':
            return UsersPartialSerializer
        else:
            return UsersSerializer
class ResetPasswordAPIView(mixins.UpdateModelMixin, GenericAPIView):
    """
    patch:
    Users -- reset password

    Reset a user's password. status: 200 (success), return: None.
    """
    queryset = Users.objects.all()
    serializer_class = ResetPasswordSerializer

    def patch(self, request, *args, **kwargs):
        # Delegate to UpdateModelMixin's partial update (PATCH semantics).
        return self.partial_update(request, *args, **kwargs)
class PermissionsAPIView(APIView):
    """
    get:
    Users -- list of permission ids held by a user

    Return the ids of every permission granted to the user.
    status: 200 (success), return: list of permission ids.
    """

    def get(self, request, pk):
        try:
            user = Users.objects.get(id=pk)
        except Users.DoesNotExist:
            raise ValidationError('无效的用户ID')
        # The "admin" role and superusers implicitly hold every permission.
        if 'admin' in user.roles.values_list('name', flat=True) or user.is_superuser:
            return Response(data={'results': Permissions.objects.values_list('id', flat=True)})
        # Other roles: union of permission ids over all the user's roles,
        # stripping duplicates and NULLs (roles that grant no permission).
        return Response(data={'results': list(filter(None, set(user.roles.values_list('permissions__id', flat=True))))})
| [
"921781999@qq.com"
] | 921781999@qq.com |
3ce218574fdef85fc097503d423a3aab88bbc7ad | 8e25365117fdeb2e27a8d8207c6d150f839a57b0 | /sample_function.py | 2548d85a249f73d27f77b3891cc5cfccaa6739a9 | [] | no_license | rajagennu/python_hackerrank | 5feae3ee67f5f03613f365c788c1881b8ce6b691 | 863522cfa5365418019331e5042276cfbfaf4783 | refs/heads/master | 2022-11-18T13:23:20.165402 | 2020-07-18T16:08:20 | 2020-07-18T16:08:20 | 266,466,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | from random import randint
def padding(modifier):
    """Return *modifier* increased by a random offset drawn from [1, 10]."""
    return randint(1, 10) + modifier
# Quick smoke test: prints a value between 6 and 15.
print(padding(5))
| [
"replituser@example.com"
] | replituser@example.com |
8eec87f0ec970c1c4218ea275e77e39df6aaba19 | 50914176887f9f21a3489a9407195ba14831354c | /guess_number_higher_or_lower.py | 526240182e29a6ff0eabd433fc3fab55af78b4ad | [] | no_license | nkukarl/leetcode | e8cfc2a31e64b68222ad7af631277f1f66d277bc | b1dbe37e8ca1c88714f91643085625ccced76e07 | refs/heads/master | 2021-01-10T05:42:04.022807 | 2018-02-24T03:55:24 | 2018-02-24T03:55:24 | 43,725,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | def guess(num):
N = 2
if N == num:
return 0
if num > N:
return -1
return 1
class Solution(object):
    def guess_number(self, n):
        """Binary-search [1, n] for the secret number via the guess() API."""
        lo, hi = 1, n
        while lo < hi:
            mid = (lo + hi) // 2
            verdict = guess(mid)
            if verdict == 0:
                return mid
            if verdict == -1:
                # Guess too high: the secret is at or below mid.
                hi = mid
            else:
                # Guess too low: the secret is strictly above mid.
                lo = mid + 1
        return hi
| [
"kai.wang.nankai@gmail.com"
] | kai.wang.nankai@gmail.com |
d232aebeb075ae5e26d1a59f961d2a76c9897602 | 57eff74aacf082ef132d6944c0e4cd78fe6d29d7 | /tests/integration/templatetags/test_polls_tags.py | 960743983daa31ab044bb72c9558b8b0d0d67a89 | [
"BSD-3-Clause"
] | permissive | bitbike/django-machina | a3a850db5996907a38b48020b60e404b6f94ff59 | 0b772d5d3e107f41e901e4b488685ac179a10648 | refs/heads/master | 2020-04-03T04:17:30.900186 | 2018-10-24T02:47:33 | 2018-10-24T02:47:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,407 | py | import pytest
from django.contrib.auth.models import AnonymousUser
from django.template import Context
from django.template.base import Template
from django.test.client import RequestFactory
from machina.core.db.models import get_model
from machina.core.loading import get_class
from machina.test.factories import GroupFactory
from machina.test.factories import PostFactory
from machina.test.factories import TopicPollFactory
from machina.test.factories import TopicPollOptionFactory
from machina.test.factories import TopicPollVoteFactory
from machina.test.factories import UserFactory
from machina.test.factories import create_category_forum
from machina.test.factories import create_forum
from machina.test.factories import create_topic
Forum = get_model('forum', 'Forum')
Post = get_model('forum_conversation', 'Post')
Topic = get_model('forum_conversation', 'Topic')
PermissionHandler = get_class('forum_permission.handler', 'PermissionHandler')
assign_perm = get_class('forum_permission.shortcuts', 'assign_perm')
@pytest.mark.django_db
class BasePollsTagsTestCase(object):
    """Shared fixture: two forums with topics, posts and polls, plus users
    and permission assignments, for the forum_polls template-tag tests."""

    @pytest.fixture(autouse=True)
    def setup(self):
        self.loadstatement = '{% load forum_polls_tags %}'
        self.request_factory = RequestFactory()
        # Two regular users sharing one group.
        self.g1 = GroupFactory.create()
        self.u1 = UserFactory.create()
        self.u2 = UserFactory.create()
        self.u1.groups.add(self.g1)
        self.u2.groups.add(self.g1)
        # One moderator in a dedicated moderators group.
        self.moderators = GroupFactory.create()
        self.moderator = UserFactory.create()
        self.moderator.groups.add(self.moderators)
        self.superuser = UserFactory.create(is_superuser=True)
        # Permission handler
        self.perm_handler = PermissionHandler()
        # Set up a top-level category
        self.top_level_cat = create_category_forum()
        # Set up some forums
        self.forum_1 = create_forum(parent=self.top_level_cat)
        self.forum_2 = create_forum(parent=self.top_level_cat)
        # Set up some topics and posts
        self.forum_1_topic = create_topic(forum=self.forum_1, poster=self.u1)
        self.forum_2_topic = create_topic(forum=self.forum_2, poster=self.u2)
        self.post_1 = PostFactory.create(topic=self.forum_1_topic, poster=self.u1)
        self.post_2 = PostFactory.create(topic=self.forum_2_topic, poster=self.u2)
        self.poll_1 = TopicPollFactory.create(topic=self.forum_1_topic)
        self.poll_2 = TopicPollFactory.create(topic=self.forum_2_topic)
        # Assign some permissions
        assign_perm('can_see_forum', self.g1, self.forum_1)
        assign_perm('can_read_forum', self.g1, self.forum_1)
        assign_perm('can_edit_own_posts', self.g1, self.forum_1)
        assign_perm('can_delete_own_posts', self.g1, self.forum_1)
        assign_perm('can_reply_to_topics', self.g1, self.forum_1)
        assign_perm('can_see_forum', self.moderators, self.forum_1)
        assign_perm('can_read_forum', self.moderators, self.forum_1)
        assign_perm('can_edit_own_posts', self.moderators, self.forum_1)
        assign_perm('can_delete_own_posts', self.moderators, self.forum_1)
        assign_perm('can_edit_posts', self.moderators, self.forum_1)
        assign_perm('can_delete_posts', self.moderators, self.forum_1)
        assign_perm('can_vote_in_polls', self.g1, self.forum_1)
class TestHasBeenCompletedByTag(BasePollsTagsTestCase):
    """Tests for the `has_been_completed_by` template filter."""

    def test_can_tell_if_an_authenticated_user_has_already_voted_in_a_poll(self):
        # Setup
        def get_rendered(poll, user):
            # Render the filter for the given poll/user and return the output.
            request = self.request_factory.get('/')
            request.user = user
            t = Template(
                self.loadstatement + '{% if poll|has_been_completed_by:request.user %}HAS_VOTED'
                '{% else %}HAS_NOT_VOTED{% endif %}')
            c = Context({'poll': poll, 'request': request})
            rendered = t.render(c)
            return rendered
        # Setup
        poll_option_1 = TopicPollOptionFactory.create(poll=self.poll_1)
        TopicPollOptionFactory.create(poll=self.poll_1)
        # u1 votes in poll_1 only.
        TopicPollVoteFactory.create(poll_option=poll_option_1, voter=self.u1)
        # Run & check
        assert get_rendered(self.poll_1, self.u1) == 'HAS_VOTED'
        assert get_rendered(self.poll_2, self.u1) == 'HAS_NOT_VOTED'

    def test_can_if_an_anonymous_user_has_already_voted_in_a_poll(self):
        # Setup
        def get_rendered(poll, user):
            # Render the filter for the given poll/user and return the output.
            request = self.request_factory.get('/')
            request.user = user
            t = Template(self.loadstatement + '{% if poll|has_been_completed_by:request.user %}'
                         'HAS_VOTED{% else %}HAS_NOT_VOTED{% endif %}')
            c = Context({'poll': poll, 'request': request})
            rendered = t.render(c)
            return rendered
        # u2: anonymous with a forum key; u3: anonymous without one.
        u2 = AnonymousUser()
        u2.forum_key = 'dummy'
        u3 = AnonymousUser()
        poll_option_1 = TopicPollOptionFactory.create(poll=self.poll_1)
        TopicPollOptionFactory.create(poll=self.poll_1)
        # The anonymous vote is tied to the 'dummy' forum key.
        TopicPollVoteFactory.create(poll_option=poll_option_1, anonymous_key='dummy')
        # Run & check
        assert get_rendered(self.poll_1, u2) == 'HAS_VOTED'
        assert get_rendered(self.poll_2, u2) == 'HAS_NOT_VOTED'
        assert get_rendered(self.poll_2, u3) == 'HAS_NOT_VOTED'
        # NOTE(review): duplicated assertion; poll_1 with u3 was probably
        # the intended fourth case.
        assert get_rendered(self.poll_2, u3) == 'HAS_NOT_VOTED'
| [
"morgan.aubert@zoho.com"
] | morgan.aubert@zoho.com |
d7d0d39eadd20554ddc16e5bd2fc3b0ce398ec14 | eea1c66c80784d4aefeb0d5fd2e186f9a3b1ac6e | /atcoder/aising2020/c.py | 8197aed672f741ca2fbecff7d9be69c326c2c4ba | [] | no_license | reo11/AtCoder | 4e99d6f40d8befe264761e3b8c33d3a6b7ba0fe9 | 69c6d67f05cb9190d8fb07204488cd7ce4d0bed2 | refs/heads/master | 2023-08-28T10:54:50.859288 | 2023-08-22T18:52:47 | 2023-08-22T18:52:47 | 162,085,118 | 4 | 0 | null | 2023-07-01T14:17:28 | 2018-12-17T06:31:10 | Python | UTF-8 | Python | false | false | 340 | py | n = int(input())
ans = []
# f[v] counts ordered triples (x, y, z) with
# x^2 + y^2 + z^2 + xy + yz + zx == v.
# Assumes n <= 10**4 (problem constraint) so f[tmp] never goes out of range.
f = [0 for _ in range(10 ** 4 + 1)]
for x in range(1, 101):
    for y in range(1, 101):
        for z in range(1, 101):
            tmp = x ** 2 + y ** 2 + z ** 2 + x * y + y * z + z * x
            if tmp <= n:
                f[tmp] += 1
# One count per line for i = 1..n; join once to keep output fast.
for i in range(1, n + 1):
    ans.append(str(f[i]))
print("\n".join(ans))
| [
"reohirao116@gmail.com"
] | reohirao116@gmail.com |
5d2768e0798d279f7587d06124782c383d8457ef | 9c8abb2013189633914073a668495122db0f77c1 | /settings.py | 37b1cc3ceb02f7fd5e1719a71c33eb74aed298a6 | [] | no_license | AshkenSC/Python-Space-Invaders | 160a63e88a74f0b21ea8a234c556c72feab75a54 | 84d7447b04065dd13b048d4f3583b81a31fbe8ea | refs/heads/master | 2020-04-15T10:17:16.399152 | 2019-01-30T15:27:20 | 2019-01-30T15:27:20 | 164,589,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,643 | py | class Settings():
'''the class to store all the settings of Space Invaders'''
def __init__(self):
'''initiate STATIC game settings'''
# screen settings
self.screen_width = 800
self.screen_height = 600
self.bg_color = (230, 230, 230)
# player ship settings
self.ship_speed_factor = 2.0
self.ship_limit = 3
# bullet settings
self.bullet_speed_factor = 2.5
self.bullet_width = 200
self.bullet_height = 18
self.bullet_color = 255, 0, 0
self.bullets_allowed = 5
# alien settings
self.fleet_drop_speed = 100
# game DIFFICULTY speed-up scale
self.speedup_scale = 1.1
# alien POINTS speed-up scale
# alien pts should increase along with difficulty variety
self.score_scale = 1.5
# call dynamic settings method
self.initialize_dynamic_settings()
def initialize_dynamic_settings(self):
'''initialize DYNAMIC game settings'''
self.ship_speed_factor = 1.5
self.bullet_speed_factor = 3
self.alien_speed_factor = 1
# fleet_direction = 1: move towards right
# fleet_direction = -1: move towards left
self.fleet_direction = 1
# SCORE record
self.alien_points = 50
def increase_speed(self):
'''SPEED increase and alien SCORE settings'''
self.ship_speed_factor *= self.speedup_scale
self.bullet_speed_factor *= self.speedup_scale
self.alien_speed_factor *= self.speedup_scale
self.alien_points = int(self.alien_points * self.score_scale) | [
"393940378@qq.com"
] | 393940378@qq.com |
7020eb68faae0860c627ff2294f7634b5de374a3 | dfa8337a94c6cdc8347589a8daf75accf556dd49 | /.local/bin/pylint | 7e1094266bdd2ee60efac6f0cdfe474d15bf1839 | [] | no_license | momentum-morehouse/django-uptact-GabeJunior-1196 | 2689fb43fd36ab676d78fbbbbd7dcb758ef6a98e | 84964e3b9f98d3fe6642c5918ec9f506bb6dcf06 | refs/heads/master | 2022-11-14T18:26:57.941198 | 2020-07-12T22:09:20 | 2020-07-12T22:09:20 | 277,572,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | #!/opt/virtualenvs/python3/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pylint
# Installer-generated console-script shim: normalise argv[0] by stripping
# any "-script.pyw"/".exe" suffix, then hand control to pylint.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run_pylint())
| [
"replituser@example.com"
] | replituser@example.com | |
2a6375e0b917dbc1b0e991dbae5f952e1487d58f | 50b2a866c65e925dde2802ec1541885a7b0edb64 | /src/server_config.py | f43169e8ef471e0f0bc2428f2849a184726f1c90 | [] | no_license | sandumjacob/Dynamic-NIDS-Evaluation-Utility | 68aacc97f120bdc04abd37c3ea8c055923509cf1 | a9f6b286c435a39eb8a452a1b360e36b822a0f7a | refs/heads/master | 2021-03-12T06:47:46.154350 | 2020-03-12T19:59:55 | 2020-03-12T19:59:55 | 246,598,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | SERVER_IP = '127.0.0.1'
# Alternate LAN address kept for testing across machines.
# SERVER_IP = '10.0.0.8'
SERVER_PORT = 5000
# Bytes read per socket recv() call.
SERVER_BUFFER_SIZE = 1024
# How many clients to wait for connection from
CLIENT_COUNT = 1
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
8cd208cef461d1bf7645a4c31f9e5b7b4541aa5a | 250962c80383ecf9c2f94e2874c1e1f961f6a181 | /escpos/constants.py | a239e30a932526ceba5155148dd630c29d55b29d | [
"Apache-2.0"
] | permissive | kmee/pyescpos | aa59d6f2d1a9c99d3d9a55da1e1c543c49105da3 | b0a0040cd770c1658258a870caca1a33ff010460 | refs/heads/master | 2023-07-07T09:02:41.581454 | 2022-04-14T19:05:17 | 2022-04-14T19:05:17 | 106,326,996 | 0 | 1 | Apache-2.0 | 2022-04-20T18:29:21 | 2017-10-09T19:34:38 | Python | UTF-8 | Python | false | false | 1,339 | py | # -*- coding: utf-8 -*-
#
# escpos/constants.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
# Default character encoding and error handler used when encoding text
# sent to the printer.
DEFAULT_ENCODING = 'utf-8'
DEFAULT_ENCODING_ERRORS = 'strict'
CASHDRAWER_DEFAULT_DURATION = 200
"""Duration for cash drawer activation (kick) in milliseconds.
See :meth:`~escpos.impl.epson.GenericESCPOS.kick_drawer` method for details.
"""
BACKOFF_DEFAULT_MAXTRIES = 3
"""Number of tries before give up. See :func:`escpos.retry.backoff`"""
BACKOFF_DEFAULT_DELAY = 3
"""Delay between retries (in seconds). See :func:`escpos.retry.backoff`"""
BACKOFF_DEFAULT_FACTOR = 2
"""Multiply factor in which delay will be increased for the next retry.
See :func:`escpos.retry.backoff`.
"""
| [
"daniel@base4.com.br"
] | daniel@base4.com.br |
e1bfe9e9f183400da3f8fca486753fb5c5098df0 | afe4f8281c1b80621c8b682d22aed6bfc8d15fc7 | /blogaccountsite/wsgi.py | ec4aecfe41d47ac1b3b5ed242f544180a30a2e7d | [] | no_license | mahidul-islam/oj | 9242f1cd2ce9a2520a2c63321be7728bb33d756c | fab39fa4bac6cb8ad6d72cb139a22afa75b1fd56 | refs/heads/master | 2022-12-12T07:16:03.097332 | 2018-07-04T13:57:36 | 2018-07-04T13:57:36 | 139,730,070 | 1 | 0 | null | 2021-09-07T23:56:02 | 2018-07-04T14:04:45 | CSS | UTF-8 | Python | false | false | 499 | py | """
WSGI config for blogaccountsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from whitenoise.django import DjangoWhiteNoise
from django.core.wsgi import get_wsgi_application
application = DjangoWhiteNoise(application)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blogaccountsite.settings")
application = get_wsgi_application()
| [
"mizihan84@gmail.com"
] | mizihan84@gmail.com |
dc22848f4ebe315f336530a4cd8573b1d181df82 | 39b9ae78f0bfb17fbdc8bbfa604f856e753154d3 | /src/aac_datasets/utils/collate.py | f7f3a8939d50e9fff207e1268e054f28361adb27 | [
"MIT"
] | permissive | Labbeti/aac-datasets | 9de00690dcf95fcdc39ec4c2f82b598576fa4bbb | 52133540542c3b1b1fba9546e795a96622979056 | refs/heads/main | 2023-08-31T00:17:24.325928 | 2023-05-11T08:54:25 | 2023-05-11T08:54:25 | 493,979,158 | 46 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,336 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Any, Dict, List, TypeVar, Union
import torch
from torch import Tensor
from torch.nn import functional as F
T = TypeVar("T")
class BasicCollate:
    """Collate object for :class:`~torch.utils.data.dataloader.DataLoader`.

    Merge lists in dicts into a single dict of lists. No padding is applied.
    """

    def __call__(self, batch_lst: List[Dict[str, Any]]) -> Dict[str, List[Any]]:
        # Pure transposition: [{k: v1}, {k: v2}] -> {k: [v1, v2]}.
        return list_dict_to_dict_list(batch_lst)
class AdvancedCollate:
    """Advanced collate object for :class:`~torch.utils.data.dataloader.DataLoader`.

    Merge lists in dicts into a single dict of lists.
    Audio will be padded if a fill value is given in `__init__`.

    .. code-block:: python
        :caption: Example

        >>> collate = AdvancedCollate({"audio": 0.0})
        >>> loader = DataLoader(..., collate_fn=collate)
        >>> next(iter(loader))
        ... {"audio": tensor([[...]]), ...}
    """

    def __init__(self, fill_values: Dict[str, Union[float, int]]) -> None:
        # fill_values maps a batch key to the pad value used for that key;
        # keys absent from this dict are never converted or padded.
        super().__init__()
        self.fill_values = fill_values

    def __call__(self, batch_lst: List[Dict[str, Any]]) -> Dict[str, Any]:
        # Transpose the list of sample dicts into a dict of value lists.
        batch_dic: Dict[str, Any] = list_dict_to_dict_list(batch_lst)
        keys = list(batch_dic.keys())

        for key in keys:
            values = batch_dic[key]

            # Empty batch: keep as-is (converted to an empty tensor when a
            # fill value is registered for this key).
            if len(values) == 0:
                if key in self.fill_values:
                    values = torch.as_tensor(values)
                batch_dic[key] = values
                continue

            if key in self.fill_values:
                # Force tensor conversion for keys that may need padding.
                values = list(map(torch.as_tensor, values))
            else:
                # Keys without a fill value are only stacked when every
                # element is already a tensor; otherwise left untouched.
                are_tensors = [isinstance(value, Tensor) for value in values]
                if not all(are_tensors):
                    batch_dic[key] = values
                    continue

            # Identical shapes: a plain stack is enough.
            are_stackables = [value.shape == values[0].shape for value in values]
            if all(are_stackables):
                values = torch.stack(values)
                batch_dic[key] = values
                continue

            if key in self.fill_values:
                # Pad then stack when only the last dimension differs
                # (e.g. variable-length audio clips).
                are_paddable = [
                    value.ndim > 0 and value.shape[:-1] == values[0].shape[:-1]
                    for value in values
                ]
                if all(are_paddable):
                    target_length = max(audio_i.shape[-1] for audio_i in values)
                    values = torch.stack(
                        [
                            pad_last_dim(audio_i, target_length, self.fill_values[key])
                            for audio_i in values
                        ]
                    )
            # Fallthrough: incompatible shapes stay as a list of tensors.
            batch_dic[key] = values

        return batch_dic
def pad_last_dim(tensor: Tensor, target_length: int, pad_value: float) -> Tensor:
    """Right-pad ``tensor`` along its last dimension up to ``target_length``.

    (``F.pad(..., [0, pad_len])`` appends padding *after* the existing
    values, i.e. this pads on the right, not the left.)

    :param tensor: Tensor of at least 1 dim. (..., T)
    :param target_length: Target length of the last dim. If target_length <= T,
        the function has no effect (the tensor is never truncated).
    :param pad_value: Fill value used to pad tensor.
    :returns: A tensor of shape (..., max(T, target_length)).
    """
    pad_len = max(target_length - tensor.shape[-1], 0)
    return F.pad(tensor, [0, pad_len], value=pad_value)
def list_dict_to_dict_list(
    lst: List[Dict[str, T]],
    key_mode: str = "intersect",
) -> Dict[str, List[T]]:
    """Transpose a list of dictionaries into a dictionary of lists.

    :param lst: The list of dict to merge.
    :param key_mode: Can be "same" or "intersect".
        If "same", all the dictionaries must contains the same keys otherwise a ValueError will be raised.
        If "intersect", only the intersection of all keys will be used in output.
    :returns: The dictionary of lists.
    """
    if not lst:
        return {}

    keys = set(lst[0].keys())
    if key_mode == "same":
        # Every dict must expose exactly the same key set.
        for item in lst[1:]:
            if set(item.keys()) != keys:
                raise ValueError("Invalid keys for batch.")
    elif key_mode == "intersect":
        # Keep only the keys common to every dict.
        for item in lst[1:]:
            keys &= set(item.keys())
    else:
        KEY_MODES = ("same", "intersect")
        raise ValueError(
            f"Invalid argument key_mode={key_mode}. (expected one of {KEY_MODES})"
        )

    return {key: [item[key] for item in lst] for key in keys}
"etienne.labbe31@gmail.com"
] | etienne.labbe31@gmail.com |
bb5798af7f5b7ac852880514e51bfbacf911e296 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /v34oCTbkrceCZjgRE_10.py | c54979682cf50d42833333a04ece99accfcf5aa3 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | """
The **right shift** operation is similar to **floor division by powers of
two** , thus, the process is _repetitive_ and can be done _recursively_.
Sample calculation using the right shift operator ( `>>` ):
80 >> 3 = floor(80/2^3) = floor(80/8) = 10
-24 >> 2 = floor(-24/2^2) = floor(-24/4) = -6
-5 >> 1 = floor(-5/2^1) = floor(-5/2) = -3
Write a function that **mimics** (without the use of **> >**) the right shift
operator and returns the result from the two given integers.
### Examples
shift_to_right(80, 3) ➞ 10
shift_to_right(-24, 2) ➞ -6
shift_to_right(-5, 1) ➞ -3
shift_to_right(4666, 6) ➞ 72
shift_to_right(3777, 6) ➞ 59
shift_to_right(-512, 10) ➞ -1
### Notes
* There will be no negative values for the second parameter `y`.
* This challenge is more like recreating of the **right shift** operation, thus, **the use of the operator directly** is **prohibited**.
* You are expected to solve this challenge via **recursion**.
* An **iterative** version of this challenge can be found via this [link](https://edabit.com/challenge/noqQNSr5o9qzvXWzL).
"""
def shift_to_right(x, y):
    """Recursively floor-divide x by 2, y times (mimics x >> y without >>)."""
    if y == 0:
        return x
    return shift_to_right(x // 2, y - 1)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
6904c6701cd55c3d5a8ea4a4ab5985c5fede348c | ffe606c85de9009d2c15356f82daa524c343b925 | /12.5.redux/data/mkInterRaw.py | 44cdb9a989c9b57e6034b64602279c91e4946483 | [] | no_license | jbinkleyj/story_writer | d88ff7e3360fb8afd12445d1cb237788636b3083 | dc5106a35f5fbce72f8cf0801c0ad4cbc0c9f12f | refs/heads/master | 2020-07-09T15:54:02.492373 | 2017-12-16T07:26:59 | 2017-12-16T07:26:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,943 | py | import os
import argparse
from nltk.stem import WordNetLemmatizer as WNL
wnl = WNL()
# Ad-hoc check: compare the raw noun count with the count after
# lemmatisation (collapses plural/singular variants).
with open("nouncount.raw") as f:
    nouns = [x.split("\t")[0] for x in f.readlines()]
print(len(nouns))
nouns = [wnl.lemmatize(n,pos='n') for n in nouns]
print(len(set(nouns)))
# NOTE(review): this exit() makes everything below (parseParams, getvocab,
# main and the __main__ guard) unreachable -- likely leftover debug code.
exit()
def parseParams():
    """Build and parse the command-line options for vocabulary filtering."""
    parser = argparse.ArgumentParser(description='none')
    parser.add_argument('-hn', type=int, default=5000)
    parser.add_argument('-taken', type=int, default=500)
    parser.add_argument('-hv', type=int, default=10000)
    parser.add_argument('-takev', type=int, default=250)
    parser.add_argument('-nouns', type=str, default="nouncount.raw")
    parser.add_argument('-verbs', type=str, default="verbcount.raw")
    parser.add_argument('-out', type=str, default="train.raw")
    return parser.parse_args()
def getvocab(args):
    """Read the noun/verb count files and return the kept vocabularies.

    Keeps the first ``taken``/``takev`` words whose raw count is below the
    ``hn``/``hv`` frequency ceiling.  Each line is "word<TAB>count".
    NOTE(review): raises StopIteration if a count file runs out of lines
    before enough words have been collected -- assumed not to happen with
    the shipped data files; verify before reuse.
    """
    nouns = []
    with open(args.nouns) as f:
        i = 0
        while i<args.taken:
            l = next(f)
            w,k = l.split('\t')
            k = int(k)
            if k<args.hn:
                nouns.append(w)
                i+=1
    verbs = []
    with open(args.verbs) as f:
        i = 0
        while i<args.takev:
            l = next(f)
            w,k = l.split('\t')
            k = int(k)
            if k<args.hv:
                verbs.append(w)
                i+=1
    return set(nouns),set(verbs)
return set(nouns),set(verbs)
def main():
    """Build the output file: for each training sentence keep only the
    selected nouns/verbs (tagged _N/_V), one filtered line per sentence."""
    args = parseParams()
    nouns, verbs = getvocab(args)
    print(len(nouns))
    print(nouns)
    # Line indices belonging to the training split.
    with open('train.idxs') as f:
        tidx = set([int(x) for x in f.readlines()])
    # Keep only the POS-tagged lines of the training split.
    with open('nv.all') as f:
        nv = [x for i,x in enumerate(f.readlines()) if i in tidx]
    data = []
    for l in nv:
        tmp = []
        # Tokens look like "word_TAG"; keep nouns/verbs in the vocabularies.
        for x in l.lower().split(" "):
            w,cat = x.split("_")
            if cat[0]=="n":
                if w in nouns:
                    tmp.append(w+"_N")
            elif cat[0]=="v":
                if w in verbs:
                    tmp.append(w+"_V")
        if not tmp:
            # Placeholder so every source line still yields an output line.
            tmp = ["<NO_ITEMS>"]
        data.append(" ".join(tmp))
    with open(args.out,'w') as f:
        f.write("\n".join(data))
if __name__=="__main__":
    main()
| [
"kedzior@uw.edu"
] | kedzior@uw.edu |
2686c10d717de9e0a0a843214f7f55cdae879bb8 | 937313d90f1cc8c0b4a6fa90f8ba33a9e31be71d | /events/contribution/doctype/contribution_item/contribution_item.py | 93d16ff6ec5d539d02ea947fa828b57b78ed84ab | [
"MIT"
] | permissive | bobzz-zone/korecent_gias | a7dafe2eaeecbd53f4a060e72098c5bfbba67bd5 | 4d456c6a4455b247cd6710f55bd7ebd30a615093 | refs/heads/master | 2021-01-20T05:19:03.577088 | 2017-08-28T13:07:21 | 2017-08-28T13:07:21 | 101,425,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, bobzz.zone@gmail.com and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ContributionItem(Document):
    # Plain child-table doctype: all behaviour comes from frappe's Document.
    pass
| [
"bobzz.zone@gmail.com"
] | bobzz.zone@gmail.com |
5ce633162ef77335b572b7931a14ef3f1d121526 | 45de3aa97525713e3a452c18dcabe61ac9cf0877 | /src/primaires/scripting/actions/changer_stat.py | 9be53ad6a93dcc43d22bf3b07946e41a7ff1d81f | [
"BSD-3-Clause"
] | permissive | stormi/tsunami | 95a6da188eadea3620c70f7028f32806ee2ec0d1 | bdc853229834b52b2ee8ed54a3161a1a3133d926 | refs/heads/master | 2020-12-26T04:27:13.578652 | 2015-11-17T21:32:38 | 2015-11-17T21:32:38 | 25,606,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,194 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action changer_stat."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):

    """Change a character's stat."""

    @classmethod
    def init_types(cls):
        # Register the parameter types accepted by the scripting system:
        # a character, a stat name and a numeric value.
        cls.ajouter_types(cls.changer_stat, "Personnage", "str", "Fraction")

    @staticmethod
    def changer_stat(personnage, nom_stat, valeur):
        """Modify a character's stat.

        Prefer this action for raising a character's stats.  To consume
        specific stats (such as vitality or endurance), use the
        'consommer' action instead.

        Parameters to provide:

          * personnage: the character whose stat should change
          * nom_stat: the name of the stat
          * valeur: the new value of the stat

        Usage examples:

          changer_stat personnage "force" 90
          changer_stat personnage "vitalite_max" 2000
          changer_stat "mana" 80

        Note: do not use this action to kill a character (use the
        'tuer' action for that).

        To change a character's maximum stat, use "stat_max" (for
        instance "vitalite_max").  Stat names are lowercase and
        unaccented.

        """
        # The scripting "Fraction" value is coerced to a plain int.
        valeur = int(valeur)
        # Clamp to at least 1: this action must never kill a character.
        if valeur <= 0:
            valeur = 1
        if nom_stat not in personnage.stats:
            raise ErreurExecution("stat {} inconnue".format(repr(nom_stat)))
        # Stats expose themselves as attributes on the stats container.
        setattr(personnage.stats, nom_stat, valeur)
| [
"stormi@laposte.net"
] | stormi@laposte.net |
5d1cfecb8fabf44a4d43ec619892469e0003e314 | 066e874cc6d72d82e098d81a220cbbb1d66948f7 | /migrations/versions/c7cdcf7845d7_.py | d523e12da43e9723bcb82b19f484e9cd93d3a248 | [] | no_license | webdeveloper001/flask-inboundlead | 776792485a998a0eaa4b14016c3a2066e75ff2a2 | d0a539d86342e9efc54d0c0a1adc02c609f0f762 | refs/heads/master | 2021-01-19T01:34:55.241144 | 2017-04-05T00:42:03 | 2017-04-05T00:42:03 | 87,248,885 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | """empty message
Revision ID: c7cdcf7845d7
Revises: d2d6e1b48e4b
Create Date: 2017-03-06 17:09:02.886878
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c7cdcf7845d7'
down_revision = 'd2d6e1b48e4b'
branch_labels = None
depends_on = None
def upgrade():
    # Forward migration: attach an optional spreadsheet id to each sales rep.
    spreadsheet_column = sa.Column('spreadSheetId', sa.String(length=120), nullable=True)
    op.add_column('sales_rep', spreadsheet_column)
def downgrade():
    # Reverse migration: remove the column added by upgrade().
    op.drop_column('sales_rep', 'spreadSheetId')
| [
"jamesbrown1018@outlook.com"
] | jamesbrown1018@outlook.com |
699cf439264c0a513273c23ce5fd10abd352b4dc | de9b8b7192a0a81e9249823bb2b86f0b7e452863 | /.history/classes/Season_20171106231139.py | 014af206fdc7b7ef79d3e86456871c59ec64a42e | [
"MIT"
] | permissive | reecebenson/uwe-dadsa-tennis-a | f5eaeb1b96d4e61f29279514e68eeea8ad6533db | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | refs/heads/master | 2023-07-08T16:13:23.963348 | 2017-11-30T12:07:01 | 2017-11-30T12:07:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | # DADSA - Assignment 1
# Reece Benson
from classes import Player as Player
class Season():
    """A tennis season parsed from JSON: name, settings, players and rounds.

    Bug fix: the mutable containers used to be *class* attributes, so every
    Season instance shared the same player/round dictionaries. They are now
    created per instance in __init__.
    """

    def __init__(self, _app, name, j_data):
        # Set our application as a variable
        self._app = _app

        # Set our Season JSON Data in a variable
        self._j_data = j_data

        # Debug
        if(self._app.debug):
            print("[LOAD]: Loaded Season '{0}'".format(name))

        # Set variables
        self._name = name
        self._settings = j_data['settings']

        # Per-instance containers (must NOT be shared between seasons).
        self._players = { }
        self._rounds = { }
        self._rounds_raw = { }

    def name(self):
        """Return this season's name."""
        return self._name

    def settings(self):
        """Return the settings dict parsed from the season JSON."""
        return self._settings

    def players(self):
        """Return the {gender: [Player, ...]} mapping."""
        return self._players

    def add_player(self, name, gender):
        """Register a player under their gender category."""
        if(not gender in self.players()):
            self._players[gender] = [ ]

        # Append our Players to their specific gender category; the current
        # list length doubles as the player's index within that category.
        self._players[gender].append(Player.Player(name, gender, len(self.players()[gender])))

    def rounds(self):
        """Return the processed rounds mapping."""
        return self._rounds

    def set_rounds(self):
        # NOTE(review): work in progress — this only dumps the raw round
        # data; it does not populate self._rounds yet.
        for gender in self._rounds_raw:
            for rnd in self._rounds_raw[gender]:
                print(rnd)

    def set_rounds_raw(self, rounds):
        """Store the raw rounds data and (re)build the rounds from it."""
        self._rounds_raw = rounds
        self.set_rounds()
| [
"me@reecebenson.me"
] | me@reecebenson.me |
9722073d175f94f7dab389b73191271fca8bf56a | c9dc1df17ecb9e279eb4403b83358363cdbe7fee | /project/cms/migrations/0042_robotsfile.py | 6d6177838930114ae197e834346653b665c93c69 | [] | no_license | m0nte-cr1st0/keyua | c3894a94c9bfe73409078be11cb1d3f64831054c | b964ebb7e260fbebdbc27e3a571fed6278196cac | refs/heads/master | 2022-11-25T16:03:51.882386 | 2020-01-09T12:57:54 | 2020-01-09T12:57:54 | 232,809,529 | 0 | 0 | null | 2022-11-22T02:24:49 | 2020-01-09T12:58:10 | Python | UTF-8 | Python | false | false | 524 | py | # Generated by Django 2.0 on 2018-05-31 05:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Creates the ``RobotsFile`` model: an auto PK plus one optional text
    # blob (presumably served as the site's robots.txt — confirm in views).

    # Must be applied after the migration that added Page.show_on_sitemap.
    dependencies = [
        ('cms', '0041_page_show_on_sitemap'),
    ]

    operations = [
        migrations.CreateModel(
            name='RobotsFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(blank=True, null=True)),
            ],
        ),
    ]
| [
"dinamo.mutu111@gmail.com"
] | dinamo.mutu111@gmail.com |
8d1cdff2c9acddbe3598defdb2d60850a9dcf1f5 | 747febe786dd6b7fd6c63cfe73dbe3023354daa8 | /src/the_tale/the_tale/game/heroes/tests/test_position.py | f1979acb4fda6888d34a210e24e138b5f85f4429 | [
"BSD-3-Clause"
] | permissive | the-tale/the-tale | 4e4b8d91dc873a5fb935fe58e9721a877baa6d3f | e8450bd2332344da805b1851e728da5a3e5bf0ef | refs/heads/develop | 2023-08-01T13:53:46.835667 | 2022-12-25T18:04:56 | 2022-12-25T18:04:56 | 1,949,167 | 98 | 52 | BSD-3-Clause | 2023-02-15T18:57:33 | 2011-06-24T18:49:48 | Python | UTF-8 | Python | false | false | 2,558 | py |
import smart_imports
smart_imports.all()
class HeroPositionTest(utils_testcase.TestCase):
    """Tests for the hero's map position.

    Module-level names (game_logic, position, copy, ...) are injected by
    smart_imports.all() at the top of this file.
    """

    def setUp(self):
        super().setUp()
        # Three places connected by roads 1-2 and 2-3 on the test map.
        self.place_1, self.place_2, self.place_3 = game_logic.create_test_map()
        account = self.accounts_factory.create_account(is_fast=True)
        self.storage = game_logic_storage.LogicStorage()
        self.storage.load_account_data(account.id)
        self.hero = self.storage.accounts_to_heroes[account.id]
        self.road_1_2 = roads_logic.road_between_places(self.place_1, self.place_2)
        self.road_2_3 = roads_logic.road_between_places(self.place_2, self.place_3)

    def test_initialize(self):
        # A fresh hero starts inside a place, exactly on its coordinates,
        # with no accumulated movement delta.
        self.assertNotEqual(self.hero.position.place_id, None)
        self.assertEqual(self.hero.position.x, self.hero.position.place.x)
        self.assertEqual(self.hero.position.y, self.hero.position.place.y)
        self.assertEqual(self.hero.position.dx, 0)
        self.assertEqual(self.hero.position.dy, 0)
        self.assertFalse(self.hero.position.moved_out_place)

    def test_set_position(self):
        old_position = copy.deepcopy(self.hero.position)
        self.hero.position.set_position(x=self.hero.position.x + 0.2,
                                        y=self.hero.position.y - 0.7)
        # Moving off the place's exact coordinates clears place_id ...
        self.assertEqual(self.hero.position.place_id, None)
        self.assertEqual(self.hero.position.x, old_position.x + 0.2)
        self.assertEqual(self.hero.position.y, old_position.y - 0.7)
        # ... while cell_x is unchanged and the -0.7 shift on y crosses
        # into the neighbouring grid cell.
        self.assertEqual(self.hero.position.cell_x, old_position.cell_x)
        self.assertEqual(self.hero.position.cell_y, old_position.cell_y - 1)
        # A second move keeps the hero outside any place.
        self.hero.position.set_position(x=self.hero.position.x + 0.2,
                                        y=self.hero.position.y - 0.7)
        self.assertEqual(self.hero.position.place_id, None)

    def test_can_visit_current_place__in_place(self):
        # Offset (0.4, -0.2) from the place: a visit is only possible once
        # the allowed delta covers that distance (0.45 suffices, 0.3 not).
        pos = position.Position.create(place=self.place_1)
        pos.set_position(x=pos.x + 0.4, y=pos.y - 0.2)
        self.assertFalse(pos.can_visit_current_place(delta=0.1))
        self.assertFalse(pos.can_visit_current_place(delta=0.3))
        self.assertTrue(pos.can_visit_current_place(delta=0.45))

    def test_can_visit_current_place__out_place(self):
        # More than a whole cell away: none of the tested deltas suffice.
        pos = position.Position.create(place=self.place_1)
        pos.set_position(x=pos.x + 1.4, y=pos.y - 0.2)
        self.assertFalse(pos.can_visit_current_place(delta=0.1))
        self.assertFalse(pos.can_visit_current_place(delta=0.3))
        self.assertFalse(pos.can_visit_current_place(delta=0.45))
| [
"a.eletsky@gmail.com"
] | a.eletsky@gmail.com |
d2d4a2b1dfa9689dfae43aaf9826ae9ce6d9ec9e | 14421a12c4e80395567e676394d369fd9619bd32 | /Scripts/PythonMidLvl/18a.py | f086632f4babe33a1d49b02b67f075a425925d86 | [] | no_license | jawor92/Python-Udemy-Mobilo | 7b331e8197233c3116e43e0b3c1110b9b878762e | 8098508835121a1536c2753bc4eedbf17163c93d | refs/heads/master | 2020-12-09T21:39:09.366604 | 2020-01-12T19:31:09 | 2020-01-12T19:31:09 | 233,423,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 10 12:59:09 2019
@author: Mateusz.Jaworski
"""
instruction = ['hello', 'borrow money', 'thanks', ' bye']
instructionaproved = []

# Record each command, echo it, and abandon everything on an 'abort'.
for command in instruction:
    instructionaproved.append(command)
    print("Instructions: ", command)
    if command != 'abort':
        print('List of instructions approved: ', instructionaproved)
    else:
        print('Aborting!')
        instructionaproved.clear()
        break
| [
"jaworski92@gmail.com"
] | jaworski92@gmail.com |
29c97db78034eada0c7a09ca9c148802244cb74a | 5ee028ee2582a2d566c22a32097a1fcbed314fcc | /openwsn-sw/software/openvisualizer/openvisualizer/openUI/OpenFrameState.py | e9875efea59a982a991f6aeb7e4cae664ae400df | [] | permissive | ssciancalepore/BitTransfer | 70c5b271743ebe683d7a3a37d595dbab132f903e | b9d343b0219259f4870e9362b99c27f544014b89 | refs/heads/master | 2022-06-20T18:38:03.271254 | 2019-09-15T04:56:32 | 2019-09-15T04:56:32 | 199,583,953 | 1 | 1 | BSD-3-Clause | 2022-06-03T22:45:01 | 2019-07-30T05:53:29 | C | UTF-8 | Python | false | false | 3,641 | py | import json
import OpenFrame
import OpenTable
import OpenGuiLib
class OpenFrameState(OpenFrame.OpenFrame):
    """Frame holding a 'data' table plus a (currently un-gridded) 'meta'
    table, optionally refreshing itself periodically from a callback."""

    def __init__(self,guiParent,width=None,height=None,frameName="frame",row=0,column=0,columnspan=1):
        # store params
        self.guiParent = guiParent
        self.frameName = frameName
        self.row = row
        self.column = column
        # initialize the parent class
        OpenFrame.OpenFrame.__init__(self,guiParent,
            width=width,
            height=height,
            frameName=frameName,
            row=row,
            column=column,
            columnspan=columnspan,)
        # local variables
        self.updatePeriod = None
        # Header labels and the meta table are created but their grid()
        # calls are commented out, so only the data table is visible.
        temp = OpenGuiLib.HeaderLabel(self.container,text="data")
        #temp.grid(row=0,column=0)
        self.data = OpenTable.OpenTable(self.container)
        self.data.grid(row=1,column=0)
        temp = OpenGuiLib.HeaderLabel(self.container,text="meta")
        #temp.grid(row=2,column=0)
        self.meta = OpenTable.OpenTable(self.container)
        #self.meta.grid(row=3,column=0)
    #======================== public ==========================================
    def startAutoUpdate(self,updatePeriod,updateFunc,updateParams):
        """Refresh every *updatePeriod* after()-timer units (milliseconds
        under Tk) from updateFunc(*updateParams), whose result must expose
        a toJson() method producing the dict accepted by update()."""
        self.updatePeriod = updatePeriod
        self.updateFunc = updateFunc
        self.updateParams = updateParams
        self.after(self.updatePeriod,self._cb_autoUpdate)
    def stopAutoUpdate(self):
        """Stop the periodic refresh (takes effect after the pending tick)."""
        self.updatePeriod = None
    def update(self,dataAndMeta):
        """Refresh both tables from a {'data': [...], 'meta': [...]} dict.

        If the first meta entry carries a dotted 'columnOrder' string, it
        fixes the column order of the data table.
        """
        assert(isinstance(dataAndMeta,dict))
        assert('meta' in dataAndMeta)
        assert(isinstance(dataAndMeta['meta'],list))
        assert('data' in dataAndMeta)
        assert(isinstance(dataAndMeta['data'],list))
        if len(dataAndMeta['meta'])>0 and ('columnOrder' in dataAndMeta['meta'][0]):
            self.data.update(dataAndMeta['data'],columnOrder=dataAndMeta['meta'][0]['columnOrder'].split('.'))
        else:
            self.data.update(dataAndMeta['data'])
        self.meta.update(dataAndMeta['meta'])
    #======================== private =========================================
    def _cb_autoUpdate(self):
        # Pull fresh state, then re-arm the timer unless stopAutoUpdate()
        # cleared updatePeriod in the meantime.
        self.update(json.loads(self.updateFunc(*self.updateParams).toJson()))
        if self.updatePeriod:
            self.after(self.updatePeriod,self._cb_autoUpdate)
###############################################################################
if __name__=='__main__':
    # Manual smoke test: open a window containing one OpenFrameState,
    # push a single data/meta row into it, then run the GUI loop.
    import OpenWindow
    examplewindow = OpenWindow.OpenWindow("OpenFrameState")
    exampleframestate = OpenFrameState(examplewindow,
        frameName='exampleframestate',
        row=0,
        column=0)
    exampleframestate.show()
    exampleframestate.update(
        {
            'data': [
                {
                    'data1': 'dA1',
                    'data2': 'dA2',
                    'data3': 'dA3',
                },
            ],
            'meta': [
                {
                    'meta1': 'm1',
                    'meta2': 'm2',
                },
            ],
        }
    )
    examplewindow.startGui()
| [
"savio.sciancalepore@gmail.com"
] | savio.sciancalepore@gmail.com |
7a5b06736ee55b26c419191273ec7ee857c0319d | 18aee5d93a63eab684fe69e3aa0abd1372dd5d08 | /test/legacy_test/test_executor_feed_non_tensor.py | b7e1f02beb4b7512d45fb152d4ee8fe542d75cef | [
"Apache-2.0"
] | permissive | Shixiaowei02/Paddle | 8d049f4f29e281de2fb1ffcd143997c88078eadb | 3d4d995f26c48f7792b325806ec3d110fc59f6fc | refs/heads/develop | 2023-06-26T06:25:48.074273 | 2023-06-14T06:40:21 | 2023-06-14T06:40:21 | 174,320,213 | 2 | 1 | Apache-2.0 | 2022-12-28T05:14:30 | 2019-03-07T10:09:34 | C++ | UTF-8 | Python | false | false | 7,326 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import paddle
from paddle import fluid
class TestExecutor(unittest.TestCase):
    """Executor.run() feed handling for non-tensor Python values.

    Each case builds the same tiny linear-regression graph and feeds the
    learning rate as a plain Python object (float / int / list of lists).
    The fetched 'lr' must come back as FP32 (the variable's declared
    dtype) and the fed Python object itself must stay untouched.
    """

    def net(self):
        """Build lr/x/y inputs, an FC regression head with MSE loss and an
        Adam step; return the (lr, avg_cost) variables to fetch."""
        lr = paddle.static.data(name="lr", shape=[], dtype='float32')
        x = paddle.static.data(name="x", shape=[None, 1], dtype='float32')
        y = paddle.static.data(name="y", shape=[None, 1], dtype='float32')
        y_predict = paddle.static.nn.fc(x, size=1)
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        opt = fluid.optimizer.Adam(learning_rate=lr)
        opt.minimize(avg_cost)
        return lr, avg_cost

    def test_program_feed_float(self):
        # Fresh program + scope so the cases do not share parameters.
        main_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(main_program, startup_program):
            with fluid.scope_guard(scope):
                cpu = fluid.CPUPlace()
                exe = fluid.Executor(cpu)
                lr, cost = self.net()
                exe.run(startup_program)
                train_data = numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype(
                    'float32'
                )
                y_true = numpy.array([[2.0], [4.0], [6.0], [8.0]]).astype(
                    'float32'
                )
                a = 0.01
                # return_numpy=False keeps the fetched lr as a tensor so
                # its _dtype() can be inspected below.
                _lr, _ = exe.run(
                    feed={'x': train_data, 'y': y_true, 'lr': a},
                    fetch_list=[lr, cost],
                    return_numpy=False,
                )
        # The Python float fed for 'lr' was converted to FP32, and the
        # original object is still a float.
        self.assertEqual(_lr._dtype(), lr.dtype)
        self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32)
        self.assertEqual(type(a), float)

    def test_program_feed_int(self):
        # Same as the float case, but the scalar fed for 'lr' is an int;
        # conversion must still produce FP32.
        main_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(main_program, startup_program):
            with fluid.scope_guard(scope):
                cpu = fluid.CPUPlace()
                exe = fluid.Executor(cpu)
                lr, cost = self.net()
                exe.run(startup_program)
                train_data = numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype(
                    'float32'
                )
                y_true = numpy.array([[2.0], [4.0], [6.0], [8.0]]).astype(
                    'float32'
                )
                a = 0
                _lr, _ = exe.run(
                    feed={'x': train_data, 'y': y_true, 'lr': a},
                    fetch_list=[lr, cost],
                    return_numpy=False,
                )
        self.assertEqual(_lr._dtype(), lr.dtype)
        self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32)
        self.assertEqual(type(a), int)

    def test_program_feed_list(self):
        # x / y are fed as plain nested Python lists instead of ndarrays;
        # the fed list object must not be replaced or mutated.
        main_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(main_program, startup_program):
            with fluid.scope_guard(scope):
                cpu = fluid.CPUPlace()
                exe = fluid.Executor(cpu)
                lr, cost = self.net()
                exe.run(startup_program)
                train_data = [[1.0], [2.0], [3.0], [4.0]]
                y_true = [[2.0], [4.0], [6.0], [8.0]]
                a = 0
                _lr, _ = exe.run(
                    feed={'x': train_data, 'y': y_true, 'lr': a},
                    fetch_list=[lr, cost],
                    return_numpy=False,
                )
        self.assertEqual(_lr._dtype(), lr.dtype)
        self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32)
        self.assertEqual(type(y_true), list)

    def test_compiled_program_feed_scalar(self):
        # Same scalar-feed check, but running through a CompiledProgram.
        main_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(main_program, startup_program):
            with fluid.scope_guard(scope):
                lr, cost = self.net()
                cpu = fluid.CPUPlace()
                exe = fluid.Executor(cpu)
                exe.run(startup_program)
                compiled_prog = fluid.CompiledProgram(main_program)
                train_data = numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype(
                    'float32'
                )
                y_true = numpy.array([[2.0], [4.0], [6.0], [8.0]]).astype(
                    'float32'
                )
                a = 0.01
                _lr, _ = exe.run(
                    compiled_prog,
                    feed={'x': train_data, 'y': y_true, 'lr': a},
                    fetch_list=[lr, cost],
                    return_numpy=False,
                )
        self.assertEqual(_lr._dtype(), lr.dtype)
        self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32)
        self.assertEqual(type(a), float)
class TestAsLodTensor(unittest.TestCase):
    """Checks for the fluid.executor._as_lodtensor conversion helper,
    always converting on the CPU place."""

    def test_as_lodtensor_int32(self):
        place = fluid.CPUPlace()
        converted = fluid.executor._as_lodtensor(
            1.0, place, fluid.core.VarDesc.VarType.INT32
        )
        self.assertEqual(converted._dtype(), fluid.core.VarDesc.VarType.INT32)

    def test_as_lodtensor_fp64(self):
        place = fluid.CPUPlace()
        converted = fluid.executor._as_lodtensor(
            1, place, fluid.core.VarDesc.VarType.FP64
        )
        self.assertEqual(converted._dtype(), fluid.core.VarDesc.VarType.FP64)

    def test_as_lodtensor_assertion_error(self):
        # No target dtype supplied: the helper is expected to assert.
        with self.assertRaises(AssertionError):
            fluid.executor._as_lodtensor(1, fluid.CPUPlace())

    def test_as_lodtensor_type_error(self):
        # A dict is not a convertible input.
        with self.assertRaises(TypeError):
            fluid.executor._as_lodtensor(
                {"a": 1}, fluid.CPUPlace(), fluid.core.VarDesc.VarType.INT32
            )

    def test_as_lodtensor_list(self):
        place = fluid.CPUPlace()
        converted = fluid.executor._as_lodtensor(
            [1, 2], place, fluid.core.VarDesc.VarType.FP64
        )
        self.assertEqual(converted._dtype(), fluid.core.VarDesc.VarType.FP64)

    def test_as_lodtensor_tuple(self):
        place = fluid.CPUPlace()
        converted = fluid.executor._as_lodtensor(
            (1, 2), place, fluid.core.VarDesc.VarType.FP64
        )
        self.assertEqual(converted._dtype(), fluid.core.VarDesc.VarType.FP64)

    def test_as_lodtensor_nested_list(self):
        # A list whose elements are sets is rejected as well.
        with self.assertRaises(TypeError):
            fluid.executor._as_lodtensor(
                [{1.2, 1.2}, {1, 2}],
                fluid.CPUPlace(),
                fluid.core.VarDesc.VarType.INT32,
            )
if __name__ == '__main__':
    # Allow running this test module directly instead of via a test runner.
    unittest.main()
| [
"noreply@github.com"
] | Shixiaowei02.noreply@github.com |
23c8af7aea382e4d3cfe4b8efe1bb5266fce4e09 | 5edd00b7d8f21fbd1cbb4a36722aba83f7f18656 | /test/12_dependency_versions/cibuildwheel_test.py | 55248bf20fc79a810d77636df181320684b887e3 | [
"MIT",
"BSD-2-Clause"
] | permissive | daleathan/cibuildwheel | ab24c8f3e597ba0948517430bd31935220e7b263 | 3e335044c61aa8da34d6fa904cae566cd1ce0808 | refs/heads/master | 2020-07-27T21:12:05.181999 | 2020-04-10T16:23:42 | 2020-04-10T16:23:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,266 | py | import os
import re
import pytest
import textwrap
import cibuildwheel.util
import utils
VERSION_REGEX = r'([\w-]+)==([^\s]+)'


def get_versions_from_constraint_file(constraint_file):
    """Parse a pip constraints file into a {package: version} mapping."""
    with open(constraint_file, encoding='utf8') as handle:
        text = handle.read()
    # Each "name==version" match becomes one entry; for duplicate package
    # names the last occurrence wins, same as the original loop.
    return dict(re.findall(VERSION_REGEX, text))
@pytest.mark.parametrize('python_version', ['2.7', '3.5', '3.8'])
def test_pinned_versions(python_version):
    """Build wheels and check that the default pinned versions of
    pip/setuptools/wheel/virtualenv (from cibuildwheel's bundled
    constraints files) are what ends up in each build environment."""
    if utils.platform == 'linux':
        pytest.skip('linux doesn\'t pin individual tool versions, it pins manylinux images instead')
    project_dir = os.path.dirname(__file__)
    build_environment = {}
    # Each interpreter line has its own bundled constraints file and a
    # matching CIBW_BUILD pattern selecting CPython/PyPy of that version.
    if python_version == '2.7':
        constraint_filename = 'constraints-python27.txt'
        build_pattern = '[cp]p27-*'
    elif python_version == '3.5':
        constraint_filename = 'constraints-python35.txt'
        build_pattern = '[cp]p35-*'
    else:
        constraint_filename = 'constraints.txt'
        build_pattern = '[cp]p38-*'
    constraint_file = os.path.join(cibuildwheel.util.resources_dir, constraint_filename)
    constraint_versions = get_versions_from_constraint_file(constraint_file)
    # Expose the expected versions to the build as EXPECTED_*_VERSION env
    # vars (presumably checked inside the test project — confirm there).
    for package in ['pip', 'setuptools', 'wheel', 'virtualenv']:
        env_name = 'EXPECTED_{}_VERSION'.format(package.upper())
        build_environment[env_name] = constraint_versions[package]
    cibw_environment_option = ' '.join(
        ['{}={}'.format(k, v) for k, v in build_environment.items()]
    )
    # build and test the wheels
    actual_wheels = utils.cibuildwheel_run(project_dir, add_env={
        'CIBW_BUILD': build_pattern,
        'CIBW_ENVIRONMENT': cibw_environment_option,
    })
    # also check that we got the right wheels
    if python_version == '2.7':
        expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0')
                           if '-cp27' in w or '-pp27' in w]
    elif python_version == '3.5':
        expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0')
                           if '-cp35' in w or '-pp35' in w]
    elif python_version == '3.8':
        expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0')
                           if '-cp38' in w or '-pp38' in w]
    else:
        raise ValueError('unhandled python version')
    assert set(actual_wheels) == set(expected_wheels)
@pytest.mark.parametrize('python_version', ['2.7', '3.x'])
def test_dependency_constraints_file(tmp_path, python_version):
    """Check that CIBW_DEPENDENCY_VERSIONS accepts a user-supplied
    constraints file and that those pinned tool versions are the ones
    installed into the build environment."""
    if utils.platform == 'linux':
        pytest.skip('linux doesn\'t pin individual tool versions, it pins manylinux images instead')
    project_dir = os.path.dirname(__file__)
    # setuptools dropped Python 2 support after the 44.x series, so a
    # different version is pinned per interpreter line.
    tool_versions = {
        'pip': '20.0.2',
        'setuptools': '44.0.0' if python_version == '2.7' else '46.0.0',
        'wheel': '0.34.2',
        'virtualenv': '20.0.10',
    }
    # Write a pip-style constraints file into pytest's tmp dir.
    constraints_file = tmp_path / 'constraints.txt'
    constraints_file.write_text(textwrap.dedent(
        '''
            pip=={pip}
            setuptools=={setuptools}
            wheel=={wheel}
            virtualenv=={virtualenv}
        '''.format(**tool_versions)
    ))
    # Expose the expected versions to the build as EXPECTED_*_VERSION env
    # vars (presumably checked inside the test project — confirm there).
    build_environment = {}
    for package_name, version in tool_versions.items():
        env_name = 'EXPECTED_{}_VERSION'.format(package_name.upper())
        build_environment[env_name] = version
    cibw_environment_option = ' '.join(
        ['{}={}'.format(k, v) for k, v in build_environment.items()]
    )
    # build and test the wheels
    actual_wheels = utils.cibuildwheel_run(project_dir, add_env={
        'CIBW_BUILD': '[cp]p27-*' if python_version == '2.7' else '[cp]p3?-*',
        'CIBW_ENVIRONMENT': cibw_environment_option,
        'CIBW_DEPENDENCY_VERSIONS': str(constraints_file),
    })
    # also check that we got the right wheels
    if python_version == '2.7':
        expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0')
                           if '-cp27' in w or '-pp27' in w]
    else:
        expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0')
                           if '-cp27' not in w and '-pp27' not in w]
    assert set(actual_wheels) == set(expected_wheels)
| [
"joerick@mac.com"
] | joerick@mac.com |
fae414b730695848b9b5b0e7e6095ee04e0a4ff6 | e6e81d0cd02223ca27f2c3f544b3c116e7617270 | /LeetCodePremium/1123.lowest-common-ancestor-of-deepest-leaves.py | 5196cf0120d686be09c5e23c898b8fa6345ad720 | [] | no_license | ashjambhulkar/objectoriented | 86166640b0546713095dd5d8804fc78d31782662 | 6f07b50590ceef231be38d6d7b8c73a40c1152e9 | refs/heads/master | 2022-05-03T23:28:38.674275 | 2022-04-26T21:37:31 | 2022-04-26T21:37:31 | 249,091,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | #
# @lc app=leetcode id=1123 lang=python3
#
# [1123] Lowest Common Ancestor of Deepest Leaves
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def lcaDeepestLeaves(self, root: "TreeNode") -> "TreeNode":
        """Return the lowest common ancestor of the tree's deepest leaves.

        Single post-order pass: helper(node) yields (height, lca) where
        *height* is the subtree height and *lca* is the LCA of that
        subtree's deepest leaves. When both children report equal heights
        the deepest leaves occur on both sides (or the node is itself a
        leaf), so the current node is their LCA. O(n) time, O(h) stack.

        Annotations are quoted because this LeetCode stub only defines
        TreeNode in a comment; unquoted they raise NameError on import.
        (A 12-line commented-out earlier attempt was removed.)
        """
        def helper(node):
            if not node:
                return 0, None
            left_height, left_lca = helper(node.left)
            right_height, right_lca = helper(node.right)
            if left_height > right_height:
                return left_height + 1, left_lca
            if right_height > left_height:
                return right_height + 1, right_lca
            # Equal heights: this node covers the deepest leaves of both
            # subtrees, so it is their lowest common ancestor.
            return left_height + 1, node
        return helper(root)[1]
# @lc code=end
| [
"ashjambhulkar@hotmail.com"
] | ashjambhulkar@hotmail.com |
f592e8dba7f277e3bda52fc030d724ef8cd02ce4 | 2fb7f13ec25781a49f0814d2010f9c090329e659 | /tests/test_builtins/uk/test_ukraine_spec.py | 4c946615b09bce648d28e031e487a321fa6d5292 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Shanmugapriya03/mimesis | a526769be2e35d297b7ec68c31f1072b6f51d64a | 649253fef05c6b5c362805000c1d7a99898aa0fe | refs/heads/master | 2020-08-06T03:58:49.276067 | 2019-10-04T06:42:57 | 2019-10-04T06:42:57 | 212,824,895 | 1 | 0 | MIT | 2019-10-04T13:46:56 | 2019-10-04T13:46:56 | null | UTF-8 | Python | false | false | 540 | py | import pytest
from mimesis.builtins import UkraineSpecProvider
from mimesis.enums import Gender
from mimesis.exceptions import NonEnumerableError
@pytest.fixture
def ukraine():
    # One fresh provider per test, so tests cannot leak state into each other.
    return UkraineSpecProvider()
@pytest.mark.parametrize(
    'gender', [
        Gender.FEMALE,
        Gender.MALE,
    ],
)
def test_patronymic(ukraine, gender):
    # A patronymic should be generated for both genders, with a plausible
    # minimum length of four characters.
    result = ukraine.patronymic(gender=gender)
    assert result is not None
    assert len(result) >= 4
    # Anything that is not a Gender enum member must be rejected.
    with pytest.raises(NonEnumerableError):
        ukraine.patronymic(gender='nil')
| [
"likid.geimfari@gmail.com"
] | likid.geimfari@gmail.com |
78b6b35553aa4d72ed3306ef2898427322922902 | c8b2cc8965283e2d940c2cd5432c3fa682e3638c | /examples/terminal.py | a7a3d6e85e53adb1af152c49103556500a56cb06 | [
"MIT"
] | permissive | gitter-badger/DevAssist | 1ae87fc3505f0feb7657921377f65b167a0b39ff | be0b4221b2d379cf9e8c65454f1329d6fb910f0c | refs/heads/master | 2021-01-16T19:48:45.694776 | 2016-02-10T15:33:44 | 2016-02-10T15:33:44 | 51,457,297 | 0 | 0 | null | 2016-02-10T17:20:25 | 2016-02-10T17:20:25 | null | UTF-8 | Python | false | false | 391 | py | from DevAssist import DevAssist
my_devassist = DevAssist()
my_devassist.process("")

# Normalize input across Python versions: raw_input() only exists on
# Python 2, where it is the safe line reader; Python 3 renamed it input().
try:
    read_line = raw_input
except NameError:
    read_line = input

while True:
    user_input = read_line("Human: ")

    # Leave if the user is done
    if user_input == "quit":
        exit(0)

    # Generate and show the response; calling print() as a function keeps
    # this file valid on Python 3 as well ('print response' was a syntax
    # error there).
    response = my_devassist.process(user_input)
    print(response)
| [
"valetolpegin@gmail.com"
] | valetolpegin@gmail.com |
a942bedde48b5e6541bc1f2dc7af4a10448cbf40 | fc0eda8560a26c88b790d236070ed0559d0dc4a4 | /leetcode/basicDS08_heap/b02_lc347_top_k_frequent_elements.py | 21a0b6c42751ec427c3d9bec2556f59a52f81e92 | [] | no_license | pankypan/DataStructureAndAlgo | b4bd417a16cdb594bbed2ca0220dbd63eb60f3c1 | 6c5d40d57d378994236549f8dea906c75121eadf | refs/heads/master | 2021-08-03T01:22:08.442709 | 2021-07-19T14:56:44 | 2021-07-19T14:56:44 | 279,599,190 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | # basicDS05_heap https://leetcode-cn.com/problems/top-k-frequent-elements/
import heapq
from typing import List
class Solution:
    """LC 347: return the k most frequent elements of nums."""

    @staticmethod
    def get_num_statics(nums) -> dict:
        """Return a {value: occurrence count} mapping for *nums*."""
        hash_table = dict()
        for num in nums:
            # dict.get with a default collapses the original
            # membership-test/else branching into one line.
            hash_table[num] = hash_table.get(num, 0) + 1
        return hash_table

    def topKFrequent(self, nums: List[int], k: int) -> List[int]:
        """Pick the k most frequent values using a bounded min-heap.

        The heap never holds more than k (count, value) pairs, so the
        selection costs O(n log k) instead of sorting all frequencies.
        """
        hash_table = self.get_num_statics(nums)
        priority_queue = []
        for seen, (key, val) in enumerate(hash_table.items()):
            if seen < k:
                # Tuples are the idiomatic heap entry; ordering matches
                # the original [count, value] lists.
                heapq.heappush(priority_queue, (val, key))
            elif val > priority_queue[0][0]:
                # Beats the smallest retained count: replace it in one op.
                heapq.heapreplace(priority_queue, (val, key))
        return [key for _, key in priority_queue]
if __name__ == '__main__':
    # Quick manual check: the top-2 of the first call is {1, 2} in some
    # order, and the second call prints [1].
    s = Solution()
    print(s.topKFrequent([1, 1, 1, 2, 2, 3], 2))
    print(s.topKFrequent([1], 1))
| [
"1356523334@qq.com"
] | 1356523334@qq.com |
a25af68831ed484b843fa0dcc34da703e3117143 | adb280d422df64880debadd3b67a97f0c0869989 | /src/utils/logger.py | bad9ce3934a54795b6f9d74f01af27c3ae13835a | [
"Apache-2.0"
] | permissive | biothings/biothings_explorer_web_old | d924ff888a7c7e5a01c2e93cd65f3c8a795648e8 | ee7e60aa7a6eb5c9944921493063a76fdc1d3db2 | refs/heads/master | 2022-03-17T00:01:47.851518 | 2019-11-07T21:26:10 | 2019-11-07T21:26:10 | 82,010,818 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | import os, time, datetime
import logging
import inspect
def get_logger(logger_name, log_folder=None, timestamp="%Y%m%d", level=logging.DEBUG):
    """
    Configure a logger object from logger_name and return (logger, logfile).

    :param logger_name: name passed to logging.getLogger(); also the
                        logfile name stem.
    :param log_folder:  directory for the logfile; defaults to
                        config.LOG_FILE_ROOT when not given.
    :param timestamp:   time.strftime() format appended to the filename,
                        or a falsy value for a plain '<logger_name>.log'.
    :param level:       logging level applied to the logger.

    NOTE(review): every call attaches a new FileHandler to the global
    logger of this name, so calling it twice duplicates log lines —
    confirm callers invoke it only once per logger name.
    """
    # Import config lazily: it is only needed when the caller does not
    # supply a folder. (Previously the import ran unconditionally, making
    # the config module a hard dependency even with an explicit folder.)
    if not log_folder:
        from config import LOG_FILE_ROOT
        log_folder = LOG_FILE_ROOT
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)
    if timestamp:
        logfile = os.path.join(log_folder, '%s_%s.log' % (logger_name, time.strftime(timestamp, datetime.datetime.now().timetuple())))
    else:
        logfile = os.path.join(log_folder, '%s.log' % logger_name)
    fmt = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)s - %(funcName)20s() ] - %(name)s - %(levelname)s -- %(message)s', datefmt="%H:%M:%S")
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    fh = logging.FileHandler(logfile)
    fh.setFormatter(fmt)
    fh.name = "logfile"
    logger.addHandler(fh)
    return (logger, logfile)
| [
"kevinxin@scripps.edu"
] | kevinxin@scripps.edu |
d8854d358f9e5cdd59f0e50511c3f669895a8b8b | 3a74764c3fc38f87cd2ed0ba9e96b23ad9a0677e | /bite_144/bite_144.py | df94d9a8f587deafa609f37f3a7497f89f2aafce | [] | no_license | nalwayv/bitesofpy | 7dbc7cb55c9bc3c111f67243759cf56a2b785f51 | 56b0f7f85fd4b18d11a1b5df8da0a95e5ba2dcaa | refs/heads/master | 2023-05-26T05:46:06.859108 | 2020-02-05T00:02:35 | 2020-02-05T00:02:35 | 216,651,358 | 2 | 0 | null | 2023-05-22T22:43:16 | 2019-10-21T19:41:25 | HTML | UTF-8 | Python | false | false | 1,944 | py | """
Bite 144. Calculate the Number of Months Passed
"""
from datetime import date
from dateutil.relativedelta import relativedelta
START_DATE = date(2018, 11, 1)
MIN_DAYS_TO_COUNT_AS_MONTH = 10
MONTHS_PER_YEAR = 12
def calc_months_passed(year, month, day):
    """Return how many whole months have passed since START_DATE.

    A date object is constructed from the passed-in arguments; bad inputs
    (e.g. a non-integer year) let the underlying date() exception
    propagate. A date earlier than START_DATE raises ValueError.

    The count is years * MONTHS_PER_YEAR + months as computed by
    dateutil's relativedelta, plus one extra month once the current
    (partial) month is at least MIN_DAYS_TO_COUNT_AS_MONTH days in:

        date(2018, 11, 10) = 9 days in  => 0 months
        date(2018, 11, 11) = 10 days in => 1 month
        date(2018, 12, 11) = 1 month + 10 days in => 2 months
        date(2019, 12, 11) = 1 year + 1 month + 10 days in => 14 months
    """
    _date = date(year, month, day)
    if _date < START_DATE:
        raise ValueError("date too low")
    if START_DATE == _date:
        return 0
    rd = relativedelta(_date, START_DATE)
    # Bug fix: the partial month counts once it is *at least* 10 days in.
    # The previous test (rd.days % MIN_DAYS_TO_COUNT_AS_MONTH == 0) only
    # matched exact multiples of 10, so e.g. 15 days in wrongly added 0.
    extra_month = 1 if rd.days >= MIN_DAYS_TO_COUNT_AS_MONTH else 0
    return extra_month + rd.months + rd.years * MONTHS_PER_YEAR
if __name__ == "__main__":
print(calc_months_passed(2018, 11, 1) == 0)
print(calc_months_passed(2018, 11, 10) == 0)
print(calc_months_passed(2018, 11, 11) == 1)
print(calc_months_passed(2018, 12, 10) == 1)
print(calc_months_passed(2018, 12, 11) == 2)
print(calc_months_passed(2019, 12, 10) == 13)
print(calc_months_passed(2019, 12, 11) == 14)
try:
calc_months_passed('a',10,1)
except TypeError:
print('type error')
try:
calc_months_passed(2018, 10, 1)
except ValueError:
print('value error')
| [
"nalwayv@googlemail.com"
] | nalwayv@googlemail.com |
bed041cf24e432a76115526e3956a15fd8d80839 | 2f63688febd21dc3ae6b19abfa79ad313c820154 | /For Irene/DFS/0938_Range_Sum_of_BST.py | 8028386c36fd192c2ea01a1603763c2e0bd3c142 | [] | no_license | novayo/LeetCode | cadd03587ee4ed6e35f60294070165afc1539ac8 | 54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7 | refs/heads/master | 2023-08-14T00:35:15.528520 | 2023-07-30T05:56:05 | 2023-07-30T05:56:05 | 200,248,146 | 8 | 1 | null | 2022-11-19T04:37:54 | 2019-08-02T14:24:19 | Python | UTF-8 | Python | false | false | 618 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def rangeSumBST(self, root: 'Optional[TreeNode]', low: int, high: int) -> int:
        """Sum the values of all nodes with low <= node.val <= high.

        Uses the BST invariant (left subtree < node < right subtree) to
        prune: when node.val <= low nothing in the left subtree can be in
        range, and symmetrically for the right subtree when node.val >=
        high. The previous version visited every node unconditionally.

        Annotations are quoted: neither Optional nor TreeNode is defined
        in this stub file, so unquoted they raise NameError on import.
        """
        total = 0

        def dfs(node):
            nonlocal total
            if not node:
                return
            if low <= node.val <= high:
                total += node.val
            # Left subtree holds strictly smaller values: only worth
            # visiting while node.val can still exceed low.
            if node.val > low:
                dfs(node.left)
            if node.val < high:
                dfs(node.right)

        dfs(root)
        return total
| [
"f14051172@gs.ncku.edu.tw"
] | f14051172@gs.ncku.edu.tw |
be17f52a78be4a863aa7b18e00af390d2d11966e | c43c88015f9498aed5f3b5a339d245c31781444e | /Free/l10n_by_doc/report/report_waybill.py | 347ec855716e4142af55494c78208705ba538981 | [] | no_license | mulaudzicalvin/perpul | 65106d41d5197fea17628ac1a7fa7e581d29d75e | 00e3a5ee1771d2e09a48460ca23c2e9c2ef507d6 | refs/heads/master | 2020-03-09T18:39:33.131420 | 2018-02-05T05:17:36 | 2018-02-05T05:17:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Perpul
# Copyright (C) 2016 CodUP (<http://codup.com>).
#
##############################################################################
from flectra import api, models
from report_helper import QWebHelper
class ByWayBillReport(models.AbstractModel):
    """QWeb renderer behind the l10n_by_doc waybill report action."""
    _name = 'report.l10n_by_doc.report_waybill'

    @api.model
    def render_html(self, docids, data=None):
        """Render the waybill QWeb template for the records in ``docids``."""
        report_service = self.env['report']
        report_def = report_service._get_report_from_name('l10n_by_doc.report_waybill')
        records = self.env[report_def.model].browse(docids)
        rendering_values = {
            'helper': QWebHelper(),
            'doc_ids': docids,
            'doc_model': report_def.model,
            'docs': records,
        }
        return report_service.render('l10n_by_doc.report_waybill', rendering_values)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"daniel.podvesker@perpul.co"
] | daniel.podvesker@perpul.co |
185d598106dfbf637a7c47faa42c6c4fe6450c31 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /emesene/rev1286-1505/base-trunk-1286/desktop.py | aecfa820abe7f3244e3c947efdbbd7b4a8c296bb | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,661 | py | """
Simple desktop integration for Python. This module provides desktop environment
detection and resource opening support for a selection of common and
standardised desktop environments.
Copyright (C) 2005, 2006 Paul Boddie <paul@boddie.org.uk>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
--------
Desktop Detection
-----------------
To detect a specific desktop environment, use the get_desktop function.
To detect whether the desktop environment is standardised (according to the
proposed DESKTOP_LAUNCH standard), use the is_standard function.
Opening URLs
------------
To open a URL in the current desktop environment, relying on the automatic
detection of that environment, use the desktop.open function as follows:
desktop.open("http://www.python.org")
To override the detected desktop, specify the desktop parameter to the open
function as follows:
desktop.open("http://www.python.org", "KDE") # Insists on KDE
desktop.open("http://www.python.org", "GNOME") # Insists on GNOME
Without overriding using the desktop parameter, the open function will attempt
to use the "standard" desktop opening mechanism which is controlled by the
DESKTOP_LAUNCH environment variable as described below.
The DESKTOP_LAUNCH Environment Variable
---------------------------------------
The DESKTOP_LAUNCH environment variable must be shell-quoted where appropriate,
as shown in some of the following examples:
DESKTOP_LAUNCH="kdialog --msgbox" Should present any opened URLs in
their entirety in a KDE message box.
(Command "kdialog" plus parameter.)
DESKTOP_LAUNCH="my\ opener" Should run the "my opener" program to
open URLs.
(Command "my opener", no parameters.)
DESKTOP_LAUNCH="my\ opener --url" Should run the "my opener" program to
open URLs.
(Command "my opener" plus parameter.)
Details of the DESKTOP_LAUNCH environment variable convention can be found here:
http://lists.freedesktop.org/archives/xdg/2004-August/004489.html
"""
__version__ = "0.2.3"
import os
import sys
# Prefer the subprocess module; on very old interpreters fall back to the
# legacy popen2 module.  Both variants define the same helper:
# _run(cmd, shell, wait) -> pid of the spawned "opener" process.
try:
    import subprocess
    def _run(cmd, shell, wait):
        # shell=1 runs a command *string* through the shell; shell=0 expects
        # an argument list.  Callers below pick the right combination.
        opener = subprocess.Popen(cmd, shell=shell)
        if wait: opener.wait()
        return opener.pid
except ImportError:
    import popen2
    def _run(cmd, shell, wait):
        # popen2.Popen3 accepts a command string (shell semantics).
        opener = popen2.Popen3(cmd)
        if wait: opener.wait()
        return opener.pid
import commands
import webbrowser # fallback
# Optional user-configured opener command; open() substitutes '%url%' in it
# with the shell-quoted URL when set.
override = ''
def get_desktop(dontoverride=False):
    """
    Detect the current desktop environment, returning the name of the
    environment. If no environment could be detected, None is returned.

    When the module-level `override` command is set (and `dontoverride` is
    false) the special name 'override' is returned instead.
    """
    global override
    # Membership tests and .get() replace the long-deprecated dict.has_key()
    # calls; the behaviour is identical and the module stays portable.
    if override and not dontoverride:
        return 'override'
    elif "KDE_FULL_SESSION" in os.environ or \
        "KDE_MULTIHEAD" in os.environ:
        return "KDE"
    elif os.environ.get("DESKTOP_SESSION") == 'xfce4':
        return 'xfce4'
    elif "GNOME_DESKTOP_SESSION_ID" in os.environ or \
        "GNOME_KEYRING_SOCKET" in os.environ:
        return "GNOME"
    elif sys.platform == "darwin":
        return "Mac OS X"
    elif hasattr(os, "startfile"):
        # os.startfile only exists on Windows.
        return "Windows"
    else:
        return None
def is_standard():
    """
    Return whether the current desktop supports standardised application
    launching (i.e. the DESKTOP_LAUNCH environment variable is set).
    """
    # "in" replaces the deprecated dict.has_key(); same boolean result.
    return "DESKTOP_LAUNCH" in os.environ
# NOTE: this function deliberately shadows the builtin open() at module scope.
def open(url, desktop=None, wait=0):
    """
    Open the 'url' in the current desktop's preferred file browser. If the
    optional 'desktop' parameter is specified then attempt to use that
    particular desktop environment's mechanisms to open the 'url' instead of
    guessing or detecting which environment is being used.
    Suggested values for 'desktop' are "standard", "KDE", "GNOME", "Mac OS X",
    "Windows" where "standard" employs a DESKTOP_LAUNCH environment variable to
    open the specified 'url'. DESKTOP_LAUNCH should be a command, possibly
    followed by arguments, and must have any special characters shell-escaped.
    The process identifier of the "opener" (ie. viewer, editor, browser or
    program) associated with the 'url' is returned by this function. If the
    process identifier cannot be determined, None is returned.
    An optional 'wait' parameter is also available for advanced usage and, if
    'wait' is set to a true value, this function will wait for the launching
    mechanism to complete before returning (as opposed to immediately returning
    as is the default behaviour).
    """
    detected = get_desktop()
    # 1) User-configured override command: '%url%' inside it is replaced with
    #    the shell-quoted URL and the result is run through the shell.
    if (desktop is None or desktop == "override") and detected == "override":
        global override
        arg = override.replace("%url%", commands.mkarg(url))
        return _run(arg, 1, wait)
    # 2) DESKTOP_LAUNCH convention: run "$DESKTOP_LAUNCH <quoted url>".
    elif (desktop is None or desktop == "standard") and is_standard():
        arg = "".join([os.environ["DESKTOP_LAUNCH"], commands.mkarg(url)])
        return _run(arg, 1, wait)
    # 3) Windows: delegate to the file association; no pid can be returned.
    elif (desktop is None or desktop == "Windows") and detected == "Windows":
        try:
            return os.startfile(url)
        except OSError:
            return
    elif desktop is None:
        desktop = detected
    # 4) Per-desktop opener command; fall back to the webbrowser module
    #    (which yields no pid, hence an implicit None return).
    cmd = get_command(desktop, url)
    if cmd:
        return _run(cmd, 0, wait)
    else:
        webbrowser.open(url)
def get_command(desktop, url):
    '''Test for desktops where the overriding is not verified.'''
    # Table-driven lookup; unknown desktop names yield None, as before.
    openers = {
        "KDE": ["kfmclient", "exec", url],
        "GNOME": ["gnome-open", url],
        "xfce4": ["exo-open", url],
        "Mac OS X": ["open", url],
        "standard": ['$DESKTOP_LAUNCH'],
        "Windows": ['os.startfile()'],
    }
    return openers.get(desktop)
| [
"joliebig@fim.uni-passau.de"
] | joliebig@fim.uni-passau.de |
ac96d791fd218866f388d481fafeb181f964bfea | ac5c28044dcad1331aaf11ba2112e5d18ed53472 | /experiment/models.py | 873e6490b08737f900c4e4b79e6bbfdd642170fb | [
"MIT"
] | permissive | seakers/daphne_brain | 2c4cd278591db7e21d639dbaa87235383d1ac5f5 | 4e3220d41552d224fee375d85f7cbc8106de7fc8 | refs/heads/main | 2023-08-21T14:21:00.795145 | 2022-10-20T20:26:05 | 2022-10-20T20:26:05 | 85,344,849 | 1 | 1 | MIT | 2023-02-16T06:37:34 | 2017-03-17T18:51:12 | PLpgSQL | UTF-8 | Python | false | false | 2,230 | py | from django.db import models
# Experiment Context (to perform experiments with human subjects and Daphne)
from daphne_context.models import UserInformation
class ExperimentContext(models.Model):
    # Owning user; deleting the user cascades to this context.
    user_information = models.OneToOneField(UserInformation, on_delete=models.CASCADE)
    # Whether an experiment is currently in progress for this user.
    is_running = models.BooleanField()
    # Numeric id of the experiment this user is taking part in.
    experiment_id = models.IntegerField()
    # Serialized snapshot of where the user currently is in the experiment.
    current_state = models.TextField()
# A data structure defining an experimental stage
class ExperimentStage(models.Model):
    # Context this stage belongs to (cascade-deleted with it).
    experimentcontext = models.ForeignKey(ExperimentContext, on_delete=models.CASCADE)
    # Short label identifying the kind of stage.
    type = models.CharField(max_length=50)
    start_date = models.DateTimeField()
    end_date = models.DateTimeField()
    # Serialized state recorded for the end of this stage.
    end_state = models.TextField()
# A single action logged while a stage was active.
class ExperimentAction(models.Model):
    experimentstage = models.ForeignKey(ExperimentStage, on_delete=models.CASCADE)
    # Serialized description of the action taken.
    action = models.TextField()
    # Timestamp at which the action occurred.
    date = models.DateTimeField()
# An allowed command for Daphne (to be used with experiments to limit functionalities programmatically)
class AllowedCommand(models.Model):
    # User this whitelist entry applies to (cascade-deleted with the user).
    user_information = models.ForeignKey(UserInformation, on_delete=models.CASCADE)
    # Command Type Choice
    COMMAND_TYPES = (
        ('engineer', 'Engineer Commands'),
        ('analyst', 'iFEED Commands'),
        ('explorer', 'Explorer Commands'),
        ('historian', 'Historian Commands'),
        ('critic', 'Critic Commands'),
        ('engineer_instruments', 'Instruments Cheatsheet'),
        ('engineer_instrument_parameters', 'Instrument Parameters Cheatsheet'),
        ('engineer_measurements', 'Measurements Cheatsheet'),
        ('engineer_stakeholders', 'Stakeholders Cheatsheet'),
        ('engineer_objectives', 'Objectives Cheatsheet'),
        ('engineer_subobjectives', 'Subobjectives Cheatsheet'),
        ('historian_measurements', 'Historical Measurements Cheatsheet'),
        ('historian_missions', 'Historical Missions Cheatsheet'),
        ('historian_technologies', 'Historical Technologies Cheatsheet'),
        ('historian_space_agencies', 'Space Agencies Cheatsheet'),
    )
    # Which command group this entry allows for the user.
    command_type = models.CharField(max_length=40, choices=COMMAND_TYPES)
    # Command number
    command_descriptor = models.IntegerField()
| [
"ani300@gmail.com"
] | ani300@gmail.com |
bf7013d24eab75836cdf292866fe2c6c680b154d | 53f5f694a83800b4465bd0417820117832f0f97d | /ifcbdb/dashboard/migrations/0019_auto_20190528_0647.py | 884b52bac1da41ff17061cc72bb96b746517d698 | [
"MIT"
] | permissive | WHOIGit/ifcbdb | 083da3dd223e6791b35ff952dc2e6b2970b8db50 | 36cac457b31b614b7eda2c15e28cbd2fbba7d388 | refs/heads/master | 2023-08-31T03:55:05.539434 | 2023-06-22T11:48:05 | 2023-06-22T11:48:05 | 178,424,730 | 5 | 6 | MIT | 2023-08-30T17:35:42 | 2019-03-29T14:53:50 | Python | UTF-8 | Python | false | false | 1,183 | py | # Generated by Django 2.1.7 on 2019-05-28 06:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (makemigrations): gives six Bin metric
    # columns a -9999999 default, apparently used as a "not available"
    # sentinel -- TODO confirm against dashboard model code.
    # Keep edits to comments only; generated migrations should not be altered.

    dependencies = [
        ('dashboard', '0018_auto_20190508_1659'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bin',
            name='concentration',
            field=models.FloatField(default=-9999999),
        ),
        migrations.AlterField(
            model_name='bin',
            name='humidity',
            field=models.FloatField(default=-9999999),
        ),
        migrations.AlterField(
            model_name='bin',
            name='look_time',
            field=models.FloatField(default=-9999999),
        ),
        migrations.AlterField(
            model_name='bin',
            name='ml_analyzed',
            field=models.FloatField(default=-9999999),
        ),
        migrations.AlterField(
            model_name='bin',
            name='run_time',
            field=models.FloatField(default=-9999999),
        ),
        migrations.AlterField(
            model_name='bin',
            name='temperature',
            field=models.FloatField(default=-9999999),
        ),
    ]
| [
"joefutrelle@gmail.com"
] | joefutrelle@gmail.com |
5f41b1f3ee4e8792dd6455938e861c3e1a67e80c | ef9ab6d3ebb22fea68901c0e681abc25e5379fa6 | /FanFilmE2/fanfilm/resources/lib/sources/pl/serialeco.py | dc4170f3b9a78c322736bf81789d6b1624837c71 | [] | no_license | OpenPE/eePlugins | b2098a082ee5a5d929a29683e2334dc3895cb4b5 | 8f4a2963d5489e760eb778a10f00c3b49356d517 | refs/heads/master | 2020-07-30T11:27:28.198034 | 2019-09-16T15:13:55 | 2019-09-16T15:13:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,135 | py | # -*- coding: utf-8 -*-
'''
Covenant Add-on
Copyright (C) 2018 :)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import requests
try:
import urlparse
except:
import urllib.parse as urlparse
try:
import HTMLParser
from HTMLParser import HTMLParser
except:
from html.parser import HTMLParser
try:
import urllib2
except:
import urllib.request as urllib2
from resources.lib.libraries import source_utils
from resources.lib.libraries import client
from ptw.debug import log_exception
class source:
    """FanFilm scraper plugin for seriale.co (Polish TV episode sources)."""
    def __init__(self):
        # Plugin metadata consumed by the FanFilm source framework.
        self.priority = 1
        self.language = ['pl']
        self.domains = ['seriale.co']
        self.base_link = 'http://seriale.co'
        self.session = requests.Session()
    def contains_word(self, str_to_check, word):
        # Case-insensitive substring test.
        if str(word).lower() in str(str_to_check).lower():
            return True
        return False
    def contains_all_words(self, str_to_check, words):
        # True only when every word occurs (case-insensitively) in the string.
        for word in words:
            if not self.contains_word(str_to_check, word):
                return False
        return True
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # Carry both the original and localized titles forward to episode().
        titles = (tvshowtitle, localtvshowtitle)
        return titles, year
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        return self.search_ep(url[0], season, episode, url[1]) # url = titles & year
    def search_ep(self, titles, season, episode, year):
        """Probe the search endpoint with each title; return the first hit
        as (title, season, episode), or None on failure/no results."""
        try:
            for title in titles:
                data = {
                    'fid_name': title,
                    'sezon': season,
                    'odcinek': episode,
                    'title': title
                }
                # NOTE(review): hard-coded backend IP -- presumably the
                # site's search service; confirm it is still reachable.
                result = requests.post('http://178.19.110.218/forumserialeco/skrypt/szukaj3.php', data=data).content
                result = result.decode('utf-8')
                h = HTMLParser()
                result = h.unescape(result)
                if result:
                    return title, season, episode
        except Exception as e:
            log_exception()
            return
    def sources(self, url, hostDict, hostprDict):
        """Scrape playable hoster links for the episode located by search_ep.

        Returns a list of source dicts understood by the FanFilm resolver.
        """
        try:
            sources = []
            data = {
                'fid_name': url[0],
                'sezon': url[1],
                'odcinek': url[2],
                'title': url[0]
            }
            result = requests.post('http://178.19.110.218/forumserialeco/skrypt/szukaj3.php', data=data).content
            result = result.decode('utf-8')
            h = HTMLParser()
            result = h.unescape(result)
            if result:
                # Extract language-version labels and embed ids from the HTML.
                wersja = re.findall("""wersja: <b>(.*?)<\/b>""", result)
                id = re.findall("""url='(.*?)'""", result)
                for item in zip(wersja, id):
                    try:
                        if item[1]:
                            info = self.get_lang_by_type(item[0])
                            # Resolve the frame to the real hoster iframe URL.
                            content = client.request("http://seriale.co/frame.php?src=" + item[1])
                            video_link = str(client.parseDOM(content, 'iframe', ret='src')[0])
                            valid, host = source_utils.is_host_valid(video_link, hostDict)
                            if valid:
                                sources.append(
                                    {'source': host, 'quality': 'SD', 'language': info[0], 'url': video_link,
                                     'info': info[1], 'direct': False,
                                     'debridonly': False})
                            else:
                                continue
                    except:
                        # Skip any single broken entry, keep scraping the rest.
                        continue
            return sources
        except:
            log_exception()
            return sources
    def get_lang_by_type(self, lang_type):
        """Map a seriale.co version label to (language, info) for the UI."""
        if "dubbing" in lang_type.lower():
            if "kino" in lang_type.lower():
                return 'pl', 'Dubbing Kino'
            return 'pl', 'Dubbing'
        elif 'lektor pl' in lang_type.lower():
            return 'pl', 'Lektor'
        elif 'lektor' in lang_type.lower():
            return 'pl', 'Lektor'
        elif 'napisy pl' in lang_type.lower():
            return 'pl', 'Napisy'
        elif 'napisy' in lang_type.lower():
            return 'pl', 'Napisy'
        # NOTE(review): this branch can never match -- lower() output never
        # contains uppercase 'POLSKI'; harmless, since the 'pl' branch below
        # catches the same inputs with the same result.
        elif 'POLSKI' in lang_type.lower():
            return 'pl', None
        elif 'pl' in lang_type.lower():
            return 'pl', None
        return 'en', None
    def resolve(self, url):
        # Links are already direct; just normalise to str.
        return str(url)
| [
"zdzislaw22@windowslive.com"
] | zdzislaw22@windowslive.com |
dac5f9ee311610fe481bf4209faef487ff95852d | 304033f60097c489cbc60aab639be45ccdbef1a5 | /algorithms/boj/dijkstra/1162.py | f27956edaec3a3b91c338c4611930bbae2ba73f1 | [] | no_license | pgw928/TIL | 3d0c47c07bd1f5c73826daf8579a2b0e3f93cb95 | 765906f1e6eecad4ad8ec9bf704041433d7eb304 | refs/heads/master | 2023-06-29T05:46:30.039815 | 2021-08-10T17:38:11 | 2021-08-10T17:38:11 | 288,923,095 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | import sys
import heapq
from math import inf
from itertools import combinations
input = sys.stdin.readline
N, M, K = map(int, input().split())
pre = [tuple(map(int, input().split())) for _ in range(M)]
combs = combinations(range(M),K)
def dijkstra(start):
    """Single-source shortest paths from `start` over the module-level
    `graph` adjacency list, writing results into the module-level
    `distance` list (which the caller must reset to inf beforehand)."""
    hq = []
    heapq.heappush(hq, (0, start))
    distance[start] = 0
    while hq:
        dist, node = heapq.heappop(hq)
        if distance[node] < dist:
            # Stale queue entry: a shorter path was already recorded.
            continue
        for n_node, n_dist in graph[node]:
            tmp = n_dist + distance[node]
            if tmp < distance[n_node]:
                distance[n_node] = tmp
                heapq.heappush(hq, (tmp, n_node))
# Try every way of choosing K of the M roads to make free (weight 0),
# run Dijkstra from city 1 each time, and keep the cheapest cost to city N.
m = inf
for A in combs:
    distance = [inf] * (N + 1)
    # BUG FIX: the adjacency list is indexed by city number (1..N), so it
    # needs N + 1 slots; sizing it by the edge count M raised IndexError
    # whenever M < N.
    graph = [[] for _ in range(N + 1)]
    for i in range(M):
        a, b, c = pre[i]
        if i in A:
            graph[a].append((b, 0))
            graph[b].append((a, 0))
        else:
            graph[a].append((b, c))
            graph[b].append((a, c))
    dijkstra(1)
    m = min(m, distance[N])
print(m) | [
"pku928@naver.com"
] | pku928@naver.com |
919f4cea47fb24fd61b0b8ef0e6c3b13d4cbcbb2 | cced1f1ad18c6d9c3b96b2ae53cac8e86846f1f5 | /Blog/comment/templatetags/comment.py | 986e54415fdcb2915afba673ef52a43bd8aa4d30 | [] | no_license | sug5806/portfolio | a3904be506a3746e16da57bba5926c38743783ad | b943955a52c622094a58fb9124323298261ae80a | refs/heads/master | 2022-12-10T06:23:38.472893 | 2019-07-05T04:56:59 | 2019-07-05T04:56:59 | 190,156,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | from django.contrib.contenttypes.models import ContentType
from django.template import Library
from django.template.loader import render_to_string
register = Library()
from comment.forms import CommentForm
from comment.models import Comment
@register.simple_tag(takes_context=True)
def show_comment(context, content_type, object_id):
    """Render the comment form plus the existing comments for one target object."""
    # Build a form pre-bound to the target object's content type and id.
    target_type = ContentType.objects.get_for_model(content_type)
    form = CommentForm(initial={'content_type': target_type, 'object_id': object_id})
    # Collect the comments attached to that same target.
    comments = Comment.objects.filter(content_type=target_type, object_id=object_id).all()
    # Render the partial template, forwarding the current request.
    return render_to_string('comment/show_comment.html',
                            {'form': form, 'object_list': comments},
                            request=context['request'])
| [
"sug5806@gmail.com"
] | sug5806@gmail.com |
07c38900b1136a95de01872886ab8a1d18c0f9fd | 63598a5a625265f01d2ab68475db7277b4649da0 | /minet/cli/facebook/utils.py | fc6f8f019e75ea8322d1e2c9ec4f95ef1e2b2f16 | [] | no_license | metinefendie/minet | dcb83829ccd574c94301b65a9a1c071b85ceb7c2 | 072723d75ed7c60c370866c92da0f299fe12da21 | refs/heads/master | 2022-04-25T13:35:31.998650 | 2020-04-23T16:51:59 | 2020-04-23T16:51:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | # =============================================================================
# Minet Facebook CLI Action Utils
# =============================================================================
#
# Miscellaneous helpers used by `minet fb`.
#
from http.cookies import SimpleCookie
from minet.utils import grab_cookies
from minet.cli.utils import die
FACEBOOK_URL = 'https://www.facebook.com/'
def fix_cookie(cookie_string):
    """Sanitize a raw Facebook cookie string.

    Strips tracking items that can identify the client, forces an English
    locale and re-serializes the cookie as "key=value; ..." pairs.
    """
    cookie = SimpleCookie()
    cookie.load(cookie_string)

    # NOTE: those cookie items can rat you out.
    # BUG FIX: the original deleted both keys inside one try block, so a
    # missing 'm_pixel_ratio' aborted the deletion and left 'wd' behind.
    # Popping each key independently removes whichever ones are present.
    for tracking_key in ('m_pixel_ratio', 'wd'):
        cookie.pop(tracking_key, None)

    cookie['locale'] = 'en_US'

    return '; '.join(key + '=' + morsel.coded_value for key, morsel in cookie.items())
def grab_facebook_cookie(namespace):
    """Resolve a usable Facebook cookie, either from a browser or given directly."""
    if namespace.cookie in ('firefox', 'chrome'):
        cookie_getter = grab_cookies(namespace.cookie)

        if cookie_getter is None:
            die('Could not extract cookies from %s.' % namespace.cookie)

        cookie = cookie_getter(FACEBOOK_URL)
    else:
        cookie = namespace.cookie.strip()

    if not cookie:
        die([
            'Relevant cookie not found.',
            'A Facebook authentication cookie is necessary to be able to access Facebook pages.',
            'Use the --cookie flag to choose a browser from which to extract the cookie or give your cookie directly.'
        ])

    return fix_cookie(cookie)
| [
"guillaumeplique@gmail.com"
] | guillaumeplique@gmail.com |
b653a43ed41620c6a44b82f778844b4d44bfd45b | 536aa3e7833ead462ccb087e827f2a490e1b5216 | /stripe/api_resources/payout.py | 8227c4573a00b7fce53dc1804bcdc34647dbb857 | [
"MIT"
] | permissive | condemane/stripe-python | 83b6026fec6742535b352a396622453fb6fc8f84 | 1ca277051cf212c6ac8a22b49321cfe9e1ffd405 | refs/heads/master | 2021-07-23T16:39:05.896112 | 2017-11-03T02:47:45 | 2017-11-03T02:47:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | from __future__ import absolute_import, division, print_function
from stripe.api_resources.abstract import CreateableAPIResource
from stripe.api_resources.abstract import UpdateableAPIResource
from stripe.api_resources.abstract import ListableAPIResource
class Payout(CreateableAPIResource, UpdateableAPIResource,
             ListableAPIResource):
    """Stripe payout resource: creatable, updateable, listable, cancelable."""

    OBJECT_NAME = 'payout'

    def cancel(self):
        """POST to this payout's /cancel endpoint and refresh the object in place."""
        cancel_url = self.instance_url() + '/cancel'
        self.refresh_from(self.request('post', cancel_url))
| [
"ob@stripe.com"
] | ob@stripe.com |
060f65c40048132d59b778c41e50f1d54ffe6d33 | 0130c8b14927097663157846adc4b146d67d2fda | /tests/common/test_run/apply_rms_prop_run.py | 7db08112a030e82d216c1e0b4300a05ddbe3e473 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-3-Clause",
"NCSA",
"LLVM-exception",
"Zlib",
"BSD-2-Clause",
"MIT"
] | permissive | Shigangli/akg | e8be3e0ee1eafe3e42b4cc4d424c28f08ef4c0bc | 3766c54e0b109541932d147a6b5643a334b82403 | refs/heads/master | 2023-09-06T05:13:40.571583 | 2021-11-23T03:44:54 | 2021-11-23T03:44:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,815 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""apply_rms_prop_run"""
import numpy as np
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op import apply_rms_prop
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
def apply_rms_prop_run(shape, dtype, lr, momentum, rho, epsilon, attrs=None):
    """run function for dsl function apply_rms_prop.

    Builds the akg kernel, launches it on random inputs from gen_data and
    compares the three outputs (var, ms, mom) against the NumPy reference.
    Returns (inputs, outputs, expects, all_match); in tuning mode returns
    the module (plus expects/args when tuning is active).
    """
    if attrs is None:
        attrs = {}
    dtype = dtype.lower()
    # Seven inputs: var/ms/mom/grad tensors plus three scalar hyper-params.
    shapes = [shape, shape, shape, shape, (1,), (1,), (1,)]
    types = [dtype, dtype, dtype, dtype, dtype, dtype, dtype]
    op_attrs = [epsilon]
    if 'tuning' in attrs.keys():
        # Auto-tuning path: only build (and optionally return expect data).
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(apply_rms_prop.apply_rms_prop, shapes, types,
                                  op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=t)
        if t:
            _, expects, args = gen_data(shape, dtype, lr, momentum, rho, epsilon)
            return mod, expects, args
        return mod
    mod = utils.op_build_test(apply_rms_prop.apply_rms_prop, shapes, types,
                              op_attrs=op_attrs, kernel_name="apply_rms_prop", attrs=attrs)
    inputs, expects, args = gen_data(shape, dtype, lr, momentum, rho, epsilon)
    # The kernel updates var/ms/mom in place (outputs 0, 1, 2).
    outputs = utils.mod_launch(mod, args, outputs=(0, 1, 2), expect=expects)
    rtol, atol = get_rtol_atol("apply_rms_prop", dtype)
    results = list(map(lambda x, y: compare_tensor(x, y, rtol=rtol, atol=atol), outputs, expects))
    return inputs, outputs, expects, all(results)
def gen_data(shape, dtype, lr, momentum, rho, epsilon):
    """Generate random inputs plus the NumPy-reference expected outputs."""
    # Tensor inputs: var, ms (non-negative), mom, grad -- drawn in the same
    # order so the RNG sequence matches the previous implementation.
    tensors = [
        random_gaussian(shape, miu=10, sigma=1.0).astype(dtype),
        np.abs(random_gaussian(shape, miu=4, sigma=0.1).astype(dtype)),
        random_gaussian(shape, miu=3, sigma=0.3).astype(dtype),
        random_gaussian(shape, miu=3, sigma=0.3).astype(dtype),
    ]
    # Scalar hyper-parameters packed as 1-element arrays.
    scalars = [np.array([value]).astype(dtype) for value in (lr, momentum, rho)]
    inputs = tensors + scalars
    expects = apply_rms_prop_compute(*inputs, epsilon)
    # The launch args are the inputs themselves (in-place update kernel).
    return inputs, expects, inputs
def apply_rms_prop_compute(var, ms, mom, grad, lr, momentum, rho, epsilon):
    """NumPy reference for one RMSProp step.

    ms  <- rho * ms + (1 - rho) * grad^2
    mom <- momentum * mom + lr * grad / sqrt(ms + epsilon)
    var <- var - mom

    Inputs are upcast to float32 for the math and the results are cast back
    to the original dtype.  Returns [var_new, ms_new, mom_new].
    """
    compute_dtype = "float32"
    original_dtype = var.dtype
    if original_dtype != compute_dtype:
        var, ms, mom, grad, lr, momentum, rho = [
            t.astype(compute_dtype) for t in (var, ms, mom, grad, lr, momentum, rho)]
    one = np.array([1.0]).astype(compute_dtype)
    ms_new = rho * ms + (one - rho) * grad * grad
    mom_new = momentum * mom + (lr * grad) * (one / np.sqrt(ms_new + epsilon))
    var_new = var - mom_new
    results = [var_new, ms_new, mom_new]
    if var_new.dtype != original_dtype:
        results = [t.astype(original_dtype) for t in results]
    return results
| [
"1027252281@qq.com"
] | 1027252281@qq.com |
fd1edb275f814cbcec1af4f8623c8ba04dfae2a3 | ccc81e5a1e6d2be66423e376919deec4b30875ce | /bot_led.py | c32a509bfbc5dc115091008e9e714a3b17fd4151 | [] | no_license | BitcoinOfThings/bot-pi | 04135b43d03430a2fcfe71e71330bd9b6ac2ba87 | 36556fef5f3a4e2916bb8464818ddb6f3c56c1b6 | refs/heads/master | 2020-09-26T08:05:17.102902 | 2019-12-26T22:44:37 | 2019-12-26T22:44:37 | 226,211,976 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | # listens for mqtt messages and controls led connected to rpi
import paho.mqtt.client as mqtt
import RPi.GPIO as GPIO
import json
#
LED = 32
def on_message(client, userdata, message):
    """MQTT callback: a JSON payload {"message": "<int>"} drives the LED."""
    try:
        payload = json.loads(message.payload.decode('utf8'))
        state = int(payload["message"])
        print(state)
        # 0 switches the LED off; any other integer switches it on.
        GPIO.output(LED, GPIO.LOW if state == 0 else GPIO.HIGH)
    except Exception as err:
        # Malformed payloads are reported but never crash the MQTT loop.
        print(err)
# --- wiring: configure the GPIO pin and start with the LED off ---
GPIO.setmode(GPIO.BOARD)
GPIO.setup(LED, GPIO.OUT)
GPIO.output(LED, GPIO.LOW)
# --- MQTT: connect, subscribe and dispatch messages to on_message ---
mqttc = mqtt.Client()
# NOTE(review): demo/demo credentials are hard-coded for the public demo broker.
mqttc.username_pw_set(username="demo", password="demo")
mqttc.connect("mqtt.bitcoinofthings.com")
#mqttc.loop_start()
mqttc.on_message = on_message
mqttc.subscribe("demo")
try :
    print("Send 0 or 1 to bot_demo to turn the LED on or off")
    # Block forever dispatching MQTT messages; Ctrl-C falls through to cleanup.
    mqttc.loop_forever()
except KeyboardInterrupt:
    pass
#cleanup resets the pin therefore the led will be shut off
GPIO.cleanup()
| [
"dfoderick@gmail.com"
] | dfoderick@gmail.com |
59a353c86bbd81769180b33bf609a5936e95282c | daf28ed3f20340a136f19d582019fbd2554b6ea6 | /python/Ds_phikkpi_cfg.py | bc75d02bbc626a93145b58b803555864b749fd35 | [] | no_license | NiharSaha/Ds_MC_Gen_fragment | ccb4d48a80e46c260da0c7c7e821ad4da12f6a43 | b9bf94cb4f0cf4ffa2619e4671a8c70ca37aaf5d | refs/heads/master | 2022-01-08T10:31:43.544326 | 2018-04-12T19:45:30 | 2018-04-12T19:45:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,729 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
from GeneratorInterface.EvtGenInterface.EvtGenSetting_cff import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(5020.0),
maxEventsToPrint = cms.untracked.int32(0),
ExternalDecays = cms.PSet(
EvtGen130 = cms.untracked.PSet(
decay_table = cms.string('GeneratorInterface/EvtGenInterface/data/DECAY_2010.DEC'),
operates_on_particles = cms.vint32(),
particle_property_file = cms.FileInPath('GeneratorInterface/EvtGenInterface/data/evt.pdl'),
## user_decay_file = cms.vstring('Run2Ana/lambdapkpi/data/lambdaC_kstar892_kpi.dec'),
list_forced_decays = cms.vstring('MyD_s+','MyD_s-'),
user_decay_embedded= cms.vstring(
"""
Alias MyD_s+ D_s+
Alias MyD_s- D_s-
ChargeConj MyD_s- MyD_s+
Alias Myphi phi
Decay MyD_s+
1.000 Myphi pi+ SVS;
Enddecay
CDecay MyD_s-
Decay Myphi
1.000 K+ K- VSS;
Enddecay
End
"""
)
),
parameterSets = cms.vstring('EvtGen130')
),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'HardQCD:all = on',
'PhaseSpace:pTHatMin = 0.', #min pthat
),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
generator.PythiaParameters.processParameters.extend(EvtGenExtraParticles)
# Keep only events with a D_s (PDG 431) inside the mother pt/eta window
# whose two daughters are phi(333) + pi(211) and whose phi descends to a
# K+ K- (321/-321) pair.
DsDaufilter = cms.EDFilter("PythiaMomDauFilter",
    ParticleID = cms.untracked.int32(431),
    MomMinPt = cms.untracked.double(4.),
    MomMinEta = cms.untracked.double(-2.4),
    MomMaxEta = cms.untracked.double(2.4),
    DaughterIDs = cms.untracked.vint32(333, 211),
    NumberDaughters = cms.untracked.int32(2),
    DaughterID = cms.untracked.int32(333),
    DescendantsIDs = cms.untracked.vint32(321 , -321),
    NumberDescendants = cms.untracked.int32(2),
)
# Additionally require the D_s within 4 < pT < 500 and |y| < 1.2.
Dsrapidityfilter = cms.EDFilter("PythiaFilter",
    ParticleID = cms.untracked.int32(431),
    MinPt = cms.untracked.double(4.),
    MaxPt = cms.untracked.double(500.),
    MinRapidity = cms.untracked.double(-1.2),
    MaxRapidity = cms.untracked.double(1.2),
)
# Generate, then apply the daughter filter and the kinematic filter in order.
ProductionFilterSequence = cms.Sequence(generator*DsDaufilter*Dsrapidityfilter)
| [
"peng43@purdue.edu"
] | peng43@purdue.edu |
b1d5d7cd5f9f1fb3404761cf7c3fd4aae7a133e5 | a3fddbf8d953bce9b84173c1ba48780e849f86ef | /dave/to_nxspe.py | 63548c2bb5691e8e29ada6a7bd7e8c15082e47b9 | [] | no_license | rosswhitfield/wand | 79f99bef519ed9c334fddcb5396ab66d56f2903e | 562b1f89acb46749e220081117e2cbda2014df36 | refs/heads/master | 2021-06-02T05:38:00.741277 | 2021-04-14T13:19:18 | 2021-04-14T13:19:18 | 97,755,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | from mantid.simpleapi import *
import numpy as np
import sys
# Fixed incident energy passed to SaveNXSPE as Efixed (presumably meV,
# per the NXSPE convention -- TODO confirm).
E=36.9384
# Two-point wavelength axis assigned to every spectrum below.
w = np.array([1.487,1.489])
# Vanadium workspace providing normalisation and the detector grouping to copy.
van=LoadNexus('/SNS/users/rwp/wand/HB2C_2933_Van_processed_grouped.nxs')
for run in range(2952,4754):
    ws = LoadEventNexus(Filename='/HFIR/HB2C/IPTS-7776/nexus/HB2C_{}.nxs.h5'.format(run))
    ws = Integration(ws)
    # Mask detector ids 0..16383 before grouping.
    MaskDetectors(ws,DetectorList=range(16384))
    ws = GroupDetectors(ws,CopyGroupingFromWorkspace=van)
    ws.getAxis(0).setUnit("Wavelength")
    # NOTE: xrange => this script targets Python 2 (Mantid 3.x era).
    for idx in xrange(ws.getNumberHistograms()):
        ws.setX(idx, w)
    # Normalise by vanadium and by the accumulated proton charge.
    ws=ws/van/ws.getRun().getProtonCharge()
    ws=ConvertUnits(ws, Target='DeltaE', EMode='Direct', EFixed=E)
    ws=Rebin(ws, Params='-0.1,0.2,0.3')
    SaveNXSPE(ws, Filename='/HFIR/HB2C/IPTS-7776/shared/rwp/nxspe_group/HB2C_{}.nxspe'.format(run), Efixed=E, Psi=ws.getRun().getLogData('HB2C:Mot:s1').value[0])
| [
"whitfieldre@ornl.gov"
] | whitfieldre@ornl.gov |
486d98111acd3519a084b097828552d3cef1702d | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/12197021.py | af80b709441910822e79a0e64731bb6622b81026 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/12197021.py generated: Wed, 25 Jan 2017 15:25:28
#
# Event Type: 12197021
#
# ASCII decay Descriptor: [B+ -> D*(2010)+ anti-D0 K*0]cc with D* forced to (D0->Kpi) pi+ and K* into Kpi
#
# Gaudi/Gauss generator configuration: register the event type and generate
# the signal B+/- (PDG 521) via repeated hadronization of Pythia events.
from Configurables import Generation
Generation().EventType = 12197021
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
# EvtGen user decay file forcing the signal decay chain described above.
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_DstD0Kst0,Kpi,Kpi=DecProdCut.dec"
# Require all signal daughters to be inside the LHCb acceptance.
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
# Sample the gun particle's kinematics from a stored (pt, eta) histogram
# instead of a flat distribution.
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
# Smear the production vertex according to the beam-spot profile.
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12197021
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
c84343b041f36369f7566eeaad96750410cb5505 | e5f5b70dc5ef5ee52d897845e68a3d6feef2d17b | /chart/bar.py | 204f6ab486c0cc317318bb2bb047d9ff9ffb7a34 | [
"MIT"
] | permissive | maxhumber/chart | e5b16bffac63b29f814d7778253da3e721bc013e | 8f19609c14dedc09b07e4682c703107d53cd9714 | refs/heads/master | 2020-07-01T21:29:18.914053 | 2020-04-29T18:00:36 | 2020-04-29T18:00:36 | 201,306,637 | 63 | 8 | null | null | null | null | UTF-8 | Python | false | false | 2,233 | py | # HACK: to enable interactive development in Atom/Hydrogen
try:
from .preprocessing import RangeScaler
except ModuleNotFoundError:
from chart.preprocessing import RangeScaler
def create_label(label, label_width):
    '''Return *label* formatted for the chart's label column.

    The label is truncated to *label_width* characters, left-padded
    (right-justified) to that width, and suffixed with ': ' so every
    row's bar starts at the same column.

    Note: the original docstring said "right padding", but the code has
    always used ``str.rjust`` (i.e. padding on the left).
    '''
    return label[:label_width].rjust(label_width) + ': '
def build_row(value, label, width, mark):
    '''Return one chart row: *label* followed by *value* repetitions of
    *mark*, left-justified (space-padded) to *width* characters.'''
    bar_section = (mark * value).ljust(width)
    return label + bar_section
def bar(x, y, width=30, label_width=None, mark='▇'):
    '''Print a simple horizontal bar chart to the console.

    :param x: list, array or series of numeric values
    :param y: list, array or series of labels for the numeric values
    :param width: integer character length allotted to the bars
    :param label_width: integer character length of the label column;
        defaults to the longest label in *y*
    :param mark: unicode symbol used to draw the bars

    >>> bar([500, 200, 900, 400], ['marc', 'mummify', 'chart', 'sausagelink'])
           marc: ▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇
        mummify: ▇▇▇▇▇▇▇
          chart: ▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇
    sausagelink: ▇▇▇▇▇▇▇▇▇▇▇▇▇
    '''
    if not label_width:
        label_width = max(len(name) for name in y)
    # Scale the raw values onto [0, width] so the longest bar fills the row.
    scaled = RangeScaler((0, width), 0).fit_transform(x)
    chart_text = ''.join(
        build_row(value, create_label(name, label_width), width, mark) + '\n'
        for value, name in zip(scaled, y)
    )
    print(chart_text)
| [
"max.humber@gmail.com"
] | max.humber@gmail.com |
ce1e831f63a122a4a600268f2c8e2738f7e4329c | 589363013048ea3962bdc1e8d7b0cb74481e4dbd | /2022/boj11723.py | 02631accea1f0fd66cded1a0f3b65c1dcd0b4509 | [] | no_license | zjvlzld/algoritm | 53b97698a6b4385a0636bb076514a9dd458c8adf | 4339aee51e46ee2fbf3d40bc97fb7fdb51447f50 | refs/heads/master | 2022-08-31T04:48:51.537065 | 2022-08-18T23:59:45 | 2022-08-18T23:59:45 | 178,524,683 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | import sys
# BOJ 11723 "Set": process T commands against a 20-element membership table.
T = int(input())
# S[i] == 1 means element i is present, -1 means absent (indices 1..20 used;
# index 0 is unused padding).
S = [-1] * 21
for _ in range(T):
    tokens = sys.stdin.readline().rstrip().split(" ")
    op = tokens[0]
    if op == 'add':
        S[int(tokens[1])] = 1
    elif op == 'remove':
        S[int(tokens[1])] = -1
    elif op == 'check':
        # Print 1 if the element is in the set, otherwise 0.
        print(1 if S[int(tokens[1])] == 1 else 0)
    elif op == 'toggle':
        # Flip membership by negating the flag.
        S[int(tokens[1])] *= -1
    elif op == 'all':
        S = [1] * 21
    elif op == 'empty':
        S = [-1] * 21
"jongsik1995@naver.com"
] | jongsik1995@naver.com |
fb5177041f67fbb172219a9d83558812f3e4e953 | d91f28f9b8882af0f4d36a43775e6ac35a8716f0 | /payroll/urls.py | 667371a3d2f962125bf46ef77fe054877239163f | [] | no_license | shaddysparks/web_system | 9ea74159ee74435a44d5c560b8a3cb0594cb22bf | 1c854a9a2566c51fbfcef2d7f1f9fbbb778c7711 | refs/heads/master | 2020-05-02T11:46:35.998041 | 2019-03-20T10:03:33 | 2019-03-20T10:03:33 | 177,938,855 | 1 | 0 | null | 2019-03-27T07:15:52 | 2019-03-27T07:15:52 | null | UTF-8 | Python | false | false | 131 | py | from django.urls import path
from . import views
# URL namespace for this app (enables reverse('payroll:index')).
app_name = 'payroll'
urlpatterns = [
    # App root ('') -> the index view.
    path('', views.index, name='index'),
]
| [
"kyezaarnold63@gmail.com"
] | kyezaarnold63@gmail.com |
32c73283b5ac44a971e562d676c636b5d8f59a2b | 99b84337ae66ad2877544fd158f20e7f4cd96520 | /day11-20/day17/TCP(老师版)/TCPServer.py | be0ecc868dd22962f21507fc15cd062ce46e03e6 | [] | no_license | jiajiabin/python_study | cf145d54cabce2cb98914b3448ed7d0e5c1c146c | b4faaff26ee9728af2e80942ba6a7c7f6a8b0f86 | refs/heads/master | 2020-06-21T21:31:26.034978 | 2019-08-26T11:39:34 | 2019-08-26T11:39:34 | 197,556,254 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | import socket
# Server side
# 1. Configure the socket: choose TCP as the transport, then bind IP and port
# AF_INET means IPv4 addressing
# AF_INET6 means IPv6 addressing
# SOCK_STREAM means the TCP protocol
# SOCK_DGRAM means the UDP protocol
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(("192.168.53.133", 8923))
# A server hosts many programs/processes; the port identifies this one
# 2. Listen for client connection requests; the argument is the max backlog
server_socket.listen(3)
# With no pending connection the program blocks here;
# when a request arrives, execution continues
while True:
    # 3. Establish the connection; returns the client's socket and address
    client_socket, addr = server_socket.accept()
    # Now receive the data sent by the client
    data = client_socket.recv(1024)
    # Buffer size should not exceed 4096 and is usually a power of two;
    # otherwise multi-byte (e.g. Chinese) text may come out garbled
    print(data.decode("utf-8"))
    # The received data is raw bytes and must be decoded before use
    client_socket.send("你也好腻害!!".encode("utf-8"))
| [
"2592668397@qq.com"
] | 2592668397@qq.com |
c7c165068d12252bd5b6e7058d0ef6e1db1d6bff | 06f7ffdae684ac3cc258c45c3daabce98243f64f | /vsts/vsts/licensing/v4_0/models/account_license_usage.py | 4fa792ae3d93eb20672766f3fe8c850211691444 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown"
] | permissive | kenkuo/azure-devops-python-api | 7dbfb35f1c9637c9db10207824dd535c4d6861e8 | 9ac38a97a06ee9e0ee56530de170154f6ed39c98 | refs/heads/master | 2020-04-03T17:47:29.526104 | 2018-10-25T17:46:09 | 2018-10-25T17:46:09 | 155,459,045 | 0 | 0 | MIT | 2018-10-30T21:32:43 | 2018-10-30T21:32:42 | null | UTF-8 | Python | false | false | 1,370 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class AccountLicenseUsage(Model):
    """AccountLicenseUsage.

    Auto-generated msrest model (see file header: do not edit by hand).
    Pairs an account license with its provisioned and used seat counts.

    :param license:
    :type license: :class:`AccountUserLicense <licensing.v4_0.models.AccountUserLicense>`
    :param provisioned_count:
    :type provisioned_count: int
    :param used_count:
    :type used_count: int
    """

    # Maps Python attribute names to their REST wire names and msrest types
    # for (de)serialization.
    _attribute_map = {
        'license': {'key': 'license', 'type': 'AccountUserLicense'},
        'provisioned_count': {'key': 'provisionedCount', 'type': 'int'},
        'used_count': {'key': 'usedCount', 'type': 'int'}
    }

    def __init__(self, license=None, provisioned_count=None, used_count=None):
        super(AccountLicenseUsage, self).__init__()
        self.license = license
        self.provisioned_count = provisioned_count
        self.used_count = used_count
| [
"tedchamb@microsoft.com"
] | tedchamb@microsoft.com |
1ed1155b1470647513721c29c89919c8d698fef0 | dbec98678f62786fabf0dd32f7e1a464b67cca7f | /basic/migrations/0020_auto_20171024_1407.py | eb0ed79bd18802528ff3972927d104fced4861fa | [
"BSD-2-Clause"
] | permissive | kgdunn/django-peer-review-system | afc30052f37584e6db90a361d8d2c7613c7e500e | 8d013961e00d189fbbade5283128e956a27954f8 | refs/heads/master | 2023-07-07T06:39:00.357200 | 2020-03-20T11:49:25 | 2020-03-20T11:49:25 | 244,141,048 | 0 | 0 | BSD-2-Clause | 2023-08-28T17:25:13 | 2020-03-01T12:00:19 | Python | UTF-8 | Python | false | false | 505 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-24 12:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: alters Person.last_lis.

    dependencies = [
        ('basic', '0019_email_task'),
    ]

    operations = [
        # Redefine Person.last_lis as an optional 200-character field with a
        # human-readable verbose_name.
        migrations.AlterField(
            model_name='person',
            name='last_lis',
            field=models.CharField(blank=True, max_length=200, verbose_name='Last known: lis_result_sourcedid'),
        ),
    ]
| [
"kgdunn@gmail.com"
] | kgdunn@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.