blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
38bce75079ce164fe87dc117949cb92e2415a757 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/security/azure-mgmt-security/azure/mgmt/security/v2019_08_01/aio/operations/_iot_security_solutions_analytics_recommendation_operations.py | 747c3f884dbdb810d61fc76eb3ca65736afd6eef | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 10,172 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._iot_security_solutions_analytics_recommendation_operations import (
build_get_request,
build_list_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class IotSecuritySolutionsAnalyticsRecommendationOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.security.v2019_08_01.aio.SecurityCenter`'s
:attr:`iot_security_solutions_analytics_recommendation` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get(
self, resource_group_name: str, solution_name: str, aggregated_recommendation_name: str, **kwargs: Any
) -> _models.IoTSecurityAggregatedRecommendation:
"""Use this method to get the aggregated security analytics recommendation of yours IoT Security
solution. This aggregation is performed by recommendation name.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param solution_name: The name of the IoT Security solution. Required.
:type solution_name: str
:param aggregated_recommendation_name: Name of the recommendation aggregated for this query.
Required.
:type aggregated_recommendation_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IoTSecurityAggregatedRecommendation or the result of cls(response)
:rtype: ~azure.mgmt.security.v2019_08_01.models.IoTSecurityAggregatedRecommendation
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-08-01"))
cls: ClsType[_models.IoTSecurityAggregatedRecommendation] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
solution_name=solution_name,
aggregated_recommendation_name=aggregated_recommendation_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("IoTSecurityAggregatedRecommendation", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}/analyticsModels/default/aggregatedRecommendations/{aggregatedRecommendationName}"
}
@distributed_trace
def list(
self, resource_group_name: str, solution_name: str, top: Optional[int] = None, **kwargs: Any
) -> AsyncIterable["_models.IoTSecurityAggregatedRecommendation"]:
"""Use this method to get the list of aggregated security analytics recommendations of yours IoT
Security solution.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param solution_name: The name of the IoT Security solution. Required.
:type solution_name: str
:param top: Number of results to retrieve. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IoTSecurityAggregatedRecommendation or the result
of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.security.v2019_08_01.models.IoTSecurityAggregatedRecommendation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-08-01"))
cls: ClsType[_models.IoTSecurityAggregatedRecommendationList] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
solution_name=solution_name,
subscription_id=self._config.subscription_id,
top=top,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("IoTSecurityAggregatedRecommendationList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/iotSecuritySolutions/{solutionName}/analyticsModels/default/aggregatedRecommendations"
}
| [
"noreply@github.com"
] | Azure.noreply@github.com |
3673d31ddf8ca1389d90922a026c2a97552f3315 | 236ded75aefcf965c582538f1b56d21b161cd3c5 | /ethpki-Random/testDir/callct019.py | 4da9f9b228ff9028f8d95b249bf707cff77f623f | [] | no_license | rogerioita/ICPChain | 7fc1c7c5df289ed31fd2ad71cee400eac3d253cb | 87f2b001f5297396be6cb3aa9dd0c42236587038 | refs/heads/master | 2023-03-10T20:57:49.479052 | 2021-02-16T16:27:23 | 2021-02-16T16:27:23 | 336,806,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | #!/usr/bin/env python
import os
# Launch the companion Node.js script in a subshell; the exit status is ignored.
os.system('node ct019.js')
| [
"rogerio.santos@ga.ita.br"
] | rogerio.santos@ga.ita.br |
82c59e960c39ab218f51fa905bfc31f414b29f22 | c98a1f74ea576d670d094d5e5259bfe2e4449b88 | /PKUTreeMaker/test/CrabJobsSrc/MC/crab3_analysisWGToLNuG.py | afc4cbad75468874b348a42e7c1d3813ea0b8177 | [] | no_license | AndrewLevin/VBSWG_Ntuple | d71544d2b233eb0e88d185d947bbf32a32fbca18 | 37eac7ad4d03eb0d4a463f9f121c4e998f0c5a34 | refs/heads/main | 2023-02-21T11:42:28.642572 | 2020-12-01T15:31:37 | 2020-12-01T15:31:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,364 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration for the WGToLNuG MC sample (2017, 94X MiniAODv2).
config = Configuration()
# --- General: request bookkeeping ---
config.section_("General")
config.General.requestName = 'fullrun2_2017_version5_WGJets_v1_2'
config.General.transferLogs = True
# --- JobType: analysis job, CMSSW pset and auxiliary JEC text files shipped with each job ---
config.section_("JobType")
config.JobType.maxMemoryMB = 3000
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Fall17_17Nov2017_V32_MC_L1FastJet_AK4PFchs.txt','Fall17_17Nov2017_V32_MC_L1FastJet_AK4PFPuppi.txt','Fall17_17Nov2017_V32_MC_L2L3Residual_AK4PFchs.txt','Fall17_17Nov2017_V32_MC_L2L3Residual_AK4PFPuppi.txt','Fall17_17Nov2017_V32_MC_L2Relative_AK4PFchs.txt','Fall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi.txt','Fall17_17Nov2017_V32_MC_L3Absolute_AK4PFchs.txt','Fall17_17Nov2017_V32_MC_L3Absolute_AK4PFPuppi.txt']
config.JobType.psetName = 'analysis_mc.py'
config.JobType.allowUndistributedCMSSW = True
# --- Data: input dataset and splitting (2 files per job, all units) ---
config.section_("Data")
##config.Data.outputPrimaryDataset = 'VBS_WGAMMA_94X'
config.Data.inputDataset = '/WGToLNuG_01J_5f_TuneCP5_13TeV-amcatnloFXFX-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v3/MINIAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 2
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outputDatasetTag = 'fullrun2_2017_version5_WGJets_v1_2'
# --- Site: where the output is stored ---
config.section_("Site")
config.Site.storageSite = 'T2_CN_Beijing'
"15827238926@163.com"
] | 15827238926@163.com |
fa2eb58fd5bab605020259e31f53b962d74cc371 | adf65dbe1a15560f3b4930fa393c2327f29ab3c2 | /myapp/web/tests.py | d96c0860d6d36ca40c0104798f20c4fc07269a90 | [] | no_license | sanix-sandel/my_app | ebe59d186824f6539a2b392dd7cf992bccb221c0 | 652e83f2128039ed979cc90f9254fb54d21ebcea | refs/heads/master | 2023-03-30T09:21:46.513036 | 2021-03-26T06:06:09 | 2021-03-26T06:06:09 | 351,671,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | from django.test import SimpleTestCase
from django.urls import reverse
class HomePageTests(SimpleTestCase):
    """Smoke tests for the home page: it must be reachable both by path and by URL name."""

    def test_homepage_status_code(self):
        """GET on the root path returns HTTP 200."""
        resp = self.client.get('/')
        self.assertEqual(resp.status_code, 200)

    def test_homepage_url_name(self):
        """Reversing the 'home' URL name resolves to a page that returns HTTP 200."""
        resp = self.client.get(reverse('home'))
        self.assertEqual(resp.status_code, 200)
"sanicksikani@gmail.com"
] | sanicksikani@gmail.com |
87bcb438a3c173c9b0e8bb2115a0d3b8841aef86 | 20d88c37924ec96d5b5d02eb13edc93c21a25b9a | /Uniq/urls.py | efbcc5ab61794b6fd3d5dd26134c158836230ffd | [] | no_license | AthifSaheer/UniqWalls-Wallpaper | d51c9e99153473427ead8a7b16631f860502a09c | c392732d239fb3174bd6e7c7c9b758cf12545f20 | refs/heads/main | 2023-06-03T22:33:02.606873 | 2021-06-05T01:28:48 | 2021-06-05T01:28:48 | 337,617,902 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | """Uniq URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    # Django admin interface.
    path('admin/', admin.site.urls),
    # Main application routes mounted at the site root.
    path('', include('UniqApp.urls')),
    # Built-in auth views (login/logout/password management) under /accounts/.
    path('accounts/', include('django.contrib.auth.urls')),
]
# Serve static and media files directly from Django (development-style setup).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"liteboook@gmail.com"
] | liteboook@gmail.com |
a272068aded03c076ca4db904383d5ed9b2fb3ff | 596f34bab8addf7626364a9c39da566c0de8ba2d | /applications/tensorflow/click_through_rate/dien/dien_infer.py | d6ce08a95a8823f101e2806813c85b3ff4d89b91 | [
"MIT",
"Apache-2.0"
] | permissive | WN1695173791/examples | 1661c404cc92cf4e4d138aa1b6b7f6cff3b5ecdb | ceb1a598ccc6136d6c145b9e8b7a5a3e938de7fe | refs/heads/master | 2023-04-06T17:02:48.823007 | 2021-04-15T12:55:26 | 2021-04-15T12:55:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,907 | py | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Traing CTR Model on Graphcore IPUs.
"""
import os
import time
import random
import argparse
import numpy as np
import logging
import tensorflow as tf
from collections import namedtuple
from tensorflow.python.ipu import loops
from tensorflow.python.ipu import utils
from tensorflow.python.ipu import ipu_compiler
from tensorflow.python.ipu.scopes import ipu_scope
from tensorflow.python.ipu import ipu_infeed_queue
from tensorflow.python.ipu import ipu_outfeed_queue
import set_path
from common.utils import calc_auc, setup_logger
from common.embedding import get_dataset_embed, id_embedding, get_synthetic_dataset
import common.log as logger
from dien.dien_model import DIEN
# Size of the validation split — the step count in inference() is derived from it
# (presumably the number of validation examples; confirm against the dataset).
VALIDATION_DATA_SIZE = 121216
tf_log = logging.getLogger('DIEN')
# Bundle of host-side handles needed to drive one built graph.
# NOTE(review): the 'iterator' slot is filled with the IPU infeed queue in
# generic_graph(); inference() uses it as infer.iterator.initializer.
GraphOps = namedtuple(
    'graphOps', ['graph',
                 'session',
                 'init',
                 'ops',
                 'placeholders',
                 'iterator',
                 'outfeed',
                 'saver'])
def get_tf_datatype(opts):
    """Return the master TF dtype implied by opts['precision'] ("ops.master", e.g. "32.32")."""
    master_bits = opts["precision"].split('.')[1]
    if master_bits == '16':
        return tf.float16
    return tf.float32
def graph_builder(opts, uid_embedding, mid_embedding, cat_embedding, lr,
                  uids, mids, cats, mid_his, cat_his, mid_mask, target, sl,
                  use_negsampling=True):
    """Construct a DIEN model and apply it to one batch of features.

    Returns whatever DIEN.__call__ produces; callers unpack it as (prob, accuracy).
    NOTE(review): `use_negsampling` is accepted but never forwarded to the model —
    the two None positional arguments presumably stand in for the neg-sampling
    inputs; confirm against DIEN.__call__'s signature.
    """
    master_dtype = get_tf_datatype(opts)
    return DIEN(opts, uid_embedding, mid_embedding, cat_embedding, master_dtype)(uids, mids, cats, mid_his, cat_his, mid_mask, sl, None, None, lr, target)
def generic_graph(opts, is_training):
    """Build the DIEN graph, IPU infeed/outfeed queues, session and IPU config.

    Returns a tuple (GraphOps, uid_embedding, mid_embedding, cat_embedding).
    """
    master_dtype = get_tf_datatype(opts)
    graph = tf.Graph()
    with graph.as_default():
        placeholders = {}
        placeholders["learning_rate"] = tf.placeholder(master_dtype, shape=[])
        # Embedding tables for user / material / category ids.
        uid_embedding, mid_embedding, cat_embedding = id_embedding(opts, is_training, opts['seed'])
        if opts['use_synthetic_data']:
            dataset = get_synthetic_dataset(opts)
        else:
            dataset = get_dataset_embed(opts, False)
        infeed = ipu_infeed_queue.IPUInfeedQueue(dataset,
                                                 feed_name = 'DIEN_dataset_infeed',
                                                 replication_factor = (opts['replicas']))
        outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue(feed_name="DIEN_outfeed",
                                                          replication_factor=opts['replicas'])
        with ipu_scope('/device:IPU:0'):
            def comp_fn():
                # One on-device step: run the model on a batch from the infeed
                # and enqueue (prob, target, accuracy) on the outfeed.
                def body(uids, mids, cats, mid_his, cat_his, mid_mask, target, sl):
                    prob, accuracy = graph_builder(opts, uid_embedding, mid_embedding, cat_embedding, placeholders['learning_rate'], uids, mids, cats, mid_his, cat_his, mid_mask, target, sl, use_negsampling=False)
                    with tf.control_dependencies([prob]):
                        return outfeed_queue.enqueue((prob, target, accuracy))
                # Execute `batches_per_step` batches per single session.run call.
                return loops.repeat(opts['batches_per_step'], body, [], infeed)
            outputs = ipu_compiler.compile(comp_fn, [])
        outfeed = outfeed_queue.dequeue()
        saver = tf.train.Saver()
        utils.move_variable_initialization_to_cpu()
        init = tf.global_variables_initializer()
    # Optionally run against the IPU simulator instead of real hardware.
    if opts['use_ipu_model']:
        os.environ["TF_POPLAR_FLAGS"] = "--use_ipu_model"
    ipu_options = utils.create_ipu_config(profiling=False,
                                          profile_execution=False,
                                          max_cross_replica_sum_buffer_size=10000000,
                                          max_inter_ipu_copies_buffer_size=10000000)
    ipu_options = utils.set_recomputation_options(ipu_options, allow_recompute=True)
    ipu_options = utils.auto_select_ipus(ipu_options, [opts['replicas']])
    utils.configure_ipu_system(ipu_options)
    graph_outputs = [outputs]
    sess = tf.Session(graph=graph)
    # The infeed is stored in the GraphOps 'iterator' slot (see namedtuple above).
    return GraphOps(graph,
                    sess,
                    init,
                    graph_outputs,
                    placeholders,
                    infeed,
                    outfeed,
                    saver), uid_embedding, mid_embedding, cat_embedding
def inference(opts):
    """Run validation over the full validation split; log per-step accuracy/throughput
    and final AUC/accuracy/latency aggregates."""
    infer, uid_embedding, mid_embedding, cat_embedding = generic_graph(opts, False)
    infer.session.run(infer.init)
    infer.session.run(infer.iterator.initializer)
    # Restore a checkpoint if one exists at model_path; otherwise run with fresh weights.
    path = opts['model_path']
    if path is not None and os.path.exists(path+".meta"):
        infer.saver.restore(infer.session, path)
        tf_log.debug(f"model {path} restored")
    else:
        tf_log.debug(f"Do not restore since no model under path {path}")
    steps = VALIDATION_DATA_SIZE * opts['epochs'] / opts['batch_size'] / opts["batches_per_step"]
    i = 0
    stored_arr = []  # accumulated [prob, label] pairs for the final AUC computation
    tf_log.debug(f"steps: {steps}")
    accs = []
    total_time = 0
    # Host embedding tables must stay registered with the session while it runs.
    with uid_embedding.register(infer.session), mid_embedding.register(infer.session), cat_embedding.register(infer.session):
        while i < steps:
            start = time.time()
            infer.session.run(infer.ops)
            prob, target, acc = infer.session.run(infer.outfeed)
            time_one_iteration = time.time() - start
            # Exclude the first iteration from the timing total (graph warm-up).
            if i > 0:
                total_time = total_time + time_one_iteration
            i += 1
            accuracy = np.mean(acc)
            accs.append(accuracy)
            # Only column 0 is kept for AUC — presumably the positive-class
            # probability/label; confirm against the model's output layout.
            prob_1 = prob.reshape([opts['batches_per_step']*opts['batch_size'], 2*opts['replicas']])
            prob_1 = prob_1[:, 0].tolist()
            target_1 = target.reshape([opts['batches_per_step']*opts['batch_size'], 2*opts['replicas']])
            target_1 = target_1[:, 0].tolist()
            for p, t in zip(prob_1, target_1):
                stored_arr.append([p, t])
            throughput = opts["batch_size"] * opts["batches_per_step"] / time_one_iteration
            tf_log.info(f"i={i // opts['batches_per_step']},validation accuracy: {accuracy}, throughput:{throughput}, latency:{time_one_iteration * 1000 / opts['batches_per_step']}")
    test_auc = calc_auc(stored_arr)
    test_acc = np.mean(accs)
    tf_log.info(f"test_auc={test_auc:.4f} test_acc={test_acc:.4f}")
    infer.session.close()
    # Aggregate throughput/latency over all but the first (warm-up) iteration.
    if steps > 1:
        total_recomm_num = opts["batch_size"] * (i - 1) * opts["batches_per_step"]
        throughput = float(total_recomm_num) / float(total_time)
        latency = float(total_time) * 1000 / float((i - 1) * opts["batches_per_step"])
        tf_log.info(f"Total recommendations: {total_recomm_num:d}")
        tf_log.info(f"Process time in seconds is {total_time:.3f}")
        tf_log.info(f"recommendations/second is {throughput:.3f}")
        tf_log.info(f"latency in miliseconds is {latency:.3f}")
def add_model_arguments(parser):
    """Register model-architecture CLI options on *parser* and return it."""
    # Integer-valued architecture knobs: (flag, default, help).
    int_options = [
        ("--max-seq-len", 100, "sequence maximum length"),
        ("--hidden-size", 36, "hidden size"),
        ("--attention-size", 36, "attention size"),
    ]
    for flag, default, help_text in int_options:
        parser.add_argument(flag, type=int, default=default, help=help_text)
    # String-valued choices for precision and recurrent cell implementations.
    parser.add_argument("--precision", type=str, default="32.32", choices=["32.32"], help="Setting of Ops and Master datatypes")
    parser.add_argument("--gru-type", type=str, default="PopnnGRU", choices=["TfnnGRU", "PopnnGRU"], help="choose GRU")
    parser.add_argument("--augru-type", type=str, default="PopnnAUGRU", choices=["TfAUGRU", "PopnnAUGRU"], help="choose AUGRU")
    return parser
def add_dataset_arguments(parser):
    """Register dataset/input-pipeline options under a 'Dataset' group; returns the parser."""
    dataset_group = parser.add_argument_group('Dataset')
    dataset_group.add_argument('--use-synthetic-data', default=False, action='store_true',
                               help='Use synthetic data')
    dataset_group.add_argument('--epochs', type=float, default=1,
                               help='number of epochs')
    dataset_group.add_argument('--batches-per-step', type=int, default=1600,
                               help='Number of batches to perform on the device before returning to the host')
    return parser
def add_training_arguments(parser):
    """Register training/runtime options under a 'Training' argument group; returns the parser."""
    training = parser.add_argument_group('Training')
    training.add_argument('--seed', type=int, default=3,
                          help="set random seed")
    training.add_argument('--batch-size', type=int, default=128,
                          help="set batch-size for training graph")
    training.add_argument('--large-embedding', default=False, action='store_true',
                          help="set small or large embedding size")
    training.add_argument('--replicas', type=int, default=1,
                          help="Replicate graph over N workers to increase batch to batch-size*N")
    training.add_argument('--model-path', type=str, default='./dnn_save_path/ckpt_noshuffDIEN3',
                          help='Place to store and restore model')
    training.add_argument('--use-ipu-model', default=False, action='store_true',
                          help="use IPU model or not.")
    training.add_argument('--use-ipu-emb', default=False, action='store_true',
                          help="Use host embeddig or put embedding on ipu.")
    return parser
if __name__ == '__main__':
    # Assemble the CLI from the model/dataset/training/logging option groups.
    parser = argparse.ArgumentParser(description = "CTR Model Training in Tensorflow", add_help = False)
    parser = add_model_arguments(parser)
    parser = add_dataset_arguments(parser)
    parser = add_training_arguments(parser)
    parser = logger.add_arguments(parser)
    args, _ = parser.parse_known_args()
    args = vars(args)  # downstream code indexes options as a plain dict
    logger.print_setting(args, is_dien=False, is_training=False)
    setup_logger(logging.DEBUG, tf_log, name='dien_log.txt')
    inference(args)
| [
"philb@graphcore.ai"
] | philb@graphcore.ai |
ae903173e3955111b0b3161395c12e408a48aa9e | 940d7b93fb27e8eead9b6e52bc5c7444666744dd | /python/src/Lib/test/test_unary.py | 9854f64d0ce1c69165d5d924590565058f570e20 | [
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Python-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-python-cwi",
"Apache-2.0"
] | permissive | pilotx45/sl4a | d446531d310cc17d93f24aab7271a0813e8f628d | 150e3e46b5103a9b9a391034ef3fbc5bd5160d0f | refs/heads/master | 2022-03-24T19:48:30.340479 | 2022-03-08T16:23:58 | 2022-03-08T16:23:58 | 277,016,574 | 1 | 0 | Apache-2.0 | 2022-03-08T16:23:59 | 2020-07-04T01:25:36 | null | UTF-8 | Python | false | false | 1,802 | py | """Test compiler changes for unary ops (+, -, ~) introduced in Python 2.2"""
import unittest
from test.test_support import run_unittest, have_unicode
class UnaryOpTestCase(unittest.TestCase):
    """Exercise unary +, - and ~ on ints, longs, floats and complex (Python 2 semantics)."""
    def test_negative(self):
        # Unary minus equals subtraction from zero; double negation is identity.
        self.assert_(-2 == 0 - 2)
        self.assert_(-0 == 0)
        self.assert_(--2 == 2)
        self.assert_(-2L == 0 - 2L)
        self.assert_(-2.0 == 0 - 2.0)
        self.assert_(-2j == 0 - 2j)
    def test_positive(self):
        # Unary plus is the identity on every numeric type.
        self.assert_(+2 == 2)
        self.assert_(+0 == 0)
        self.assert_(++2 == 2)
        self.assert_(+2L == 2L)
        self.assert_(+2.0 == 2.0)
        self.assert_(+2j == 2j)
    def test_invert(self):
        # NOTE(review): these assertions re-test negation, not ~ inversion —
        # looks like a copy of test_negative; confirm against upstream intent.
        self.assert_(-2 == 0 - 2)
        self.assert_(-0 == 0)
        self.assert_(--2 == 2)
        self.assert_(-2L == 0 - 2L)
    def test_no_overflow(self):
        # A 32-digit literal must give the same value whether or not the
        # 'L' long suffix is present (no silent int overflow).
        nines = "9" * 32
        self.assert_(eval("+" + nines) == eval("+" + nines + "L"))
        self.assert_(eval("-" + nines) == eval("-" + nines + "L"))
        self.assert_(eval("~" + nines) == eval("~" + nines + "L"))
    def test_negation_of_exponentiation(self):
        # Make sure '**' does the right thing; these form a
        # regression test for SourceForge bug #456756.
        # '**' binds tighter than unary minus: -2 ** 4 is -(2 ** 4).
        self.assertEqual(-2 ** 3, -8)
        self.assertEqual((-2) ** 3, -8)
        self.assertEqual(-2 ** 4, -16)
        self.assertEqual((-2) ** 4, 16)
    def test_bad_types(self):
        # Unary numeric operators must reject strings; ~ must reject
        # complex and float operands.
        for op in '+', '-', '~':
            self.assertRaises(TypeError, eval, op + "'a'")
            if have_unicode:
                self.assertRaises(TypeError, eval, op + "u'a'")
        self.assertRaises(TypeError, eval, "~2j")
        self.assertRaises(TypeError, eval, "~2.0")
def test_main():
    # Entry point used by the regression-test driver (test.test_support).
    run_unittest(UnaryOpTestCase)
if __name__ == "__main__":
    test_main()
| [
"damonkohler@gmail.com"
] | damonkohler@gmail.com |
805e86d4a9542ddd6fa295eb01cfeba9b28cf056 | 7e29e8e9979d05cd4521512a0e12ffd516b1cdd3 | /parts/migrations/0001_initial.py | e0d166212c7b992eb75d30fbb195f10e6f8cd762 | [] | no_license | ShipraShalini/UrParts | d838b9b21485c169136ca7ac6d7b892ac8245e33 | 1327681450b9a57b058f4a9a95d833c06f171095 | refs/heads/main | 2023-04-11T12:16:10.867606 | 2021-04-16T14:50:42 | 2021-04-16T14:50:42 | 358,036,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | # Generated by Django 3.2 on 2021-04-15 10:26
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration for the parts app: creates the Part table with a
    # UUID primary key and a five-column uniqueness constraint.
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="Part",
            fields=[
                (
                    "uuid",
                    # Random UUID primary key, generated in Python (uuid4).
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("manufacturer", models.CharField(max_length=100)),
                ("category", models.CharField(max_length=100)),
                ("model", models.CharField(max_length=100)),
                ("part", models.CharField(max_length=100)),
                ("part_category", models.CharField(max_length=100)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # A part entry is unique over the full (manufacturer, category, model,
        # part, part_category) tuple.
        migrations.AddConstraint(
            model_name="part",
            constraint=models.UniqueConstraint(
                fields=("manufacturer", "category", "model", "part", "part_category"),
                name="unique_part_entry",
            ),
        ),
    ]
| [
"code.shipra@gmail.com"
] | code.shipra@gmail.com |
951dd813a10fa627d6f3fcc50d81d97bc753ee18 | 7bc54bae28eec4b735c05ac7bc40b1a8711bb381 | /src/trainer_v2/per_project/transparency/splade_regression/data_loaders/pairwise_eval.py | 580e05023b05169f286b6833aa70f8dfbda1ea89 | [] | no_license | clover3/Chair | 755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e | a2102ebf826a58efbc479181f1ebb5de21d1e49f | refs/heads/master | 2023-07-20T17:29:42.414170 | 2023-07-18T21:12:46 | 2023-07-18T21:12:46 | 157,024,916 | 0 | 0 | null | 2023-02-16T05:20:37 | 2018-11-10T21:55:29 | Python | UTF-8 | Python | false | false | 3,775 | py | from collections import defaultdict
import tensorflow as tf
from typing import List, Iterable, Callable, Dict, Tuple, Set
from tensorflow.python.distribute.distribute_lib import Strategy
from transformers import AutoTokenizer
from trainer_v2.chair_logging import c_log
from trainer_v2.custom_loop.train_loop_helper import fetch_metric_result
from trainer_v2.per_project.transparency.splade_regression.data_loaders.iterate_data import iterate_triplet
from trainer_v2.per_project.transparency.splade_regression.path_helper import partitioned_triplet_path_format_str
pairwise_roles = ["q", "d1", "d2"]
def load_pairwise_mmp_data(target_partition: List[int]) -> List[Tuple[str, str, str]]:
    """Read (query, doc1, doc2) text triplets from the given MMP partition indices."""
    c_log.info("load_pairwise_eval_data")
    path_fmt = partitioned_triplet_path_format_str()
    triplets: List[Tuple[str, str, str]] = []
    for part_idx in target_partition:
        triplets.extend(iterate_triplet(path_fmt.format(part_idx)))
    return triplets
def dict_to_tuple(encoded):
    """Extract (input_ids, attention_mask) from a tokenizer-style mapping."""
    return encoded['input_ids'], encoded['attention_mask']
class PairwiseAccuracy(tf.keras.metrics.Mean):
    """Running mean of pairwise-ranking correctness.

    A pair counts as correct when the first score strictly exceeds the
    second (s2 < s1); the per-batch fraction is folded into the Mean state.
    """
    def __init__(self, name='pairwise_accuracy', **kwargs):
        super().__init__(name=name, **kwargs)
    def update_state(self, s1, s2):
        # 1.0 where s1 > s2, else 0.0; mean over the batch, then accumulate.
        is_correct = tf.cast(tf.less(s2, s1), tf.float32)
        is_correct_f = tf.reduce_mean(is_correct)
        super(PairwiseAccuracy, self).update_state(is_correct_f)
# each instance is (query, d_pos, d_neg), where each of documents are (input_ids, attention_masks)
def build_pairwise_eval_dataset(
        triplet_list, checkpoint_model_name, batch_size, max_seq_length) -> tf.data.Dataset:
    """Tokenize (q, d1, d2) triplets and wrap them in a batched tf.data.Dataset.

    Each element is ((q_ids, q_mask), (d1_ids, d1_mask), (d2_ids, d2_mask)),
    with every sequence padded to max_seq_length.
    """
    c_log.info("build_pairwise_eval_dataset")
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_model_name)
    def encode(text):
        # Fixed-length padding so the TensorSpec below is consistent.
        d = tokenizer(text, padding="max_length", max_length=max_seq_length)
        return dict_to_tuple(d)
    # Tokenize eagerly up front; the generator merely replays the list.
    items = []
    for q, d1, d2 in triplet_list:
        e = encode(q), encode(d1), encode(d2)
        items.append(e)
    def get_generator() -> Iterable[Tuple]:
        yield from items
    int_list = tf.TensorSpec([None], dtype=tf.int32)
    int_pair_list = (int_list, int_list)
    output_signature = int_pair_list, int_pair_list, int_pair_list
    dataset = tf.data.Dataset.from_generator(get_generator, output_signature=output_signature)
    dataset = dataset.batch(batch_size)
    return dataset
class PairwiseEval:
    """Evaluates a scoring model on (query, d_pos, d_neg) triplets under a
    tf.distribute Strategy, accumulating pairwise accuracy."""
    def __init__(self,
                 triplet_encoded: tf.data.Dataset,
                 strategy: Strategy,
                 model: tf.keras.models.Model
                 ):
        self.triplet_encoded = triplet_encoded
        self.strategy = strategy
        self.model = model
        self.metrics = {
            'pairwise_accuracy': PairwiseAccuracy()
        }
    @tf.function
    def eval_fn(self, item):
        # Encode the query and both documents with the same model.
        q, d1, d2 = item
        q_enc = self.model(q, training=False)
        d1_enc = self.model(d1, training=False)
        d2_enc = self.model(d2, training=False)
        def score(q_enc, d_enc):
            # Per-row dot product between query and document encodings.
            return tf.reduce_sum(tf.multiply(q_enc, d_enc), axis=1)
        s1 = score(q_enc, d1_enc)
        s2 = score(q_enc, d2_enc)
        # NOTE(review): this print looks like a debugging leftover; inside a
        # tf.function it traces once rather than printing per step.
        print(s1, s2)
        for m in self.metrics.values():
            m.update_state(s1, s2)
    def do_eval(self):
        """Run eval_fn over the whole dataset; returns (0.0, metric results dict)."""
        c_log.info("PairwiseEval::do_eval")
        iterator = iter(self.triplet_encoded)
        for item in iterator:
            args = item,
            per_replica = self.strategy.run(self.eval_fn, args=args)
        metrics = self.metrics
        metric_res = fetch_metric_result(metrics)
        return 0.0, metric_res
| [
"lesterny@gmail.com"
] | lesterny@gmail.com |
0a79c1a51d52335e7b62064a7d0b834bda785a9f | d07cc99f0729658b785961a7a93b55e0ccc045d8 | /APscheduler/OtherClass.py | 600c5666d00626bf2d7a8bc3726ad143347ef753 | [] | no_license | dajun928/MyPyCharm | 8b6f3c7ea934dcdbb88e126e810467612b7505ad | 5cde8db68b0f2396cd6e114d19e35cd025d52d98 | refs/heads/master | 2020-03-27T04:48:55.985426 | 2019-03-06T13:48:54 | 2019-03-06T13:48:54 | 145,970,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | #!/usr/bin/python
#coding=utf-8
class OtherClass:
    """Minimal job holder used by the APScheduler examples."""

    def my_job02(self):
        # print() call form works on both Python 2 and 3; the original used
        # the Python-2-only statement syntax (`print 'task01'`).
        print('task01')
if __name__ == '__main__':
    # Ad-hoc manual smoke test, intentionally left commented out.
    # o=OtherClass()
    # o.my_job02()
    pass
| [
"1663177102@qq.com"
] | 1663177102@qq.com |
5b03a8a6be6ea64cd9268addff2dd028e033cce1 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /wt5/wt5/tasks_test.py | 680d1c69a9e75204e4b9ce58eaf356a7ffd22173 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,356 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for WT5 tasks."""
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import t5
import tensorflow.compat.v1 as tf
import wt5.wt5.mixtures # pylint:disable=unused-import
import wt5.wt5.tasks # pylint:disable=unused-import
tf.disable_v2_behavior()
tf.enable_eager_execution()
MixtureRegistry = t5.data.MixtureRegistry
TaskRegistry = t5.data.TaskRegistry
_SEQUENCE_LENGTH = {'inputs': 2048, 'targets': 512}
_TASKS = [
'esnli_v010',
'esnli_v010_0_expln',
'esnli_explanations_take100_v010',
'esnli_labels_skip100_v010',
'mnli_v002',
'cos_e_v001',
'cos_e_v001_0_expln_like_esnli',
'cos_e_explanations_take100_v001',
'cos_e_labels_skip100_v001',
'movie_rationales_v010',
'movie_rationales_v010_no_expl',
'imdb_reviews_v100',
'amazon_reviews_books_v1_00_v010',
]
_MIXTURES = [
'cos_e_100_explanations',
'esnli_100_explanations',
'esnli_mnli_all_explanations',
'imdb_reviews_movie_rationales',
'esnli_cos_e_transfer',
'movie_rationales_100_explanations',
'amazon_books_movies_equal',
]
class TasksTest(parameterized.TestCase):
    """Smoke tests: every registered task/mixture must yield a train example."""

    @parameterized.parameters(((name,) for name in _TASKS))
    def test_task(self, name):
        task = TaskRegistry.get(name)
        logging.info('task=%s', name)
        dataset = task.get_dataset(_SEQUENCE_LENGTH, 'train')
        for example in dataset:
            logging.info(example)
            break

    @parameterized.parameters(((name,) for name in _MIXTURES))
    def test_mixture(self, name):
        mixture = MixtureRegistry.get(name)
        logging.info('mixture=%s', name)
        dataset = mixture.get_dataset(_SEQUENCE_LENGTH, 'train')
        for example in dataset:
            logging.info(example)
            break
if __name__ == '__main__':
absltest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
68a88a724a54aabed0770c4f341cb635b69320b7 | aa2c3743c265c3db8a246a04f26df8428d23dd06 | /tacker/api/v1/attributes.py | 82499d658e9d3e7b27ed8ffa2722e75555921d38 | [
"Apache-2.0"
] | permissive | SripriyaSeetharam/tacker | fb43740de8e791b7bfa121dd16c295dd380f03f0 | 0c5c2eb06fb6112b03b49c05c5cbffb0ba00587f | refs/heads/master | 2021-01-22T01:55:17.327221 | 2015-07-08T21:07:02 | 2015-07-08T21:07:02 | 38,065,799 | 1 | 0 | null | 2015-06-25T18:13:25 | 2015-06-25T18:13:24 | Python | UTF-8 | Python | false | false | 20,202 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import re
from tacker.common import constants
from tacker.common import exceptions as n_exc
from tacker.openstack.common import log as logging
from tacker.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
ATTR_NOT_SPECIFIED = object()
# Defining a constant to avoid repeating string literal in several modules
SHARED = 'shared'
# Used by range check to indicate no limit for a bound.
UNLIMITED = None
def _verify_dict_keys(expected_keys, target_dict, strict=True):
"""Allows to verify keys in a dictionary.
:param expected_keys: A list of keys expected to be present.
:param target_dict: The dictionary which should be verified.
:param strict: Specifies whether additional keys are allowed to be present.
:return: True, if keys in the dictionary correspond to the specification.
"""
if not isinstance(target_dict, dict):
msg = (_("Invalid input. '%(target_dict)s' must be a dictionary "
"with keys: %(expected_keys)s") %
{'target_dict': target_dict, 'expected_keys': expected_keys})
return msg
expected_keys = set(expected_keys)
provided_keys = set(target_dict.keys())
predicate = expected_keys.__eq__ if strict else expected_keys.issubset
if not predicate(provided_keys):
msg = (_("Validation of dictionary's keys failed."
"Expected keys: %(expected_keys)s "
"Provided keys: %(provided_keys)s") %
{'expected_keys': expected_keys,
'provided_keys': provided_keys})
return msg
def is_attr_set(attribute):
    """Return True when *attribute* carries a real caller-supplied value."""
    return attribute is not None and attribute is not ATTR_NOT_SPECIFIED
def _validate_values(data, valid_values=None):
if data not in valid_values:
msg = (_("'%(data)s' is not in %(valid_values)s") %
{'data': data, 'valid_values': valid_values})
LOG.debug(msg)
return msg
def _validate_not_empty_string_or_none(data, max_len=None):
    # None is accepted; otherwise the value must be a non-blank string.
    if data is not None:
        return _validate_not_empty_string(data, max_len=max_len)
def _validate_not_empty_string(data, max_len=None):
    # First run the plain string checks (type and optional length cap).
    msg = _validate_string(data, max_len=max_len)
    if msg:
        return msg
    # Reject strings consisting only of whitespace.
    if not data.strip():
        return _("'%s' Blank strings are not permitted") % data
def _validate_string_or_none(data, max_len=None):
    # None is accepted; otherwise defer to the plain string validation.
    if data is not None:
        return _validate_string(data, max_len=max_len)
def _validate_string(data, max_len=None):
    # NOTE: `basestring` is the Python 2 common base of str/unicode; this
    # module is Python-2-era code.
    if not isinstance(data, basestring):
        msg = _("'%s' is not a valid string") % data
        LOG.debug(msg)
        return msg
    # max_len=None means the length is unlimited.
    if max_len is not None and len(data) > max_len:
        msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") %
               {'data': data, 'max_len': max_len})
        LOG.debug(msg)
        return msg
def _validate_boolean(data, valid_values=None):
    # Delegates to convert_to_boolean, which accepts bools, the ints 0/1
    # and the strings "true"/"false"/"0"/"1" (case-insensitive).
    try:
        convert_to_boolean(data)
    except n_exc.InvalidInput:
        msg = _("'%s' is not a valid boolean value") % data
        LOG.debug(msg)
        return msg
def _validate_range(data, valid_values=None):
    """Check that integer value is within a range provided.
    Test is inclusive. Allows either limit to be ignored, to allow
    checking ranges where only the lower or upper limit matter.
    It is expected that the limits provided are valid integers or
    the value None.
    """
    # valid_values is a (min, max) pair; UNLIMITED (None) disables a bound.
    min_value = valid_values[0]
    max_value = valid_values[1]
    try:
        data = int(data)
    except (ValueError, TypeError):
        msg = _("'%s' is not an integer") % data
        LOG.debug(msg)
        return msg
    if min_value is not UNLIMITED and data < min_value:
        msg = _("'%(data)s' is too small - must be at least "
                "'%(limit)d'") % {'data': data, 'limit': min_value}
        LOG.debug(msg)
        return msg
    if max_value is not UNLIMITED and data > max_value:
        msg = _("'%(data)s' is too large - must be no larger than "
                "'%(limit)d'") % {'data': data, 'limit': max_value}
        LOG.debug(msg)
        return msg
def _validate_no_whitespace(data):
    """Validates that input has no whitespace."""
    # NOTE(review): str.split() only yields multiple tokens for *internal*
    # whitespace, so values with purely leading/trailing whitespace
    # (e.g. " abc ") pass this check unchanged — confirm that is intended.
    if len(data.split()) > 1:
        msg = _("'%s' contains whitespace") % data
        LOG.debug(msg)
        raise n_exc.InvalidInput(error_message=msg)
    return data
def _validate_mac_address(data, valid_values=None):
    """Return None for a valid unicast MAC string, else an error message."""
    # TODO(arosen): The code in this file should be refactored
    # so it catches the correct exceptions. _validate_no_whitespace
    # raises AttributeError if data is None.
    try:
        valid_mac = netaddr.valid_mac(_validate_no_whitespace(data))
    except Exception:
        valid_mac = False
    # The original returned from inside a `finally` block, which silently
    # swallowed BaseExceptions (e.g. KeyboardInterrupt) raised during
    # validation; this structure preserves the result while letting such
    # exceptions propagate.
    if valid_mac is False:
        msg = _("'%s' is not a valid MAC address") % data
        LOG.debug(msg)
        return msg
def _validate_mac_address_or_none(data, valid_values=None):
if data is None:
return
return _validate_mac_address(data, valid_values)
def _validate_ip_address(data, valid_values=None):
try:
netaddr.IPAddress(_validate_no_whitespace(data))
except Exception:
msg = _("'%s' is not a valid IP address") % data
LOG.debug(msg)
return msg
def _validate_ip_pools(data, valid_values=None):
"""Validate that start and end IP addresses are present.
In addition to this the IP addresses will also be validated
"""
if not isinstance(data, list):
msg = _("Invalid data format for IP pool: '%s'") % data
LOG.debug(msg)
return msg
expected_keys = ['start', 'end']
for ip_pool in data:
msg = _verify_dict_keys(expected_keys, ip_pool)
if msg:
LOG.debug(msg)
return msg
for k in expected_keys:
msg = _validate_ip_address(ip_pool[k])
if msg:
LOG.debug(msg)
return msg
def _validate_fixed_ips(data, valid_values=None):
if not isinstance(data, list):
msg = _("Invalid data format for fixed IP: '%s'") % data
LOG.debug(msg)
return msg
ips = []
for fixed_ip in data:
if not isinstance(fixed_ip, dict):
msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip
LOG.debug(msg)
return msg
if 'ip_address' in fixed_ip:
# Ensure that duplicate entries are not set - just checking IP
# suffices. Duplicate subnet_id's are legitimate.
fixed_ip_address = fixed_ip['ip_address']
if fixed_ip_address in ips:
msg = _("Duplicate IP address '%s'") % fixed_ip_address
else:
msg = _validate_ip_address(fixed_ip_address)
if msg:
LOG.debug(msg)
return msg
ips.append(fixed_ip_address)
if 'subnet_id' in fixed_ip:
msg = _validate_uuid(fixed_ip['subnet_id'])
if msg:
LOG.debug(msg)
return msg
def _validate_nameservers(data, valid_values=None):
if not hasattr(data, '__iter__'):
msg = _("Invalid data format for nameserver: '%s'") % data
LOG.debug(msg)
return msg
ips = []
for ip in data:
msg = _validate_ip_address(ip)
if msg:
# This may be a hostname
msg = _validate_regex(ip, HOSTNAME_PATTERN)
if msg:
msg = _("'%s' is not a valid nameserver") % ip
LOG.debug(msg)
return msg
if ip in ips:
msg = _("Duplicate nameserver '%s'") % ip
LOG.debug(msg)
return msg
ips.append(ip)
def _validate_hostroutes(data, valid_values=None):
if not isinstance(data, list):
msg = _("Invalid data format for hostroute: '%s'") % data
LOG.debug(msg)
return msg
expected_keys = ['destination', 'nexthop']
hostroutes = []
for hostroute in data:
msg = _verify_dict_keys(expected_keys, hostroute)
if msg:
LOG.debug(msg)
return msg
msg = _validate_subnet(hostroute['destination'])
if msg:
LOG.debug(msg)
return msg
msg = _validate_ip_address(hostroute['nexthop'])
if msg:
LOG.debug(msg)
return msg
if hostroute in hostroutes:
msg = _("Duplicate hostroute '%s'") % hostroute
LOG.debug(msg)
return msg
hostroutes.append(hostroute)
def _validate_ip_address_or_none(data, valid_values=None):
if data is None:
return None
return _validate_ip_address(data, valid_values)
def _validate_subnet(data, valid_values=None):
msg = None
try:
net = netaddr.IPNetwork(_validate_no_whitespace(data))
if '/' not in data:
msg = _("'%(data)s' isn't a recognized IP subnet cidr,"
" '%(cidr)s' is recommended") % {"data": data,
"cidr": net.cidr}
else:
return
except Exception:
msg = _("'%s' is not a valid IP subnet") % data
if msg:
LOG.debug(msg)
return msg
def _validate_subnet_list(data, valid_values=None):
if not isinstance(data, list):
msg = _("'%s' is not a list") % data
LOG.debug(msg)
return msg
if len(set(data)) != len(data):
msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
LOG.debug(msg)
return msg
for item in data:
msg = _validate_subnet(item)
if msg:
return msg
def _validate_subnet_or_none(data, valid_values=None):
if data is None:
return
return _validate_subnet(data, valid_values)
def _validate_regex(data, valid_values=None):
try:
if re.match(valid_values, data):
return
except TypeError:
pass
msg = _("'%s' is not a valid input") % data
LOG.debug(msg)
return msg
def _validate_regex_or_none(data, valid_values=None):
if data is None:
return
return _validate_regex(data, valid_values)
def _validate_uuid(data, valid_values=None):
if not uuidutils.is_uuid_like(data):
msg = _("'%s' is not a valid UUID") % data
LOG.debug(msg)
return msg
def _validate_uuid_or_none(data, valid_values=None):
if data is not None:
return _validate_uuid(data)
def _validate_uuid_list(data, valid_values=None):
if not isinstance(data, list):
msg = _("'%s' is not a list") % data
LOG.debug(msg)
return msg
for item in data:
msg = _validate_uuid(item)
if msg:
LOG.debug(msg)
return msg
if len(set(data)) != len(data):
msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
LOG.debug(msg)
return msg
def _validate_dict_item(key, key_validator, data):
# Find conversion function, if any, and apply it
conv_func = key_validator.get('convert_to')
if conv_func:
data[key] = conv_func(data.get(key))
# Find validator function
# TODO(salv-orlando): Structure of dict attributes should be improved
# to avoid iterating over items
val_func = val_params = None
for (k, v) in key_validator.iteritems():
if k.startswith('type:'):
# ask forgiveness, not permission
try:
val_func = validators[k]
except KeyError:
return _("Validator '%s' does not exist.") % k
val_params = v
break
# Process validation
if val_func:
return val_func(data.get(key), val_params)
def _validate_dict(data, key_specs=None):
if not isinstance(data, dict):
msg = _("'%s' is not a dictionary") % data
LOG.debug(msg)
return msg
# Do not perform any further validation, if no constraints are supplied
if not key_specs:
return
# Check whether all required keys are present
required_keys = [key for key, spec in key_specs.iteritems()
if spec.get('required')]
if required_keys:
msg = _verify_dict_keys(required_keys, data, False)
if msg:
LOG.debug(msg)
return msg
# Perform validation and conversion of all values
# according to the specifications.
for key, key_validator in [(k, v) for k, v in key_specs.iteritems()
if k in data]:
msg = _validate_dict_item(key, key_validator, data)
if msg:
LOG.debug(msg)
return msg
def _validate_dict_or_none(data, key_specs=None):
if data is not None:
return _validate_dict(data, key_specs)
def _validate_dict_or_empty(data, key_specs=None):
if data != {}:
return _validate_dict(data, key_specs)
def _validate_dict_or_nodata(data, key_specs=None):
if data:
return _validate_dict(data, key_specs)
def _validate_non_negative(data, valid_values=None):
try:
data = int(data)
except (ValueError, TypeError):
msg = _("'%s' is not an integer") % data
LOG.debug(msg)
return msg
if data < 0:
msg = _("'%s' should be non-negative") % data
LOG.debug(msg)
return msg
def convert_to_boolean(data):
    # Accepts strings ("true"/"false"/"1"/"0", case-insensitive), bools,
    # and the ints 0/1; everything else raises InvalidInput.
    # NOTE: `basestring` is the Python 2 common base of str/unicode.
    if isinstance(data, basestring):
        val = data.lower()
        if val == "true" or val == "1":
            return True
        if val == "false" or val == "0":
            return False
    elif isinstance(data, bool):
        return data
    elif isinstance(data, int):
        if data == 0:
            return False
        elif data == 1:
            return True
    # Fall-through: unrecognized type or value.
    msg = _("'%s' cannot be converted to boolean") % data
    raise n_exc.InvalidInput(error_message=msg)
def convert_to_int(data):
    """Coerce *data* to int, raising InvalidInput when it cannot be parsed."""
    try:
        return int(data)
    except (ValueError, TypeError):
        # Wording fixed from "a integer" to "an integer" to match the
        # message used by the other numeric validators in this module.
        msg = _("'%s' is not an integer") % data
        raise n_exc.InvalidInput(error_message=msg)
def convert_kvp_str_to_list(data):
    """Convert a value of the form 'key=value' to ['key', 'value'].

    :raises: n_exc.InvalidInput if any of the strings are malformed
             (e.g. do not contain a key).
    """
    parts = data.split('=', 1)
    kvp = [part.strip() for part in parts]
    if len(kvp) != 2 or not kvp[0]:
        msg = _("'%s' is not of the form <key>=[value]") % data
        raise n_exc.InvalidInput(error_message=msg)
    return kvp
def convert_kvp_list_to_dict(kvp_list):
    """Convert a list of 'key=value' strings to a dict.

    :raises: n_exc.InvalidInput if any of the strings are malformed
             (e.g. do not contain a key) or if any
             of the keys appear more than once.
    """
    # NOTE(review): despite the docstring, duplicate keys do not raise —
    # their values are collected into a set and returned as a list.
    if kvp_list == ['True']:
        # No values were provided (i.e. '--flag-name')
        return {}
    kvp_map = {}
    for kvp_str in kvp_list:
        key, value = convert_kvp_str_to_list(kvp_str)
        kvp_map.setdefault(key, set())
        kvp_map[key].add(value)
    # NOTE: dict.iteritems() is Python 2 only.
    return dict((x, list(y)) for x, y in kvp_map.iteritems())
def convert_none_to_empty_list(value):
    """Map None to [] and pass every other value through unchanged."""
    if value is None:
        return []
    return value
def convert_none_to_empty_dict(value):
    """Map None to {} and pass every other value through unchanged."""
    if value is None:
        return {}
    return value
def convert_to_list(data):
    """Wrap *data* in a list: None -> [], iterables copied, scalars wrapped.

    The iterable branch is keyed on the presence of '__iter__', so plain
    strings behave differently between Python 2 (wrapped whole) and
    Python 3 (split into characters).
    """
    if hasattr(data, '__iter__'):
        return list(data)
    return [] if data is None else [data]
HOSTNAME_PATTERN = ("(?=^.{1,254}$)(^(?:(?!\d+\.|-)[a-zA-Z0-9_\-]"
"{1,63}(?<!-)\.?)+(?:[a-zA-Z]{2,})$)")
HEX_ELEM = '[0-9A-Fa-f]'
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
HEX_ELEM + '{4}', HEX_ELEM + '{4}',
HEX_ELEM + '{12}'])
# Note: In order to ensure that the MAC address is unicast the first byte
# must be even.
MAC_PATTERN = "^%s[aceACE02468](:%s{2}){5}$" % (HEX_ELEM, HEX_ELEM)
# Dictionary that maintains a list of validation functions
validators = {'type:dict': _validate_dict,
'type:dict_or_none': _validate_dict_or_none,
'type:dict_or_empty': _validate_dict_or_empty,
'type:dict_or_nodata': _validate_dict_or_nodata,
'type:fixed_ips': _validate_fixed_ips,
'type:hostroutes': _validate_hostroutes,
'type:ip_address': _validate_ip_address,
'type:ip_address_or_none': _validate_ip_address_or_none,
'type:ip_pools': _validate_ip_pools,
'type:mac_address': _validate_mac_address,
'type:mac_address_or_none': _validate_mac_address_or_none,
'type:nameservers': _validate_nameservers,
'type:non_negative': _validate_non_negative,
'type:range': _validate_range,
'type:regex': _validate_regex,
'type:regex_or_none': _validate_regex_or_none,
'type:string': _validate_string,
'type:string_or_none': _validate_string_or_none,
'type:not_empty_string': _validate_not_empty_string,
'type:not_empty_string_or_none':
_validate_not_empty_string_or_none,
'type:subnet': _validate_subnet,
'type:subnet_list': _validate_subnet_list,
'type:subnet_or_none': _validate_subnet_or_none,
'type:uuid': _validate_uuid,
'type:uuid_or_none': _validate_uuid_or_none,
'type:uuid_list': _validate_uuid_list,
'type:values': _validate_values,
'type:boolean': _validate_boolean}
# Define constants for base resource name
# Note: a default of ATTR_NOT_SPECIFIED indicates that an
# attribute is not required, but will be generated by the plugin
# if it is not specified. Particularly, a value of ATTR_NOT_SPECIFIED
# is different from an attribute that has been specified with a value of
# None. For example, if 'gateway_ip' is omitted in a request to
# create a subnet, the plugin will receive ATTR_NOT_SPECIFIED
# and the default gateway_ip will be generated.
# However, if gateway_ip is specified as None, this means that
# the subnet does not have a gateway IP.
# The following is a short reference for understanding attribute info:
# default: default value of the attribute (if missing, the attribute
# becomes mandatory.
# allow_post: the attribute can be used on POST requests.
# allow_put: the attribute can be used on PUT requests.
# validate: specifies rules for validating data in the attribute.
# convert_to: transformation to apply to the value before it is returned
# is_visible: the attribute is returned in GET responses.
# required_by_policy: the attribute is required by the policy engine and
# should therefore be filled by the API layer even if not present in
# request body.
# enforce_policy: the attribute is actively part of the policy enforcing
# mechanism, ie: there might be rules which refer to this attribute.
# Identify the attribute used by a resource to reference another resource
RESOURCE_ATTRIBUTE_MAP = {}
PLURALS = {'extensions': 'extension'}
EXT_NSES = {}
# Namespaces to be added for backward compatibility
# when existing extended resource attributes are
# provided by other extension than original one.
EXT_NSES_BC = {}
def get_attr_metadata():
    # Serialization metadata consumed by the API layer: plural names plus
    # the XML namespaces (including the backward-compat extension ones).
    return {'plurals': PLURALS,
            'xmlns': constants.XML_NS_V10,
            constants.EXT_NS: EXT_NSES,
            constants.EXT_NS_COMP: EXT_NSES_BC}
| [
"isaku.yamahata@intel.com"
] | isaku.yamahata@intel.com |
9aec651b26e2ac105c1b0d295e746bb1ed5c2c5b | 2ce27b05f45cef6ce3ae5c02b8e83e548def2fc6 | /INTERMEDIATE/Tuple/Slicing Tuple.py | 9e8bd24716bf46495184cff8f503efa307ce49d6 | [] | no_license | Ajay2521/Python | 775b7d99736e83e4d0c37302b91d1413dd2c0d3b | a426dd7717de8a5e60e584d208ae7120bb84c1b3 | refs/heads/master | 2022-12-01T17:49:12.672061 | 2020-08-15T14:55:12 | 2020-08-15T14:55:12 | 273,632,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | # In this lets see more about "tuple Datatype" in Python.
# A tuple in Python is used to store the sequence of various types of data.
# The items in the tuple are separated with the comma ( , ) and enclosed with the square brackets [ ].
# Characteristics of tuple are as follow :
# 1) tuple are "Ordered".
# 2) Elements of the tuple can be accessed by using "Index" as same as String.
# 3) tuple are "Mutable".
# 4) tuple can able to store various data elements.
# Here is the Program to understand "Slicing the tuple"
# The elements of the tuple can be accessed by using the slice operator [].
# The index starts from 0 and goes to length - 1 of the length of tuple.
# Syntax for getting sub - tuple by Slice and range is
# tuple_variable ( Start : Stop : Step Size )
# Start - Is the Starting Index position of the tuple.
# Stop - Is the Last Index position of the tuple.
# Step Size - Is the used to skip the nth element within the start and stop.
# Named `numbers` so the built-in `tuple` type is not shadowed.
numbers = (1, 2, 3, 4, 5, 6)
# Single-element access by index (output strings kept identical).
print("\nSlicing element in the index place 3 : ", numbers[3])
# Slicing the elements using Range.
print("\nAll the value of the \"tuple\" is : ", numbers[:])
print("\nAll the elements after the index value 2 is : ", numbers[2:])
print("\nAll the elements in the range from index value 1 to index value 4 is : ", numbers[1:4])
print("\nAll the elements in the range from index value 0 to index value 5 with the Step size oftwo element is : ", numbers[0:5:2])
| [
"noreply@github.com"
] | Ajay2521.noreply@github.com |
5cc2c2bc4435d633df0d71b4786e520e48900f98 | 557314cb5444754cedc04d0e4c25654537268c9b | /pytorch/rnn/dataset.py | 671b0016776803b1d505899517c1dcf1c8142d44 | [] | no_license | wj-Mcat/python-code-snippet | 9a6d1a7ad4a95a87f64051aa2ce3ef5c3910ddc4 | c5ca58a7779676e20f15a631484c4f20e5c94688 | refs/heads/master | 2022-06-28T21:42:21.942328 | 2020-05-10T13:08:50 | 2020-05-10T13:08:50 | 262,786,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from torch.utils import data
class ATIS(data.Dataset):
    """A very simple map-style Dataset over pre-encoded ATIS examples.

    Indexing returns the aligned (encoded utterance, slot labels, intent)
    triple for one example.
    """

    def __init__(self, X, slots, intents):
        self.X = X
        self.slots = slots
        self.intents = intents
        # Cache the example count up front (X is array-like, shape (N, ...)).
        self.size = X.shape[0]

    def __len__(self):
        return self.size

    def __getitem__(self, index):
        sample = (self.X[index], self.slots[index], self.intents[index])
        return sample
| [
"1435130236@qq.com"
] | 1435130236@qq.com |
666bf4a3c1b1ac5a795c6d9f8c4380cf23a17a5b | c6f22a6155b0627bf792a321fccba2f5d3f1bf19 | /backend/home/migrations/0004_ghfdgfdgfd.py | a5ef0e3edb0bb387850c4161813dc5aa09fc29ff | [] | no_license | crowdbotics-apps/mobile-4-dec-dev-16281 | 938d8902c0f61057a968e09cb114a7a0a72966aa | e842ed5457ab19bc66c1de400159e919b33b29ba | refs/heads/master | 2023-01-22T09:31:48.333341 | 2020-12-04T14:21:17 | 2020-12-04T14:21:17 | 318,406,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | # Generated by Django 2.2.17 on 2020-12-04 06:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the `Ghfdgfdgfd` model with an
    auto primary key and a required `hgfhfh` big-integer column."""
    dependencies = [
        ("home", "0003_gghhh"),
    ]
    operations = [
        migrations.CreateModel(
            name="Ghfdgfdgfd",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("hgfhfh", models.BigIntegerField()),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
516f9676a2cae761aaa56ca9c7a04d95db500ed2 | 4f0385a90230c0fe808e8672bb5b8abcceb43783 | /LNH/day4-teacher/2 装饰器/4 装饰器修订.py | caadd80dff07be7347d92b71cc44056d91b65a9e | [] | no_license | lincappu/pycharmlearningproject | 4084dab7adde01db9fa82a12769a67e8b26b3382 | b501523e417b61373688ba12f11b384166baf489 | refs/heads/master | 2023-07-10T05:21:15.163393 | 2023-06-29T14:02:35 | 2023-06-29T14:02:35 | 113,925,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | # 有参装饰器的修订
# 1.参数
# 2.返回值
# 3.函数基本信息。
# import time
# from functools import wraps
#
# def timmer(func):
# @wraps(func)
# def inner(*args,**kwargs):
# start_time=time.time()
# res=func(*args,**kwargs)
# end_time=time.time()
# print('run time is :[%s]' % (end_time - start_time))
# return res
# inner.__doc__=
# return inner
#
# @timmer # index=timmer(index)
# def index(name):
# time.sleep(1)
# print('functiion index')
# print(name)
# return 123
#
#
# res=index('fls')
# print(res)
#
#
# import time
# from functools import wraps
#
# def timmer(func):
# @wraps(func)
# def inner(*args,**kwargs):
# start_time=time.time()
# res=func(*args,**kwargs)
# stop_time=time.time()
# print('run time is :[%s]' %(stop_time-start_time))
# return res
#
# return inner
#
# @timmer
# def index():
# '''
# index function
# :return:
# '''
# time.sleep(3)
# print('welcome to index page')
# return 123
#
# @timmer #home=timmer(home) #home=inner
# def home(name):
# time.sleep(2)
# print('welcome %s to home page' %name)
# return 456
#
# # res=index() # res=inner()
# # print(res)
# #
# # res=home('egon') #inner('egon')
# # print(res)
#
# # print(index.__doc__)
# print(help(index))
| [
"lincappu@163.com"
] | lincappu@163.com |
5a4560c909ecfcd1bb9a56752e943418a440e91d | 75a2ad10d18aea735eaf3e859eb3988d94e9c36a | /CodeUp/예제/1295_알파벳대소문자변환.py | 02812ad0fd6a295035d54f34dc0d50e34174084c | [] | no_license | sbtiffanykim/problem-solving | d2679b0405f9c2397d7af780721066bfbd812e32 | 10a6ec90d29c9126f56c9be2ee696ce30ca30bd5 | refs/heads/master | 2023-04-25T18:26:56.511663 | 2021-06-10T12:30:13 | 2021-06-10T12:30:13 | 360,552,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | """
#1295: 알파벳 대소문자 변환
"""
def swap_ascii_case(text):
    """Return *text* with ASCII letters case-swapped; other chars unchanged.

    Uses the same ord()/chr() arithmetic as the original per-character
    version, but joins into one output string and is wrapped in a function
    so it can be tested without stdin.
    """
    swapped = []
    for ch in text:
        if "a" <= ch <= "z":
            swapped.append(chr(ord(ch) - ord("a") + ord("A")))
        elif "A" <= ch <= "Z":
            swapped.append(chr(ord(ch) - ord("A") + ord("a")))
        else:
            swapped.append(ch)
    return "".join(swapped)


if __name__ == "__main__":
    # Same CLI behavior as before: read one line, emit the swapped text
    # with no trailing newline.
    print(swap_ascii_case(input()), end="")
| [
"sbtiffanykim@gmail.com"
] | sbtiffanykim@gmail.com |
40f3ae34a777b65d2d3d26bfa8d4ea1629e3ab06 | a8b21ea5633df0f5f75b3ee906ab10ce2414f6b7 | /bandwitch/list_common_enzymes/data/__init__.py | 0fcf080bb0f594bb27fd1c47d09c1c5d87a44bac | [
"MIT"
] | permissive | Edinburgh-Genome-Foundry/BandWitch | 34229d336899325286686faf19ba97f6db6eba2a | b6597077fc8ba03e7a7ef271bbd24f81f90632f2 | refs/heads/master | 2023-07-31T22:34:18.651189 | 2022-06-14T12:32:10 | 2022-06-14T12:32:10 | 106,592,382 | 15 | 0 | MIT | 2020-09-20T18:50:07 | 2017-10-11T18:19:45 | Python | UTF-8 | Python | false | false | 608 | py | """Loads REBASE enzymes infos (mehtylation sensitivity and providers)"""
__all__ = ['enzymes_infos']
import os.path as osp
# The CSV ships alongside this module.
csv_path = osp.join(osp.dirname(osp.realpath(__file__)), "enzymes_infos.csv")
with open(csv_path, "r") as f:
    _lines = f.read().split("\n")
# First row holds the semicolon-separated column names.
_fields = _lines[0].split(";")
# Map raw CSV cells to Python values ("N/A" -> False, small numeric
# strings -> ints).
# NOTE(review): both "+" and "-" map to True here — confirm "-" was not
# meant to be False.
_replacements = dict([("N/A", False), ("+", True), ("-", True)] +
                     [(str(i), i) for i in range(50)])
# enzyme name (first column) -> {field: parsed value} for each data row.
enzymes_infos = {
    _line.split(";")[0]: dict(zip(_fields, [
        _replacements.get(e, e) for e in _line.split(";")]))
    for _line in _lines[1:]
}
| [
"valentin.zulkower@gmail.com"
] | valentin.zulkower@gmail.com |
171264c4369b440af1782bfa01718fa47956f6f3 | 574c42033f1dc6028ae282417a8cb2485fb0cb22 | /tests/test_engine.py | 2dbe07bcf21552ddfc13b72e38a02996fd1e28ec | [
"MIT"
] | permissive | jasonlockporcelane/bloop | 7e8b49382d2542fadb6da9525c7c7de5e78bbf9e | f4dbefd3743b97d15bc46f6b90c00b261911e274 | refs/heads/master | 2021-01-16T21:15:57.663153 | 2016-06-13T06:08:52 | 2016-06-13T06:08:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,385 | py | import bloop
import bloop.engine
import bloop.exceptions
import bloop.tracking
import bloop.util
import pytest
import uuid
from unittest.mock import Mock
from test_models import ComplexModel, User
def test_missing_objects(engine):
    """
    When objects aren't loaded, ObjectsNotFound is raised with a list of
    missing objects
    """
    # Simulate DynamoDB returning no items for the requested keys.
    engine.client.batch_get_items = lambda *args, **kwargs: {}
    missing = [User(id=uuid.uuid4()) for _ in range(3)]
    with pytest.raises(bloop.exceptions.NotModified) as excinfo:
        engine.load(missing)
    assert set(excinfo.value.objects) == set(missing)
def test_dump_key(engine):
    # Exercises dump_key for both key layouts: hash-only (User) and
    # hash+range (HashAndRange).
    class HashAndRange(bloop.new_base()):
        foo = bloop.Column(bloop.Integer, hash_key=True)
        bar = bloop.Column(bloop.Integer, range_key=True)
    engine.bind(base=HashAndRange)
    user = User(id=uuid.uuid4())
    user_key = {"id": {"S": str(user.id)}}
    assert bloop.engine.dump_key(engine, user) == user_key
    obj = HashAndRange(foo=4, bar=5)
    # Numeric key values are dumped as DynamoDB "N" strings.
    obj_key = {"bar": {"N": "5"}, "foo": {"N": "4"}}
    assert bloop.engine.dump_key(engine, obj) == obj_key
def test_load_object(engine):
user_id = uuid.uuid4()
expected = {"User": {"Keys": [{"id": {"S": str(user_id)}}],
"ConsistentRead": True}}
response = {"User": [{"age": {"N": 5},
"name": {"S": "foo"},
"id": {"S": str(user_id)}}]}
def respond(input):
assert input == expected
return response
engine.client.batch_get_items = respond
user = User(id=user_id)
engine.load(user, consistent=True)
assert user.age == 5
assert user.name == "foo"
assert user.id == user_id
def test_load_objects(engine):
user1 = User(id=uuid.uuid4())
user2 = User(id=uuid.uuid4())
expected = {"User": {"Keys": [{"id": {"S": str(user1.id)}},
{"id": {"S": str(user2.id)}}],
"ConsistentRead": False}}
response = {"User": [{"age": {"N": 5},
"name": {"S": "foo"},
"id": {"S": str(user1.id)}},
{"age": {"N": 10},
"name": {"S": "bar"},
"id": {"S": str(user2.id)}}]}
def respond(input):
assert bloop.util.ordered(input) == bloop.util.ordered(expected)
return response
engine.client.batch_get_items = respond
engine.load((user1, user2))
assert user1.age == 5
assert user1.name == "foo"
assert user2.age == 10
assert user2.name == "bar"
def test_load_duplicate_objects(engine):
"""Duplicate objects are handled correctly when loading"""
user = User(id=uuid.uuid4())
expected = {"User": {"Keys": [{"id": {"S": str(user.id)}}],
"ConsistentRead": False}}
response = {"User": [{"age": {"N": 5},
"name": {"S": "foo"},
"id": {"S": str(user.id)}}]}
def respond(input):
assert bloop.util.ordered(input) == bloop.util.ordered(expected)
return response
engine.client.batch_get_items = respond
engine.load((user, user))
assert user.age == 5
assert user.name == "foo"
def test_load_missing_attrs(engine):
"""
When an instance of a Model is loaded into, existing attributes should be
overwritten with new values, or if there is no new value, should be deleted
"""
obj = User(id=uuid.uuid4(), age=4, name="user")
response = {"User": [{"age": {"N": 5},
"id": {"S": str(obj.id)}}]}
engine.client.batch_get_items = lambda input: response
engine.load(obj)
assert obj.age == 5
assert obj.name is None
def test_load_dump_unbound(engine):
class Model(bloop.new_base()):
id = bloop.Column(bloop.UUID, hash_key=True)
counter = bloop.Column(bloop.Integer)
obj = Model(id=uuid.uuid4(), counter=5)
value = {"User": [{"counter": {"N": 5}, "id": {"S": str(obj.id)}}]}
with pytest.raises(bloop.exceptions.UnboundModel) as excinfo:
engine._load(Model, value)
assert excinfo.value.model is Model
assert excinfo.value.obj is None
with pytest.raises(bloop.exceptions.UnboundModel) as excinfo:
engine._dump(Model, obj)
assert excinfo.value.model is Model
assert excinfo.value.obj is obj
def test_load_dump_subclass(engine):
"""Only the immediate Columns of a model should be dumped"""
class Admin(User):
admin_id = bloop.Column(bloop.Integer, hash_key=True)
engine.bind(base=User)
admin = Admin(admin_id=3)
# Set an attribute that would be a column on the parent class, but should
# have no meaning for the subclass
admin.email = "admin@domain.com"
dumped_admin = {"admin_id": {"N": "3"}}
assert engine._dump(Admin, admin) == dumped_admin
# Inject a value that would have meaning for a column on the parent class,
# but should not be loaded for the subclass
dumped_admin["email"] = {"S": "support@foo.com"}
same_admin = engine._load(Admin, dumped_admin)
assert not hasattr(same_admin, "email")
def test_load_dump_unknown(engine):
class NotModeled:
pass
obj = NotModeled()
value = {"User": [{"age": {"N": 5},
"name": {"S": "foo"},
"id": {"S": str(uuid.uuid4())}}]}
with pytest.raises(ValueError):
engine._load(NotModeled, value)
with pytest.raises(ValueError):
engine._dump(NotModeled, obj)
def test_load_missing_key(engine):
"""Trying to load objects with missing hash and range keys raises"""
user = User(age=2)
with pytest.raises(ValueError):
engine.load(user)
complex_models = [
ComplexModel(),
ComplexModel(name="no range"),
ComplexModel(date="no hash")
]
for model in complex_models:
with pytest.raises(ValueError):
engine.load(model)
@pytest.mark.parametrize(
"atomic_mode", [True, False], ids=lambda v: "atomic:"+str(v))
def test_load_snapshots(engine, atomic_mode):
"""Loading builds a snapshot for future atomic operations"""
user = User(id=uuid.uuid4())
# In the case of missing data, load may not return fields
# (or in the case of multi-view tables, non-mapped data)
engine.client.batch_get_items.return_value = {
"User": [
{"age": {"N": 5},
"id": {"S": str(user.id)},
"extra_field": {"freeform data": "not parsed"}}]}
engine.config["atomic"] = atomic_mode
engine.load(user)
# Cached snapshots are in dumped form
expected_condition = (
(User.age == {"N": "5"}) &
(User.email.is_(None)) &
(User.id == {"S": str(user.id)}) &
(User.joined.is_(None)) &
(User.name.is_(None))
)
actual_condition = bloop.tracking.get_snapshot(user)
assert actual_condition == expected_condition
def test_save_twice(engine):
"""Save sends full local values, not just deltas from last save"""
user = User(id=uuid.uuid4(), age=5)
expected = {
"Key": {"id": {"S": str(user.id)}},
"TableName": "User",
"ExpressionAttributeNames": {"#n0": "age"},
"ExpressionAttributeValues": {":v1": {"N": "5"}},
"UpdateExpression": "SET #n0=:v1"}
engine.save(user)
engine.save(user)
engine.client.update_item.assert_called_with(expected)
assert engine.client.update_item.call_count == 2
def test_save_list_with_condition(engine):
users = [User(id=uuid.uuid4()) for _ in range(3)]
condition = User.id.is_(None)
expected_calls = [
{"ConditionExpression": "(attribute_not_exists(#n0))",
"ExpressionAttributeNames": {"#n0": "id"},
"Key": {"id": {"S": str(user.id)}},
"TableName": "User"}
for user in users]
engine.save(users, condition=condition)
for expected in expected_calls:
engine.client.update_item.assert_any_call(expected)
assert engine.client.update_item.call_count == 3
def test_save_single_with_condition(engine):
user = User(id=uuid.uuid4())
condition = User.id.is_(None)
expected = {"TableName": "User",
"ExpressionAttributeNames": {"#n0": "id"},
"ConditionExpression": "(attribute_not_exists(#n0))",
"Key": {"id": {"S": str(user.id)}}}
engine.save(user, condition=condition)
engine.client.update_item.assert_called_once_with(expected)
def test_save_atomic_new(engine):
"""atomic save on new object should expect no columns to exist"""
user = User(id=uuid.uuid4())
expected = {
'ExpressionAttributeNames': {
'#n0': 'age', '#n3': 'j', '#n1': 'email',
'#n4': 'name', '#n2': 'id'},
'Key': {'id': {'S': str(user.id)}},
'TableName': 'User',
'ConditionExpression': (
'((attribute_not_exists(#n0)) AND (attribute_not_exists(#n1)) '
'AND (attribute_not_exists(#n2)) AND (attribute_not_exists(#n3))'
' AND (attribute_not_exists(#n4)))')}
engine.config["atomic"] = True
engine.save(user)
engine.client.update_item.assert_called_once_with(expected)
def test_save_atomic_condition(atomic):
user = User(id=uuid.uuid4())
# Pretend the id was already persisted in dynamo
bloop.tracking.sync(user, atomic)
# Mutate a field; part of the update but not an expected condition
user.name = "new_foo"
# Condition on the mutated field with a different value
condition = User.name == "expect_foo"
expected = {
"ExpressionAttributeNames": {"#n2": "id", "#n0": "name"},
"TableName": "User",
"ExpressionAttributeValues": {
":v4": {"S": "expect_foo"},
":v1": {"S": "new_foo"},
":v3": {"S": str(user.id)}},
'ConditionExpression': "((#n2 = :v3) AND (#n0 = :v4))",
"UpdateExpression": "SET #n0=:v1",
"Key": {"id": {"S": str(user.id)}}}
atomic.save(user, condition=condition)
atomic.client.update_item.assert_called_once_with(expected)
def test_save_condition_key_only(engine):
"""
Even when the diff is empty, an UpdateItem should be issued
(in case this is really a create - the item doesn't exist yet)
"""
user = User(id=uuid.uuid4())
condition = User.id.is_(None)
expected = {
"ConditionExpression": "(attribute_not_exists(#n0))",
"TableName": "User",
"ExpressionAttributeNames": {"#n0": "id"},
"Key": {"id": {"S": str(user.id)}}}
engine.save(user, condition=condition)
engine.client.update_item.assert_called_once_with(expected)
def test_save_set_only(engine):
user = User(id=uuid.uuid4())
# Expect a SET on email
user.email = "foo@domain.com"
expected = {
"Key": {"id": {"S": str(user.id)}},
"ExpressionAttributeNames": {"#n0": "email"},
"TableName": "User",
"UpdateExpression": "SET #n0=:v1",
"ExpressionAttributeValues": {":v1": {"S": "foo@domain.com"}}}
engine.save(user)
engine.client.update_item.assert_called_once_with(expected)
def test_save_del_only(engine):
user = User(id=uuid.uuid4(), age=4)
# Expect a REMOVE on age
del user.age
expected = {
"Key": {"id": {"S": str(user.id)}},
"ExpressionAttributeNames": {"#n0": "age"},
"TableName": "User",
"UpdateExpression": "REMOVE #n0"}
engine.save(user)
engine.client.update_item.assert_called_once_with(expected)
def test_delete_multiple_condition(engine):
users = [User(id=uuid.uuid4()) for _ in range(3)]
condition = User.id == "foo"
expected_calls = [
{"Key": {"id": {"S": str(user.id)}},
"ExpressionAttributeValues": {":v1": {"S": "foo"}},
"ExpressionAttributeNames": {"#n0": "id"},
"ConditionExpression": "(#n0 = :v1)",
"TableName": "User"}
for user in users]
engine.delete(users, condition=condition)
for expected in expected_calls:
engine.client.delete_item.assert_any_call(expected)
assert engine.client.delete_item.call_count == 3
def test_delete_atomic(atomic):
user = User(id=uuid.uuid4())
# Manually snapshot so we think age is persisted
bloop.tracking.sync(user, atomic)
expected = {
'ConditionExpression': '(#n0 = :v1)',
'ExpressionAttributeValues': {':v1': {'S': str(user.id)}},
'TableName': 'User',
'Key': {'id': {'S': str(user.id)}},
'ExpressionAttributeNames': {'#n0': 'id'}}
atomic.delete(user)
atomic.client.delete_item.assert_called_once_with(expected)
def test_delete_atomic_new(engine):
"""atomic delete on new object should expect no columns to exist"""
user = User(id=uuid.uuid4())
expected = {
'TableName': 'User',
'ExpressionAttributeNames': {
'#n2': 'id', '#n0': 'age', '#n4': 'name',
'#n3': 'j', '#n1': 'email'},
'Key': {'id': {'S': str(user.id)}},
'ConditionExpression': (
'((attribute_not_exists(#n0)) AND (attribute_not_exists(#n1)) '
'AND (attribute_not_exists(#n2)) AND (attribute_not_exists(#n3))'
' AND (attribute_not_exists(#n4)))')}
engine.config["atomic"] = True
engine.delete(user)
engine.client.delete_item.assert_called_once_with(expected)
def test_delete_new(engine):
"""
When an object is first created, a non-atomic delete shouldn't expect
anything.
"""
user_id = uuid.uuid4()
user = User(id=user_id)
expected = {
'TableName': 'User',
'Key': {'id': {'S': str(user_id)}}}
engine.delete(user)
engine.client.delete_item.assert_called_once_with(expected)
def test_delete_atomic_condition(atomic):
user_id = uuid.uuid4()
user = User(id=user_id, email='foo@bar.com')
# Manually snapshot so we think age is persisted
bloop.tracking.sync(user, atomic)
expected = {
'ExpressionAttributeNames': {
'#n2': 'id', '#n4': 'name', '#n0': 'email'},
'ConditionExpression':
'((#n0 = :v1) AND (#n2 = :v3) AND (#n4 = :v5))',
'TableName': 'User',
'ExpressionAttributeValues': {
':v5': {'S': 'foo'}, ':v1': {'S': 'foo@bar.com'},
':v3': {'S': str(user_id)}},
'Key': {'id': {'S': str(user_id)}}}
atomic.delete(user, condition=User.name.is_("foo"))
atomic.client.delete_item.assert_called_once_with(expected)
def test_query(engine):
""" Engine.query supports model and index-based queries """
index_query = engine.query(User.by_email)
assert index_query.model is User
assert index_query.index is User.by_email
model_query = engine.query(User)
assert model_query.model is User
assert model_query.index is None
def test_scan(engine):
""" Engine.scan supports model and index-based queries """
index_scan = engine.scan(User.by_email)
assert index_scan.model is User
assert index_scan.index is User.by_email
model_scan = engine.scan(User)
assert model_scan.model is User
assert model_scan.index is None
def test_context(engine):
engine.config["atomic"] = True
user_id = uuid.uuid4()
user = User(id=user_id, name="foo")
expected = {
"TableName": "User",
"UpdateExpression": "SET #n0=:v1",
"ExpressionAttributeValues": {":v1": {"S": "foo"}},
"ExpressionAttributeNames": {"#n0": "name"},
"Key": {"id": {"S": str(user_id)}}}
with engine.context(atomic=False) as eng:
eng.save(user)
engine.client.update_item.assert_called_once_with(expected)
# EngineViews can't bind
with pytest.raises(RuntimeError):
with engine.context() as eng:
eng.bind(base=bloop.new_base())
def test_unbound_engine_view():
"""Trying to mutate an unbound model through an EngineView fails"""
class UnboundModel(bloop.new_base()):
id = bloop.Column(bloop.String, hash_key=True)
instance = UnboundModel(id="foo")
with pytest.raises(bloop.exceptions.UnboundModel):
with bloop.Engine().context() as view:
view._dump(UnboundModel, instance)
def test_bind_non_model():
"""Can't bind things that don't subclass new_base()"""
engine = bloop.Engine()
engine.client = Mock(spec=bloop.client.Client)
with pytest.raises(ValueError):
engine.bind(base=object())
def test_bind_skip_abstract_models():
class Abstract(bloop.new_base()):
class Meta:
abstract = True
class Concrete(Abstract):
pass
class AlsoAbstract(Concrete):
class Meta:
abstract = True
class AlsoConcrete(AlsoAbstract):
pass
engine = bloop.Engine()
engine.client = Mock(spec=bloop.client.Client)
engine.bind(base=Abstract)
engine.client.create_table.assert_any_call(Concrete)
engine.client.validate_table.assert_any_call(Concrete)
engine.client.create_table.assert_any_call(AlsoConcrete)
engine.client.validate_table.assert_any_call(AlsoConcrete)
def test_bind_concrete_base():
engine = bloop.Engine()
engine.client = Mock(spec=bloop.client.Client)
class Concrete(bloop.new_base()):
pass
engine.bind(base=Concrete)
engine.client.create_table.assert_called_once_with(Concrete)
engine.client.validate_table.assert_called_once_with(Concrete)
def test_bind_different_engines():
first_engine = bloop.Engine()
first_engine.client = Mock(spec=bloop.client.Client)
second_engine = bloop.Engine()
second_engine.client = Mock(spec=bloop.client.Client)
class Concrete(bloop.new_base()):
pass
first_engine.bind(base=Concrete)
second_engine.bind(base=Concrete)
# Create/Validate are only called once per model, regardless of how many
# times the model is bound to different engines
first_engine.client.create_table.assert_called_once_with(Concrete)
first_engine.client.validate_table.assert_called_once_with(Concrete)
second_engine.client.create_table.assert_not_called()
second_engine.client.validate_table.assert_not_called()
# The model (and its columns) are bound to each engine's TypeEngine,
# regardless of how many times the model has been bound already
assert Concrete in first_engine.type_engine.bound_types
assert Concrete in second_engine.type_engine.bound_types
@pytest.mark.parametrize("op, plural", [
("save", True), ("load", True), ("delete", True),
("scan", False), ("query", False)], ids=str)
def test_unbound_operations_raise(engine, op, plural):
class Abstract(bloop.new_base()):
class Meta:
abstract = True
id = bloop.Column(bloop.Integer, hash_key=True)
engine.bind(base=Abstract)
engine.bind(base=User)
abstract = Abstract(id=5)
concrete = User(age=5)
with pytest.raises(bloop.exceptions.AbstractModelException) as excinfo:
operation = getattr(engine, op)
operation(abstract)
assert excinfo.value.model is abstract
if plural:
with pytest.raises(bloop.exceptions.AbstractModelException) as excinfo:
operation = getattr(engine, op)
operation([abstract, concrete])
assert excinfo.value.model is abstract
| [
"joe.mcross@gmail.com"
] | joe.mcross@gmail.com |
8a74c28a08a46463894b8bae23427862d8af28e7 | 76b1e713a3057e6f08abc116814af00891dbc2ef | /store/views/orders.py | 29e2e13ea138449e13d79a05a236a2f88ac6878f | [] | no_license | Jay28497/Django-Ecommerce-Website | ed17f6536fe4be4d6db658c46999bb05ec22d3f8 | 2697d376c8ff2720720183c0e475b188ff7b0e33 | refs/heads/master | 2023-03-31T15:20:56.008251 | 2021-04-10T12:21:08 | 2021-04-10T12:21:08 | 355,427,413 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from django.shortcuts import render
from django.views import View
from store.models.orders import Order
class OrderView(View):
def get(self, request):
customer = request.session.get('customer_id')
orders = Order.get_orders_by_customer(customer)
# print(customer, orders)
return render(request, 'store/orders.html', {'orders': orders})
| [
"jaykanjariya28@gmail.com"
] | jaykanjariya28@gmail.com |
7309d255ad74136c1423ec453f4a007ab3c5d182 | 8191c12eb7ebd4296b4e5d35e7de9b53bc767a5a | /docs/examples/configure_sm_pairing_properties.py | f0f907051e4833d81603f28245d9c3c837595939 | [
"MIT"
] | permissive | jreynders/BLESuite-1 | 758404823c71fb15ff8326a5611aed742065bda4 | 8335d47d76919b79f00cea72a1e58524f3440826 | refs/heads/master | 2023-02-20T22:21:35.891269 | 2022-11-08T22:09:06 | 2022-11-08T22:09:06 | 168,422,668 | 0 | 0 | MIT | 2023-02-08T20:01:18 | 2019-01-30T22:04:54 | Python | UTF-8 | Python | false | false | 1,763 | py | from blesuite.connection_manager import BLEConnectionManager
adapter = 0
role = 'central'
io_cap = 0x03
oob = 0x00
mitm = 0x01
bond = 0x01
lesc = 0x01
keypress = 0x00
ct2 = 0x01
rfu = 0x00
max_key_size = 16
initiator_key_distribution = 0x01
responder_key_distribution = 0x01
peer_device_address = "AA:BB:CC:DD:EE:FF"
peer_address_type = "public"
with BLEConnectionManager(adapter, role) as connection_manager:
# Get default Security Manager pairing properties to see baseline
print connection_manager.get_security_manager_protocol_default_pairing_parameters()
# Sets the default Security Manager pairing properties for all established connections
connection_manager.set_security_manager_protocol_default_pairing_parameters(io_cap, oob, mitm, bond, lesc,
keypress, ct2, rfu, max_key_size,
initiator_key_distribution,
responder_key_distribution)
print connection_manager.get_security_manager_protocol_default_pairing_parameters()
# initialize BLEConnection object
connection = connection_manager.init_connection(peer_device_address, peer_address_type)
# create connection
connection_manager.connect(connection)
# modify pairing parameters for just this connection
connection_manager.set_security_manager_protocol_pairing_parameters_for_connection(connection, io_cap=0x02)
# show the changes for the security manager made for the connection made in the last step
print connection_manager.get_security_manager_protocol_pairing_parameters_for_connection(connection)
| [
"taylor.trabun@nccgroup.trust"
] | taylor.trabun@nccgroup.trust |
f714ff4ca4736b9aef983bab85e1ddfc87a679aa | 19a32440205b2caeec67c73c10d917b5fb30a86a | /test/test_job_statistics.py | 6d40c6e536931197906776c433f0367c53ee3a96 | [
"MIT",
"Apache-2.0"
] | permissive | marrotte/isilon_sdk_python | 480e84312f5924a506aeb09c9c7cae79a2b9b7f4 | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | refs/heads/master | 2020-03-23T07:31:40.376316 | 2016-06-07T23:44:31 | 2016-06-07T23:44:31 | 141,277,076 | 1 | 0 | MIT | 2018-07-17T11:02:08 | 2018-07-17T11:02:08 | null | UTF-8 | Python | false | false | 1,239 | py | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.job_statistics import JobStatistics
class TestJobStatistics(unittest.TestCase):
""" JobStatistics unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testJobStatistics(self):
"""
Test JobStatistics
"""
model = swagger_client.models.job_statistics.JobStatistics()
if __name__ == '__main__':
unittest.main() | [
"Alex.Pecoraro@isilon.com"
] | Alex.Pecoraro@isilon.com |
771c7b41555428226550940c68115a3ee5b20af5 | 9ecd7568b6e4f0f55af7fc865451ac40038be3c4 | /tianlikai/anhui/chuzhou_zhaobiao.py | 902f6f1c213fa802e3604080f3eb86588fe10015 | [] | no_license | jasonTLK/scrapy | f5ac6e575e902c077a07dc0eb9d228506f1a173f | 2de8245fbc8731cfd868bbd91168e26271045300 | refs/heads/master | 2021-01-20T04:22:23.080864 | 2017-04-28T07:46:29 | 2017-04-28T07:46:29 | 89,681,374 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,941 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy import Request, FormRequest
from scrapy.selector import Selector
try:
from scrapy.spiders import Spider
except:
from scrapy.spiders import BaseSpider as Spider
import datetime
from items.biding import biding_gov
from utils.toDB import *
# 安徽滁州招投标网站
# 招标信息
class hz_gov_Spider(scrapy.Spider):
name = "chuzhou_zhaobiao.py"
allowed_domains = ["www.czzbcg.com"]
custom_settings = {
"DOWNLOADER_MIDDLEWARES": {
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
'middlewares.useragent_middleware.RandomUserAgent': 400,
# 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
# 'middlewares.proxy_middleware.ProxyMiddleware': 250,
# 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
# 'middlewares.retry_middleware.RetryWithProxyMiddleware': 300,
# 'middlewares.timestamp_middleware.TimestampMiddleware': 120
}
}
def start_requests(self):
urls = [
"http://www.czzbcg.com/czztb/jyxx/002001/002001001/MoreInfo.aspx?CategoryNum=002001001",
"http://www.czzbcg.com/czztb/jyxx/002002/002002001/MoreInfo.aspx?CategoryNum=002002001"
]
pages = [319, 126]
for i in range(len(urls)):
yield Request(urls[i], callback=self.parse, meta={'url': urls[i], 'page': pages[i]})
def parse(self, response):
cookies = response.headers['Set-Cookie']
url = response.meta['url']
page = response.meta['page']
selector = Selector(response)
start = 2
__VIEWSTATE = selector.xpath("//input[@id='__VIEWSTATE']/@value").extract()
headers = {
"Cookie": cookies,
"Referer": url,
"Host": "www.czzbcg.com"
}
while start <= page:
yield FormRequest(url=url,
formdata={
'__VIEWSTATE': __VIEWSTATE[0],
'__EVENTTARGET': 'MoreInfoList1$Pager',
'__EVENTARGUMENT': str(start)}, headers=headers,
callback=self.middle, meta={'page':str(start)})
start += 1
def middle(self, response):
print "当前是第:" + response.meta['page'] + "页"
selector = Selector(response)
urls = selector.xpath("//tr[@valign='top']//a/@href").extract()
names=[]
for i in urls:
names.append(selector.xpath("//a[@href='" + i + "']/text()").extract()[0].strip())
for i in range(len(names)):
url = "http://www.czzbcg.com" + "".join(urls[i])
str = "".join(names[i]) + "," + url
print str
yield Request(url=url, callback=self.parse2, meta={"info": str})
def parse2(self, response):
infos = response.meta["info"]
items = biding_gov()
items["url"] = response.url
items["name"] = "".join(infos).split(",")[0]
items["info"] = ""
items["create_time"] = datetime.datetime.now()
items["update_time"] = datetime.datetime.now()
page_info = "".join(response.body)
items["info"] = "".join(page_info)
db = MongodbHandle("172.20.3.10 ", 27017, "Biding_announcement")
db.get_insert(
"bid_anhui_ChuZhou",
{
"url": items["url"],
"name": items["name"],
"info": items["info"],
"create_time": items["create_time"],
"update_time": items["update_time"]
}
)
print items["url"]
print items["name"]
| [
"18723163167@163.com"
] | 18723163167@163.com |
63795b0448d88a2e9a955a030ab4db263b16b065 | 94313855bf5e26721a3b40329aeb485c359ace7e | /zorg_firmata/__init__.py | 85cbfecf4081d3cfeba68fa67b111a2a5828dee1 | [] | no_license | zorg/zorg-firmata | 29616725e07a672be021a87974ac8d042edf29aa | 5c15b23421c065f35ddc4a300a453cf33fc10931 | refs/heads/master | 2021-01-17T14:22:32.041605 | 2016-10-02T17:22:14 | 2016-10-02T17:22:14 | 38,935,991 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29 | py | from .adaptor import Firmata
| [
"gunthercx@gmail.com"
] | gunthercx@gmail.com |
0f68e2076139798798ae823d9174cb2c98a714ad | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/odtjoh001/question1.py | b2cbad1ff099511e3cfff70991ca97165e01ef48 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | year = eval(input("Enter a year:\n"))
if(year%4 == 0 and year % 100 !=0) or (year % 400 == 0):
print(year, "is a leap year.")
else:
print(year, "is not a leap year.")
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
6258638b9c51b8141d51fe91279f56e729151b39 | 12bedb87964f0093da27b7d6103996d19f355768 | /Exercise11.py | 805903c712058196defe72c6cb15c1df26c5c03b | [] | no_license | ErenBtrk/PythonDictionaryExercises | f77e9d09025cbff1592aa645018edcb23fbd1abe | 182146333843f582fe28006f889636f7d86c2352 | refs/heads/master | 2023-04-24T09:08:08.627173 | 2021-05-16T16:17:40 | 2021-05-16T16:17:40 | 367,856,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | '''
11. Write a Python program to multiply all the items in a dictionary
'''
my_dict = {'data1':5,'data2':-5,'data3':3}
total = 1
for key,value in my_dict.items():
total *= value
print(total) | [
"erenbtrk@hotmail.com"
] | erenbtrk@hotmail.com |
15ed13c2acac4ae704432839fec63024737531b7 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_7/bhrrae003/util.py | e84a57a6a42bd391b9d939d114e902f3ee049313 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,041 | py | """Raeesa Behardien
BHRRAE003
Assignment 7
Question 2
02 May 2014"""
def create_grid(grid):
"""create a 4x4 grid"""
for a in range(4):
grid.append([0,0,0,0])
return grid
def print_grid (grid):
"""print out a 4x4 grid in 5-width columns within a box"""
print("+--------------------+")
for a in range(4):
symbol="|"
for b in range(4):
val=str(grid[a][b])
if val=='0':
val=' '
symbol+=val+' '*(5-(len(val)))
symbol+='|'
print(symbol)
print("+--------------------+")
def check_lost (grid):
"""return True if there are no 0 values and no adjacent values that are equal; otherwise False"""
for a in range(4):
for b in range(4):
if grid[a][b]==0:
return False
for c in range(3):
if grid[a][c]==grid[a][c+1] or grid[c][a]==grid[c+1][a]:
return False
else: return True
def check_won (grid):
"""return True if a value>=32 is found in the grid; otherwise False"""
for a in range(4):
for b in range(4):
if grid[a][b]>=32:
return True
else: return False
def copy_grid (grid):
"""return a copy of the grid"""
for a in range(4):
for b in range(4):
grid[a][b]=str(grid[a][b])
#mirror to copy grid
for c in range(4):
grid[c]="/".join(grid[c])
symbol="*".join(grid)
mirror=symbol.split("*")
for d in range(4):
mirror[d]=mirror[d].split('/')
for e in range(4):
for f in range(4):
mirror[e][f]=eval(mirror[e][f])
for g in range(4):
grid[g]=grid[g].split('/')
for h in range(4):
for i in range(4):
grid[h][i]=eval(grid[h][i])
return mirror
def grid_equal (grid1, grid2):
"""check if 2 grids are equal - return boolean value"""
if grid1==grid2:
return True
else:
return False
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
137080e9bd8a95d640e622851e583276962da7e6 | bbe5b336150c38f480a4c3a3a15e1d65a7dfc7d1 | /app/api/business/validators/brief_specialist_validator.py | f00b5ecdf09aff1115740720b7d3146d18b186be | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | AusDTO/dto-digitalmarketplace-api | 9135785c205fe04bbb07782c561c5c5f8cf8417d | af1f0c8979406f80223ab7a68266563abd80b2f4 | refs/heads/master | 2022-07-31T04:12:36.364555 | 2022-07-07T04:31:41 | 2022-07-07T04:31:41 | 62,025,672 | 6 | 7 | MIT | 2022-05-23T23:32:37 | 2016-06-27T04:34:37 | Python | UTF-8 | Python | false | false | 15,033 | py | # -*- coding: utf-8 -*-
import pendulum
from app.api.services import domain_service, suppliers
from app.api.business.validators import brief_lockout_validator
from app.api.business.brief.brief_business import get_lockout_dates
whitelist_fields = [
{'name': 'id', 'type': int},
{'name': 'title', 'type': str},
{'name': 'organisation', 'type': str},
{'name': 'summary', 'type': str},
{'name': 'location', 'type': list},
{'name': 'attachments', 'type': list},
{'name': 'contactNumber', 'type': str},
{'name': 'internalReference', 'type': str},
{'name': 'includeWeightingsEssential', 'type': bool},
{'name': 'essentialRequirements', 'type': list},
{'name': 'includeWeightingsNiceToHave', 'type': bool},
{'name': 'niceToHaveRequirements', 'type': list},
{'name': 'numberOfSuppliers', 'type': str},
{'name': 'evaluationType', 'type': list},
{'name': 'preferredFormatForRates', 'type': str},
{'name': 'maxRate', 'type': str},
{'name': 'budgetRange', 'type': str},
{'name': 'securityClearance', 'type': str},
{'name': 'industryBriefing', 'type': str},
{'name': 'securityClearanceObtain', 'type': str},
{'name': 'securityClearanceCurrent', 'type': str},
{'name': 'securityClearanceOther', 'type': str},
{'name': 'sellerCategory', 'type': str},
{'name': 'openTo', 'type': str},
{'name': 'sellers', 'type': dict},
{'name': 'startDate', 'type': str},
{'name': 'contractLength', 'type': str},
{'name': 'contractExtensions', 'type': str},
{'name': 'areaOfExpertise', 'type': str},
{'name': 'closedAt', 'type': str},
{'name': 'publish', 'type': bool},
{'name': 'comprehensiveTerms', 'type': bool},
{'name': 'sellerSelector', 'type': str},
{'name': 'originalClosedAt', 'type': str},
{'name': 'originalQuestionsClosedAt', 'type': str},
{'name': 'reasonToWithdraw', 'type': str}
]
class SpecialistDataValidator(object):
def __init__(self, data):
self.data = data
def validate_closed_at(self, minimum_days=2):
if 'closedAt' not in self.data or not self.data.get('closedAt'):
return False
parsed = pendulum.parse(self.data.get('closedAt')).in_timezone('Australia/Canberra').start_of('day')
if parsed < pendulum.now('Australia/Canberra').add(days=minimum_days).start_of('day'):
return False
if parsed > pendulum.now('Australia/Canberra').add(days=364).start_of('day'):
return False
return True
def validate_closed_at_lockout(self):
if 'closedAt' not in self.data or not self.data.get('closedAt'):
return False
closing_date = self.data.get('closedAt')
return brief_lockout_validator.validate_closed_at_lockout(closing_date)
def validate_title(self):
return True if self.data.get('title', '').replace(' ', '') else False
def validate_organisation(self):
return True if self.data.get('organisation', '').replace(' ', '') else False
def validate_summary(self):
return True if self.data.get('summary', '').replace(' ', '') else False
def validate_location(self):
if not self.data.get('location', []):
return False
if not len(self.data.get('location', [])) > 0:
return False
whitelist = [
'Australian Capital Territory',
'New South Wales',
'Northern Territory',
'Queensland',
'South Australia',
'Tasmania',
'Victoria',
'Western Australia',
'Offsite'
]
for location in self.data.get('location', []):
if location not in whitelist:
return False
return True
def validate_response_formats(self):
if len(self.data.get('evaluationType', [])) == 0:
return False
whitelist = [
'Responses to selection criteria',
'Résumés',
'References',
'Interviews',
'Scenarios or tests',
'Presentations'
]
has_responses = False
has_resume = False
for val in self.data.get('evaluationType', []):
if val not in whitelist:
return False
if val == whitelist[0]:
has_responses = True
if val == whitelist[1]:
has_resume = True
if not has_responses or not has_resume:
return False
return True
def validate_preferred_format_for_rates(self):
return (
True if
self.data.get('preferredFormatForRates') in ['dailyRate', 'hourlyRate']
else False
)
def validate_security_clearance(self):
return (
True
if self.data.get('securityClearance') in [
'noneRequired',
'abilityToObtain',
'mustHave',
'other'
]
else False
)
def validate_security_clearance_obtain(self):
if (
self.data.get('securityClearance') in ['abilityToObtain'] and
self.data.get('securityClearanceObtain') not in [
'baseline',
'nv1',
'nv2',
'pv'
]
):
return False
return True
def validate_security_clearance_current(self):
if (
self.data.get('securityClearance') in ['mustHave'] and
self.data.get('securityClearanceCurrent') not in [
'baseline',
'nv1',
'nv2',
'pv'
]
):
return False
return True
def validate_security_clearance_other(self):
if (
self.data.get('securityClearance') in ['other'] and
not self.data.get('securityClearanceOther').replace(' ', '')
):
return False
return True
def validate_work_already_done(self):
return True if self.data.get('workAlreadyDone').replace(' ', '') else False
def validate_start_date(self):
if 'startDate' not in self.data or not self.data.get('startDate', '').replace(' ', ''):
return False
parsed = pendulum.parse(self.data.get('startDate')).in_timezone('Australia/Canberra').start_of('day')
if parsed < pendulum.now('Australia/Canberra').start_of('day'):
return False
return True
def validate_contract_length(self):
return True if self.data.get('contractLength', '').replace(' ', '') else False
def remove_empty_criteria(self, criteria, includeWeightings):
if includeWeightings:
return [c for c in criteria if (
c['criteria'].replace(' ', '') or
c.get('weighting', '').replace(' ', '')
)]
else:
return [c for c in criteria if (
c['criteria'].replace(' ', '')
)]
def validate_evaluation_criteria_essential(self):
if not self.data.get('essentialRequirements'):
return False
self.data['essentialRequirements'] = self.remove_empty_criteria(
self.data.get('essentialRequirements'),
self.data.get('includeWeightingsEssential')
)
if not len(self.data.get('essentialRequirements')) > 0:
return False
weightings = 0
for criteria in self.data.get('essentialRequirements'):
if 'criteria' not in criteria:
return False
if not criteria['criteria'].replace(' ', ''):
return False
if self.data.get('includeWeightingsEssential'):
if 'weighting' not in criteria:
return False
if not criteria.get('weighting', '').replace(' ', ''):
return False
if int(criteria.get('weighting', '0')) == 0:
return False
weightings += int(criteria.get('weighting', ''))
if self.data.get('includeWeightingsEssential'):
if weightings != 100:
return False
return True
def validate_evaluation_criteria_nice_to_have(self):
    """Validate the desirable ("nice to have") evaluation criteria.

    Rules enforced after blank rows are stripped (self.data is mutated
    in place):
      * when weightings are enabled, a row must supply criteria text and
        a weighting together -- one without the other fails;
      * any weighting supplied must be a non-zero integer string;
      * if any weightings were supplied at all, they must sum to 100
        (unlike the essential criteria, weightings here are optional).
    """
    if not self.data.get('niceToHaveRequirements'):
        return False
    self.data['niceToHaveRequirements'] = self.remove_empty_criteria(
        self.data.get('niceToHaveRequirements'),
        self.data.get('includeWeightingsNiceToHave')
    )
    weightings = 0
    for criteria in self.data.get('niceToHaveRequirements'):
        if self.data.get('includeWeightingsNiceToHave'):
            # Criteria text and weighting must be supplied as a pair
            # (exclusive-or of the two blank-checks means a mismatch).
            if (
                (
                    criteria['criteria'].replace(' ', '') and
                    not criteria.get('weighting', '').replace(' ', '')
                ) or (
                    not criteria['criteria'].replace(' ', '') and
                    criteria.get('weighting', '').replace(' ', '')
                )
            ):
                return False
        # NOTE(review): source indentation was ambiguous in the ingested
        # copy; this check is reconstructed at loop level, i.e. it also
        # runs when includeWeightingsNiceToHave is falsy -- verify
        # against upstream before relying on that.
        if criteria.get('weighting', '').replace(' ', ''):
            if int(criteria.get('weighting', '')) == 0:
                return False
            else:
                weightings += int(criteria.get('weighting', ''))
    if self.data.get('includeWeightingsNiceToHave'):
        # Weightings are optional overall, but once any are present the
        # total must be exactly 100.
        if weightings and weightings != 100:
            return False
    return True
def validate_contact_number(self):
    """A contact number must contain at least one non-space character."""
    return bool(self.data.get('contactNumber', '').replace(' ', ''))
def validate_seller_category(self):
    """A seller category, when supplied, must resolve to a known domain.

    A blank/absent category is valid; a non-blank one must look up
    successfully via the domain service.
    """
    raw = self.data.get('sellerCategory', '')
    if not raw.replace(' ', ''):
        return True
    # int() is applied to the raw value (leading/trailing spaces are
    # tolerated by int; interior spaces raise, matching the original).
    return bool(domain_service.get_by_name_or_id(int(raw)))
def validate_open_to(self):
    """'openTo' must be 'all' or 'selected' whenever the category is valid.

    An invalid seller category short-circuits to True so that only one
    error is reported for that case.
    """
    if not self.validate_seller_category():
        return True
    return self.data.get('openTo') in ['all', 'selected']
def validate_sellers(self):
    """Validate the selected-seller list.

    When the brief is open to selected sellers only, at least one
    seller must be chosen; every chosen code must resolve to a real
    supplier regardless of mode.
    """
    restricted = (
        self.validate_seller_category() and
        self.validate_open_to() and
        self.data.get('openTo') in ['selected']
    )
    if restricted and not self.data.get('sellers'):
        return False
    for supplier_code in self.data.get('sellers', []):
        if not suppliers.get_supplier_by_code(int(supplier_code)):
            return False
    return True
def validate_number_of_suppliers(self):
    """The number of suppliers must be an integer in the range 1..100."""
    raw = self.data.get('numberOfSuppliers', '')
    if not raw.replace(' ', ''):
        return False
    return 0 < int(raw) <= 100
def validate_required(self):
    """Run every publish-time validation and collect the failures.

    Each failing check contributes one human-readable message; an empty
    list means the brief is valid for publishing.

    NOTE(review): validate_work_already_done() is defined on this class
    but never invoked here -- confirm whether that is intentional.
    """
    errors = []
    if not self.validate_title():
        errors.append('You must add a title')
    if not self.validate_organisation():
        errors.append('You must add the name of your department, agency or organisation')
    if not self.validate_summary():
        errors.append('You must add what the specialist will do')
    if not self.validate_location():
        errors.append('You must select a valid location of where the work can be done')
    if not self.validate_seller_category():
        errors.append('Invalid seller category/domain')
    if not self.validate_open_to():
        errors.append('Invalid openTo value')
    if not self.validate_sellers():
        errors.append('You must select some sellers')
    if not self.validate_response_formats():
        errors.append('Invalid response formats choice')
    if not self.validate_number_of_suppliers():
        errors.append('Invalid number of suppliers')
    if not self.validate_preferred_format_for_rates():
        errors.append('You must add background information')
    if not self.validate_security_clearance():
        errors.append('You must add security clearance details')
    if not self.validate_security_clearance_obtain():
        errors.append('You must select ability to obtain security clearance')
    if not self.validate_security_clearance_current():
        errors.append('You must select current security clearance')
    if not self.validate_security_clearance_other():
        errors.append('You must add other security clearance details')
    if not self.validate_start_date():
        errors.append('You must add an estimated start date')
    if not self.validate_contract_length():
        errors.append('You must add contract length')
    if not self.validate_evaluation_criteria_essential():
        # NOTE: the backslash continuations embed this source
        # indentation inside the user-facing message.
        errors.append(
            'You must not have any empty essential criteria, any empty weightings, \
            all weightings must be greater than 0, \
            and add up to 100'
        )
    if not self.validate_evaluation_criteria_nice_to_have():
        errors.append(
            'You must not have any empty desirable criteria, any empty weightings, \
            all weightings must be greater than 0, \
            and add up to 100'
        )
    if not self.validate_closed_at():
        errors.append('The closing date must be at least 2 days into the future or not more than one year long')
    if not self.validate_closed_at_lockout():
        # The lockout-window message is built dynamically from config.
        lockout_dates = get_lockout_dates()
        errors.append('The closing date cannot be between ' + lockout_dates['startDate'].strftime('%d %B') +
                      ' and ' + lockout_dates['endDate'].strftime('%d %B %Y') +
                      ', as Digital Marketplace is moving to BuyICT.')
    if not self.validate_contact_number():
        errors.append('Contact number must be a valid phone number, including an area code')
    return errors
def validate(self, publish=False):
    """Validate the brief data and return a list of error messages.

    With publish=True the full set of publish-time checks runs first;
    in all cases unknown fields and wrongly-typed whitelisted fields
    are reported. Any exception raised by a check is converted into an
    error message rather than propagated.
    """
    errors = []
    try:
        if publish:
            errors += self.validate_required()
        supplied_names = list(self.data.keys())
        allowed_names = [field['name'] for field in whitelist_fields]
        for name in supplied_names:
            if name not in allowed_names:
                errors.append('Unexpected field "%s"' % name)
        for field in whitelist_fields:
            if field['name'] in supplied_names and not isinstance(self.data.get(field['name'], None), field['type']):
                errors.append('Field "%s" is invalid, unexpected type' % field['name'])
    except Exception as e:
        # Deliberate best-effort: surface the failure as a message.
        errors.append(str(e))
    return errors
| [
"noreply@github.com"
] | AusDTO.noreply@github.com |
29d310e981165585e9df37a87690f77880cb6a57 | fd21d6384ba36aa83d0c9f05f889bdbf8912551a | /a10sdk/core/vrrp/vrrp_a_interface_trunk.py | 6bbfc3c25628a4e18801f09572c963b7c1a24d0d | [
"Apache-2.0"
] | permissive | 0xtobit/a10sdk-python | 32a364684d98c1d56538aaa4ccb0e3a5a87ecd00 | 1ea4886eea3a1609b2ac1f81e7326758d3124dba | refs/heads/master | 2021-01-18T03:08:58.576707 | 2014-12-10T00:31:52 | 2014-12-10T00:31:52 | 34,410,031 | 0 | 0 | null | 2015-04-22T19:05:12 | 2015-04-22T19:05:12 | null | UTF-8 | Python | false | false | 2,065 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class Trunk(A10BaseClass):
    # Auto-generated A10 aXAPI v3 binding; the docstring's :param specs
    # mirror the device schema -- do not hand-edit their JSON fragments.

    """Class Description::
    VRRP-A interface trunk.
    Class trunk supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`
    :param both: {"description": "both a router and server interface", "format": "flag", "default": 0, "optional": true, "not-list": ["router-interface", "server-interface"], "type": "number"}
    :param vlan: {"description": "VLAN ID", "format": "number", "optional": true, "maximum": 4094, "minimum": 1, "not": "no-heartbeat", "type": "number"}
    :param router_interface: {"description": "interface to upstream router", "format": "flag", "default": 0, "optional": true, "not-list": ["server-interface", "both"], "type": "number"}
    :param no_heartbeat: {"description": "do not send out heartbeat packet from this interface", "format": "flag", "default": 0, "optional": true, "not": "vlan", "type": "number"}
    :param server_interface: {"description": "interface to real server", "format": "flag", "default": 0, "optional": true, "not-list": ["router-interface", "both"], "type": "number"}
    :param trunk_val: {"description": "Ethernet Interface", "format": "number", "type": "number", "maximum": 16, "minimum": 1, "optional": false}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/vrrp-a/interface/trunk/{trunk_val}`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        # "trunk_val" is the only mandatory attribute; it names the object
        # in the REST URL below.
        self.required = [ "trunk_val"]
        self.b_key = "trunk"
        self.a10_url="/axapi/v3/vrrp-a/interface/trunk/{trunk_val}"
        self.DeviceProxy = ""
        self.both = ""
        self.vlan = ""
        self.router_interface = ""
        self.no_heartbeat = ""
        self.server_interface = ""
        self.trunk_val = ""
        # Caller-supplied keyword arguments override the defaults above.
        for keys, value in kwargs.items():
            setattr(self,keys, value)
| [
"doug@parksidesoftware.com"
] | doug@parksidesoftware.com |
5286ddc0d57db0e1cb2d944f3d00ffae3b12fec8 | c73165911c1e9f62178376ae1e860f42bdaf74f6 | /backend/apps/plugin/serializers/base.py | c974a9ad47e7ce09d3b70d296fbc010faa35fef2 | [
"MIT"
] | permissive | codelieche/erp | aa5994b0c79e99c07aaf3ea440e4cf4389d433b7 | 96861ff63a63a93918fbd5181ffb2646446d0eec | refs/heads/main | 2022-12-22T13:30:23.398639 | 2021-10-22T16:26:28 | 2021-10-22T16:26:28 | 171,668,277 | 0 | 0 | MIT | 2022-12-10T02:32:50 | 2019-02-20T12:22:17 | Python | UTF-8 | Python | false | false | 343 | py | # -*- coding:utf-8 -*-
from rest_framework import serializers
class PluginInfoSerializer(serializers.Serializer):
    """
    Plugin information serializer: read-only summary of a plugin
    (code, name, description).
    """
    # help_text values are user-facing and intentionally left in Chinese.
    code = serializers.CharField(help_text="插件Code")
    name = serializers.CharField(help_text="插件名称")
    description = serializers.CharField(help_text="插件描述")
| [
"codelieche@gmail.com"
] | codelieche@gmail.com |
0626e2f2f2b884a02c2f86b3e380d86b73fd69f4 | 1608a43a29821106d361ab80ce61255d4a715f3a | /src/pretix/base/migrations/0012_remove_order_tickets.py | 7486be3f8e8355f81dd467e5a8b6d22c1d9cf596 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | ilmjstrope/pretix | 873f1bb14eb1f8bf63de2d98295655ccea2c734a | 7fc56b77db16e7e0783a4ba52b8ed5ef09ce9558 | refs/heads/master | 2021-01-18T00:42:02.873888 | 2015-09-29T14:46:45 | 2015-09-29T14:46:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations: drops the `tickets`
    # field from the `order` model. Do not edit by hand.

    dependencies = [
        ('pretixbase', '0011_auto_20150915_2020'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='order',
            name='tickets',
        ),
    ]
| [
"mail@raphaelmichel.de"
] | mail@raphaelmichel.de |
447d2060908ab3a461a172209dc183a9fae81b6d | b0532e7e48729702db60918a8ea44b72319dadc7 | /roomai/kuhnpoker/KuhnPokerAction.py | b7dcdecf26880b932be755ad1a6bc439b54099d5 | [
"MIT"
] | permissive | abcdcamey/RoomAI | 7186b39d2d55dafa98cf40288b73d2977087da41 | fe884645b65bff6205d089d24c508b5a37dfdf3b | refs/heads/master | 2020-03-25T20:16:12.254187 | 2018-08-02T15:33:39 | 2018-08-02T15:33:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | import roomai.common
class KuhnPokerAction(roomai.common.AbstractAction):
    '''
    The KuhnPoker action used by the normal players. There are only two actions: bet and check. Examples of usages: \n
    >> import roomai.kuhnpoker\n
    >> action = roomai.kuhnpoker.KuhnPokerAction.lookup("bet")\n
    >> action.key\n
    "bet"\n
    >> action = roomai.kuhnpoker.KuhnPokerAction.lookup("check")\n
    >> action.key\n
    "check"\n
    '''

    def __init__(self, key):
        # Reject anything except the two legal Kuhn poker moves.
        if key not in ["check", "bet"]:
            raise ValueError("The key for KuhnPokerAction must be in [\"check\",\"bet\"]")
        super(KuhnPokerAction, self).__init__(key)
        self.__key__ = key

    def __get_key__(self):
        return self.__key__
    key = property(__get_key__, doc="The key of the KuhnPoker action, \"bet\" or \"check\".")

    @classmethod
    def lookup(cls, key):
        """Return the shared singleton action for *key* ("bet" or "check")."""
        return AllKuhnActions[key]

    def __deepcopy__(self, memodict=None, newinstance=None):
        # FIX: `memodict` previously defaulted to a shared mutable `{}`
        # (flake8-bugbear B006). It is unused here, so `None` is a safe,
        # backward-compatible default. Actions are singletons, so a
        # "deep copy" simply returns the shared instance.
        return KuhnPokerAction.lookup(self.key)


AllKuhnActions = {"bet": KuhnPokerAction("bet"), "check": KuhnPokerAction("check")}
"lili1987mail@gmail.com"
] | lili1987mail@gmail.com |
44392f80ccc1f2b66ba022fbbfaedec999972af8 | 0c110eb32f2eaea5c65d40bda846ddc05757ced6 | /python_scripts/pimriscripts/mastersort/scripts_dir/p7580_run2L6.py | e7e36dc227bc5518683dc0475d734610c58036b9 | [] | no_license | nyspisoccog/ks_scripts | 792148a288d1a9d808e397c1d2e93deda2580ff4 | 744b5a9dfa0f958062fc66e0331613faaaee5419 | refs/heads/master | 2021-01-18T14:22:25.291331 | 2018-10-15T13:08:24 | 2018-10-15T13:08:24 | 46,814,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | from __future__ import with_statement
import os, csv, shutil,tarfile, uf, dcm_ops
dest_root = '/ifs/scratch/pimri/soccog/test_working'
dst_path_lst = ['7580', 'run2L6']
uf.buildtree(dest_root, dst_path_lst)
uf.copytree('/ifs/scratch/pimri/soccog/old/SocCog_Raw_Data_By_Exam_Number/2961/E2961_e4354087/s4409419_1904_2L6_s24', '/ifs/scratch/pimri/soccog/test_working/7580/run2L6')
t = tarfile.open(os.path.join('/ifs/scratch/pimri/soccog/test_working/7580/run2L6','MRDC_files.tar.gz'), 'r')
t.extractall('/ifs/scratch/pimri/soccog/test_working/7580/run2L6')
for f in os.listdir('/ifs/scratch/pimri/soccog/test_working/7580/run2L6'):
if 'MRDC' in f and 'gz' not in f:
old = os.path.join('/ifs/scratch/pimri/soccog/test_working/7580/run2L6', f)
new = os.path.join('/ifs/scratch/pimri/soccog/test_working/7580/run2L6', f + '.dcm')
os.rename(old, new)
qsub_cnv_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7580/run2L6', '7580_run2L6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cnv')
#qsub_cln_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7580/run2L6', '7580_run2L6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cln')
| [
"katherine@Katherines-MacBook-Pro.local"
] | katherine@Katherines-MacBook-Pro.local |
4a2279505e0d062e31700ce37d3373c049a1adec | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_001/ch35_2019_06_06_00_27_42_702661.py | 1c0d89cb25c60940d66f046a84d71724d1ce4de7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | deposito_inicial = float(input("Depósito inicial: "))
deposito_mensal = float(input("Depósito mensal: "))
taxa_de_juros = float(input("Taxa de juros: "))
total = deposito_inicial
juros = taxa_de_juros/100 + 1
mes = 0
while mes < 24:
total = total*juros + deposito_mensal
mes += 1
print("Saldo do mês {0} é de R${1:.2f}".format(mes, total))
print ("Total de rendimentos = R${0:.2f}".format(total-deposito_inicial-deposito_mensal*23)) | [
"you@example.com"
] | you@example.com |
136a3ffeda37fe653bc8b661374d35eefd307b4a | 71501709864eff17c873abbb97ffabbeba4cb5e3 | /llvm14.0.4/lldb/test/API/functionalities/scripted_process/dummy_scripted_process.py | 67850cf57a73dd66d6f168913268a148917aec6d | [
"NCSA",
"Apache-2.0",
"LLVM-exception"
] | permissive | LEA0317/LLVM-VideoCore4 | d08ba6e6f26f7893709d3285bdbd67442b3e1651 | 7ae2304339760685e8b5556aacc7e9eee91de05c | refs/heads/master | 2022-06-22T15:15:52.112867 | 2022-06-09T08:45:24 | 2022-06-09T08:45:24 | 189,765,789 | 1 | 0 | NOASSERTION | 2019-06-01T18:31:29 | 2019-06-01T18:31:29 | null | UTF-8 | Python | false | false | 2,858 | py | import os,struct, signal
from typing import Any, Dict
import lldb
from lldb.plugins.scripted_process import ScriptedProcess
from lldb.plugins.scripted_process import ScriptedThread
class DummyScriptedProcess(ScriptedProcess):
    """Minimal scripted-process stand-in used by the scripted-process test:
    one fake thread and canned answers for every query LLDB makes."""

    def __init__(self, target: lldb.SBTarget, args : lldb.SBStructuredData):
        super().__init__(target, args)
        # Single fake thread, keyed by index 0.
        self.threads[0] = DummyScriptedThread(self, None)

    def get_memory_region_containing_address(self, addr: int) -> lldb.SBMemoryRegionInfo:
        # No memory regions are modelled.
        return None

    def get_thread_with_id(self, tid: int):
        return {}

    def get_registers_for_thread(self, tid: int):
        return {}

    def read_memory_at_address(self, addr: int, size: int) -> lldb.SBData:
        # Every read returns the same canned bytes regardless of addr/size.
        data = lldb.SBData().CreateDataFromCString(
            self.target.GetByteOrder(),
            self.target.GetCodeByteSize(),
            "Hello, world!")
        return data

    def get_loaded_images(self):
        return self.loaded_images

    def get_process_id(self) -> int:
        # Fixed fake pid.
        return 42

    def should_stop(self) -> bool:
        return True

    def is_alive(self) -> bool:
        return True

    def get_scripted_thread_plugin(self):
        # Fully qualified class name LLDB uses to instantiate the thread.
        return DummyScriptedThread.__module__ + "." + DummyScriptedThread.__name__
class DummyScriptedThread(ScriptedThread):
    """Minimal scripted-thread backing DummyScriptedProcess.

    Reports a single stopped thread (tid 0x19) that pretends to have been
    stopped by SIGINT, with one synthetic stack frame and a fixed
    register context.
    """

    def __init__(self, process, args):
        super().__init__(process, args)

    def get_thread_id(self) -> int:
        # Arbitrary fake thread id surfaced to the debugger.
        return 0x19

    def get_name(self) -> str:
        return DummyScriptedThread.__name__ + ".thread-1"

    def get_state(self) -> int:
        return lldb.eStateStopped

    def get_stop_reason(self) -> Dict[str, Any]:
        # Pretend the thread stopped on SIGINT.
        return { "type": lldb.eStopReasonSignal, "data": {
            "signal": signal.SIGINT
        } }

    def get_stackframes(self):
        class ScriptedStackFrame:
            # Plain data holder for one synthetic frame.
            # FIX: `self` was missing from the parameter list, so
            # constructing a frame raised TypeError.
            def __init__(self, idx, cfa, pc, symbol_ctx):
                self.idx = idx
                self.cfa = cfa
                self.pc = pc
                self.symbol_ctx = symbol_ctx

        symbol_ctx = lldb.SBSymbolContext()
        frame_zero = ScriptedStackFrame(0, 0x42424242, 0x5000000, symbol_ctx)
        self.frames.append(frame_zero)
        # FIX: previously returned `self.frame_zero[0:0]`, which raised
        # AttributeError (only `self.frames` is ever assigned). Return
        # the frame list that was just populated.
        return self.frames

    def get_register_context(self) -> str:
        # 21 fake 64-bit register values (1..21), packed in the host's
        # native byte order/alignment ('21Q').
        return struct.pack(
            '21Q', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21)
def __lldb_init_module(debugger, dict):
    # Called by LLDB when this module is loaded via `command script import`.
    # Unless SKIP_SCRIPTED_PROCESS_LAUNCH is set, immediately launch a
    # process backed by DummyScriptedProcess; otherwise just print the
    # fully qualified class name for manual use.
    if not 'SKIP_SCRIPTED_PROCESS_LAUNCH' in os.environ:
        debugger.HandleCommand(
            "process launch -C %s.%s" % (__name__,
                                         DummyScriptedProcess.__name__))
    else:
        print("Name of the class that will manage the scripted process: '%s.%s'"
              % (__name__, DummyScriptedProcess.__name__))
"kontoshi0317@gmail.com"
] | kontoshi0317@gmail.com |
b7d9ff78558e217ce2ba72d504a2fc2154ae91b1 | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractNovelGilegatiCom.py | 555142d8fee58800bc37a387984e5f7fdc425216 | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 519 | py | def extractNovelGilegatiCom(item):
'''
Parser for 'novel.gilegati.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Spirit Conductor', 'Spirit Conductor', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | [
"something@fake-url.com"
] | something@fake-url.com |
25c4685c444ce65edcdfff005e0060f97157f3b3 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/isosurface/colorbar/_tickcolor.py | 8d235833cab3bc7fb1364a4236a3091d952369eb | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 427 | py | import _plotly_utils.basevalidators
class TickcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    # Auto-generated validator for the `isosurface.colorbar.tickcolor`
    # plotly property; delegates all color validation to ColorValidator.
    def __init__(
        self, plotly_name="tickcolor", parent_name="isosurface.colorbar", **kwargs
    ):
        # edit_type "calc" presumably means a change triggers a full
        # recalculation unless the caller overrides it -- see plotly's
        # figure-update semantics.
        super(TickcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs
        )
"noreply@github.com"
] | hugovk.noreply@github.com |
4f9b132d0127390b4ded48630da6093bf8a6a6c2 | 5946112229fe1d9a04b7536f076a656438fcd05b | /dev_env/lib/python3.8/site-packages/pygments/console.py | 6c024a8d3484cecf9ff30eea0e7135c8eace379a | [] | no_license | Gear-Droid/openCV_study_project | 3b117967eb8a28bb0c90088e1556fbc1d306a98b | 28c9a494680c4a280f87dd0cc87675dfb2262176 | refs/heads/main | 2023-05-14T14:27:42.284265 | 2021-06-05T00:16:09 | 2021-06-05T00:16:09 | 307,807,458 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,792 | py | # -*- coding: utf-8 -*-
"""
pygments.console
~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
esc = "\x1b["

# Named ANSI escape sequences: attributes first, colors filled in below.
codes = {
    "": "",
    "reset": esc + "39;49;00m",
    "bold": esc + "01m",
    "faint": esc + "02m",
    "standout": esc + "03m",
    "underline": esc + "04m",
    "blink": esc + "05m",
    "overline": esc + "06m",
}

dark_colors = ["black", "red", "green", "yellow", "blue",
               "magenta", "cyan", "gray"]
light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow",
                "brightblue", "brightmagenta", "brightcyan", "white"]

# Dark colors occupy SGR 30-37, their bright counterparts 90-97.
for _offset, (_dark, _light) in enumerate(zip(dark_colors, light_colors)):
    codes[_dark] = esc + "%im" % (30 + _offset)
    codes[_light] = esc + "%im" % (90 + _offset)
del _offset, _dark, _light

# "white" is kept as an alias for "bold" for backwards compatibility.
codes["white"] = codes["bold"]


def reset_color():
    """Return the escape sequence that resets colors and attributes."""
    return codes["reset"]


def colorize(color_key, text):
    """Wrap *text* in the escape sequence named *color_key*, then reset."""
    return codes[color_key] + text + codes["reset"]


def ansiformat(attr, text):
    """
    Format ``text`` with a color and/or some attributes::

        color       normal color
        *color*     bold color
        _color_     underlined color
        +color+     blinking color
    """
    pieces = []
    # Strip decorations in the same order as before: blink, bold, underline.
    for marker, style in (("+", "blink"), ("*", "bold"), ("_", "underline")):
        if attr[:1] == marker and attr[-1:] == marker:
            pieces.append(codes[style])
            attr = attr[1:-1]
    pieces.append(codes[attr])
    pieces.append(text)
    pieces.append(codes["reset"])
    return ''.join(pieces)
| [
"Vladi003@yandex.ru"
] | Vladi003@yandex.ru |
0f29d4819c4edbbd216beaccd65020d52f2aab4c | 20176bf4fbd8aec139c7b5a27f2c2e155e173e6e | /data/all-pratic/oinam_singh/myprogram/dfEx4.py | e6046a036d10a2b7017578704d751ac80e542276 | [] | no_license | githubjyotiranjan/pytraining | 4ac4a1f83cc4270e2939d9d32c705019c5bc61c5 | 8b50c4ab7848bd4cbfdfbc06489768d577289c66 | refs/heads/master | 2020-03-19T06:22:20.793296 | 2018-06-15T20:08:11 | 2018-06-15T20:08:11 | 136,013,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | import pandas as pd
data1={'A':['A0','A1','A2','A3'],
'B':['B0','B1','B2','B3'],
'C':['C0','C1','C2','C3'],
'D':['D0','D1','D2','D3']}
df1=pd.DataFrame(data1,index=[0,1,2,3])
data2={'A':['A4','A5','A6','A7'],
'B':['B4','B5','B6','B7'],
'C':['C4','C5','C6','C7'],
'D':['D4','D5','D6','D7']
}
df2=pd.DataFrame(data2,index=[4,5,6,7])
data3={'A':['A8','A9','A10','A11'],
'B':['B8','B9','B10','B11'],
'C':['C8','C9','C10','C11'],
'D':['D8','D9','D10','D11']}
df3=pd.DataFrame(data3,index=[8,9,10,11])
dcon=pd.concat([df1,df2,df3])
print(dcon)
dcon1=pd.concat([df1,df2,df3],axis=1)
print(dcon1) | [
"jsatapathy007@gmail.com"
] | jsatapathy007@gmail.com |
86c175d1f1af29f44d196cc3b3948293dcccab2a | 01dad4d1d2ffaf2fa070e99fe828d42f59a9f9d1 | /src/pycrop2ml_ui/packages/SQ_Energy_Balance/src/openalea/Netradiationequivalentevaporation.py | 9b16174bb6d9032e26b78f9a1441551e454fadc7 | [
"BSD-3-Clause",
"MIT"
] | permissive | AgriculturalModelExchangeInitiative/Pycrop2ml_ui | 5e210facf9689348bb57c16060967118b7c5f49a | 3d5d2b87a74f0be306056b71808286922fef2945 | refs/heads/master | 2023-06-24T13:52:39.933728 | 2023-06-17T00:17:26 | 2023-06-17T00:17:26 | 193,912,881 | 0 | 4 | MIT | 2023-02-25T13:26:57 | 2019-06-26T13:44:34 | Jupyter Notebook | UTF-8 | Python | false | false | 2,389 | py | # coding: utf8
import numpy
from math import *
def model_netradiationequivalentevaporation(lambdaV = 2.454,
                                            netRadiation = 1.566):
    """
    - Description:
                * Title: NetRadiationEquivalentEvaporation Model
                * Author: Pierre Martre
                * Reference: Modelling energy balance in the wheat crop model SiriusQuality2:
            Evapotranspiration and canopy and soil temperature calculations
                * Institution: INRA/LEPSE Montpellier
                * Abstract: It is given by dividing net radiation by latent heat of vaporization of water
    - inputs:
                * name: lambdaV
                              ** parametercategory : constant
                              ** min : 0
                              ** datatype : DOUBLE
                              ** max : 10
                              ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                              ** default : 2.454
                              ** inputtype : parameter
                              ** unit : MJ kg-1
                              ** description : latent heat of vaporization of water
                * name: netRadiation
                              ** min : 0
                              ** default : 1.566
                              ** max : 5000
                              ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                              ** variablecategory : state
                              ** datatype : DOUBLE
                              ** inputtype : variable
                              ** unit : MJ m-2 d-1
                              ** description : net radiation
    - outputs:
                * name: netRadiationEquivalentEvaporation
                              ** min : 0
                              ** variablecategory : auxiliary
                              ** max : 5000
                              ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
                              ** datatype : DOUBLE
                              ** unit : g m-2 d-1
                              ** description : net Radiation in Equivalent Evaporation
    """
    # NOTE: this file is generated by pycrop2ml and the structured
    # docstring above is machine-readable model metadata -- keep intact.
    # MJ m-2 d-1 divided by MJ kg-1 gives kg m-2 d-1; the *1000 rescales
    # to g m-2 d-1.
    netRadiationEquivalentEvaporation = netRadiation / lambdaV * 1000.0
    return netRadiationEquivalentEvaporation
"ahmedmidingoyi@yahoo.fr"
] | ahmedmidingoyi@yahoo.fr |
dc0bfd07d422bf122ac4ac42148299a225288420 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/fabric/rsinterfacepolprofile.py | bae1e7ce721ce11040c8dcba34b373820c78aca0 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 9,330 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsInterfacePolProfile(Mo):
    """
    A source relation to the port policy. Note that this relation is an internal object.
    """

    # NOTE: auto-generated from the ACI management-information model
    # ("written by zen warriors, do not modify!"); hand edits will be
    # lost on regeneration.

    # Relation metadata: N:M source relation from a fabric node config
    # to fabric port profiles, addressed by target DN in the RN.
    meta = SourceRelationMeta("cobra.model.fabric.RsInterfacePolProfile", "cobra.model.fabric.PortP")
    meta.cardinality = SourceRelationMeta.N_TO_M
    meta.moClassName = "fabricRsInterfacePolProfile"
    meta.rnFormat = "rsinterfacePolProfile-[%(tDn)s]"
    meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
    meta.label = "Super Class for Relation from Node to Fabric Policies Deployed on Node"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    meta.childClasses.add("cobra.model.fabric.CreatedBy")
    meta.childClasses.add("cobra.model.health.Inst")
    meta.childClasses.add("cobra.model.fault.Counts")

    meta.childNamesAndRnPrefix.append(("cobra.model.fabric.CreatedBy", "source-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
    meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))

    meta.parentClasses.add("cobra.model.fabric.NodeCfg")

    meta.superClasses.add("cobra.model.reln.Inst")
    meta.superClasses.add("cobra.model.fabric.NodeToPolicy")
    meta.superClasses.add("cobra.model.reln.To")

    meta.rnPrefixes = [
        ('rsinterfacePolProfile-', True),
    ]

    # ---- Property metadata (one PropMeta per managed-object property) ----

    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    # Deployment state: bit-flag style constants describing why the
    # policy is (or is not) deployed on the node.
    prop = PropMeta("str", "deplSt", "deplSt", 15582, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "none"
    prop._addConstant("delivered", "delivered", 1)
    prop._addConstant("node-not-ready", "node-not-ready", 1073741824)
    prop._addConstant("none", "none", 0)
    prop._addConstant("not-registered-for-atg", "node-cannot-deploy-epg", 64)
    prop._addConstant("not-registered-for-fabric-ctrls", "node-not-controller", 16)
    prop._addConstant("not-registered-for-fabric-leafs", "node-not-leaf-for-fabric-policies", 4)
    prop._addConstant("not-registered-for-fabric-node-group", "node-not-registered-for-node-group-policies", 32)
    prop._addConstant("not-registered-for-fabric-oleafs", "node-not-capable-of-deploying-fabric-node-leaf-override", 2048)
    prop._addConstant("not-registered-for-fabric-ospines", "node-not-capable-of-deploying-fabric-node-spine-override", 4096)
    prop._addConstant("not-registered-for-fabric-pods", "node-has-not-joined-pod", 8)
    prop._addConstant("not-registered-for-fabric-spines", "node-not-spine", 2)
    prop._addConstant("not-registered-for-infra-leafs", "node-not-leaf-for-infra-policies", 128)
    prop._addConstant("not-registered-for-infra-oleafs", "node-not-capable-of-deploying-infra-node-leaf-override", 512)
    prop._addConstant("not-registered-for-infra-ospines", "node-not-capable-of-deploying-infra-node-spine-override", 1024)
    prop._addConstant("not-registered-for-infra-spines", "node-not-spine-for-infra-policies", 256)
    prop._addConstant("pod-misconfig", "node-belongs-to-different-pod", 8192)
    prop._addConstant("policy-deployment-failed", "policy-deployment-failed", 2147483648)
    meta.props.add("deplSt", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = True
    prop.defaultValueStr = "yes"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("forceResolve", prop)

    # Lifecycle owner of the MO (local config, policy, replica, ...).
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "monPolDn", "monPolDn", 13974, PropCategory.REGULAR)
    prop.label = "Monitoring policy attached to this observable object"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("monPolDn", prop)

    prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1
    prop.defaultValueStr = "mo"
    prop._addConstant("local", "local", 3)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("service", "service", 2)
    meta.props.add("rType", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    # Resolution state of the relation to its target MO.
    prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
    prop.label = "State"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unformed"
    prop._addConstant("cardinality-violation", "cardinality-violation", 5)
    prop._addConstant("formed", "formed", 1)
    prop._addConstant("invalid-target", "invalid-target", 4)
    prop._addConstant("missing-target", "missing-target", 2)
    prop._addConstant("unformed", "unformed", 0)
    meta.props.add("state", prop)

    prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
    prop.label = "State Qualifier"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "none"
    prop._addConstant("default-target", "default-target", 2)
    prop._addConstant("mismatch-target", "mismatch-target", 1)
    prop._addConstant("none", "none", 0)
    meta.props.add("stateQual", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    # Target class of the relation; defaults to fabricPortP.
    prop = PropMeta("str", "tCl", "tCl", 11477, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 887
    prop.defaultValueStr = "fabricPortP"
    prop._addConstant("fabricLePortP", None, 888)
    prop._addConstant("fabricPortP", None, 887)
    prop._addConstant("fabricSpPortP", None, 889)
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("tCl", prop)

    # Target DN: the naming property (appears in the RN format above).
    prop = PropMeta("str", "tDn", "tDn", 11476, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    meta.props.add("tDn", prop)

    prop = PropMeta("str", "tType", "tType", 105, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1
    prop.defaultValueStr = "mo"
    prop._addConstant("all", "all", 2)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("name", "name", 0)
    meta.props.add("tType", prop)

    meta.namingProps.append(getattr(meta.props, "tDn"))
    getattr(meta.props, "tDn").needDelimiter = True

    def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
        # tDn is the naming property; it is embedded in this MO's RN.
        namingVals = [tDn]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
64a511b95ea60b2cca1c7f11eb3400f1df6d6211 | 6a61667e176b06ccdef07e84d79b382b2fb491bb | /app/interviews/tests/views/interview.py | d5b9842a2c394d0c54c84f5d15b493e834871cf6 | [] | no_license | vsokoltsov/Interview360Server | 333f08f13b33ef88928b3e4b844f60e72ebec809 | 252b0ebd77eefbcc945a0efc3068cc3421f46d5f | refs/heads/master | 2022-12-11T05:38:01.310133 | 2019-03-24T17:47:09 | 2019-03-24T17:47:09 | 95,320,167 | 2 | 3 | null | 2022-12-08T04:54:08 | 2017-06-24T20:09:08 | Python | UTF-8 | Python | false | false | 3,216 | py | from . import APITestCase, datetime, Token, Company, HR, CANDIDATE
import ipdb
class InterviewViewSetTests(APITestCase):
    """Tests for InterviewViewSet class.

    All requests are authenticated as an HR employee of the first fixture
    company unless a test swaps the credentials itself.
    """

    fixtures = [
        "skill.yaml",
        "user.yaml",
        "auth_token.yaml",
        "company.yaml",
        "vacancy.yaml",
        "interview.yaml"
    ]

    def setUp(self):
        """Set up test dependencies."""
        self.company = Company.objects.first()
        self.hr = self.company.get_employees_with_role(HR)[-1]
        self.vacancy = self.company.vacancy_set.first()
        self.candidate = self.company.get_employees_with_role(CANDIDATE)[-1]
        self.interview = self.vacancy.interviews.first()
        # Ten days in the future so the scheduled date is always valid.
        # (Bug fix: the original computed this same value twice; the
        # redundant first assignment was removed.)
        date = datetime.datetime.now() + datetime.timedelta(days=10)
        self.token = Token.objects.get(user=self.hr)
        self.candidate_token = Token.objects.get(user=self.candidate)
        # Authenticate every request as the HR user by default.
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
        # NOTE(review): 'interviewee_ids' is populated with an email, not an
        # id — presumably the serializer resolves users by email; confirm.
        self.form_data = {
            'candidate_email': self.candidate.email,
            'vacancy_id': self.vacancy.id,
            'interviewee_ids': [
                self.hr.email
            ],
            'assigned_at': date
        }
        self.url = "/api/v1/companies/{}/interviews/".format(self.company.id)

    def test_success_list_receiving(self):
        """Test success receiving list of the interviews."""
        response = self.client.get(self.url, format='json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 2)

    # TODO Fix after rebuilding interview tests with factory
    # def test_success_retrieve_action(self):
    #     """Test success receiving detail interview."""
    #
    #     self.client.credentials(
    #         HTTP_AUTHORIZATION='Token ' + self.candidate_token.key
    #     )
    #     response = self.client.get(
    #         self.url + "{}/".format(self.interview.id), format='json'
    #     )
    #     self.assertEqual(response.status_code, 200)

    def test_success_interview_creation(self):
        """Test success creation of the interview."""
        response = self.client.post(self.url, self.form_data, format='json')
        self.assertEqual(response.status_code, 201)
        self.assertTrue('interview' in response.data)

    def test_failed_interview_creation(self):
        """Test failed creation of the interview."""
        response = self.client.post(self.url, {}, format='json')
        self.assertEqual(response.status_code, 400)

    def test_success_interview_update(self):
        """Test success Interview's instance update."""
        response = self.client.put(
            self.url + "{}/".format(self.interview.id), self.form_data,
            format='json'
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue('interview' in response.data)

    def test_success_interview_delete(self):
        """Test success Interview's instance delete."""
        response = self.client.delete(
            self.url + "{}/".format(self.interview.id), format='json'
        )
        self.assertEqual(response.status_code, 204)
| [
"vforvad@gmail.com"
] | vforvad@gmail.com |
ef80458b3c26c42b8ba73347abdde6d49679144f | 7db93c328243cd2f6ffcabb66b0d148bb0e3d198 | /lintcode/07BinaryTree/155MinDepthOfBinaryTree.py | e838e2337dde723bea7750ab777d5826379f940f | [
"MIT"
] | permissive | zhaoxinlu/leetcode-algorithms | 62cc67efdc1b0e8514c83bb7643b369b4f681948 | f5e1c94c99628e7fb04ba158f686a55a8093e933 | refs/heads/master | 2021-05-11T23:47:43.385660 | 2018-04-25T08:27:57 | 2018-04-25T08:27:57 | 117,520,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | # -*- coding: utf-8 -*-
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-03-10
算法思想: 二叉树的最小深度
"""
"""
Definition of TreeNode:
"""
class TreeNode:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
class Solution:
    """
    @param root: The root of binary tree
    @return: An integer
    """

    def minDepth(self, root):
        """Return the number of nodes on the shortest root-to-leaf path.

        A missing (None) child does not end a path: a node with exactly one
        child must recurse into that child, so the one-sided cases are
        handled explicitly before taking min() over both subtrees.
        """
        if root is None:
            return 0
        # Leaf node: path of length 1.
        if root.left is None and root.right is None:
            return 1
        # Only one subtree exists: the shortest path must go through it.
        if root.left is None:
            return self.minDepth(root.right) + 1
        if root.right is None:
            return self.minDepth(root.left) + 1
        return min(self.minDepth(root.left), self.minDepth(root.right)) + 1
"446571703@qq.com"
] | 446571703@qq.com |
cf70a287c7007e7788a1deba514996acca6a6361 | d326cd8d4ca98e89b32e6a6bf6ecb26310cebdc1 | /BioinformaticsStronghold/inod/inod.py | f56d33de9bff5554c7a1f662eb81fcda0118c1c3 | [] | no_license | dswisher/rosalind | d6af5195cdbe03adb5a19ed60fcbf8c05beac784 | 4519740350e47202f7a45ce70e434f7ee15c6afc | refs/heads/master | 2021-08-09T02:58:17.131164 | 2017-11-12T01:26:26 | 2017-11-12T01:26:26 | 100,122,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py |
import sys
# Usage: python inod.py <n>, where n is the number of leaf nodes.
# (Python 2 script: note the statement-form prints.)
if len(sys.argv) != 2:
    print "Enter the number of leaf nodes."
    sys.exit(1)

n = int(sys.argv[1])

# Rosalind problem INOD: an unrooted binary tree with n leaves has
# exactly n - 2 internal nodes.
print n - 2
| [
"big.swish@gmail.com"
] | big.swish@gmail.com |
335adfd99ad95714abcd6661f54e15f6570b44c8 | 6bb80d482bfd0cd5feb6f2d37c7235a27b3466d6 | /pretrained-model/multispeaker-separation/fastsep-4-mel.py | 1c1ff8e391e1667dc0c0bdcf0ef294f05dbbc4d2 | [
"MIT"
] | permissive | dakmatt/malaya-speech | deadb00e1aa8a03593721c26457f35158e67d96d | 957cfb1952760c30d3b4a2a2e60b7f142394cbd3 | refs/heads/master | 2023-04-03T13:56:53.675046 | 2021-04-19T03:31:40 | 2021-04-19T03:31:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,562 | py | import os
import warnings
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
warnings.filterwarnings('ignore')
import tensorflow as tf
import malaya_speech
import numpy as np
import IPython.display as ipd
import matplotlib.pyplot as plt
import malaya_speech.augmentation.waveform as augmentation
from malaya_speech.train.model import fastsplit, fastspeech, fastvc
from malaya_speech.train.model import sepformer_old as sepformer
from malaya_speech.utils import tf_featurization
import malaya_speech.train as train
import random
import pickle
from glob import glob
from sklearn.utils import shuffle
# Audio sample rate in Hz (matches the 'split-speaker-22k-train' corpus name).
sr = 22050
# Number of speakers/sources the separator predicts per mixture.
speakers_size = 4
def get_data(combined_path, speakers_size = 4, sr = 22050):
    """Load one training example from disk.

    `combined_path` points at the pickled mixture features; the per-speaker
    targets live at the same path with 'combined' replaced by the speaker
    index.  `sr` is not used here and is kept only for signature
    compatibility.
    """

    def _unpickle(path):
        with open(path, 'rb') as handle:
            return pickle.load(handle)

    combined = _unpickle(combined_path)
    y = [
        _unpickle(combined_path.replace('combined', str(speaker)))
        for speaker in range(speakers_size)
    ]
    return combined, y
def to_mel(y):
    """Convert a waveform to universal log-mel features, floored at log(1e-2)."""
    features = malaya_speech.featurization.universal_mel(y)
    floor = np.log(1e-2)
    features[features <= floor] = floor
    return features
def generate():
    """Yield {'combined', 'y', 'length'} training dicts forever.

    The pickle paths are reshuffled on every pass over the corpus; 'length'
    is wrapped in a one-element list so it batches as a [batch, 1] tensor.
    """
    paths = glob('split-speaker-22k-train/combined/*.pkl')
    while True:
        paths = shuffle(paths)
        for path in paths:
            x, y = get_data(path)
            yield {'combined': x, 'y': y, 'length': [len(x)]}
def get_dataset(batch_size = 8):
    """Return a zero-argument factory that builds the padded tf.data pipeline.

    The extra closure exists because the training harness below expects a
    callable `train_fn` it can invoke to (re)construct the dataset.
    """
    def get():
        # Examples come from the infinite python generator `generate`;
        # mel features have 80 bins, 'y' stacks one target per speaker.
        dataset = tf.data.Dataset.from_generator(
            generate,
            {'combined': tf.float32, 'y': tf.float32, 'length': tf.int32},
            output_shapes = {
                'combined': tf.TensorShape([None, 80]),
                'y': tf.TensorShape([speakers_size, None, 80]),
                'length': tf.TensorShape([None]),
            },
        )
        # Pad each field to the longest example in the batch.  Mel frames are
        # padded with log(1e-2), the same floor value used in `to_mel`, so
        # padding is indistinguishable from silence.
        dataset = dataset.padded_batch(
            batch_size,
            padded_shapes = {
                'combined': tf.TensorShape([None, 80]),
                'y': tf.TensorShape([speakers_size, None, 80]),
                'length': tf.TensorShape([None]),
            },
            padding_values = {
                'combined': tf.constant(np.log(1e-2), dtype = tf.float32),
                'y': tf.constant(np.log(1e-2), dtype = tf.float32),
                'length': tf.constant(0, dtype = tf.int32),
            },
        )
        return dataset

    return get
# Total number of optimizer steps for the whole run (also the LR decay horizon).
total_steps = 10000000


def model_fn(features, labels, mode, params):
    """tf.estimator model_fn building the Sepformer mel-domain separator.

    `labels` and `params` are unused but required by the Estimator
    model_fn signature.  Only TRAIN and EVAL modes are handled; a PREDICT
    call would leave `estimator_spec` unbound and raise — presumably this
    model_fn is never used for inference here (confirm before reuse).
    """
    # Per-example frame counts; features['length'] is batched as [batch, 1].
    lengths = features['length'][:, 0]
    # Shrink the stock FastSpeech config to a 256-dim, 4-layer/4-head encoder.
    config = malaya_speech.config.fastspeech_config
    dim = 256
    config['encoder_hidden_size'] = dim
    config['decoder_hidden_size'] = dim
    config['encoder_num_hidden_layers'] = 4
    config['encoder_num_attention_heads'] = 4
    config = fastspeech.Config(vocab_size = 1, **config)
    # Factories (not instances): Model_Mel instantiates its own sub-modules,
    # using the same encoder factory twice.
    transformer = lambda: sepformer.Encoder_FastSpeech(
        config.encoder_self_attention_params
    )
    decoder = lambda: fastvc.Decoder(config.decoder_self_attention_params)
    model = sepformer.Model_Mel(
        transformer, transformer, decoder, activation = None
    )
    logits = model(features['combined'], lengths)
    # Reorder axes so the per-speaker predictions line up with features['y']
    # for the loss — assumes logits carry the speaker axis first; confirm
    # against Model_Mel's output layout.
    outputs = tf.transpose(logits, [1, 2, 0, 3])
    loss = fastsplit.calculate_loss(
        features['y'], outputs, lengths, C = speakers_size
    )
    # Named identity so LoggingTensorHook (below) can fetch 'total_loss'.
    tf.identity(loss, 'total_loss')
    tf.summary.scalar('total_loss', loss)
    # NOTE(review): `global_step` is fetched but never used afterwards.
    global_step = tf.train.get_or_create_global_step()
    if mode == tf.estimator.ModeKeys.TRAIN:
        # AdamW with linear warmup (100k steps) and decay to 1e-5.
        train_op = train.optimizer.adamw.create_optimizer(
            loss,
            init_lr = 0.0001,
            num_train_steps = total_steps,
            num_warmup_steps = 100000,
            end_learning_rate = 0.00001,
            weight_decay_rate = 0.001,
            beta_1 = 0.9,
            beta_2 = 0.98,
            epsilon = 1e-6,
            clip_norm = 1.0,
        )
        estimator_spec = tf.estimator.EstimatorSpec(
            mode = mode, loss = loss, train_op = train_op
        )
    elif mode == tf.estimator.ModeKeys.EVAL:
        estimator_spec = tf.estimator.EstimatorSpec(
            mode = tf.estimator.ModeKeys.EVAL, loss = loss
        )
    return estimator_spec
# Log the tensor named 'total_loss' (created via tf.identity in model_fn)
# on every step.
train_hooks = [tf.train.LoggingTensorHook(['total_loss'], every_n_iter = 1)]
train_dataset = get_dataset()
save_directory = 'split-speaker-sepformer-mel'
# Launch single-GPU training; eval_step = 0 disables evaluation entirely.
train.run_training(
    train_fn = train_dataset,
    model_fn = model_fn,
    model_dir = save_directory,
    num_gpus = 1,
    log_step = 1,
    save_checkpoint_step = 3000,
    max_steps = total_steps,
    train_hooks = train_hooks,
    eval_step = 0,
)
| [
"husein.zol05@gmail.com"
] | husein.zol05@gmail.com |
82f36f99b83a7f014359ee700cc6a8d0857c8d66 | 8ec910de801b424540abb4e6e955838a287663b6 | /CursoPython/Unidad8/Ejemplos/import_namespace.py | 732f5be705991ccca0ecc1b2ae701410af71fdbe | [] | no_license | hector81/Aprendiendo_Python | f4f211ace32d334fb6b495b1b8b449d83a7f0bf8 | 9c73f32b0c82f08e964472af1923f66c0fbb4c22 | refs/heads/master | 2022-12-28T03:41:20.378415 | 2020-09-28T09:15:03 | 2020-09-28T09:15:03 | 265,689,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | # import calculadora.suma
# suma(3,5)
# el resultado de la suma es: 8 | [
"noreply@github.com"
] | hector81.noreply@github.com |
a939e1424dc025b49be00eec0aa30b151b213231 | caf644aa3e6aa7551567f806481a0465870b7ad8 | /login/migrations/0001_initial.py | c58190f1c6d91c015ccb8552792bb1b1bcd7f4ff | [] | no_license | 270466585/restudy_dj | 86b79ec5924c9c998a2f6841a64509df21ccd885 | ba7295ecfd947e475cb328334cc70d68a49c3e51 | refs/heads/master | 2020-04-18T12:11:13.041477 | 2019-01-25T10:13:34 | 2019-01-25T10:13:34 | 167,526,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | # Generated by Django 2.1.5 on 2019-01-25 05:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the custom User table for the login app."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, unique=True)),
                # NOTE(review): 256 chars suggests a hashed password is stored
                # here rather than using django.contrib.auth — confirm.
                ('password', models.CharField(max_length=256)),
                ('email', models.EmailField(max_length=254, unique=True)),
                # NOTE(review): default='男' is the *display* label, not the
                # stored key 'male' from the choices — forms validating
                # against choices would reject the default; verify the model.
                ('sex', models.CharField(choices=[('male', '男'), ('female', '女')], default='男', max_length=32)),
                ('c_time', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': '用户',
                'verbose_name_plural': '用户',
                'ordering': ['-c_time'],
            },
        ),
    ]
| [
"27066585@qq.com"
] | 27066585@qq.com |
2a4db2a394411b0b979fb3fcab54c78eda1e0084 | 34d6ec6c9a459ab592f82137927107f967831400 | /week01/6-plus-one.py | 0ba349debae40b719c8d4e01eaa728d962cee038 | [
"MIT"
] | permissive | MiracleWong/algorithm-learning-camp | 228605311597dc3c29f73d4fb6b7abedc65d05a7 | aa5bee8f12dc25992aaebd46647537633bf1207f | refs/heads/master | 2023-07-15T21:34:11.229006 | 2021-09-05T09:06:16 | 2021-09-05T09:06:16 | 379,647,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
digits = [str(i) for i in digits]
num = int(''.join(digits))
num += 1
num = str(num)
res = []
for i in num:
res.append(int(i))
return res | [
"cfwr1991@126.com"
] | cfwr1991@126.com |
fdeb6676569e269227595aa1bdbadc6f6635586c | 49536aafb22a77a6caf249c7fadef46d63d24dfe | /tensorflow/tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py | 390ccd6773ba7d750cf4dfa82a4410d46de8b8af | [
"Apache-2.0"
] | permissive | wangzhi01/deeplearning-1 | 4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d | 46ab82253d956953b8aa98e97ceb6cd290e82288 | refs/heads/master | 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,333 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import distributions
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
ds = distributions  # short alias used throughout the tests


class DistributionTest(test.TestCase):

  def testParamShapesAndFromParams(self):
    """Distributions built via param_shapes() sample with the right shape."""
    classes = [
        ds.Normal,
        ds.Bernoulli,
        ds.Beta,
        ds.Chi2,
        ds.Exponential,
        ds.Gamma,
        ds.InverseGamma,
        ds.Laplace,
        ds.StudentT,
        ds.Uniform,
    ]
    sample_shapes = [(), (10,), (10, 20, 30)]
    with self.test_session():
      for cls in classes:
        for sample_shape in sample_shapes:
          param_shapes = cls.param_shapes(sample_shape)
          params = dict([(name, random_ops.random_normal(shape))
                         for name, shape in param_shapes.items()])
          dist = cls(**params)
          self.assertAllEqual(sample_shape,
                              array_ops.shape(dist.sample()).eval())
          # copy() must preserve sampling shape and constructor parameters.
          dist_copy = dist.copy()
          self.assertAllEqual(sample_shape,
                              array_ops.shape(dist_copy.sample()).eval())
          self.assertEqual(dist.parameters, dist_copy.parameters)

  def testCopyExtraArgs(self):
    """copy() round-trips parameters for spot-checked distributions."""
    with self.test_session():
      # Note: we cannot easily test all distributions since each requires
      # different initialization arguments. We therefore spot test a few.
      normal = ds.Normal(loc=1., scale=2., validate_args=True)
      self.assertEqual(normal.parameters, normal.copy().parameters)
      wishart = ds.WishartFull(df=2, scale=[[1., 2], [2, 5]],
                               validate_args=True)
      self.assertEqual(wishart.parameters, wishart.copy().parameters)

  def testCopyOverride(self):
    """copy(**overrides) changes only the overridden constructor args."""
    with self.test_session():
      normal = ds.Normal(loc=1., scale=2., validate_args=True)
      unused_normal_copy = normal.copy(validate_args=False)
      base_params = normal.parameters.copy()
      copy_params = normal.copy(validate_args=False).parameters.copy()
      # Only 'validate_args' should differ between original and copy.
      self.assertNotEqual(
          base_params.pop("validate_args"), copy_params.pop("validate_args"))
      self.assertEqual(base_params, copy_params)

  def testIsScalar(self):
    """is_scalar_event/is_scalar_batch plus all _is_scalar_helper codepaths."""
    with self.test_session():
      mu = 1.
      sigma = 2.

      normal = ds.Normal(mu, sigma, validate_args=True)
      self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
      self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch()))

      normal = ds.Normal([mu], [sigma], validate_args=True)
      self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
      self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch()))

      mvn = ds.MultivariateNormalDiag([mu], [sigma], validate_args=True)
      self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
      self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch()))

      mvn = ds.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True)
      self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
      self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch()))

      # We now test every codepath within the underlying is_scalar_helper
      # function.

      # Test case 1, 2.
      x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
      # None would fire an exception were it actually executed.
      self.assertTrue(normal._is_scalar_helper(x.get_shape(), lambda: None))
      self.assertTrue(
          normal._is_scalar_helper(tensor_shape.TensorShape(None),
                                   lambda: array_ops.shape(x)))

      x = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      # None would fire an exception were it actually executed.
      self.assertFalse(normal._is_scalar_helper(x.get_shape(), lambda: None))
      self.assertFalse(
          normal._is_scalar_helper(tensor_shape.TensorShape(None),
                                   lambda: array_ops.shape(x)))

      # Test case 3: shape only known dynamically, so the result is a tensor.
      x = array_ops.placeholder(dtype=dtypes.int32)
      is_scalar = normal._is_scalar_helper(x.get_shape(),
                                           lambda: array_ops.shape(x))
      self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
      self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))

  def _GetFakeDistribution(self):
    """Build a Distribution subclass with fixed static batch/event shapes."""

    class FakeDistribution(ds.Distribution):
      """Fake Distribution for testing _set_sample_static_shape."""

      def __init__(self, batch_shape=None, event_shape=None):
        self._static_batch_shape = tensor_shape.TensorShape(batch_shape)
        self._static_event_shape = tensor_shape.TensorShape(event_shape)
        super(FakeDistribution, self).__init__(
            dtype=dtypes.float32,
            reparameterization_type=distributions.NOT_REPARAMETERIZED,
            validate_args=True,
            allow_nan_stats=True,
            name="DummyDistribution")

      def _batch_shape(self):
        return self._static_batch_shape

      def _event_shape(self):
        return self._static_event_shape

    return FakeDistribution

  def testSampleShapeHints(self):
    """_set_sample_static_shape merges sample, batch and event static shapes."""
    fake_distribution = self._GetFakeDistribution()

    with self.test_session():
      # Make a new session since we're playing with static shapes. [And below.]
      x = array_ops.placeholder(dtype=dtypes.float32)
      dist = fake_distribution(batch_shape=[2, 3], event_shape=[5])
      sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
      y = dist._set_sample_static_shape(x, sample_shape)
      # We use as_list since TensorShape comparison does not work correctly for
      # unknown values, ie, Dimension(None).
      self.assertAllEqual([6, 7, 2, 3, 5], y.get_shape().as_list())

    with self.test_session():
      x = array_ops.placeholder(dtype=dtypes.float32)
      dist = fake_distribution(batch_shape=[None, 3], event_shape=[5])
      sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
      y = dist._set_sample_static_shape(x, sample_shape)
      self.assertAllEqual([6, 7, None, 3, 5], y.get_shape().as_list())

    with self.test_session():
      x = array_ops.placeholder(dtype=dtypes.float32)
      dist = fake_distribution(batch_shape=[None, 3], event_shape=[None])
      sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
      y = dist._set_sample_static_shape(x, sample_shape)
      self.assertAllEqual([6, 7, None, 3, None], y.get_shape().as_list())

    with self.test_session():
      # Fully unknown batch/event shapes leave the result rank unknown.
      x = array_ops.placeholder(dtype=dtypes.float32)
      dist = fake_distribution(batch_shape=None, event_shape=None)
      sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
      y = dist._set_sample_static_shape(x, sample_shape)
      self.assertTrue(y.get_shape().ndims is None)

    with self.test_session():
      x = array_ops.placeholder(dtype=dtypes.float32)
      dist = fake_distribution(batch_shape=[None, 3], event_shape=None)
      sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
      y = dist._set_sample_static_shape(x, sample_shape)
      self.assertTrue(y.get_shape().ndims is None)


if __name__ == "__main__":
  test.main()
| [
"hanshuobest@163.com"
] | hanshuobest@163.com |
d50dfca35776427690928466b7cbd9a5e88a3c3d | 1a80fa7faf79b34c6eff3fa08226964c04bba0c7 | /centipede.py | b8d68543cb206384d962841f40fd72365e48ca1e | [
"MIT"
] | permissive | brentru/CircuitPython_Centipede_Chromebook | 4d274efd375c59b7197f7b4dd35948a7e85a53ab | b4972aadbfb3890b7b9137373f5c11ae7dd8a727 | refs/heads/master | 2021-01-25T10:00:16.383835 | 2018-02-28T20:20:03 | 2018-02-28T20:20:03 | 123,333,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,462 | py | """
Centipede for Chromebook for CircuitPython
Copyright (c) 2018, Brent Rubell for Adafruit Industries
Centipede_for_Chromebook_Enrollment by Amplified_Labs
Copyright (c) 2016, Amplified IT
See the full description at http://labs.amplifiedit.com/centipede
Support forums are available at https://plus.google.com/communities/100599537603662785064
Published under an MIT License https://opensource.org/licenses/MIT
"""
import time
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
import board
import neopixel
import digitalio
# Modify the following to fit WiFi/Enrollment credentials:
wifi_name = "adafruit_ssid"
wifi_pass = "adafruit_password"

# wifi_security options:
#   0 = open
#   1 = WEP
#   2 = WPA
wifi_security = 2

# Enrollment account credentials.
# (Bug fix: the variable was misspelled `pasword`; nothing else in this
# script referenced the old name.)
username = "circuit"
password = "python"
kbd = Keyboard()
# American (US) layout for translating strings into keycodes.
layout = KeyboardLayoutUS(kbd)

# Start button on D4 (compatible with the builtin A button on the Circuit
# Playground Express): input with a pull-up, so it reads low when pressed.
# Bug fixes: Direction/Pull/DigitalInOut were used unqualified (only the
# `digitalio` module is imported), and `start_button` was a typo for
# `start_btn` — the original raised NameError on import.
start_btn = digitalio.DigitalInOut(board.D4)
start_btn.direction = digitalio.Direction.INPUT
start_btn.pull = digitalio.Pull.UP

# Builtin LED on D13, used as an activity indicator.
led = digitalio.DigitalInOut(board.D13)
led.direction = digitalio.Direction.OUTPUT
def repeat_key(key, num_repeat):
    """Press and release `key` num_repeat times, pausing 1s between presses.

    Bug fix: the original pressed `keycode.key` — an undefined name —
    instead of the `key` argument, raising NameError on first call.
    """
    for _ in range(num_repeat):
        kbd.press(key)
        kbd.release_all()
        time.sleep(1)
def wifi_config():
    """Drive the Chromebook's wifi setup screens via HID keystrokes.

    Reads the module-level `wifi_security` setting.  Bug fixes over the
    original: every keycode now comes from the imported `Keycode` class
    (the lowercase `keycode`/`keyboard` objects and the bare `TAB`/`tab`
    names were all undefined), and the `repeatKey` typo is corrected to
    `repeat_key`.
    """
    repeat_key(Keycode.TAB, 3)
    kbd.press(Keycode.ENTER)
    kbd.release_all()
    # up arrow 2 times to open extra wifi settings
    repeat_key(Keycode.TAB, 2)
    kbd.press(Keycode.ENTER)
    kbd.release_all()
    time.sleep(1)
    # SSID Config
    # TODO: the SSID and password are still never typed anywhere in this
    # function — presumably layout.write(wifi_name) / layout.write(wifi_pass)
    # are intended here; confirm against the target enrollment flow.
    time.sleep(1)
    kbd.press(Keycode.TAB)
    time.sleep(1)
    if wifi_security == 0:
        # Open network: skip past the password field.
        repeat_key(Keycode.TAB, 2)
    else:
        # type in wifi pass
        kbd.press(Keycode.ENTER)
        time.sleep(.1)
    # Give the network time to connect before confirming.
    time.sleep(10)
    kbd.press(Keycode.TAB)
    kbd.press(Keycode.ENTER)
    time.sleep(.2)
    # enter enrollment
    kbd.press(Keycode.ENTER)
    time.sleep(1)
# Main loop: poll the start button every 4 seconds and run the flow once
# it is triggered.
while True:
    time.sleep(4)
    # NOTE(review): with a pull-up input, `value` reads 1 while the button is
    # RELEASED and 0 when pressed — this trigger condition looks inverted;
    # confirm against the actual wiring.
    if(start_btn.value == 1):
        # run wifi config
        led.value = 1
        wifi_config()
        time.sleep(5)
        # Wait for the next button press before the second phase.
        while(start_btn.value != 1):
            time.sleep(1)
        led.value = 0
        # run credential config
        # NOTE(review): credential_config() is never defined anywhere in this
        # file — reaching this line raises NameError.
        credential_config()
        # pulse the neopixel ring
| [
"robots199@me.com"
] | robots199@me.com |
2f20ea73c211654103ed77f7d94454777f5cfc0d | 37c5a0a8ee807ec7e40bd38d4ecb6d7a8d1e21cd | /src/python/serif/theory/icews_event_mention.py | 5aa8b7b09dae41264fbcd246ebabfc270080420a | [
"Apache-2.0"
] | permissive | BBN-E/text-open | 3321187f17b3fbc0317c95d32aa3a741c8e9769b | b486a66339a330e94d81850e6acb3a7e34df746e | refs/heads/main | 2023-06-10T09:45:57.952476 | 2023-05-25T18:44:11 | 2023-05-25T18:44:11 | 302,090,801 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | from serif.theory.proposition import Proposition
from serif.theory.serif_theory import SerifTheory
from serif.theory.value_mention import ValueMention
from serif.xmlio import _SimpleAttribute, _ChildTheoryElementList, _ReferenceAttribute, _ReferenceListAttribute
class ICEWSEventMention(SerifTheory):
    """Serif theory object for an ICEWS event mention.

    Each class attribute is a declarative descriptor mapping a SerifXML
    attribute or child element onto this object.
    """

    # Participant child elements of this event mention.
    participants = _ChildTheoryElementList('ICEWSEventParticipant')
    # Required string attributes (event code — presumably a CAMEO-style
    # code — plus tense and the id of the extraction pattern that fired).
    event_code = _SimpleAttribute(is_required=True)
    event_tense = _SimpleAttribute(is_required=True)
    pattern_id = _SimpleAttribute(is_required=True)
    # Optional reference to the ValueMention carrying the event's time.
    time_value_mention = _ReferenceAttribute('time_value_mention_id',
                                             cls=ValueMention,
                                             is_required=False)
    # Propositions linked to this event mention by id.
    propositions = _ReferenceListAttribute('proposition_ids', cls=Proposition)
    original_event_id = _SimpleAttribute(is_required=False)
    # Optional flag marking the event relation as reciprocal.
    is_reciprocal = _SimpleAttribute(bool, is_required=False)
| [
"hqiu@bbn.com"
] | hqiu@bbn.com |
b0e958e2cf63938be65865ac0cfdf533a47698b0 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /output/StudentProblem/10.21.12.1/7/1569572792.py | 26ebc69dac781be379b9c65043af3ac844948ef0 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | ============================= test session starts ==============================
platform darwin -- Python 3.7.4, pytest-5.4.1, py-1.8.1, pluggy-0.13.1
rootdir: /tmp
collected 1 item
../../../../../tmp F [100%]
=================================== FAILURES ===================================
____________________________________ test_5 ____________________________________
def test_5():
> assert divisors(10) == [1, 2, 5, 10]
/private/tmp/blabla.py:17:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
x = 10
def divisors(x: int):
result = []
for i in range(x + 1):
> if not(x % i):
E ZeroDivisionError: integer division or modulo by zero
/private/tmp/blabla.py:11: ZeroDivisionError
=========================== short test summary info ============================
FAILED ../../../../../tmp/::test_5 - ZeroDivisionError: integer division or m...
============================== 1 failed in 0.05s ===============================
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
2abe504e9ab45cb335111ffdbc077fec444f5b0c | dab7eb86a8ffe3fcf012a851b2bf243ff7e06088 | /longestpossibleappna_3898/wsgi.py | d89f34b75ea5f57ca3a45a3dcb580dbe20c3e61a | [] | no_license | crowdbotics-apps/longestpossibleappna-3898 | 54760668be0c30a41fd6235232bf7782e70958a7 | dc57d9786b53d7018cca6256c28a8202625345ce | refs/heads/master | 2023-05-26T07:21:41.191231 | 2020-04-30T14:23:13 | 2020-04-30T14:23:13 | 260,232,643 | 0 | 0 | null | 2021-06-10T17:01:00 | 2020-04-30T14:22:47 | Python | UTF-8 | Python | false | false | 427 | py | """
WSGI config for longestpossibleappna_3898 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'longestpossibleappna_3898.settings')

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
737c77a191f20cede4d4431f84652b00e894ce30 | d2970ef359537f553e86dc05015b265611bd8f4f | /Aiden/Ceaser_Cypher.py | 7c503c72fa83a9975e1d05fd1c12ae791c0e2549 | [] | no_license | idcrypt3/camp_2019_07_07 | cc68c28f9c84a0ad6ac893cb65a0a48502a09af6 | 4c748b60f1553072dbda9d4d226b39a32548521f | refs/heads/master | 2020-06-17T08:23:30.734953 | 2019-07-17T16:29:55 | 2019-07-17T16:29:55 | 195,860,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | alphabet = "abcdefghijklmnopqrstuvwxyz"
# Caesar cipher: shift every lowercase letter of the message by `key`
# positions; everything else (spaces, digits, punctuation) passes through.
message = input("Please enter a secret message: ").lower()
key = int(input("Please enter a number to shift by: "))

# Normalize the shift into [0, 26).  Python's % already maps negative keys
# correctly, and this also fixes the original three-way branch, which
# silently produced an unshifted/garbled alphabet for any |key| > 26.
shift = key % 26
newAlphabet = alphabet[shift:] + alphabet[:shift]

newMessage = ""
for ch in message:
    index = alphabet.find(ch)
    if index < 0:
        # Not a lowercase letter: keep it unchanged.
        newMessage += ch
    else:
        newMessage += newAlphabet[index]
print(newMessage)
| [
"idcrypt3@gmail.com"
] | idcrypt3@gmail.com |
25a6097e0dd1368a43cac42ba3c40ecfc7ad22aa | 03e91d7a923d94a4c0cd016e3c64cdefa7d0e1c5 | /order/migrations/0006_productinordermodel_image.py | 073bb3ff6d5e5c476b1fc8d43e10aeac1f91c923 | [] | no_license | win77g/irashop | 0e6afec6fd0397ee82484f718e90502cfc627efb | 9244c59ca69e263c24c9ad92ddf355b8f9ee4efc | refs/heads/master | 2023-06-30T03:17:09.333673 | 2021-08-05T21:10:26 | 2021-08-05T21:10:26 | 261,574,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # Generated by Django 2.2.11 on 2020-04-05 13:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds an optional 'image' char field to ProductInOrderModel."""

    dependencies = [
        ('order', '0005_productinordermodel_size'),
    ]

    operations = [
        migrations.AddField(
            model_name='productinordermodel',
            name='image',
            # Nullable/blank with default None: existing rows get NULL.
            field=models.CharField(blank=True, default=None, max_length=128, null=True, verbose_name='Фото'),
        ),
    ]
| [
"win21g@mail.ru"
] | win21g@mail.ru |
41b624dfe2a84d1fa0848874a189accdc719f795 | c61bcb0732a1c92bbed4195573e68393400c9fb7 | /suorganizer/suorganizer/settings.py | 3dac5bc8cb21252fd0fb73a1d3018c49f0cd77d6 | [] | no_license | paulhendricks/django-unleashed | ba181454a644d53a555adf814769965bf7a2fded | 209bb4f42e8d6856ff760f46f4338834d96d711d | refs/heads/master | 2021-01-20T18:36:33.677366 | 2016-07-24T23:11:48 | 2016-07-24T23:11:48 | 61,044,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,209 | py | """
Django settings for suorganizer project.
Generated by 'django-admin startproject' using Django 1.9.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Repository root: two levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '1z#y4!g0b%!3x+kt#nk0#0q$2!40xw-0%w_pec$7$^yow$)9mj'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps.
    'organizer',
    'blog'
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'suorganizer.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'suorganizer.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases

# Single SQLite database stored next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/

STATIC_URL = '/static/'
"paul.hendricks.2013@owu.edu"
] | paul.hendricks.2013@owu.edu |
cc23921f539f87f2abaf47cf8abbe9bab1429e24 | c8f10dd7dbb1a4cf2e22f5fc1cef6affa68013f9 | /myproject/crm/mixins.py | 64dd03a65e19c9dd476331f195ed2c36fd15df26 | [] | no_license | olivx/jc-challenge | 72f016d47c31fa7c7d8c57222eb60861dbc397ef | 55e8c24231605dcaec22f0d24d133e1702daa0c5 | refs/heads/master | 2021-01-12T09:19:17.059412 | 2017-02-13T03:06:24 | 2017-02-13T03:06:24 | 81,324,849 | 0 | 0 | null | 2017-02-13T03:03:19 | 2017-02-08T11:52:31 | Python | UTF-8 | Python | false | false | 244 | py | # -*- coding: utf-8 -*-
class CounterMixin(object):
def get_context_data(self, **kwargs):
context = super(CounterMixin, self).get_context_data(**kwargs)
context['count'] = self.get_queryset().count()
return context
| [
"rg3915@yahoo.com.br"
] | rg3915@yahoo.com.br |
0fa45dc47e6ff8104a20a15bfe36e559a5a7764e | d138deda43e36f6c79c5e3a9ef1cc62c6a92e881 | /python/paddle/amp/auto_cast.py | 441bc31b93684f94fd1dc36183679f493c03ada0 | [
"Apache-2.0"
] | permissive | seiriosPlus/Paddle | 51afd6f5c85c3ce41dd72953ee659d1539c19f90 | 9602a182b2a4979247c09df1ec283fc39cb4a981 | refs/heads/develop | 2021-08-16T16:05:10.848535 | 2020-12-27T15:15:19 | 2020-12-27T15:15:19 | 123,257,829 | 2 | 0 | Apache-2.0 | 2019-12-10T08:22:01 | 2018-02-28T08:57:42 | C++ | UTF-8 | Python | false | false | 2,562 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.dygraph.amp import amp_guard
__all__ = ['auto_cast']
def auto_cast(enable=True, custom_white_list=None, custom_black_list=None):
"""
Create a context which enables auto-mixed-precision(AMP) of operators executed in dynamic graph mode.
If enabled, the input data type (float32 or float16) of each operator is decided
by autocast algorithm for better performance.
Commonly, it is used together with `GradScaler` to achieve Auto-Mixed-Precision in
imperative mode.
Args:
enable(bool, optional): Enable auto-mixed-precision or not. Default is True.
custom_white_list(set|list, optional): The custom white_list. It's the set of ops that support
fp16 calculation and are considered numerically-safe and performance-critical. These ops
will be converted to fp16.
custom_black_list(set|list, optional): The custom black_list. The set of ops that support fp16
calculation and are considered numerically-dangerous and whose effects may also be
observed in downstream ops. These ops will not be converted to fp16.
Examples:
.. code-block:: python
import paddle
conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
data = paddle.rand([10, 3, 32, 32])
with paddle.amp.auto_cast():
conv = conv2d(data)
print(conv.dtype) # FP16
with paddle.amp.auto_cast(enable=False):
conv = conv2d(data)
print(conv.dtype) # FP32
with paddle.amp.auto_cast(custom_black_list={'conv2d'}):
conv = conv2d(data)
print(conv.dtype) # FP32
a = paddle.rand([2,3])
b = paddle.rand([2,3])
with paddle.amp.auto_cast(custom_white_list={'elementwise_add'}):
c = a + b
print(c.dtype) # FP16
"""
return amp_guard(enable, custom_white_list, custom_black_list)
| [
"noreply@github.com"
] | seiriosPlus.noreply@github.com |
b991c27bef8a290364d95bb429d91db56f260232 | 8dffff5ff7f2645a50fd9846198e12e4c96a91da | /32-gcf.py | 40180be85f2c2280d2e4c6ac1c55700e901f6693 | [] | no_license | akshaypawar2508/Coderbyte-pythonSol | b233c5ee0c34e0413a26b24b423dae45342b9ade | 5c7d2028fe09fd02aad7808f88abc40fdea0f81e | refs/heads/master | 2022-01-03T09:44:18.635060 | 2014-07-31T13:32:08 | 2014-07-31T13:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | def Division(num1,num2):
while num2 != 0:
num1, num2 = num2, num1%num2
return num1
# keep this function call here
# to see how to enter arguments in Python scroll down
print Division(raw_input())
| [
"xzhu15@illinois.edu"
] | xzhu15@illinois.edu |
3de488bf4eb42746d03ff642d52f553da3b0a0a9 | 38828c16d3f6f466fe416067a099e139ba85a441 | /imageupload/migrations/0008_auto_20181011_1407.py | 677150fe5f7c6d89d7755a39b13514f4ee8858fc | [] | no_license | finebrush/takeatrips | 20c46329af0135f1bc3773a179520f78d042fc53 | 8641a669f3daca646e915cd82a69d5d61ee7ab3d | refs/heads/master | 2022-12-30T08:21:17.191846 | 2018-11-07T15:29:23 | 2018-11-07T15:29:23 | 155,531,775 | 0 | 0 | null | 2022-12-08T02:59:00 | 2018-10-31T09:33:15 | JavaScript | UTF-8 | Python | false | false | 973 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2018-10-11 05:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('imageupload', '0007_auto_20181011_1404'),
]
operations = [
migrations.AlterField(
model_name='uploadedimage',
name='bs_thename',
field=models.CharField(default='지역명', max_length=255, verbose_name='location name'),
),
migrations.AlterField(
model_name='uploadedimage',
name='bs_title',
field=models.CharField(default='소개 타이틀', max_length=255, verbose_name='Title of intro image'),
),
migrations.AlterField(
model_name='uploadedimage',
name='bs_writer',
field=models.CharField(default='소개 작가', max_length=255, verbose_name='writer of intro image'),
),
]
| [
"finebrush.mlab@gmail.com"
] | finebrush.mlab@gmail.com |
a348cd18fff127c70b3192663bfbcd78170a7dcf | 22d368661afd1ba00378d9da8eacadb86e2d4f95 | /vk/types/responses/__init__.py | 965c47e5089b6c9feb0aa36b023c7ff21b364cf9 | [
"MIT"
] | permissive | yilbegan/vk.py | e5e0887fde758e12577b394cd2636c48a5dc74be | 128029969edb57806b1d3d13a0a43613bc33abd3 | refs/heads/master | 2020-07-08T08:48:13.334133 | 2019-08-21T15:50:17 | 2019-08-21T15:50:17 | 203,623,364 | 3 | 1 | MIT | 2019-08-21T16:28:03 | 2019-08-21T16:28:03 | null | UTF-8 | Python | false | false | 358 | py | from . import others
from . import account
from . import apps
from . import appwidgets
from . import auth
from . import board
from . import database
from . import docs
from . import fave
from . import friends
from . import gift
from . import groups
from . import leadforms
from . import leads
from . import likes
from . import market
from . import messages
| [
"botyavs@gmail.com"
] | botyavs@gmail.com |
ae5aea420cf4046d5d1af7d4f13928738ec44541 | fdec477002fb0c5f013faf369d2a1e782172a1d6 | /shop/mainapp/api/api_views.py | 1b4b14641b6fdb7f493b43dfb8c352a846ffacea | [] | no_license | aimiranarzhigitova/API_projects | 19fb416479e5a76dab760f38621e643e2db609cb | 8256cc1bc8dc939453c61a39215e89dbd96fecb1 | refs/heads/master | 2023-05-16T08:52:51.209458 | 2021-06-06T09:44:53 | 2021-06-06T09:44:53 | 374,322,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,602 | py | from collections import OrderedDict
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.generics import ListAPIView, ListCreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.filters import SearchFilter
from rest_framework.pagination import PageNumberPagination
from .serializers import RegisterSerializer, UserSerializer, CategorySerializer, BaseProductSerializer, CustomerSerializer, CartProductSerializer, CartSerializers, OrderSerializer
from ..models import Category, Product, Customer, CartProduct, Cart, Order
from knox.models import AuthToken
# Register API
class RegisterAPI(generics.GenericAPIView):
serializer_class = RegisterSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
return Response({
"user": UserSerializer (user, context=self.get_serializer_context()).data,
"token": AuthToken.objects.create(user)[1]
})
class ProductPagination(PageNumberPagination):
page_size = 50
page_size_query_param = 'page_size'
max_page_size = 60
def get_paginated_response(self, data):
return Response(OrderedDict([
('objects_count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('items', data)
]))
class CategoryListApiView(ListCreateAPIView):
serializer_class = CategorySerializer
queryset = Category.objects.all()
class CategoryApiView(RetrieveUpdateDestroyAPIView):
serializer_class = CategorySerializer
queryset = Category.objects.all()
class ProductListApiView(ListCreateAPIView):
serializer_class = BaseProductSerializer
pagination_class = ProductPagination
queryset = Product.objects.all()
filter_backends = [SearchFilter]
search_fields = ['ip']
class ProductDetailApiView(RetrieveUpdateDestroyAPIView):
serializer_class = BaseProductSerializer
queryset = Product.objects.all()
class CustomersListApiView(ListAPIView):
serializer_class = CustomerSerializer
queryset = Customer.objects.all()
class CartProductListApiView(ListAPIView):
serializer_class = CartProductSerializer
queryset = CartProduct.objects.all()
class CartListApiView(ListAPIView):
serializer_class = CartSerializers
queryset = Cart.objects.all()
class OrderListApiView(ListAPIView):
serializer_class = OrderSerializer
queryset = Order.objects.all() | [
"aymira.narzhigitova@gmail.com"
] | aymira.narzhigitova@gmail.com |
a1f8521f2ec491413388a76ac4552bbd48a926e7 | 61f9553eedc2ec936ea87f06da5b986091e3b8ff | /workspace/buildout-cache/eggs/Products.PloneHotfix20130618-1.1-py2.7.egg/Products/PloneHotfix20130618/dataitems.py | 36af227651b6d4f537f0310c48bc0e2e8c1b648b | [] | no_license | gruhter/gso | 47880b055455cc99d63eec72498048c857e7831b | c0eb949f8a06aab6b97329d51a6d046e2fc0a653 | refs/heads/master | 2016-09-01T18:28:05.589620 | 2015-05-14T19:38:18 | 2015-05-14T19:38:18 | 35,579,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | from OFS.CopySupport import CopyContainer
CopyContainer.cb_dataItems__roles__ = () | [
"gso@abv.bg"
] | gso@abv.bg |
efe00f1898d63c1220007f55cbf52362ae1563d3 | e43906683d87683705670655bc185d113b797f9c | /spectrumFit/apr2018dijetgamma.py | 6d4cf1cd6b365cac3b63ca8791c060ce063d51bd | [] | no_license | Yvonne-Ng/GP | 1fcba24faa868c86bee71da26386600e94d179d9 | 7dba2626fd417d3b6e432160ed49f09980b59d1e | refs/heads/master | 2020-03-11T14:22:32.271495 | 2018-09-11T13:29:03 | 2018-09-11T13:29:03 | 130,051,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | from runFunctions import spectrumGlobalFit
if __name__=="__main__":
#-----------a template config file -------#
config={#-----Title
"title": "TrijetBtagged1",
"useScaled": False,
#-----fit range
"xMinFit": 300,
"xMaxFit": 1500,
"xMinGP": 300,
"xMaxGP": 1500,
#-----Spectrum file input
"dataFile": "/lustre/SCRATCH/atlas/ywng/WorkSpace/r21/gp-toys/data/all/data/dijetgamma_mjj_g150_2j25_inclusive.h5",
"dataFileTDir": "",
"dataFileHist": "background_mjj_var",
#------put some placeholder file here
"officialFitFile":"/lustre/SCRATCH/atlas/ywng/WorkSpace/r21/gp-toys/data/all/Step1_SearchPhase_Zprime_mjj_var.h5",
#-----Fit function
"fitFunction": 0, #0: UA2; 1: 4 params
#initial parameter for fitting
"initParam": (7438.410338225633, 0.24951051678754332, 102.55526846085624, -271.9876795034993),
#the range of the parameter value within which it is throwing from
"initFitParam": [10000,10,100,300], #None(default): (9.6, -1.67, 56.87,-75.877 )
# the allowed range of variable values
"initRange": [(2000, 8000.),(-10, 10),(-100, 600.),(-500, 300.)] } #None(default): [(-100000, 1000000.),(-100., 100.),(-100., 100.),(-100., 100.)]
spectrumGlobalFit.spectrumGlobalFit(config)
| [
"yvonne.ng@cern.ch"
] | yvonne.ng@cern.ch |
60bafb492156b02c296679d940270394ce35ffce | 683a90831bb591526c6786e5f8c4a2b34852cf99 | /HackerRank/Interview/Strings/2_AlternatingCharacters.py | a5e68da537d3ea7f3412c42110cbce7e63191e9a | [] | no_license | dbetm/cp-history | 32a3ee0b19236a759ce0a6b9ba1b72ceb56b194d | 0ceeba631525c4776c21d547e5ab101f10c4fe70 | refs/heads/main | 2023-04-29T19:36:31.180763 | 2023-04-15T18:03:19 | 2023-04-15T18:03:19 | 164,786,056 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # https://www.hackerrank.com/challenges/alternating-characters/problem
# Tag(s): Greedy, strings
def alternatingCharacters(s):
flag_1 = 'A'
flag_2 = 'B'
x = 0
y = 0
n = len(s)
for i in range(n):
if s[i] == flag_1:
flag_1 = ('B' if flag_1 == 'A' else 'A')
else:
x += 1
if s[i] == flag_2:
flag_2 = ('B' if flag_2 == 'A' else 'A')
else:
y += 1
return min(x, y)
if __name__ == '__main__':
T = int(input())
for _ in range(T):
s = input()
print(alternatingCharacters(s))
| [
"davbetm@gmail.com"
] | davbetm@gmail.com |
260d7c448653d6a14a06b38e37e97db6a29a0c48 | c1db9d9bca3c908d5c30f3c02e7bc7bb2dc5b892 | /task/models.py | 1c1be9e97539791fc75e151a7adcf115623b147f | [] | no_license | rashidhamid139/Practice | 00e3aa1f3caa2648d8f62b1791687dd1313608ad | dcfe96a124687ec87545e34fb7021ef2d6e13bdb | refs/heads/master | 2023-03-17T13:27:13.719717 | 2021-03-04T16:28:56 | 2021-03-04T16:28:56 | 278,792,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | from django.db import models
# Create your models here.
class Task(models.Model):
title = models.CharField(max_length=255)
date = models.DateTimeField(auto_now_add=True)
completed = models.BooleanField(default=False)
class Meta:
ordering = ['completed', 'date']
def __str__(self):
return self.title
| [
"rashidhamid139@gmail.com"
] | rashidhamid139@gmail.com |
6398b36c28197d9034cac3de143b8dbaa16bb367 | d24a6e0be809ae3af8bc8daa6dacfc1789d38a84 | /other_contests/SMTB2019/A.py | aa4532f9e5732cb8c1cbfa619c0ff436ae54baa8 | [] | no_license | k-harada/AtCoder | 5d8004ce41c5fc6ad6ef90480ef847eaddeea179 | 02b0a6c92a05c6858b87cb22623ce877c1039f8f | refs/heads/master | 2023-08-21T18:55:53.644331 | 2023-08-05T14:21:25 | 2023-08-05T14:21:25 | 184,904,794 | 9 | 0 | null | 2023-05-22T16:29:18 | 2019-05-04T14:24:18 | Python | UTF-8 | Python | false | false | 367 | py | def solve(m1, d1, m2, d2):
if m1 == m2:
return 0
else:
return 1
def main():
m1, d1 = map(int, input().split())
m2, d2 = map(int, input().split())
res = solve(m1, d1, m2, d2)
print(res)
def test():
assert solve(11, 16, 11, 17) == 0
assert solve(11, 30, 12, 1) == 1
if __name__ == "__main__":
test()
main()
| [
"cashfeg@gmail.com"
] | cashfeg@gmail.com |
c5e22cbd61727df2534eb81db6c450a3d6f869f5 | 6564b596ec27e67ee1b48377da1e7cee59cdcfe9 | /shenfun/forms/operators.py | c53c62765757615f7146f02a7eeef8cc364b704c | [
"BSD-2-Clause"
] | permissive | GeraintPratten/shenfun | 077b13d904fd6bf6880c412f74300d78494bee11 | d92eb058c9969175da19b23926fb80148cf92ace | refs/heads/master | 2023-07-04T13:46:27.969149 | 2021-08-10T11:48:32 | 2021-08-10T11:48:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,644 | py | """
This module contains the implementation of operators acting on arguments.
"""
import numpy as np
import sympy as sp
import copy
from .arguments import Expr, BasisFunction, Function, Array
__all__ = ('div', 'grad', 'Dx', 'curl')
#pylint: disable=protected-access
def _expr_from_vector_components(comp, basis):
"""Return Expr composed of vector components `comp`
"""
terms, scales, indices = [], [], []
for i in range(len(comp)):
terms += comp[i]._terms
scales += comp[i]._scales
indices += comp[i]._indices
return Expr(basis, terms, scales, indices)
def div(test):
"""Return div(test)
Parameters
----------
test: Expr or BasisFunction
Must be rank > 0 (cannot take divergence of scalar)
"""
assert isinstance(test, (Expr, BasisFunction))
if isinstance(test, BasisFunction):
test = Expr(test)
ndim = test.dimensions
coors = test.function_space().coors
if coors.is_cartesian:
if ndim == 1: # 1D
v = np.array(test.terms())
v += 1
test._terms = v.tolist()
return test
else:
if test.num_components() == ndim**2: # second rank tensor
dv = []
for i in range(ndim):
dv.append(div(test[i]))
return _expr_from_vector_components(dv, test.basis())
else: # vector
d = Dx(test[0], 0, 1)
for i in range(1, ndim):
d += Dx(test[i], i, 1)
d.simplify()
return d
else:
if ndim == 1: # 1D
sg = coors.get_sqrt_det_g()
d = Dx(test*sg, 0, 1)*(1/sg)
return d
else:
if test.num_components() == ndim**2:
ct = coors.get_christoffel_second()
d = []
for i in range(ndim):
di = []
for j in range(ndim):
Sij = test[i][j]
di.append(Dx(Sij, j, 1))
for k in range(ndim):
Skj = test[k][j]
if not ct[i, j, k] == 0:
di.append(Skj*ct[i, j, k])
if not ct[k, k, j] == 0:
di.append(Sij*ct[k, k, j])
dj = di[0]
for j in range(1, len(di)):
dj += di[j]
dj.simplify()
d.append(dj)
return _expr_from_vector_components(d, test.basis())
else:
sg = coors.get_sqrt_det_g()
d = Dx(test[0]*sg, 0, 1)*(1/sg)
for i in range(1, ndim):
d += Dx(test[i]*sg, i, 1)*(1/sg)
d.simplify()
return d
def grad(test):
"""Return grad(test)
Parameters
----------
test: Expr or BasisFunction
Note
----
Increases the rank of Expr by one
"""
assert isinstance(test, (Expr, BasisFunction))
if isinstance(test, BasisFunction):
test = Expr(test)
ndim = test.dimensions
coors = test.function_space().coors
if coors.is_cartesian:
d = []
if test.num_components() > 1:
for i in range(test.num_components()):
for j in range(ndim):
d.append(Dx(test[i], j, 1))
else:
for i in range(ndim):
d.append(Dx(test, i, 1))
else:
gt = coors.get_contravariant_metric_tensor()
if test.num_components() > 1:
ct = coors.get_christoffel_second()
d = []
for i in range(ndim):
vi = test[i]
for j in range(ndim):
dj = []
for l in range(ndim):
sc = gt[l, j]
if not sc == 0:
dj.append(Dx(vi, l, 1)*sc)
for k in range(ndim):
if not sc*ct[i, k, l] == 0:
dj.append(test[k]*(sc*ct[i, k, l]))
di = dj[0]
for m in range(1, len(dj)):
di += dj[m]
d.append(di)
else:
d = []
for i in range(ndim):
dj = []
for j in range(ndim):
sc = gt[j, i]
if not sc == 0:
dj.append(Dx(test, j, 1)*sc)
di = dj[0]
for j in range(1, len(dj)):
di += dj[j]
d.append(di)
dv = _expr_from_vector_components(d, test.basis())
dv.simplify()
return dv
def Dx(test, x, k=1):
"""Return k'th order partial derivative in direction x
Parameters
----------
test: Expr or BasisFunction
x: int
axis to take derivative over
k: int
Number of derivatives
"""
assert isinstance(test, (Expr, BasisFunction))
if k > 1:
for _ in range(k):
test = Dx(test, x, 1)
return test
if isinstance(test, BasisFunction):
test = Expr(test)
test = copy.copy(test)
coors = test.function_space().coors
if coors.is_cartesian:
v = np.array(test.terms())
v[..., x] += k
test._terms = v.tolist()
else:
assert test.expr_rank() < 1, 'Cannot (yet) take derivative of tensor in curvilinear coordinates'
psi = coors.psi
v = copy.deepcopy(test.terms())
sc = copy.deepcopy(test.scales())
ind = copy.deepcopy(test.indices())
num_terms = test.num_terms()
for i in range(test.num_components()):
for j in range(num_terms[i]):
sc0 = sp.simplify(sp.diff(sc[i][j], psi[x], k), measure=coors._measure)
sc0 = coors.refine(sc0)
if not sc0 == 0:
v[i].append(copy.deepcopy(v[i][j]))
sc[i].append(sc0)
ind[i].append(ind[i][j])
v[i][j][x] += k
test._terms = v
test._scales = sc
test._indices = ind
return test
def curl(test):
"""Return curl of test
Parameters
----------
test: Expr or BasisFunction
"""
assert isinstance(test, (Expr, BasisFunction))
if isinstance(test, BasisFunction):
test = Expr(test)
test = copy.copy(test)
assert test.expr_rank() > 0
assert test.num_components() == test.dimensions
coors = test.function_space().coors
if coors.is_cartesian:
if test.dimensions == 3:
w0 = Dx(test[2], 1, 1) - Dx(test[1], 2, 1)
w1 = Dx(test[0], 2, 1) - Dx(test[2], 0, 1)
w2 = Dx(test[1], 0, 1) - Dx(test[0], 1, 1)
test._terms = w0.terms()+w1.terms()+w2.terms()
test._scales = w0.scales()+w1.scales()+w2.scales()
test._indices = w0.indices()+w1.indices()+w2.indices()
else:
assert test.dimensions == 2
test = Dx(test[1], 0, 1) - Dx(test[0], 1, 1)
else:
assert test.expr_rank() < 2, 'Cannot (yet) take curl of higher order tensor in curvilinear coordinates'
hi = coors.hi
sg = coors.get_sqrt_det_g()
if coors.is_orthogonal:
if test.dimensions == 3:
w0 = (Dx(test[2]*hi[2]**2, 1, 1) - Dx(test[1]*hi[1]**2, 2, 1))*(1/sg)
w1 = (Dx(test[0]*hi[0]**2, 2, 1) - Dx(test[2]*hi[2]**2, 0, 1))*(1/sg)
w2 = (Dx(test[1]*hi[1]**2, 0, 1) - Dx(test[0]*hi[0]**2, 1, 1))*(1/sg)
test = _expr_from_vector_components([w0, w1, w2], test.basis())
else:
assert test.dimensions == 2
test = (Dx(test[1]*hi[1]**2, 0, 1) - Dx(test[0]*hi[0]**2, 1, 1))*(1/sg)
else:
g = coors.get_covariant_metric_tensor()
if test.dimensions == 3:
w0 = np.sum([(Dx(test[i]*g[2, i], 1, 1) - Dx(test[i]*g[1, i], 2, 1))*(1/sg) for i in range(3)])
w1 = np.sum([(Dx(test[i]*g[0, i], 2, 1) - Dx(test[i]*g[2, i], 0, 1))*(1/sg) for i in range(3)])
w2 = np.sum([(Dx(test[i]*g[1, i], 0, 1) - Dx(test[i]*g[0, i], 1, 1))*(1/sg) for i in range(3)])
# This is an alternative (more complicated way):
#gt = coors.get_contravariant_metric_tensor()
#ww0 = grad(g[0, 0]*test[0] + g[0, 1]*test[1] + g[0, 2]*test[2])
#ww1 = grad(g[1, 0]*test[0] + g[1, 1]*test[1] + g[1, 2]*test[2])
#ww2 = grad(g[2, 0]*test[0] + g[2, 1]*test[1] + g[2, 2]*test[2])
#d0 = sg*(ww0[1]*gt[0, 2] + ww1[1]*gt[1, 2] + ww2[1]*gt[2, 2] - ww0[2]*gt[0, 1] - ww1[2]*gt[1, 1] - ww2[2]*gt[2, 1])
#d1 = sg*(ww0[2]*gt[0, 0] + ww1[2]*gt[1, 0] + ww2[2]*gt[2, 0] - ww0[0]*gt[0, 2] - ww1[0]*gt[1, 2] - ww2[0]*gt[2, 2])
#d2 = sg*(ww0[0]*gt[0, 1] + ww1[0]*gt[1, 1] + ww2[0]*gt[2, 1] - ww0[1]*gt[0, 0] - ww1[1]*gt[1, 0] - ww2[1]*gt[2, 0])
#w0 = d0*gt[0, 0] + d1*gt[1, 0] + d2*gt[2, 0]
#w1 = d0*gt[0, 1] + d1*gt[1, 1] + d2*gt[2, 1]
#w2 = d0*gt[0, 2] + d1*gt[1, 2] + d2*gt[2, 2]
test = _expr_from_vector_components([w0, w1, w2], test.basis())
else:
assert test.dimensions == 2
test = np.sum([(Dx(test[i]*g[1, i], 0, 1) - Dx(test[i]*g[0, i], 1, 1))*(1/sg) for i in range(2)])
test.simplify()
return test
| [
"mikaem@math.uio.no"
] | mikaem@math.uio.no |
364c7fbdbdd853836d7faa2d48f0d96d450b696b | eec9299fd80ed057585e84e0f0e5b4d82b1ed9a7 | /comment/migrations/0002_auto_20181126_2237.py | f43103e474077979c288a083e9963fdafb9ec8e6 | [] | no_license | aimiliya/mysite | f51967f35c0297be7051d9f485dd0e59b8bb60c2 | b8e3b639de6c89fb8e6af7ee0092ee744a75be41 | refs/heads/master | 2020-04-08T19:06:36.539404 | 2018-12-01T08:05:18 | 2018-12-01T08:05:18 | 159,640,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # Generated by Django 2.1.3 on 2018-11-26 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comment', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='coment',
options={'ordering': ['-comment_time']},
),
migrations.AddField(
model_name='coment',
name='parent_id',
field=models.IntegerField(default=0),
),
]
| [
"951416267@qq.com"
] | 951416267@qq.com |
1b8186b33b4e154abe2da78ebfd54ce03d98b9f8 | 9433ce01c6e2906c694b6f0956a4640e1872d4d2 | /src/test/python/wdbd/test_girlant_down.py | e562510bb938cca541c18a206e5bb1b08ea78b43 | [] | no_license | shwdbd/python_codepool | fcd7950fc1339994186461ae18c34cee238938ee | 92a4fb61d060f9a545499b6b7f99a4dc211d5009 | refs/heads/master | 2023-02-20T19:49:23.677824 | 2022-06-15T08:53:51 | 2022-06-15T08:53:51 | 209,431,254 | 0 | 1 | null | 2023-02-15T21:58:53 | 2019-09-19T00:56:03 | Python | UTF-8 | Python | false | false | 2,250 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : test_girlant_down.py
@Time : 2020/02/03 19:50:02
@Author : Jeffrey Wang
@Version : 1.0
@Contact : shwangjj@163.com
@Desc : 影集下载功能单元测试
'''
import unittest
import wdbd.codepool.ant.girl_picture_ant as girl_ant
import wdbd.codepool.ant.girl_ci as ci
import os
import shutil
class Test_Download_SingleListPage(unittest.TestCase):
"""测试 下载单个列表页面
"""
url = 'https://www.meitulu.com/t/1386/' # 共5个
down_dir = r'temp_files\girl\\'
def tearDown(self):
if os.path.exists(self.down_dir):
shutil.rmtree(self.down_dir)
# os.removedir(self.down_dir)
return super().tearDown()
# 耗时太长,谨慎测试
def test_success(self):
"""
测试下载整个集合的情况
"""
count_of_set = 5
r = girl_ant.download_single_listpage(self.url, self.down_dir)
self.assertEqual(count_of_set, r)
# 检查下载的目录数量
self.assertEqual(count_of_set, len(os.listdir(self.down_dir)))
def test_fail(self):
"""测试 下载失败的情况
"""
err_url = 'xxxx'
err_dir = 'z:\\xxx\\'
# 测试,文件夹不存在的情况
r = girl_ant.download_single_listpage(self.url, err_dir)
self.assertEqual(0, r)
# 测试,url不存在的情况
r = girl_ant.download_single_listpage(err_url, self.down_dir)
self.assertEqual(0, r)
class Test_Download_SinglePage(unittest.TestCase):
"""测试下载单个影集
"""
down_dir = ci.DOWN_DIR
def tearDown(self):
if os.path.exists(self.down_dir):
shutil.rmtree(self.down_dir)
return super().tearDown()
def test_download_single(self):
"""测试 单个页面下载
"""
url = 'https://www.meitulu.com/item/15267.html'
name = '[YOUWU尤物馆] VOL.099 木木hanna - 性感黑丝吊袜写真'
r = girl_ant.download_single(url)
self.assertEqual(38, r) # 下载文件数
dw_dir = ci.DOWN_DIR + name + '\\'
self.assertTrue(os.path.exists(dw_dir)) # 生成的文件夹
| [
"shwangjj@163.com"
] | shwangjj@163.com |
c62a44507b5b34f7b2ce5401b569a0453dfa4af0 | b0b8d735473c79bae43d939a605bc60c07137b46 | /devices/readers.py | 5a9e606339baaa6e4646113c8ba67d05ebc78fee | [] | no_license | frnhr/plc_lines | 39e965d7481bde72c04bf2091497dfb0ec49198e | 60366cb5fd3b06d1558da921fe301fdb7a5d017e | refs/heads/master | 2022-10-05T08:27:23.669929 | 2020-05-19T13:12:31 | 2020-05-19T13:12:31 | 243,630,119 | 0 | 0 | null | 2022-09-30T01:21:53 | 2020-02-27T22:31:06 | Python | UTF-8 | Python | false | false | 1,668 | py | from __future__ import annotations
import json
from typing import Optional
from django.conf import settings
from pylogix.eip import Response, PLC
class ReaderError(RuntimeError):
"""Failed to read PLC device."""
SUCCESS_STATUSES = ("Success", "Partial transfer")
class ReaderBase:
def __init__(self, ip, variable) -> None:
self.ip = ip
self.variable = variable
def read(self) -> Optional[str]:
try:
response = self._read()
except NotImplementedError:
raise
except Exception as e:
raise ReaderError() from e
if response.Status not in SUCCESS_STATUSES:
raise ReaderError(response.Status)
# TODO Do we need to continue reading if get 0x06 Partial transfer?
return str(response.Value) if response is not None else None
def _read(self) -> Response:
raise NotImplementedError()
class FakeReader(ReaderBase):
"""
This is a dummy PLC reader, used for development (since the developer
has zero experience with PLCs, let alone having one handy for tinkering).
Edit FAKE_PLC.json file to change the value which is read.
"""
def _read(self) -> Response:
with open(settings.FAKE_READER_FILE) as fake_reader_file:
fake_plcs = json.loads(fake_reader_file.read())
response_kwargs = fake_plcs[self.ip]
return Response(**response_kwargs)
class PLCReader(ReaderBase):
"""Real PLC reader."""
def _read(self) -> Response:
with PLC() as comm:
comm.Micro800 = True
comm.IPAddress = self.ip
return comm.Read(self.variable)
| [
"fran.hrzenjak@gmail.com"
] | fran.hrzenjak@gmail.com |
eeb25bb99a16c36f21171b4e54186e08259a1435 | 7fdf37c8bb0fe575a28a996ccff08445777d7a59 | /image_server/wx_app/migrations/0014_img_fsize.py | d7fdb6906bba047ed956c7a82c573a9bf51fdede | [] | no_license | bushitan/image_str | 8285884b3aef06935023afa69d49bfc3baecaf2a | dca6f38cffe1f1d1c72a3a098bc4b106a4f5914d | refs/heads/master | 2020-05-21T19:19:39.403015 | 2017-07-20T08:38:31 | 2017-07-20T08:38:31 | 62,543,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wx_app', '0013_img_user_id'),
]
operations = [
migrations.AddField(
model_name='img',
name='fsize',
field=models.IntegerField(default=0, null=True, verbose_name=b'\xe6\x96\x87\xe4\xbb\xb6\xe5\xa4\xa7\xe5\xb0\x8f', blank=True),
),
]
| [
"373514952@qq.com"
] | 373514952@qq.com |
b6faa6b647be48cc1f8a41e6d699d2e4bcdb91c4 | 1897bb1a06572018eee4ef30b56e5e12425a4085 | /12306/1.29scrapy中的去重/project29/project29/spiders/custom.py | 8a63c14e438f4d34c404792c4fee3a83aaf2c93f | [] | no_license | xiaozhiqi2000/spider_advanced | 3f16e140b2f92206ad1ac0298ee0a94f57ad067d | 0a32fcb1fd409ae1bf686a7ed9809c2ee277dec7 | refs/heads/master | 2020-05-27T09:58:06.127519 | 2016-10-17T07:02:37 | 2016-10-17T07:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py | # -*- coding: utf-8 -*-
import scrapy
import json
from scrapy.http.request import Request
class CustomSpider(scrapy.Spider):
name = 'custom'
start_urls = ['https://kyfw.12306.cn/otn/userCommon/allProvince']
custom_settings = {
'DUPEFILTER_DEBUG': True,
# 'DUPEFILTER_CLASS': "project29.custom_filter.CustomURLFilter"
}
def parse_e(self, response):
self.logger.info(response.url)
self.logger.info(response.meta)
def parse(self, response):
self.logger.info("--------------------------")
j = json.loads(response.body)
for prov in j["data"]:
self.logger.info(prov["chineseName"])
yield Request(url='https://www.baidu.com/s?wd=1', callback = self.parse_e)
yield Request(url='https://www.baidu.com/s?wd=3', callback = self.parse_e)
yield Request(url='https://www.baidu.com/s?wd=3', callback = self.parse_e)
yield Request(url='https://www.baidu.com/s?wd=3', callback = self.parse_e, meta = {"timestamp":"1"})
yield Request(url='https://www.baidu.com/s?wd=3', callback = self.parse_e, meta = {"timestamp":"2"})
yield Request(url='https://www.baidu.com/s?wd=3', callback = self.parse_e, meta = {"timestamp":"2"})
| [
"xiaozhiqi2015@live.com"
] | xiaozhiqi2015@live.com |
03c6825d137329f515bb5c6d91bfd057aefa5a1d | 1e08e2c0a1cd9677b35347b9aedd579e8676ee41 | /blog/migrations/0004_blogtagindexpage.py | da35bd1521b74920e4add159baf03d570b2e6dcf | [
"MIT"
] | permissive | tbrlpld/wagtail-gatsby-blog-backend | 023eb4db9166cc860990bbf0414712932508dfa1 | f68f1d9e2577d5271960f142bf37dcbcdac6767a | refs/heads/master | 2022-11-30T11:01:48.115493 | 2020-08-18T17:40:46 | 2020-08-18T17:40:46 | 284,381,118 | 0 | 0 | MIT | 2020-08-18T17:47:24 | 2020-08-02T03:14:21 | Python | UTF-8 | Python | false | false | 752 | py | # Generated by Django 2.2.13 on 2020-06-12 01:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0045_assign_unlock_grouppagepermission'),
('blog', '0003_auto_20200612_0111'),
]
operations = [
migrations.CreateModel(
name='BlogTagIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
| [
"tibor@lpld.io"
] | tibor@lpld.io |
71263850c4721086a1a743b2e881050df61695dc | caf794c8f43560ef71ba189191d1d8313af3c6ba | /datamanagement/add_generic_dataset.py | 8efe998de5033214280312401e780df473631eeb | [] | no_license | sanansakura/sisyphus | 007ac7f23edb2bb84ebeb32f6af60796df134b75 | eb82e2e141e896bdc5a980c44f908e4f68f68696 | refs/heads/master | 2021-04-24T02:57:15.308297 | 2020-03-24T21:49:26 | 2020-03-24T21:49:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,632 | py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import sys
import click
import json
import ast
import pandas as pd
from utils.constants import LOGGING_FORMAT
from utils.runtime_args import parse_runtime_args
from dbclients.tantalus import TantalusApi
import datamanagement.templates as templates
logging.basicConfig(format=LOGGING_FORMAT, stream=sys.stderr, level=logging.INFO)
REQUIRED_FIELDS = [
'filepaths',
'sample_id',
'library_id',
'storage_name',
'dataset_name',
'dataset_type',
]
OPTIONAL_FIELDS = [
'tag_name',
'aligner',
'sequence_lane_pks',
'reference_genome'
]
class ListParameter(click.Option):
def type_cast_value(self, ctx, value):
try:
return ast.literal_eval(value)
except:
raise click.BadParameter(value)
@click.group()
def input_type():
pass
@input_type.command()
@click.argument('json_file')
@click.option('--update', is_flag=True)
def json_input(**kwargs):
    # Create a dataset from a JSON spec given either as a filename or as an
    # inline JSON string.
    missing_input = False
    # Parse the input: first try it as a path to a JSON file, then fall back
    # to treating the argument itself as a JSON document.
    try:
        with open(kwargs['json_file']) as f:
            inputs = json.load(f)
    except (IOError, OSError, ValueError):
        # Narrowed from a bare `except:`: open() raises IOError/OSError,
        # json.load() raises ValueError on malformed content.
        inputs = json.loads(kwargs['json_file'])
    # Check that arguments have the right name. Iterating keys directly
    # replaces the Python-2-only dict.iteritems() (the value was unused).
    for key in inputs:
        if key not in REQUIRED_FIELDS + OPTIONAL_FIELDS:
            raise Exception("Unrecognized input for {}".format(key))
    # Check if all required arguments are present.
    for key in REQUIRED_FIELDS:
        if key not in inputs:
            logging.error("Missing input for {}".format(key))
            missing_input = True
    if missing_input:
        raise Exception("Please add missing inputs")
    # Fill defaults for any omitted optional fields.
    for key in OPTIONAL_FIELDS:
        if key not in inputs:
            inputs[key] = [] if key == 'sequence_lane_pks' else None
    inputs["update"] = kwargs['update']
    # Call main with these arguments.
    add_generic_dataset(**inputs)
@input_type.command()
@click.argument('filepaths', nargs=-1)
@click.argument('sample_id', nargs=1)
@click.argument('library_id', nargs=1)
@click.option('--storage_name')
@click.option('--dataset_name')
@click.option('--dataset_type')
@click.option('--tag_name')
@click.option('--aligner')
@click.option('--sequence_lane_pks', cls=ListParameter, default='[]')
@click.option('--reference_genome', type=click.Choice(['HG18', 'HG19']))
@click.option('--update', is_flag=True)
def command_line(**kwargs):
    # Create a dataset from plain command-line arguments and options.
    missing_input = False
    # Check if all required arguments are present. dict.items() replaces the
    # Python-2-only iteritems() so the command runs under Python 3 as well.
    for key, val in kwargs.items():
        if not val and key in REQUIRED_FIELDS:
            logging.error("Missing input for {}".format(key))
            missing_input = True
    if missing_input:
        raise Exception("Please add missing inputs")
    # Call main with these arguments.
    add_generic_dataset(**kwargs)
def add_generic_dataset(**kwargs):
    """Register files with Tantalus and create (or fetch) a sequence dataset.

    Required kwargs: filepaths, sample_id, library_id, storage_name,
    dataset_name, dataset_type, update.
    Optional kwargs: tag_name, aligner, reference_genome, sequence_lane_pks.
    """
    tantalus_api = TantalusApi()
    file_resource_pks = []
    sample = tantalus_api.get(
        "sample",
        sample_id=kwargs['sample_id']
    )
    library = tantalus_api.get(
        "dna_library",
        library_id=kwargs['library_id']
    )
    # Add the file resource to tantalus, one per input file path.
    for filepath in kwargs['filepaths']:
        logging.info("Adding file resource for {} to Tantalus".format(filepath))
        resource, instance = tantalus_api.add_file(
            storage_name=kwargs['storage_name'],
            filepath=filepath,
            update=kwargs['update']
        )
        file_resource_pks.append(resource["id"])
    # Only look up a tag when one was actually supplied: the CLI entry points
    # pass tag_name=None when the option is omitted, and the original
    # truthiness-free `in` test would then query Tantalus with name=None.
    if kwargs.get("tag_name"):
        tag = tantalus_api.get("tag", name=kwargs["tag_name"])
        tags = [tag["id"]]
    else:
        tags = []
    ref_genome = kwargs.get("reference_genome")
    aligner = kwargs.get("aligner")
    # Default to no lanes. Previously a missing/None "sequence_lane_pks" key
    # left `sequence_pks` unbound and raised NameError below; list(...) also
    # materializes the Python 3 map() iterator before it is sent to the API.
    sequence_pks = list(map(str, kwargs.get("sequence_lane_pks") or []))
    # Add the dataset to tantalus.
    sequence_dataset = tantalus_api.get_or_create(
        "sequence_dataset",
        name=kwargs['dataset_name'],
        dataset_type=kwargs['dataset_type'],
        sample=sample["id"],
        library=library["id"],
        sequence_lanes=sequence_pks,
        file_resources=file_resource_pks,
        reference_genome=ref_genome,
        aligner=aligner,
        tags=tags,
    )
    logging.info("Succesfully created sequence dataset with ID {}".format(sequence_dataset["id"]))
if __name__=='__main__':
    # Click dispatches to the `json-input` or `command-line` subcommand.
    input_type()
| [
"andrew.mcpherson@gmail.com"
] | andrew.mcpherson@gmail.com |
67b11257facdac50c09a31ffdcc0173abaefcf28 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_exhume.py | d04f4905b10af793887ef7eef394520e46e5b7d8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py |
#calss header
class _EXHUME():
def __init__(self,):
self.name = "EXHUME"
self.definitions = [u'to remove a dead body from the ground after it has been buried']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
1a2483954aba597c54da8e7a9cd1c48efadc0a79 | 706f239f0df4586221e6a7aac001626ab531c224 | /src/client_libraries/python/dynamics/customerinsights/api/models/measure_metadata_py3.py | 35818f8ac79c086ff065198678dc705906946fb2 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | Global19-atlassian-net/Dynamics365-CustomerInsights-Client-Libraries | 9681d258c649b005a2379d32b23d374695a6fca4 | 0ce81ae25e97c3b8de12b97963a8c765c0248238 | refs/heads/main | 2023-02-28T20:39:33.622885 | 2021-02-09T23:34:38 | 2021-02-09T23:34:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,925 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE: auto-generated by AutoRest — keep edits to documentation only so the
# file can be regenerated without losing changes.
class MeasureMetadata(Model):
    """Represents metadata for a measure (or KPI).

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar display_name:
    :vartype display_name: str
    :param name: Gets the unique name of the measure
    :type name: str
    :param description: Gets the description of the measure.
    :type description: str
    :param definition:
    :type definition: ~dynamics.customerinsights.api.models.MeasureDefinition
    :param latest_evaluation:
    :type latest_evaluation: ~dynamics.customerinsights.api.models.Evaluation
    :param output:
    :type output: ~dynamics.customerinsights.api.models.ScalarOutput
    :param evaluation_stats:
    :type evaluation_stats:
     ~dynamics.customerinsights.api.models.EvaluationStats
    :param error_description:
    :type error_description: ~dynamics.customerinsights.api.models.StringInfo
    :param sql_validation_stats:
    :type sql_validation_stats:
     ~dynamics.customerinsights.api.models.SqlValidationStats
    :param evaluation_history: Gets the evaluation history for the measure.
     (not persisted in store)
    :type evaluation_history:
     list[~dynamics.customerinsights.api.models.Evaluation]
    :param output_history: Gets the output history for the measure. (not
     persisted in store)
    :type output_history:
     list[~dynamics.customerinsights.api.models.ScalarOutput]
    :ivar version: Gets the version number of this object.
    :vartype version: long
    :ivar updated_by: Gets the UPN of the user who last updated this record.
    :vartype updated_by: str
    :ivar updated_utc: Gets the time the object was last updated.
    :vartype updated_utc: datetime
    :ivar created_by: Gets the email address of the user who created this
     record.
    :vartype created_by: str
    :ivar created_utc: Gets the time the object was initially created.
    :vartype created_utc: datetime
    :ivar instance_id: Gets the Customer Insights instance id associated with
     this object.
    :vartype instance_id: str
    """

    # Server-populated fields: msrest rejects attempts to send them.
    _validation = {
        'display_name': {'readonly': True},
        'version': {'readonly': True},
        'updated_by': {'readonly': True},
        'updated_utc': {'readonly': True},
        'created_by': {'readonly': True},
        'created_utc': {'readonly': True},
        'instance_id': {'readonly': True},
    }

    # Maps python attribute names to wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'definition': {'key': 'definition', 'type': 'MeasureDefinition'},
        'latest_evaluation': {'key': 'latestEvaluation', 'type': 'Evaluation'},
        'output': {'key': 'output', 'type': 'ScalarOutput'},
        'evaluation_stats': {'key': 'evaluationStats', 'type': 'EvaluationStats'},
        'error_description': {'key': 'errorDescription', 'type': 'StringInfo'},
        'sql_validation_stats': {'key': 'sqlValidationStats', 'type': 'SqlValidationStats'},
        'evaluation_history': {'key': 'evaluationHistory', 'type': '[Evaluation]'},
        'output_history': {'key': 'outputHistory', 'type': '[ScalarOutput]'},
        'version': {'key': 'version', 'type': 'long'},
        'updated_by': {'key': 'updatedBy', 'type': 'str'},
        'updated_utc': {'key': 'updatedUtc', 'type': 'iso-8601'},
        'created_by': {'key': 'createdBy', 'type': 'str'},
        'created_utc': {'key': 'createdUtc', 'type': 'iso-8601'},
        'instance_id': {'key': 'instanceId', 'type': 'str'},
    }

    def __init__(self, *, name: str=None, description: str=None, definition=None, latest_evaluation=None, output=None, evaluation_stats=None, error_description=None, sql_validation_stats=None, evaluation_history=None, output_history=None, **kwargs) -> None:
        super(MeasureMetadata, self).__init__(**kwargs)
        # Read-only (server-populated) attributes are initialised to None.
        self.display_name = None
        self.name = name
        self.description = description
        self.definition = definition
        self.latest_evaluation = latest_evaluation
        self.output = output
        self.evaluation_stats = evaluation_stats
        self.error_description = error_description
        self.sql_validation_stats = sql_validation_stats
        self.evaluation_history = evaluation_history
        self.output_history = output_history
        self.version = None
        self.updated_by = None
        self.updated_utc = None
        self.created_by = None
        self.created_utc = None
        self.instance_id = None
"michaelajohnston@mac.com"
] | michaelajohnston@mac.com |
a5201f74952396af8a36123428177ee24f8d8dd1 | c65d9b487df6cdbbe6c4cb773f262ac13270e095 | /engine_modules/corporation/migrations/0007_auto_20151030_1102.py | e9e515d8af2970235652b1329b532c59639fab9e | [] | no_license | Neamar/corporate | 40c254e068f84d59109b25c49a7f613b4b9c7cdc | 3029e2e46087172d7ac187309b771b275446d0ce | refs/heads/master | 2021-06-21T20:26:34.471294 | 2021-03-06T08:40:53 | 2021-03-06T08:40:53 | 15,422,111 | 5 | 0 | null | 2020-07-24T19:38:15 | 2013-12-24T16:57:52 | Python | UTF-8 | Python | false | false | 769 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: redefines the choices of
    # AssetDelta.category. Do not edit by hand beyond comments.

    dependencies = [
        ('corporation', '0006_auto_20150813_1714'),
    ]

    operations = [
        migrations.AlterField(
            model_name='assetdelta',
            name='category',
            field=models.CharField(max_length=15, choices=[(b'effect-first', b'Eff. premier'), (b'effect-last', b'Eff. dernier'), (b'effect-crash', b'Eff. crash'), (b'detroit-inc', b'Detroit, Inc.'), (b'sabotage', b'Sabotage'), (b'extraction', b'Extraction'), (b'datasteal', b'Datasteal'), (b'market*bubble', b'Domination/Perte s\xc3\xa8che'), (b'invisible-hand', b'Main Invisible'), (b'votes', b'Votes')]),
        ),
    ]
| [
"neamar@neamar.fr"
] | neamar@neamar.fr |
dd0572748eea6b61edb50906f94377efe2355281 | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs_with_max_replays_5/interreplay_39_r_3/replay_config.py | 11cfacf3dd3ff4ad4b224792869d3fc7f377c640 | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
# STS replay configuration: a 3-switch mesh controlled by one local Pyretic
# firewall controller. The kill_cmd force-kills any leftover pox/pyretic
# processes between runs.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pyretic.py -m p0 pyretic.examples.firewall_for_sts_no_close', label='c1', address='127.0.0.1', cwd='../pyretic', kill_cmd='ps aux | grep -e pox -e pyretic | grep -v simulator | cut -c 9-15 | xargs kill -9')],
                                     topology_class=MeshTopology,
                                     topology_params="num_switches=3",
                                     patch_panel_class=BufferedPatchPanel,
                                     multiplex_sockets=False,
                                     kill_controllers_on_exit=True)
# Replay the recorded event trace deterministically (no waiting on
# nondeterministic values, unexpected messages disallowed).
control_flow = Replayer(simulation_config, "experiments/fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs/interreplay_39_r_3/events.trace",
                        input_logger=InputLogger(),
                        wait_on_deterministic_values=False,
                        allow_unexpected_messages=False,
                        delay_flow_mods=False,
                        pass_through_whitelisted_messages=True)
# Invariant check: 'None'
| [
"cs@cs.berkeley.edu"
] | cs@cs.berkeley.edu |
f52e97d68bc016b621734e17240755ff95ec2b80 | 215fe73df20c3d44214c8693434617210f0aba9e | /barViz.py | c2bc1f3a0d9ef171f11b4e9ce1fb62d58ee4ca9c | [] | no_license | Schuck9/Game_Transition | 1c5ae2e902b6f6ae6ec636143edb377d7c010546 | 44450ddd0161578231d4a340f348c2f8d9dcfb64 | refs/heads/master | 2022-07-18T21:36:00.091202 | 2020-05-19T01:36:39 | 2020-05-19T01:36:39 | 260,609,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | """
A simple implementation of Ultimatum Game visualization
@date: 2020.5.18
@author: Tingyu Mo
"""
import numpy as np
import pandas as pd
import os
import time
import fractions
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
def bar_viz(data_path):
    """Plot grouped Offer(p)/Demand(q) bars from an Excel sheet.

    Reads columns 'p' and 'q' from *data_path*, drops rows with missing
    values, and saves the bar chart to ./T1.jpg.
    """
    # Read the data.
    data = pd.read_excel(data_path)
    save_path = os.path.join(os.getcwd(),"T1.jpg")
    # Draw the bar chart.
    # print(list(data.p))
    data.dropna(subset=['p'], inplace=True)
    data.dropna(subset=['q'], inplace=True)
    # p bars on odd positions, q bars on even positions so pairs sit side by side.
    plt.bar([1,3,5,7,9],list(data.p),label="Offer(p)")
    plt.bar([2,4,6,8,10],list(data.q),label="Demond(q)")
    # Axis labels.
    plt.xlabel('mode')
    plt.ylabel('Offer Or Demond')
    meta_element = np.arange(10)
    # NOTE(review): ticks are placed at 0..9 while bars sit at 1..10, so each
    # label appears one slot left of its bar pair — confirm this offset is
    # intentional.
    ax_label = [" ","0.5/1"," "," "," ","0.5"," "," "," "," 1"]
    plt.xticks(meta_element,ax_label,fontsize=16)
    # Title and legend.
    plt.legend()
    plt.title('RG_D_EF_w0.1_u0.001 ')
    # Save (and, with a non-Agg backend, display) the figure.
    plt.savefig(save_path)
    print("Figure has been saved to: ",save_path)
    plt.show()
if __name__ == '__main__':
    # RecordName ='2020-03-03-09-14-20'
    # time_option = "all"
    # pq_distribution_viz(RecordName,time_option)
    # avg_pq_viz()
    # Plot the histogram workbook sitting next to this script.
    data_path ='./Hist.xlsx'
    bar_viz(data_path)
"noreply@github.com"
] | Schuck9.noreply@github.com |
bc1188fe3e82e135991a580d3245a4232bca1a39 | c39f999cae8825afe2cdf1518d93ba31bd4c0e95 | /PYME/Analysis/LMVis/tcHist.py | 83371f074e9cd421f12f1495b9a26991ead89c65 | [] | no_license | WilliamRo/CLipPYME | 0b69860136a9b2533f2f29fc29408d7471cb934d | 6596167034c727ad7dad0a741dd59e0e48f6852a | refs/heads/master | 2023-05-11T09:50:58.605989 | 2023-05-09T02:17:47 | 2023-05-09T02:17:47 | 60,789,741 | 3 | 1 | null | 2016-06-17T08:52:44 | 2016-06-09T16:30:14 | Python | UTF-8 | Python | false | false | 1,682 | py | #!/usr/bin/python
##################
# tcHist.py
#
# Copyright David Baddeley, 2009
# d.baddeley@auckland.ac.nz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
from pylab import *

import numpy as np
import scipy as sp
def doTCHist(xvals, yvals, xbins, ybins, sat=1):
    """Render a two-colour 2D histogram of (xvals, yvals) as an RGB array.

    Hue encodes the local x/(x+y) ratio via the RdYlGn colormap; brightness
    encodes log10 of the bin counts, saturating at `sat` times the maximum.
    Returns an (len(ybins)-1, len(xbins)-1, 3) float array in [0, 1].
    """
    # Use numpy directly: sp.histogram2d/meshgrid/minimum/maximum were
    # deprecated numpy re-exports that modern scipy no longer provides.
    h = np.histogram2d(xvals, yvals, [xbins, ybins])[0]
    lh = np.log10(h + 1).T
    X, Y = np.meshgrid(xbins[:-1], ybins[:-1])
    # Ratio clipped to [0, 1] before colormap lookup.
    c = cm.RdYlGn(np.minimum(np.maximum(X / (X + Y), 0), 1))
    # Brightness from (saturating) log counts.
    sc = np.minimum(sat * lh / lh.max(), 1)
    r = c[:, :, :3]
    r[:, :, 0] = r[:, :, 0] * sc
    r[:, :, 1] = r[:, :, 1] * sc
    r[:, :, 2] = r[:, :, 2] * sc
    return r
def doInvTCHist(xvals, yvals, xbins, ybins, sat=1):
    """Inverted-colour variant of doTCHist (white background).

    Same encoding as doTCHist, but the colormap and the final image are
    complemented so dense regions darken a white canvas.
    """
    # Use numpy directly: the sp.* aliases were removed from modern scipy.
    h = np.histogram2d(xvals, yvals, [xbins, ybins])[0]
    lh = np.log10(h + 1).T
    X, Y = np.meshgrid(xbins[:-1], ybins[:-1])
    # Complemented colormap lookup of the clipped x/(x+y) ratio.
    c = 1 - cm.RdYlGn(np.minimum(np.maximum(X / (X + Y), 0), 1))
    sc = np.minimum(sat * lh / lh.max(), 1)
    r = c[:, :, :3]
    r[:, :, 0] = r[:, :, 0] * sc
    r[:, :, 1] = r[:, :, 1] * sc
    r[:, :, 2] = r[:, :, 2] * sc
    # Complement again so zero-count bins come out white.
    return 1 - r
| [
"willi4m@zju.edu.cn"
] | willi4m@zju.edu.cn |
352ccb19b87d55b6408f0744e1b2d22f5f1fa9a8 | 3cf1535ce25f3f0a71bfd5c7697b0efd1a9ce08c | /Experiments/forontiers_jupyter/pipe_utils.py | 40ad6099d3b3d895a5d271f28ead36f985a5adde | [
"MIT"
] | permissive | junhull/Resic | 11ac59a2caf4399822f6280fee41275a7a1fd5a8 | 30b96870713a3dfb356122cb71576dd00be60329 | refs/heads/main | 2023-05-31T11:30:11.469612 | 2021-06-25T17:34:29 | 2021-06-25T17:34:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,157 | py | import os
from tkinter import filedialog
from tkinter import *
import itertools
import subprocess
from functools import partial
import Processing.genome_3nt as genome_3nt
# TODO note that orchestration is done in functions with clear explanation
def all_genome_pair_combinations():
    """Yield every unordered pair of distinct nucleotides from "ACGT"."""
    for pair in itertools.combinations("ACGT", 2):
        yield pair
def genome_3nt_factory(from_nt, to_nt):
    """Build the pre/post 3nt-genome processing callables for one NT pairing.

    Returns a (pre, post) pair of function objects, each bound to the
    [from_nt, to_nt] replacement. Separate lists are passed on purpose so the
    two callables never share mutable state.
    """
    return (partial(genome_3nt.pre, nt_replacement=[from_nt, to_nt]),
            partial(genome_3nt.post, nt_replacement=[from_nt, to_nt]))
# todo - this is a stub function - need to remove it and replace it with the calling of the real one
def transcriptom_func(one, two):
    """Stub transcriptome step: announces itself and returns the arguments swapped."""
    swapped = (two, one)
    print("placeholder: transcriptome")
    return swapped
def genome_3nt_all_combination_spec():
    """Build the full 3nt-genome processing spec.

    Returns a list of 3-tuples ('X_Y', pre, post), where pre/post are the 3nt
    genome pre- and post-processing functions for the X-to-Y genome mapping,
    covering every unordered pair of distinct nucleotides X, Y.
    """
    spec = []
    for pair in all_genome_pair_combinations():
        label = "%s_%s" % pair
        spec.append((label,) + genome_3nt_factory(*pair))
    return spec
# file selection screen function
def files_selector():
    """
    Open a Tk file dialog and return the list of filenames the user picked.

    A throwaway Tk root window is created for the dialog and destroyed
    before returning.
    """
    root = Tk()
    filenames = filedialog.askopenfilenames(initialdir=os.getcwd(), title="Select files",
                                            filetypes=(("all files", "*.*"), ("fastq files", "*.fastq"),
                                                       ("pileup files", "*.pileup"), ("fasta files", "*.fasta")))
    # splitlist normalizes the Tcl result into a Python tuple of paths.
    filenames_list = root.tk.splitlist(filenames)
    root.destroy()
    return list(filenames_list)
def file_selector():
    """
    Open a Tk file dialog and return the single filename the user picked.

    Returns an empty string if the dialog is cancelled (tkinter behaviour).
    """
    root = Tk()
    filename = filedialog.askopenfilename(initialdir=os.getcwd(), title="Select files",
                                          filetypes=(("all files", "*.*"), ("fastq files", "*.fastq"),
                                                     ("pileup files", "*.pileup"), ("fasta files", "*.fasta")))
    root.destroy()
    return filename
def folder_selector():
    """
    Open a Tk directory dialog and return the folder the user picked.
    """
    root = Tk()
    folder_selected = filedialog.askdirectory()
    root.destroy()
    return folder_selected
def print_structure(startpath):
"""
prints the directory structure from startpath
"""
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
def call_process(command):
    """Run *command* through the shell and print "error" on a non-zero exit.

    SECURITY: shell=True passes the string to the system shell — never call
    this with untrusted input (command injection risk).
    """
    res = subprocess.call(command, shell=True)
    if res:
        print("error")
"you@example.com"
] | you@example.com |
64f56a690ecf18c897dc21fc3c4486792d2c8951 | e3c8f786d09e311d6ea1cab50edde040bf1ea988 | /Incident-Response/Tools/grr/grr/client/grr_response_client/local/binary_whitelist.py | 8c4e5b27c32b76d489f50185d4ea98a673d345b2 | [
"MIT",
"Apache-2.0"
] | permissive | foss2cyber/Incident-Playbook | d1add8aec6e28a19e515754c6ce2e524d67f368e | a379a134c0c5af14df4ed2afa066c1626506b754 | refs/heads/main | 2023-06-07T09:16:27.876561 | 2021-07-07T03:48:54 | 2021-07-07T03:48:54 | 384,988,036 | 1 | 0 | MIT | 2021-07-11T15:45:31 | 2021-07-11T15:45:31 | null | UTF-8 | Python | false | false | 607 | py | #!/usr/bin/env python
"""Deployment-specific whitelisted binaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
def IsExecutionWhitelisted(cmd, args):
  """Site-specific execution whitelist check.

  This deployment whitelists no binaries, so every (cmd, args) pair is
  rejected. This function is not called directly but used by
  client_utils_common.py to detect site-specific binaries that are allowed
  to run.

  Args:
    cmd: Canonical path to the binary.
    args: List of arguments to be passed to the binary.

  Returns:
    Bool, True if it is whitelisted.
  """
  # Both arguments are deliberately ignored: nothing is whitelisted here.
  del cmd, args
  return False
| [
"a.songer@protonmail.com"
] | a.songer@protonmail.com |
f4a5f6ff610a2ef870fbcd75f625332f734d48f0 | 7f52618136c8d9b9ba0ce8f89f3fcc90c4e6feb7 | /csa_new/csa_new/doctype/umpire/test_umpire.py | 680f95b417bffcbca637abc61e953926fe0cf808 | [
"MIT"
] | permissive | Jishnu70055/user_management | 7ade7f196f974ea0b3ddb220e3fca49665d9de3b | 82d3d2c85a62c7f1162633c164cb7d50e229d2fd | refs/heads/main | 2023-07-06T14:03:00.213723 | 2021-08-10T12:42:10 | 2021-08-10T12:42:10 | 394,649,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | # Copyright (c) 2021, sd and Contributors
# See license.txt
# import frappe
import unittest
class TestUmpire(unittest.TestCase):
    # Placeholder test case for the Umpire doctype; no checks implemented yet.
    pass
| [
"jishnudq70055@gmail.com"
] | jishnudq70055@gmail.com |
93b46d4c10c77831d45ccf5c2f27fd29fb9f87e1 | a1bffcd8854e1843e56bb812d4d83b3161a5211e | /plugins/connection/lxc.py | 8fbdc1dcea4264bafa80c39b00f14d3869110c0c | [] | no_license | goneri/ansible.community | 1a71f9d98c164b77f8ed2ed7f558b4963005ff8f | f26f612dd0a3154050d90b51a75502018c95f6e4 | refs/heads/master | 2020-12-29T07:47:35.353515 | 2020-01-22T17:43:18 | 2020-01-22T17:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,212 | py | # (c) 2015, Joerg Thalheim <joerg@higgsboson.tk>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Joerg Thalheim <joerg@higgsboson.tk>
connection: lxc
short_description: Run tasks in lxc containers via lxc python library
description:
- Run commands or put/fetch files to an existing lxc container using lxc python library
options:
remote_addr:
description:
- Container identifier
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_lxc_host
executable:
default: /bin/sh
description:
- Shell executable
vars:
- name: ansible_executable
- name: ansible_lxc_executable
'''
import os
import shutil
import traceback
import select
import fcntl
import errno
HAS_LIBLXC = False
try:
import lxc as _lxc
HAS_LIBLXC = True
except ImportError:
pass
from ansible import constants as C
from ansible import errors
from ansible_collections.ansible.community.plugins.module_utils._text import to_bytes, to_native
from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase):
    ''' Local lxc based connections '''

    transport = 'ansible.community.lxc'
    has_pipelining = True
    default_user = 'root'

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        # Container identifier comes from the inventory host / remote_addr.
        self.container_name = self._play_context.remote_addr
        # Lazily created lxc.Container handle (see _connect).
        self.container = None

    def _connect(self):
        ''' connect to the lxc; nothing to do here '''
        super(Connection, self)._connect()

        if not HAS_LIBLXC:
            msg = "lxc bindings for python2 are not installed"
            raise errors.AnsibleError(msg)

        # Reuse an already-attached container handle across calls.
        if self.container:
            return

        self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name)
        self.container = _lxc.Container(self.container_name)
        if self.container.state == "STOPPED":
            raise errors.AnsibleError("%s is not running" % self.container_name)

    def _communicate(self, pid, in_data, stdin, stdout, stderr):
        """Pump stdin into the attached process and drain stdout/stderr.

        Select-loops over the non-blocking pipe fds until both output pipes
        hit EOF and all of in_data has been written, then reaps the child.
        Returns (returncode, stdout_bytes, stderr_bytes).
        """
        buf = {stdout: [], stderr: []}
        read_fds = [stdout, stderr]
        if in_data:
            write_fds = [stdin]
        else:
            write_fds = []
        while len(read_fds) > 0 or len(write_fds) > 0:
            try:
                ready_reads, ready_writes, _ = select.select(read_fds, write_fds, [])
            except select.error as e:
                # Retry when select() is interrupted by a signal.
                if e.args[0] == errno.EINTR:
                    continue
                raise
            for fd in ready_writes:
                # Drop the bytes actually written; stop writing once drained.
                in_data = in_data[os.write(fd, in_data):]
                if len(in_data) == 0:
                    write_fds.remove(fd)
            for fd in ready_reads:
                data = os.read(fd, 32768)
                if not data:
                    # EOF on this pipe.
                    read_fds.remove(fd)
                buf[fd].append(data)
        (pid, returncode) = os.waitpid(pid, 0)
        return returncode, b"".join(buf[stdout]), b"".join(buf[stderr])

    def _set_nonblocking(self, fd):
        # Add O_NONBLOCK to the fd's flags and hand it back for chaining.
        flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
        fcntl.fcntl(fd, fcntl.F_SETFL, flags)
        return fd

    def exec_command(self, cmd, in_data=None, sudoable=False):
        ''' run a command on the chroot '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        # python2-lxc needs bytes. python3-lxc needs text.
        executable = to_native(self._play_context.executable, errors='surrogate_or_strict')
        local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')]

        read_stdout, write_stdout = None, None
        read_stderr, write_stderr = None, None
        read_stdin, write_stdin = None, None

        try:
            read_stdout, write_stdout = os.pipe()
            read_stderr, write_stderr = os.pipe()

            kwargs = {
                'stdout': self._set_nonblocking(write_stdout),
                'stderr': self._set_nonblocking(write_stderr),
                'env_policy': _lxc.LXC_ATTACH_CLEAR_ENV
            }

            if in_data:
                read_stdin, write_stdin = os.pipe()
                kwargs['stdin'] = self._set_nonblocking(read_stdin)

            self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name)
            pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs)
            if pid == -1:
                msg = "failed to attach to container %s" % self.container_name
                raise errors.AnsibleError(msg)

            # Close the child's ends on our side; os.close returns None, so
            # these names also become None and are skipped by the finally
            # block below.
            write_stdout = os.close(write_stdout)
            write_stderr = os.close(write_stderr)
            if read_stdin:
                read_stdin = os.close(read_stdin)

            return self._communicate(pid,
                                     in_data,
                                     write_stdin,
                                     read_stdout,
                                     read_stderr)
        finally:
            # Best-effort close of any fds still open (truthy == not yet
            # closed above).
            fds = [read_stdout,
                   write_stdout,
                   read_stderr,
                   write_stderr,
                   read_stdin,
                   write_stdin]
            for fd in fds:
                if fd:
                    os.close(fd)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to lxc '''
        super(Connection, self).put_file(in_path, out_path)
        self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name)
        in_path = to_bytes(in_path, errors='surrogate_or_strict')
        out_path = to_bytes(out_path, errors='surrogate_or_strict')

        if not os.path.exists(in_path):
            msg = "file or module does not exist: %s" % in_path
            raise errors.AnsibleFileNotFound(msg)
        try:
            src_file = open(in_path, "rb")
        except IOError:
            traceback.print_exc()
            raise errors.AnsibleError("failed to open input file to %s" % in_path)
        try:
            # write_file runs *inside* the container via attach_wait; the
            # open source fd is inherited across the attach.
            def write_file(args):
                with open(out_path, 'wb+') as dst_file:
                    shutil.copyfileobj(src_file, dst_file)
            try:
                self.container.attach_wait(write_file, None)
            except IOError:
                traceback.print_exc()
                msg = "failed to transfer file to %s" % out_path
                raise errors.AnsibleError(msg)
        finally:
            src_file.close()

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from lxc to local '''
        super(Connection, self).fetch_file(in_path, out_path)
        self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name)
        in_path = to_bytes(in_path, errors='surrogate_or_strict')
        out_path = to_bytes(out_path, errors='surrogate_or_strict')

        try:
            dst_file = open(out_path, "wb")
        except IOError:
            traceback.print_exc()
            msg = "failed to open output file %s" % out_path
            raise errors.AnsibleError(msg)
        try:
            # write_file runs *inside* the container; it reads the in-container
            # path and writes into the inherited local destination fd.
            def write_file(args):
                try:
                    with open(in_path, 'rb') as src_file:
                        shutil.copyfileobj(src_file, dst_file)
                finally:
                    # this is needed in the lxc child process
                    # to flush internal python buffers
                    dst_file.close()
            try:
                self.container.attach_wait(write_file, None)
            except IOError:
                traceback.print_exc()
                msg = "failed to transfer file from %s to %s" % (in_path, out_path)
                raise errors.AnsibleError(msg)
        finally:
            dst_file.close()

    def close(self):
        ''' terminate the connection; nothing to do here '''
        super(Connection, self).close()
        self._connected = False
| [
"ansible_migration@example.com"
] | ansible_migration@example.com |
1e1be642f06a47ab43110db05f45feb8915a301f | 81d4debdcea628793d9f4c8fe0b0a324750c3ce9 | /meteva/base/fun/diagnosing.py | 4c0b81773645b2bab2a423637acf6eba4b64da45 | [] | no_license | fu0228/meteva | 6a4df63380df6bc443e20a2115eba0ec8ecc70b1 | 0fe30af3cda811d8260372b6029abb621e68e39c | refs/heads/master | 2023-01-28T00:18:58.666406 | 2020-12-06T13:25:36 | 2020-12-06T13:25:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,599 | py | import math
import meteva
from meteva.base.tool.math_tools import lon_lat_to_cartesian
from scipy.spatial import cKDTree
import numpy as np
import copy
import pandas as pd
import datetime
def accumulate_time(sta_ob,step,keep_all = True):
    '''
    Accumulate station observations over consecutive times.

    Sums each station value over `step` consecutive observation times, where
    the time step is inferred as the smallest gap between the times present.

    :param sta_ob: station observation DataFrame (meteva station format)
    :param step: number of consecutive times to accumulate over
    :param keep_all: if False, keep only times aligned to the last time on a
        `step`-sized grid
    :return: accumulated station DataFrame
    '''
    times= sta_ob.loc[:,'time'].values
    times = list(set(times))
    times.sort()
    times = np.array(times)
    dtimes = times[1:] - times[0:-1]
    # Smallest observed gap is taken as the base time step.
    min_dtime = np.min(dtimes)
    rain_ac = None
    for i in range(step):
        # Shift the data forward by i steps and inner-join, so each row ends
        # up holding the sum of the current and previous step-1 values.
        rain1 = sta_ob.copy()
        rain1["time"] = rain1["time"] + min_dtime * i
        rain_ac = meteva.base.add_on_level_time_dtime_id(rain_ac,rain1,how="inner")
    if not keep_all:
        dtimes = times[:] - times[-1]
        dh = (dtimes/min_dtime).astype(np.int32)
        new_times = times[dh%step ==0]
        rain_ac = meteva.base.in_time_list(rain_ac,new_times)
    print("warning: accumulate_time函数将在后续升级中不再支持,请重新使用sum_of_sta函数满足相关需求")
    return rain_ac
def accumulate_dtime(sta,step,keep_all = True):
    '''Accumulate station data over consecutive forecast lead times (dtime).

    Same idea as accumulate_time, but the accumulation runs along the dtime
    coordinate; the base unit is the smallest non-zero dtime present.
    '''
    dtimes= sta.loc[:,'dtime'].values
    dtimes = list(set(dtimes))
    dtimes.sort()
    dtimes = np.array(dtimes)
    dhour_unit = dtimes[0]
    # dtime 0 (analysis) cannot serve as the unit; fall back to the next one.
    if dhour_unit ==0:
        dhour_unit = dtimes[1]
    rain_ac = None
    for i in range(step):
        # Shift by i lead-time units and inner-join to build the running sum.
        rain1 = sta.copy()
        rain1["dtime"] = rain1["dtime"] + dhour_unit * i
        rain_ac = meteva.base.add_on_level_time_dtime_id(rain_ac,rain1,how="inner")
    if not keep_all:
        dh =((dtimes - dtimes[-1])/dhour_unit).astype(np.int32)
        new_dtimes = dtimes[dh%step ==0]
        rain_ac = meteva.base.in_dtime_list(rain_ac,new_dtimes)
    return rain_ac
def change(sta,delta = 24,used_coords = "time"):
    """Compute the change of each data column over a `delta` offset.

    For used_coords == "time", each value is replaced by value(t) minus
    value(t - delta hours); otherwise the same difference is taken along the
    dtime coordinate. Only rows where both endpoints exist survive the join.
    """
    if used_coords == "time":
        names_0 = meteva.base.get_stadata_names(sta)
        names_1 = []
        for name in names_0:
            names_1.append(name + "_new")
        # Shift a renamed copy forward by `delta` hours, then merge so each
        # row carries both the earlier ("_new") and the current value.
        sta1 = sta.copy()
        meteva.base.set_stadata_names(sta1, names_1)
        sta1["time"] = sta1["time"] + datetime.timedelta(hours= delta)
        sta01 = meteva.base.combine_on_all_coords(sta1, sta)
        fn = len(names_1)
        # current columns minus shifted columns = change over delta.
        dvalue = sta01.iloc[:, (-fn):].values - sta01.iloc[:, (-fn * 2):(-fn)].values
        sta01.iloc[:, (-fn):] = dvalue
        sta01 = sta01.drop(names_1, axis=1)
        return sta01
    else:
        names_0 = meteva.base.get_stadata_names(sta)
        names_1 = []
        for name in names_0:
            names_1.append(name+"_new")
        # Same scheme along the forecast lead time (dtime) coordinate.
        sta1 = sta.copy()
        meteva.base.set_stadata_names(sta1,names_1)
        sta1["dtime"] = sta1["dtime"] + delta
        sta01 = meteva.base.combine_on_all_coords(sta1,sta)
        fn= len(names_1)
        dvalue = sta01.iloc[:,(-fn):].values - sta01.iloc[:,(-fn * 2):(-fn)].values
        sta01.iloc[:,(-fn):] = dvalue
        sta01 = sta01.drop(names_1,axis=1)
        return sta01
def t_rh_to_tw(temp,rh,rh_unit = "%"):
    '''Compute wet-bulb temperature from temperature and relative humidity.

    Accepts either station DataFrames or gridded data for both inputs. The
    empirical formula used appears to be Stull's (2011) wet-bulb
    approximation — TODO confirm against the reference.
    Temperatures above 120 are assumed to be Kelvin and converted to Celsius.
    '''
    if isinstance(temp,pd.DataFrame):
        sta1 = meteva.base.combine_on_all_coords(temp, rh)
        meteva.base.set_stadata_names(sta1, ["t", "rh"])
        sta2 = meteva.base.not_IV(sta1)
        T = sta2.loc[:,"t"].values
        RH = sta2["rh"].values
        # Kelvin -> Celsius, decided from the first value only.
        # NOTE(review): `T -= 273.16` may modify sta2's column in place if
        # .values returns a view — confirm this is intended.
        if(T[0]>120):
            T -= 273.16
        # In this branch the formula expects RH in percent (0..100).
        if rh_unit == "%":
            pass
        else:
            RH = RH * 100
        max_rh = np.max(RH)
        min_rh = np.min(RH)
        if max_rh>100 or min_rh <0:
            print("相对湿度取值不能超过100%或小于0%")
            return
        if max_rh < 1:
            print("警告:最大的相对湿度小于1%,请确认rh的单位是否为%,如果不是,请设置rh_unit = 1")
        Tw = T * np.arctan(0.151977 * np.sqrt(RH + 8.313659)) + np.arctan(T + RH) - np.arctan(
            RH - 1.676331) + 0.00391838 * np.power(RH, 1.5) * np.arctan(0.023101 * RH) - 4.686035
        sta2["tw"] = Tw
        sta = sta2.drop(["t", "rh"], axis=1)
        return sta
    else:
        grid0 = meteva.base.get_grid_of_data(temp)
        # Kelvin -> Celsius, decided from the first grid point only.
        if temp.values[0,0,0,0,0,0] >120:
            T = temp.values - 273.16
        else:
            T = temp.values
        RH = rh.values
        # NOTE(review): in this branch RH is used as a fraction (0..1), and
        # `RH /= 100` divides rh.values in place — the caller's rh data is
        # modified as a side effect. Confirm whether a copy was intended.
        if rh_unit == "%":
            RH /= 100
        else:
            pass
        max_rh = np.max(RH)
        min_rh = np.min(RH)
        if max_rh>1 or min_rh <0:
            print("相对湿度取值不能超过100%或小于0%")
            return
        if max_rh < 0.01:
            print("警告:最大的相对湿度小于1%,请确认rh的单位是否为%,如果不是,请设置rh_unit = 1")
        Tw = T * np.arctan(0.151977 * np.sqrt(RH + 8.313659)) + np.arctan(T + RH) - np.arctan(
            RH - 1.676331) + 0.00391838 * np.power(RH, 1.5) * np.arctan(0.023101 * RH) - 4.686035
        grd = meteva.base.grid_data(grid0,Tw)
        return grd
def u_v_to_speed_angle(u,v):
    '''
    Convert u/v wind components into wind speed and wind direction.

    :param u: zonal component — either a station DataFrame or gridded data
    :param v: meridional component, same container type as u
    :return: (speed, angle) in the same container type as the inputs
    '''
    if isinstance(u, pd.DataFrame):
        # Station data: merge u and v on all coordinates; the first 6 columns
        # are coordinates, the rest are data members (nu of each component).
        sta = meteva.base.combine_on_all_coords(u, v)
        datanames = meteva.base.get_stadata_names(sta)
        nu = int(len(datanames) / 2)
        ud = sta.iloc[:, 6:(6 + nu)].values.astype(np.float32).flatten()
        vd = sta.iloc[:, (6 + nu):].values.astype(np.float32).flatten()
        s, a = meteva.base.tool.math_tools.u_v_to_s_d(ud, vd)
        speed = sta.iloc[:, 0:(6 + nu)].copy()
        angle = speed.copy()
        speed.iloc[:, 6:(6 + nu)] = s[...]
        angle.iloc[:, 6:(6 + nu)] = a[...]
        names1 = ["speed" + str(i) for i in range(nu)]
        names2 = ["angle" + str(i) for i in range(nu)]
        meteva.base.set_stadata_names(speed, names1)
        meteva.base.set_stadata_names(angle, names2)
        return speed, angle
    else:
        ud = u.values
        # BUG FIX: the meridional component must come from v. The original
        # read `vd = u.values`, so gridded speed/direction were computed as
        # if v were equal to u.
        vd = v.values
        s, a = meteva.base.tool.math_tools.u_v_to_s_d(ud, vd)
        grid = meteva.base.get_grid_of_data(u)
        speed = meteva.base.grid_data(grid, s)
        angle = meteva.base.grid_data(grid, a)
        return speed, angle
def u_v_to_wind(u,v):
    """Bundle u and v wind components into one two-member dataset.

    For station DataFrames the two tables are joined on all coordinates and
    the data columns renamed to "u"/"v".  For grid data a new grid with
    member list ["u", "v"] is built.
    Assumes grid inputs each carry a single member — TODO confirm.
    """
    if isinstance(u,pd.DataFrame):
        sta = meteva.base.combine_on_all_coords(u, v)
        meteva.base.set_stadata_names(sta, ["u", "v"])
        return sta
    else:
        # build a grid identical to u's, but with two members "u" and "v"
        grid0 = meteva.base.get_grid_of_data(u)
        grid1 = meteva.base.grid(grid0.glon,grid0.glat,grid0.gtime,
                    dtime_list= grid0.dtimes,level_list=grid0.levels,member_list=["u","v"])
        wind = meteva.base.grid_data(grid1)
        wind.name = "wind"
        # copy member 0 of each input into the corresponding member slot
        wind.values[0, :, :, :, :, :] = u.values[0, :, :, :, :, :]
        wind.values[1, :, :, :, :, :] = v.values[0, :, :, :, :, :]
        return wind
def speed_angle_to_wind(speed,angle = None):
    """Convert wind speed/direction into u and v components.

    NOTE(review): the two branches use different conventions — the station
    branch computes u = -S*sin(dir), v = -S*cos(dir) (meteorological
    "wind from" direction, degrees) while the grid branch computes
    u = S*cos(a), v = S*sin(a) (mathematical angle). They also differ in the
    pi constant used (3.14 vs math.pi). Confirm which convention is intended.
    """
    if isinstance(speed, pd.DataFrame):
        # NOTE(review): when angle is None the copied frame holds only the
        # speed column, yet two data names are assigned below — verify path.
        if angle is not None:
            sta = meteva.base.combine_on_all_coords(speed, angle)
        else:
            sta = speed.copy()
        meteva.base.set_stadata_names(sta, ["speed", "angle"])
        #speed = sta["speed"].values.astype(np.float32)
        #angle = sta["angle"].values.astype(np.float32)
        speed = sta["speed"].values.astype(np.float32)
        angle = sta["angle"].values.astype(np.float32)
        u = -speed * np.sin(angle * 3.14 / 180)
        v = -speed * np.cos(angle * 3.14 / 180)
        sta["u"] = u
        sta["v"] = v
        sta = sta.drop(["speed", "angle"], axis=1)
        return sta
    else:
        # assumes the gridded fields squeeze down to 2-D (lat, lon) — TODO confirm
        speed_v = speed.values.squeeze()
        angle_v = angle.values.squeeze()
        grid0 = meteva.base.get_grid_of_data(speed)
        grid1 = meteva.base.grid(grid0.glon,grid0.glat,grid0.gtime,
                    dtime_list=grid0.dtimes,level_list=grid0.levels,member_list=["u","v"])
        wind = meteva.base.grid_data(grid1)
        wind.name = "wind"
        wind.values[0, :, :, :, :, :] = speed_v[:, :] * np.cos(angle_v[:, :] * math.pi /180)
        wind.values[1, :, :, :, :, :] = speed_v[:, :] * np.sin(angle_v[:, :] * math.pi /180)
        return wind
def t_dtp_to_rh(temp,dtp):
    """Compute relative humidity (in percent) from temperature and dew point.

    Uses a Magnus-type saturation vapour pressure formula:
    RH = 100 * e(Td) / es(T).

    :param temp: temperature (station DataFrame or grid data); values above
        120 are treated as Kelvin and converted to Celsius
    :param dtp: dew-point temperature, same container type as ``temp``
    :return: relative humidity in percent, same container type as ``temp``
    """
    if isinstance(temp, pd.DataFrame):
        sta = meteva.base.combine_on_all_coords(temp, dtp)
        meteva.base.set_stadata_names(sta, ["t", "dtp"])
        T = sta.loc[:, "t"].values
        if T[0] > 120:  # heuristic: Kelvin input
            T -= 273.16
        D = sta["dtp"].values
        if D[0] > 120:
            D -= 273.16
        e0 = 6.11 * np.exp(17.15 * T / (235 + T))  # saturation vapour pressure at T
        e1 = 6.11 * np.exp(17.15 * D / (235 + D))  # actual vapour pressure (saturation at Td)
        rh = 100 * e1 / e0
        sta["rh"] = rh
        sta = sta.drop(["t", "dtp"], axis=1)
        return sta
    else:
        grid0 = meteva.base.get_grid_of_data(temp)
        if temp.values[0, 0, 0, 0, 0, 0] > 120:
            T = temp.values - 273.16
        else:
            T = temp.values
        if dtp.values[0, 0, 0, 0, 0, 0] > 120:
            D = dtp.values - 273.16
        else:
            D = dtp.values
        e0 = 6.11 * np.exp(17.15 * T / (235 + T))
        e1 = 6.11 * np.exp(17.15 * D / (235 + D))
        # Fixed: this branch previously returned e0/e1 — the inverted ratio,
        # not scaled to percent — disagreeing with the station branch above.
        rh = 100 * e1 / e0
        grd = meteva.base.grid_data(grid0, rh)
        return grd
def t_rh_p_to_q(temp,rh,pressure,rh_unit = "%"):
    """Compute specific humidity from temperature, relative humidity and pressure.

    :param temp: temperature (station DataFrame or grid data); values above
        120 are treated as Kelvin and converted to Celsius
    :param rh: relative humidity, 0-100 when rh_unit == "%", else 0-1
    :param pressure: pressure in hPa; an int/float scalar or station/grid data
    :param rh_unit: "%" (default) if ``rh`` is given in percent
    :return: specific humidity, same container type as ``temp``
    """
    if isinstance(temp, pd.DataFrame):
        if not isinstance(pressure, pd.DataFrame):
            # broadcast a scalar pressure onto the station table
            level_s = temp.copy()
            level_s.iloc[:, -1] = pressure
        else:
            level_s = pressure
        sta1 = meteva.base.combine_on_all_coords(temp, rh)
        sta2 = meteva.base.combine_on_all_coords(sta1, level_s)
        meteva.base.set_stadata_names(sta2, ["t", "rh", "p"])
        sta2 = meteva.base.not_IV(sta2)
        T = sta2.loc[:, "t"].values
        R = sta2.loc[:, "rh"].values
        P = sta2.loc[:, "p"].values
        if T[0] > 120:  # heuristic: Kelvin input
            T -= 273.16
        e0 = 6.11 * np.exp(5420 * (1.0 / 273.15 - 1 / (T + 273.15))) * 622
        if rh_unit == "%":
            R = R / 100  # copy instead of "/=" so the source data is not mutated
        max_rh = np.max(R)
        min_rh = np.min(R)
        if max_rh > 1 or min_rh < 0:
            print("相对湿度取值不能超过100%或小于0%")
            return
        if max_rh < 0.01:
            print("警告:最大的相对湿度小于1%,请确认rh的单位是否为%,如果不是,请设置rh_unit = 1")
        q = e0 * R / P
        sta2["q"] = q
        sta = sta2.drop(["t", "rh", "p"], axis=1)
        return sta
    else:
        grid0 = meteva.base.get_grid_of_data(temp)
        if temp.values[0, 0, 0, 0, 0, 0] > 120:
            T = temp.values - 273.16
        else:
            T = temp.values
        R = rh.values
        if rh_unit == "%":
            R = R / 100  # copy instead of "/=": avoid mutating rh.values in place
        max_rh = np.max(R)
        min_rh = np.min(R)
        if max_rh > 1 or min_rh < 0:
            print("相对湿度取值不能超过100%或小于0%")
            return
        e0 = 6.11 * np.exp(5420 * (1.0 / 273.15 - 1 / (T + 273.15))) * 622
        # Fixed: the original tested isinstance(pressure, float) twice; the
        # docstring documents plain integers as valid scalar pressures too.
        if isinstance(pressure, (int, float)):
            P = pressure
        else:
            P = pressure.values
        q = e0 * R / P
        grd = meteva.base.grid_data(grid0, q)
        return grd
| [
"liucouhua@163.com"
] | liucouhua@163.com |
cfd15ad4fd1f86eb6e93d87b51cc4e985e79c39d | 3d9ec6fddb04669104091b2f3b6f8fd0617db64d | /api/cloud_provider/compute_model.py | 0665ec64b08f60672fbcf8a1d2059be952f4c802 | [
"Apache-2.0"
] | permissive | lixianseng-limeidong/KubeOperator | b4403de8b38de54b5d0f153b19da38e6870283b3 | 124d7d9320a563d7f9f82c82bb762b523f921567 | refs/heads/master | 2020-08-07T17:34:35.196221 | 2019-10-08T03:12:20 | 2019-10-08T03:12:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | import logging
import os
import yaml
from fit2ansible.settings import CLOUDS_RESOURCE_DIR
logger = logging.getLogger(__name__)
compute_models = []
def load_compute_model():
with open((os.path.join(CLOUDS_RESOURCE_DIR, 'compute_model_meta.yml'))) as f:
logger.info('Load compute model meta')
compute_models.extend(yaml.load(f))
| [
"scydeai@qq.com"
] | scydeai@qq.com |
07f03ddd26a9d4f0c1263c883b35fa596a4e0cf2 | 517a7be46b56c7a9658255dc9d2abc5872ead589 | /Line_sweep/the_skyline_problem.py | 1ce50e56c6eb0758eaccfbaa556cedda5445be58 | [] | no_license | timwangmusic/Algorithm-Design | d1f1f7cdd6b769edbb0fa0d6cf5ddd641568baeb | 5aea290f55ec80d733c596fd6fa595adac776b97 | refs/heads/master | 2022-10-03T12:23:26.134172 | 2020-05-22T00:38:47 | 2020-05-22T03:55:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py | import collections
import heapq
from typing import List
"""
Skyline problem
A city's skyline is the outer contour of the silhouette formed by all the buildings in that city when viewed
from a distance. Now suppose you are given the locations and height of all the buildings,
output the skyline formed by these buildings collectively.
Computational complexity:
O(N*logN) where N is number of buildings
"""
def getSkyline(buildings: List[List[int]]) -> List[List[int]]:
    """Return the skyline key points for a list of [left, right, height] buildings.

    Line sweep over x with a lazily-pruned max-heap of active heights:
    at each distinct x all start/end events are applied, stale heights are
    popped from the heap, and a key point is emitted whenever the visible
    height changes.  Runs in O(N log N) for N buildings.
    """
    points = []
    for left, right, height in buildings:
        points.append((left, height, True))    # building starts
        points.append((right, height, False))  # building ends
    points.sort(key=lambda p: p[0])  # events at equal x are processed together

    heap = []  # max-heap of negated heights (lazy deletion)
    counter = collections.Counter()  # live multiplicity of each height
    start, end = 0, 0
    res = []
    prev_h = None
    while start < len(points):
        x = points[start][0]
        # apply every event sharing this x-coordinate
        while end < len(points):
            ex, eh, is_building_start = points[end]
            if ex > x:
                break
            if is_building_start:
                counter[eh] += 1
                heapq.heappush(heap, -eh)
            else:
                counter[eh] -= 1
            end += 1
        # discard heights that are no longer active
        while heap and counter[-heap[0]] == 0:
            heapq.heappop(heap)
        # Fixed: when the last building ends the heap is empty and the
        # original "-heap[0]" raised IndexError; the ground height is 0.
        cur_h = -heap[0] if heap else 0
        if cur_h != prev_h:
            res.append([x, cur_h])
            prev_h = cur_h
        start = end
    return res
| [
"weihewang2012@gmail.com"
] | weihewang2012@gmail.com |
df9d3e526751041179700b307c8cfb940b7c8a4b | e2426d7c01500ca4a2df4e4555f217f957baf957 | /cows/xml/util.py | 9a9fbbd3fc3cc0537e00ab8b27933d2b0ba9c54a | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | cedadev/cows | 959a5e1ad220cfe0cce48a2131d6971106c765aa | db9ed729c886b271ce85355b97e39243081e8246 | refs/heads/master | 2020-03-16T15:17:45.710584 | 2018-05-09T10:35:47 | 2018-05-09T10:36:37 | 132,736,968 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | # BSD Licence
# Copyright (c) 2009, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
# Copyright (C) 2007 STFC & NERC (Science and Technology Facilities Council).
# This software may be distributed under the terms of the
# Q Public License, version 1.0 or later.
# http://ndg.nerc.ac.uk/public_docs/QPublic_license.txt
"""
Elementtree convenience utilities
@author: Stephen Pascoe
"""
def find_text(node, path):
    """Return the text of the first node matching *path*, or None if absent."""
    found = node.find(path)
    return getattr(found, 'text', None)
def findall_text(node, path):
    """Collect the .text of every node matching *path* into a list."""
    texts = []
    for match in node.findall(path):
        texts.append(match.text)
    return texts
def find_with(node, path, func):
    """Apply *func* to the first node matching *path*; None if no match."""
    found = node.find(path)
    return None if found is None else func(found)
def findall_with(node, path, func):
    """Apply *func* to every node matching *path* and return the results."""
    return list(map(func, node.findall(path)))
| [
"ag.stephens@stfc.ac.uk"
] | ag.stephens@stfc.ac.uk |
864325413a3e37a779f35facb6b7925229555615 | fac6e2aeba6873719a349f3f088a22183f92466c | /oracle/python/protectOracle/protectOracle.py | 00a0c61070908e63a21adc4ecbae136f980d078c | [] | no_license | geoffaskam/scripts | b8cd177f19c9990f93317d245d8ea87fe4cbfff3 | 1909d2dad935f10a26992a17541407b07c6b7884 | refs/heads/master | 2023-08-20T14:26:36.900054 | 2021-09-13T10:32:11 | 2021-09-13T10:32:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,570 | py | #!/usr/bin/env python
"""Protect Oracle"""
# usage:
# ./protectOracle.py -v mycluster \
# -u myuser \
# -d mydomain.net \
# -p 'My Policy' \
# -j 'My New Job' \
# -z 'America/New_York' \
# -s myserver.mydomain.net \
# -db mydb
# import pyhesity wrapper module
from pyhesity import *
### command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, required=True) # Cohesity cluster name or IP
parser.add_argument('-u', '--username', type=str, required=True) # Cohesity Username
parser.add_argument('-d', '--domain', type=str, default='local') # Cohesity User Domain
parser.add_argument('-j', '--jobname', type=str, required=True) # name of protection job
parser.add_argument('-p', '--policyname', type=str) # name of protection policy
parser.add_argument('-s', '--servername', type=str, required=True) # name of server to protect
parser.add_argument('-db', '--dbname', type=str) # name of DB to protect
parser.add_argument('-t', '--starttime', type=str, default='20:00') # job start time
parser.add_argument('-z', '--timezone', type=str, default='America/Los_Angeles') # timezone for job
parser.add_argument('-is', '--incrementalsla', type=int, default=60) # incremental SLA minutes
parser.add_argument('-fs', '--fullsla', type=int, default=120) # full SLA minutes
parser.add_argument('-sd', '--storagedomain', type=str, default='DefaultStorageDomain') # storage domain
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
jobname = args.jobname
policyname = args.policyname
servername = args.servername
dbname = args.dbname
starttime = args.starttime
timezone = args.timezone
incrementalsla = args.incrementalsla
fullsla = args.fullsla
storagedomain = args.storagedomain
# parse starttime
try:
(hour, minute) = starttime.split(':')
except Exception:
print('starttime is invalid!')
exit(1)
# authenticate
apiauth(vip, username, domain)
# find storage domain
sd = [sd for sd in api('get', 'viewBoxes') if sd['name'].lower() == storagedomain.lower()]
if len(sd) < 1:
print("Storage domain %s not found!" % storagedomain)
exit(1)
sdid = sd[0]['id']
# get oracle sources
sources = api('get', 'protectionSources?environments=kOracle')
# find policy
if policyname is not None:
policy = [p for p in api('get', 'protectionPolicies') if p['name'].lower() == policyname.lower()]
if len(policy) < 1:
print('Policy %s not found!' % policyname)
exit(1)
else:
policy = policy[0]
# find existing job
newJob = False
job = [j for j in api('get', 'protectionJobs?environments=kOracle&isActive=true&isDeleted=false') if j['name'].lower() == jobname.lower()]
if len(job) < 1:
if policyname is not None:
newJob = True
# create new job
job = {
"policyId": policy['id'],
"viewBoxId": sdid,
"createRemoteView": False,
"priority": "kMedium",
"incrementalProtectionSlaTimeMins": 60,
"alertingPolicy": [
"kFailure"
],
"sourceSpecialParameters": [],
"fullProtectionSlaTimeMins": 120,
"timezone": timezone,
"qosType": "kBackupHDD",
"environment": "kOracle",
"startTime": {
"minute": int(minute),
"hour": int(hour)
},
"parentSourceId": sources[0]['protectionSource']['id'],
"name": jobname,
"sourceIds": [],
"indexingPolicy": {
"disableIndexing": True
}
}
else:
print('Job %s not found!' % jobname)
exit(1)
else:
job = job[0]
# find server to add to job
server = [s for s in sources[0]['nodes'] if s['protectionSource']['name'].lower() == servername]
if len(server) < 1:
print('Server %s not found!' % servername)
exit(1)
serverId = server[0]['protectionSource']['id']
job['sourceIds'].append(serverId)
if dbname is not None:
# find db to add to job
db = [a for a in server[0]['applicationNodes'] if a['protectionSource']['name'].lower() == dbname.lower()]
if len(db) < 1:
print("Database %s not found!" % dbname)
exit(1)
dbIds = [db[0]['protectionSource']['id']]
print('Adding %s/%s to protection job %s...' % (servername, dbname, jobname))
else:
# or add all dbs to job
dbIds = [a['protectionSource']['id'] for a in server[0]['applicationNodes']]
print('Adding %s/* to protection job %s...' % (servername, jobname))
# update dblist for server
sourceSpecialParameter = [s for s in job['sourceSpecialParameters'] if s['sourceId'] == serverId]
if len(sourceSpecialParameter) < 1:
job['sourceSpecialParameters'].append({"sourceId": serverId, "oracleSpecialParameters": {"applicationEntityIds": dbIds}})
else:
for dbId in dbIds:
sourceSpecialParameter[0]['oracleSpecialParameters']['applicationEntityIds'].append(dbId)
sourceSpecialParameter[0]['oracleSpecialParameters']['applicationEntityIds'] = list(set(sourceSpecialParameter[0]['oracleSpecialParameters']['applicationEntityIds']))
job['sourceIds'] = list(set(job['sourceIds']))
if newJob is True:
# create new job
result = api('post', 'protectionJobs', job)
else:
# update existing job
result = api('put', 'protectionJobs/%s' % job['id'], job)
| [
"bseltzer@cohesity.com"
] | bseltzer@cohesity.com |
439d402a75de7bc30ffde90e79926a8c711ed6fc | f88f900c0384f6da82eeb749371ad44115527700 | /course-book/09-matching/0911-sift.py | 22566d262cac06e2ecb7a1028026128497681c01 | [] | no_license | aaron-kr/learning-opencv | eff382e8f0c822400f765451d57b192a63cd1b74 | 158239f0140569aec519fc1fbf255c54ef2567d2 | refs/heads/main | 2023-08-21T11:02:49.775425 | 2021-10-27T00:04:01 | 2021-10-27T00:04:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,619 | py | # 0911.py
# SIFT = Scale Invariant Feature Transform
import cv2
import numpy as np
#1
def distance(f1, f2):
    """Euclidean distance between the .pt coordinates of two keypoints."""
    x1, y1 = f1.pt
    x2, y2 = f2.pt
    return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)

def filteringByDistance(kp, distE = 0.5):
    """Suppress keypoints lying within distE pixels of a kept keypoint.

    Keypoints are scanned in order; while kp[i] is still kept, every other
    keypoint closer than distE to it is discarded.  Returns the surviving
    keypoints as a list, original order preserved.
    """
    size = len(kp)
    # Fixed: np.bool8 was deprecated in NumPy 1.24 and removed in NumPy 2.0;
    # build the all-True keep-mask directly instead.
    mask = np.ones(size, dtype=bool)  # all True
    for i, f1 in enumerate(kp):
        if not mask[i]:
            continue
        for j, f2 in enumerate(kp):
            if i == j:
                continue
            if distance(f1, f2) < distE:
                mask[j] = False
    np_kp = np.array(kp)
    return list(np_kp[mask])
#2 -- load the test image, convert to gray, and detect SIFT keypoints
src = cv2.imread('../../img/chessboard.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
## siftF = cv2.SIFT_create()
siftF = cv2.SIFT_create(edgeThreshold = 80)
kp = siftF.detect(gray)
print('len(kp) = ', len(kp))
#3 -- keep the strongest responses, thin them out by distance, then
#     compute descriptors only for the surviving keypoints
kp = sorted(kp, key = lambda f: f.response, reverse = True)
## filtered_kp = list(filter(lambda f: f.response > 0.01, kp))
filtered_kp = filteringByDistance(kp, 10)
print('len(filtered_kp) = ', len(filtered_kp))
kp, des = siftF.compute(gray, filtered_kp)
print('des.shape = ', des.shape)
print('des.dtype = ', des.dtype)
print('des = ', des)
#4 -- visualize: red dots (drawKeypoints), a green oriented box and a blue
#     circle sized/rotated from each keypoint's size and angle
dst2 = cv2.drawKeypoints(gray, filtered_kp, None, color = (0,0,255))
for f in filtered_kp:
    x,y = f.pt
    size = f.size
    rect = ((x,y), (size,size), f.angle)
    box = cv2.boxPoints(rect).astype(np.int32)
    cv2.polylines(dst2, [box], True, (0,255,0), 2)
    cv2.circle(dst2, (round(x), round(y)), round(f.size / 2), (255,0,0), 2)
cv2.imshow('dst2', dst2)
cv2.waitKey(0)
cv2.destroyAllWindows()
"jekkilekki@gmail.com"
] | jekkilekki@gmail.com |
bf2e7bde9751dd59167e44ff617162cc3d743610 | 482ed16cd1c8d721e98a9c460555802f7cce8906 | /run-tests/t226.py | c3179519a2fe24c123647a0afa2d80f4d0ce9940 | [
"MIT"
] | permissive | forkcodeaiyc/skulpt_parser | ea2347b2a452476854cf03412474fae63bca31c0 | dd592e9b91bcbbe0c5cfdb5c2da0fb5ae604a428 | refs/heads/master | 2023-09-04T11:32:09.760317 | 2021-10-11T22:58:18 | 2021-10-11T22:58:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | for const in (1, 2, 3):
print(const)
def f():
for const in (1, 2, 3):
print(const)
for object in (1, 2, 3):
print(object)
instanceof = 5
void = 6
var = 7
delete = 8
switch = 9
default = 10
catch = 11
print((instanceof, void, var, delete, switch, default, catch))
f()
| [
"albert-jan.nijburg@babylonhealth.com"
] | albert-jan.nijburg@babylonhealth.com |
1f42f6a26b04a814d2e50725049e505104c4b7cb | b9286e5866373b777739f8a13ee06afa810451d5 | /antisocial/main/tests/test_models.py | 2a872f2af2ccf6ce8b1c891b839a08896ddffdcb | [
"BSD-3-Clause"
] | permissive | peicheng/antisocial | ff476d965e5f37cf038c88b13146ffe30dcd8e27 | 7ba8da6aa58ee20e5f01870b30a62d478cc707c9 | refs/heads/master | 2021-01-12T12:18:25.290271 | 2016-10-25T13:27:34 | 2016-10-25T21:08:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,946 | py | from django.test import TestCase
from antisocial.main.models import Feed, Entry, extract_published
from datetime import datetime, timedelta
def feed_factory():
    """Create and persist a Feed with fixed test data and fresh timestamps."""
    return Feed.objects.create(
        url="http://example.com/",
        guid="1234",
        title="test feed",
        last_fetched=datetime.now(),
        last_failed=datetime.now(),
        next_fetch=datetime.now() + timedelta(hours=1)
    )
def entry_factory(f):
    """Create and persist an Entry attached to feed *f* with fixed test data."""
    return Entry.objects.create(
        feed=f,
        guid="entry1234",
        title="test entry",
        link="http://example.com/entry",
        author="test author",
        published=datetime.now(),
    )
class DummyFeed(object):
    """Minimal stand-in for a parsed feed carrying only a guid."""
    feed = {"guid": "foo"}
class DictObj(object):
    """Expose keyword arguments both as attributes and as an iterable of keys."""

    def __init__(self, **kwargs):
        self._d = kwargs
        self.__dict__.update(kwargs)

    def __iter__(self):
        return iter(self._d)
class TestHelpers(TestCase):
    """Tests for module-level helper functions."""
    def test_extract_published_default(self):
        # with no date keys present, extract_published still yields a value
        r = extract_published(dict())
        self.assertIsNotNone(r)
class TestFeed(TestCase):
    """Unit tests for Feed methods, built on the factory helpers above."""
    def test_try_fetch(self):
        # a successful fetch leaves the exponential backoff at zero
        f = feed_factory()
        f.try_fetch()
        self.assertEqual(f.backoff, 0)
    def test_update_guid(self):
        # guid is copied from the parsed feed's 'guid' entry
        f = feed_factory()
        f.update_guid(DummyFeed())
        self.assertEqual(f.guid, "foo")
    def test_update_etag(self):
        f = feed_factory()
        d = DictObj(etag='new one')
        f.update_etag(d)
        self.assertEqual(f.etag, 'new one')
    def test_update_modified(self):
        f = feed_factory()
        d = DictObj(modified='new one')
        f.update_modified(d)
        self.assertEqual(f.modified, 'new one')
    def test_update_entry_already_exists(self):
        # updating with an existing guid must not create a duplicate Entry
        f = feed_factory()
        e = entry_factory(f)
        c = Entry.objects.count()
        f.update_entry(dict(guid=e.guid))
        # no new ones created
        self.assertEqual(c, Entry.objects.count())
| [
"anders@columbia.edu"
] | anders@columbia.edu |
bae7da5bd3bf39389bc7010564d8a48d2f341187 | 0377a4135f9e8940809a62186b229295bed9e9bc | /剑指offer/素数对/solution.py | 8f7cd0843bb2c0886a8583a50ff0752e5a9d95fa | [] | no_license | neko-niko/leetcode | 80f54a8ffa799cb026a7f60296de26d59a0826b0 | 311f19641d890772cc78d5aad9d4162dedfc20a0 | refs/heads/master | 2021-07-10T10:24:57.284226 | 2020-09-13T11:28:45 | 2020-09-13T11:28:45 | 198,792,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | # 给定一个正整数,编写程序计算有多少对质数的和等于输入的这个正整数,并输出结果。输入值小于1000。
# 如,输入为10, 程序应该输出结果为2。(共有两对质数的和为10,分别为(5,5),(3,7))
import math
def Solution(num):
    """Count pairs of primes (p, q) with p <= q and p + q == num.

    e.g. Solution(10) == 2, for the pairs (3, 7) and (5, 5).
    """
    cot = 0
    for i in range(1, (num // 2) + 1):
        if Judge(i) and Judge(num - i):
            cot += 1
    return cot


def Judge(num):
    """Return True if *num* is prime, via trial division up to sqrt(num).

    Fixed: the original returned False for 2, so every pair containing the
    prime 2 (e.g. (2, 2) for 4, or (2, 3) for 5) was silently missed.
    """
    if num < 2:
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    for i in range(3, int(math.sqrt(num)) + 1, 2):
        if num % i == 0:
            return False
    return True
# NOTE(review): 10000000 far exceeds the documented input bound (< 1000);
# with per-candidate trial division this call is extremely slow.
print(Solution(10000000))
"2361253285@qq.com"
] | 2361253285@qq.com |
863df0383c9787e542ce56e905fdafa519dba0cc | adecd0537a42ae728db1dea2b754c503dc533f9f | /docs/cycles.py | b92db1c1aa11ae72731c03e42d231df14990ecaa | [
"MIT"
] | permissive | mickaellalande/proplot | 596ab4a2c9c820b64b38bf6f54ccda440e98fe4a | 31bdb57f88190dc64f70bbd4d784b1af69ec36fc | refs/heads/master | 2022-12-17T12:00:43.428061 | 2020-09-23T15:04:10 | 2020-09-23T15:04:10 | 287,268,054 | 0 | 0 | MIT | 2020-08-13T12:02:26 | 2020-08-13T12:02:26 | null | UTF-8 | Python | false | false | 7,362 | py | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_cycles:
#
# Color cycles
# ============
#
# ProPlot defines **color cycles** as color palettes comprising sets of
# *distinct colors*. Unlike :ref:`colormaps <Colormaps>`, interpolation
# between these colors may not make sense. Color cycles are generally used
# with bar plots, line plots, and other distinct plot elements. ProPlot's
# named color cycles are actually registered as `~proplot.colors.ListedColormap`
# instances so that they can be `used with categorical data\
# <https://journals.ametsoc.org/view-large/figure/9538246/bams-d-13-00155_1-f5.tif>`__.
# Much more commonly, we build `property cycles\
# <https://matplotlib.org/3.1.0/tutorials/intermediate/color_cycle.html>`__
# from the `~proplot.colors.ListedColormap` colors using the
# `~proplot.constructor.Cycle` constructor function or by
# :ref:`drawing samples <ug_cycles_new>` from continuous colormaps.
#
# ProPlot adds several features to help you use color cycles effectively in
# your figures. This section documents the new registered color cycles,
# explains how to make and modify colormaps, and shows how to apply them to
# your plots.
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_cycles_included:
#
# Included color cycles
# ---------------------
#
# Use `~proplot.demos.show_cycles` to generate a table of the color cycles
# registered by default and loaded from your ``~/.proplot/cycles`` folder.
# You can make your own color cycles using the `~proplot.constructor.Cycle`
# constructor function.
# %%
import proplot as plot
# Render the reference table of all registered color cycles
fig, axs = plot.show_cycles()
# %% [raw] raw_mimetype="text/restructuredtext"
# Changing the color cycle
# ------------------------
#
# You can make and apply new property cyclers with the
# `~proplot.constructor.Cycle` constructor function. Various plotting
# commands like `~matplotlib.axes.Axes.plot` and
# `~matplotlib.axes.Axes.scatter` now accept a `cycle` keyword arg, which is
# passed to `~proplot.constructor.Cycle` (see
# `~proplot.axes.cycle_changer`). To save your color cycle data and use
# it every time ProPlot is imported, simply pass ``save=True`` to
# `~proplot.constructor.Cycle`. If you want to change the global property
# cycler, pass a *name* to the :rcraw:`cycle` setting or pass the result of
# `~proplot.constructor.Cycle` to the :rcraw:`axes.prop_cycle` setting (see
# the :ref:`configuration guide <ug_config>`).
# %%
import numpy as np
lw = 5
state = np.random.RandomState(51423)
data = (state.rand(12, 6) - 0.45).cumsum(axis=0)
kwargs = {'legend': 'b', 'labels': list('abcdef')}
# Modify the default color cycle
plot.rc.cycle = '538'
fig, axs = plot.subplots(ncols=3, axwidth=1.9)
axs.format(suptitle='Changing the color cycle')
ax = axs[0]
ax.plot(data, lw=lw, **kwargs)
ax.format(title='Global color cycle')
# Pass the cycle to a plotting command
ax = axs[1]
ax.plot(data, cycle='qual1', lw=lw, **kwargs)
ax.format(title='Local color cycle')
# As above but draw each line individually
# Note that the color cycle is not reset with each plot call
ax = axs[2]
labels = kwargs['labels']
for i in range(data.shape[1]):
ax.plot(data[:, i], cycle='qual1', legend='b', label=labels[i], lw=lw)
ax.format(title='With multiple plot calls')
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_cycles_new:
#
# Making new color cycles
# -----------------------
#
# You can make new color cycles with the `~proplot.constructor.Cycle`
# constructor function. One great way to make cycles is by sampling a
# colormap! Just pass the colormap name to `~proplot.constructor.Cycle`, and
# optionally specify the number of samples you want to draw as the last
# positional argument (e.g. ``plot.Cycle('Blues', 5)``).
#
# Positional arguments passed to `~proplot.constructor.Cycle` are interpreted
# by the `~proplot.constructor.Colormap` constructor, and the resulting
# colormap is sampled at discrete values. To exclude near-white colors on the
# end of a colormap, pass e.g. ``left=x`` to `~proplot.constructor.Cycle`, or
# supply a plotting command with e.g. ``cycle_kw={'left': x}``. See
# the :ref:`colormaps section <ug_cmaps>` for details.
#
# In the below example, several cycles are constructed from scratch, and the
# lines are referenced with colorbars and legends. Note that ProPlot allows
# you to :ref:`generate colorbars from lists of lines <ug_cbars>`.
# %%
import proplot as plot
import numpy as np
fig, axs = plot.subplots(ncols=2, share=0, axwidth=2.3)
state = np.random.RandomState(51423)
data = (20 * state.rand(10, 21) - 10).cumsum(axis=0)
# Cycle from on-the-fly monochromatic colormap
ax = axs[0]
lines = ax.plot(data[:, :5], cycle='plum', cycle_kw={'fade': 85}, lw=5)
fig.colorbar(lines, loc='b', col=1, values=np.arange(0, len(lines)))
fig.legend(lines, loc='b', col=1, labels=np.arange(0, len(lines)))
ax.format(title='Cycle from color')
# Cycle from registered colormaps
ax = axs[1]
cycle = plot.Cycle('blues', 'reds', 'oranges', 15, left=0.1)
lines = ax.plot(data[:, :15], cycle=cycle, lw=5)
fig.colorbar(lines, loc='b', col=2, values=np.arange(0, len(lines)), locator=2)
fig.legend(lines, loc='b', col=2, labels=np.arange(0, len(lines)), ncols=4)
ax.format(
title='Cycle from merged colormaps',
suptitle='Color cycles from colormaps'
)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_cycles_other:
#
# Cycles of other properties
# --------------------------
#
# `~proplot.constructor.Cycle` can also generate cyclers that change
# properties other than color. Below, a single-color dash style cycler is
# constructed and applied to the axes locally. To apply it globally, simply
# use ``plot.rc['axes.prop_cycle'] = cycle``.
# %%
import proplot as plot
import numpy as np
import pandas as pd
# Create cycle that loops through 'dashes' Line2D property
cycle = plot.Cycle(dashes=[(1, 0.5), (1, 1.5), (3, 0.5), (3, 1.5)])
# Generate sample data
state = np.random.RandomState(51423)
data = (state.rand(20, 4) - 0.5).cumsum(axis=0)
data = pd.DataFrame(data, columns=pd.Index(['a', 'b', 'c', 'd'], name='label'))
# Plot data
fig, ax = plot.subplots(axwidth=2.6, aspect=1)
ax.format(suptitle='Plot without color cycle')
obj = ax.plot(
data, lw=3, cycle=cycle, legend='ul',
legend_kw={'ncols': 2, 'handlelength': 3}
)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_cycles_dl:
#
# Downloading color cycles
# ------------------------
#
# There are plenty of online interactive tools for generating and testing
# color cycles, including
# `i want hue <http://tools.medialab.sciences-po.fr/iwanthue/index.php>`__,
# `coolers <https://coolors.co>`__, and
# `viz palette <https://projects.susielu.com/viz-palette>`__.
#
# To add color cycles downloaded from any of these sources, save the cycle
# data to a file in your ``~/.proplot/cycles`` folder and call
# `~proplot.config.register_cycles` (or restart your python session), or use
# `~proplot.colors.ListedColormap.from_file`. The file name is used as the
# registered cycle name. See `~proplot.colors.ListedColormap.from_file` for a
# table of valid file extensions.
| [
"lukelbd@gmail.com"
] | lukelbd@gmail.com |
0bf18a73e83e14446d208b46726489c1f8870061 | e99dfc900052272f89d55f2fd284389de2cf6a73 | /apostello/loaddotenv.py | 4a4189db4fdd1e2b38c5c29fcd1b383351778925 | [
"MIT"
] | permissive | armenzg/apostello | a3e6ca3d34917608af79fbab4134ee4de1f5e8ee | 1827547b5a8cf94bf1708bb4029c0b0e834416a9 | refs/heads/master | 2021-01-18T18:16:02.364837 | 2017-03-22T20:34:21 | 2017-03-22T20:34:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | #!/usr/bin/env python
import os
from dotenv import load_dotenv
def loaddotenv():
    """Load environment variables from the .env file at the project root."""
    here = os.path.dirname(__file__)
    env_file = os.path.join(here, os.pardir, '.env')
    load_dotenv(os.path.abspath(env_file))
"montgomery.dean97@gmail.com"
] | montgomery.dean97@gmail.com |
7bf14c0c23f8195723424bf053dcaecb469ab14c | 8e90a7759ec7143427823547e0fbff58e0343aaa | /training_api/application/paths/services/path_service.py | a82588bb1dc00c46b4dd497a83fefd245a8bb3a8 | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | BMW-InnovationLab/BMW-TensorFlow-Training-GUI | 646a6f86f26887e94351b4c572b7fe7f0842f75c | 06531dae14365986c86baf735fd149317f4bb67a | refs/heads/master | 2023-07-20T01:48:27.299962 | 2023-07-12T15:22:22 | 2023-07-12T15:22:22 | 227,429,492 | 1,030 | 198 | Apache-2.0 | 2023-05-22T17:40:23 | 2019-12-11T18:06:11 | Python | UTF-8 | Python | false | false | 528 | py | import json
from domain.models.paths import Paths
from domain.services.contract.abstract_path_service import AbstractPathService
class PathService(AbstractPathService):
    """
    A class used to get paths from path.json and return object of type Paths

    Loads ./assets/paths.json once at construction time and caches the
    validated Paths model for subsequent get_paths() calls.
    """

    def __init__(self):
        # parse the JSON file and validate it into the Paths pydantic model
        with open("./assets/paths.json", 'r') as paths_file:
            json_path = json.load(paths_file)
        self.paths: Paths = Paths.parse_obj(json_path)

    def get_paths(self) -> Paths:
        """Return the cached Paths object loaded from assets/paths.json."""
        return self.paths
| [
"Daniel.Jess@bmw.de"
] | Daniel.Jess@bmw.de |
ab66cbb9b29c88f353c308f5e5c9bb2d4fe923a6 | 5ca4a0d91f5bd119e80478b5bd3d43ed30133a42 | /film20/core/migrations/0003_auto__add_moderatedpersonlocalized__add_field_personlocalized_biograph.py | 0ff9cc77d7adec9c89c4e5790cc3f692bb33839a | [] | no_license | thuvh/filmmaster | 1fc81377feef5a9e13f792b329ef90f840404ec5 | dd6a2ee5a4951b2397170d5086c000169bf91350 | refs/heads/master | 2021-01-17T16:10:54.682908 | 2012-04-29T18:19:52 | 2012-04-29T18:19:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,680 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: create the ModeratedPersonLocalized table and add the
        PersonLocalized.biography and Profile.phone_number columns."""
        # Adding model 'ModeratedPersonLocalized'
        db.create_table('core_moderatedpersonlocalized', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('moderation_status', self.gf('django.db.models.fields.IntegerField')(default=-1)),
            ('moderation_status_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('moderation_status_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='moderatedpersonlocalized_moderated_objects', null=True, to=orm['auth.User'])),
            ('rejection_reason', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('LANG', self.gf('django.db.models.fields.CharField')(default='pl', max_length=2)),
            ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Person'])),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
            ('surname', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
            ('biography', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal('core', ['ModeratedPersonLocalized'])
        # Adding field 'PersonLocalized.biography'
        db.add_column('core_personlocalized', 'biography', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
        # Adding field 'Profile.phone_number' — presumably hand-added, since
        # the generated file carries no comment for it; TODO confirm
        db.add_column('core_profile', 'phone_number', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting model 'ModeratedPersonLocalized'
db.delete_table('core_moderatedpersonlocalized')
# Deleting field 'PersonLocalized.biography'
db.delete_column('core_personlocalized', 'biography')
db.delete_column('core_profile', 'phone_number')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_to'", 'symmetrical': 'False', 'through': "orm['followers.Followers']", 'to': "orm['auth.User']"}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.character': {
'LANG': ('django.db.models.fields.CharField', [], {'default': "'pl'", 'max_length': '2'}),
'Meta': {'object_name': 'Character'},
'character': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'description_full': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'description_lead': ('django.db.models.fields.CharField', [], {'max_length': '350', 'null': 'True', 'blank': 'True'}),
'film': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Film']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_thumb': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_thumb_lost': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'importance': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Person']"})
},
'core.country': {
'Meta': {'object_name': 'Country'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.deferredtask': {
'Meta': {'object_name': 'DeferredTask'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {}),
'eta': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_tries': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'queue_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'try_cnt': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'core.film': {
'Meta': {'object_name': 'Film'},
'actors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'films_played'", 'to': "orm['core.Person']", 'through': "orm['core.Character']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'criticker_id': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'directors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'films_directed'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Person']"}),
'hires_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'imdb_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'is_enh9': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'popularity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'popularity_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'production_country': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'produced_in'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Country']"}),
'production_country_list': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'release_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'release_year': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'title_normalized': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'tmdb_import_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'verified_imdb_code': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'writers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'screenplays_written'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Person']"})
},
'core.filmcomparator': {
'Meta': {'object_name': 'FilmComparator'},
'compared_film': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'compared_films'", 'to': "orm['core.Film']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_film': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'main_films'", 'to': "orm['core.Film']"}),
'score': ('django.db.models.fields.DecimalField', [], {'default': "'0.000'", 'max_digits': '5', 'decimal_places': '3'})
},
'core.filmlocalized': {
'Meta': {'object_name': 'FilmLocalized', '_ormbases': ['core.ObjectLocalized']},
'description': ('django.db.models.fields.CharField', [], {'max_length': '15000', 'null': 'True', 'blank': 'True'}),
'fetched_description': ('django.db.models.fields.CharField', [], {'max_length': '15000', 'null': 'True', 'blank': 'True'}),
'fetched_description_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'fetched_description_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'fetched_description_url_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'film': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Film']"}),
'object_localized': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.ObjectLocalized']", 'unique': 'True', 'primary_key': 'True'}),
'release_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'release_year': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'title_normalized': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'core.filmlog': {
'LANG': ('django.db.models.fields.CharField', [], {'default': "'pl'", 'max_length': '2'}),
'Meta': {'object_name': 'FilmLog'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '40000', 'null': 'True', 'blank': 'True'}),
'film': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Film']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'localized_title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'release_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'release_year': ('django.db.models.fields.IntegerField', [], {}),
'saved_by': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'tag_list': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'version_timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'core.filmranking': {
'Meta': {'object_name': 'FilmRanking'},
'average_score': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
'film': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Film']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_votes': ('django.db.models.fields.IntegerField', [], {}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
'core.localizedprofile': {
'LANG': ('django.db.models.fields.CharField', [], {'default': "'pl'", 'max_length': '2'}),
'Meta': {'unique_together': "(('user', 'LANG'),)", 'object_name': 'LocalizedProfile'},
'blog_title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.moderatedfilmlocalized': {
'LANG': ('django.db.models.fields.CharField', [], {'default': "'pl'", 'max_length': '2'}),
'Meta': {'object_name': 'ModeratedFilmLocalized'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '15000', 'null': 'True', 'blank': 'True'}),
'film': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Film']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderation_status': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'moderation_status_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'moderation_status_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderatedfilmlocalized_moderated_objects'", 'null': 'True', 'to': "orm['auth.User']"}),
'rejection_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tag_list': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.moderatedpersonlocalized': {
'LANG': ('django.db.models.fields.CharField', [], {'default': "'pl'", 'max_length': '2'}),
'Meta': {'object_name': 'ModeratedPersonLocalized'},
'biography': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderation_status': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'moderation_status_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'moderation_status_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderatedpersonlocalized_moderated_objects'", 'null': 'True', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Person']"}),
'rejection_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.object': {
'Meta': {'object_name': 'Object'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_comments': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'permalink': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'version': ('django.db.models.fields.IntegerField', [], {})
},
'core.objectlocalized': {
'LANG': ('django.db.models.fields.CharField', [], {'default': "'pl'", 'max_length': '2'}),
'Meta': {'object_name': 'ObjectLocalized'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Object']"}),
'tag_list': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'core.person': {
'Meta': {'ordering': "['surname']", 'object_name': 'Person'},
'actor_popularity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'actor_popularity_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'day_of_birth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'director_popularity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'director_popularity_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'hires_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'imdb_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'is_actor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_director': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_writer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'month_of_birth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'parent': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'tmdb_import_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'verified_imdb_code': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'writer_popularity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'writer_popularity_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.personlocalized': {
'Meta': {'object_name': 'PersonLocalized', '_ormbases': ['core.ObjectLocalized']},
'biography': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'object_localized': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.ObjectLocalized']", 'unique': 'True', 'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Person']"}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'core.personlog': {
'LANG': ('django.db.models.fields.CharField', [], {'default': "'pl'", 'max_length': '2'}),
'Meta': {'object_name': 'PersonLog'},
'actor_popularity': ('django.db.models.fields.IntegerField', [], {}),
'actor_popularity_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '40000', 'null': 'True', 'blank': 'True'}),
'day_of_birth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'director_popularity': ('django.db.models.fields.IntegerField', [], {}),
'director_popularity_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_actor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_director': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_writer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'month_of_birth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Person']"}),
'saved_by': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'tag_list': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'version_timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'writer_popularity': ('django.db.models.fields.IntegerField', [], {}),
'writer_popularity_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.profile': {
'LANG': ('django.db.models.fields.CharField', [], {'default': "'pl'", 'max_length': '2'}),
'Meta': {'object_name': 'Profile'},
'aol': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'country': ('film20.userprofile.countries.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'criticker_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'foursquare_access_token': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'foursquare_user_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'gg': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'icq': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imdb_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'iphone_token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'jabber_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '6', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '6', 'blank': 'True'}),
'metacritic_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'mobile_first_login_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'mobile_last_login_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'mobile_login_cnt': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mobile_platform': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'msn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'myspace_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'recommendations_notice_sent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'recommendations_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'registration_source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timezone_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'twitter_access_token': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'twitter_user_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'core.rating': {
'Meta': {'object_name': 'Rating'},
'actor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'rated_as_actor'", 'null': 'True', 'to': "orm['core.Person']"}),
'director': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'rated_as_director'", 'null': 'True', 'to': "orm['core.Person']"}),
'film': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'film_ratings'", 'null': 'True', 'to': "orm['core.Film']"}),
'first_rated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'guess_rating_alg1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '4', 'blank': 'True'}),
'guess_rating_alg2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '4', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_displayed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'last_rated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'normalized': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '4', 'blank': 'True'}),
'number_of_comments': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_ratings': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Object']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.ratingcomparator': {
'Meta': {'object_name': 'RatingComparator'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'common_films': ('django.db.models.fields.IntegerField', [], {}),
'compared_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'compared_users'", 'to': "orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'main_users'", 'to': "orm['auth.User']"}),
'previous_save_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}),
'score2': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '4', 'decimal_places': '2'}),
'sum_difference': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.recomerror': {
'Meta': {'object_name': 'RecomError'},
'count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sum': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '16', 'decimal_places': '8'})
},
'core.recommendation': {
'Meta': {'object_name': 'Recommendation'},
'film': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'film_recommendation'", 'null': 'True', 'to': "orm['core.Film']"}),
'guess_rating_alg1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '4', 'blank': 'True'}),
'guess_rating_alg2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '4', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.searchkey': {
'Meta': {'object_name': 'SearchKey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key_letters': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'key_normalized': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'key_root': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Object']"}),
'object_localized': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ObjectLocalized']", 'null': 'True', 'blank': 'True'}),
'text_length': ('django.db.models.fields.IntegerField', [], {})
},
'core.shortreview': {
'LANG': ('django.db.models.fields.CharField', [], {'default': "'pl'", 'max_length': '2'}),
'Meta': {'object_name': 'ShortReview', '_ormbases': ['core.Object']},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'kind': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'short_reviews'", 'null': 'True', 'to': "orm['core.Object']"}),
'parent': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'rating': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'short_reviews'", 'null': 'True', 'to': "orm['core.Rating']"}),
'review_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.shortreviewold': {
'LANG': ('django.db.models.fields.CharField', [], {'default': "'pl'", 'max_length': '2'}),
'Meta': {'object_name': 'ShortReviewOld'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rating': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'short_reviewsold'", 'to': "orm['core.Rating']"}),
'review_text': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'core.userratingtimerange': {
'LANG': ('django.db.models.fields.CharField', [], {'default': "'pl'", 'max_length': '2'}),
'Meta': {'object_name': 'UserRatingTimeRange'},
'first_rated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_rated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'followers.followers': {
'Meta': {'ordering': "('-created_at',)", 'unique_together': "(('from_user', 'to_user', 'status'),)", 'object_name': 'Followers'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'from_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_users'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_users'", 'to': "orm['auth.User']"})
},
'tagging.tag': {
'LANG': ('django.db.models.fields.CharField', [], {'default': "'pl'", 'max_length': '2'}),
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
'category': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
}
}
complete_apps = ['core']
| [
"email@ibrahimcesar.com"
] | email@ibrahimcesar.com |
bec7895cdef7c093a11c9933c559bcc908c7a1b2 | 3839400cb89316ce591667f17c0f72c85b16b242 | /misc/mutalyzer-comparison/bin/hgvs-g-to-c | d346afda01cd8ec0c8a126fa0bcb3f0c43d687a3 | [
"Apache-2.0"
] | permissive | HealthVivo/hgvs-1 | 73f768345fb2144c1c737a00436e524c22a9423d | 26aba8877791b0f94f1e14a5a49c60bcdaf2e6fd | refs/heads/master | 2020-12-13T01:28:10.625165 | 2014-09-03T05:51:43 | 2014-09-03T05:51:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,360 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
"""compare hgvs and mutalyzer for c. and g. variants provided on the command line
The comparison has two basic cases:
1) When a c. variant is provided, it is converted to g. by both hgvs
and mutalyzer and the results are compared.
2) When a g. variant is provided, it is converted to c. variants for
all transcripts available by each tool. The members of each
result set with matching accessions are compared.
"""
import argparse
import codecs
import csv
import logging
import os
import sys
import hgvs.parser
import hgvs.dataproviders.uta
from hgvs.variantmapper import EasyVariantMapper
# Default configuration values; 'uta-dsn' is the connection string for the
# local UTA (Universal Transcript Archive) PostgreSQL instance.
defaults = {
    'uta-dsn': 'postgresql://localhost/uta',
}
# Column headers for the tab-separated output written by the main loop below.
fieldnames = ['Input Variant','Errors','Chromosomal Variant','Coding Variant(s)']
def parse_args(argv):
    """Build the command-line parser and return the parsed namespace.

    :param argv: argument list (typically ``sys.argv[1:]``)
    :return: ``argparse.Namespace`` with ``variant_file`` (str or None)
        and ``verbose`` (int count, default 0)
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--variant-file', '-f')
    parser.add_argument('--verbose', '-v', action='count', default=0)
    return parser.parse_args(argv)
if __name__ == '__main__':
    logging.basicConfig(level=logging.WARN)
    logger = logging.getLogger(__name__)
    opts = parse_args(sys.argv[1:])
    # -v raises verbosity to INFO; -vv (or more) to DEBUG.
    if opts.verbose:
        logger.setLevel(logging.INFO if opts.verbose == 1 else logging.DEBUG)
    # Parser for HGVS variant strings and a mapper backed by the UTA database.
    hp = hgvs.parser.Parser()
    dp = hgvs.dataproviders.uta.connect(defaults['uta-dsn'])
    evm = hgvs.variantmapper.EasyVariantMapper(hdp=dp)
    # Header row, then one tab-separated row per input variant.
    print( "\t".join(fieldnames) )
    # Read variants from --variant-file when given, otherwise from stdin.
    in_fh = codecs.open(opts.variant_file,encoding='utf-8') if opts.variant_file else sys.stdin
    for hgvs_g in in_fh:
        hgvs_g = hgvs_g.strip()
        # Skip comment lines.
        if hgvs_g.startswith("#"):
            continue
        try:
            var_g = hp.parse_hgvs_variant(hgvs_g)
            # Inputs are expected to be genomic (g.) variants only.
            assert var_g.type == 'g'
            # Project the genomic variant onto every relevant transcript.
            var_cs = [ evm.g_to_c(var_g,ac) for ac in evm.relevant_transcripts(var_g) ]
            # NOTE(review): list + map(...) concatenation is Python 2 only;
            # under Python 3 map() returns an iterator and this would raise.
            print("\t".join([hgvs_g,'',hgvs_g]+map(str,var_cs)))
        # NOTE(review): hgvs.exceptions is never imported explicitly here —
        # presumably the hgvs package exposes it as an attribute; verify.
        # HGVS-specific errors are logged and processing continues ...
        except hgvs.exceptions.HGVSError as exc:
            logger.error(hgvs_g, exc_info=1)
        # ... but any other error aborts the whole run with exit status 1.
        except Exception as e:
            logger.error(hgvs_g, exc_info=1)
            sys.exit(1)
| [
"reecehart@gmail.com"
] | reecehart@gmail.com | |
74ddc9a8dc922ee096e065d875cfc1e898f7a31d | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-SystemConfiguration/PyObjCTest/test_scpreferences.py | ee461b1fa5e0c25b9106f2e1df010723e61a60ec | [
"MIT"
] | permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,819 | py | from PyObjCTools.TestSupport import *
from SystemConfiguration import *
#from SecurityFoundation import SFAuthorization
class TestSCPreferences (TestCase):
    """Tests for the PyObjC bindings of the SCPreferences API in
    the SystemConfiguration framework (constants, metadata annotations,
    and round-trip calls against a scratch preferences session).
    """
    def testConstants(self):
        # Notification flags are single bits that can be OR-ed together.
        self.assertEqual(kSCPreferencesNotificationCommit, 1<<0)
        self.assertEqual(kSCPreferencesNotificationApply, 1<<1)
    def testFunctions(self):
        """Exercise the SCPreferences call surface end to end using a
        throw-away session named "pyobjc.test".
        """
        self.assertIsInstance(SCPreferencesGetTypeID(), (int, long))
        # Create a preferences session; all subsequent calls operate on it.
        ref = SCPreferencesCreate(None, "pyobjc.test", "pyobjc.test")
        self.assertIsInstance(ref, SCPreferencesRef)
        # assertResultIsBOOL/assertArgIsBOOL check the bridge metadata
        # (that C Boolean is exposed as Python bool), not runtime behavior.
        self.assertResultIsBOOL(SCPreferencesLock)
        self.assertArgIsBOOL(SCPreferencesLock, 1)
        v = SCPreferencesLock(ref, False)
        self.assertIsInstance(v, bool)
        self.assertResultIsBOOL(SCPreferencesUnlock)
        v = SCPreferencesUnlock(ref)
        self.assertIsInstance(v, bool)
        self.assertResultIsBOOL(SCPreferencesCommitChanges)
        v = SCPreferencesCommitChanges(ref)
        self.assertIsInstance(v, bool)
        self.assertResultIsBOOL(SCPreferencesApplyChanges)
        v = SCPreferencesApplyChanges(ref)
        self.assertIsInstance(v, bool)
        r = SCPreferencesGetSignature(ref)
        self.assertIsInstance(r, CFDataRef)
        r = SCPreferencesCopyKeyList(ref)
        self.assertIsInstance(r, CFArrayRef)
        # Register a Python callable as the change callback; invocations
        # (if any) would be collected in l.
        l = []
        def callback(ref, key, ctx):
            l.append([ref, key, ctx])
        ctx = object()
        v = SCPreferencesSetCallback(ref, callback, ctx)
        self.assertTrue(v is True)
        # Round-trip a value: add, read back, read a missing key,
        # overwrite, then remove.
        self.assertResultIsBOOL(SCPreferencesAddValue)
        r = SCPreferencesAddValue(ref, "use_python3", False)
        self.assertTrue(r is True)
        v = SCPreferencesGetValue(ref, "use_python3")
        self.assertTrue(v is False)
        # Unknown keys read back as None.
        v = SCPreferencesGetValue(ref, "use_python4")
        self.assertTrue(v is None)
        self.assertResultIsBOOL(SCPreferencesSetValue)
        r = SCPreferencesSetValue(ref, "use_python3", "on newyearsday")
        self.assertTrue(r is True)
        self.assertResultIsBOOL(SCPreferencesRemoveValue)
        r = SCPreferencesRemoveValue(ref, "use_python3")
        # Schedule on the current runloop, spin it briefly, then unschedule.
        self.assertResultIsBOOL(SCPreferencesScheduleWithRunLoop)
        rl = CFRunLoopGetCurrent()
        r = SCPreferencesScheduleWithRunLoop(ref, rl, kCFRunLoopCommonModes)
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 1.0, False)
        self.assertResultIsBOOL(SCPreferencesUnscheduleFromRunLoop)
        r = SCPreferencesUnscheduleFromRunLoop(ref, rl, kCFRunLoopCommonModes)
        SCPreferencesSynchronize(ref)
    def testSecurityIntegreation(self):
        # Metadata-only check: the returned CFRef follows the Create rule.
        self.assertResultIsCFRetained(SCPreferencesCreateWithAuthorization)
    @min_os_level('10.6')
    def testFunctions10_6(self):
        # Existence check only: raises NameError if the symbol is missing.
        SCPreferencesSetDispatchQueue
if __name__ == "__main__":
main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.