blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1eb0b5f131b64d53530596a02965c641b07e642a
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Games/Conqueror of Empires/project/data.py
|
fc8e7a76d51033fe317d3e9b0cdd065b3b283cfc
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:2e685fd55394e2b706aae52098bf791f45158fbc6e6ccb10becc4caa7efc16c4
size 413
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
1cf125c2a540017320395c8cdd6a233922f6dcd9
|
034fff9750485929ef91c8ab8eb2a5a3b5f26c4a
|
/books/Python程式設計實務_第2版_博碩/書中附檔/9-5.py
|
e0dc05aca9638db40095fa87d6e628c10f307a64
|
[] |
no_license
|
sakanamax/LearnPython
|
c0dcaf26525e2c1420ae2f61924306f5137b4f20
|
2d042c90ea380d2b8b64421679ad576cbbbc6b9e
|
refs/heads/master
| 2021-01-13T17:27:47.394548
| 2020-03-01T13:44:00
| 2020-03-01T13:44:00
| 42,424,888
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
# -*- coding: utf-8 -*-
# Program 9-5 (Python 3 version)
# Fetch the web page given on the command line and print every link
# whose href starts with "http://".
from bs4 import BeautifulSoup
import requests
import sys

# The target URL is required as the first command-line argument.
if len(sys.argv) < 2:
    print("用法:python 9-5.py <<target url>>")
    sys.exit(1)  # was exit(1): sys.exit() is the reliable form in scripts

url = sys.argv[1]
html = requests.get(url).text
sp = BeautifulSoup(html, 'html.parser')

all_links = sp.find_all('a')
for link in all_links:
    href = link.get('href')
    # <a> tags may have no href attribute at all, so guard against None.
    if href is not None and href.startswith('http://'):  # was: href != None
        print(href)
|
[
"sakana@cycu.org.tw"
] |
sakana@cycu.org.tw
|
74c990030b5828f5aba33b0c759abf7fce25038a
|
42dd79c614b775e6e8e782ea7ab332aef44251b9
|
/extra_apps/xadmin/migrations/0002_log.py
|
2d0b47edefe861fd85eb8ca97914c891c8bb06a0
|
[] |
no_license
|
Annihilater/imooc
|
114575638f251a0050a0240d5a25fc69ef07d9ea
|
547046cff32ce413b0a4e21714cb9ab9ce19bc49
|
refs/heads/master
| 2020-05-03T09:06:18.247371
| 2019-12-04T09:24:55
| 2019-12-04T09:24:55
| 178,545,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,877
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-15 05:50
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Create the ``Log`` model: xadmin's audit trail of admin actions.

    Auto-generated by Django 1.9.7 (``makemigrations``) on 2016-07-15.
    """

    dependencies = [
        ("contenttypes", "0002_remove_content_type_name"),
        # The user FK must target whatever AUTH_USER_MODEL is configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("xadmin", "0001_initial"),
    ]

    operations = [
        migrations.CreateModel(
            name="Log",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    # Timestamp of the logged action; fixed at creation time.
                    "action_time",
                    models.DateTimeField(
                        default=django.utils.timezone.now,
                        editable=False,
                        verbose_name="action time",
                    ),
                ),
                (
                    "ip_addr",
                    models.GenericIPAddressField(
                        blank=True, null=True, verbose_name="action ip"
                    ),
                ),
                (
                    "object_id",
                    models.TextField(blank=True, null=True, verbose_name="object id"),
                ),
                (
                    "object_repr",
                    models.CharField(max_length=200, verbose_name="object repr"),
                ),
                (
                    "action_flag",
                    models.PositiveSmallIntegerField(verbose_name="action flag"),
                ),
                (
                    "message",
                    models.TextField(blank=True, verbose_name="change message"),
                ),
                (
                    # Log rows survive deletion of the content type (SET_NULL).
                    "content_type",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to="contenttypes.ContentType",
                        verbose_name="content type",
                    ),
                ),
                (
                    # Deleting a user removes their log rows (CASCADE).
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        verbose_name="user",
                    ),
                ),
            ],
            options={
                # Newest actions first.
                "ordering": ("-action_time",),
                "verbose_name": "log entry",
                "verbose_name_plural": "log entries",
            },
        )
    ]
|
[
"yanmiexingkong@gmail.com"
] |
yanmiexingkong@gmail.com
|
b33c0fd4e7e3d7f166a592e2a5141660b8ea686b
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/resources/azure-mgmt-resource/azure/mgmt/resource/features/v2015_12_01/aio/_feature_client.py
|
403e09981f0530df35255da9d45c697c70fd4f82
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 4,376
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import FeatureClientConfiguration
from .operations import FeatureClientOperationsMixin, FeaturesOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class FeatureClient(FeatureClientOperationsMixin): # pylint: disable=client-accepts-api-version-keyword
    """Azure Feature Exposure Control (AFEC) provides a mechanism for the resource providers to
    control feature exposure to users. Resource providers typically use this mechanism to provide
    public/private preview for new features prior to making them generally available. Users need to
    explicitly register for AFEC features to get access to such functionality.

    :ivar features: FeaturesOperations operations
    :vartype features: azure.mgmt.resource.features.v2015_12_01.aio.operations.FeaturesOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The ID of the target subscription. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2015-12-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        # Per-client configuration (credential, subscription, pipeline policies).
        self._config = FeatureClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
        # Async ARM pipeline that every operation request is sent through.
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Build (de)serializers over every model class exported by the package.
        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        self.features = FeaturesOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Copy first so expanding the URL against the client's base URL does
        # not mutate the caller's request object.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        """Close the underlying pipeline client and its transport."""
        await self._client.close()

    async def __aenter__(self) -> "FeatureClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
|
[
"noreply@github.com"
] |
kurtzeborn.noreply@github.com
|
62a47856e123b77ea1d9154672240f5a843588cc
|
2bfa2e8d2e744e28141a5c5c79119f2f97e853c9
|
/openvino_training_extensions_simple/pytorch_toolkit/nncf/tests/sparsity/const/test_algo.py
|
4aef6b32e0da6caa76a310038f2cd0bd645185ea
|
[
"Apache-2.0"
] |
permissive
|
maxenergy/CPP_2020520
|
bed4c2fba0dc96c60b6bb20157d11003b00c6067
|
8453b4426dcb044251eaed38d01ba07557348113
|
refs/heads/master
| 2023-05-17T05:18:32.703231
| 2021-01-24T15:14:01
| 2021-01-24T15:14:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,980
|
py
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
import torch
from nncf.helpers import load_state
from nncf.algo_selector import create_compression_algorithm
from nncf.dynamic_graph import reset_context
from nncf.operations import UpdateWeight
from nncf.sparsity.const.algo import ConstSparsity
from nncf.sparsity.layers import BinaryMask
from nncf.utils import get_all_modules_by_type
from tests.quantization.test_functions import check_equal
from tests.sparsity.magnitude.test_helpers import MagnitudeTestModel
from tests.test_helpers import BasicConvTestModel, get_empty_config
# Reference binary masks the restore tests compare against.
# ref_mask_1: 2x2 identity pattern duplicated along dim 0 -> shape (2, 1, 2, 2).
sub_tensor = torch.eye(2).reshape(1, 1, 2, 2)
ref_mask_1 = torch.cat((sub_tensor, sub_tensor), 0)
# ref_mask_2: inverted 3x3 identity pattern duplicated along dim 1 -> shape (1, 2, 3, 3).
sub_tensor = (1.0 - torch.eye(3)).reshape(1, 1, 3, 3)
ref_mask_2 = torch.cat((sub_tensor, sub_tensor), 1)
def test_can_create_const_sparse_algo__with_default():
    """A bare 'const_sparsity' config yields a ConstSparsity algo whose
    NNCFConv2d wrappers each carry a single all-ones BinaryMask pre-op."""
    model = BasicConvTestModel()
    config = get_empty_config()
    config["compression"] = {"algorithm": "const_sparsity"}
    algo = create_compression_algorithm(deepcopy(model), config)
    assert isinstance(algo, ConstSparsity)

    sparse_model = algo.model
    assert len(list(sparse_model.modules())) == 6

    orig_convs = get_all_modules_by_type(model, 'Conv2d')
    wrapped_convs = get_all_modules_by_type(sparse_model, 'NNCFConv2d')
    assert len(orig_convs) == len(wrapped_convs)

    for name in orig_convs:
        # Each original Conv2d scope maps 1:1 onto an NNCFConv2d scope.
        parts = name.split('/')
        parts[-1] = parts[-1].replace('Conv2d', 'NNCFConv2d')
        wrapped_name = '/'.join(parts)
        assert wrapped_name in wrapped_convs

        seen_op_types = []
        wrapped = wrapped_convs[wrapped_name]
        for op in wrapped.pre_ops.values():
            if isinstance(op, UpdateWeight) and isinstance(op.operand, BinaryMask):
                # Default const-sparsity mask keeps every weight (all ones),
                # and no pre-op type appears twice on the same module.
                expected = torch.ones_like(wrapped.weight)
                assert torch.allclose(op.operand.binary_mask, expected)
                assert op.__class__.__name__ not in seen_op_types
                seen_op_types.append(op.__class__.__name__)
def test_can_restore_binary_mask_on_magnitude_algo_resume():
    """Binary masks computed by magnitude sparsity survive a state-dict
    round-trip into a fresh const-sparsity model.

    Steps: run one forward pass under magnitude sparsity so masks are
    produced, load that state dict into a const-sparsity model (which never
    alters masks itself), then compare against the reference masks.
    """
    config = get_empty_config()
    config['compression'] = {"algorithm": "magnitude_sparsity", "weight_importance": "abs",
                             "params": {"schedule": "multistep", "sparsity_levels": [0.3, 0.5]}}
    magnitude_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    sparse_model = magnitude_algo.model
    with torch.no_grad():
        # Single inference pass; its only purpose is to trigger mask computation.
        sparse_model(torch.ones([1, 1, 10, 10]))

    config = get_empty_config()
    config["compression"] = {"algorithm": "const_sparsity"}
    const_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    const_sparse_model = const_algo.model
    # The masks must come entirely from the loaded checkpoint.
    load_state(const_sparse_model, sparse_model.state_dict())

    op = const_sparse_model.conv1.pre_ops['0']
    check_equal(ref_mask_1, op.operand.binary_mask)
    op = const_sparse_model.conv2.pre_ops['0']
    check_equal(ref_mask_2, op.operand.binary_mask)
def test_can_restore_binary_mask_on_magnitude_quant_algo_resume():
    """Same round-trip as the test above, but with quantization stacked on
    top of magnitude sparsity.

    NOTE(review): this test moves the model to CUDA (see the comment below
    about load_state), so it presumably requires a GPU to run — confirm in CI.
    """
    config = get_empty_config()
    config["compression"] = [
        {"algorithm": "magnitude_sparsity", "weight_importance": "abs",
         "params": {"schedule": "multistep", "sparsity_levels": [0.3, 0.5]}},
        {"algorithm": "quantization"}]
    # Graph-tracing contexts must be reset before each algo build.
    reset_context('orig')
    reset_context('quantized_graphs')
    magnitude_quant_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    # load_state doesn't support CPU + Quantization
    sparse_model = torch.nn.DataParallel(magnitude_quant_algo.model)
    sparse_model.cuda()
    with torch.no_grad():
        # Single inference pass to trigger mask computation.
        sparse_model(torch.ones([1, 1, 10, 10]))

    reset_context('orig')
    reset_context('quantized_graphs')
    config = get_empty_config()
    config["compression"] = [{"algorithm": "const_sparsity"}, {"algorithm": "quantization"}]
    const_algo = create_compression_algorithm(MagnitudeTestModel(), config)
    const_sparse_model = const_algo.model
    load_state(const_sparse_model, sparse_model.state_dict())

    # DataParallel wraps the model, hence the extra .module hop.
    op = const_sparse_model.module.conv1.pre_ops['0']
    check_equal(ref_mask_1, op.operand.binary_mask)
    op = const_sparse_model.module.conv2.pre_ops['0']
    check_equal(ref_mask_2, op.operand.binary_mask)
|
[
"yuanyupeng-1989@163.com"
] |
yuanyupeng-1989@163.com
|
e0bd123e809bccda8767fea818f6e921d213287d
|
5de55e32a40a96287d5e10e93ac1e29fbe89628e
|
/docs/conf.py
|
531520920d0bf334ba30a5baeda4858c27e4ac59
|
[
"MIT"
] |
permissive
|
kukupigs/graphviz
|
b71600fcda2474c508ca8a95c323385b6d814f83
|
bb1af2e4eb0a3f47ae6193b1aa3dae319ec7c2bf
|
refs/heads/master
| 2023-04-30T02:44:23.616509
| 2021-05-15T13:01:07
| 2021-05-15T13:01:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,862
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

# Make the in-tree ``graphviz`` package importable: the docs directory sits
# one level below the project root.
sys.path.insert(0, os.path.abspath(os.pardir))

# Imported so the build fails fast if the package itself is broken;
# NOTE(review): nothing below references it directly — confirm before removing.
import graphviz

# -- Project information -----------------------------------------------------

project = 'graphviz'
copyright = '2013-2021, Sebastian Bank'
author = 'Sebastian Bank'

# The short X.Y version
version = '0.17.dev0'
# The full version, including alpha/beta/rc tags
release = version

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.napoleon',
    'sphinx_autodoc_typehints',  # https://github.com/agronholm/sphinx-autodoc-typehints/issues/15
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Read the Docs injects its own theme; only load it explicitly for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'graphvizdoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'graphviz.tex', 'graphviz Documentation',
     'Sebastian Bank', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'graphviz', 'graphviz Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'graphviz', 'graphviz Documentation',
     author, 'graphviz', 'One line description of project.',
     'Miscellaneous'),
]

# -- Extension configuration -------------------------------------------------

# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'py': ('https://docs.python.org/2', None),
    'py3': ('https://docs.python.org/3', None),
}

# monkey patch, see https://github.com/sphinx-doc/sphinx/issues/2044
from sphinx.ext.autodoc import ClassLevelDocumenter, InstanceAttributeDocumenter

def add_directive_header(self, sig):
    # Bypass InstanceAttributeDocumenter's own header rendering and use the
    # plain ClassLevelDocumenter one; NOTE(review): this appears to suppress
    # the attribute's value/annotation in the rendered header — confirm
    # against the linked Sphinx issue before changing.
    ClassLevelDocumenter.add_directive_header(self, sig)

InstanceAttributeDocumenter.add_directive_header = add_directive_header
|
[
"sebastian.bank@uni-leipzig.de"
] |
sebastian.bank@uni-leipzig.de
|
d6e6aa275b5933279375629fc50b65f54b108278
|
da6082484abe8d790a08f8df383f5ae8d4a8ba6b
|
/last_fm/celery.py
|
3a4de1f07536d46dde8cfa2ee00731c665a3d4ee
|
[] |
no_license
|
themylogin/last.fm.thelogin.ru
|
6d4e65eb62df4866616ccd42769194a46e4fd5b9
|
69dbe3b287be5e4a2eda1412a0ecd035bcaa3508
|
refs/heads/master
| 2022-11-27T16:58:56.804614
| 2021-03-25T12:57:48
| 2021-03-25T12:57:48
| 16,201,329
| 1
| 1
| null | 2022-11-22T01:08:32
| 2014-01-24T10:17:13
|
Python
|
UTF-8
|
Python
| false
| false
| 317
|
py
|
# -*- coding=utf-8 -*-
"""Celery application wiring: builds the app's Celery instance and its Cron scheduler."""
from __future__ import absolute_import, division, unicode_literals

from themyutils.celery.beat import Cron
from themyutils.flask.celery import make_celery

from last_fm.app import app
from last_fm.db import db

# b"..." keeps these entries as native str under Python 2 despite
# unicode_literals above. NOTE(review): under Python 3, bytes in __all__
# break star-imports — presumably this module targets Python 2 only;
# confirm before porting.
__all__ = [b"celery", b"cron"]

# Celery instance bound to the Flask app and its DB session.
celery = make_celery(app, db)
# Cron-style beat scheduler driving periodic tasks on that instance.
cron = Cron(celery)
|
[
"themylogin@gmail.com"
] |
themylogin@gmail.com
|
df9c19801ac4742d19bc68b65b1accd5590e6572
|
0fe394b10b39864915fcc4073a5fa050aa02502e
|
/SeriesOfMatplotlib/plot/american_womens_bachelor_degree_plot.py
|
1c035d41bd5910d64ac69ef06d1ee06493b0df28
|
[] |
no_license
|
JohnAssebe/Python
|
9997d47bba4a056fdcd74c6e5207fc52b002cbfd
|
b88a7c2472f245dc6a0e8900bbea490cb0e0beda
|
refs/heads/master
| 2022-05-14T10:08:37.311345
| 2022-05-09T19:48:53
| 2022-05-09T19:48:53
| 212,562,910
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 837
|
py
|
'''
@Author:Yohannes Assebe(John Assebe github)

Plot the percentage of US bachelor's degrees earned by women, one line per
field of study, from a local CSV export (first column is 'Year', every
remaining column is a field of study).
'''
import pandas as pd
import random as ra  # NOTE(review): unused here — kept in case other scripts exec this file
from matplotlib import pyplot as plt

plt.style.use('ggplot')

df = pd.read_csv('../percent_bachelors_degrees_women_usa.csv')
year = df['Year']

# One series per non-Year column, in column order.
cols = df.columns.values
fields = [list(df[str(col)]) for col in cols[1:]]

colors = ['black', '#521222', '#155888', 'yellow', 'red', 'green', 'purple', 'blue']
for i, field in enumerate(fields):
    if i >= 9:
        # Explicit colors for the tail series so matplotlib's default color
        # cycle does not repeat; colors[16 - i] walks the 8-entry list
        # backwards (assumes 17 data columns — TODO confirm against the CSV).
        plt.plot(year, field, color=colors[16 - i], label=cols[i + 1], linewidth=2)
    else:
        plt.plot(year, field, label=cols[i + 1], linewidth=2)

plt.title("Women bachelor degree percents in USA")
plt.xlabel("Year")
plt.ylabel("Bachelor degree percent")
plt.legend()
plt.tight_layout()
#plt.savefig('americanwomenliteracy.png')
plt.show()
|
[
"noreply@github.com"
] |
JohnAssebe.noreply@github.com
|
5f43eec0bd28a8fb8303514f2dfbc97dc5c91be7
|
34cb2555a5884f065a0e7b2dc7f8b075564e67e1
|
/account_invoice_margin/__manifest__.py
|
bb1418139039ee9f1f80bbda1a2fe11aa98aa675
|
[] |
no_license
|
valenciaricardos/odoo-usability
|
4a944bb06b8fdc7058c2bdf21e36ed0784f2e088
|
a32d091d837449225ae26c3be387b97c5310a457
|
refs/heads/10.0
| 2021-07-08T15:29:58.661172
| 2017-09-14T17:54:08
| 2017-09-14T17:54:08
| 106,070,984
| 0
| 1
| null | 2017-10-07T04:19:55
| 2017-10-07T04:19:55
| null |
UTF-8
|
Python
| false
| false
| 1,723
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Account Invoice Margin module for Odoo
# Copyright (C) 2015 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo module manifest (metadata dict read by Odoo's module loader).
{
    'name': 'Account Invoice Margin',
    'version': '0.1',
    'category': 'Accounting & Finance',
    'license': 'AGPL-3',
    'summary': 'Copy standard price on invoice line and compute margins',
    'description': """
This module copies the field *standard_price* of the product on the invoice line when the invoice line is created. The allows the computation of the margin of the invoice.
This module has been written by Alexis de Lattre from Akretion
<alexis.delattre@akretion.com>.
""",
    'author': 'Akretion',
    'website': 'http://www.akretion.com',
    'depends': ['account'],
    'data': [
        'account_invoice_view.xml',
    ],
    # Deliberately not installable on this branch (kept for a future port).
    'installable': False,
}
|
[
"alexis.delattre@akretion.com"
] |
alexis.delattre@akretion.com
|
94d9210deda244836941983b492d85510b6f98ed
|
fb871dce626074d71978f48b4af124b975fe933b
|
/TXR120 activity code examples/driveForwards.py
|
86ad438d50b3a139f594573c2bf1fa5b2a722505
|
[] |
no_license
|
psychemedia/ev3robotics
|
49e1b1b663f7f1946d9fb69a16228584d85016cc
|
c5190da5d215f9dda571535215e3177d196fa186
|
refs/heads/master
| 2021-01-17T20:32:20.494015
| 2017-09-21T12:03:03
| 2017-09-21T12:03:03
| 60,197,304
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
# Drive the robot forwards for two seconds, then stop both motors.
from ev3dev.auto import *
from time import sleep

# Configure motors: large motors on output ports A and B.
LEFT_MOTOR = LargeMotor(OUTPUT_A)
RIGHT_MOTOR = LargeMotor(OUTPUT_B)

# Turn motors on at 75% duty cycle (unregulated power).
LEFT_MOTOR.run_forever(duty_cycle_sp=75)
RIGHT_MOTOR.run_forever(duty_cycle_sp=75)

# Drive for two seconds (motors keep running while we sleep).
sleep(2)

# Note: experiment with stop() commands via eg: stop | hold | coast
#LEFT_MOTOR.stop_command = 'brake'
#RIGHT_MOTOR.stop_command = 'brake'

# Turn motors off.
LEFT_MOTOR.stop()
RIGHT_MOTOR.stop()
|
[
"tony.hirst@gmail.com"
] |
tony.hirst@gmail.com
|
7926fb11f584701d5dd9444ea87f3b895be01672
|
a0879bb909d645d5e64a019234671e2c55cf88fd
|
/TEST/20201115 마이다스아잍/3-2.py
|
7f029928907bce5e15a0a911fa99d418565d8d23
|
[] |
no_license
|
minseunghwang/algorithm
|
a6b94c2cbe0ac6a106e7aa8789321fae8a3d6964
|
e7d0125d5f8d65c94f3ce9140f840b33d28cc9da
|
refs/heads/master
| 2021-07-18T18:11:17.371674
| 2020-12-30T08:04:54
| 2020-12-30T08:04:54
| 227,807,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
cookies = [1, 4, 2, 6, 5, 3]
k = 2

combinations = []
queue = []

# Seed the stack: one entry per possible first cookie.
# Each entry is [cookies_eaten_so_far, cookies_still_available_after_it].
remaining = cookies[:]  # copy so the source list survives
while remaining:
    first = remaining.pop(0)
    queue.append([[first], remaining[:]])

# Depth-first search over take-it-or-skip-it decisions; every strictly
# increasing subsequence ends up in `combinations`.
while queue:
    print(queue)
    state = queue.pop()              # 1/3: grab a pending state
    chosen, rest = state
    if rest == []:                   # 2/3: no cookies left -> record result
        combinations.append(chosen)
        continue
    # 3/3: branch on the next available cookie
    nxt = rest.pop(0)
    if nxt > chosen[-1]:             # strictly larger than the last one eaten?
        queue.append([chosen[:] + [nxt], rest[:]])  # branch: eat it
    queue.append([chosen[:], rest[:]])              # branch: skip it

# Keep only the longest runs, sorted lexicographically, and report the k-th.
maxlen = max(len(c) for c in combinations)
maxcookies = sorted(list(c for c in combinations if len(c) == maxlen))
print(maxcookies)
print(maxcookies[k - 1])
|
[
"mins1011@hanmail.net"
] |
mins1011@hanmail.net
|
f94aec95d3e660b28d7a0d10cf1f8d58fa644f2d
|
900a7285b2fc4d203717e09c88e8afe5bba9144f
|
/axonius_api_client/tests/tests_api/tests_system/test_users.py
|
c9ae601cf277f01332aad50b455a8b76509f9384
|
[
"MIT"
] |
permissive
|
geransmith/axonius_api_client
|
5694eb60964141b3473d57e9a97929d4bff28110
|
09fd564d62f0ddf7aa44db14a509eaafaf0c930f
|
refs/heads/master
| 2022-11-23T01:43:52.205716
| 2020-06-12T14:15:38
| 2020-06-12T14:15:38
| 280,499,094
| 0
| 0
|
MIT
| 2020-07-17T18:35:48
| 2020-07-17T18:35:47
| null |
UTF-8
|
Python
| false
| false
| 2,914
|
py
|
# -*- coding: utf-8 -*-
"""Test suite."""
import pytest
from axonius_api_client.exceptions import ApiError, NotFoundError
from ...meta import TEST_USER
@pytest.mark.skip("Waiting for update to 3.3!")  # XXX update public API for roles/users
class TestSystemUsers:
    """Integration tests for the system users API: paging, CRUD, role assignment."""

    @pytest.fixture(scope="class")
    def apiobj(self, api_system):
        """Return the users child API of the system API object."""
        return api_system.users

    def test__get(self, apiobj):
        """_get returns a list of user dicts."""
        data = apiobj._get()
        assert isinstance(data, list)
        for x in data:
            assert isinstance(x, dict)

    def test__get_limit(self, apiobj):
        """limit=1 caps the page at a single row."""
        data = apiobj._get(limit=1)
        assert isinstance(data, list)
        for x in data:
            assert isinstance(x, dict)
        assert len(data) == 1

    def test__get_limit_skip(self, apiobj):
        """skip=1 offsets the page; with only one user the page is empty."""
        all_data = apiobj._get()
        data = apiobj._get(limit=1, skip=1)
        assert isinstance(data, list)
        for x in data:
            assert isinstance(x, dict)
        if len(all_data) == 1:
            assert len(data) == 0
        else:
            assert len(data) == 1

    def test_add_get_update_delete(self, apiobj):
        """Full lifecycle: add, reject duplicate add, update, re-role, delete."""
        # Clean up the fixture user if a previous run left it behind.
        try:
            apiobj.get(name=TEST_USER)
        except NotFoundError:
            pass
        else:
            apiobj.delete(name=TEST_USER)

        added = apiobj.add(name=TEST_USER, password=TEST_USER)
        assert isinstance(added, dict)
        assert added["user_name"] == TEST_USER
        assert not added["first_name"]
        assert not added["last_name"]
        # The API reports the password as ["unchanged"] rather than echoing it.
        assert added["password"] == ["unchanged"]

        # Adding the same user twice must fail.
        with pytest.raises(ApiError):
            apiobj.add(name=TEST_USER, password=TEST_USER)

        updated = apiobj.update(
            name=TEST_USER,
            firstname=TEST_USER,
            lastname=TEST_USER,
            password=TEST_USER + "X",
        )
        assert isinstance(updated, dict)
        assert updated["user_name"] == TEST_USER
        assert updated["first_name"] == TEST_USER
        assert updated["last_name"] == TEST_USER
        assert updated["password"] == ["unchanged"]

        # Assign the last known role and verify it stuck.
        roles = apiobj.parent.roles.get()
        rolename = roles[-1]["name"]
        updated_role = apiobj.update_role(name=TEST_USER, rolename=rolename)
        assert updated_role["role_name"] == rolename

        # Deleting returns the remaining users; ours must be gone.
        deleted = apiobj.delete(name=TEST_USER)
        assert isinstance(deleted, list)
        assert not [x for x in deleted if x["uuid"] == added["uuid"]]

        # update() with nothing to change raises; the user no longer exists.
        with pytest.raises(ApiError):
            apiobj.update(name=TEST_USER)

        with pytest.raises(NotFoundError):
            apiobj.get(name=TEST_USER)

    def test_add_bad_role(self, apiobj):
        """Adding a user with an unknown role name raises NotFoundError."""
        val = "xxx"
        with pytest.raises(NotFoundError):
            apiobj.add(name=val, password=val, rolename="flimflam")
|
[
"jimbosan@gmail.com"
] |
jimbosan@gmail.com
|
e564564366bd44ba5c752ec4ee3b82ceaae09f0a
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-roma/huaweicloudsdkroma/v2/model/create_app_code_auto_v2_request.py
|
ea23fe5edb6f1eba5782ccd9696d32d010218d7b
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,846
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateAppCodeAutoV2Request:
    """Request model for auto-creating an AppCode (Huawei Cloud ROMA v2 API).

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'instance_id': 'str',
        'app_id': 'str'
    }

    attribute_map = {
        'instance_id': 'instance_id',
        'app_id': 'app_id'
    }

    def __init__(self, instance_id=None, app_id=None):
        """CreateAppCodeAutoV2Request

        The model defined in huaweicloud sdk

        :param instance_id: Instance ID
        :type instance_id: str
        :param app_id: Application ID
        :type app_id: str
        """
        self._instance_id = None
        self._app_id = None
        self.discriminator = None
        self.instance_id = instance_id
        self.app_id = app_id

    @property
    def instance_id(self):
        """Gets the instance_id of this CreateAppCodeAutoV2Request.

        Instance ID

        :return: The instance_id of this CreateAppCodeAutoV2Request.
        :rtype: str
        """
        return self._instance_id

    @instance_id.setter
    def instance_id(self, instance_id):
        """Sets the instance_id of this CreateAppCodeAutoV2Request.

        Instance ID

        :param instance_id: The instance_id of this CreateAppCodeAutoV2Request.
        :type instance_id: str
        """
        self._instance_id = instance_id

    @property
    def app_id(self):
        """Gets the app_id of this CreateAppCodeAutoV2Request.

        Application ID

        :return: The app_id of this CreateAppCodeAutoV2Request.
        :rtype: str
        """
        return self._app_id

    @app_id.setter
    def app_id(self, app_id):
        """Sets the app_id of this CreateAppCodeAutoV2Request.

        Application ID

        :param app_id: The app_id of this CreateAppCodeAutoV2Request.
        :type app_id: str
        """
        self._app_id = app_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    # Never emit sensitive values in dict dumps.
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateAppCodeAutoV2Request):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
baf846c29d7abd610de1db8208469e9648543bcf
|
943cf86aca0eb23bd61a8705c05a9853d0a71c3d
|
/test_document_from_pubmed.py
|
15a285c0fe71666befe5ec12d05ab0e3c0909f67
|
[
"BSD-3-Clause"
] |
permissive
|
rmatam/vivotools
|
91f60b19c5d797d39ffb6c20eeb0f1e8048fdd57
|
b60b36c5ac6c672a5af8b2a7d1a646bebd026026
|
refs/heads/master
| 2021-01-20T08:19:01.113299
| 2015-12-20T22:21:01
| 2015-12-20T22:21:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
"""
test_document_from_pubmed.py -- use Entrez to retrive a document entry from
Pubmed, and format it as a reusable python structure
Version 0.1 MC 2013-12-28
-- Initial version.
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2013, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "0.1"
import vivotools as vt
from datetime import datetime
from Bio import Entrez
print datetime.now(),"Start"
pmids = [
"18068940",
"18089571",
"19206997",
"21822356",
"19247798",
"21493414",
"12099768",
"11410031",
"20143936",
"16934145"
]
for pmid in pmids:
handle = Entrez.efetch(db="pubmed", id=pmid, retmode="xml")
records = Entrez.parse(handle)
for record in records:
print "\n",pmid,vt.document_from_pubmed(record)
print datetime.now(),"Finish"
|
[
"mconlon@ufl.edu"
] |
mconlon@ufl.edu
|
01249f147a2f8d1dcd5f70439aa9d68d55c997e4
|
bdda8da38c77194d2fceb29e31e9b564f015259d
|
/myapp/runScript/User.py
|
e3094939d9f61864940cf347bca1d74a732fd999
|
[] |
no_license
|
Hanlen520/myproject
|
54f2b4c4a77af768ae278f44922cd42befab3b08
|
eb420d7686bd502b300aadd8ba28cb28f03935f5
|
refs/heads/master
| 2020-04-13T13:51:26.667841
| 2018-12-26T13:23:34
| 2018-12-26T13:23:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,310
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from myapp.serializers import Login1
from myapp.models import Login
from rest_framework import generics, filters, viewsets, mixins
from rest_framework.views import APIView
import logging
from rest_framework.response import Response
from rest_framework import status
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.authentication import SessionAuthentication
# 搜索用户名、邮箱
class SearchUser(mixins.ListModelMixin, viewsets.GenericViewSet):
authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
queryset = Login.objects.filter(isdelete=False)
serializer_class = Login1
filter_backends = (filters.SearchFilter,)
search_fields = ('username', 'email')
# 删除用户
class DeleteUser(generics.RetrieveDestroyAPIView):
queryset = Login.objects.all().filter(isdelete=False)
serializer_class = Login1
def perform_destroy(self, instance):
Login.objects.filter(user_id=instance.user_id).update(isdelete=True)
# 更新用户信息
class UpdateUser(generics.RetrieveUpdateAPIView):
serializer_class = Login1
queryset = Login.objects.filter(isdelete=False)
# 重置密码
class ResetPwd(generics.RetrieveUpdateAPIView):
serializer_class = Login1
queryset = Login.objects.filter(isdelete=False)
def update(self, request, *args, **kwargs):
data = request.data
# hash = hashlib.sha1()
try:
# hash.update(bytes(data['password']))
# password = hash.hexdigest()
t = Login.objects.get(user_id=data['user_id'])
t.set_password(data['password'])
t.save()
except Exception as e:
logging.exception(e)
return Response(data, status=status.HTTP_200_OK)
# 批量删除用户
class DeleteUsers(APIView):
queryset = Login.objects.all().filter(isdelete=False)
serializer_class = Login1
def put(self, request):
t = {'code': 200, 'message': '成功'}
data = request.data
for i in data["ids"]:
try:
Login.objects.filter(user_id=i).update(isdelete=True)
except Exception as e:
logging.exception(e)
return Response(t)
|
[
"zhengxq@dxy.cn"
] |
zhengxq@dxy.cn
|
128a8225ef5aa2c8a88217d582fb77e064710a23
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_1/gjdsuv001/question3.py
|
bc93359eb4e9fc41676816f51ca1c166ab26a3e0
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 957
|
py
|
def message():
first_name = input("Enter first name:\n")
last_name=input("Enter last name:\n")
money=eval(input("Enter sum of money in USD:\n"))
country=input("Enter country name:\n")
print()
print("Dearest",first_name,)
print("It is with a heavy heart that I inform you of the death of my father,")
print("General Fayk ",last_name,", your long lost relative from Mapsfostol." ,sep="")
print ("My father left the sum of ",money,"USD for us, your distant cousins.", sep="")
print ("Unfortunately, we cannot access the money as it is in a bank in ", country,".",sep="")
print("I desperately need your assistance to access this money.")
print ("I will even pay you generously, 30% of the amount - ",money*0.3,"USD,", sep="")
print ("for your help. Please get in touch with me at this email address asap.")
print ("Yours sincerely")
print ("Frank",last_name,)
message()
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
704c36ab9cda3860edd3b1fc17cfd5f8c7febc2f
|
5a71ca1f5c964f803350e3c1238cb48986db565c
|
/coinlib/tests/utils/test_decorators.py
|
f309cff6a37f9cb9bc36bc92982bdf409a599424
|
[] |
no_license
|
tetocode/coinliball
|
fd644cbc16039ecad7e43228ea4e287ead5c8e5f
|
41ebbac13c1fbba98aedaa766b9a505cb157f374
|
refs/heads/master
| 2022-09-28T21:58:08.130006
| 2020-06-04T03:00:56
| 2020-06-04T03:00:56
| 269,247,318
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
from coinlib.utils.decorators import dedup
def test_dedup():
@dedup(lambda x: x)
def gen(it):
yield from it
assert list(range(3)) == list(gen(range(3)))
assert list(range(3)) == list(gen(list(range(3)) * 2))
@dedup(lambda x: x, cache_limit=0)
def gen(it):
yield from it
assert list(range(3)) == list(gen(range(3)))
assert list(range(3)) * 2 == list(gen(list(range(3)) * 2))
@dedup(lambda x: x, cache_limit=2)
def gen(it):
yield from it
assert [1, 2, 3, 1] == list(gen([1, 2, 3, 2, 1]))
|
[
"_"
] |
_
|
b559d304d58a2b548b321a7b94fc9bd9c30cd860
|
bacc65263d0308d455333add3ff918ee42498f32
|
/pilarfc_crowdbotics_123/settings.py
|
e4fbdbe907c6641a2a41bf63b3ad341bf211488d
|
[] |
no_license
|
crowdbotics-users/pilarfc-crowdbotics-123
|
7d248119ab44ddc8e3c6fb0de866796530d98ba0
|
aea7b9bf645ac60fbe173f6517727675743a60be
|
refs/heads/master
| 2020-03-13T09:39:30.290774
| 2018-04-25T22:02:42
| 2018-04-25T22:02:42
| 131,068,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,600
|
py
|
"""
Django settings for pilarfc_crowdbotics_123 project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&4^-!j4j)+fx7f!lb7jia0b&50^xkaye341#zox&iq9s@j5%qz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pilarfc_crowdbotics_123.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pilarfc_crowdbotics_123.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
import environ
env = environ.Env()
ALLOWED_HOSTS = ['*']
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
DATABASES = {
'default': env.db()
}
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
|
[
"sp.gharti@gmail.com"
] |
sp.gharti@gmail.com
|
bd689ff301eace5907505fe34ea81ed7774859e8
|
27b1d48534a3f35d1286498e84df8333250f51a8
|
/Exam-Preparation/Mid Exam/treasure_hunt.py
|
0a7d90b968fe4f26fca2191c7289aa538ed23735
|
[] |
no_license
|
zhyordanova/Python-Fundamentals
|
f642f841629c99fcdd12ace79ea813d62972f36f
|
07c16053e5d03bd7cfb51ace8ef277c2c62cd927
|
refs/heads/main
| 2023-03-19T03:05:12.764726
| 2021-03-15T08:20:39
| 2021-03-15T08:20:39
| 345,451,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
loot = input().split("|")
command = input()
while not command == "Yohoho!":
tokens = command.split()
if tokens[0] == "Loot":
items = tokens[1:]
[loot.insert(0, x) for x in items if x not in loot]
elif tokens[0] == "Drop":
index = int(tokens[1])
if 0 <= index < len(loot):
# item = loot.pop(index)
loot.append(loot.pop(index))
elif tokens[0] == "Steal":
count = int(tokens[1])
# if count > len(loot):
# count = len(loot)
stolen = loot[- count:]
loot = loot[:- count]
print(', '.join(stolen))
command = input()
if len(loot) == 0:
print("Failed treasure hunt.")
else:
average_gain = sum([len(x) for x in loot]) / len(loot)
print(f"Average treasure gain: {average_gain:.2f} pirate credits.")
|
[
"zhyordanova88@gmail.com"
] |
zhyordanova88@gmail.com
|
02f1fdb651c4922014a525edd14cf31d10ac08d1
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/320/usersdata/287/88404/submittedfiles/lecker.py
|
85af5097a574ebcfbb8bd3fd8a186df71506f904
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
# -*- coding: utf-8 -*-
import math
a=int(input('Digite o primeiro valor: '))
b=int(input('Digite o segundo valor: '))
c=int(input('Digite o terceiro valor: '))
d=int(input('Digite o quarto valor: '))
A=0
B=0
C=0
D=0
if a>b:
A=1
if b>a or b>c:
B=1
if c>b or c>d:
C=1
if d>c:
D=1
lecker=A+B+C+D
if lecker==1:
print('S')
else:
print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
4e53ec0a5b834c3009372939a3bcc6d387cb6878
|
9ecf6dfe87999f0c510c1d67e2b1cf6ca5b53e52
|
/example/voxelize_chair.py
|
cd739c8722d72501d5c0aedf805d9f26d41f9adc
|
[
"MIT"
] |
permissive
|
marian42/mesh_to_sdf
|
b43cdcb87c2845bebc7b928ccacbf0d2c0a9b50d
|
66036a747e82e7129f6afc74c5325d676a322114
|
refs/heads/master
| 2023-04-17T10:23:31.431009
| 2021-03-11T23:31:12
| 2021-03-11T23:31:12
| 234,407,235
| 847
| 95
|
MIT
| 2023-04-11T16:05:04
| 2020-01-16T20:37:53
|
Python
|
UTF-8
|
Python
| false
| false
| 778
|
py
|
import numpy as np
from mesh_to_sdf import get_surface_point_cloud, scale_to_unit_sphere
import trimesh
import skimage, skimage.measure
import os
mesh = trimesh.load('example/chair.obj')
mesh = scale_to_unit_sphere(mesh)
print("Scanning...")
cloud = get_surface_point_cloud(mesh, surface_point_method='scan', scan_count=20, scan_resolution=400)
cloud.show()
os.makedirs("test", exist_ok=True)
for i, scan in enumerate(cloud.scans):
scan.save("test/scan_{:d}.png".format(i))
print("Voxelizing...")
voxels = cloud.get_voxels(128, use_depth_buffer=True)
print("Creating a mesh using Marching Cubes...")
vertices, faces, normals, _ = skimage.measure.marching_cubes(voxels, level=0)
mesh = trimesh.Trimesh(vertices=vertices, faces=faces, vertex_normals=normals)
mesh.show()
|
[
"mail@marian42.de"
] |
mail@marian42.de
|
1d06f0506e12e305f72fcf5c58609e5187a954f9
|
e74e55d174772975a3c16d4aa1210215fa698528
|
/oim_cms/utils.py
|
8a04ac90651f889c22066ac36e6cc785af2af1c5
|
[
"Apache-2.0"
] |
permissive
|
tawazz/oim-cms
|
2c0417863fef24f3a3b2d75dfdd88d8850b9354d
|
eef09ef42b20a08ff25f3c4ff12d50db14d54416
|
refs/heads/master
| 2021-01-11T01:53:15.879813
| 2016-10-19T02:24:07
| 2016-10-19T02:24:07
| 70,650,009
| 0
| 0
| null | 2016-10-12T01:25:30
| 2016-10-12T01:25:30
| null |
UTF-8
|
Python
| false
| false
| 5,063
|
py
|
from __future__ import unicode_literals, absolute_import
from django.http import HttpResponse
from djqscsv import render_to_csv_response
from restless.dj import DjangoResource
class CSVDjangoResource(DjangoResource):
"""Extend the restless DjangoResource class to add a CSV export endpoint.
"""
@classmethod
def as_csv(cls, request):
resource = cls()
if not hasattr(resource, "list_qs"):
return HttpResponse(
"list_qs not implemented for {}".format(cls.__name__))
resource.request = request
return render_to_csv_response(
resource.list_qs(), field_order=resource.VALUES_ARGS)
class FieldsFormatter(object):
"""
A formatter object to format specified fields with cofigured formatter object.
This takes a
``request`` parameter , a http request object
``formatters`` parameter: a dictionary of keys (a dotted lookup path to
the desired attribute/key on the object) and values(a formatter object).
for propertis without a configed formatter method, return the raw value directly.
This method will replace the old value with formatted value.
Example::
preparer = FieldsFormatter(request,fields={
# ``user`` is the key the client will see.
# ``author.pk`` is the dotted path lookup ``FieldsPreparer``
# will traverse on the data to return a value.
'photo': format_fileField,
})
"""
def __init__(self, formatters):
super(FieldsFormatter, self).__init__()
self._formatters = formatters
def format(self, request, data):
"""
format data with configured formatter object
data can be a list or a single object
"""
if data:
if isinstance(data, list):
# list object
for row in data:
self.format_object(request, row)
else:
# a single object
self.format_object(request, data)
return data
def format_object(self, request, data):
"""
format a simgle object.
Uses the ``lookup_data`` method to traverse dotted paths.
Replace the value with formatted value, if required.
"""
if not self._formatters:
# No fields specified. Serialize everything.
return data
for lookup, formatter in self._formatters.items():
if not formatter:
continue
data = self.format_data(request, lookup, data, formatter)
return data
def format_data(self, request, lookup, data, formatter):
"""
Given a lookup string, attempts to descend through nested data looking for
the value ,format the value and then replace the old value with formatted value.
Can work with either dictionary-alikes or objects (or any combination of
those).
Lookups should be a string. If it is a dotted path, it will be split on
``.`` & it will traverse through to find the final value. If not, it will
simply attempt to find either a key or attribute of that name & return it.
Example::
>>> data = {
... 'type': 'message',
... 'greeting': {
... 'en': 'hello',
... 'fr': 'bonjour',
... 'es': 'hola',
... },
... 'person': Person(
... name='daniel'
... )
... }
>>> lookup_data('type', data)
'message'
>>> lookup_data('greeting.en', data)
'hello'
>>> lookup_data('person.name', data)
'daniel'
"""
parts = lookup.split('.')
if not parts or not parts[0]:
return formatter(request, data)
part = parts[0]
remaining_lookup = '.'.join(parts[1:])
if hasattr(data, 'keys') and hasattr(data, '__getitem__'):
# Dictionary enough for us.
try:
value = data[part]
if remaining_lookup:
# is an object
self.format_data(
request, remaining_lookup, value, formatter)
else:
# is a simple type value
data[part] = formatter(request, value)
except:
# format failed, ignore
pass
else:
try:
value = getattr(data, part)
# Assume it's an object.
if remaining_lookup:
# is an object
self.format_data(
request, remaining_lookup, value, formatter)
else:
# is a simple type value
setattr(data, part, formatter(request, value))
except:
# format failed, ignore
pass
return data
|
[
"ashley@ropable.com"
] |
ashley@ropable.com
|
607a3c53f503aaa66c8d67493bd4f30be15d4b1a
|
ec09932ef977bb0f9117193c3200b0807c588542
|
/unreleased/azure-mgmt-search/azure/mgmt/search/models/check_name_availability_input.py
|
581958b5b33330d131bc6863bc2e77ec646619c6
|
[
"MIT"
] |
permissive
|
biggorog/azure-sdk-for-python
|
26b9111f5d939e68374e2ff92dd5a3e127100a49
|
31e15670100f57a03aac9ada5fd83eedc230e291
|
refs/heads/master
| 2020-12-24T11:37:13.806672
| 2016-11-04T20:50:04
| 2016-11-04T20:50:04
| 73,026,650
| 1
| 0
| null | 2016-11-06T23:48:38
| 2016-11-06T23:48:37
| null |
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CheckNameAvailabilityInput(Model):
"""Input of check name availability API.
Variables are only populated by the server, and will be ignored when
sending a request.
:param name: The Search service name to validate. Search service names
must only contain lowercase letters, digits or dashes, cannot use dash
as the first two or last one characters, cannot contain consecutive
dashes, and must be between 2 and 60 characters in length.
:type name: str
:ivar type: The type of the resource whose name is to be validated. This
value must always be 'searchServices'. Default value: "searchServices" .
:vartype type: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True, 'constant': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
type = "searchServices"
def __init__(self, name):
self.name = name
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
9773cad5194ddefe461acc4a4d927159764b2b90
|
fd0c7c10303ce93590af1a03ecf0fc272a396ade
|
/scripts/build_mod_picoblade.py
|
b761e0377b2d3ff3f37ea708c4f493ef48e2d744
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
adamgreig/agg-kicad
|
13eae98991a266955cac434ba9f43b69b1d5dbd5
|
5bf13b52fcacf7aedbacb884e0884819083a3bb7
|
refs/heads/master
| 2023-02-04T12:25:03.905819
| 2023-02-02T02:01:10
| 2023-02-02T02:01:10
| 45,762,615
| 157
| 52
|
MIT
| 2022-07-19T12:39:05
| 2015-11-08T01:42:57
|
Python
|
UTF-8
|
Python
| false
| false
| 5,989
|
py
|
"""
build_mod_picoblade.py
Copyright 2019 Adam Greig
Licensed under the MIT licence, see LICENSE file for details.
Generate foorprints for Molex Picoblade connectors.
"""
from __future__ import print_function, division
import os
import sys
import time
import math
import argparse
from sexp import parse as sexp_parse, generate as sexp_generate
from kicad_mod import fp_line, fp_text, pad, draw_square, model
# Settings ====================================================================
# Courtyard clearance
# Use 0.25 for IPC nominal and 0.10 for IPC least
ctyd_gap = 0.25
# Courtyard grid
ctyd_grid = 0.05
# Courtyard line width
ctyd_width = 0.01
# Silk line width
silk_width = 0.15
# Fab layer line width
fab_width = 0.01
# Ref/Val font size (width x height)
font_size = (1.0, 1.0)
# Ref/Val font thickness
font_thickness = 0.15
# Ref/Val font spacing from centre to top/bottom edge
font_halfheight = 0.7
# End Settings ================================================================
def top_smd_refs(name):
out = []
ctyd_h = 3.0 + 0.6 + 1.3 + 2*ctyd_gap
ctyd_y = ctyd_h/2 - 1.3/2 - ctyd_gap
y = ctyd_h / 2.0 + font_halfheight
out.append(fp_text("reference", "REF**", (0, -y+ctyd_y),
"F.Fab", font_size, font_thickness))
out.append(fp_text("value", name, (0, y+ctyd_y),
"F.Fab", font_size, font_thickness))
return out
def top_smd_pads(pins):
x = -(pins-1)/2 * 1.25
pads = []
for pin in range(pins):
pads.append(pad(pin+1, "smd", "rect", (x, 0), [0.8, 1.3],
["F.Cu", "F.Mask", "F.Paste"]))
x += 1.25
return pads
def top_smd_mount(pins):
x = -(pins-1)/2 * 1.25 - 3.6 + 2.1/2
out = []
for xx in (x, -x):
out.append(pad("", "smd", "rect", (xx, 0.6+3.0/2+1.3/2), (2.1, 3.0),
["F.Cu", "F.Mask", "F.Paste"]))
return out
def top_smd_silk(pins):
out = []
w = silk_width
lyr = "F.SilkS"
box_w = (pins-1)*1.25 + 2*3.6 - 2*2.1 - silk_width
box_h = 3.5 # XXX
box_y = box_h/2 + 0.2
nw, ne, se, sw, _ = draw_square(box_w, box_h, (0, box_y), lyr, w)
out.append(fp_line((nw[0]+0.8, nw[1]), nw, lyr, w))
out.append(fp_line(nw, sw, lyr, w))
out.append(fp_line(sw, se, lyr, w))
out.append(fp_line(se, ne, lyr, w))
out.append(fp_line((ne[0]-0.8, ne[1]), ne, lyr, w))
return out
def top_smd_fab(pins):
out = []
w = fab_width
lyr = "F.Fab"
# Outline box
box_w = (pins-1)*1.25 + 2*3.6 - 2*2.1
box_h = 3.5 # XXX
box_y = box_h/2 + 0.2
nw, ne, se, sw, sq = draw_square(box_w, box_h, (0, box_y), lyr, w)
out += sq
# Mounting pins
_, _, _, _, sq = draw_square(1.8, 2.8, (nw[0]-1.8/2, sw[1]-2.8/2), lyr, w)
out += sq
_, _, _, _, sq = draw_square(1.8, 2.8, (ne[0]+1.8/2, sw[1]-2.8/2), lyr, w)
out += sq
# Connector pins
x = -(pins-1)/2 * 1.25
for pin in range(pins):
_, _, _, _, sq = draw_square(0.32, 0.6, (x, 2.6), lyr, w)
out += sq
x += 1.25
return out
def top_smd_ctyd(pins):
w = 1.25*(pins-1) + 2*3.6 + 2*ctyd_gap
h = 3.0 + 0.6 + 1.3 + 2*ctyd_gap
grid = 2 * ctyd_grid
w = grid * int(math.ceil(w / (2*ctyd_grid)))
h = grid * int(math.ceil(h / (2*ctyd_grid)))
y = h/2 - 1.3/2 - ctyd_gap
centre = (0, y)
_, _, _, _, sq = draw_square(w, h, centre, "F.CrtYd", ctyd_width)
return sq
def top_smd_model(pins):
return [
model(
"${KISYS3DMOD}/Connector_Molex.3dshapes/" +
"Molex_PicoBlade_53398-{:02d}71_1x{:02d}".format(pins, pins) +
"-1MP_P1.25mm_Vertical.step",
(0, -1.3/25.4, 0),
(1, 1, 1),
(0, 0, 0))]
def top_smd_fp(pins):
name = "MOLEX-PICOBLADE-53398-{:02d}71".format(pins)
tedit = format(int(time.time()), 'X')
sexp = ["module", name, ("layer", "F.Cu"), ("tedit", tedit)]
sexp += [["attr", "smd"]]
sexp += top_smd_refs(name)
sexp += top_smd_silk(pins)
sexp += top_smd_fab(pins)
sexp += top_smd_ctyd(pins)
sexp += top_smd_mount(pins)
sexp += top_smd_pads(pins)
sexp += top_smd_model(pins)
return name, sexp_generate(sexp)
def main(prettypath, verify=False, verbose=False):
for pins in range(2, 15):
for generator in (top_smd_fp,):
# Generate the footprint
name, fp = generator(pins)
path = os.path.join(prettypath, name + ".kicad_mod")
if verify and verbose:
print("Verifying", path)
# Check if the file already exists and isn't changed
if os.path.isfile(path):
with open(path) as f:
old = f.read()
old = [n for n in sexp_parse(old) if n[0] != "tedit"]
new = [n for n in sexp_parse(fp) if n[0] != "tedit"]
if new == old:
continue
# If not, either verification failed or we should output the new fp
if verify:
return False
else:
with open(path, "w") as f:
f.write(fp)
# If we finished and didn't return yet, verification has succeeded
if verify:
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("prettypath", type=str,
help="Path to footprints to process")
parser.add_argument("--verify", action="store_true",
help="Verify libraries are up to date")
parser.add_argument("--verbose", action="store_true",
help="Print out every library verified")
args = vars(parser.parse_args())
result = main(**args)
if args['verify']:
if result:
print("OK: all footprints up-to-date.")
sys.exit(0)
else:
print("Error: footprints not up-to-date.", file=sys.stderr)
sys.exit(1)
|
[
"adam@adamgreig.com"
] |
adam@adamgreig.com
|
0c4bd2476662725bc512930b35d9f2125533ec69
|
ce083128fa87ca86c65059893aa8882d088461f5
|
/python/sistema-de-medicamentos-pytest/.venv/lib/python2.7/site-packages/tests/test_sort_query.py
|
3e14b9edc323ac1925cc4a9556e6985ce77414d7
|
[] |
no_license
|
marcosptf/fedora
|
581a446e7f81d8ae9a260eafb92814bc486ee077
|
359db63ff1fa79696b7bc803bcfa0042bff8ab44
|
refs/heads/master
| 2023-04-06T14:53:40.378260
| 2023-03-26T00:47:52
| 2023-03-26T00:47:52
| 26,059,824
| 6
| 5
| null | 2022-12-08T00:43:21
| 2014-11-01T18:48:56
| null |
UTF-8
|
Python
| false
| false
| 5,669
|
py
|
from pytest import raises
import sqlalchemy as sa
from sqlalchemy_utils import sort_query
from sqlalchemy_utils.functions import QuerySorterException
from tests import TestCase
class TestSortQuery(TestCase):
def test_without_sort_param_returns_the_query_object_untouched(self):
query = self.session.query(self.Article)
sorted_query = sort_query(query, '')
assert query == sorted_query
def test_column_ascending(self):
query = sort_query(self.session.query(self.Article), 'name')
assert 'ORDER BY article.name ASC' in str(query)
def test_column_descending(self):
query = sort_query(self.session.query(self.Article), '-name')
assert 'ORDER BY article.name DESC' in str(query)
def test_skips_unknown_columns(self):
query = self.session.query(self.Article)
sorted_query = sort_query(query, '-unknown')
assert query == sorted_query
def test_non_silent_mode(self):
query = self.session.query(self.Article)
with raises(QuerySorterException):
sort_query(query, '-unknown', silent=False)
def test_join(self):
query = (
self.session.query(self.Article)
.join(self.Article.category)
)
query = sort_query(query, 'name', silent=False)
assert 'ORDER BY article.name ASC' in str(query)
def test_calculated_value_ascending(self):
query = self.session.query(
self.Category, sa.func.count(self.Article.id).label('articles')
)
query = sort_query(query, 'articles')
assert 'ORDER BY articles ASC' in str(query)
def test_calculated_value_descending(self):
query = self.session.query(
self.Category, sa.func.count(self.Article.id).label('articles')
)
query = sort_query(query, '-articles')
assert 'ORDER BY articles DESC' in str(query)
def test_subqueried_scalar(self):
article_count = (
sa.sql.select(
[sa.func.count(self.Article.id)],
from_obj=[self.Article.__table__]
)
.where(self.Article.category_id == self.Category.id)
.correlate(self.Category.__table__)
)
query = self.session.query(
self.Category, article_count.label('articles')
)
query = sort_query(query, '-articles')
assert 'ORDER BY articles DESC' in str(query)
def test_aliased_joined_entity(self):
alias = sa.orm.aliased(self.Category, name='categories')
query = self.session.query(
self.Article
).join(
alias, self.Article.category
)
query = sort_query(query, '-categories-name')
assert 'ORDER BY categories.name DESC' in str(query)
def test_joined_table_column(self):
query = self.session.query(self.Article).join(self.Article.category)
sorted_query = sort_query(query, 'category-name')
assert 'category.name ASC' in str(sorted_query)
def test_multiple_columns(self):
query = self.session.query(self.Article)
sorted_query = sort_query(query, 'name', 'id')
assert 'article.name ASC, article.id ASC' in str(sorted_query)
def test_column_property(self):
self.Category.article_count = sa.orm.column_property(
sa.select([sa.func.count(self.Article.id)])
.where(self.Article.category_id == self.Category.id)
.label('article_count')
)
query = self.session.query(self.Category)
sorted_query = sort_query(query, 'article_count')
assert 'article_count ASC' in str(sorted_query)
def test_column_property_descending(self):
self.Category.article_count = sa.orm.column_property(
sa.select([sa.func.count(self.Article.id)])
.where(self.Article.category_id == self.Category.id)
.label('article_count')
)
query = self.session.query(self.Category)
sorted_query = sort_query(query, '-article_count')
assert 'article_count DESC' in str(sorted_query)
def test_relationship_property(self):
query = self.session.query(self.Category)
query = sort_query(query, 'articles')
assert 'ORDER BY' not in str(query)
def test_hybrid_property(self):
query = self.session.query(self.Category)
query = sort_query(query, 'articles_count')
assert 'ORDER BY (SELECT count(article.id) AS count_1' in str(query)
def test_hybrid_property_descending(self):
query = self.session.query(self.Category)
query = sort_query(query, '-articles_count')
assert (
'ORDER BY (SELECT count(article.id) AS count_1'
) in str(query)
assert ' DESC' in str(query)
def test_relation_hybrid_property(self):
query = (
self.session.query(self.Article)
.join(self.Article.category)
.correlate(self.Article.__table__)
)
query = sort_query(query, '-category-articles_count')
assert 'ORDER BY (SELECT count(article.id) AS count_1' in str(query)
def test_aliased_hybrid_property(self):
alias = sa.orm.aliased(
self.Category,
name='categories'
)
query = (
self.session.query(self.Article)
.outerjoin(alias, self.Article.category)
.options(
sa.orm.contains_eager(self.Article.category, alias=alias)
)
)
query = sort_query(query, '-categories-articles_count')
assert 'ORDER BY (SELECT count(article.id) AS count_1' in str(query)
|
[
"marcosptf@yahoo.com.br"
] |
marcosptf@yahoo.com.br
|
8016219e342f85c4bd7d288538db7b88088d5f0d
|
abfc3a43aabace5afd55910279af61e5849963a2
|
/examples/app.py
|
b8b9796f6f16a785768f50131c482e0d9b5467e7
|
[] |
no_license
|
kemingy/ventu
|
6b89c70222e961ef8bd52a6c3a1816edde664d8c
|
4a13f9c212c3d468de7ad234d036870857ae7499
|
refs/heads/master
| 2023-08-17T04:45:34.643958
| 2021-10-06T12:48:47
| 2021-10-06T12:48:47
| 249,647,511
| 8
| 0
| null | 2020-06-29T04:13:36
| 2020-03-24T08:03:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,203
|
py
|
import argparse
import logging
import numpy as np
import torch
from pydantic import BaseModel, confloat, constr
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
from ventu import Ventu
# request schema used for validation
class Req(BaseModel):
# the input sentence should be at least 2 characters
text: constr(min_length=2)
class Config:
# examples used for health check and warm-up
schema_extra = {
'example': {'text': 'my cat is very cut'},
'batch_size': 16,
}
# response schema used for validation
class Resp(BaseModel):
positive: confloat(ge=0, le=1)
negative: confloat(ge=0, le=1)
class ModelInference(Ventu):
def __init__(self, *args, **kwargs):
# initialize super class with request & response schema, configs
super().__init__(*args, **kwargs)
# initialize model and other tools
self.tokenizer = DistilBertTokenizer.from_pretrained(
'distilbert-base-uncased')
self.model = DistilBertForSequenceClassification.from_pretrained(
'distilbert-base-uncased-finetuned-sst-2-english')
def preprocess(self, data: Req):
# preprocess a request data (as defined in the request schema)
tokens = self.tokenizer.encode(data.text, add_special_tokens=True)
return tokens
def batch_inference(self, data):
# batch inference is used in `socket` mode
data = [torch.tensor(token) for token in data]
with torch.no_grad():
result = self.model(torch.nn.utils.rnn.pad_sequence(data, batch_first=True))[0]
return result.numpy()
def inference(self, data):
# inference is used in `http` mode
with torch.no_grad():
result = self.model(torch.tensor(data).unsqueeze(0))[0]
return result.numpy()[0]
def postprocess(self, data):
# postprocess a response data (returned data as defined in the response schema)
scores = (np.exp(data) / np.exp(data).sum(-1, keepdims=True)).tolist()
return {'negative': scores[0], 'positive': scores[1]}
def create_model():
    """Build the inference service with DEBUG logging on the root logger.

    Returns:
        A ``ModelInference`` instance using msgpack for request/response
        serialization.
    """
    logger = logging.getLogger()
    formatter = logging.Formatter(
        fmt='%(asctime)s - %(levelname)s - %(module)s - %(message)s')
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.setLevel(logging.DEBUG)
    # NOTE(review): every call attaches another handler to the root logger;
    # calling create_model() more than once duplicates log lines.
    logger.addHandler(handler)
    model = ModelInference(Req, Resp, use_msgpack=True)
    return model
def create_app():
    """Application factory for gunicorn: build the service and return its
    web app object (the ``.app`` attribute of the model service)."""
    return create_model().app
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Ventu service')
parser.add_argument('--mode', '-m', default='http', choices=('http', 'unix', 'tcp'))
parser.add_argument('--host', default='localhost')
parser.add_argument('--port', '-p', default=8080, type=int)
parser.add_argument('--socket', '-s', default='batching.socket')
args = parser.parse_args()
model = create_model()
if args.mode == 'unix':
model.run_unix(args.socket)
elif args.mode == 'tcp':
model.run_tcp(args.host, args.port)
else:
model.run_http(args.host, args.port)
|
[
"kemingy94@gmail.com"
] |
kemingy94@gmail.com
|
5d1bedf2cbaa792dbc6e79da13882ee28ac23f02
|
5085dfd5517c891a1f5f8d99bf698cd4bf3bf419
|
/011.py
|
d86312728c2b2022599d385aa1a6c6bbb68e2b38
|
[] |
no_license
|
Lightwing-Ng/100ExamplesForPythonStarter
|
01ffd4401fd88a0b997656c8c5f695c49f226557
|
56c493d38a2f1a1c8614350639d1929c474de4af
|
refs/heads/master
| 2020-03-10T22:07:37.340512
| 2018-04-15T13:16:30
| 2018-04-15T13:16:30
| 129,611,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
"""
* @author: Lightwing Ng
* email: rodney_ng@iCloud.com
* created on Apr 14, 2018, 8:00 PM
* Software: PyCharm
* Project Name: Tutorial

Classic problem: a pair of rabbits bears a new pair every month from its
third month onward, and every new pair does the same.  Assuming no rabbit
dies, how many pairs are there each month?  The monthly counts follow the
Fibonacci sequence 1, 1, 2, 3, 5, 8, 13, 21, ...
"""
# Each pass advances two consecutive Fibonacci terms at once:
# (x, y) -> (x + y, x + 2y); 21 pairs are printed in total.
x, y = 1, 1
for _ in range(21):
    print("%12ld, %12ld" % (x, y))
    x, y = x + y, x + 2 * y
|
[
"rodney_ng@icloud.com"
] |
rodney_ng@icloud.com
|
96b00132f49b7b82711d9026c6d8585b771ebd65
|
5813c01847dd998df24510f1219a44c10717e65f
|
/flamingo/plugins/feeds.py
|
6e06bc90a44416fa14250f12bd15cb5b8a4dde47
|
[
"Apache-2.0"
] |
permissive
|
robert-figura/flamingo
|
da52e8e3f3cf29f59f393ebae23b3a732f4143bd
|
63cbc5519b6e6b23a31d4e5153dd84976b6ef39f
|
refs/heads/master
| 2020-08-03T19:14:23.390513
| 2019-09-12T18:42:14
| 2019-09-30T14:35:51
| 211,857,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,266
|
py
|
import logging
from feedgen.feed import FeedGenerator
logger = logging.getLogger('flamingo.plugins.Feeds')
class Feeds:
def pre_build(self, context):
FEEDS_DOMAIN = getattr(context.settings, 'FEEDS_DOMAIN', '/')
FEEDS = getattr(context.settings, 'FEEDS', [])
for feed_config in FEEDS:
try:
content = {
'type': 'feed',
'feed_type': feed_config['type'],
'output': feed_config['output'],
'url': '/' + feed_config['output'],
}
if 'lang' in feed_config:
content['lang'] = feed_config['lang']
fg = FeedGenerator()
fg.id(feed_config['id'])
fg.title(feed_config['title'])
for i in feed_config['contents'](context):
fe = fg.add_entry()
# setup required entry attributes
fe_title = i['content_title']
fe_link = {
'href': '{}{}'.format(FEEDS_DOMAIN, i['url']),
'rel': 'alternate'
}
if 'entry-id' in feed_config:
fe_id = feed_config['entry-id'](i)
else:
fe_id = i['output']
if 'updated' in feed_config:
fe_updated = feed_config['updated'](i)
else:
fe_updated = ''
# check entry attributes
missing_attributes = []
if not fe_id:
missing_attributes.append('id')
if not fe_title:
missing_attributes.append('title')
if not fe_updated:
missing_attributes.append('updated')
if missing_attributes:
logger.error('%s is missing attributes: %s',
i['path'] or i,
', '.join(missing_attributes))
return
# optional attributes
fe.id(fe_id)
fe.title(fe_title)
fe.updated(fe_updated)
fe.link(fe_link)
if i['content_body']:
fe.content(i['content_body'], type='html')
if i['authors']:
for author in i['authors']:
fe.author({
'name': author,
})
if i['summary']:
fe.summary(str(i['summary']))
# generate output
if feed_config['type'] == 'atom':
content['content_body'] = fg.atom_str().decode()
elif feed_config['type'] == 'rss':
content['content_body'] = fg.rss_str().decode()
context.contents.add(**content)
except Exception:
logger.error("feed '%s' setup failed", feed_config['id'],
exc_info=True)
|
[
"f.scherf@pengutronix.de"
] |
f.scherf@pengutronix.de
|
531687d1a028a86dd301e9e9d25034c0d5df19cd
|
b10c3eb402e155da2eba5d4e6175f0ea145685be
|
/Practice/Python/py-introduction-to-sets.py
|
d7854cad492b52c7eb421ebea5946ecf5d4168f3
|
[] |
no_license
|
prayagsrivastava/HackerRank
|
2c3cff07a7de125d016eaa9510f488ea7f72015c
|
e5a5fbd6286ac3cc44a65c4408e6c983fdeb0c8e
|
refs/heads/master
| 2023-08-18T10:03:48.473061
| 2021-10-12T19:33:56
| 2021-10-12T19:33:56
| 405,563,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
def average(array):
    """Return the mean of the *distinct* values in ``array``, formatted
    as a string with exactly three decimal places."""
    distinct = set(array)
    mean = sum(distinct) / len(distinct)
    return f"{mean:.3f}"
if __name__ == '__main__':
    # HackerRank driver: first line is the element count (only consumed,
    # not otherwise used), second line holds the space-separated values.
    n = int(input())
    arr = list(map(int, input().split()))
    result = average(arr)
    print(result)
"""
>> a = raw_input()
5 4 3 2
>> lis = a.split()
>> print (lis)
['5', '4', '3', '2']
>> newlis = list(map(int, lis))
>> print (newlis)
[5, 4, 3, 2]
>> myset = {1, 2} # Directly assigning values to a set
>> myset = set() # Initializing a set
>> myset = set(['a', 'b']) # Creating a set from a list
>> myset
{'a', 'b'}
>> myset.add('c')
>> myset
{'a', 'c', 'b'}
>> myset.add('a') # As 'a' already exists in the set, nothing happens
>> myset.add((5, 4))
>> myset
{'a', 'c', 'b', (5, 4)}
>> myset.update([1, 2, 3, 4]) # update() only works for iterable objects
>> myset
{'a', 1, 'c', 'b', 4, 2, (5, 4), 3}
>> myset.update({1, 7, 8})
>> myset
{'a', 1, 'c', 'b', 4, 7, 8, 2, (5, 4), 3}
>> myset.update({1, 6}, [5, 13])
>> myset
{'a', 1, 'c', 'b', 4, 5, 6, 7, 8, 2, (5, 4), 13, 3}
>> myset.discard(10)
>> myset
{'a', 1, 'c', 'b', 4, 5, 7, 8, 2, 12, (5, 4), 13, 11, 3}
>> myset.remove(13)
>> myset
{'a', 1, 'c', 'b', 4, 5, 7, 8, 2, 12, (5, 4), 11, 3}
Both the discard() and remove() functions take a single value as an argument
and removes that value from the set.
If that value is not present, discard() does nothing,
but remove() will raise a KeyError exception.
>> a = {2, 4, 5, 9}
>> b = {2, 4, 11, 12}
>> a.union(b) # Values which exist in a or b
{2, 4, 5, 9, 11, 12}
>> a.intersection(b) # Values which exist in a and b
{2, 4}
>> a.difference(b) # Values which exist in a but not in b
{9, 5}
>> a.union(b) == b.union(a)
True
>> a.intersection(b) == b.intersection(a)
True
>> a.difference(b) == b.difference(a)
False
"""
|
[
"prayag@outlook.in"
] |
prayag@outlook.in
|
9f8af437d7187a9b8c4ce3def70c252d1776f115
|
5f447244723386902a5fbbb94ae45e5e04ec4d93
|
/08-函数/动手试一试/user_albums.py
|
2cd6e951005923b7ec008c846c7de6d3b3d4c750
|
[] |
no_license
|
xuelang201201/PythonCrashCourse
|
2c0a633773340b748100a3349267e693ed2703da
|
55c729ec53c7870a327e5017e69ac853b024d58a
|
refs/heads/master
| 2022-09-12T06:13:14.302904
| 2020-05-30T07:40:24
| 2020-05-30T07:40:24
| 264,503,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 918
|
py
|
"""
用户的专辑:在为完成练习 8-7 编写的程序中,编写一个 while 循环,让用户输入一个专辑的歌手和名称。获取这些信息后,
使用它们来调用函数 make_album(),并将创建的字典打印出来。在这个 while 循环中,务必要提供退出途径。
"""
def make_album(artist, title, tracks=0):
    """Return a dict describing a music album.

    Args:
        artist: Artist name; stored title-cased.
        title: Album title; stored title-cased.
        tracks: Optional track count; included only when non-zero.

    Returns:
        dict with 'artist' and 'title' keys, plus 'tracks' when given.
    """
    # `tracks=0` replaces the original's obscure `tracks=int()` (same value).
    album_dict = {
        'artist': artist.title(),
        'title': title.title(),
    }
    # A falsy track count (0) means "not provided" and is omitted.
    if tracks:
        album_dict['tracks'] = tracks
    return album_dict
# Prompts for the interactive loop below.
title_prompt = "\nWhat album are you thinking of? "
artist_prompt = "Who's the artist? "
print("Enter 'quit' at any time to stop.")
# Collect album/artist pairs until the user types 'quit' at either prompt.
while True:
    title = input(title_prompt)
    if title == 'quit':
        break
    artist = input(artist_prompt)
    if artist == 'quit':
        break
    # Keyword arguments keep the call correct even though the prompts
    # gather the title before the artist.
    album = make_album(title=title, artist=artist)
    print(album)
print("\nThanks for responding!")
|
[
"xuelang201201@gmail.com"
] |
xuelang201201@gmail.com
|
a8e929b25a61e9552c4e32c67d6c3fa98304d4f0
|
79f42fd0de70f0fea931af610faeca3205fd54d4
|
/base_lib/ChartDirector/pythondemo_cgi/realtimedemo.py
|
51f8dc21d7e9e74fb3657b15b89712634d16efbc
|
[
"IJG"
] |
permissive
|
fanwen390922198/ceph_pressure_test
|
a900a6dc20473ae3ff1241188ed012d22de2eace
|
b6a5b6d324e935915090e791d9722d921f659b26
|
refs/heads/main
| 2021-08-27T16:26:57.500359
| 2021-06-02T05:18:39
| 2021-06-02T05:18:39
| 115,672,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,605
|
py
|
#!/usr/bin/python
from pychartdir import *
#
# In this demo, the generated web page needs to load the "cdjcv.js" Javascript file. For ease of
# installation, we put "cdjcv.js" in the same directory as this script. However, if this script is
# installed in a CGI only directory (such as cgi-bin), the web server would not allow the browser to
# access this non-CGI file.
#
# To get around this potential issue, a special load resource script is used to load these files.
# Instead of using:
#
# <SCRIPT SRC="cdjcv.js">
#
# we now use:
#
# <SCRIPT SRC="loadresource.py?file=cdjcv.js">
#
# If this script is not in a CGI only directory, you may replace the following loadResource string
# with an empty string "" to improve performance.
#
loadResource = "loadresource.py?file="
print("Content-type: text/html\n")
print("""
<!DOCTYPE html>
<html>
<head>
<title>Simple Realtime Chart</title>
<script type="text/javascript" src="%(loadResource)scdjcv.js"></script>
</head>
<body style="margin:0px">
<table cellspacing="0" cellpadding="0" border="0">
<tr>
<td align="right" colspan="2" style="background:#000088; color:#ffff00; padding:0px 4px 2px 0px;">
<a style="color:#FFFF00; font:italic bold 10pt Arial; text-decoration:none" href="http://www.advsofteng.com/">
Advanced Software Engineering
</a>
</td>
</tr>
<tr valign="top">
<td style="width:130px; background:#c0c0ff; border-right:black 1px solid; border-bottom:black 1px solid;">
<br />
<br />
<div style="font:12px Verdana; padding:10px;">
<b>Update Period</b><br />
<select id="UpdatePeriod" style="width:110px">
<option value="5">5</option>
<option value="10" selected="selected">10</option>
<option value="20">20</option>
<option value="30">30</option>
<option value="60">60</option>
</select>
<br /><br /><br />
<b>Time Remaining</b><br />
<div style="width:108px; border:#888888 1px inset;">
<div style="margin:3px" id="TimeRemaining">0</div>
</div>
</div>
</td>
<td>
<div style="font: bold 20pt Arial; margin:5px 0px 0px 5px;">
Simple Realtime Chart
</div>
<hr style="border:solid 1px #000080" />
<div style="padding:0px 5px 5px 10px">
<!-- ****** Here is the image tag for the chart image ****** -->
<img id="ChartImage1" src="realtimechart.py?chartId=demoChart1">
</div>
</td>
</tr>
</table>
<script type="text/javascript">
//
// Executes once every second to update the countdown display. Updates the chart when the countdown reaches 0.
//
function timerTick()
{
// Get the update period and the time left
var updatePeriod = parseInt(document.getElementById("UpdatePeriod").value);
var timeLeft = Math.min(parseInt(document.getElementById("TimeRemaining").innerHTML), updatePeriod) - 1;
if (timeLeft == 0)
// Can update the chart now
JsChartViewer.get('ChartImage1').streamUpdate();
else if (timeLeft < 0)
// Reset the update period
timeLeft += updatePeriod;
// Update the countdown display
document.getElementById("TimeRemaining").innerHTML = timeLeft;
}
window.setInterval("timerTick()", 1000);
</script>
</body>
</html>
""" % {
"loadResource" : loadResource
})
|
[
"fanwen@sscc.com"
] |
fanwen@sscc.com
|
f7b39d51deee02aba20f484a17ca18c9719fd1b2
|
a2f6e449e6ec6bf54dda5e4bef82ba75e7af262c
|
/venv/Lib/site-packages/pandas/tests/generic/test_frame.py
|
334651eb42df4f06b3c9078bb7055ef7355b38f6
|
[] |
no_license
|
mylonabusiness28/Final-Year-Project-
|
e4b79ccce6c19a371cac63c7a4ff431d6e26e38f
|
68455795be7902b4032ee1f145258232212cc639
|
refs/heads/main
| 2023-07-08T21:43:49.300370
| 2021-06-05T12:34:16
| 2021-06-05T12:34:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:c2f9e8ecaccad5fbdf2183633d20185b2b53f18c7eca55721455ca2366e3ce31
size 7441
|
[
"chuksajeh1@gmail.com"
] |
chuksajeh1@gmail.com
|
2209465c41bec4b36500babebeff0c642f924940
|
b0f0bd131bbfc287f2d8393fcf6aaabd99b17e05
|
/db_create.py
|
5f7295adc0feb3e6fe82b6a31f45750f7ee23f24
|
[] |
no_license
|
jreiher2003/casino
|
b19380364d57fba7bc3a772a19cf8caa8b55585c
|
bd6b12740c84c82dd5589a9aa414d6053c997739
|
refs/heads/master
| 2021-01-11T14:50:29.024326
| 2017-02-12T12:33:08
| 2017-02-12T12:33:08
| 80,230,542
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,523
|
py
|
import pymongo
from pymongo import MongoClient
from datetime import datetime
# NOTE(review): credentials are hard-coded in the connection URI — move them
# to configuration/environment before sharing or deploying this script.
conn = MongoClient("mongodb://finn:finn7797@localhost:27017/casino")
# Working collection: casino.test_col
db = conn.casino.test_col
# blog_record = {}
blog_record = {
"author": "erlichson",
"body": "This is a test body",
"comments": [
{
"body":"this is a comment",
"email": "jeffreiher@gmail.com",
"author": "Jeff Reiher 1"
},
{
"body":"This is another comment",
"email":"jreiher2003@yahoo.com",
"author":"Finn Gotti"
}
],
"date": datetime.utcnow(),
"permalink": "This_is_a_test_Post",
"tags": ["cycling", "mongodb", "swimming"],
"title": "This is a test Post"
}
# blog_record2 = {}
blog_record2 = {
"author": "mcnuts",
"body": "This is body of second record",
"comments": [
{
"body":"this is a comment record 2",
"email": "jeff_record2@gmail.com",
"author": "Jeff Reiher 2"
},
{
"body":"This is another comment 2",
"email":"reiher@yahoo.com",
"author":"Gotti record 2"
}
],
"date": datetime.utcnow(),
"permalink": "This_is_a_test_Post_record_2",
"tags": ["howbahdaw", "bitchassness", "fucking"],
"title": "This is a test Post Record 2"
}
# Collection.insert() was deprecated in PyMongo 3.0 and removed in 4.0;
# insert_one() is the supported single-document API and behaves the same
# for this script (the unused return value differs).
db.insert_one(blog_record)
db.insert_one(blog_record2)
conn.close()
|
[
"jreiher2003@yahoo.com"
] |
jreiher2003@yahoo.com
|
617fb94131635549bc2bcc55ed6c5531eed3c24c
|
94e6b634335d310daed51687ccb6206ce10c7807
|
/permutation_sequence.py
|
b6216a32418a1b28690a18106e2cf89712e5dfaa
|
[
"MIT"
] |
permissive
|
lutianming/leetcode
|
4fdbdd852353e1682794ee4b2557389810f07293
|
848c7470ff5fd23608cc954be23732f60488ed8a
|
refs/heads/master
| 2021-01-19T11:45:25.352432
| 2015-07-26T11:57:21
| 2015-07-26T11:57:21
| 18,801,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
class Solution:
    def getPermutation(self, n, k):
        """Return the k-th (1-indexed, lexicographic) permutation of 1..n
        as a string, or None when k is outside [1, n!].
        """
        # num = n!, the total number of permutations.
        num = 1
        for i in range(n):
            num *= (i + 1)
        if k < 1 or k > num:
            return None
        k = k - 1  # switch to a 0-indexed rank
        seq = [i + 1 for i in range(n)]
        permutation = ''
        subk = 1
        for i in range(n, 0, -1):
            # subk = (i-1)!, permutations per choice of leading digit.
            for j in range(1, i):
                subk *= j
            # Bug fix: '/' is true division in Python 3 and yields a float,
            # which crashes list.pop(); use floor division instead.
            index = k // subk
            k = k % subk
            subk = 1
            permutation += str(seq.pop(index))
        return permutation
# Quick smoke test: the only permutation of [1] is "1".
s = Solution()
print(s.getPermutation(1, 1))
|
[
"lutianming1005@gmail.com"
] |
lutianming1005@gmail.com
|
5ac3f83fe5ba0066a76f4ced6242806d952e696c
|
ddf002d1084d5c63842a6f42471f890a449966ee
|
/basics/Python/PYTHON --------/oops/aaaa.py
|
8fccdfcb19262491e0eb8104cd18e8fae6d1bba7
|
[] |
no_license
|
RaghavJindal2000/Python
|
0ab3f198cbc5559bdf46ac259c7136356f7f09aa
|
8e5c646585cff28ba3ad9bd6c384bcb5537d671a
|
refs/heads/master
| 2023-01-01T23:56:02.073029
| 2020-10-18T19:30:01
| 2020-10-18T19:30:01
| 263,262,452
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
class Sample:
    """Wraps a number; the '+' operator prints the product of the two
    operands' values (operator-overloading demo) and returns None."""

    def __init__(self, x):
        self.x = x

    def __add__(self, other):
        # Deliberately returns nothing: '+' is used only for its print side
        # effect in this example.
        product = self.x * other.x
        print(product)
a=Sample(5)
b=Sample(10)
# Triggers Sample.__add__, which prints 50.
a+b
|
[
"40332753+RaghavJindal2000@users.noreply.github.com"
] |
40332753+RaghavJindal2000@users.noreply.github.com
|
b7d6112a988c2054ed227e17d4af0f39335b9cfb
|
d2fe0e203df127f0a823ca5f1cc2a50c3ae7451e
|
/dask_image/ndfilters/_gaussian.py
|
675334d6f433a149add1adf587a7fc70f257133e
|
[
"BSD-3-Clause"
] |
permissive
|
awesome-archive/dask-image
|
78e8f1666d59293dc69fb34afbda23de2c3822fb
|
21047b4d7e882441754b94894013cb3ec9b5b396
|
refs/heads/master
| 2021-06-19T01:39:04.882036
| 2019-02-14T17:06:30
| 2019-02-14T17:06:30
| 170,795,341
| 0
| 0
|
BSD-3-Clause
| 2020-01-13T04:40:59
| 2019-02-15T03:20:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,181
|
py
|
# -*- coding: utf-8 -*-
import numbers
import numpy
import scipy.ndimage.filters
from . import _utils
def _get_sigmas(input, sigma):
    # Normalize `sigma` to a tuple holding exactly one real value per input
    # dimension; a scalar sigma is broadcast to every axis.
    ndim = input.ndim
    nsigmas = numpy.array(sigma)
    if nsigmas.ndim == 0:
        # Scalar case: nsigmas[()] extracts the 0-d value, then repeat it.
        nsigmas = numpy.array(ndim * [nsigmas[()]])
    if nsigmas.ndim != 1:
        raise RuntimeError(
            "Must have a single sigma or a single sequence."
        )
    if ndim != len(nsigmas):
        raise RuntimeError(
            "Must have an equal number of sigmas to input dimensions."
        )
    if not issubclass(nsigmas.dtype.type, numbers.Real):
        raise TypeError("Must have real sigmas.")
    nsigmas = tuple(nsigmas)
    return nsigmas
def _get_border(input, sigma, truncate):
    # Per-axis half-width of the Gaussian kernel, ceil(sigma * truncate);
    # used below as the map_overlap depth so each chunk sees enough
    # neighboring data for a correct filtered border.
    sigma = numpy.array(_get_sigmas(input, sigma))
    if not isinstance(truncate, numbers.Real):
        raise TypeError("Must have a real truncate value.")
    half_shape = tuple(numpy.ceil(sigma * truncate).astype(int))
    return half_shape
@_utils._update_wrapper(scipy.ndimage.filters.gaussian_filter)
def gaussian_filter(input,
sigma,
order=0,
mode='reflect',
cval=0.0,
truncate=4.0):
sigma = _get_sigmas(input, sigma)
depth = _get_border(input, sigma, truncate)
depth, boundary = _utils._get_depth_boundary(input.ndim, depth, "none")
result = input.map_overlap(
scipy.ndimage.filters.gaussian_filter,
depth=depth,
boundary=boundary,
dtype=input.dtype,
sigma=sigma,
order=order,
mode=mode,
cval=cval,
truncate=truncate
)
return result
@_utils._update_wrapper(scipy.ndimage.filters.gaussian_gradient_magnitude)
def gaussian_gradient_magnitude(input,
sigma,
mode='reflect',
cval=0.0,
truncate=4.0,
**kwargs):
sigma = _get_sigmas(input, sigma)
depth = _get_border(input, sigma, truncate)
depth, boundary = _utils._get_depth_boundary(input.ndim, depth, "none")
result = input.map_overlap(
scipy.ndimage.filters.gaussian_gradient_magnitude,
depth=depth,
boundary=boundary,
dtype=input.dtype,
sigma=sigma,
mode=mode,
cval=cval,
truncate=truncate,
**kwargs
)
return result
@_utils._update_wrapper(scipy.ndimage.filters.gaussian_laplace)
def gaussian_laplace(input,
sigma,
mode='reflect',
cval=0.0,
truncate=4.0,
**kwargs):
sigma = _get_sigmas(input, sigma)
depth = _get_border(input, sigma, truncate)
depth, boundary = _utils._get_depth_boundary(input.ndim, depth, "none")
result = input.map_overlap(
scipy.ndimage.filters.gaussian_laplace,
depth=depth,
boundary=boundary,
dtype=input.dtype,
sigma=sigma,
mode=mode,
cval=cval,
truncate=truncate,
**kwargs
)
return result
|
[
"noreply@github.com"
] |
awesome-archive.noreply@github.com
|
f4724ae50443e1ccbbe1fff7f928824f75702d3d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02555/s538632738.py
|
23cc590fdcbf56a8cc888767b524fbc44b81c985
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
# Count, modulo 1e9+7, the ways to write S as an ordered sum of terms
# that are each at least 3 (competitive-programming solution).
S = int(input())
mod = 10 ** 9 + 7
# f[n] = n! mod p, precomputed for n < 2000.
f = [1]
for i in range(1,2000):
    f.append((f[-1] * i) % mod)
def comb(n,r):
    # nCr mod p via Fermat's little theorem: a^(p-2) is a's inverse mod p.
    return f[n] * (pow(f[r], mod-2, mod) * pow(f[n-r], mod-2, mod) % mod) % mod
ans = 0
# For each term count i, give every term 3 up front (s = S - 3i left over);
# C(s+i-1, i-1) distributes the remainder over i terms (stars and bars).
for i in range(1,700):
    s = S - (3 * i)
    if s < 0:
        break
    ans += comb(s+i-1, i-1)
    ans %= mod
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
dd13976ac5b9b1d7d3c91724bd781689aea63400
|
bd8532378ad2a61240faaa7be8ef44c60c055a2a
|
/rabona/data/leagues/Campeonato Scotiabank/San Luis/San Luis.py
|
bab8b8776851cf081d0374f9e793d0e82d7e98f0
|
[] |
no_license
|
nosoyyo/rabona
|
278a9dfe158e342261343b211fb39b911e993803
|
b0af3ab5806675fbf81b038633a74943118a67bb
|
refs/heads/master
| 2020-03-16T06:56:55.277293
| 2018-05-30T11:45:51
| 2018-05-30T11:45:51
| 132,565,989
| 2
| 1
| null | 2018-05-30T11:45:52
| 2018-05-08T06:44:11
|
Python
|
UTF-8
|
Python
| false
| false
| 220
|
py
|
# Scraped futbin metadata for the club; the player roster is not populated.
club_info = {'club_url': 'https://www.futbin.com///18/leagues/Campeonato%20Scotiabank?page=1&club=112668', 'club_logo': 'https://cdn.futbin.com/content/fifa18/img/clubs/112668.png', 'club_name': 'San Luis'}
players = {}
|
[
"oyyoson@gmail.com"
] |
oyyoson@gmail.com
|
f0a909ca5858215c7001a0ceeb0fc4ee93eddcb0
|
d66818f4b951943553826a5f64413e90120e1fae
|
/hackerearth/Math/Number Theory/Primality Tests/Does it divide/solution.py
|
28584b27d406a332fb41da73ba15b6bd2ef85ba0
|
[
"MIT"
] |
permissive
|
HBinhCT/Q-project
|
0f80cd15c9945c43e2e17072416ddb6e4745e7fa
|
19923cbaa3c83c670527899ece5c3ad31bcebe65
|
refs/heads/master
| 2023-08-30T08:59:16.006567
| 2023-08-29T15:30:21
| 2023-08-29T15:30:21
| 247,630,603
| 8
| 1
|
MIT
| 2020-07-22T01:20:23
| 2020-03-16T06:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 438
|
py
|
def is_prime(x):
    """Return True when x is prime, using 6k±1 trial division."""
    if x <= 1:
        return False
    if x in (2, 3):
        return True
    if x % 2 == 0 or x % 3 == 0:
        return False
    # Every prime above 3 has the form 6k-1 or 6k+1; test both per step,
    # up to and including sqrt(x).
    limit = int(x ** 0.5) + 1
    divisor = 5
    while divisor < limit:
        if x % divisor == 0 or x % (divisor + 2) == 0:
            return False
        divisor += 6
    return True
# Driver: t test cases, one integer n per case.
t = int(input())
for _ in range(t):
    n = int(input())
    # n == 1 is answered YES unconditionally (special-cased by the task).
    if n == 1:
        print('YES')
        continue
    # NOTE(review): per this solution the answer is YES exactly when n + 1
    # is composite — confirm against the original problem statement.
    print('NO' if is_prime(n + 1) else 'YES')
|
[
"hbinhct@gmail.com"
] |
hbinhct@gmail.com
|
bd63cfe681913e3127aeabfd924d544aba02e68e
|
32c56293475f49c6dd1b0f1334756b5ad8763da9
|
/google-cloud-sdk/lib/third_party/kubernetes/client/models/v1beta1_api_service_list.py
|
881cd62e4beaeedd82f3052769f50e052fb43ba9
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
bopopescu/socialliteapp
|
b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494
|
85bb264e273568b5a0408f733b403c56373e2508
|
refs/heads/master
| 2022-11-20T03:01:47.654498
| 2020-02-01T20:29:43
| 2020-02-01T20:29:43
| 282,403,750
| 0
| 0
|
MIT
| 2020-07-25T08:31:59
| 2020-07-25T08:31:59
| null |
UTF-8
|
Python
| false
| false
| 5,935
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen
https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1APIServiceList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name and the value is attribute
type.
attribute_map (dict): The key is attribute name and the value is json key
in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1beta1APIService]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1beta1APIServiceList - a model defined in Swagger
"""
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1beta1APIServiceList.
APIVersion defines the versioned schema of this representation of an
object. Servers should convert recognized schemas to the latest internal
value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta1APIServiceList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1APIServiceList.
APIVersion defines the versioned schema of this representation of an
object. Servers should convert recognized schemas to the latest internal
value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1APIServiceList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1beta1APIServiceList.
:return: The items of this V1beta1APIServiceList.
:rtype: list[V1beta1APIService]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1beta1APIServiceList.
:param items: The items of this V1beta1APIServiceList.
:type: list[V1beta1APIService]
"""
if items is None:
raise ValueError('Invalid value for `items`, must not be `None`')
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1beta1APIServiceList.
Kind is a string value representing the REST resource this object
represents. Servers may infer this from the endpoint the client submits
requests to. Cannot be updated. In CamelCase. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1APIServiceList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1APIServiceList.
Kind is a string value representing the REST resource this object
represents. Servers may infer this from the endpoint the client submits
requests to. Cannot be updated. In CamelCase. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1APIServiceList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1APIServiceList.
:return: The metadata of this V1beta1APIServiceList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1APIServiceList.
:param metadata: The metadata of this V1beta1APIServiceList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, 'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], 'to_dict') else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1APIServiceList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"jonathang132298@gmail.com"
] |
jonathang132298@gmail.com
|
3885aebb72cf173ed1f34761cc278cbade2a89c0
|
292cec77b5003a2f80360d0aee77556d12d990f7
|
/typings/fs/opener/appfs.pyi
|
2482685ff2801c22c4fb2e0cd2c5f3b9f9f5127e
|
[
"Apache-2.0"
] |
permissive
|
yubozhao/BentoML
|
194a6ec804cc1c6dbe7930c49948b6707cbc3c5f
|
d4bb5cbb90f9a8ad162a417103433b9c33b39c84
|
refs/heads/master
| 2022-12-17T00:18:55.555897
| 2022-12-06T00:11:39
| 2022-12-06T00:11:39
| 178,978,385
| 3
| 0
|
Apache-2.0
| 2020-12-01T18:17:15
| 2019-04-02T01:53:53
|
Python
|
UTF-8
|
Python
| false
| false
| 506
|
pyi
|
import typing
from typing import Text, Union
from ..appfs import _AppFS
from ..subfs import SubFS
from .base import Opener
from .parse import ParseResult
from .registry import registry
if typing.TYPE_CHECKING: ...
@registry.install
class AppFSOpener(Opener):
protocols = ...
_protocol_mapping = ...
def open_fs(
self,
fs_url: Text,
parse_result: ParseResult,
writeable: bool,
create: bool,
cwd: Text,
) -> Union[_AppFS, SubFS[_AppFS]]: ...
|
[
"noreply@github.com"
] |
yubozhao.noreply@github.com
|
f4a25896e546374e0f6ccf4c1c1b90b47bf1f1f4
|
89caf19c6e2c9c0c7590226266a1f1fb2bd13d56
|
/day1/08-strings.py
|
b7f7fdf17be5e5abf806af3112b5227f57247cbc
|
[] |
no_license
|
ash/python-in-5days
|
2af3e4477210fd6e8710b148a1a3c187604695c2
|
76252fe2b905d1aee0772908742a6bb814a6ef99
|
refs/heads/master
| 2021-04-06T20:47:06.667013
| 2018-03-21T15:06:05
| 2018-03-21T15:06:05
| 125,269,790
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
# Quoting strings: single or double quotes are equivalent.
# Choose the one that makes it easy to add quotes inside a string:
s1 = 'single " quote'
s2 = "double ' quotes"
# Escaping characters with a backslash:
s3 = 'escape \' continue'
print(s3)
# More escaped symbols:
# \n = newline
# \t = tab
# \r = carriage return
# \b = backspace (the bell character is \a)
# Unicode is OK:
city = 'Zürich'
print(city)
# You can force an encoding if needed:
print(city.encode(encoding='Latin-1'))
# To sort, transform Straße to strasse:
s = 'Straße'
print(s.casefold())
print(s)
# Create a string explicitly:
s = str(123)
print(type(s))
# There are also other 'string' types:
# bytes()
# bytearray()
# This is how you concatenate strings:
big_string = "first part " "second part"
big_string = "first part " + "second part"
print(big_string)
# String repetition using *
string1 = "Hello!"
print(string1 * 10)
print(10 * string1)
# When printing a string, make sure all parts are strings:
print('123' + str(456))
print(int('123') + 456)
|
[
"andy@shitov.ru"
] |
andy@shitov.ru
|
26d577fdef3604793c953c49f2c7d23bcabe0c42
|
4264d47b39469ff508c15aa54960b67d56082855
|
/scripts/design.py
|
359f3a688400c5b2326159c5da75cda4cf36a9a4
|
[] |
no_license
|
EthanHolleman/GLOE-DRIP
|
5097edad289b9f1e67349540c8959d84a59a0137
|
ae77f2acc45be99727643466a25d69f1d13c9d48
|
refs/heads/main
| 2023-04-28T19:08:53.301899
| 2021-05-11T17:08:26
| 2021-05-11T17:08:26
| 360,753,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,422
|
py
|
# Read an SRA table with sample information and create a "design"
# table that can be used by the R metagene package for plotting
# by experimental group. In the sra file the experimental group
# should be under a column called "modification" and the sample
# name should be under "Sample Name" (case sensitive).
import pandas as pd
import argparse
from pathlib import Path
def get_args():
    """Parse command-line arguments: input SRA table, output path, delimiter."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('sra', help='Path to sra table to read.')
    arg_parser.add_argument('out', help='Path to output table.')
    arg_parser.add_argument('--delim', default=',', help='Delimiter of sra file.')
    return arg_parser.parse_args()
def read_sra(filepath, delim=',', **kwargs):
    """Load an SRA run table into a pandas DataFrame.

    filepath may be anything str() can render (e.g. a pathlib.Path).
    Extra keyword arguments are forwarded to pandas.read_csv.
    """
    path_text = str(filepath)
    return pd.read_csv(path_text, sep=delim, **kwargs)
def design_frame(sra_frame):
    """Build a one-hot design matrix (samples x experimental groups).

    Rows are indexed by the 'Sample Name' column; columns are the distinct
    values of the 'modification' column with spaces replaced by
    underscores. A cell is 1 when the sample belongs to that group, else 0.
    (Removed a leftover debug print of the group list.)
    """
    groups = list(set(list(sra_frame['modification'])))
    groups = [g.replace(' ', '_') for g in groups]
    group_matrix = {}
    for index, row in sra_frame.iterrows():
        sample_row = {g: 0 for g in groups}
        sample_row[str(row['modification']).replace(' ', '_')] = 1
        group_matrix[str(row['Sample Name'])] = sample_row
    return pd.DataFrame(group_matrix).transpose()
def main():
    """Read the SRA table, build the design matrix, write it as TSV."""
    args = get_args()
    # Fix: honor the --delim flag; the original parsed it but never passed
    # it on, so non-comma tables were misread.
    sra_df = read_sra(args.sra, delim=args.delim)
    design_df = design_frame(sra_df)
    design_df.to_csv(args.out, sep='\t')
if __name__ == '__main__':
    main()
|
[
"etholleman@ucdavis.edu"
] |
etholleman@ucdavis.edu
|
9448924fade59de01de7a10fa8d88ce40717702b
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/artificial/transf_Logit/trend_PolyTrend/cycle_7/ar_/test_artificial_128_Logit_PolyTrend_7__20.py
|
cf1fa0ed89ce35dc406211254b9440c735a0c415
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 265
|
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
# Auto-generated benchmark case: 128 daily points, polynomial trend,
# 7-step cycle, logit transform, zero noise, 20 exogenous series, AR order 0.
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "Logit", sigma = 0.0, exog_count = 20, ar_order = 0);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
c5496dfe565e03a669ff94ddc35eb0624c222896
|
3274e44458cd20e0c9b04fc41ef0e981f2bfaa34
|
/forms.py
|
b3e3630316a6ed949969a3cdab42bd095bd9e398
|
[
"MIT"
] |
permissive
|
ThanlonSmith/news_cms
|
b354f59cc8d31b3495c0fa445375fe9c04ea2604
|
3a5b593334c3edd13682a0280d1ba003732e4c9a
|
refs/heads/master
| 2022-12-17T17:26:39.176484
| 2020-09-27T09:33:48
| 2020-09-27T09:33:48
| 298,996,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,489
|
py
|
# coding:utf8
# @Time : 2020/7/2 上午1:26
# @Author : Erics
# @File : forms.py
# @Software: PyCharm
from flask_wtf import FlaskForm # pip install flask_wtf -i https://mirrors.aliyun.com/pypi/simple
from wtforms import StringField, PasswordField, SubmitField, SelectField, FileField, TextAreaField
from wtforms.validators import DataRequired, EqualTo, ValidationError, Length
from models import User, Cate
from flask import session
class RegisterForm(FlaskForm):
    """Registration form: account name, password + confirmation, captcha."""
    name = StringField(
        label='账号',
        validators=[
            DataRequired('账号不能为空!'),
            Length(max=16, message='账号不大于%(max)d位!')
        ],
        description='账号',
        render_kw={
            'class': 'form-control',
            "placeholder": "请输入账号!",
            'maxlength': '16',
        }
    )
    pwd = PasswordField(
        label="密码",
        validators=[
            DataRequired("密码不能为空!"),
            Length(min=6, max=20, message='密码长度必须大于%(min)d不少于%(max)d位!')
        ],
        description="密码",
        render_kw={
            "class": "form-control",
            "placeholder": "请输入密码!",
            'maxlength': '20',
            'minlength': '6',
        }
    )
    re_pwd = PasswordField(
        label=u"确认密码",
        validators=[
            DataRequired("确认密码不能为空!"),
            Length(min=6, max=20, message='密码长度必须大于%(min)d不少于%(max)d位!'),
            EqualTo('pwd', message="两次输入密码不一致!")
        ],
        description="确认密码",
        render_kw={
            "class": "form-control",
            "placeholder": "请输入确认密码!",
            'maxlength': '20',
            'minlength': '6',
        }
    )
    verification_code = StringField(
        label="验证码",
        validators=[
            DataRequired("验证码不能为空!"),
            Length(min=4, max=4, message='请输入%(max)d位验证码!'),
        ],
        description="验证码",
        render_kw={
            "class": "form-control",
            "placeholder": "请输入验证码!",
            'maxlength': '4',
            'minlength': '4',
        }
    )
    submit = SubmitField(
        "注册",
        render_kw={
            "class": "btn btn-primary"
        }
    )
    # Custom per-field validators follow WTForms' validate_<field> convention.
    # Custom account check: reject names that are already registered.
    def validate_name(self, field):
        name = field.data
        user_count = User.query.filter_by(name=name).count()
        if user_count > 0:
            raise ValidationError("账号已存在,不能重复注册!")
    # Custom captcha check: compare (case-insensitively) against the code
    # previously stored in the session by the captcha endpoint.
    def validate_verification_code(self, field):
        code = field.data  # e.g. "4j28"
        if session.get("code") and session.get("code").lower() != code.lower():
            raise ValidationError("验证码不正确!")
class LoginForm(FlaskForm):
    """Login form: account name and password."""
    name = StringField(
        label='账号',
        validators=[
            DataRequired('账号不能为空!'),
            Length(max=16, message='账号不大于%(max)d位!')
        ],
        description='账号',
        render_kw={
            'class': 'form-control',
            "placeholder": "请输入账号!",
            'maxlength': '16',
        }
    )
    pwd = PasswordField(
        label="密码",
        validators=[
            DataRequired("密码不能为空!"),
            Length(min=6, max=20, message='密码长度必须大于%(min)d不少于%(max)d位!')
        ],
        description="密码",
        render_kw={
            "class": "form-control",
            "placeholder": "请输入密码!",
            'maxlength': '20',
            'minlength': '6',
        }
    )
    submit = SubmitField(
        "登录",
        render_kw={
            "class": "btn btn-primary"
        }
    )
class ArticleAddForm(FlaskForm):
    """Article creation form: title, category, cover image, rich content."""
    def __init__(self, *args, **kwargs):
        super(ArticleAddForm, self).__init__(*args, **kwargs)
        # Category choices are loaded from the database at form-construction
        # time so newly added categories appear without a restart.
        cate = Cate.query.all()  # [<Cate 2>, <Cate 1>]
        self.cate_id.choices = [(i.id, i.title) for i in cate]
        self.cate_id.choices.reverse()
        # print(self.cate_id.choices) # [(1, '科技'), (2, '社会')]
    title = StringField(
        label='标题',
        description='标题',
        validators=[
            DataRequired('标题不能为空!'),
            Length(max=30, message='标题长度不应该大于%(max)d位!')
        ],
        render_kw={
            'class': 'form-control',
            'placeholder': '请输入标题',
            'maxlength': '30'
        }
    )
    cate_id = SelectField(
        label='分类',
        description='分类',
        validators=[
            DataRequired('分类不能为空!'),
        ],
        choices=None,
        default=1,
        coerce=int,
        render_kw={
            'class': 'form-control'
        }
    )
    logo = FileField(
        label='封面',
        description='封面',
        validators=[
            DataRequired('封面不能为空!')
        ],
        render_kw={
            'class': 'form-control-static'
        }
    )
    content = TextAreaField(
        label="内容",
        validators=[
            DataRequired("内容不能为空!"),
            Length(max=2000, message='不得超过%(max)d字')
        ],
        description="内容",
        render_kw={
            "style": "height:300px;",
            "id": "content"
        }
    )
    submit = SubmitField(
        "发布",
        render_kw={
            "class": "btn btn-danger",
        }
    )
class ArticleEditForm(FlaskForm):
    """Article edit form; mirrors ArticleAddForm but submits as an edit."""
    def __init__(self, *args, **kwargs):
        super(ArticleEditForm, self).__init__(*args, **kwargs)
        # Category choices are loaded from the database at form-construction
        # time so newly added categories appear without a restart.
        cate = Cate.query.all()  # [<Cate 2>, <Cate 1>]
        self.cate_id.choices = [(i.id, i.title) for i in cate]
        self.cate_id.choices.reverse()
        # print(self.cate_id.choices) # [(1, '科技'), (2, '社会')]
    title = StringField(
        label='标题',
        description='标题',
        validators=[
            DataRequired('标题不能为空!'),
            Length(max=30, message='标题长度不应该大于%(max)d位!')
        ],
        render_kw={
            'class': 'form-control',
            'placeholder': '请输入标题',
            'maxlength': '30'
        }
    )
    cate_id = SelectField(
        label='分类',
        description='分类',
        validators=[
            DataRequired('分类不能为空!'),
        ],
        choices=None,
        default=1,
        coerce=int,
        render_kw={
            'class': 'form-control'
        }
    )
    logo = FileField(
        label='封面',
        description='封面',
        validators=[
            DataRequired('封面不能为空!')
        ],
        render_kw={
            'class': 'form-control-static'
        }
    )
    content = TextAreaField(
        label="内容",
        validators=[
            DataRequired("内容不能为空!"),
            Length(max=2000, message='不得超过%(max)d字')
        ],
        description="内容",
        render_kw={
            "style": "height:300px;",
            "id": "content"
        }
    )
    submit = SubmitField(
        "编辑",
        render_kw={
            "class": "btn btn-primary"
        }
    )
|
[
"erics1996@yeah.net"
] |
erics1996@yeah.net
|
e45767923ad9500f39828ce955a405c8f8f532c1
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/Yfm3h3nT3apARd4gC_15.py
|
f993b37a58929b8e7570a3eb9c3d4bd005e786b1
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 996
|
py
|
"""
Create a function that takes a list consisting of dice rolls from 1-6. Return
the sum of your rolls with the following conditions:
1. If a 1 is rolled, that is bad luck. The next roll counts as 0.
2. If a 6 is rolled, that is good luck. The next roll is multiplied by 2.
3. The list length will always be 3 or higher.
### Examples
rolls([1, 2, 3]) ➞ 4
# The second roll, 2, counts as 0 as a result of rolling 1.
rolls([2, 6, 2, 5]) ➞ 17
# The 2 following the 6 was multiplied by 2.
rolls([6, 1, 1]) ➞ 8
# The first roll makes the second roll worth 2, but the
# second roll was still 1 so the third roll doesn't count.
### Notes
Even if a 6 is rolled after a 1, 6 isn't summed but the 6's "effect" still
takes place.
"""
def rolls(lst):
    """Sum dice rolls where a 1 zeroes the next roll and a 6 doubles it.

    Each roll's contribution depends only on the roll that preceded it:
    after a 1 it counts as 0, after a 6 it counts double, otherwise at
    face value. The first roll always counts at face value.
    """
    total = lst[0]
    for previous, current in zip(lst, lst[1:]):
        if previous == 1:
            contribution = 0
        elif previous == 6:
            contribution = 2 * current
        else:
            contribution = current
        total += contribution
    return total
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
d4b7f92e65fef54a3974ebb8980dc6f72e48d271
|
a616222ab88766f7b74e3224f3584feec558c39e
|
/log1.py
|
be88aeff621887a48400c7aff29b3cf3e669ef25
|
[] |
no_license
|
sundeepkakarla/python2
|
fdf736968961f44a6e83124d0e8636ec8432ea99
|
c8987fe1c86b91bdd58b6455e682d1c1fccffed8
|
refs/heads/master
| 2021-01-19T20:33:43.831511
| 2016-11-20T05:03:11
| 2016-11-20T05:03:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
import logging
# Demo of the root-logger convenience functions at DEBUG verbosity.
logging.basicConfig(level=logging.DEBUG)
logging.info('Program started')
# warning() replaces the deprecated warn() alias (removed in Python 3.13).
logging.warning("Warnig message")
# NOTE: exception() is intended for use inside an `except` block; called
# outside one it still logs at ERROR level but appends "NoneType: None".
logging.exception("Exception message")
logging.info("program ended")
|
[
"sambapython@gmail.com"
] |
sambapython@gmail.com
|
9255c63f88713989d591023379e353dc29631e7e
|
1749147fb24b13803d3437e0ae94250d67d618bd
|
/titanic/titanic_predict_surv.py
|
d04c873ba7f7ca7d7774a3f44dd32820e330f5ab
|
[] |
no_license
|
MJK0211/bit_seoul
|
65dcccb9336d9565bf9b3bc210b1e9c1c8bd840e
|
44d78ce3e03f0a9cf44afafc95879e4e92d27d54
|
refs/heads/master
| 2023-02-06T00:45:52.999272
| 2020-12-26T07:47:30
| 2020-12-26T07:47:30
| 311,308,648
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,726
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Titanic survival EDA / feature engineering on the Kaggle CSVs.
train = pd.read_csv('./data/titanic/csv/train.csv')
test = pd.read_csv('./data/titanic/csv/test.csv')
# print(train.shape) #(891, 12)
# print(test.shape) #(418, 11)
# print(train.head())
# print(train.tail())
# Survived - survival (0 = died, 1 = survived)
# Pclass - ticket class (1 = 1st, 2 = 2nd, 3 = 3rd)
# Sex - sex
# Age - age
# SibSp - number of siblings / spouses aboard
# Parch - number of parents / children aboard
# Ticket - ticket number
# Fare - fare paid
# Cabin - cabin number
# Embarked - port of embarkation (C = Cherbourg, Q = Queenstown, S = Southampton)
# Per-column missing-value counts for train
# print(train.isnull().sum())
# PassengerId 0
# Survived 0
# Pclass 0
# Name 0
# Sex 0
# Age 177
# SibSp 0
# Parch 0
# Ticket 0
# Fare 0
# Cabin 687
# Embarked 2
# Per-column missing-value counts for test
# print(test.isnull().sum())
# PassengerId 0
# Pclass 0
# Name 0
# Sex 0
# Age 86
# SibSp 0
# Parch 0
# Ticket 0
# Fare 1
# Cabin 327
# Embarked 0
# Stacked bar chart of survived vs. dead counts for one categorical feature.
def bar_chart(feature):
    survived = train[train['Survived']==1][feature].value_counts()
    dead = train[train['Survived']==0][feature].value_counts()
    df = pd.DataFrame([survived, dead])
    df.index = ['Survived', 'Dead']
    df.plot(kind='bar', stacked=True, figsize=(15,8))
# bar_chart('Embarked')
#Pclass, Sex, SibSp, Parch, Embarked
# Drop columns not used as features (Cabin/Embarked are mostly missing here).
train = train.drop(['Cabin', 'Embarked', 'Ticket', 'PassengerId'],axis=1)
test = test.drop(['Cabin', 'Embarked', 'Ticket', 'PassengerId'],axis=1)
# print(test)
# Impute missing Age with the per-sex mean, missing Fare with per-sex median.
train["Age"].fillna(train.groupby("Sex")["Age"].transform("mean"), inplace=True)
test["Age"].fillna(test.groupby("Sex")["Age"].transform("mean"), inplace=True)
test["Fare"].fillna(test.groupby("Sex")["Fare"].transform("median"), inplace=True)
# print(train.isnull().sum())
# print(test.isnull().sum())
train_test_data = [train, test]
# Pull the honorific ("Mr", "Miss", ...) out of each Name.
for dataset in train_test_data:
    dataset['Title'] = dataset['Name'].str.extract(' ([A-Za-z]+)\.', expand=False)
sex_mapping = {"male": 0, "female": 1}
title_mapping = {"Mr":0, "Miss":1, "Mrs":2, "Master":3, "Dr":3, "Rev":3, "Col":3, "Major":3,
                 "Mlle":3, "countess":3, "Ms":3, "Lady":3, "Jonkheer":3, "Don":3, "Dona":3,
                 "Mme":3, "Capt":3, "Sir":3 }
for dataset in train_test_data:
    dataset['Title'] = dataset['Title'].map(title_mapping) # encode Title as 0/1/2/3
    dataset['Sex'] = dataset['Sex'].map(sex_mapping) # encode Sex as male:0, female:1
    dataset.drop('Name', axis=1, inplace=True) # Name no longer needed once Title is extracted
# bar_chart('Title')
# plt.show()
# print(test)
# Binning - bucket Age into teens, 20s, 30s, 40s, 50s, and 60+
for dataset in train_test_data:
    dataset.loc[dataset['Age'] <= 19, 'Age'] = 0, # 0 : age <= 19
    dataset.loc[(dataset['Age'] > 19) & (dataset['Age'] <= 29), 'Age'] = 1, # 1 : 20s
    dataset.loc[(dataset['Age'] > 29) & (dataset['Age'] <= 39), 'Age'] = 2, # 2 : 30s
    dataset.loc[(dataset['Age'] > 39) & (dataset['Age'] <= 49), 'Age'] = 3, # 3 : 40s
    dataset.loc[(dataset['Age'] > 49) & (dataset['Age'] <= 59), 'Age'] = 4, # 4 : 50s
    dataset.loc[dataset['Age'] > 59, 'Age'] = 5, # 5 : 60 and over
# print(train.groupby('Survived')['Age'].value_counts())
# bar_chart('Age')
# plt.show()
|
[
"kimminjong0211@gmail.com"
] |
kimminjong0211@gmail.com
|
3cd04be986dd9051c15d607bd9520043a0296705
|
f0bc59dc9aab005ef977957e6ea6b91bbe430952
|
/2019-06-06-ten-tips-python-web-devs-kennedy/code/top_10_web_explore/ex07_viewmodels/pypi_vm/views/account_view.py
|
536097402ca04631d85bc4f41202854bac972a11
|
[
"Apache-2.0"
] |
permissive
|
Wintellect/WintellectWebinars
|
3ac0f6ae02d2d52eefb80f4f06d70f44e0d66095
|
5a59d9742c340022d58ec7e2cda69a1eba0feb53
|
refs/heads/master
| 2023-03-02T06:31:25.457579
| 2022-04-29T19:26:55
| 2022-04-29T19:26:55
| 87,122,981
| 68
| 124
|
Apache-2.0
| 2023-03-01T02:39:17
| 2017-04-03T21:33:32
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,404
|
py
|
import flask
from flask import Response
from pypi_vm.infrastructure import cookie_auth
from pypi_vm.infrastructure.view_modifiers import response
from pypi_vm.services import user_service
from pypi_vm.viewmodels.account.account_home_viewmodel import AccountHomeViewModel
from pypi_vm.viewmodels.account.login_viewmodel import LoginViewModel
from pypi_vm.viewmodels.account.register_viewmodel import RegisterViewModel
blueprint = flask.Blueprint('account', __name__, template_folder='templates')
# ################### INDEX #################################
@blueprint.route('/account')
@response(template_file='account/index.html')
def index():
    """Account home page; anonymous visitors are redirected to login."""
    view_model = AccountHomeViewModel()
    if view_model.user:
        return view_model.to_dict()
    return flask.redirect('/account/login')
# ################### REGISTER #################################
@blueprint.route('/account/register', methods=['GET'])
@response(template_file='account/register.html')
def register_get():
    """Render the empty registration form."""
    return RegisterViewModel().to_dict()
@blueprint.route('/account/register', methods=['POST'])
@response(template_file='account/register.html')
def register_post():
    """Validate the registration form, create the user, set the auth cookie,
    and redirect to the account page."""
    vm = RegisterViewModel()
    vm.validate()
    if vm.error:
        return vm.to_dict()
    # create user
    user = user_service.create_user(vm.email, vm.name, vm.password)
    # Fix: build the redirect response directly (same pattern as login_post).
    # The old code passed a Response *instance* as flask.redirect's
    # `Response` parameter, which expects a response *class*.
    response_val: Response = flask.Response(status=302,
                                            headers=dict(Location='/account'))
    cookie_auth.set_auth(response_val, user.id)
    return response_val
# ################### LOGIN #################################
@blueprint.route('/account/login', methods=['GET'])
@response(template_file='account/login.html')
def login_get():
    """Render the empty login form."""
    return LoginViewModel().to_dict()
@blueprint.route('/account/login', methods=['POST'])
@response(template_file='account/login.html')
def login_post():
    """Validate credentials; on success set the auth cookie and redirect home."""
    view_model = LoginViewModel()
    view_model.validate()
    if view_model.error:
        return view_model.to_dict()
    redirect_response: Response = flask.Response(
        status=302, headers={'Location': '/account'})
    cookie_auth.set_auth(redirect_response, view_model.user.id)
    return redirect_response
# ################### LOGOUT #################################
@blueprint.route('/account/logout')
def logout():
    """Clear the auth cookie and send the user back to the home page."""
    redirect_response: Response = flask.Response(
        status=302, headers={'Location': '/'})
    cookie_auth.logout(redirect_response)
    return redirect_response
|
[
"mikeckennedy@gmail.com"
] |
mikeckennedy@gmail.com
|
30a54b9384bc265e2460c72854893e255db26a59
|
c5b21cec89c743475b918bcbb0f36c492417f295
|
/djangomysqlrestcrudswagger/djangomysqlrestcrudswagger/wsgi.py
|
fefb24816b834800701be025cdd3ba520f88436f
|
[] |
no_license
|
roytuts/django
|
f41acc8ca9d06177e76fc4b4824a5d6cbbf83328
|
7f32edcfb6b197680c189e3fe6e0d9971ea87e91
|
refs/heads/master
| 2023-06-10T11:54:12.184492
| 2021-06-28T04:53:40
| 2021-06-28T04:53:40
| 282,702,115
| 17
| 46
| null | 2020-12-06T22:32:24
| 2020-07-26T17:43:52
|
Python
|
UTF-8
|
Python
| false
| false
| 429
|
py
|
"""
WSGI config for djangomysqlrestcrudswagger project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangomysqlrestcrudswagger.settings')
application = get_wsgi_application()
|
[
"email@email.com"
] |
email@email.com
|
b55bf0d5a78fc51ec3c3a90b25964149808978ac
|
95857cfe06ba533fc38e4266b3ca40914505ec9f
|
/01_hello.py
|
bd65d05e00075141282d58aa5a1b406435df8d22
|
[] |
no_license
|
ccwu0918/python_basic
|
184b8d974ea8e0f615b0fd046a59177b7d045099
|
e5ff6d45dc80805bcda0d9949254f5ad2acfc306
|
refs/heads/master
| 2023-06-10T18:59:52.705045
| 2021-07-02T16:05:55
| 2021-07-02T16:05:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
# Practice: Hello World and comment symbols
print("Hello World")
"""
單行註解,井字號
大段落文字的註解,三個引號
"""
a = 3
b = 7
c = a * b
print(a, "*", b, "=", c)
print(7%3)
name = "Vincent "
print("My name is ", name)
# Multiple variables can be assigned in a single statement
a, b = 3, 7
# Exercise: how would you swap two numbers?
|
[
"shinjia168@gmail.com"
] |
shinjia168@gmail.com
|
2b2b1b0623fbd155f773f0883f1016437d0e214b
|
af7b245364a2532fb1fd1bff915be196830d8699
|
/begin/urls.py
|
c50c5aa2a5b87ae23811ca38b32cca44550e600c
|
[] |
no_license
|
NabhyaKhoria/NBS
|
433db9962031624db790de3e76450a31ce6287fb
|
25792daf1c10eddb66c1e6ebf6ca06acc8ae3b73
|
refs/heads/main
| 2023-07-01T16:52:38.850449
| 2021-07-10T18:22:19
| 2021-07-10T18:22:19
| 384,763,911
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
from django.contrib import admin
from django.urls import path
from . import views
# URL routes for this app: the site root maps to the home view.
urlpatterns = [
    path('',views.home, name='home'),
]
|
[
"you@example.com"
] |
you@example.com
|
85d8bfbe235614b4b0ed925893f339a1255e6207
|
29fd3daff8c31764c00777e67d2cc9b3e94ba761
|
/examples/appB_examples/pdb_demo_trace.py
|
36218fc7e9e2118913539b455203de4f100688d2
|
[] |
no_license
|
mwoinoski/crs1906
|
06a70a91fc99e2d80e2ed3cea5724afa22dce97d
|
202f7cc4cae684461f1ec2c2c497ef20211b3e5e
|
refs/heads/master
| 2023-06-23T17:13:08.163430
| 2023-06-12T21:44:39
| 2023-06-12T21:44:39
| 39,789,380
| 1
| 2
| null | 2022-01-26T20:43:18
| 2015-07-27T17:54:56
|
Python
|
UTF-8
|
Python
| false
| false
| 566
|
py
|
"""pdb_demo_trace.py - Example of pdb from chapter 4"""
import pdb
class PdbDemo(object):
    """Demo of pdb.set_trace(): breaks into the debugger on every loop pass."""

    def __init__(self, name, num_loops):
        self._name = name
        self._count = num_loops

    def count(self):
        """Loop self._count times, entering pdb each iteration.

        Returns a summary string of how many loops actually executed.
        """
        executed = 0
        for i in range(self._count):
            pdb.set_trace()
            print(i)
            executed = i + 1
        # Fix: track the executed count explicitly. The original referenced
        # the loop variable after the loop, raising NameError when
        # num_loops <= 0.
        return '{} executed {} of {} loops'\
            .format(self._name, executed, self._count)
if __name__ == '__main__':
    # Running this script drops into the interactive debugger five times
    # (one pdb.set_trace() per loop iteration).
    obj_name = 'pdb demo'
    print('Starting ' + obj_name + '...')
    pd = PdbDemo(obj_name, 5)
    result = pd.count()
    print(result)
|
[
"michaelw@articulatedesign.us.com"
] |
michaelw@articulatedesign.us.com
|
bb40e04886ed2422e53222096f14b7e314049118
|
f281d0d6431c1b45c6e5ebfff5856c374af4b130
|
/DAY100~199/DAY197-BOJ6002-Job Hunt/joohyuk.py
|
6745ebd98856cfab08e5396547a8f43ac8b9118d
|
[] |
no_license
|
tachyon83/code-rhino
|
ec802dc91dce20980fac401b26165a487494adb4
|
b1af000f5798cd12ecdab36aeb9c7a36f91c1101
|
refs/heads/master
| 2022-08-13T09:10:16.369287
| 2022-07-30T11:27:34
| 2022-07-30T11:27:34
| 292,142,812
| 5
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
import sys
si = sys.stdin.readline
# BOJ 6002 / USACO "Job Hunt". Presumably: d = money earned per city visit,
# p = number of free paths, c = number of cities, f = number of paid
# flights, s = start city -- TODO confirm against the problem statement.
d, p, c, f, s = [int(e) for e in si().split()]
# g[a] holds outgoing edges [destination, cost]; earnings[x] is the best
# total money attainable upon arriving at city x.
g, earnings = [[] for _ in range(c+1)], [0 for _ in range(c+1)]
while p:
    p -= 1
    a, b = [int(e) for e in si().split()]
    g[a].append([b, 0])
while f:
    f -= 1
    a, b, w = [int(e) for e in si().split()]
    g[a].append([b, w])
earnings[s], flag = d, False
# Bellman-Ford-style relaxation maximizing earnings: moving along an edge
# earns d at the destination minus the edge cost.
for i in range(c):
    for city in range(c+1):
        for np in g[city]:
            if earnings[np[0]] < d-np[1]+earnings[city]:
                earnings[np[0]] = d-np[1]+earnings[city]
                # An improvement on the c-th (final) pass means a
                # profit-positive cycle exists, so earnings are unbounded.
                if i == c-1:
                    flag = True
if flag:
    print(-1)
else:
    print(max(earnings))
|
[
"noreply@github.com"
] |
tachyon83.noreply@github.com
|
70e74cc0a23a38a39b7158bb70228fdd02552163
|
a28a3665af439ad3d9f401d180856b0489341ffd
|
/plot_dumped_spikes.py
|
5781e18c464a56d2fef9b00a92fb8f074783e2eb
|
[] |
no_license
|
Jegp/spike_conv_nets
|
c75f8bfc8f977ed94e4bc8d6d37cd02ac65b5961
|
c11b469b6d7896d787c77dca789be26f3d3d98b4
|
refs/heads/master
| 2023-06-24T03:20:16.378273
| 2021-07-16T17:20:43
| 2021-07-16T17:20:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
import numpy as np
import matplotlib.pyplot as plt
# Measured dumped-spike counts keyed by input rate (Hz); each list holds
# the counts for 1..8 source populations.
dumps_per_rate = {
    5: [0, 0, 0, 0, 0, 0, 4, 6],
    10: [0, 0, 0, 0, 0, 0, 9, 37],
    25: [0, 0, 1, 181, 1204, 3426, 6153, 8864],
    50: [0, 68, 2174, 7877, 13170, 18043, 22405, 26850],
    100: [0, 5580, 16308, 25460, 33980, 42078, 53812, 61430],
    150: [111, 14327, 28150, 40500, 56905, 64934, 71760, 78475],
    200: [1604, 22257, 38695, 58896, 68124, 77028, 85174, 94407],
}
x_ticks = sorted(dumps_per_rate.keys())
# Re-index by number of sources: dumps_per_input[k] is the series of counts
# for k sources across the sorted input rates.
dumps_per_input = {i+1: [] for i in range(len(dumps_per_rate[5]))}
for k in x_ticks:
    for i, v in enumerate(dumps_per_rate[k]):
        dumps_per_input[i+1].append(v)
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
# Shaded bands mark qualitative load regimes of the system.
plt.axhspan(38000, 56000, color='orange', label="Back pressure", alpha=0.25)
plt.axhspan(56000, 95000, color='red', label="Dead", alpha=0.25)
for i in dumps_per_input:
    ax.semilogy(x_ticks, dumps_per_input[i], label="{} sources".format(i),
                marker='.', linewidth=2)
ax.set_xticks(x_ticks)
ax.set_xlabel("Input rate (Hz)")
ax.set_ylabel("Dumped spikes")
plt.legend(bbox_to_anchor=(1.01, 1), loc='upper left')
plt.grid()
plt.tight_layout()
plt.savefig("dumped_spikes_per_input_rate_and_num_sources.pdf")
plt.show()
|
[
"chanokin@gmail.com"
] |
chanokin@gmail.com
|
2bbf7771c9c7a453bd61e602ff988dfd4a3b4ae0
|
ac216a2cc36f91625e440247986ead2cd8cce350
|
/recipes/recipes/infra_frontend_tester.py
|
35f8b23c7f2b499284c5540051977b9d62c9f7c2
|
[
"BSD-3-Clause"
] |
permissive
|
xinghun61/infra
|
b77cdc566d9a63c5d97f9e30e8d589982b1678ab
|
b5d4783f99461438ca9e6a477535617fadab6ba3
|
refs/heads/master
| 2023-01-12T21:36:49.360274
| 2019-10-01T18:09:22
| 2019-10-01T18:09:22
| 212,168,656
| 2
| 1
|
BSD-3-Clause
| 2023-01-07T10:18:03
| 2019-10-01T18:22:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,772
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe modules the engine injects into RunSteps/GenTests via `api`.
DEPS = [
  'depot_tools/bot_update',
  'depot_tools/gclient',
  'infra_checkout',
  'recipe_engine/buildbucket',
  'recipe_engine/cipd',
  'recipe_engine/context',
  'recipe_engine/path',
  'recipe_engine/platform',
  'recipe_engine/properties',
  'recipe_engine/step',
]
def RunSteps(api):
  """Check out the CL's project, install Node.js via CIPD, and dispatch to
  the matching frontend test suite (infra vs. infra_internal)."""
  assert api.platform.is_linux, 'Unsupported platform, only Linux is supported.'
  cl = api.buildbucket.build.input.gerrit_changes[0]
  project_name = cl.project
  assert project_name in ('infra/infra', 'infra/infra_internal'), (
      'unknown project: "%s"' % project_name)
  patch_root = project_name.split('/')[-1]
  internal = (patch_root == 'infra_internal')
  api.gclient.set_config(patch_root)
  api.bot_update.ensure_checkout(patch_root=patch_root)
  api.gclient.runhooks()
  # Pin a Node.js toolchain from CIPD and put its bin/ on PATH for npm/npx.
  packages_dir = api.path['start_dir'].join('packages')
  ensure_file = api.cipd.EnsureFile()
  ensure_file.add_package('infra/nodejs/nodejs/${platform}',
                          'node_version:10.15.3')
  api.cipd.ensure(packages_dir, ensure_file)
  node_path = api.path['start_dir'].join('packages', 'bin')
  env = {
      'PATH': api.path.pathsep.join([str(node_path), '%(PATH)s'])
  }
  if internal:
    RunInfraInternalFrontendTests(api, env)
  else:
    RunInfraFrontendTests(api, env)
def RunInfraInternalFrontendTests(api, env):
  """Run the chromiumdash web-component tests (infra_internal checkout)."""
  cwd = api.path['checkout'].join('appengine', 'chromiumdash')
  with api.context(env=env, cwd=cwd):
    api.step('chromiumdash npm install', ['npm', 'ci'])
    api.step('chromiumdash bower install', ['npx', 'bower', 'install'])
    api.step(
        'chromiumdash run-wct', ['npx', 'run-wct', '--dep', 'third_party'])
    api.step(
        'chromiumdash generate js coverage report', ['npx', 'nyc', 'report'])
def RunInfraFrontendTests(api, env):
  """Run the frontend test suites of the public infra checkout.

  findit/chopsui/sheriff-o-matic use web-component-tester directly (with
  per-app flags); monorail and chopsdash go through RunFrontendTests.
  """
  cwd = api.path['checkout'].join('appengine', 'findit')
  with api.context(env=env, cwd=cwd):
    api.step('findit npm install', ['npm', 'ci'])
    api.step('findit run-wct', ['npx', 'run-wct', '--base', 'ui/',
                                '--dep', 'third_party'])
    api.step('findit generate js coverage report', ['npx', 'nyc', 'report'])
  cwd = api.path['checkout'].join('crdx', 'chopsui')
  with api.context(env=env, cwd=cwd):
    api.step('chopsui npm install', ['npm', 'ci'])
    api.step('chopsui bower install', ['npx', 'bower', 'install'])
    api.step('chopsui run-wct', ['npx', 'run-wct', '--prefix', 'test',
                                 '--dep', 'bower_components'])
    api.step('chopsui generate js coverage report', ['npx', 'nyc', 'report'])
  cwd = api.path['checkout'].join('appengine', 'monorail')
  RunFrontendTests(api, env, cwd, 'monorail')
  cwd = api.path['checkout'].join('go', 'src', 'infra', 'appengine',
                                  'dashboard', 'frontend')
  RunFrontendTests(api, env, cwd, 'chopsdash')
  cwd = api.path['checkout'].join('go', 'src', 'infra', 'appengine',
                                  'sheriff-o-matic', 'frontend')
  with api.context(env=env, cwd=cwd):
    api.step('sheriff-o-matic npm install', ['npm', 'ci'])
    api.step('sheriff-o-matic bower install', ['npx', 'bower', 'install'])
    api.step('sheriff-o-matic run-wct', ['npx', 'run-wct'])
    api.step('sheriff-o-matic generate js coverage report',
             ['npx', 'nyc', 'report'])
def RunFrontendTests(api, env, cwd, app_name):
  """Run `npm ci` then `npm run test` for one frontend app located at cwd."""
  with api.context(env=env, cwd=cwd):
    api.step(('%s npm install' % app_name), ['npm', 'ci'])
    api.step(('%s test' % app_name), ['npm', 'run', 'test'])
def GenTests(api):
  # Recipe simulation tests: one tryjob per supported project.
  yield (
      api.test('basic') +
      api.buildbucket.try_build(project='infra/infra'))
  yield (
      api.test('basic-internal') +
      api.buildbucket.try_build(project='infra/infra_internal'))
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
1ce69570b147409c64f71b6f95f4f9f4424df5fd
|
847273de4b1d814fab8b19dc651c651c2d342ede
|
/.history/sok2_20180605144707.py
|
415fc2182c0374df330c14fda7cf3dfab26de737
|
[] |
no_license
|
Los4U/sudoku_in_python
|
0ba55850afcffeac4170321651620f3c89448b45
|
7d470604962a43da3fc3e5edce6f718076197d32
|
refs/heads/master
| 2020-03-22T08:10:13.939424
| 2018-07-04T17:21:13
| 2018-07-04T17:21:13
| 139,749,483
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
# Interactive sudoku grid editor (editor-history snapshot, repaired).
# Board state: nine rows of nine cells, 0 = empty.
row1 = [0,0,0,0,0,0,0,0,0]
row2 = [0,0,0,5,0,6,0,0,0]
row3 = [0,0,1,0,0,0,0,3,0]
row4 = [0,9,5,0,0,0,2,0,0]
row5 = [0,0,0,0,0,1,6,0,7]
row6 = [1,0,6,0,0,9,0,0,5]
row7 = [7,0,0,8,0,3,9,0,0]
row8 = [0,3,8,9,0,0,0,2,0]
row9 = [0,5,0,0,2,0,7,0,0]
print(row1)
print(row2)
print(row3)
print("")
print(row4)
print(row5)
print(row6)
print("")
print(row7)
print(row8)
print(row9)
while True:
    # Expected input format: "row col value", e.g. "1 3 5".
    x = input("Wprowadz x y z:")
    try:
        if int(x[0]) == 1:
            row1[int(x[2]) - 1] = x[4]
            print("ok")
    except ValueError:
        # A non-digit input is caught here instead of crashing the loop.
        print("Wprowadz cyfrę!")
        continue
    r11 = row1[0:3]
    r12 = row1[3:6]
    r13 = row1[6:9]
    print(r11)
    # Fixed syntax error: the original `print(*r11, +"-"+, ...)` did not parse.
    print(*r11, "-", *r12, "-", *r13, sep=' ')
    print(row2)
    print(row3)
    print("")
    print(row4)
    print(row5)
    print(row6)
    print("")
    print(row7)
    print(row8)
    print(row9)
|
[
"inz.kamil.wos@gmail.com"
] |
inz.kamil.wos@gmail.com
|
4e2cb5d3446ab650d78bd50b0f6c501c5b253419
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-iec/huaweicloudsdkiec/v1/model/create_firewall_request.py
|
5f5b939d3a460d290d4882cf2d16021fd5122cc7
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,151
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateFirewallRequest:
    """Auto-generated request wrapper for the IEC CreateFirewall API.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """
    # Attribute names whose values are masked as "****" in to_dict()
    # (none for this model).
    sensitive_list = []
    openapi_types = {
        'body': 'CreateFirewallRequestBody'
    }
    attribute_map = {
        'body': 'body'
    }
    def __init__(self, body=None):
        """CreateFirewallRequest

        The model defined in huaweicloud sdk

        :param body: Body of the CreateFirewallRequest
        :type body: :class:`huaweicloudsdkiec.v1.CreateFirewallRequestBody`
        """
        self._body = None
        self.discriminator = None
        if body is not None:
            self.body = body
    @property
    def body(self):
        """Gets the body of this CreateFirewallRequest.

        :return: The body of this CreateFirewallRequest.
        :rtype: :class:`huaweicloudsdkiec.v1.CreateFirewallRequestBody`
        """
        return self._body
    @body.setter
    def body(self, body):
        """Sets the body of this CreateFirewallRequest.

        :param body: The body of this CreateFirewallRequest.
        :type body: :class:`huaweicloudsdkiec.v1.CreateFirewallRequestBody`
        """
        self._body = body
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values of attributes listed in sensitive_list.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateFirewallRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
db5d2df161a6e83e117857449c89f34e77bbc1e9
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/46/usersdata/133/19356/submittedfiles/funcoes1.py
|
6e1d19cdc94d09fc1fc04dfb30e04fc08e20562e
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,844
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
def crescente(lista):
    """Return True when the list is in non-decreasing order.

    Empty and single-element lists count as crescente (no violating pair).
    """
    return all(atual <= seguinte
               for atual, seguinte in zip(lista, lista[1:]))
def decrescente(lista):
    """Return True when the list is in non-increasing order.

    Empty and single-element lists count as decrescente.
    """
    return all(atual >= seguinte
               for atual, seguinte in zip(lista, lista[1:]))
def iguais(lista):
    """Return True when at least one pair of adjacent elements is equal."""
    return any(atual == seguinte
               for atual, seguinte in zip(lista, lista[1:]))
def insere_lista(lista, n):
    """Read n integers from stdin, append them to `lista`, and return it.

    Bug fix: this file runs under Python 3 (print is called as a function),
    where input() returns str. The elements were therefore compared
    lexicographically by crescente/decrescente ('10' < '9'), giving wrong
    answers for multi-digit numbers. Each entry is now converted to int.
    """
    for _ in range(n):
        # Prompt string kept byte-identical to the original.
        lista.append(int(input('Digite o elemento:')))
    return lista
# Read three lists of the same length and, for each one, print exactly
# three S/N answers: is it ascending, descending, has-adjacent-duplicates.
# NOTE(review): indentation was reconstructed -- the nesting below is the
# only arrangement that prints exactly three lines per list; confirm
# against the original file.
n = int(input('Digite o número de elementos da lista:'))
a = []
b = []
c = []
print('Preencha a lista 1:')
a = insere_lista(a, n)
print('Preencha a lista 2:')
b = insere_lista(b, n)
print('Preencha a lista 3:')
c = insere_lista(c, n)
# List a: ascending implies not descending and no duplicates (S, N, N).
if crescente(a):
    print('S')
    print('N')
    print('N')
else:
    print('N')
    if decrescente(a):
        print ('S')
        print('N')
    else:
        print('N')
        if iguais(a):
            print('S')
        else:
            print('N')
# Same three answers for list b.
if crescente(b):
    print('S')
    print('N')
    print('N')
else:
    print('N')
    if decrescente(b):
        print ('S')
        print('N')
    else:
        print('N')
        if iguais(b):
            print('S')
        else:
            print('N')
# Same three answers for list c.
if crescente(c):
    print('S')
    print('N')
    print('N')
else:
    print('N')
    if decrescente(c):
        print ('S')
        print('N')
    else:
        print('N')
        if iguais(c):
            print('S')
        else:
            print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
8d0f295d36ca43741e772ae4a4d619e059a69763
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_pitcher.py
|
4935937eb1a295682a937ebc12940f067750b650
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
# class header (original comment read "#calss header" -- typo fixed)
class _PITCHER():
    """Generated lexicon node for the noun 'pitcher'."""

    def __init__(self,):
        self.name = "PITCHER"
        self.definitions = [u'a large, round container for liquids that has a flat base, a handle, and a very narrow raised opening at the top for pouring: ', u'a container for holding liquids that has a handle and a shaped opening at the top for pouring: ', u'a player who pitches the ball in a baseball game']
        # Graph links and metadata, all empty in this generated node.
        self.parents = []
        self.childen = []  # NOTE(review): likely a typo for "children"; kept as-is since external readers may use this name
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'

    def run(self, obj1 = [], obj2 = []):
        # NOTE(review): mutable default arguments are shared across calls --
        # harmless here because they are never mutated or even read.
        return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
3201eddccde78cd5194c5e90ad30468489386f20
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/api/src/opentrons/hardware_control/emulation/module_server/server.py
|
5a3d696eb7b834587678e7f64d99bb63f8ebdd00
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,478
|
py
|
"""Server notifying of module connections."""
import asyncio
import logging
from typing import Dict, Set
from opentrons.hardware_control.emulation.module_server.models import (
ModuleConnection,
Message,
)
from opentrons.hardware_control.emulation.proxy import ProxyListener
from opentrons.hardware_control.emulation.settings import ModuleServerSettings
from typing_extensions import Final
log = logging.getLogger(__name__)
MessageDelimiter: Final = b"\n"
class ModuleStatusServer(ProxyListener):
    """The module status server is the emulator equivalent of inotify. A client
    can know when an emulated module connects or disconnects.

    Clients connect and read JSON messages (See models module), one per line,
    framed by MessageDelimiter.
    """

    def __init__(self, settings: ModuleServerSettings) -> None:
        """Constructor

        Args:
            settings: app settings
        """
        self._settings = settings
        # Currently connected modules, keyed by their unique identifier.
        self._connections: Dict[str, ModuleConnection] = {}
        # Stream writers of every client subscribed to status updates.
        self._clients: Set[asyncio.StreamWriter] = set()

    def on_server_connected(
        self, server_type: str, client_uri: str, identifier: str
    ) -> None:
        """Called when a new module has connected.

        Args:
            server_type: the type of module
            client_uri: the url string for a driver to connect to
            identifier: unique id for connection

        Returns: None
        """
        log.info(f"On connected {server_type} {client_uri} {identifier}")
        connection = ModuleConnection(
            module_type=server_type, url=client_uri, identifier=identifier
        )
        self._connections[identifier] = connection
        # Notify every subscribed client of the new connection.
        for c in self._clients:
            c.write(
                Message(status="connected", connections=[connection]).json().encode()
            )
            # Fix: use the shared MessageDelimiter constant (== b"\n") instead
            # of a hard-coded literal, consistent with every other write path.
            c.write(MessageDelimiter)

    def on_server_disconnected(self, identifier: str) -> None:
        """Called when a module has disconnected.

        Args:
            identifier: unique id for the connection

        Returns: None
        """
        log.info(f"On disconnected {identifier}")
        try:
            connection = self._connections[identifier]
            del self._connections[identifier]
            # Notify every subscribed client of the disconnection.
            for c in self._clients:
                c.write(
                    Message(status="disconnected", connections=[connection])
                    .json()
                    .encode()
                )
                c.write(MessageDelimiter)
        except KeyError:
            log.exception("Failed to find identifier")

    async def run(self) -> None:
        """Run the server."""
        server = await asyncio.start_server(
            self._handle_connection, host=self._settings.host, port=self._settings.port
        )
        await server.serve_forever()

    async def _handle_connection(
        self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
    ) -> None:
        """Handle a client connection to the server."""
        log.info("Client connected to module server.")

        # A client connected. Send a dump of all connected modules.
        m = Message(status="dump", connections=list(self._connections.values()))
        writer.write(m.json().encode())
        writer.write(MessageDelimiter)

        # Keep the writer registered until the client closes its end
        # (read() returns b"" at EOF).
        self._clients.add(writer)

        while True:
            if b"" == await reader.read():
                self._clients.remove(writer)
                break

        log.info("Client disconnected from module server.")
|
[
"noreply@github.com"
] |
Opentrons.noreply@github.com
|
0058c0c97e9d9d87841883e5c30387db9b77b14e
|
bdf86d69efc1c5b21950c316ddd078ad8a2f2ec0
|
/venv/Lib/site-packages/scrapy/selector/lxmlsel.py
|
c610d120ad1f1822117219bab6d57d09323f7f09
|
[] |
no_license
|
DuaNoDo/PythonProject
|
543e153553c58e7174031b910fd6451399afcc81
|
2c5c8aa89dda4dec2ff4ca7171189788bf8b5f2c
|
refs/heads/master
| 2020-05-07T22:22:29.878944
| 2019-06-14T07:44:35
| 2019-06-14T07:44:35
| 180,941,166
| 1
| 1
| null | 2019-06-04T06:27:29
| 2019-04-12T06:05:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,340
|
py
|
"""
XPath selectors based on lxml
"""
from scrapy.utils.deprecate import create_deprecated_class
from .unified import Selector, SelectorList
__all__ = ['HtmlXPathSelector', 'XmlXPathSelector', 'XPathSelector',
'XPathSelectorList']
def _xpathselector_css(self, *a, **kw):
    # Replacement for .css() on the deprecated XPath selector classes:
    # always raises to direct callers to the unified scrapy.Selector.
    raise RuntimeError('.css() method not available for %s, '
                       'instantiate scrapy.Selector '
                       'instead' % type(self).__name__)

# Deprecated alias of Selector: emits a warning on use and disables .css().
XPathSelector = create_deprecated_class(
    'XPathSelector',
    Selector,
    {
        '__slots__': (),
        '_default_type': 'html',
        'css': _xpathselector_css,
    },
    new_class_path='scrapy.Selector',
    old_class_path='scrapy.selector.XPathSelector',
)

# Deprecated XML-flavoured variant (default type 'xml').
XmlXPathSelector = create_deprecated_class(
    'XmlXPathSelector',
    XPathSelector,
    clsdict={
        '__slots__': (),
        '_default_type': 'xml',
    },
    new_class_path='scrapy.Selector',
    old_class_path='scrapy.selector.XmlXPathSelector',
)

# Deprecated HTML-flavoured variant (default type 'html').
HtmlXPathSelector = create_deprecated_class(
    'HtmlXPathSelector',
    XPathSelector,
    clsdict={
        '__slots__': (),
        '_default_type': 'html',
    },
    new_class_path='scrapy.Selector',
    old_class_path='scrapy.selector.HtmlXPathSelector',
)

# Deprecated alias of SelectorList.
XPathSelectorList = create_deprecated_class('XPathSelectorList', SelectorList)
|
[
"teadone@naver.com"
] |
teadone@naver.com
|
a1805cdf71db4000bbdb1c7c3b2df4c64fdb8be0
|
fa6776ade56e05d1448ddb6c81cb78ed207cc7a9
|
/mlfromscratch/unsupervised_learning/genetic_algorithm.py
|
005dc1993e1ab570475c62c5a321edb4134e1d7d
|
[
"MIT"
] |
permissive
|
lyrl/ML-From-Scratch
|
28eef91c84c8595f43745d8a6a3f6d082ea23346
|
a7d43fd1eb352035179c5f333bec082d0083362c
|
refs/heads/master
| 2021-06-27T19:27:08.053663
| 2017-09-15T08:43:55
| 2017-09-15T08:43:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,423
|
py
|
import string
import numpy as np
class GeneticAlgorithm():
    """An implementation of a Genetic Algorithm which will try to produce the user
    specified target string.

    Parameters:
    -----------
    target_string: string
        The string which the GA should try to produce.
    population_size: int
        The number of individuals (possible solutions) in the population.
    mutation_rate: float
        The rate (or probability) of which the alleles (chars in this case) should be
        randomly changed.
    """
    def __init__(self, target_string, population_size, mutation_rate):
        self.target = target_string
        self.population_size = population_size
        self.mutation_rate = mutation_rate
        # Fix: string.letters exists only on Python 2 and raises
        # AttributeError on Python 3; string.ascii_letters has the same
        # value and is available on both.
        self.letters = [" "] + list(string.ascii_letters)

    def _initialize(self):
        """ Initialize population with random strings """
        self.population = []
        for _ in range(self.population_size):
            # Select random letters as new individual
            individual = "".join(np.random.choice(self.letters, size=len(self.target)))
            self.population.append(individual)

    def _calculate_fitness(self):
        """ Calculates the fitness of each individual in the population """
        population_fitness = []
        for individual in self.population:
            # Calculate loss as the alphabetical distance between
            # the characters in the individual and the target string
            loss = 0
            for i in range(len(individual)):
                letter_i1 = self.letters.index(individual[i])
                letter_i2 = self.letters.index(self.target[i])
                loss += abs(letter_i1 - letter_i2)
            # Small epsilon keeps a perfect match (loss == 0) finite.
            fitness = 1 / (loss + 1e-6)
            population_fitness.append(fitness)
        return population_fitness

    def _mutate(self, individual):
        """ Randomly change the individual's characters with probability
        self.mutation_rate """
        individual = list(individual)
        for j in range(len(individual)):
            # Make change with probability mutation_rate
            if np.random.random() < self.mutation_rate:
                individual[j] = np.random.choice(self.letters)
        # Return mutated individual as string
        return "".join(individual)

    def _crossover(self, parent1, parent2):
        """ Create children from parents by single-point crossover """
        # Select random crossover point
        cross_i = np.random.randint(0, len(parent1))
        child1 = parent1[:cross_i] + parent2[cross_i:]
        child2 = parent2[:cross_i] + parent1[cross_i:]
        return child1, child2

    def run(self, iterations):
        """ Evolve the population for at most `iterations` generations,
        printing the fittest candidate of each generation; stops early when
        the target string is produced. """
        # Initialize new population
        self._initialize()

        for epoch in range(iterations):
            population_fitness = self._calculate_fitness()

            fittest_individual = self.population[np.argmax(population_fitness)]
            highest_fitness = max(population_fitness)

            # If we have found individual which matches the target => Done
            if fittest_individual == self.target:
                break

            # Set the probabilities that the individuals should be selected as parents
            # proportionate to the individuals fitness
            parent_probs = [fitness / sum(population_fitness) for fitness in population_fitness]

            # Determine the next generation
            new_population = []
            for i in np.arange(0, self.population_size, 2):
                # Select two parents randomly according to probabilities
                parents = np.random.choice(self.population, size=2, p=parent_probs, replace=False)
                # Perform crossover to produce offspring
                child1, child2 = self._crossover(parents[0], parents[1])
                # Save mutated offspring for next generation
                new_population += [self._mutate(child1), self._mutate(child2)]

            print ("[%d Closest Candidate: '%s', Fitness: %.2f]" % (epoch, fittest_individual, highest_fitness))
            self.population = new_population

        print ("[%d Answer: '%s']" % (epoch, fittest_individual))
def main():
    """Demo driver: print a banner and evolve toward a fixed target string."""
    target_string = "Genetic Algorithm"
    population_size = 100
    mutation_rate = 0.05
    genetic_algorithm = GeneticAlgorithm(target_string,
                                        population_size,
                                        mutation_rate)

    # Banner and parameter summary (strings kept exactly as in the original).
    print ("")
    print ("+--------+")
    print ("| GA |")
    print ("+--------+")
    print ("Description: Implementation of a Genetic Algorithm which aims to produce")
    print ("the user specified target string. This implementation calculates each")
    print ("candidate's fitness based on the alphabetical distance between the candidate")
    print ("and the target. A candidate is selected as a parent with probabilities proportional")
    print ("to the candidate's fitness. Reproduction is implemented as a single-point")
    print ("crossover between pairs of parents. Mutation is done by randomly assigning")
    print ("new characters with uniform probability.")
    print ("")
    print ("Parameters")
    print ("----------")
    print ("Target String: '%s'" % target_string)
    print ("Population Size: %d" % population_size)
    print ("Mutation Rate: %s" % mutation_rate)
    print ("")

    # Cap the search at 1000 generations.
    genetic_algorithm.run(iterations=1000)
|
[
"eriklindernoren@live.se"
] |
eriklindernoren@live.se
|
52be8e15b25ae2d05f9c682e3964907f3b834520
|
59cf95f3344bc8284b325691ac9e01a988d0390a
|
/Session21.py
|
2ee32100cbd79b7d8ff851b50d644c7d8c1a0dd2
|
[] |
no_license
|
ishantk/GW2021PY1
|
8932282895c8a3a53d64f83e2710541beca8e4a7
|
0d20ad4103f90568e165b35ff571c4672de16147
|
refs/heads/master
| 2023-08-01T05:02:39.358314
| 2021-09-17T12:09:40
| 2021-09-17T12:09:40
| 387,378,623
| 0
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
# Demo of the os module: process info, path predicates, and os.walk.
import os

print(os.name)
print(os.uname())    # NOTE(review): os.uname() is POSIX-only; fails on Windows
print(os.getlogin())
print(os.getcwd())
print(os.getppid())

# Hard-coded, machine-specific paths -- adjust before running elsewhere.
path_to_directory = "/Users/ishantkumar/Downloads/profilepage"
path_to_file = "/Users/ishantkumar/Downloads/Ishant.pdf"

print(os.path.isdir(path_to_directory))
print(os.path.isfile(path_to_file))

# path_to_directory = "/Users/ishantkumar/Downloads/GW2020PY1"
# os.mkdir(path_to_directory)

# os.walk yields (dirpath, dirnames, filenames) tuples, top-down.
files = os.walk(path_to_directory)
files = list(files)

for file in files:
    print(file)

# files[0] is the top directory's tuple.
# NOTE(review): raises IndexError when the directory does not exist
# (os.walk then yields nothing).
for data in files[0]:
    print(data, len(data))

print("Directories:", len(files[0][1]))
print("Files:", len(files[0][2]))

# Mini Project: File Explorer -> Extract different types of files :)
|
[
"er.ishant@gmail.com"
] |
er.ishant@gmail.com
|
9d4313101fa758d947c55a7a942fc0fbb6582ba0
|
1c2428489013d96ee21bcf434868358312f9d2af
|
/ultracart/models/coupon_amount_off_subtotal_with_items_purchase.py
|
41fa3bf9b7bd54a00159e397d0b0dd8c1f743eb2
|
[
"Apache-2.0"
] |
permissive
|
UltraCart/rest_api_v2_sdk_python
|
7821a0f6e0e19317ee03c4926bec05972900c534
|
8529c0bceffa2070e04d467fcb2b0096a92e8be4
|
refs/heads/master
| 2023-09-01T00:09:31.332925
| 2023-08-31T12:52:10
| 2023-08-31T12:52:10
| 67,047,356
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,078
|
py
|
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CouponAmountOffSubtotalWithItemsPurchase(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'currency_code': 'str',
        'discount_amount': 'float',
        'items': 'list[str]',
        'required_purchase_quantity': 'int'
    }

    attribute_map = {
        'currency_code': 'currency_code',
        'discount_amount': 'discount_amount',
        'items': 'items',
        'required_purchase_quantity': 'required_purchase_quantity'
    }

    def __init__(self, currency_code=None, discount_amount=None, items=None, required_purchase_quantity=None):  # noqa: E501
        """CouponAmountOffSubtotalWithItemsPurchase - a model defined in Swagger"""  # noqa: E501

        # Backing fields; the public names are properties defined below.
        self._currency_code = None
        self._discount_amount = None
        self._items = None
        self._required_purchase_quantity = None
        self.discriminator = None

        if currency_code is not None:
            self.currency_code = currency_code
        if discount_amount is not None:
            self.discount_amount = discount_amount
        if items is not None:
            self.items = items
        if required_purchase_quantity is not None:
            self.required_purchase_quantity = required_purchase_quantity

    @property
    def currency_code(self):
        """Gets the currency_code of this CouponAmountOffSubtotalWithItemsPurchase.  # noqa: E501

        The ISO-4217 three letter currency code the customer is viewing prices in  # noqa: E501

        :return: The currency_code of this CouponAmountOffSubtotalWithItemsPurchase.  # noqa: E501
        :rtype: str
        """
        return self._currency_code

    @currency_code.setter
    def currency_code(self, currency_code):
        """Sets the currency_code of this CouponAmountOffSubtotalWithItemsPurchase.

        The ISO-4217 three letter currency code the customer is viewing prices in  # noqa: E501

        :param currency_code: The currency_code of this CouponAmountOffSubtotalWithItemsPurchase.  # noqa: E501
        :type: str
        """
        # ISO-4217 codes are exactly three letters; longer values are rejected.
        if currency_code is not None and len(currency_code) > 3:
            raise ValueError("Invalid value for `currency_code`, length must be less than or equal to `3`")  # noqa: E501

        self._currency_code = currency_code

    @property
    def discount_amount(self):
        """Gets the discount_amount of this CouponAmountOffSubtotalWithItemsPurchase.  # noqa: E501

        The amount of shipping discount  # noqa: E501

        :return: The discount_amount of this CouponAmountOffSubtotalWithItemsPurchase.  # noqa: E501
        :rtype: float
        """
        return self._discount_amount

    @discount_amount.setter
    def discount_amount(self, discount_amount):
        """Sets the discount_amount of this CouponAmountOffSubtotalWithItemsPurchase.

        The amount of shipping discount  # noqa: E501

        :param discount_amount: The discount_amount of this CouponAmountOffSubtotalWithItemsPurchase.  # noqa: E501
        :type: float
        """

        self._discount_amount = discount_amount

    @property
    def items(self):
        """Gets the items of this CouponAmountOffSubtotalWithItemsPurchase.  # noqa: E501

        A list of items of which a quantity of one or many must be purchased for coupon to be valid.  # noqa: E501

        :return: The items of this CouponAmountOffSubtotalWithItemsPurchase.  # noqa: E501
        :rtype: list[str]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this CouponAmountOffSubtotalWithItemsPurchase.

        A list of items of which a quantity of one or many must be purchased for coupon to be valid.  # noqa: E501

        :param items: The items of this CouponAmountOffSubtotalWithItemsPurchase.  # noqa: E501
        :type: list[str]
        """

        self._items = items

    @property
    def required_purchase_quantity(self):
        """Gets the required_purchase_quantity of this CouponAmountOffSubtotalWithItemsPurchase.  # noqa: E501

        The quantity of items that must be purchased for the discount to be applied.  # noqa: E501

        :return: The required_purchase_quantity of this CouponAmountOffSubtotalWithItemsPurchase.  # noqa: E501
        :rtype: int
        """
        return self._required_purchase_quantity

    @required_purchase_quantity.setter
    def required_purchase_quantity(self, required_purchase_quantity):
        """Sets the required_purchase_quantity of this CouponAmountOffSubtotalWithItemsPurchase.

        The quantity of items that must be purchased for the discount to be applied.  # noqa: E501

        :param required_purchase_quantity: The required_purchase_quantity of this CouponAmountOffSubtotalWithItemsPurchase.  # noqa: E501
        :type: int
        """

        self._required_purchase_quantity = required_purchase_quantity

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # NOTE(review): dead branch -- this class subclasses object, not dict,
        # and `self.items()` would call the `items` property (a list), not
        # dict.items(). Generated boilerplate; left untouched.
        if issubclass(CouponAmountOffSubtotalWithItemsPurchase, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CouponAmountOffSubtotalWithItemsPurchase):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"perry@ultracart.com"
] |
perry@ultracart.com
|
9525643788f81bbc4fd703048134022606188cc6
|
3a4fbde06794da1ec4c778055dcc5586eec4b7d2
|
/_google_app_engine-projects/django-gae2django/django/core/management/color.py
|
9e1f4bc3c7035f41cb793f3cff4c5c80232312fc
|
[
"Apache-2.0"
] |
permissive
|
raychorn/svn_python-django-projects
|
27b3f367303d6254af55c645ea003276a5807798
|
df0d90c72d482b8a1e1b87e484d7ad991248ecc8
|
refs/heads/main
| 2022-12-30T20:36:25.884400
| 2020-10-15T21:52:32
| 2020-10-15T21:52:32
| 304,455,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
"""
Sets up the terminal color scheme.
"""
import sys
from django.utils import termcolors
def supports_color():
    """
    Returns True if the running system's terminal supports color, and False
    otherwise.
    """
    # Windows consoles (and Pocket PC) are assumed unable to render ANSI codes.
    platform_ok = sys.platform not in ('win32', 'Pocket PC')
    # isatty is not always implemented, #6223.
    stdout_is_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
    return platform_ok and stdout_is_tty
def color_style():
    """Returns a Style object with the Django color scheme."""
    # Fall back to the identity-style object when colors are unsupported.
    if not supports_color():
        return no_style()
    # Minimal attribute bag; each style attribute is a callable that wraps
    # text in the corresponding ANSI escape codes.
    class dummy: pass
    style = dummy()
    style.ERROR = termcolors.make_style(fg='red', opts=('bold',))
    style.ERROR_OUTPUT = termcolors.make_style(fg='red', opts=('bold',))
    style.NOTICE = termcolors.make_style(fg='red')
    style.SQL_FIELD = termcolors.make_style(fg='green', opts=('bold',))
    style.SQL_COLTYPE = termcolors.make_style(fg='green')
    style.SQL_KEYWORD = termcolors.make_style(fg='yellow')
    style.SQL_TABLE = termcolors.make_style(opts=('bold',))
    return style
def no_style():
    """Returns a Style object that has no colors."""
    class dummy:
        # Every style attribute resolves to an identity function, so callers
        # may invoke e.g. style.ERROR(text) and get the text back unchanged.
        def __getattr__(self, attr):
            return lambda text: text
    return dummy()
|
[
"raychorn@gmail.com"
] |
raychorn@gmail.com
|
8e50fe8cb7476ae32c0df2bdf7c48b016713463d
|
eb0345c732b9525db372283fe6105f553d6bddbf
|
/backendapi/prescription/serializers.py
|
596707772dc9f5393e68fbef67bfdfef64734622
|
[] |
no_license
|
mahidulmoon/djreact-smart-medic
|
1f0a6b3de8981858d4234b4da8d76b52e1911f70
|
9d07d93aa1ff27558cd496a4aa94167c8983958a
|
refs/heads/master
| 2021-05-22T01:49:00.155572
| 2020-04-04T05:13:27
| 2020-04-04T05:13:27
| 252,911,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
from rest_framework import serializers
from .models import Prescription
class PrescriptionSerializer(serializers.ModelSerializer):
    """DRF model serializer exposing every field of the Prescription model."""
    class Meta:
        model=Prescription
        fields="__all__"
|
[
"mahidulmoon@gmail.com"
] |
mahidulmoon@gmail.com
|
e1e603be0d0b6b5a04022895a942d4b2ce6476f7
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/sql/azure-mgmt-sql/generated_samples/server_advanced_threat_protection_settings_create_min.py
|
6c0235f2a980a9217c5d24fd3ecae5af2e6b9d20
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,785
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.sql import SqlManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-sql
# USAGE
python server_advanced_threat_protection_settings_create_min.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Disable the Default Advanced Threat Protection setting on a server.

    Credentials come from DefaultAzureCredential (env vars AZURE_CLIENT_ID,
    AZURE_TENANT_ID, AZURE_CLIENT_SECRET -- see the module docstring).
    """
    client = SqlManagementClient(
        credential=DefaultAzureCredential(),
        # Placeholder subscription id -- replace with a real one before running.
        subscription_id="00000000-1111-2222-3333-444444444444",
    )

    # Long-running operation: .result() blocks until the update completes.
    response = client.server_advanced_threat_protection_settings.begin_create_or_update(
        resource_group_name="threatprotection-4799",
        server_name="threatprotection-6440",
        advanced_threat_protection_name="Default",
        parameters={"properties": {"state": "Disabled"}},
    ).result()
    print(response)


# x-ms-original-file: specification/sql/resource-manager/Microsoft.Sql/preview/2021-11-01-preview/examples/ServerAdvancedThreatProtectionSettingsCreateMin.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
22c9bd34bc65eeaa2531ecd17303521ed2520d03
|
abd7504f6562babf79fb4e86af7529b2cb40fb54
|
/pkg/p2/algebraic/Composite.py
|
f7484bcffe062e091103b6467f18fc30e7da1383
|
[] |
no_license
|
aivazis/p2
|
266c1728554b3f7a89e72f09ba2d9e5ff8d4447d
|
fd9a82d7dafa815dd68f679eb2b4b1a6287d02ea
|
refs/heads/main
| 2022-01-08T12:45:16.646028
| 2022-01-01T17:31:10
| 2022-01-01T17:31:10
| 225,452,981
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,458
|
py
|
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# (c) 1998-2022 all rights reserved
class Composite:
    """
    Mix-in class that provides an implementation of the subset of the interface of {Node} that
    requires traversal of the expression graph rooted at nodes with dependencies.

    This class assumes that its instances provide {operands}, a tuple of their dependencies on
    other nodes
    """

    # types
    from .exceptions import CircularReferenceError

    # interface
    @property
    def operands(self):
        """
        A sequence of my direct dependents
        """
        # the default implementation stores my operands in a private member
        return self._operands

    @operands.setter
    def operands(self, operands):
        """
        Adjust my operands
        """
        # process the incoming sequence (wrapping raw values as literals) and save
        self._operands = self._store(self._ingest(operands=operands))
        # all done
        return

    @property
    def span(self):
        """
        Return a sequence over my entire dependency graph
        """
        # i am a node in my dependency graph
        yield self
        # go through my operands
        for operand in self.operands:
            # and ask them for their span
            yield from operand.span
        # all done
        return

    # classifiers
    @property
    def literals(self):
        """
        Return a sequence over the nodes in my dependency graph that encapsulate foreign objects
        """
        # go through my operands
        for operand in self.operands:
            # and ask them for literals in their span
            yield from operand.literals
        # all done
        return

    @property
    def operators(self):
        """
        Return a sequence over the composite nodes in my dependency graph
        """
        # i am one
        yield self
        # go through my operands
        for operand in self.operands:
            # and ask them for operators in their span
            yield from operand.operators
        # all done
        return

    @property
    def variables(self):
        """
        Return a sequence over the variables in my dependency graph
        """
        # go through my operands
        for operand in self.operands:
            # and ask them for variables in their span
            yield from operand.variables
        # all done
        return

    # structural classifiers
    @property
    def leaves(self):
        """
        Return a sequence over the leaves in my dependency graph
        """
        # go through my operands:
        for operand in self.operands:
            # and ask them for leaves in their span
            yield from operand.leaves
        # all done
        return

    @property
    def composites(self):
        """
        Return a sequence over the composites in my dependency graph
        """
        # i am one
        yield self
        # go through my operands:
        for operand in self.operands:
            # and ask them for leaves in their span
            yield from operand.composites
        # all done
        return

    # metamethods
    def __init__(self, operands, **kwds):
        # chain up
        super().__init__(**kwds)
        # save my direct dependencies
        self.operands = operands
        # all done
        return

    # implementation details
    def _ingest(self, operands):
        """
        Convert {operands} into nodes
        """
        # go through operands
        for operand in operands:
            # if this is not a node instance
            if not isinstance(operand, self.node):
                # make it a literal
                operand = self.literal(value=operand)
            # hand it off
            yield operand
        # all done
        return

    def _substitute(self, current, replacement, clean):
        """
        Adjust the operands by substituting {replacement} for {current} in the sequence of operands

        Returns the node that should take my place: {replacement} when i am {current},
        otherwise myself. {clean} accumulates the nodes already visited so shared
        subgraphs are processed once.
        """
        # if i'm the one being replaced
        if current is self:
            # just return the {replacement}
            return replacement
        # if i'm among the {clean} nodes
        if self in clean:
            # do nothing
            return self
        # add me to the clean pile
        clean.add(self)

        # otherwise, make a pile for my potentially adjusted operands
        operands = []
        # initially, i am not known to have replaced any of my operands
        modified = False
        # go through my operands
        for op in self.operands:
            # if this one is marked {clean}
            if op in clean:
                # add it to the list of operands
                operands.append(op)
                # and carry on
                continue
            # otherwise, ask it to perform the substitution
            r = op._substitute(current=current, replacement=replacement, clean=clean)
            # add it or its replacement to the pile
            operands.append(r)
            # record whether an update was performed
            modified |= (r is not op)
        # if any substitutions were made
        if modified:
            # replace my operands
            self.operands = operands

        # all done
        return self

    # the default storage mechanism for operands
    _store = tuple
    # storage for the operands
    _operands = ()
# end of file
|
[
"michael.aivazis@para-sim.com"
] |
michael.aivazis@para-sim.com
|
25b84c72c3109809a8cb05a72d74a669ff8fe826
|
4059573793d0ee5b74c9dd919aa2945dad2fe426
|
/Searching Algorithms/binary_search.py
|
4d3d525420e9d3930077f4a8fae78ac7b8ab65cb
|
[] |
no_license
|
nayanika2304/DataStructuresPractice
|
04ea6d9248a63983abdd2b983632ba5907eed9d4
|
f3c815ff113ce3977cc743360b77fb21c9f9b383
|
refs/heads/master
| 2022-12-08T05:28:22.897414
| 2020-08-29T18:17:57
| 2020-08-29T18:17:57
| 282,513,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
'''
Compare x with the middle element.
If x matches with middle element, we return the mid index.
Else If x is greater than the mid element, then x can only lie in right half subarray after the mid element. So we recur for right half.
Else (x is smaller) recur for the left half.
'''
# Python3 Program for recursive binary search.
'''
takes the number to search, array,
l= start
r= length
'''
# Returns index of x in arr if present, else -1
def binarySearch(arr, l, r, x):
    """Recursively search sorted `arr` for `x` within indices [l, r].

    Returns the index of `x` when present, otherwise -1.
    Fix: removed the stray debug `print(l, r)` that polluted stdout on
    every recursive call.
    """
    # Check base case: the search window is non-empty.
    if r >= l:
        # Overflow-safe midpoint form (relevant in fixed-width languages,
        # idiomatic here).
        mid = l + (r - l) // 2

        # If element is present at the middle itself
        if arr[mid] == x:
            return mid

        # If element is smaller than mid, then it
        # can only be present in left subarray
        elif arr[mid] > x:
            return binarySearch(arr, l, mid - 1, x)

        # Else the element can only be present
        # in right subarray
        else:
            return binarySearch(arr, mid + 1, r, x)

    else:
        # Element is not present in the array
        return -1
# Driver Code: search for 10 in a small sorted sample array.
arr = [2, 3, 4, 10, 40]
x = 10

# Function call over the whole array [0, len-1].
result = binarySearch(arr, 0, len(arr) - 1, x)

if result != -1:
    # "% d" (with a space flag) prints a blank before the digit.
    print("Element is present at index % d" % result)
else:
    print("Element is not present in array")
|
[
"nayanikabhargava1993@gmail.com"
] |
nayanikabhargava1993@gmail.com
|
85b3990a7902a3249be40266a3a934ab4f52f997
|
9cc6721acb439db2e7cff8eb4dbff4b6e14040d5
|
/코딩테스트(책)/problems/3.dfs_bfs/6.감시피하기_x.py
|
44a193abe9ce52b0c1122ad0e432887afc033494
|
[] |
no_license
|
young31/Algorithm
|
35c6ec6b6d9b192f9d0e6e6f6484f33c92100232
|
bfcccfa798d031a930490efa24d9b2263bd4b984
|
refs/heads/master
| 2021-11-04T14:01:53.827508
| 2021-10-25T06:35:24
| 2021-10-25T06:35:24
| 196,034,851
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,864
|
py
|
# input
# Sample boards: 0 = empty cell, 1 = student, 2 = teacher
# (values interpreted this way by search() and main() in this file).
n1 = 5
arr1 = [
    [0, 1, 0, 0, 2],
    [2, 0, 1, 0, 0],
    [0, 0, 0, 0, 0],
    [0, 2, 0, 0, 0],
    [0, 0, 2, 0, 0]
]

n2 = 4
arr2 = [
    [1, 1, 1, 2],
    [0, 0, 0, 0],
    [0, 0, 0, 0],
    [2, 2, 2, 0]
]
# answer: yes
# answer: yes
# algo
def search(arr, x, y, move):
    """Scan from (x, y) along direction `move` (0: row+1, 1: row-1,
    2: col+1, 3: col-1) on the square board `arr`.

    Returns the list of empty cells that could block a teacher's (value 2)
    line of sight in that direction, [] when no teacher is visible, or the
    sentinel string 'FLAG' when a teacher is visible with no empty cell in
    between (evasion impossible).
    """
    n = len(arr)
    res = []
    is_T = False  # True once a teacher is seen along the scan
    # Direction deltas for the four axis-aligned moves.
    if move == 0:
        dx, dy = 1, 0
    elif move == 1:
        dx, dy = -1, 0
    elif move == 2:
        dx, dy = 0, 1
    elif move == 3:
        dx, dy = 0, -1

    tmp = []
    while 1:
        nx = x+dx
        ny = y+dy
        if not is_feasible(n, nx, ny):
            # Walked off the board.
            break
        else:
            if arr[nx][ny] == 0:
                # Empty cell: a wall here would block the line of sight.
                tmp.append((nx, ny))
            elif arr[nx][ny] == 2:
                is_T = True
                break
            # NOTE(review): a cell holding 1 (another student) neither blocks
            # nor stops the scan -- confirm this matches the problem statement.
        x, y = nx, ny
    if is_T:
        if tmp:
            res += tmp
        else:
            # A teacher is present with nothing to block it: emergency
            # (evasion is impossible no matter what).
            return 'FLAG'
    return res
def is_feasible(n, x, y):
    """Return True when coordinate (x, y) lies inside an n x n board."""
    return 0 <= x < n and 0 <= y < n
def main(n, arr):
    """Decide whether every student can be hidden from every teacher.

    ``arr`` is an n x n board: 0 = empty, 1 = student, 2 = teacher.
    Teachers see along rows and columns until blocked; at most three
    obstacles may be placed.  Returns 'YES' when a placement exists,
    otherwise 'NO'.
    """
    students = []
    for i in range(n):
        for j in range(n):
            if arr[i][j] == 1:
                students.append((i, j))
    # Build the intervals that must be blocked; if every interval is
    # defended, all students successfully evade the teachers.
    to_block = []
    for i, j in students:
        for m in range(4):
            tmp = search(arr, i, j, m)
            if tmp == 'FLAG':
                return 'NO'
            if tmp:
                to_block.append(tmp)
    # Track which intervals still need an obstacle.
    remains = [True for _ in range(len(to_block))]
    n_block = 0
    for ib, block in enumerate(to_block):
        if not block:
            continue
        hist_remove = []
        # Commit to blocking this interval; how many obstacles that takes
        # inside the interval is decided below.
        if remains[ib]:
            remains[ib] = False
        for i, xy in enumerate(block):
            remove = False
            for ib_, block_ in enumerate(to_block[ib+1:], ib+1):
                # A coordinate shared with a later interval is always
                # blocked; intervals resolved that way are marked done.
                if xy in block_:
                    remains[ib_] = False
                    remove = True
                    block_.remove(xy)
            if n_block > 3:
                return 'NO'
            if remove:
                n_block += 1
            hist_remove.append(remove)
        # No coordinate shared with any later interval: one obstacle
        # handles this interval on its own.
        if sum(hist_remove) == 0:
            n_block += 1
    if n_block <= 3 and sum(remains) == 0:
        return 'YES'
    else:
        return 'NO'
# Evaluate both fixture boards in order.
for board_size, board in ((n1, arr1), (n2, arr2)):
    print(main(board_size, board))
|
[
"migael38317@gmail.com"
] |
migael38317@gmail.com
|
1921e361eac11d34257a95c03ac18db950c86452
|
ac99fc4c74c6306cf23ebc3ddbcbd992b985387d
|
/tests/myapp/settings.py
|
55161837d3bbd70265ce48e2ae0356a77b458656
|
[
"MIT"
] |
permissive
|
jamalex/django-nested-intervals
|
d7f010d13abd28efd0867fa683bfded04fb91931
|
b2d80db554762e95b24c7b08217e5bcbed8f40b7
|
refs/heads/master
| 2020-03-28T07:31:37.486839
| 2018-09-09T21:14:06
| 2018-09-09T21:14:06
| 147,907,242
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,336
|
py
|
from __future__ import unicode_literals
import os
import django

# Directory containing this settings module.
DIRNAME = os.path.dirname(__file__)

# Test project: debug on, single-file SQLite database.
DEBUG = True
SECRET_KEY = 'abc123'
STATIC_URL = '/static/'
ROOT_URLCONF = 'myapp.urls'

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'db.sqlite3',
    }
}

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    'nested_intervals',
    'myapp',
)

MIDDLEWARE = [
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
]

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
|
[
"jamalex@gmail.com"
] |
jamalex@gmail.com
|
5be62ca99e20643780c83602ab8fb32b229abe89
|
5f0eeef355fa84b165d4e0707e8874755cc03259
|
/chp03_oscillation/NOC_3_09_wave_b/NOC_3_09_wave_b.pyde
|
b43cee586b23ca1e67378fd57b2a37121536b205
|
[] |
no_license
|
kidult00/NatureOfCode-Examples-Python
|
5835fbed114f3991b9986852f31d29a0a46d7e53
|
42461590deebbe305d5815ff0d207ff974335ad5
|
refs/heads/master
| 2021-05-11T04:47:53.999705
| 2018-03-07T15:54:12
| 2018-03-07T15:54:12
| 117,946,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
pyde
|
# The Nature of Code - Python Version
# [kidult00](https://github.com/kidult00)
def setup():
    # Processing (pyde) entry point: create the sketch window and enable
    # anti-aliased rendering.  size() and smooth() are Processing builtins.
    size(250, 200)
    smooth()
    # Wave phase offset and the angular step between adjacent circles.
    global startAngle, angleVel
    startAngle = 0.0
    angleVel = 0.2
def draw():
    # Processing draw loop: repaint the background so previous frames are
    # erased, then draw one translucent circle per 24-pixel column.
    background(255)
    stroke(0)
    fill(0, 50)
    strokeWeight(2)
    global startAngle, angleVel
    # Advance the wave's phase slightly each frame to animate it.
    startAngle += 0.015
    angle = startAngle
    for x in range(0, width+1, 24):
        # Processing's map() (not Python's builtin) scales sin(angle)
        # from [-1, 1] onto the sketch height.
        y = map(sin(angle), -1, 1, 0, height)
        ellipse(x, y, 48, 48)
        angle += angleVel
|
[
"sysulj@gmail.com"
] |
sysulj@gmail.com
|
3ec8cdf29f18ab3b98ea97b5e714e0b0770ed1e6
|
148072ce210ca4754ea4a37d83057e2cf2fdc5a1
|
/src/core/w3af/w3af/plugins/attack/db/sqlmap/lib/request/direct.py
|
dab662acd830c943a6bb530956ff9115e04cb2cb
|
[] |
no_license
|
ycc1746582381/webfuzzer
|
8d42fceb55c8682d6c18416b8e7b23f5e430c45f
|
0d9aa35c3218dc58f81c429cae0196e4c8b7d51b
|
refs/heads/master
| 2021-06-14T18:46:59.470232
| 2017-03-14T08:49:27
| 2017-03-14T08:49:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,668
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import time
from extra.safe2bin.safe2bin import safecharencode
from lib.core.agent import agent
from lib.core.common import Backend
from lib.core.common import calculateDeltaSeconds
from lib.core.common import extractExpectedValue
from lib.core.common import getCurrentThreadData
from lib.core.common import getUnicode
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.common import isListLike
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.dicts import SQL_STATEMENTS
from lib.core.enums import CUSTOM_LOGGING
from lib.core.enums import DBMS
from lib.core.enums import EXPECTED
from lib.core.settings import UNICODE_ENCODING
from lib.utils.timeout import timeout
def direct(query, content=True):
    """Run *query* over sqlmap's direct DBMS connection.

    Normalises the statement (payload adjustment, SELECT prefixing,
    Oracle FROM DUAL fix-up), consults the hashdb resume cache, executes
    via the configured connector under a timeout, and returns either the
    unicode-rendered result (``content=True``) or a boolean
    interpretation of the output.
    """
    select = True
    query = agent.payloadDirect(query)
    query = agent.adjustLateValues(query)
    threadData = getCurrentThreadData()
    # Oracle requires a FROM clause; fall back to the dummy DUAL table.
    if Backend.isDbms(DBMS.ORACLE) and query.upper().startswith("SELECT ") and " FROM " not in query.upper():
        query = "%s FROM DUAL" % query
    # Any statement matching a known non-SELECT form is executed rather
    # than fetched.
    for sqlTitle, sqlStatements in SQL_STATEMENTS.items():
        for sqlStatement in sqlStatements:
            if query.lower().startswith(sqlStatement) and sqlTitle != "SQL SELECT statement":
                select = False
                break
    # Bare expressions are promoted to SELECT statements.
    if select and not query.upper().startswith("SELECT "):
        query = "SELECT %s" % query
    logger.log(CUSTOM_LOGGING.PAYLOAD, query)
    # Try to resume a previously-cached result for this exact query.
    output = hashDBRetrieve(query, True, True)
    start = time.time()
    if not select and "EXEC " not in query.upper():
        # Non-query statement: execute and discard the result.
        _ = timeout(func=conf.dbmsConnector.execute, args=(query,), duration=conf.timeout, default=None)
    elif not (output and "sqlmapoutput" not in query and "sqlmapfile" not in query):
        # No usable cached result: run the SELECT and cache its output.
        output = timeout(func=conf.dbmsConnector.select, args=(query,), duration=conf.timeout, default=None)
        hashDBWrite(query, output, True)
    elif output:
        infoMsg = "resumed: %s..." % getUnicode(output, UNICODE_ENCODING)[:20]
        logger.info(infoMsg)
    threadData.lastQueryDuration = calculateDeltaSeconds(start)
    if not output:
        return output
    elif content:
        if output and isListLike(output):
            # Single-column result sets are flattened to scalars.
            if len(output[0]) == 1:
                output = [_[0] for _ in output]
        retVal = getUnicode(output, noneToNull=True)
        return safecharencode(retVal) if kb.safeCharEncode else retVal
    else:
        return extractExpectedValue(output, EXPECTED.BOOL)
|
[
"everping@outlook.com"
] |
everping@outlook.com
|
82942fcabae1643320272fa31c19c206e0b8e146
|
f6db8d85a3b41eed543959314d65927353a8229c
|
/.history/W5/mapsafood/settings_20201202154801.py
|
dd4788e75db51881d2911e9cc646c3ed5af86df3
|
[] |
no_license
|
NFEL/DjangoPaeez99
|
d573cc8e36500f08bc104d76f7a2628062d86c2f
|
621636bfb47d71f2a4f45037b7264dd5ebc7cdd7
|
refs/heads/main
| 2023-01-27T22:05:57.788049
| 2020-12-08T10:08:28
| 2020-12-08T10:08:28
| 304,553,353
| 1
| 2
| null | 2020-10-16T07:33:04
| 2020-10-16T07:33:03
| null |
UTF-8
|
Python
| false
| false
| 3,954
|
py
|
"""
Django settings for mapsafood project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wopis*_*o3rtlblge=mm)pb)*ynu66zc+wqt&bs8l2*v=z1g%$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
LOGGING = {
'version': 1,
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/tmp/django.log',
}
},
'loggers': {
'django.db.backends': {
'level': 'DEBUG',
'handlers': ['file'],
}
}
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'django.contrib.gis',
'user_profile',
'geolocation',
'restaurant',
'crispy_form'
]
# DJANGO_ALLOW_ASYNC_UNSAFE = False
os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true"
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INTERNAL_IPS = [
'127.0.0.1',
]
ROOT_URLCONF = 'mapsafood.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mapsafood.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'mapsafood',
'USER': 'nfel',
'PASSWORD': '1',
'HOST': 'localhost',
'PORT': '5432'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
BASE_DIR / "static",
]
MEDIA_ROOT = BASE_DIR / 'media/'
MEDIA_URL = '/media/'
|
[
"nfilsaraee@gmail.com"
] |
nfilsaraee@gmail.com
|
973af6afd92942add0a57a3941085fd6e2d66174
|
5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5
|
/blimgui/dist/pyglet/font/__init__.py
|
18313153ed4a927df01f0629f5dc04a01d122ae6
|
[
"MIT"
] |
permissive
|
juso40/bl2sdk_Mods
|
8422a37ca9c2c2bbf231a2399cbcb84379b7e848
|
29f79c41cfb49ea5b1dd1bec559795727e868558
|
refs/heads/master
| 2023-08-15T02:28:38.142874
| 2023-07-22T21:48:01
| 2023-07-22T21:48:01
| 188,486,371
| 42
| 110
|
MIT
| 2022-11-20T09:47:56
| 2019-05-24T20:55:10
|
Python
|
UTF-8
|
Python
| false
| false
| 7,514
|
py
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2022 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Load fonts.
pyglet will automatically load any system-installed fonts. You can add additional fonts
(for example, from your program resources) using :meth:`add_file` or
:meth:`add_directory`. These fonts are then available in the same way as system-installed fonts::
from pyglet import font
font.add_file('action_man.ttf')
action_man = font.load('Action Man', 16)
# or
from pyglet import resource
resource.add_font('action_man.ttf')
action_man = font.load('Action Man')
See the :mod:`pyglet.font.base` module for documentation on the base classes used
by this package.
"""
import os
import sys
import weakref
import pyglet
from pyglet import gl
# Select the platform font implementation at import time; skipped while
# building documentation (is_pyglet_doc_run).
if not getattr(sys, 'is_pyglet_doc_run', False):
    if pyglet.compat_platform == 'darwin':
        from pyglet.font.quartz import QuartzFont
        _font_class = QuartzFont

    elif pyglet.compat_platform in ('win32', 'cygwin'):
        from pyglet.libs.win32.constants import WINDOWS_7_OR_GREATER
        if WINDOWS_7_OR_GREATER:
            # DirectWrite is only used when explicitly enabled via the
            # 'advanced_font_features' option; GDI+ is the default.
            if pyglet.options["advanced_font_features"] is True:
                from pyglet.font.directwrite import Win32DirectWriteFont
                _font_class = Win32DirectWriteFont
            else:
                from pyglet.font.win32 import GDIPlusFont
                _font_class = GDIPlusFont
        else:
            from pyglet.font.win32 import GDIPlusFont
            _font_class = GDIPlusFont
    else:
        # All other platforms (Linux, BSD, ...): FreeType.
        from pyglet.font.freetype import FreeTypeFont
        _font_class = FreeTypeFont
def have_font(name):
    """Check if specified system font name is available.

    :Parameters:
        `name` : str
            Family name of the font to look up.

    :rtype: bool
    """
    return _font_class.have_font(name)
def load(name=None, size=None, bold=False, italic=False, stretch=False, dpi=None):
    """Load a font for rendering.

    :Parameters:
        `name` : str, or list of str
            Font family, for example, "Times New Roman".  If a list of names
            is provided, the first one matching a known font is used.  If no
            font can be matched to the name(s), a default font is used.  In
            pyglet 1.1, the name may be omitted.
        `size` : float
            Size of the font, in points.  The returned font may be an exact
            match or the closest available.  In pyglet 1.1, the size may be
            omitted, and defaults to 12pt.
        `bold` : bool
            If True, a bold variant is returned, if one exists for the given
            family and size.
        `italic` : bool
            If True, an italic variant is returned, if one exists for the given
            family and size.
        `stretch` : bool
            If True, a stretched variant is returned, if one exists for the
            given family and size.
        `dpi` : float
            The assumed resolution of the display device, for the purposes of
            determining the pixel size of the font.  Defaults to 96.

    :rtype: `Font`
    """
    # Arbitrary default size
    if size is None:
        size = 12

    if dpi is None:
        dpi = 96

    # Find first matching name, or fall back to the platform default.
    if type(name) in (tuple, list):
        for n in name:
            if _font_class.have_font(n):
                name = n
                break
        else:
            name = None

    # Locate or create the per-GL-context font cache so fonts are shared
    # between contexts in the same object space.
    shared_object_space = gl.current_context.object_space
    if not hasattr(shared_object_space, 'pyglet_font_font_cache'):
        shared_object_space.pyglet_font_font_cache = weakref.WeakValueDictionary()
        shared_object_space.pyglet_font_font_hold = []

    font_cache = shared_object_space.pyglet_font_font_cache
    font_hold = shared_object_space.pyglet_font_font_hold

    # Look for font name in font cache
    descriptor = (name, size, bold, italic, stretch, dpi)
    if descriptor in font_cache:
        return font_cache[descriptor]

    # Not in cache, create from scratch
    font = _font_class(name, size, bold=bold, italic=italic, stretch=stretch, dpi=dpi)

    # Save parameters for new-style layout classes to recover
    # TODO: add properties to the Font classes, so these can be queried:
    font.size = size
    font.bold = bold
    font.italic = italic
    font.stretch = stretch
    font.dpi = dpi

    # Cache font in weak-ref dictionary to avoid reloading while still in use
    font_cache[descriptor] = font

    # Hold onto refs of last three loaded fonts to prevent them being
    # collected if momentarily dropped.
    del font_hold[3:]
    font_hold.insert(0, font)

    return font
def add_file(font):
    """Add a font to pyglet's search path.

    In order to load a font that is not installed on the system, you must
    call this method to tell pyglet that it exists.  You can supply
    either a filename or any file-like object.

    The font format is platform-dependent, but is typically a TrueType font
    file containing a single font face.  Note that to use a font added with
    this method, you should pass the face name (not the file name) to
    :py:func:`pyglet.font.load` or any other place where you normally
    specify a font.

    :Parameters:
        `font` : str or file-like object
            Filename or file-like object to load fonts from.

    """
    if isinstance(font, str):
        # Fix: open the file in a context manager so the handle is closed
        # promptly (the original left it to garbage collection).
        with open(font, 'rb') as font_file:
            data = font_file.read()
    elif hasattr(font, 'read'):
        # Caller-supplied file-like object: read it, but closing stays the
        # caller's responsibility (unchanged behaviour).
        data = font.read()
    else:
        # Assume raw font data was passed directly.
        data = font
    _font_class.add_font_data(data)
def add_directory(directory):
    """Add a directory of fonts to pyglet's search path.

    Calls :meth:`pyglet.font.add_file` for every file with a ``.ttf``
    extension found directly inside *directory*.  Subdirectories are not
    searched.

    :Parameters:
        `directory` : str
            Directory that contains font files.

    """
    for entry in os.listdir(directory):
        if entry.lower().endswith('.ttf'):
            add_file(os.path.join(directory, entry))
__all__ = ('add_file', 'add_directory', 'load', 'have_font')
|
[
"justin.sostmann@googlemail.com"
] |
justin.sostmann@googlemail.com
|
144d632a68c20edf5f33c6a21eba24ba413b5060
|
61eae81a1780141ba1323adb93d3d41c4ad0cc3e
|
/src/model_bank/dataset_2018_7_13_lcp_recognition_model.py
|
66b9a61b10443213dac0a710aa181f548137a8c5
|
[] |
no_license
|
lsy125/AE-signal-model
|
0783cda7eee972cce3ab1abf682047baf3d739a1
|
da11717ffc8bf74dbb2d6f818e73f0b839f0ab5a
|
refs/heads/master
| 2020-04-02T08:46:05.140848
| 2018-10-22T02:01:48
| 2018-10-22T02:01:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,219
|
py
|
from keras.layers import *
from keras.models import Sequential, Model
from keras.utils import plot_model
from keras import regularizers
# self lib
from src.utils.helpers import direct_to_dir
def lcp_recognition_binary_model():
    """Small 1-D CNN for binary LCP recognition on 6000-sample signals."""
    visible_in = Input(shape=(6000, 1))
    # Three conv/pool stages with growing channel counts (5 -> 20 -> 32).
    conv_1 = Conv1D(5, kernel_size=5, activation='relu')(visible_in)
    maxpool_1 = MaxPooling1D(pool_size=3, strides=2)(conv_1)
    dropout_1 = Dropout(0.4)(maxpool_1)
    conv_2 = Conv1D(20, kernel_size=5, activation='relu')(dropout_1)
    maxpool_2 = MaxPooling1D(pool_size=3, strides=2)(conv_2)
    conv_3 = Conv1D(32, kernel_size=5, activation='relu')(maxpool_2)
    maxpool_3 = MaxPooling1D(pool_size=3, strides=2)(conv_3)
    flatten = Flatten()(maxpool_3)
    dropout_2 = Dropout(0.5)(flatten)
    dense_1 = Dense(10, activation='relu')(dropout_2)
    # dense_2 = Dense(20, activation='relu')(dense_1)
    # dense_3 = Dense(80, activation='relu')(dense_2)
    # Single sigmoid unit: binary classification output.
    visible_out = Dense(1, activation='sigmoid')(dense_1)
    model = Model(inputs=visible_in, outputs=visible_out)
    print(model.summary())
    return model
def lcp_recognition_binary_model_2():
    '''
    Sequential variant; most conv stages are currently commented out,
    leaving one conv/pool stage plus a small dense head.

    refer Online, VGG concept
    model = Sequential()
    model.add(Conv1D(64, 3, activation='relu', input_shape=(6000, 1)))
    model.add(Conv1D(64, 3, activation='relu'))
    model.add(MaxPooling1D(3))
    model.add(Conv1D(128, 3, activation='relu'))
    model.add(Conv1D(128, 3, activation='relu'))
    model.add(MaxPooling1D(3))
    model.add(Conv1D(256, 3, activation='relu'))
    model.add(Conv1D(256, 3, activation='relu'))
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    :return:
    '''
    model = Sequential()
    model.add(Conv1D(64, 3, activation='relu', input_shape=(6000, 1)))
    # model.add(Conv1D(16, 3, activation='relu'))
    model.add(MaxPooling1D(3, strides=2))
    model.add(Dropout(0.3))
    # model.add(Conv1D(32, 3, activation='relu'))
    # model.add(Conv1D(32, 3, activation='relu'))
    # model.add(MaxPooling1D(3, strides=2))
    # model.add(Dropout(0.3))
    # model.add(Conv1D(64, 3, activation='relu'))
    # model.add(Conv1D(64, 3, activation='relu'))
    # model.add(MaxPooling1D(3, strides=2))
    # model.add(Dropout(0.3))
    #
    # model.add(Conv1D(128, 3, activation='relu'))
    # model.add(Conv1D(128, 3, activation='relu'))
    # model.add(MaxPooling1D(3, strides=2))
    # model.add(Dropout(0.3))
    # model.add(Conv1D(256, 3, activation='relu'))
    # model.add(Conv1D(256, 3, activation='relu'))
    model.add(GlobalAveragePooling1D())
    # model.add(Dropout(0.5))
    model.add(Dense(50, activation='relu'))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    print(model.summary())
    return model
def lcp_recognition_binary_model_3():
    '''
    Dual layer: two parallel convolutional branches over the same input
    whose global-average-pooled features are concatenated before a small
    dense classification head.  Also writes a diagram of the model to the
    result directory.
    '''
    visible_in = Input(shape=(6000, 1))
    # Part a
    conv_a_1 = Conv1D(32, kernel_size=5, activation='relu', name='conv_a_1')(visible_in)
    conv_a_2 = Conv1D(32, kernel_size=5, activation='relu', name='conv_a_2')(conv_a_1)
    maxpool_a_1 = MaxPooling1D(pool_size=3, strides=2, name='maxp_a_1')(conv_a_2)
    drop_a_1 = Dropout(0.3, name='drop_a_1')(maxpool_a_1)
    conv_a_3 = Conv1D(64, kernel_size=5, activation='relu', name='conv_a_3')(drop_a_1)
    conv_a_4 = Conv1D(128, kernel_size=5, activation='relu', name='conv_a_4', use_bias=False)(conv_a_3)
    maxpool_a_2 = MaxPooling1D(pool_size=3, strides=2, name='maxp_a_2')(conv_a_4)
    gap_a_1 = GlobalAveragePooling1D(name='gap_a_1')(maxpool_a_2)
    # Part b (shallower branch, also fed from the raw input)
    conv_b_1 = Conv1D(32, kernel_size=5, activation='relu', name='conv_b_1')(visible_in)
    conv_b_2 = Conv1D(32, kernel_size=5, activation='relu', name='conv_b_2')(conv_b_1)
    maxpool_b_1 = MaxPooling1D(pool_size=3, strides=2, name='maxp_b_1')(conv_b_2)
    drop_b_1 = Dropout(0.3, name='drop_b_1')(maxpool_b_1)
    conv_b_3 = Conv1D(128, kernel_size=5, activation='relu', name='conv_b_3')(drop_b_1)
    # drop_b_2 = Dropout(0.3, name='drop_b_2')(conv_b_3)
    # conv_b_4 = Conv1D(128, kernel_size=5, activation='relu', name='conv_b_4')(drop_b_2)
    # maxpool_b_2 = MaxPooling1D(pool_size=3, strides=2, name='maxp_b_2')(conv_b_4)
    gap_b_1 = GlobalAveragePooling1D(name='gap_b_1')(conv_b_3)
    # Layer 2: merge both branches and classify.
    merge_1 = concatenate([gap_a_1, gap_b_1])
    dense_1 = Dense(50, activation='relu', name='dense_1')(merge_1)
    drop_1 = Dropout(0.2, name='drop_1')(dense_1)
    visible_out = Dense(1, activation='sigmoid', name='dense_2')(drop_1)
    model = Model(inputs=visible_in, outputs=visible_out)
    print(model.summary())
    save_model_plot = direct_to_dir(where='result') + 'lcp_recognition_binary_model_3.png'
    plot_model(model, to_file=save_model_plot)
    return model
# TESTING AT LAPTOP
def model_1():
    """Minimal single-conv model used for quick local smoke tests.

    Returns:
        The compiled-graph ``Model``; fix: the original printed the
        summary but never returned the model, unlike every sibling
        builder in this module.
    """
    visible_in = Input(shape=(6000, 1))
    conv_1 = Conv1D(filters=32, kernel_size=5, strides=1, activation='relu', name='Conv_a_1')(visible_in)
    gap = GlobalAveragePooling1D()(conv_1)
    visible_out = Dense(1, activation='sigmoid')(gap)
    model = Model(inputs=visible_in, outputs=visible_out)
    print(model.summary())
    return model
# lcp_recognition_binary_model_2()
|
[
"hooyuheng@gmail.com"
] |
hooyuheng@gmail.com
|
8afd85abc6361f6690923f4902b20607b778ad70
|
87b7d7948aa51fdb4a27540240579788896369ea
|
/code/runs_sacred/model_data_random/_sources/data_set_file_8dbb1c73ef6f7d6c76679b005f0b994b.py
|
492729d1daa74ff2dbc6c7004f3cccb8805d5d13
|
[] |
no_license
|
Samuel-Levesque/Projet_GLO7030
|
6f13accd63b52107ec3e3a0b9b5f52edccda7c8d
|
557bce3235f09723900f65c6e3b44a0ed9d2b519
|
refs/heads/master
| 2022-01-16T12:49:22.884798
| 2019-05-05T18:38:35
| 2019-05-05T18:38:35
| 177,038,991
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,055
|
py
|
import random
import warnings
warnings.filterwarnings('ignore') # to suppress some matplotlib deprecation warnings
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import ast
import cv2
import matplotlib.pyplot as plt
import os
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from utility import save_object
def create_encoding_deconding_dict(path_data):
    '''
    Build label encoding/decoding dictionaries from per-class CSV names.

    Each ``<class name>.csv`` file under *path_data* becomes one class:
    spaces are replaced by underscores and classes are numbered in sorted
    file-name order.  Both dictionaries are pickled under ``saves_obj/``
    for later reuse.

    :param path_data: directory containing one CSV file per class
    :return: tuple ``(en_dict, dec_dict)`` mapping label -> index and
        index -> label respectively
    '''
    # Fixes: removed the unreachable `pass` after the return statement and
    # replaced the manual counter with enumerate.
    filenames = sorted(os.listdir(path_data))
    en_dict = {}
    for counter, fn in enumerate(filenames):
        # 'air plane.csv' -> 'air_plane'; split('/') guards against
        # path-like names.
        en_dict[fn[:-4].split('/')[-1].replace(' ', '_')] = counter
    dec_dict = {v: k for k, v in en_dict.items()}
    save_object(en_dict, "saves_obj/en_dict.pk")
    save_object(dec_dict, "saves_obj/dec_dict.pk")
    return en_dict, dec_dict
#Pour une classe
class DoodlesDataset(Dataset):
    """Doodles csv dataset.
    Adapted from https://www.kaggle.com/leighplt/pytorch-starter-kit/notebook
    PyTorch dataset for a single category.  A complete dataset is built by
    concatenating several of these datasets (one per class CSV).
    """
    def __init__(self, csv_file, root_dir,nrows,encoding_dict=None, mode='train', skiprows=None, size=224, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with annotations. ex :airplane.csv
            root_dir (string): Directory with all the csv.
            mode (string): Train or test mode.
            nrows (int): Number of rows of file to read. Useful for reading pieces of large files.
            skiprows (list-like or integer or callable):
                Line numbers to skip (0-indexed) or number of lines to skip (int) at the start of the file.
            size (int): Size of output image.
            encoding_dict (dict): label -> integer class index (train mode).
            transform (callable, optional): Optional transform to be applied
                on a sample (currently unused).
        """
        self.root_dir = root_dir
        file = os.path.join(self.root_dir, csv_file)
        self.size = size
        self.mode = mode
        # Only the 'drawing' column (stroke data) is needed.
        self.doodle = pd.read_csv(file, usecols=['drawing'], nrows=nrows, skiprows=skiprows) #pandas DataFrame
        # self.transform = transform
        if self.mode == 'train':
            # Class label derived from the file name, e.g. 'air plane.csv' -> 'air_plane'.
            self.txt_label= csv_file.replace(' ', '_')[:-4]
            self.label = encoding_dict[self.txt_label]
    @staticmethod
    def _draw(raw_strokes, size=256, largeur_trait=6):
        # Rasterise the stroke list onto a white 256x256 canvas with black
        # lines (largeur_trait = line width), then resize if requested.
        BASE_SIZE = 256
        img = np.full((BASE_SIZE, BASE_SIZE), 255,dtype=np.uint8)
        for t, stroke in enumerate(raw_strokes):
            for i in range(len(stroke[0]) - 1):
                color = 0
                _ = cv2.line(img, (stroke[0][i], stroke[1][i]),
                             (stroke[0][i + 1], stroke[1][i + 1]), color, largeur_trait)
        if size != BASE_SIZE:
            return cv2.resize(img, (size, size))
        else:
            return img
    def __len__(self):
        return len(self.doodle)
    def __getitem__(self, idx):
        # Stroke data is stored as a string repr of nested lists.
        raw_strokes = ast.literal_eval(self.doodle.drawing[idx])
        sample = self._draw(raw_strokes, size=self.size, largeur_trait=6)
        # if self.transform:
        #     sample = self.transform(sample)
        # Normalise pixels to [0, 1]; add a leading channel dimension.
        if self.mode == 'train':
            return (sample[None] / 255).astype('float32'), self.label
        else:
            return (sample[None] / 255).astype('float32')
#Pour toutes les classe, nb_row par classe
def create_huge_data_set(path,nb_rows=1000,size_image=224,encoding_dict=None,skip_rows=None,filenames=None,mode="train"):
'''
Concatène les dataset de plusieurs classes
:param path: path où se trouve le dossier avec les csv
:param nb_rows: Nombre de rows par classes
:param size_image:
:param filenames: si on veut des classe particulières ex : [airplane.csv, angel.csv]
:return:
'''
if filenames==None:
filenames = os.listdir(path)
doodles = ConcatDataset([DoodlesDataset(fn,path,nrows=nb_rows, size=size_image,
skiprows=skip_rows,encoding_dict=encoding_dict,mode=mode)
for fn in filenames])
return doodles
def generate_random_dataset(path, nb_row_valid, nb_rows_test, nb_rows, dict_nb_lignes, size_image=224, encoding_dict=None, filenames=None):
    '''
    For each class in *filenames*, sample nb_rows random rows from the
    class CSV, always skipping the first (nb_row_valid + nb_rows_test)
    rows, which are reserved for validation/test.

    :param path: directory that contains the per-class CSV files
    :param nb_row_valid: rows reserved for validation at the top of each file
    :param nb_rows_test: rows reserved for test at the top of each file
    :param nb_rows: number of training rows to sample per class
    :param dict_nb_lignes: file name -> total row count (see
        create_dict_nb_ligne)
    :param size_image: output image size (pixels)
    :param encoding_dict: label -> class-index mapping
    :param filenames: specific class files; defaults to every file in *path*
    :return: a ConcatDataset spanning all requested classes
    '''
    # Fix: identity comparison with None (was `filenames == None`).
    if filenames is None:
        filenames = os.listdir(path)
    nb_lignes_skip = nb_row_valid + nb_rows_test
    list_dataset = []
    for fn in filenames:
        n = dict_nb_lignes[fn]
        # Skip the reserved head rows plus a random selection of the rest,
        # leaving exactly nb_rows rows to be read.
        skip = list(range(1, nb_lignes_skip)) + sorted(random.sample(range(nb_lignes_skip, n), n - nb_rows - nb_lignes_skip))
        data_set = DoodlesDataset(fn, path, nrows=nb_rows, size=size_image,
                                  skiprows=skip, encoding_dict=encoding_dict, mode="train")
        list_dataset.append(data_set)
    doodles = ConcatDataset(list_dataset)
    return doodles
def create_dict_nb_ligne(path,filenames=None):
    '''
    Build a dictionary of the number of data rows in each CSV file
    (header excluded) and pickle it under ``saves_obj/``.

    :param path: directory that contains the CSV files
    :param filenames: specific files to count; defaults to every file in *path*
    :return: dict mapping file name -> row count
    '''
    # Fixes: identity comparison with None, and the file handle is now
    # closed deterministically (the original `open()` inside sum() relied
    # on garbage collection).
    if filenames is None:
        filenames = os.listdir(path)
    dict_nb_ligne = {}
    for fn in filenames:
        with open(path + fn) as f:
            # Subtract 1 for the header line.
            n = sum(1 for line in f) - 1
        dict_nb_ligne[fn] = n
    save_object(dict_nb_ligne, "saves_obj/dict_nb_ligne.pk")
    return dict_nb_ligne
def imshow(img_tensor):
    """Display a single-channel image tensor in grayscale via matplotlib."""
    npimg = img_tensor.numpy()
    # print(npimg)
    plt.imshow(npimg,cmap="gray")
    plt.show()
if __name__ == "__main__":
path = 'D:/User/William/Documents/Devoir/Projet Deep/data/mini_train/'
# path = 'D:/User/William/Documents/Devoir/Projet Deep/data/train_simplified/'
filenames = os.listdir(path)
filenames = [path + x for x in filenames]
size_image = 224
select_nrows = 1000
csv_file=filenames[0].split('/')[-1]
#Créer data set pour un csv file en particulier
# essai=DoodlesDataset(csv_file, path,nrows=select_nrows, size=size_image,skiprows=range(1,10))
# loader=DataLoader(essai,batch_size=10)
# for image, label in loader:
# print(image)
# t1=image[0,0,:,:]
# #imshow(t1)
# print(label)
doodles = ConcatDataset([DoodlesDataset(fn.split('/')[-1], path,
nrows=select_nrows, size=size_image) for fn in filenames])
loader = DataLoader(doodles, batch_size=2,shuffle=True)
i=0
for image, label in loader:
# print(image)
t1 = image[0, 0, :, :]
t2=image[1,0,:,:]
# imshow(t1)
# imshow(t2)
i+=2
print(i)
print(label)
print("end")
|
[
"44324703+William-Bourget@users.noreply.github.com"
] |
44324703+William-Bourget@users.noreply.github.com
|
8a307d78726f1ec71e12b3c0c22f5ac21db4bcf9
|
dd38578f9622b1ea54838340711a96d57fcfbbc6
|
/kbm/migrations/0004_auto_20200629_0017.py
|
29157ee0c71fb1bf849f5a93c6227cfca901170a
|
[] |
no_license
|
nabaman/sistem-informasi-akademik
|
544d3563c922f105d310bb6377d236e6022fcb2c
|
1ffb46e86ab76c6c4b98a10862acf01a5676d574
|
refs/heads/master
| 2022-11-17T05:54:07.808086
| 2020-07-01T06:02:28
| 2020-07-01T06:02:28
| 257,820,789
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
# Generated by Django 3.0.5 on 2020-06-28 17:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make kbm.data_jurusan.jurusan nullable (max_length=30)."""
    dependencies = [
        ('kbm', '0003_auto_20200609_2156'),
    ]
    operations = [
        migrations.AlterField(
            model_name='data_jurusan',
            name='jurusan',
            field=models.CharField(max_length=30, null=True),
        ),
    ]
|
[
"naba.alvian@gmail.com"
] |
naba.alvian@gmail.com
|
373405fdc1325b6237b78fad3cdd074864c92bc5
|
45f6c7f6e7160c5535d74582c6ba165ee21ab56e
|
/test/test_van/test_saved_lists.py
|
740bb5997167057c1ca394ba2c8000ab159b836b
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
move-coop/parsons
|
1b0bcfcbb8c0e1edb26d06664c6ecb7370eae864
|
553b0ede3e41cc9811c48d768d05953f8cf75312
|
refs/heads/main
| 2023-08-21T12:34:34.004784
| 2023-08-17T17:46:09
| 2023-08-17T17:46:09
| 204,343,221
| 240
| 122
|
NOASSERTION
| 2023-09-14T20:55:15
| 2019-08-25T19:56:21
|
Python
|
UTF-8
|
Python
| false
| false
| 7,500
|
py
|
import unittest
import os
import requests_mock
import unittest.mock as mock
from parsons import VAN, Table
from test.utils import validate_list
from parsons.utilities import cloud_storage
class TestSavedLists(unittest.TestCase):
    def setUp(self):
        # Fresh VAN client per test; raise_for_status disabled so mocked
        # error responses can be inspected rather than raised.
        self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters", raise_for_status=False)
    def tearDown(self):
        # Nothing to clean up: the VAN client holds no persistent resources.
        pass
    @requests_mock.Mocker()
    def test_get_saved_lists(self, m):
        """get_saved_lists() should return a table with the API's columns."""
        json = {
            "count": 1,
            "items": [
                {
                    "savedListId": 517612,
                    "listCount": 974656,
                    "name": "LikelyParents(16andunder)_DWID_S... - MN",
                    "doorCount": 520709,
                    "description": "null",
                }
            ],
            "nextPageLink": None,
        }
        m.get(self.van.connection.uri + "savedLists", json=json)
        expected = ["savedListId", "listCount", "name", "doorCount", "description"]
        self.assertTrue(validate_list(expected, self.van.get_saved_lists()))
@requests_mock.Mocker()
def test_get_saved_list(self, m):
saved_list_id = 517612
json = {
"savedListId": 517612,
"listCount": 974656,
"name": "LikelyParents(16andunder)_DWID_S... - MN",
"doorCount": 520709,
"description": "null",
}
m.get(self.van.connection.uri + f"savedLists/{saved_list_id}", json=json)
# expected = ['savedListId', 'listCount', 'name', 'doorCount', 'description']
self.assertEqual(self.van.get_saved_list(saved_list_id), json)
def test_upload_saved_list(self):
cloud_storage.post_file = mock.MagicMock()
cloud_storage.post_file.return_value = "https://box.com/my_file.zip"
self.van.connection._soap_client = mock.MagicMock()
self.van.get_folders = mock.MagicMock()
self.van.get_folders.return_value = [{"folderId": 1}]
tbl = Table([["VANID"], ["1"], ["2"], ["3"]])
self.van.upload_saved_list(
tbl, "GOTV List", 1, replace=True, url_type="S3", bucket="tmc-scratch"
)
assert self.van.connection._soap_client.service.CreateAndStoreSavedList.called
@requests_mock.Mocker()
def test_upload_saved_list_rest(self):
cloud_storage.post_file = mock.MagicMock()
cloud_storage.post_file.return_value = "https://box.com/my_file.zip"
self.van.get_folders = mock.MagicMock()
self.van.get_folders.return_value = [{"folderId": 1}]
tbl = Table([["VANID"], ["1"], ["2"], ["3"]])
response = self.van.upload_saved_list_rest(
tbl=tbl,
url_type="S3",
folder_id=1,
list_name="GOTV List",
description="parsons test list",
callback_url="https://webhook.site/69ab58c3-a3a7-4ed8-828c-1ea850cb4160",
columns=["VANID"],
id_column="VANID",
bucket="tmc-scratch",
overwrite=517612,
)
self.assertIn("jobId", response)
@requests_mock.Mocker()
def test_get_folders(self, m):
json = {
"count": 2,
"items": [
{"folderId": 5046, "name": "#2018_MN_active_universe"},
{"folderId": 2168, "name": "API Generated Lists"},
],
"nextPageLink": None,
}
m.get(self.van.connection.uri + "folders", json=json)
expected = ["folderId", "name"]
self.assertTrue(validate_list(expected, self.van.get_folders()))
@requests_mock.Mocker()
def test_get_folder(self, m):
folder_id = 5046
json = {"folderId": 5046, "name": "#2018_MN_active_universe"}
m.get(self.van.connection.uri + f"folders/{folder_id}", json=json)
self.assertEqual(json, self.van.get_folder(folder_id))
@requests_mock.Mocker()
def test_export_job_types(self, m):
json = {
"count": 1,
"items": [{"exportJobTypeId": 4, "name": "SavedListExport"}],
"nextPageLink": None,
}
m.get(self.van.connection.uri + "exportJobTypes", json=json)
expected = ["exportJobTypeId", "name"]
self.assertTrue(validate_list(expected, self.van.get_export_job_types()))
@requests_mock.Mocker()
def test_export_job_create(self, m):
saved_list_id = 517612
json = {
"status": "Completed",
"errorCode": "null",
"exportJobGuid": "bf4d1297-1c77-3fb2-03bd-f0acda122d37",
"activistCodes": "null",
"canvassFileRequestId": 448,
"dateExpired": "2018-09-08T16:04:00Z",
"surveyQuestions": "null",
"webhookUrl": "https://www.nothing.com/",
"downloadUrl": "https://ngpvan.blob.core.windows.net/canvass-files-savedlistexport/bf4d1297-1c77-3fb2-03bd-f0acda122d37_2018-09-08T13:03:27.7191831-04:00.csv", # noqa: E501
"savedListId": 517612,
"districtFields": "null",
"canvassFileRequestGuid": "bf4d1297-1c77-3fb2-03bd-f0acda122d37",
"customFields": "null",
"type": 4,
"exportJobId": 448,
}
m.post(self.van.connection.uri + "exportJobs", json=json, status_code=201)
# expected = [
# 'status',
# 'errorCode',
# 'exportJobGuid',
# 'activistCodes',
# 'canvassFileRequestId',
# 'dateExpired',
# 'surveyQuestions',
# 'webhookUrl',
# 'downloadUrl',
# 'savedListId',
# 'districtFields',
# 'canvassFileRequestGuid',
# 'customFields',
# 'type',
# 'exportJobId']
self.assertEqual(json, self.van.export_job_create(saved_list_id))
@requests_mock.Mocker()
def test_get_export_job(self, m):
export_job_id = 448
json = {
"status": "Completed",
"errorCode": "null",
"exportJobGuid": "bf4d1297-1c77-3fb2-03bd-f0acda122d37",
"activistCodes": "null",
"canvassFileRequestId": 448,
"dateExpired": "2018-09-08T16:04:00Z",
"surveyQuestions": "null",
"webhookUrl": "https://www.nothing.com/",
"downloadUrl": "https://ngpvan.blob.core.windows.net/canvass-files-savedlistexport/bf4d1297-1c77-3fb2-03bd-f0acda122d37_2018-09-08T13:03:27.7191831-04:00.csv", # noqa: E501
"savedListId": 517612,
"districtFields": "null",
"canvassFileRequestGuid": "bf4d1297-1c77-3fb2-03bd-f0acda122d37",
"customFields": "null",
"type": 4,
"exportJobId": 448,
}
# expected = [
# 'status',
# 'errorCode',
# 'exportJobGuid',
# 'activistCodes',
# 'canvassFileRequestId',
# 'dateExpired',
# 'surveyQuestions',
# 'webhookUrl',
# 'downloadUrl',
# 'savedListId',
# 'districtFields',
# 'canvassFileRequestGuid',
# 'customFields',
# 'type',
# 'exportJobId']
m.get(self.van.connection.uri + f"exportJobs/{export_job_id}", json=json)
self.assertEqual(json, self.van.get_export_job(export_job_id))
|
[
"noreply@github.com"
] |
move-coop.noreply@github.com
|
73cc2b03cd8658a38a334360c792ef745c6ead8b
|
b94c4c88aa3661696a8f8a6677c845a882d4091e
|
/src/main.py
|
b0ed8bfe850e44f49de124164adce4397b6cf652
|
[
"MIT"
] |
permissive
|
pestefo/Coon
|
b4f1e62b97a330844140f00057a5bda184cf774e
|
8caa8b683b54b0c0047cbe1095ccf5576352c6ac
|
refs/heads/master
| 2021-01-02T21:45:19.393150
| 2020-02-11T16:49:36
| 2020-02-11T16:49:36
| 239,814,056
| 0
| 0
|
MIT
| 2020-02-11T16:52:15
| 2020-02-11T16:47:54
|
Python
|
UTF-8
|
Python
| false
| false
| 654
|
py
|
"""Script for Default keyword"""
import sys
import currency
from currency.workflow import Workflow3
def main(workflow):
    """Dispatch the first CLI argument to the matching currency action.

    Falls back to the help action when the argument is not one of the
    ``currency`` package's public names.
    """
    requested = str(workflow.args.pop(0))
    if requested in currency.__all__:
        handler = getattr(currency, requested)
    else:
        handler = currency.help_me
    workflow.run(handler)
if __name__ == "__main__":
    # Workflow defaults: the favorite currencies shown when none are
    # configured; self-update is checked weekly against the GitHub releases.
    WF = Workflow3(
        default_settings={
            "favorites": ["EUR", "CNY", "JPY", "GBP"]
        },
        update_settings={
            "github_slug": "tomy0000000/coon",
            "frequency": 7
        },
        help_url="https://git.io/fjD6M")
    sys.exit(WF.run(main))
|
[
"tomy0000000@gmail.com"
] |
tomy0000000@gmail.com
|
7f0a093f4f97b501f6e1600dd3dd4537b88a9ef0
|
375e5bca82843647941068bd7634cf7adf2015ca
|
/tests/test_transform_affine3.py
|
d2f6651ed72b7052691ec6a9f54e6165a33fa6de
|
[
"MIT"
] |
permissive
|
civodlu/trw
|
cd57e7bded7fdb0a9d623ed9cd50645fab96583b
|
11c59dea0072d940b036166be22b392bb9e3b066
|
refs/heads/master
| 2023-02-08T09:56:39.203340
| 2023-02-07T14:22:16
| 2023-02-07T14:22:16
| 195,147,670
| 12
| 2
|
MIT
| 2020-10-19T15:24:11
| 2019-07-04T01:19:31
|
Python
|
UTF-8
|
Python
| false
| false
| 4,555
|
py
|
import os
import math
import unittest
import trw
import torch
import numpy as np
class TestTransformsAffine(unittest.TestCase):
    """Tests for trw affine transforms in 2D/3D, including joint transforms."""

    def test_2d_identity_nn(self):
        # An identity matrix with nearest-neighbour sampling must be lossless.
        matrix2 = [
            [1, 0, 0],
            [0, 1, 0],
        ]
        matrix2 = torch.FloatTensor(matrix2)
        images = torch.arange(2 * 5 * 10, dtype=torch.float32).view((2, 1, 5, 10))
        images_tfm2 = trw.transforms.affine_transform(images, matrix2, interpolation='nearest')
        assert int((images == images_tfm2).all()) == 1

    def test_3d_identity_nn(self):
        # 3D identity with default interpolation: allow small numeric error.
        matrix = [
            [1, 0, 0, 0],
            [0, 1, 0, 0],
            [0, 0, 1, 0],
        ]
        matrix = torch.FloatTensor(matrix)
        images = torch.arange(2 * 5 * 10 * 3, dtype=torch.float32).view((2, 1, 5, 10, 3))
        images_tfm = trw.transforms.affine_transform(images, matrix)
        assert torch.max((images - images_tfm).abs()) < 1e-4

    def test_2d_translation_nn(self):
        images = torch.arange(2 * 5 * 10, dtype=torch.float).view((2, 1, 5, 10))
        # Fix: the original accidentally built a 1-tuple via a trailing comma
        # and then indexed it back out with [0]; build the matrix directly.
        m = torch.FloatTensor([
            [1, 0, -1],
            [0, 1, -2],
            [0, 0, 1],
        ])
        m = trw.transforms.to_voxel_space_transform(m, images[0].shape)
        images_tfm = trw.transforms.affine_transform(images, torch.cat((m.unsqueeze(0), m.unsqueeze(0))), interpolation='nearest')
        # A (-1, -2) translation shifts content; compare the overlapping crops.
        assert torch.max(torch.abs(images[:, :, 2:, 1:] - images_tfm[:, :, :-2, :-1])) < 1e-4

    def test_2d_image(self):
        # Translation followed by a 45-degree rotation on a real RGB image;
        # the result is written out for visual inspection.
        matrix = trw.transforms.affine_transformation_translation([80, 0])
        matrix = torch.mm(matrix, trw.transforms.affine_transformation_rotation2d(1 * math.pi / 4))
        from PIL import Image
        image_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'tutorials', 'input_images', '2007_008764.jpg')
        images = Image.open(image_path)
        images = np.asarray(images).transpose((2, 0, 1))
        images = images.reshape([1] + list(images.shape))
        images = torch.from_numpy(images).float()
        images_tfm = trw.transforms.affine_transform(
            images,
            trw.transforms.to_voxel_space_transform(matrix, images[0].shape),
            interpolation='nearest')
        i = np.uint8(images_tfm.numpy())[0, 0]
        options = trw.train.Options()
        root = options.workflow_options.logging_directory
        Image.fromarray(np.stack((i, i, i), axis=2)).save(os.path.join(root, 'transformed.png'))

    def test_affine_2d_joint(self):
        # Random affine applied to two keys of the same batch must stay in sync.
        options = trw.train.Options()
        root = options.workflow_options.logging_directory
        from PIL import Image
        image_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'tutorials', 'input_images', '2007_008764.jpg')
        images = Image.open(image_path)
        images.save(os.path.join(root, 'affine_original.png'))
        images = np.asarray(images).transpose((2, 0, 1))
        images = images.reshape([1] + list(images.shape))
        images = torch.from_numpy(images).float()
        batch = {
            'images': images,
            'images_joint': images
        }
        tfm = trw.transforms.TransformAffine([45, 50], [0.7, 1.3], .5, padding_mode='reflection')
        for n in range(10):
            transformed_batch = tfm(batch)
            i = np.uint8(transformed_batch['images'].numpy())[0, 0]
            Image.fromarray(np.stack((i, i, i), axis=2)).save(os.path.join(root, f'affine_transformed_{n}.png'))
            assert (transformed_batch['images'] == transformed_batch['images_joint']).all()

    def test_affine_3d_joint(self):
        # Synthetic 3D volume with a bright box; saved as .npy for inspection.
        options = trw.train.Options()
        root = options.workflow_options.logging_directory
        shape = [32, 64, 96]
        shape2 = [32 // 2, 64 // 2, 96 // 2]
        images = torch.ones(shape, dtype=torch.float32)
        images[shape2[0]-5:shape2[0]+5, shape2[1]-10:shape2[1]+10, shape2[2]-15:shape2[2]+15] = 3.0
        images = images.unsqueeze(0).unsqueeze(0)  # add N, C components
        batch = {
            'images': images,
            'images_joint': images
        }
        i = images.numpy()[0, 0]
        # Fix: dropped the pointless f-prefix (no placeholder in the string).
        np.save(os.path.join(root, 'affine_transformed_3d_original.png'), i)
        tfm = trw.transforms.TransformAffine(0, 1, 0.9)
        for n in range(10):
            transformed_batch = tfm(batch)
            i = transformed_batch['images'].numpy()[0, 0]
            np.save(os.path.join(root, f'affine_transformed_3d_{n}.png'), i)
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
|
[
"civodlu@gmail.com"
] |
civodlu@gmail.com
|
a80d0ebfec737cf90fc2e079f2ebe80f10496421
|
562cc46d23d69f399a5f807d11ac567d8f30b567
|
/env/bin/symilar
|
b0ebfe4bd208743b9437624b9eda91935d85e19b
|
[] |
no_license
|
JahanzebNawaz/DJANGO-CRUD
|
ab77a31c84134f10aee43b4fdc1900b4223b36ba
|
b3e848f8752c3755bcd26aeefe59eaedbcc06e8d
|
refs/heads/master
| 2022-01-26T13:29:30.218067
| 2020-05-03T11:43:58
| 2020-05-03T11:43:58
| 220,016,612
| 0
| 0
| null | 2020-05-03T11:43:59
| 2019-11-06T14:32:25
|
Python
|
UTF-8
|
Python
| false
| false
| 258
|
#!/home/jk/JKROOT/GITHUB_REPO/DJANGO-CRUD/env/bin/python3
# -*- coding: utf-8 -*-
"""Console-script shim (virtualenv bin/) that launches pylint's symilar tool."""
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
    # Normalize argv[0]: strip setuptools' "-script.pyw"/".exe" wrapper suffix.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run_symilar())
|
[
"jahanzaib.ch9996@gmail.com"
] |
jahanzaib.ch9996@gmail.com
|
|
f6e12a2fecf32423abd6f204856a0263e6998195
|
f0526130407daf1484ba0a228fb16683aa0fa509
|
/muchopper/common/queries.py
|
c45db0eec3bcf31cd446399bd333b29bb42491fa
|
[
"Apache-2.0"
] |
permissive
|
ericschdt/muchopper
|
f8538f9e58b3512a3341731bc983fa2fbe5a749f
|
d621b8853360cfdc9f745aefbb9f9ec5d02d1a06
|
refs/heads/master
| 2020-08-06T21:10:18.316372
| 2019-10-06T11:30:17
| 2019-10-06T11:30:17
| 213,155,935
| 0
| 0
|
NOASSERTION
| 2019-10-06T11:30:18
| 2019-10-06T11:27:11
| null |
UTF-8
|
Python
| false
| false
| 2,611
|
py
|
import shlex
import sqlalchemy
from . import model
def base_filter(q, include_closed=False):
    """Apply the baseline MUC visibility filters to query *q*.

    Hidden rooms are always excluded; closed rooms only when
    *include_closed* is False.  The ``== True`` / ``== False`` comparisons
    are intentional SQLAlchemy column expressions (hence the NOQA markers).
    """
    if not include_closed:
        q = q.filter(
            model.MUC.is_open == True  # NOQA
        )
    return q.filter(
        model.MUC.is_hidden == False  # NOQA
    )
def base_query(session, *,
               include_closed=False,
               with_avatar_flag=False):
    """Build the base (MUC, PubliclyListedMUC[, has_avatar]) query.

    With *with_avatar_flag* set, a third column is selected: a boolean
    expression that is true when an Avatar row with a non-NULL address is
    joined (outer join, so rooms without avatars are kept).
    """
    if with_avatar_flag:
        q = session.query(
            model.MUC,
            model.PubliclyListedMUC,
            model.Avatar.address != None,  # NOQA
        ).join(
            model.PubliclyListedMUC,
        ).outerjoin(
            model.Avatar,
        )
    else:
        q = session.query(
            model.MUC,
            model.PubliclyListedMUC
        ).join(
            model.PubliclyListedMUC
        )
    return base_filter(q, include_closed=include_closed)
def common_query(session, *,
                 min_users=1,
                 **kwargs):
    """Standard room listing: base filters, popularity threshold, ordering.

    NOTE: the comparison is strict (``>``), so the default of 1 requires a
    moving average *above* 1.  Extra keyword arguments are forwarded to
    :func:`base_query`.
    """
    q = base_query(session, **kwargs)
    if min_users > 0:
        q = q.filter(
            model.MUC.nusers_moving_average > min_users
        )
    return q.order_by(
        model.MUC.nusers_moving_average.desc(),
        model.MUC.address.asc(),
    )
def chain_condition(conditional, new):
    """OR *new* onto an accumulated SQL condition (*new* itself when empty)."""
    return new if conditional is None else sqlalchemy.or_(conditional, new)
def filter_keywords(keywords, min_length):
    """Return the set of stripped keywords at least *min_length* chars long."""
    stripped = (word.strip() for word in keywords)
    return {word for word in stripped if len(word) >= min_length}
def prepare_keywords(query_string, min_length=3):
    """Split *query_string* shell-style and drop too-short keywords."""
    return filter_keywords(shlex.split(query_string), min_length)
def apply_search_conditions(q,
                            keywords,
                            search_address,
                            search_description,
                            search_name):
    """AND one OR-group of case-insensitive substring matches per keyword.

    Each keyword must match at least one of the enabled columns; all
    keywords must match (one ``filter`` call per keyword).
    """
    searchable = [
        (search_address, model.PubliclyListedMUC.address),
        (search_description, model.PubliclyListedMUC.description),
        (search_name, model.PubliclyListedMUC.name),
    ]
    for keyword in keywords:
        pattern = "%" + keyword + "%"
        conditional = None
        for enabled, column in searchable:
            if enabled:
                conditional = chain_condition(
                    conditional,
                    column.ilike(pattern)
                )
        q = q.filter(conditional)
    return q
|
[
"j.wielicki@sotecware.net"
] |
j.wielicki@sotecware.net
|
ef7700a3b968cac227b909d38a28fa784053110b
|
36e4a3581877736a501a74bfdfc10bbbd4386b8a
|
/tests/conftest.py
|
23314e52187708a98fe75147a8829b79b72f0019
|
[
"Apache-2.0"
] |
permissive
|
valmac/lean-cli
|
1620300e7bf9428df269bae26b47a4615525144a
|
88a191afadf7bfe766665fa67c552390cb2e3951
|
refs/heads/main
| 2023-03-08T10:40:15.541980
| 2021-02-11T00:52:47
| 2021-02-11T00:52:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,500
|
py
|
import os
from pathlib import Path
import pytest
from pyfakefs.fake_filesystem import FakeFilesystem
from responses import RequestsMock
from lean.container import container
# conftest.py is ran by pytest before loading each testing module
# Fixtures defined in here are therefore available in all testing modules
@pytest.fixture(autouse=True)
def mock_filesystem(fs: FakeFilesystem) -> FakeFilesystem:
    """A pytest fixture which mocks the filesystem before each test."""
    # Requesting the "fs" fixture activates pyfakefs, so every filesystem
    # call below runs against an in-memory fake filesystem.
    workdir = Path.home() / "testing"
    fs.create_dir(workdir)
    os.chdir(workdir)
    # Reset singletons so fresh Path instances are created on the fake fs.
    container.reset_singletons()
    return fs
@pytest.fixture(autouse=True)
def requests_mock() -> RequestsMock:
    """A pytest fixture which mocks the requests library before each test.

    If a test makes an HTTP request which hasn't been mocked, the request will fail.
    """
    # RequestsMock asserts on exit that all registered responses were used.
    with RequestsMock() as mock:
        yield mock
@pytest.fixture(autouse=True)
def reset_container_overrides() -> None:
    """A pytest fixture which makes sure all container and provider overrides are reset before each test."""
    # Clear per-provider overrides first, then the container-level override.
    for provider in container.traverse():
        provider.reset_override()
    container.reset_override()
|
[
"jaspervmerle@gmail.com"
] |
jaspervmerle@gmail.com
|
d92546f21afa2d9f3d90a90399e84e3bb189d0eb
|
1966d4ee937abc2febb80af14ea37b3316428ee9
|
/HackerRank/By Challenge/Charging the Batteries/python/solution.py
|
5159ded20b702117fd27463677e5700549ec4b56
|
[] |
no_license
|
spanktastic2120/fun
|
8083ea33c014062ef791192f1d25d2c3bc45c7fa
|
de23f404a0505576e00730d06b32aac4ae1e7e75
|
refs/heads/master
| 2021-06-03T23:39:31.305198
| 2019-08-05T07:55:13
| 2019-08-05T07:55:13
| 18,019,488
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
#!/bin/python
# Python 2 solution: sockets on the perimeter of an n-by-n square are
# flattened to positions 0..4n (counter-clockwise); the answer is the
# shortest perimeter span covering k sockets, including wrap-around.
import sys
if __name__ == "__main__":
    n, m, k = map(int, raw_input().strip().split(' '))
    sockets = []
    for _ in xrange(m):
        x, y = map(int, raw_input().strip().split(' '))
        # left
        if not x:
            sockets.append(y)
        # bottom
        elif not y:
            sockets.append((4*n) - x)
        # right
        elif x == n:
            sockets.append((3*n) - y)
        # top
        else:
            sockets.append(n + x)
        #print sockets[-1]
    sockets = sorted(sockets)
    best = n*4
    # check contiguous sockets (windows of k consecutive sorted positions)
    for i in xrange(m-k):
        if sockets[i+k-1] - sockets[i] < best:
            best = sockets[i+k-1] - sockets[i]
        if not best:
            break
    # check wrapped sockets (windows crossing the 4n -> 0 seam)
    if best:
        for i in xrange(k-1):
            if ((sockets[i] + (4 * n)) - sockets[i + 1 - k]) < best:
                best = (sockets[i] + (4 * n)) - sockets[i + 1 - k]
            if not best:
                break
    print best
|
[
"dictator@iwantmyowncountry.info"
] |
dictator@iwantmyowncountry.info
|
5ea2d8e12d59b152aba52323d7c529c2a61cbda6
|
c6fbf6df12cb7de82b3060a7bc12fee9b46e5990
|
/Chapter10/ch10-9.ML_ann.py
|
6c23988b2637bec8bdfe79e24d198ebb675f9bea
|
[
"MIT"
] |
permissive
|
tschoi6712/pythonDataAnalysis2nd
|
5f0d07493bb835c76aa9fbe7100834e5a8341912
|
63e366d4dee52f7e4df6cf4d988a85d6de5b00e4
|
refs/heads/master
| 2020-08-09T14:42:55.831842
| 2019-10-10T06:53:24
| 2019-10-10T06:53:24
| 214,108,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
"""
예측분석과 머신러닝 - (9) Neural Networks: pip install theanets nose_parameterized
conda install m2w64-toolchain
"""
import numpy as np
import theanets
import multiprocessing
from sklearn import datasets
from sklearn.metrics import accuracy_score
rain = .1 * np.load('ch10.rain.npy')
rain[rain < 0] = .05/2
dates = np.load('ch10.doy.npy')
x = np.vstack((dates[:-1], np.sign(rain[:-1])))
x = x.T
y = np.vstack(np.sign(rain[1:]),)
N = int(.9 * len(x))
train = [x[:N], y[:N]]
valid = [x[N:], y[N:]]
net = theanets.Regressor(layers=[2, 3, 1])
net.train(train, valid, learning_rate=0.1, momentum=0.5)
pred = net.predict(x[N:]).ravel()
print("Pred Min", pred.min(), "Max", pred.max())
print("Y Min", y.min(), "Max", y.max())
print("Accuracy", accuracy_score(y[N:], pred >= .5))
|
[
"tschoi6712@gmail.com"
] |
tschoi6712@gmail.com
|
77c20440aaf6a3fb84a2337d4e42929b8d240a79
|
6d1bf00636259c1a65842a8dd49ea2037218cc8d
|
/Admin_console/Summary_Report.py
|
017d7ef82d692280cd82d33e66e200171626829a
|
[] |
no_license
|
chetandg123/Release_1.2
|
efb4b0844b57638d23ac09783e0cd751893058ad
|
f9ff8add5930c7779ab1954c779f8f0e8cd1e908
|
refs/heads/master
| 2022-12-05T21:12:14.671813
| 2020-08-20T21:33:50
| 2020-08-20T21:33:50
| 288,701,130
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,632
|
py
|
import time
import unittest
from selenium.webdriver.support.select import Select
from Data.parameters import Data
from get_dir import pwd
from reuse_func import GetData
class Test_summaryreport(unittest.TestCase):
    """Selenium UI tests for the cQube admin-console summary statistics report."""
    @classmethod
    def setUpClass(self):
        # NOTE(review): conventionally this classmethod parameter is named `cls`.
        # Logs into the admin console once for the whole test class.
        self.data = GetData()
        self.p = pwd()
        self.driver = self.data.get_driver()
        self.data.open_cqube_appln(self.driver)
        self.data.page_loading(self.driver)
        self.data.login_to_adminconsole(self.driver)
    def test_summary_icon(self):
        """Clicking the summary icon navigates to the summary-statistics page."""
        count = 0
        self.data.page_loading(self.driver)
        self.driver.find_element_by_xpath("//*[@id='summary']/img").click()
        self.data.page_loading(self.driver)
        if 'summary-statistics' in self.driver.current_url:
            print("Summmary statistics report page is present ")
        else:
            print('Summary report page is not displayed')
            count = count + 1
        self.assertEqual(0,count,msg='Summary report page is not working')
        self.driver.find_element_by_id('homeBtn').click()
        self.data.page_loading(self.driver)
    def test_dashboard_summary(self):
        """The dashboard menu's summary entry reaches the same report page."""
        count = 0
        self.driver.find_element_by_id(Data.Dashboard).click()
        time.sleep(2)
        self.driver.find_element_by_xpath("//*[@id='summary']/div/td[2]").click()
        self.data.page_loading(self.driver)
        if 'summary-statistics' in self.driver.current_url:
            print("Summmary statistics report page is present ")
        else:
            print('Summary report page is not displayed')
            count = count + 1
        self.assertEqual(0,count,msg='Summary report page is not working')
        self.driver.find_element_by_id('homeBtn').click()
        self.data.page_loading(self.driver)
    def test_check_summary(self):
        """The report page lists at least one (ideally > 6) summary sections."""
        self.data.page_loading(self.driver)
        self.driver.find_element_by_id(Data.Dashboard).click()
        time.sleep(2)
        self.driver.find_element_by_xpath("//*[@id='summary']/div/td[2]").click()
        self.data.page_loading(self.driver)
        reports =self.driver.find_elements_by_tag_name('h2')
        count = len(reports)
        for i in range(len(reports)):
            print(reports[i].text)
        self.assertNotEqual(0,count,msg='All summary reports are not present')
        if count > 6:
            print("summary report of all files to be updated")
        self.data.page_loading(self.driver)
        self.driver.find_element_by_id('homeBtn').click()
        self.data.page_loading(self.driver)
    @classmethod
    def tearDownClass(cls):
        # Close the browser session shared by the whole class.
        cls.driver.close()
|
[
"ubuntu@ip-172-31-31-236.ap-south-1.compute.internal"
] |
ubuntu@ip-172-31-31-236.ap-south-1.compute.internal
|
fbd63bd85e48a99cbd17501c5e7a58f779e502b2
|
a55756b40e0d14b6a4c366b55be65966e1934ec2
|
/Recursion ADD(1).py
|
8362065f9df3c9311c4efe9b885631a0f0d57111
|
[] |
no_license
|
rajeevj0909/FunFiles
|
97323135bb5120d2d3a2458847a73f91dfa944bf
|
85f4a430f0e71d1fee9c753bceb566ec319e5da0
|
refs/heads/master
| 2022-07-02T15:43:11.495874
| 2020-05-16T23:29:02
| 2020-05-16T23:29:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
def add(list1, counter, total):
    """Recursively sum ``list1[counter:]`` into ``total``.

    Prints the grand total once the end of the list is reached, and also
    returns it so callers can use the value.

    Fixes the original, which recursed on a list *element* instead of the
    list (raising TypeError on ``len``), never advanced ``counter``, and had
    unreachable statements after its ``return``.
    """
    if counter == len(list1):
        print(total)
        return total
    # Consume the current element and advance to the next index.
    return add(list1, counter + 1, total + list1[counter])
# Demo: recursively sum a sample list, starting at index 0 with total 0.
numbers = [1, 2, 3, 4]
add(numbers, 0, 0)
|
[
"noreply@github.com"
] |
rajeevj0909.noreply@github.com
|
a0b302a8a168a912fbece6444aa9e7f4710361c3
|
c15a28ae62eb94dbf3ed13e2065195e572a9988e
|
/Cook book/src/11/passing_a_socket_file_descriptor_between_processes/server.py
|
05d6b07b5f08832aaaca3ea0ad4f14ffc9c19a1c
|
[] |
no_license
|
xuyuchends1/python
|
10798c92840a1a59d50f5dc5738b2881e65f7865
|
545d950a3d2fee799902658e8133e3692939496b
|
refs/heads/master
| 2021-01-25T07:07:04.812140
| 2020-02-28T09:25:15
| 2020-02-28T09:25:15
| 93,647,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,098
|
py
|
# server.py
import socket
import struct
def send_fd(sock, fd):
    '''
    Send a single file descriptor.
    '''
    # SCM_RIGHTS ancillary data carries the descriptor; the b'x' payload is a
    # dummy byte because sendmsg requires at least some regular data.
    sock.sendmsg([b'x'],
                 [(socket.SOL_SOCKET, socket.SCM_RIGHTS, struct.pack('i', fd))])
    # Wait for the receiver's two-byte acknowledgement before continuing.
    ack = sock.recv(2)
    assert ack == b'OK'
def server(work_address, port):
    """Accept TCP clients and hand each connection's fd to a worker process.

    First waits for a single worker to connect on the Unix socket at
    *work_address*, then listens on *port* and forwards every accepted
    client descriptor to that worker via send_fd.
    """
    # Wait for the worker to connect
    work_serv = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    work_serv.bind(work_address)
    work_serv.listen(1)
    worker, addr = work_serv.accept()
    # Now run a TCP/IP server and send clients to worker
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
    s.bind(('', port))
    s.listen(1)
    while True:
        client, addr = s.accept()
        print('SERVER: Got connection from', addr)
        send_fd(worker, client.fileno())
        # Close our copy; the worker now owns a duplicate of the descriptor.
        client.close()
if __name__ == '__main__':
    import sys
    # Usage: server.py <unix-socket-path> <tcp-port>
    if len(sys.argv) != 3:
        print('Usage: server.py server_address port', file=sys.stderr)
        raise SystemExit(1)
    server(sys.argv[1], int(sys.argv[2]))
|
[
"xuyuchends@163.com"
] |
xuyuchends@163.com
|
069de552aee7711007a0052316643c96cc5c88fd
|
5facb7b4baccff021b357ad67c966e2d92d665d1
|
/stemming.py
|
3a6a5d2215913259aacdefe3190af4f7568fe7da
|
[] |
no_license
|
sadirahman/NLP
|
73eeb1e32e7027c35b79ba3f6953b9ec51098cb7
|
844419ffd7bbb41217ab273753249757a4433865
|
refs/heads/master
| 2020-04-09T21:58:21.130129
| 2018-12-06T13:57:18
| 2018-12-06T13:57:18
| 160,617,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
"""Demonstrate Porter stemming on single words and on a tokenized sentence."""
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize

ps = PorterStemmer()

# Stem a fixed list of related word forms.
example_words = ["python", "pythoner", "pythoning", "pythoned", "pythonly"]
for sample in example_words:
    print(ps.stem(sample))

# Tokenize a sentence and stem each token.
new_text = "It is very important to be pythonly while you are pythoning with python ."
for token in word_tokenize(new_text):
    print(ps.stem(token))
|
[
"42184120+sadirahman@users.noreply.github.com"
] |
42184120+sadirahman@users.noreply.github.com
|
18c9a3ba254ee73daa90283860550d2af14d7777
|
242da8865e037f9fffb76269c3acddb73ce9fa14
|
/packages/pyright-internal/src/tests/samples/typeGuard1.py
|
ebeda0031c0154c1009d096010bb082e40aa8e7f
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
khyveasna11111908/pyright
|
f42eceae044f6fbc27552c1765b03ebd345a451c
|
493d47807b96137995e4bb6ca341930e4de911f9
|
refs/heads/main
| 2023-08-30T00:08:36.191799
| 2021-09-25T19:17:13
| 2021-09-25T19:17:13
| 410,361,483
| 1
| 1
|
NOASSERTION
| 2021-09-25T19:15:23
| 2021-09-25T19:15:22
| null |
UTF-8
|
Python
| false
| false
| 1,635
|
py
|
# This sample tests the TypeGuard functionality
# that allows user-defined functions to perform
# conditional type narrowing.
# pyright: reportMissingModuleSource=false
import os
from typing import Any, List, Literal, Tuple, TypeVar, Union
from typing_extensions import TypeGuard
_T = TypeVar("_T")
def is_two_element_tuple(a: Tuple[_T, ...]) -> TypeGuard[Tuple[_T, _T]]:
    """User-defined type guard narrowing a homogeneous tuple to length two."""
    return True
def func1(a: Tuple[int, ...]):
    # Positive branch narrows via the TypeGuard; the negative branch keeps
    # the declared type (TypeGuard does not narrow the else case).
    if is_two_element_tuple(a):
        t1: Literal["Tuple[int, int]"] = reveal_type(a)
    else:
        t2: Literal["Tuple[int, ...]"] = reveal_type(a)
def is_string_list(val: List[Any], allow_zero_entries: bool) -> TypeGuard[List[str]]:
    """Type guard: true when every element is a str (optionally true for [])."""
    if allow_zero_entries and len(val) == 0:
        return True
    return all(isinstance(x, str) for x in val)
def func2(a: List[Union[str, int]]):
    # The guard narrows to List[str] on success; the declared element union
    # is kept on the failure branch.
    if is_string_list(a, True):
        t1: Literal["List[str]"] = reveal_type(a)
    else:
        t2: Literal["List[str | int]"] = reveal_type(a)
# This should generate an error because TypeGuard
# has no type argument (exactly one is required).
def bad1(a: int) -> TypeGuard:
    return True
# This should generate an error because TypeGuard
# has too many type arguments (only one is allowed).
def bad2(a: int) -> TypeGuard[str, int]:
    return True
# This should generate an error because TypeGuard
# does not accept an ellipsis.
def bad3(a: int) -> TypeGuard[...]:
    return True
# This should generate an error because TypeGuard
# does not accept a module object.
def bad4(a: int) -> TypeGuard[os]:
    return True
def bad5(a: int) -> TypeGuard[int]:
    # This should generate an error because only
    # bool values can be returned from a TypeGuard function.
    return 3
|
[
"erictr@microsoft.com"
] |
erictr@microsoft.com
|
72655be7c8a19587230e487b7743481687a52f88
|
fe1e601b67a30e9215fe858e16b9c2965f8a3576
|
/bloomFilter.py
|
02f8a1f3bd5e7973d83bc77df4793f64c425fd90
|
[] |
no_license
|
z-o-e/algoFun
|
8a53b0804d9ddb019133981ebd677b768d25cee6
|
539f89bece5791a1aadfafbddc3ce7c48fbf12ac
|
refs/heads/master
| 2020-04-19T00:49:53.957430
| 2014-10-22T20:32:17
| 2014-10-22T20:32:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,571
|
py
|
# bloom filter is a space-efficient probabilistic data structure to check whether an element is a member of a very large set.
# False positive are possible, false negative are impossible (100% recall)
# probability of a false positive depends on the density of 1's in the array and the number of hash functions
# (the number of 1's is approximately the number of elements inserted times the number of hash functions)
# suppose bitArraySize: b, hashFunctionSize: h, streaElementSize: n
# throwing d darts at t targets at random: (1-1/t)^d = (1-1/t)^(t*d/t) ~= e^(-d/t)
# for example: suppose b=1b, h=5, n=100m, that is t = 10^9, d = 5*10^8
# the fraction of 0's e^(-1/2), fraction of 1's: 1-e^(-1/2), fraction of false positive (1-e^(-1/2))^5
import math
# @param funcs: a list of hash functions
# @param filterSize: an int representing the size of bloom filter -- a bit vector
class bloomFilter:
    """Probabilistic membership filter over a fixed-size bit array.

    May report false positives but never false negatives.
    """

    def __init__(self, funcs, filterSize):
        # funcs: hash functions mapping (bit-list, array size) -> index.
        self.funcs = funcs
        self.bitArray = [0] * filterSize

    def _dec2Binary(self, dec):
        """Return *dec* as a list of binary digits, most significant first."""
        return [int(digit) for digit in bin(dec)[2:]]

    def set(self, streamElem):
        """Record *streamElem* by setting one bit per hash function."""
        bits = self._dec2Binary(streamElem)
        size = len(self.bitArray)
        for hash_func in self.funcs:
            self.bitArray[hash_func(bits, size)] = 1

    def lookup(self, newElem):
        """Return False if definitely unseen, True if possibly seen."""
        bits = self._dec2Binary(newElem)
        size = len(self.bitArray)
        for hash_func in self.funcs:
            if not self.bitArray[hash_func(bits, size)]:
                return False
        return True

    def estimateFP(self, streamSize):
        """Estimate the false-positive rate after *streamSize* insertions."""
        zero_fraction = math.exp(-len(self.funcs) * streamSize / len(self.bitArray))
        return (1 - zero_fraction) ** len(self.funcs)
# h1, h2 take odd-numbered, even-numbered bits startinf from the right of binary representation of x
def h1(x, modula):
    """Hash: value of every other bit of *x* starting at the rightmost, mod *modula*."""
    selected = x[::-1][::2]  # rightmost bit first, then every second bit
    value = sum(bit << i for i, bit in enumerate(selected))
    return value % modula
def h2(x, modula):
    """Hash: value of every other bit of *x* starting one left of the rightmost, mod *modula*."""
    selected = x[::-1][1::2]  # skip the rightmost bit, then every second bit
    value = sum(bit << i for i, bit in enumerate(selected))
    return value % modula
# Smoke demo: insert two values, then probe.  NOTE(review): the lookup
# return values and the final bare `test.bitArray` expression are discarded —
# this reads like a notebook remnant; wrap in print(...) to see the results.
funcs = [h1, h2]
filterSize = 11
test = bloomFilter(funcs, filterSize)
test.set(25)
test.set(159)
test.lookup(25)
test.lookup(159)
test.lookup(30)
test.bitArray
|
[
"hyzhang22@gmail.com"
] |
hyzhang22@gmail.com
|
a4781f355e0c7465556c9f5f554524e357c8f228
|
0e722188b0a0c2302fab5b2917e9c39394664db3
|
/gshs_auth/decorators.py
|
768fdc4efcb9a5bed86fed80905ca350361d8498
|
[] |
no_license
|
Qwaz/GSHSmeal
|
6df1d773a883b729c4d87c6f6b1c3990ed0e4d3c
|
6f2af5f538a1b9b9db8f791041ff65a5c97b96d8
|
refs/heads/master
| 2020-04-17T09:12:47.694626
| 2015-03-04T10:27:56
| 2015-03-04T10:27:56
| 20,253,318
| 0
| 1
| null | 2014-08-20T01:56:00
| 2014-05-28T10:14:37
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 305
|
py
|
from functools import wraps
from django.shortcuts import redirect
def admin_login(func):
    """Decorator: run the view only for staff users, otherwise redirect home."""
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        if not request.user.is_staff:
            return redirect('home')
        return func(request, *args, **kwargs)
    return wrapper
|
[
"qwazpia@gmail.com"
] |
qwazpia@gmail.com
|
59576815e0896221abafb383026b4ff38ea0df21
|
f4ec787e34b74823875a8074d2f7c10d6207f206
|
/tests/util/alert_server.py
|
2e17d1448e570ccf024752181c78bbba66ce8f5b
|
[
"Apache-2.0"
] |
permissive
|
Sweety-Network/sweety-blockchain-test
|
d0fc0fbd27e5b6970597824de43db3fb9e3bceed
|
44373ff1b6a7a2a1dcb39d3d6fa01cd15e3e5fb6
|
refs/heads/master
| 2023-09-04T15:57:19.853659
| 2021-10-08T04:00:10
| 2021-10-08T04:00:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,143
|
py
|
import argparse
import asyncio
import logging
from pathlib import Path
from typing import Any
from aiohttp import web
log = logging.getLogger(__name__)
class AlertServer:
    """Minimal aiohttp server that serves the contents of an alert file.

    Exposes a single endpoint, GET /status, returning the file as text/plain.
    """
    shut_down: bool
    shut_down_event: asyncio.Event
    log: Any
    app: Any
    alert_file_path: Path
    port: int
    @staticmethod
    async def create_alert_server(alert_file_path: Path, port):
        """Factory: build and wire up an AlertServer (does not start it)."""
        self = AlertServer()
        self.log = log
        self.shut_down = False
        self.app = web.Application()
        self.shut_down_event = asyncio.Event()
        self.port = port
        routes = [
            web.get("/status", self.status),
        ]
        self.alert_file_path = alert_file_path
        self.app.add_routes(routes)
        return self
    async def status(self, request):
        """Serve the alert file verbatim; the file is re-read on every request."""
        file_text = self.alert_file_path.read_text()
        return web.Response(body=file_text, content_type="text/plain")
    async def stop(self):
        # Unblocks run_and_wait(); the TCP site itself is not torn down here.
        self.shut_down_event.set()
    async def run(self):
        """Start listening on self.port (returns immediately; serving is async)."""
        runner = web.AppRunner(self.app, access_log=None)
        await runner.setup()
        site = web.TCPSite(runner, None, self.port)
        await site.start()
async def run_and_wait(file_path, port):
    """Start an AlertServer for *file_path* and block until stop() is called."""
    server = await AlertServer.create_alert_server(Path(file_path), port)
    await server.run()
    await server.shut_down_event.wait()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-file_path", type=str, dest="file_path")
parser.add_argument("-port", type=str, dest="port")
port = None
file_path = None
for key, value in vars(parser.parse_args()).items():
if key == "port":
port = value
elif key == "file_path":
file_path = value
else:
print(f"Invalid argument {key}")
if port is None or file_path is None:
print(
"Missing arguments, example usage:\n\n"
"python sweety/util/alert_server.py -p 4000 -file_path /home/user/alert.txt\n"
)
quit()
return asyncio.get_event_loop().run_until_complete(run_and_wait(file_path, port))
if __name__ == "__main__":
main()
|
[
"tlwu@njmlab.com"
] |
tlwu@njmlab.com
|
62dcbc0c418cf6583d090a0a4451f0b217365320
|
4b44a299bafbd4ca408ce1c89c9fe4a449632783
|
/python3/06_Collections/02_Tuples/01_tuples.py
|
490cc61a2d1d027663715e7fff988af2cc75274f
|
[] |
no_license
|
umunusb1/PythonMaterial
|
ecd33d32b2de664eaaae5192be7c3f6d6bef1d67
|
1e0785c55ccb8f5b9df1978e1773365a29479ce0
|
refs/heads/master
| 2023-01-23T23:39:35.797800
| 2020-12-02T19:29:00
| 2020-12-02T19:29:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Purpose: Working with tuples
"""
mylist = [12, 34.8, 500000, [6, 8], (5,)]
print('type(mylist)', type(mylist))
print('len(mylist)', len(mylist))
mytuple = (12, 34.8, 500000, [6, 8], (5,))
print('\ntype(mytuple)', type(mytuple))
print('len(mytuple)', len(mytuple))
print(mytuple)
print()
another_tuple = (99.9,)
print('type(another_tuple)', type(another_tuple))
print('len(another_tuple) ', len(another_tuple))
# NOTE: For tuple with single element, place comma at the end
# to recognize it as tuple; else recognizes as individual element
print()
empty_tuple = tuple() # ()
print('type(empty_tuple):', type(empty_tuple))
print('len(empty_tuple) :', len(empty_tuple))
print()
mytuple = 1, 2, 3
print('type(mytuple) :', type(mytuple))
print('len(mytuple) :', len(mytuple))
print(mytuple)
|
[
"uday3prakash@gmail.com"
] |
uday3prakash@gmail.com
|
d50ee8a96e31ec7c4404e0ea515495d6127da418
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayUserAuthZhimaperInnerApplyResponse.py
|
aa55cb52b9d5249d124fbd30a25e134c9e2a5866
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,403
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayUserAuthZhimaperInnerApplyResponse(AlipayResponse):
def __init__(self):
super(AlipayUserAuthZhimaperInnerApplyResponse, self).__init__()
self._access_token = None
self._auth_token_type = None
self._refresh_token = None
@property
def access_token(self):
return self._access_token
@access_token.setter
def access_token(self, value):
self._access_token = value
@property
def auth_token_type(self):
return self._auth_token_type
@auth_token_type.setter
def auth_token_type(self, value):
self._auth_token_type = value
@property
def refresh_token(self):
return self._refresh_token
@refresh_token.setter
def refresh_token(self, value):
self._refresh_token = value
def parse_response_content(self, response_content):
response = super(AlipayUserAuthZhimaperInnerApplyResponse, self).parse_response_content(response_content)
if 'access_token' in response:
self.access_token = response['access_token']
if 'auth_token_type' in response:
self.auth_token_type = response['auth_token_type']
if 'refresh_token' in response:
self.refresh_token = response['refresh_token']
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
bb28f5fb6011fac3c82ea587eebe8da09c4224ef
|
d2c4151eff768af64946ababc2e41c13d8973cd3
|
/ARC104/b.py
|
626f3f54124699e2c5c7d4b325852a552aa235d2
|
[] |
no_license
|
Intel-out-side/AtCoder
|
2de19b71981247135432aed2d6d9c2a16c3ab7f0
|
0c419d2df15fff02032432cb1b1323612484e16e
|
refs/heads/master
| 2022-06-23T04:21:12.886072
| 2022-06-13T14:39:07
| 2022-06-13T14:39:07
| 235,240,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
I = input().split()
N = int(I[0])
S = I[1]
class AccumulatedSum:
"""
累積和を勝手に計算します。
0-indexedなので注意
"""
def __init__(self, A:list):
self.N = len(A)
self.accumulated_sum = A.copy()
for i in range(1, self.N):
self.accumulated_sum[i] += self.accumulated_sum[i-1]
def getSumIn(self, left:int, right:int) -> int:
"""
[l, r]区間の和を計算します。左右端も含みます。
"""
if left == 0:
return self.accumulated_sum[right]
return self.accumulated_sum[right] - self.accumulated_sum[left-1]
A = [0] * N
T = [0] * N
C = [0] * N
G = [0] * N
for i in range(N):
if S[i] == "A":
A[i] = 1
if S[i] == "T":
T[i] = 1
if S[i] == "C":
C[i] = 1
if S[i] == "G":
G[i] = 1
Aacc = AccumulatedSum(A)
Tacc = AccumulatedSum(T)
Cacc = AccumulatedSum(C)
Gacc = AccumulatedSum(G)
ans = 0
for i in range(N):
for j in range(i, N):
if Aacc.getSumIn(i, j) == Tacc.getSumIn(i, j) and Cacc.getSumIn(i, j) == Gacc.getSumIn(i, j):
ans += 1
print(ans)
|
[
"so.eng.eng.1rou@gmail.com"
] |
so.eng.eng.1rou@gmail.com
|
294ca33e70eb3070b97b28d0251b424150e1b996
|
d7e41697c8d82f5ef3f9555af7d3d204803f0a99
|
/test/mitmproxy/utils/test_strutils.py
|
4759f1db47ceb9e0a34ee9a9ca78e176b3d0be98
|
[
"MIT"
] |
permissive
|
shitongzhu/mitmproxy
|
aba9681765b71c385b2b20fda303c71090a04376
|
08f35286d3af9ad39046b0b3663913701086be9a
|
refs/heads/master
| 2023-08-19T07:03:52.226411
| 2021-09-04T10:56:33
| 2021-09-04T10:56:33
| 316,844,544
| 0
| 0
|
MIT
| 2020-11-29T00:02:33
| 2020-11-29T00:02:33
| null |
UTF-8
|
Python
| false
| false
| 4,828
|
py
|
import pytest
from mitmproxy.utils import strutils
def test_always_bytes():
assert strutils.always_bytes(bytes(range(256))) == bytes(range(256))
assert strutils.always_bytes("foo") == b"foo"
with pytest.raises(ValueError):
strutils.always_bytes("\u2605", "ascii")
with pytest.raises(TypeError):
strutils.always_bytes(42, "ascii")
def test_always_str():
with pytest.raises(TypeError):
strutils.always_str(42)
assert strutils.always_str("foo") == "foo"
assert strutils.always_str(b"foo") == "foo"
assert strutils.always_str(None) is None
def test_escape_control_characters():
assert strutils.escape_control_characters("one") == "one"
assert strutils.escape_control_characters("\00ne") == ".ne"
assert strutils.escape_control_characters("\nne") == "\nne"
assert strutils.escape_control_characters("\nne", False) == ".ne"
assert strutils.escape_control_characters("\u2605") == "\u2605"
assert (
strutils.escape_control_characters(bytes(bytearray(range(128))).decode()) ==
'.........\t\n..\r.................. !"#$%&\'()*+,-./0123456789:;<'
'=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~.'
)
assert (
strutils.escape_control_characters(bytes(bytearray(range(128))).decode(), False) ==
'................................ !"#$%&\'()*+,-./0123456789:;<'
'=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~.'
)
with pytest.raises(ValueError):
strutils.escape_control_characters(b"foo")
def test_bytes_to_escaped_str():
assert strutils.bytes_to_escaped_str(b"foo") == "foo"
assert strutils.bytes_to_escaped_str(b"\b") == r"\x08"
assert strutils.bytes_to_escaped_str(br"&!?=\)") == r"&!?=\\)"
assert strutils.bytes_to_escaped_str(b'\xc3\xbc') == r"\xc3\xbc"
assert strutils.bytes_to_escaped_str(b"'") == r"'"
assert strutils.bytes_to_escaped_str(b'"') == r'"'
assert strutils.bytes_to_escaped_str(b"'", escape_single_quotes=True) == r"\'"
assert strutils.bytes_to_escaped_str(b'"', escape_single_quotes=True) == r'"'
assert strutils.bytes_to_escaped_str(b"\r\n\t") == "\\r\\n\\t"
assert strutils.bytes_to_escaped_str(b"\r\n\t", True) == "\r\n\t"
assert strutils.bytes_to_escaped_str(b"\n", True) == "\n"
assert strutils.bytes_to_escaped_str(b"\\n", True) == "\\ \\ n".replace(" ", "")
assert strutils.bytes_to_escaped_str(b"\\\n", True) == "\\ \\ \n".replace(" ", "")
assert strutils.bytes_to_escaped_str(b"\\\\n", True) == "\\ \\ \\ \\ n".replace(" ", "")
with pytest.raises(ValueError):
strutils.bytes_to_escaped_str("such unicode")
def test_escaped_str_to_bytes():
assert strutils.escaped_str_to_bytes("foo") == b"foo"
assert strutils.escaped_str_to_bytes("\x08") == b"\b"
assert strutils.escaped_str_to_bytes("&!?=\\\\)") == br"&!?=\)"
assert strutils.escaped_str_to_bytes("\\x08") == b"\b"
assert strutils.escaped_str_to_bytes("&!?=\\\\)") == br"&!?=\)"
assert strutils.escaped_str_to_bytes("\u00fc") == b'\xc3\xbc'
with pytest.raises(ValueError):
strutils.escaped_str_to_bytes(b"very byte")
def test_is_mostly_bin():
assert not strutils.is_mostly_bin(b"foo\xFF")
assert strutils.is_mostly_bin(b"foo" + b"\xFF" * 10)
assert not strutils.is_mostly_bin("")
def test_is_xml():
assert not strutils.is_xml(b"foo")
assert strutils.is_xml(b"<foo")
assert strutils.is_xml(b" \n<foo")
def test_clean_hanging_newline():
s = "foo\n"
assert strutils.clean_hanging_newline(s) == "foo"
assert strutils.clean_hanging_newline("foo") == "foo"
def test_hexdump():
assert list(strutils.hexdump(b"one\0" * 10))
ESCAPE_QUOTES = [
"'" + strutils.SINGLELINE_CONTENT + strutils.NO_ESCAPE + "'",
'"' + strutils.SINGLELINE_CONTENT + strutils.NO_ESCAPE + '"'
]
def test_split_special_areas():
assert strutils.split_special_areas("foo", ESCAPE_QUOTES) == ["foo"]
assert strutils.split_special_areas("foo 'bar' baz", ESCAPE_QUOTES) == ["foo ", "'bar'", " baz"]
assert strutils.split_special_areas(
"""foo 'b\\'a"r' baz""",
ESCAPE_QUOTES
) == ["foo ", "'b\\'a\"r'", " baz"]
assert strutils.split_special_areas(
"foo\n/*bar\nbaz*/\nqux",
[r'/\*[\s\S]+?\*/']
) == ["foo\n", "/*bar\nbaz*/", "\nqux"]
assert strutils.split_special_areas(
"foo\n//bar\nbaz",
[r'//.+$']
) == ["foo\n", "//bar", "\nbaz"]
def test_escape_special_areas():
assert strutils.escape_special_areas('foo "bar" baz', ESCAPE_QUOTES, "*") == 'foo "bar" baz'
esc = strutils.escape_special_areas('foo "b*r" b*z', ESCAPE_QUOTES, "*")
assert esc == 'foo "b\ue02ar" b*z'
assert strutils.unescape_special_areas(esc) == 'foo "b*r" b*z'
|
[
"stzhu.ac@gmail.com"
] |
stzhu.ac@gmail.com
|
b81de21a3b245370ce540d63715dc874f1e5a1f5
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/timber/testcase/firstcases/testcase1_023.py
|
bd7a990dcc4d20bd7e9c133c55b4eed72482d212
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,842
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'com.naman14.timber',
'appActivity' : 'com.naman14.timber.activities.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'com.naman14.timber/com.naman14.timber.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase023
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
driver.press_keycode(82)
driver.press_keycode(82)
driver.press_keycode(82)
element = getElememtBack(driver, "new UiSelector().text(\"Sort by\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
driver.press_keycode(82)
element = getElememt(driver, "new UiSelector().resourceId(\"com.naman14.timber:id/albumArt\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"She\")", "new UiSelector().className(\"android.widget.TextView\").instance(10)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.naman14.timber:id/albumArt\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Navigate up\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Library\")", "new UiSelector().className(\"android.widget.CheckedTextView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Navigate up\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Heartbeat\")", "new UiSelector().className(\"android.widget.TextView\").instance(17)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Report any bugs here\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"1_023\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'com.naman14.timber'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.