blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
feb2dc16e2789132a98ac763eb08a63bb6ff086e | 2ca7eda87460f702bec33708d8a494d8c701a7b2 | /tensorflow/python/keras/mixed_precision/experimental/device_compatibility_check.py | 9279c37bb527a972aa8867a79d31b0c5e9777dc4 | [
"Apache-2.0"
] | permissive | xiaolinpeter/tensorflow | 7f931b294a434d731185131c22034c6b68cdf2b7 | 28aa08fc1e017355fc1118913bd988cf7890bec5 | refs/heads/master | 2021-05-19T06:55:47.491635 | 2020-03-31T09:02:33 | 2020-03-31T09:05:54 | 251,556,442 | 2 | 0 | Apache-2.0 | 2020-03-31T09:24:32 | 2020-03-31T09:24:31 | null | UTF-8 | Python | false | false | 7,115 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains function to log if devices are compatible with mixed precision."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from tensorflow.python.client import device_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import gpu_util
from tensorflow.python.platform import tf_logging
_COMPAT_CHECK_PREFIX = 'Mixed precision compatibility check (mixed_float16): '
_COMPAT_CHECK_OK_PREFIX = _COMPAT_CHECK_PREFIX + 'OK'
_COMPAT_CHECK_WARNING_PREFIX = _COMPAT_CHECK_PREFIX + 'WARNING'
_COMPAT_CHECK_WARNING_SUFFIX = (
'If you will use compatible GPU(s) not attached to this host, e.g. by '
'running a multi-worker model, you can ignore this warning. This message '
'will only be logged once')
def _dedup_strings(device_strs):
"""Groups together consecutive identical strings.
For example, given:
['GPU 1', 'GPU 2', 'GPU 2', 'GPU 3', 'GPU 3', 'GPU 3']
This function returns:
['GPU 1', 'GPU 2 (x2)', 'GPU 3 (x3)']
Args:
device_strs: A list of strings, each representing a device.
Returns:
A copy of the input, but identical consecutive strings are merged into a
single string.
"""
new_device_strs = []
for device_str, vals in itertools.groupby(device_strs):
num = len(list(vals))
if num == 1:
new_device_strs.append(device_str)
else:
new_device_strs.append('%s (x%d)' % (device_str, num))
return new_device_strs
def _log_device_compatibility_check(policy_name, device_attr_list):
  """Logs a compatibility check if the devices support the policy.
  Currently only logs for the policy mixed_float16.
  Args:
    policy_name: The name of the dtype policy.
    device_attr_list: A list of DeviceAttributes.
  """
  if policy_name != 'mixed_float16':
    # TODO(b/145686977): Log if the policy is 'mixed_bfloat16'. This requires
    # checking if a TPU is available.
    return
  # Partition the visible GPUs by compute capability: devices reporting
  # cc >= (7, 0) are treated as supported for mixed_float16; GPUs with a
  # lower or unknown compute capability are treated as unsupported.
  supported_device_strs = []
  unsupported_device_strs = []
  for device in device_attr_list:
    if device.device_type == 'GPU':
      name, cc = gpu_util.compute_capability_from_device_desc(device)
      name = name or 'Unknown GPU'
      if cc:
        device_str = '%s, compute capability %s.%s' % (name, cc[0], cc[1])
        if cc >= (7, 0):
          supported_device_strs.append(device_str)
        else:
          unsupported_device_strs.append(device_str)
      else:
        # No compute capability could be parsed from the device description.
        unsupported_device_strs.append(
            name + ', no compute capability (probably not an Nvidia GPU)')
  # Case 1: at least one unsupported GPU -> warn, listing every GPU seen.
  if unsupported_device_strs:
    warning_str = _COMPAT_CHECK_WARNING_PREFIX + '\n'
    if supported_device_strs:
      warning_str += ('Some of your GPUs may run slowly with dtype policy '
                      'mixed_float16 because they do not all have compute '
                      'capability of at least 7.0. Your GPUs:\n')
    elif len(unsupported_device_strs) == 1:
      warning_str += ('Your GPU may run slowly with dtype policy mixed_float16 '
                      'because it does not have compute capability of at least '
                      '7.0. Your GPU:\n')
    else:
      warning_str += ('Your GPUs may run slowly with dtype policy '
                      'mixed_float16 because they do not have compute '
                      'capability of at least 7.0. Your GPUs:\n')
    for device_str in _dedup_strings(supported_device_strs +
                                     unsupported_device_strs):
      warning_str += ' ' + device_str + '\n'
    warning_str += ('See https://developer.nvidia.com/cuda-gpus for a list of '
                    'GPUs and their compute capabilities.\n')
    warning_str += _COMPAT_CHECK_WARNING_SUFFIX
    tf_logging.warn(warning_str)
  # Case 2: no GPUs at all -> warn that mixed_float16 may be slow.
  elif not supported_device_strs:
    tf_logging.warn('%s\n'
                    'The dtype policy mixed_float16 may run slowly because '
                    'this machine does not have a GPU. Only Nvidia GPUs with '
                    'compute capability of at least 7.0 run quickly with '
                    'mixed_float16.\n%s' % (_COMPAT_CHECK_WARNING_PREFIX,
                                            _COMPAT_CHECK_WARNING_SUFFIX))
  # Cases 3 and 4: every visible GPU is supported -> info-level OK message.
  elif len(supported_device_strs) == 1:
    tf_logging.info('%s\n'
                    'Your GPU will likely run quickly with dtype policy '
                    'mixed_float16 as it has compute capability of at least '
                    '7.0. Your GPU: %s' % (_COMPAT_CHECK_OK_PREFIX,
                                           supported_device_strs[0]))
  else:
    tf_logging.info('%s\n'
                    'Your GPUs will likely run quickly with dtype policy '
                    'mixed_float16 as they all have compute capability of at '
                    'least 7.0' % _COMPAT_CHECK_OK_PREFIX)
_logged_compatibility_check = False
def log_device_compatibility_check(policy_name, skip_local):
  """Logs a compatibility check if the devices support the policy.
  Currently only logs for the policy mixed_float16. A log is shown only the
  first time this function is called.
  Args:
    policy_name: The name of the dtype policy.
    skip_local: If True, do not call list_local_devices(). This is useful since
      if list_local_devices() and tf.config.set_visible_devices() are both
      called, TensorFlow will crash. However, since GPU names and compute
      capabilities cannot be checked without list_local_devices(), setting this
      to True means the function will only warn if there are no GPUs.
  """
  global _logged_compatibility_check
  # In graph mode, calling list_local_devices may initialize some session state,
  # so we only call it in eager mode.
  if not context.executing_eagerly() or _logged_compatibility_check:
    return
  _logged_compatibility_check = True
  if not skip_local:
    # Bug fix: list_local_devices() was previously called unconditionally,
    # before the skip_local check, which defeated the purpose of skip_local
    # (avoiding a crash when tf.config.set_visible_devices() is also used).
    # It is now only called when skip_local is False.
    device_attr_list = device_lib.list_local_devices()
    _log_device_compatibility_check(policy_name, device_attr_list)
    return

  # TODO(b/146009447): Create an API to replace list_local_devices(), then
  # remove the skip_local parameter.
  gpus = config.list_physical_devices('GPU')
  if not gpus and policy_name == 'mixed_float16':
    tf_logging.warn(
        '%s\n'
        'The dtype policy mixed_float16 may run slowly because '
        'this machine does not have a GPU.\n%s' %
        (_COMPAT_CHECK_WARNING_PREFIX, _COMPAT_CHECK_WARNING_SUFFIX))
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
16e44fe8b2dcdb7050cf93f7d7f693492e0b9d39 | 075786cd6b8b5d3e943162512bbc3950532734f3 | /player/human.py | 84b9008c187e6edd22e571255debd7daa32ab40a | [] | no_license | VincentVelthuizen/Menace | 470c6744de65a2685be92ed9d450d1dfea5c0bad | 196498200cbdbfba9ccd2b1497efacf7c63b4171 | refs/heads/master | 2023-05-02T20:48:25.636309 | 2021-05-20T12:37:49 | 2021-05-20T12:37:49 | 320,262,025 | 0 | 2 | null | 2021-05-20T12:37:50 | 2020-12-10T12:13:00 | Python | UTF-8 | Python | false | false | 708 | py | import player
from board import Board, _state_set_cell
class Human(player.Player):
    """A human-controlled player that reads its moves from the UI."""

    # Key codes for the 3x3 layout q w e / a s d / z x c, mapped to
    # (row, column) board coordinates.
    keys = {ord('q'): (0, 0), ord('w'): (0, 1), ord('e'): (0, 2),
            ord('a'): (1, 0), ord('s'): (1, 1), ord('d'): (1, 2),
            ord('z'): (2, 0), ord('x'): (2, 1), ord('c'): (2, 2)}

    def __init__(self, ui):
        # The human player object talks to the computer user through a UI.
        self.ui = ui

    def move(self, board):
        # Poll the UI until the user finally presses one of the mapped keys.
        while True:
            self.ui.tick()
            pressed = self.ui.get_move(board)
            coordinate = self.keys.get(pressed)
            if coordinate is not None:
                return coordinate
| [
"mail@vincentvelthuizen.com"
] | mail@vincentvelthuizen.com |
4d62a2fb14ec957250f83aec716fc37141077cda | 73b3ca8a063778f30fc259110519791bedd67801 | /ticketplace/settings.py | f3d224c9f1d212ba90381440d8aa2b7ea111fc85 | [] | no_license | DasomJung24/ticketplace | f57fb2368443026c185766f28e778545acb7d647 | 930e34f4e498ecf588bcb094b16ada57ac43ddf3 | refs/heads/master | 2023-01-29T04:33:34.924023 | 2020-12-13T16:59:16 | 2020-12-13T16:59:16 | 320,288,514 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,093 | py | """
Django settings for ticketplace project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded and committed to source control; for
# production, load it from an environment variable or a secrets store.
SECRET_KEY = 'kyj+yb&9bzl=v=#xuwesi6e3$_hzq81yt(+bi&ffn$5$u2paf2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header; restrict this to the real
# hostnames before deploying.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'movie',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ticketplace.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ticketplace.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"jeongdasom6@gmail.com"
] | jeongdasom6@gmail.com |
8e9683fe6b02f5131b9205be402928e216b2f878 | adb145c78bbc9fa557abef3333c85882cb2442fe | /examples/show_channels.py | c769d6dacb1c3c3a9195c9be5af8f44c5d5965e8 | [] | no_license | mgpwanderer/pyst3 | 574b40c7edbd058048c393c923b7f581b6ef2799 | b7ef58b8dab6ceeb0c23e498d9f21e61afaa9b4c | refs/heads/master | 2021-01-18T13:08:55.205169 | 2014-11-21T09:04:31 | 2014-11-21T09:04:31 | 17,709,524 | 9 | 4 | null | 2020-05-08T15:48:08 | 2014-03-13T12:38:39 | Python | UTF-8 | Python | false | false | 915 | py | """
Example to get list of active channels
"""
import asterisk.manager
import sys
# NOTE: this example uses Python 2 syntax (print statements and
# `except Exc, var` clauses); it will not run under Python 3.
manager = asterisk.manager.Manager()
try:
    # connect to the manager
    try:
        manager.connect('localhost')
        manager.login('user', 'secret')
        # get a status report
        response = manager.status()
        print response
        # run a CLI command and print its raw output
        response = manager.command('core show channels concise')
        print response.data
        manager.logoff()
    except asterisk.manager.ManagerSocketException, (errno, reason):
        print "Error connecting to the manager: %s" % reason
        sys.exit(1)
    except asterisk.manager.ManagerAuthException, reason:
        print "Error logging in to the manager: %s" % reason
        sys.exit(1)
    except asterisk.manager.ManagerException, reason:
        print "Error: %s" % reason
        sys.exit(1)
finally:
    # remember to clean up
    manager.close()
| [
"areski@gmail.com"
] | areski@gmail.com |
769cedf41185d39d17f661de4cfba647bc7c158c | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/robort_20200727104801.py | 2010285cce961f66b93daecfab4a32147f02ab60 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | def uniquePaths(m,n):
# use dynamic programming and answer is at arr[m][n]
# let's create and empty grid with 0's
grid = [[0] * m] * n
# then using the top down uproach we shall prefill all the
uniquePaths(3,2) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
12826fe19b895ac45cd98838364927f7bb08dd9c | ed962cd83f09d9f14f4528c3b2e6ae55d48de5b3 | /wagtail-repository/wagtail/core/apps.py | dd6ee694395a3e97cc26a142e7ac0ec899d0e0ff | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | TobiasSkovgaardJepsen/wagtail-on-heroku | 9eefc4346a88191b8a2f5c902db4b2645fdbad67 | 17e4720f86023225e0704890688998a80bb87a17 | refs/heads/master | 2022-12-19T03:54:51.766911 | 2018-01-20T14:41:33 | 2018-01-20T14:41:33 | 117,421,808 | 0 | 1 | BSD-3-Clause | 2022-12-07T23:51:05 | 2018-01-14T10:46:23 | Python | UTF-8 | Python | false | false | 292 | py | from django.apps import AppConfig
class WagtailCoreAppConfig(AppConfig):
    """Django AppConfig for the Wagtail core app."""
    # Dotted module path and short app label used by Django's app registry.
    name = 'wagtail.core'
    label = 'wagtailcore'
    verbose_name = "Wagtail core"
    def ready(self):
        """Called once the app registry is fully populated; wires up
        Wagtail's signal handlers. The import happens here rather than at
        module top level, presumably to avoid touching models before the
        apps are ready."""
        from wagtail.core.signal_handlers import register_signal_handlers
        register_signal_handlers()
| [
"tsj@aau114974.mynet"
] | tsj@aau114974.mynet |
6ff1d93caaa9b376c31d963dc66cd9a3cb8fc42b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_weighted.py | e340dc692dc99f8c2fded0864e71b23542626ba9 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
#calss header
class _WEIGHTED():
def __init__(self,):
self.name = "WEIGHTED"
self.definitions = weight
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['weight']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
e16d6b0c732d957e8b90b55aaf84a77a990b923a | 36216b52f6d3c9b5d16e9d93c56540fe07bc5f5a | /backstage/server/forms.py | 71e701d5357e296b3d5d578887a50bac8171c822 | [
"MIT"
] | permissive | zerolfx/eoj3 | 3984676d1e29ad5d04f06a41836ece3f1a452054 | 156060399d1c3e5f7bcdbf34eaffbe2be66e1b20 | refs/heads/master | 2020-08-10T19:55:36.278006 | 2019-10-11T10:26:20 | 2019-10-11T10:27:19 | 214,410,171 | 1 | 0 | MIT | 2019-10-11T10:39:12 | 2019-10-11T10:39:11 | null | UTF-8 | Python | false | false | 367 | py | from django import forms
from dispatcher.models import Server
class ServerEditForm(forms.ModelForm):
    """ModelForm exposing the editable fields of a judge Server record."""
    class Meta:
        model = Server
        fields = ['name', 'ip', 'port', 'token', 'concurrency', 'runtime_multiplier', 'version', 'master']
class ServerUpdateTokenForm(forms.Form):
    """Plain form for rotating a server's token; accepts 4-128 characters."""
    new_password = forms.CharField(min_length=4, max_length=128, label='New Password')
| [
"scottyugochang@hotmail.com"
] | scottyugochang@hotmail.com |
c63ce4b9e4bf4d5e9cee09e0aea032917a339c41 | 838f063e516b979364bdddb7a8604f9c3ff405d8 | /tests/gcloud/database_ddl_test.py | 1d0ce1560405f41a462e23ba85afa985200059d2 | [
"Apache-2.0"
] | permissive | GoogleCloudPlatform/cloud-spanner-emulator | d205193c7c3c265a47a822e1df574271c8522759 | 53eaa404d303fb2dc03f3b444553aa9bb24c3786 | refs/heads/master | 2023-08-29T12:33:41.780107 | 2023-08-11T08:15:10 | 2023-08-11T08:15:10 | 251,420,886 | 236 | 38 | Apache-2.0 | 2023-09-07T12:35:45 | 2020-03-30T20:28:25 | C++ | UTF-8 | Python | false | false | 3,012 | py | #
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for Cloud Spanner gcloud command for ddl statements to CREATE/ALTER/DROP CHANGE STREAM."""
from tests.gcloud import emulator
class GCloudDatabaseDdlTest(emulator.TestCase):
  # TODO: Test returned strings from ddl.
  def testUpdateDDLChangeStream(self):
    """Exercises CREATE/ALTER/DROP CHANGE STREAM DDL end-to-end via gcloud
    against the emulator: create an instance and database, then walk a
    change stream through creation, option changes, suspension and drop."""
    # Create an instance.
    self.RunGCloud(
        'spanner',
        'instances',
        'create',
        'test-instance',
        '--config=emulator-config',
        '--description=Test Instance',
        '--nodes',
        '3',
    )
    # Create the database. An empty response (JoinLines('')) means success.
    self.assertEqual(
        self.RunGCloud(
            'spanner',
            'databases',
            'create',
            'test-database',
            '--instance=test-instance',
            '--ddl=CREATE TABLE mytable (a INT64, b INT64) PRIMARY KEY(a)',
        ),
        self.JoinLines(''),
    )
    # Perform an update to create a change stream.
    self.RunGCloud(
        'spanner',
        'databases',
        'ddl',
        'update',
        'test-database',
        '--instance=test-instance',
        '--ddl=CREATE CHANGE STREAM myChangeStream FOR ALL',
    )
    # Perform an update to alter a change stream's value capture type.
    self.RunGCloud(
        'spanner',
        'databases',
        'ddl',
        'update',
        'test-database',
        '--instance=test-instance',
        (
            '--ddl=ALTER CHANGE STREAM myChangeStream SET OPTIONS ('
            " value_capture_type = 'NEW_VALUES' )"
        ),
    )
    # Perform an update to alter a change stream's retention period.
    self.RunGCloud(
        'spanner',
        'databases',
        'ddl',
        'update',
        'test-database',
        '--instance=test-instance',
        (
            '--ddl=ALTER CHANGE STREAM myChangeStream SET OPTIONS ('
            " retention_period = '3d' )"
        ),
    )
    # Perform an update to suspend a change stream.
    self.RunGCloud(
        'spanner',
        'databases',
        'ddl',
        'update',
        'test-database',
        '--instance=test-instance',
        '--ddl=ALTER CHANGE STREAM myChangeStream DROP FOR ALL',
    )
    # Perform an update to drop a change stream.
    self.RunGCloud(
        'spanner',
        'databases',
        'ddl',
        'update',
        'test-database',
        '--instance=test-instance',
        '--ddl=DROP CHANGE STREAM myChangeStream',
    )
if __name__ == '__main__':
emulator.RunTests()
| [
"noreply@github.com"
] | GoogleCloudPlatform.noreply@github.com |
97b8ec6308ec18fdc55cc64668317b9d601f77e6 | 2d20823359e012c3d5942ec72b2442e2d5e3f2d7 | /demo/World population.spx.py | 777a6503622bacd601997903f984450c5894e330 | [
"MIT"
] | permissive | urbach/jupytext | 8fa20d6f83abb6c09ad4cd952c6e8748e3183643 | 6d3a38505ae539975085f9d5b4e457c9566a7977 | refs/heads/master | 2020-04-24T14:55:49.401909 | 2019-02-22T08:43:37 | 2019-02-22T08:43:37 | 172,044,219 | 1 | 0 | MIT | 2019-02-22T10:14:39 | 2019-02-22T10:14:39 | null | UTF-8 | Python | false | false | 3,282 | py | # ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent,.lgt.py:light,.spx.py:sphinx,md,Rmd
# text_representation:
# extension: .py
# format_name: sphinx
# format_version: '1.1'
# jupytext_version: 1.0.0-dev
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""
# A quick insight at world population
## Collecting population data
In the below we retrieve population data from the
[World Bank](http://www.worldbank.org/)
using the [wbdata](https://github.com/OliverSherouse/wbdata) python package
"""
import pandas as pd
import wbdata as wb
pd.options.display.max_rows = 6
pd.options.display.max_columns = 20
###############################################################################
# Corresponding indicator is found using search method - or, directly,
# the World Bank site.
wb.search_indicators('Population, total') # SP.POP.TOTL
# wb.search_indicators('area')
# => https://data.worldbank.org/indicator is easier to use
###############################################################################
# Now we download the population data
indicators = {'SP.POP.TOTL': 'Population, total',
'AG.SRF.TOTL.K2': 'Surface area (sq. km)',
'AG.LND.TOTL.K2': 'Land area (sq. km)',
'AG.LND.ARBL.ZS': 'Arable land (% of land area)'}
data = wb.get_dataframe(indicators, convert_date=True).sort_index()
data
###############################################################################
# World is one of the countries
data.loc['World']
###############################################################################
# Can we classify over continents?
data.loc[(slice(None), '2017-01-01'), :]['Population, total'].dropna(
).sort_values().tail(60).index.get_level_values('country')
###############################################################################
# Extract zones manually (in order of increasing population)
zones = ['North America', 'Middle East & North Africa',
'Latin America & Caribbean', 'Europe & Central Asia',
'Sub-Saharan Africa', 'South Asia',
'East Asia & Pacific'][::-1]
###############################################################################
# And extract population information (and check total is right)
population = data.loc[zones]['Population, total'].swaplevel().unstack()
population = population[zones]
assert all(data.loc['World']['Population, total'] == population.sum(axis=1))
###############################################################################
# ## Stacked area plot with matplotlib
import matplotlib.pyplot as plt
""
# NOTE(review): plt.clf() clears the *current* figure, but a brand-new
# figure is created on the next line, so this call looks redundant — confirm.
plt.clf()
plt.figure(figsize=(10, 5), dpi=100)
plt.stackplot(population.index, population.values.T / 1e9)
plt.legend(population.columns, loc='upper left')
plt.ylabel('Population count (B)')
plt.show()
###############################################################################
# ## Stacked bar plot with plotly
import plotly.offline as offline
import plotly.graph_objs as go
offline.init_notebook_mode()
""
data = [go.Scatter(x=population.index, y=population[zone], name=zone, stackgroup='World')
for zone in zones]
fig = go.Figure(data=data,
layout=go.Layout(title='World population'))
offline.iplot(fig)
| [
"marc.wouts@gmail.com"
] | marc.wouts@gmail.com |
0c7496f34ee608feab34d8444ee4d5c33dc88ec5 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /factors_of_influence/fids/sunrgbd.py | d7687853f656cc3995e06bd4997ecd3bd6b68748 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 3,865 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines SUNRGBD, segmentation (including Mseg) and depth.
SUN RGB-D Dataset a Scene Understanding Benchmark
Website: https://rgbd.cs.princeton.edu/
Paper:
SUN RGB-D: A RGB-D scene understanding benchmark suite.
S. Song, S. Lichtenberg, and J. Xiao. In CVPR, 2015.
Features/Modalities:
1. RGB image
2. Semantic segmentation
3. Depth image
4. Object detection (2D & 3D)
5. Room layout
Currently only image, semantic segmentation and depth are used.
"""
from typing import Text
import numpy as np
from factors_of_influence import dataset_dirs
from factors_of_influence.fids import mseg_base
from factors_of_influence.fids import utils
DEPTH = 'depth'
MSEG = 'mseg'
ALL = 'all'
DEPTH_FILE_PATTERN = dataset_dirs.SUNRGBD_DEPTH_DIR + '/{}/{:08d}.png'
class SUNRGBD(mseg_base.MSegBase):
  """Import SUNRGBD."""
  def __init__(self, sunrgb_config = MSEG):
    super().__init__(mseg_name='SUNRGB-D',
                     mseg_original_name='sunrgbd-38',
                     mseg_base_name='sunrgbd-37',
                     mseg_dirname='SUNRGBD',
                     mseg_train_dataset=True,
                     mseg_config=sunrgb_config)
    # The config determines which of image / segmentation / depth features
    # this dataset exposes.
    self.feature_names = self.get_features_from_config(sunrgb_config)
  def get_features_from_config(self, sunrgb_config):
    """Return features based on SUNRGBD config (DEPTH, MSEG or ALL)."""
    if sunrgb_config == DEPTH:
      return ['image', 'depth']
    elif sunrgb_config == MSEG:
      return self.MSEG_FEATURE_NAMES
    elif sunrgb_config == ALL:
      return self.MSEG_FEATURE_NAMES + ['depth']
    else:
      raise ValueError(f'SUNRGBD config {sunrgb_config} not valid!')
  def _info_features(self):
    # Augment the base feature info with depth clipping defaults (meters),
    # matching the 8 m cap applied in _convert_depth_to_m.
    info_features = super()._info_features()
    if 'depth' in self.feature_names:
      info_features['depth'] = dict(
          default_clip_min=0.369, default_clip_max=8.0)
    return info_features
  @staticmethod
  def _convert_depth_to_m(depth_raw):
    """Converts raw depth (uint16) to meters (float32), capped at 8 m."""
    # Follows the SUNRGBD Matlab Toolbox [SMT]:
    # https://rgbd.cs.princeton.edu/data/SUNRGBDtoolbox.zip
    # [SMT]: depth = bitor(bitshift(depth,-3), bitshift(depth,16-3));
    # matlab's bitshift(..., -3) is a right shift (of 3); and
    # matlab's bitshift(..., 13) is a left shift:
    depth_raw = np.bitwise_or(np.right_shift(depth_raw, np.uint16(3)),
                              np.left_shift(depth_raw, np.uint16(13)))
    # [SMT]: depth = single(depthInpaint)/1000;
    depth_in_meter = depth_raw.astype(np.float32)/1000.0
    # [SMT]: depth(depth >8)=8;
    # Note practical max is around 5m (given sensors and indoor environments).
    depth_in_meter = np.minimum(depth_in_meter, 8)
    return depth_in_meter
  def get_feature(self, split, curr_id, feature_name):
    """Returns a feature. Can be a numpy array or path to an image."""
    if feature_name in self.MSEG_FEATURE_NAMES:
      return super().get_feature(split, curr_id, feature_name)
    if feature_name in ['depth']:
      # curr_id looks like '<prefix>-<number>'; the numeric part indexes the
      # depth file on disk (assumption inferred from the split() below —
      # TODO confirm against the id format used upstream).
      depth_id = int(curr_id.split('-')[1])
      depth_split = 'train' if split == 'train' else 'test'
      depth_file_name = DEPTH_FILE_PATTERN.format(depth_split, depth_id)
      depth_raw = utils.load_image_cv2_any_color_any_depth(depth_file_name)
      return self._convert_depth_to_m(depth_raw), True
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
761631e008d73e304e87b40d11cc0bc377475f41 | d30233316dd25fa1fe757f46769fba2da4934876 | /GRADER/File.py | 6d9daec59ae5214854e9ed492dcfb42a1e9cb1f7 | [] | no_license | saumya-singh/CodeGrader | 7fb1ca75bc07a76c7ff0c506f22d22cfaa8661b0 | 6338da979cff00c7a12b8988d2d1886663278f14 | refs/heads/master | 2020-03-10T19:21:44.775031 | 2018-04-14T19:44:35 | 2018-04-14T19:44:35 | 129,546,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | import requests
import os
class File:
    """Represents one submitted source file: downloads it from the grading
    server and exposes its local/remote paths. (Python 2 code.)"""
    base_url = "http://192.168.43.190:8080/CodeGrader2"
    base_directory = "/tmp"
    def __init__(self, submission_id, file_name):
        # file_name is split into base name + extension; the leading dot is
        # stripped from the extension.
        self.submission_id = submission_id
        self.file_name = file_name.strip()
        self.filename_noext, file_ext = os.path.splitext(self.file_name)
        self.file_ext = file_ext[1:]
    def downloadFile(self):
        """Fetch the remote file and write it under /tmp/sol_<id>/.
        Always returns True; HTTP errors are not checked."""
        r = requests.get(self.getRemoteFileUrl())
        if not os.path.exists(self.getLocalDestinationDir()):
            os.makedirs(self.getLocalDestinationDir())
        print self.getRemoteFileUrl()
        output = open( self.getLocalFileLocation() , 'w')
        output.write(r.text)
        output.close()
        return True
    def getRemoteFileUrl(self):
        # <base_url>/<submission_id>/<file_name>
        return File.base_url+"/"+self.submission_id+"/"+self.file_name
    def getLocalDestinationDir(self):
        return File.base_directory+"/sol_"+self.submission_id
    def getLocalFileLocation(self):
        return self.getLocalDestinationDir()+"/" + self.file_name
    def getLanguage(self):
        # The lower-cased file extension doubles as the language id.
        return self.file_ext.lower()
    def getClassName(self):
        # File name without extension (used as the Java class name).
        return self.filename_noext
    def getFileName(self):
        return self.file_name
if __name__ == "__main__":
f = File("1234", "armstrong.c", "1")
#print f.downloadFile()
#print f.getFileContent()
print f.getTestFilePath("1")
| [
"saumya.singh0993@gmail.com"
] | saumya.singh0993@gmail.com |
b9ad598aef5d9c6ff63715cc8682439a6df16879 | 5d32d0e65aa3bfa677fd1b8c92569e07e9b82af1 | /Section 4 - Lists/Fruit Machine 2.py | e78f93ec3c25930ea64edd0961d61c27284f6398 | [
"CC0-1.0"
] | permissive | pdst-lccs/lccs-python | b74ef2a02ac8ad2637f713fff5559f4e56c9827d | 95cb7ece05716521e9951d7a40de8fb20a88021f | refs/heads/master | 2023-05-28T00:46:57.313972 | 2023-05-22T10:16:43 | 2023-05-22T10:16:43 | 240,501,524 | 21 | 18 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | # Event: LCCS Python Fundamental Skills Workshop
# Date: May 2018
# Author: Joe English, PDST
# eMail: computerscience@pdst.ie
# Purpose: A program to simulate a fruit machine
# Description: To run this program the file fruits.txt must exist in the runtime folder
# This program reads the entire file in one command (read)
# The contents of the file are saved in a variable called fileContents
# The string is split into a list of tokens called fruits
# The choice command is used to select a random element from fruits
# Program to simulate a fruit machine!
import random

# Open and read the fruits file (already created). Using a `with` block
# guarantees the file is closed even if the read fails, replacing the
# manual open()/close() pair.
with open("fruits.txt", "r") as fruitFile:
    fileContents = fruitFile.read()

# Split the content into a list of fruit names
fruits = fileContents.split()

# Spin! Display three fruits
print(random.choice(fruits))
print(random.choice(fruits))
print(random.choice(fruits))

# This line is just here for debugging purposes
# print(fruits)
| [
"noreply@github.com"
] | pdst-lccs.noreply@github.com |
99676168522f6040813b9ddb4402a4be3081a0d5 | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/HW02_20210630162753.py | ba3ab937a9adc7f59ef0592f1d2524cc1d73c68b | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,432 | py | """
Georgia Institute of Technology - CS1301
HW02 - Conditionals and Loops
Collaboration Statement:
"""
#########################################
"""
Function Name: snackBar()
Parameters: snack (str), ingredient (str), yourMoney (float)
Returns: whether you can get the snack (bool)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def snackBar(snack, ingredient, yourMoney):
    """Decide whether a snack can be ordered.

    Returns True when the snack does not contain the given allergen and
    yourMoney covers its price, False when it cannot be ordered, and None
    (implicitly) when the snack is not on the menu.
    """
    # Menu table: snack -> (price, allergens that rule the snack out).
    menu = {
        'Hotdog': (5.99, ('Gluten', 'Meat')),
        'Veggie Burger': (5.99, ('Gluten',)),
        'Chili Bowl': (3.99, ('Meat',)),
        'Chili Cheese Fries': (4.99, ('Meat', 'Diary')),
    }
    if snack in menu:
        price, blocked = menu[snack]
        return ingredient not in blocked and yourMoney >= price
"""
Function Name: waterGames()
Parameters: gameName (str), numPlayers (int), totalFriends (int)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def waterGames(gameName, numPlayers, totalFriends):
    """Print how enthusiastically the group will play gameName.

    The message depends on the fraction of friends who want to play:
    below 0.3 suggests something else, 0.3 up to (but not including) 0.75
    plays for a little bit, and 0.75 or more plays all out.
    """
    fraction = numPlayers / totalFriends
    if fraction >= 0.75:
        print("Let's " + gameName + '!!!')
    elif fraction >= 0.3:
        print('We will {} for a little bit!'.format(gameName))
    else:
        print('Let’s choose something else.')
"""
Function Name: summerShopping()
Parameters: clothingItem (str), size (str)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def summerShopping(clothingItem, size):
    """Print how many colors are in stock for the given item and size.

    Prints nothing when the item or size is not recognised.
    """
    # Color counts per item, keyed by size; 'No' stands in for zero so the
    # printed sentence reads "No colors are available ...".
    stock = {
        'shorts': {'S': '2', 'M': '1', 'L': 'No'},
        'tank': {'S': '1', 'M': '1', 'L': '2'},
        'flipflops': {'S': '1', 'M': '1', 'L': '2'},
    }
    sizes = stock.get(clothingItem)
    if sizes is not None and size in sizes:
        print(sizes[size] + " colors are available in this item and size.")
"""
Function Name: stopGame()
Parameters: initialPrice (float), finalPrice (float), percentGrowth (float)
Returns: numberOfDays (int)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def stopGame(initialPrice, finalPrice, percentGrowth):
    """Return how many whole days of compound growth are needed for a price
    starting at initialPrice to first exceed finalPrice, growing by
    percentGrowth percent per day.

    Returns 0 immediately when finalPrice is already at or below
    initialPrice. Raises ValueError when percentGrowth is not positive but
    growth would still be required — the original loop never terminated in
    that case.
    """
    if finalPrice <= initialPrice:
        return 0
    if percentGrowth <= 0:
        # Without positive growth the price can never exceed finalPrice.
        raise ValueError("percentGrowth must be positive when growth is required")
    # Hoist the loop-invariant daily growth factor out of the loop.
    growthFactor = 1 + (percentGrowth / 100)
    newPrice = initialPrice
    days = 0
    # Keep compounding until the price first exceeds finalPrice.
    while newPrice <= finalPrice:
        newPrice = newPrice * growthFactor
        days += 1
    return days
"""
Function Name: adventure()
Parameters: startDay (int), stopDay (int), hikeLimit(int)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def adventure(startDay, stopDay, hikeLimit):
    """Print the planned activity for each day from startDay through stopDay
    (inclusive): 'Roadtrip!' on days divisible by both 3 and 4, 'Hike' on
    other days divisible by 3, until hikeLimit hikes have been taken
    ('No more hikes' is printed when the limit is reached).

    Always returns the string 'yay'.
    """
    hikesTaken = 0
    day = startDay
    while day <= stopDay:
        canStillHike = hikesTaken < hikeLimit
        # Divisible by 3 and 4 is the same as divisible by 12.
        if day % 12 == 0 and canStillHike:
            print('Roadtrip!')
        elif day % 3 == 0 and canStillHike:
            print('Hike')
            hikesTaken += 1
            if hikesTaken == hikeLimit:
                print('No more hikes')
                return 'yay'
        day += 1
    return 'yay'
# Quick manual checks left at module level by the author: the first prints
# the day count for 232.0 -> 20000.0 at 15% growth, the second prints the
# day-by-day plan for days 4-29 with at most 3 hikes.
print(stopGame(232.0, 20000.0, 15.0))
adventure(4, 29, 3)
| [
"sanjay.mamidipaka@gmail.com"
] | sanjay.mamidipaka@gmail.com |
b5b8ba1d6d74bfc6140163460ff7ee5b0e2234ff | 4d27d69c22f9c405e1d11baa7d3872b7075c68fa | /day3/oploadpic.py | 3f1229049a597b4ea32491649f0c3c45f4bce2dc | [] | no_license | zhentestnice/selenium_test1 | 99f547104e5b547e78c9fb1dd3860a6a97c91d63 | 9bbb8578b84447c6b14adefd3122e2b8ac437dc4 | refs/heads/master | 2021-08-23T04:13:25.251992 | 2017-12-03T07:09:14 | 2017-12-03T07:09:14 | 112,907,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,764 | py | import time
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select

# UI walkthrough: log in to the shop admin site, add a product, upload its
# picture and submit the form.
driver = webdriver.Chrome()
driver.implicitly_wait(30)
driver.maximize_window()
driver.get("http://localhost/index.php?m=admin&c=public&a=login")
# 1. Log in
driver.find_element_by_name("username").send_keys("admin")
driver.find_element_by_name("userpass").send_keys("password")
driver.find_element_by_name("userverify").send_keys("1234")
driver.find_element_by_class_name("Btn").click()
# 2. Open "product management" (商品管理)
driver.find_element_by_link_text("商品管理").click()
# 3. Open "add product" (添加商品)
driver.find_element_by_link_text("添加商品").click()
# 4. Product name
# Some pages have a navigation bar on the left or top. The "product
# management" and "add product" links belong to the page's root document,
# while the product-name field lives in a child page inside a frame,
# so we need to switch documents first.
driver.switch_to.frame("mainFrame")  # switch into the child frame
driver.find_element_by_name("name").send_keys("iphone 2")
# 5. Product category
driver.find_element_by_id("1").click()
driver.find_element_by_id("2").click()
driver.find_element_by_id("6").click()
# driver.find_element_by_id("7").click()
# Double-click is a special element action wrapped in the ActionChains class
# (Actions in Java). An action chain must end with perform().
ActionChains(driver).double_click(driver.find_element_by_id("7")).perform()
# driver.find_element_by_link_text("选择当前分类").click()
# 6. Product brand
pinpai = driver.find_element_by_tag_name("select")
Select(pinpai).select_by_visible_text("苹果 (Apple)")
# 7. Upload a picture
driver.find_element_by_link_text("商品图册").click()
# Some page widgets are created by JavaScript after the page loads; the page
# can be "loaded" while those widgets do not exist yet, so time.sleep is
# added for stability. implicitly_wait only covers whether the page itself
# has finished loading.
time.sleep(2)
# driver.find_element_by_css_selector("filePicker label").click()
# class="webuploader-element-invisible" marks an invisible control.
# The element that actually handles the upload is <input type="file" ...>,
# and the picture path can be typed straight into that control.
driver.find_element_by_name("file").send_keys("D:/111.png")
driver.find_element_by_css_selector(".uploadBtn.state-finish.state-ready").click()
time.sleep(3)
driver.switch_to.alert.accept()
# 7. Submit
driver.find_element_by_class_name("button_search").click()
# driver.find_element_by_class_name("button_search").click()
# Question: the page is too long and the button below is not clickable —
# how do we drive the scroll bar? Send repeated arrow-key presses
# (range gives the repeat count).
ac = ActionChains(driver)
for i in range(10):
    ac.send_keys(Keys.ARROW_DOWN)
ac.perform()
driver.execute_script("window.scrollTo(200,100)") #横竖坐标滚动 | [
"51Testing"
] | 51Testing |
933a90d77bcc6337f44e77eb422d6513ca2f3a4e | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/alwayson/testcase/firstcases/testcase5_028.py | 6cf58969ae1d92078ea9a7ace0fa943a46deb2cd | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,631 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'com.tomer.alwayson',
'appActivity' : 'com.tomer.alwayson.activities.PreferencesActivity',
'resetKeyboard' : True,
'androidCoverage' : 'com.tomer.alwayson/com.tomer.alwayson.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
    """Run a shell command and wait at most ``timeout`` seconds for it to
    finish, terminating it if it is still running at the deadline.

    Fix over the original: the old body unconditionally slept for the full
    ``timeout`` and then called terminate() even on processes that had long
    since exited. Polling returns as soon as the command finishes. The
    interface (cmd, timeout=5) and the None return are unchanged.

    NOTE(review): stdout=PIPE is never read, so a very chatty command could
    block on a full pipe before the deadline — confirm callers only use this
    for short adb broadcasts.
    """
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    deadline = time.time() + timeout
    # Poll until the process exits or the deadline passes.
    while p.poll() is None and time.time() < deadline:
        time.sleep(0.1)
    if p.poll() is None:
        p.terminate()
    return
def getElememt(driver, str):
    """Look up the element matching the uiautomator selector ``str``.

    Retries for up to five attempts, sleeping one second after each miss.
    If the element never appears, taps the screen once (to dismiss any
    overlay) and tries one final time, letting any exception propagate.
    """
    attempts = 5
    while attempts > 0:
        try:
            return driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
            attempts -= 1
    os.popen("adb shell input tap 50 50")
    return driver.find_element_by_android_uiautomator(str)
def getElememtBack(driver, str1, str2):
    """Find an element by the primary selector ``str1`` (two attempts),
    then fall back to the alternative selector ``str2`` (five attempts),
    sleeping one second after each miss. As a last resort, tap the screen
    and retry ``str2`` once more, letting any exception propagate.
    """
    for selector, tries in ((str1, 2), (str2, 5)):
        for _ in range(tries):
            try:
                return driver.find_element_by_android_uiautomator(selector)
            except NoSuchElementException:
                time.sleep(1)
    os.popen("adb shell input tap 50 50")
    return driver.find_element_by_android_uiautomator(str2)
def swipe(driver, startxper, startyper, endxper, endyper):
    """Swipe across the screen between two points given as fractions of the
    window size (0.0-1.0). One retry, after a one-second pause, is attempted
    when the first swipe raises WebDriverException.
    """
    dims = driver.get_window_size()
    x1 = int(dims["width"] * startxper)
    y1 = int(dims["height"] * startyper)
    x2 = int(dims["width"] * endxper)
    y2 = int(dims["height"] * endyper)
    try:
        driver.swipe(start_x=x1, start_y=y1, end_x=x2, end_y=y2, duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=x1, start_y=y1, end_x=x2, end_y=y2, duration=2000)
    return
# testcase028
# Auto-generated UI walk (Python 2): drive the Always-On preferences screens
# via Appium, then report pass/fail and flush JaCoCo coverage.
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    # Each pair below locates a widget (primary selector, fallback selector)
    # and taps it; press_keycode(4) sends the Android BACK key.
    element = getElememtBack(driver, "new UiSelector().text(\"Customize Watchface\")", "new UiSelector().className(\"android.widget.TextView\").instance(9)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Text\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"M\")", "new UiSelector().className(\"android.widget.TextView\").instance(7)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"F\")", "new UiSelector().className(\"android.widget.TextView\").instance(11)")
    TouchAction(driver).tap(element).perform()
    driver.press_keycode(4)
    element = getElememtBack(driver, "new UiSelector().text(\"Memo text\")", "new UiSelector().className(\"android.widget.TextView\").instance(15)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Cancel\")", "new UiSelector().className(\"android.widget.Button\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Styles\")", "new UiSelector().className(\"android.widget.TextView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Set the text color\")", "new UiSelector().className(\"android.widget.TextView\").instance(13)")
    TouchAction(driver).tap(element).perform()
    driver.press_keycode(4)
    element = getElememtBack(driver, "new UiSelector().text(\"Text & Font\")", "new UiSelector().className(\"android.widget.TextView\").instance(7)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Full calendar\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Text\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"S\")", "new UiSelector().className(\"android.widget.TextView\").instance(12)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"July 2018\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Full calendar\")", "new UiSelector().className(\"android.widget.TextView\").instance(13)")
    TouchAction(driver).tap(element).perform()
    driver.press_keycode(4)
    element = getElememtBack(driver, "new UiSelector().text(\"Battery style\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
    TouchAction(driver).tap(element).perform()
except Exception, e:
    # Any failure in the walk is reported but not re-raised.
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    # Always report timing, flush the EMMA/JaCoCo coverage broadcast and
    # quit the driver; force-stop whatever app ended up in the foreground
    # if it is not the app under test.
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"5_028\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    if (cpackage != 'com.tomer.alwayson'):
        cpackage = "adb shell am force-stop " + cpackage
        os.popen(cpackage)
| [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
5e4cb27bc88ca962e358de8631e842d4b3395cfb | 5292189eb99d9a69b4e417dfed352e7de0844b0e | /scripts/generate_enriched_texts.py | d76137474504d68ed6cc0d8f876971e1e90b30da | [
"MIT"
] | permissive | Envinorma/data-tasks | e1197ac3deada7edc5406933b65fd099bd412f6d | 7aa12b5def1b8a7a10c9651fb02267592fef0368 | refs/heads/main | 2022-10-26T21:38:39.952029 | 2022-06-12T08:46:38 | 2022-06-12T08:46:38 | 364,975,968 | 0 | 0 | MIT | 2022-10-11T12:25:53 | 2021-05-06T16:41:49 | Python | UTF-8 | Python | false | false | 1,265 | py | # DEPRECATED
'''
Script for generating all versions of a specific AM using its
structured version and its parametrization.
'''
# from typing import Optional, Tuple
# from envinorma.parametrization.am_with_versions import AMVersions, generate_am_with_versions
# from envinorma.utils import write_json
# from tasks.data_build.config import DATA_FETCHER
# TEST_ID = 'JORFTEXT000023081678'
# def _create_folder_and_generate_parametric_filename(am_id: str, version_desc: Tuple[str, ...]) -> str:
# raise NotImplementedError()
# def _dump(am_id: str, versions: Optional[AMVersions]) -> None:
# if not versions:
# return
# for version_desc, version in versions.items():
# filename = _create_folder_and_generate_parametric_filename(am_id, version_desc)
# write_json(version.to_dict(), filename)
# def handle_am(am_id: str) -> None:
# metadata = DATA_FETCHER.load_am_metadata(am_id)
# if not metadata:
# raise ValueError(f'AM {am_id} not found.')
# final_am = generate_am_with_versions(
# DATA_FETCHER.safe_load_most_advanced_am(am_id), DATA_FETCHER.load_or_init_parametrization(am_id), metadata
# )
# _dump(am_id, final_am.am_versions)
# if __name__ == '__main__':
# handle_am(TEST_ID)
| [
"remi.delbouys@laposte.net"
] | remi.delbouys@laposte.net |
37276aeb06dcad99c2d20af20c2879662c23e92f | 6e8f2e28479566dbaa338300b2d61f784ff83f97 | /.history/code/live_20210420075102.py | 5aaca3d8bdf9d7202c4559c9a90c0e239879462c | [] | no_license | eeng5/CV-final-project | 55a7d736f75602858233ebc380c4e1d67ab2b866 | 580e28819560b86f6974959efb1d31ef138198fc | refs/heads/main | 2023-04-09T21:28:21.531293 | 2021-04-21T19:57:22 | 2021-04-21T19:57:22 | 352,703,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,547 | py | import os
import cv2
import sys
import numpy as np
from models import SimpleModel
from preprocess import Datasets
import hyperparameters as hp
import tensorflow as tf
from skimage.transform import resize
from PIL import Image, ImageFont, ImageDraw
from scipy.spatial import distance as dist
from imutils import face_utils
from imutils.video import VideoStream
import fastai
import fastai.vision
import imutils
import argparse
import time
import dlib
from skimage import transform
from keras.preprocessing import image
def createPixelArray(arr):
    """Convert a raw grayscale crop into the model's input batch.

    Scales the pixel values, resizes to 48x48x1 and wraps the result in a
    single-element list so it can be fed to model.predict.

    Fix: the original first line ``array = image`` bound the keras ``image``
    module and was immediately overwritten — dead code, removed.

    NOTE(review): division by 225. (not 255.) looks like a typo, but the
    trained weights may expect exactly this scaling — confirm before changing.
    """
    array = np.array(arr, dtype=np.uint8) / 225.
    array = transform.resize(array, (48, 48, 1))
    return [array]
# Path to the pre-trained classifier checkpoint (author's local machine).
weights_str = "/Users/Natalie/Desktop/cs1430/CV-final-project/code/checkpoints/simple_model/041321-113618/your.weights.e015-acc0.6121.h5"
os.chdir(sys.path[0])
# Rebuild the model graph, restore the trained weights into it and compile.
model = SimpleModel()
model(tf.keras.Input(shape=(hp.img_size, hp.img_size)))
model.load_weights(weights_str, by_name=False)
model.compile(
    optimizer=model.optimizer,
    loss=model.loss_fn,
    metrics=["sparse_categorical_accuracy"],
)
# Haar cascade used to detect faces in each grayscale frame.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
vs = VideoStream(src=0).start()  # start the webcam stream
start = time.perf_counter()
data = []
time_value = 0
# Recorder for the annotated output video (MJPG, 10 fps, 450x253 frames).
out = cv2.VideoWriter(
    "liveoutput.avi", cv2.VideoWriter_fourcc("M", "J", "P", "G"), 10, (450, 253)
)
# Main loop: grab a frame, detect faces, classify each face crop and draw
# the predicted class index onto the frame. Press 'q' to quit.
while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face_coord = face_cascade.detectMultiScale(gray, 1.1, 5, minSize=(48, 48))
    for coords in face_coord:
        X, Y, w, h = coords
        H, W, _ = frame.shape
        # Expand the detected box, clamped to the frame bounds.
        X_1, X_2 = (max(0, X - int(w)), min(X + int(1.3 * w), W))
        Y_1, Y_2 = (max(0, Y - int(0.1 * h)), min(Y + int(1.3 * h), H))
        # NOTE(review): the classifier input is a fixed 48x48 window anchored
        # at the expanded box corner, not the whole expanded box — confirm.
        img_cp = gray[Y_1:Y_1+48, X_1:X_1+48].copy()
        img_mod = createPixelArray(img_cp)
        prediction = model.predict(img_mod)
        prediction = np.argmax(prediction)  # predicted class index
        cv2.rectangle(
            img=frame,
            pt1=(X_1, Y_1),
            pt2=(X_2, Y_2),
            color=(128, 128, 0),
            thickness=2,
        )
        cv2.putText(
            frame,
            str(prediction),
            (10, frame.shape[0] - 25),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.7,
            (225, 255, 255),
            2,)
    cv2.imshow("frame", frame)
    out.write(frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
# Release the camera, the video writer and any OpenCV windows.
vs.stop()
out.release()
cv2.destroyAllWindows()
| [
"natalie_rshaidat@brown.edu"
] | natalie_rshaidat@brown.edu |
ed7ad0ebeb496183ba4b5ae5a8803c223274731c | 6b1dd40d16ae6169e7ed780c5062e88d10502c85 | /Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/trash/902_cv_LOO_524-1.py | da31953b17d71903f208c8f4e306fb38ac469b9e | [
"MIT"
] | permissive | hehuanlin123/DeepLearning | 8a59680a341cfc525d50aa5afc3e44202ca4acc4 | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | refs/heads/master | 2022-07-12T09:26:08.617883 | 2019-06-10T11:31:37 | 2019-06-10T11:31:37 | 183,748,407 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,572 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 24 21:09:29 2018
@author: kazuki.onodera
"""
import numpy as np
import pandas as pd
import sys
sys.path.append('/home/kazuki_onodera/Python')
import lgbmextension as ex
import lightgbm as lgb
import gc
import utils
utils.start(__file__)
#==============================================================================
# Leave-one-feature-out CV: first measure the baseline 5-fold AUC with every
# feature, then re-run the CV once per column with that column dropped, so
# the printed AUCs show each feature's contribution.
SEED = 71  # fixed seed so all CV runs are comparable

# Assemble the training matrix from the three pickled feature groups.
X = pd.concat([utils.read_pickles('../data/101_train'),
               utils.read_pickles('../data/102_train'),
               utils.read_pickles('../data/103_train')], axis=1)
y = utils.read_pickles('../data/label').TARGET

# LightGBM parameters shared by every CV run below.
param = {
         'objective': 'binary',
         'metric': 'auc',
         'learning_rate': 0.05,
         'max_depth': -1,
         'num_leaves': 127,
         'max_bin': 100,
         'colsample_bytree': 0.5,
         'subsample': 0.5,
         'nthread': 64,
         'bagging_freq': 1,
         'seed': SEED,
         'verbose': -1
         }

# Columns treated as categorical by LightGBM.
categorical_feature = ['NAME_CONTRACT_TYPE',
                     'CODE_GENDER',
                     'FLAG_OWN_CAR',
                     'FLAG_OWN_REALTY',
                     'NAME_TYPE_SUITE',
                     'NAME_INCOME_TYPE',
                     'NAME_EDUCATION_TYPE',
                     'NAME_FAMILY_STATUS',
                     'NAME_HOUSING_TYPE',
                     'OCCUPATION_TYPE',
                     'WEEKDAY_APPR_PROCESS_START',
                     'ORGANIZATION_TYPE',
                     'FONDKAPREMONT_MODE',
                     'HOUSETYPE_MODE',
                     'WALLSMATERIAL_MODE',
                     'EMERGENCYSTATE_MODE']

# Baseline: 5-fold CV with the full feature set.
dtrain = lgb.Dataset(X, y,
                     categorical_feature=categorical_feature)
ret = lgb.cv(param, dtrain, 9999, nfold=5,
             early_stopping_rounds=50, verbose_eval=None,
             seed=SEED)
print(f"NO drop auc-mean {ret['auc-mean'][-1]}")

# Drop each column in turn and report the resulting CV AUC.
for c in X.columns:
    print(f'drop {c}')
    gc.collect()
    # Copy the categorical list and remove the dropped column if present.
    categorical_feature_ = categorical_feature[:]
    if c in categorical_feature_:
        categorical_feature_.remove(c)
    dtrain = lgb.Dataset(X.drop(c, axis=1), y,
                         categorical_feature=categorical_feature_)
    ret = lgb.cv(param, dtrain, 9999, nfold=5,
#                 categorical_feature=categorical_feature,
                 early_stopping_rounds=50, verbose_eval=None,
                 seed=SEED)
    print(f"auc-mean {ret['auc-mean'][-1]}")
#==============================================================================
utils.end(__file__)
| [
"szkfzx@szkfzxdeiMac.local"
] | szkfzx@szkfzxdeiMac.local |
f58417c1d198d5d79a6b11bba9b19fb2b7416ef0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_perish.py | f448a3104103b61dfd8543a82aa79519714189e1 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py |
#calss header
class _PERISH():
def __init__(self,):
self.name = "PERISH"
self.definitions = [u'to die, especially in an accident or by being killed, or to be destroyed: ', u'If material such as rubber or leather perishes, it decays and starts to break into pieces: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
5814d5b9897fce2326f8464326357614ccef0682 | ad9782856ec2f860fccbefa5e75a896691b8e1cc | /MonteCarlo/test/dropLargeRespace/VBF_HToZZTo4L_M125_14TeV_powheg2_JHUgenV702_pythia8_LHE_GEN_SIM_OT_Tilted_362_200_Pixel_4021_dropLargeRespace.py | a4a679fc47b806bb6339c6dcefcd23818ed9126b | [] | no_license | OSU-CMS/VFPix | 7fe092fc5a973b4f9edc29dbfdf44907664683e5 | 4c9fd903219742a4eba1321dc4181da125616e4c | refs/heads/master | 2020-04-09T05:52:05.644653 | 2019-01-09T13:44:22 | 2019-01-09T13:44:22 | 30,070,948 | 0 | 0 | null | 2018-11-30T13:15:54 | 2015-01-30T12:26:20 | Python | UTF-8 | Python | false | false | 6,517 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: Configuration/Generator/python/VBF_HToZZTo4L_M125_14TeV_powheg2_JHUgenV702_pythia8_cfi.py --conditions auto:phase2_realistic -n 100 --era Phase2C2 --eventcontent FEVTDEBUG --relval 9000,100 -s LHE,GEN,SIM --datatier GEN-SIM --beamspot HLLHC --geometry Extended2023D4 --fileout step2_SIM.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('SIM',eras.Phase2C2)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.Geometry.GeometryExtended2023D4Reco_cff')
process.load('Configuration.Geometry.GeometryExtended2023D4_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedHLLHC_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(5)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('VFPix/MonteCarlo/python/VBF_HToZZTo4L_M125_14TeV_powheg2_JHUgenV702_pythia8_cfi.py nevts:100'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.FEVTDEBUGoutput = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN-SIM'),
filterName = cms.untracked.string('')
),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
fileName = cms.untracked.string('step2_SIM.root'),
outputCommands = process.FEVTDEBUGEventContent.outputCommands,
splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic', '')
process.generator = cms.EDFilter("Pythia8HadronizerFilter",
PythiaParameters = cms.PSet(
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'pythia8PowhegEmissionVetoSettings',
'processParameters'),
processParameters = cms.vstring('POWHEG:nFinal = 3'),
pythia8CUEP8M1Settings = cms.vstring('Tune:pp 14',
'Tune:ee 7',
'MultipartonInteractions:pT0Ref=2.4024',
'MultipartonInteractions:ecmPow=0.25208',
'MultipartonInteractions:expPow=1.6'),
pythia8CommonSettings = cms.vstring('Tune:preferLHAPDF = 2',
'Main:timesAllowErrors = 10000',
'Check:epTolErr = 0.01',
'Beams:setProductionScalesFromLHEF = off',
'SLHA:keepSM = on',
'SLHA:minMassSM = 1000.',
'ParticleDecays:limitTau0 = on',
'ParticleDecays:tau0Max = 10',
'ParticleDecays:allowPhotonRadiation = on'),
pythia8PowhegEmissionVetoSettings = cms.vstring('POWHEG:veto = 1',
'POWHEG:pTdef = 1',
'POWHEG:emitted = 0',
'POWHEG:pTemt = 0',
'POWHEG:pThard = 0',
'POWHEG:vetoCount = 100',
'SpaceShower:pTmaxMatch = 2',
'TimeShower:pTmaxMatch = 2')
),
comEnergy = cms.double(14000.0),
filterEfficiency = cms.untracked.double(1.0),
maxEventsToPrint = cms.untracked.int32(1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1)
)
process.externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/14TeV/powheg/V2/VBF_HZZ4L_NNPDF30_14TeV_M125_JHUGenV702/v2/VBF_HZZ4L_NNPDF30_14TeV_M125_JHUGenV702.tgz'),
nEvents = cms.untracked.uint32(100),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
# Path and EndPath definitions
process.lhe_step = cms.Path(process.externalLHEProducer)
process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.FEVTDEBUGoutput_step = cms.EndPath(process.FEVTDEBUGoutput)
# Schedule definition
process.schedule = cms.Schedule(process.lhe_step,process.generation_step,process.genfiltersummary_step,process.simulation_step,process.endjob_step,process.FEVTDEBUGoutput_step)
# filter all path with the production filter sequence
for path in process.paths:
if path in ['lhe_step']: continue
getattr(process,path)._seq = process.generator * getattr(process,path)._seq
# Customisation from command line
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
inputDir = "VFPix/MonteCarlo/data/OT_Tilted_362_200_Pixel_4021_dropLargeRespace/"
fileNames =["pixbar.xml","pixelProdCuts.xml","pixelStructureTopology.xml","pixelsens.xml","pixfwd.xml","tracker.xml","trackerProdCuts.xml","trackerRecoMaterial.xml","trackerStructureTopology.xml","trackersens.xml","pixel.xml"]
for i in range (0, len (process.XMLIdealGeometryESSource.geomXMLFiles)):
xmlFile = process.XMLIdealGeometryESSource.geomXMLFiles[i]
fileName = xmlFile.split("/")[-1]
if fileName in fileNames:
process.XMLIdealGeometryESSource.geomXMLFiles[i] = inputDir + fileName
| [
"juliette.alimena@cern.ch"
] | juliette.alimena@cern.ch |
be72477f2c2a81bb266c758a9409a5ecc183c251 | 652121d51e6ff25aa5b1ad6df2be7eb341683c35 | /programs/e2proclst.py | 4ed2507cb5f9c3544cff4642cdfa9f78535076d1 | [] | no_license | jgalaz84/eman2 | be93624f1c261048170b85416e517e5813992501 | 6d3a1249ed590bbc92e25fb0fc319e3ce17deb65 | refs/heads/master | 2020-04-25T18:15:55.870663 | 2015-06-05T20:21:44 | 2015-06-05T20:21:44 | 36,952,784 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,932 | py | #!/usr/bin/env python
# This program performs simple processing of .LST files
# Author: Steven Ludtke, 10/06/14 (sludtke@bcm.edu)
# Copyright (c) 2014- Baylor College of Medicine
#
# This software is issued under a joint BSD/GNU license. You may use the
# source code in this file under either license. However, note that the
# complete EMAN2 and SPARX software packages have some GPL dependencies,
# so you are responsible for compliance with the licenses of these packages
# if you opt to use BSD licensing. The warranty disclaimer below holds
# in either instance.
#
# This complete copyright notice must be included in any revised version of the
# source code. Additional authorship citations may be added, but existing
# author citations must be preserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 2111-1307 USA
#
from EMAN2 import *
from math import *
import os
import sys
def main():
	"""Command-line entry point for simple .lst manipulations.

	Supports four independent modes selected by options:
	  --create     build a new .lst referencing every image in the inputs
	  --retype     rewrite particle references to a different __type suffix
	  --merge      concatenate several .lst files into one
	  --mergesort  merge .lst files, resort, optionally filtering by SNR

	NOTE(review): Python 2 code throughout (print statements, xrange).
	"""
	progname = os.path.basename(sys.argv[0])
	usage = """Usage:\nproclst.py [options] <lst 1> <lst 2> ... \nSimple manipulations of LST files. If your goal is to produce an actual image file rather than the
sort of virtual stack represented by .lst files, use e2proc2d.py or e2proc3d.py instead. Those programs will treat LST files as normal image files for input.\n."""
	parser = EMArgumentParser(usage=usage,version=EMANVERSION)
	####################
	# parser.add_argument("--average", action="store_true", help="Averages all input images (without alignment) and writes a single output image")
	parser.add_argument("--merge",type=str,help="Specify the output name here. This will concatenate all of the input .lst files into a single output",default=None)
	parser.add_argument("--create",type=str,help="Input files should be image files. Specify an .lst file to create here with references to all of the images in the inputs.")
	parser.add_argument("--mergesort",type=str,help="Specify the output name here. This will merge all of the input .lst files into a single (resorted) output",default=None)
	parser.add_argument("--retype",type=str,help="If a lst file is referencing a set of particles from particles/imgname__oldtype.hdf, this will change oldtype to the specified string in-place (modifies input files)",default=None)
	parser.add_argument("--minlosnr",type=float,help="Integrated SNR from 1/200-1/20 1/A must be larger than this",default=0,guitype='floatbox', row=8, col=0)
	parser.add_argument("--minhisnr",type=float,help="Integrated SNR from 1/10-1/4 1/A must be larger than this",default=0,guitype='floatbox', row=8, col=1)
	parser.add_argument("--verbose", "-v", dest="verbose", action="store", metavar="n", type=int, help="verbose level [0-9], higner number means higher level of verboseness",default=1)
	parser.add_argument("--ppid", type=int, help="Set the PID of the parent process, used for cross platform PPID",default=-1)
	(options, args) = parser.parse_args()
	if len(args)<1 :
		parser.error("At least one lst file required")
		# NOTE(review): argparse's parser.error() normally exits on its own,
		# which would make this sys.exit unreachable -- confirm that
		# EMArgumentParser behaves the same way.
		sys.exit(1)
	logid=E2init(sys.argv,options.ppid)
	# --create: args are image files; build one .lst referencing all of them.
	if options.create != None:
		lst=LSXFile(options.create,False)
		for f in args:
			n=EMUtil.get_image_count(f)
			if options.verbose : print "Processing {} images in {}".format(n,f)
			for i in xrange(n):
				lst.write(-1,i,f)	# -1 appends a new entry at the end
		sys.exit(0)
	# --retype: rewrite each referenced path to particles/<base>__<type>.hdf,
	# modifying the input .lst files in place.
	if options.retype != None:
		if options.minlosnr>0 or options.minhisnr>0 :
			print "ERROR: --minlosnr and --minhisnr not compatible with --retype"
			sys.exit(1)
		# if the user provided the leading __ for us, we strip it off and add it back later
		if options.retype[:2]=="__" :
			options.retype=options.retype[2:]
		for f in args:
			if options.verbose : print "Processing ",f
			lst=LSXFile(f,True)
			# Sanity-check only the first entry; all entries are assumed to
			# live under particles/.
			a=lst.read(0)
			if a[1][:10]!="particles/" :
				print "To use the --retype option, the .lst file must reference image files in particles/*"
			if options.verbose>1 :
				b=base_name(a[1])
				print "{} -> {}".format(a[1],b+"__"+options.retype+".hdf")
			# loop over the images in the lst file
			for i in xrange(len(lst)):
				im=lst.read(i)
				outname="particles/{}__{}.hdf".format(base_name(im[1]),options.retype)
				lst.write(i,im[0],outname,im[2])
			lst.normalize()			# clean up at the end
			if options.verbose>1 : print len(lst)," particles adjusted"
		if options.verbose : print "Done processing {} files".format(len(args))
	# --merge: plain concatenation of all input .lst files in original order.
	if options.merge!=None:
		if options.minlosnr>0 or options.minhisnr>0 :
			print "ERROR: --minlosnr and --minhisnr not compatible with --merge. Please use --mergesort instead."
			sys.exit(1)
		# create/update output lst
		lsto=LSXFile(options.merge)
		ntot=0
		# loop over input files
		for f in args:
			lst=LSXFile(f,True)
			ntot+=len(lst)
			for i in xrange(len(lst)):
				im=lst.read(i)
				lsto.write(-1,im[0],im[1],im[2])
		if options.verbose : print "{} particles added to {}".format(ntot,options.merge)
	# --mergesort: gather all entries, sort by (file, index), and optionally
	# drop every particle from micrographs whose CTF-derived SNR is too low.
	if options.mergesort!=None:
		# create/update output lst
		lsto=LSXFile(options.mergesort)
		ntot=0
		# loop over input files
		ptcls=[]
		pfiles=set()
		for f in args:
			lst=LSXFile(f,True)
			ntot+=len(lst)
			for i in xrange(len(lst)):
				im=lst.read(i)
				ptcls.append((im[1],im[0],im[2]))
				pfiles.add(im[1])
		ptcls.sort()
		# remove particles in files not meeting our criteria
		if options.minlosnr>0 or options.minhisnr>0 :
			# the list conversion here is so we are iterating over a copy and not modifying the set while we iterate over it
			for pfile in list(pfiles):
				js=js_open_dict(info_name(pfile))
				ctf=js["ctf"][0]
				js.close()
				r1=int(floor(1.0/(200.0*ctf.dsbg)))	# lowsnr is 200-20 A
				r2=int(ceil(1.0/(20.0*ctf.dsbg)))
				r3=int(floor(1.0/(10.0*ctf.dsbg)))	# hisnr is 10 to 4 A
				r4=int(ceil(1.0/(4.0*ctf.dsbg)))
				losnr=sum(ctf.snr[r1:r2])/(r2-r1)
				hisnr=sum(ctf.snr[r3:r4])/(r4-r3)
				if losnr<options.minlosnr or hisnr<options.minhisnr:
					pfiles.remove(pfile)
					if options.verbose: print pfile," removed due to SNR criteria"
		nwrt=0
		for i in ptcls:
			if i[0] in pfiles :
				lsto.write(-1,i[1],i[0],i[2])
				nwrt+=1
		if options.verbose :
			if nwrt==ntot : print "{} particles in {}".format(ntot,options.mergesort)
			else : print "{} of {} particles written to {}".format(nwrt,ntot,options.mergesort)
	E2end(logid)
if __name__ == "__main__":
	main()
| [
"jgalaz@gmail.com"
] | jgalaz@gmail.com |
ccc42f46382a7123266fc8d929e483370e32deca | 892efbbd60049f22c5e271a0e49f505e9f6029e1 | /doc/examples/plot_holes_and_peaks.py | a9c3a7fe4e8a26085f62c53f4856ad170652fcd5 | [
"BSD-3-Clause"
] | permissive | teoliphant/scikit-image | 95338caa2876f2c6360a8e164b7cc2d4127f2038 | d0415e6df475157705fd1ef2af69b16e4f7e38cc | refs/heads/master | 2020-12-30T19:23:04.806465 | 2012-11-02T23:02:06 | 2012-11-02T23:02:06 | 6,649,519 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,601 | py | """
===============================
Filling holes and finding peaks
===============================
In this example, we fill holes (i.e. isolated, dark spots) in an image using
morphological reconstruction by erosion. Erosion expands the minimal values of
the seed image until it encounters a mask image. Thus, the seed image and mask
image represent the maximum and minimum possible values of the reconstructed
image.
We start with an image containing both peaks and holes:
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.exposure import rescale_intensity
# Sample grayscale moon image shipped with scikit-image.
image = data.moon()
# Rescale image intensity so that we can see dim features.
image = rescale_intensity(image, in_range=(50, 200))
# convenience function for plotting images
def imshow(image, **kwargs):
    """Show *image* in a fresh 5x4-inch figure with the axes hidden.

    Extra keyword arguments are forwarded to ``plt.imshow``
    (e.g. ``vmin``/``vmax``).
    """
    plt.figure(figsize=(5, 4))
    plt.imshow(image, **kwargs)
    plt.axis('off')
imshow(image)
plt.title('original image')
"""
.. image:: PLOT2RST.current_figure
Now we need to create the seed image, where the minima represent the starting
points for erosion. To fill holes, we initialize the seed image to the maximum
value of the original image. Along the borders, however, we use the original
values of the image. These border pixels will be the starting points for the
erosion process. We then limit the erosion by setting the mask to the values
of the original image.
"""
import numpy as np
from skimage.morphology import reconstruction
# Seed equals the image maximum everywhere except the 1-pixel border, which
# keeps the original values and acts as the starting point for erosion.
seed = np.copy(image)
seed[1:-1, 1:-1] = image.max()
# The original image bounds the reconstruction from below.
mask = image
filled = reconstruction(seed, mask, method='erosion')
imshow(filled, vmin=image.min(), vmax=image.max())
plt.title('after filling holes')
"""
.. image:: PLOT2RST.current_figure
As shown above, eroding inward from the edges removes holes, since (by
definition) holes are surrounded by pixels of brighter value. Finally, we can
isolate the dark regions by subtracting the reconstructed image from the
original image.
"""
# The difference isolates the dark regions (holes) that reconstruction filled.
imshow(image - filled)
plt.title('holes')
"""
.. image:: PLOT2RST.current_figure
Alternatively, we can find bright spots in an image using morphological
reconstruction by dilation. Dilation is the inverse of erosion and expands the
*maximal* values of the seed image until it encounters a mask image. Since this
is an inverse operation, we initialize the seed image to the minimum image
intensity instead of the maximum. The remainder of the process is the same.
"""
# For bright peaks, seed from the image minimum and reconstruct by dilation
# (the inverse of the hole-filling erosion above).
seed = np.copy(image)
seed[1:-1, 1:-1] = image.min()
rec = reconstruction(seed, mask, method='dilation')
imshow(image - rec)
plt.title('peaks')
plt.show()
"""
.. image:: PLOT2RST.current_figure
"""
| [
"tsyu80@gmail.com"
] | tsyu80@gmail.com |
09efec9a6022b0570ad0e33b0dff86b355536217 | adc6d8ee596e4710c3241332758bb6990bdd8914 | /subDM/subNormDM/brilloContraste.py | fbeff8b5ec28d286245d03e284c4ef261b484cfc | [] | no_license | NatalyTinoco/Trabajo-de-grado_Artefactos | cf9491c47a8a23ce5bab7c52498093a61319f834 | 5cc4e009f94c871c7ed0d820eb113398ac66ec2f | refs/heads/master | 2022-03-20T00:51:48.420253 | 2019-11-24T19:10:40 | 2019-11-24T19:10:40 | 197,964,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 18 20:07:00 2019
@author: Nataly
"""
import numpy as np
def contraste(img):
    """Return the RMS (root-mean-square) contrast of a grayscale image.

    RMS contrast is the standard deviation of the pixel intensities around
    the mean brightness, truncated to an integer.

    Args:
        img: numpy array of pixel intensities (any shape; originally 2-D).

    Returns:
        int: RMS contrast of the image.

    Note (review): the previous implementation had three defects -- the
    ``suma`` accumulator was never reset to 0 before the deviation pass (so
    it still contained the full brightness sum), the squaring step
    (``cont = suma * suma``) was computed and then immediately overwritten,
    and ``sqrt(suma / total)`` could receive a negative argument. The net
    result approximated sqrt(mean brightness) rather than contrast. This
    version computes the intended RMS contrast directly.
    """
    pixels = np.asarray(img, dtype=np.float64)
    # Mean brightness of the image.
    media = pixels.mean()
    # Standard deviation of intensities == RMS deviation from the mean.
    rms = np.sqrt(np.mean((pixels - media) ** 2))
    return int(rms)
def brillo(img):
    """Return the mean brightness of a grayscale image as an integer.

    Args:
        img: numpy array of pixel intensities (any shape; originally 2-D).

    Returns:
        int: mean pixel value, truncated to an integer.

    Note (review): the previous implementation computed ``brillo`` but never
    returned it (the function implicitly returned None, unlike its sibling
    ``contraste``). The return statement is restored here -- confirm against
    callers that the missing return was not intentional.
    """
    pixels = np.asarray(img, dtype=np.float64)
    return int(pixels.mean())
| [
"51056570+NatalyTinoco@users.noreply.github.com"
] | 51056570+NatalyTinoco@users.noreply.github.com |
dd8bb1c250b24c436a67fde4a1a0d51018deae0f | 1af44bdcbc3c15d3f6e436a7924dfd45f504ab3a | /01.jump to python/chpter 7/1_Regular_Expression/322_4.py | e7264c33ea0b3f9081a31f5eecfb71e933afd995 | [] | no_license | wql7654/bigdata_exam | f57c8b475690cbc5978009dbf8008bedff602e2a | c07ee711bb84407428ba31165185b9607b6825e8 | refs/heads/master | 2023-04-07T00:50:59.563714 | 2021-05-25T02:46:43 | 2021-05-25T02:46:43 | 180,915,985 | 0 | 0 | null | 2023-03-25T01:08:09 | 2019-04-12T02:36:08 | Jupyter Notebook | UTF-8 | Python | false | false | 491 | py | import re
# Sample file names used to exercise the extension-matching pattern.
file_name = ["foo.bar", "autoexec.bat", "sendmail.cf", "sandstrom.p"]
# Accept a 1-3 character extension but reject exactly "bat": at least one of
# the three positions must differ from 'b'/'a'/'t' respectively, and the
# trailing "?" quantifiers allow extensions shorter than three characters.
p = re.compile(".*[.]([^b].?.?|.[^a]?.?|..?[^t]?)$")
# Print the match object (or None) for every candidate file name.
for candidate in file_name:
    m = p.search(candidate)
    print(m)
"studerande5@gmail.com"
] | studerande5@gmail.com |
6e4f75ac35fb9b664f40bc19a40e3bf93dc0da7b | 3bae1ed6460064f997264091aca0f37ac31c1a77 | /core/sort/sort.py | 46dd25711428d5aa0b51bd19a417355790d4ac24 | [] | no_license | racktivity/ext-pylabs-core | 04d96b80ac1942754257d59e91460c3a141f0a32 | 53d349fa6bee0ccead29afd6676979b44c109a61 | refs/heads/master | 2021-01-22T10:33:18.523799 | 2017-06-08T09:09:28 | 2017-06-08T09:09:28 | 54,314,984 | 0 | 0 | null | 2017-06-08T09:09:29 | 2016-03-20T11:55:01 | Python | UTF-8 | Python | false | false | 4,647 | py | # <License type="Sun Cloud BSD" version="2.2">
#
# Copyright (c) 2005-2009, Sun Microsystems, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. Neither the name Sun Microsystems, Inc. nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SUN MICROSYSTEMS, INC. "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SUN MICROSYSTEMS, INC. OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# </License>
from heapq import heapify, heappop, heappush
from itertools import islice, cycle
from tempfile import gettempdir
import os
import pylabs
def merge(chunks,key=None):
    """K-way merge of pre-sorted, open chunk files into one sorted stream.

    Yields lines from all chunk files in globally sorted order using a heap.
    Each chunk file is closed, deleted from disk, and removed from the
    ``chunks`` list as soon as it is exhausted.

    chunks -- list of open file objects, each already sorted line-wise
    key    -- optional sort key applied to each line (defaults to identity)

    NOTE(review): Python 2 code (``iterator.next()``).
    """
    if key is None:
        key = lambda x : x
    values = []
    # Prime the heap with the first line of every chunk.
    # NOTE(review): this loop removes items from ``chunks`` while enumerate()
    # is iterating over it, which can skip entries -- verify with callers.
    for index, chunk in enumerate(chunks):
        try:
            iterator = iter(chunk)
            value = iterator.next()
        except StopIteration:
            # Empty chunk: dispose of its backing temp file immediately.
            try:
                chunk.close()
                os.remove(chunk.name)
                chunks.remove(chunk)
            except:
                pylabs.q.logger.log("StopIterationException", 5)
        else:
            # ``index`` breaks key ties so the (possibly unorderable)
            # payloads never get compared directly.
            heappush(values,((key(value),index,value,iterator,chunk)))
    # Repeatedly emit the smallest buffered line, then refill from the
    # chunk it came from; drop chunks as they drain.
    while values:
        k, index, value, iterator, chunk = heappop(values)
        yield value
        try:
            value = iterator.next()
        except StopIteration:
            # Chunk exhausted: close it and delete its temp file.
            try:
                chunk.close()
                os.remove(chunk.name)
                chunks.remove(chunk)
            except:
                pylabs.q.logger.log("StopIterationException", 5)
        else:
            heappush(values,(key(value),index,value,iterator,chunk))
def batch_sort(input, output, header, key=None,buffer_size=32000,tempdirs=[]):
    """External merge sort of a text file that may not fit in memory.

    Reads ``input`` in chunks of ``buffer_size`` lines, sorts each chunk,
    spills it to a temp file, then k-way merges all chunks into ``output``
    after writing the 5-element ``header`` verbatim.

    input       -- path of the file to sort
    output      -- path of the sorted result
    header      -- sequence of 5 strings written at the top of the output
    key         -- optional per-line sort key
    buffer_size -- number of lines held in memory per chunk
    tempdirs    -- directories cycled through for spill files

    NOTE(review): Python 2 code (``file`` builtin). ``tempdirs=[]`` is a
    mutable default argument -- gettempdir() is appended to the shared
    default on first use and persists across calls.
    """
    if not tempdirs:
        tempdirs.append(gettempdir())
    # 64 KB buffered binary reads from the source file.
    input_file = file(input,'rb',64*1024)
    try:
        input_iterator = iter(input_file)
        chunks = []
        try:
            # Round-robin across temp dirs so spill files can spread
            # over multiple disks.
            for tempdir in cycle(tempdirs):
                current_chunk = list(islice(input_iterator,buffer_size))
                if current_chunk:
                    current_chunk.sort(key=key)
                    output_chunk = file(os.path.join(tempdir,'%06i'%len(chunks)),'w+b',64*1024)
                    output_chunk.writelines(current_chunk)
                    output_chunk.flush()
                    # Rewind so merge() can read the chunk back from the start.
                    output_chunk.seek(0)
                    chunks.append(output_chunk)
                else:
                    break
        except:
            # Any failure during chunking: delete every spill file and abort.
            for chunk in chunks:
                try:
                    chunk.close()
                    os.remove(chunk.name)
                except:
                    pylabs.q.logger.log("StopIterationException", 5)
            # The chunk being written when the error hit may not have been
            # appended to ``chunks`` yet.
            if output_chunk not in chunks:
                try:
                    output_chunk.close()
                    os.remove(output_chunk.name)
                except:
                    pylabs.q.logger.log("StopIterationException", 5)
            return
    finally:
        input_file.close()
    output_file = file(output,'wb',64*1024)
    try:
        # Fixed 5-line header, then the merged sorted stream.
        output_file.write(header[0])
        output_file.write(header[1])
        output_file.write(header[2])
        output_file.write(header[3])
        output_file.write(header[4])
        output_file.writelines(merge(chunks,key))
    finally:
        # merge() normally removes chunks as they drain; clean up leftovers.
        for chunk in chunks:
            try:
                chunk.close()
                os.remove(chunk.name)
            except:
                pylabs.q.logger.log("StopIterationException", 5)
        output_file.close()
"devnull@localhost"
] | devnull@localhost |
c283a8c68863c409076408a410ce155dddcf590e | 11a246743073e9d2cb550f9144f59b95afebf195 | /codeforces/793/a.py | 0905d62437f790cd23e8d77eb696802d02271c3b | [] | no_license | ankitpriyarup/online-judge | b5b779c26439369cedc05c045af5511cbc3c980f | 8a00ec141142c129bfa13a68dbf704091eae9588 | refs/heads/master | 2020-09-05T02:46:56.377213 | 2019-10-27T20:12:25 | 2019-10-27T20:12:25 | 219,959,932 | 0 | 1 | null | 2019-11-06T09:30:58 | 2019-11-06T09:30:57 | null | UTF-8 | Python | false | false | 268 | py | def main():
n, k = map(int, input().split())
a = list(map(int, input().split()))
target = min(a)
mods = set([x % k for x in a])
if len(mods) != 1:
print(-1)
return
ans = sum((x - target) // k for x in a)
print(ans)
main()
| [
"arnavsastry@gmail.com"
] | arnavsastry@gmail.com |
89beec6259029120fe9cf824913120a3783b0624 | 202f687add55894f77d88a84f1f7e84605301a0c | /4.scrapy框架/collectips_itemloader$$$/collectips/items.py | f10731bea40faca39daba4f78510340eecfef775 | [] | no_license | SuneastChen/python_crawler_learning | 6f8ef3b8409ad3c0a9ed900ccd89e7180df5a9bd | 6651177ef177231acd9638c39c809bb8f62d5df0 | refs/heads/master | 2020-03-07T22:45:39.515235 | 2018-04-02T13:53:55 | 2018-04-02T13:53:55 | 127,763,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose, TakeFirst
import re
class IPItemLoader(ItemLoader): #继承itemloader,自定义类
default_output_processor = TakeFirst() # 默认输出第一个值
def re_speed(value):
return re.search('\d+\.\d*', value).group()
class CollectipsItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
IP = scrapy.Field()
PORT = scrapy.Field()
POSITION = scrapy.Field(
input_processor=MapCompose(lambda x : x.strip(),))
TYPE = scrapy.Field()
SPEED = scrapy.Field(
input_processor=MapCompose(re_speed,))
LAST_CHECK_TIME = scrapy.Field()
| [
"1050521852@qq.com"
] | 1050521852@qq.com |
dbb210648c4d90f7249728ed0cec7c1512ae0bec | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/stringMethods_20200707100427.py | cea2c06286512296eee3554a7f49dc894e2a3569 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | def array(arr):
newArr = []
for i in range(len(arr)):
b =
print(b)
array(["[6,7,5]","[1,8]"]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
a69b18877b22eeed94b6afd73ed99375d2f964fb | 43378f262acb3bbf6af8d4c0dc30d149fa5ba302 | /hello/migrations/0004_question_choice4.py | 880832f360284e679052b9a1931b4fdf9fded106 | [] | no_license | c-bata/django-squash-squashed-migratoins | 570d68de550f89ad710968a9c3f9cb353cba91a6 | 292a8d72d6eded7663a7e79ba94e1e3876c1250c | refs/heads/master | 2021-05-18T00:54:29.764796 | 2020-03-29T10:35:13 | 2020-03-29T10:35:16 | 251,034,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | # Generated by Django 3.1 on 2020-03-29 10:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hello', '0003_question_choice3'),
]
operations = [
migrations.AddField(
model_name='question',
name='choice4',
field=models.CharField(default='', max_length=20),
),
]
| [
"contact@c-bata.link"
] | contact@c-bata.link |
f0439f14153f366ba24c74df955485bb042f9030 | 9cbd523cdedc727f62c887612e8ae2c25c909964 | /tests/UI_test/functional/smoke_test_remote_parallel/test_TID_048.py | 5fc487754f89ef89b5248b32bade46944f6dc4fc | [] | no_license | louiscklaw/QA_test_scripts | 8a71d0bed99fae3b0dac4cd9414b3e34dcf5beed | 58b73594332053272d8dce2c812c93297259c782 | refs/heads/master | 2023-01-27T15:48:29.477848 | 2020-12-06T10:05:19 | 2020-12-06T10:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | import os,sys
from pprint import pprint
import random
from time import sleep
sys.path.append(os.path.dirname(__file__))
from path_config import *
from urls import *
from steps import *
from pages.config import *
from jp import *
from urls import *
from setupLocalChrome import *
from test_TID_046 import *
def test_TID_048(json_metadata, table_num=41, food_quantity=5):
# clear before test
(r_browser, c_browser) = tour_TID_046(json_metadata, table_num, food_quantity)
check_TID_032.run_check(json_metadata, r_browser)
check_TID_048.run_check(json_metadata, r_browser, table_num)
return (r_browser, c_browser)
| [
"louiscklaw@gmail.com"
] | louiscklaw@gmail.com |
adb8edcc5d7786f61e95e57ac2b102bbbfebd784 | 3ea99519e25ec1bb605947a94b7a5ceb79b2870a | /modern_python/modernpython/lib/python3.6/site-packages/mypy/test/testinfer.py | 5a1475e15009fe67c361fb7640b73957a433ca8c | [] | no_license | tech-cow/spazzatura | 437c7502a0654a3d3db2fd1e96ce2e3e506243c0 | 45fc0932186d2ef0c5044745a23507a692cfcc26 | refs/heads/master | 2022-09-01T12:01:11.309768 | 2018-11-15T04:32:03 | 2018-11-15T04:32:03 | 130,414,653 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,344 | py | """Test cases for type inference helper functions."""
from typing import List, Optional, Tuple, Union
from mypy.test.helpers import Suite, assert_equal
from mypy.checkexpr import map_actuals_to_formals
from mypy.nodes import ARG_POS, ARG_OPT, ARG_STAR, ARG_STAR2, ARG_NAMED
from mypy.types import AnyType, TupleType, Type, TypeOfAny
from mypy.test.typefixture import TypeFixture
class MapActualsToFormalsSuite(Suite):
"""Test cases for checkexpr.map_actuals_to_formals."""
def test_basic(self) -> None:
self.assert_map([], [], [])
def test_positional_only(self) -> None:
self.assert_map([ARG_POS],
[ARG_POS],
[[0]])
self.assert_map([ARG_POS, ARG_POS],
[ARG_POS, ARG_POS],
[[0], [1]])
def test_optional(self) -> None:
self.assert_map([],
[ARG_OPT],
[[]])
self.assert_map([ARG_POS],
[ARG_OPT],
[[0]])
self.assert_map([ARG_POS],
[ARG_OPT, ARG_OPT],
[[0], []])
def test_callee_star(self) -> None:
self.assert_map([],
[ARG_STAR],
[[]])
self.assert_map([ARG_POS],
[ARG_STAR],
[[0]])
self.assert_map([ARG_POS, ARG_POS],
[ARG_STAR],
[[0, 1]])
def test_caller_star(self) -> None:
self.assert_map([ARG_STAR],
[ARG_STAR],
[[0]])
self.assert_map([ARG_POS, ARG_STAR],
[ARG_STAR],
[[0, 1]])
self.assert_map([ARG_STAR],
[ARG_POS, ARG_STAR],
[[0], [0]])
self.assert_map([ARG_STAR],
[ARG_OPT, ARG_STAR],
[[0], [0]])
def test_too_many_caller_args(self) -> None:
self.assert_map([ARG_POS],
[],
[])
self.assert_map([ARG_STAR],
[],
[])
self.assert_map([ARG_STAR],
[ARG_POS],
[[0]])
def test_tuple_star(self) -> None:
any_type = AnyType(TypeOfAny.special_form)
self.assert_vararg_map(
[ARG_STAR],
[ARG_POS],
[[0]],
self.tuple(any_type))
self.assert_vararg_map(
[ARG_STAR],
[ARG_POS, ARG_POS],
[[0], [0]],
self.tuple(any_type, any_type))
self.assert_vararg_map(
[ARG_STAR],
[ARG_POS, ARG_OPT, ARG_OPT],
[[0], [0], []],
self.tuple(any_type, any_type))
def tuple(self, *args: Type) -> TupleType:
return TupleType(list(args), TypeFixture().std_tuple)
def test_named_args(self) -> None:
self.assert_map(
['x'],
[(ARG_POS, 'x')],
[[0]])
self.assert_map(
['y', 'x'],
[(ARG_POS, 'x'), (ARG_POS, 'y')],
[[1], [0]])
def test_some_named_args(self) -> None:
self.assert_map(
['y'],
[(ARG_OPT, 'x'), (ARG_OPT, 'y'), (ARG_OPT, 'z')],
[[], [0], []])
def test_missing_named_arg(self) -> None:
self.assert_map(
['y'],
[(ARG_OPT, 'x')],
[[]])
def test_duplicate_named_arg(self) -> None:
self.assert_map(
['x', 'x'],
[(ARG_OPT, 'x')],
[[0, 1]])
def test_varargs_and_bare_asterisk(self) -> None:
self.assert_map(
[ARG_STAR],
[ARG_STAR, (ARG_NAMED, 'x')],
[[0], []])
self.assert_map(
[ARG_STAR, 'x'],
[ARG_STAR, (ARG_NAMED, 'x')],
[[0], [1]])
def test_keyword_varargs(self) -> None:
self.assert_map(
['x'],
[ARG_STAR2],
[[0]])
self.assert_map(
['x', ARG_STAR2],
[ARG_STAR2],
[[0, 1]])
self.assert_map(
['x', ARG_STAR2],
[(ARG_POS, 'x'), ARG_STAR2],
[[0], [1]])
self.assert_map(
[ARG_POS, ARG_STAR2],
[(ARG_POS, 'x'), ARG_STAR2],
[[0], [1]])
def test_both_kinds_of_varargs(self) -> None:
self.assert_map(
[ARG_STAR, ARG_STAR2],
[(ARG_POS, 'x'), (ARG_POS, 'y')],
[[0, 1], [0, 1]])
def test_special_cases(self) -> None:
self.assert_map([ARG_STAR],
[ARG_STAR, ARG_STAR2],
[[0], []])
self.assert_map([ARG_STAR, ARG_STAR2],
[ARG_STAR, ARG_STAR2],
[[0], [1]])
self.assert_map([ARG_STAR2],
[(ARG_POS, 'x'), ARG_STAR2],
[[0], [0]])
self.assert_map([ARG_STAR2],
[ARG_STAR2],
[[0]])
def assert_map(self,
caller_kinds_: List[Union[int, str]],
callee_kinds_: List[Union[int, Tuple[int, str]]],
expected: List[List[int]],
) -> None:
caller_kinds, caller_names = expand_caller_kinds(caller_kinds_)
callee_kinds, callee_names = expand_callee_kinds(callee_kinds_)
result = map_actuals_to_formals(
caller_kinds,
caller_names,
callee_kinds,
callee_names,
lambda i: AnyType(TypeOfAny.special_form))
assert_equal(result, expected)
def assert_vararg_map(self,
caller_kinds: List[int],
callee_kinds: List[int],
expected: List[List[int]],
vararg_type: Type,
) -> None:
result = map_actuals_to_formals(
caller_kinds,
[],
callee_kinds,
[],
lambda i: vararg_type)
assert_equal(result, expected)
def expand_caller_kinds(kinds_or_names: List[Union[int, str]]
) -> Tuple[List[int], List[Optional[str]]]:
kinds = []
names = [] # type: List[Optional[str]]
for k in kinds_or_names:
if isinstance(k, str):
kinds.append(ARG_NAMED)
names.append(k)
else:
kinds.append(k)
names.append(None)
return kinds, names
def expand_callee_kinds(kinds_and_names: List[Union[int, Tuple[int, str]]]
) -> Tuple[List[int], List[Optional[str]]]:
kinds = []
names = [] # type: List[Optional[str]]
for v in kinds_and_names:
if isinstance(v, tuple):
kinds.append(v[0])
names.append(v[1])
else:
kinds.append(v)
names.append(None)
return kinds, names
| [
"yuzhoujr@yuzhou-7480.internal.synopsys.com"
] | yuzhoujr@yuzhou-7480.internal.synopsys.com |
bc9f5067a043260d80975c5066c32c9c519df9e1 | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/577200_Make_unique_file_name/recipe-577200.py | a59a9b0faba37a4fd185f0bfaaf2d8ab21caa915 | [
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 848 | py | '''
function for making unique non-existent file name
with saving source file extension
'''
import os
import sys
__author__ = 'Denis Barmenkov <denis.barmenkov@gmail.com>'
__source__ = 'http://code.activestate.com/recipes/577200-make-unique-file-name/'
def add_unique_postfix(fn):
    """Return a path based on *fn* that does not yet exist on disk.

    If *fn* itself does not exist it is returned unchanged. Otherwise a
    numeric postfix is inserted before the extension -- ``name(2).ext``,
    ``name(3).ext``, ... -- and the first non-existent variant is returned.

    fn -- candidate file path (extension is preserved)

    Note (review): the original looped ``for i in xrange(2, sys.maxint)``;
    ``xrange`` and ``sys.maxint`` do not exist in Python 3 and the bound was
    arbitrary. An unbounded counter keeps identical behavior on Python 2
    while also working on Python 3.
    """
    if not os.path.exists(fn):
        return fn
    path, name = os.path.split(fn)
    name, ext = os.path.splitext(name)
    make_fn = lambda i: os.path.join(path, '%s(%d)%s' % (name, i, ext))
    i = 2
    while True:
        uni_fn = make_fn(i)
        if not os.path.exists(uni_fn):
            return uni_fn
        i += 1
def demo():
    """Show the unique-postfix helper on this script's own path (Python 2)."""
    script_path = sys.argv[0]
    print 'script file: %s' % script_path
    # The script file itself exists, so a "(n)" postfix variant is expected.
    fn_unique = add_unique_postfix(script_path)
    print 'with unique postfix: %s' % fn_unique
if __name__ == '__main__':
    demo()
| [
"betty@qburst.com"
] | betty@qburst.com |
0b186051c425659027a1a7dad9b073ba965873b2 | 7e45c50b01863103d540d156a03437b64b2896b3 | /tests/console/commands/test_check.py | 0df223b7498638f3c2c9eb9fc07ec8bd78c68878 | [
"MIT",
"LGPL-3.0-only",
"LGPL-2.1-only",
"BSD-4-Clause",
"GPL-2.0-only",
"Apache-2.0",
"BSD-2-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-or-later",
"LGPL-3.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"GPL-2.0-or-later",
"GPL-3.0-only"
] | permissive | AhmedRedaAmin/poetry | e7ac5ecc332da13cb9768ca286d5f49aec01750d | 5ba06bb44201cace7461f245e5f6440a168426ab | refs/heads/master | 2020-04-01T02:16:10.826116 | 2018-10-12T15:33:26 | 2018-10-12T15:33:26 | 152,771,840 | 0 | 0 | MIT | 2018-10-12T15:31:10 | 2018-10-12T15:31:09 | null | UTF-8 | Python | false | false | 268 | py | from cleo.testers import CommandTester
def test_about(app):
command = app.find("check")
tester = CommandTester(command)
tester.execute([("command", command.get_name())])
expected = """\
All set!
"""
assert tester.get_display(True) == expected
| [
"sebastien@eustace.io"
] | sebastien@eustace.io |
81cf2206986eae587556c8ed802ef919b41191b3 | 966efb6db04789f795474ee5047c497ce3c8c9dd | /100/q37.py | 03e23cb87cfcad1d7b43088c8295fbcd6d6391c9 | [] | no_license | gitmengzh/100-Python-exercises | 43b52ced1688fc30da61025183bcbc7d9f63446f | 00746148cececfed4beb2cd29a983a382aa419c8 | refs/heads/master | 2020-07-06T08:16:40.539517 | 2019-10-01T13:23:56 | 2019-10-01T13:23:56 | 202,952,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | '''
定义一个函数,生成一个List,List内容为1-20的平方,然后打印List前五个值
'''
def printList5():
l = []
for i in range(1,21):
l.append(i**2)
print(l[:5])
test = printList5()
| [
"mengzh1618@gmail.com"
] | mengzh1618@gmail.com |
3ab390a92b158a2faf4ee829165e0ad9cf072fec | dc8a337ea1d8a285577d33e5cfd4dbbe846ee1a0 | /src/main/scala/contest/155/SmallestStringWithSwaps.py | ae3db42c83edd3b2678ee0651eb398bec50107b6 | [] | no_license | joestalker1/leetcode | 8a5cdda17abd33c3eef859732f75d7bec77a9d0e | ae392ddbc7eb56cb814b9e9715043c98a89a6314 | refs/heads/master | 2023-04-13T22:09:54.407864 | 2023-04-09T19:22:54 | 2023-04-09T19:22:54 | 131,803,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | from collections import defaultdict
class Solution:
def smallestStringWithSwaps(self, s, pairs):
if not s:
return None
if len(pairs) == 0:
return s
def find(parent, i):
if parent[i] != i:
p = find(parent, parent[i])
parent[i] = p
return parent[i]
def union(parent, i, j):
p1 = find(parent, i)
p2 = find(parent, j)
if p1 != p2:
parent[p1] = p2
parent = [i for i in range(len(s))]
for i,j in pairs:
union(parent, i, j)
chars = defaultdict(list)
for i in range(len(s)):
chars[find(parent, i)].append(s[i])
for k in chars:
chars[k].sort()
res = []
for i in range(len(s)):
res.append(chars[find(parent, i)].pop(0))
return ''.join(res)
sol = Solution()
print(sol.smallestStringWithSwaps("udyyek", [[3,3],[3,0],[5,1],[3,1],[3,4],[3,5]]))#"deykuy"
#print(sol.smallestStringWithSwaps(s = "dcab", pairs = [[0,3],[1,2],[0,2]]))
#print(sol.smallestStringWithSwaps(s = "dcab", pairs = [[0,3],[1,2]]))
| [
"stalker.comp@gmail.com"
] | stalker.comp@gmail.com |
34f7f8f4141f68a3303100cff04e36a6121b6fd0 | cc0caf0362909490377a44b08a726dca2d093c4f | /principal_planb.py | a757ef7fb2bf186aca27ade6fb3d85c822c8aaa4 | [] | no_license | stefifm/Testing | bf334f97425ac4463e86e39a5bf97061827214c8 | 4a4cf4f93f050fe12244235774448a46f9a226db | refs/heads/master | 2023-01-04T12:25:16.951844 | 2020-11-03T00:24:13 | 2020-11-03T00:24:13 | 294,291,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | import planb
print("Este es el plan b")
def principal():
n = 16
participantes = []
planb.carga_automatica(participantes)
planb.orden_sort(participantes)
planb.mostrar_participantes(participantes)
#generaciópn de los cruces
print("OCTAVOS\n")
planb.octavos(participantes)
print("INTENTO DE CUARTOS\n")
cuartos = planb.ganadores(participantes)
planb.cruces(cuartos)
print("\nINTENTO DE SEMIFINAL\n")
semis = planb.ganadores(cuartos)
planb.cruces(semis)
print("\nINTENTO DE FINAL\n")
final = planb.ganadores(semis)
pri, seg = planb.final(final)
print("El primero es:",pri)
print("El segundo es:",seg)
if __name__ == "__main__":
principal() | [
"bruerastefania@gmail.com"
] | bruerastefania@gmail.com |
dadac39624c61550b9c4d7a21b0dbee6e168b988 | dd4d1a61ec680a86d4b569490bf2a898ea0d7557 | /appengine/predator/analysis/culprit.py | 5b0046ceb5e8bcbfbe29b14926c7099efe54fc84 | [
"BSD-3-Clause"
] | permissive | mcgreevy/chromium-infra | f1a68914b47bcbe3cd8a424f43741dd74fedddf4 | 09064105713603f7bf75c772e8354800a1bfa256 | refs/heads/master | 2022-10-29T23:21:46.894543 | 2017-05-16T06:22:50 | 2017-05-16T06:22:50 | 91,423,078 | 1 | 1 | BSD-3-Clause | 2022-10-01T18:48:03 | 2017-05-16T06:23:34 | Python | UTF-8 | Python | false | false | 4,439 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import namedtuple
class Culprit(namedtuple('Culprit',
['project', 'components', 'cls', 'regression_range', 'algorithm'])):
"""The result of successfully identifying the culprit of a crash report.
That is, this is what ``Predator.FindCultprit`` returns. It encapsulates
all the information predator discovered during its various analyses.
Args:
project (str): the most-suspected project
components (list of str): the suspected crbug components.
cls (list of ??): the suspected CLs.
regression_range (tuple): a pair of the last-good and first-bad versions.
algorithm (str): What algorithm was used to produce this object.
"""
__slots__ = ()
@property
def fields(self):
return self._fields
# TODO(http://crbug/644476): better name for this method.
def ToDicts(self):
"""Convert this object to a pair of anonymous dicts for JSON.
Returns:
(analysis_result_dict, tag_dict)
The analysis result is a dict like below:
{
# Indicate if Findit found any suspects_cls, project,
# components or regression_range.
"found": true,
"suspected_project": "chromium-v8", # Which project is most suspected.
"feedback_url": "https://.."
"suspected_cls": [
{
"revision": "commit-hash",
"url": "https://chromium.googlesource.com/chromium/src/+/...",
"review_url": "https://codereview.chromium.org/issue-number",
"project_path": "third_party/pdfium",
"author": "who@chromium.org",
"time": "2015-08-17 03:38:16",
"reason": "a plain string with '\n' as line break to expla..."
"reason": [('MinDistance', 1, 'minimum distance is 0.'),
('TopFrame', 0.9, 'top frame is2nd frame.')],
"changed_files": [
{"file": "file_name1.cc",
"blame_url": "https://...",
"info": "minimum distance (LOC) 0, frame #2"},
{"file": "file_name2.cc",
"blame_url": "https://...",
"info": "minimum distance (LOC) 20, frame #4"},
...
],
"confidence": 0.60
},
...,
],
"regression_range": [ # Detected regression range.
"53.0.2765.0",
"53.0.2766.0"
],
"suspected_components": [ # A list of crbug components to file bugs.
"Blink>JavaScript"
]
}
The code review url might not always be available, because not all
commits go through code review. In that case, commit url should
be used instead.
The tag dict are allowed key/value pairs to tag the analysis result
for query and monitoring purpose on Findit side. For allowed keys,
please refer to crash_analysis.py and fracas_crash_analysis.py:
For results with normal culprit-finding algorithm: {
'found_suspects': True,
'has_regression_range': True,
'solution': 'core_algorithm',
}
For results using git blame without a regression range: {
'found_suspects': True,
'has_regression_range': False,
'solution': 'blame',
}
If nothing is found: {
'found_suspects': False,
}
"""
result = {}
result['found'] = (
bool(self.project) or
bool(self.components) or
bool(self.cls) or
bool(self.regression_range))
if self.regression_range:
result['regression_range'] = self.regression_range
if self.project:
result['suspected_project'] = self.project
if self.components:
result['suspected_components'] = self.components
if self.cls:
result['suspected_cls'] = [cl.ToDict() for cl in self.cls]
tags = {
'found_suspects': bool(self.cls),
'has_regression_range': bool(self.regression_range),
'found_project': bool(self.project),
'found_components': bool(self.components),
'solution': self.algorithm,
}
return result, tags
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
f1b4f05f3d60bb61c3fb15df319d3bfab9891807 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p00002/s318942277.py | e8ba2d4572b2599ea579aebe12436eac026105c3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # -*-coding:utf-8-*-
def get_input():
    """Yield lines from standard input until EOF."""
    while True:
        try:
            yield input()
        except EOFError:
            return


if __name__ == "__main__":
    # For each input line "a b", print the number of digits in a + b.
    for line in get_input():
        tokens = line.split()
        total = int(tokens[0]) + int(tokens[1])
        print(len(str(total)))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4accc9e2844547831a443a791d21841b2e5915b5 | ba03b99d73886349d66883b8c328b8eff805772d | /307 range sum query - mutable.py | 49e74e5d9b3363ce8e56bcc3cd1106b2f2c1ff0a | [] | no_license | liyi0206/leetcode-python | 38cc33eb74b006e7e6609eda86e1ae8d5e278247 | 2c4a54070b20d2fe33b81d889ad0ad0c6aa5fb5c | refs/heads/master | 2016-09-12T22:54:09.622652 | 2016-05-26T05:20:44 | 2016-05-26T05:20:44 | 59,178,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,510 | py | class NumArray(object):
def __init__(self, nums):
    """
    Build a Fenwick (binary indexed) tree over *nums*.
    :type nums: List[int]
    """
    self.nums, self.n = nums, len(nums)
    # sums is 1-based: entry i accumulates the lowbit(i) elements ending
    # at position i; parent/larger-sibling chains are filled via add().
    self.sums = [0] * (self.n + 1)
    for pos, value in enumerate(nums, 1):
        self.add(pos, value)  # seed the tree one element at a time
def update(self, i, val):
    """
    Set nums[i] to *val* and propagate the delta through the tree.
    :type i: int
    :type val: int
    :rtype: int
    """
    delta = val - self.nums[i]
    self.add(i + 1, delta)  # tree indices are 1-based
    self.nums[i] = val
def sumRange(self, i, j):
    """
    Sum of elements nums[i..j], inclusive.
    :type i: int
    :type j: int
    :rtype: int
    """
    # prefix(j + 1) - prefix(i), both 1-based prefix sums; 0 for an
    # empty array (edge case).
    return self.sum(j + 1) - self.sum(i) if self.nums else 0
### UTILS ###
def lowbit(self, x):
    # Lowest set bit of x == size of the tree block rooted at index x.
    return x & -x
def add(self, x, val):
    # Point update: add *val* at 1-based index x and at every covering
    # node above it (x walks upward by its lowbit).
    while x <= self.n:
        self.sums[x] += val
        x += self.lowbit(x)
def sum(self, x):
    # Prefix sum of the first x elements (1-based); x walks downward by
    # its lowbit until it reaches 0.
    total = 0
    while x > 0:
        total += self.sums[x]
        x -= self.lowbit(x)
    return total
nums=[1,3,5]
numArray = NumArray(nums)
print numArray.sumRange(0,2) #9
numArray.update(1,2)
print numArray.sumRange(0,2) #8 | [
"ly.protegee@gmail.com"
] | ly.protegee@gmail.com |
edc8f7043c8ab86364b5bdf6be0c590f63897936 | a5f0e7c09c36bb2fc91f95e5f3ec7f95c0ed305e | /cafe_backend/apps/users/migrations/0014_auto_20190713_0401.py | 06098044ac3a7f73d18794b9f487be6e535f1f9f | [] | no_license | ecmascriptguru/cafe_backend | e703047c7f04d68596f76dcbff06828afbf5cc68 | 0c4152692d68e951481b39f0789bc58e94e0d20c | refs/heads/master | 2022-10-26T00:31:50.070430 | 2020-06-18T15:30:02 | 2020-06-18T15:30:02 | 184,465,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # Generated by Django 2.0.9 on 2019-07-12 20:01
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: change ``Table`` model options only
    (order by the linked user's first name, set verbose names); no
    database schema change."""

    dependencies = [
        ('users', '0013_employee'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='table',
            options={'ordering': ('user__first_name',), 'verbose_name': 'Table', 'verbose_name_plural': 'Tables'},
        ),
    ]
| [
"ecmascript.guru@gmail.com"
] | ecmascript.guru@gmail.com |
ea12358f5a23a570db2306e5474ccf2056c99a16 | 86a26119af259e3858cb5e57ea2e41e3b25c5fa7 | /Python Project/Employee_Home.py | 621e7d19f6311f44bdc6037990004706ce2ea09c | [] | no_license | deshmukhshweta/project2 | 747ca7972a7bfdc4aed20dbb4ee3f6d2f009ca83 | 8bf07454d259456dc616e7283c266b35fe7b870d | refs/heads/master | 2020-04-19T09:57:05.541157 | 2019-01-29T09:27:01 | 2019-01-29T09:27:01 | 168,125,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,602 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# GUI module generated by PAGE version 4.13
# In conjunction with Tcl version 8.6
# May 31, 2018 12:17:17 AM
import sys
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
import Employee_Home_support
def vp_start_gui():
    '''Starting point when module is the main routine.'''
    # Create the root Tk window, populate it with the Employee_Home GUI,
    # hand both to the support module, then block in the Tk event loop.
    global val, w, root
    root = Tk()
    top = Employee_Home (root)
    Employee_Home_support.init(root, top)
    root.mainloop()
w = None
def create_Employee_Home(root, *args, **kwargs):
    '''Starting point when module is imported by another program.'''
    # Build the GUI in a new Toplevel child of *root*; return both the
    # window and the Employee_Home instance that populates it.
    global w, w_win, rt
    rt = root
    w = Toplevel (root)
    top = Employee_Home (w)
    Employee_Home_support.init(w, top, *args, **kwargs)
    return (w, top)
def destroy_Employee_Home():
    # Tear down the Toplevel created by create_Employee_Home and clear
    # the module-level handle so the window can be rebuilt later.
    global w
    w.destroy()
    w = None
class Employee_Home:
    """PAGE-generated employee home window: a framed menu whose buttons
    dispatch to handlers in ``Employee_Home_support`` (Stocker,
    Dispatcher, Product, Sales, Logout)."""

    def __init__(self, top=None):
        '''This class configures and populates the toplevel window.
           top is the toplevel containing window.'''
        # Designer palette constants; the PAGE generator emits them even
        # when unused below.
        _bgcolor = '#d9d9d9' # X11 color: 'gray85'
        _fgcolor = '#000000' # X11 color: 'black'
        _compcolor = '#d9d9d9' # X11 color: 'gray85'
        _ana1color = '#d9d9d9' # X11 color: 'gray85'
        _ana2color = '#d9d9d9' # X11 color: 'gray85'
        # Shared bold font used by the title label and all buttons.
        font9 = "-family {Segoe UI} -size 14 -weight bold -slant roman" \
            " -underline 0 -overstrike 0"

        top.geometry("273x498+429+126")
        top.title("Employee Home")
        top.configure(background="#d89ed0")
        top.configure(highlightbackground="#d9d9d9")
        top.configure(highlightcolor="black")

        # Container frame holding the title label and the menu buttons.
        self.Frame1 = Frame(top)
        self.Frame1.place(relx=0.0, rely=0.0, relheight=0.99, relwidth=1.01)
        self.Frame1.configure(relief=GROOVE)
        self.Frame1.configure(borderwidth="5")
        self.Frame1.configure(relief=GROOVE)
        self.Frame1.configure(background="#9ea0d8")
        self.Frame1.configure(highlightbackground="#d9d9d9")
        self.Frame1.configure(highlightcolor="black")
        self.Frame1.configure(width=275)

        # Screen title.
        self.Label1 = Label(self.Frame1)
        self.Label1.place(relx=0.07, rely=0.02, height=31, width=224)
        self.Label1.configure(activebackground="#f9f9f9")
        self.Label1.configure(activeforeground="black")
        self.Label1.configure(background="#9ea0d8")
        self.Label1.configure(disabledforeground="#a3a3a3")
        self.Label1.configure(font=font9)
        self.Label1.configure(foreground="#000000")
        self.Label1.configure(highlightbackground="#d9d9d9")
        self.Label1.configure(highlightcolor="black")
        self.Label1.configure(text='''Employee Home''')

        # Navigation: stocker screen.
        self.Button1 = Button(self.Frame1)
        self.Button1.place(relx=0.11, rely=0.16, height=34, width=207)
        self.Button1.configure(activebackground="#d9d9d9")
        self.Button1.configure(activeforeground="#000000")
        self.Button1.configure(background="#9ea0d8")
        self.Button1.configure(command=Employee_Home_support.admin_stocker)
        self.Button1.configure(disabledforeground="#a3a3a3")
        self.Button1.configure(font=font9)
        self.Button1.configure(foreground="#000000")
        self.Button1.configure(highlightbackground="#d9d9d9")
        self.Button1.configure(highlightcolor="#000000")
        self.Button1.configure(pady="0")
        self.Button1.configure(text='''Stocker''')

        # Navigation: dispatcher screen.
        self.Button1_1 = Button(self.Frame1)
        self.Button1_1.place(relx=0.11, rely=0.32, height=34, width=207)
        self.Button1_1.configure(activebackground="#d9d9d9")
        self.Button1_1.configure(activeforeground="#000000")
        self.Button1_1.configure(background="#9ea0d8")
        self.Button1_1.configure(command=Employee_Home_support.admin_dispatcher)
        self.Button1_1.configure(disabledforeground="#a3a3a3")
        self.Button1_1.configure(font=font9)
        self.Button1_1.configure(foreground="#000000")
        self.Button1_1.configure(highlightbackground="#d9d9d9")
        self.Button1_1.configure(highlightcolor="black")
        self.Button1_1.configure(pady="0")
        self.Button1_1.configure(text='''Dispatcher''')

        # Navigation: product screen.
        self.Button1_2 = Button(self.Frame1)
        self.Button1_2.place(relx=0.11, rely=0.48, height=34, width=207)
        self.Button1_2.configure(activebackground="#d9d9d9")
        self.Button1_2.configure(activeforeground="#000000")
        self.Button1_2.configure(background="#9ea0d8")
        self.Button1_2.configure(command=Employee_Home_support.admin_product)
        self.Button1_2.configure(disabledforeground="#a3a3a3")
        self.Button1_2.configure(font=font9)
        self.Button1_2.configure(foreground="#000000")
        self.Button1_2.configure(highlightbackground="#d9d9d9")
        self.Button1_2.configure(highlightcolor="black")
        self.Button1_2.configure(pady="0")
        self.Button1_2.configure(text='''Product''')

        # Navigation: sales screen.
        self.Button1_3 = Button(self.Frame1)
        self.Button1_3.place(relx=0.13, rely=0.65, height=34, width=207)
        self.Button1_3.configure(activebackground="#d9d9d9")
        self.Button1_3.configure(activeforeground="#000000")
        self.Button1_3.configure(background="#9ea0d8")
        self.Button1_3.configure(command=Employee_Home_support.admin_sales)
        self.Button1_3.configure(disabledforeground="#a3a3a3")
        self.Button1_3.configure(font=font9)
        self.Button1_3.configure(foreground="#000000")
        self.Button1_3.configure(highlightbackground="#d9d9d9")
        self.Button1_3.configure(highlightcolor="black")
        self.Button1_3.configure(pady="0")
        self.Button1_3.configure(text='''Sales''')

        # Logout button.
        self.Button1_4 = Button(self.Frame1)
        self.Button1_4.place(relx=0.13, rely=0.81, height=34, width=207)
        self.Button1_4.configure(activebackground="#d9d9d9")
        self.Button1_4.configure(activeforeground="#000000")
        self.Button1_4.configure(background="#9ea0d8")
        self.Button1_4.configure(command=Employee_Home_support.admin_logout)
        self.Button1_4.configure(disabledforeground="#a3a3a3")
        self.Button1_4.configure(font=font9)
        self.Button1_4.configure(foreground="#000000")
        self.Button1_4.configure(highlightbackground="#d9d9d9")
        self.Button1_4.configure(highlightcolor="black")
        self.Button1_4.configure(pady="0")
        self.Button1_4.configure(text='''Logout''')
| [
"123deshmukhshweta@gmail.com"
] | 123deshmukhshweta@gmail.com |
c291ea9fcc40f9a2104f72fda45a772db6fc67c3 | 6fce07f704880861ed472706cce973ff81be9ca4 | /tests/test_threadutils.py | bdb8ac5bf21cd304d9f63fe29c2823f749a52ad5 | [
"MIT"
] | permissive | AbdulSaleh/dialog-probing | 6645d5c2be10dc0342d6f6c7a768e46e4080c068 | 12a04e7ca3363d428aca96e8c2c2ce2ec518a767 | refs/heads/master | 2023-04-08T22:29:13.531668 | 2020-06-17T19:27:49 | 2020-06-17T19:27:49 | 210,482,746 | 9 | 2 | MIT | 2023-03-24T23:29:34 | 2019-09-24T01:14:42 | Python | UTF-8 | Python | false | false | 3,078 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.thread_utils import SharedTable
from multiprocessing import Process
import parlai.core.testing_utils as testing_utils
import unittest
import random
import time
@testing_utils.skipIfGPU
class TestSharedTable(unittest.TestCase):
    """Make sure the package is alive."""

    def test_init_from_dict(self):
        """Every key/value pair given to the constructor must be readable back."""
        d = {'a': 0, 'b': 1, 'c': 1.0, 'd': True, 1: False, 2: 2.0}
        st = SharedTable(d)
        for k, v in d.items():
            assert st[k] == v

    def test_get_set_del(self):
        """Item access, in-place update, type locking, and deletion semantics."""
        st = SharedTable({'key': 0})
        try:
            st['none']
            self.fail('did not fail on nonexistent key')
        except KeyError:
            pass
        st['key'] = 1
        assert st['key'] == 1
        st['key'] += 1
        assert st['key'] == 2
        # Once a key is set with a type, assigning a different type must fail.
        try:
            st['key'] = 2.1
            self.fail('cannot change type of value for set keys')
        except TypeError:
            pass
        del st['key']
        assert 'key' not in st, 'key should have been removed from table'
        # A deleted key cannot be re-created.
        try:
            st['key'] = True
            self.fail('cannot change removed key')
        except KeyError:
            pass

    def test_iter_keys(self):
        """len(), iteration, and keys() must agree, also after deletion."""
        st = SharedTable({'key': 0, 'ctr': 0.0, 'val': False, 'other': 1})
        assert len(st) == 4
        del st['key']
        assert len(st) == 3, 'length should decrease after deleting key'
        keyset1 = set(iter(st))
        keyset2 = set(st.keys())
        assert keyset1 == keyset2, 'iterating should return keys'
        assert len(keyset1) == 3, ''

    def test_concurrent_access(self):
        """5 processes x 50 locked increments must total exactly 250."""
        st = SharedTable({'cnt': 0})

        def inc():
            for _ in range(50):
                with st.get_lock():
                    st['cnt'] += 1
                # random tiny sleep to encourage interleaving
                time.sleep(random.randint(1, 5) / 10000)

        threads = []
        for _ in range(5):  # numthreads
            threads.append(Process(target=inc))
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        assert st['cnt'] == 250

    def test_torch(self):
        """Tensor values (CPU and, when available, CUDA) round-trip through the table."""
        try:
            import torch
        except ImportError:
            # pass by default if no torch available
            return
        st = SharedTable({'a': torch.FloatTensor([1]), 'b': torch.LongTensor(2)})
        assert st['a'][0] == 1.0
        assert len(st) == 2
        assert 'b' in st
        del st['b']
        assert 'b' not in st
        assert len(st) == 1
        if torch.cuda.is_available():
            st = SharedTable(
                {'a': torch.cuda.FloatTensor([1]), 'b': torch.cuda.LongTensor(2)}
            )
            assert st['a'][0] == 1.0
            assert len(st) == 2
            assert 'b' in st
            del st['b']
            assert 'b' not in st
            assert len(st) == 1
if __name__ == '__main__':
unittest.main()
| [
"a_saleh@matlaber5.media.mit.edu"
] | a_saleh@matlaber5.media.mit.edu |
4dc6472a0122338c0b83a65c1f1562003a7e74ba | 204db0d292bfe63253f737e6a73c443c0359b629 | /HitAnalyzer/test/draw_clustersize.py | 492ee21a02d8d12bd42b8b20cc2db6eee1db8939 | [] | no_license | gitytakahas/DPGAnalysis-SiPixelTools | 69d6de11974be421086ccb19d6fe760ddec986ae | 9088daece2f85f4fd0f5def51cdb30203f4b3b23 | refs/heads/master | 2021-01-17T08:31:08.047601 | 2017-06-28T14:38:01 | 2017-06-28T14:38:01 | 64,846,554 | 0 | 0 | null | 2016-08-03T13:04:29 | 2016-08-03T13:04:29 | null | UTF-8 | Python | false | false | 3,935 | py | from officialStyle import officialStyle
from ROOT import TFile, TTree, TH2F, TCanvas, gROOT, gStyle, TH1F, TLegend
import copy
gROOT.SetBatch(True)
officialStyle(gStyle)
gStyle.SetOptTitle(0)
def LegendSettings(leg):
    """Give *leg* (a ROOT TLegend) the standard transparent look."""
    # Invisible box: hollow fill over a white base colour, no frame.
    leg.SetFillStyle(0)
    leg.SetFillColor(10)
    leg.SetLineColor(0)
    leg.SetBorderSize(0)
    # Small regular (non-bold) text.
    leg.SetTextFont(42)
    leg.SetTextSize(0.03)
#type='zerobias'
#type='random'
layers = [1,2,3,4]
#layers = [1]
xmax = [10, 3, 2, 1]
types = ['random', 'zerobias']
ladder = [13, 29, 45, 65]
lmax = [6.5, 14.5, 22.5, 32.5]
h_occupancy = {}
for type in types:
file = TFile('Myroot_' + type + '.root')
tree = file.Get('cluster_tree')
h_occupancy_ = []
for layer in layers:
hname = 'hist_L' + str(layer)
# hist = TH2F(hname, hname, 56, -28, 28, 10000,0,10000)
hist = TH2F(hname, hname, 20, -28, 28, 10000,0,10000)
hist.GetXaxis().SetTitle('Z (mm)')
hist.GetYaxis().SetTitle('Cluster size')
tree.Draw("ch:zPos >> " + hname, "subid==1 && layer==" + str(layer))
cname = 'canvas_' + str(layer)
canvas = TCanvas(cname)
canvas.SetGridx()
canvas.SetGridy()
hist.Draw('colz')
hist_occ = hist.ProfileX()
hist_occ.GetYaxis().SetNdivisions(505)
hist_occ.Sumw2()
hist_occ.SetLineColor(1)
# hist_occ.Draw('psame')
canvas.SaveAs('plot/cluster_L'+str(layer) + '_' + type + '.gif')
## zoom
hname_zoom = 'hist_zoom_L' + str(layer)
hist_zoom = TH2F(hname_zoom, hname_zoom, 20, -28, 28, 100,0,200)
hist_zoom.GetXaxis().SetTitle('Z (mm)')
hist_zoom.GetYaxis().SetTitle('Cluster size')
tree.Draw("ch:zPos >> " + hname_zoom, "subid==1 && layer==" + str(layer))
cname_zoom = 'canvas_zoom_' + str(layer)
canvas_zoom = TCanvas(cname_zoom)
canvas_zoom.SetGridx()
canvas_zoom.SetGridy()
hist_zoom.Draw('colz')
# hist_occ.Draw('psame')
hist.Draw('candlex(10000311) same')
canvas_zoom.SaveAs('plot/cluster_zoom_L'+str(layer) + '_' + type + '.gif')
# h_occupancy_.append(copy.deepcopy(hist_zoom))
h_occupancy_.append(copy.deepcopy(hist))
# h_occupancy_.append(copy.deepcopy(hist_occ))
h_occupancy[type] = h_occupancy_
print h_occupancy
# LegendSettings(leg,len(hists))
gStyle.SetPadRightMargin(0.1)
gStyle.SetPadLeftMargin(0.18)
types.reverse()
for layer in layers:
cname = 'occupancy_' + str(layer)
canvas_layer = TCanvas(cname)
leg = TLegend(0.5,0.7,0.9,0.9)
LegendSettings(leg)
for index, type in enumerate(types):
# h_occupancy[type][layer-1].Scale(1./h_occupancy[type][layer-1].GetSumOfWeights())
h_occupancy[type][layer-1].SetLineWidth(2)
h_occupancy[type][layer-1].SetLineColor(index+1)
h_occupancy[type][layer-1].SetMarkerColor(index+1)
h_occupancy[type][layer-1].SetLineStyle(index+1)
h_occupancy[type][layer-1].GetXaxis().SetTitle('Z (mm)')
h_occupancy[type][layer-1].GetYaxis().SetTitle('Cluster size')
h_occupancy[type][layer-1].GetYaxis().SetTitleOffset(1.5)
h_occupancy[type][layer-1].GetYaxis().SetRangeUser(0,200)
h_occupancy[type][layer-1].SetMaximum(h_occupancy[type][layer-1].GetMaximum()*1.5)
h_occupancy[type][layer-1].SetMinimum(0)
if index==0:
h_occupancy[type][layer-1].Draw('h')
h_occupancy[type][layer-1].Draw('candlex(10000311)')
# leg.AddEntry(h_occupancy[type][layer-1], 'Layer'+str(layer), '')
else:
# h_occupancy[type][layer-1].Draw('hsame')
h_occupancy[type][layer-1].Draw('candlex(10000311) same')
leg.AddEntry(h_occupancy[type][layer-1], type, 'lep')
leg.Draw()
canvas_layer.SaveAs('plot/cluster_profile_L' + str(layer) + '.gif')
| [
"Yuta.Takahashi@cern.ch"
] | Yuta.Takahashi@cern.ch |
8c54cbf7d9b12f2b8648a69b8b076a0ef55f1036 | 7dcd8ca463f3d0d727ed631a35ef112d38d193f2 | /Python/3. Image Processing/negative.py | f4a0f8d1cfbd8b719dda6bc5ee456fa1302eff0d | [
"MIT"
] | permissive | shoaibrayeen/Data-Science-With-Python-And-R | 03b38da9e8b0ebead34c51efa44f7e5052f773c4 | 2f4f398a2ea414395c4ff04b38c777f96f78bab2 | refs/heads/master | 2021-07-10T23:38:10.627283 | 2020-10-06T05:02:32 | 2020-10-06T05:02:32 | 199,718,898 | 0 | 1 | MIT | 2020-10-06T05:02:33 | 2019-07-30T19:59:58 | Jupyter Notebook | UTF-8 | Python | false | false | 263 | py | import numpy as np
from PIL import Image

# Invert ./Image/boat.png (photographic negative: each channel value v
# becomes 255 - v) and save the result next to the original.
source = Image.open("./Image/boat.png")
pixels = np.array(source, dtype=np.float32)
pixels = 255.0 - pixels
inverted = Image.fromarray(pixels.astype(np.uint8))
inverted.save("./Image/boatNegative.png")
| [
"noreply@github.com"
] | shoaibrayeen.noreply@github.com |
f17f725e2579c9a17a0f4ca272b528a7a3edb257 | 83830aff551b9f9c13a24d602c26cdc6559f2bd2 | /gozerplugs/shakespear.py | 642132cd7800703c2a784e6a17f02b399c51a8fa | [] | no_license | polichism/my-gozerbot | 8182a826aec731e49d44c595fd1dc7837e811db5 | ea86f2b7713457fc7a73f1227b969b230debda48 | refs/heads/master | 2021-01-17T18:03:23.135742 | 2014-04-04T11:44:28 | 2014-04-04T11:44:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,985 | py | # plugs/shakespear.py
#
#
""" uses the random lib """
__copyright__ = 'this file is in the public domain'
__revision__ = '$Id: shakespeare.py 517 2006-12-21 05:00:00Z deck $'
from gozerbot.generic import handle_exception
from gozerbot.commands import cmnds
from gozerbot.examples import examples
from gozerbot.plughelp import plughelp
import re, random
plughelp.add('shakespear', 'display a shakespearean insult')
set_a=["Away I say",
"Bathe thyself",
"Be not deaf",
"Behold thy mirror",
"Beware my sting",
"Clean thine ears",
"Drink up eisel",
"Eat a crododile",
"Eat my knickers",
"Fie upon thee",
"Forsooth say I",
"Get thee gone",
"Get thee hence",
"Grow unsightly warts",
"Hear me now",
"Hear this pox alert",
"I'll see thee hang'd",
"Kiss my codpiece",
"Lead apes in hell",
"Methinks you stinks",
"My finger in thine eye",
">>Phui<< I say",
"Remove thine ass hence",
"Resign not thy day gig",
"Sit thee on a spit",
"Sorrow on thee",
"Swim with leeches",
"Thou dost intrude",
"Thy mother wears armor",
"Trip on thy sword",
"Tune thy lute",
"Why, how now putz",
"Wipe thy ugly face"]
set_b=["artless",
"bawdy",
"beslubbering",
"bootless",
"cankerous",
"churlish",
"cockered",
"clouted",
"craven",
"currish",
"dankish",
"dissembling",
"droning",
"errant",
"fawning",
"fobbing",
"fool-born",
"froward",
"frothy",
"gleeking",
"goatish",
"gorbellied",
"ill-nurtured",
"impertinent",
"incestuous",
"incurable",
"infectious",
"jarring",
"loggerheaded",
"lumpish",
"loutish",
"mammering",
"mangled",
"mewling",
"paunchy",
"pribbling",
"puking",
"puny",
"qualling",
"rank",
"reeky",
"roguish",
"rump-fed",
"ruttish",
"saucy",
"spleeny",
"spongy",
"surly",
"tardy-gaited",
"tottering",
"unmuzzled",
"vain",
"venomed",
"warped",
"wayward",
"weedy",
"whoreson",
"wretched",
"yeasty"]
set_c=["addlepated",
"base-court",
"bat-fowling",
"beef-witted",
"beetle-headed",
"boil-brained",
"clapper-clawed",
"clay-brained",
"codpiece-sniffing",
"common-kissing",
"crook-pated",
"dismal-dreaming",
"dizzy-eyed",
"doghearted",
"dread-bolted",
"earth-vexing",
"elf-skinned",
"fat-kidneyed",
"fen-sucked",
"flap-mouthed",
"fly-bitten",
"folly-fallen",
"fool-born",
"foul-practicing",
"full-gorged",
"guts-griping",
"half-faced",
"hasty-witted",
"hedge-born",
"hell-hated",
"idle-headed",
"ill-breeding",
"ill-nurtured",
"knotty-pated",
"mad-brained",
"milk-livered",
"motley-minded",
"onion-eyed",
"plume-plucked",
"pottle-deep",
"pox-marked",
"reeling-ripe",
"rough-hewn",
"rude-growing",
"rump-fed",
"shard-borne",
"sheep-biting",
"spur-galled",
"swag-bellied",
"tardy-gaited",
"tickle-brained",
"toad-spotted",
"unchin-snouted",
"weather-bitten"]
set_d=["apple-john",
"baggage",
"barnacle",
"bladder",
"boar-pig",
"bugbear",
"bum-bailey",
"canker-blossom",
"clack-dish",
"clotpole",
"coxcomb",
"codpiece",
"death-token",
"dewberry",
"dotard",
"flap-dragon",
"flax-wench",
"flea",
"flirt-gill",
"foot-licker",
"fustilarian",
"giglet",
"gudgeon",
"haggard",
"harpy",
"hedge-pig",
"horn-beast",
"hugger-mugger",
"jolthead",
"knave",
"lewdster",
"lout",
"maggot-pie",
"malt-worm",
"mammet",
"measle",
"minnow",
"miscreant",
"moldwarp",
"mumble-news",
"nit",
"nut-hook",
"pigeon-egg",
"pignut",
"pumpion",
"puttock",
"ratsbane",
"rudesby",
"scut",
"skainsmate",
"strumpet",
"varlot",
"vassal",
"wagtail",
"water-fly",
"whey-face",
"winter-cricket"]
def handle_insult(bot, ievent):
    # Assemble one random phrase from each of the four word lists and
    # reply with the space-joined result.
    phrase = " ".join(
        random.choice(words) for words in (set_a, set_b, set_c, set_d)
    )
    ievent.reply(phrase)
cmnds.add('insult', handle_insult, 'USER')
examples.add('insult', 'show a shakespearean insult', 'insult')
| [
"blaxter@gmail.com"
] | blaxter@gmail.com |
163a84077a6b26e6de2ab3e58360644ced3eac16 | 043160352216a7fc21be4c8a44507e00f523bf80 | /test/functional/rpc_spork.py | 1f5f0298165fba056629b95302833155549086b0 | [
"MIT"
] | permissive | odinyblockchain/odinycoin | 5ef2a1bca374230882c91e8c6717bbb8faf889ad | 183751aac9357455913f1d8a415b1dcb04225ee0 | refs/heads/master | 2022-12-18T14:14:02.535216 | 2020-09-20T22:05:14 | 2020-09-20T22:05:14 | 295,208,711 | 0 | 2 | MIT | 2020-09-18T10:33:17 | 2020-09-13T18:06:52 | C++ | UTF-8 | Python | false | false | 2,830 | py | #!/usr/bin/env python3
# Copyright (c) 2019 The Odinycoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# -*- coding: utf-8 -*-
from time import sleep
from test_framework.test_framework import OdinycoinTestFramework
from test_framework.util import set_node_times, assert_equal
class Odinycoin_RPCSporkTest(OdinycoinTestFramework):
    """Functional test of the spork RPC: toggle and set SPORK_8 from the
    node holding the spork key (node 0) and verify the value propagates
    to node 1, surviving a restart."""

    def set_test_params(self):
        # Two nodes; only node 0 gets the spork private key, so only it
        # may sign spork updates.
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [['-staking=1']] * self.num_nodes
        self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi')

    def setup_chain(self):
        # Start with clean chain
        self._initialize_chain_clean()
        self.enable_mocktime()

    def log_title(self):
        # Pretty banner in the test log.
        title = "*** Starting %s ***" % self.__class__.__name__
        underline = "-" * len(title)
        description = "Performs tests on the Spork RPC"
        self.log.info("\n\n%s\n%s\n%s\n", title, underline, description)

    def run_test(self):
        self.log_title()
        set_node_times(self.nodes, self.mocktime)
        sporkName = "SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT"
        # 0 - check SPORK 8 status from node 1 (must be inactive)
        assert_equal(False, self.is_spork_active(1, sporkName))
        # 1 - activate SPORK 8 with nodes[0]
        assert_equal("success", self.activate_spork(0, sporkName))
        sleep(1)
        # check SPORK 8 status from nodes[1] (must be active)
        assert_equal(True, self.is_spork_active(1, sporkName))
        # 2 - Adjust time to 1 sec in the future and deactivate SPORK 8 with node[0]
        self.mocktime += 1
        set_node_times(self.nodes, self.mocktime)
        assert_equal("success", self.deactivate_spork(0, sporkName))
        sleep(1)
        # check SPORK 8 value from nodes[1] (must be inactive again)
        assert_equal(False, self.is_spork_active(1, sporkName))
        # 3 - Adjust time to 1 sec in the future and set new value (mocktime) for SPORK 8 with node[0]
        self.mocktime += 1
        set_node_times(self.nodes, self.mocktime)
        assert_equal("success", self.set_spork(0, sporkName, self.mocktime))
        sleep(1)
        # check SPORK 8 value from nodes[1] (must be equal to mocktime)
        assert_equal(self.mocktime, self.get_spork(1, sporkName))
        # 4 - Stop nodes and check value again after restart
        self.log.info("Stopping nodes...")
        self.stop_nodes()
        self.log.info("Restarting node 1...")
        self.start_node(1, [])
        assert_equal(self.mocktime, self.get_spork(1, sporkName))
        self.log.info("%s: TEST PASSED" % self.__class__.__name__)
if __name__ == '__main__':
Odinycoin_RPCSporkTest().main()
| [
"71228635+odinyblockchain@users.noreply.github.com"
] | 71228635+odinyblockchain@users.noreply.github.com |
5324cccd48635b974ca2c7204c7c9e487799df0a | 6b14d9a64a578239e5612e6098320b61b45c08d9 | /OCT16/02.py | 57cc74270ae7b19782b96a16b7b3c68a44bd1fac | [
"MIT"
] | permissive | Razdeep/PythonSnippets | 498c403140fec33ee2f0dd84801738f1256ee9dd | 76f9313894f511c487a99bc38bdf0fe5e594caf5 | refs/heads/master | 2020-03-26T08:56:23.067022 | 2018-11-26T05:36:36 | 2018-11-26T05:36:36 | 144,726,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | # Styling graphs
import matplotlib.pyplot as plt
plt.plot([1,2,3,4],[5,8,7,25],'r--')
# plt.plot([1,2,3,4],[5,8,7,25],'g^') # Shows green triangles
plt.title('Rain in december')
plt.xlabel('Days in december')
plt.ylabel('Inches in rain')
plt.show() | [
"rrajdeeproychowdhury@gmail.com"
] | rrajdeeproychowdhury@gmail.com |
42824fb36a1dc24acbcb8076fba9574ee8f0bf72 | bc047ab30357479f40f2106af46d9e0c0c1a8bb4 | /accounts/migrations/0008_auto_20200811_1457.py | 5335030c4d28fd3badc63dff70c2fb0ccfad61d6 | [] | no_license | kahdichienja/uniminus2 | 10c838b450ce1c3e2f0f5b840cc060e6fa26a418 | d9243f1654432d16697f4f6d4a8206c2a4179541 | refs/heads/master | 2022-11-30T05:03:38.243675 | 2020-08-11T13:55:35 | 2020-08-11T13:55:35 | 285,979,880 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # Generated by Django 2.1.7 on 2020-08-11 14:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``PersonalFileUpload.kcse_cert``
    optional (blank/null allowed) while keeping its upload directory."""

    dependencies = [
        ('accounts', '0007_personalfileupload'),
    ]

    operations = [
        migrations.AlterField(
            model_name='personalfileupload',
            name='kcse_cert',
            field=models.FileField(blank=True, null=True, upload_to='kcsecert'),
        ),
    ]
| [
"ago@localhost.localdomain"
] | ago@localhost.localdomain |
d31f9f3dea79d590895bdcac0b013ca328a820d6 | 3a28b1a12d0710c06f6360381ad8be6cf3707907 | /modular_model/triHPC/triHPCThermo/HPCAllTrays37CstmLiqEtlp_px_N2.py | f39a795e60701a5238d3eca02648ee7c6712b6ee | [] | no_license | WheatZhang/DynamicModelling | 6ce1d71d3b55176fd4d77a6aedbaf87e25ce4d02 | ea099245135fe73e8c9590502b9c8b87768cb165 | refs/heads/master | 2020-06-15T14:12:50.373047 | 2019-07-05T01:37:06 | 2019-07-05T01:37:06 | 195,319,788 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | def LiqEtlp_px_N2(P,T,x_N2):
x = (P-5.41573658e+02)/2.47804900e-01
y = (T--1.78069279e+02)/7.24480000e-03
z = (x_N2-9.96540601e-01)/9.95332218e-04
output = \
1*-8.06034533e+03
liq_etlp = output*1.00000000e+00+0.00000000e+00
return liq_etlp | [
"1052632241@qq.com"
] | 1052632241@qq.com |
d0fafe1c03d3be10fa89d812b4598501ee240c1a | 1065ec75d9ee668ffd7aafc6a8de912d7c2cee6f | /addons/script.icechannel.extn.extra.uk/plugins/livetv_uk/islam_channel_ltvi.py | 45d34e60e67ec4c94ae5ac0736e202cbe32e6204 | [] | no_license | bopopescu/kodiprofile | 64c067ee766e8a40e5c148b8e8ea367b4879ffc7 | 7e78640a569a7f212a771aab6a4a4d9cb0eecfbe | refs/heads/master | 2021-06-11T17:16:15.498281 | 2016-04-03T06:37:30 | 2016-04-03T06:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | '''
Ice Channel
'''
from entertainment.plugnplay.interfaces import LiveTVIndexer
from entertainment.plugnplay import Plugin
from entertainment import common
class islam_channel(LiveTVIndexer):
    """Ice Channel live-TV indexer plugin describing the UK "Islam
    Channel" (metadata only: names, artwork, region/language/genre)."""
    implements = [LiveTVIndexer]
    display_name = "Islam Channel"
    name = "islam_channel"
    other_names = "islam_channel,Islam Channel"
    # Class-body imports: evaluated once, while this class is defined.
    import xbmcaddon
    import os
    addon_id = 'script.icechannel.extn.extra.uk'
    addon = xbmcaddon.Addon(addon_id)
    # Channel logo shipped inside the add-on's resources directory.
    img = os.path.join( addon.getAddonInfo('path'), 'resources', 'images', name + '.png' )
    regions = [
        {
            'name':'United Kingdom',
            'img':addon.getAddonInfo('icon'),
            'fanart':addon.getAddonInfo('fanart')
        },
    ]
    languages = [
        {'name':'English', 'img':'', 'fanart':''},
    ]
    genres = [
        {'name':'International', 'img':'', 'fanart':''}
    ]
    # Drop the Addon handle once the metadata above has been captured.
    addon = None
| [
"sokasoka@hotmail.com"
] | sokasoka@hotmail.com |
e73fbfaaa91a9301ec2a18d4f2a6130034fe5553 | d5b48163d236ca770be8e687f92192e2971397e8 | /116.py | d7870f37e24c92963d66021b29540135312aafc5 | [] | no_license | Kunal352000/python_program | 191f5d9c82980eb706e11457c2b5af54b0d2ae95 | 7a1c645f9eab87cc45a593955dcb61b35e2ce434 | refs/heads/main | 2023-07-12T19:06:19.121741 | 2021-08-21T11:58:41 | 2021-08-21T11:58:41 | 376,606,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | num=int(input("Enter a number: "))
# Print `num` rows of stars: each row is `num` stars wide and is shifted
# one column left relative to the row above (right-aligned parallelogram).
for row in range(num):
    print(" " * (num - 1 - row) + "*" * num)
| [
"noreply@github.com"
] | Kunal352000.noreply@github.com |
dec76e7957ea062e6fdd0bc4d7e16cd9a404bade | 992d0d5e06813f6dff323e4b528cd39b4cbaa955 | /pytorch入门与实践/Fast-Neural-Style/utils.py | e07c2f6fef99b3c70c5e359c84229fa87699be98 | [] | no_license | happy-luck/pytorch-study | 9997d6b92785df4d6b4b0eb8c8f8ab9ee15bfc2b | 137a7eb0d76ad2bc3e731aade2bfda4586e7d21a | refs/heads/master | 2022-11-17T16:34:02.974645 | 2020-07-16T01:32:33 | 2020-07-16T01:32:33 | 279,233,757 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,629 | py | # coding:utf8
from itertools import chain
import visdom
import torch as t
import time
import torchvision as tv
import numpy as np
# Per-channel ImageNet mean/std (the standard torchvision normalization
# constants), used by get_style_data and normalize_batch below.
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
def gram_matrix(y):
    """Compute the normalized Gram matrix of a batch of feature maps.

    Input:  tensor of shape (b, ch, h, w)
    Output: tensor of shape (b, ch, ch), scaled by 1 / (ch * h * w)
    """
    batch, channels, height, width = y.size()
    # Flatten each channel's spatial map into one row vector.
    flat = y.view(batch, channels, height * width)
    # Batched outer product of the channel vectors: (b, ch, ch).
    gram = flat.bmm(flat.transpose(1, 2))
    return gram / (channels * height * width)
class Visualizer():
    """Thin convenience wrapper around a visdom connection.

    Wraps the basic plotting operations; the raw visdom API remains
    reachable both via ``self.vis.function`` and through plain attribute
    access (see ``__getattr__``).
    """

    def __init__(self, env='default', **kwargs):
        import visdom
        self.vis = visdom.Visdom(env=env, **kwargs)
        # Per-window x-coordinate counters: {'loss': 23} means the next
        # point plotted in window 'loss' is the 23rd one.
        self.index = {}
        self.log_text = ''

    def reinit(self, env='default', **kwargs):
        """Re-create the underlying visdom connection with a new config."""
        import visdom
        self.vis = visdom.Visdom(env=env, **kwargs)
        return self

    def plot_many(self, d):
        """Plot several scalars at once.

        Args:
            d: dict mapping window name -> value, e.g. {'loss': 0.11}.
        """
        for k, v in d.items():
            self.plot(k, v)

    def img_many(self, d):
        """Show several images at once; `d` maps window name -> tensor."""
        for k, v in d.items():
            self.img(k, v)

    def plot(self, name, y):
        """Append one scalar point to the line plot in window `name`.

        Example: self.plot('loss', 1.00)
        """
        x = self.index.get(name, 0)
        self.vis.line(Y=np.array([y]), X=np.array([x]),
                      win=name,
                      opts=dict(title=name),
                      # The first point creates the window, later ones append.
                      update=None if x == 0 else 'append'
                      )
        self.index[name] = x + 1

    def img(self, name, img_):
        """Show one image tensor, e.g. self.img('input_img', t.Tensor(64, 64)).

        A 2-D tensor is given a leading channel dimension first.
        """
        if len(img_.size()) < 3:
            img_ = img_.cpu().unsqueeze(0)
        self.vis.image(img_.cpu(),
                       win=name,
                       opts=dict(title=name)
                       )

    def img_grid_many(self, d):
        """Show several image grids; `d` maps window name -> batch tensor."""
        for k, v in d.items():
            self.img_grid(k, v)

    def img_grid(self, name, input_3d):
        """Show a batch of single-channel images as one grid.

        e.g. an input of shape (36, 64, 64) becomes a 6*6 grid of 64*64
        tiles; values are clamped into [0, 1] first.
        """
        self.img(name, tv.utils.make_grid(
            input_3d.cpu()[0].unsqueeze(1).clamp(max=1, min=0)))

    def log(self, info, win='log_text'):
        """Append a timestamped entry to a text log window.

        Example: self.log({'loss': 1, 'lr': 0.0001})
        """
        self.log_text += ('[{time}] {info} <br>'.format(
            time=time.strftime('%m%d_%H%M%S'),
            info=info))
        # BUG FIX: the original hard-coded win='log_text' here, silently
        # ignoring the caller-supplied window name.
        self.vis.text(self.log_text, win=win)

    def __getattr__(self, name):
        # Fall back to the raw visdom API for anything not defined here.
        return getattr(self.vis, name)
def get_style_data(path):
    """Load the style image stored at `path`.

    Input:  path, a file path
    Output: tensor of shape 1*c*h*w, ImageNet-normalized (roughly -2~2)
    """
    image = tv.datasets.folder.default_loader(path)
    transform = tv.transforms.Compose([
        tv.transforms.ToTensor(),
        tv.transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ])
    # Add the leading batch dimension expected by the network.
    return transform(image).unsqueeze(0)
def normalize_batch(batch):
    """ImageNet-normalize a batch of images.

    Input:  b,ch,h,w tensor with values in 0~255
    Output: b,ch,h,w tensor with values roughly in -2~2
    """
    # Build the per-channel statistics on the same device/dtype as `batch`
    # and broadcast them to the batch shape.
    mean = t.autograd.Variable(
        batch.data.new(IMAGENET_MEAN).view(1, -1, 1, 1).expand_as(batch.data))
    std = t.autograd.Variable(
        batch.data.new(IMAGENET_STD).view(1, -1, 1, 1).expand_as(batch.data))
    return (batch / 255.0 - mean) / std
| [
"18813129242@163.com"
] | 18813129242@163.com |
27b844b352de333d17ec109d4f30f57512010ac0 | 8d2a124753905fb0455f624b7c76792c32fac070 | /pytnon-month01/month01-shibw-notes/day10-shibw/exercise01-定义类.py | abe7abecd735db16f3f2a77f7971105114d5bfaa | [] | no_license | Jeremy277/exercise | f38e4f19aae074c804d265f6a1c49709fd2cae15 | a72dd82eb2424e4ae18e2f3e9cc66fc4762ec8fa | refs/heads/master | 2020-07-27T09:14:00.286145 | 2019-09-17T11:31:44 | 2019-09-17T11:31:44 | 209,041,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | #定义 Dog类
#Dog's data: name, kinds (breed), color
#Dog's behaviors:
# eat -> prints "<dog> is eating xx"
# run -> prints "<dog> is running at xx km/h"
class Dog:
    """A simple dog with a name, a breed (`kinds`) and a coat color.

    Exercise class demonstrating attribute initialization and instance
    methods; the printed messages are in Chinese.
    """
    def __init__(self,name,kinds,color):
        """Store the dog's name, breed and coat color."""
        self.name = name
        self.kinds = kinds
        self.color = color
    def eat(self,food):
        """Print "<name> is eating <food>"."""
        print('%s正在吃%s' % (self.name,food))
    def run(self,speed):
        """Print "the <color> <breed> is running at <speed> km/h"."""
        print('%s的%s正在以%skm/h的速度飞奔' %(self.color,self.kinds,speed))
# Create a Dog object.
# (Construction calls __init__.)
wangcai = Dog('旺财','中华田园犬','黄色')
wangcai.eat('骨头')
wangcai.run(40)
# Assign the Dog object's address to doudou: two variables now point to
# the same object.
doudou = wangcai
# doudou.eat('狗粮')#
# wangcai.eat('火腿肠')
doudou.name = '豆豆'
wangcai.eat('排骨')# prints "豆豆正在吃排骨": both names refer to one dog
list01 = [wangcai,doudou,Dog('儿子','哈士奇','灰色')]
list02 = list01
list01[2].color = '白色'
print(list02[2].color)# prints 白色: list02 aliases list01
| [
"13572093824@163.com"
] | 13572093824@163.com |
02ce301afe155e8d1301e152c047e372786ae63a | 43852c47c9bf8f1d7d54f564a7130bb667df5110 | /python/ray/air/util/check_ingest.py | 79ea645049007a3275126cf71523096461375f9d | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | jon-chuang/ray | 654de3518b77a9b4285ef402053a3db3e341ec94 | adf2b92afcb4688251388838210e18b8721871b9 | refs/heads/master | 2023-05-26T09:04:24.349126 | 2023-02-20T02:33:40 | 2023-02-20T02:33:40 | 206,287,690 | 1 | 1 | Apache-2.0 | 2023-05-20T08:02:24 | 2019-09-04T09:55:00 | Python | UTF-8 | Python | false | false | 7,803 | py | #!/usr/bin/env python
import sys
import time
from typing import Optional
import numpy as np
import ray
from ray.air import session
from ray.air.config import DatasetConfig, ScalingConfig
from ray.data import Dataset, DatasetIterator, Preprocessor
from ray.data.preprocessors import BatchMapper, Chain
from ray.train._internal.dataset_spec import DataParallelIngestSpec
from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.util.annotations import DeveloperAPI
@DeveloperAPI
class DummyTrainer(DataParallelTrainer):
    """A Trainer that does nothing except read the data for a given number of epochs.

    It prints out as much debugging statistics as possible.

    This is useful for debugging data ingest problem. This trainer supports normal
    scaling options same as any other Trainer (e.g., num_workers, use_gpu).
    """
    def __init__(
        self,
        *args,
        scaling_config: Optional[ScalingConfig] = None,
        num_epochs: int = 1,
        prefetch_blocks: int = 1,
        batch_size: Optional[int] = 4096,
        **kwargs
    ):
        # Default to a single training worker when no scaling config is given.
        if not scaling_config:
            scaling_config = ScalingConfig(num_workers=1)
        super().__init__(
            train_loop_per_worker=DummyTrainer.make_train_loop(
                num_epochs, prefetch_blocks, batch_size
            ),
            *args,
            scaling_config=scaling_config,
            **kwargs
        )
    def preprocess_datasets(self):
        """Run the parent preprocessing step, timing it and printing stats."""
        print("Starting dataset preprocessing")
        start = time.perf_counter()
        super().preprocess_datasets()
        print("Preprocessed datasets in", time.perf_counter() - start, "seconds")
        if self.preprocessor:
            print("Preprocessor", self.preprocessor)
            print(
                "Preprocessor transform stats:\n\n{}".format(
                    self.preprocessor.transform_stats()
                )
            )
    @staticmethod
    def make_train_loop(
        num_epochs: int, prefetch_blocks: int, batch_size: Optional[int]
    ):
        """Make a debug train loop that runs for the given amount of epochs."""
        def train_loop_per_worker():
            # Imported lazily so the import happens on the worker process.
            import pandas as pd
            rank = session.get_world_rank()
            data_shard = session.get_dataset_shard("train")
            start = time.perf_counter()
            epochs_read, batches_read, bytes_read = 0, 0, 0
            batch_delays = []
            print("Starting train loop on worker", rank)
            for epoch in range(num_epochs):
                epochs_read += 1
                batch_start = time.perf_counter()
                for batch in data_shard.iter_batches(
                    prefetch_blocks=prefetch_blocks, batch_size=batch_size
                ):
                    # Time spent waiting for this batch to become available.
                    batch_delay = time.perf_counter() - batch_start
                    batch_delays.append(batch_delay)
                    batches_read += 1
                    # Estimate the batch's in-memory size per batch format.
                    if isinstance(batch, pd.DataFrame):
                        bytes_read += int(
                            batch.memory_usage(index=True, deep=True).sum()
                        )
                    elif isinstance(batch, np.ndarray):
                        bytes_read += batch.nbytes
                    else:
                        # NOTE: This isn't recursive and will just return the size of
                        # the object pointers if list of non-primitive types.
                        bytes_read += sys.getsizeof(batch)
                    session.report(
                        dict(
                            bytes_read=bytes_read,
                            batches_read=batches_read,
                            epochs_read=epochs_read,
                            batch_delay=batch_delay,
                        )
                    )
                    batch_start = time.perf_counter()
            delta = time.perf_counter() - start
            print("Time to read all data", delta, "seconds")
            print(
                "P50/P95/Max batch delay (s)",
                np.quantile(batch_delays, 0.5),
                np.quantile(batch_delays, 0.95),
                np.max(batch_delays),
            )
            print("Num epochs read", epochs_read)
            print("Num batches read", batches_read)
            print("Num bytes read", round(bytes_read / (1024 * 1024), 2), "MiB")
            print(
                "Mean throughput", round(bytes_read / (1024 * 1024) / delta, 2), "MiB/s"
            )
            # Only rank 0 prints the full ingest stats to avoid duplication.
            if rank == 0:
                print("Ingest stats from rank=0:\n\n{}".format(data_shard.stats()))
        return train_loop_per_worker
@DeveloperAPI
def make_local_dataset_iterator(
    dataset: Dataset,
    preprocessor: Preprocessor,
    dataset_config: DatasetConfig,
) -> DatasetIterator:
    """Create a local :py:class:`DatasetIterator <ray.data.DatasetIterator>`
    equivalent to the shard returned by
    :meth:`~ray.air.session.get_dataset_shard`.

    Intended for development and debugging only; raises ``RuntimeError``
    when invoked from a worker process instead of the driver.

    Args:
        dataset: The input Dataset.
        preprocessor: The preprocessor that will be applied to the input dataset.
        dataset_config: The dataset config normally passed to the trainer.
    """
    ctx = ray.runtime_context.get_runtime_context()
    if ctx.worker.mode == ray._private.worker.WORKER_MODE:
        raise RuntimeError(
            "make_local_dataset_iterator should only be used by the driver "
            "for development and debugging. To consume a dataset from a "
            "worker or AIR trainer, see "
            "https://docs.ray.io/en/latest/ray-air/check-ingest.html."
        )
    spec = DataParallelIngestSpec({"train": dataset_config.fill_defaults()})
    spec.preprocess_datasets(preprocessor, {"train": dataset})
    # A single placeholder "worker" handle yields exactly one local shard.
    shards = spec.get_dataset_shards([None])
    return shards[0]["train"]
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--num-epochs", "-e", type=int, default=1, help="Number of epochs to read."
    )
    parser.add_argument(
        "--prefetch-blocks",
        "-b",
        type=int,
        default=1,
        help="Number of blocks to prefetch when reading data.",
    )
    args = parser.parse_args()
    # Generate a synthetic dataset of ~10GiB of float64 data. The dataset is sharded
    # into 100 blocks (parallelism=100).
    dataset = ray.data.range_tensor(50000, shape=(80, 80, 4), parallelism=100)
    # An example preprocessor chain that just scales all values by 4.0 in two stages.
    preprocessor = Chain(
        BatchMapper(lambda df: df * 2, batch_format="pandas"),
        BatchMapper(lambda df: df * 2, batch_format="pandas"),
    )
    # Setup the dummy trainer that prints ingest stats.
    # Run and print ingest stats.
    trainer = DummyTrainer(
        scaling_config=ScalingConfig(num_workers=1, use_gpu=False),
        datasets={"train": dataset},
        preprocessor=preprocessor,
        num_epochs=args.num_epochs,
        prefetch_blocks=args.prefetch_blocks,
        dataset_config={"train": DatasetConfig()},
        batch_size=None,
    )
    print("Dataset config", trainer.get_dataset_config())
    trainer.fit()
    # Print memory stats (you can also use "ray memory --stats-only" to monitor this
    # during the middle of the run).
    # Best effort only: the memory summary uses a private API, so failures
    # are reported rather than raised.
    try:
        print(
            "Memory stats at end of ingest:\n\n{}".format(
                ray._private.internal_api.memory_summary(stats_only=True)
            )
        )
    except Exception:
        print("Error getting Ray memory stats")
| [
"noreply@github.com"
] | jon-chuang.noreply@github.com |
b73a6050107b0d880b9466a83b8d551cad4b616e | 1cca2891740d5ed6925f1ab0b1ade7ff814ff504 | /vcenter/migrations/0013_auto_20161108_1507.py | f4ad2c4c4c8dfb12d7d7e376945182a65457de56 | [] | no_license | sj741231/stockstar-vsa | bac5dd747e3ccfd4c36067b79ae30b1e88dc4597 | f5877567b6d7a0e3ab9895416ea95d02f3b572a4 | refs/heads/master | 2021-01-24T08:32:42.055321 | 2017-06-05T14:57:49 | 2017-06-12T03:15:55 | 93,385,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-11-08 07:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the `Racks` model: remove its `idc` field first, then delete
    the model itself."""
    dependencies = [
        ('vcenter', '0012_racks'),
    ]
    operations = [
        # The field must be removed before the model can be deleted.
        migrations.RemoveField(
            model_name='racks',
            name='idc',
        ),
        migrations.DeleteModel(
            name='Racks',
        ),
    ]
| [
"shi.jin@126.com"
] | shi.jin@126.com |
5a921dd7bcd488ff6820ab9bfe93341267a5e720 | 6045075c734d65a3cec63d3ae15f8f9f13836559 | /solutions/0331_Verify_Preorder_Serialization_of_a_Binary_Tree/iter_by_degrees.py | e65ad1f91954b9fd44643c2c6c773ed853e9edd4 | [] | no_license | zh-wang/leetcode | c058470fdf84fb950e3d4f974b27826718942d05 | 6322be072e0f75e2da28b209c1dbb31593e5849f | refs/heads/master | 2021-12-28T02:49:11.964213 | 2021-08-25T06:29:21 | 2021-08-25T06:29:21 | 189,919,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | class Solution:
def isValidSerialization(self, preorder: str) -> bool:
arr = preorder.split(',')
# In a binary tree, if we consider null as leaves, then
# all non-null node provides 2 outdegree and 1 indegree (2 children and 1 parent), except root all null node provides 0 outdegree and 1 indegree (0 child and 1 parent).
# Suppose we try to build this tree. During building, we record the difference between out degree and in degree diff = outdegree - indegree. When the next node comes, we then decrease diff by 1, because the node provides an in degree. If the node is not null, we increase diff by2, because it provides two out degrees. If a serialization is correct, diff should never be negative and diff will be zero when finished.
diff = 1
for v in arr:
# each node provide a indgree
diff -= 1
if diff < 0: # indgree larger than outdgree
return False
if v != '#':
diff += 2 # non-empty node provide two outdgrees
return diff == 0 # indgree must be equal to outdgree
| [
"viennakanon@gmail.com"
] | viennakanon@gmail.com |
fc72d5ac87d23dad96d62f98d314106ccd272a48 | 2b56aaec923a2e7939734c6743c934ad960aef38 | /Greedy/2847.py | b043bd8d61bc02bfaaa4f0e73e87d66a2974d15e | [] | no_license | SAE-HUN/Algorithms | 461757fd5167fed14d7b5aca88fe004a5892c91a | aa2cc4d04e506d762706ae62e93400f1b57c82f0 | refs/heads/master | 2023-05-11T22:42:32.125790 | 2021-06-01T05:21:06 | 2021-06-01T05:21:06 | 306,549,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | n = int(input())
# Read the current score of each of the n contestants, in order.
# (`n` is read from stdin earlier in this script.)
scores = [int(input()) for _ in range(n)]
answer = 0
# Walk from the last contestant backwards: each earlier contestant must
# end up strictly below the next one (at most scores[i] - 1). Whenever
# that bound is exceeded, reduce the earlier score down to the bound and
# accumulate how many points were removed.
for i in range(n-1, 0, -1):
    if scores[i-1]>scores[i]-1:
        answer += scores[i-1] - (scores[i] - 1)
        scores[i-1] = scores[i] - 1
print(answer)
| [
"noreply@github.com"
] | SAE-HUN.noreply@github.com |
c7cc71d2629e6981c09a205d398e0048180f2f04 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02628/s935095687.py | 13c1c31e7df5d777d0ddb3c50f0327140819f7c9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | import itertools
# First input line: "N K" — N prices follow on the second line; the
# answer is the sum of the K cheapest prices.
[N, K] = [int(i) for i in input().split()]
price = [int(i) for i in input().split()]
price.sort()
# NOTE(review): `sum` shadows the built-in of the same name.
sum = 0
# Accumulate the K smallest prices (list is sorted ascending).
for i in range(K):
    sum += price[i]
print(sum) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1ce23d141a5c561f7101d4c1db3f247966d83ba3 | 22cec5da2b1fb83dcc9cf7c888f1e2078b05b62e | /flora/cmds/stop.py | 7c8e12aaf6e9946367cbd633d88b76f9ccb049fb | [
"Apache-2.0"
] | permissive | JuEnPeHa/flora-blockchain | 649d351e096e73222ab79759c71e191e42da5d34 | 656b5346752d43edb89d7f58aaf35b1cacc9a366 | refs/heads/main | 2023-07-18T08:52:51.353754 | 2021-09-07T08:13:35 | 2021-09-07T08:13:35 | 399,297,784 | 0 | 0 | Apache-2.0 | 2021-08-24T01:30:45 | 2021-08-24T01:30:44 | null | UTF-8 | Python | false | false | 1,402 | py | import sys
from pathlib import Path
import click
from flora.util.service_groups import all_groups, services_for_groups
async def async_stop(root_path: Path, group: str, stop_daemon: bool) -> int:
    """Stop the services in `group` (or the whole daemon) via the flora daemon.

    Returns a process exit code: 0 on success, 1 when the daemon is
    unreachable or any service failed to stop.
    """
    from flora.daemon.client import connect_to_daemon_and_validate

    daemon = await connect_to_daemon_and_validate(root_path)
    if daemon is None:
        print("Couldn't connect to flora daemon")
        return 1

    # -d flag: shut the whole daemon down instead of individual services.
    if stop_daemon:
        reply = await daemon.exit()
        await daemon.close()
        print(f"daemon: {reply}")
        return 0

    exit_code = 0
    for service in services_for_groups(group):
        print(f"{service}: ", end="", flush=True)
        running = await daemon.is_running(service_name=service)
        if not running:
            print("Not running")
        elif await daemon.stop_service(service_name=service):
            print("Stopped")
        else:
            print("Stop failed")
            exit_code = 1
    await daemon.close()
    return exit_code
# CLI entry point: `flora stop [-d] GROUP...`.
# (No docstring on purpose: click would surface it as the command help.)
@click.command("stop", short_help="Stop services")
@click.option("-d", "--daemon", is_flag=True, type=bool, help="Stop daemon")
@click.argument("group", type=click.Choice(all_groups()), nargs=-1, required=True)
@click.pass_context
def stop_cmd(ctx: click.Context, daemon: bool, group: str) -> None:
    # Drive the async implementation to completion and exit with its code.
    import asyncio

    sys.exit(asyncio.get_event_loop().run_until_complete(async_stop(ctx.obj["root_path"], group, daemon)))
| [
"github@floracoin.farm"
] | github@floracoin.farm |
c5e03d03ec50a0095dff0e4e1b820f5760f7df64 | e121dcc5d23e225891420e730549b9cc7ebe8e88 | /python/lib/direct/test/ModelScreenShot.py | aa59a44301d37bea98c26ab6ca7d651b90d3a4ea | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | PlumpMath/panda3d-3 | 4f4cf7627eddae9b7f30795e0a0657b01fdf670d | 5c0be0e1cd46b422d28d5b81ffb1e8b28c3ac914 | refs/heads/master | 2021-01-25T06:55:36.209044 | 2014-09-29T14:24:53 | 2014-09-29T14:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,451 | py | import direct
from panda3d.pandac import loadPrcFileData
from panda3d.direct.showbase.DirectObject import DirectObject
from panda3d.direct.directbase.DirectStart import *
from panda3d.pandac import *
import panda3d.direct.gui.DirectGuiGlobals as DGG
from panda3d.direct.gui.DirectGui import *
from panda3d.direct.task import Task
from panda3d.direct.directnotify import DirectNotifyGlobal
import math
from operator import *
import ModelScreenShotGlobals
class ModelScreenShot(DirectObject):
    """Load every model listed in ModelScreenShotGlobals, frame each one in
    front of the camera, and save a screenshot per model.

    All work happens in __init__; helpers are defined as local functions.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory("ModelScreenShot")
    def __init__(self):
        # Grab a list of models to capture screenshots of from an array in
        # the globals file
        self.modelsToView = ModelScreenShotGlobals.models
        self.models = []
        # Attach all the models listed to render and save a pointer to them
        # in an array. Then hide the model.
        for model in self.modelsToView:
            m = loader.loadModel(model)
            m.reparentTo(render)
            self.models.append(m)
            m.hide()
        # Set a nice farplane far, far away
        self.lens = base.camera.getChild(0).node().getLens()
        self.lens.setFar(10000)
        # Hide the cursor
        # NOTE(review): setCursorHidden(0) passes a falsy value, which
        # looks like it leaves the cursor visible — confirm intent.
        self.props = WindowProperties()
        self.props.setCursorHidden(0)
        base.win.requestProperties(self.props)
        # Method for getting the distance to an object from the camera
        def getDist(obj, lens):
            rad = obj.getBounds().getRadius()
            fov = lens.getFov()
            # NOTE(review): the min() mixes fov[0] (full) with fov[1]/2.0
            # (half) — asymmetric; verify against the intended framing.
            dist = rad / math.tan(deg2Rad(min(fov[0], fov[1]/2.0)))
            return dist
        # Determine the optimal camera position: centered on the model,
        # pulled back along -Y by the computed distance.
        def getOptCamPos(obj, dist):
            cen = obj.getBounds().getCenter()
            camPos = VBase3(cen.getX(), -dist, cen.getZ())
            return camPos
        # Generate screenshots: show each model, frame it, snap, hide it.
        def generatePics():
            for model in self.models:
                model.show()
                base.camera.setPos(getOptCamPos(model, getDist(model, self.lens)))
                uFilename = model.getName().replace('.egg','.jpg')
                self.notify.info("screenshot %s camera pos: %s" % (uFilename, base.camera.getPos()))
                base.graphicsEngine.renderFrame()
                base.screenshot(namePrefix = uFilename, defaultFilename = 0)
                model.hide()
        generatePics()
# Instantiate the helper (it takes all screenshots inside __init__),
# then hand control to Panda3D's main loop.
mss = ModelScreenShot()
run()
| [
"ralf.kaestner@gmail.com"
] | ralf.kaestner@gmail.com |
74dc0aa2f64c2ed5f40a01ad6b6f54b7cf178236 | 46563ccc5da11bb4b68bc2b27a40524af4d241b9 | /Dynamic_Routing_Between_Capsules/params.py | c8bc9eb8b09396b18606467f9d60a5aaed6ec2c2 | [
"MIT"
] | permissive | rahul-c1/Implementations-of-Deep-Learning-Models | e7e974b943782cb8b8afc5b6158ffee27f1a2248 | 4c1fe059b7c46c22790f716ca57d51bddc6248ac | refs/heads/master | 2020-03-24T09:58:46.521183 | 2018-01-20T05:22:55 | 2018-01-20T05:22:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | conv1_params = {
'filters': 256,
'kernel_size': 9,
'strides': 1,
'padding': 'valid',
'activation': 'relu'
}
# Second convolution layer; stride 2 halves the spatial resolution.
conv2_params = {
    'filters': 256,
    'kernel_size': 9,
    'strides': 2,
    'padding': 'valid',
    'activation': 'relu',
}
batch_size = 256
# MNIST-sized grayscale input (28x28x1) — TODO confirm against the loader.
input_shape = [28, 28, 1]
# 1152 primary capsules, each an 8-dimensional vector.
primary_capsules_shape = [1152, 8]
# 10 digit capsules of dimension 16, with 3 routing iterations
# (presumably following the "Dynamic Routing Between Capsules" paper).
digits_capsules_params = {
    'num_capsule': 10,
    'dim_capsule': 16,
    'routing_iterations': 3
}
# Decoder fully-connected layer sizes.
dense1, dense2 = 512, 1024
# Down-weighting factor (lambda) for the absent-class margin-loss term.
margin_loss_lambda = 0.5
reconstruction_loss_coefficient = 0.0005 | [
"maxpanziyuan@gmail.com"
] | maxpanziyuan@gmail.com |
e4f81b86df0300aaaa88eb3081f241403e987142 | 183bb8e9998a3eeebdc6dd0a5bf77525ef005a1f | /ribbit/ribbit_app/migrations/0008_auto__del_field_bookpasser_content__del_field_bookpasser_location__add.py | 5124b0c9c3937fa7db6e75d1fb8c501b19ae4227 | [] | no_license | gzpgg3x/SEARSHackPresentable | dcc7d3187bc459af5e8c535af8644d5d0fba7b05 | 0a78b6555f6e126506fa4f684a6b1d93b106d69a | refs/heads/master | 2020-05-30T10:39:29.211767 | 2014-06-08T16:08:05 | 2014-06-08T16:08:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,167 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: replace bookPasser's `content`/`location`
    columns with new `brand`/`product` columns.

    The `models` dict below is the auto-generated South model snapshot.
    """
    def forwards(self, orm):
        """Apply: drop content/location, add brand/product."""
        # Deleting field 'bookPasser.content'
        db.delete_column(u'ribbit_app_bookpasser', 'content')
        # Deleting field 'bookPasser.location'
        db.delete_column(u'ribbit_app_bookpasser', 'location')
        # Adding field 'bookPasser.brand'
        db.add_column(u'ribbit_app_bookpasser', 'brand',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=40, blank=True),
                      keep_default=False)
        # Adding field 'bookPasser.product'
        db.add_column(u'ribbit_app_bookpasser', 'product',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert: restore content/location, drop brand/product."""
        # Adding field 'bookPasser.content'
        db.add_column(u'ribbit_app_bookpasser', 'content',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=40, blank=True),
                      keep_default=False)
        # Adding field 'bookPasser.location'
        db.add_column(u'ribbit_app_bookpasser', 'location',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True),
                      keep_default=False)
        # Deleting field 'bookPasser.brand'
        db.delete_column(u'ribbit_app_bookpasser', 'brand')
        # Deleting field 'bookPasser.product'
        db.delete_column(u'ribbit_app_bookpasser', 'product')
    # Auto-generated model freeze; do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'ribbit_app.bookpasser': {
            'Meta': {'object_name': 'bookPasser'},
            'brand': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'product': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'ribbit_app.branch': {
            'Meta': {'object_name': 'Branch'},
            'branchaddress': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'branchname': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
            'branchphone': ('django.db.models.fields.CharField', [], {'max_length': '14', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'ribbit_app.shout': {
            'Meta': {'object_name': 'Shout'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'author': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'book': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
            'branchname': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'count': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lat': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}),
            'lng': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
        },
        u'ribbit_app.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'follows': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_by'", 'symmetrical': 'False', 'to': u"orm['ribbit_app.UserProfile']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        }
    }
complete_apps = ['ribbit_app'] | [
"gzpgg3x@yahoo.com"
] | gzpgg3x@yahoo.com |
0465cbce511a108a717722c70a4f8a2073edcec1 | 988fc095bc5487fdbb2b293106e012efde4a35d8 | /tests/functional_tests.py | 4335907934dfd8743b63da981ba201b36988ebb1 | [
"MIT"
] | permissive | Nagasaki45/pathtag | d614ea77ab551be8bae22ce21ae5e65603f4a038 | dfd66186959715f71f6ecd583521f0cd03f2f17e | refs/heads/master | 2020-12-20T22:53:37.714411 | 2018-09-11T23:06:49 | 2018-09-11T23:06:49 | 28,537,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,791 | py | import unittest
import shutil
import os
import tarfile
import subprocess
from mutagen.easyid3 import EasyID3
MATERIALS_DIR = 'tests/materials'
BACKUP = 'backup.tar'
class MainTest(unittest.TestCase):
    """End-to-end tests: run `python pathtag.py` over a materials tree and
    check the resulting ID3 tags with mutagen's EasyID3.

    setUp tars the materials directory as a backup and runs the script;
    tearDown restores the directory from the backup.
    """
    def setUp(self):
        # Backup MATERIALS_DIR
        with tarfile.TarFile(BACKUP, 'w') as backup:
            backup.add(MATERIALS_DIR)
        # Run pathtag.py on it
        subprocess.check_call(['python', 'pathtag.py', MATERIALS_DIR])
    def tearDown(self):
        # Remove manipulated dir
        shutil.rmtree(MATERIALS_DIR)
        # Restore the backup
        with tarfile.TarFile(BACKUP) as backup:
            backup.extractall()
        # Remove backup
        os.remove(BACKUP)
    def load_track(self, *args):
        """Open the ID3 tags of MATERIALS_DIR/<args...> as an EasyID3."""
        args = [MATERIALS_DIR] + list(args)
        return EasyID3(os.path.join(*args))
    def test_standard_behavior(self):
        """artist/album/track.mp3 gets artist and album from its path."""
        track = self.load_track('artist', 'album', 'track.mp3')
        self.assertEqual(track['artist'], ['artist'])
        self.assertEqual(track['album'], ['album'])
    def test_unknown_album(self):
        """A track directly under the artist dir gets album 'Unknown'."""
        track = self.load_track('artist', 'unknown_album_track.mp3')
        self.assertEqual(track['album'], ['Unknown'])
    def test_illegal_path_no_dir(self):
        """A track at the top level keeps its original tags."""
        track = self.load_track('illegal_path_track.mp3')
        self.assertEqual(track['album'], ['asdasd'])  # Original value
        self.assertEqual(track['artist'], ['asdasd'])  # Original value
    def test_illegal_path_too_nested(self):
        """A track nested deeper than artist/album keeps its original tags."""
        track = self.load_track(
            'artist', 'album', 'illegal_path_dir', 'illegal_path_track.mp3'
        )
        self.assertEqual(track['album'], ['asdasd'])  # Original value
        self.assertEqual(track['artist'], ['asdasd'])  # Original value
# Allow running this module directly: python tests/functional_tests.py
if __name__ == '__main__':
    unittest.main()
| [
"nagasaki45@gmail.com"
] | nagasaki45@gmail.com |
e055d3f40ff6a4d1d3f8f95db9dc115b493d590d | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/logic/v20190501/list_integration_account_map_content_callback_url.py | 622e2572a913a2f8a5e4fcf62ea045a1208cb715 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 5,729 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListIntegrationAccountMapContentCallbackUrlResult',
'AwaitableListIntegrationAccountMapContentCallbackUrlResult',
'list_integration_account_map_content_callback_url',
]
@pulumi.output_type
class ListIntegrationAccountMapContentCallbackUrlResult:
    """
    The workflow trigger callback URL.
    """
    # Auto-generated by the Pulumi SDK generator: each constructor argument
    # is type-checked (only when truthy) and stored via pulumi.set, then
    # exposed read-only through the @pulumi.getter properties below.
    def __init__(__self__, base_path=None, method=None, queries=None, relative_path=None, relative_path_parameters=None, value=None):
        if base_path and not isinstance(base_path, str):
            raise TypeError("Expected argument 'base_path' to be a str")
        pulumi.set(__self__, "base_path", base_path)
        if method and not isinstance(method, str):
            raise TypeError("Expected argument 'method' to be a str")
        pulumi.set(__self__, "method", method)
        if queries and not isinstance(queries, dict):
            raise TypeError("Expected argument 'queries' to be a dict")
        pulumi.set(__self__, "queries", queries)
        if relative_path and not isinstance(relative_path, str):
            raise TypeError("Expected argument 'relative_path' to be a str")
        pulumi.set(__self__, "relative_path", relative_path)
        if relative_path_parameters and not isinstance(relative_path_parameters, list):
            raise TypeError("Expected argument 'relative_path_parameters' to be a list")
        pulumi.set(__self__, "relative_path_parameters", relative_path_parameters)
        if value and not isinstance(value, str):
            raise TypeError("Expected argument 'value' to be a str")
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter(name="basePath")
    def base_path(self) -> str:
        """
        Gets the workflow trigger callback URL base path.
        """
        return pulumi.get(self, "base_path")
    @property
    @pulumi.getter
    def method(self) -> str:
        """
        Gets the workflow trigger callback URL HTTP method.
        """
        return pulumi.get(self, "method")
    @property
    @pulumi.getter
    def queries(self) -> Optional['outputs.WorkflowTriggerListCallbackUrlQueriesResponseResult']:
        """
        Gets the workflow trigger callback URL query parameters.
        """
        return pulumi.get(self, "queries")
    @property
    @pulumi.getter(name="relativePath")
    def relative_path(self) -> str:
        """
        Gets the workflow trigger callback URL relative path.
        """
        return pulumi.get(self, "relative_path")
    @property
    @pulumi.getter(name="relativePathParameters")
    def relative_path_parameters(self) -> Optional[Sequence[str]]:
        """
        Gets the workflow trigger callback URL relative path parameters.
        """
        return pulumi.get(self, "relative_path_parameters")
    @property
    @pulumi.getter
    def value(self) -> str:
        """
        Gets the workflow trigger callback URL.
        """
        return pulumi.get(self, "value")
class AwaitableListIntegrationAccountMapContentCallbackUrlResult(ListIntegrationAccountMapContentCallbackUrlResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the (already resolved) result awaitable: the `if False`
        # branch never yields, so awaiting immediately returns a fresh
        # copy of the result with identical fields.
        if False:
            yield self
        return ListIntegrationAccountMapContentCallbackUrlResult(
            base_path=self.base_path,
            method=self.method,
            queries=self.queries,
            relative_path=self.relative_path,
            relative_path_parameters=self.relative_path_parameters,
            value=self.value)
def list_integration_account_map_content_callback_url(integration_account_name: Optional[str] = None,
                                                      key_type: Optional[str] = None,
                                                      map_name: Optional[str] = None,
                                                      not_after: Optional[str] = None,
                                                      resource_group_name: Optional[str] = None,
                                                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListIntegrationAccountMapContentCallbackUrlResult:
    """
    Use this data source to access information about an existing resource.
    :param str integration_account_name: The integration account name.
    :param str key_type: The key type.
    :param str map_name: The integration account map name.
    :param str not_after: The expiry time.
    :param str resource_group_name: The resource group name.
    """
    # Marshal arguments into the provider's camelCase wire format.
    __args__ = dict()
    __args__['integrationAccountName'] = integration_account_name
    __args__['keyType'] = key_type
    __args__['mapName'] = map_name
    __args__['notAfter'] = not_after
    __args__['resourceGroupName'] = resource_group_name
    # Fill in defaults so callers may omit InvokeOptions entirely.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the azure-nextgen provider.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:logic/v20190501:listIntegrationAccountMapContentCallbackUrl', __args__, opts=opts, typ=ListIntegrationAccountMapContentCallbackUrlResult).value
    return AwaitableListIntegrationAccountMapContentCallbackUrlResult(
        base_path=__ret__.base_path,
        method=__ret__.method,
        queries=__ret__.queries,
        relative_path=__ret__.relative_path,
        relative_path_parameters=__ret__.relative_path_parameters,
        value=__ret__.value)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
0124697bac9f6283a8e32edd133b7c0657ef6f02 | 1eb0213140ada1c48edc5fb97b439d6556e6c3a9 | /0x0A-python-inheritance/7-base_geometry.py | 06615bc37e1f43fbd3545438200a215ece54b58c | [] | no_license | HeimerR/holbertonschool-higher_level_programming | 53d2a3c536fd9976bb7fea76dd2ecf9a6ba3297e | 892c0f314611c0a30765cf673e8413dbee567a2d | refs/heads/master | 2020-05-18T02:24:11.829328 | 2020-04-30T03:59:04 | 2020-04-30T03:59:04 | 184,112,468 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | #!/usr/bin/python3
""" Module base geometry
"""
class BaseGeometry:
    """Base geometry: abstract area() plus a strict-int validator."""

    def area(self):
        """Always raises; concrete shapes are expected to override this."""
        raise Exception('area() is not implemented')

    def integer_validator(self, name, value):
        """Raise unless *value* is a strictly positive int (bools rejected)."""
        # exact type check on purpose: isinstance would let bool through
        if type(value) != int:
            raise TypeError('{} must be an integer'.format(name))
        if value <= 0:
            raise ValueError('{} must be greater than 0'.format(name))
| [
"ing.heimer.rojas@gmail.com"
] | ing.heimer.rojas@gmail.com |
8a5dca801b4ec186f2b749ce1e27347e1b1e1750 | 09cead98874a64d55b9e5c84b369d3523c890442 | /py200912b_python2m6/day11_201121/sample/file_3_open.py | d6381dd6d88b7b7cebd514435788b08f725a6bd2 | [] | no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
python file I/O
Opening Files
open()
first look
"""
# case 3. open file in specified full path
# different way to represent path in windows system
# (forward slashes are accepted on Windows and avoid backslash escapes)
print("[info] open file in specified full path")
print("[info] opening file_open.txt ...")
# NOTE: hard-coded absolute path; this demo only works on the machine it
# was written on, and raises FileNotFoundError elsewhere.
f = open("D:/workspace/pycharm201803/ceit4101python/module_8_fileio/file_open.txt")
print("[info] closing ...")
f.close()
print("[info] done.")
| [
"lada314@gmail.com"
] | lada314@gmail.com |
7cbe4efbda319a44a4a563e56cc6bc8cae7c5f04 | c7967ec500b210513aa0b1f540144c931ca687ac | /알고리즘 스터디/개인공부/TwoPointer/PermutationSummation.py | 576694b4fd00b0f800b32c15f1f8c4361e775e12 | [] | no_license | sunminky/algorythmStudy | 9a88e02c444b10904cebae94170eba456320f8e8 | 2ee1b5cf1f2e5f7ef87b44643210f407c4aa90e2 | refs/heads/master | 2023-08-17T01:49:43.528021 | 2023-08-13T08:11:37 | 2023-08-13T08:11:37 | 225,085,243 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,349 | py | # https://www.acmicpc.net/problem/2143
import sys
# 누적합 구하기
def accumulation(number):
    """Count every contiguous-subarray sum of *number*.

    Returns a dict mapping each achievable partial sum to how many
    subarrays of *number* produce it (O(n^2) subarrays).
    """
    counts = {}
    size = len(number)
    for start in range(size):
        running = 0
        for end in range(start, size):
            running += number[end]
            counts[running] = counts.get(running, 0) + 1
    return counts
if __name__ == '__main__':
    # BOJ 2143: count pairs (subarray of arr1, subarray of arr2) whose
    # partial sums add up to `target`, using two pointers over the sorted
    # distinct sums weighted by their occurrence counts.
    target = int(sys.stdin.readline())
    sys.stdin.readline()
    arr1 = [*map(int, sys.stdin.readline().split())] # array 1
    acc1 = accumulation(arr1) # occurrence count of each partial sum of array 1
    sys.stdin.readline()
    arr2 = [*map(int, sys.stdin.readline().split())] # array 2
    acc2 = accumulation(arr2) # occurrence count of each partial sum of array 2
    acc1_key = sorted(acc1.keys()) # sorted distinct partial sums of array 1
    acc2_key = sorted(acc2.keys()) # sorted distinct partial sums of array 2
    answer = 0
    ## two pointers ##
    a1_idx = 0
    a2_idx = len(acc2_key) - 1
    while a1_idx < len(acc1_key) and a2_idx >= 0:
        calc = acc1_key[a1_idx] + acc2_key[a2_idx] # sum of the two partial sums
        # exact hit on the target: multiply the occurrence counts
        if calc == target:
            answer += acc1[acc1_key[a1_idx]] * acc2[acc2_key[a2_idx]]
        if calc <= target:
            a1_idx += 1
        else:
            a2_idx -= 1
print(answer) | [
"suns1502@gmail.com"
] | suns1502@gmail.com |
a36d671af009a8c76753ff5416319589a3318f3c | 1f08436bab6cd03bcfb257e8e49405cbc265195a | /3_list/Sample/list_ex20.py | a063117240f1387a45dd6d1559b3fcf38182856c | [] | no_license | kuchunbk/PythonBasic | e3ba6322f256d577e37deff09c814c3a374b93b2 | a87135d7a98be8830d30acd750d84bcbf777280b | refs/heads/master | 2020-03-10T04:28:42.947308 | 2018-04-17T04:25:51 | 2018-04-17T04:25:51 | 129,192,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | '''Question:
Write a Python program to access the index of a list.
'''
# Python code:
nums = [5, 15, 35, 8, 98]
# enumerate() yields (index, value) pairs, avoiding manual index bookkeeping
for num_index, num_val in enumerate(nums):
    print(num_index, num_val)
'''Output sample:
0 5
1 15
2 35
3 8
4 98
''' | [
"kuchunbk@gmail.com"
] | kuchunbk@gmail.com |
c8feaa8ecfa5607b14bf76c8344255b16073b91b | 51ce07a419abe50f49e7bb6a6c036af291ea2ef5 | /3.Algorithm/04. Stack1/DFS.py | d2435fd628dfe83323c14e92d7e2adee161ae3b2 | [] | no_license | salee1023/TIL | c902869e1359246b6dd926166f5ac9209af7b1aa | 2905bd331e451673cbbe87a19e658510b4fd47da | refs/heads/master | 2023-03-10T09:48:41.377704 | 2021-02-24T10:47:27 | 2021-02-24T10:47:27 | 341,129,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | # 재귀
def dfs(v):
    # Recursive depth-first search over the module-level adjacency matrix
    # G (1-indexed, V vertices); prints vertices in visit order.
    # mark v as visited
    visited[v] = 1
    print(v, end=' ')
    # recurse into every adjacent vertex that has not been visited yet
    for w in range(1, V+1):
        if G[v][w] == 1 and visited[w] == 0:
            dfs(w)
# --------------------------------------------
V, E = map(int, input().split()) # vertex count, edge count
temp = list(map(int, input().split())) # flat list of edge endpoints
G = [[0]*(V+1) for _ in range(V+1)] # adjacency matrix (1-indexed)
visited = [0]*(V+1) # visited flags
# store each undirected edge in the adjacency matrix (both directions)
for i in range(E):
    s, e = temp[2*i], temp[2*i+1]
    G[s][e] = 1
    G[e][s] = 1
dfs(1)
# 반복
'''
def dfs2(s,V):
# 초기화, 스택 생성, visitied[] 생성 및 초기화
visited = [0]*(V+1)
stack = []
stack.append(s) # 시작 노드 push()
visited[s] = 1
while stack: # 스택이 비어있지 않으면 반복
n = stack.pop() # 탐색할 노드 선택
for i in range(1,V+1):
if adj[n][i] == 1 and visited[i] == 0: # n에 인접한 노드가 있고, 방문안한 노드일 때,
stack.append(i)
visited[i] = 1
# --------------------------------------------------
V, E = map(int, input().split()) # V 정점 개수, E 간선 개수
adj = [[0]*(V+1) for _ in range(V+1)]
tmp = list(map(int, input().split()))
for i in range(E):
n1, n2 = tmp[i*2], tmp[i*2+1]
adj[n1][n2] = 1
adj[n2][n1] = 1 # 무방향 그래프인 경우
dfs(1, V)
'''
| [
"dltmddk1023@gmail.com"
] | dltmddk1023@gmail.com |
0a364178e1a3a1ca5c09b5d161d750af22a4a947 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20180101/get_virtual_network_gateway_advertised_routes.py | b50d1b09c509d3f5dc2c559d307478ea00d98982 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,591 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayAdvertisedRoutesResult',
'AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult',
'get_virtual_network_gateway_advertised_routes',
'get_virtual_network_gateway_advertised_routes_output',
]
# NOTE: auto-generated by the Pulumi SDK Generator; prefer regenerating
# over hand-editing (see the warning at the top of the file).
@pulumi.output_type
class GetVirtualNetworkGatewayAdvertisedRoutesResult:
    """
    List of virtual network gateway routes
    """
    # `value` is validated only when truthy, then stored via pulumi.set()
    # so the @pulumi.getter property below can retrieve it.
    def __init__(__self__, value=None):
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.GatewayRouteResponse']]:
        """
        List of gateway routes
        """
        return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult(GetVirtualNetworkGatewayAdvertisedRoutesResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the already-resolved result awaitable: the `if False`
        # branch never yields, so awaiting returns a copy immediately.
        if False:
            yield self
        return GetVirtualNetworkGatewayAdvertisedRoutesResult(
            value=self.value)
def get_virtual_network_gateway_advertised_routes(peer: Optional[str] = None,
                                                  resource_group_name: Optional[str] = None,
                                                  virtual_network_gateway_name: Optional[str] = None,
                                                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult:
    """
    List of virtual network gateway routes
    :param str peer: The IP address of the peer
    :param str resource_group_name: The name of the resource group.
    :param str virtual_network_gateway_name: The name of the virtual network gateway.
    """
    # Marshal arguments into the provider's camelCase wire format.
    __args__ = dict()
    __args__['peer'] = peer
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
    # Fill in defaults so callers may omit InvokeOptions entirely.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the azure-native provider.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20180101:getVirtualNetworkGatewayAdvertisedRoutes', __args__, opts=opts, typ=GetVirtualNetworkGatewayAdvertisedRoutesResult).value
    return AwaitableGetVirtualNetworkGatewayAdvertisedRoutesResult(
        value=__ret__.value)
# lift_output_func wraps the plain invoke above so every argument may be a
# pulumi Input/Output; the `...` body is intentional -- the decorator
# supplies the implementation.
@_utilities.lift_output_func(get_virtual_network_gateway_advertised_routes)
def get_virtual_network_gateway_advertised_routes_output(peer: Optional[pulumi.Input[str]] = None,
                                                         resource_group_name: Optional[pulumi.Input[str]] = None,
                                                         virtual_network_gateway_name: Optional[pulumi.Input[str]] = None,
                                                         opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVirtualNetworkGatewayAdvertisedRoutesResult]:
    """
    List of virtual network gateway routes
    :param str peer: The IP address of the peer
    :param str resource_group_name: The name of the resource group.
    :param str virtual_network_gateway_name: The name of the virtual network gateway.
    """
    ...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
9f935df7a693a88e5ff198c8cdeb82c876498221 | 46404c77e04907225475e9d8be6e0fd33227c0b1 | /max value of exp.py | 97a9e0dd3173d1d935cda977191f6d3427639305 | [] | no_license | govardhananprabhu/DS-task- | 84b46e275406fde2d56c301fd1b425b256b29064 | bf54f3d527f52f61fefc241f955072f5ed9a6558 | refs/heads/master | 2023-01-16T07:41:27.064836 | 2020-11-27T11:52:50 | 2020-11-27T11:52:50 | 272,928,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | """
Given an algebraic expression of the form (x1 + x2 + x3 + . . . + xn) * (y1 + y2 + . . . + ym) and
(n + m) integers. Find the maximum value of the expression using the given
integers.
Constraint :
n <= 50
m <= 50
-50 <= x1, x2, .. xn <= 50
H 6
T 2000
Tag cisco mathematics
In des
First line contains 2 space separated integers n,m, denotes the count of integers.
Second line contains n+m space separated integers.
Ot des
Print the max value
2 2
1 2 3 4
25
3 1
1 2 3 4
24
5 4
1 3 2 5 4 88 12 21 11
4982
1 1
11 10
110
3 3
1 4 22 1 33 2
980
Exp
The expression is (x1 + x2) * (y1 + y2) and
the given integers are 1, 2, 3 and 4. Then
maximum value is (1 + 4) * (2 + 3) = 25
Hint
A simple solution is to consider all possible combinations of n numbers and remaining m numbers and calculating their values, from which maximum value can be derived.
"""
def MaxValues(arr, n, m):
    """Print the maximum of (sum of n chosen values) * (sum of the rest).

    Subset-sum DP: every value is shifted by +50 so partial sums are
    non-negative, reachable[k][s] marks that some k shifted values sum to
    s, and the best split is read back from row n.  NOTE: *arr* is
    mutated in place (each entry is increased by 50).
    """
    INF = 1000000000
    MAX = 50
    total = 0
    for idx in range(n + m):
        total += arr[idx]
        arr[idx] += 50
    # reachable[k][s] == 1  <=>  some k of the shifted values sum to s
    reachable = [[0] * (MAX * MAX + 1) for _ in range(MAX + 1)]
    reachable[0][0] = 1
    for idx in range(n + m):
        # iterate k downwards so each value is used at most once
        for k in range(min(n, idx + 1), 0, -1):
            for s in range(MAX * MAX + 1):
                if reachable[k - 1][s]:
                    reachable[k][s + arr[idx]] = 1
    best = -1 * INF
    for s in range(MAX * MAX + 1):
        if reachable[n][s]:
            first_sum = s - 50 * n
            best = max(best, first_sum * (total - first_sum))
    print(best)
# read n and m, then the n+m integers that fill both brackets
n,m=map(int,input().split())
arr = list(map(int,input().split()))
MaxValues(arr, n, m)
| [
"noreply@github.com"
] | govardhananprabhu.noreply@github.com |
0b9eebfc0db17524b69f3646238b25cf55e6c715 | 714a8942a8a761d4ff1aa5cc38f68cd414da295d | /django_restful/wsgi.py | 5f86f0839de9ded1feb6f165af75437128cbd607 | [] | no_license | eliblurr/django-restful | 9e9a1188242439a2486a18a3d6f8dab3f9be4952 | 0fdf688d41ad32a5a63cf34680e6a96c572e5337 | refs/heads/main | 2023-06-04T08:18:25.286212 | 2021-06-23T20:36:39 | 2021-06-23T20:36:39 | 379,537,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for django_restful project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_restful.settings')
application = get_wsgi_application()
| [
"segbawuel@aiti-kace.com"
] | segbawuel@aiti-kace.com |
3ef7f37ef8d1957487abd9ace0dad6904448428b | edd8ad3dcb6ee9b019c999b712f8ee0c468e2b81 | /Python 300/09. Function/227.py | 42218bd6f628a285d7163e3e929121048f5fcec1 | [] | no_license | narinn-star/Python | 575cba200de35b9edf3832c4e41ccce657075751 | 14eba211cd3a9e9708a30073ba5b31d21d39eeef | refs/heads/master | 2023-05-25T22:57:26.079294 | 2021-06-07T15:29:39 | 2021-06-07T15:29:39 | 331,647,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | #functions _ 함수 정의
def print_mxn(line, n):
    """Print *line* in rows of *n* characters each.

    Bug fix: the old version computed int(len(line)/n) + 1 rows, which
    printed an extra empty line whenever len(line) was an exact multiple
    of n.  Stepping the slice start by n avoids that off-by-one.
    """
    for start in range(0, len(line), n):
        print(line[start:start + n])
print_mxn("가나다라마바사아자차", 3) | [
"skfls2618@naver.com"
] | skfls2618@naver.com |
f146ecaa2404f714b15e20fe8f193a13c2fd7061 | c6ed9aa97166d4778b89321b580af80c543bacc9 | /randoms/kthlargest.py | 8869a6258042894d7e85fa2c7e867ca9be147be2 | [] | no_license | bradyz/sandbox | 381bcaf2f3719dee142a00858f7062aeff98d1ab | ff90335b918886d5b5956c6c6546dbfde5e7f5b3 | refs/heads/master | 2021-01-23T09:03:54.697325 | 2018-02-27T20:47:48 | 2018-02-27T20:47:48 | 21,292,856 | 10 | 0 | null | 2015-09-03T16:53:15 | 2014-06-28T00:29:18 | Python | UTF-8 | Python | false | false | 1,059 | py | from random import randrange
def kth(a, k):
    # Quickselect-style routine intended to move the k largest values of
    # `a` to the tail in place, then return them as a[-k:].
    # NOTE(review): the index bookkeeping below looks fragile -- see the
    # inline notes before trusting this on arbitrary input.
    def pivot(s, e):
        # Hoare-like partition of a[s..e] around val = a[s]; returns the
        # final resting index of the pivot value.
        val = a[s]
        left = s
        right = e
        while left < right:
            while left < right and a[left] <= val:
                left += 1
            while a[right] > val:
                right -= 1
            if left < right:
                a[left], a[right] = a[right], a[left]
        a[s] = a[right]
        a[right] = val
        return right
    l = len(a)
    idx = 0
    while idx != l - k:
        tmp = pivot(idx, l-1)
        print("tmp: " + str(tmp) + " val: " + str(a[tmp]))
        # NOTE(review): classic quickselect recurses on one side of `tmp`;
        # stepping idx by +/-1 and comparing tmp with l - k + 1 (rather
        # than l - k) is suspicious -- verify termination and correctness.
        if tmp > l - k + 1:
            idx -= 1
        else:
            idx += 1
    print(a)
    return a[-k:]
if __name__ == "__main__":
    # quick demo: 10 random values in [0, 100), ask for the 2 largest
    arr = [randrange(100) for _ in range(10)]
    el = 2
    print(str(el) + " elements")
    print(arr)
    print(kth(arr, el))
# t = int(input())
# for _ in range(t):
# el = int(input())
# arr = [int(val) for val in raw_input().split()]
# print(el)
# print(arr)
# print(kth(arr, el))
| [
"brady.zhou@utexas.edu"
] | brady.zhou@utexas.edu |
dc89937d05510bc33d593090df17b6f5fabdfb40 | ef0d8fd55fbdb526e20d6c2b05e601f1d86587c5 | /frappe/utils/bench_helper.py | b920347f7af0e8d5ce3bb5003349a9293c47e97f | [
"MIT"
] | permissive | indictranstech/v4_frappe | 8976e84c14346196b8895ad6274740dca7fd6504 | dba708c8aa83f503b9f4a264850307111a2b5f19 | refs/heads/master | 2021-09-26T12:26:29.994294 | 2018-10-30T06:09:36 | 2018-10-30T06:09:36 | 103,262,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | from __future__ import unicode_literals
import click
import frappe
import importlib
def main():
	"""Entry point: assemble one click sub-group per app and dispatch."""
	app_groups = get_app_groups()
	click.Group(commands=app_groups)()
def get_cli_options():
	"""Stub -- global CLI options are not implemented yet."""
	return None
def get_app_groups():
	"""Map each app name to its click command group; apps without commands are skipped."""
	groups = {}
	for app in get_apps():
		group = get_app_group(app)
		if group:
			groups[app] = group
	return groups
def get_app_group(app):
	"""Return a click.Group wrapping the app's commands, or None if it has none."""
	commands = get_app_commands(app)
	if not commands:
		return None
	return click.Group(name=app, commands=commands)
def get_app_commands(app):
	"""Return {command_name: command} from the app's `<app>.commands` module.

	Returns an empty dict when the app has no importable commands module.
	(Fix: the failure path used to return a list, so callers got two
	different container types from the same function.)
	"""
	try:
		app_command_module = importlib.import_module(app + '.commands')
	except ImportError:
		return {}
	ret = {}
	for command in getattr(app_command_module, 'commands', []):
		ret[command.name] = command
	return ret
def get_apps():
	"""All apps of the current bench, excluding frappe-internal apps."""
	return frappe.get_all_apps(with_internal_apps=False, sites_path='.')
if __name__ == "__main__":
main()
| [
"sagarshiragawakar@gmail.com"
] | sagarshiragawakar@gmail.com |
263cb4742e4e6ddcd2bb7b0b9ffff06d93e4e06d | 3c6b3b0a92e5a290ba69d0f73af51ac82aff3509 | /assignments/development/chess-top-100-p2.py | a0f31d9eb1b9a23a30afa30fb6798ba02ba27b67 | [] | no_license | sarae17/2019-T-111-PROG | ba6c6db7075acba16bbcd23e4c0d3db6e2bb374f | 017287b3300ec4fe809bfc81fee856ffb17b4800 | refs/heads/master | 2020-09-10T14:36:53.715479 | 2019-11-13T13:41:04 | 2019-11-13T13:41:04 | 221,722,173 | 1 | 0 | null | 2019-11-14T14:54:15 | 2019-11-14T14:54:14 | null | UTF-8 | Python | false | false | 3,092 | py | # The following constants indicate the position of the respective
# fields in the tuple stored as the value for the key in the players dictionary
RANK = 0
COUNTRY = 1
RATING = 2
BYEAR = 3
def open_file(filename):
    ''' Return a read-mode text stream for *filename*, or None when the file does not exist '''
    try:
        return open(filename, 'r')
    except FileNotFoundError:
        return None
def create_players_dict(file_stream):
    ''' Builds a dictionary from the given file stream: the key is the
    player name ("First Last"), the value is (rank, country, rating, b-year)
    '''
    players = {}
    for line in file_stream:
        # fields are separated by ';'; the name field itself is "Last, First"
        rank, name, country, rating, byear = line.split(';')
        lastname, firstname = [part.strip() for part in name.split(',')]
        key = "{} {}".format(firstname, lastname)
        players[key] = (int(rank), country.strip(), int(rating), int(byear))
    return players
def create_dict_with_key(dict_players, attribute_key):
    ''' Groups the players dictionary by one attribute of its value
    tuples: the attribute becomes the key, the value is the list of
    player names sharing it
    '''
    grouped = {}
    for player_name, player_data in dict_players.items():
        # setdefault creates the list on first sight of a new key
        grouped.setdefault(player_data[attribute_key], []).append(player_name)
    return grouped
def get_average_rating(players, dict_players):
    ''' Returns the average rating for the given player names '''
    ratings = [dict_players[name][RATING] for name in players]
    return sum(ratings) / len(ratings)
def print_sorted(the_dict, dict_players):
    ''' Prints information sorted on the key of the_dict '''
    # sort the groups by key (country code or birth year)
    sorted_dict = sorted(the_dict.items())
    for key, players in sorted_dict:
        average_rating = get_average_rating(players, dict_players)
        # group header: key, member count, average rating to one decimal
        print("{} ({}) ({:.1f}):".format(key, len(players), average_rating))
        for player in players:
            rating = dict_players[player][RATING]
            # right-aligned columns: name in 40 chars, rating in 10
            print("{:>40}{:>10d}".format(player, rating))
def print_header(header_str):
    ''' Prints the header text underlined by a dash line of equal length '''
    print(header_str)
    print('-' * len(header_str))
# The main program starts here
filename = input("Enter filename: ")
file_stream = open_file(filename)
# proceed only if the file could actually be opened
if file_stream:
    dict_players = create_players_dict(file_stream)
    # group the players two ways: by country and by birth year
    dict_countries = create_dict_with_key(dict_players, COUNTRY)
    dict_years = create_dict_with_key(dict_players, BYEAR)
    print_header("Players by country:")
    print_sorted(dict_countries, dict_players)
    print()
    print_header("Players by birth year:")
print_sorted(dict_years, dict_players) | [
"hrafnl@gmail.com"
] | hrafnl@gmail.com |
2aee15990e576ff4d1f8ba52fe34188c61efc469 | 056adbbdfb968486ecc330f913f0de6f51deee33 | /277-find-the-celebrity/find-the-celebrity.py | 5d5b0edbef91abbf8d99870dd9924f1dea1c0472 | [] | no_license | privateHmmmm/leetcode | b84453a1a951cdece2dd629c127da59a4715e078 | cb303e610949e953b689fbed499f5bb0b79c4aea | refs/heads/master | 2021-05-12T06:21:07.727332 | 2018-01-12T08:54:52 | 2018-01-12T08:54:52 | 117,215,642 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | # -*- coding:utf-8 -*-
#
# Suppose you are at a party with n people (labeled from 0 to n - 1) and among them, there may exist one celebrity. The definition of a celebrity is that all the other n - 1 people know him/her but he/she does not know any of them.
#
#
#
# Now you want to find out who the celebrity is or verify that there is not one. The only thing you are allowed to do is to ask questions like: "Hi, A. Do you know B?" to get information of whether A knows B. You need to find out the celebrity (or verify there is not one) by asking as few questions as possible (in the asymptotic sense).
#
#
#
# You are given a helper function bool knows(a, b) which tells you whether A knows B. Implement a function int findCelebrity(n), your function should minimize the number of calls to knows.
#
#
#
# Note: There will be exactly one celebrity if he/she is in the party. Return the celebrity's label if there is a celebrity in the party. If there is no celebrity, return -1.
#
# The knows API is already defined for you.
# @param a, person a
# @param b, person b
# @return a boolean, whether a knows b
# def knows(a, b):
class Solution(object):
    def findCelebrity(self, n):
        """
        :type n: int
        :rtype: int
        """
        # Pass 1: a single sweep leaves the only possible candidate --
        # whenever `possible` knows i, `possible` cannot be the celebrity,
        # so the candidate advances to i.
        possible = 0
        for i in range(1, n):
            if knows(possible, i):
                possible = i
        # Pass 2: verify everyone knows the candidate and the candidate
        # knows nobody; any violation means there is no celebrity.
        for i in range(0, n):
            if possible != i and (not knows(i, possible) or knows(possible, i)):
                return -1
            # if possible != i and (not knows(i, possible)):
            #     return -1
        return possible
| [
"hyan90@ucsc.edu"
] | hyan90@ucsc.edu |
3190629752516f8a00989529c3e5b4122ecccdc3 | c2c813717d1ab5df2e912d510595e71eb26d505a | /mineralization/clean code/test_M2-M2_convert.py | c9e69fda0456cdcb52c1b39fb8a98d84aba9b61d | [] | no_license | danielrgreen/toothmin | 7dfaa17dea34c6b42b8196652fb0d1ebcaf4b798 | 307d675225c69340745454ba220df1a5c4089d7a | refs/heads/master | 2020-12-24T16:34:07.595761 | 2017-11-08T19:07:15 | 2017-11-08T19:07:15 | 13,531,025 | 0 | 0 | null | 2014-07-25T19:47:58 | 2013-10-12T23:44:17 | Python | UTF-8 | Python | false | false | 5,230 | py | # Daniel Green, Gregory Green, 2014
# drgreen@fas.harvard.edu
# Human Evolutionary Biology
# Center for Astrophysics
# Harvard University
#
# Mineralization Model Re-Size:
# this code takes a larger mineralization model
# and produces images demonstrating mineral density
# increase over time, total density over time, or
# calculates final isotope distributions at full
# or partial resolution.
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import InterpolatedUnivariateSpline
import scipy.special as spec
from time import time
def tooth_timing_convert_curv2lin(conversion_times, a1, s1, o1, max1, s2, o2, max2):
    """Convert event days from an erf-extension tooth to a linear-extension tooth.

    Events are located on tooth 1 via its erf extension curve, expressed
    as a fraction of max1, scaled to max2, then mapped back to days
    through the linear model ext = s2 * t + o2.
    """
    height_1 = a1 * spec.erf(s1 * (conversion_times - o1)) + (max1 - a1)
    fraction = height_1 / max1
    height_2 = fraction * max2
    return (height_2 - o2) / s2
def tooth_timing_convert_lin2curv(conversion_times, s1, o1, max1, a2, s2, o2, max2):
    """Convert event days from a linear-extension tooth to an erf-extension tooth.

    The linear extension position is expressed as a fraction of max1,
    scaled to max2, then inverted through tooth 2's erf curve via erfinv.
    """
    height_1 = (s1 * conversion_times) + o1
    fraction = height_1 / max1
    height_2 = fraction * max2
    return (spec.erfinv((a2 + height_2 - max2) / a2) + (o2 * s2)) / s2
def tooth_timing_convert(conversion_times, a1, s1, o1, max1, a2, s2, o2, max2):
    '''
    Map event days from one tooth onto another via extension fraction.

    Events at `conversion_times` (days) are located spatially on tooth 1
    using its erf-shaped extension curve, that height is expressed as a
    fraction of tooth 1's full height, projected onto tooth 2's height,
    and finally inverted through tooth 2's extension curve (erfinv) to
    recover the equivalent days.

    Inputs:
        conversion_times:  1-dimensional numpy array of days to convert.
        a1, s1, o1, max1:  amplitude, slope, offset and max height (mm)
                           of tooth 1's extension-over-days erf curve.
        a2, s2, o2, max2:  the same four parameters for tooth 2.
    Returns: converted 1-dimensional numpy array of days.
    '''
    height_1 = a1 * spec.erf(s1 * (conversion_times - o1)) + (max1 - a1)
    fraction = height_1 / max1
    height_2 = fraction * max2
    return (spec.erfinv((a2 + height_2 - max2) / a2) + (o2 * s2)) / s2
def spline_input_signal(iso_values, value_days, smoothness):
    '''
    Expand a coarse isotope series into a smoothed day-by-day signal.

    Each entry of iso_values is taken to apply for value_days days; a
    degree-`smoothness` interpolating spline is fit through the coarse
    samples and evaluated once per day.  NOTE: the result is truncated
    to the first 584 days (the sheep-962 record length from birth).
    '''
    n_samples = np.size(iso_values)
    sample_days = np.arange(n_samples) * value_days
    spline = InterpolatedUnivariateSpline(sample_days, iso_values, k=smoothness)
    daily = spline(np.arange(value_days * n_samples))
    return daily[:584]
def main():
    # NOTE: this module is Python 2 (print statements below).
    # Parameter sets (amplitude, slope, offset, max height per tooth),
    # fitted to the 'synch86' dataset (outlier run, 100k samples).
    m1_m2_params = np.array([21.820, .007889, 29.118, 35., 67.974, 0.003352, -25.414, 41.]) # 'synch86', outlier, 100k
    m2_m1_params = np.array([67.974, 0.003352, -25.414, 41., 21.820, .007889, 29.118, 35.]) # 'synch86', outlier, 100k
    m2_m2_params_curv2lin = np.array([67.974, 0.003352, -25.414, 41., (41./416.), -8.3, 41.]) # 'synch86', outlier, 100k
    # Synthetic sinusoidal water d18O histories with different periods (days).
    daily_d18O_360 = 10.*np.sin((2*np.pi/360.)*(np.arange(600.)))-11.
    daily_d18O_180 = 10.*np.sin((2*np.pi/180.)*(np.arange(600.)))-11.
    daily_d18O_090 = 10.*np.sin((2*np.pi/90.)*(np.arange(600.)))-11.
    daily_d18O_045 = 10.*np.sin((2*np.pi/45.)*(np.arange(600.)))-11.
    # Convert a 600-day window (starting at day 84) through the M2->M2
    # curved-to-linear timing model.
    days = np.arange(84., 684.)
    converted_days = tooth_timing_convert_curv2lin(days, *m2_m2_params_curv2lin)
    # Step-function test signal: baseline 5 with 50-day blocks of higher values.
    M2_test1 = np.ones(days.size)
    M2_test1[:] = 5.
    M2_test1[50:100] = 15.
    M2_test1[150:200] = 25.
    M2_test1[250:300] = 35.
    M2_test1[350:400] = 45.
    M2_test1[450:500] = 55.
    # Re-time the test signal: each converted day overwrites the tail.
    M1_test1_tmp = np.ones(converted_days.size)
    for k,d in enumerate(converted_days):
        print k,d
        d = int(d)
        M1_test1_tmp[d:] = M2_test1[k]
    M1_test1 = M1_test1_tmp
    M1_test1 = M1_test1[84:]
    print 'days =', days
    print 'converted days =', converted_days
    print 'm2 = ', M2_test1
    print 'm1 = ', M1_test1
    # timestamp used to make the output filename unique
    t_save = time()
    print days.size, M1_test1.size, M2_test1.size, days[:-84].size
    # Two stacked panels comparing the original and re-timed signals.
    fig = plt.figure()
    ax1 = fig.add_subplot(2,1,1)
    ax1text = 'M2->M2, M2_days start@84, M2/M2 plotted w/diff day_arrays'
    ax1.text(0, 50, ax1text, fontsize=8)
    ax1.plot(days, M2_test1, 'k--', linewidth=1.0)
    ax1.plot(converted_days[:-84], M1_test1, 'b-', linewidth=1.0)
    ax1.set_ylim(-5, 65)
    ax1.set_xlim(-50, 600)
    ax1 = fig.add_subplot(2,1,2)
    ax1text = 'M2->M2, M2_days start@84, M2/M2 plotted on same'
    ax1.text(0, 50, ax1text, fontsize=8)
    ax1.plot(np.arange(np.size(M2_test1)), M2_test1, 'k--', linewidth=1.0)
    ax1.plot(np.arange(np.size(M1_test1)), M1_test1, 'b-', linewidth=1.0)
    ax1.set_ylim(-5, 65)
    ax1.set_xlim(-50, 600)
    fig.savefig('M2-M2_convert_testing_{0}.svg'.format(t_save), dpi=300, bbox_inches='tight')
    plt.show()
    return 0
if __name__ == '__main__':
    main()
| [
"drgreen@fas.harvard.edu"
] | drgreen@fas.harvard.edu |
6eae2990fa0e566303822d2384af4be325e85c9e | 8d49df8fd04ef5cc5123b956470ab70344d39cc7 | /crash_course/ch17/python_repos.py | cb63fa657aecd488a86f9c05b5e7f4f98cea46e5 | [
"BSD-3-Clause"
] | permissive | dantin/python-by-example | 314c1d97bb527f65e5ada59ee1a72d6df4d881b3 | 5769c7a332ebd60fd54e477b6813f2f2a0f3f37f | refs/heads/master | 2022-12-10T04:49:52.771288 | 2019-03-05T03:41:02 | 2019-03-05T03:41:02 | 143,107,516 | 0 | 0 | BSD-3-Clause | 2022-12-08T02:47:35 | 2018-08-01T05:21:53 | Python | UTF-8 | Python | false | false | 1,487 | py | # -*- coding: utf-8 -*-
import pygal
import requests
from pygal.style import LightColorizedStyle as LCS, LightenStyle as LS

# Fetch the most-starred Python repositories from the GitHub search API
# and render an interactive bar chart to python_repos.svg.

# Make an API call and store the response.
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
r = requests.get(url)
print('Status code:', r.status_code)

# Store API response in a variable.
response_dict = r.json()
print('Total repositories:', response_dict['total_count'])

# Explore information about the repositories.
repo_dicts = response_dict['items']

names, plot_dicts = [], []
for repo_dict in repo_dicts:
    names.append(repo_dict['name'])

    # Get the project description, if one is available.
    description = repo_dict['description']
    if not description:
        description = 'No description provided.'

    # value drives the bar height; label/xlink power the tooltip and link.
    plot_dict = {
        'value': repo_dict['stargazers_count'],
        'label': str(description),
        'xlink': repo_dict['html_url'],
    }
    plot_dicts.append(plot_dict)

# Make visualization.
my_style = LS('#333366', base_style=LCS)
my_style.title_font_size = 24
my_style.label_font_size = 14
my_style.major_label_font_size = 18

my_config = pygal.Config()
my_config.x_label_rotation = 45
my_config.show_legend = False
# BUG FIX: was 'tuncate_label' -- a typo that silently set an unused
# attribute; 'truncate_label' is the real pygal.Config option that trims
# long x-axis labels to 15 characters.
my_config.truncate_label = 15
my_config.show_y_guides = False
my_config.width = 1000

chart = pygal.Bar(my_config, style=my_style)
chart.title = 'Most-Starred Python Projects on GitHub'
chart.x_labels = names
chart.add('', plot_dicts)
chart.render_to_file('python_repos.svg')
| [
"noreply@github.com"
] | dantin.noreply@github.com |
fa07e2b4fabebab106934800e1c21dbd1ebddb1a | abfa0fcab2bc9a9c3cccbc3a8142cdd4b2a66ee9 | /251-Flatten 2D Vector.py | f488e83697174a77d26799a5b6cad491014a04f2 | [] | no_license | JinnieJJ/leetcode | 20e8ccf3f8919028c53e0f0db86bcc2fbc7b6272 | 26c6ee936cdc1914dc3598c5dc74df64fa7960a1 | refs/heads/master | 2021-04-15T09:18:08.450426 | 2021-03-06T01:53:27 | 2021-03-06T01:53:27 | 126,275,814 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | class Vector2D(object):
def __init__(self, vec2d):
"""
Initialize your data structure here.
:type vec2d: List[List[int]]
"""
self.stack = vec2d
self.i = 0
self.j = -1
def next(self):
"""
:rtype: int
"""
return self.stack[self.i][self.j]
    def hasNext(self):
        """
        :rtype: bool
        """
        # NOTE(review): this method advances the (i, j) cursor as a side effect,
        # so each next() must be preceded by exactly one hasNext() call (see the
        # usage sketch in the comments at the bottom of the file).
        if not self.stack:
            return False
        # Step the column cursor to the candidate element.
        self.j += 1
        while True:
            if self.j < len(self.stack[self.i]):
                return True
            # Current row exhausted (or empty): move to the next row.
            self.i += 1
            if self.i >= len(self.stack):
                return False
            self.j = 0
# Your Vector2D object will be instantiated and called as such:
# i, v = Vector2D(vec2d), []
# while i.hasNext(): v.append(i.next())
| [
"noreply@github.com"
] | JinnieJJ.noreply@github.com |
b9462913cf8d146f69d2293389e30e4c52a4d3dc | 8e0f89612fb278c07cd871ea29685917caec4e0d | /second_trik_bubble_short.py | a3f219a8bac5489cd00249ab9006136a0bb42ad7 | [] | no_license | amritat123/list_Questions | 169836e87863f1d97776a9f9da0dd14e0512ac8a | d98982fd16a2165d7fd4bea8c89014897a1a0f72 | refs/heads/main | 2023-06-20T06:35:41.533536 | 2021-07-10T07:52:04 | 2021-07-10T07:52:04 | 369,066,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | num=[10,80,50,40,30,20]
# Bubble sort: each pass swaps adjacent out-of-order neighbours so the largest
# remaining value "bubbles" to the end of the unsorted region.
# (The original inner loop compared elements but only executed `pass`, so the
# list was never actually reordered; this version performs the swaps.)
l = len(num)
i = 0
while i < l - 1:
    # After pass i, the last i elements already hold their final values.
    j = 0
    while j < l - 1 - i:
        if num[j] > num[j + 1]:
            num[j], num[j + 1] = num[j + 1], num[j]
        j = j + 1
    i += 1
print(num[-j]) this is buble sort")) | [
"you@example.com"
] | you@example.com |
3671cea80cef5e56d1c8b1e9baab67b8c3992441 | a6803cfd1fad3e8ae7c5f6ee8a34302516f4f3ec | /640_extent/resnet_extent640_4layer_dropout25.py | 611f2d6ffd305f1d41ea5b6d72839634ffaf81d1 | [] | no_license | LasseGoransson/bachelor-code | 906ae4bbd22f6bb73b4727b62268115f186d922a | 016af73719792252bae58b11a2a43a674109abd7 | refs/heads/master | 2022-08-24T11:59:56.017903 | 2020-05-26T08:22:28 | 2020-05-26T08:22:28 | 238,145,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,002 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import neptune
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.models import Sequential
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D,GlobalAveragePooling2D, Concatenate, Reshape,GlobalMaxPooling2D, Activation, Input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import numpy as np
import pandas
import os
import pathlib
import datetime
import math
import sys
# GPU setup
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
# Config loading
train_path = "../../bachelor-data/allTrain.csv"
validate_path ="../../bachelor-data/allTest.csv"
image_dir = "../../bachelor-data/data_640x1030_extentW/"
checkpointpath = "../../bachelor-data/checkpoints/"
modelName = sys.argv[0]
learning_rate = 0.001
image_height =1030
image_width = 640
batch_size = 4
numEpochs = 200
conf= {
"train_path": train_path,
"validate_path": validate_path,
"image_dir": image_dir,
"modelName": modelName,
"learning_rate": learning_rate,
"image_height": image_height,
"image_width": image_width,
"batch_size": batch_size,
"numEpochs": numEpochs,
"aspectImages": "true"
}
# select project
neptune.init('lassegoransson/xrayPredictor')
# Data generators
train_df = pandas.read_csv(train_path)
validate_df = pandas.read_csv(validate_path)
train_datagen = ImageDataGenerator(
rescale=1./255,
horizontal_flip=True,
vertical_flip=True
)
val_datagen = ImageDataGenerator(
rescale=1./255,
)
train_generator = train_datagen.flow_from_dataframe(
dataframe=train_df,
directory=image_dir,
x_col="filename",
y_col='label',
target_size=(image_height, image_width),
batch_size=batch_size,
shuffle=True,
class_mode="raw",
color_mode="rgb"
)
val_generator = val_datagen.flow_from_dataframe(
dataframe=validate_df,
directory=image_dir,
x_col="filename",
y_col='label',
target_size=(image_height, image_width),
batch_size=batch_size,
shuffle=True,
class_mode="raw",
color_mode="rgb"
)
# Model
RESNET = keras.applications.resnet.ResNet50(include_top=False, weights='imagenet', input_shape=(image_height,image_width,3), pooling="avg")
model = tf.keras.Sequential()
#for layer in RESNET.layers:
# model.add(layer)
#for l in model.layers:
# l.trainable=False
# Projection
#model.add(Conv2D(3,(1,1),input_shape=(image_height,image_width,1),padding="same"))
model.add(RESNET)
#model.layers[1].trainable=True
model.add(Dropout(0.25))
model.add(Dense(512,Activation("relu")))
model.add(Dropout(0.25))
model.add(Dense(256,Activation("relu")))
model.add(Dropout(0.25))
model.add(Dense(124,Activation("relu")))
model.add(Dropout(0.25))
model.add(Dense(64,Activation("relu")))
model.add(Dense(1))
optimize = keras.optimizers.Adam(learning_rate=learning_rate)
model.compile(optimizer=optimize,
loss='MSE',
metrics=['mse']
)
class NeptuneMonitor(Callback):
    """Keras callback that mirrors per-epoch metrics into the Neptune run."""
    def on_epoch_end(self, epoch, logs={}):
        # NOTE(review): a mutable default for `logs` is normally a hazard, but
        # Keras always supplies the dict and it is never mutated here.
        neptune.send_metric('val_loss', epoch, logs['val_loss'])
        neptune.send_metric('loss', epoch, logs['loss'])
        # Also log the live learning rate so ReduceLROnPlateau steps are visible.
        neptune.send_metric('learning_rate', epoch, float(tf.keras.backend.get_value(self.model.optimizer.lr)))
filepath=str(checkpointpath)+"model_"+str(modelName)+"_checkpoint-"+str(image_height)+"x"+str(image_width)+"-{epoch:03d}-{val_loss:.16f}.hdf5"
RLR = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, verbose=1, mode='min', min_delta=0.0001, cooldown=0)
checkpoint = keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='min')
earlyStop = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', patience=10, restore_best_weights=True,verbose=1)
with neptune.create_experiment(name=modelName, params=conf) as npexp:
neptune_monitor = NeptuneMonitor()
callbacks_list = [checkpoint, neptune_monitor, RLR, earlyStop]
model.summary()
history = model.fit(train_generator,validation_data=val_generator,verbose=1 , epochs=numEpochs, steps_per_epoch=train_generator.n/train_generator.batch_size , callbacks=callbacks_list)
import glob
list_of_files = glob.glob(checkpointpath+"*") # * means all if need specific format then *.csv
latest_file = max(list_of_files, key=os.path.getctime)
modelfileName = latest_file
npexp.send_artifact(modelfileName)
tmp = modelfileName.split('-')[4].split('.')
val = float(tmp[0]+"."+tmp[1])
neptune.send_metric('val_loss', val)
| [
"you@example.com"
] | you@example.com |
fd161207e0b369b7d7a6b1809a4cab6f575b57a7 | 35dbd536a17d7127a1dd1c70a2903ea0a94a84c2 | /src/sudo/settings.py | 82c6025fc5de974d3bcce10c3ee45c17c23f942a | [
"Apache-2.0",
"BUSL-1.1",
"BSD-3-Clause"
] | permissive | nagyist/sentry | efb3ef642bd0431990ca08c8296217dabf86a3bf | d9dd4f382f96b5c4576b64cbf015db651556c18b | refs/heads/master | 2023-09-04T02:55:37.223029 | 2023-01-09T15:09:44 | 2023-01-09T15:09:44 | 48,165,782 | 0 | 0 | BSD-3-Clause | 2022-12-16T19:13:54 | 2015-12-17T09:42:42 | Python | UTF-8 | Python | false | false | 1,881 | py | """
sudo.settings
~~~~~~~~~~~~~
:copyright: (c) 2020 by Matt Robenolt.
:license: BSD, see LICENSE for more details.
"""
from django.conf import settings
# Default url to be redirected to after elevating permissions
REDIRECT_URL = getattr(settings, "SUDO_REDIRECT_URL", "/")
# The querystring argument to be used for redirection
REDIRECT_FIELD_NAME = getattr(settings, "SUDO_REDIRECT_FIELD_NAME", "next")
# How long should sudo mode be active for? Duration in seconds.
# (The default, 10800 seconds, is 3 hours.)
COOKIE_AGE = getattr(settings, "SUDO_COOKIE_AGE", 10800)
# The domain to bind the sudo cookie to. Default to the current domain.
COOKIE_DOMAIN = getattr(settings, "SUDO_COOKIE_DOMAIN", settings.SESSION_COOKIE_DOMAIN)
# Should the cookie only be accessible via http requests?
# Note: If this is set to False, any JavaScript files have the ability to access
# this cookie, so this should only be changed if you have a good reason to do so.
COOKIE_HTTPONLY = getattr(settings, "SUDO_COOKIE_HTTPONLY", True)
# The name of the cookie to be used for sudo mode.
COOKIE_NAME = getattr(settings, "SUDO_COOKIE_NAME", "sudo")
# Restrict the sudo cookie to a specific path.
COOKIE_PATH = getattr(settings, "SUDO_COOKIE_PATH", "/")
# Only transmit the sudo cookie over https if True.
# By default, this will match the current protocol. If your site is
# https already, this will be True.
# (A default of None lets the consuming code fall back to the current scheme.)
COOKIE_SECURE = getattr(settings, "SUDO_COOKIE_SECURE", None)
# An extra salt to be added into the cookie signature
COOKIE_SALT = getattr(settings, "SUDO_COOKIE_SALT", "")
# The name of the session attribute used to preserve the redirect destination
# between the original page request and successful sudo login.
REDIRECT_TO_FIELD_NAME = getattr(settings, "SUDO_REDIRECT_TO_FIELD_NAME", "sudo_redirect_to")
# The url for the sudo page itself. May be a url or a view name
URL = getattr(settings, "SUDO_URL", "sudo.views.sudo")
| [
"noreply@github.com"
] | nagyist.noreply@github.com |
65b1050ec71da8aaaec7459c66e049e59bb12264 | b007d88e6726452ffa8fe80300614f311ae5b318 | /LeetCode/monthly_challenges/2021/march/27_palindrome_substring_count.py | 8d68e0b1d81402b18b72f054addcaa1466345fa8 | [] | no_license | jinurajan/Datastructures | ec332b12b8395f42cb769e771da3642f25ba7e7f | 647fea5d2c8122468a1c018c6829b1c08717d86a | refs/heads/master | 2023-07-06T14:42:55.168795 | 2023-07-04T13:23:22 | 2023-07-04T13:23:22 | 76,943,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | """
Palindromic Substrings
Given a string, your task is to count how many palindromic substrings in this string.
The substrings with different start indexes or end indexes are counted as different substrings even they consist of same characters.
Example 1:
Input: "abc"
Output: 3
Explanation: Three palindromic strings: "a", "b", "c".
Example 2:
Input: "aaa"
Output: 6
Explanation: Six palindromic strings: "a", "a", "a", "aa", "aa", "aaa".
"""
class Solution:
    def countSubstrings(self, s: str) -> int:
        """Count the palindromic substrings of *s*.

        Substrings with different start or end indexes count separately even
        when their characters are identical.  Expands around every possible
        palindrome center (each character and each gap between characters),
        which runs in O(n^2) time and O(1) extra space instead of the original
        check-every-substring O(n^3) approach.
        """
        n = len(s)
        count = 0
        # There are 2n-1 centers: even indexes sit on a character (odd-length
        # palindromes), odd indexes sit between two characters (even-length).
        for center in range(2 * n - 1):
            left = center // 2
            right = left + center % 2
            # Grow outwards while the window remains a palindrome; each
            # successful step is one more palindromic substring.
            while left >= 0 and right < n and s[left] == s[right]:
                count += 1
                left -= 1
                right += 1
        return count
print(Solution().countSubstrings("abcd"))
| [
"jinu.p.r@gmail.com"
] | jinu.p.r@gmail.com |
ac8191849d0491fd40e042225c0b9272cab1af55 | 1af5bbc95a39c505897be519841e02f4ebb0e4f9 | /jtyoui/neuralNetwork/paddle/ernie/run_msra.py | f0b5e8399e4e33de8a7b94c7348cda4d62c0c986 | [
"MIT"
] | permissive | BarryZM/Jtyoui | 037868f7211ee07ddbd4c9c5c3382e290c67fd25 | 08609671237bd5d83d98e1fa796d32ddfc92c274 | refs/heads/master | 2020-09-12T12:10:24.577574 | 2019-11-16T13:52:18 | 2019-11-16T13:52:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,661 | py | #!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2019/4/11 11:44
# @Author: Jtyoui@qq.com
from jtyoui.neuralNetwork.paddle.ernie.transformer_encoder import encoder, pre_process_layer
from jtyoui.neuralNetwork.paddle.ernie.vocab import vocal
import os
import numpy as np
from paddle import fluid
ERNIE_MODEL_PARAMETER = {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "relu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"max_position_embeddings": 513,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"type_vocab_size": 2,
"vocab_size": 18000
}
ERNIE_LABEL_MAP = {
"B-PER": 0, # 人名
"I-PER": 1,
"B-ORG": 2, # 机构名
"I-ORG": 3,
"B-LOC": 4, # 地名
"I-LOC": 5,
"O": 6
}
# 需要自己更改
model_path, config, label_map_config = None, ERNIE_MODEL_PARAMETER, ERNIE_LABEL_MAP
def pad_batch_data(inst, pad_idx=0, input_mask=False):
    """Right-pad a batch of variable-length id lists to the batch maximum.

    :param inst: list of lists of ids (each inner list is one sequence)
    :param pad_idx: id used to fill the tail of shorter sequences
    :param input_mask: when True, also return a float32 0/1 padding mask
    :return: an int64 array of shape (batch, max_len, 1), or the
        [ids, mask] pair when input_mask is True
    """
    longest = max(len(seq) for seq in inst)
    padded = [seq + [pad_idx] * (longest - len(seq)) for seq in inst]
    outputs = [np.array(padded).astype("int64").reshape([-1, longest, 1])]
    if input_mask:
        # 1 over real tokens, 0 over padding, with a trailing unit axis.
        mask = np.array([[1] * len(seq) + [0] * (longest - len(seq)) for seq in inst])
        mask = np.expand_dims(mask, axis=-1)
        outputs.append(mask.astype("float32"))
    return outputs if len(outputs) > 1 else outputs[0]
def prepare_batch_data(example):
    """Convert one raw text string into a single padded batch of model feeds.

    Yields exactly one list:
    [token_ids, text_type_ids, position_ids, input_mask, label_ids].
    """
    # Ids 1 and 2 wrap the sequence — presumably the [CLS]/[SEP] ids of the
    # ERNIE vocab (TODO confirm).  NOTE(review): characters missing from the
    # vocab are silently dropped rather than mapped to an UNK id.
    words = [1] + [vocal[word] for word in example if word in vocal] + [2]
    padded_token_ids, input_mask = pad_batch_data([words], 0, True)
    # Single-segment input: every position gets text type 0.
    padded_text_type_ids = pad_batch_data([[0] * len(words)])
    padded_position_ids = pad_batch_data([list(range(len(words)))])
    # Placeholder labels for inference.  NOTE(review): 8 is outside the 0-6
    # range of ERNIE_LABEL_MAP — verify this dummy id is what the model expects.
    padded_label_ids = pad_batch_data([[8] * len(words)], len(label_map_config) - 1)
    return_list = [padded_token_ids, padded_text_type_ids, padded_position_ids, input_mask, padded_label_ids]
    yield return_list
def data_generator(input_str):
    """Return a zero-argument generator factory over batches for *input_str*,
    the shape expected by py_reader.decorate_tensor_provider (see st/match)."""
    def wrapper():
        for batch_data in prepare_batch_data(input_str):
            yield batch_data
    return wrapper
def init_checkpoint(exe, init_checkpoint_path, main_program):
    """Restore persistable variables of *main_program* from a checkpoint
    directory, skipping any variable without a matching file on disk."""
    def existed(var):
        # Only load persistable variables whose serialized file really exists.
        if not fluid.io.is_persistable(var):
            return False
        return os.path.exists(os.path.join(init_checkpoint_path, var.name))
    fluid.io.load_vars(exe, init_checkpoint_path, main_program=main_program, predicate=existed)
def evaluate(exe, program, reader, graph_vars):
    """Run *program* over the reader's batches and return the last batch's
    predicted label ids.

    Returns a list of ints (or None if nothing ran).  The loop only terminates
    via the exception the py_reader raises once its data is exhausted.
    """
    fetch_list = [graph_vars["labels"].name, graph_vars["infers"].name]
    total_number = None
    while True:
        reader.start()
        try:
            _, np_infers = exe.run(program=program, fetch_list=fetch_list)
            # Drop the first and last predictions — presumably the positions of
            # the boundary tokens added in prepare_batch_data (TODO confirm).
            total_number = [ls[0] for ls in np_infers[1:-1]]
        except Exception as e:
            # End-of-data is signalled by an exception.  NOTE(review): this
            # broad except also swallows genuine runtime errors.
            print(e)
            reader.reset()
            break
    return total_number
def create_model():
    """Build the ERNIE sequence-labelling graph plus its feeding py_reader.

    Returns (reader, graph_vars); graph_vars maps "labels"/"infers" to the
    flattened gold labels and argmax predictions, both marked persistable so
    they can be fetched after exe.run().
    """
    # Five feeds padded to a fixed 256-step window: token ids, sentence-type
    # ids, position ids (int64), the padding mask (float32) and label ids.
    reader = fluid.layers.py_reader(capacity=50, shapes=[[-1, 256, 1]] * 5, lod_levels=[0] * 5, use_double_buffer=True,
                                    dtypes=['int64'] * 3 + ['float32', 'int64'])
    src_ids, sent_ids, pos_ids, input_mask, labels = fluid.layers.read_file(reader)
    # Outer product of the 0/1 mask gives pairwise validity; (x - 1) * 10000
    # turns padded pairs into a -10000 attention bias and valid pairs into 0.
    self_attn_mask = fluid.layers.matmul(x=input_mask, y=input_mask, transpose_y=True)
    self_attn_mask = fluid.layers.scale(x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False)
    n_head_self_attn_mask = fluid.layers.stack(x=[self_attn_mask] * config['num_attention_heads'], axis=1)
    n_head_self_attn_mask.stop_gradient = True
    param_initializer = fluid.initializer.TruncatedNormal(config['initializer_range'])
    # Token, position and sentence-type embeddings are summed elementwise.
    emb_out = fluid.layers.embedding(
        input=src_ids,
        size=[config['vocab_size'], config['hidden_size']],
        dtype="float32",
        param_attr=fluid.ParamAttr(name="word_embedding", initializer=param_initializer), is_sparse=False)
    position_emb_out = fluid.layers.embedding(
        input=pos_ids,
        size=[config['max_position_embeddings'], config['hidden_size']],
        dtype="float32",
        param_attr=fluid.ParamAttr(name="pos_embedding", initializer=param_initializer))
    sent_emb_out = fluid.layers.embedding(
        sent_ids,
        size=[config['type_vocab_size'], config['hidden_size']],
        dtype="float32",
        param_attr=fluid.ParamAttr(name="sent_embedding", initializer=param_initializer))
    emb_out += position_emb_out + sent_emb_out
    # 'nd': presumably dropout/normalisation pre-processing before the encoder
    # stack (see pre_process_layer in transformer_encoder) — TODO confirm.
    emb_out = pre_process_layer(emb_out, 'nd', config['hidden_dropout_prob'], name='pre_encoder')
    enc_out = encoder(
        n_layer=config['num_hidden_layers'],
        enc_input=emb_out,
        attn_bias=n_head_self_attn_mask,
        n_head=config['num_attention_heads'],
        d_key=config['hidden_size'] // config['num_attention_heads'],
        d_value=config['hidden_size'] // config['num_attention_heads'],
        d_model=config['hidden_size'],
        d_inner_hid=config['hidden_size'] * 4,
        prepostprocess_dropout=config['hidden_dropout_prob'],
        attention_dropout=config['attention_probs_dropout_prob'],
        relu_dropout=0,
        hidden_act=config['hidden_act'],
        preprocess_cmd="",
        postprocess_cmd="dan",
        param_initializer=param_initializer,
        name='encoder')
    # Per-token classifier over the label set (num_flatten_dims=2 keeps the
    # batch/sequence axes and projects only the hidden dimension).
    log = fluid.layers.fc(input=enc_out, size=len(label_map_config), num_flatten_dims=2,
                          param_attr=fluid.ParamAttr(name="cls_seq_label_out_w",
                                                     initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
                          bias_attr=fluid.ParamAttr(name="cls_seq_label_out_b",
                                                    initializer=fluid.initializer.Constant(0.)))
    # Flatten gold labels and argmax predictions to [N, 1] for easy fetching.
    ret_labels = fluid.layers.reshape(x=labels, shape=[-1, 1])
    ret_infers = fluid.layers.reshape(x=fluid.layers.argmax(log, axis=2), shape=[-1, 1])
    graph_vars = {"labels": ret_labels, "infers": ret_infers}
    for v in graph_vars.values():
        v.persistable = True
    return reader, graph_vars
def match(words, init_st: list):
    """Entity-extraction entry point.

    :param words: the text to run extraction over
    :param init_st: the initialised state returned by st()
    :return: a list of numeric label ids, as configured in label_map_config
    """
    # Rebind the reader's data source to batches built from this input text.
    init_st[2].decorate_tensor_provider(data_generator(words))
    number = evaluate(*init_st)
    return number
def st(new_model_path=None, new_config=None, new_label_map_config=None) -> list:
    """Initialise the inference model; load once and reuse the returned state.

    Overrides the module-level defaults when arguments are provided.

    :param new_model_path: checkpoint directory
    :param new_config: model hyper-parameter dict (see ERNIE_MODEL_PARAMETER)
    :param new_label_map_config: entity label mapping (see ERNIE_LABEL_MAP)
    :return: [executor, test_program, test_reader, graph_vars] for match()
    """
    global model_path, config, label_map_config
    if new_model_path:
        model_path = new_model_path
    if new_config:
        config = new_config
    if new_label_map_config:
        label_map_config = new_label_map_config
    exe = fluid.Executor(fluid.CPUPlace())
    startup_program = fluid.Program()
    test_program = fluid.Program()
    # Build the graph inside its own program/name scope, then freeze it.
    with fluid.program_guard(test_program, startup_program):
        with fluid.unique_name.guard():
            test_reader, graph_vars = create_model()
    test_program = test_program.clone(for_test=True)
    exe.run(startup_program)
    init_checkpoint(exe, model_path, main_program=startup_program)
    return [exe, test_program, test_reader, graph_vars]
if __name__ == '__main__':
    # Smoke test using the default model parameters and label mapping.
    ERNIE_MODEL_PATH = 'D://model'
    s = st(ERNIE_MODEL_PATH)
    print(match('我叫刘万光我是贵阳市南明村永乐乡水塘村的村民', s))
| [
"jtyoui@qq.com"
] | jtyoui@qq.com |
d26161caf55999dd250bc6d91896d5ba2425d3d6 | 09912a852e0e20d6a475ef904724f80072a68359 | /eds/FrontEnd/server/openmtc-ngsi/lib/futile/string/__init__.py | bb3f869e1ce8737c0fb47dcff6b960676b61f2db | [
"Apache-2.0"
] | permissive | elastest/elastest-device-emulator-service | 034aa19438383df0975bf86d49e231342d63002f | f512355c5fde6bf027d23558e256b96e2296e0f2 | refs/heads/master | 2021-03-09T15:13:30.676138 | 2020-01-13T12:02:02 | 2020-01-13T12:02:02 | 91,440,225 | 3 | 9 | Apache-2.0 | 2018-12-03T14:59:27 | 2017-05-16T09:26:10 | Python | UTF-8 | Python | false | false | 414 | py | import string
letters_digits_underscore = string.letters + string.digits + "_"
class InvalidIdentifier(ValueError):
    """Raised by check_identifier() when a string is not a valid identifier."""
    pass
def is_identifier(s):
    """Return True if *s* is non-empty, starts with a letter, and contains
    only letters, digits and underscores.

    NOTE(review): Python 2 only — ``string.letters`` was removed in Python 3
    (use ``string.ascii_letters``).  Also note a leading underscore is rejected
    here even though Python identifiers allow it; confirm that is intended.
    """
    if not s or s[0] not in string.letters:
        return False
    for c in s:
        if c not in letters_digits_underscore:
            return False
    return True
def check_identifier(s):
    """Raise InvalidIdentifier if *s* fails is_identifier(); returns nothing."""
    if not is_identifier(s):
        raise InvalidIdentifier(s)
| [
"sro"
] | sro |
d500b52694b7a8b3507cac32ae78b4c9b63e125c | 21e7753732296bfdfb6dd9a9b58c7c6b8d90a1e5 | /ArraysAndStrings/zSearch/zSearch_test.py | 8e6103cc3afa695d7c5b2498d27d8054250359c9 | [] | no_license | rongfeng-china/python-algorithms-and-data-structures | eb8514b44d7ff97dd7c4deda2d8ea888a5aa8d04 | a69241bb7b684bc7d00acdd46c2fc214f7b61887 | refs/heads/master | 2020-03-13T09:08:13.375870 | 2015-12-11T07:37:30 | 2015-12-11T07:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from zSearch import search
import pytest
def test_zSearch():
    """search() should return the index of the first match of pattern in string."""
    string = 'baabaa'
    pattern = 'aab'
    assert(search(string, pattern) == 1)
| [
"prathamt@outlook.com"
] | prathamt@outlook.com |
1ca9d2844fd7f25bef3d7f66f59a24fc318cab89 | 8da9d3c3e769ead17f5ad4a4cba6fb3e84a9e340 | /src/chila/codexGtk/base/app/connectors/gen/ConnectionPerformer.py | a1ae9d34245a5cb3b3994f97850b732c8cbcaa19 | [] | no_license | blockspacer/chila | 6884a540fafa73db37f2bf0117410c33044adbcf | b95290725b54696f7cefc1c430582f90542b1dec | refs/heads/master | 2021-06-05T10:22:53.536352 | 2016-08-24T15:07:49 | 2016-08-24T15:07:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | #!/usr/bin/env python
# (connector class name, dotted instance path) pairs.
# NOTE(review): the sequence order presumably matters when the application
# wires its connections together — confirm before reordering entries.
connInstanceSeq = [ ['Application', 'base.application'],
                    ['MainWindow', 'base.mainWindow'],
                    ['FileChooser', 'base.fileChooser.open'],
                    ['FileChooser', 'base.fileChooser.saveAs'],
                    ['ActionExecuter', 'base.actionExecuter'],
                    ['CmdNetServer', 'base.appCommand.netServer'],
                    ['FunDynExecuter', 'base.appCommand.funDynExecuter'],
                    ['FunDynXMLParser', 'base.appCommand.funDynXMLParser'],
                    ['CmdNetTextMsgCreator', 'base.appCommand.msgCreator'] ]
| [
"chilabot@gmail.com"
] | chilabot@gmail.com |
1d03ea638ca2af8b958398f4ea023c74b7a6fa67 | dfb6a80dda5882a1c2be87b0b6e1e7a87a7b4c20 | /test/test_task_status.py | 5a10bfb8adbc5c54b1f995ceca7541743c5b7690 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | artikcloud/artikcloud-python | a090201bea9fadbdf5dd57d94d9085b03b34f927 | c5489b2fca27fd9a8bcea99f309e02cb690dd349 | refs/heads/master | 2020-12-26T03:33:00.657575 | 2017-12-28T20:40:05 | 2017-12-28T20:40:05 | 55,102,598 | 13 | 11 | null | 2017-03-18T03:22:58 | 2016-03-30T22:38:07 | Python | UTF-8 | Python | false | false | 793 | py | # coding: utf-8
"""
ARTIK Cloud API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import artikcloud
from artikcloud.rest import ApiException
from artikcloud.models.task_status import TaskStatus
class TestTaskStatus(unittest.TestCase):
    """ TaskStatus unit test stubs """
    def setUp(self):
        # No fixtures required for the construction smoke test below.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testTaskStatus(self):
        """
        Test TaskStatus
        """
        # Smoke test: only verifies the generated model class can be built.
        # TODO: assert on TaskStatus attributes once real cases are added.
        model = artikcloud.models.task_status.TaskStatus()
if __name__ == '__main__':
unittest.main()
| [
"jswattonjue@gmail.com"
] | jswattonjue@gmail.com |
548e856a0fa3281825d8b21721b32cb264401c73 | af3ff734866bca30f1821976649d197457f6f66a | /corpus_builder/spiders/newspaper/bhorerkagoj.py | 6f54a1479df963016770b2dd2f87afbaa691e9b6 | [
"MIT"
] | permissive | anwar03/corpus-builder | e69b26e3985ffdc27970117825069b9df2e4387e | 2c3e0d39a0a3dabacda0335d18779a0647bad118 | refs/heads/master | 2022-12-08T21:31:16.051904 | 2020-09-01T08:35:14 | 2020-09-01T08:35:14 | 280,804,773 | 0 | 0 | null | 2020-07-19T06:15:28 | 2020-07-19T06:15:27 | null | UTF-8 | Python | false | false | 1,897 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from corpus_builder.items import TextEntry
from corpus_builder.templates.spider import CommonSpider
class BhorerkagojSpider(CommonSpider):
    """Crawler for bhorerkagoj.net that feeds article bodies into the corpus."""
    name = 'bhorerkagoj'
    allowed_domains = ['bhorerkagoj.net']
    base_url = 'http://www.bhorerkagoj.net' + '/online'
    start_request_url = base_url
    # CSS selector the CommonSpider template uses to extract article text.
    content_body = {
        'css': 'div.entry p::text'
    }
    # Accepted combinations of crawl arguments.
    allowed_configurations = [
        ['start_page'],
        ['start_page', 'end_page'],
        ['category', 'start_page'],
        ['category', 'start_page', 'end_page']
    ]
    rules = (
        # Article URLs look like /YYYY/MM/DD/<id>.php
        Rule(LinkExtractor(
            allow='\/\d{4}\/\d{2}\/\d{2}\/\d+\.php$'
        ),
        callback='parse_content'),
    )
    def request_index(self, response):
        """Yield one category index-page request per (category slug, page)."""
        categories = []
        if not self.category:
            # No slug given: crawl every category linked from the nav list.
            categories = list(set(response.css('#navcatlist a::attr("href")').re('(?<=category/).*')))
        else:
            categories = response.css('#navcatlist a::attr("href")').re('category/{0}'.format(self.category))
            if not categories:
                # Unknown slug: rebuild the full list purely for the error message.
                categories = list(set(response.css('#navcatlist a::attr("href")').re('(?<=category/).*')))
                raise ValueError('invalid category slug. available slugs: \'%s\'' % "', '".join(categories))
        for category in categories:
            for page in range(self.start_page, self.end_page + 1):
                yield scrapy.Request(self.base_url + '/category/' + category + '/page/{0}'.format(str(page)),
                                     callback=self.start_news_requests)
    def start_news_requests(self, response):
        """Request every (de-duplicated) article linked from an index page."""
        news_links = list(set(response.css('.news-box h3 a::attr("href")').extract()))
        for link in news_links:
            # NOTE(review): make_requests_from_url is deprecated in newer
            # Scrapy releases — confirm the pinned version still provides it.
            yield self.make_requests_from_url(link)
| [
"aniruddha@adhikary.net"
] | aniruddha@adhikary.net |
35715b56113b406fe9d97f7c6d4013cdd75747a5 | 6c2dbc8d4e536220fb3b1cc72aa8104aea8b0698 | /aiogram/methods/copy_message.py | b707580e2d3e903bc6c6470534da52b4c04b5823 | [
"MIT"
] | permissive | aiogram/aiogram | f8f98a0beb63bd4d93ea810638d5792569bf354b | 04bd0c9e7c5421c060183b90d515050f41377bc1 | refs/heads/dev-3.x | 2023-08-30T21:20:13.018174 | 2023-08-28T23:01:54 | 2023-08-28T23:01:54 | 111,210,856 | 4,287 | 1,250 | MIT | 2023-09-10T21:34:03 | 2017-11-18T14:11:13 | Python | UTF-8 | Python | false | false | 5,003 | py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Optional, Union
from ..types import (
UNSET_PARSE_MODE,
ForceReply,
InlineKeyboardMarkup,
MessageEntity,
MessageId,
ReplyKeyboardMarkup,
ReplyKeyboardRemove,
)
from ..types.base import UNSET_PROTECT_CONTENT
from .base import TelegramMethod
class CopyMessage(TelegramMethod[MessageId]):
"""
Use this method to copy messages of any kind. Service messages and invoice messages can't be copied. A quiz :class:`aiogram.methods.poll.Poll` can be copied only if the value of the field *correct_option_id* is known to the bot. The method is analogous to the method :class:`aiogram.methods.forward_message.ForwardMessage`, but the copied message doesn't have a link to the original message. Returns the :class:`aiogram.types.message_id.MessageId` of the sent message on success.
Source: https://core.telegram.org/bots/api#copymessage
"""
__returning__ = MessageId
__api_method__ = "copyMessage"
chat_id: Union[int, str]
"""Unique identifier for the target chat or username of the target channel (in the format :code:`@channelusername`)"""
from_chat_id: Union[int, str]
"""Unique identifier for the chat where the original message was sent (or channel username in the format :code:`@channelusername`)"""
message_id: int
"""Message identifier in the chat specified in *from_chat_id*"""
message_thread_id: Optional[int] = None
"""Unique identifier for the target message thread (topic) of the forum; for forum supergroups only"""
caption: Optional[str] = None
"""New caption for media, 0-1024 characters after entities parsing. If not specified, the original caption is kept"""
parse_mode: Optional[str] = UNSET_PARSE_MODE
"""Mode for parsing entities in the new caption. See `formatting options <https://core.telegram.org/bots/api#formatting-options>`_ for more details."""
caption_entities: Optional[List[MessageEntity]] = None
"""A JSON-serialized list of special entities that appear in the new caption, which can be specified instead of *parse_mode*"""
disable_notification: Optional[bool] = None
"""Sends the message `silently <https://telegram.org/blog/channels-2-0#silent-messages>`_. Users will receive a notification with no sound."""
protect_content: Optional[bool] = UNSET_PROTECT_CONTENT
"""Protects the contents of the sent message from forwarding and saving"""
reply_to_message_id: Optional[int] = None
"""If the message is a reply, ID of the original message"""
allow_sending_without_reply: Optional[bool] = None
"""Pass :code:`True` if the message should be sent even if the specified replied-to message is not found"""
reply_markup: Optional[
Union[InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply]
] = None
"""Additional interface options. A JSON-serialized object for an `inline keyboard <https://core.telegram.org/bots/features#inline-keyboards>`_, `custom reply keyboard <https://core.telegram.org/bots/features#keyboards>`_, instructions to remove reply keyboard or to force a reply from the user."""
if TYPE_CHECKING:
# DO NOT EDIT MANUALLY!!!
# This section was auto-generated via `butcher`
def __init__(
__pydantic__self__,
*,
chat_id: Union[int, str],
from_chat_id: Union[int, str],
message_id: int,
message_thread_id: Optional[int] = None,
caption: Optional[str] = None,
parse_mode: Optional[str] = UNSET_PARSE_MODE,
caption_entities: Optional[List[MessageEntity]] = None,
disable_notification: Optional[bool] = None,
protect_content: Optional[bool] = UNSET_PROTECT_CONTENT,
reply_to_message_id: Optional[int] = None,
allow_sending_without_reply: Optional[bool] = None,
reply_markup: Optional[
Union[InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply]
] = None,
**__pydantic_kwargs: Any,
) -> None:
# DO NOT EDIT MANUALLY!!!
# This method was auto-generated via `butcher`
# Is needed only for type checking and IDE support without any additional plugins
super().__init__(
chat_id=chat_id,
from_chat_id=from_chat_id,
message_id=message_id,
message_thread_id=message_thread_id,
caption=caption,
parse_mode=parse_mode,
caption_entities=caption_entities,
disable_notification=disable_notification,
protect_content=protect_content,
reply_to_message_id=reply_to_message_id,
allow_sending_without_reply=allow_sending_without_reply,
reply_markup=reply_markup,
**__pydantic_kwargs,
)
| [
"noreply@github.com"
] | aiogram.noreply@github.com |
34f4866b56d43fe79f5e293b9d8e3990e176d8a2 | c3015c988450974e1ab5ea71eac4baf921d9bde7 | /Day_10/2_combination_sum_II.py | fba8d99e8b244e5f2dae28c6bd98d51c8253cc1b | [] | no_license | anoubhav/30-Day-SDE-Challenge | ce758c450e32d0c353e7ba00aaaf37771fe69d32 | 175edd6aba8a8b45482f485dddfafa43c267246c | refs/heads/master | 2023-01-20T09:54:26.685272 | 2020-11-23T18:39:40 | 2020-11-23T18:39:40 | 286,107,830 | 3 | 2 | null | 2020-10-01T16:51:49 | 2020-08-08T19:35:50 | Python | UTF-8 | Python | false | false | 1,301 | py | # Q: https://leetcode.com/problems/combination-sum-ii/
def combSumBackTrackDFS(candidates, target):
    """Return every unique combination of candidates that sums to target.

    Each candidate may be used at most once; duplicate values in the input are
    collapsed by skipping repeated values at the same recursion depth.
    Time complexity: O(2^N) in the worst case, N = len(candidates).
    Note: candidates is sorted in place, matching the original behaviour.
    """
    candidates.sort()  # duplicates must be adjacent for the skip rule below
    total = len(candidates)
    results = []
    def backtrack(remaining, chosen, start):
        if remaining == 0:
            results.append(list(chosen))
            return
        for idx in range(start, total):
            value = candidates[idx]
            # A value equal to its left neighbour at this depth was already
            # fully explored — skip it to avoid duplicate combinations.
            if idx > start and candidates[idx] == candidates[idx - 1]:
                continue
            if value > remaining:
                break  # sorted input: nothing later can fit either
            # Each number is used at most once, hence idx + 1.
            backtrack(remaining - value, chosen + [value], idx + 1)
    backtrack(target, [], 0)
    return results
# Ad-hoc manual check (LeetCode example 1);
# expected output: [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]].
candidates = [10,1,2,7,6,1,5]
target = 8
# candidates = [2,5,2,1,2]
# target = 5
print(combSumBackTrackDFS(candidates, target))
1, 2, 2, 2, 5 | [
"anoubhav.agarwaal@gmail.com"
] | anoubhav.agarwaal@gmail.com |
13b0b204500bf8394736ff1df09efa7565a20da4 | af55d33a21cda3e3b9fe10224105eef9f97ad8ec | /saseumn/util.py | 864b43487ac5e322bf4c6226fc1b59b7603e91b7 | [
"MIT"
] | permissive | saseumn/website | 7a06a4a6abafe7da0c5afb0fc4ba2226ab7ce0d7 | 34790dc7db0f7bbb1736761c5738e4d74066f518 | refs/heads/master | 2021-01-01T17:24:49.013633 | 2019-04-03T18:06:32 | 2019-04-03T18:06:32 | 98,065,352 | 3 | 1 | MIT | 2019-03-30T23:12:10 | 2017-07-23T00:26:26 | HTML | UTF-8 | Python | false | false | 2,246 | py | import logging
import random
import re
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from functools import wraps
import hashlib
from urllib.parse import urljoin, urlparse
from flask import abort, flash, redirect, request, url_for
from flask_login import current_user
from saseumn.config import Config
VALID_USERNAME = re.compile(r"^[A-Za-z_][A-Za-z\d_]*$")
# decorators
def admin_required(f):
    """View decorator: redirect non-admin (or anonymous) users to the index
    page with a flash message; admins fall through to the wrapped view."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if not (current_user.is_authenticated and current_user.admin):
            flash("You don't have permission to view this page.", "danger")
            return redirect(url_for("base.index"))
        return f(*args, **kwargs)
    return wrapper
def random_string(length=32, alpha="0123456789abcdef"):
    """Generate a random string of *length* characters drawn from *alpha*.

    The default alphabet is the 16 lowercase hex digits; the previous value
    ("012346789abcdef") accidentally omitted "5", so generated strings could
    never contain that digit and were not uniform over hex.

    NOTE(review): this uses the non-cryptographic ``random`` module; prefer
    ``secrets.choice`` for security-sensitive tokens.
    """
    characters = [random.choice(alpha) for _ in range(length)]
    return "".join(characters)
def is_safe_url(target):
    """Return True if *target* resolves to this site's own host over http(s),
    making it safe to use as a redirect destination (open-redirect guard)."""
    ref_url = urlparse(request.host_url)
    # urljoin resolves relative targets against the current host first.
    test_url = urlparse(urljoin(request.host_url, target))
    return test_url.scheme in ("http", "https") and ref_url.netloc == test_url.netloc
def get_redirect_target():
    """Return the first safe redirect target among the "next" request value
    and the Referer header, or None (implicitly) when neither qualifies."""
    for target in request.values.get("next"), request.referrer:
        if not target:
            continue
        if is_safe_url(target):
            return target
def redirect_back(endpoint, **values):
    """Redirect to the URL posted in the "next" form field, falling back to
    url_for(endpoint, **values) when the field is missing or unsafe.

    NOTE(review): when "next" is absent the default is users.profile, not
    *endpoint* — *endpoint* only applies if that default were empty/unsafe.
    Confirm this asymmetry is intended.
    """
    target = request.form.get("next", url_for("users.profile"))
    if not target or not is_safe_url(target):
        target = url_for(endpoint, **values)
    return redirect(target)
def hash_file(file, algorithm=hashlib.sha256):
    """Return the hex digest of a binary file-like object's contents.

    :param file: file-like object opened in binary mode; read to the end
    :param algorithm: hashlib constructor to use (default SHA-256)
    """
    contents = file.read()
    digest = algorithm(contents)
    return digest.hexdigest()
def send_email(recipient, subject, body, from_addr="example@exmaple.org"):
    """Send a plain-text email through Gmail's SMTP relay.

    Does nothing when no credentials are configured.

    Fixes: previously the connection was opened (and STARTTLS begun) before
    the credentials check, and the session was never closed with quit(),
    leaking the socket on every call.

    NOTE(review): the default ``from_addr`` spells "exmaple" — looks like a
    typo; kept as-is for compatibility, confirm with callers before fixing.
    """
    # Check configuration before opening any network connection.
    credentials = Config.get_email_credentials()
    if not credentials:
        return
    server = smtplib.SMTP("smtp.gmail.com", 587)
    try:
        server.starttls()
        server.login(*credentials)
        msg = MIMEMultipart()
        msg["From"] = from_addr
        msg["To"] = recipient
        msg["Subject"] = subject
        msg.attach(MIMEText(body, "plain"))
        server.sendmail(from_addr, recipient, msg.as_string())
    finally:
        # Always terminate the SMTP session, even if sending fails.
        server.quit()
| [
"failed.down@gmail.com"
] | failed.down@gmail.com |
e50d06dfd3630d296a367aa92bf4288a9c9d0649 | 2a2e503746bd2118047d830a3c2b1910ea7a7b0a | /第七章/1.py | 5efb312df1717a83226bf2a913c2d4fd5afedf0a | [] | no_license | hc973591409/python-auto | dabd1bfae7a5f24b49e25bd855ef8456494aa2b8 | c4182d81693ea93d27965c261ad7dffce2cd180a | refs/heads/master | 2020-04-01T10:53:23.490608 | 2018-10-16T12:58:30 | 2018-10-16T12:58:30 | 153,136,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | ?匹配零次或一次前面的分组。可以关闭贪婪模式
*匹配零次或多次前面的分组。
+匹配一次或多次前面的分组。
{n}匹配 n 次前面的分组。
{n,}匹配 n 次或更多前面的分组。
{,m}匹配零次到 m 次前面的分组。
{n,m}匹配至少 n 次、至多 m 次前面的分组。
{n,m}?或*?或+?对前面的分组进行非贪心匹配。
^spam 意味着字符串必须以 spam 开始。
spam$意味着字符串必须以 spam 结束。
.匹配所有字符,换行符除外。
\d、 \w 和\s 分别匹配数字、单词和空格。
\D、 \W 和\S 分别匹配除数字、单词和空格外的所有字符。
[abc]匹配方括号内的任意字符(诸如 a、 b 或 c)。
[^abc]匹配不在方括号内的任意字符。
# Case-insensitive matching: re.I makes the compiled regex object ignore case.
robocop = re.compile(r'robocop', re.I)
"973591409@qq.com"
] | 973591409@qq.com |
2b8ae9922a31196bcf079263540885dd6b9a5baf | c440ae324c8d5487679b066b62e64176487b4f6a | /mysite/mysite/views.py | 19b9b86888396084e221b42691a1a5db89fabcd3 | [] | no_license | gangyou/python_execrise | d19eef8acf9e6565e56b27204184ca018d0e7712 | 32afdd9b45a0ecc6c966471bda0d3e03ac632aea | refs/heads/master | 2021-01-23T05:35:21.659681 | 2014-02-20T01:24:26 | 2014-02-20T01:24:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | from django.http import HttpResponse
from django.shortcuts import render_to_response
import datetime
import MySQLdb
def hello(request):
    """Trivial Django view: respond with a plain "Hello World" body."""
    return HttpResponse("Hello World")
def current_datetime(request):
    """Render the current server-local time into the dateapp template."""
    now = datetime.datetime.now()
    return render_to_response('dateapp/current_datetime.html', {'current_date': now})
def hours_ahead(request, offset):
    """Render the time ``offset`` hours ahead of now.

    ``offset`` arrives as a string captured from the URL; a non-integer
    value yields a 404 instead of crashing the view.
    """
    try:
        offset = int(offset)
    except ValueError:
        # Bug fix: Http404 was referenced without ever being imported in
        # this module, so invalid offsets raised NameError (HTTP 500)
        # instead of a 404. Imported locally to keep the fix self-contained.
        from django.http import Http404
        raise Http404()
    next_time = datetime.datetime.now() + datetime.timedelta(hours=offset)
    return render_to_response('dateapp/hours_ahead.html', locals())
def display_meta(request):
    """Render the request's META mapping (CGI-style variables) as an HTML table.

    Security fix: META contains attacker-controlled header values, so keys
    and values are escaped before interpolation into markup to prevent
    reflected XSS (the previous version inserted them verbatim).
    """
    # saxutils.escape handles &, < and > — sufficient for text inside <td>.
    from xml.sax.saxutils import escape
    rows = []
    for key, value in sorted(request.META.items()):
        rows.append('<tr><td>%s</td><td>%s</td></tr>'
                    % (escape(str(key)), escape(str(value))))
    return HttpResponse('<table>%s</table>' % '\n'.join(rows))
def login(request):
if request.method != 'POST':
raise Http404('Only POSTs are allowed')
try:
m = Memeberr | [
"gangyou@gmail.com"
] | gangyou@gmail.com |
cef1ccebbe861a3c2822fb09619b360df1476b15 | 0af55951ccc6da45001bfdc80dad6a9607334849 | /pyvko/aspects/albums.py | d7b7e36da7beb53194a68a3763e99967e169a589 | [
"MIT"
] | permissive | djachenko/pyvko | a1e73095aaa88abc1786f3a1192b3b8ec9dcf85e | 4d2534bd392d073c9ade0ed7c51d021b1d8f6426 | refs/heads/master | 2022-11-14T05:46:30.038235 | 2022-10-08T15:05:31 | 2022-10-08T15:05:31 | 201,685,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,264 | py | from abc import abstractmethod, ABC
from pathlib import Path
from typing import Dict, List
from vk import API
from pyvko.api_based import ApiMixin, ApiBased
from pyvko.attachment.attachment import Attachment
from pyvko.attachment.photo import Photo
from pyvko.shared.photos_uploader import AlbumPhotoUploader
from pyvko.shared.utils import get_all
class Album(ApiBased, Attachment):
    """A VK photo album: both an API wrapper and a post attachment."""

    def __init__(self, api: API, api_object: dict) -> None:
        """Build an album from a raw photos.getAlbums response item."""
        super().__init__(api)
        self.__name = api_object["title"]
        self.__id = api_object["id"]
        self.__owner_id = api_object["owner_id"]

    @property
    def name(self) -> str:
        """Human-readable album title."""
        return self.__name

    @property
    def id(self) -> int:
        """Album identifier within its owner's scope."""
        return self.__id

    def get_photos(self) -> List[Photo]:
        """Fetch every photo in this album (paginated via get_all)."""
        parameters = self.get_request()
        photos_descriptions = get_all(parameters, self.api.photos.get)
        photos = [Photo(photo_object) for photo_object in photos_descriptions]
        return photos

    def get_request(self, parameters: Dict = None) -> dict:
        """Build request parameters scoped to this album.

        Bug fix: ``parameters.copy()`` was previously called unconditionally,
        so invoking this with the default ``None`` — exactly what
        ``get_photos`` does — raised AttributeError.
        """
        parameters = {} if parameters is None else parameters.copy()
        parameters.update({
            "owner_id": self.__owner_id,
            "album_id": self.__id
        })
        return super().get_request(parameters)

    def set_cover(self, cover: Photo):
        """Make ``cover`` the album's cover photo."""
        request = self.get_request({
            "photo_id": cover.id
        })
        self.api.photos.makeCover(**request)

    def add_photo(self, path: Path) -> Photo:
        """Upload the image at ``path`` into this album and return it.

        Note: the uploader expects a positive community id, hence the
        negation of owner_id (VK communities have negative owner ids).
        """
        uploader = AlbumPhotoUploader(self.api, self.id, -self.__owner_id)
        return uploader.upload(path)

    # region Attachment

    @property
    def type(self) -> str:
        return "album"

    @property
    def owner_id(self) -> int:
        return self.__owner_id

    @property
    def media_id(self) -> int:
        return self.id

    # endregion Attachment
class Albums(ApiMixin, ABC):
    """Album-management mixin for an API-backed owner (user or community)."""
    @property
    @abstractmethod
    def id(self) -> int:
        # Identifier of the owning entity; negative for groups (see create_album).
        pass
    def __get_albums(self, parameters: Dict = None) -> List[Album]:
        # Core fetch: wrap each raw album description in an Album object.
        request = self.__get_owned_request(parameters)
        result = self.api.photos.getAlbums(**request)
        albums = [Album(self.api, album_object) for album_object in result["items"]]
        return albums
    def get_all_albums(self) -> List[Album]:
        """Return every album owned by this entity."""
        return self.__get_albums()
    def get_album_by_id(self, album_id: int) -> Album:
        """Return the single album with ``album_id``; asserts exactly one match."""
        albums_list = self.__get_albums({
            "album_ids": [album_id]
        })
        assert len(albums_list) == 1
        return albums_list[0]
    def create_album(self, name: str) -> Album:
        """Create a new album named ``name`` (admin-only uploads) and return it."""
        parameters = {
            "title": name,
            # photos.createAlbum expects a positive group id, hence abs().
            "group_id": abs(self.id),
            "upload_by_admins_only": 1
        }
        parameters = self.get_request(parameters)
        response = self.api.photos.createAlbum(**parameters)
        created_album = Album(self.api, response)
        return created_album
    def __get_owned_request(self, parameters: Dict = None) -> dict:
        # Add owner_id to a copy of the parameters; callers must not pre-set it.
        if parameters is None:
            parameters = {}
        else:
            parameters = parameters.copy()
        assert "owner_id" not in parameters
        parameters.update({
            "owner_id": self.id
        })
        return self.get_request(parameters)
"i.s.djachenko@gmail.com"
] | i.s.djachenko@gmail.com |
81ae7d5d9cb2b61b86e720254a4097c66638524c | 72a58c62d62210e853ef09fdee65bf6ffb8972bd | /src/lib/telegram/utils/webhookhandler.py | 04fc127aaa8840233bc1901e805cff440e5c8e26 | [
"MIT"
] | permissive | thonkify/thonkify | 93ade2489f20fb80c5e8e27fe67b9b231ada62bd | 2cb4493d796746cb46c8519a100ef3ef128a761a | refs/heads/master | 2023-09-01T00:03:10.398583 | 2018-03-16T09:18:24 | 2018-03-16T09:18:24 | 99,354,595 | 17 | 3 | MIT | 2023-09-05T02:27:42 | 2017-08-04T15:10:50 | Python | UTF-8 | Python | false | false | 4,111 | py | import logging
from telegram import Update
from future.utils import bytes_to_native_str
from threading import Lock
try:
import ujson as json
except ImportError:
import json
try:
import BaseHTTPServer
except ImportError:
import http.server as BaseHTTPServer
logging.getLogger(__name__).addHandler(logging.NullHandler())
class _InvalidPost(Exception):
def __init__(self, http_code):
self.http_code = http_code
super(_InvalidPost, self).__init__()
class WebhookServer(BaseHTTPServer.HTTPServer, object):
    """HTTP server that feeds Telegram webhook updates into an update queue."""

    def __init__(self, server_address, RequestHandlerClass, update_queue, webhook_path, bot):
        super(WebhookServer, self).__init__(server_address, RequestHandlerClass)
        self.logger = logging.getLogger(__name__)
        self.update_queue = update_queue  # consumed by the dispatcher
        self.webhook_path = webhook_path  # only POSTs to this path are accepted
        self.bot = bot
        self.is_running = False
        # server_lock guards serve_forever; shutdown_lock makes shutdown idempotent.
        self.server_lock = Lock()
        self.shutdown_lock = Lock()

    def serve_forever(self, poll_interval=0.5):
        """Block and serve requests until shutdown() is called."""
        with self.server_lock:
            self.is_running = True
            self.logger.debug('Webhook Server started.')
            super(WebhookServer, self).serve_forever(poll_interval)
            self.logger.debug('Webhook Server stopped.')

    def shutdown(self):
        """Stop the server; safe to call when it is not running."""
        with self.shutdown_lock:
            if not self.is_running:
                # Fix: logger.warn is a deprecated alias of logger.warning.
                self.logger.warning('Webhook Server already stopped.')
                return
            else:
                super(WebhookServer, self).shutdown()
                self.is_running = False
# WebhookHandler, process webhook calls
# Based on: https://github.com/eternnoir/pyTelegramBotAPI/blob/master/
# examples/webhook_examples/webhook_cpython_echo_bot.py
class WebhookHandler(BaseHTTPServer.BaseHTTPRequestHandler, object):
    """Request handler that validates webhook POSTs and enqueues parsed Updates."""
    server_version = 'WebhookHandler/1.0'
    def __init__(self, request, client_address, server):
        self.logger = logging.getLogger(__name__)
        super(WebhookHandler, self).__init__(request, client_address, server)
    def do_HEAD(self):
        # Respond 200 to any HEAD (health-check support).
        self.send_response(200)
        self.end_headers()
    def do_GET(self):
        # Respond 200 with an empty body to any GET (health-check support).
        self.send_response(200)
        self.end_headers()
    def do_POST(self):
        """Validate the POST, parse the JSON Update, and put it on the queue."""
        self.logger.debug('Webhook triggered')
        try:
            self._validate_post()
            clen = self._get_content_len()
        except _InvalidPost as e:
            # Reply with the status carried by the exception (403/411).
            self.send_error(e.http_code)
            self.end_headers()
        else:
            buf = self.rfile.read(clen)
            json_string = bytes_to_native_str(buf)
            self.send_response(200)
            self.end_headers()
            self.logger.debug('Webhook received data: ' + json_string)
            update = Update.de_json(json.loads(json_string), self.server.bot)
            self.logger.debug('Received Update with ID %d on Webhook' % update.update_id)
            self.server.update_queue.put(update)
    def _validate_post(self):
        # Reject anything that is not a JSON POST to the configured webhook path.
        if not (self.path == self.server.webhook_path and 'content-type' in self.headers and
                self.headers['content-type'] == 'application/json'):
            raise _InvalidPost(403)
    def _get_content_len(self):
        # Parse and sanity-check Content-Length; 411 = Length Required.
        clen = self.headers.get('content-length')
        if clen is None:
            raise _InvalidPost(411)
        try:
            clen = int(clen)
        except ValueError:
            raise _InvalidPost(403)
        if clen < 0:
            raise _InvalidPost(403)
        return clen
    def log_message(self, format, *args):
        """Log an arbitrary message.
        This is used by all other logging functions.
        It overrides ``BaseHTTPRequestHandler.log_message``, which logs to ``sys.stderr``.
        The first argument, FORMAT, is a format string for the message to be logged. If the format
        string contains any % escapes requiring parameters, they should be specified as subsequent
        arguments (it's just like printf!).
        The client ip is prefixed to every message.
        """
        self.logger.debug("%s - - %s" % (self.address_string(), format % args))
| [
"david.dellsperger@gmail.com"
] | david.dellsperger@gmail.com |
ba0dd254252f4869fbf6ee211fed3184068abb40 | 78ef0d7736075ee33ac4230f47c078bbf2b0e014 | /news/tests/factories.py | 260f2c6c40c3bea038e82e97e4b5815de433bef0 | [
"Apache-2.0"
] | permissive | PyAr/pyarweb | e22e9350bf107329e5a79c2368fb182958a134d2 | 5f88d1ea0cea9bd67547b70dc2c8bbaa3b8b9d03 | refs/heads/master | 2023-08-31T10:24:53.220031 | 2023-08-29T16:21:57 | 2023-08-29T16:21:57 | 17,032,696 | 64 | 108 | Apache-2.0 | 2023-09-07T04:02:53 | 2014-02-20T19:28:31 | Python | UTF-8 | Python | false | false | 757 | py | from factory import SubFactory, Sequence, post_generation
from factory.django import DjangoModelFactory
from events.tests.factories import UserFactory
from news.models import NewsArticle
class NewsArticleFactory(DjangoModelFactory):
    """factory_boy factory producing NewsArticle instances for tests."""
    class Meta:
        model = NewsArticle
    # Each article gets a freshly-created owner and a unique sequential title.
    owner = SubFactory(UserFactory)
    title = Sequence(lambda n: 'news_title_%i' % n)
    @post_generation
    def set_created(obj, create, extracted, **kwargs):
        """
        Update the creation time of the built instance. As it is an auto-generated field, we must
        set its value after creation.
        To use: NewsArticleFactory(set_created='1985-10-26 09:00Z')
        """
        if extracted:
            obj.created = extracted
            obj.save()
| [
"noreply@github.com"
] | PyAr.noreply@github.com |
bdb367c22ca5a5d0cbfd7aeadad6fc5d05cd73de | 38d2ae7fd3ff660704bfeef51087454e6a52191e | /python/prob433/single/prob433.py | 21ef79f73b7c23492a384f457270093fff40d8f0 | [] | no_license | afraenkel/project-euler | 59e8083d4ab3931957e86231636c19ffbc7153d1 | 7db1869f77ca5e5c18f0537b814df2da8175b288 | refs/heads/master | 2020-04-15T17:30:06.586563 | 2016-08-29T22:38:02 | 2016-08-29T22:38:02 | 40,340,384 | 0 | 0 | null | 2015-11-01T09:10:56 | 2015-08-07T04:03:19 | C | UTF-8 | Python | false | false | 1,217 | py |
import itertools as it
# Let E(x0, y0) be the number of steps it takes to determine
# the greatest common divisor of x0 and y0 with Euclid's algorithm.
# Define S(N) as the sum of E(x,y) for 1 ≤ x,y ≤ N.
# We have S(1) = 1, S(10) = 221 and S(100) = 39826.
# Find S(5·10^6).
def E(a, b):
    """Return the number of division steps Euclid's algorithm takes on (a, b)."""
    steps = 0
    while b != 0:
        a, b = b, a % b
        steps += 1
    return steps
# Exploratory attempt at S(N) that walks anti-diagonals of the (row, col)
# grid, exploiting symmetry E(a, b) = E(b, a) to double a partial sum.
# NOTE(review): the derivation is not documented anywhere in this file and
# the author's own comments below say the approach is incomplete — treat the
# formulas here as work-in-progress, not a verified algorithm.
N = 10
# Diagonal lengths 1,1,2,2,3,3,...; columns start at 2.
lens = (x for k in it.count(1) for x in it.repeat(k,2))
cols = it.count(2)
d = 0
for c,l in zip(cols, lens):
    first_row = 2*c - l
    if first_row > N:
        break
    for r in range(first_row, first_row + l):
        if r > N:
            break
        f = (r-c)
        incr = 0
        # Accumulate contributions of scaled pairs (r+incr, c+incr);
        # assumes E(kr, kc) = E(r, c) for k >= 1 — TODO confirm derivation.
        while (r+incr) <= N:
            d += E(r, c)*( (N-c-incr)//(c+incr) )
            incr += f
        d += (N - r + 1) // c
    d += (N-1)
# Double for the symmetric half, then add the diagonal/boundary terms.
d *= 2
d += (N-1)*N//2
d += N
print(d)
# This starts getting slow at n=1000
# Use the fact that:
# (1) E(a,b) = E(b,a) (obvious)
# (2) E(a,b) = E(ka, kb) for all a,b,k (clear from euclid algo)
# above is not enough
# probably compute a bunch of gcd steps at each step using memoizing
def S(n):
    """Brute-force reference: sum of E(x, y) over all 1 <= x, y <= n.

    O(n^2) calls to E — only practical for small n (used to sanity-check
    the faster computation above).
    """
    return sum(E(x, y)
               for x in range(1, n + 1)
               for y in range(1, n + 1))
| [
"aaron.fraenkel@gmail.com"
] | aaron.fraenkel@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.