blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d65940db9f881edfd9ca3bda5bfa15a82458d624 | a5d418868bdc1d81719cd111f2787f2c5137518c | /Django/myshop/myshop/settings.py | 8f554185f4f2edeec5784b269018af5b5b4bd069 | [] | no_license | sohn0356-git/TIL | aa0e89b2d163a342fc99e93b0552b73bf1db9e4a | 9c7447326caed4d25044a0ab559744ffc469924c | refs/heads/master | 2023-07-15T23:33:19.650261 | 2021-08-24T11:04:12 | 2021-08-24T11:04:12 | 327,533,782 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,797 | py | """
Django settings for myshop project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '3ne+(5)$-3skdjds^55sr9(7uaj&4kf9w$hic5#w$%oua4kzp^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Local application for this project.
    'shop.apps.ShopConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myshop.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': [os.path.join(BASE_DIR,'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'myshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
# Custom settings used by the app code: where uploaded images are saved and
# where the file handler configured in LOGGING below writes its log.
UPLOAD_DIR = os.path.join(BASE_DIR,'static/img')
LOG_FILE = os.path.join(BASE_DIR, 'log/mylog.log')
LOGGING = {
    'version': 1,
    # Should the pre-existing logging configuration be disabled?
    'disable_existing_loggers': False,
    # Formatters
    # # A log record is ultimately rendered as text;
    # # these entries define the format of that text.
    # # Multiple formats can be defined.
    'formatters': {
        'format1': {
            'format': '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s',
            'datefmt': '%d/%b/%Y %H:%M:%S'
        },
        'format2': {
            'format': '%(levelname)s %(message)s'
        },
    },
    # Handlers
    # Define what should be done with each log record.
    # Multiple handlers can be defined.
    'handlers': {
        # Write log records as text to a log file (LOG_FILE above).
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': LOG_FILE,
            'formatter': 'format1',
        },
        # Print log records to the console (terminal).
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'format2',
        }
    },
    # Loggers
    # The named entry points that collect log records;
    # each logger is defined by name.
    'loggers': {
        'users': {
            'handlers': ['file'],
            'level': 'DEBUG',
        },
        'items': {
            'handlers': ['console'],
            'level': 'DEBUG',
        }
    },
} | [
"sohn0356@gmail.com"
] | sohn0356@gmail.com |
cb9b5dc4f0168d8f2df193838c989f3a0f42d2f5 | 376b0e12d2a6f89b04f88b55c778d33aca397225 | /schemas/node_types.py | 5f0c4e9aeb3df78cb05532e36e944eea9624ac5d | [] | no_license | junhoyeo/Partial-SNN | 4fd00d78e4d55bba28b96091754ee518a5d1450b | ba93c67617ced22cab156194a1feda2f17b83a40 | refs/heads/master | 2020-11-29T22:31:23.473774 | 2019-12-26T19:18:40 | 2019-12-26T19:18:40 | 230,231,025 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | class NodeTypes:
def __init__(self):
self.INPUT = 0
self.HIDDEN = 1
self.OUTPUT = 2
def get_repr(self, type_id: int):
return ['INPUT', 'HIDDEN', 'OUTPUT'][type_id]
# Module-level singleton so importers can share one NodeTypes instance.
node_types = NodeTypes()
| [
"32605822+JunhoYeo@users.noreply.github.com"
] | 32605822+JunhoYeo@users.noreply.github.com |
c0659028d4ace55fd4c78abd86052a13c0bc6824 | 149b97574e4cfd57d48295fa94b6dacf205a77f4 | /bin/csscombine | e6697d8b6f272c6f3ef025959d7f3cd4fe42f388 | [] | no_license | wraithan/zamboni-lib | e347b2ee1ab2f37e6a30cbcc522e26f2611f3691 | 2a5013afb39a7970d60f161d570dad92238c67bd | refs/heads/master | 2021-01-18T19:46:52.837975 | 2012-09-27T23:59:42 | 2012-09-27T23:59:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | #!/Users/jeff/.dotfiles/.virtualenvs/newz/bin/python2.6
# EASY-INSTALL-ENTRY-SCRIPT: 'cssutils==0.9.7b3','console_scripts','csscombine'
# Auto-generated setuptools console-script stub: it resolves the 'csscombine'
# entry point of the pinned cssutils distribution and exits with its return code.
__requires__ = 'cssutils==0.9.7b3'
import sys
from pkg_resources import load_entry_point
sys.exit(
    load_entry_point('cssutils==0.9.7b3', 'console_scripts', 'csscombine')()
)
| [
"jbalogh@mozilla.com"
] | jbalogh@mozilla.com | |
e32501e632de4af625fe75cc3246a166456f0446 | ab4491a86039d873fb3a3e2d7567f700fc920074 | /r_2_project/r_2_project/settings.py | 5ccc09ad296862851e68288a40ea89c19a19b08d | [] | no_license | cs-fullstack-2019-spring/django-review2-templates-cw-Joshtg1104 | d89c640a129a6c961d9158532ff8fdca61099bc4 | 606df2721967576ec8d5cc91c3630d6b4dd8c761 | refs/heads/master | 2020-04-25T12:15:34.416541 | 2019-03-01T23:39:07 | 2019-03-01T23:39:07 | 172,772,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | """
Django settings for r_2_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; move it to an
# environment variable before deploying.
SECRET_KEY = 'ih1@a1p%hd1(k(y^r7u1a*-=c^tsua+^5fiw@u_ka5e=z$=vf-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    # Local application for this project.
    'r_2_app',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'r_2_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'r_2_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
| [
"joshtg1104@gmail.com"
] | joshtg1104@gmail.com |
5268fe9cb03c5cfbc5cfdb88100e2c8a97791bba | a28a3665af439ad3d9f401d180856b0489341ffd | /variable_poisson_test.py | 1d9a236ce78306a6a9a0229d82431066cd1fe08a | [] | no_license | Jegp/spike_conv_nets | c75f8bfc8f977ed94e4bc8d6d37cd02ac65b5961 | c11b469b6d7896d787c77dca789be26f3d3d98b4 | refs/heads/master | 2023-06-24T03:20:16.378273 | 2021-07-16T17:20:43 | 2021-07-16T17:20:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | import spynnaker8 as sim
import numpy as np
import matplotlib.pyplot as plt
# `sim` is spyNNaker8, imported at the top of the file; 1 ms simulation step.
sim.setup(timestep=1.)
np.random.seed(13)
n_neurons = 10
n_changes = 5
duration = 100.0 # ms
rate = 100.0 # hz
# Build a "staircase" rate matrix: each group of n_per_change neurons fires at
# `rate` during exactly one of the n_changes time windows and is silent otherwise.
rates = np.zeros((n_neurons, n_changes))
n_per_change = n_neurons // n_changes
for i in range(n_changes):
    s = i * n_per_change
    e = s + n_per_change
    rates[s:e, i] = rate
# Every rate change lasts `duration` ms; window i starts at i * duration
# (same schedule for every neuron).
durations = np.ones((n_neurons, n_changes)) * duration
starts = np.repeat([np.arange(n_changes) * duration],
                   n_neurons, axis=0)
pop = sim.Population(n_neurons, # number of sources
                     sim.extra_models.SpikeSourcePoissonVariable,
                     # source type
                     {'rates': rates,
                      'starts': starts,
                      'durations': durations
                      }, # source spike times
                     label="MF",
                     additional_parameters={'seed': 24534}
                     )
pop.record('spikes')
# Run slightly longer than the schedule so the last window completes.
sim_time = duration * n_changes * 1.1
sim.run(sim_time)
neo = pop.get_data()
spikes = neo.segments[0].spiketrains
sim.end()
# print(spikes)
# Raster plot: one row per neuron, a dot per spike time.
plt.figure()
for i, spks in enumerate(spikes):
    plt.plot(spks, i * np.ones_like(spks), '.b', markersize=1.)
print(rates)
plt.show()
| [
"chanokin@gmail.com"
] | chanokin@gmail.com |
4719103af69a4c3cf9138cad6914a95b7cbcab47 | 78be2d943509d2aac34e9de3cb85c6ed95706333 | /ras_backstage/decorators/jwt_decorators.py | b67727f53365ae8a863a49c50c260c62b113edc8 | [] | no_license | uk-gov-mirror/ONSdigital.ras-backstage | 70a1016bc57eba3302c6e53c99be159d7df7bf2d | a173984dddb466210df500046f4e7b11ec4bb679 | refs/heads/master | 2021-09-15T21:47:09.274789 | 2018-06-08T09:33:11 | 2018-06-08T09:33:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | from functools import wraps
import logging
from structlog import wrap_logger
from ras_backstage.exception.exceptions import NoJWTError
logger = wrap_logger(logging.getLogger(__name__))
def get_jwt(request):
    """Decorator factory that extracts the encoded JWT from *request*.

    The decorated function is invoked with the raw ``Authorization`` header
    value prepended to its positional arguments.  If the header is absent
    (or empty), :class:`NoJWTError` is raised instead of calling through.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            token = request.headers.get('Authorization')
            if not token:
                # No usable Authorization header -> refuse the call.
                raise NoJWTError
            return func(token, *args, **kwargs)
        return wrapper
    return decorator
| [
"noreply@github.com"
] | uk-gov-mirror.noreply@github.com |
ce456148ab5569f96da64f5b2b27e3ae1eaa97e8 | 7c0136323c67478932784a46c6fe15483bef50a3 | /gwcs/tags/selectortags.py | e0c73e78a6b0ad3c49106db8d198880d161e1ad5 | [] | no_license | bsipocz/gwcs | e770aef150e722bfcd7eaec5bffc5fbe50f216af | bbc305e88174a11b3d197b82023a72ec99774ae3 | refs/heads/master | 2022-10-10T19:01:00.342140 | 2015-10-08T13:31:29 | 2015-10-08T13:31:29 | 45,719,996 | 0 | 0 | null | 2015-11-07T02:55:54 | 2015-11-07T02:55:54 | null | UTF-8 | Python | false | false | 5,120 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
import numpy as np
from numpy.testing import assert_array_equal
from astropy.modeling import models
from astropy.utils import OrderedDict
from astropy.utils.misc import isiterable
from pyasdf import yamlutil
from pyasdf.tags.transform.basic import TransformType
from pyasdf.tags.core.ndarray import NDArrayType
from ..selector import *
__all__ = ['LabelMapperType', 'RegionsSelectorType']
class LabelMapperType(TransformType):
    """ASDF (de)serializer for the gwcs label-mapper transforms.

    Handles three mapper flavors: an array mask (``LabelMapperArray``) and
    label->model mappings (``LabelMapperDict`` / ``LabelMapperRange``).
    """
    name = "transform/label_mapper"
    types = [LabelMapperArray, LabelMapperDict, LabelMapperRange]
    @classmethod
    def from_tree_transform(cls, node, ctx):
        """Reconstruct a label-mapper object from its ASDF tree *node*."""
        inputs_mapping = node.get('inputs_mapping', None)
        if inputs_mapping is not None and not isinstance(inputs_mapping, models.Mapping):
            # BUG FIX: the two adjacent literals previously concatenated to
            # "...an instanceof astropy..." (missing space).
            raise TypeError("inputs_mapping must be an instance "
                            "of astropy.modeling.models.Mapping.")
        mapper = node['mapper']
        if isinstance(mapper, NDArrayType):
            # Array form: a 2-D label mask.
            if mapper.ndim != 2:
                raise NotImplementedError(
                    "GWCS currently only supports 2x2 masks ")
            return LabelMapperArray(mapper, inputs_mapping)
        else:
            # Dict form: parallel 'labels' / 'models' lists zipped into a dict.
            inputs = node.get('inputs', None)
            if inputs is not None:
                inputs = tuple(inputs)
            labels = mapper.get('labels')
            # Compound labels are stored as lists; dict keys must be hashable.
            if isiterable(labels[0]):
                labels = [tuple(l) for l in labels]
            transforms = mapper.get('models')
            dict_mapper = dict(zip(labels, transforms))
            return LabelMapperDict(inputs, dict_mapper, inputs_mapping)
    @classmethod
    def to_tree_transform(cls, model, ctx):
        """Serialize *model* into an ASDF-compatible tagged tree."""
        node = OrderedDict()
        if isinstance(model, LabelMapperArray):
            node['mapper'] = model.mapper
        if isinstance(model, (LabelMapperDict, LabelMapperRange)):
            mapper = OrderedDict()
            labels = list(model.mapper)
            transforms = []
            for k in labels:
                transforms.append(model.mapper[k])
            # Tuples are not YAML-native; store compound labels as lists.
            if isiterable(labels[0]):
                labels = [list(l) for l in labels]
            mapper['labels'] = labels
            mapper['models'] = transforms
            node['mapper'] = mapper
            node['inputs'] = list(model.inputs)
            if model.inputs_mapping is not None:
                node['inputs_mapping'] = model.inputs_mapping
        return yamlutil.custom_tree_to_tagged_tree(node, ctx)
    @classmethod
    def assert_equal(cls, a, b):
        """Assert two label mappers are equivalent (used by round-trip tests)."""
        # TODO: If models become comparable themselves, remove this.
        assert (a.__class__ == b.__class__)
        if isinstance(a.mapper, dict):
            assert(a.mapper.__class__ == b.mapper.__class__)
            assert(all(np.in1d(list(a.mapper), list(b.mapper))))
            for k in a.mapper:
                assert (a.mapper[k].__class__ == b.mapper[k].__class__)
                assert(all(a.mapper[k].parameters == b.mapper[k].parameters))
            assert (a.inputs == b.inputs)
            assert (a.inputs_mapping.mapping == b.inputs_mapping.mapping)
        else:
            assert_array_equal(a.mapper, b.mapper)
class RegionsSelectorType(TransformType):
    """ASDF (de)serializer for the gwcs ``RegionsSelector`` transform."""
    name = "transform/regions_selector"
    types = [RegionsSelector]
    @classmethod
    def from_tree_transform(cls, node, ctx):
        """Rebuild a RegionsSelector from its ASDF tree *node*."""
        inputs = node['inputs']
        outputs = node['outputs']
        label_mapper = node['label_mapper']
        undefined_transform_value = node['undefined_transform_value']
        sel = node['selector']
        # 'labels' and 'transforms' are stored as parallel lists; zip them
        # back into the label -> transform mapping.
        sel = dict(zip(sel['labels'], sel['transforms']))
        return RegionsSelector(inputs, outputs,
                               sel, label_mapper, undefined_transform_value)
    @classmethod
    def to_tree_transform(cls, model, ctx):
        """Serialize *model* into an ASDF-compatible tagged tree."""
        selector = OrderedDict()
        node = OrderedDict()
        # Flatten the selector dict into parallel label/transform lists,
        # preserving the iteration order of the mapping.
        labels = list(model.selector)
        values = []
        for l in labels:
            values.append(model.selector[l])
        selector['labels'] = labels
        selector['transforms'] = values
        node['inputs']= list(model.inputs)
        node['outputs'] = list(model.outputs)
        node['selector'] = selector
        node['label_mapper'] = model.label_mapper
        node['undefined_transform_value'] = model.undefined_transform_value
        return yamlutil.custom_tree_to_tagged_tree(node, ctx)
    @classmethod
    def assert_equal(cls, a, b):
        """Assert two RegionsSelectors are equivalent (round-trip tests)."""
        # TODO: If models become comparable themselves, remove this.
        assert (a.__class__ == b.__class__)
        LabelMapperType.assert_equal(a.label_mapper, b.label_mapper)
        assert_array_equal(a.inputs, b.inputs)
        assert_array_equal(a.outputs, b.outputs)
        assert_array_equal(a.selector.keys(), b.selector.keys())
        for key in a.selector:
            assert_array_equal(a.selector[key].parameters, b.selector[key].parameters)
        assert_array_equal(a.undefined_transform_value, b.undefined_transform_value)
| [
"nadia.dencheva@gmail.com"
] | nadia.dencheva@gmail.com |
7668efd69f8bf91a8ac6079a8bb02e84d2b62e28 | 673e829dda9583c8dd2ac8d958ba1dc304bffeaf | /data/multilingual/Hebr.HEB/Sun-ExtA_16/pdf_to_json_test_Hebr.HEB_Sun-ExtA_16.py | cce0ed95727e1941254c68573ed061d48457f1e6 | [
"BSD-3-Clause"
] | permissive | antoinecarme/pdf_to_json_tests | 58bab9f6ba263531e69f793233ddc4d33b783b7e | d57a024fde862e698d916a1178f285883d7a3b2f | refs/heads/master | 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | import pdf_to_json as p2j
import json
# Convert one UDHR sample PDF (Hebrew script, Sun-ExtA font, 16pt) to JSON,
# keeping only image hashes, and pretty-print the result.
url = "file:data/multilingual/Hebr.HEB/Sun-ExtA_16/udhr_Hebr.HEB_Sun-ExtA_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
db1921d3478d2e8cde389c8155bd8a9d47b41177 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_8/hrdtim002/question1.py | 807ac77ab8cac575e7d4a6c43d33adaec2f0a04a | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | """program to check for palindromes using recursion
Tim Hardie
9 May 2014"""
def check_palin(str_input):
    """Recursively test *str_input* and print the palindrome verdict.

    Compares the outermost characters and recurses on the interior.
    Prints "Palindrome!" or "Not a palindrome!" (return value is None).
    """
    if len(str_input) > 1:
        if str_input[0] == str_input[-1]:
            # Outer pair matches: verdict depends on the inner substring.
            check_palin(str_input[1:-1])
        else:
            print("Not a palindrome!")
    else:
        # BUG FIX: 0 or 1 characters remain, so every pair matched.  The
        # original only handled len == 1, so even-length palindromes such as
        # "abba" recursed down to "" and printed nothing at all.
        print("Palindrome!")
# Script entry point: read one line from the user and report the verdict.
if __name__ == '__main__':
    str_input = input ("Enter a string:\n")
    check_palin (str_input)
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
40076b43779f6aebbd369738ef11e6d0f116568c | fa44abffdfe00e5a44fffe6610dce25a325ee93e | /instagram_clone/images/serializers.py | dd542db9b1a7c9869aac9e439faf07891b3b29bf | [
"MIT"
] | permissive | devjinius/IG_clone | e539e44318cdf9baf5137057a0c671e8748c36bf | 6a525524ec357d5617b09e013b449df2ec9336ad | refs/heads/master | 2020-03-21T08:55:23.803771 | 2018-12-07T13:55:25 | 2018-12-07T13:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,015 | py | # python object를 json으로 변형해주는 것이 serializers
from rest_framework import serializers
from . import models
from instagram_clone.users import models as user_model
from taggit_serializer.serializers import (TagListSerializerField, TaggitSerializer)
class SmallImageSerializer(serializers.ModelSerializer):
    """Minimal Image representation: just the file field (e.g. thumbnails)."""
    class Meta:
        model = models.Image
        fields = (
            'file',
        )
class UserProfileImageSerializer(serializers.ModelSerializer):
    """Image card for a profile grid: file plus comment/like counts."""
    class Meta:
        model = models.Image
        fields = (
            'id',
            'file',
            'comments_count',
            'likes_count'
        )
class FeedUserSerializer(serializers.ModelSerializer):
    """Compact user representation embedded in feed items and comments."""
    class Meta:
        model = user_model.User
        fields = (
            'username',
            'profile_image'
        )
class CommentSerializer(serializers.ModelSerializer):
    """Comment with its author nested read-only via FeedUserSerializer."""
    creator = FeedUserSerializer(read_only=True)
    class Meta:
        model = models.Comment
        fields = (
            'id',
            'message',
            'creator',
            'image'
        )
class LikeSerializer(serializers.ModelSerializer):
    """Full Like representation (all model fields)."""
    class Meta:
        model = models.Like
        fields = '__all__'
class ImageSerializer(TaggitSerializer, serializers.ModelSerializer):
    """Full feed representation of an Image: nested comments, creator and tags."""
    comments = CommentSerializer(many=True)
    creator = FeedUserSerializer()
    tags = TagListSerializerField()
    class Meta:
        model = models.Image
        fields = (
            'id',
            'file',
            'location',
            'caption',
            'created_at',
            'likes_count',
            'creator',
            'tags',
            # hidden field -> the comments that belong to this image
            # (the default reverse accessor would be comment_set)
            'comments'
        )
class InputImageSerializer(serializers.ModelSerializer):
    """Write-side serializer for creating/updating an Image upload."""
    class Meta:
        model = models.Image
        fields = (
            'file',
            'location',
            'caption',
        )
| [
"eugenekang94@gmail.com"
] | eugenekang94@gmail.com |
48c9e3c5942973b2fbc895ea878a1b2edfe51d77 | d9b41609de019c5d01ee12f4be77910d4ea0c1aa | /neurodsp/tests/test_sim_periodic.py | 52aac2a11f420c304e83a22a3381bf32e7a332c2 | [
"Apache-2.0"
] | permissive | josepfont65/neurodsp | ec2dcefd605baab62ee42f0589671467b0b331c9 | a7c5b72665eed6368e29bf4f15443a28a2e18732 | refs/heads/master | 2020-12-04T14:53:10.230006 | 2019-12-19T18:10:18 | 2019-12-19T18:10:18 | 231,808,080 | 1 | 0 | Apache-2.0 | 2020-01-04T18:15:34 | 2020-01-04T18:15:33 | null | UTF-8 | Python | false | false | 762 | py | """Test periodic simulation functions."""
from neurodsp.tests.utils import check_sim_output
from neurodsp.tests.settings import FS, N_SECONDS, FREQ1
from neurodsp.sim.periodic import *
from neurodsp.sim.periodic import _make_is_osc
###################################################################################################
###################################################################################################
def test_sim_oscillation():
    """Smoke-test sim_oscillation: output must be a well-formed signal."""
    sig = sim_oscillation(N_SECONDS, FS, FREQ1)
    check_sim_output(sig)
def test_sim_bursty_oscillation():
    """Smoke-test sim_bursty_oscillation: output must be a well-formed signal."""
    sig = sim_bursty_oscillation(N_SECONDS, FS, FREQ1)
    check_sim_output(sig)
def test_make_is_osc():
    """_make_is_osc should yield a boolean oscillation-presence mask."""
    is_osc = _make_is_osc(10, 0.5, 0.5)
    assert isinstance(is_osc[0], bool)
| [
"tdonoghue@ucsd.edu"
] | tdonoghue@ucsd.edu |
8de5f962dcb452320096dc21a032ba26b2db1a16 | 368be25e37bafa8cc795f7c9f34e4585e017091f | /.history/app_fav_books/views_20201115150328.py | 3fc74d4417b8348ea09c322a0e561a86bd596edf | [] | no_license | steven-halla/fav_books_proj | ebcfbfda0e7f3cdc49d592c86c633b1d331da513 | 512005deb84ac906c9f24d4ab0939bd0db096716 | refs/heads/master | 2023-03-30T09:37:38.016063 | 2021-04-02T20:27:22 | 2021-04-02T20:27:22 | 354,125,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,171 | py | from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
# contains user signup + login form
def view_index(request):
    """Render the landing page that holds the signup and login forms."""
    return render(request, "index.html")
# user signup form will post to a url (/register) which maps to this function
def register_new_user(request):
    """Handle the signup POST: validate, create the user, log them in.

    On validation failure, flashes each error and returns to "/".
    On success, stores the new user's id in the session and goes to /books.
    NOTE(review): the password is stored as submitted (no hashing visible
    here) — consider bcrypt before this goes anywhere near production.
    """
    # returns a dictionary of errors.
    # e.g. errors['first_name'] = 'letters only'
    errors = User.objects.user_registration_validator(request.POST)
    # iterate over each error (key/value) pair in the errors dictionary
    # and take the error key and value and makes a full error message,
    # and then adds the error message via messages.error()
    if len(errors) > 0:
        for key, value in errors.items():
            error_msg = key + ' - ' + value
            messages.error(request, error_msg)
        return redirect("/")
    else:
        first_name_from_post = request.POST['first_name']
        last_name_from_post = request.POST['last_name']
        email_from_post = request.POST['email']
        password_from_post = request.POST['password']
        new_user = User.objects.create(
            first_name=first_name_from_post,
            last_name=last_name_from_post,
            email=email_from_post,
            password=password_from_post
        )
        print(new_user.id)
        request.session['user_id'] = new_user.id
        return redirect('/books')
def login(request):
    """Handle the login POST: match email + password, start a session.

    NOTE(review): passwords are compared in plaintext here, mirroring how
    register_new_user stores them — both sides should move to hashing.
    """
    # user did provide email/password, now lets check database
    email_from_post = request.POST['email']
    password_from_post = request.POST['password']
    # this will return all users that have the email_from_post
    # in future we should require email to be unique
    users = User.objects.filter(email=email_from_post)
    if len(users) == 0:
        messages.error(request, "email/password does not exist")
        return redirect("/")
    user = users[0]
    print(user)
    # check that the user submitted password is the same as what we have stored in the database
    if (user.password != password_from_post):
        messages.error(request, "email/password does not exist")
        return redirect("/")
    # we store the logged in user's id in the session variable,
    # so that we can quickly get the current logged in user's id any time we need it
    # in back end functions. e.g.
    request.session['user_id'] = user.id
    return redirect("/books")
def logout(request):
    """Empty the session (dropping user_id) and return to the landing page."""
    request.session.clear()
    return redirect("/")
# this will render view_books.html page.
# this page will show a list of all the books and the current logged in user.
def view_books(request):
    """Show all books plus the current user; bounce to "/" if not logged in."""
    if 'user_id' not in request.session:
        return redirect("/")
    user = User.objects.get(id=request.session['user_id'])
    all_books_from_db = Books.objects.all()
    context = {
        "user": user,
        "all_books": all_books_from_db
    }
    return render(request, "view_books.html", context)
# this will render view_book.html page.
# this page will show a single book and the current logged in user.
def view_book(request, book_id):
    """Show a single book (by URL id) plus the current logged-in user."""
    if 'user_id' not in request.session:
        return redirect("/")
    user = User.objects.get(id=request.session['user_id'])
    book_from_db = Books.objects.get(id=book_id)
    context = {
        "user": user,
        "book": book_from_db
    }
    return render(request, "view_book.html", context)
# adds new book to database that you like
def add_book(request):
    """Validate the submitted book form and create the Books row on success.

    Validation failures are flashed through the messages framework; on
    success the new book's id is stashed in the session.  Either way the
    user is redirected to /register/success.
    """
    errors = Books.objects.add_book_validator(request.POST)
    print(errors)  # debug output kept from the original
    if len(errors) > 0:
        for key, value in errors.items():
            error_msg = key + ' - ' + value
            messages.error(request, error_msg)
        # BUG FIX: was redirect("register/success") (no leading slash), a
        # relative URL the browser resolves against the current path; use
        # the absolute path like every other redirect in this module.
        return redirect("/register/success")
    else:
        title_from_post = request.POST['title']
        description_from_post = request.POST['desc']
        book = Books.objects.create(
            title=title_from_post,
            desc=description_from_post,
            uploaded_by_id=request.session['user_id']
        )
        print(book.id)  # debug output kept from the original
        request.session['book_id'] = book.id
        return redirect("/register/success")
| [
"69405488+steven-halla@users.noreply.github.com"
] | 69405488+steven-halla@users.noreply.github.com |
fe774970749bfe5c171952c529eb9429f330e2d3 | 685fa2cb16ff8bce96b603dee8117ed3e9a1adcb | /keras-gradient-tape/gradient_tape_example.py | b740ec82a1cbb9d6cc6e523a6e2e19689565d22d | [] | no_license | albertofernandezvillan/pyimagesearch | 352ec1ec678cb628524c476fdcc86c22238a1a2f | 8c87e6c5d218e42a8864778c032c0fd20261bcdd | refs/heads/master | 2023-02-27T22:02:25.581660 | 2021-02-08T15:15:15 | 2021-02-08T15:15:15 | 338,087,397 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,820 | py | # -----------------------------
# USAGE
# -----------------------------
# python gradient_tape_example.py
# -----------------------------
# IMPORTS
# -----------------------------
# Import the necessary packages
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import mnist
import tensorflow as tf
import numpy as np
import time
import sys
# -----------------------------
# FUNCTIONS
# -----------------------------
def build_model(width, height, depth, classes):
    """Assemble the small VGG-style CNN used for MNIST classification.

    Args:
        width/height/depth: input image dimensions (channels-last ordering).
        classes: number of output classes for the softmax head.
    Returns:
        An uncompiled Keras Sequential model.
    """
    input_shape = (height, width, depth)
    chan_dim = -1  # channels-last, so BatchNormalization acts on the last axis

    model = Sequential()
    # Stage 1: CONV => RELU => BN => POOL
    model.add(Conv2D(16, (3, 3), padding="same", input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chan_dim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Stage 2: (CONV => RELU => BN) * 2 => POOL
    for _ in range(2):
        model.add(Conv2D(32, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Stage 3: (CONV => RELU => BN) * 3 => POOL
    for _ in range(3):
        model.add(Conv2D(64, (3, 3), padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(axis=chan_dim))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Head: single FC => RELU => BN => Dropout, then softmax classifier
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation("relu"))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(classes))
    model.add(Activation("softmax"))
    # Hand the assembled model back to the caller
    return model
def step(X, y):
    """Run one gradient-descent step on a single batch.

    Relies on the module-level `model` and `opt` defined below; `X` is a
    batch of images and `y` the matching one-hot labels.
    """
    # Keep track of our gradients
    with tf.GradientTape() as tape:
        # Make a prediction using the model and then calculate the loss
        pred = model(X)
        loss = categorical_crossentropy(y, pred)
    # Calculate the gradients using our tape and then update the model weights
    grads = tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(grads, model.trainable_variables))
# Initialize the number of epochs to train for, batch size, and the initial learning rate
EPOCHS = 25
BS = 64
INIT_LR = 1e-3
# Load the MNIST dataset
print("[INFO] Loading MNIST dataset...")
((trainX, trainY), (testX, testY)) = mnist.load_data()
# Add a channel dimension to every image in the dataset, then scale the pixel intensities to the range [0, 1]
trainX = np.expand_dims(trainX, axis=-1)
testX = np.expand_dims(testX, axis=-1)
trainX = trainX.astype("float32") / 255.0
testX = testX.astype("float32") / 255.0
# One-hot encode the labels
trainY = to_categorical(trainY, 10)
testY = to_categorical(testY, 10)
# Build the model and initialize the optimizer
print("[INFO] Creating model...")
model = build_model(28, 28, 1, 10)
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
# Compute the number of batch updates per epoch
# NOTE(review): integer division drops the trailing partial batch, so up to
# BS-1 samples are skipped each epoch.
numUpdates = int(trainX.shape[0] / BS)
# loop over the number of epochs
for epoch in range(0, EPOCHS):
    # show the current epoch number
    print("[INFO] Starting epoch {}/{}...".format(epoch + 1, EPOCHS), end="")
    sys.stdout.flush()
    epochStart = time.time()
    # Loop over the data in batch size increments
    for i in range(0, numUpdates):
        # Determine starting and ending slice indexes for the current batch
        start = i * BS
        end = start + BS
        # Take a step
        step(trainX[start:end], trainY[start:end])
    # Show timing information for the epoch
    epochEnd = time.time()
    elapsed = (epochEnd - epochStart) / 60.0
    print("Took {:.4} minutes".format(elapsed))
# In order to calculate accuracy using Keras' functions we first need to compile the model
model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=["acc"])
# Now that the model is compiled we can measure the accuracy
(loss, acc) = model.evaluate(testX, testY)
print("[INFO] Test accuracy: {:.4f}".format(acc)) | [
"silva.mfpedro@gmail.com"
] | silva.mfpedro@gmail.com |
d30004da33c7f1178e968db895dfb299855397aa | a39ed5db6c75c9ae1f5e05118794c64102dc5f7a | /2021/15_2/solution_test.py | 747664ac87c240e1768aa50e9a615c2466bfabda | [
"MIT"
] | permissive | budavariam/advent_of_code | b656d5caf5d05113b82357754eb225e61e89ac0d | 635be485ec691f9c0cdeb83f944de190f51c1ba3 | refs/heads/master | 2022-12-25T18:12:00.981365 | 2022-12-20T08:20:51 | 2022-12-20T08:20:51 | 114,570,426 | 1 | 1 | MIT | 2022-12-09T09:29:06 | 2017-12-17T21:36:00 | Python | UTF-8 | Python | false | false | 426 | py | """ Advent of code 2021 day 15 / 2 """
import unittest
from solution import solution
class MyTest(unittest.TestCase):
    """Unit tests for Advent of Code 2021, day 15 part 2."""

    def test_basic(self):
        """The example risk grid from the puzzle statement scores 315."""
        grid = (
            "1163751742\n"
            "1381373672\n"
            "2136511328\n"
            "3694931569\n"
            "7463417111\n"
            "1319128137\n"
            "1359912421\n"
            "3125421639\n"
            "1293138521\n"
            "2311944581"
        )
        self.assertEqual(solution(grid), 315)
if __name__ == '__main__':
    # Allow running this file directly: python solution_test.py
    unittest.main()
| [
"budavariam@gmail.com"
] | budavariam@gmail.com |
a9e82798443b853ac00c1ea8d220208411f9df71 | 30dfa9cea355bcfa7b2eb8210aa8326cbf9dcb97 | /code/pysubs/old_progs/rot_matrix_mini.py | b02d984e9f6b2f96a59215aa7f3993ec105aaa2d | [] | no_license | craiglagegit/Bullet_Cluster | e96e9088f2cf90db54ad1e756cdd28c7a2609b4a | 685555c137d726a86ad02a968e1442ab0c6123f0 | refs/heads/master | 2021-04-29T18:56:08.548631 | 2018-02-16T15:42:39 | 2018-02-16T15:42:39 | 121,702,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | #!/usr/bin/env python
#Author: Craig Lage, NYU;
#Date: 9-Sep-11
#This program looks at a matrix of rotation angles on a databse with an IP of 4 kpc
import lageconfig # system specific path information
import sys
sys.path.append(lageconfig.bulletpath)
import pysubs_mhd_new as pysubs
from pylab import *
#****************MAIN PROGRAM*****************
cmd=sys.argv
toppath=lageconfig.toppath
dir='./'
# Snapshot range to scan comes from the command line.
snapmin=int(cmd[1])
snapmax=int(cmd[2])
counter=1
psi=0.0
filename='fomrot.out'
# Sweep a small grid of viewing angles (theta, phi) and record the best
# figure-of-merit (FOM) found for each orientation.
for theta in [2.55,2.65,2.75]:
    for phi in [3.09,3.14,3.19]:
        [besttime,bestfom]=pysubs.FindFom(toppath,dir,snapmin,snapmax,counter,phi,theta,psi,simulator='Enzo',PlotSuffix=None,IncludeTemp=False)
        result='Phi = %.4f, Theta = %.4f, FOM=%.4f at time = %.4f\n'%(phi,theta,bestfom,besttime)
        # Bug fix: the original evaluated `fomfile.close` without calling it,
        # so the handle was never closed. A with-block guarantees the line is
        # flushed and the file closed after every iteration.
        with open(filename,'a') as fomfile:
            fomfile.write(result)
#************END MAIN PROGRAM*************************
| [
"cslage@ucdavis.edu"
] | cslage@ucdavis.edu |
65942f14d80a42dcc872cdf06b41d8caabfbd5ee | c3ff891e0e23c5f9488508d30349259cc6b64b4d | /python练习/网络编程练习/线程/03-线程的执行顺序.py | 7f9ef178bb1cef9263c6c5ce775b3b287d6fd41d | [] | no_license | JacksonMike/python_exercise | 2af2b8913ec8aded8a17a98aaa0fc9c6ccd7ba53 | 7698f8ce260439abb3cbdf478586fa1888791a61 | refs/heads/master | 2020-07-14T18:16:39.265372 | 2019-08-30T11:56:29 | 2019-08-30T11:56:29 | 205,370,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | import threading
import time
class MyThread(threading.Thread):
    """Worker thread that announces its name three times, one second apart."""

    def run(self):
        for step in range(3):
            time.sleep(1)
            print("{}@{}".format(self.name, step))
def main():
    """Spawn five worker threads; the scheduler decides their print order."""
    for _ in range(5):
        MyThread().start()
if __name__ == '__main__':
    # Demonstrates that output interleaving follows OS scheduling,
    # not thread creation order.
    main()
# Thread execution order depends on the OS scheduler, not on thread creation order | [
"2101706902@qq.com"
] | 2101706902@qq.com |
19272508e23d6510681cf94702628ad535eae7e4 | 4581a822e8b41b0d8497a41745a45270f2336fb3 | /palettes/grey_60.py | 7993c773e76cb18cf874c47a144bae957bcf129b | [] | no_license | jw-git-examples/formatting-conflicts-limestone | 415564ba269c2f953a54e02532f9ffde7db9d9e9 | 068240820eafa1f7aaf16c40273366dd25953dd7 | refs/heads/master | 2021-05-16T21:34:32.634146 | 2020-03-27T17:15:38 | 2020-03-27T17:31:03 | 250,478,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | import common_color_config
# average JetBrains, VScode and Sublime bg lightness; moderate contrast
name = "Monochrome grey 60"
# Three-component tuples consumed by common_color_config. These are not RGB
# greys; judging by shades_lightness below, the first component is presumably
# a lightness value — TODO confirm the color model in common_color_config.
background = (15, 0, 0)
foreground = (75, 0, 0)
# Shade ramp shared across palettes; defined in common_color_config.
shades = common_color_config.shades_lightness
| [
"jan.warchol@gmail.com"
] | jan.warchol@gmail.com |
fc791e81ad12dd63c10cdac9b0efc0ac8b0dd4ad | bc4656f6f74911f114626538294e0e275105c703 | /tests/elftosb/test_elftosb_trustzone.py | bf3c4b9348c394655269166b2e2d3b44324beb3a | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | AdrianCano-01/spsdk | d8679ae58fc67c6369bceff4b31db658d9ad6bc4 | 4a31fb091f95fb035bc66241ee4e02dabb580072 | refs/heads/master | 2023-03-15T00:37:07.419191 | 2021-03-05T16:33:50 | 2021-03-05T16:33:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2020 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
"""Test Trustzone part of elftosb app."""
import os
import filecmp
from click.testing import CliRunner
from spsdk.apps import elftosb
from spsdk.utils.misc import use_working_directory
def test_elftosb_trustzone_basic(data_dir, tmpdir):
    """Run elftosb with a TrustZone config and verify the produced binary."""
    cli = CliRunner()
    with use_working_directory(tmpdir):
        cmd = f"--tzm-conf {data_dir}/lpc55xxA1.json"
        result = cli.invoke(elftosb.main, cmd.split())
    produced = f"{tmpdir}/lpc55xxA1_tzFile.bin"
    assert os.path.isfile(produced)
    assert filecmp.cmp(f"{data_dir}/lpc55xxA1_tzFile.bin", produced)
"maria.wisniewska@nxp.com"
] | maria.wisniewska@nxp.com |
a6cb9dc023f493c0851a89646cbed15ce944c1a3 | 5a1eff241b9e9c2504fcc6762899d90993dcec3a | /aliyun/service/aliVpc.py | d429c550918de73afa006336452843c07f6fcbb9 | [] | no_license | P79N6A/AutoCmdb | 58cfabae9c5955427a51c291056b1a6c1a224801 | d0afe1ec1f7122b4ddc7d5dcb70627190e0a10e0 | refs/heads/master | 2020-04-13T04:42:54.346089 | 2018-12-24T08:27:34 | 2018-12-24T08:30:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,993 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@author: jerry
@contact: wangqiyuan@blhcn.com
@file: aliVpc.py
@time: 2018/11/27 10:27
@desc:
'''
import json
import os
import wrapt
from aliyunsdkcore import client
from aliyunsdkvpc.request.v20160428 import DescribeVpcsRequest,DescribeVpcAttributeRequest,DescribeVRoutersRequest, \
DescribeRouteTableListRequest,DescribeRouteTablesRequest,DescribeVSwitchesRequest,DescribeVSwitchAttributesRequest,\
DescribeCommonBandwidthPackagesRequest,DescribeEipAddressesRequest
from aliyun.service import othHandler
class aliVpc:
    """Query helpers around the Alibaba Cloud (Aliyun) VPC OpenAPI.

    Each method builds a Describe* request object; the othHandler.ali_wrap
    decorator appears to execute it against self.clt. Methods that return a
    ``(request, param)`` pair hand the wrapper an extra parameter dict —
    confirm the exact contract in othHandler.
    """

    def __init__(self, accessKey, accessSecret, RegionId):
        self.accessKey = accessKey
        self.accessSecret = accessSecret
        self.RegionId = RegionId
        # One API client per instance, bound to the given region.
        self.clt = client.AcsClient(self.accessKey, self.accessSecret, self.RegionId)

    @othHandler.ali_wrap
    def vpc_list(self, VpcId='', IsDefault=True, PageNumber=1, PageSize=10):
        """List VPCs, optionally restricted to a single VPC id.

        :param VpcId: restrict to one VPC when non-empty.
        :param IsDefault: whether to query default VPCs.
        :param PageNumber: 1-based page index.
        :param PageSize: rows per page.
        """
        request = DescribeVpcsRequest.DescribeVpcsRequest()
        if VpcId:
            request.set_VpcId(VpcId)
        request.set_IsDefault(IsDefault)
        request.set_PageNumber(PageNumber)
        request.set_PageSize(PageSize)
        return request

    @othHandler.ali_wrap
    def get_vpc(self, VpcId, IsDefault=True):
        """Query the configuration of one VPC.

        :param VpcId: id of the VPC to inspect.
        :param IsDefault: False = not the default VPC, True = default VPC.
        """
        request = DescribeVpcAttributeRequest.DescribeVpcAttributeRequest()
        request.set_VpcId(VpcId)
        request.set_IsDefault(IsDefault)
        return request

    @othHandler.ali_wrap
    def get_v_routers(self, VRouterId='', PageNumber=1, PageSize=10):
        """List the vRouters of the configured region.

        :param VRouterId: restrict to one router id when non-empty.
        :param PageNumber: 1-based page index (default 1).
        :param PageSize: rows per page, max 50 (default 10).
        """
        request = DescribeVRoutersRequest.DescribeVRoutersRequest()
        if VRouterId:
            request.set_VRouterId(VRouterId)
        request.set_PageNumber(PageNumber)
        request.set_PageSize(PageSize)
        return request

    @othHandler.ali_wrap
    def get_route_list(self, RouterType="VRouter", RouterId='', VpcId='', RouteTableId='', RouteTableName='', PageNumber=1, PageSize=10):
        """List route tables.

        :param RouterType: "VRouter" (default) or "VBR" (border router).
        :param RouterId: id of the owning router.
        :param VpcId: id of the owning VPC router.
        :param RouteTableId: id of a specific route table.
        :param RouteTableName: name of a specific route table.
        :param PageNumber: 1-based page index (default 1).
        :param PageSize: rows per page, max 50 (default 10).
        """
        request = DescribeRouteTableListRequest.DescribeRouteTableListRequest()
        request.set_RouterType(RouterType)
        if RouterId:
            request.set_RouterId(RouterId)
        # NOTE(review): preserved from the original — this sets VpcId for any
        # value other than the literal string "0", including the empty default.
        # It looks like `if VpcId:` was intended; confirm before changing.
        if VpcId != "0":
            request.set_VpcId(VpcId)
        if RouteTableId:
            request.set_RouteTableId(RouteTableId)
        if RouteTableName:
            request.set_RouteTableName(RouteTableName)
        request.set_PageNumber(PageNumber)
        request.set_PageSize(PageSize)
        return request

    @othHandler.ali_wrap
    def get_route_tables(self, RouterType="VRouter", RouterId='', VRouterId='', RouteTableId="", PageNumber=1, PageSize=10):
        """List the route entries of a route table."""
        request = DescribeRouteTablesRequest.DescribeRouteTablesRequest()
        request.set_RouterType(RouterType)
        # (The original repeated this RouterId block twice; once is enough.)
        if RouterId:
            request.set_RouterId(RouterId)
        if VRouterId:
            request.set_VRouterId(VRouterId)
            # Supplying a VRouterId implies the VRouter router type.
            request.set_RouterType("VRouter")
        if RouteTableId:
            request.set_RouteTableId(RouteTableId)
        request.set_PageNumber(PageNumber)
        request.set_PageSize(PageSize)
        return request

    @othHandler.ali_wrap
    def get_switch_list(self, VpcId='', ZoneId='', VSwitchId='', IsDefault=True, PageNumber=1, PageSize=10, *args, **kwargs):
        """List vSwitches, filtered by any of VpcId / ZoneId / VSwitchId.

        Bug fix: the original guarded each setter with ``if exec(k):`` —
        ``exec`` always returns None, so no filter or paging parameter was
        ever applied (and the ``hasattr(request, "set_{}")`` probe was missing
        its ``.format(k)``). Filters are now applied via getattr lookups.
        """
        request = DescribeVSwitchesRequest.DescribeVSwitchesRequest()
        filters = {
            "VpcId": VpcId,
            "ZoneId": ZoneId,
            "VSwitchId": VSwitchId,
            "PageNumber": PageNumber,
            "PageSize": PageSize,
        }
        for attr, value in filters.items():
            if value:
                setter = getattr(request, "set_{}".format(attr), None)
                if setter is not None:
                    setter(value)
        request.set_IsDefault(IsDefault)
        return request

    @othHandler.ali_wrap
    def get_switch(self, VSwitchId):
        """Query the configuration of one vSwitch."""
        request = DescribeVSwitchAttributesRequest.DescribeVSwitchAttributesRequest()
        request.set_VSwitchId(VSwitchId)
        return request

    @othHandler.ali_wrap
    def get_common_bandwidth_packages(self, BandwidthPackageId="", PageNumber=1, PageSize=10):
        """List shared-bandwidth packages.

        Returns ``(request, param)`` — the param dict is consumed by the
        ali_wrap decorator (presumably applied before sending; see othHandler).
        """
        request = DescribeCommonBandwidthPackagesRequest.DescribeCommonBandwidthPackagesRequest()
        param = {"BandwidthPackageId": BandwidthPackageId,
                 "PageNumber": PageNumber,
                 "PageSize": PageSize
                 }
        return request, param

    @othHandler.ali_wrap
    def get_eip_addresses(self, Status="", AllocationId="", AssociatedInstanceType="", AssociatedInstanceId="", PageNumber=1, PageSize=10):
        """List elastic IP addresses.

        Returns ``(request, param)`` — the param dict is consumed by the
        ali_wrap decorator (presumably applied before sending; see othHandler).
        """
        request = DescribeEipAddressesRequest.DescribeEipAddressesRequest()
        param = {"Status": Status,
                 "AllocationId": AllocationId,
                 "AssociatedInstanceType": AssociatedInstanceType,
                 "AssociatedInstanceId": AssociatedInstanceId,
                 "PageNumber": PageNumber,
                 "PageSize": PageSize
                 }
        return request, param
if __name__ == '__main__':
    pass
    # Example usage — NOTE(security): the original file embedded a
    # real-looking AccessKey id/secret pair in this comment; it has been
    # redacted here and the leaked pair should be revoked/rotated.
    # vpc = aliVpc("<ACCESS_KEY_ID>", "<ACCESS_KEY_SECRET>", "cn-beijing")
    # # z = vpc.get_eip_addresses()
# # print(z) | [
"1209394579@qq.com"
] | 1209394579@qq.com |
7f669845c9630c1488d723f1b3b4f8048ef225c0 | 24f2deae78a3f5fa8b5b3a53baff5637e0ea80ff | /sinoera/dizhi/shen1.py | 18752133a50d5ba9b805e03e92251ebc9e77f26e | [
"Apache-2.0"
] | permissive | sinotradition/sinoera | 02b979a7dbca81594eed8862fa86671856b91e2e | 1e93482c0a56a8917bc7ceebeef5b63b24ca3651 | refs/heads/master | 2021-01-10T03:20:20.231752 | 2015-12-14T15:13:42 | 2015-12-14T15:13:42 | 47,981,945 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | #!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@contact: sinotradition@gmail.com
@copyright: License according to the project license.
'''
# Module-level constants for one entry of the twelve Earthly Branches (dizhi).
NAME='shen1'
# Pinyin romanization with tone mark.
SPELL='shēn'
# The Chinese character itself.
CN='申'
# Position in the dizhi cycle — 申 is the 9th branch.
SEQ='9'
if __name__=='__main__':
    pass
| [
"smlh.sheng@gmail.com"
] | smlh.sheng@gmail.com |
aa294b37f93e998bccca23b52be56528fc121b23 | 0466a5dc950f4e89d8696329b89aa50246c7e7e3 | /test/scipy.csd_test.py | 81608a35e0096f0ff436ba9c461743f60e82817b | [] | no_license | HansInM36/ppcode | 00bc94e6177b8110681127514517f277d7a7b07a | e5fe9de8ddf2991f2fe95bde38045ee02bbcfe10 | refs/heads/master | 2023-07-19T03:42:38.667878 | 2021-09-30T22:59:48 | 2021-09-30T22:59:48 | 313,005,222 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | import sys
# Make project helper modules importable (cluster-specific absolute paths).
sys.path.append('/scratch/ppcode/sowfa/src')
sys.path.append('/scratch/ppcode')
import numpy as np
import scipy.signal
import funcs
import matplotlib.pyplot as plt
# Two synthetic series: x is uniform noise; y is a constant offset plus noise
# (the 0*x term keeps y the same length as x while removing any dependence).
N = int(1800)
x = np.random.rand(N)
# noise
alpha = 1
w = np.random.rand(N) * alpha
y = 0*x + 2 + w
# Sampling frequency and Welch segment length for the CSD estimates.
fs = 0.5
nperseg_ = 128
# Auto- and cross-spectral densities via Welch's method (scipy.signal.csd).
freq, Pxx = scipy.signal.csd(x, x, fs, nperseg=nperseg_, noverlap=None)
freq, Pxy = scipy.signal.csd(x, y, fs, nperseg=nperseg_, noverlap=None)
freq, Pyy = scipy.signal.csd(y, y, fs, nperseg=nperseg_, noverlap=None)
freq = np.squeeze(freq).ravel()
# Co-spectrum (real part) and quadrature spectrum (imaginary part) of Pxy.
Rxy = Pxy.real
Qxy = Pxy.imag
# Magnitude-squared coherence, co-coherence and phase spectrum.
coh = abs(np.array(Pxy) * np.array(np.conj(Pxy))) / (np.array(Pxx) * np.array(Pyy))
co_coh = np.real(Pxy / np.sqrt(np.array(Pxx) * np.array(Pyy)))
phase = np.arctan2(Qxy,Rxy)
# Compare against the PSD of the demeaned signal (funcs.PSD_f kept for reference).
# freq_, Pxx_ = funcs.PSD_f(x-x.mean(), fs)
freq_, Pxx_ = scipy.signal.csd(x-x.mean(), x-x.mean(), fs, nperseg=nperseg_, noverlap=None)
# Overlay the raw (black) vs demeaned (red) spectral estimates.
fig, ax = plt.subplots(figsize=(6,6))
plt.plot(freq, Pxx, linewidth=1.0, color='k')
plt.plot(freq_, Pxx_, linewidth=1.0, color='r')
plt.ylim(0,1.02)
plt.grid()
plt.show()
| [
"xni001@gfi3104118.klientdrift.uib.no"
] | xni001@gfi3104118.klientdrift.uib.no |
5b471db36bd8d290e580839e915b9b819f742552 | 6ba377d55d0442ca13cc3e577bfe698c7f51bf54 | /NewType/Eddid_CRM/Commons/Modify_xls.py | 99e77bdb756f2eb556d1fba4b52b031c524f35d9 | [] | no_license | sevencrime/NewType | 92fc2f3efe6386334b973b190f42b61db5a58194 | 125add626e485323d17745bbe629aaabd523fa62 | refs/heads/master | 2021-07-14T02:08:12.329876 | 2019-07-16T02:25:45 | 2019-07-16T02:25:45 | 170,979,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-02-19 17:41:50
# @Author : Onedi (Onedi@qq.com)
# @Link : ${link}
# @Version : $Id$
from openpyxl import load_workbook
from xlutils.copy import copy
import os
import random
class Modifyxls():
    """Helper around an openpyxl workbook: read the active sheet as row dicts
    and write randomized test data back into it."""

    def __init__(self, file_url):
        # Path is kept so writexls() can save back to the same file.
        self.path = file_url
        self.workbook = load_workbook(self.path)

    def readxls(self):
        """Return the active sheet as a list of row dicts keyed by the header row.

        The first spreadsheet row supplies the keys; every following row
        becomes one dict of raw cell values as yielded by openpyxl.
        """
        table = self.workbook.active
        # Materialize the rows once. The original rebuilt list(table.rows)
        # inside the loop for every data row, which is O(n^2) in row count.
        rows = list(table.rows)
        if not rows:
            return []
        keys = [cell.value for cell in rows[0]]
        return [
            {key: cell.value for key, cell in zip(keys, row)}
            for row in rows[1:]
        ]

    def writexls(self):
        """Write a random id_code and email into row 2, save, return the id_code.

        (The unused 'phone' draw and the dead commented-out xlutils code from
        the original were removed; behavior for callers is unchanged.)
        """
        sheet = self.workbook.active
        id_code = random.randint(100000, 13556542154)
        email = 'onedi2%s@qq.com' % (random.randint(0, 4541545))
        # Columns 13 and 15 of row 2 hold the id code and email respectively.
        sheet.cell(row=2, column=13).value = id_code
        sheet.cell(row=2, column=15).value = email
        self.workbook.save(self.path)
        return id_code
if __name__ == "__main__":
# file_url = os.path.abspath(os.path.dirname(os.getcwd()))+'/config/Ayers1.xlsx'
file_url = os.path.abspath(os.path.dirname(os.getcwd()))+'/config/1233.xlsx'
modify = Modifyxls(file_url)
# data = modify.readxls()
# print(data)
# for res in data:
# print(int(res['id_code']))
# # print(res)
code = modify.writexls()
# print(code) | [
"onedi@qq.com"
] | onedi@qq.com |
c2cc8a6c3e699641b2edecee5969456d7e6b7af4 | 9c1c714e1b2f0b8e2c16b55e5f9693a123953f20 | /Data_Structures/Graph/Old/traversal.py | 06cb9f51bb5bc9b72412b14ef01ea04147242dd4 | [] | no_license | ekeilty17/Personal-Projects-In-Python | a20f3af44a07b2193b447d022384783f7a0f512a | f8787407cbf9df2a0aa690fb396d933add1520e8 | refs/heads/main | 2023-04-24T10:23:50.074366 | 2021-05-15T21:51:20 | 2021-05-15T21:51:20 | 367,668,843 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,481 | py | class queue:
    def __init__(self):
        # Backing list; the front of the queue is index 0.
        self.store = []
#had to rename the queue functions
def push(self, val):
self.store += [val]
#so that it works generally
def pop(self):
if self.store == []:
return False
r = self.store[0]
self.store = self.store[1:len(self.store)]
return r
class stack:
    """Minimal LIFO container backed by a list (mirrors the queue interface)."""

    def __init__(self):
        # Backing list; the top of the stack is the last element.
        self.store = []

    def push(self, val):
        """Place val on top of the stack."""
        self.store.append(val)

    def pop(self):
        """Remove and return the top element, or False when empty."""
        if not self.store:
            return False
        top = self.store[-1]
        self.store = self.store[:-1]
        return top
def traverse(G, start, typeBreadth):
    """Traverse graph G and return lists of processed vertex indices.

    start: None to traverse every connected component, otherwise the vertex
        index a single traversal starts from.
    typeBreadth: True for breadth-first (queue), False for depth-first (stack).
    Returns False for invalid arguments; otherwise a list of lists, one list
    of vertex indices per traversal performed.

    Bug fix: the original evaluated ``start > len(G.adj)`` before checking for
    None, which raised TypeError under Python 3 for the documented start=None
    "traverse everything" case. An out-of-range start (== len) now also
    returns False instead of crashing with IndexError later.
    """
    if start is not None:
        if start < 0 or start >= len(G.adj):
            return False
    # Strict boolean check, preserved from the original.
    if typeBreadth != True and typeBreadth != False:
        return False
    # FIFO container gives BFS, LIFO gives DFS; both expose push/pop/store.
    if typeBreadth:
        C = queue()
    else:
        C = stack()
    # visited = pushed into C at least once; processed = already emitted.
    visited = [False] * len(G.adj)
    processed = [False] * len(G.adj)
    V = G.adj
    # With start=None we seed from every vertex so every component is covered;
    # otherwise we run a single traversal from `start`.
    n = len(G.adj) if start is None else 1
    out = []
    for i in range(n):
        temp = []
        seed = i if start is None else start
        if not visited[seed]:
            C.push(seed)
            visited[seed] = True
        while C.store != []:
            w = C.pop()
            if not processed[w]:
                temp.append(w)
                processed[w] = True
                for x in V[w]:
                    # Adjacency entries look like (neighbour, ...) tuples;
                    # x[0] is the neighbour's index.
                    if not visited[x[0]]:
                        C.push(x[0])
                        visited[x[0]] = True
        if temp:
            out.append(temp)
    return out
| [
"epkeilty@hotmail.com"
] | epkeilty@hotmail.com |
8eb2319a38d264c5e042c431bd3dc55f5ff351d7 | 597c4f48332251552a602122bb3d325bc43a9d7f | /etc/lecture/basic/04_짝수_홀수_분리.py | eb46edb53b22a78efa763765aaa89d7e44752810 | [] | no_license | Kyeongrok/python_algorithm | 46de1909befc7b17766a57090a7036886361fd06 | f0cdc221d7908f26572ae67b5c95b12ade007ccd | refs/heads/master | 2023-07-11T03:23:05.782478 | 2023-06-22T06:32:31 | 2023-06-22T06:32:31 | 147,303,654 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | list = [3, 8, 9, 5, 4, 2]
# Partition the numbers into odd and even buckets in a single pass.
odds, evens = [], []
for number in list:
    (odds if number % 2 == 1 else evens).append(number)
print("odds:", odds)
print("evens:", evens)
| [
"kyeongrok.kim@okiconcession.com"
] | kyeongrok.kim@okiconcession.com |
b52a6d334b834d5ace7fac96f8e438bb9291e5dd | 4dade4f29881e99d8602144744e09ed870bd1034 | /Python/other/comb_subst.py | 5fa89bb08c76630fa6b512ac12ad6a846451c9c7 | [] | no_license | alexbaryzhikov/codebase-archive | 9795347c19a82c098983c6d0fe4959c3162ca868 | c78c189002a26296a552f30078578cc0cf72e426 | refs/heads/master | 2023-02-19T21:54:21.310865 | 2021-01-11T15:47:50 | 2021-01-11T15:47:50 | 106,846,461 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | s = '*//*/'
# Per-symbol substitution alternatives, selected by one bit each.
subst = {'*':['у', 'при'], '/':['под', 'на']}
# Enumerate every bit pattern; bit k of the mask picks the variant for the
# k-th symbol of s.
for mask in range(2**len(s)):
    res = []
    bits = mask
    for symbol in s:
        res.append(subst[symbol][bits & 1])
        bits >>= 1
    print('(в е ({} А ({} К Р))) ({} ({} (к П А) Т) ({} Е Т)) !'.format(*res))
| [
"aleksiarts@gmail.com"
] | aleksiarts@gmail.com |
513b380ec66ddb8b8b42ff7f9aa214c12c9d44d1 | 414239752b2bfc4cb3a947474f2662af7588b6eb | / protobufeditor/Tests/MarathonTests/TestCases/Compare/Cmp2Layoutst16.py | b5d14bc14002f6b3fbad04bc160b4097f5017e30 | [] | no_license | dtracers/protobufeditor | 8c7f9671c3b3a7d1cd3094321d030f6e6afcc7e8 | b65d06bce93165eebf9798c533e2447a5992d384 | refs/heads/master | 2020-12-24T19:28:22.486207 | 2016-04-19T23:41:57 | 2016-04-19T23:41:57 | 56,340,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | useFixture(default)
def test():
    """Marathon GUI regression: compare two layouts of a protocol-buffer file
    with the Protocol Buffer Editor's Compare utility and assert the diff
    tables. This is a recorded click-script — every select/click replays a UI
    gesture, so the exact order and widget names must not change."""
    from Modules import commonBits
    java_recorded_version = '1.6.0_0'
    if window('Protocol Buffer Editor'):
        select_menu('Utilities>>Compare Menu')
        click('*2')
        # Choose the two files to compare and the comparison layout.
        select('FileChooser', commonBits.sampleDir() + 'protoStoreSales3_Compare2.bin')
        click('Right')
        select('TabbedPane', '')
        select('FileChooser', commonBits.sampleDir() + 'protoStoreSales.bin')
        select('FileChooser1', commonBits.stdCopybookDir() + 'StoreSales.protocomp')
        click('Right')
        select('TabbedPane', '')
        # Unmap the quantity/price equivalent fields, keep keycode/saleDate.
        select('Table', 'cell:Record,0(Product)')
        select('Table1', 'cell:Equivalent Field,2(quantity)')
        select('Table', 'cell:Record,0(Product)')
        select('Table1', '', 'Equivalent Field,2')
        select('Table1', '', 'Equivalent Field,3')
        select('Table1', 'cell:Field,1(saleDate)')
        assert_p('Table1', 'Content', '[[keycode, keycode], [saleDate, saleDate], [quantity, ], [price, ]]')
        select('Table1', 'cell:Field,1(saleDate)')
        click('Right')
        select('TabbedPane', '')
        # Run the comparison and check the inserted-row diff output.
        click('Compare')
        assert_p('Table', 'Content', '[[, , , , ], [, Inserted, 7, 69684558, 40118], [, , , , ], [, Inserted, 10, 69694158, 40118]]')
        close()
| [
"bm_tas@yahoo.com.au"
] | bm_tas@yahoo.com.au |
0f3f2a9c607f24ff9189d94f2e4bae6806bf49ef | c0c25fff5f5d900e256cff9bf43fade47043ab2f | /cart/migrations/0001_initial.py | 425cf8efe8fc147abc40ecf592ec2227d54aeb7e | [] | no_license | souleymanediallo/shoppy | fe6bed8467f4d4889659721ed7fa680f71737354 | 412b541bb6871da18fbd69e5ff26ff04cc2eb461 | refs/heads/master | 2020-07-28T05:22:23.723541 | 2019-09-19T13:31:05 | 2019-09-19T13:31:05 | 209,321,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | # Generated by Django 2.2.5 on 2019-09-19 11:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the cart app: creates the Cart and
    # CartItem tables. Django tracks this file — edit with care.
    initial = True
    dependencies = [
        # Requires the shop app's initial migration (CartItem FKs shop.Product).
        ('shop', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cart_id', models.CharField(blank=True, max_length=100)),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'db_table': 'Cart',
                'ordering': ['created'],
            },
        ),
        migrations.CreateModel(
            name='CartItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField()),
                ('is_activated', models.BooleanField(default=True)),
                ('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cart.Cart')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.Product')),
            ],
            options={
                'db_table': 'CartItem',
            },
        ),
    ]
| [
"souleymanedialloo@yahoo.fr"
] | souleymanedialloo@yahoo.fr |
2fcbfe067e5fed8f7fa898937c3e8959c749b6dd | 3ff660941132bcaed5bfe309861843bd6657ee37 | /LinkedList/Detect Loop.py | 91810296a4d47fc50720253094bd946cf1664a5d | [] | no_license | mrunalhirve12/Interviews2 | 04295cebe1946de1f310857d7fbded11a02f8eb1 | c48bd0a4e1112804da8bdf2d7e43ab0f2ef00469 | refs/heads/master | 2023-03-26T14:35:06.029701 | 2021-03-25T21:31:46 | 2021-03-25T21:31:46 | 351,593,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | class Node:
    def __init__(self, data):
        # Payload plus forward pointer; nodes are linked together externally.
        self.data = data
        self.next = None
class LinkedList:
    """Singly linked list with Floyd (tortoise/hare) cycle detection."""

    def __init__(self):
        self.head = None

    def push(self, data):
        """Prepend a new node carrying data."""
        node = Node(data)
        node.next = self.head
        self.head = node

    def detectLoop(self):
        """Return True when the list contains a cycle."""
        slow = self.head
        fast = self.head
        while slow and fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            if slow == fast:
                return True
        return False

    def printList(self):
        """Print each node's data, head to tail."""
        node = self.head
        while node:
            print(node.data)
            node = node.next
# Build a four-node list: 10 -> 15 -> 4 -> 20
llist = LinkedList()
llist.push(20)
llist.push(4)
llist.push(15)
# Create a loop for testing: point the tail back at the head.
llist.push(10)
llist.head.next.next.next.next = llist.head
# Expected output: "yes" (the list is now cyclic).
if llist.detectLoop():
    print("yes")
else:
print("no") | [
"mrunalhirve@gmail.com"
] | mrunalhirve@gmail.com |
137b0162e9bbacef3d7f832e1e51873a2d849f2e | 398009b075a10c0653bdf1c3013240b2de656e0d | /podcasts/admin.py | 58b9e7bf3a809a6213c840faef4ba857f3ff0e55 | [
"Apache-2.0"
] | permissive | AliHabes/pinecast | c930040c04bafca4ccbaa2be82e20600a8fdd9e8 | 1eae5bfc2ef6d466ccbe170fd829cbff98332833 | refs/heads/master | 2021-01-18T17:54:26.443918 | 2017-03-29T16:51:47 | 2017-03-29T16:51:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,923 | py | from django.contrib import admin
from .models import Podcast, PodcastEpisode, PodcastCategory
from dashboard.models import Collaborator
class PodcastCategoryInline(admin.TabularInline):
model = PodcastCategory
fk_name = 'podcast'
readonly_fields = ('category', )
extra = 0
def get_fields(self, request, obj=None):
return self.get_readonly_fields(request, obj)
class PodcastEpisodeInline(admin.TabularInline):
model = PodcastEpisode
fk_name = 'podcast'
readonly_fields = ('title', 'publish', 'created', 'duration', )
extra = 0
show_change_link = True
can_delete = False
def get_fields(self, request, obj=None):
return self.get_readonly_fields(request, obj)
class CollaboratorInline(admin.TabularInline):
model = Collaborator
fk_name = 'podcast'
readonly_fields = ('collaborator', )
extra = 0
show_change_link = True
can_delete = False
def get_fields(self, request, obj=None):
return self.get_readonly_fields(request, obj)
class PodcastAdmin(admin.ModelAdmin):
list_display = ('slug', 'name', 'owner_email')
inlines = (PodcastEpisodeInline, PodcastCategoryInline, CollaboratorInline, )
# TODO: This is probably not ideal. There's presumably some technical
# shizzle-wizzle that can be done to prefetch this with, say, the
# get_queryset method.
def owner_email(self, obj):
return obj.owner.email
def fix_category_ampersand(modeladmin, request, queryset):
for cat in queryset:
cat.category = cat.category.replace('&', '&')
cat.save()
fix_category_ampersand.short_description = 'Fix ampersands'
class PodcastCategoryAdmin(admin.ModelAdmin):
search_fields = ('category', )
actions = (fix_category_ampersand, )
admin.site.register(Podcast, PodcastAdmin)
admin.site.register(PodcastEpisode)
admin.site.register(PodcastCategory, PodcastCategoryAdmin)
| [
"me@mattbasta.com"
] | me@mattbasta.com |
016ae86aad6c6e19a54493723d905b567cc55a6d | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/numpy/vectorize_lambda.py | de6b5bc8ccf9ce4c16560706be315d3171913214 | [] | no_license | szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 | Python | UTF-8 | Python | false | false | 231 | py | import numpy as np
# Demo of np.vectorize: lift a plain Python predicate over a string array.
animals = np.array(['Cow', 'Elephant', 'Snake', 'Camel', 'Praying Mantis'])
print(animals)
# Element-wise "name longer than five characters"; calling the vectorized
# function yields a boolean array of the same shape.
longer_than_5 = np.vectorize(lambda x: len(x) > 5)
long_animals_bool = longer_than_5(animals)
print(long_animals_bool)
| [
"gabor@szabgab.com"
] | gabor@szabgab.com |
efa7461ea64952459e66e1148200f28b73b950ac | bdcfab7fef115312988ef7d2bf4f375ab0723e51 | /test/myself/TestFile.py | 1a105c4d101ed1f5621b1671181223668f23ea7b | [] | no_license | ChenLaiHong/pythonBase | c2587bfa78fbbec225c364c297394131fa2f6f37 | 00bf1b404be8714f907c7750a60c052e83555b3e | refs/heads/master | 2020-03-11T00:57:13.403866 | 2019-09-19T11:55:09 | 2019-09-19T11:55:09 | 129,676,746 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | # 文件操作
# 打开文件
# "r"只读;
# "w"只写,文件不存在会自动创建;指针在文件头
# "a"追加,文件不存在会自动创建,指针在文件末尾
# f = open("test.txt", "a", encoding="utf-8")
#
# # 读写操作
# # content = f.read()
# # print(content)
#
# f.write("张三、李四")
#
# # 关闭文件
# f.close()
# 操作图片,增加b是操作二进制
# fromFile = open("xx.jpg", "rb")
# fromContent = fromFile.read()
# print(fromContent)
# fromFile.close()
#
# toFile = open("xx2.jpg", "wb")
# content = fromContent[0: len(fromContent) // 2]
# toFile.write(content)
# fromFile.close()
# 增加+
# "r+"可读可写,先读再写的话不会覆盖原来的只是在后面添加
# 定位
# f.seek(偏移量,[0,1,2]),0:开头;1:当前位置;2:文件末尾(偏移量只能是负的)
# 注意:文本文件的操作模式下(不带b)只能写0
# 如果想写1/2,必须在二进制文件操作模式下(带b)
# f = open("test.txt", "r", encoding="utf-8")
# # 打印当前指针的位置,文件指针默认在文件最开始的地方
# print(f.tell())
# # 将指针移动三位(seek方法用来移动指针的位置)
# f.seek(3)
# # 再次打印指针的位置
# print(f.tell())
# # 读取当前指针位置到文件最后的内容
# print(f.read())
# f.close()
# f = open("xx.jpg", "rb")
# # 打印当前指针的位置,文件指针默认在文件最开始的地方
# print(f.tell())
# # 将指针移动三位(seek方法用来移动指针的位置)
# f.seek(-3, 2)
# # 再次打印指针的位置
# print(f.tell())
# # 读取当前指针位置到文件最后的内容
# print(f.read())
# f.close()
# f.read(len):len:读取文件内容的长度,默认是文件所有内容
f = open("test.txt", "r", encoding="utf-8")
print(f.read(3))
# 将指针移动两位再读
# f.seek(2)
# print(f.read(3))
f.close()
# f.readLine([limit]):读取一行数据
# print(f.readline(), end="")
# f.readLines()会自动的将文件按换行符进行处理
# 将处理的每一行组成一个列表返回
# print(f.readlines())
# for i in f:
# print(i, end="")
# import os
# os.rename("x.jpg", "xx.jpg")
| [
"1185630400@qq.com"
] | 1185630400@qq.com |
082f1032d60a7484435d669338999b4da1b246a5 | 2a871d3f15bfd1b9d4cd962f681503eaf0a535cc | /sample_player/sample_walker.py | 1ce5ba9d7a8f8e6089ea607cc3de637181bed503 | [] | no_license | PFCM/generative-music | 937046ff30eafc5db6cdbb361be6c7732fbebbd5 | 481b216f2ae795b0ac48a57b83e92fde082b34e9 | refs/heads/master | 2021-04-28T09:38:41.065310 | 2018-09-06T09:47:42 | 2018-09-06T09:47:42 | 122,046,080 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,722 | py | """
Load some embeddings and do a random walk through them.
"""
import itertools
import os
import sys
import time
from functools import partial
from itertools import chain, islice
from multiprocessing.dummy import Pool
import librosa
import networkx as nx
import numpy as np
import pyo
from sklearn.neighbors import kneighbors_graph
from organise import read_embeddings
def embedding_neighbours_graph(filenames, embeddings, neighbours=2):
    """Make a networkx graph (with filenames as nodes) with connectivity
    derived from the nearest neighbours of `embeddings`.
    Uses euclidean distance."""
    # k-NN adjacency from sklearn (p=2 -> euclidean); edge weights are distances.
    graph = nx.Graph(
        kneighbors_graph(embeddings, neighbours, p=2, mode='distance'))
    # Replace the integer node ids with the corresponding sample filenames
    # (in place: copy=False).
    graph = nx.relabel_nodes(
        graph, {i: f
                for i, f in enumerate(filenames)}, copy=False)
    return graph
def printer(gen, ident=''):
    """Echo each element of gen to stdout (prefixed with ident) and re-yield it."""
    for element in gen:
        print(f'{ident}{element}')
        yield element
def random_walk(graph, start_node):
    """Yield an endless random walk over graph, starting at start_node.

    Infinite generator — slice it with itertools.islice or similar.
    """
    node = start_node
    while True:
        yield node
        node = np.random.choice(list(graph.neighbors(node)))
def random_walkers(graph, start_nodes, times, callback):
    """Run a bunch of random walkers with exponentially distributed step
    times. Blocks until a keyboard interrupt.
    `start_nodes` and `times` should be dictionaries, the keys will be passed
    to `callback` along with the new values.
    """
    # starters is unused — kept only to unpack the (key, start) pairs.
    keys, starters = zip(*start_nodes.items())
    # One mean step interval per walker, keyed in the same order as `keys`.
    rates = np.array([times[k] for k in keys])
    wait_times = np.random.exponential(rates)
    walkers = [random_walk(graph, start_nodes[key]) for key in keys]
    while True:
        try:
            # Sleep until the soonest walker is due, then advance every walker
            # whose remaining wait has (numerically) reached zero.
            wait = np.min(wait_times)
            time.sleep(wait)
            wait_times -= wait
            changes, = np.where(wait_times < 1e-7)
            for result in changes:
                callback(keys[result], next(walkers[result]))
                # Re-arm this walker with a fresh exponential wait.
                wait_times[result] = np.random.exponential(rates[result])
        except KeyboardInterrupt:
            return
def walker(graph, args, length=10):
    """Random walk through a component of the sound graph, playing as we go.

    args is an (index, component) pair as produced by
    enumerate(nx.connected_components(...)); the walk length scales with the
    component size divided by `length` (minimum 5 steps).
    """
    num, component = args
    start = np.random.choice(list(component))
    files = islice(random_walk(graph, start), max(5, len(component) // length))
    print('{}--{}'.format(num, len(component)))
    files = printer(files, '~~~~{}~'.format(num))
    # Node labels are sample filenames relative to the embeddings file's folder.
    samples = (os.path.join(os.path.dirname(sys.argv[1]), path)
               for path in files)
    # pyo sample player; the 'trig' stream presumably fires at end of playback
    # (see pyo SfPlayer docs), at which point make_looper queues the next file.
    player = pyo.SfPlayer(next(samples), mul=0.1)
    trig = pyo.TrigFunc(player['trig'], make_looper(samples, player))
    player.out()
    # Block until playback stops (i.e. the walk is exhausted).
    while player.isPlaying():
        time.sleep(1)
def make_looper(gen, player):
    """Build a trigger callback that stops *player* and advances it to the
    next path from *gen*; once the generator is exhausted the callback only
    stops the player."""
    def _advance():
        player.stop()
        try:
            next_path = next(gen)
        except StopIteration:
            return
        player.setPath(next_path)
        player.out()
    return _advance
def main():
    """quick test"""
    # sys.argv[1]: path to the embeddings file (consumed by read_embeddings).
    embeddings = read_embeddings(sys.argv[1])
    graph = embedding_neighbours_graph(*embeddings)
    print('{} components'.format(nx.number_connected_components(graph)))
    # Single-channel audio server; duplex=0 disables audio input.
    server = pyo.Server(nchnls=1, duplex=0).boot()
    server.start()
    # One worker per connected component (up to 8 at a time), each playing
    # its own random walk through the sound graph.
    with Pool(8) as pool:
        results = pool.imap_unordered(
            partial(walker, graph), enumerate(nx.connected_components(graph)))
        _ = list(results)  # drain the iterator so every walk completes
if __name__ == '__main__':
main()
| [
"pfcmathews@gmail.com"
] | pfcmathews@gmail.com |
ce82c00ac5d6fb5049cfe4eeb9fcf1d8cbf86f55 | 99f02678101b1790a982301824aa14ed0140c21b | /backend/hawaii_hop_27263/wsgi.py | 35ac4077780e887db0b65c0dbc572bf4d2bbb114 | [] | no_license | crowdbotics-apps/hawaii-hop-27263 | 5128b9057e952375c421e4347ed9b280ffefe127 | 7ae5d451ee9217011921e2874d01e93f1cc07017 | refs/heads/master | 2023-05-03T20:17:25.616012 | 2021-05-22T22:11:16 | 2021-05-22T22:11:16 | 369,914,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for hawaii_hop_27263 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hawaii_hop_27263.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
ba8be02377db6cac60f7f9ce5c89b1fcf36d52d4 | 0b9470f9a839d87b21fd575421b5223afb4573c6 | /04day/09-保护对象方法.py | b6a8997e6e6a1ba485c6cbd32ab9f59a0547bc71 | [] | no_license | ZiHaoYa/1808 | 351356b4fa920a5075899c8abdce24a61502097f | 891582547fef4c6fd4fd4132da033e48e069901f | refs/heads/master | 2020-03-30T06:20:46.898840 | 2018-09-29T08:56:53 | 2018-09-29T08:56:53 | 147,603,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | class Tencent():
    def __vip(self):  # core action: private (name-mangled) VIP activation
        print("开会员成功")
def open(self,money):#判断钱够不够
if money > 10:
self.__vip()
else:
print("QQ币不足")
qq = Tencent()
qq.open(12)  # 12 > 10, so the private __vip() runs and prints success
#qq.vip()  # NOTE: would raise AttributeError — __vip is name-mangled/private
| [
"qingyuan@geekniu.com"
] | qingyuan@geekniu.com |
44ddb5ba007d7311a9825af8b5f62b3c330df6df | 4d2238210813c1581bf44f64d8a63196f75d2df4 | /craking/exercise1/listPairToGetSum.py | a1b6f9664ea3f6108ac27c44ed6d15581fd24d99 | [] | no_license | wwtang/code02 | b1600d34907404c81fa523cfdaa74db0021b8bb3 | 9f03dda7b339d8c310c8a735fc4f6d795b153801 | refs/heads/master | 2020-12-24T14:10:33.738734 | 2012-12-14T04:24:47 | 2012-12-14T04:24:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,686 | py | """
Given an array of integers, list out all elements that add up to a given sum X with time complexity O(nlgn)
A = {6,4,5,7,9,1,2}
Sum = 10 Then the pairs are - {6,4} , {9,1}
Two methods:
1, sort the array, two pointer begin from the start and the end
sorting takes O(nlgn), iteration takes o(n) Overall Nlg(n)
2. dumps the array into dict, compute the difference of givenSum with current value, look up the difference in the dict
time complexity O(n)
space complexity O(n)
"""
def checkSum(arr, gsum):
    """Two-pointer scan over the (in-place) sorted array.

    Returns the list of (low, high) pairs summing to *gsum*, or None for an
    empty input.  O(n log n) for the sort, O(n) for the scan.
    """
    if len(arr) == 0:
        return None
    arr.sort()  # sorts the caller's list in place, as before
    lo, hi = 0, len(arr) - 1
    pairs = []
    while lo < hi:
        total = arr[lo] + arr[hi]
        if total > gsum:
            hi -= 1
        elif total < gsum:
            lo += 1
        else:
            pairs.append((arr[lo], arr[hi]))
            lo += 1
            hi -= 1
    return pairs
def checkSum2(arr, gsum):
    """
    Hash-map variant: O(n) time, O(n) space.

    Builds a value -> count dict, then reports each unordered pair
    (value, gsum - value) exactly once, in first-encounter order.

    Fixes two defects of the previous version: a value equal to gsum/2 no
    longer pairs with itself unless it occurs at least twice in *arr*, and
    mirrored duplicates such as (4, 6)/(6, 4) are no longer both reported.
    Returns None for an empty input, matching checkSum.
    """
    if len(arr) == 0:
        return None
    counts = {}
    for v in arr:
        counts[v] = counts.get(v, 0) + 1
    res = []
    seen = set()  # unordered pairs already emitted
    for value in arr:
        diff = gsum - value
        if diff not in counts:
            continue
        if diff == value and counts[value] < 2:
            continue  # a lone gsum/2 element cannot pair with itself
        pair = (min(value, diff), max(value, diff))
        if pair not in seen:
            seen.add(pair)
            res.append((value, diff))
    return res
def checkSum3(arr, gsum):
    """
    Sorted-array variant: O(n log n) via binary search.

    Sorts *arr* in place, then binary-searches (bisect) for each element's
    complement.  Each unordered pair is reported once, in sorted order.

    Fixes of the previous version: a value equal to gsum/2 no longer pairs
    with itself unless it occurs at least twice, mirrored pairs are no
    longer listed twice, and a complement of 0 is no longer dropped (the
    old code tested the found *value* for truthiness, so 0 looked like
    "not found").  Returns None for an empty input, matching checkSum.
    """
    import bisect
    if len(arr) == 0:
        return None
    arr.sort()
    res = []
    seen = set()  # unordered pairs already emitted
    for value in arr:
        diff = gsum - value
        pos = bisect.bisect_left(arr, diff)
        if pos == len(arr) or arr[pos] != diff:
            continue  # complement not present
        if diff == value and arr.count(value) < 2:
            continue  # a lone gsum/2 element cannot pair with itself
        pair = (min(value, diff), max(value, diff))
        if pair not in seen:
            seen.add(pair)
            res.append((value, diff))
    return res
def binarySearch(arr, target):
    """
    Return *target* if it is present in the sorted list *arr*, else None.

    NOTE: the found *value* is returned, not its index, so a falsy target
    such as 0 is indistinguishable from "not found" for truthiness callers.

    Fixes: the high bound now starts at len(arr) - 1 (the old len(arr)
    start could index one past the end), and floor division keeps the
    midpoint an int under Python 3.
    """
    if len(arr) == 0:
        return None
    i = 0
    j = len(arr) - 1
    while i <= j:
        mid = (i + j) // 2
        if arr[mid] > target:
            j = mid - 1
        elif arr[mid] < target:
            i = mid + 1
        else:
            return arr[mid]
    return None
def main():
    # Demo driver (Python 2 print statements): the example from the module
    # docstring — pairs from {6,4,5,7,9,1,2} summing to 10.
    arr = [6,4,5,7,9,1,2]
    gsum = 10
    print checkSum(arr, gsum)
    print checkSum2(arr, gsum)
    print checkSum3(arr,gsum)
if __name__=="__main__":
main() | [
"andytang1994@gmail.com"
] | andytang1994@gmail.com |
941580ec7caf0ea5b4cc0b5f27f63d9e1b1c79f1 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/webpubsub/azure-messaging-webpubsubservice/samples/integration_sample.py | 20680a5da983cf49176db9486613de8c1233f116 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 4,634 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import threading
import logging
import time
import json
import os
from websocket import WebSocketApp
from typing import List, Optional
from azure.messaging.webpubsubservice import WebPubSubServiceClient
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger()
class WebsocketClientsManager:
    '''
    This class contains multiple websocket clients which are connected to Azure Web PubSub Services.
    '''
    def __init__(self) -> None:
        self.clients = []         # WebSocketApp instances, one per added user
        self.connection_ids = []  # filled by each client's 'connected' system event
        self.recv_messages = []   # payloads of every 'message' frame received
    def add_client(self, service: WebPubSubServiceClient, user_id: str, groups: Optional[List[str]] = None):
        # Build one client whose callbacks record connection ids and message
        # payloads on this manager; the access token pre-joins *groups*.
        def on_message(websocket_app: WebSocketApp, message: str):
            message = json.loads(message)
            if message["type"] == "message":
                self.recv_messages.append(message["data"])
            if message["type"] == "system" and message["event"] == "connected":
                self.connection_ids.append(message["connectionId"])
            LOG.debug(message)
        def on_open(websocket_app: WebSocketApp):
            LOG.debug("connected")
        token = service.get_client_access_token(groups=groups, user_id=user_id)
        client = WebSocketApp(token["url"], subprotocols=['json.webpubsub.azure.v1'], on_open=on_open, on_message=on_message)
        self.clients.append(client)
    def start_all(self):
        # Run each client in its own daemon thread, then block until every
        # client has reported a connection id.
        for client in self.clients:
            wst = threading.Thread(target=client.run_forever, daemon=True)
            wst.start()
        LOG.debug("Waiting for all clients connected...")
        # NOTE(review): busy-wait spins a CPU core until all connections are
        # up; a threading.Event or a short sleep would be gentler.
        while len(self.connection_ids) != self.client_number:
            pass
    @property
    def client_number(self):
        # Number of clients added so far.
        return len(self.clients)
def test_overall_integration(webpubsub_connection_string: str):
    """End-to-end smoke test against a live Azure Web PubSub hub.
    Spins up five websocket clients, exercises the service-side send/group
    APIs, then asserts on the messages the clients actually received.
    Requires network access and a valid connection string.
    """
    # build a service client from the connection string.
    service = WebPubSubServiceClient.from_connection_string(webpubsub_connection_string, hub='hub', logging_enable=False)
    # build multiple websocket clients connected to the Web PubSub service
    clients = WebsocketClientsManager()
    for i in range(5):
        clients.add_client(service, user_id="User%d" % clients.client_number, groups=["InitGroup"])
    clients.start_all()
    # test naive send_to_all
    service.send_to_all(message='Message_For_All', content_type='text/plain') # N messages
    # test if generating token with the initial group is working
    service.send_to_group(group="InitGroup", message='Message_For_InitGroup', content_type='text/plain') # N messages
    # test if parameter "filter" in send is working
    service.send_to_all("Message_Not_For_User0", filter="userId ne 'User0'", content_type='text/plain') # N - 1 messages
    # test if remove_connection_from_all_groups works
    group_names = ["Group%d" % i for i in range(3)]
    for group in group_names:
        service.add_connection_to_group(group, clients.connection_ids[0])
        service.send_to_group(group, "Message_For_RemoveFromAll", content_type='text/plain')
    service.remove_connection_from_all_groups(clients.connection_ids[0])
    for group in group_names:
        service.send_to_group(group, "Message_For_RemoveFromAll", content_type='text/plain')
    # other tests
    service.send_to_user("User0", message='Message_For_User0', content_type='text/plain') # 1 messages
    # Give the service time to fan the messages out before asserting.
    time.sleep(5)
    LOG.info("Received Message: ", clients.recv_messages)
    assert service.group_exists("InitGroup") == True
    assert clients.recv_messages.count("Message_For_All") == clients.client_number
    assert clients.recv_messages.count("Message_For_InitGroup") == clients.client_number
    assert clients.recv_messages.count("Message_Not_For_User0") == clients.client_number - 1
    assert clients.recv_messages.count("Message_For_User0") == 1
    assert clients.recv_messages.count("Message_For_RemoveFromAll") == 3
    LOG.info("Complete All Integration Test Successfully")
if __name__ == "__main__":
try:
connection_string = os.environ['WEBPUBSUB_CONNECTION_STRING']
except KeyError:
LOG.error("Missing environment variable 'WEBPUBSUB_CONNECTION_STRING' - please set if before running the example")
exit()
test_overall_integration(connection_string) | [
"noreply@github.com"
] | Azure.noreply@github.com |
b833d9eb4d791fe08f8cb7fbedc1ee7f77c983ce | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Rn3g3hokznLu8ZtDP_4.py | 9a6e5bf2404857405d8c95ab030f03a3b3675f35 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | """
Write a function which increments a string to create a new string.
* **If the string ends with a number** , the number should be incremented by `1`.
* **If the string doesn't end with a number** , `1` should be **added** to the new string.
* **If the number has leading zeros** , the amount of digits **should be considered**.
### Examples
increment_string("foo") ➞ "foo1"
increment_string("foobar0009") ➞ "foobar0010"
increment_string("foo099") ➞ "foo100"
### Notes
N/A
"""
def increment_string(txt):
    """Return *txt* with its trailing integer incremented by 1.

    If the string is empty or does not end with a digit, "1" is appended.
    Leading zeros in the trailing number are preserved as padding, so
    "foo099" -> "foo100" and "foobar0009" -> "foobar0010".

    Fixes: no longer crashes on the empty string (txt[-1] raised
    IndexError), and only the *trailing* digit run is incremented — the old
    version collected every digit in the string, so "a1b2" wrongly became
    "ab13" instead of "a1b3".
    """
    if not txt or not txt[-1].isdigit():
        return txt + '1'
    # Find where the trailing run of digits starts.
    split = len(txt)
    while split > 0 and txt[split - 1].isdigit():
        split -= 1
    head, digits = txt[:split], txt[split:]
    bumped = str(int(digits) + 1)
    # zfill restores the original width when leading zeros were present.
    return head + bumped.zfill(len(digits))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
710ddac897877db3f60b5650a5bbc440c9ed3017 | 4577d8169613b1620d70e3c2f50b6f36e6c46993 | /students/1803699/homework03/program03.py | fcf92797b2788e70eb83311b15b208da45c244b8 | [] | no_license | Fondamenti18/fondamenti-di-programmazione | cbaf31810a17b5bd2afaa430c4bf85d05b597bf0 | 031ec9761acb1a425fcc4a18b07884b45154516b | refs/heads/master | 2020-03-24T03:25:58.222060 | 2018-08-01T17:52:06 | 2018-08-01T17:52:06 | 142,419,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,064 | py | from immagini import *
def ricolora(fname, lista, fnameout):
    """Flood-fill recolouring.
    For each (x, y, area_colour, border_colour) tuple in *lista*, finds the
    connected region of pixels sharing the colour at (x, y) (4-neighbour
    connectivity), recolours interior pixels with area_colour and boundary
    pixels with border_colour, and returns a list of
    (interior_count, border_count) pairs, one per tuple.  The recoloured
    image is written to *fnameout*.
    """
    img=load(fname)
    coppie=[]
    j=len(img[0])  # image width
    k=len(img)     # image height
    for quadrupla in lista:
        areac=quadrupla[2]
        bordoc=quadrupla[3]
        baselist=set()     # BFS frontier currently being scanned
        nextlist=set()     # frontier for the next BFS round
        checkedlist=set()  # pixels already classified
        arealist=set()     # interior pixels (all 4 neighbours same colour)
        bordolist=set()    # boundary pixels
        color=img[quadrupla[1]][quadrupla[0]]  # colour of the seed pixel
        areacounter=0
        bordocounter=0
        baselist.add((quadrupla[0],quadrupla[1]))
        while len(baselist)>0:
            for pixel in baselist:
                if pixel not in checkedlist:
                    x=pixel[0]
                    y=pixel[1]
                    counter=0  # in-bounds neighbours matching the seed colour
                    if x!=j-1:
                        if img[y][x+1]==color:
                            nextlist.add((x+1,y))
                            counter+=1
                    if y!=k-1:
                        if img[y+1][x]==color:
                            nextlist.add((x,y+1))
                            counter+=1
                    if x!=0:
                        if img[y][x-1]==color:
                            nextlist.add((x-1,y))
                            counter+=1
                    if y!=0:
                        if img[y-1][x]==color:
                            nextlist.add((x,y-1))
                            counter+=1
                    # A pixel with 4 matching neighbours is interior; anything
                    # else (including pixels on the image edge) is boundary.
                    if counter==4:
                        arealist.add(pixel)
                        areacounter+=1
                    else:
                        bordolist.add(pixel)
                        bordocounter+=1
                    checkedlist.add(pixel)
            baselist=set()
            for i in nextlist:
                baselist.add(i)
            nextlist=set()
        # Recolour only after the scan, so the fill never matches its output.
        for pixel in arealist:
            img[pixel[1]][pixel[0]]=areac
        for pixel in bordolist:
            img[pixel[1]][pixel[0]]=bordoc
        coppie.append((areacounter,bordocounter))
    save(img,fnameout)
    return coppie
| [
"a.sterbini@gmail.com"
] | a.sterbini@gmail.com |
5f0cfa1e7c2414a3499568a58470b9e2dcb72c07 | e9233367116ace8aac3218abcb8480ac236ac42f | /build/motor_arduino/cmake/motor_arduino-genmsg-context.py | 407947db99afa267b28a1e1dd203ffb9ad0fd72f | [] | no_license | zhangzhongwd/optical_waveguide_calibration | 2c695592d792b26aa8a3218faa86adbff478cd3b | 431d7e0e74885030648c17030f197efa3909a48c | refs/heads/master | 2020-11-29T08:19:12.000041 | 2019-12-25T08:14:20 | 2019-12-25T08:14:20 | 230,065,827 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
# Values injected by the genmsg CMake template (pkg-genmsg.context.in);
# regenerate through the catkin build rather than editing by hand.
messages_str = "/home/zhong/Sensor/src/motor_arduino/msg/Stepper.msg;/home/zhong/Sensor/src/motor_arduino/msg/Limit.msg"
services_str = ""
pkg_name = "motor_arduino"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "motor_arduino;/home/zhong/Sensor/src/motor_arduino/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"zhangzhongwd@gmail.com"
] | zhangzhongwd@gmail.com |
22ff80f135fb0d20d7d0387a072fb397f970347f | 165538de7879dded2b9bd0694f3134b36e923b84 | /Python3/1624-Largest-Substring-Between-Two-Equal-Characters/soln.py | ce3fd3b1970776588f3cc8f42a2831594c716f51 | [
"MIT"
] | permissive | zhangyaqi1989/LeetCode-Solutions | 6f710153ec828e6d9e58a30ae8009f754cae4be6 | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | refs/heads/master | 2023-01-23T16:23:55.483396 | 2020-12-06T17:35:09 | 2020-12-06T17:35:09 | 270,014,622 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | class Solution:
def maxLengthBetweenEqualCharacters(self, s: str) -> int:
last_idxes = [-1] * 26
ans = -1
for i, ch in enumerate(s):
val = ord(ch) - ord('a')
if last_idxes[val] != -1:
temp = i - last_idxes[val] - 1
ans = max(ans, temp)
else:
last_idxes[val] = i
return ans
| [
"zhang623@wisc.edu"
] | zhang623@wisc.edu |
ea169c925512954473486672a50a844bef88ff5a | 77d808f47101202db6cec5a9eee6b38c55f73fde | /17. EXERCISE - Objects and Classes/09.py | f69eb464cd55efefcd817a0f4283f308600429d0 | [] | no_license | dimDamyanov/Py-Fundamentals | 2ce5591fbfebf8d95c832e3f7109b24e53dd721b | 5ccae5bfa456829d97e8773ee9f5eaa5f5051765 | refs/heads/main | 2023-01-29T22:21:07.788061 | 2020-12-13T08:11:04 | 2020-12-13T08:11:04 | 317,682,227 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | class Movie:
__watched_movies = 0
    def __init__(self, name, director):
        # Per-instance state; `watched` flips to True on the first watch().
        self.name = name
        self.director = director
        self.watched = False
    def change_name(self, new_name):
        """Rename the movie."""
        self.name = new_name
    def change_director(self, new_director):
        """Reassign the movie's director."""
        self.director = new_director
def watch(self):
if not self.watched:
self.watched = True
Movie.__watched_movies += 1
def __repr__(self):
return f'Movie name: {self.name};' \
f' Movie director: {self.director}. Total watched movies: {Movie.__watched_movies}' | [
"dim.damianov@gmail.com"
] | dim.damianov@gmail.com |
3f0202e1af943b8bbda704c6e356788f663611fd | 1d892928c70ee9ddf66f2a37a8e083d2632c6e38 | /nova/ipv6/api.py | d74e6cd370bb1cfe071d4498e8ccdf7d772c799f | [
"Apache-2.0"
] | permissive | usc-isi/essex-baremetal-support | 74196c3f1332ee3cdeba9c263faff0ac0567d3cf | a77daf8ef56cf41e38de36621eda25ed3f180156 | refs/heads/master | 2021-05-19T03:12:11.929550 | 2020-07-24T14:15:26 | 2020-07-24T14:15:26 | 4,702,421 | 0 | 1 | Apache-2.0 | 2020-07-24T14:15:27 | 2012-06-18T15:19:41 | null | UTF-8 | Python | false | false | 1,342 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova.openstack.common import cfg
from nova import utils
# Config option selecting which IPv6 address-generation backend to load.
ipv6_backend_opt = cfg.StrOpt('ipv6_backend',
                              default='rfc2462',
                              help='Backend to use for IPv6 generation')
FLAGS = flags.FLAGS
FLAGS.register_opt(ipv6_backend_opt)
def reset_backend():
    # Re-resolve IMPL from the current 'ipv6_backend' flag; called once at
    # import time below, and again by tests after overriding the flag.
    global IMPL
    IMPL = utils.LazyPluggable('ipv6_backend',
                               rfc2462='nova.ipv6.rfc2462',
                               account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
    """Delegate to the configured backend: build a global IPv6 address."""
    return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
    """Delegate to the configured backend: recover the MAC from an IPv6 address."""
    return IMPL.to_mac(ipv6_address)
reset_backend()
| [
"dkang@isi.edu"
] | dkang@isi.edu |
f2073f68965e7070362240f4fd2d08cb653f4697 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/398/usersdata/281/81356/submittedfiles/av1_programa2.py | f84881738dd63b159a8897f7eaba31b5aa5a48b2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO ABAIXO DESTA LINHA
a=int(input(':'))
b=int(input(':'))
c=int(input(':'))
d=int(input(':'))
e=int(input(':'))
f=int(input(':'))
# NOTE(review): only a..d are range-checked (1..13, card-like values);
# e and f are read but never validated — presumably an oversight in the
# exercise; confirm against the assignment statement.
if a>0 and a<=13 and b>0 and b<=13 and c>0 and c<=13 and d>0 and d<=13:
    # 'C' when the six values are strictly increasing, 'N' when some
    # adjacent pair decreases, 'fim' otherwise (some adjacent pair equal).
    if a<b and b<c and c<d and d<e and e<f:
        print('C')
    elif a>b or b>c or c>d or d>e or e>f:
        print('N')
    else:
        print('fim')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
65d7292164c9a3e0351ff48dcb3a057b8dd2290d | 792ae5d2a5c17af4f2ccfa582e3aeec569a6809a | /42. Trapping Rain Water.py | f35ce886c90306b5cd3f91bc4258b415545fef8e | [] | no_license | ADebut/Leetcode | 396b8b95ad5b5e623db2839bbfdec861c4c1731f | 7333d481e00e8c1bc5b827d1d4ccd6e4d291abd7 | refs/heads/master | 2020-07-05T18:48:27.504540 | 2019-10-28T10:51:43 | 2019-10-28T10:51:43 | 202,735,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | class Solution:
def trap(self, height: List[int]) -> int:
volume = 0
max_left = [0 for i in range(len(height))]
max_right = [0 for i in range(len(height))]
for i in range(1, len(height) - 1):
max_left[i] = max(max_left[i - 1], height[i - 1])
for i in range(len(height) - 2, 0, -1):
max_right[i] = max(max_right[i + 1], height[i + 1])
for i in range(1, len(height) -1):
mini = min(max_left[i], max_right[i])
if mini > height[i]:
volume += mini - height[i]
return volume | [
"chen758@usc.edu"
] | chen758@usc.edu |
13f4510cf7c53943658d0e717453ef04820570b8 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v11/resources/types/customer_user_access.py | 4ef6dbad73be7b9adc4eff12bb1be5952d52fbc8 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 2,730 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v11.enums.types import access_role as gage_access_role
__protobuf__ = proto.module(
package="google.ads.googleads.v11.resources",
marshal="google.ads.googleads.v11",
manifest={"CustomerUserAccess",},
)
class CustomerUserAccess(proto.Message):
    r"""Represents the permission of a single user onto a single
    customer.
    Attributes:
        resource_name (str):
            Immutable. Name of the resource. Resource names have the
            form:
            ``customers/{customer_id}/customerUserAccesses/{user_id}``
        user_id (int):
            Output only. User id of the user with the
            customer access. Read only field
        email_address (str):
            Output only. Email address of the user.
            Read only field
            This field is a member of `oneof`_ ``_email_address``.
        access_role (google.ads.googleads.v11.enums.types.AccessRoleEnum.AccessRole):
            Access role of the user.
        access_creation_date_time (str):
            Output only. The customer user access
            creation time. Read only field
            The format is "YYYY-MM-DD HH:MM:SS".
            Examples: "2018-03-05 09:15:00" or "2018-02-01
            14:34:30".
            This field is a member of `oneof`_ ``_access_creation_date_time``.
        inviter_user_email_address (str):
            Output only. The email address of the inviter
            user. Read only field
            This field is a member of `oneof`_ ``_inviter_user_email_address``.
    """
    # NOTE: this file appears to be generated by the proto-plus code
    # generator; regenerate it rather than hand-editing field numbers.
    resource_name = proto.Field(proto.STRING, number=1,)
    user_id = proto.Field(proto.INT64, number=2,)
    # optional=True makes the field a proto3 `oneof` wrapper (presence-aware).
    email_address = proto.Field(proto.STRING, number=3, optional=True,)
    access_role = proto.Field(
        proto.ENUM, number=4, enum=gage_access_role.AccessRoleEnum.AccessRole,
    )
    access_creation_date_time = proto.Field(
        proto.STRING, number=6, optional=True,
    )
    inviter_user_email_address = proto.Field(
        proto.STRING, number=7, optional=True,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | GerhardusM.noreply@github.com |
dab29b7fb99b4053d194a299747a2da05ff8aaf5 | ea260e2d501eda7f04705dbe22a9263e1ffb99c9 | /lambda_function.py | ec573d04b0d8c88a2a2c8f3aa63ba9b9ca9897f0 | [] | no_license | PeterMitrano/my_desk | d4cd5edf3e054e9d51da4b0d6b6c16e0f088caa9 | dfd9816acb7fdb1dd9637b730fa738e2ffac1e8c | refs/heads/master | 2020-07-23T21:08:15.349369 | 2017-08-25T21:20:30 | 2017-08-25T21:20:30 | 66,438,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,832 | py | from collections import namedtuple
import logging
import requests
LOGGER = 'my_desk'
APP_ID = 'amzn1.ask.skill.999f5e91-7264-4660-b397-5efc340a51f9'
def send_command(command_endpoint, base_url='http://66.189.43.74:3776/'):
    """Send a desk command as an HTTP GET to the desk controller.

    Parameters:
        command_endpoint: path such as 'up', 'stop' or 'position/sit';
            falsy values mean the intent could not be resolved.
        base_url: controller address; parameterized (with the previous
            hard-coded value as default) so tests/other desks can override it.

    Returns a Result namedtuple (err, error_msg, err_speech); err is False
    on success, otherwise err_speech holds the message to speak back.
    """
    result = namedtuple('Result', ['err', 'error_msg', 'err_speech'])
    if not command_endpoint:
        return result(True, 'command endpoint is ' + str(command_endpoint),
                      "I could not understand your command.")
    try:
        get_result = requests.get(base_url + command_endpoint, timeout=3)
    except requests.exceptions.Timeout:
        return result(True, "get request timed out",
                      "Your desk did not respond to my command.")
    except requests.exceptions.RequestException as exc:
        # Connection refused / DNS failure etc. previously escaped and
        # crashed the Lambda handler.
        return result(True, "request failed: %s" % exc,
                      "I could not reach your desk.")
    if get_result.ok:
        return result(False, None, None)
    return result(True, "failed to get result",
                  "Your desk sent an invalid response.")
def _resolve_endpoint(request):
    """Map an Alexa intent request to a desk REST endpoint, or None when the
    request carries no intent or the intent/slot is unrecognized."""
    log = logging.getLogger(LOGGER)
    if 'intent' not in request:
        return None
    intent = request['intent']['name']
    if intent == 'GoToPositionIntent':
        position = request['intent']['slots']['Position']['value']
        if position in ('sit', 'sitting'):
            log.warning('sit')
            return 'position/sit'
        if position in ('stand', 'standing'):
            log.warning('stand')
            return 'position/stand'
        return None
    if intent == 'GoToHeightIntent':
        height = request['intent']['slots']['Height']['value']
        log.warning(height)
        return 'height/' + height
    # Intents whose endpoint is a fixed path.
    simple_intents = {
        'GoUpIntent': 'up',
        'GoDownIntent': 'down',
        'AMAZON.StopIntent': 'stop',
    }
    if intent in simple_intents:
        endpoint = simple_intents[intent]
        log.warning(endpoint)
        return endpoint
    return None


def handle_event(event, context):
    """AWS Lambda entry point for the Alexa skill.

    Validates the application id, resolves the intent to a desk endpoint,
    forwards it via send_command, and returns an Alexa-formatted response
    (speech on failure, empty response on success).

    Changes: Logger.warn (deprecated alias) replaced by warning(); intent
    dispatch extracted into _resolve_endpoint for readability.
    """
    logger = logging.getLogger(LOGGER)
    logger.warning(event)
    if event['session']['application']['applicationId'] != APP_ID:
        raise RuntimeError('Wrong applicationId')
    command_endpoint = _resolve_endpoint(event['request'])
    result = send_command(command_endpoint)
    if result.err:
        logger.warning("error sending command: %s", result.error_msg)
        response = {
            "version": 1.0,
            "response": {
                "outputSpeech": {
                    "type": "PlainText",
                    "text": result.err_speech
                }
            }
        }
    else:
        response = {"version": 1.0, "response": {}}
    logger.warning(response)
    return response
| [
"mitranopeter@gmail.com"
] | mitranopeter@gmail.com |
b7e46c5a94d311113b0ba3112f69ff066a02906b | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-AVFoundation/PyObjCTest/test_avoutputsettingsassistant.py | 94f07c16b0ffb29979c57ddd8ffc0e0cc3789951 | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | from PyObjCTools.TestSupport import *
import AVFoundation
class TestAVOutputSettingsAssistant(TestCase):
    """Checks that the AVOutputSettingsPreset* constants exist and are
    strings, gated on the macOS version that introduced each preset."""
    @min_os_level("10.9")
    def testConstants10_9(self):
        self.assertIsInstance(AVFoundation.AVOutputSettingsPreset640x480, unicode)
        self.assertIsInstance(AVFoundation.AVOutputSettingsPreset960x540, unicode)
        self.assertIsInstance(AVFoundation.AVOutputSettingsPreset1280x720, unicode)
        self.assertIsInstance(AVFoundation.AVOutputSettingsPreset1920x1080, unicode)
    @min_os_level("10.10")
    def testConstants10_10(self):
        self.assertIsInstance(AVFoundation.AVOutputSettingsPreset3840x2160, unicode)
    @min_os_level("10.13")
    def testConstants10_13(self):
        # HEVC presets arrived with macOS 10.13.
        self.assertIsInstance(AVFoundation.AVOutputSettingsPresetHEVC1920x1080, unicode)
        self.assertIsInstance(AVFoundation.AVOutputSettingsPresetHEVC3840x2160, unicode)
if __name__ == "__main__":
main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
a1025224fff76aee836ecf09e5384f9e919891e0 | 5336e77ea7dc15de19e8e0722549d0fe35c88699 | /auth_path/services_serializer.py | ca6ea65450602c5f22940f7caf8d4cc68258e090 | [] | no_license | xal9wiii4ik/django-crm | 37d16d6cbec3bda3e7751144d9f0466c5d8897d8 | ba0858a47ef8ab91a9d5a26ec5328ecaadfa1034 | refs/heads/master | 2023-03-30T20:17:47.265034 | 2021-04-08T21:53:44 | 2021-04-08T21:53:44 | 323,412,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from rest_framework import serializers
def verification_password(value: str) -> str:
    """Validate password strength and return the hashed password.

    Requires at least 8 characters, one uppercase ASCII letter and one
    digit; raises serializers.ValidationError naming the first failed rule
    (same check order as before: length, uppercase, digit).
    Fix: the length error message read "must have to have"; now "must
    contain", matching the other two messages.
    """
    if len(value) < 8:
        raise serializers.ValidationError('Password must contain at least 8 characters')
    if not any(c in set('QAZWSXEDCRFVTGBYHNUJMIKOLP') for c in value):
        raise serializers.ValidationError('Password must contain at least 1 uppercase letter')
    if not any(f in set('1234567890') for f in value):
        raise serializers.ValidationError('Password must contain at least 1 number')
    return make_password(value)
def verification_unique_email(value: str) -> str:
    """Validate that no user is registered with this email; return it unchanged.

    Raises serializers.ValidationError when a matching user exists.
    exists() lets the database answer without fetching every matching row,
    unlike the previous len(queryset) check.
    """
    if User.objects.filter(email=value).exists():
        raise serializers.ValidationError('User with given credentials already exist')
    return value
def verification_unique_username(value: str) -> str:
    """Validate that no user is registered with this username; return it unchanged.

    Raises serializers.ValidationError when a matching user exists.
    exists() lets the database answer without fetching every matching row.
    """
    if User.objects.filter(username=value).exists():
        raise serializers.ValidationError('User with given credentials already exist')
    return value
def verification_exist_email(value: str) -> str:
    """Validate that a user with this email exists; return it unchanged.

    Raises serializers.ValidationError when no matching user is found.
    exists() avoids materializing the queryset just to count it.
    """
    if not User.objects.filter(email=value).exists():
        raise serializers.ValidationError('User with given credentials are not found')
    return value
def verification_email_and_return_username(value: str) -> str:
    """Resolve an email to the matching user's username (first match).

    Raises serializers.ValidationError when no matching user is found.
    first() fetches a single row instead of the whole queryset.
    """
    user = User.objects.filter(email=value).first()
    if user is None:
        raise serializers.ValidationError('User with given credentials are not found')
    return user.username
| [
"xal9wIII4ik@yandex.ru"
] | xal9wIII4ik@yandex.ru |
3586ccadfb58facf6e41abb02ed1e53dd12448bd | d496f743372562ddeac41fb40619d725089d538f | /docker_box/urls.py | 7b2c8def66410af3dd27c133dbda786ad5bcd5a6 | [
"MIT"
] | permissive | druuu/docker-box | 18391919498e59631509e1203a00a0e76fb46e5d | 36619f91fbc8ac356b55e05d7301e8c27e015117 | refs/heads/master | 2021-01-21T11:27:11.003086 | 2018-03-10T18:52:12 | 2018-03-10T18:52:12 | 83,549,111 | 0 | 0 | null | 2017-03-01T11:53:16 | 2017-03-01T11:53:16 | null | UTF-8 | Python | false | false | 199 | py | from django.conf.urls import url, include
from django.contrib import admin
# URL routes: the Django admin, plus everything else delegated to the
# dockit app under the 'docker_box' namespace.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^', include('dockit.urls', namespace='docker_box')),
]
| [
"ashwin@micropyramid.com"
] | ashwin@micropyramid.com |
329fc3d0591d6575ee97725f1cab29d392776153 | 0097d779d4d7551569625f6cca16d8bb8e935712 | /python/password.py | ee22eba4d579bbbb78367e46753c0cb74931199c | [] | no_license | kamesh051/django_tutorial-master | 48b710e3b7382f6204a696187bc1e4e6f20c6a04 | 0ff99f1db6d73e569ec0aa8539c73118310acee1 | refs/heads/master | 2020-03-27T19:21:06.471604 | 2018-09-02T05:13:06 | 2018-09-02T05:13:06 | 146,983,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,790 | py | import random
import sys
import string
def input(help_text="Do you want me to help you with a password? \n"):
    # Python 2 prompt helper wrapping raw_input with a default question.
    # NOTE(review): deliberately shadows the builtin input() — every prompt
    # in main() below goes through this wrapper.
    return (str(raw_input(help_text)))
def main():
a = []
while True:
x = input()
if x == "yes":
while True:
z = input("How many characters, between 3 and 50, do you want it to have? \n")
if not z.isdigit() or int(z)<3 or int(z)>50:
print "please use a number between 3 and 50!"
else:
while True:
y = input("Do you want it to be a weak, medium, strong or insane password? \n")
if y!="weak" and y!="medium" and y!="strong" and y!="insane":
print "please use weak/medium/strong/insane "
elif y == "weak":
for i in xrange(int(z)):
b = [random.choice(string.ascii_lowercase)]
a.append(random.choice(b))
print "how about: \n", "".join(a)
sys.exit()
elif y == "medium":
for i in xrange(int(z)):
b = [random.choice(string.ascii_lowercase),random.choice(string.ascii_uppercase)]
a.append(random.choice(b))
print "how about: \n", "".join(a)
sys.exit()
elif y == "strong":
for i in xrange(int(z)):
b = [random.choice(string.ascii_lowercase),random.choice(string.digits),random.choice(string.ascii_uppercase)]
a.append(random.choice(b))
print "how about: \n", "".join(a)
sys.exit()
elif y == "insane":
for i in xrange(int(z)):
b = [random.choice(string.digits), random.choice(string.ascii_lowercase), random.choice("-_?!/.,';][)(~`@#$%^&*+|"),random.choice(string.ascii_uppercase)]
a.append(random.choice(b))
print "how about: \n", "".join(a)
sys.exit()
elif x == "no":
print "Ok, goodbye!"
sys.exit()
else:
print " please use yes or no!"
main()
| [
"kameshgithub@gmail.com"
] | kameshgithub@gmail.com |
15a68d03a1bb8812e543e6eac440ae430cd48763 | a2062bd70adf7c64d39401211d4597d010afdad3 | /21.05.20/Pandas02_02_FunEx07_송예지.py | e4b0d61eeb4fb7494fdb2b6054fd7128f7695800 | [] | no_license | yeijSong/LearningPandas | ebf3f3f2416bb3b7254ebf86ede8ce5f696bb1f9 | 6e79a54a4412455d9729412551a64a7e86534ced | refs/heads/main | 2023-05-24T10:38:41.514011 | 2021-06-21T13:25:28 | 2021-06-21T13:25:28 | 378,843,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | '''
# 매개변수에 초기값 미리 설정하기
say_myself 함수는 3개의 매개변수를 받아서
마지막 인수인 man이 True이면 남자, False면 여자 출력
default값이 man이므로 아무 것도 적지 않으면 man으로 인식
'''
def say_myself(name, old, man=True):
    """Print a three-line self-introduction: name, age, and a gender line.

    Args:
        name: display name inserted into the first line.
        old: age in years; formatted with %d, so it must be an integer.
        man: True prints the "male" line, False the "female" line.
    """
    intro = [
        '나의 이름은 %s입니다.' % name,
        '나이는 %d살입니다.' % old,
        '남자입니다' if man else '여자입니다',
    ]
    for line in intro:
        print(line)
# Demo calls: third argument defaults to True ("male"); the last call
# overrides it positionally with False ("female").
say_myself('소나무',27)
print()
say_myself('오렌지',25,False)
print()
'''
say_myself('오렌지',22,man)
이렇게 하면 오류가 뜸
man이 정의되지 않았다고 함
위에서 man을 참으로 정의했기 때문에
참, 거짓으로만 입력해야하는 것으로 보임
''' | [
"noreply@github.com"
] | yeijSong.noreply@github.com |
f8671f2e8b5bf4e1f21484e59b893f0ce9fb25ba | b06a317fd3d1d0f27d8d14731d2d84a1963d98eb | /commons/c2cgeoportal_commons/alembic/main/6a412d9437b1_rename_serverogc_to_ogcserver.py | d82ee0bd1f3fe67745fb033bdd9ed1952959a3f3 | [
"BSD-2-Clause-Views"
] | permissive | samupl/c2cgeoportal | 2844be2376b0598307a4c3e0732aa4e7d196d3be | 63a27ceacb47cc1db00d853b507ee3d568320a48 | refs/heads/master | 2020-03-30T00:58:58.166112 | 2018-09-26T17:42:37 | 2018-09-26T17:42:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,441 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2018, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
"""Rename ServerOGC to OGCServer
Revision ID: 6a412d9437b1
Revises: 29f2a32859ec
Create Date: 2016-06-28 18:08:23.888198
"""
from alembic import op
from c2c.template.config import config
# revision identifiers, used by Alembic.
# Alembic reads these module-level names to position this migration in the
# revision graph: `revision` is this step, `down_revision` its parent.
revision = '6a412d9437b1'
down_revision = '29f2a32859ec'
branch_labels = None
depends_on = None
def upgrade():
    """Rename table ``server_ogc`` to ``ogc_server`` and the matching
    foreign-key column on ``layer_wms`` (batch mode for SQLite support)."""
    target_schema = config['schema']
    op.rename_table('server_ogc', 'ogc_server', schema=target_schema)
    with op.batch_alter_table('layer_wms', schema=target_schema) as batch:
        batch.alter_column('server_ogc_id', new_column_name='ogc_server_id')
def downgrade():
    """Reverse :func:`upgrade`: restore the ``server_ogc`` table name and
    the ``server_ogc_id`` column on ``layer_wms``."""
    target_schema = config['schema']
    op.rename_table('ogc_server', 'server_ogc', schema=target_schema)
    with op.batch_alter_table('layer_wms', schema=target_schema) as batch:
        batch.alter_column('ogc_server_id', new_column_name='server_ogc_id')
| [
"stephane.brunner@camptocamp.com"
] | stephane.brunner@camptocamp.com |
17f5cdfad8ea3aa5b4807f8d00ed3fd9be67775e | 0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e | /regexlib/2021-5-15/python_re2_test_file/regexlib_3424.py | 88dbd13df9a356348055fff7129a8f2730844f25 | [
"MIT"
] | permissive | agentjacker/ReDoS-Benchmarks | c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6 | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | refs/heads/main | 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | # 3424
# (\s*<(\w*)\s+(((\w*)(=["']?)([\w\W\d]*?)["']?)+)\s*/?>)
# EXPONENT
# nums:5
# EXPONENT AttackString:"< "+"0="*16+"!_1_EOA(i or ii)"
import re2 as re
from time import perf_counter
regex = """(\s*<(\w*)\s+(((\w*)(=["']?)([\w\W\d]*?)["']?)+)\s*/?>)"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "< " + "0=" * i * 1 + "!_1_EOA(i or ii)"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!") | [
"liyt@ios.ac.cn"
] | liyt@ios.ac.cn |
7940b6f81821c86a3a9703791694818e2cf56511 | c7d08810eaa13882c65c2f2cf6b4eaa68239910b | /resume_autofill_site/resume_autofill_site/settings.py | ceb958789c264fa96a9cb770246abb973b5d0109 | [] | no_license | thayton/resume-auto-fill | cbcf9b9a81ef7ed7b28c51bebdf8b863bcab7e2d | a5bccb0c422b4d8ce36f15af29ef70847cf24c48 | refs/heads/master | 2021-01-24T22:13:34.514409 | 2014-11-03T20:16:51 | 2014-11-03T20:16:51 | 24,721,068 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,243 | py | """
Django settings for resume_autofill_site project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: two levels up from this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'm2^pd!sgx&ixcu&3u9lsly4e#u6g+7a-z$pi(uhp4433smnk5u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'resume_autofill',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'resume_autofill_site.urls'
WSGI_APPLICATION = 'resume_autofill_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Credentials come from the environment (empty string when unset);
# HOST/PORT are omitted, so Django uses the MySQL defaults (localhost).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'resume_autofill',
        'USER': os.environ.get("RESUME_AUTOFILL_DB_USER", ''),
        'PASSWORD': os.environ.get("RESUME_AUTOFILL_DB_PASSWORD", ''),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| [
"thayton@neekanee.com"
] | thayton@neekanee.com |
3ad723a11f004fabb06e1714eac83026827275b7 | c61310b3da23494fdcd2eae31b26a7c97dbab8a8 | /bl2_save_edit.py | 8ac96670f77ccdc7e2bdd5c9ff7fd46eaa408255 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Riroaki/borderlands2 | d64971ed73b6bf593c12435e4c503520d916849d | f400305e38445fc944d49f6fa8a0588eea25a86c | refs/heads/master | 2022-03-24T11:41:30.670307 | 2019-11-16T18:15:05 | 2019-11-16T18:15:05 | 262,360,555 | 1 | 0 | null | 2020-05-08T15:32:01 | 2020-05-08T15:32:01 | null | UTF-8 | Python | false | false | 673 | py | #!/usr/bin/env python3
import sys
import traceback
from borderlands.bl2 import AppBL2
if __name__ == "__main__":
try:
app = AppBL2(sys.argv[1:])
app.run()
except Exception as e:
print('Something went wrong, but please ensure you have the latest', file=sys.stderr)
print('version from https://github.com/apocalyptech/borderlands2 before', file=sys.stderr)
print('reporting a bug. Information useful for a report follows:', file=sys.stderr)
print('', file=sys.stderr)
print(repr(sys.argv), file=sys.stderr)
print('', file=sys.stderr)
traceback.print_exc(None, sys.stderr)
sys.exit(1)
| [
"pez@apocalyptech.com"
] | pez@apocalyptech.com |
865ca12781577b88d3a2bea93f25428c522a73ce | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/86/usersdata/160/53060/submittedfiles/pico.py | 2709fb36f0f75f68fe6937ef2397ffef9447892c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # -*- coding: utf-8 -*-
def pico(n):
    """Intended to report whether the list `n` contains a "peak".

    NOTE(review): the condition below compares n[i] with itself
    (`n[i]<n[i] and n[i]<n[i]`), which is always False, so `cont` never
    increments and the function always returns False. It presumably was
    meant to compare neighbouring elements (e.g. n[i] vs n[i+1]) --
    confirm the intended definition before fixing.
    """
    #CONTINUE...
    cont=0
    for i in range(0,len(n)-1,1):
        if n[i]<n[i] and n[i]<n[i]:
            cont=cont+1
    if cont!=0:
        return(True)
    else:
        return(False)
# Read the element count, then the elements themselves, one per prompt.
n = int(input('Digite a quantidade de elementos da lista: '))
#CONTINUE...
a=[]
for i in range(1,n+1,1):
    valor=int(input('Digite o elementos da lista:'))
    a.append(valor)
# NOTE(review): prints 'N' when pico() is True and 'S' otherwise, which
# reads inverted ("N"=no on success?) -- verify against the exercise spec.
if pico(a):
    print('N')
else:
    print('S')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
c1721a80983a02b687c9b844a9bb852d595586f8 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/coderByte_20200518204427.py | 3e1b5f8da2ca300d3e716d8bb95cc003dbd1d538 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py |
# NOTE(review): this is a work-in-progress snapshot (from a .history
# folder). The line `if numbers[i] =` is an incomplete statement -- the
# file does not parse as-is. Preserved verbatim; finish or remove it.
# Also note the parameter shadows the builtin `str`.
def QuestionsMarks(str):
    numbers = []
    # others = []
    # Collect digits (as ints) and '?' characters, in order of appearance.
    for char in str:
        if char.isdigit():
            numbers.append(int(char))
        elif char == '?':
            numbers.append(char)
    for i in range(len(numbers)):
        print(i)
        print(numbers[i])
        if numbers[i] =
        # break
    print(numbers.pop())
    return str
# keep this function call here
QuestionsMarks("acc?7??sss?3rr1??????5")
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
49fcbd8414edd891f3a5061eeebfcc6931da6384 | a667d52d9be08aab9f101952dfede0e29a43b012 | /src/apps/escuela/forms.py | ae8961a84de3e12b386ae7f35317ad2fbc91c7a8 | [] | no_license | Epatzan/app-suni | 50b04bbf58417e8cfba77a5e00c2d6a5a6537c16 | b7f9eaf62942797cd4f222ea2fb82348304aeaf4 | refs/heads/master | 2023-08-21T13:15:44.826842 | 2018-03-15T14:00:30 | 2018-03-15T14:00:30 | 123,631,541 | 0 | 0 | null | 2018-03-02T21:13:55 | 2018-03-02T21:13:55 | null | UTF-8 | Python | false | false | 5,314 | py | from django import forms
from datetime import date
from django.core.urlresolvers import reverse_lazy
from django.forms.models import inlineformset_factory
from django.forms.formsets import BaseFormSet, formset_factory
from apps.escuela.models import (
Escuela, EscContacto, EscContactoTelefono,
EscContactoMail, EscNivel, EscSector, EscPoblacion,
EscMatricula, EscRendimientoAcademico)
from apps.main.models import Departamento, Municipio
from apps.mye.models import Cooperante, Proyecto
class FormEscuelaCrear(forms.ModelForm):
    """ModelForm for creating an Escuela, with manual latitude/longitude
    inputs (rendered as number inputs with free step) replacing the
    excluded `mapa` field."""
    # Stored as CharFields but rendered with NumberInput so the browser
    # validates numeric entry; 'step': 'any' allows arbitrary precision.
    lat = forms.CharField(
        required=False,
        label='Latitud',
        widget=forms.NumberInput(attrs={'step': 'any'}))
    lng = forms.CharField(
        required=False,
        label='Longitud',
        widget=forms.NumberInput(attrs={'step': 'any'}))
    class Meta:
        model = Escuela
        fields = '__all__'
        exclude = ['mapa']
        widgets = {
            'municipio': forms.Select(attrs={'class': 'select2'})
        }
class EscuelaBuscarForm(forms.Form):
    """Search/filter form for schools; every field is optional so any
    combination of criteria can be applied."""
    # Tri-state filter: None = don't care, False = "Sí", True = "No".
    # NOTE(review): the booleans look inverted relative to their labels --
    # confirm against the view that consumes this form.
    ESTADO_CHOICES = (
        (None, 'No importa'),
        (False, 'Sí'),
        (True, 'No'),)
    codigo = forms.CharField(
        label='Código',
        required=False)
    nombre = forms.CharField(
        required=False)
    direccion = forms.CharField(
        label='Dirección',
        widget=forms.TextInput(),
        required=False)
    # The departamento select carries a data-url so the frontend can load
    # the dependent municipio options via the municipio API endpoint.
    departamento = forms.ModelChoiceField(
        queryset=Departamento.objects.all(),
        widget=forms.Select(attrs={'data-url': reverse_lazy('municipio_api_list')}),
        required=False)
    municipio = forms.ModelChoiceField(
        queryset=Municipio.objects.all(),
        required=False)
    nivel = forms.ModelChoiceField(
        queryset=EscNivel.objects.all(),
        required=False)
    sector = forms.ModelChoiceField(
        queryset=EscSector.objects.all(),
        required=False)
    poblacion_min = forms.IntegerField(
        label='Población mínima',
        required=False)
    poblacion_max = forms.IntegerField(
        label='Población máxima',
        required=False)
    # Solicitud / validación / equipamiento: tri-state presence filter plus
    # an optional exact record id.
    solicitud = forms.ChoiceField(
        required=False,
        choices=ESTADO_CHOICES)
    solicitud_id = forms.IntegerField(
        label='Número de solicitud',
        min_value=1,
        required=False)
    validacion = forms.ChoiceField(
        label='Validación',
        required=False,
        choices=ESTADO_CHOICES)
    validacion_id = forms.IntegerField(
        label='Número de validación',
        min_value=1,
        required=False)
    equipamiento = forms.ChoiceField(
        required=False,
        choices=ESTADO_CHOICES)
    equipamiento_id = forms.IntegerField(
        label='Número de entrega',
        min_value=1,
        required=False)
    cooperante_tpe = forms.ModelChoiceField(
        label='Cooperante de equipamiento',
        queryset=Cooperante.objects.all(),
        required=False)
    proyecto_tpe = forms.ModelChoiceField(
        label='Proyecto de equipamiento',
        queryset=Proyecto.objects.all(),
        required=False)
class EscPoblacionForm(forms.ModelForm):
    """ModelForm for a school population record with today's date
    pre-filled and the owning school passed as a hidden field."""
    # BUG FIX: `initial=date.today()` evaluated the date once at import
    # time, so a long-running process would keep serving a stale default.
    # Passing the callable lets Django evaluate it on every form render.
    fecha = forms.DateField(
        initial=date.today,
        widget=forms.TextInput(attrs={'class': 'datepicker'}))
    class Meta:
        model = EscPoblacion
        fields = '__all__'
        widgets = {
            'escuela': forms.HiddenInput()
        }
class ContactoForm(forms.ModelForm):
    """ModelForm for a school contact, adding free-standing phone and
    e-mail inputs on top of the EscContacto model fields."""
    # CharField rendered as a number input (min 0) so the browser rejects
    # non-numeric phone entry while the value is still stored as text.
    telefono = forms.CharField(
        required=False,
        widget=forms.NumberInput(attrs={'class': 'form-control', 'min': 0}))
    mail = forms.EmailField(
        required=False,
        widget=forms.EmailInput(attrs={'class': 'form-control'}))
    class Meta:
        model = EscContacto
        fields = '__all__'
        widgets = {
            'escuela': forms.HiddenInput()
        }
class EscContactoTelefonoForm(forms.ModelForm):
    """Phone-number form used inside the contact formsets; the owning
    contact is set by the inline formset, hence the exclude."""
    class Meta:
        model = EscContactoTelefono
        fields = '__all__'
        exclude = ['contacto']
class EscContactoTelefonoFormset(BaseFormSet):
    """Formset that rejects duplicate phone numbers across its forms."""
    def clean(self):
        """Raise ValidationError when the same `telefono` appears twice.

        Skips the cross-form check entirely if any individual form already
        has errors, mirroring Django's recommended formset-clean pattern.
        """
        if any(self.errors):
            return
        # Use a set for O(1) duplicate detection (was a list scan, O(n^2)).
        seen = set()
        for form in self.forms:
            if form.cleaned_data:
                telefono = form.cleaned_data['telefono']
                if telefono in seen:
                    raise forms.ValidationError('Los números no pueden repetirse')
                seen.add(telefono)
# Inline formsets editing a contact's phone numbers and e-mail addresses:
# one extra blank row, rows deletable.
ContactoTelefonoFormSet = inlineformset_factory(
    EscContacto,
    EscContactoTelefono,
    fields='__all__',
    extra=1,
    can_delete=True)
ContactoMailFormSet = inlineformset_factory(
    EscContacto,
    EscContactoMail,
    fields='__all__',
    extra=1,
    can_delete=True)
# NOTE(review): the same class is passed as both the form and the formset
# argument here; the first argument presumably should be a Form subclass
# (e.g. EscContactoTelefonoForm) -- confirm before relying on MailFormSet.
MailFormSet = formset_factory(EscContactoTelefonoFormset, formset=EscContactoTelefonoFormset)
class EscMatriculaForm(forms.ModelForm):
    """ModelForm for an enrollment (matrícula) record; the owning school
    is supplied via a hidden field."""
    class Meta:
        model = EscMatricula
        fields = '__all__'
        widgets = {
            'escuela': forms.HiddenInput()
        }
class EscRendimientoAcademicoForm(forms.ModelForm):
    """Form to create an `:class:EscRendimientoAcademico` (academic
    performance) record from a school; the school is a hidden field.
    """
    class Meta:
        model = EscRendimientoAcademico
        fields = '__all__'
        widgets = {
            'escuela': forms.HiddenInput()
        }
| [
"jinchuika@gmail.com"
] | jinchuika@gmail.com |
292d5b2771a74f723aa8855a241c3ca6fec6014f | d4432b0c95e5a25c489f825ba0f44e0ecd958669 | /lessons stormnet/lesson3/incapculation.py | 1ce2277132010aa31e15d8a698217dadd0fd5c79 | [] | no_license | romanannaev/python | 1c250c425224ab824492e4893edf786e35b14369 | e2a9015bfceeac940936758f84f0dfbf67897f1d | refs/heads/master | 2021-07-05T23:39:05.480997 | 2019-11-13T10:44:00 | 2019-11-13T10:44:00 | 175,044,359 | 2 | 0 | null | 2020-09-04T20:42:14 | 2019-03-11T16:50:24 | CSS | UTF-8 | Python | false | false | 280 | py | class Cat:
def __init__(self, name, female):
self.__name = name
self.female = female
def __get_mur(self):
print('mur-mur-mur')
# Demo: direct access to the mangled members fails (commented lines would
# raise AttributeError); the mangled names work.
mursik = Cat('mursik', 'kot')
# print(mursik.__name)
print(mursik.female)
# mursik.__get_mur()
mursik._Cat__get_mur()
| [
"romanannaev1992@gmail.com"
] | romanannaev1992@gmail.com |
249b5bd914c4442573b5cc09828915b2858e7e59 | 759d0ef07c5473dfdef37454e34259771d16deab | /GJMorph/auxFuncs.py | f074ad51570cc1232767f0014f87d6980ac556ef | [
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] | permissive | wachtlerlab/GJMorph | 3442ba85c08303238854f64b641c80197f7eb55f | a158e825ae3c20f94e1a6b12b2578aa1b3f42a8b | refs/heads/master | 2020-03-21T15:57:52.466404 | 2019-08-05T18:41:26 | 2019-08-05T18:41:26 | 138,742,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,867 | py | import numpy as np
# **********************************************************************************************************************
def resampleSWC(swcFile, resampleLength, mask=None, swcData=None, calculateBranchLens=False):
'''
Resample the SWC points to place points at every resamplelength along the central line of every segment. Radii are interpolated.
:param swcData: nx4 swc point data
:param resampleLength: length at with resampling is done.
:return: branchCenters, branchLens,
ndarray of shape (#pts, 7) with each row containing (node ID, node type, x, y, z, r, parent ID)
'''
if swcData is None:
swcData = np.loadtxt(swcFile)
inds = swcData[:, 0].tolist()
oldNewDict = {}
currentMax = 1
if mask is None:
mask = [True] * swcData.shape[0]
else:
assert len(mask) == swcData.shape[0], 'Supplied mask is invalid for ' + swcFile
resampledSWCData = []
getSegLen = lambda a, b: np.linalg.norm(a - b)
if calculateBranchLens:
branchCenters = []
branchLens = []
totalLen = 0
for pt in swcData:
if pt[6] < 0:
if mask[inds.index(int(pt[0]))]:
resampledSWCData.append([currentMax] + pt[1:].tolist())
oldNewDict[pt[0]] = currentMax
currentMax += 1
if (pt[6] > 0) and (int(pt[6]) in inds):
if mask[inds.index(int(pt[0]))]:
parentPt = swcData[inds.index(pt[6]), :]
segLen = getSegLen(pt[2:5], parentPt[2:5])
totalLen += segLen
currentParent = oldNewDict[pt[6]]
if segLen > resampleLength:
temp = pt[2:5] - parentPt[2:5]
distTemp = np.linalg.norm(temp)
unitDirection = temp / distTemp
radGrad = (pt[5] - parentPt[5]) / distTemp
for newPtsInd in range(1, int(np.floor(segLen / resampleLength)) + 1):
temp = [currentMax, pt[1]] + \
(parentPt[2:5] + newPtsInd * resampleLength * unitDirection).tolist()
temp.append(parentPt[5] + newPtsInd * radGrad * resampleLength)
if calculateBranchLens:
branchLens.append(resampleLength)
branchCenters.append(parentPt[2:5] + (newPtsInd - 0.5) * resampleLength * unitDirection)
temp.append(currentParent)
currentParent = currentMax
currentMax += 1
resampledSWCData.append(temp)
if calculateBranchLens:
branchCenters.append(0.5 * (pt[2:5] + np.array(resampledSWCData[-1][2:5])))
branchLens.append(np.linalg.norm(pt[2:5] - np.array(resampledSWCData[-1][2:5])))
resampledSWCData.append([currentMax] + pt[1:6].tolist() + [currentParent])
oldNewDict[pt[0]] = currentMax
currentMax += 1
else:
if calculateBranchLens:
branchCenters.append(0.5 * (pt[2:5] + parentPt[2:5]))
branchLens.append(segLen)
resampledSWCData.append([currentMax] + pt[1:6].tolist() + [currentParent])
oldNewDict[pt[0]] = currentMax
currentMax += 1
if calculateBranchLens:
return np.array(branchCenters), np.array(branchLens), np.array(resampledSWCData)
else:
return totalLen, np.array(resampledSWCData)
#***********************************************************************************************************************
def windowSWCPts(branchMeans, gridSize, translationIndicator=(0, 0, 0)):
    """
    Custom internal function, use at your own risk!

    Snap each row of `branchMeans` to the center of the voxel containing
    it, where voxels are cubes of side `gridSize` laid out so that one
    voxel is centered at -<translationIndicator> * <gridSize> * 0.5 (the
    default (0, 0, 0) puts a voxel center at the origin).

    :param branchMeans: np.array of shape (nRows, 3)
    :param gridSize: float, voxel edge length
    :param translationIndicator: three member iterable of floats
    :return: voxelCenters, np.array of shape (nRows, 3), rounded to 6 digits
    """
    shift = np.array(translationIndicator) * gridSize * 0.5
    shifted = branchMeans + shift
    # Round to the nearest grid index, scale back to coordinates, undo the shift.
    snapped = np.array(np.round(shifted / gridSize), dtype=np.int32) * gridSize
    return np.round(snapped - shift, 6)
#*********************************************************************************************************************** | [
"ajkumaraswamy@tutamail.com"
] | ajkumaraswamy@tutamail.com |
13b5621657465c1700dcd523a472a763187d94a1 | e60487a8f5aad5aab16e671dcd00f0e64379961b | /project/myVacations/vacay/migrations/0002_auto_20191218_2308.py | aef339aafb17c3650a5c1f96f30cfd82d74f60dd | [] | no_license | reenadangi/python | 4fde31737e5745bc5650d015e3fa4354ce9e87a9 | 568221ba417dda3be7f2ef1d2f393a7dea6ccb74 | refs/heads/master | 2021-08-18T08:25:40.774877 | 2021-03-27T22:20:17 | 2021-03-27T22:20:17 | 247,536,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | # Generated by Django 2.2.7 on 2019-12-19 05:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen destination.album_title to a
    TextField (depends on the app's initial migration)."""
    dependencies = [
        ('vacay', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='destination',
            name='album_title',
            field=models.TextField(),
        ),
    ]
| [
"reena.dangi@gmail.com"
] | reena.dangi@gmail.com |
eb590e4c84b9786a061331130dccde977b2d21b1 | fff54b01b46cef0bbc70a6469c88c01c82af5a57 | /programming/language/python3/python3-sip/actions.py | c8d1ffbb8fe07a364656fc2c7d076ef632a8a3fd | [] | no_license | LimeLinux/Packages | e51deae6c0d1406e31f06caa5aaa7749466bef0b | d492e075d8b051df68b98c315ad0628e33a8fac4 | refs/heads/master | 2021-01-11T12:37:22.150638 | 2018-08-30T18:24:32 | 2018-08-30T18:24:32 | 77,054,292 | 5 | 19 | null | 2018-02-02T17:24:06 | 2016-12-21T13:33:45 | Python | UTF-8 | Python | false | false | 737 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import shelltools
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import pythonmodules
from pisi.actionsapi import get
# Source directory of the unpacked tarball, e.g. "sip-4.x.y".
WorkDir = "sip-%s" % get.srcVERSION()
def setup():
    # Rewrite every Python.h include to the python3.6m header location,
    # then run sip's configure.py against the package build flags.
    shelltools.system("find . -type f -exec sed -i 's/Python.h/python3.6m\/Python.h/g' {} \;")
    pythonmodules.run('configure.py CFLAGS="%s" CXXFLAGS="%s"' % (get.CFLAGS(), get.CXXFLAGS()), pyVer = "3")
def build():
    # Standard make in WorkDir.
    autotools.make()
def install():
    # Install into the packaging staging directory (DESTDIR-style).
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
| [
"ergunsalman@hotmail.com"
] | ergunsalman@hotmail.com |
e641aa7c71cf66fa224790c752cd052fd1758d64 | aee4c0839933a11d8ce3c485d06595202dd3cabd | /keras/utils/timed_threads.py | 794fd243c42b7d426baedaf569d92cbf771be5b0 | [
"Apache-2.0"
] | permissive | xiaoheilong3112/keras | fc3025a2f14838bf8416b2faed766cb43da62f9b | 8d5e9b2163ec9b7d9f70920d1c7992b6df6820ec | refs/heads/master | 2023-08-07T18:23:36.804563 | 2023-07-25T19:16:12 | 2023-07-25T19:16:48 | 137,238,629 | 1 | 0 | Apache-2.0 | 2023-07-26T05:22:44 | 2018-06-13T15:59:45 | Python | UTF-8 | Python | false | false | 5,379 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Thread utilities."""
import abc
import threading
from absl import logging
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.utils.TimedThread", v1=[])
class TimedThread:
    """Time-based interval Threads.
    Runs a timed thread every x seconds. It can be used to run a threaded
    function alongside model training or any other snippet of code.
    Args:
        interval: The interval, in seconds, to wait between calls to the
            `on_interval` function.
        **kwargs: additional args that are passed to `threading.Thread`. By
            default, `Thread` is started as a `daemon` thread unless
            overridden by the user in `kwargs`.
    Examples:
    ```python
    class TimedLogIterations(keras.utils.TimedThread):
        def __init__(self, model, interval):
            self.model = model
            super().__init__(interval)
        def on_interval(self):
            # Logs Optimizer iterations every x seconds
            try:
                opt_iterations = self.model.optimizer.iterations.numpy()
                print(f"Epoch: {epoch}, Optimizer Iterations: {opt_iterations}")
            except Exception as e:
                print(str(e))  # To prevent thread from getting killed
    # `start` and `stop` the `TimerThread` manually. If the `on_interval` call
    # requires access to `model` or other objects, override `__init__` method.
    # Wrap it in a `try-except` to handle exceptions and `stop` the thread run.
    timed_logs = TimedLogIterations(model=model, interval=5)
    timed_logs.start()
    try:
        model.fit(...)
    finally:
        timed_logs.stop()
    # Alternatively, run the `TimedThread` in a context manager
    with TimedLogIterations(model=model, interval=5):
        model.fit(...)
    # If the timed thread instance needs access to callback events,
    # subclass both `TimedThread` and `Callback`. Note that when calling
    # `super`, they will have to called for each parent class if both of them
    # have the method that needs to be run. Also, note that `Callback` has
    # access to `model` as an attribute and need not be explictly provided.
    class LogThreadCallback(
        keras.utils.TimedThread, keras.callbacks.Callback
    ):
        def __init__(self, interval):
            self._epoch = 0
            keras.utils.TimedThread.__init__(self, interval)
            keras.callbacks.Callback.__init__(self)
        def on_interval(self):
            if self.epoch:
                opt_iter = self.model.optimizer.iterations.numpy()
                logging.info(f"Epoch: {self._epoch}, Opt Iteration: {opt_iter}")
        def on_epoch_begin(self, epoch, logs=None):
            self._epoch = epoch
    with LogThreadCallback(interval=5) as thread_callback:
        # It's required to pass `thread_callback` to also `callbacks` arg of
        # `model.fit` to be triggered on callback events.
        model.fit(..., callbacks=[thread_callback])
    ```
    """
    def __init__(self, interval, **kwargs):
        self.interval = interval
        # Default to daemon so the interpreter can exit without join().
        self.daemon = kwargs.pop("daemon", True)
        self.thread_kwargs = kwargs
        self.thread = None
        self.thread_stop_event = None
    def _call_on_interval(self):
        # Runs indefinitely once thread is started
        while not self.thread_stop_event.is_set():
            self.on_interval()
            # wait() doubles as an interruptible sleep: stop() wakes it early.
            self.thread_stop_event.wait(self.interval)
    def start(self):
        """Creates and starts the thread run."""
        if self.thread and self.thread.is_alive():
            logging.warning("Thread is already running.")
            return
        self.thread = threading.Thread(
            target=self._call_on_interval,
            daemon=self.daemon,
            **self.thread_kwargs
        )
        # Fresh event per start, so a stopped instance can be restarted.
        self.thread_stop_event = threading.Event()
        self.thread.start()
    def stop(self):
        """Stops the thread run."""
        # Signals the loop/wait in _call_on_interval; does not join the thread.
        if self.thread_stop_event:
            self.thread_stop_event.set()
    def is_alive(self):
        """Returns True if thread is running. Otherwise returns False."""
        if self.thread:
            return self.thread.is_alive()
        return False
    def __enter__(self):
        # Starts the thread in context manager
        self.start()
        return self
    def __exit__(self, *args, **kwargs):
        # Stops the thread run.
        self.stop()
    @abc.abstractmethod
    def on_interval(self):
        """User-defined behavior that is called in the thread."""
        # NOTE(review): the class is not an abc.ABC, so the decorator does
        # not prevent instantiation; the explicit raise below is the guard.
        raise NotImplementedError(
            "Runs every x interval seconds. Needs to be "
            "implemented in subclasses of `TimedThread`"
        )
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
2eca7755846fedb58b2c1bacaa67edd161a96c9b | ac0894b411507bfd027696b6bf11b5e384ed68fc | /need-to-do/python3------download-problem--of--leetcode/845.longest-mountain-in-array.py | c66780c77e698c67fa30867bea3b0d5858a98fac | [] | no_license | mkzpd/leetcode-solution | 1d19554628c34c74012fa52582c225e6dccb345c | 60c9b218683bcdee86477a910c58ec702185c726 | refs/heads/master | 2020-05-31T05:56:48.985529 | 2019-09-20T09:10:49 | 2019-09-20T09:10:49 | 190,128,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | #
# @lc app=leetcode id=845 lang=python3
#
# [845] Longest Mountain in Array
#
# https://leetcode.com/problems/longest-mountain-in-array/description/
#
# algorithms
# Medium (34.97%)
# Total Accepted: 21.6K
# Total Submissions: 61.6K
# Testcase Example: '[2,1,4,7,3,2,5]'
#
# Let's call any (contiguous) subarray B (of A) a mountain if the following
# properties hold:
#
#
# B.length >= 3
# There exists some 0 < i < B.length - 1 such that B[0] < B[1] < ... B[i-1] <
# B[i] > B[i+1] > ... > B[B.length - 1]
#
#
# (Note that B could be any subarray of A, including the entire array A.)
#
# Given an array A of integers, return the length of the longest mountain.
#
# Return 0 if there is no mountain.
#
# Example 1:
#
#
# Input: [2,1,4,7,3,2,5]
# Output: 5
# Explanation: The largest mountain is [1,4,7,3,2] which has length 5.
#
#
# Example 2:
#
#
# Input: [2,2,2]
# Output: 0
# Explanation: There is no mountain.
#
#
# Note:
#
#
# 0 <= A.length <= 10000
# 0 <= A[i] <= 10000
#
#
# Follow up:
#
#
# Can you solve it using only one pass?
# Can you solve it in O(1) space?
#
#
#
class Solution:
    def longestMountain(self, A: List[int]) -> int:
        """Return the length of the longest mountain subarray of A.

        A mountain strictly increases to a peak and then strictly
        decreases, and must have length >= 3. Returns 0 when no mountain
        exists. One pass, O(1) extra space.
        """
        n = len(A)
        best = 0
        i = 0
        while i < n:
            base = i  # candidate mountain start
            if i + 1 < n and A[i] < A[i + 1]:
                # Climb the strictly increasing slope to the peak.
                while i + 1 < n and A[i] < A[i + 1]:
                    i += 1
                # A mountain needs a strictly decreasing slope after the peak.
                if i + 1 < n and A[i] > A[i + 1]:
                    while i + 1 < n and A[i] > A[i + 1]:
                        i += 1
                    best = max(best, i - base + 1)
                # The descent's end (or the peak) can start the next mountain,
                # so do not advance past it here.
            else:
                i += 1  # flat or descending start: no mountain begins here
        return best
| [
"sodgso262@gmail.com"
] | sodgso262@gmail.com |
5a1176e053d3166f99224f9c3611db49c2d3ac53 | d2821e3679389796d65b423ef10a8ce42a419d56 | /exampleproject/views.py | 1e40dad12f629d2fa64569ab2109cdee9ba6a59f | [
"BSD-3-Clause"
] | permissive | zerc/django_molder | e47750108213bbfec08cf6eb40eb69db6564e1ba | 23d0672eaa60b7bdace0252136bbf8ad9c7631ea | refs/heads/master | 2023-01-06T17:55:49.748944 | 2015-07-04T18:44:23 | 2015-07-04T18:44:23 | 35,910,107 | 0 | 0 | BSD-3-Clause | 2022-12-26T19:44:00 | 2015-05-19T21:41:41 | Python | UTF-8 | Python | false | false | 348 | py | # coding: utf-8
from django.views.generic.base import TemplateView
from forms import FormOne
class IndexPage(TemplateView):
    """Landing page view: renders index.html with an unbound FormOne
    available in the template context as `form_one`."""
    template_name = 'index.html'
    def get_context_data(self, *args, **kwargs):
        ctx = super(IndexPage, self).get_context_data(*args, **kwargs)
        ctx.update(form_one=FormOne())
        return ctx
| [
"zero13cool@yandex.ru"
] | zero13cool@yandex.ru |
0793c4f5593285e6f03140c10385fccb1b2d9643 | 62766deea531d0b89b86a53e6f51b94fd2a88f23 | /AtCoder/ABC/014/b.py | fb73f462150a40d9afa30384818cff98f81943e2 | [
"MIT"
] | permissive | ttyskg/ProgrammingCompetition | 53620b07317ae5cbd1ee06272e573e3682ac15f3 | 885c5a1be228ae7ba9f00b3d63521c9ff7d21608 | refs/heads/master | 2023-08-18T08:38:33.068168 | 2023-08-15T04:28:13 | 2023-08-15T04:28:13 | 183,425,786 | 0 | 0 | MIT | 2023-08-15T04:28:14 | 2019-04-25T12:02:53 | Python | UTF-8 | Python | false | false | 292 | py | import sys
def main():
    """Read n and a bitmask X, then n values; return the sum of the
    values whose positional bit is set in X (AtCoder ABC014 B)."""
    read = sys.stdin.readline
    n, X = map(int, read().split())
    # Reverse so that index i pairs with bit 2**(n-1-i), i.e. A[j] is
    # counted exactly when bit j of X is set.
    values = list(map(int, read().split()))[::-1]
    return sum(v for i, v in enumerate(values) if X & 2 ** (n - 1 - i))
# Entry point: only runs when executed as a script; prints the answer.
if __name__ == '__main__':
    print(main())
| [
"tatsu100311@gmail.com"
] | tatsu100311@gmail.com |
4e33a21ad3f9248831981123ced61cd4eff6d9b8 | f57907e356079871a8b6d9292dfdb99572098f15 | /DM_int_report_drw/hierarchicalClustering.py | 25f33b0fa60de38268260bfb0c6c5029cfce9558 | [] | no_license | drwiner/WesternCorpus | 421c6b7e2d142b9d46eacd59062e42c403e33be7 | aaac96260c9e976ac030bf38c038a36d48e994ff | refs/heads/master | 2021-01-11T16:56:56.664567 | 2017-11-15T00:49:36 | 2017-11-15T00:49:36 | 79,701,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,955 | py | import numpy as np
from itertools import product
from clockdeco import clock
import math
# import collections
# NamedPoint = collections.namedtuple('NamedPoint', ['id', 'point'])
class NamedPoint:
    """A coordinate vector tagged with an integer id.

    NOTE(review): __hash__ uses int_id but __eq__ is not defined, so
    equality (and set membership) remains identity-based; two instances
    with the same id are still distinct set elements -- confirm ids are
    unique in the loaded data.
    """
    def __init__(self, int_id, point):
        self.int_id = int_id
        self.point = point  # presumably an np.ndarray of coordinates -- see initialLoading
    def __hash__(self):
        return hash(self.int_id)
    def __repr__(self):
        return 'named_point: ' + str(self.int_id) + ' ' + str(self.point)
    def __add__(self, other):
        # Returns the raw coordinate sum (not a NamedPoint); used for
        # accumulating centroids.
        return self.point + other.point
class Cluster:
    """A mutable cluster of NamedPoint members for agglomerative clustering.

    Indexing/iteration (via __getitem__) yields the member points directly,
    which is what the linkage functions (singleLink/completeLink/meanLink)
    rely on when they access each element's `.point`.
    """
    def __init__(self, int_id, points):
        self.int_id = int_id
        # BUG FIX: the original stored `[points]`, nesting the caller's
        # list (self.points became [[point]]), so iterating a cluster
        # yielded lists instead of points and the linkage functions broke.
        # Copy the given sequence into a flat, cluster-owned list.
        self.points = list(points)
    def absorb(self, cluster):
        # Merge another cluster's members into this one (in place).
        self.points.extend(cluster.points)
    def __hash__(self):
        # NOTE(review): no __eq__ is defined, so equality stays identity
        # based while hashing uses int_id -- ids should be unique.
        return hash(self.int_id)
    def __len__(self):
        return len(self.points)
    def __getitem__(self, pos):
        return self.points[pos]
    def __repr__(self):
        n = 'cluster: ' + str(self.int_id) + ' --- '
        return n + ' '.join(str(p) for p in self.points)
def dotDistance(s1, s2):
return math.sqrt(np.dot(s1.point - s2.point, s1.point - s2.point))
def nopointDist(a1, a2):
return math.sqrt(np.dot(a1 - a2, a1 - a2))
# @clock
def singleLink(S1, S2):
# S1 and S2 are clusters, possibly with just 1 entity
# each entity has a point
S_prod = set(product(S1, S2))
return min(dotDistance(s1,s2) for s1,s2 in S_prod)
# @clock
def completeLink(S1, S2):
S_prod = set(product(S1, S2))
return max(dotDistance(s1,s2) for s1, s2 in S_prod)
# @clock
def meanLink(S1, S2):
a1 = (1/len(S1))*sum(s.point for s in S1)
a2 = (1/len(S2))*sum(s.point for s in S2)
return nopointDist(a1, a2)
points = set()
def initialLoading(text_name):
# initial loading
C1 = open('C1.txt')
# points = set()
for line in C1:
split_line = line.split()
p = np.array([float(i) for i in split_line[1:]])
points.add(NamedPoint(int(split_line[0]), p))
singleLink_clusters = set()
completeLink_clusters = set()
meanLink_clusters = set()
def initClusters():
# initialization:
for point in points:
p = [point]
singleLink_clusters.add(Cluster(point.int_id, p))
completeLink_clusters.add(Cluster(point.int_id, p))
meanLink_clusters.add(Cluster(point.int_id, p))
@clock
def h_clustering(clusters, k, dist_method):
clusts = set(clusters)
while len(clusts) > k:
pairwise_clusters = set(product(clusts, clusts))
arg_mins = None
m = float("inf")
for c1, c2 in pairwise_clusters:
if c1 == c2:
continue
value = dist_method(c1, c2)
if value < m:
m = value
arg_mins = (c1, c2)
if arg_mins is None:
print('wtf')
c1, c2 = arg_mins
if len(c1) < len(c2):
c2.absorb(c1)
clusts = clusts - {c1}
else:
c1.absorb(c2)
clusts = clusts - {c2}
return clusts
# def output(k):
# k = 4
# sl_clusts = h_clustering(singleLink_clusters, k, singleLink)
# print('Shortest Link:\n')
# for clust in sl_clusts:
# print(clust)
# for point in clust:
# print(point.int_id,point.point)
# print('\n')
# print('Complete Link:\n')
# cl_clusts = h_clustering(completeLink_clusters, k, completeLink)
# for clust in cl_clusts:
# print(clust)
# for point in clust:
# print(point.int_id, point.point)
# print('\n')
# print('Mean Link:\n')
# ml_clusts = h_clustering(meanLink_clusters, k, meanLink)
# for clust in ml_clusts:
# print(clust)
# for point in clust:
# print(point.int_id, point.point)
# print('\n')
#
# import matplotlib.pyplot as plt
#
# colours = ['r', 'g', 'y', 'b']
# s1 = list(sl_clusts)
#
# for i in range(k):
# x = [p.point[0] for p in s1[i]]
# y = [p.point[1] for p in s1[i]]
# plt.scatter(x, y, c=colours[i])
#
# plt.show()
#
# s1 = list(cl_clusts)
#
# for i in range(k):
# x = [p.point[0] for p in s1[i]]
# y = [p.point[1] for p in s1[i]]
# plt.scatter(x, y, c=colours[i])
#
# plt.show()
#
# s1 = list(ml_clusts)
#
# for i in range(k):
# x = [p.point[0] for p in s1[i]]
# y = [p.point[1] for p in s1[i]]
# plt.scatter(x, y, c=colours[i])
#
# plt.show()
# initialLoading(45)
# initClusters()
# output(4) | [
"drwiner131@gmail.com"
] | drwiner131@gmail.com |
bc3b56a85820774d1ac5eb6f3e9241bd004eb841 | 693567f042c6bd93ecdda41cb5db81c55ccf3158 | /List/reverce a list.py | a12c0980bd0bc7958284b52cdbd7adfc033583fc | [] | no_license | supriyo-pal/Python-Practice | 5806e0045ebfeb04856246a245430e2ab7921ba9 | 2025369f0d23d603ad27eaff149500137e98dbcf | refs/heads/main | 2023-01-25T05:31:58.404283 | 2020-12-09T19:08:22 | 2020-12-09T19:08:22 | 317,021,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 2 23:52:19 2020
@author: Supriyo
"""
list1=[87,56,89,65,45,23]
def Reverce(list1):
return [ele for ele in reversed(list1)]
print(Reverce(list1)) | [
"noreply@github.com"
] | supriyo-pal.noreply@github.com |
e56e255fb03e55b4a999dc58e4c69021594129ee | 2fd4de2f0820f186c735f0619bce2a0318bbfc38 | /examples/demo.py | d97e0a9e2b393666a9c4f11e75bd4607ebbe1e27 | [
"MIT"
] | permissive | SunYanCN/AppZoo | e90b778fefdaf1a440c3fd40d078b5396e4e3f06 | 91b04cc75fcc5f70ae5819e98233ea9146c1f001 | refs/heads/master | 2023-08-22T05:41:22.175291 | 2021-10-12T13:37:21 | 2021-10-12T13:37:21 | 359,024,301 | 0 | 0 | MIT | 2021-09-05T12:24:47 | 2021-04-18T02:12:40 | Python | UTF-8 | Python | false | false | 1,424 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-App.
# @File : demo
# @Time : 2019-11-13 15:44
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
import asyncio
from datetime import datetime, time, timedelta
from sanic import Sanic
from sanic_scheduler import SanicScheduler, task
app = Sanic()
scheduler = SanicScheduler(app)
import values
import os
d = {}
@task(timedelta(seconds=3))
def hello(app):
"""Runs the function every 3 seconds."""
import time
d['a'] = time.ctime()
print(os.popen("ls").read())
# values.set_value(time.ctime())
print("Hello, {0}".format(app), datetime.now())
@task(timedelta(hours=1), time(hour=1, minute=30))
async def foo_bar(_):
"""Runs the function every 1 hours after 1:30."""
print("Foo", datetime.now())
await asyncio.sleep(1)
print("Bar")
@task(timedelta(minutes=2), timedelta(seconds=10))
def baz(_):
"""Runs the function every 2 minutes after 10 seconds."""
print("Baz", datetime.now())
@task(start=timedelta(seconds=10))
def another(_):
"""Run the function after 10 seconds once."""
print("another", datetime.now())
from appzoo import App
app_ = App()
app_.app = app
app_.add_route('/', lambda **kwargs: d) # values.get_value()
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True, workers=4)
| [
"313303303@qq.com"
] | 313303303@qq.com |
6282b45debae15af7a0b552e1dc0444245d5ceea | 4659f8758c5204ff27a14dd4352dc63f564c1136 | /my_library/db/models/association.py | fe2af630bbbd484a70bb7278b14199ee2e95ada1 | [
"MIT"
] | permissive | mplanchard/webservice_template | 71823dcd2b98fa93aa2145011ee5a7b820c25f77 | fe3e865909d56d8c010f55e08dc6faf6bf4f8ef2 | refs/heads/master | 2021-03-27T11:12:06.470951 | 2018-03-29T02:34:51 | 2018-03-29T02:34:51 | 123,056,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | """Association tables."""
from __future__ import absolute_import, unicode_literals
from sqlalchemy import Column, Integer, ForeignKey, Table
from .base import Base
books_authors = Table(
'books_authors',
Base.metadata,
Column('author_id', Integer, ForeignKey('authors.id')),
Column('book_id', Integer, ForeignKey('books.id')),
)
| [
"msplanchard@gmail.com"
] | msplanchard@gmail.com |
60b2bbc2b50ed814b1dc6531018652e7245713ea | a57207602b4da08b0433b1b513c25788476460a1 | /bac_tasks/pipelines/annotation.py | 2774c9388a75ffc343c636eaef3ca6f1864bdcd9 | [] | no_license | Antonior26/BaC | 2e8a1bab08a6e70eb9390947850db2a0c5bca81f | 53cf4dcdbc4e56157dbf9e661bcec71cd89d0289 | refs/heads/master | 2020-04-17T15:42:29.871904 | 2019-12-15T18:04:48 | 2019-12-15T18:04:48 | 166,709,586 | 1 | 0 | null | 2020-04-09T20:55:53 | 2019-01-20T21:08:13 | Python | UTF-8 | Python | false | false | 2,940 | py | import json
import os
from django.conf import settings
from bac_tasks.pipelines.base import JobFailedException, PipelineComponent
class Annotation(PipelineComponent):
_name = 'ANNOTATION'
_workflow = {
"stages":
[
{"name": "call_features_CDS_glimmer3",
"glimmer3_parameters": {
"min_training_len": "2000"
}},
{"name": "annotate_proteins_similarity",
"similarity_parameters": {
"annotate_hypothetical_only": "1"
}},
{"name": "resolve_overlapping_features",
"resolve_overlapping_features_parameters": {}
}
]
}
def _run(self):
name = self.sample.identifier
assembly = self.sample.assembly
output_dir = self.result_folder
sp = self.sample.isolate.species.name
rast_create_genome = settings.ANNOTATION_PATHS['rast_create_genome']
rast_process_genome = settings.ANNOTATION_PATHS['rast_process_genome']
rast_export_genome = settings.ANNOTATION_PATHS['rast_export_genome']
os.makedirs(output_dir, exist_ok=True)
output_dir_exports = os.path.join(output_dir, 'exports')
os.makedirs(output_dir_exports, exist_ok=True)
workflow = os.path.join(output_dir, name + '.workflow')
fdw = open(workflow, 'w')
json.dump(self._workflow, fdw)
fdw.close()
gto = os.path.join(output_dir, name + '.gto')
gto2 = os.path.join(output_dir, name + '.gto2')
genebank = os.path.join(output_dir_exports, name + '.gbk')
gff = os.path.join(output_dir_exports, name + '.gff')
embl = os.path.join(output_dir_exports, name + '.embl')
rna_fasta = os.path.join(output_dir_exports, name + '.rna.fasta')
cds_fasta = os.path.join(output_dir_exports, name + '.cds.fasta')
protein_fasta = os.path.join(output_dir_exports, name + '.proteins.fasta')
self.pipeline_step(rast_create_genome, '-o', gto, '--contig', assembly, '--genetic-code', '11', '--genome-id',
name, '--domain', 'Bacteria', '--scientific-name', sp)
self.pipeline_step(rast_process_genome, '-o', gto2, '-i', gto, '--workflow', workflow)
self.pipeline_step(rast_export_genome, 'gff', '-i', gto2, '-o', gff)
self.pipeline_step(rast_export_genome, 'protein_fasta', '-i', gto2, '-o', protein_fasta)
self.pipeline_step(rast_export_genome, 'feature_dna', '--feature-type', 'rna', '-i', gto2, '-o', rna_fasta)
self.pipeline_step(rast_export_genome, 'feature_dna', '--feature-type', 'CDS', '-i', gto2, '-o', cds_fasta)
return self._results
def post_run(self):
if self._results is None:
JobFailedException('Please execute job first using execute method')
self.sample.rast_folder = self._results
self.sample.save()
| [
"antonio.rueda-martin@genomicsengland.co.uk"
] | antonio.rueda-martin@genomicsengland.co.uk |
b927a00f944c15635399243be39c7fa5201b5b7e | fd2908f80e6a20d1a2c9e7f39bc18ce53e625e9f | /esp-2-Compton/petrillo/plotcasetta.py | a2e14dea094cada89642c7795f05320d0304d77e | [] | no_license | Gattocrucco/lab4mpr | a79ecdcb293923188fcef637c7566bbb3904af05 | c613bb8e57c2943123697789f5b600483a2b4ff6 | refs/heads/master | 2021-09-15T08:15:19.129544 | 2018-05-28T23:20:47 | 2018-05-28T23:20:47 | 111,024,426 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | import histo
import numpy as np
from matplotlib import pyplot as plt
files = ['../dati/histo-16feb-ang45.dat', '../dati/histo-16feb-ang45-casetta.dat']
titles = ['spettro a 45°' , 'spettro a 45° con schermaggio']
fig = plt.figure('plotcasetta', figsize=[6.88, 2.93])
fig.clf()
fig.set_tight_layout(True)
for i in range(len(files)):
counts = np.loadtxt(files[i], unpack=True, dtype='u2')
edges = np.arange(2 ** 13 + 1)
rebin = 32
counts = histo.partial_sum(counts, rebin)
edges = edges[::rebin]
ax = fig.add_subplot(1, 2, i + 1)
histo.bar_line(edges, counts, ax=ax, color='black')
if i == 0:
ax.set_ylabel('conteggio [(%d$\\cdot$digit)$^{-1}$]' % (rebin,))
ax.set_xlabel('canale ADC [digit]')
ax.set_title(titles[i])
ax.grid(linestyle=':')
fig.show()
| [
"info@giacomopetrillo.com"
] | info@giacomopetrillo.com |
1a2c142d1f04cf5c4e7320b05bf0b4af8adb51c2 | 165eb709370407093bd6ba22e466f6070ea2123c | /examples/adwords/v201601/campaign_management/add_campaign_labels.py | 78d075acd7bee801719188e315c8140782ec59b5 | [
"Apache-2.0"
] | permissive | pawankydv/googleads-python-lib | 92ed86d74a09a91fd3c95d6471a8c23adb0de440 | c0f8ce6c4acfe88ce8f913a4f0e0e92b548e1022 | refs/heads/master | 2022-06-29T05:26:19.996211 | 2016-02-23T20:07:02 | 2016-02-23T20:07:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,012 | py | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a label to multiple campaigns.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
CAMPAIGN_ID1 = 'INSERT_FIRST_CAMPAIGN_ID_HERE'
CAMPAIGN_ID2 = 'INSERT_SECOND_CAMPAIGN_ID_HERE'
LABEL_ID = 'INSERT_LABEL_ID_HERE'
def main(client, campaign_id1, campaign_id2, label_id):
# Initialize appropriate service.
campaign_service = client.GetService('CampaignService', version='v201601')
operations = [
{
'operator': 'ADD',
'operand': {
'campaignId': campaign_id1,
'labelId': label_id,
}
},
{
'operator': 'ADD',
'operand': {
'campaignId': campaign_id2,
'labelId': label_id,
}
}
]
result = campaign_service.mutateLabel(operations)
# Display results.
for label in result['value']:
print ('CampaignLabel with campaignId \'%s\' and labelId \'%s\' was added.'
% (label['campaignId'], label['labelId']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, CAMPAIGN_ID1, CAMPAIGN_ID2, LABEL_ID)
| [
"msaniscalchi@google.com"
] | msaniscalchi@google.com |
bedb27f13c256128040486c37f3d346febca1cca | 610349599d32d7fc5ddae5dcb202836ca8be50aa | /blog/migrations/0013_auto_20200917_0441.py | 25848d5bebfae32506aece8caf48f7f56de261d3 | [] | no_license | reetjakhar09/blogs | e3d9d14c01096e4a50474b5a7f562bea7b655a76 | d0e17a8dd3761aaa08a59c466820040e05dc300a | refs/heads/master | 2022-12-20T05:03:50.350408 | 2020-09-29T16:40:17 | 2020-09-29T16:40:17 | 299,676,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,453 | py | # Generated by Django 2.2.16 on 2020-09-17 04:41
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('blog', '0012_auto_20200916_1140'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80)),
('email', models.EmailField(max_length=254)),
('body', models.TextField()),
('created_on', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=False)),
],
options={
'ordering': ['created_on'],
},
),
migrations.AlterField(
model_name='post',
name='slug',
field=django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='title'),
),
migrations.AlterField(
model_name='tag',
name='slug',
field=django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='title'),
),
migrations.AddField(
model_name='post',
name='comments',
field=models.ManyToManyField(to='blog.Comment'),
),
]
| [
"test@gmail.com"
] | test@gmail.com |
b4c033e2dc7ef615aa538096e2591b4a6e59ef60 | fe0017ae33385d7a2857d0aa39fa8861b40c8a88 | /env/lib/python3.8/site-packages/pandas/core/arrays/_mixins.py | b11a853c8d39e5b6959a994d7637c842b006251d | [] | no_license | enriquemoncerrat/frasesback | eec60cc7f078f9d24d155713ca8aa86f401c61bf | e2c77f839c77f54e08a2f0930880cf423e66165b | refs/heads/main | 2023-01-03T23:21:05.968846 | 2020-10-18T21:20:27 | 2020-10-18T21:20:27 | 305,198,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,460 | py | from typing import Any, Sequence, Tuple, TypeVar
import numpy as np
from pandas.compat.numpy import function as nv
from pandas.core.algorithms import take, unique
from pandas.core.arrays.base import ExtensionArray
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
_T = TypeVar("_T", bound="NDArrayBackedExtensionArray")
class NDArrayBackedExtensionArray(ExtensionArray):
"""
ExtensionArray that is backed by a single NumPy ndarray.
"""
_ndarray: np.ndarray
def _from_backing_data(self: _T, arr: np.ndarray) -> _T:
"""
Construct a new ExtensionArray `new_array` with `arr` as its _ndarray.
This should round-trip:
self == self._from_backing_data(self._ndarray)
"""
raise AbstractMethodError(self)
# ------------------------------------------------------------------------
def take(
self: _T,
indices: Sequence[int],
allow_fill: bool = False,
fill_value: Any = None,
) -> _T:
if allow_fill:
fill_value = self._validate_fill_value(fill_value)
new_data = take(
self._ndarray, indices, allow_fill=allow_fill, fill_value=fill_value,
)
return self._from_backing_data(new_data)
def _validate_fill_value(self, fill_value):
"""
If a fill_value is passed to `take` convert it to a representation
suitable for self._ndarray, raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : native representation
Raises
------
ValueError
"""
raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# TODO: make this a cache_readonly; for that to work we need to remove
# the _index_data kludge in libreduction
@property
def shape(self) -> Tuple[int, ...]:
return self._ndarray.shape
def __len__(self) -> int:
return self.shape[0]
@cache_readonly
def ndim(self) -> int:
return len(self.shape)
@cache_readonly
def size(self) -> int:
return np.prod(self.shape)
@cache_readonly
def nbytes(self) -> int:
return self._ndarray.nbytes
def reshape(self: _T, *args, **kwargs) -> _T:
new_data = self._ndarray.reshape(*args, **kwargs)
return self._from_backing_data(new_data)
def ravel(self: _T, *args, **kwargs) -> _T:
new_data = self._ndarray.ravel(*args, **kwargs)
return self._from_backing_data(new_data)
@property
def T(self: _T) -> _T:
new_data = self._ndarray.T
return self._from_backing_data(new_data)
# ------------------------------------------------------------------------
def copy(self: _T) -> _T:
new_data = self._ndarray.copy()
return self._from_backing_data(new_data)
def repeat(self: _T, repeats, axis=None) -> _T:
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(tuple(), dict(axis=axis))
new_data = self._ndarray.repeat(repeats, axis=axis)
return self._from_backing_data(new_data)
def unique(self: _T) -> _T:
new_data = unique(self._ndarray)
return self._from_backing_data(new_data)
| [
"enriquemoncerrat@gmail.com"
] | enriquemoncerrat@gmail.com |
bc19ab8e189e29a058a5dded83af0e6c030da832 | d5fe9d0c7c93c3250b9e212435b02d8373dec091 | /pro/8.py | 98751a3c8367041e394594d3ac34676580508ce2 | [] | no_license | HarshaaArunachalam/GUV | 6937adb84f0928f08c9fbc519310abc06ef3541a | c047887bf6c19a4950c5f634111e1c02966367e5 | refs/heads/master | 2020-05-31T10:52:23.280052 | 2019-08-10T20:23:11 | 2019-08-10T20:23:11 | 190,249,464 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | def GCD(a,b):
z=0
c=min(a,b)
for k in range(c,1,-1):
if(a%k==0)and (b%k==0):
return k
z=1
if z!=1:
return 1
x=input()
y=x.split(" ")
z=[]
w=[]
res=[]
z1=int(y[0])
z2=int(y[1])
z3=input()
u=z3.split(" ")
for i in range(len(u)):
w.append(int(u[i]))
for j in range(z2):
f=input()
g=f.split(" ")
g1=int(g[0])-1
g2=int(g[1])-1
s=GCD(w[g1],w[g2])
res.append(s)
for l in res:
print(str(l))
| [
"noreply@github.com"
] | HarshaaArunachalam.noreply@github.com |
1a8d14085751c17d7831e7fca361ca6d2d552255 | 5ce040197421e557f8e7337183c2420d1cb898b0 | /temoatools/analyze_capacity.py | 5fee6b66e7d42c6683ded9998b5d838c9ccfa662 | [] | no_license | coopercenter/temoatools | b7bd871d1066dbfe2f43481a6c2ca84e4315b2b3 | 151ad4e68a4082166f87db2081bfe552f6d92253 | refs/heads/master | 2023-04-17T00:55:44.106235 | 2021-04-20T19:53:08 | 2021-04-20T19:53:08 | 295,842,252 | 1 | 1 | null | 2020-11-17T16:14:21 | 2020-09-15T20:30:59 | Python | UTF-8 | Python | false | false | 7,400 | py | import os
import sqlite3
import pandas as pd
import temoatools as tt
debug = False
resolution = 600 # DPI
# ==============================================================================
# Remove filetype from filename
def name(db):
return db[:db.find('.')]
# ==============================================================================
def getCapacity(folders, dbs, switch='fuel', sector_name='electric', save_data='N', create_plots='N',
run_name=''):
# inputs:
# 1) folders - paths containing dbs (list or single string if all in the same path)
# 2) dbs - names of databases (list)
# 3) switch - 'fuel' or 'tech', basis of categorization
# 4) sectorName - name of temoa sector to be analyzed
# 5) saveData - 'Y' or 'N', default is 'N'
# 6) createPlot - 'Y' or 'N', default is 'N'
# 7) run_name - Used for saving results in dedicated folder
#
# outputs:
# 1) capacity - pandas DataFrame holding capacity for each model year
# ==============================================================================
print("Analyzing capacity")
# Save original directory
wrkdir = os.getcwd()
# If only a single db and folder provided, change to a list
if type(dbs) == str and type(folders) == str:
dbs = [dbs]
folders = [folders]
# If a list of folders is provided with one database, only use first folder
elif type(dbs) == str:
dbs = [dbs]
folders = [folders[0]]
# If only a single folder provided, create a list of the same folder
elif type(folders) == str:
fldrs = []
for db in dbs:
fldrs.append(folders)
folders = fldrs
# Create dictionary to hold each capacity_single series
capacity = pd.DataFrame(dtype='float64')
# Iterate through each db
for folder, db in zip(folders, dbs):
capacity_single = SingleDB(folder, db, switch=switch, sector_name=sector_name)
capacity = pd.concat([capacity, capacity_single])
# Reset index (remove multi-level indexing, easier to use in Excel)
capacity = capacity.reset_index()
# Directory to hold results
if save_data == 'Y' or create_plots == 'Y':
tt.create_results_dir(wrkdir=wrkdir, run_name=run_name)
# Save results to Excel
if save_data == 'Y':
# Create savename based on switch
if switch == 'fuel':
savename = 'capacity_by_fuel.csv'
elif switch == 'tech':
savename = 'capacity_by_tech.csv'
# Save
capacity.to_csv(savename)
# Create plots
if create_plots == 'Y':
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update({'figure.max_open_warning': 0}) # ignore warning
# new figure
plt.figure()
# set aesthetics
sns.set_style("white", {"font.family": "serif", "font.serif": ["Times", "Palatino", "serif"]})
sns.set_context("paper")
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
# wide to long
df2 = pd.melt(capacity, id_vars=['database', 'scenario', 'fuelOrTech'], var_name='var', value_name='value')
# plot
sns.relplot(x='var', y='value', hue='database', data=df2, kind='line', col='fuelOrTech', col_wrap=4)
# save
if switch == 'fuel':
savename = 'capacity_by_fuel.png'
elif switch == 'tech':
savename = 'capacity_by_tech.png'
plt.savefig(savename, dpi=resolution)
# close figure
plt.close()
# Return to original directory
os.chdir(wrkdir)
# return capacity as a dictionary
return capacity
# ==============================================================================
def SingleDB(folder, db, switch='fuel', sector_name='electric'):
# inputs:
# 1) folder - path containing db
# 2) db - name of databas
# 3) switch - 'fuel' or 'tech', basis of categorization
# 5) sectorName - name of temoa sector to be analyzed
#
# outputs:
# 1) capacity - pandas DataFrame holding capacity for each model year
# ==============================================================================
print("\tAnalyzing db: ", db)
# save original folder
origDir = os.getcwd()
# move to folder
os.chdir(folder)
# Connect to Database
con = sqlite3.connect(db)
cur = con.cursor()
# Read from database:
# Select All Efficiencies
qry = "SELECT * FROM Efficiency"
cur.execute(qry)
db_efficiency = cur.fetchall()
# Select All time_periods
qry = "SELECT * FROM time_periods"
cur.execute(qry)
db_t_periods = cur.fetchall()
# Select All technologies
qry = "SELECT * FROM technologies"
cur.execute(qry)
db_technologies = cur.fetchall()
# Select All Capacities
qry = "SELECT * FROM Output_CapacityByPeriodAndTech"
cur.execute(qry)
db_Output_CapacityByPeriodAndTech = cur.fetchall()
# Review db_t_periods to select future time periods
future_t_periods = []
for t_periods, flag in db_t_periods:
if flag == 'f':
if t_periods not in future_t_periods:
future_t_periods.append(t_periods)
# Review db_technologies to select related sector
techs = []
for tech, flag, sector, tech_desc, tech_category in db_technologies:
if sector == sector_name or sector_name == "all":
if tech not in techs:
techs.append(tech)
# Review db_efficiency to create a dictionary of fuels
d = {}
for input_comm, tech, vintage, output_comm, efficiency, ef_notes in db_efficiency:
if tech in techs:
if tech not in d.keys():
d[tech] = input_comm
# Sort data and assign as columns and rows
if switch == 'fuel':
cols = sorted(set(d.values()))
elif switch == 'tech':
cols = sorted(techs)
future_t_periods = sorted(future_t_periods)
rows = future_t_periods[:-1]
# Identify Unique Scenarios
qry = "SELECT * FROM Output_Objective"
cur.execute(qry)
db_objective = cur.fetchall()
scenarios = []
for scenario, objective_name, total_system_cost in db_objective:
if scenario not in scenarios:
scenarios.append(scenario)
# Create pandas DataFrame to hold yearlyEmissions for all scenarios
index = pd.MultiIndex.from_product([[db], scenarios, cols], names=['database', 'scenario', 'fuelOrTech'])
df = pd.DataFrame(index=index, columns=future_t_periods[:-1], dtype='float64')
df = df.fillna(0.0) # Default value to zero
# Review db_Output_CapacityByPeriodAndTech to fill data frame
for scenario, sector, t_periods, tech, capacity in db_Output_CapacityByPeriodAndTech:
if sector == sector_name or sector_name == "all":
if switch == 'fuel':
df.loc[(db, scenario, d[tech]), t_periods] = df.loc[(db, scenario, d[tech]), t_periods] + capacity
elif switch == 'tech':
df.loc[(db, scenario, tech), t_periods] = df.loc[(db, scenario, tech), t_periods] + capacity
# return to original folder
os.chdir(origDir)
# return capacity as a DataFrame
return df
| [
"jab6ft@virginia.edu"
] | jab6ft@virginia.edu |
f5ac969f98498b817f245eea5127f37ec78d5b86 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/l5/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/app_20200705181608.py | cfa4d4767bd82bbd5cf7b9f1eb764d3239a2e78c | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | import random
from flask import Flask
from flask import render_template
#######################################
choose = ['rock', 'paper', 'scisors']
app = Flask(__name__)
#######################################
def winners():
if winner == 'compucter':
return render_template('2.html')
else:
return render_template('3.html')
def random2():
choose = random.choice(choose)
def game(player, bot):
if (bot == 'rock' and player == 'paper') or (bot == 'paper' and player == 'scisors') or (bot == 'scisors' and player == 'rock'):
winner = 'player'
#######################################
elif (bot == 'paper' and player == 'rock') or (bot == 'scisors' and player == 'paper') or (bot == 'rock' and player == 'scisors'):
winner = 'compucter'
else:
winner = 'Tie'
@app.route('/')
def home():
return render_template('index.html')
#######################################
@app.route('/rock')
def rock():
random2()
player = "rock"
game(player, )
#######################################
@app.route('/paper')
def paper():
random2()
player = "paper"
game()
#######################################
@app.route('/scisors')
def scisors():
random2()
player = "scisors"
game()
#######################################
@app.route('/tie')
def tie():
winner = 'Tie'
return render_template('1.html')
#######################################
if __name__ == '__main__':
app.run() | [
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
d866b7ed98c4357d9ccc8df8cd0f45a28a019655 | 3a01d6f6e9f7db7428ae5dc286d6bc267c4ca13e | /unittests/pytests/friction/TestSlipWeakeningTimeStable.py | 85102fd93c1ba47de9f52358dcbc839563be8946 | [
"MIT"
] | permissive | youngsolar/pylith | 1ee9f03c2b01560706b44b4ccae99c3fb6b9fdf4 | 62c07b91fa7581641c7b2a0f658bde288fa003de | refs/heads/master | 2020-12-26T04:04:21.884785 | 2014-10-06T21:42:42 | 2014-10-06T21:42:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | #!/usr/bin/env python
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2014 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
## @file unittests/pytests/friction/TestSlipWeakeningTimeStable.py
## @brief Unit testing of SlipWeakeningTimeStable object.
import unittest
from pylith.friction.SlipWeakeningTimeStable import SlipWeakeningTimeStable
# ----------------------------------------------------------------------
class TestSlipWeakeningTimeStable(unittest.TestCase):
"""
Unit testing of SlipWeakeningTimeStable object.
"""
def setUp(self):
"""
Setup test subject.
"""
self.friction = SlipWeakeningTimeStable()
return
def test_constructor(self):
"""
Test constructor.
"""
return
def test_factory(self):
"""
Test factory method.
"""
from pylith.friction.SlipWeakeningTimeStable import friction_model
m = friction_model()
return
# End of file
| [
"baagaard@usgs.gov"
] | baagaard@usgs.gov |
d27ad5b6ed1c2a47b246e44fc38b2f4f0c57ce96 | a11bd8615f47c15fb52cd83fe7722309f250537d | /pytgf/test/test_controls/wrappers/test_bot_wrapper.py | f70529d81b25b6e27e7274105b99cfc20429dba0 | [] | no_license | Angeall/pyTGF | 75a0abfc6605f08c93181248bd529279c01b05bc | 463359a6596598c0c6cceb6e30f393d77eca0a89 | refs/heads/master | 2021-01-12T12:21:10.659708 | 2018-09-02T12:37:58 | 2018-09-02T12:37:58 | 72,452,959 | 0 | 0 | null | 2017-05-28T11:41:09 | 2016-10-31T16:00:45 | Python | UTF-8 | Python | false | false | 7,317 | py | import unittest
from typing import Tuple, List
from multiprocess.connection import Pipe
try:
from multiprocess.connection import PipeConnection
except ImportError:
PipeConnection = object
from ....characters.moves import MoveDescriptor
from ....characters.units import Entity
from ....board import Builder
from ....characters.moves import Path
from ....characters.units import Unit
from ....controls.controllers import Bot
from ....controls.events import BotEvent, SpecialEvent
from ....controls.wrappers.bot import BotControllerWrapper
from ....game import Core, UnfeasibleMoveException, API
# Move descriptors used throughout these tests; the "MOVE" prefix is what
# marks a descriptor as allowed (see the isMoveDescriptorAllowed /
# _isMoveAllowed implementations below).
MOVE1 = "MOVE1"
MOVE2 = "MOVE2"
# Teammate messages that the example bot translates into the matching move.
MSG1 = "DO_MOVE1"
MSG2 = "DO_MOVE2"
class ExampleBotControllerWrapper(BotControllerWrapper):
    """Wrapper test double that accepts any string descriptor prefixed by "MOVE"."""

    def isMoveDescriptorAllowed(self, move_descriptor) -> bool:
        """Return True when ``move_descriptor`` is a str starting with "MOVE".

        Uses ``isinstance`` + ``str.startswith`` instead of the non-idiomatic
        ``type(x) == str`` and slice comparison; this also accepts str
        subclasses, which is a backward-compatible generalization.
        """
        return isinstance(move_descriptor, str) and move_descriptor.startswith('MOVE')
class ExampleAPI(API):
    """Minimal game-state API test double.

    Counts how many times each of the two example moves has been performed
    and deliberately refuses to create any real move.
    """

    def __init__(self, game: Core):
        super().__init__(game)
        self.move1 = 0  # times MOVE1 was performed
        self.move2 = 0  # times MOVE2 was performed

    def _decodeMoveFromPositiveNumber(self, player_number: int, encoded_move: int) -> MoveDescriptor:
        """Encoding is irrelevant for these tests; intentionally a no-op."""
        pass

    def _encodeMoveIntoPositiveNumber(self, player_number: int, move_descriptor: MoveDescriptor) -> int:
        """Encoding is irrelevant for these tests; intentionally a no-op."""
        pass

    def createMoveForDescriptor(self, unit: Unit, move_descriptor: MoveDescriptor, max_moves: int = -1,
                                force: bool = False, is_step: bool = False) -> Path:
        """Always raise: no descriptor is feasible in the example game."""
        raise UnfeasibleMoveException()

    def isItOneTestMethod(self):
        """Return True when the wrapped game is an :class:`ExampleGame`."""
        # Single boolean expression instead of the
        # ``if ...: return True / return False`` anti-idiom.
        return isinstance(self.game, ExampleGame)

    def performMove1(self):
        """Record one occurrence of MOVE1."""
        self.move1 += 1

    def performMove2(self):
        """Record one occurrence of MOVE2."""
        self.move2 += 1
class ExampleGame(Core):
    """Core test double: team kill and suicide are disabled, and player
    collisions are silently ignored."""

    @property
    def _teamKillAllowed(self) -> bool:
        """Team kill is never allowed in the example game."""
        return False

    @property
    def _suicideAllowed(self) -> bool:
        """Suicide is never allowed in the example game."""
        return False

    def _collidePlayers(self, player1, player2, tile_id, frontal: bool = False, entity: Entity = None):
        """Collisions have no effect in the example game."""
class ExampleBot(Bot):
    """Scripted bot: counts events into its ExampleAPI and answers teammate messages."""
    @property
    def possibleMoves(self) -> List[MoveDescriptor]:
        # This bot never proposes moves on its own.
        return []
    def _getGameStateAPI(self, game: Core):
        """Wrap the game in the counting ExampleAPI defined above."""
        return ExampleAPI(game)
    def reactToEvents(self, events: List[BotEvent]):
        """Bump the matching counter for each MOVE1/MOVE2 event, then defer to Bot."""
        for event in events:
            new_move_event = event.moveDescriptor
            if new_move_event == MOVE1:
                self.gameState.performMove1()
            elif new_move_event == MOVE2:
                self.gameState.performMove2()
        return super().reactToEvents(events)
    def _isMoveInteresting(self, player_number: int, new_move_event) -> bool:
        # Every move triggers a reaction in the tests.
        return True
    def _isMoveAllowed(self, move: str) -> bool:
        # Same acceptance rule as the wrapper: plain str starting with 'MOVE'.
        if type(move) == str and move[0:4] == 'MOVE':
            return True
        return False
    def selectMoveFollowingTeammateMessage(self, teammate_number: int, message):
        """Translate a teammate message back into a move (None for unknown messages)."""
        if message == MSG1:
            return MOVE1
        elif message == MSG2:
            return MOVE2
    def _selectNewMove(self, game_state: ExampleAPI):
        # Encode both counters so tests can assert how many events were seen.
        return "MOVE1-" + str(game_state.move1) + '/' + "MOVE2-" + str(game_state.move2)
class TestBotControllerWrapper(unittest.TestCase):
    """Exercises BotControllerWrapper IPC: move events, teammate messages, life-cycle flags."""
    def setUp(self):
        # Two bots on the same team (team 1), each with its own wrapper and
        # its own copy of the game state.
        self.game = ExampleGame(Builder(10, 10, 7, 6).create())
        self.game.addUnit(Unit(1), 1, (0, 0))
        self.bot1 = ExampleBot(1)
        self.bot1.gameState = self.game.copy()
        self.linker1 = ExampleBotControllerWrapper(self.bot1)
        self.game.addUnit(Unit(2), 1, (0, 0))
        self.bot2 = ExampleBot(2)
        self.bot2.gameState = self.game.copy()
        self.linker2 = ExampleBotControllerWrapper(self.bot2)
        # One game-info pipe and one move pipe per wrapper; the "parent" end
        # plays the role of the game mainloop in these tests.
        self.game_info_pipe_parent1, self.game_info_pipe_child1 = Pipe()  # type: Tuple[PipeConnection, PipeConnection]
        self.game_info_pipe_parent2, self.game_info_pipe_child2 = Pipe()  # type: Tuple[PipeConnection, PipeConnection]
        self.move_pipe_parent1, self.move_pipe_child1 = Pipe()  # type: Tuple[PipeConnection, PipeConnection]
        self.move_pipe_parent2, self.move_pipe_child2 = Pipe()  # type: Tuple[PipeConnection, PipeConnection]
        self.linker1.setMainPipe(self.move_pipe_child1)
        self.linker1.setGameInfoPipe(self.game_info_pipe_child1)
        self.linker2.setMainPipe(self.move_pipe_child2)
        self.linker2.setGameInfoPipe(self.game_info_pipe_child2)
        # Direct pipe between the two teammates for collaboration messages.
        self.collaboration_pipe_1, self.collaboration_pipe_2 = Pipe()
        self.linker1.addCollaborationPipe(2, self.collaboration_pipe_1)
        self.linker2.addCollaborationPipe(1, self.collaboration_pipe_2)
    def test_invalid_type_sent(self):
        """
        Tests that the linker raises an error when a message that is not a "BotEvent" is sent
        """
        self.move_pipe_parent1.send("")
        self.assertRaises(TypeError, self.linker1._routine, self.game_info_pipe_child1, self.move_pipe_child1)
    def test_send_move(self):
        """
        Tests that moves are sent correctly, that they affect the GameState and that the AI responds well
        """
        move1_event = BotEvent(1, MOVE1)
        move2_event = BotEvent(1, MOVE2)
        self.move_pipe_parent1.send(move1_event)
        self.move_pipe_parent1.send(move1_event)
        self.move_pipe_parent1.send(move2_event)
        # First routine pass consumes the queued events...
        self.linker1._routine()
        self.assertFalse(self.move_pipe_parent1.poll())
        # ...second pass makes the bot answer with its counter-encoded move.
        self.linker1._routine()
        self.assertTrue(self.move_pipe_parent1.poll())
        self.assertEqual(self.move_pipe_parent1.recv(), "MOVE1-3/MOVE2-1")
    def test_send_message_to_teammate(self):
        """
        Tests that messages are sent well between two teammates
        """
        self.bot1.sendMessageToTeammate(2, MSG1)
        self.linker1._routine()  # Will send the message
        self.linker2._routine()  # Will receive the message
        self.assertTrue(self.move_pipe_parent2.poll())
        # MSG1 must have been translated into MOVE1 by bot2.
        self.assertEqual(self.move_pipe_parent2.recv(), "MOVE1")
    def test_send_end_event(self):
        """
        Checks if the linker's logical loop ends correctly when it receives the end event
        """
        self.game_info_pipe_parent1.send(SpecialEvent(SpecialEvent.END))
        self.linker1.run()
        # Should run indefinitely if no flag was sent
        self.assertTrue(True)
    def test_unit_dead(self):
        """
        Checks if the linker blocks the incoming message of a dead unit, and starts to send again when resurrected
        """
        self.game_info_pipe_parent1.send(SpecialEvent(SpecialEvent.UNIT_KILLED))
        self.move_pipe_parent1.send(BotEvent(1, MOVE1))
        self.linker1._routine()
        self.assertFalse(self.move_pipe_parent1.poll())
        self.linker1._routine()  # Message blocked
        self.assertFalse(self.move_pipe_parent1.poll())
        self.game_info_pipe_parent1.send(SpecialEvent(SpecialEvent.RESURRECT_UNIT))
        self.move_pipe_parent1.send(BotEvent(1, MOVE2))
        self.linker1._routine()  # Message received
        self.linker1._routine()  # Message sent
        self.assertTrue(self.move_pipe_parent1.poll())
        # The message "MOVE1" was correctly received while the unit was dead => the game state is updated
        # while the unit is dead
        self.assertEqual(self.move_pipe_parent1.recv(), "MOVE1-1/MOVE2-1")
| [
"angeal1105@gmail.com"
] | angeal1105@gmail.com |
64ef7179aea9c2d661955269e63c1c7311a1037c | c78ce4f66cc964c230ad60fbf2ced6b4811eab89 | /0x0C-python-almost_a_circle/models/rectangle.py | 42da51e34b0deffedfd7071f7594571e3157540a | [] | no_license | jebichii/holbertonschool-higher_level_programming-1 | 89026557909851dd775ae355f036db89ebd9adb9 | 741953aa479af90e8eac6f1315415eff4a20224f | refs/heads/master | 2023-03-15T14:58:27.062528 | 2020-06-11T07:21:23 | 2020-06-11T07:21:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,270 | py | #!/usr/bin/python3
"""Provides a class to represent a rectangle
"""
from models.base import Base
class Rectangle(Base):
    """A rectangle with validated integer size and (x, y) offset.

    ``width``/``height`` must be ints strictly greater than 0; the
    ``x``/``y`` offsets must be ints greater than or equal to 0.
    """
    # Attribute names, in the order used by update()'s positional form and
    # serialized by to_dictionary().
    HEADERS = ('id', 'width', 'height', 'x', 'y')
    def __init__(self, width, height, x=0, y=0, id=None):
        """Build a rectangle; every value goes through its validating setter."""
        super().__init__(id)
        self.width = width
        self.height = height
        self.x = x
        self.y = y
    def __str__(self):
        """Return '[Rectangle] (<id>) <x>/<y> - <width>/<height>'."""
        return "[{type}] ({id}) {x}/{y} - {width}/{height}".format(
            type=type(self).__name__,
            id=self.id,
            width=self.width,
            height=self.height,
            x=self.x,
            y=self.y
        )
    @staticmethod
    def __validate(name, value, lower):
        """Return *value* if it is an int >= *lower* (0 or 1), else raise."""
        if not isinstance(value, int):
            raise TypeError("{} must be an integer".format(name))
        if value < lower:
            raise ValueError("{} must be {} 0".format(name, ">" if lower else ">="))
        return value
    @property
    def width(self):
        """int: horizontal size; strictly positive."""
        return self.__width
    @width.setter
    def width(self, width):
        self.__width = self.__validate("width", width, 1)
    @property
    def height(self):
        """int: vertical size; strictly positive."""
        return self.__height
    @height.setter
    def height(self, height):
        self.__height = self.__validate("height", height, 1)
    @property
    def x(self):
        """int: horizontal offset; non-negative."""
        return self.__x
    @x.setter
    def x(self, x):
        self.__x = self.__validate("x", x, 0)
    @property
    def y(self):
        """int: vertical offset; non-negative."""
        return self.__y
    @y.setter
    def y(self, y):
        self.__y = self.__validate("y", y, 0)
    def area(self):
        """Return the rectangle's area (width * height)."""
        return self.width * self.height
    def display(self):
        """Print the rectangle as '#' characters, shifted y rows down and x columns right."""
        print("\n" * self.y, end="")
        row = " " * self.x + "#" * self.width
        print("\n".join([row] * self.height))
    def to_dictionary(self):
        """Return a dict of the HEADERS attributes of this rectangle."""
        return dict((name, getattr(self, name)) for name in type(self).HEADERS)
    def update(self, *args, **kwargs):
        """Overwrite attributes positionally (HEADERS order) or, failing that, by keyword."""
        if args:
            for name, value in zip(self.HEADERS, args):
                setattr(self, name, value)
            return
        for name, value in kwargs.items():
            if name in self.HEADERS:
                setattr(self, name, value)
| [
"pdeyoreo@gmail.com"
] | pdeyoreo@gmail.com |
f54ccd9b35fcc5700879cdbd97e2f7e94d0704ac | 77db6591c5884204d6016bfa89b33691bac38813 | /lbst/migrations/0008_delete_parcels.py | 91d01710672e0f30065b7338bf188b1d05e9f3e1 | [] | no_license | jbukoski/iltf-signal-webmap-suite | 4fc0aafa977e911a1071872f7adbaf2e7d0da37c | b8374e9cfcc80501a8f632721a7cb9b76e668f6b | refs/heads/master | 2021-03-27T11:20:37.174667 | 2020-12-31T18:03:20 | 2020-12-31T18:03:20 | 79,853,039 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-10-20 04:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the ``parcels`` model (drops its table)."""
    # Must run after 0007 in the 'lbst' app.
    dependencies = [
        ('lbst', '0007_auto_20171020_0453'),
    ]
    operations = [
        migrations.DeleteModel(
            name='parcels',
        ),
    ]
| [
"jacob.bukoski@yale.edu"
] | jacob.bukoski@yale.edu |
b09045ad72a51dcaad3ea6dd2cf2c6b90953bc15 | 316b8375a7ef8095f09973d13f5a49bc7fbe7580 | /leetcode/746.py | c1d8ec3f17c479eabb62b24a67fab00b5e1725c1 | [] | no_license | zhaolijian/suanfa | 9a8d23fbca01d994f7eef24631783c4b7ed25683 | 4f3b25f360f30c0e604ba4dc4d5774ccb5f25b32 | refs/heads/master | 2023-06-08T17:12:41.522937 | 2021-06-27T08:13:16 | 2021-06-27T08:13:16 | 313,269,459 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | # 数组的每个索引作为一个阶梯,第 i个阶梯对应着一个非负数的体力花费值 cost[i](索引从0开始)。
# 每当你爬上一个阶梯你都要花费对应的体力花费值,然后你可以选择继续爬一个阶梯或者爬两个阶梯。
# 您需要找到达到楼层顶部的最低花费。在开始时,你可以选择从索引为 0 或 1 的元素作为初始阶梯。
# 方法1
class Solution:
    """LeetCode 746 (Min Cost Climbing Stairs) — tabulated DP."""
    def minCostClimbingStairs(self, cost) -> int:
        """Return the cheapest total cost to step past the end of ``cost``.

        ``table[i]`` is the minimum cost to stand on step ``i`` (the top is
        index ``len(cost)``); starting on step 0 or 1 is free.
        """
        steps = len(cost)
        table = [0] * (steps + 1)
        for i in range(2, steps + 1):
            via_two = table[i - 2] + cost[i - 2]
            via_one = table[i - 1] + cost[i - 1]
            table[i] = via_two if via_two < via_one else via_one
        return table[-1]
# 方法2
class Solution:
    """LeetCode 746 (Min Cost Climbing Stairs) — O(1)-space rolling DP."""
    def minCostClimbingStairs(self, cost) -> int:
        """Return the cheapest total cost to reach past the end of ``cost``.

        Keeps only the last two DP values instead of a full table.
        """
        two_back = 0
        one_back = 0
        best = 0
        for i in range(2, len(cost) + 1):
            best = min(two_back + cost[i - 2], one_back + cost[i - 1])
            two_back, one_back = one_back, best
        return best
"820913569@qq.com"
] | 820913569@qq.com |
097d8374a255ccaa7ec5bbd988be8ef1ae39bea0 | 7111511ef0cca1bcf84a76d49419fad504d78f6e | /test331scaping_Writer_nfl.py | 27f9826cb9dbf89cb2644dbd8d396d44450c712c | [] | no_license | blockchain99/pythonlecture | 7800033cd62251b0eec8cf3b93f253175d9cb2e8 | 198e1b6d68db72e4a5009f988c503958ad7ab444 | refs/heads/master | 2020-12-12T14:21:53.626918 | 2020-01-15T19:02:07 | 2020-01-15T19:02:07 | 234,148,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,163 | py | ##not completed !
import requests
from bs4 import BeautifulSoup
from csv import writer, DictWriter
response = requests.get("http://en.wikipedia.org/wiki/NFL_win-loss_records")
# print(response.text)
with open('test331Write1nfl.text', 'w') as file:
file.write(response.text)
# print("==============================================================")
# #go to above url -> open developer tool in chrome.
soup = BeautifulSoup(response.text, "html.parser")
# # articles = soup.find_all("tbody")
# # articles = soup.select(".wikitable.sortable.jquery-tablesorter")
# articles = soup.select(".wikitable")
# # articles = soup.find_all(class_="wikitable")
# print(articles)
# print("-------------------csv Writer---------------------")
# with open("test331nflWriter.csv", "w") as file:
# csv_writer = writer(file)
# csv_writer.writerow(["Rank", "Team", "Won","Lost","Tied","Pct.","First NFL Season", "Total Games", "Divison"])
# td_tags = articles.find("td")
# for td_tag in td_tags:
# #get_text: access the inner text in an element("a")
# # print(article.find("a").get_text()) #anchor tag -> convert to text
# rank = td_tag[0].get_text()
# team = td_tag[1].find("a").get_text()
# won = td_tag[2].get_text()
# lost = td_tag[3].get_text()
# tied = td_tag[4].get_text()
# pct = td_tag[5].get_text()
# first = td_tag[6].get_text()
# total = td_tag[7].find("a").get_text()
# division = td_tag[8].find("a").get_text()
# # csv_writer.writerow([rank, team, won, lost, tied, pct, first, total, division])
############ one table scap ##############
# from bs4 import BeautifulSoup
# import csv
# html = open("table.html").read()
# soup = BeautifulSoup(html)
# Flatten the first table on the page into rows of cell text, then dump to CSV.
table = soup.find("table")
output_rows = []
for table_row in table.findAll('tr'):
    columns = table_row.findAll('td')
    output_row = []
    for column in columns:
        output_row.append(column.text)
    output_rows.append(output_row)
# BUG FIX: the file imports `writer` from csv (there is no `csv` name in
# scope, so `csv.writer` raised NameError), and on Python 3 a csv file must
# be opened in text mode with newline='' — 'wb' raises TypeError when the
# writer emits str rows. Also renamed the local so it does not shadow the
# imported `writer`.
with open('output.csv', 'w', newline='') as csvfile:
    csv_writer = writer(csvfile)
    csv_writer.writerows(output_rows)
| [
"shinebytheriver@yahoo.com"
] | shinebytheriver@yahoo.com |
83691555e2c9290c3b999d0373c56b611f949fc5 | cf4f3c181dc04c4e698b53c3bb5dd5373b0cc1f4 | /meridian/acupoints/tiaokou23.py | c01be663e38206cbf5f9d8dea8e5b7bee0d48952 | [
"Apache-2.0"
] | permissive | sinotradition/meridian | da3bba6fe42d3f91397bdf54520b3085f7c3bf1d | 8c6c1762b204b72346be4bbfb74dedd792ae3024 | refs/heads/master | 2021-01-10T03:20:18.367965 | 2015-12-14T14:58:35 | 2015-12-14T14:58:35 | 46,456,260 | 5 | 3 | null | 2015-11-29T15:00:20 | 2015-11-19T00:21:00 | Python | UTF-8 | Python | false | false | 236 | py | #!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
# Acupoint metadata for Tiaokou (ST38) on the stomach channel.
SPELL=u'tiáokǒu'  # pinyin spelling with tone marks
CN=u'条口'  # Chinese name
NAME=u'tiaokou23'  # module/point identifier used by the package
CHANNEL='stomach'
CHANNEL_FULLNAME='StomachChannelofFoot-Yangming'
SEQ='ST38'  # standard acupoint code
if __name__ == '__main__':
    pass
| [
"sinotradition@gmail.com"
] | sinotradition@gmail.com |
02e61c8774835d8f58181e6154d935bfe0f92a5c | 12f006a0e5d75ef2349d4ae519c1c9cac5309761 | /Solution_305.py | c3dacdc43397e43d3cb4084735962f719e35b8d7 | [] | no_license | TimothySjiang/leetcodepy | c613db16282eade713e01b7d641c0f5b341ec84b | ef64e46b8833a684b8b0355ce576b767a0e03596 | refs/heads/master | 2020-07-01T14:48:35.953841 | 2020-01-12T06:19:44 | 2020-01-12T06:19:44 | 201,199,810 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | class Solution:
    def numIslands2(self, m: int, n: int, positions: List[List[int]]) -> List[int]:
        """LeetCode 305: after adding land at each position, count islands.

        Returns one island count per position, using the UnionFind below.
        NOTE(review): the count is recomputed by scanning every node in the
        union-find each step, so this is O(k * cells) overall.
        """
        uf = UnionFind()
        res = []
        # board[i][j] == 1 marks a cell that has been turned into land.
        board = [[0 for i in range(n)] for i in range(m)]
        def detect(i, j):
            # Return (i, j) if it is an in-bounds land cell, else None
            # (UnionFind.union treats a falsy second argument as "no merge").
            if i < 0 or i > m - 1 or j < 0 or j > n - 1 or board[i][j] != 1:
                return None
            else:
                return (i, j)
        for i, j in positions:
            board[i][j] = 1
            # Merge the new cell with each existing land neighbour.
            uf.union((i, j), detect(i - 1, j))
            uf.union((i, j), detect(i + 1, j))
            uf.union((i, j), detect(i, j - 1))
            uf.union((i, j), detect(i, j + 1))
            # Number of distinct roots == number of islands so far.
            res.append(len({uf.find(x) for x in uf.uf}))
        return res
class UnionFind:
    """Disjoint-set over hashable nodes with path compression on find()."""
    def __init__(self):
        # Maps node -> parent; a root maps to itself. Exposed as `uf`
        # because callers iterate it to count components.
        self.uf = {}
    def same(self, a, b):
        """Return True when a and b (both already registered) share a root."""
        return self.find(a) == self.find(b)
    def union(self, a, b):
        """Merge a's set into b's set; a falsy b only registers a."""
        self.uf.setdefault(a, a)
        if not b:
            return None
        self.uf.setdefault(b, b)
        self.uf[self.find(a)] = self.find(b)
    def find(self, node):
        """Return the root of node, compressing the walked path."""
        root = node
        while self.uf[root] != root:
            root = self.uf[root]
        while node != root:
            parent = self.uf[node]
            self.uf[node] = root
            node = parent
        return root
| [
"shjiang@ucdavis.edu"
] | shjiang@ucdavis.edu |
3cfdc2c73a2715bf71926835cd6c115602db7ba1 | 3fbd26091ebbc13913f9c7be1aaf10d477c79536 | /week01/zuoye/requests_maoyan/.history/maoyan_20200626235954.py | 2b50c6bd80acfa54c79e82eea959c439fb55b91e | [] | no_license | shuncon/Python001-class01 | d28faf3d5d8e9ea4cee93bcae7143a26fd8c472e | df19758181cdaf37f30d4b518600fc4612590499 | refs/heads/master | 2022-11-13T19:31:27.019214 | 2020-07-10T14:58:25 | 2020-07-10T14:58:25 | 273,135,541 | 0 | 0 | null | 2020-06-18T03:46:56 | 2020-06-18T03:46:55 | null | UTF-8 | Python | false | false | 727 | py | #-*-conding:utf-8 -*-
import requests
# BUG FIX: `import lxml` alone does not make `lxml.etree` usable; the
# submodule must be imported explicitly (the name `lxml` stays bound).
import lxml.etree
from bs4 import BeautifulSoup as bfs
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'
# header = {'user-agent' : user_agent}
# Film detail page to scrape; Maoyan blocks the default python-requests UA,
# hence the spoofed browser user-agent header.
myurl = 'https://maoyan.com/films/1222268'
# Alternative film page kept from the original draft:
# 'https://maoyan.com/films/1217023'
header = {}
header['user-agent'] = user_agent
# BUG FIX: was `requests.get(myurl, herders=harder)` — both the keyword
# (`headers`) and the variable (`header`) were misspelled.
response = requests.get(myurl, headers=header)
selector = lxml.etree.HTML(response.text)
# Film title (电影名称).
# BUG FIX: the XPath literal reused single quotes inside a single-quoted
# string (a syntax error); the outer quotes are now double quotes.
dy_name = selector.xpath("//*[@id='movie-brief-container']/h1/text()")
print(f'电影名称:{dy_name}')
# response = requests.get(myurl, headers=header)
# # print (response.text)
# # print (f'返回状态码: {response.status.code}') | [
"1428834423@qq.com"
] | 1428834423@qq.com |
055b1b4762a72fc2f85d6bbed013e839d6ab0320 | 096b0e98f698e7b8c46566cd73039ec66c87f525 | /Environmental/DRAXIS/Python/vice_env_buildings_once/consumer.py | 1cabe6b241c62390a5436d2c9e019254069e132d | [
"CC-BY-4.0"
] | permissive | georgiapant/DataCrawlers | 8ba90491255e1f86b92e0ca5f78bce931c3d9553 | 1f2d2d4ab04d34fba1c90efc4119a1d40c964921 | refs/heads/master | 2022-12-09T18:24:30.947686 | 2020-07-15T13:39:08 | 2020-09-01T12:22:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,432 | py | """
This code is open-sourced software licensed under the MIT license. (http://opensource.org/licenses/MIT)
Copyright 2020 Stergios Bampakis, DRAXIS ENVIRONMENTAL S.A.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
DISCLAIMER
This code is used to crawl/parse data from file Thessaloniki-boatstops_public.xlsx provided by DRAXIS ENVIRONMENTAL S.A.
described in D5.3.
By downloading this code, you agree to contact the corresponding data provider
and verify you are allowed to use (including, but not limited, crawl/parse/download/store/process)
all data obtained from the data source.
"""
import os
import json
from kafka import KafkaConsumer
from elastic import ElasticSearchClient, RequestError
from dotenv import load_dotenv
from constants import *
# Load connection settings from a .env file into the environment.
load_dotenv()
# Elasticsearch client configured entirely from environment variables;
# basic auth is only attached when ES_USER is set.
es = ElasticSearchClient(os.getenv('ES_HOST'), os.getenv('ES_PORT'),
                         use_ssl=os.getenv('ES_USE_SSL', False),
                         verify_certs=os.getenv('ES_VERIFY_CERTS', False),
                         http_auth=(os.getenv('ES_USER'), os.getenv('ES_PASSWORD')) if os.getenv('ES_USER') else None,
                         ca_certs=os.getenv('ES_CA_CERTS', None),
                         timeout=60)
# Ensure the target index exists with a geo_shape mapping on "geometry".
geo_point_mapping = es.define_custom_geo_shape_mapping("geometry")
es.create_index(ELASTICSEARCH_INDEX, geo_point_mapping)
# Kafka consumer in a topic-specific consumer group; messages are JSON.
kafka_consumer = KafkaConsumer(KAFKA_TOPIC,
                               bootstrap_servers=["{}:{}".format(os.getenv('KAFKA_HOST'), os.getenv('KAFKA_PORT'))],
                               # auto_offset_reset='earliest',
                               security_protocol=os.getenv('KAFKA_SECURITY_PROTOCOL', 'PLAINTEXT'),
                               ssl_cafile=os.getenv('KAFKA_CA_FILE', None),
                               ssl_certfile=os.getenv('KAFKA_CERT_FILE', None),
                               ssl_keyfile=os.getenv('KAFKA_KEY_FILE', None),
                               group_id='group_' + KAFKA_TOPIC,
                               value_deserializer=lambda m: json.loads(m.decode('utf8')))
c = 0  # total messages consumed
denied_docs = 0  # documents Elasticsearch rejected with RequestError
# NOTE(review): ELASTICSEARCH_INDEX, KAFKA_TOPIC and `logger` are presumably
# provided by the `from constants import *` star import — verify.
for msg in kafka_consumer:
    c += 1
    print("Consumed: {} messages".format(c))
    # data are already processed in the appropriate way from producer, so just insert them to DB
    try:
        print(es.insert_doc(msg.value))
    except RequestError as e:
        # Rejected document: count it, log details, keep consuming.
        denied_docs += 1
        logger.error(e.info)
        logger.error("Denied docs: {}".format(denied_docs))
        continue
| [
"ababakis@draxis.gr"
] | ababakis@draxis.gr |
163344918b63b083811ad2b9058150401e41c853 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_045/ch68_2019_06_07_09_21_26_405440.py | 3fe982878107d4f1707a636956f08b24b66b2f0b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | def separa_trios(n):
l=[]
i=0
while i<len(n):
l.append(n[i:i+3])
i+=3
return l
| [
"you@example.com"
] | you@example.com |
7f90dfab2752f533bb230b138138aea3033928ff | f1a4b824faea817e3f33e477a9a6f9c7d522f72d | /tests/conftest.py | 65b4cf65266d9b03aa101c30a0b97aba88b47768 | [
"MIT"
] | permissive | singingwolfboy/flask-dance | ea94200492befe9b380c4521b084afd925b8458e | 27add7576da008a00867521bc84529d1e4a0fc1e | refs/heads/main | 2023-08-24T18:05:55.465730 | 2023-05-10T18:26:21 | 2023-05-10T18:26:21 | 23,831,381 | 928 | 211 | MIT | 2023-09-12T19:49:00 | 2014-09-09T11:53:06 | Python | UTF-8 | Python | false | false | 336 | py | import pytest
import responses as resp_module
@pytest.fixture
def responses(request):
    """Activate the ``responses`` HTTP-mocking library for one test.

    Mocking is started before the test body runs; a pytest finalizer
    stops and resets it afterwards.
    https://github.com/getsentry/responses
    """
    resp_module.start()
    def _teardown():
        resp_module.stop()
        resp_module.reset()
    request.addfinalizer(_teardown)
| [
"david@davidbaumgold.com"
] | david@davidbaumgold.com |
8415371bb92a7e51b7d22eadc0a43ab589c1fa2d | d33b768704278b6eba4f8d9c6e367cce594a457d | /generator.py | 84ac793b320dffe8d343a5ea870eade593062db8 | [] | no_license | ashishjsharda/PythonSamples | b71ecfe5768a86bbea4ac3ec683f83b633e85d97 | 433e14707ff1ce08d2e5110521caf08969db08ea | refs/heads/master | 2021-01-19T21:09:01.697894 | 2020-03-27T10:37:18 | 2020-03-27T10:37:18 | 88,614,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | '''
Created on Jul 31, 2019
Using Yield
@author: asharda
'''
def gen_nums():
    """Yield the integers 0 through 3, one at a time."""
    for value in range(4):
        yield value
# Demo: print each yielded number on its own line.
for num in gen_nums():
    print(num)
| [
"noreply@github.com"
] | ashishjsharda.noreply@github.com |
a10b849dc80dd5b27d663b2a62652a1f7a072f47 | 94e7c790d17ba08e8a2a74077dd8b75e7ac120b0 | /chapter05/Exercise20b_05.py | dd6b8c148d72cebee8f387e7cd2addbe1a9bdb3b | [] | no_license | lutfar9427/Exercises_Solution_of_INTRODUCTION_TO_PROGRAMMING_USING_Python | 9632e515428685dcaa7d057cf52f0e191e9f7ae0 | d037475316e6c6b7c6a7a7023318ef4ab4ed3f8d | refs/heads/master | 2020-09-02T09:04:44.990668 | 2018-10-20T00:50:12 | 2018-10-20T00:50:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | '''
*5.20 (Display four patterns using loops) Use nested loops that display the following
patterns in four separate programs:
Pattern B
1 2 3 4 5 6
1 2 3 4 5
1 2 3 4
1 2 3
1 2
1
/**
* @author BASSAM FARAMAWI
* @email tiodaronzi3@yahoo.com
* @since 2018
*/
'''
print("Pattern B")
for n in range(6, 0, -1): # A for loop for printing the pattern
for k in range(6 - n + 1, 6 + 1):
print(k, end=" ")
print()
| [
"tiodaronzi3@yahoo.com"
] | tiodaronzi3@yahoo.com |
5d0829c8e3743612b1c4359ecc4e7b74619061bc | 385224b7699b8cf4358f93eea06518a92fe2d40b | /Data Representation/Linear-Models/wide_deep.py | 13169a6deeb07b52b95b42ecd09730c6d4110fea | [] | no_license | afcarl/tensorflow-machine-learning | 293ab4e513ff46a82e308dbce1fefba4831bdb18 | 60c6b62025a932948d6d96eaf611b35df5e39cda | refs/heads/master | 2020-08-21T20:49:30.237014 | 2018-02-02T06:45:17 | 2018-02-02T06:45:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,294 | py | """Example code for TensorFlow Wide & Deep Tutorial using tf.estimator API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
import tensorflow as tf
# Column names of the Census Income CSV, in file order; 'income_bracket'
# is the label column.
_CSV_COLUMNS = [
    'age', 'workclass', 'fnlwgt', 'education', 'education_num',
    'marital_status', 'occupation', 'relationship', 'race', 'gender',
    'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
    'income_bracket'
]
# Per-column defaults for tf.decode_csv: [0] marks numeric columns,
# [''] marks string columns.
_CSV_COLUMN_DEFAULTS = [[0], [''], [0], [''], [0], [''], [''], [''], [''], [''],
                        [0], [0], [0], [''], ['']]
# Command-line flags; parsed into FLAGS in the __main__ block below.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--model_dir', type=str, default='/tmp/census_model',
    help='Base directory for the model.')
parser.add_argument(
    '--model_type', type=str, default='wide_deep',
    help="Valid model types: {'wide', 'deep', 'wide_deep'}.")
parser.add_argument(
    '--train_epochs', type=int, default=40, help='Number of training epochs.')
parser.add_argument(
    '--epochs_per_eval', type=int, default=2,
    help='The number of training epochs to run between evaluations.')
parser.add_argument(
    '--batch_size', type=int, default=40, help='Number of examples per batch.')
parser.add_argument(
    '--train_data', type=str, default='/tmp/census_data/adult.data',
    help='Path to the training data.')
parser.add_argument(
    '--test_data', type=str, default='/tmp/census_data/adult.test',
    help='Path to the test data.')
# Dataset sizes; 'train' doubles as the shuffle buffer size in input_fn.
_NUM_EXAMPLES = {
    'train': 32561,
    'validation': 16281,
}
def build_model_columns():
  """Builds a set of wide and deep feature columns.

  Returns:
    (wide_columns, deep_columns): the sparse/crossed columns for the linear
    part and the dense/embedded columns for the DNN part.
  """
  # Continuous columns
  age = tf.feature_column.numeric_column('age')
  education_num = tf.feature_column.numeric_column('education_num')
  capital_gain = tf.feature_column.numeric_column('capital_gain')
  capital_loss = tf.feature_column.numeric_column('capital_loss')
  hours_per_week = tf.feature_column.numeric_column('hours_per_week')
  # Categorical columns with a fixed, known vocabulary.
  education = tf.feature_column.categorical_column_with_vocabulary_list(
      'education', [
          'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college',
          'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school',
          '5th-6th', '10th', '1st-4th', 'Preschool', '12th'])
  marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
      'marital_status', [
          'Married-civ-spouse', 'Divorced', 'Married-spouse-absent',
          'Never-married', 'Separated', 'Married-AF-spouse', 'Widowed'])
  relationship = tf.feature_column.categorical_column_with_vocabulary_list(
      'relationship', [
          'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried',
          'Other-relative'])
  workclass = tf.feature_column.categorical_column_with_vocabulary_list(
      'workclass', [
          'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov',
          'Local-gov', '?', 'Self-emp-inc', 'Without-pay', 'Never-worked'])
  # To show an example of hashing:
  occupation = tf.feature_column.categorical_column_with_hash_bucket(
      'occupation', hash_bucket_size=1000)
  # Transformations.
  age_buckets = tf.feature_column.bucketized_column(
      age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
  # Wide columns and deep columns.
  base_columns = [
      education, marital_status, relationship, workclass, occupation,
      age_buckets,
  ]
  # Feature crosses for the linear (wide) part of the model.
  crossed_columns = [
      tf.feature_column.crossed_column(
          ['education', 'occupation'], hash_bucket_size=1000),
      tf.feature_column.crossed_column(
          [age_buckets, 'education', 'occupation'], hash_bucket_size=1000),
  ]
  wide_columns = base_columns + crossed_columns
  # Dense inputs for the DNN: raw numerics plus one-hot/embedded categoricals.
  deep_columns = [
      age,
      education_num,
      capital_gain,
      capital_loss,
      hours_per_week,
      tf.feature_column.indicator_column(workclass),
      tf.feature_column.indicator_column(education),
      tf.feature_column.indicator_column(marital_status),
      tf.feature_column.indicator_column(relationship),
      # To show an example of embedding
      tf.feature_column.embedding_column(occupation, dimension=8),
  ]
  return wide_columns, deep_columns
def build_estimator(model_dir, model_type):
  """Build the estimator matching `model_type`: 'wide', 'deep', or combined."""
  wide_columns, deep_columns = build_model_columns()
  hidden_units = [100, 75, 50, 25]
  # Create a tf.estimator.RunConfig to ensure the model is run on CPU, which
  # trains faster than GPU for this model.
  run_config = tf.estimator.RunConfig().replace(
      session_config=tf.ConfigProto(device_count={'GPU': 0}))
  if model_type == 'wide':
    estimator = tf.estimator.LinearClassifier(
        model_dir=model_dir,
        feature_columns=wide_columns,
        config=run_config)
  elif model_type == 'deep':
    estimator = tf.estimator.DNNClassifier(
        model_dir=model_dir,
        feature_columns=deep_columns,
        hidden_units=hidden_units,
        config=run_config)
  else:
    # Any other value (including the default 'wide_deep') gets the combo model.
    estimator = tf.estimator.DNNLinearCombinedClassifier(
        model_dir=model_dir,
        linear_feature_columns=wide_columns,
        dnn_feature_columns=deep_columns,
        dnn_hidden_units=hidden_units,
        config=run_config)
  return estimator
def input_fn(data_file, num_epochs, shuffle, batch_size):
  """Generate an input function for the Estimator."""
  assert tf.gfile.Exists(data_file), (
      '%s not found. Please make sure you have either run data_download.py or '
      'set both arguments --train_data and --test_data.' % data_file)
  def parse_csv(value):
    # Split one CSV line using the per-column defaults, then peel off the
    # label column and binarize it ('>50K' -> True).
    print('Parsing', data_file)
    columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS)
    features = dict(zip(_CSV_COLUMNS, columns))
    labels = features.pop('income_bracket')
    return features, tf.equal(labels, '>50K')
  # Extract lines from input files using the Dataset API.
  dataset = tf.data.TextLineDataset(data_file)
  if shuffle:
    # Buffer the whole training set so shuffling is uniform.
    dataset = dataset.shuffle(buffer_size=_NUM_EXAMPLES['train'])
  dataset = dataset.map(parse_csv, num_parallel_calls=5)
  # We call repeat after shuffling, rather than before, to prevent separate
  # epochs from blending together.
  dataset = dataset.repeat(num_epochs)
  dataset = dataset.batch(batch_size)
  iterator = dataset.make_one_shot_iterator()
  features, labels = iterator.get_next()
  return features, labels
def main(unused_argv):
  """Train/evaluate the selected model, reporting metrics every few epochs."""
  # Clean up the model directory if present
  shutil.rmtree(FLAGS.model_dir, ignore_errors=True)
  model = build_estimator(FLAGS.model_dir, FLAGS.model_type)
  # Train and evaluate the model every `FLAGS.epochs_per_eval` epochs.
  for n in range(FLAGS.train_epochs // FLAGS.epochs_per_eval):
    model.train(input_fn=lambda: input_fn(
        FLAGS.train_data, FLAGS.epochs_per_eval, True, FLAGS.batch_size))
    # Evaluate on the test set for a single pass, without shuffling.
    results = model.evaluate(input_fn=lambda: input_fn(
        FLAGS.test_data, 1, False, FLAGS.batch_size))
    # Display evaluation metrics
    print('Results at epoch', (n + 1) * FLAGS.epochs_per_eval)
    print('-' * 60)
    for key in sorted(results):
      print('%s: %s' % (key, results[key]))
if __name__ == '__main__':
  tf.logging.set_verbosity(tf.logging.INFO)
  # Parse known flags into the module-level FLAGS used by main(); any
  # unparsed flags are forwarded to tf.app.run.
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"le_j6@denison.edu"
] | le_j6@denison.edu |
cf35d907cc4cd9d30e2eeeef7859e0dc51fd7629 | f2c58d0b254dd5586266df23f57265abe322dc8c | /other/views.py | c2b449fa20ab7daf09e1904192b04355b67633e1 | [] | no_license | Shubham101491/Big-Store | f01ff85193bb77040be82385498c66faa5ac619a | 96f842c7856d2b8f6559844e0272828e0b449c6a | refs/heads/master | 2023-02-08T01:28:50.005570 | 2021-01-03T14:34:54 | 2021-01-03T14:34:54 | 326,142,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | from django.shortcuts import render
from bigstore import settings
def shipping(request):
    """Render the static shipping-information page."""
    return render(request, 'other/shipping.html', {"BASE_URL": settings.BASE_URL})
def offer(request):
    """Render the offers page."""
    return render(request, 'other/offer.html', {"BASE_URL": settings.BASE_URL})
def wishlist(request):
    """Render the wishlist page."""
    return render(request, 'other/wishlist.html', {"BASE_URL": settings.BASE_URL})
def single(request):
    """Render the single-item page."""
    return render(request, 'other/single.html', {"BASE_URL": settings.BASE_URL})
def terms(request):
    """Render the terms-and-conditions page."""
    return render(request, 'other/terms.html', {"BASE_URL": settings.BASE_URL})
def faqs(request):
    """Render the FAQ page."""
    return render(request, 'other/faqs.html', {"BASE_URL": settings.BASE_URL})
| [
"55647943+Shubham101491@users.noreply.github.com"
] | 55647943+Shubham101491@users.noreply.github.com |
6c13b1510aa2d1e894f03e08801f3572e56b017a | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /PhysicsTools/PatExamples/test/producePatMcMatch_cfg.py | 5ab4881d7f71ebc06a1455a4c45f7d7d8ded42d0 | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 1,632 | py | # Start with a skeleton process which gets imported with the following line
# Start from the PAT skeleton process (defines `process`, `cms`, and the
# default output module used below).
from PhysicsTools.PatAlgos.patTemplate_cfg import *
# Load the standard PAT config
process.load( "PhysicsTools.PatAlgos.patSequences_cff" )
# Load the exercise config
process.load( "PhysicsTools.PatExamples.mcMatch_cfi" ) # The file to modify!
# Modify the default config according to needed exercise settings
# You can comment these lines in order to run the default rather than
# your OWN MC matching from PhysicsTools/PatExamples/python/mcMatching_cfi
# CAUTION: Uncommented, this does NOT run out-of-the-box!
# Own muon match
# Swap the default muon matcher for the exercise's own one.  The
# remove+append pairs re-order the sequence so that myMuonMatch runs
# before allLayer1Muons, which consumes its output.
process.makeAllLayer1Muons.remove( process.muonMatch )
process.makeAllLayer1Muons += process.myMuonMatch
process.makeAllLayer1Muons.remove( process.allLayer1Muons )
process.makeAllLayer1Muons += process.allLayer1Muons
# Point the PAT muon producer at the custom match and embed the gen match
# object into the pat::Muon.
process.allLayer1Muons.genParticleMatch = "myMuonMatch"
process.allLayer1Muons.embedGenMatch = True
# Own jet match to MC jets
# Same swap for the jet-to-GenJet matching.
process.makeAllLayer1Jets.remove( process.jetGenJetMatch )
process.makeAllLayer1Jets += process.myJetGenJetMatch
process.makeAllLayer1Jets.remove( process.allLayer1Jets )
process.makeAllLayer1Jets += process.allLayer1Jets
process.allLayer1Jets.genJetMatch = "myJetGenJetMatch"
# Define the path
process.p = cms.Path(
    process.patDefaultSequence
)
process.maxEvents.input = 1000 # Reduce number of events for testing.
process.out.fileName = 'edmPatMcMatch.root'
process.out.outputCommands += [ 'keep recoGenParticles_genParticles_*_*' ] # Keeps the MC objects for references
process.options.wantSummary = False # to suppress the long output at the end of the job
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
8954d8187ee611a1397e099108f1803f3fa5ff35 | b503e79ccfca67c8114f5bd7a215f5ae993a0ba4 | /airflow/providers/amazon/aws/operators/sagemaker_endpoint_config.py | bbf2be11441f1d4d1f2d7d7be3d6bc723ee9308a | [
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT"
] | permissive | github/incubator-airflow | df1d9780f862ea1df8261ea6015dd50a4583f983 | 73f70e00b9fd294057f8ca6b714a85622f6d5dd5 | refs/heads/gh-2.0.2 | 2023-07-29T18:08:43.140580 | 2022-09-14T18:23:42 | 2022-09-14T18:23:42 | 80,634,006 | 24 | 27 | Apache-2.0 | 2023-04-18T04:24:36 | 2017-02-01T15:34:55 | Python | UTF-8 | Python | false | false | 2,217 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.operators.sagemaker_base import SageMakerBaseOperator
from airflow.utils.decorators import apply_defaults
class SageMakerEndpointConfigOperator(SageMakerBaseOperator):
    """Create an Amazon SageMaker endpoint config.

    On success the operator returns a dict with key ``'EndpointConfig'``
    holding the description of the endpoint config created in SageMaker.

    :param config: The configuration necessary to create an endpoint config.

        For details of the configuration parameter see :py:meth:`SageMaker.Client.create_endpoint_config`
    :type config: dict
    :param aws_conn_id: The AWS connection ID to use.
    :type aws_conn_id: str
    """

    # Config paths coerced to int by preprocess_config() -- presumably
    # handled in the base operator; confirm against SageMakerBaseOperator.
    integer_fields = [['ProductionVariants', 'InitialInstanceCount']]

    @apply_defaults
    def __init__(self, *, config: dict, **kwargs):
        super().__init__(config=config, **kwargs)
        self.config = config

    def execute(self, context) -> dict:
        # Normalise the user-supplied config (e.g. integer_fields) before
        # handing it to the hook.
        self.preprocess_config()
        endpoint_config_name = self.config['EndpointConfigName']
        self.log.info('Creating SageMaker Endpoint Config %s.', endpoint_config_name)
        response = self.hook.create_endpoint_config(self.config)
        status_code = response['ResponseMetadata']['HTTPStatusCode']
        if status_code != 200:
            # Fail the task when the AWS API call did not succeed.
            raise AirflowException('Sagemaker endpoint config creation failed: %s' % response)
        return {'EndpointConfig': self.hook.describe_endpoint_config(endpoint_config_name)}
| [
"noreply@github.com"
] | github.noreply@github.com |
fcb09819fc0640389e046c9fbd0577354ac1e6c1 | 7ad5582d0f59de00c68e1f1dec626be68ac6332e | /src/test/parser/pattern/nodes/test_one_or_more.py | 52252322db65c60fdc295a1cb4f70372188b9286 | [
"MIT"
] | permissive | ebegen/program-y | ddb5525cb992de7f80f793742876bc9285e27b2d | 4ceb6a31c5ead813faad1b454f0c432e93d6ef7d | refs/heads/master | 2021-01-11T14:01:42.053013 | 2017-06-15T20:39:37 | 2017-06-15T20:39:37 | 94,932,566 | 1 | 0 | null | 2017-06-20T20:32:34 | 2017-06-20T20:32:34 | null | UTF-8 | Python | false | false | 1,734 | py | from programy.parser.exceptions import ParserException
from test.parser.pattern.nodes.base import PatternTestBaseClass
from programy.parser.pattern.nodes.oneormore import PatternOneOrMoreWildCardNode
class PatternOneOrMoreWildCardNodeTests(PatternTestBaseClass):
    """Tests for PatternOneOrMoreWildCardNode, the pattern node matching
    one or more words via the '*' and '_' AIML wildcards."""

    def test_invalid_wildcard(self):
        # Only '*' and '_' are valid one-or-more wildcards; anything else
        # must raise ParserException from the constructor.
        # NOTE: the original test asserted on `node` after the constructor
        # raised, but the name is never bound in that case, so the test
        # died with a NameError instead of passing cleanly.
        with self.assertRaises(ParserException):
            PatternOneOrMoreWildCardNode("X")

    def test_star(self):
        node = PatternOneOrMoreWildCardNode("*")
        self.assertIsNotNone(node)

        # A one-or-more wildcard is none of the other node types.
        self.assertFalse(node.is_root())
        self.assertFalse(node.is_priority())
        self.assertFalse(node.is_zero_or_more())
        self.assertTrue(node.is_one_or_more())
        self.assertFalse(node.is_set())
        self.assertFalse(node.is_bot())
        self.assertFalse(node.is_template())
        self.assertFalse(node.is_that())
        self.assertFalse(node.is_topic())
        self.assertTrue(node.is_wildcard())

        # Freshly constructed node has an (empty) child container.
        self.assertIsNotNone(node.children)
        self.assertFalse(node.has_children())

        self.assertEqual(node.wildcard, "*")
        # Equivalence compares node type + wildcard; equals() matches a
        # concrete word, which a bare wildcard character is not.
        self.assertTrue(node.equivalent(PatternOneOrMoreWildCardNode("*")))
        self.assertFalse(node.equals(self.bot, "testid", "*"))
        self.assertEqual(node.to_string(), "ONEORMORE [P(0)^(0)#(0)C(0)_(0)*(0)To(0)Th(0)Te(0)] wildcard=[*]")

    def test_underline(self):
        node = PatternOneOrMoreWildCardNode("_")
        self.assertIsNotNone(node)
        self.assertEqual(node.wildcard, "_")
        self.assertTrue(node.equivalent(PatternOneOrMoreWildCardNode("_")))
        self.assertEqual(node.to_string(), "ONEORMORE [P(0)^(0)#(0)C(0)_(0)*(0)To(0)Th(0)Te(0)] wildcard=[_]")
| [
"keith@keithsterling.com"
] | keith@keithsterling.com |
6985bd54d85367c3737c150e198b75089cbd5908 | a0376b1b780488c7f97c324cbc6de51f029b21c9 | /GULBI GOURI SHANKER_4AL18EC018.py.py | 983f54cef68d3643fd05afa74f12d9b3a47eddd9 | [] | no_license | alvas-education-foundation/ECE-2year-Code-Challenge | 7af0b0e887ec3359f1546ba8d0c1f30d99e9d295 | 3ffe3e196890433c62b74263de4717795176d3da | refs/heads/master | 2022-10-14T02:01:39.165085 | 2020-06-13T10:29:07 | 2020-06-13T10:29:07 | 266,272,378 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py | correct = True
while correct:
name = input("what is the user_name?")
if name == "micheal".lower():
password = input("please enter your password: ")
if password == "e3$WT89x":
print("you have successfully login.")
break
if password != "e3$WT89x":
passwordi = input("invalid password,would you like to try user_name and password again?y/n: ")
if passwordi == "y".lower():
correct = True
correct=0:
while count < 3:
print("invalid password.")
else:
print("account blocked.")
correct+=1
break
else:
print("thank you for trying to login,goodbye. ")
quit()
if name != "micheal".lower():
username = input("username incorrect,would you like to try the username again?y/n: ")
if username == "y".lower():
correct == True
else:
print("thankyou for trying to login")
quit()
# Code has syntax error
File "<string>", line 14
correct=0:
^
SyntaxError: invalid syntax
| [
"noreply@github.com"
] | alvas-education-foundation.noreply@github.com |
58213c5f3427d53904ddc1ebb3dc309ff9fde3a7 | 236dd1755695fd582b85381cfd40be1886b1459f | /about/migrations/0003_aboutindexpage_body_pua.py | 8bc184adeb760b89fc65b4f53879634b283ad730 | [] | no_license | davebshow/lenguapurhepecha | bbdb841085adbd5b0f42764a7e4bcafd202068c8 | c6ab3ada2b770a0bcca210fa53bb1281857e9168 | refs/heads/master | 2020-06-16T11:37:31.100321 | 2016-11-29T18:03:43 | 2016-11-29T18:03:43 | 75,107,817 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-29 01:54
from __future__ import unicode_literals
from django.db import migrations
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an optional Wagtail rich-text
    # field ``body_pua`` to the AboutIndexPage model.
    # NOTE(review): the "_pua" suffix presumably marks a translated body
    # (repo concerns the Purhepecha language) -- confirm against the model.

    dependencies = [
        # Must be applied after the previous 'about' app migration.
        ('about', '0002_auto_20161129_0114'),
    ]

    operations = [
        migrations.AddField(
            model_name='aboutindexpage',
            name='body_pua',
            # blank=True keeps the field optional in forms/admin.
            field=wagtail.wagtailcore.fields.RichTextField(blank=True),
        ),
    ]
| [
"davebshow@gmail.com"
] | davebshow@gmail.com |
678ab04dfd419260daa2c3e9c5de70af34821483 | 091a6200be74bf6577c86f623665bcc24e16b02b | /CircuitPython_Templates/mp3_multi_file_playback/code.py | 33f93bfe528b1f5f01e69dcdde308dc8596fa399 | [
"MIT"
] | permissive | adafruit/Adafruit_Learning_System_Guides | b5f7bce40a16da64e7a79d4b39de032f2cca41d4 | 5eaa7a15a437c533b89f359a25983e24bb6b5438 | refs/heads/main | 2023-09-05T18:31:41.621956 | 2023-09-05T15:36:09 | 2023-09-05T15:36:09 | 105,065,494 | 937 | 937 | MIT | 2023-09-12T18:48:53 | 2017-09-27T20:22:44 | C | UTF-8 | Python | false | false | 2,920 | py | # SPDX-FileCopyrightText: 2021 Kattni Rembor for Adafruit Industries
# SPDX-License-Identifier: MIT
"""
CircuitPython multiple MP3 playback example.
Plays two MP3 files consecutively, once time each.
Remove this line and all of the following docstring content before submitting to the Learn repo.
INCLUDE THE MP3 FILES IN THIS DIRECTORY IN A DIRECTORY WITH THE RESULTING CODE.PY FILE.
Choose the setup section appropriate for the board into which this template code is going. The
default is for SAMD51 boards.
If the setup is commented out, uncomment it. Regardless, ALWAYS delete the comment above the chosen
setup is commented out, uncomment it. Regardless, ALWAYS delete the comment above the chosen
imports/setup and all other setup options so that the example includes ONLY the appropriate list
of imports and the hardware setup. For example, a generic SAMD51 example should be:
import board
import audiomp3
import audioio
audio = audioio.AudioOut(board.A0)
mp3files = ["slow.mp3", "happy.mp3"]
with open(mp3files[0], "rb") as mp3:
decoder = audiomp3.MP3Decoder(mp3)
for filename in mp3files:
with open(filename, "rb") as decoder.file:
audio.play(decoder)
print("Playing:", filename)
while audio.playing:
pass
The example content, as above, should contain NO commented out code, NO setup comment labels, and
NO other commented out setup code.
"""
import board
import audiomp3

# For most SAMD51 boards
import audioio

# DAC output on pin A0.
audio = audioio.AudioOut(board.A0)

# For most RP2040 and nRF boards
# import audiopwmio
#
# audio = audiopwmio.PWMAudioOut(board.A0)

# For MacroPad, Circuit Playground Bluefruit, and any RP2040 or nRF boards with a built-in speaker
# and requiring you to enable the SPEAKER_ENABLE pin
# import audiopwmio
# import digitalio
#
# shutdown = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
# shutdown.switch_to_output(True)
# audio = audiopwmio.PWMAudioOut(board.SPEAKER)

# For any SAMD51 boards with a built in speaker and requiring you to enable the SPEAKER_ENABLE pin
# import audioio
# import digitalio
#
# shutdown = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
# shutdown.switch_to_output(True)
# audio = audioio.AudioOut(board.SPEAKER)

# For CLUE or nRF boards with built-in speaker and no SPEAKER_ENABLE pin
# import audiopwmio
#
# audio = audiopwmio.PWMAudioOut(board.SPEAKER)

# For any SAMD51 boards with a built in speaker and no SPEAKER_ENABLE pin
# import audioio
#
# audio = audioio.AudioOut(board.SPEAKER)

mp3files = ["slow.mp3", "happy.mp3"]

# Construct the decoder once from the first file; the same decoder object
# is reused for every file by reassigning its .file attribute below.
with open(mp3files[0], "rb") as mp3:
    decoder = audiomp3.MP3Decoder(mp3)

for filename in mp3files:
    with open(filename, "rb") as decoder.file:
        audio.play(decoder)
        print("Playing:", filename)
        # Busy-wait until the current file finishes before opening the next.
        while audio.playing:
            pass

print("Done playing!")
| [
"kattni@adafruit.com"
] | kattni@adafruit.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.