blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c9ce85723629a707758ea22deedc74f0c563ea12 | 4b89a7de426fb53b999b5f3834404215a90817df | /pyobjc-framework-GameCenter/setup.py | 21ba4c119f399fb8e08f6ccbc52b420a124e686f | [] | no_license | peeyush-tm/pyobjc | a1f3ec167482566ddc7c895cfa2aca436109cf66 | da488946f6cc67a83dcc26c04484ca4f10fabc82 | refs/heads/master | 2021-01-20T19:26:06.015044 | 2016-05-22T14:53:37 | 2016-05-22T14:53:37 | 60,502,688 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | '''
Wrappers for the "GameCenter" framework on MacOS X. The GameCenter framework
gives applications access to Apple's Game Center services. Applications
that make use of the GameCenter framework all use the same services.
These wrappers don't include documentation; please check Apple's documentation
for information on how to use this framework, and PyObjC's documentation
for general tips and tricks regarding the translation between Python
and (Objective-)C frameworks.
'''
from pyobjc_setup import setup, Extension
import os
# Version shared by the wrapper package and its pyobjc-core/Cocoa requirements.
VERSION="3.2a1"
setup(
    name='pyobjc-framework-GameCenter',
    version=VERSION,
    description = "Wrappers for the framework GameCenter on Mac OS X",
    # The module docstring above doubles as the PyPI long description.
    long_description=__doc__,
    packages = [ "GameCenter" ],
    setup_requires = [
        'pyobjc-core>=' + VERSION,
    ],
    install_requires = [
        'pyobjc-core>=' + VERSION,
        'pyobjc-framework-Cocoa>=' + VERSION,
    ],
    ext_modules = [
        # C extension linking against Apple's GameKit framework.
        Extension("GameCenter._GameCenter",
            [ "Modules/_GameCenter.m" ],
            extra_link_args=["-framework", "GameKit"],
            # Rebuild whenever any of the _GameCenter* sources/headers change.
            depends=[
                os.path.join('Modules', fn)
                for fn in os.listdir('Modules')
                if fn.startswith('_GameCenter')
            ]
        ),
    ],
    # Minimum supported Mac OS X version for this framework wrapper.
    min_os_level='10.8',
)
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
51c1cc328778505cc2c071d613fd39395a4d764e | 137e32a95f52169a38fb952ba245faa13040e46c | /mmrx/settings.py | 9f495b2e723c135d1fe5ffeb1fffa234ffcddfeb | [
"Apache-2.0"
] | permissive | zszwoaini/ygsc | d5d242bdc0638bdb2f4d3b71df68bc2c40b34888 | 9edff2b7ebc2d56879de1ab2c7cabbe4744940f2 | refs/heads/master | 2020-04-25T04:43:16.044920 | 2019-02-25T15:40:59 | 2019-02-25T15:40:59 | 172,520,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,680 | py | """
Django settings for mmrx project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control here — rotate it and
# load it from the environment for any real deployment.
SECRET_KEY = 'egfnoyt$v+d1ba-kr#@zlkm+-7%4ytk)n43$@-ln=bg+ijt$dc'
# CORS whitelist: only these hosts/IPs may make cross-origin requests.
CORS_ORIGIN_WHITELIST = (
    '127.0.0.1:8080',
    'localhost:8080',
    'www.mmrx.site:8080'
)
CORS_ALLOW_CREDENTIALS = True  # allow cookies on cross-origin requests
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): "*" accepts any Host header — tighten for production.
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
"rest_framework",
'mapp',
'haystack',
'djcelery',
'tinymce',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mmrx.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mmrx.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1', # 数据库主机
'PORT': 3306, # 数据库端口
'USER': 'root', # 数据库用户名
'PASSWORD': 'root', # 数据库用户密码
'NAME': 'mshop' # 数据库名字
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
#富文本编辑
TINYMCE_DEFAULT_CONFIG = {
'theme': 'advanced',
'width': 600,
'height': 400,
}
# DRF扩展
REST_FRAMEWORK_EXTENSIONS = {
# 缓存时间
'DEFAULT_CACHE_RESPONSE_TIMEOUT': 60 * 60,
# 缓存位置,此default与下方caches中一致,即存在redis 0号库
'DEFAULT_USE_CACHE': 'default',
}
REST_FRAMEWORK = {
# 异常处理
'EXCEPTION_HANDLER': 'utils.exceptions.exception_handler',
# 认证方式,优先采用JSONWebTokenAuthentication认证方式,因为它在最上面
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
#分页
'DEFAULT_PAGINATION_CLASS': 'utils.pagination.StandardResultsSetPagination',
}
import datetime
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=1),
'JWT_RESPONSE_PAYLOAD_HANDLER': 'utils.users.jwt_response_payload_handler',
}
#自定义验证声明
AUTHENTICATION_BACKENDS = ['utils.users.UsernameMobileModelBackend']
AUTH_USER_MODEL = 'mapp.User'
STATIC_URL = '/static/'
GENERATED_STATIC_HTML_FILES_DIR = \
os.path.join(os.path.dirname(BASE_DIR), 'static')
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"session": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"code": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/2",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"history": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/3",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"cart": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/4",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "session"
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://192.168.162.129:9200/', # 此处为elasticsearch运行的服务器ip地址,端口号固定为9200
'INDEX_NAME': 'yigou', # 指定elasticsearch建立的索引库的名称
},
}
# 当添加、修改、删除数据时,自动生成索引
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
import djcelery
djcelery.setup_loader()
BROKER_URL='redis://localhost:6379/3'
CELERY_CONCURRENCY=2 #(设置worker的并发数量)
CELERY_RESULT_BACKEND = 'redis://localhost:6379/4'
# 第三方登录
QQ_CLIENT_ID= '101474184'
QQ_CLIENT_SECRET= 'c6ce949e04e12ecc909ae6a8b09b637c'
QQ_REDIRECT_URL = 'http://www.yigou.site:8080/oauth_callback.html'
EMAIL_USE_SSL = True
EMAIL_HOST = 'smtp.qq.com' # 如果是 163 改成 smtp.163.com
EMAIL_PORT = 465 # SMPT的端口 不能改
EMAIL_HOST_USER = "1181233464@qq.com" # 账号
EMAIL_HOST_PASSWORD = "qseedtkzzdmpgiig" # 授权码
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER # 默认邮件的发送人
VERIFY_CODE_MAX_AGE = 60 * 60 # 单位是秒
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(lineno)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(module)s %(lineno)d %(message)s'
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, "logs/yigou.log"), # 日志文件的位置
'maxBytes': 300 * 1024 * 1024,
'backupCount': 10,
'formatter': 'verbose'
},
},
'loggers': {
'django': { # 定义了一个名为django的日志器
'handlers': ['console', 'file'],
'propagate': True,
},
}
}
ALIPAY_APPID = "2016091900546081"
ALIPAY_URL = "https://openapi.alipaydev.com/gateway.do"
ALIPAY_DEBUG = True
APP_PRIVATE_KEY_PATH = os.path.join(BASE_DIR, 'apps/pay/keys/app_private_key.pem')
ALIPAY_PUBLIC_KEY_PATH = os.path.join(BASE_DIR, 'apps/pay/keys/alipay_public_key.pem')
| [
"1181233464@qq.com"
] | 1181233464@qq.com |
53092dfd2bd0fa00448c9e96ce8c9b25bf3e34ce | 70fec09ceb625608d561937955c285c0c39f6d95 | /examples/basic_examples/http_middleware_service.py | c93260f707026e51e7be79c6dc733285377333fb | [
"MIT"
] | permissive | kalaspuff/tomodachi | b285e2c73696d14e3c84a479745e00824fba7190 | deca849ec2b4cdc3d27f06e9ce0056fac0146a1a | refs/heads/master | 2023-08-31T00:32:12.042486 | 2023-08-21T13:02:24 | 2023-08-21T13:02:24 | 62,165,703 | 191 | 28 | MIT | 2023-09-11T23:32:51 | 2016-06-28T18:43:51 | Python | UTF-8 | Python | false | false | 1,707 | py | import asyncio
from typing import Any, Callable, Dict
from aiohttp import web
import tomodachi
from tomodachi import Options, http, http_error
async def middleware_function(
    func: Callable, service: Any, request: web.Request, context: Dict, *args: Any, **kwargs: Any
) -> Any:
    """Example HTTP middleware: log around the wrapped handler and pass its result through."""
    # Runs before the wrapped handler is invoked.
    tomodachi.get_logger().info("middleware before")
    result = await func(*args, **kwargs)
    # Extra positional/keyword arguments could also be forwarded here,
    # e.g. ``await func(*args, id='overridden', **kwargs)``.
    # Runs after the wrapped handler has returned.
    tomodachi.get_logger().info("middleware after")
    return result
class ExampleHttpMiddlewareService(tomodachi.Service):
    """Minimal tomodachi HTTP service demonstrating a middleware chain."""
    name = "example-http-service"
    # Adds a middleware function that is run on every HTTP call. Several middlewares can be chained.
    http_middleware = [middleware_function]
    # Some options can be specified to define credentials, used ports, hostnames, access log, etc.
    options = Options(
        http=Options.HTTP(
            port=4711,
            content_type="text/plain; charset=utf-8",
            access_log=True,
        ),
    )
    @http("GET", r"/example/?")
    async def example(self, request: web.Request, **kwargs: Any) -> str:
        """GET /example — sleeps one second, then returns a fixed body."""
        await asyncio.sleep(1)
        return "友達"  # tomodachi ("friend" in Japanese)
    @http("GET", r"/example/(?P<id>[^/]+?)/?")
    async def example_with_id(self, request: web.Request, id: str) -> str:
        """GET /example/<id> — the named regex group is passed in as ``id``."""
        return "友達 (id: {})".format(id)
    @http_error(status_code=404)
    async def error_404(self, request: web.Request, **kwargs: Any) -> str:
        """Custom handler invoked for any unmatched route (HTTP 404)."""
        return "error 404"
| [
"hello@carloscar.com"
] | hello@carloscar.com |
350895baad189850e4420355ab20e78d81a348c4 | ece47a6f01011a0eb4dddea49146b8bb726aec1c | /pikarpc/__init__.py | 90436be38b5b4fb652fd5f602fb4137f8511ace6 | [
"MIT"
] | permissive | valfrom/python_pika_rpc | 87b6877110e95936affe4a481eb61756287cf0f0 | 6de04e4bd47cf2322d8d28938586608a32420fe4 | refs/heads/main | 2023-08-31T11:14:40.750418 | 2021-09-29T13:54:48 | 2021-09-29T13:54:48 | 408,753,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | from .rpc_server import RpcServer
from .rpc_client import RpcClient
| [
"valfrom@gmail.com"
] | valfrom@gmail.com |
625a77678dafad3f72ea2f4629bed9b901e7f7cd | 2919484ba494fdb9ce60005392286d293d98c325 | /deep_autoviml/models/big_deep.py | 14b1e0dc911254d0e247500c32d6d37fae9f5323 | [
"Apache-2.0"
] | permissive | Arunava98/deep_autoviml | d6c8d7bb701967d671eae6a8329018e32589d09d | 9902bb230f90d9da367445656fcefad2e2d5aea3 | refs/heads/master | 2023-07-20T03:31:38.705198 | 2021-08-26T14:19:38 | 2021-08-26T14:19:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,506 | py | ############################################################################################
#Copyright 2021 Google LLC
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
############################################################################################
import tensorflow as tf
from tensorflow import keras
#### Make sure it is Tensorflow 2.4 or greater!
from tensorflow.keras.optimizers import SGD, Adam, RMSprop
from tensorflow.keras import layers
from tensorflow.keras import optimizers
from tensorflow.keras import models
from tensorflow.keras import callbacks
from tensorflow.keras import backend as K
from tensorflow.keras import utils
from tensorflow.keras import layers
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.optimizers import SGD
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Reshape, MaxPooling1D, MaxPooling2D
from tensorflow.keras.layers import AveragePooling2D, AveragePooling1D
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Embedding, Reshape, Dropout, Dense
from tensorflow.keras.layers import Activation, Dense, Embedding, GlobalAveragePooling1D
from tensorflow.keras.layers import GlobalMaxPooling1D, Dropout, Conv1D
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
############################################################################################
model = models.Sequential([
BatchNormalization(),
Dropout(0.5),
layers.Dense(128, activation='relu', kernel_initializer='he_normal'),
BatchNormalization(),
Dropout(0.5),
layers.Dense(64, activation='relu', kernel_initializer='he_normal'),
BatchNormalization(),
Dropout(0.2),
layers.Dense(32, activation='relu', kernel_initializer='he_normal'),
BatchNormalization(),
Dropout(0.2),
]) | [
"rsesha2001@yahoo.com"
] | rsesha2001@yahoo.com |
3c5efb127360c2ff705a7b4a8c22f76405d13e77 | 4331e72a851f85ca1a31196d2a6be339ed952330 | /user/models.py | 66a8b86de99049404ac961129a51e3f3f0438eee | [] | no_license | Manas377/Exam | 5b413e882c6b264675e1b2df40b517da28c9333e | d4e6b5f8661fdf5b1f8d74364cc54cd75df67283 | refs/heads/master | 2022-10-19T10:57:25.996434 | 2020-06-07T12:19:25 | 2020-06-07T12:19:25 | 270,294,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | from django.db import models
from exam.models import TestSet
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    """Custom user model extending Django's AbstractUser with two role flags."""
    # NOTE(review): nothing here prevents both flags being set at once —
    # presumably enforced at registration; verify in the views/forms.
    is_student = models.BooleanField(default=False)
    is_teacher = models.BooleanField(default=False)
class Student(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
roll_number = models.IntegerField(default=1)
test_set = models.ForeignKey(TestSet, on_delete=models.SET_NULL, null=True)
def save(self, *args, **kwargs):
# this means model IS NOT SAVED to database yet
if self._state.adding:
last_roll_number = Student.objects.all().aggregate(largest=models.Max('roll_number'))['largest']
# aggregate can also return none, we would need to check it IF IT"S THE FIRST ENTRY.
# then we can simply add one to 'last_roll_number' which definitely is the largest roll number.
if last_roll_number is not None:
self.roll_number = last_roll_number + 1
super(Student, self).save(*args, **kwargs)
def __str__(self):
return self.roll_number | [
"manas.kundu@hotmail.com"
] | manas.kundu@hotmail.com |
829785859dab7cbc2a6d79cf70e8791449e36fd6 | de89ae9de27c88aa2fca96985febb7e4ae6ec702 | /lenstronomy/ImSim/Numerics/convolution.py | b28cdd7c11b8104b63f238a2f5552208c241c8d8 | [
"MIT"
] | permissive | Jasonpoh/lenstronomy_sims | 0a7f9a116f6103f128a46d28be43ac8283cf111d | 10715966f2d15018fb4e1bcfe934ffa2c36a3073 | refs/heads/master | 2020-05-25T16:55:12.772873 | 2019-05-21T21:29:57 | 2019-05-21T21:29:57 | 187,897,527 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,904 | py | import scipy.signal as signal
import scipy.ndimage as ndimage
import numpy as np
import lenstronomy.Util.kernel_util as kernel_util
import lenstronomy.Util.util as util
import lenstronomy.Util.image_util as image_util
class PixelKernelConvolution(object):
    """
    Convolution of an image with a fixed pixelized kernel, computed either via
    FFT or via direct grid convolution.
    """
    def __init__(self, kernel, convolution_type='fft'):
        """
        :param kernel: 2d array, convolution kernel
        :param convolution_type: 'fft' or 'grid', method used by convolution2d()
        """
        if convolution_type not in ['fft', 'grid']:
            raise ValueError('convolution_type %s not supported!' % convolution_type)
        self._kernel = kernel
        self._type = convolution_type
    def convolution2d(self, image):
        """
        Convolve an image with the stored kernel.

        :param image: 2d array (image) to be convolved
        :return: convolved image, same shape as input ('same' mode)
        """
        if self._type == 'grid':
            return signal.convolve2d(image, self._kernel, mode='same')
        elif self._type == 'fft':
            return signal.fftconvolve(image, self._kernel, mode='same')
        # unreachable after __init__ validation, kept as a defensive guard
        raise ValueError('convolution_type %s not supported!' % self._type)
class SubgridKernelConvolution(object):
    """
    class to compute the convolution on a supersampled grid with partial convolution computed on the regular grid
    """
    def __init__(self, kernel_supersampled, supersampling_factor, supersampling_size=None, convolution_type='fft'):
        """
        :param kernel_supersampled: kernel in supersampled pixels
        :param supersampling_factor: supersampling factor relative to the image pixel grid
        :param supersampling_size: number of pixels (in units of the image pixels) that are convolved with the
        supersampled kernel; if None, the full convolution uses the supersampled kernel
        :param convolution_type: 'fft' or 'grid', forwarded to PixelKernelConvolution
        """
        n_high = len(kernel_supersampled)
        self._supersampling_factor = supersampling_factor
        numPix = int(n_high / self._supersampling_factor)
        # Degrade the supersampled kernel to the regular pixel grid; even
        # supersampling factors need the dedicated averaging routine.
        if self._supersampling_factor % 2 == 0:
            self._kernel = kernel_util.averaging_even_kernel(kernel_supersampled, self._supersampling_factor)
        else:
            self._kernel = util.averaging(kernel_supersampled, numGrid=n_high, numPix=numPix)
        if supersampling_size is None:
            # No split: the entire convolution is done at supersampled resolution.
            kernel_low_res, kernel_high_res = np.zeros_like(self._kernel), kernel_supersampled
            self._low_res_convolution = False
        else:
            # Split the kernel: the central part is convolved at supersampled
            # resolution, the outskirts at regular (image-pixel) resolution.
            kernel_low_res, kernel_high_res = kernel_util.split_kernel(self._kernel, kernel_supersampled,
                                                                       supersampling_size, self._supersampling_factor)
            self._low_res_convolution = True
        self._low_res_conv = PixelKernelConvolution(kernel_low_res, convolution_type=convolution_type)
        self._high_res_conv = PixelKernelConvolution(kernel_high_res, convolution_type=convolution_type)
    def convolution2d(self, image):
        """
        :param image: 2d array (high resolution image) to be convolved and re-sized
        :return: convolved image at the regular (non-supersampled) resolution
        """
        image_high_res_conv = self._high_res_conv.convolution2d(image)
        image_resized_conv = image_util.re_size(image_high_res_conv, self._supersampling_factor)
        if self._low_res_convolution is True:
            # Add the low-resolution part of the convolution, computed on the
            # image re-sized down to the regular pixel grid.
            image_resized = image_util.re_size(image, self._supersampling_factor)
            image_resized_conv += self._low_res_conv.convolution2d(image_resized)
        return image_resized_conv
class MultiGaussianConvolution(object):
    """
    Convolution consisting of a weighted sum of multiple 2d Gaussians.

    This is aimed at a speed-up without significant loss of accuracy due to the
    simplified convolution kernel relative to a pixelized kernel.
    """
    def __init__(self, sigma_list, fraction_list, pixel_scale, truncation=2):
        """
        :param sigma_list: list of std value of Gaussian kernel
        :param fraction_list: fraction of flux to be convolved with each Gaussian
            kernel (re-normalized internally to sum to 1)
        :param pixel_scale: scale of pixel width (to convert sigmas into units of pixels)
        :param truncation: float. Truncate the filter at this many standard deviations.
            Default is 2 (matching the signature; the old docstring wrongly said 4.0).
        """
        self._num_gaussians = len(sigma_list)
        self._sigmas_scaled = np.array(sigma_list) / pixel_scale
        self._fraction_list = fraction_list / np.sum(fraction_list)
        assert len(self._sigmas_scaled) == len(self._fraction_list)
        self._truncation = truncation
        self._pixel_scale = pixel_scale
    def convolution2d(self, image):
        """
        2d convolution with the weighted sum of Gaussian filters.

        :param image: 2d numpy array, image to be convolved
        :return: convolved image, 2d numpy array
        """
        # Fix: call gaussian_filter from the top-level scipy.ndimage namespace;
        # the scipy.ndimage.filters namespace is deprecated and removed in
        # modern SciPy releases.
        image_conv = None
        for i in range(self._num_gaussians):
            layer = ndimage.gaussian_filter(image, self._sigmas_scaled[i], mode='nearest',
                                            truncate=self._truncation) * self._fraction_list[i]
            image_conv = layer if image_conv is None else image_conv + layer
        return image_conv
    def pixel_kernel(self, num_pix):
        """
        computes a pixelized kernel from the MGE parameters

        :param num_pix: int, size of kernel (odd number per axis)
        :return: pixel kernel centered, normalized to unit sum
        """
        from lenstronomy.LightModel.Profiles.gaussian import MultiGaussian
        mg = MultiGaussian()
        x, y = util.make_grid(numPix=num_pix, deltapix=self._pixel_scale)
        kernel = mg.function(x, y, amp=self._fraction_list, sigma=self._sigmas_scaled)
        kernel = util.array2image(kernel)
        return kernel / np.sum(kernel)
class FWHMGaussianConvolution(object):
    """
    uses a two-dimensional Gaussian function with same FWHM of given kernel as approximation
    """
    def __init__(self, kernel, truncation=4):
        """
        :param kernel: 2d kernel
        :param truncation: sigma scaling of kernel truncation
        """
        # Derive the single Gaussian sigma from the kernel's measured FWHM.
        fwhm = kernel_util.fwhm_kernel(kernel)
        self._sigma = util.fwhm2sigma(fwhm)
        self._truncation = truncation
    def convolution2d(self, image):
        """
        2d convolution with the single FWHM-matched Gaussian.

        :param image: 2d numpy array, image to be convolved
        :return: convolved image, 2d numpy array
        """
        # Fix: use the top-level scipy.ndimage namespace; scipy.ndimage.filters
        # is deprecated and removed in modern SciPy releases.
        image_conv = ndimage.gaussian_filter(image, self._sigma, mode='nearest', truncate=self._truncation)
        return image_conv
class MGEConvolution(object):
    """
    approximates a 2d kernel with an azimuthal Multi-Gaussian expansion
    """
    def __init__(self, kernel, pixel_scale, order=1):
        """
        :param kernel: 2d convolution kernel (centered, odd axis number)
        :param pixel_scale: pixel width, forwarded to MultiGaussianConvolution
        :param order: order of Multi-Gaussian Expansion
        """
        #kernel_util.fwhm_kernel(kernel)
        # Decompose the kernel into Gaussian amplitudes/sigmas.
        amps, sigmas, norm = kernel_util.mge_kernel(kernel, order=order)
        # make instance of MultiGaussian convolution kernel with normalized fractions
        self._mge_conv = MultiGaussianConvolution(sigma_list=sigmas*pixel_scale, fraction_list=np.array(amps) / np.sum(amps),
                                                  pixel_scale=pixel_scale, truncation=4)
        self._kernel = kernel
        # store difference between MGE approximation and real kernel
    def convolution2d(self, image):
        """
        Convolve an image with the MGE approximation of the kernel.

        :param image: 2d numpy array to be convolved
        :return: convolved image, 2d numpy array
        """
        return self._mge_conv.convolution2d(image)
    def kernel_difference(self):
        """
        :return: difference between true kernel and MGE approximation
        """
        kernel_mge = self._mge_conv.pixel_kernel(num_pix=len(self._kernel))
        return self._kernel - kernel_mge
| [
"jasonpoh@uchicago.edu"
] | jasonpoh@uchicago.edu |
dadb22dab974adbfc9735fd7fa2a6b6efea0df0a | 80fcb4e9c0981007eb6d4ea92a39e194ef38980f | /Proiect/proiect/rents/migrations/0003_auto_20190607_0432.py | e6d07618e1375592690ae0463c27ce944daedf53 | [] | no_license | sd-2019-30238/final-project-ppvmarius | 17b761ee97dfe40f1ff00b6e1c2cdf034fba772e | df7e59cb785884eb64335a5590f3dba13c1e97ad | refs/heads/master | 2020-04-30T21:11:39.848969 | 2019-06-07T01:43:42 | 2019-06-07T01:43:42 | 177,088,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-06-07 01:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11; adds two flag-like columns to Rent.
    dependencies = [
        ('rents', '0002_auto_20190607_0414'),
    ]
    operations = [
        migrations.AddField(
            model_name='rent',
            name='delivered',
            # NOTE(review): default=b'no' is a bytes default on a CharField
            # (Python 2-era artifact) — behaves as 'no' here, but confirm.
            field=models.CharField(default=b'no', max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='rent',
            name='driver',
            field=models.CharField(default=b'no', max_length=50),
            preserve_default=False,
        ),
    ]
| [
"mpopovici@clj.bitdefender.biz"
] | mpopovici@clj.bitdefender.biz |
90a6fbbade7f6ccd7670bb4ba2455b2457756974 | 614db3d71137766beea7fbda39693ff8fa625d9a | /source_code/test_my_token.py | 18687b7a397e43482241ff655e29550bd130fe11 | [] | no_license | DmSide/DmSide-ai_code_analysis_tools | c8c0043d8ac418d732043f0c0d2844762aa08eb8 | 327ede21d651aada1711d49cf3a55dea98bfc6c9 | refs/heads/master | 2021-06-26T04:10:50.305101 | 2019-12-26T08:40:35 | 2019-12-26T08:40:35 | 230,227,174 | 0 | 0 | null | 2021-06-10T22:26:27 | 2019-12-26T08:36:44 | Python | UTF-8 | Python | false | false | 1,913 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from my_token import TokenCode
from analyze_tokens_list import statist_research
#import mock
import unittest
from unittest.mock import Mock
from unittest.mock import patch
#import unittest.mock
class TokenCodeTestCase(unittest.TestCase):
    """Unit tests for the TokenCode tokenizer."""
    # NOTE(review): this class-level instance is unused — each test builds its
    # own TokenCode below; candidate for removal.
    tk = TokenCode('C:\\User\\one_two_three.txt')
    def test_tokenize_line(self):
        # A line is split into digits, punctuation, whitespace and CamelCase
        # sub-words ('FirstTest' -> 'First', 'Test').
        test_line = '1. FirstTest ;' + '\n'
        true_result = ['1', '.', ' ', 'First', 'Test', ' ', ';', '\n']
        tk = TokenCode('C:\\User\\one_two_three.txt')
        tokens_dict = {}
        result, tokens_dict = tk.tokenize_line(test_line,tokens_dict)
        self.assertEqual(result, true_result)
    def test_capitals_separate(self):
        # capitals_separate splits a character list at positions flagged in
        # capital_positions_list (1 = split point, -1 = keep).
        testlist = list('OneTwoThree')
        capital_positions_list = [-1,1,-1,-1,1,-1,-1,1,-1,-1,-1]
        tk = TokenCode('C:\\User\\one_two_three.txt')
        result = tk.capitals_separate(testlist,capital_positions_list)
        true_result = list()
        true_result.append(list('One'))
        true_result.append(list('Two'))
        true_result.append(list('Three'))
        self.assertEqual(result, true_result)
class AnalyzeTokensTestCase(unittest.TestCase):
    """Unit tests for analyze_tokens_list.statist_research."""
    def test_statist_research(self):
        # statist_research counts n-gram occurrences for each window size in
        # interval_list: singletons ['a'], ['b'] twice each, pair ['a','b']
        # twice and ['b','a'] once in the sequence a,b,a,b.
        tokens_list = ['a', 'b', 'a', 'b']
        interval_list = [1, 2]
        result_group = [['a'],['b'],['a','b'], ['b', 'a']]
        result_count = [2, 2, 2, 1]
        test_group, test_count = statist_research(tokens_list, interval_list)
        self.assertEqual(test_group, result_group)
        self.assertEqual(test_count, result_count)
if __name__ == '__main__':
    # Build an explicit suite so both test cases run together with
    # verbose (per-test) output instead of the default unittest.main().
    tokenTestSuite = unittest.TestSuite()
    tokenTestSuite.addTest(unittest.makeSuite(TokenCodeTestCase))
    tokenTestSuite.addTest(unittest.makeSuite(AnalyzeTokensTestCase))
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(tokenTestSuite)
#unittest.main() | [
"redhat@zuzex.lan"
] | redhat@zuzex.lan |
d0d32277bf62cb02c65d4a9c171f22a7d3004832 | 250e7bf41bf700d58a17aa73dc955ca94f4c5e6a | /data/shapes.py | fbc1e89ac27323e073595f1b51a3daa92d2581d0 | [] | no_license | darrickyee/as5util | 0878526100ba99a8d9469d0377ebefcbe0b80905 | 990a56599400c4060d08781b84e4656befa88a01 | refs/heads/master | 2020-04-23T05:42:37.522224 | 2019-09-09T23:57:18 | 2019-09-09T23:57:18 | 170,948,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py |
# Curve data for controller shapes, keyed by controller type. Each entry holds
# the arguments for a NURBS-curve builder: 'point' (list of CV coordinates),
# 'degree', and — for the cubic curve — an explicit 'knot' vector.
# NOTE(review): the ik/fk/pole names suggest rig controllers (e.g. Maya);
# confirm against the consuming code.
CTRL_SHAPES = {
    # Closed square in the YZ plane (linear curve; first CV repeated to close).
    'ik': {
        'point': [[0.0, 1.0, 1.0],
                  [0.0, 1.0, -1.0],
                  [0.0, -1.0, -1.0],
                  [0.0, -1.0, 1.0],
                  [0.0, 1.0, 1.0]],
        'degree': 1},
    # Smooth circle-like shape (degree-3 curve with explicit knots).
    'fk': {
        'point': [[0.0, 0.7836, -0.7836],
                  [0.0, 1.1082, -0.0],
                  [-0.0, 0.7836, 0.7836],
                  [-0.0, 0.0, 1.1082],
                  [-0.0, -0.7836, 0.7836],
                  [-0.0, -1.1082, 0.0],
                  [0.0, -0.7836, -0.7836],
                  [0.0, -0.0, -1.1082],
                  [0.0, 0.7836, -0.7836],
                  [0.0, 1.1082, -0.0],
                  [-0.0, 0.7836, 0.7836]],
        'degree': 3,
        'knot': [-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]},
    # Axis cross through the origin (linear segments).
    'pole': {'point': [[0.0, 1.0, 0.0],
                       [0.0, -1.0, 0.0],
                       [0.0, 0.0, 0.0],
                       [-1.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0],
                       [0.0, 0.0, 1.0],
                       [0.0, 0.0, -1.0]],
             'degree': 1},
    # Diamond/octahedron outline (linear segments over all three axes).
    'other': {'point': [[0.0, 0.0, 1.0],
                        [1.0, 0.0, 0.0],
                        [0.0, 0.0, -1.0],
                        [-1.0, 0.0, 0.0],
                        [0.0, 0.0, 1.0],
                        [0.0, 1.0, 0.0],
                        [0.0, 0.0, -1.0],
                        [0.0, -1.0, 0.0],
                        [1.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0],
                        [-1.0, 0.0, 0.0],
                        [0.0, -1.0, 0.0],
                        [0.0, 0.0, 1.0]],
              'degree': 1}
}
| [
"darrickyee@users.noreply.github.com"
] | darrickyee@users.noreply.github.com |
459c72af3cd511acf54b8b60834225780fea43e4 | 5a79600f6db7a8c65fa3182f822891d7fd68eeda | /tests/test_gpu_openacc.py | e1fc8aa0bbbf714cbefa9cc3d031f4e9e91790f1 | [
"MIT"
] | permissive | alisiahkoohi/devito | 867fb05c89f24193951835227abdc271f42cc6e2 | f535a44dff12de2837eb6e3217a65ffb2d371cb8 | refs/heads/master | 2023-03-16T05:50:23.610576 | 2021-05-24T21:49:32 | 2021-05-24T22:21:40 | 128,473,180 | 0 | 0 | MIT | 2023-03-02T12:58:21 | 2018-04-06T21:41:54 | Python | UTF-8 | Python | false | false | 6,509 | py | import pytest
import numpy as np
from conftest import skipif
from devito import Grid, Function, TimeFunction, Eq, Operator, norm, solve
from devito.data import LEFT
from devito.ir.iet import FindNodes, Section, retrieve_iteration_tree
from examples.seismic import TimeAxis, RickerSource, Receiver
class TestCodeGeneration(object):
    """Inspects the OpenACC code Devito generates (no device execution needed)."""
    def test_basic(self):
        # A trivial time-marching Operator should get a collapsed parallel
        # loop plus enter/exit data pragmas for the TimeFunction.
        grid = Grid(shape=(3, 3, 3))
        u = TimeFunction(name='u', grid=grid)
        op = Operator(Eq(u.forward, u + 1), platform='nvidiaX', language='openacc')
        trees = retrieve_iteration_tree(op)
        assert len(trees) == 1
        assert trees[0][1].pragmas[0].value ==\
            'acc parallel loop collapse(3) present(u)'
        assert op.body[1].header[0].value ==\
            ('acc enter data copyin(u[0:u_vec->size[0]]'
             '[0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])')
        assert str(op.body[1].footer[0]) == ''
        assert op.body[1].footer[1].contents[0].value ==\
            ('acc exit data copyout(u[0:u_vec->size[0]]'
             '[0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])')
        assert op.body[1].footer[1].contents[1].value ==\
            ('acc exit data delete(u[0:u_vec->size[0]]'
             '[0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])')
    def test_streaming_postponed_deletion(self):
        # With streaming+orchestrate, the device deletion of the streamed
        # usave slice is postponed to the footer of the second section.
        grid = Grid(shape=(10, 10, 10))
        u = TimeFunction(name='u', grid=grid)
        v = TimeFunction(name='v', grid=grid)
        usave = TimeFunction(name='usave', grid=grid, save=10)
        eqns = [Eq(u.forward, u + usave),
                Eq(v.forward, v + u.forward.dx + usave)]
        op = Operator(eqns, platform='nvidiaX', language='openacc',
                      opt=('streaming', 'orchestrate'))
        sections = FindNodes(Section).visit(op)
        assert len(sections) == 2
        assert str(sections[1].body[0].body[0].footer[1]) ==\
            ('#pragma acc exit data delete(usave[time:1][0:usave_vec->size[1]]'
             '[0:usave_vec->size[2]][0:usave_vec->size[3]])')
    def test_streaming_with_host_loop(self):
        # Mixed host/device loops: the prefetch/init helper functions are
        # generated, and the host loop remains wrapped in a data region.
        grid = Grid(shape=(10, 10, 10))
        f = Function(name='f', grid=grid)
        u = TimeFunction(name='u', grid=grid, save=10)
        eqns = [Eq(f, u),
                Eq(u.forward, f + 1)]
        op = Operator(eqns, platform='nvidiaX', language='openacc',
                      opt=('streaming', 'orchestrate'))
        # Check generated code
        assert len(op._func_table) == 2
        assert 'init_device0' in op._func_table
        assert 'prefetch_host_to_device0' in op._func_table
        sections = FindNodes(Section).visit(op)
        assert len(sections) == 2
        s = sections[0].body[0].body[0]
        assert str(s.body[3].footer[1]) == ('#pragma acc exit data delete'
                                            '(u[time:1][0:u_vec->size[1]][0:u_vec'
                                            '->size[2]][0:u_vec->size[3]])')
        assert str(s.body[2]) == ('#pragma acc data present(u[time:1][0:u_vec->'
                                  'size[1]][0:u_vec->size[2]][0:u_vec->size[3]])')
        trees = retrieve_iteration_tree(op)
        assert len(trees) == 3
        assert 'present(f)' in str(trees[0][1].pragmas[0])
class TestOperator(object):
    """End-to-end operator runs on an actual device (skipped when none)."""
    @skipif('nodevice')
    def test_op_apply(self):
        """A trivial time-increment operator must generate OpenACC code and
        produce the exact integer field after `time_steps` applications."""
        grid = Grid(shape=(3, 3, 3))
        u = TimeFunction(name='u', grid=grid, dtype=np.int32)
        op = Operator(Eq(u.forward, u + 1))
        # Make sure we've indeed generated OpenACC code
        assert 'acc parallel' in str(op)
        time_steps = 1000
        op.apply(time_M=time_steps)
        # Integer dtype, +1 per step: every point must equal time_steps.
        assert np.all(np.array(u.data[0, :, :, :]) == time_steps)
    @skipif('nodevice')
    def test_iso_ac(self):
        """Isotropic acoustic wave propagation with a Ricker source and a line
        of receivers; the receiver norm must match the reference value."""
        shape = (101, 101)
        extent = (1000, 1000)
        origin = (0., 0.)
        # Two-value velocity model split along the second axis (1.5 / 2.5).
        v = np.empty(shape, dtype=np.float32)
        v[:, :51] = 1.5
        v[:, 51:] = 2.5
        grid = Grid(shape=shape, extent=extent, origin=origin)
        t0 = 0.
        tn = 1000.
        dt = 1.6
        time_range = TimeAxis(start=t0, stop=tn, step=dt)
        f0 = 0.010
        src = RickerSource(name='src', grid=grid, f0=f0,
                           npoint=1, time_range=time_range)
        domain_size = np.array(extent)
        # Single source at the domain centre, last coordinate set to 20.
        src.coordinates.data[0, :] = domain_size*.5
        src.coordinates.data[0, -1] = 20.
        # 101 receivers evenly spread along the first coordinate.
        rec = Receiver(name='rec', grid=grid, npoint=101, time_range=time_range)
        rec.coordinates.data[:, 0] = np.linspace(0, domain_size[0], num=101)
        rec.coordinates.data[:, 1] = 20.
        u = TimeFunction(name="u", grid=grid, time_order=2, space_order=2)
        m = Function(name='m', grid=grid)
        m.data[:] = 1./(v*v)  # squared slowness from the velocity model
        pde = m * u.dt2 - u.laplace
        stencil = Eq(u.forward, solve(pde, u.forward))
        src_term = src.inject(field=u.forward, expr=src * dt**2 / m)
        rec_term = rec.interpolate(expr=u.forward)
        op = Operator([stencil] + src_term + rec_term)
        # Make sure we've indeed generated OpenACC code
        assert 'acc parallel' in str(op)
        op(time=time_range.num-1, dt=dt)
        # Reference norm for this exact setup.
        assert np.isclose(norm(rec), 490.56, atol=1e-2, rtol=0)
class TestMPI(object):
    """OpenACC + MPI codegen/execution tests (2 ranks, device required)."""
    @skipif('nodevice')
    @pytest.mark.parallel(mode=2)
    def test_basic(self):
        """5-point stencil on a 6x6 grid split across 2 ranks: the generated
        code must be OpenACC+MPI and each rank's local slice of `u` must match
        the expected values."""
        grid = Grid(shape=(6, 6))
        x, y = grid.dimensions
        t = grid.stepping_dim
        u = TimeFunction(name='u', grid=grid, space_order=2)
        u.data[:] = 1.
        # Explicit 5-point (cross) stencil.
        expr = u[t, x, y-1] + u[t, x-1, y] + u[t, x, y] + u[t, x, y+1] + u[t, x+1, y]
        op = Operator(Eq(u.forward, expr), platform='nvidiaX', language='openacc')
        # Make sure we've indeed generated OpenACC+MPI code
        assert 'acc parallel' in str(op)
        assert len(op._func_table) == 4
        op(time_M=1)
        glb_pos_map = grid.distributor.glb_pos_map
        # Each rank checks only its local 3x6 slice; the LEFT rank's expected
        # slice is the row-reversed image of the other rank's.
        if LEFT in glb_pos_map[x]:
            assert np.all(u.data[0] == [[11., 16., 17., 17., 16., 11.],
                                        [16., 23., 24., 24., 23., 16.],
                                        [17., 24., 25., 25., 24., 17.]])
        else:
            assert np.all(u.data[0] == [[17., 24., 25., 25., 24., 17.],
                                        [16., 23., 24., 24., 23., 16.],
                                        [11., 16., 17., 17., 16., 11.]])
    @skipif('nodevice')
    @pytest.mark.parallel(mode=2)
    def test_iso_ac(self):
        """The acoustic setup from TestOperator must also pass under MPI."""
        TestOperator().test_iso_ac()
| [
"f.luporini12@imperial.ac.uk"
] | f.luporini12@imperial.ac.uk |
17192d0f6f70f84d16ea22b520827341bd2b3559 | ad58fa45fe4092846f293b20ef250fc55ef3bdef | /main.py | e934951023cb53b10e21fa7b1d5f8c58d0ccf113 | [] | no_license | tgb20/HAB | 2051c36ac40d1aff62a9b52de066131260472334 | 61b8c1da377d7b2b099b3319ed15a960819a726d | refs/heads/master | 2020-03-31T09:30:27.810369 | 2018-10-22T15:25:32 | 2018-10-22T15:25:32 | 152,098,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,515 | py | import grovepi as gpi
import time
from grove_i2c_barometic_sensor_BMP180 import BMP085
from gps import GPS
from lcd import LCD
from modem import SatModem
import csv
TEMP_SENSOR = 0 # Port A0 is for the Temperature Sensor
SOUND_SENSOR = 2 # Port A2 is for the Sound Sensor
bmpSensor = BMP085(0x77, 1) # Register pressure sensor Port I2C-1
gpsSensor = GPS('/dev/ttyAMA0', 4800) # Connect Serial to GPS Device
lcdDisplay = LCD() # Create a reference to the LCD Display
satModem = SatModem('/dev/ttyUSB0') # Create a Reference to the Sat Modem
secondElapsed = 0
lcdDisplay.setText("Hello!")
time.sleep(2)
lcdDisplay.setText("Prepping Sensors!")
time.sleep(2)
lcdDisplay.setText("Ready to Go!")
time.sleep(2)
with open(str(int(time.time())) + '.csv', 'wb') as csvfile:
datawriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
datawriter.writerow(['temperature', 'sound', 'pressure',
'latitude', 'longitude', 'altitude'])
while True:
try:
lcdDisplay.setText("Gathering Data")
curTemp = gpi.temp(TEMP_SENSOR) # Get temperature in celcius
# Get current sound level in ???
curSound = gpi.analogRead(SOUND_SENSOR)
curPressure = bmpSensor.readPressure()/100 # Get current presure in millibar
curCoords = gpsSensor.read() # Get the current data from the GPS module
print str(curTemp) + ', ' + str(curSound) + ', ' + \
str(curPressure) # Print Current Readings from Sensors
print str(curCoords.latitude) + ', ' + str(curCoords.longitude) + \
', ' + str(curCoords.altitude) # Print Current Readings from GPS
lcdDisplay.setText("Saved Data")
message = [curCoords.latitude, curCoords.longitude, curCoords.altitude]
if secondElapsed % 30 == 0:
# Send Data to Modem
satModem.sendMessage(str(message))
lcdDisplay.setText("Sent Data")
with open('flight.csv', 'a') as csvfile:
datawriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
datawriter.writerow([curTemp, curSound, curPressure,
curCoords.latitude, curCoords.longitude, curCoords.altitude])
except Exception as e:
print("Error")
print(str(e))
lcdDisplay.setText("Error")
time.sleep(1) # Wait 1 Second Until next loop
secondElapsed += 1
| [
"tgb.bashista@gmail.com"
] | tgb.bashista@gmail.com |
705d988c47eabf08b0d5565e1355f84f7a532a93 | 0b9cad239e7b944b29fda12e7a14dfbc6d5c8eb5 | /example/example.py | ec21d914626cc24283c6273e97da681336f8304f | [
"MIT"
] | permissive | dinhanhx/deep_fried_meme | f7b006534352019ba4ff849c0adf918f0142111f | 508eca7e5ad8b84cc2455c28d40135bfc44efc9f | refs/heads/master | 2022-12-24T03:16:42.516580 | 2020-10-11T14:07:26 | 2020-10-11T14:07:26 | 268,708,066 | 2 | 1 | MIT | 2020-10-11T14:07:27 | 2020-06-02T05:21:15 | Python | UTF-8 | Python | false | false | 901 | py | from deep_fried_meme import deep_frier
# Demonstrate deep_frier on a local image and on a direct URL, with both
# putout schemes: writing the processed image to disk, and returning response
# data that web frameworks (Flask, Django) can serve directly.
_demos = [
    ('Hilbert.png', ['file', 'proc Hilbert 1', 'jpg']),      # local -> file
    ('Hilbert.png', ['response data']),                      # local -> response data
    ('https://upload.wikimedia.org/wikipedia/commons/2/24/Hilbert_Curve.256x256%2C16-bit_greyscale.png',
     ['file', 'proc Hilbert 2', 'jpg']),                     # URL -> file
    ('https://upload.wikimedia.org/wikipedia/commons/2/24/Hilbert_Curve.256x256%2C16-bit_greyscale.png',
     ['response data']),                                     # URL -> response data
]

for _index, (_source, _scheme) in enumerate(_demos):
    if _index:
        print('===')
    print(deep_frier(_source, config=None, putout_scheme=_scheme))
| [
"dinhanhx@gmail.com"
] | dinhanhx@gmail.com |
7e0937d9c87ca2b42dc9634ae22a9863317be59b | 844eaa6e8719e7842dfadd40f10dff72b5eedcfd | /guestbook.py | e29508d5cd30a7d26b2e04dab6b75c1c8a413b7e | [
"Unlicense"
] | permissive | saptarshighosh9/online-shooting--game | 0546b07e3b780c272dc3093461a661d502b8ad24 | bec8bcd2cf27d02b492a9e91bc81aa0096c78c28 | refs/heads/master | 2016-08-04T21:00:24.299581 | 2013-08-21T10:56:05 | 2013-08-21T10:56:05 | 12,254,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,784 | py | import cgi
import urllib
import os
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
# Jinja2 environment rooted at this module's directory; autoescape guards
# rendered templates against HTML injection.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'])

DEFAULT_GUESTBOOK_NAME = 'Score'  # guestbook used when none is given in the request

# We set a parent key on the 'Greetings' to ensure that they are all in the same
# entity group. Queries across the single entity group will be consistent.
# However, the write rate should be limited to ~1/second.
def guestbook_key(guestbook_name=DEFAULT_GUESTBOOK_NAME):
    """Constructs a Datastore key for a Guestbook entity with guestbook_name."""
    # All greetings of one guestbook hang off this ancestor key, which is what
    # makes the ancestor queries strongly consistent.
    return ndb.Key('Guestbook', guestbook_name)
class Greeting(ndb.Model):
    """Models an individual Guestbook entry with author, content, and date."""
    author = ndb.UserProperty()  # unset for anonymous posts
    content = ndb.StringProperty(indexed=False)
    date = ndb.DateTimeProperty(auto_now_add=True)  # set once on first put()
class MainPage(webapp2.RequestHandler):
    """Renders the guestbook page with the ten most recent greetings."""

    def get(self):
        guestbook_name = self.request.get('guestbook_name',
                                          DEFAULT_GUESTBOOK_NAME)
        # Ancestor query: strongly consistent within the guestbook entity group.
        greetings_query = Greeting.query(
            ancestor=guestbook_key(guestbook_name)).order(-Greeting.date)
        greetings = greetings_query.fetch(10)

        if users.get_current_user():
            url = users.create_logout_url(self.request.uri)
            url_linktext = 'Logout'
        else:
            url = users.create_login_url(self.request.uri)
            url_linktext = 'Login'

        template_values = {
            'greetings': greetings,
            'guestbook_name': urllib.quote_plus(guestbook_name),
            'url': url,
            # Fix: url_linktext was computed but never passed to the template,
            # leaving the login/logout link without a label.
            'url_linktext': url_linktext,
        }

        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(template.render(template_values))
class Guestbook(webapp2.RequestHandler):
    """Handles POSTs that add a new greeting to a guestbook."""

    def post(self):
        # The new Greeting shares the guestbook's ancestor key so the page's
        # ancestor query sees it with strong consistency; the write rate to a
        # single entity group should stay around ~1/second.
        guestbook_name = self.request.get('guestbook_name',
                                          DEFAULT_GUESTBOOK_NAME)
        greeting = Greeting(parent=guestbook_key(guestbook_name))
        current_user = users.get_current_user()
        if current_user:
            greeting.author = current_user
        greeting.content = self.request.get('content')
        greeting.put()
        # Bounce back to the page for the same guestbook.
        query_params = {'guestbook_name': guestbook_name}
        self.redirect('/?' + urllib.urlencode(query_params))
# WSGI application: '/' renders the page, '/sign' accepts new greetings.
application = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/sign', Guestbook),
], debug=True)
| [
"saptarshighosh9@users.noreply.github.com"
] | saptarshighosh9@users.noreply.github.com |
1093db221b2448f0e403511989449cc47907e737 | ea6edbfcba1b8d0a892c666575fc7374e3b96229 | /SimpleGUICS2Pygame/simplegui_lib_draw.py | 7f3a35b889de93c9694f52ee1fa425c3abf33a98 | [] | no_license | guohengkai/hw-poc | 2a5ae9e244d675f15275210ccaf2a87d5e1d570c | 4cc2e530f77a96850811df4cc3a79b52b3c6b48f | refs/heads/master | 2021-01-10T10:57:36.293431 | 2016-01-18T13:30:39 | 2016-01-18T13:30:39 | 49,801,389 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,215 | py | # -*- coding: latin-1 -*-
"""
simplegui_lib_draw (November 8, 2013)
Draw functions to help
in SimpleGUI of CodeSkulptor.
Piece of SimpleGUICS2Pygame.
https://bitbucket.org/OPiMedia/simpleguics2pygame
GPLv3 --- Copyright (C) 2013 Olivier Pirson
http://www.opimedia.be/
"""
#
# Functions
############
def draw_rect(canvas, pos, size, line_width, line_color, fill_color=None):
    """
    Draw a rectangle.

    :param canvas: simplegui.Canvas
    :param pos: (int or float, int or float) or [int or float, int or float]
    :param size: (int or float, int or float) or [int or float, int or float]
    :param line_width: int >= 0
    :param line_color: str
    :param fill_color: str
    """
    assert isinstance(pos, (tuple, list)), type(pos)
    assert len(pos) == 2, len(pos)
    assert isinstance(pos[0], (int, float)), type(pos[0])
    assert isinstance(pos[1], (int, float)), type(pos[1])

    assert isinstance(size, (tuple, list)), type(size)
    assert len(size) == 2, len(size)
    assert isinstance(size[0], (int, float)), type(size[0])
    assert isinstance(size[1], (int, float)), type(size[1])

    assert isinstance(line_width, (int, float)), type(line_width)
    assert line_width >= 0, line_width

    assert isinstance(line_color, str), type(str)
    assert (fill_color is None) or isinstance(fill_color, str), type(str)

    # The rectangle covers size[0] x size[1] pixels, so the far corner sits
    # at pos + size - 1.
    left, top = pos[0], pos[1]
    right = left + size[0] - 1
    bottom = top + size[1] - 1

    canvas.draw_polygon(((left, top),
                         (right, top),
                         (right, bottom),
                         (left, bottom)),
                        line_width, line_color, fill_color)
def draw_text_side(frame, canvas,
                   text, point,
                   font_size, font_color,
                   font_face='serif',
                   font_size_coef=3.0/4,
                   rectangle_color=None, rectangle_fill_color=None,
                   side_x=-1, side_y=1):
    """
    Draw the `text` string at the position `point`.

    See `simplegui.draw_text()`.

    If `rectangle_color` != `None`
    then draw a rectangle around the text.

    If `rectangle_fill_color` != `None`
    then draw a filled rectangle under the text.

    | If `side_x`
    |   < 0 then `point[0]` is the left of the text,
    |   == 0 then `point[0]` is the center of the text,
    |   > 0 then `point[0]` is the right of the text.

    | If `side_y`
    |   < 0 then `point[1]` is the top of the text,
    |   == 0 then `point[1]` is the center of the text,
    |   > 0 then `point[1]` is the bottom of the text.

    :param text: str
    :param point: (int or float, int or float) or [int or float, int or float]
    :param font_size: (int or float) >= 0
    :param font_color: str
    :param font_face: str == 'monospace', 'sans-serif', 'serif'
    :param rectangle_color: None or str
    :param rectangle_fill_color: None or str
    :param side_x: int or float
    :param side_y: int or float
    :param font_size_coef: int or float
    """
    assert isinstance(text, str), type(text)
    assert isinstance(point, tuple) or isinstance(point, list), type(point)
    assert len(point) == 2, len(point)
    assert isinstance(point[0], int) or isinstance(point[0], float), \
        type(point[0])
    assert isinstance(point[1], int) or isinstance(point[1], float), \
        type(point[1])
    assert isinstance(font_size, int) or isinstance(font_size, float), \
        type(font_size)
    assert font_size >= 0, font_size
    assert isinstance(font_color, str), type(font_color)
    assert isinstance(font_face, str), type(font_face)
    assert (rectangle_color is None) or isinstance(rectangle_color, str), \
        type(rectangle_color)
    assert ((rectangle_fill_color is None)
            or isinstance(rectangle_fill_color, str)), \
        type(rectangle_fill_color)
    assert isinstance(side_x, int) or isinstance(side_x, float), type(side_x)
    assert isinstance(side_y, int) or isinstance(side_y, float), type(side_y)
    assert (isinstance(font_size_coef, int)
            or isinstance(font_size_coef, float)), type(font_size_coef)

    # Pixel width of the rendered text; font_face is only passed when given.
    text_width = (frame.get_canvas_textwidth(text, font_size)
                  if font_face is None
                  else frame.get_canvas_textwidth(text, font_size, font_face))
    # Approximate visual height of the text (font_size scaled by the
    # empirical coefficient font_size_coef).
    text_height = font_size*font_size_coef

    # (x, y) becomes the bottom-left point expected by canvas.draw_text().
    if side_x < 0:
        x = point[0]
    elif side_x == 0:
        x = point[0] - text_width/2.0
    else:
        x = point[0] - text_width

    if side_y < 0:
        y = point[1] + text_height
    elif side_y == 0:
        y = point[1] + text_height/2.0
    else:
        y = point[1]

    # Optional background/outline; height is negative because the rectangle
    # extends upward from the text baseline.
    if rectangle_color is not None:
        draw_rect(canvas, (x, y), (text_width, -text_height),
                  1, rectangle_color, rectangle_fill_color)
    elif rectangle_fill_color is not None:
        draw_rect(canvas, (x, y), (text_width, -text_height),
                  1, rectangle_fill_color, rectangle_fill_color)

    canvas.draw_text(text, (x, y), font_size, font_color, font_face)
| [
"guohengkaighk@gmail.com"
] | guohengkaighk@gmail.com |
6cfcbb1a68c162aaf5754e4f590f3db98c8850b8 | ba48780406fd3c04ff7efbd60c8c477a3aaa0f27 | /src2/cv1/try_aux_freq.py | f9d89826ad6a3bea6a4fc559e1a63429ef74ce5e | [] | no_license | umpot/quora | 635d37f1602981d63cc50b5a8070297dce59c19a | ac10e6cd4e396c8b5958371f8e537e671067fd38 | refs/heads/master | 2020-12-30T13:46:07.614164 | 2017-09-18T10:22:07 | 2017-09-18T10:22:07 | 91,250,042 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,817 | py | import pandas as pd
import numpy as np
import seaborn as sns
import re
import os
import sys
# Python 2: force UTF-8 as the process-wide default string encoding.
reload(sys)
sys.setdefaultencoding('utf-8')
# Plotting / console display preferences for interactive exploration.
sns.set(color_codes=True)
sns.set(style="whitegrid", color_codes=True)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 5000)
pd.set_option('display.max_rows', 5000)
pd.set_option('display.max_colwidth', 100)
# Column-name constants shared by all loaders below.
TARGET = 'is_duplicate'
qid1, qid2 = 'qid1', 'qid2'
question1, question2 = 'question1', 'question2'
lemmas_q1, lemmas_q2 = 'lemmas_q1', 'lemmas_q2'
stems_q1, stems_q2 = 'stems_q1', 'stems_q2'
tokens_q1, tokens_q2 = 'tokens_q1', 'tokens_q2'
ner_q1, ner_q2='ner_q1', 'ner_q2'
postag_q1, postag_q2='postag_q1', 'postag_q2'
# Locations of the raw data and the precomputed feature CSVs.
data_folder = '../../../data/'
fp_train = data_folder + 'train.csv'
fp_test = data_folder + 'test.csv'
lemmas_train_fp = os.path.join(data_folder,'nlp','lemmas_train.csv')
lemmas_test_fp = os.path.join(data_folder,'nlp','lemmas_test.csv')
tokens_train_fp = os.path.join(data_folder,'nlp','tokens_train.csv')
tokens_test_fp = os.path.join(data_folder,'nlp','tokens_test.csv')
postag_train_fp = os.path.join(data_folder,'nlp','postag_train.csv')
postag_test_fp = os.path.join(data_folder,'nlp','postag_test.csv')
ner_train_fp = os.path.join(data_folder,'nlp','ner_train.csv')
ner_test_fp = os.path.join(data_folder,'nlp','ner_test.csv')
stems_train_fp = os.path.join(data_folder,'nlp','stems_train.csv')
stems_test_fp = os.path.join(data_folder,'nlp','stems_test.csv')
tfidf_with_stops_train_fp = os.path.join(data_folder,'tfidf','old' ,'tokens_with_stop_words_tfidf_train.csv')
tfidf_with_stops_test_fp = os.path.join(data_folder,'tfidf','old','tokens_with_stop_words_tfidf_test.csv')
magic_train_fp=os.path.join(data_folder, 'magic', 'magic_train.csv')
magic_test_fp=os.path.join(data_folder, 'magic', 'magic_test.csv')
magic2_train_fp = os.path.join(data_folder, 'magic', 'magic2_train.csv')
magic2_test_fp = os.path.join(data_folder, 'magic', 'magic2_test.csv')
common_words_train_fp = os.path.join(data_folder, 'basic','common_words_train.csv')
length_train_fp = os.path.join(data_folder, 'basic','lens_train.csv')
common_words_test_fp = os.path.join(data_folder, 'basic','common_words_test.csv')
length_test_fp = os.path.join(data_folder, 'basic','lens_test.csv')
# Distance-metric CSVs (bool/fuzzy/sequence x lemmas/stems/tokens); these are
# concatenated column-wise by load__train_metrics / load__test_metrics.
TRAIN_METRICS_FP = [
    data_folder + 'distances/'+ 'train_metrics_bool_lemmas.csv',
    data_folder + 'distances/'+'train_metrics_bool_stems.csv',
    data_folder + 'distances/'+'train_metrics_bool_tokens.csv',
    data_folder + 'distances/'+'train_metrics_fuzzy_lemmas.csv',
    data_folder + 'distances/'+'train_metrics_fuzzy_stems.csv',
    data_folder + 'distances/'+'train_metrics_fuzzy_tokens.csv',
    data_folder + 'distances/'+'train_metrics_sequence_lemmas.csv',
    data_folder + 'distances/'+'train_metrics_sequence_stems.csv',
    data_folder + 'distances/'+'train_metrics_sequence_tokens.csv'
]
TEST_METRICS_FP = [
    data_folder + 'distances/'+ 'test_metrics_bool_lemmas.csv',
    data_folder + 'distances/'+'test_metrics_bool_stems.csv',
    data_folder + 'distances/'+'test_metrics_bool_tokens.csv',
    data_folder + 'distances/'+'test_metrics_fuzzy_lemmas.csv',
    data_folder + 'distances/'+'test_metrics_fuzzy_stems.csv',
    data_folder + 'distances/'+'test_metrics_fuzzy_tokens.csv',
    data_folder + 'distances/'+'test_metrics_sequence_lemmas.csv',
    data_folder + 'distances/'+'test_metrics_sequence_stems.csv',
    data_folder + 'distances/'+'test_metrics_sequence_tokens.csv'
]
# Feature columns judged to be noise/duplicates; they are stripped from any
# assembled feature frame before training.
trash_cols = [
    "w_share_ratio_2_std_idf_dirty_lower_no_stops",
    "w_share_ratio_2_smooth_idf_dirty_upper",
    "w_share_ratio_2_std_idf_tokens_lower_no_stops",
    "abi_jaccard_distance",
    "len_char_diff_log",
    "len_word_diff_log",
    "len_word_expt_stop_diff_log",
    "stop_words_num_q1",
    "stop_words_num_q2",
    "lemmas_kulsinski",
    "lemmas_dice",
    "lemmas_jaccard",
    "stems_kulsinski",
    "stems_dice",
    "stems_jaccard",
    "tokens_dice",
    "tokens_jaccard",
    "lemmas_partial_token_set_ratio",
    "stems_partial_token_set_ratio",
    "tokens_partial_token_set_ratio",
    "lemmas_distance.jaccard",
    "stems_distance.jaccard",
    "tokens_distance.jaccard",
    "w_share_ratio_2_smooth_idf_dirty_lower_no_stops",
    "w_share_ratio_2_std_idf_dirty_upper",
    "w_share_ratio_2_smooth_idf_tokens_lower",
    "w_share_ratio_2_std_idf_tokens_lower",
    "w_share_ratio_2_smooth_idf_tokens_lower_no_stops"
]


def del_trash_cols(df):
    """Drop, in place, every column of *df* that appears in trash_cols."""
    present = [col for col in trash_cols if col in df]
    df.drop(columns=present, inplace=True)
def load_train():
    """Raw train.csv, indexed by question-pair id."""
    return pd.read_csv(fp_train, index_col='id')
def load_test():
    """Raw test.csv, indexed by test_id."""
    return pd.read_csv(fp_test, index_col='test_id')
def load__train_metrics():
    """All distance-metric CSVs for train, concatenated column-wise."""
    dfs = [pd.read_csv(fp, index_col='id') for fp in TRAIN_METRICS_FP]
    return pd.concat(dfs, axis=1)
def load__test_metrics():
    """All distance-metric CSVs for test, concatenated column-wise."""
    dfs = [pd.read_csv(fp, index_col='test_id') for fp in TEST_METRICS_FP]
    return pd.concat(dfs, axis=1)
def load_train_all():
    """Train data plus text/length/common-word/metric/tfidf features."""
    return pd.concat([
        load_train(),
        load_train_lemmas(),
        load_train_stems(),
        load_train_tokens(),
        load_train_lengths(),
        load_train_common_words(),
        load__train_metrics(),
        load_train_tfidf()
    ], axis=1)
def load_train_nlp():
    """Train data plus the NLP-derived text columns (postag/lemmas/stems/tokens/ner)."""
    return pd.concat([
        load_train(),
        load_train_postag(),
        load_train_lemmas(),
        load_train_stems(),
        load_train_tokens(),
        load_train_ner()
    ], axis=1)
def load_test_nlp():
    """Test data plus the NLP-derived text columns (postag/lemmas/stems/tokens/ner)."""
    return pd.concat([
        load_test(),
        load_test_postag(),
        load_test_lemmas(),
        load_test_stems(),
        load_test_tokens(),
        load_test_ner()
    ], axis=1)
def load_test_all():
    """Test data plus text/length/common-word/metric/tfidf features."""
    return pd.concat([
        load_test(),
        load_test_lemmas(),
        load_test_stems(),
        load_test_tokens(),
        load_test_lengths(),
        load_test_common_words(),
        load__test_metrics(),
        load_test_tfidf()
    ], axis=1)
def load_train_test():
    """Both raw frames: (train indexed by id, test indexed by test_id)."""
    return pd.read_csv(fp_train, index_col='id'), pd.read_csv(fp_test, index_col='test_id')
def load_train_lemmas():
    """Lemmatized question texts (train); NaNs become '' and values are forced to str."""
    df = pd.read_csv(lemmas_train_fp, index_col='id')
    df = df.fillna('')
    for col in [lemmas_q1, lemmas_q2]:
        df[col]=df[col].apply(str)
    return df
def load_test_lemmas():
    """Lemmatized question texts (test); NaNs become '' and values are forced to str."""
    df = pd.read_csv(lemmas_test_fp, index_col='test_id')
    df = df.fillna('')
    for col in [lemmas_q1, lemmas_q2]:
        df[col]=df[col].apply(str)
    return df
def load_train_tfidf():
    """TF-IDF (tokens incl. stop words) features for train."""
    df = pd.read_csv(tfidf_with_stops_train_fp, index_col='id')
    return df
def load_test_tfidf():
    """TF-IDF (tokens incl. stop words) features for test."""
    df = pd.read_csv(tfidf_with_stops_test_fp, index_col='test_id')
    return df
def load_train_tokens():
    """Tokenized question texts (train); NaNs become ''."""
    df = pd.read_csv(tokens_train_fp, index_col='id')
    df = df.fillna('')
    return df
def load_test_tokens():
    """Tokenized question texts (test); NaNs become ''."""
    df = pd.read_csv(tokens_test_fp, index_col='test_id')
    df = df.fillna('')
    return df
def load_train_postag():
    """POS-tag sequences for train."""
    df = pd.read_csv(postag_train_fp, index_col='id')
    return df
def load_test_postag():
    """POS-tag sequences for test."""
    df = pd.read_csv(postag_test_fp, index_col='test_id')
    return df
def load_train_ner():
    """Named-entity annotations for train."""
    df = pd.read_csv(ner_train_fp, index_col='id')
    return df
def load_test_ner():
    """Named-entity annotations for test."""
    df = pd.read_csv(ner_test_fp, index_col='test_id')
    return df
def load_train_magic():
    """'Magic' features for train: question frequencies plus the magic2 set."""
    df = pd.concat([
        pd.read_csv(magic_train_fp, index_col='id')[['freq_question1', 'freq_question2']],
        pd.read_csv(magic2_train_fp, index_col='id')],
        axis=1
    )
    return df
def load_test_magic():
    """'Magic' features for test: question frequencies plus the magic2 set."""
    df = pd.concat([
        pd.read_csv(magic_test_fp, index_col='test_id')[['freq_question1', 'freq_question2']],
        pd.read_csv(magic2_test_fp, index_col='test_id')],
        axis=1
    )
    return df
def load_train_stems():
    """Porter-stemmed question texts (train), renamed to stems_q1/stems_q2."""
    df = pd.read_csv(stems_train_fp, index_col='id')
    df = df[['question1_porter', 'question2_porter']]
    df = df.rename(columns={'question1_porter': 'stems_q1', 'question2_porter': 'stems_q2'})
    df = df.fillna('')
    for col in [stems_q1, stems_q2]:
        df[col]=df[col].apply(str)
    return df
def load_test_stems():
    """Porter-stemmed question texts (test), renamed to stems_q1/stems_q2."""
    df = pd.read_csv(stems_test_fp, index_col='test_id')
    df = df[['question1_porter', 'question2_porter']]
    df = df.rename(columns={'question1_porter': 'stems_q1', 'question2_porter': 'stems_q2'})
    df = df.fillna('')
    for col in [stems_q1, stems_q2]:
        df[col]=df[col].apply(str)
    return df
def load_train_common_words():
    """Common-word-count features for train."""
    df = pd.read_csv(common_words_train_fp, index_col='id')
    return df
def load_test_common_words():
    """Common-word-count features for test."""
    df = pd.read_csv(common_words_test_fp, index_col='test_id')
    return df
def load_train_lengths():
    """Length-based features for train."""
    df = pd.read_csv(length_train_fp, index_col='id')
    return df
def load_test_lengths():
    """Length-based features for test."""
    df = pd.read_csv(length_test_fp, index_col='test_id')
    return df
def shuffle_df(df, random_state=42):
    """Return the rows of *df* in a seed-reproducible pseudo-random order."""
    np.random.seed(random_state)
    order = np.random.permutation(len(df))
    return df.iloc[order]
def explore_target_ratio(df):
    """Return the fraction of rows labelled duplicate ('pos') and not ('neg')."""
    total = float(len(df))
    n_pos = len(df[df[TARGET] == 1])
    n_neg = len(df[df[TARGET] == 0])
    return {'pos': n_pos / total, 'neg': n_neg / total}
# df = load_train_all()
######################################################################################
######################################################################################
######################################################################################
######################################################################################
#WH
# Feature files from data/wh (presumably question-word based -- named 'wh').
wh_fp_train=os.path.join(data_folder, 'wh', 'wh_train.csv')
wh_fp_test=os.path.join(data_folder, 'wh', 'wh_test.csv')
def load_wh_train():
    """'wh' features for train, indexed by id."""
    df = pd.read_csv(wh_fp_train, index_col='id')
    return df
def load_wh_test():
    """'wh' features for test, indexed by test_id."""
    df = pd.read_csv(wh_fp_test, index_col='test_id')
    return df
######################################################################################
######################################################################################
######################################################################################
######################################################################################
######################################################################################
######################################################################################
######################################################################################
######################################################################################
# Upper-case keyword features (data/keywords).
upper_keywords_fp_train=os.path.join(data_folder, 'keywords', 'train_upper.csv')
upper_keywords_test=os.path.join(data_folder, 'keywords', 'test_upper.csv')
def load_upper_keywords_train():
    """Upper-case keyword features for train, indexed by id."""
    df = pd.read_csv(upper_keywords_fp_train, index_col='id')
    return df
def load_upper_keywords_test():
    """Upper-case keyword features for test, indexed by test_id."""
    df = pd.read_csv(upper_keywords_test, index_col='test_id')
    return df
######################################################################################
######################################################################################
######################################################################################
######################################################################################
# Frequency-filtered (freq 200) variant of the upper-case keyword features.
one_upper_fp_train=os.path.join(data_folder, 'keywords', 'train_upper_freq_200.csv')
one_upper_fp_test=os.path.join(data_folder, 'keywords', 'test_upper_freq_200.csv')
def load_one_upper_train():
    """Frequency-filtered upper-case keyword features for train."""
    df = pd.read_csv(one_upper_fp_train, index_col='id')
    return df
def load_one_upper_test():
    """Frequency-filtered upper-case keyword features for test."""
    df = pd.read_csv(one_upper_fp_test, index_col='test_id')
    return df
######################################################################################
######################################################################################
######################################################################################
######################################################################################
import pandas as pd
import numpy as np
TARGET = 'is_duplicate'
INDEX_PREFIX = 100000000

# Class balance before/after adjustment:
#   old (train): {'pos': 0.369197853026293, 'neg': 0.630802146973707}
#   new (estimated test): pos ~ 0.174264424749, neg ~ 0.825754788586
r1 = 0.174264424749
r0 = 0.825754788586

# Derivation of the oversampling factor:
#   p_old/(1+delta) = p_new
#   delta = (p_old/p_new)-1 = 1.1186071314214785
#   l = delta*N = 452241
delta = 1.1186071314214785


def explore_target_ratio(df):
    """Return the fraction of positive and negative rows of the TARGET label."""
    total = float(len(df))
    n_pos = len(df[df[TARGET] == 1])
    n_neg = len(df[df[TARGET] == 0])
    return {'pos': n_pos / total, 'neg': n_neg / total}


def shuffle_df(df, random_state):
    """Return the rows of *df* in a seed-reproducible pseudo-random order."""
    np.random.seed(random_state)
    order = np.random.permutation(len(df))
    return df.iloc[order]


def oversample_df(df, l, random_state):
    """Rebalance *df* by appending *l* negatives drawn with replacement,
    then shuffling the combined frame."""
    positives = df[df[TARGET] == 1]
    negatives = df[df[TARGET] == 0]
    resampled_negatives = negatives.sample(l, random_state=random_state, replace=True)
    combined = pd.concat([positives, negatives, resampled_negatives])
    return shuffle_df(combined, random_state)


def oversample(train_df, test_df, random_state=42):
    """Oversample negatives in both folds by the factor `delta` so their
    class ratio matches the estimated test-set distribution."""
    extra_train = int(delta * len(train_df))
    extra_test = int(delta * len(test_df))
    return (oversample_df(train_df, extra_train, random_state),
            oversample_df(test_df, extra_test, random_state))
############################################################3
############################################################3
############################################################3
# Average top-K token-frequency features (data/top_k_freq).
train_avg_tokK_freq_fp=os.path.join(data_folder, 'top_k_freq', 'train_avg_K_tok_freq.csv')
test_avg_tokK_freq_fp=os.path.join(data_folder, 'top_k_freq', 'test_avg_K_tok_freq.csv')
def load_topNs_avg_tok_freq_train():
    """Average top-K token-frequency features for train, indexed by id."""
    return pd.read_csv(train_avg_tokK_freq_fp, index_col='id')
def load_topNs_avg_tok_freq_test():
    """Average top-K token-frequency features for test, indexed by test_id."""
    return pd.read_csv(test_avg_tokK_freq_fp, index_col='test_id')
############################################################3
############################################################3
############################################################3
# Feature set from data/abishek.
abi_train_fp = os.path.join(data_folder, 'abishek', 'abi_train.csv')
abi_test_fp = os.path.join(data_folder, 'abishek', 'abi_test.csv')
def load_abi_train():
    """'abishek' feature set for train, indexed by id."""
    return pd.read_csv(abi_train_fp, index_col='id')
def load_abi_test():
    """'abishek' feature set for test, indexed by test_id."""
    return pd.read_csv(abi_test_fp, index_col='test_id')
############################################################3
############################################################3
############################################################3
# Max-k-cores graph features (data/magic).
max_k_cores_train_fp=os.path.join(data_folder,'magic' ,'max_k_cores_train.csv')
max_k_cores_test_fp=os.path.join(data_folder,'magic' ,'max_k_cores_test.csv')
def load_max_k_cores_train():
    """Max-k-cores features for train, indexed by id."""
    return pd.read_csv(max_k_cores_train_fp, index_col='id')
def load_max_k_cores_test():
    """Max-k-cores features for test, indexed by test_id."""
    return pd.read_csv(max_k_cores_test_fp, index_col='test_id')
############################################################3
############################################################3
############################################################3
# GloVe embedding distance metrics (data/embeddings).
glove_train_fp = os.path.join(data_folder, 'embeddings', 'glove_train.csv')
glove_test_fp = os.path.join(data_folder, 'embeddings', 'glove_test.csv')
def load_glove_metrics_train():
    """GloVe embedding metrics for train, indexed by id."""
    return pd.read_csv(glove_train_fp, index_col='id')
def load_glove_metrics_test():
    """GloVe embedding metrics for test, indexed by test_id."""
    return pd.read_csv(glove_test_fp, index_col='test_id')
############################################################3
############################################################3
############################################################3
# 'lex' embedding distance metrics (data/embeddings).
lex_train_fp = os.path.join(data_folder, 'embeddings', 'lex_train.csv')
lex_test_fp = os.path.join(data_folder, 'embeddings', 'lex_test.csv')
def load_lex_metrics_train():
    """'lex' embedding metrics for train, indexed by id."""
    return pd.read_csv(lex_train_fp, index_col='id')
def load_lex_metrics_test():
    """'lex' embedding metrics for test, indexed by test_id."""
    return pd.read_csv(lex_test_fp, index_col='test_id')
############################################################3
############################################################3
############################################################3
# word2vec embedding distance metrics (data/embeddings).
word2vec_train_fp = os.path.join(data_folder, 'embeddings', 'word2vec_train.csv')
word2vec_test_fp = os.path.join(data_folder, 'embeddings', 'word2vec_test.csv')
def load_word2vec_metrics_train():
    """word2vec embedding metrics for train, indexed by id."""
    return pd.read_csv(word2vec_train_fp, index_col='id')
def load_word2vec_metrics_test():
    """word2vec embedding metrics for test, indexed by test_id."""
    return pd.read_csv(word2vec_test_fp, index_col='test_id')
############################################################3
############################################################3
############################################################3
# Embedding models and token streams for which per-question distribution
# statistics (skewness / kurtosis) were precomputed.
embedings_list = ['word2vec', 'glove', 'lex']
column_types = ['tokens', 'lemmas']

# (q1-column, q2-column) name pairs, e.g.
# ('kur_q1vec_tokens_glove', 'kur_q2vec_tokens_glove').
kur_pairs = [
    ('kur_q1vec_{}_{}'.format(col_type, emb), 'kur_q2vec_{}_{}'.format(col_type, emb))
    for col_type in column_types for emb in embedings_list
]
skew_pairs = [
    ('skew_q1vec_{}_{}'.format(col_type, emb), 'skew_q2vec_{}_{}'.format(col_type, emb))
    for col_type in column_types for emb in embedings_list
]


def add_kur_combinations(df):
    """For every skew/kurtosis column pair, add (in place) the absolute
    difference, the q1/q2 ratio, |log| of that ratio, and each side's share
    of the pair's sum."""
    for q1_col, q2_col in kur_pairs + skew_pairs:
        base = q1_col.replace('q1', '')
        a = df[q1_col]
        b = df[q2_col]
        pair_sum = a + b
        df[base + '_abs_diff'] = np.abs(a - b)
        df[base + '_1div2_ratio'] = a / b
        df[base + '_log_ratio'] = np.abs(np.log(a / b))
        df[base + '_q1_ratio'] = a / pair_sum
        df[base + '_q2_ratio'] = b / pair_sum
############################################################3
############################################################3
############################################################3
aux_pairs_50_train_fp = os.path.join(data_folder, 'aux_pron', 'aux_pairs_50_train.csv')
aux_pairs_50_test_fp = os.path.join(data_folder, 'aux_pron', 'aux_pairs_50_test.csv')
def load_aux_pairs_50_train():
return pd.read_csv(aux_pairs_50_train_fp, index_col='id')
def load_aux_pairs_50_test():
    """Load the precomputed 'aux_pairs_50' features (aux_pron data folder)
    for the test split, indexed by ``test_id``."""
    frame = pd.read_csv(aux_pairs_50_test_fp, index_col='test_id')
    return frame
############################################################3
############################################################3
############################################################3
train_pos_metrics_fp=os.path.join(data_folder, 'pos_metrics', 'train_pos_metrics.csv')
test_pos_metrics_fp=os.path.join(data_folder, 'pos_metrics', 'test_pos_metrics.csv')
def load_metrics_on_pos_train():
    """Load the precomputed POS-tag metrics for the training split,
    indexed by ``id``."""
    frame = pd.read_csv(train_pos_metrics_fp, index_col='id')
    return frame
def load_metrics_on_pos_test():
    """Load the precomputed POS-tag metrics for the test split,
    indexed by ``test_id``.

    Bug fix: this previously read ``train_pos_metrics_fp`` (a copy-paste
    slip from the train loader) while indexing on ``test_id``; it now
    reads ``test_pos_metrics_fp`` as the function name promises.
    """
    return pd.read_csv(test_pos_metrics_fp, index_col='test_id')
############################################################3
############################################################3
############################################################3
import xgboost as xgb
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import log_loss
import json
from time import sleep
import traceback
gc_host = '104.197.97.20'
local_host = '10.20.0.144'
user='ubik'
password='nfrf[eqyz'
def load_train_all_xgb():
    """Assemble the complete training feature matrix for the XGBoost run.

    Concatenates every per-feature-group loader column-wise, then strips
    the raw id/text columns so only engineered features reach the model.
    """
    feature_loaders = [
        load_train,
        load_train_lengths,
        load_train_common_words,
        load__train_metrics,
        load_train_tfidf,
        load_train_magic,
        load_wh_train,
        load_one_upper_train,
        load_topNs_avg_tok_freq_train,
        load_abi_train,
        load_max_k_cores_train,
        load_word2vec_metrics_train,
        load_glove_metrics_train,
        load_lex_metrics_train,
        load_metrics_on_pos_train,
        load_aux_pairs_50_train
        # load_upper_keywords_train intentionally left out (as before)
    ]
    train_df = pd.concat([loader() for loader in feature_loaders], axis=1)
    # drop the raw question ids/texts -- they must not be fed to the model
    for col in (qid1, qid2, question1, question2):
        del train_df[col]
    return train_df
# def load_test_all_xgb():
# test_df = pd.concat([
# load_test_lengths(),
# load_test_common_words(),
# load__test_metrics(),
# load_train_tfidf(),
# load_test_magic(),
# load_wh_test(),
# load_one_upper_test(),
# load_topNs_avg_tok_freq_test(),
# # load_abi_test(),
# load_max_k_cores_test(),
# load_word2vec_metrics_test(),
# load_glove_metrics_test(),
# load_lex_metrics_test(),
# load_metrics_on_pos_train()
# ], axis=1)
#
#
# return test_df
def plot_errors(imp):
    """Plot per-boosting-round logloss curves averaged over CV folds.

    *imp* is a list of ``{'train': [...], 'test': [...]}`` dicts (one per
    fold), as produced by ``xgboost_per_tree_results``.
    """
    curves = {split: [fold[split] for fold in imp] for split in ('train', 'test')}
    rounds = range(len(curves['train'][0]))
    fig, ax = plt.subplots()
    for split in ('train', 'test'):
        averaged = [np.mean([run[j] for run in curves[split]]) for j in rounds]
        ax.plot(rounds, averaged, label=split)
    ax.legend()
    plt.show()
def xgboost_per_tree_results(estimator):
    """Extract per-round logloss curves from a fitted XGBoost estimator.

    ``validation_0`` holds the first eval set and ``validation_1`` the
    second one, following the order of the ``eval_set`` passed to ``fit``
    (train first, held-out second in this script).
    """
    history = estimator.evals_result()
    return {
        'train': history['validation_0']['logloss'],
        'test': history['validation_1']['logloss'],
    }
def out_loss(loss):
    # Print *loss* between heavy separator banners so it stands out in the
    # very verbose per-round xgboost console output.
    # (Python 2 print statements -- this script targets Python 2.)
    print '====================================='
    print '====================================='
    print '====================================='
    print loss
    print '====================================='
    print '====================================='
    print '====================================='
def write_results(name,mongo_host, per_tree_res, losses, imp, features):
    """Persist one CV run's results into MongoDB.

    Inserts the per-tree eval curves, per-fold losses, feature importances
    and feature names into database ``xgb_cv``, collection *name*, on the
    server at *mongo_host*. Authenticates with the module-level
    ``user``/``password`` credentials.
    """
    from pymongo import MongoClient
    # numpy scalars are not BSON-serializable -> convert to native Python
    imp=[x.item() for x in imp]
    features=list(features)
    client = MongoClient(mongo_host, 27017)
    client['admin'].authenticate(user, password)
    db = client['xgb_cv']
    collection = db[name]
    try:
        collection.insert_one({
            'results': per_tree_res,
            'losses': losses,
            'importance':imp,
            'features':features
        })
    except:
        # NOTE(review): bare except catches everything (incl. KeyboardInterrupt),
        # but nothing is swallowed -- it logs the traceback and re-raises.
        print 'error in mongo'
        traceback.print_exc()
        raise
    # sleep(20)
def perform_xgb_cv(name, mongo_host):
    """Run stratified K-fold cross-validation of an XGBClassifier.

    Builds the full feature matrix, trains one early-stopped model per
    fold, prints fold loglosses and writes incremental results to MongoDB
    under collection *name* on *mongo_host*.

    Relies on helpers defined elsewhere in this project (not visible
    here): ``del_trash_cols``, ``explore_target_ratio``, ``oversample``
    and the module-level ``TARGET`` column name -- TODO confirm their
    contracts against the project source.
    """
    df = load_train_all_xgb()
    del_trash_cols(df)
    add_kur_combinations(df)
    folds =5
    seed = 42
    skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=seed)
    losses = []
    n_est=[]
    counter = 0
    for big_ind, small_ind in skf.split(np.zeros(len(df)), df[TARGET]):
        # "big" = training fold, "small" = held-out fold
        big = df.iloc[big_ind]
        small = df.iloc[small_ind]
        # print class balance before and after oversampling, for inspection
        print explore_target_ratio(big)
        print explore_target_ratio(small)
        big, small = oversample(big, small, seed)
        print explore_target_ratio(big)
        print explore_target_ratio(small)
        train_target = big[TARGET]
        del big[TARGET]
        train_arr = big
        test_target = small[TARGET]
        del small[TARGET]
        test_arr = small
        # estimator = xgb.XGBClassifier(n_estimators=10000,
        #                               subsample=0.6,
        #                               # colsample_bytree=0.8,
        #                               max_depth=7,
        #                               objective='binary:logistic',
        #                               learning_rate=0.02,
        #                               base_score=0.2)
        estimator = xgb.XGBClassifier(n_estimators=10000,
                                      subsample=0.8,
                                      colsample_bytree=0.8,
                                      max_depth=5,
                                      objective='binary:logistic',
                                      nthread=-1
                                      )
        print test_arr.columns.values
        print len(train_arr)
        print len(test_arr)
        # early stopping monitors the held-out set (validation_1)
        eval_set = [(train_arr, train_target), (test_arr, test_target)]
        estimator.fit(
            train_arr, train_target,
            eval_set=eval_set,
            eval_metric='logloss',
            verbose=True,
            early_stopping_rounds=150
        )
        proba = estimator.predict_proba(test_arr)
        loss = log_loss(test_target, proba)
        out_loss(loss)
        losses.append({'loss':loss, 'best_score':estimator.best_score, 'best_iteration':estimator.best_iteration})
        per_tree_res = xgboost_per_tree_results(estimator)
        ii = estimator.feature_importances_
        n_est.append(estimator.best_iteration)
        # xgb.plot_importance(estimator)
        # plot_errors(stats)
        # results are written after every fold, so a crash keeps earlier folds
        write_results(name, mongo_host, per_tree_res, losses, ii, train_arr.columns)
    # NOTE(review): ``losses`` is a list of dicts, so np.mean(losses) will
    # fail at runtime; likely intended: np.mean([l['loss'] for l in losses]).
    out_loss('avg = {}'.format(np.mean(losses)))
name='try_aux_freq'
perform_xgb_cv(name, gc_host)
print '============================'
print 'DONE!'
print '============================'
| [
"dd.petrovskiy@gmail.com"
] | dd.petrovskiy@gmail.com |
cf418f87d6d07bed318c588e39c784ffc7a4ca45 | fa0947d63c61afbb4df959289c6c753ce46faf5f | /amazonscraper/amazonscraper/items.py | 1bf34dcafdf40d408ed7c3eba1d74e3911827047 | [] | no_license | HemantJangid/AmazonScraper | 02a3d788c07e711890bb77fd310fe837ffae3763 | 0020f439b9e7b834b26eb53b74c8cb6a6db2ce0e | refs/heads/master | 2022-04-18T19:17:24.678040 | 2020-04-15T06:14:02 | 2020-04-15T06:14:02 | 255,822,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class AmazonscraperItem(scrapy.Item):
# define the fields for your item here like:
book_name = scrapy.Field()
author = scrapy.Field()
price = scrapy.Field()
image_link = scrapy.Field()
| [
"jhemant539@gmail.com"
] | jhemant539@gmail.com |
58a23a30aaebb6a01ca922fc25c6ac60e9adb546 | ca28664343d2a17264e10080937f2e528b55358f | /pc_example.py | faded9ce36ed39c3c92fa435bc687b7493e66303 | [] | no_license | js418/visual-analytics-using-ontologies-across-heterogeneous-data | 575c029c05872e6bbede89dda25619d191f0b4e7 | 9c593599c947e43741a557cbdade531106268e49 | refs/heads/master | 2020-03-19T07:06:12.217291 | 2018-06-04T22:01:03 | 2018-06-04T22:01:03 | 136,082,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | import pandas as pd
import numpy as np
import ast
from bokeh.models import CustomJS, ColumnDataSource,HoverTool,BoxSelectTool,TapTool,Range1d
from bokeh.layouts import row, column, widgetbox,layout, Spacer
from bokeh.io import show
from bokeh.plotting import figure, curdoc
from bokeh.models.widgets import TextInput,Button, RadioButtonGroup, Select, Slider
import networkx as nx
from datetime import datetime
import itertools
import radviz_centroid_optimization as rv
from bokeh.models.tickers import FixedTicker
def main():
df = pd.read_csv("C:\\Users\\tensa\\Desktop\\courses\\summer\\iris-species\\Iris.csv")
species = np.array(df["Species"])
color = []
for s in species:
if s=="Iris-setosa":
color.append("pink")
elif s == "Iris-versicolor":
color.append("blue")
elif s == "Iris-virginica":
color.append("green")
df.insert(len(df.columns), "color", color)
new_df = df.iloc[:, 1:5]
label = new_df.columns.values.tolist()
x = [1,2,3,4]
#print(df.iloc[0].values.tolist())
n = len(df.index)
xs=[]
ys=[]
for i in range(n):
xs.append(x)
y = new_df.iloc[i].values.tolist()
ys.append(y)
source = ColumnDataSource(data=dict(x=xs,y=ys, c=color, s=species))
p = figure(plot_width=800, plot_height=400, x_range=label,y_range = [-1,9],title="Parallel Coordinates for Iris Dataset")
p.multi_line('x','y', color='c', source= source,alpha=0.5, line_width=2)
p.multi_line([[1,1],[2,2],[3,3],[4,4]],[[-1,9],[-1,9],[-1,9],[-1,9]],color="black")
#p.x_range=Range1d(0,3)
show(p)
main() | [
"noreply@github.com"
] | noreply@github.com |
ac31523ba9787d027e63b488024b15c9e839e46c | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /classes/_point12.py | 3f67194c74253a3ea60ca8994c2d9259631a918f | [] | no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | from xcp2k.inputsection import InputSection
class _point12(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Type = None
self.Atoms = []
self.Weights = []
self.Xyz = None
self._name = "POINT"
self._keywords = {'Xyz': 'XYZ', 'Type': 'TYPE'}
self._repeated_keywords = {'Weights': 'WEIGHTS', 'Atoms': 'ATOMS'}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
06c65ebb5271abf5a20170924eb6aa109701b957 | 25e48619b6157be79a0cb3051f7b59af4e7a48bb | /assignment2(part2).py | fbf2a38f76d553b42b9c05e3efdfa47d6a9c84e7 | [] | no_license | Nana-Antwi/UVM-CS-21 | 8fdb2125f01820f063e7a2b3e40c4a0b3bd64c73 | 535b8e7efb61a0e4071766b4986e5d9b97952456 | refs/heads/master | 2020-04-17T09:29:27.027534 | 2019-01-18T19:19:18 | 2019-01-18T19:19:18 | 166,459,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,721 | py | #Nana Antwi
#cs-21
#homework 2
#part 2
#design a program to calculate either profit or loss involved in buying and selling shares
#program 1
#when joe purchased the shares
#varaible declaration
number_shares_purchased = 0.0
cost_per_share = 0.0
stockbroker_commission = 0.0
cost_shares_without_commission = 0.0
commission_cost = 0.0
total_cost_purchase = 0.0
#declare constants
NUMBER_SHARES_PURCHASED = 2000
COST_PER_SHARE = 40.00
COMMISSION_RATE = 3
COST_SHARES_WITHOUT_COMISSION = (NUMBER_SHARES_PURCHASED) * (COST_PER_SHARE)
#to get the cost of shares without brokers commission
cost_shares_without_commission = ( NUMBER_SHARES_PURCHASED ) * (COST_PER_SHARE)
#to get brokers commission
commission_cost = ( (COST_SHARES_WITHOUT_COMISSION) * (COMMISSION_RATE)) / 100
#to get the total cost of purchase
total_cost_purchase = (COST_SHARES_WITHOUT_COMISSION) + (commission_cost)
#results
print ("Cost of shares without commission : $", cost_shares_without_commission)
print ("Cost of commission : $", commission_cost)
print ("Total cost of the purchase : $", total_cost_purchase)
#program 2
#when joe sold the shares
#varaible declaration
number_shares_sold = 0.0
cost_per_share = 0.0
strokbroker_commission = 0.0
cost_shares_without_commission = 0.0
commission_cost = 0.0
total_cost_sold = 0.0
total_money_made_after_commission = 0.0
cost_buying_stocks = 0.0
profit_sale_stocks = 0.0
#declare constants
NUMBER_SHARES_SOLD = 2000
COST_PER_SHARE = 42.75
COMMISSION_RATE = 3
COST_BUYING_STOCKS = 80000
COST_SHARES_WITHOUT_COMMISSION = (NUMBER_SHARES_SOLD) * (COST_PER_SHARE)
COMMISSION_COST = ( (COST_SHARES_WITHOUT_COMMISSION) * (COMMISSION_RATE) / 100)
TOTAL_MONEY_MADE_AFTER_COMMISSION= (COST_SHARES_WITHOUT_COMMISSION) - (COMMISSION_COST)
#to get the cost of shares sold without brokers commission
cost_shares_without_commission = (NUMBER_SHARES_SOLD) * (COST_PER_SHARE)
#to get brokers commission on the sale
commission_cost = ( (COST_SHARES_WITHOUT_COMMISSION) * (COMMISSION_RATE) / 100)
#total money he made from the sale after the brokers commission
total_money_made_after_commission = (COST_SHARES_WITHOUT_COMMISSION) - (COMMISSION_COST)
#profit made from buying and selling of shares
profit_sale_stocks = (TOTAL_MONEY_MADE_AFTER_COMMISSION) - (COST_BUYING_STOCKS)
#results
print ("Cost of the sale of shares without brockers commission : $ ", cost_shares_without_commission )
print ("Cost of brokers commission : $", commission_cost )
print ("Total made after brokers commission : $", total_money_made_after_commission )
print ("Profit made from buying and selling of shares : $", profit_sale_stocks )
| [
"noreply@github.com"
] | noreply@github.com |
6c050c0d77f4e5d5ec77c6bef6bca2540f25d9b6 | 461052f4a7197db023ad3deb864bf1784fdd7854 | /library/migrations/0003_auto_20200513_1625.py | 451151225554e0605b2693ef162763660f71eb46 | [
"MIT"
] | permissive | ArRosid/training_drf | 1660a08272c09302b39adc8e19e3674a78863685 | 4369c8113a67bb3f18b6890210902f09d617569f | refs/heads/master | 2022-06-20T02:03:49.373355 | 2020-05-13T16:38:03 | 2020-05-13T16:38:03 | 263,639,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | # Generated by Django 3.0.6 on 2020-05-13 16:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('library', '0002_book_modified_by'),
]
operations = [
migrations.AddField(
model_name='book',
name='deleted_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='book_deleted_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='book',
name='is_deleted',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='book',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='book_modified_by', to=settings.AUTH_USER_MODEL),
),
]
| [
"ahmadrosid30121997@gmail.com"
] | ahmadrosid30121997@gmail.com |
332064ba5922ff92b2319eb3b292136ddec583f8 | 3fcc7957ed103ead0db8d4e6020c52403559e63b | /1557.py | 04bb7afeb9d7032e1dee17c65612b5604da1c506 | [] | no_license | gabrielreiss/URI | db3082bd89832bb4f45d2375db376454c2ff8f27 | 01bc927d1eee8eb16a16de786e981faa494088e8 | refs/heads/master | 2022-04-22T08:25:28.855996 | 2020-04-15T14:58:47 | 2020-04-15T14:58:47 | 255,950,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | cont = False
while cont == False:
n = int(input())
if n == 0:
cont = True
else:
m = []
w, h = n, n
m = [[0 for x in range(w)] for y in range(h)]
for i in range(0, n):
for j in range(0, n):
m[i][j] = 2 ** (i+j)
T = len(str(m[n-1][n-1]))
for i in range(n):
for j in range(n):
m[i][j] = str(m[i][j])
while len(m[i][j]) < T:
m[i][j] = ' ' + m[i][j]
M = ' '.join(m[i])
print(M)
print()
| [
"gabrielreissdecastro@gmail.com"
] | gabrielreissdecastro@gmail.com |
b38ae686b8f4865aa7ec2f285f0c826c7c8762ef | 48c2763ea11e2a7db67acaf46017ca3671e90676 | /starnavi/urls.py | a24e230f7d2250c7a0f87026a35bf614a124df8d | [] | no_license | cynicalanlz/django_likes_posts_example | 4a384c4136eb0f94cd4769de07f7c9a8a16a933c | 6a9cbf281161fb8a5dbe0045992b5c801b748297 | refs/heads/master | 2020-04-10T14:08:30.581939 | 2018-12-10T11:11:26 | 2018-12-10T11:11:26 | 161,068,887 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,683 | py | """starnavi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from core import views as core_views
from rest_framework_jwt.views import obtain_jwt_token
from django.contrib.auth import views as auth_views
from likes.views import LikesViewSet
from posts.views import PostsViewSet
from rest_framework import routers
router = routers.DefaultRouter()
router.register(r'likes', LikesViewSet)
router.register(r'posts', PostsViewSet)
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api-token-auth/', obtain_jwt_token),
url(r'^signup_json/$', core_views.SignUpView.as_view(), name='signup_json'),
url(r'^$', core_views.front, name='front'),
url(r'^signup/$', core_views.signup, name='core_signup'),
url(r'^login/$', auth_views.login,
{'template_name': 'core/login.html',
'redirect_field_name': 'front'}, name='core_login'),
url(r'^logout/$', auth_views.logout, name='core_logout'),
url(r'^api-auth/', include('rest_framework.urls')),
url(r'^', include(router.urls)),
]
| [
"adsnsrg@gmail.com"
] | adsnsrg@gmail.com |
8a3e3ac21b7f3e615c63070f93750a10b7ead43d | f857334dfbbfebde8e000cbe0b718545f524ea01 | /18-database/models.py | 0e81e4f2a6c6fcd32f0273c04964502089ad2dff | [] | no_license | Phibi95/PythonBasics | 8307f4577fd5f600beb5e019d6cad389ef0ec89f | 593bc7d1c16907e5856b6b411a898c844b3dad9b | refs/heads/master | 2023-05-27T22:39:29.510986 | 2020-02-27T18:33:03 | 2020-02-27T18:33:03 | 227,671,644 | 1 | 1 | null | 2023-05-01T20:38:22 | 2019-12-12T18:30:32 | Python | UTF-8 | Python | false | false | 336 | py | import os
from sqla_wrapper import SQLAlchemy
db = SQLAlchemy(os.getenv("DATABASE_URL","sqlite:///localhost.sqlite"))
class Entry(db.Model):
id = db.Column(db.Integer, primary_key = True)
first_name = db.Column(db.String)
last_name = db.Column(db.String)
email = db.Column(db.String)
message = db.Column(db.String) | [
"philipp.bischof@phb-it.de"
] | philipp.bischof@phb-it.de |
28434b2573dce339e33c3e2d2ff0e30e007c76c6 | ec5813bf77fb54a12df8078cb5016a6cfb44dad5 | /tests/test_functional.py | 1533685c0dd11c9d6634616894998dcb450bddb7 | [
"MIT"
] | permissive | dslaw/pipeline-profiler | f9e7403e848549ecfb21138c7fef0d8deeca4c45 | bc41b1acc77471791cdeb6a042776de2034354d5 | refs/heads/master | 2020-05-07T05:41:35.567576 | 2019-04-16T23:26:59 | 2019-10-08T02:30:54 | 180,280,588 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,512 | py | from mprof import read_mprofile_file
from numpy import ones_like
from numpy.testing import assert_allclose, assert_array_equal
from sklearn.dummy import DummyClassifier
from sklearn.externals import joblib
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from io import BytesIO
import pytest
from pipeline_profiler import profile
@pytest.fixture
def transformer():
return StandardScaler()
@pytest.fixture
def predictor():
return DummyClassifier(strategy="stratified")
class TestPipelineMethods(object):
def test_fit(self, buffer, data, transformer):
X, y = data
pipeline = make_pipeline(transformer)
with profile(pipeline, buffer):
pipeline.fit(X, y)
written = buffer.getvalue()
assert ".fit" in written
def test_transform(self, buffer, data, transformer):
X, y = data
pipeline = make_pipeline(transformer)
with profile(pipeline, buffer):
pipeline.fit(X, y)
Xt = pipeline.transform(X)
Xr = pipeline.inverse_transform(Xt)
assert_allclose(Xr, X)
written = buffer.getvalue()
assert ".transform" in written
assert ".inverse_transform" in written
def test_predict(self, buffer, data, transformer, predictor):
X, y = data
pipeline = make_pipeline(transformer, predictor)
with profile(pipeline, buffer):
pipeline.fit(X, y)
y_pred = pipeline.predict(X)
assert y_pred.shape == y.shape
written = buffer.getvalue()
assert ".predict" in written
def test_predict_proba(self, buffer, data, transformer, predictor):
X, y = data
pipeline = make_pipeline(transformer, predictor)
with profile(pipeline, buffer):
pipeline.fit(X, y)
y_pred_proba = pipeline.predict_proba(X)
assert y_pred_proba.shape == (len(y), 2)
written = buffer.getvalue()
assert ".predict_proba" in written
@pytest.fixture
def pipeline(transformer):
predictor = DummyClassifier(strategy="constant", constant=1)
pipeline = make_pipeline(transformer, predictor)
return pipeline
class TestPipelineUsability(object):
def test_pipeline_usable(self, buffer, data, pipeline):
"""Test that the fitted pipeline object can be used after patching."""
X, y = data
expected = ones_like(y)
with profile(pipeline, buffer):
pipeline.fit(X, y)
actual = pipeline.predict(X)
assert_array_equal(actual, expected)
def test_pipeline_serializable(self, buffer, data, pipeline):
"""Test that the fitted pipeline object can be serialized."""
X, y = data
expected = ones_like(y)
with profile(pipeline, buffer):
pipeline.fit(X, y)
with BytesIO() as model_buffer:
joblib.dump(pipeline, model_buffer)
model_buffer.seek(0)
restored = joblib.load(model_buffer)
actual = restored.predict(X)
assert_array_equal(actual, expected)
def test_mprof_compatible_output(tmpdir, data, pipeline):
X, y = data
tmpfile = str(tmpdir / "test.dat")
with open(tmpfile, "w") as fh, profile(pipeline, fh):
pipeline.fit(X, y)
contents = read_mprofile_file(tmpfile)
assert contents["mem_usage"]
assert contents["timestamp"]
assert contents["func_timestamp"]
| [
"davidsamuellaw@gmail.com"
] | davidsamuellaw@gmail.com |
d3842f74d7c8f2ac9012928299f1fbc30bb43ef8 | 13b10d425521c3339d7a6857274aa7be46e40b2a | /src/prct08.py~ | ffe69eccc9957385a304084dbb017709bb328e8d | [] | no_license | alu0100830569/prct12 | 0a96b544bda9e6e0f0a890519d4e9deb5f071c6b | 994ffc880026a39ea01e46e390d803027fe1a93c | refs/heads/master | 2021-01-20T15:44:33.063898 | 2014-05-02T09:19:39 | 2014-05-02T09:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | #! /usr/bin/python
#!encoding: UTF-8
import moduloerror
import sys
if((len(sys.argv)==1) or (len(sys.argv)==2)):
print("No se han encontrado los valores necesarios, por lo que se procederá a ejucutar con los valores predeterminado:")
print("Veces=10 Intervalo=10 Umbral=0.1")
veces=10
n=10
umbral=0.1
else:
n=int(sys.argv[2])
veces=int(sys.argv[1])
umbral=float(sys.argv[3])
print "Nº de subintervalos\tNº de pruebas a realizar\tTolerancia permitida\tPorcentaje de error"
print "%d\t\t\t%d\t\t\t\t%g\t\t\t\t%g" %(n, veces, umbral, moduloerror.error(n, veces, umbral)) | [
"alu0100830569@ull.edu.es"
] | alu0100830569@ull.edu.es | |
09216d4286591f56ae7c01db67922dcae0c20efc | b04039d85bd5d9a06635db6a55873780ef5aa867 | /mysite/settings.py | 9047674d7b193d745eab56d9e2592908f4d4292d | [] | no_license | Josie28/my-first-blog | c8a0c0768998a8ecbf01e0a9af80fc826224824f | 885170d793c6edd9585af7964b54f0ddff628aa5 | refs/heads/master | 2020-07-29T22:25:27.137985 | 2019-09-21T15:05:34 | 2019-09-21T15:05:34 | 209,984,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,207 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '13ijd8(&53c#fin!00%fiaxn-(5tp60))#tqd-9di+up10v=#z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["127.0.0.1", ".pythonanywhere.com"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
"blog.apps.BlogConfig"
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'de-es'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
| [
"Josephine_Baehr@live.de"
] | Josephine_Baehr@live.de |
272b1ec65ca799d870c867984cdca671ec427361 | d47bd1b09907f97c1041e5231c65f6d598316c1e | /1.tutorial/1_constant.py | b585e4c7b648ef472825f2ce2e3f1877663d34b6 | [] | no_license | krama9181/Hello_tensorflow | b7076d7f817f36ad4a0635dc0dd71b92fed04837 | f76d81494cdd7e9dec46c454223a81ee7db4f9e5 | refs/heads/master | 2021-01-20T03:50:20.892931 | 2017-04-27T13:11:14 | 2017-04-27T13:11:14 | 89,595,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | import tensorflow as tf
hello = tf.constant("hello, TensorFlow!")
sess = tf.Session()
print sess.run(hello)
| [
"krama9181@gmail.com"
] | krama9181@gmail.com |
428d2e6fe934576ddc2122a77c4ba8567f75dd3e | d57ff04e0e82a9c3a1338a63c523bd085ee5c794 | /DecisionTree.py | 2aae69047e47254f0f5959f8639765f87e9c1d81 | [] | no_license | esong200/QuantumStockTensorFlow | 86755fcf764dc60412bff4b08e57eb3a86053f1d | 596091e5b8dbb1e3643bc957471de71c960a625c | refs/heads/master | 2020-04-24T00:15:31.039348 | 2019-02-28T20:38:52 | 2019-02-28T20:38:52 | 171,561,015 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,419 | py | from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, ExtraTreeRegressor
from sklearn import preprocessing
import numpy as np
import json
from pandas import DataFrame
import random
import os
def dropOut(data, solution, percent: int = 3):
    """Randomly split off roughly *percent* percent of samples as a hold-out set.

    Pops random (sample, label) pairs out of *data*/*solution* IN PLACE
    until the hold-out reaches the requested share, and returns the two
    popped lists ``(testData, testAns)``.

    Bug fix: the original did ``random.seed = 123``, which *replaces* the
    ``random.seed`` function with the integer 123 instead of seeding the
    RNG; it is now called properly, making the split reproducible.
    """
    random.seed(123)
    testData = []
    testAns = []
    # the denominator shrinks as items are popped, so the loop stops once
    # the hold-out is just over percent% of the remaining pool
    while len(testData) / len(data) <= percent / 100:
        toDropIndex = random.randint(0, len(data) - 1)
        testData.append(data.pop(toDropIndex))
        testAns.append(solution.pop(toDropIndex))
    return testData, testAns
def intoPercent(values: list):
return [(values[i + 1] - values[i]) / values[i] if values[i] != 0 else values[i + 1] for i in
range(len(values) - 1)]
def unpack(*items):
elements = []
for item in items:
if type(item) == list:
elements += unpack(*item)
return elements + [b for b in items if not type(b) is list]
def formatData(dailyOpen, dailyClose, dailyHigh, dailyLow, dailyVolume, daysofData: int = 40, daysAfter: int = 14):
dataX = []
y = []
for i in range(len(dailyOpen)):
dataDay = []
for j in range(len(dailyOpen[i]) - (daysofData + daysAfter)):
adjustData = [dailyVolume[i][j + k] for k in range(daysofData)]
adjustData = [a / sum(adjustData) for a in adjustData]
dataDay = [
[dailyOpen[i][j + k], dailyClose[i][j + k], dailyHigh[i][j + k], dailyLow[i][j + k], adjustData[k]] for
k in range(daysofData)]
dataX.append(unpack(*dataDay))
y.append(percentChange(*[dailyClose[i][j + k] for k in range(daysAfter)]))
# print(len(dataX), len(y))
# print(np.asanyarray(y).shape)
return dataX, y
def percentChange(*changes):
change = 1
for day in changes:
change *= (1 + day)
return advice((change - 1) * 100)
def advice(percent: int):
if percent < -5:
return [1, 0, 0, 0, 0]
elif percent < -2:
return [0, 1, 0, 0, 0]
elif percent < 2:
return [0, 0, 1, 0, 0]
elif percent < 5:
return [0, 0, 0, 1, 0]
else:
return [0, 0, 0, 0, 1]
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
file_name = "dataFortune500Raw2019-2-10.txt"
file_directory = os.path.join(THIS_FOLDER, file_name)
testComps = 30
dailyOpen = []
dailyClose = []
dailyHigh = []
dailyLow = []
dailyVolume = []
comps = 0
with open(file_directory, 'r') as file:
data = json.load(file)
print('Opening Json')
print('Json successfully opened')
for company, priceData in data.items():
if company != 'DJIA':
comps += 1
split = 1
compOpen = []
compClose = []
compHigh = []
compLow = []
compVolume = []
for dataDay in priceData:
split*= dataDay[-1]
compOpen.append(dataDay[0]*split)
compClose.append(dataDay[1]*split)
compHigh.append(dataDay[2]*split)
compLow.append(dataDay[3]*split)
compVolume.append(dataDay[-2])
dailyOpen.append(intoPercent(compOpen))
dailyClose.append(intoPercent(compClose))
dailyHigh.append(intoPercent(compHigh))
dailyLow.append(intoPercent(compLow))
dailyVolume.append(compVolume)
print('Done formatting')
# 14 Day Period Tests: build feature/label matrices, fit two tree regressors,
# and compare cross-validated scores on seen vs held-out data.
X, y = formatData(dailyOpen, dailyClose, dailyHigh, dailyLow, dailyVolume)
testX, testy = dropOut(X, y)
print("done")
# Standardise the feature matrices; labels stay as raw rows.
# (Removed the unused `X_scale = preprocessing.scale(X)`, which re-scaled the
# already-scaled matrix and was never read, along with dead commented-out
# predict_proba lines further down.)
X = preprocessing.scale(np.asarray(X))
y = np.asarray(y)
testX = preprocessing.scale(np.asarray(testX))
testy = np.asarray(testy)
# Fully-grown trees with a fixed seed for reproducibility.
clf = DecisionTreeRegressor(max_depth=None, min_samples_split=2, random_state=0).fit(X, y)
clfE = ExtraTreeRegressor(max_depth=None, min_samples_split=2, random_state=0).fit(X, y)
# 5-fold CV score on the training matrices...
scores = cross_val_score(clf, X, y, cv=5)
scoresE = cross_val_score(clfE, X, y, cv=5)
print('Training Decision', scores.mean())
print('Training Extra', scoresE.mean())
# ...and on the held-out split.
unseen = cross_val_score(clf, testX, testy, cv=5)
unseenE = cross_val_score(clfE, testX, testy, cv=5)
print('New Data Decision', unseen.mean())
print('New Data Extra', unseenE.mean())
# Predictions used by the confusion tallies below.
defaultPrdict = clf.predict(testX)
extraPrdict = clfE.predict(testX)
defaultTrain = clf.predict(X)
extraTrain = clfE.predict(X)
print(clfE.n_outputs_)
print(X.shape)
print(extraPrdict)
print(defaultPrdict)
print(testy)
# Pair up (predicted_class, actual_class) per test sample. Classes are the
# position of the 1 in each one-hot row; an extra-tree row containing no 1
# maps to None.
dCompare = [(list(defaultPrdict[i]).index(1), list(testy[i]).index(1)) for i in range(len(testy))]
eCompare = [(list(extraPrdict[i]).index(1) if 1 in list(extraPrdict[i]) else None, list(testy[i]).index(1)) for i in
            range(len(testy))]
# actual class -> {predicted class -> count}; actual classes assumed 0..4.
dTracing = {0: {}, 1: {}, 2: {}, 3: {}, 4: {}}
eTracing = {0: {}, 1: {}, 2: {}, 3: {}, 4: {}}
for (pre, act) in dCompare:
    # dict.get replaces the original bare try/except counters: only the inner
    # counter is defaulted, so an unexpected actual class still fails loudly.
    dTracing[act][pre] = dTracing[act].get(pre, 0) + 1
for (pre, act) in eCompare:
    eTracing[act][pre] = eTracing[act].get(pre, 0) + 1
print('Tracing from actual to predict decision Tree: \n', dTracing)
print('Tracing from actual to predict extra Tree: \n', eTracing)
# The reverse mapping: predicted class -> {actual class -> count}.
dCasting = {}
eCasting = {}
for (pre, act) in dCompare:
    inner = dCasting.setdefault(pre, {})
    inner[act] = inner.get(act, 0) + 1
for (pre, act) in eCompare:
    inner = eCasting.setdefault(pre, {})
    inner[act] = inner.get(act, 0) + 1
print('Going form predict to actual Decision Tree: \n', dCasting)
print('Going form predict to actual Extra Tree: \n', eCasting)
# Visualizing the decisions First from actual -> predict
print('Visualization of Actual -> Predicted')
for act, preds in dTracing.items():
    # print('When the actual value is {} for Default Trees'.format(act))
    predictions = [key for key in sorted(preds)]
    values = [preds[key] for key in sorted(preds)]
    # Normalise the counts into fractions of this row's total.
    values = [a / sum(values) for a in values]
    df = DataFrame({str(act): predictions, 'predictions': values})
    ax = df.plot.bar(x=str(act), y='predictions')
for act, preds in eTracing.items():
    # print('When the actual value is {} for Extra Trees'.format(act))
    # Extra-tree predictions can include None; the sort key maps None to -1
    # so the keys remain comparable.
    predictions = [key for key in sorted(preds, key=lambda x: x if type(x) is int else -1)]
    values = [preds[key] for key in sorted(preds, key=lambda x: x if type(x) is int else -1)]
    values = [a / sum(values) for a in values]
    df = DataFrame({str(act): predictions, 'predictions': values})
    ax = df.plot.bar(x=str(act), y='predictions')
print('Visualization of Predicted -> Actual')
for act, preds in sorted(dCasting.items()):
    # print('When the actual value is {} for Default Trees'.format(act))
    predictions = [key for key in sorted(preds)]
    values = [preds[key] for key in sorted(preds)]
    values = [a / sum(values) for a in values]
    df = DataFrame({str(act): list(predictions), 'actual': list(values)})
    ax = df.plot.bar(x=str(act), y='actual')
# Same None-to--1 trick for the extra-tree "predicted" keys.
for act, preds in sorted(eCasting.items(), key=lambda x: x[0] if type(x[0]) is int else -1):
    # print('When the actual value is {} for Default Trees'.format(act))
    predictions = [key for key in sorted(preds)]
    values = [preds[key] for key in sorted(preds)]
    values = [a / sum(values) for a in values]
    df = DataFrame({str(act): list(predictions), 'actual': list(values)})
    ax = df.plot.bar(x=str(act), y='actual')
# From the Training Data: repeat the confusion tallies on the *training* set
# to gauge how much the trees have memorised it.
dTCompare = [(list(defaultTrain[i]).index(1), list(y[i]).index(1)) for i in range(len(y))]
eTCompare = [(list(extraTrain[i]).index(1) if 1 in list(extraTrain[i]) else None, list(y[i]).index(1)) for i in
             range(len(y))]
# actual class -> {predicted class -> count}; actual classes assumed 0..4.
dTTracing = {0: {}, 1: {}, 2: {}, 3: {}, 4: {}}
eTTracing = {0: {}, 1: {}, 2: {}, 3: {}, 4: {}}
for (pre, act) in dTCompare:
    # dict.get replaces the original bare try/except counters (see the
    # test-set tallies above for the rationale).
    dTTracing[act][pre] = dTTracing[act].get(pre, 0) + 1
for (pre, act) in eTCompare:
    eTTracing[act][pre] = eTTracing[act].get(pre, 0) + 1
print('Tracing from actual to predict decision Tree: \n', dTTracing)
print('Tracing from actual to predict extra Tree: \n', eTTracing)
"e.song200@gmail.com"
] | e.song200@gmail.com |
dd258e1388ef102e9d77f492101ef00bda3bda1f | 0dc67428c50acf9dea7c17da9c603169a05e201c | /customer/urls.py | b58afe08ab753951ca3b7f89accd78318da1be54 | [] | no_license | sherrywilly/Razorpay | fe9a48ca9a9dd1d7d59ad959535e7ae2e6045305 | 3fe96ff7d6e988b3c276950e0615c0a4eeb1da8e | refs/heads/master | 2023-07-02T11:37:55.951514 | 2021-07-29T04:36:19 | 2021-07-29T04:36:19 | 390,238,480 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | from django.urls import path
from .views import completed, create_contacts, create_fund_account, create_payout, index,payment, refund,verifyPayment
from .webhooks import VerifyPayHook
# Route table for the customer app: storefront/payment pages, payment hooks,
# and the Razorpay payout flow (contact -> bank/fund account -> payout).
urlpatterns = [
    path('',index,name="index"),
    path('payment/continue/',payment,name="pay"),
    path('handlerequest/',verifyPayment,name="verify"),
    path('payment/<payid>/refund/',refund,name="refund"),
    path('payments',completed),
    # path('payment/refund/',refund,name="refund"),
    # NOTE(review): three payout routes below all reuse name="create"; reverse()
    # on that name resolves only the last registration — confirm intent.
    path('payouts/<int:pk>/add_contact/',create_contacts,name="create"),
    path('payouts/<int:id>/add_bank/',create_fund_account,name="create_bank"),
    path('payouts/<int:id>/pay/',create_payout,name="create"),
    # path('payouts/<int:id>/pay/',create_payout,name="create"),
    #####################!-------------- HOOK URLS ----------------##########################
    path('hooks/verify/',VerifyPayHook.as_view()),
    # path('hooks/verify/refund/',VerifyRefundHook.as_view())
]
| [
"sherrywilson521@gmail.com"
] | sherrywilson521@gmail.com |
21a5af9c8fa62de7c1893aa45f03faae75ee32f9 | 0f07b0f038922eea85686af2ad9885e26894792c | /main.py | 52d093eeaee0487889ac033c868e69a98d8b6309 | [] | no_license | eddiezzz/MysqlBuilder | 24c4f90244e37d3971e7ce88b746035efbc90978 | 2d843a356fd3c3d8ac96870c34f66907d7f3d9ce | refs/heads/master | 2021-05-27T08:38:38.506630 | 2014-03-04T09:47:46 | 2014-03-04T09:47:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | #coding: utf8
from db_operator import *
from job import *
import common
class JobFactory:
    """Builds the Job implementation that matches a language code."""

    @staticmethod
    def create(language):
        # Map the two-letter language code onto its job class; anything
        # unrecognised yields None so the caller can bail out cleanly.
        if language == 'th':
            job = ThJob()
        elif language == 'br':
            job = BrJob()
        elif language == 'ar':
            job = ArJob()
        else:
            job = None
        return job
def usage(name):
print '''
#Brief: %s is used for getting specific infos from shitu server
#Author: zhengchangshuai@baidu.com
#usage: python %s cfg_file
''' % (name, name)
if __name__ == '__main__':
    # Entry point: parse the config, initialise logging, then build and run
    # the language-specific job. Non-zero exit codes mark each failure stage.
    # (Python 2 script: note the print statements.)
    filename = 'database.cfg'
    conf = MyConf(filename)
    if 0 != conf.parse():
        print "MyConf.parse error from file:%s" % (filename)
        exit(1)
    # Logging must be initialised before any common.logger call below.
    common.init_log(conf.log_file, conf.log_level)
    job = JobFactory.create(conf.language)
    if not job:
        common.logger.error("job create failed for language:%s" % (conf.language))
        exit(1)
    common.logger.info("job create success for language:%s" % (conf.language))
    # job.init returns 0 on success, matching conf.parse above.
    if job.init(conf) != 0:
        common.logger.warn("job runner init failed")
        exit(1)
    common.logger.info("job runner init success")
    ret = job.run()
    common.logger.info("job runner run over, ret:%d" % (ret))
    exit(0)
| [
"zcs5612@163.com"
] | zcs5612@163.com |
633b26c15c5dc274a4c9596b27023b8e4c850b6a | 5bcd476eb9c02c5511c8447b2794f78b7347752a | /Models/R2N2/Local/scripts/extra_VARMAX_evaluate_models.py | 9643e289c63e387b7354b8e329ddf2249eb3d786 | [] | no_license | MennovDijk/deepcrypto | 3877a0cd5206b59bbb0e143f60f18705daf70312 | 64b6488b13e2c1b32e8c1dd2f282230611ae0683 | refs/heads/master | 2020-03-27T22:41:30.941613 | 2018-03-23T04:13:35 | 2018-03-23T04:13:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,871 | py | __author__ = 'Ian'
#using https://gist.github.com/spro/ef26915065225df65c1187562eca7ec4
import os
import time
import numpy as np
import pandas as pd
import torch
from statsmodels.tsa.api import VAR
from torch.autograd import Variable
from Data.scripts.data import data
from Models.R2N2.Local.scripts.RNN import RNN
from Models.Evaluation.eval import eval_model
from sklearn.metrics import mean_squared_error, roc_auc_score, roc_curve, auc
def run_model(model_name, hidden_size):
    """Load saved RNN params and score the residual model on the training set.

    Fits a VAR(1) to the features, feeds the VAR residuals (plus the raw
    features) through the saved RNN, adds the VAR forecast back onto the RNN
    output, and returns (Y_pred DataFrame, AUC, MSE).
    """
    # import data
    # X, Y = data.import_data(set='cross_val')
    X, Y = data.import_data(set='train')
    # do not plug in returns, but residuals
    # plug in residuals
    VAR_model = VAR(X)
    results = VAR_model.fit(1)
    ar_returns = results.fittedvalues
    # columns to drop from dataframe (spread/volume columns; keep returns only)
    columns = ['XMRspread', 'XMRvolume', 'XMRbasevolume', 'XRPspread', 'XRPvolume', 'XRPbasevolume', 'LTCspread',
               'LTCvolume', 'LTCbasevolume', 'DASHspread', 'DASHvolume', 'DASHbasevolume', 'ETHspread', 'ETHvolume',
               'ETHbasevolume']
    ar_returns.drop(columns, 1, inplace=True)
    # Align X with the VAR fitted values and append the residual columns.
    X = X.loc[ar_returns.index]
    x_returns = X[ar_returns.columns]
    residual_df = x_returns - ar_returns
    X = X.join(residual_df, how='inner', rsuffix='residual')
    # NOTE(review): this is an alias, not a copy — the column rename on the
    # next line also mutates ar_returns. Later code only uses y_ar_returns,
    # so behavior holds, but a .copy() would be safer.
    y_ar_returns = ar_returns
    y_ar_returns.columns = Y.columns
    # Y becomes the residual target: actual return minus next-step VAR forecast.
    Y = (Y.loc[X.index] - y_ar_returns.shift(-1)).dropna()
    y_ar_returns = y_ar_returns.shift(-1).dropna()
    X = X.loc[Y.index]
    x = X.as_matrix()
    y = Y.as_matrix()
    # set prediction matrix
    y_pred = np.zeros(shape=y.shape)
    # set model and load the saved parameters from ../model_params/.
    model = RNN(hidden_size=hidden_size, input_size=len(X.iloc[0:1].values[0]), output_size=len(Y.iloc[0:1].values[0]))
    model.load_state_dict(
        torch.load(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) +
                   '/model_params/{}.pth.tar'.format(model_name)))
    # NOTE(review): `iter` and `input` shadow builtins within this loop.
    for iter in range(len(x)):
        input = Variable(torch.from_numpy(x[iter]).float())
        output = model.forward(input)
        y_pred[iter] = output.data.numpy()
    # Add the VAR forecast back so predictions are in return space again.
    y_pred = y_pred + y_ar_returns.as_matrix()
    Y_pred = pd.DataFrame(data=y_pred, index=Y.index, columns=Y.columns)
    # Direction-of-move scoring: shift predictions to [0, 1], binarise actuals.
    flat_pred = np.clip(Y_pred.as_matrix().flatten() + 0.5, 0, 1)
    flat_actual = np.where(Y.as_matrix().flatten() > 0, 1, 0)
    auc = roc_auc_score(flat_actual, flat_pred)
    mse = mean_squared_error(Y.as_matrix(), Y_pred.as_matrix())
    return Y_pred, auc, mse
# Reproducibility for any stochastic torch ops.
torch.manual_seed(1)

# Identify which saved parameter set to evaluate.
model_string = 'Mom_LSTM_6_BFC_1_AFC_1_Act_None'
hidden_size = 10
model_name = '{}_H{}'.format(model_string, hidden_size)

# Only Y_train_df is consumed below (as the eval comparison target); the
# cross-val split is loaded for parity but not scored here. Removed the dead
# assignments (model_params_file_str, the *_matrix conversions and
# train_conf_list), none of which were ever read.
X_train_df, Y_train_df = data.import_data(set='train')
X_dev_df, Y_dev_df = data.import_data(set='cross_val')

# Score the saved model on the training set.
Y_train_pred_df, train_auc, train_mse = run_model(model_name, hidden_size)

# Backtest + accuracy metrics via the shared evaluation helper.
check_model = eval_model(y_pred_df=Y_train_pred_df, y_actual_df=Y_train_df)
check_model.backtest(printer=False)
check_model.accuracy(printer=False)
train_metrics_dict = check_model.metrics
train_acc_score = check_model.accuracy_score

print('Train')
print(train_metrics_dict)
print('AUC: {} , MSE: {}'.format(train_auc, train_mse))
print('Acc: {}'.format(train_acc_score))

# Persist the same summary next to the model parameters, under ../records/.
filename = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + "/records/{}.txt".format(model_name)
with open(filename, 'w') as f:
    f.write('Train \n')
    f.write('{}\n'.format(train_metrics_dict))
    f.write('AUC: {} , MSE: {}\n'.format(train_auc, train_mse))
    f.write('Acc: {}\n'.format(train_acc_score))
| [
"ianedwardshaw@gmail.com"
] | ianedwardshaw@gmail.com |
0764fd9cfe586798c0d72ae40679aabd7fe72a9e | ecf4d26ece041e4b6e484c9138d8aceff67b7678 | /apps/channelsbase_sankey.py | 139651ec07a31f5f0cbfab4358b7efbabf9e6833 | [] | no_license | EnzoCalogero/dash_plotly_main | 203435faea3d6731fa067cafd219f657667d4407 | 872f8dea9475358df522fc913a7d01696904fd94 | refs/heads/master | 2022-12-11T12:23:44.881509 | 2019-10-06T17:04:35 | 2019-10-06T17:04:35 | 213,204,436 | 0 | 0 | null | 2022-12-08T02:35:25 | 2019-10-06T16:37:36 | Python | UTF-8 | Python | false | false | 14,522 | py | # -*- coding: utf-8 -*-
import base64
import flask
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import os
from datetime import timedelta
from datetime import datetime as dt
import datetime
from app import app
from apps import db_onnections, general_configurations
#####################################
#### DBs information collections ##
#####################################
list_db = db_onnections.list_dbs()
# Enable the DB selector.
# NOTE(review): despite the name, True *disables* the dropdown — the flag is
# passed to dcc.Dropdown's `disabled=` when only one DB is available.
if len(list_db) == 1:
    enable_db_selector = True
else:
    enable_db_selector = False
#####################################
#### First Data Collection ##
#####################################
# Initial 45-day window, used only to discover the newest timestamp in the DB
# and seed the date picker / user list.
FROM = (pd.to_datetime(datetime.datetime.now()) - timedelta(days=45)).strftime("%Y-%m-%d")
df = db_onnections.ChannelDB(from_data=FROM, section=general_configurations.Current_active_DB)
df['created_at'] = pd.to_datetime(df['created_at'])
dataMax = df['created_at'].max()
dataMin = dataMax - timedelta(days=30)
available_Users = sorted(df['display_name'].unique())
# Dash Variables
colors = {
    'background': '#111111',
    'text': '#253471'}
image_filename = os.getcwd() + '/pics//enzo_logo.png'
# NOTE(review): the file handle from open() is never closed explicitly.
encoded_image = base64.b64encode(open(image_filename, 'rb').read())
# Page layout: header row, DB selector row, date-range row, global Sankey,
# then the per-user Sankey section. Hidden divs at the bottom carry callback
# plumbing (cookie cache, clock, active-DB echo).
layout = html.Div([
    # Header: refresh timer, logo link home, page title, set-default-DB link.
    html.Div([
        dcc.Interval(id='interval_channels_sand', interval=general_configurations.refresh_interval),
        html.Div([
            html.A(
                html.Img(src='data:image/png;base64,{}'.format(encoded_image.decode()),
                         style={
                             'height': '20px',
                             'float': 'left',
                             'position': 'relative',
                             'bottom': '-10px',
                             'width': '100px'}
                         ), href='/', target="_self"),
            html.H2(
                children='Channels Sankey',
                id='H2_chan_sand',
                style={
                    'textAlign': 'center',
                    'font-family': 'Glacial Indifference',
                    'color': colors['text']}
            ),
        ], className='ten columns'),
        html.Div([
            html.A('Set the Default DB', href='/setdb', target="_blank")
        ], className='two columns'),
    ], className="row"),
    #################################################################################################
    # DB selector plus navigation links to the sibling channel views.
    html.Div([
        html.Div([
            html.Div(children="Source Database"),
        ], className='two columns'),
        html.Div([
            dcc.Dropdown(
                id="DB-selection-channel_sand",
                clearable=False,
                options=[{'label': i, 'value': i} for i in list_db],
                disabled=enable_db_selector,
                value=general_configurations.Current_active_DB)
        ], className='three columns'),
        html.Div([
            dcc.Link('Percentages', href='/channel_percent'),
        ], className='two columns'),
        html.Div([
            dcc.Link('HeatMaps', href='/channel_heatmap'),
        ], className='two columns'),
    ], className="row"),
    #####################################################################################################
    # Date-range filter seeded from the data discovered at module load.
    html.Div([
        html.Div([
            html.Div(children="Date Range Filter"),
        ], className='two columns'),
        html.Div([
            dcc.DatePickerRange(
                id='date-picker-range-channels_sand',
                minimum_nights=1,
                start_date=dataMin,
                end_date=dataMax),
        ], className='five columns'),
    ], className="row"),
    ####################################################################################################################
    # Global users-vs-protocols Sankey (filled by update_graph9b).
    html.Div([
        html.Div([
            dcc.Graph(id='indicator-graphic9b_sand',
                      config={'modeBarButtonsToRemove': ['sendDataToCloud'], 'displaylogo':False},
                      style={'width': '100%', 'display': 'inline-block','color': colors['text']}),
        ], className="row"),
    ], style={'font-family': 'Glacial Indifference','padding': '0px 10px 15px 10px',
              'marginLeft': 'auto', 'marginRight': 'auto', "width": "157vh",
              'boxShadow': '0px 0px 5px 5px rgba(37,52,113,0.4)','color': colors['text']}
    ),
    #######################################################################################################################
    html.Div([
        html.H4(
            children='Users View',
            style={
                'textAlign': 'left',
                'font-family': 'Glacial Indifference',
                'color': colors['text']}
        ),
    ], className="row"),
    ########################################################################
    # Single-user selector for the per-user Sankey below.
    html.Div([
        html.Div(children="User"),
    ], className='one columns'),
    html.Div([
        dcc.Dropdown(
            id='Users-column_Channel_sand',
            clearable=False,
            options=[{'label': i, 'value': i} for i in available_Users],
            value=available_Users[0]),
    ], className='two columns'),
    ###############################################################################################################
    # Per-user Sankey (filled by update_graph10b).
    html.Div([
        html.Div([
            dcc.Graph(id='indicator-graphic10b_sand',
                      config={'modeBarButtonsToRemove': ['sendDataToCloud'], 'displaylogo': False},
                      style={'width': '100%', 'display': 'inline-block', 'color': colors['text']}),
        ], className="row"),
    ], style={'font-family': 'Glacial Indifference', 'padding': '0px 10px 15px 10px',
              'marginLeft': 'auto', 'marginRight': 'auto', "width": "157vh",
              'boxShadow': '0px 0px 5px 5px rgba(37, 52, 113, 0.4)', 'color': colors['text']}
    ),
    # Hidden plumbing divs used by the callbacks below.
    html.Div(id='intermediate-value_chan_sank', style={'display': 'none'}),
    html.Div(id='display-time_channels_sand'),
    html.Div(id='display-DB-channel_sand'),
], style={'font-family': 'Glacial Indifference', 'padding': '0px 10px 15px 10px',
          'marginLeft': 'auto', 'marginRight': 'auto', "width": "160vh",
          'boxShadow': '0px 0px 5px 5px rgba(37, 52, 113, 0.4)'}
)
# Closure Layout
@app.callback(dash.dependencies.Output('date-picker-range-channels_sand', 'end_date'),
              [dash.dependencies.Input('DB-selection-channel_sand', 'value')])
def update_db_(db):
    """When the DB selection changes, move the picker's end date to the
    newest record found in a 120-day lookback window."""
    window_start = (pd.to_datetime(datetime.datetime.now()) - timedelta(days=120)).strftime("%Y-%m-%d")
    channel_df = db_onnections.ChannelDB(from_data=window_start, section=db)
    channel_df['created_at'] = pd.to_datetime(channel_df['created_at'])
    return channel_df['created_at'].max()
@app.callback(dash.dependencies.Output('date-picker-range-channels_sand', 'start_date'),
              [dash.dependencies.Input('DB-selection-channel_sand', 'value')])
def update_db_(db):
    """When the DB selection changes, set the picker's start date to 15 days
    before the newest record.

    NOTE(review): this def reuses the name ``update_db_`` from the callback
    above — harmless for Dash registration, but confusing to readers.
    """
    window_start = (pd.to_datetime(datetime.datetime.now()) - timedelta(days=120)).strftime("%Y-%m-%d")
    channel_df = db_onnections.ChannelDB(from_data=window_start, section=db)
    channel_df['created_at'] = pd.to_datetime(channel_df['created_at'])
    newest = channel_df['created_at'].max()
    return newest - timedelta(days=15)
# Cookies Related (First Half)
@app.callback(
    dash.dependencies.Output('intermediate-value_chan_sank', 'children'),
    [dash.dependencies.Input('H2_chan_sand', 'children')])
def update_db_chan_heat(_):
    """Read the preferred DB from the 'DB' cookie, defaulting to the first DB.

    Falls back when the cookie is absent (KeyError) or when there is no active
    Flask request context (RuntimeError).
    """
    try:
        return flask.request.cookies['DB']
    except Exception:  # narrowed from a bare except: don't swallow SystemExit etc.
        return list_db[0]
# Cookies Related (Second Half)
@app.callback(
    dash.dependencies.Output('DB-selection-channel_sand', 'value'),
    [dash.dependencies.Input('intermediate-value_chan_sank', 'children')])
def update_db_chan_heat_(db):
    # Pass-through: copies the cookie-derived DB into the dropdown's value.
    return db
##########################
# Time Related function #
##########################
@app.callback(
    dash.dependencies.Output('display-time_channels_sand', 'children'),
    events=[dash.dependencies.Event('interval_channels_sand', 'interval')])
def display_time():
    """Render the current wall-clock time on every interval tick."""
    now = datetime.datetime.now()
    return '{}'.format(now)
# DB related Function
@app.callback(
    dash.dependencies.Output('display-DB-channel_sand', 'children'),
    [dash.dependencies.Input('DB-selection-channel_sand', 'value')])
def update_db(db):
    # Side effect: remember the chosen DB globally so other pages pick it up;
    # the returned value just echoes it into the hidden display div.
    general_configurations.Current_active_DB = db
    return db
#################################
# Graphics Related Functions #
#################################
@app.callback(
    dash.dependencies.Output('indicator-graphic9b_sand', 'figure'),
    [dash.dependencies.Input('interval_channels_sand', 'n_intervals'),
     dash.dependencies.Input('date-picker-range-channels_sand', 'start_date'),
     dash.dependencies.Input('date-picker-range-channels_sand', 'end_date'),
     dash.dependencies.Input('DB-selection-channel_sand', 'value')])
def update_graph9b(n, start_date, end_date, db):
    """Build the global users-vs-[protocol]-device Sankey figure."""
    # Normalise the picker values to plain dates for the DB query.
    start_date = pd.to_datetime(start_date)
    start_date = dt.date(start_date)
    end_date = pd.to_datetime(end_date)
    end_date = dt.date(end_date)
    tempdf = db_onnections.ChannelDB(from_data=start_date, to_data=end_date, section=db)
    # Left-hand nodes: distinct users.
    Users = tempdf[['display_name']].drop_duplicates()
    Users.columns = ['Nome_nodo']
    # Right-hand nodes: "[protocol] - hostname" labels.
    tempdf['protDev'] = "[" + tempdf.protocol + '] - ' + tempdf.hostname
    # NOTE(review): Series.replace(' ', '') replaces values that *equal* ' ',
    # not spaces inside the strings — .str.replace may have been intended.
    tempdf['protDev'] = tempdf['protDev'].replace(' ', '')
    Protocols = tempdf[['protDev']].drop_duplicates()
    Protocols.columns = ['Nome_nodo']
    # Node table: users first, then protocol/device labels; map label -> index.
    nodes = pd.concat([Users, Protocols], axis=0)
    refrenza = [x for x in range(0, nodes.shape[0])]
    nodesDict = dict(zip(nodes.Nome_nodo, refrenza))
    # Link weight = row count per (user, protocol/device) pair.
    links = tempdf[['display_name', 'protocol', 'protDev', 'hostname', 'device']]
    links = links.groupby(['display_name', 'protDev'], as_index=False).count()
    links['value'] = links['hostname']
    links['source'] = links['display_name'].map(nodesDict)
    links['target'] = links['protDev'].map(nodesDict)
    # NOTE(review): compares to the literal string 'nan' — presumably filters
    # labels whose components stringified to 'nan'; verify against the data.
    links = links[links['protDev'] != 'nan']
    links = links[links['display_name'] != 'nan']
    return {
        'data': [dict(
            type="sankey",
            domain=dict(
                x=[0, 1],
                y=[0, 1]),
            link={
                "source": links.source.dropna(axis=0, how='any'),
                "target": links.target.dropna(axis=0, how='any'),
                "value": links['value'].dropna(axis=0, how='any')
            },
            node=dict(label=nodes.Nome_nodo,
                      pad=6,
                      ),
        )],
        'layout': go.Layout(
            title="Sankey Diagram Users vs Protocols-Devices Globally",
            autosize=True,
            font={'family': 'Glacial Indifference', 'color': colors['text']}
        )
    }
@app.callback(
    dash.dependencies.Output('indicator-graphic10b_sand', 'figure'),
    [dash.dependencies.Input('Users-column_Channel_sand', 'value'),
     dash.dependencies.Input('date-picker-range-channels_sand', 'start_date'),
     dash.dependencies.Input('date-picker-range-channels_sand', 'end_date'),
     dash.dependencies.Input('interval_channels_sand', 'n_intervals'),
     dash.dependencies.Input('DB-selection-channel_sand', 'value')])
def update_graph10b(user, start_date, end_date, n, db):
    """Build the Sankey figure restricted to a single user.

    Same pipeline as update_graph9b (see notes there about Series.replace and
    the 'nan' string filters), but filtered on display_name first.
    """
    start_date = pd.to_datetime(start_date)
    start_date = dt.date(start_date)
    end_date = pd.to_datetime(end_date)
    end_date = dt.date(end_date)
    TempDf = db_onnections.ChannelDB(from_data=start_date, to_data=end_date, section=db)
    TempDf = TempDf[TempDf['display_name'] == user]
    TempDf['protDev'] = '[' + TempDf.protocol + '] - ' + TempDf.hostname
    TempDf['protDev'] = TempDf['protDev'].replace(' ', '')
    Users = TempDf[['display_name']].drop_duplicates()
    Users.columns = ['Nome_nodo']
    Protocols = TempDf[['protDev']].drop_duplicates()
    Protocols.columns = ['Nome_nodo']
    nodes = pd.concat([Users, Protocols], axis=0)
    refrenza = [x for x in range(0, nodes.shape[0])]
    nodesDict = dict(zip(nodes.Nome_nodo, refrenza))
    links = TempDf[['display_name', 'protocol', 'protDev', 'hostname', 'device']]
    links = links.groupby(['display_name', 'protDev'], as_index=False).count()
    links['value'] = links['hostname']
    links['source'] = links['display_name'].map(nodesDict)
    links['target'] = links['protDev'].map(nodesDict)
    links = links[links['protDev'] != 'nan']
    links = links[links['display_name'] != 'nan']
    return {
        'data': [dict(
            type="sankey",
            domain=dict(
                x=[0, 1],
                y=[0, 1]),
            link={
                "source": links.source.dropna(axis=0, how='any'),
                "target": links.target.dropna(axis=0, how='any'),
                "value": links['value'].dropna(axis=0, how='any')
            },
            node=dict(label=nodes.Nome_nodo),
        )],
        'layout': go.Layout(
            title="Sankey {} vs protocols-Devices".format(user),
            autosize=True,
            font={'family': 'Glacial Indifference', 'color': colors['text']}
        )
    }
@app.callback(
    dash.dependencies.Output('Users-column_Channel_sand', 'options'),
    [dash.dependencies.Input('date-picker-range-channels_sand', 'start_date'),
     dash.dependencies.Input('date-picker-range-channels_sand', 'end_date'),
     dash.dependencies.Input('DB-selection-channel_sand', 'value')])
def update_index1(start_date, end_date, db):
    """Rebuild the user-dropdown options for the chosen date window and DB."""
    window_start = dt.date(pd.to_datetime(start_date))
    window_end = dt.date(pd.to_datetime(end_date))
    frame = db_onnections.ChannelDB(from_data=window_start, to_data=window_end, section=db)
    return [{'label': name, 'value': name} for name in sorted(frame['display_name'].unique())]
@app.callback(
    dash.dependencies.Output('Users-column_Channel_sand', 'value'),
    [dash.dependencies.Input('date-picker-range-channels_sand', 'start_date'),
     dash.dependencies.Input('date-picker-range-channels_sand', 'end_date'),
     dash.dependencies.Input('DB-selection-channel_sand', 'value')])
def update_index2(start_date, end_date, db):
    """Default the user dropdown to the alphabetically first user in the window."""
    window_start = dt.date(pd.to_datetime(start_date))
    window_end = dt.date(pd.to_datetime(end_date))
    frame = db_onnections.ChannelDB(from_data=window_start, to_data=window_end, section=db)
    return sorted(frame['display_name'].unique())[0]
| [
"enzus@hotmail.com"
] | enzus@hotmail.com |
24d22697d64ee1f29cb1c87424461f1fa75b650b | f6bec8210f9f314bcc8d20b3df34b53ed84be7cc | /core/on_off.py | 0d8a977b1e51f5774096297c0e732b697586e4ae | [] | no_license | SamSunshine/PyLogo | bb4e27f0467ed7acdea9e343614c62660f5aed9b | eb3702dd85089db61bae27a7e3af97b69b7725e6 | refs/heads/master | 2021-01-03T00:48:02.439296 | 2020-02-11T19:25:29 | 2020-02-11T19:25:29 | 239,843,609 | 0 | 0 | null | 2020-02-11T19:13:38 | 2020-02-11T19:13:38 | null | UTF-8 | Python | false | false | 5,103 | py | from pygame.color import Color
import core.gui as gui
from core.gui import HOR_SEP
from core.sim_engine import SimEngine
from core.utils import rgb_to_hex
from core.world_patch_block import Patch, World
import PySimpleGUI as sg
from random import randint
from typing import Tuple
class OnOffPatch(Patch):
    """A Patch that tracks a boolean on/off state and recolors itself."""

    # Class-wide rgb colors shared by every patch; reassigned by OnOffWorld
    # when the user picks new colors.
    on_color = Color('white')
    off_color = Color('black')

    def __init__(self, *args, **kw_args):
        super().__init__(*args, **kw_args)
        # Every patch starts in the "off" state.
        self.is_on = False

    def set_on_off(self, is_on: bool):
        """Record the new state and repaint the patch to match."""
        self.is_on = is_on
        if is_on:
            self.set_color(OnOffPatch.on_color)
        else:
            self.set_color(OnOffPatch.off_color)
class OnOffWorld(World):
    """World whose patches toggle between user-chosen 'on' and 'off' colors."""

    # Hex strings for the two default colors.
    WHITE = '#ffffff'
    BLACK = '#000000'

    # Hidden color-chooser buttons; they are clicked programmatically from
    # handle_event_and_values (they cannot generate events themselves).
    on_color_chooser = sg.ColorChooserButton('on', button_color=(WHITE, WHITE), size=(10, 1))
    off_color_chooser = sg.ColorChooserButton('off', button_color=(BLACK, BLACK), size=(10, 1))

    # Labels of the visible buttons the user actually clicks.
    SELECT_ON_TEXT = 'Select "on" color '
    SELECT_OFF_TEXT = 'Select "off" color'

    @staticmethod
    def get_color_and_update_button(button, default_color_string):
        # Read the color string the GUI holds for this button's key, falling
        # back to default_color_string if nothing was chosen, then repaint the
        # button in that color and return it as a Color object.
        key = button.get_text()
        color_string = SimEngine.get_gui_value(key)
        if color_string in {'None', '', None}:
            color_string = default_color_string
        button.update(button_color=(color_string, color_string))
        color = Color(color_string)
        return color

    def get_colors(self):
        # Refresh OnOffPatch's class-level colors from the two chooser buttons.
        OnOffPatch.off_color = self.get_color_and_update_button(
            self.off_color_chooser,
            default_color_string=rgb_to_hex(OnOffPatch.off_color))
        OnOffPatch.on_color = self.get_color_and_update_button(
            self.on_color_chooser,
            default_color_string=rgb_to_hex(OnOffPatch.on_color))

    def handle_event_and_values(self):
        """
        This method handles the color chooser. It does it in a round-about way because
        the color chooser can't generate events. Consequently, the user is asked to click
        a button next to the color-chooser. In processing that button-click, we ".click()"
        the color-chooser button. The user selects a color, which we retrieve by reading
        the window. We then color the color-chooser button with that color.
        """
        event = SimEngine.get_gui_event()
        if event not in {OnOffWorld.SELECT_ON_TEXT, OnOffWorld.SELECT_OFF_TEXT}:
            return
        selecting_on = event == OnOffWorld.SELECT_ON_TEXT
        # There are two color-choosers: selecting_on and selecting_off. Determine and select the
        # desired color chooser based on the label on the button the user clicked.
        color_chooser_button = OnOffWorld.on_color_chooser if selecting_on else OnOffWorld.off_color_chooser
        # Run it
        color_chooser_button.click()
        # Create a default color_string in case the user had cancelled color selection.
        # The default color string is the string of the current color.
        default_color_string = rgb_to_hex(OnOffPatch.on_color if selecting_on else OnOffPatch.off_color)
        # Retrieve the color choice by reading the window.
        (_event, SimEngine.values) = gui.WINDOW.read(timeout=10)
        color = self.get_color_and_update_button(color_chooser_button, default_color_string)
        # Set the color to the new choice
        if selecting_on:
            OnOffPatch.on_color = color
        else:
            OnOffPatch.off_color = color
        # Update the patches so they repaint with the new colors.
        for patch in self.patches:
            patch.set_on_off(patch.is_on)

    def mouse_click(self, xy: Tuple[int, int]):
        """ Toggle clicked patch's aliveness. """
        patch = self.pixel_tuple_to_patch(xy)
        patch.set_on_off(not patch.is_on)

    def setup(self):
        # Start with roughly 10% of patches on.
        self.get_colors()
        for patch in self.patches:
            is_on = randint(0, 100) < 10
            patch.set_on_off(is_on)

    def step(self):
        self.get_colors()
        # Run this only if we're running this on its own.
        # NOTE(review): isinstance is also True for subclasses; the comment
        # suggests an exact-type check may have been intended — confirm.
        if isinstance(self, OnOffWorld):
            for patch in self.patches:
                # ~90% chance an on-patch stays on; ~1% chance an off-patch turns on.
                is_on = patch.is_on and randint(0, 100) < 90 or not patch.is_on and randint(0, 100) < 1
                patch.set_on_off(is_on)
# ############################################## Define GUI ############################################## #
# Left-hand GUI column: the two color-select buttons paired with their hidden
# chooser buttons, a separator, and a usage hint.
on_off_left_upper = [
    [sg.Button(OnOffWorld.SELECT_ON_TEXT), OnOffWorld.on_color_chooser],
    [sg.Button(OnOffWorld.SELECT_OFF_TEXT), OnOffWorld.off_color_chooser],
    HOR_SEP(),
    [sg.Text('Cells can be toggled when\nthe system is stopped.')],
]
# Run this only if we're running this on its own.
if __name__ == "__main__":
    # Local import — presumably to avoid a circular import at module load; confirm.
    from core.agent import PyLogo
    PyLogo(OnOffWorld, 'On-Off World', on_off_left_upper, patch_class=OnOffPatch, fps=10)
| [
"noreply@github.com"
] | noreply@github.com |
b3d9e3409dc28504183585a91f91ea42ff4e75f9 | f111888515eb47dda6316e0163e1711c330bfd81 | /programming_technique_examples/main menu with input validation - functions, validation, robust design.py | 6cf09e60e6de21e0a7f252ed81f213f0c7f38fc8 | [] | no_license | awdimmick/GCSE-CS | eb8e1a3934d3005126970b466b436e34b7c5a54f | fe2f408598574583b893d3dd9ceadc9eff7f0f8c | refs/heads/master | 2023-04-28T07:09:47.723563 | 2023-04-17T07:38:07 | 2023-04-17T07:38:07 | 289,747,534 | 0 | 0 | null | 2020-08-23T18:50:01 | 2020-08-23T18:50:00 | null | UTF-8 | Python | false | false | 4,765 | py | """
Menu Helper Function
----------------------
This program demonstrates how you can write a helper function to make it really easy to show consistent main menu screens.
Combine this with other helper functions to obtain valid input from the user and you'll be sure to have a robust and efficient
program!
"""
def showMenu(title, options, width=40):
    """Draw a boxed menu with *title* and numbered *options*.

    Prints the menu using Unicode box-drawing characters and returns the list
    of valid choice strings ("1" .. str(len(options))), suitable for passing
    to getValidChoice. *width* is the total width of the box in characters.
    """
    # Declare constants for each of the Unicode border characters used to draw
    # the box — see http://jrgraphix.net/r/Unicode/2500-257F
    TOP_LEFT_DBL = chr(0x2554)
    TOP_RIGHT_DBL = chr(0x2557)
    MID_LEFT_SNG = chr(0x255F)
    MID_RIGHT_SNG = chr(0x2562)
    BOTTOM_LEFT_DBL = chr(0x255A)
    BOTTOM_RIGHT_DBL = chr(0x255D)
    HORIZ_DBL = chr(0x2550)
    HORIZ_SNG = chr(0x2500)
    VERT_DBL = chr(0x2551)

    # Header: top border, centred title, single-line separator, prompt, spacer.
    print(TOP_LEFT_DBL + (HORIZ_DBL * (width - 2) + TOP_RIGHT_DBL))
    print(VERT_DBL + title.center(width - 2) + VERT_DBL)
    print(MID_LEFT_SNG + (HORIZ_SNG * (width - 2) + MID_RIGHT_SNG))
    print(VERT_DBL + "Please select an option:".ljust(width-2, " ") + VERT_DBL)
    print(VERT_DBL + " " * (width - 2) + VERT_DBL)

    # Body: one numbered row per option. enumerate() fixes the original
    # options.index(item) lookup, which numbered duplicate entries with the
    # first occurrence's index (and was O(n^2)).
    validChoices = []
    for number, item in enumerate(options, start=1):
        optionString = VERT_DBL + " {0} - {1}".format(number, item)
        optionString = optionString.ljust(width - 1, " ") + VERT_DBL
        print(optionString)
        validChoices.append(str(number))

    # Print bottom border
    print(BOTTOM_LEFT_DBL + (HORIZ_DBL * (width - 2) + BOTTOM_RIGHT_DBL))
    return validChoices
def getValidChoice(message, options: list):
# message - a string value that will be presented as the prompt
# options - a List containing the valid options that the user can enter
lower_options = [] # Create a new empty list to store lowercase versions of the valid options
for option in options:
lower_options.append(option.lower()) # Fill the lower_options list with lowercase versions of the options - this means that the entry 'a' and 'A' will be considered equivalent
choice = input(message + ": ") # Added extra space on the end because no one ever remembers this...
if choice.lower() in lower_options:
return choice # the choice made is in our list of valid choices, so we can go ahead and return it to the part of the program that asked for it
else:
print("\nI'm sorry, that isn't an option on the menu. Please select one of the following options: ", end="")
for option in options:
print(option + ", ", end="") # This is optional, but is nice as it shows the user all of the valid options that they can choose from.
input("\n\nPress Enter to try again...\n") # This is a neat trick - using input to wait for the user to continue. You don't need to assign its output to a variable as you are not going to use it for anything.
return getValidChoice(message, options) # Here we are calling the function to run again, because the user obviously needs to make another choice. We write 'return' before it so that, once called, the valid that this new instance of the function returns is then retruend on to the function that called getValidChoice initially.
# Finally, we need to call our procedure in order to display the menu
menuOptions = showMenu("MY AMAZING GAME!", ["Play game", "Enter player names", "Show high scores", "Quit"])
choice = getValidChoice("Please select an option from the menu: ", menuOptions)
print("User entered: {0}".format(choice))
| [
"noreply@github.com"
] | noreply@github.com |
4fa49507c5bf3cc55bf870e07700ee5abeff3a65 | ab51f65e1f4b09eeaf07d63f87b04d7a2ea3433f | /exercise_3_2/app.py | d1ac064bd24df26280eefcf640e6bb24ca0a48a0 | [] | no_license | decisionmechanics/3659 | 9299487a4af2eb9afdb7ea204b3a6fb07f81c0cd | 2a6d319716d3292096bbc770d0aba213f22993ae | refs/heads/main | 2023-03-21T08:11:30.425419 | 2021-03-14T15:45:16 | 2021-03-14T15:45:16 | 347,678,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | import time
import redis
from flask import Flask
app = Flask(__name__)
def get_hit_count():
cache = redis.Redis(host='redis', port=6379)
# Increment the hit count, but allow for timeouts, e.g., on startup (using 5 retries and a timeout)
retries = 5
while True:
try:
return cache.incr('hits')
except redis.exceptions.ConnectionError as exc:
if retries == 0:
raise exc
retries -= 1
time.sleep(0.5)
@app.route('/')
def hello():
count = get_hit_count()
return 'Hello from Docker! I have been seen {} times. \n'.format(count) | [
"Andrew.Tait@decisionmechanics.com"
] | Andrew.Tait@decisionmechanics.com |
bbb0f2c6d950d3c1625b3d04e1d404392e509f42 | 791eedd78b9fe753f998020efb9ff9d0270cd8d3 | /Python/CrashCourse/Chapter9/user.py | 1e06650fa786434cd673e06148520e5a3a96bda6 | [] | no_license | pavel-prykhodko96/source | 6214c212356ffa420bbc489adce104268f5efa12 | 77219b672bb14df93abd7b8203f486d0620533d8 | refs/heads/master | 2023-03-07T21:43:17.422232 | 2023-02-23T12:06:52 | 2023-02-23T12:06:52 | 248,736,899 | 0 | 0 | null | 2023-02-23T12:06:54 | 2020-03-20T11:16:45 | C | UTF-8 | Python | false | false | 1,311 | py | class User():
"""Stores users information"""
def __init__(self, first_name, last_name, **info):
self.first_name = first_name
self.last_name = last_name
self.full_name = first_name + " " + last_name
self.info = info
self.login_attempts = 0
def describe_user(self):
print("Name: " + self.full_name)
if self.info:
for key, value in self.info.items():
print(key + ": " + value)
def greet_user(self):
print("We are glad to see you, " + self.full_name + "!")
def increment_login_attempts(self):
self.login_attempts += 1
def reset_login_attempts(self):
self.login_attempts = 0
class Admin(User):
"""Special kind of user"""
def __init__(
self, first_name, last_name,
*privileges, **info):
super().__init__(first_name, last_name, **info)
self.privileges = Privileges(*privileges)
class Privileges():
"""Specifies privileges of a user"""
def __init__(self, *privileges):
self.privileges = privileges
def show(self):
if self.privileges:
print("Privileges:")
for privilege in self.privileges:
print(" " + privilege)
else:
print("No privileges")
| [
"47929311+pavel-prykhodko96@users.noreply.github.com"
] | 47929311+pavel-prykhodko96@users.noreply.github.com |
d80456331b4a047786914c0b00ae1b4e517dc147 | 3f06e7ae747e935f7a2d1e1bae27a764c36a77d1 | /day23.py | 28136ee107dd5557680c2c853d2ec3f553c3faa0 | [] | no_license | mn113/adventofcode2016 | 94465f36c46e9aa21d879d82e043e1db8c55c9da | 3a93b23519acbfe326b8bf7c056f1747bbea036a | refs/heads/master | 2022-12-11T22:57:21.937221 | 2022-12-04T16:37:24 | 2022-12-04T16:37:24 | 75,545,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,572 | py | #! /usr/bin/env python
# Find result of long-running instruction set
import sys
import time
def intOrRegisterValue(x):
if x in 'abcd':
return registers[x]
else:
return int(x)
registers = {x:0 for x in 'abcd'}
registers['a'] = 7 # Part 1
registers['a'] = 12 # Part 2
print "Start:" + str(registers)
with open('day23_input.txt') as fp:
lines = fp.readlines()
l = len(lines)
i = 0
while i < l:
line = lines[i]
sys.stdout.write(str(registers) + ' : ' + str(i) + ' : ' + line)
sys.stdout.write('\r')
sys.stdout.flush()
#print registers
#print i, '/', l, ':', line
words = line.split()
if words[0] == 'tgl':
toggleDist = intOrRegisterValue(words[1])
# Convert to integer:
j = i + toggleDist
if j < 0 or j >= l:
# Out of range, start next loop immediately
i = i + 1
continue
elif lines[j][:3] == 'inc': # Toggle inc to dec
lines[j] = 'dec' + lines[j][3:]
elif lines[j][:3] == 'dec': # Toggle dec to inc
lines[j] = 'inc' + lines[j][3:]
elif lines[j][:3] == 'tgl': # Toggle tgl to inc
lines[j] = 'inc' + lines[j][3:]
elif lines[j][:3] == 'jnz': # Toggle jnz to cpy
lines[j] = 'cpy' + lines[j][3:]
else:
# cpy doesn't change when toggled
pass
print "Altered", j, lines[j]
elif words[0] == 'inc':
reg = words[1]
registers[reg] = registers[reg] + 1
elif words[0] == 'dec':
reg = words[1]
registers[reg] = registers[reg] - 1
elif words[0] == 'cpy':
src = words[1]
dest = words[2]
# Copy register?
registers[dest] = intOrRegisterValue(src)
elif words[0] == 'jnz':
# Test:
to_test = intOrRegisterValue(words[1])
jumpval = intOrRegisterValue(words[2])
if to_test != 0:
i = i + jumpval
continue # start next loop immediately
elif words[0] == 'ADD':
times = 1
if len(words) > 3:
times = intOrRegisterValue(words[3])
registers[words[2]] = registers[words[2]] + times * registers[words[1]]
elif words[0] == 'ZER':
registers[words[1]] = 0
i = i + 1
time.sleep(0.0)
print "---"
print registers
| [
"recyclebing+github@gmail.com"
] | recyclebing+github@gmail.com |
9b7020f61667e3eb17fd3906229ef0227982fd07 | 42d464ad8cea5b96d7ae8802ca136b4f88e2f3f5 | /Analysis/Sync-Model.py | 7e8d6c3aeb4c2bbdbb00e631f3dcd7e6394c4e9d | [] | no_license | HannahDeLaet/sync-model | 09fc6aafbb7823df198931c7afd8a240ca1debb9 | 9ef2c160941cb5e0b1cab4838ee32f89c3f6e635 | refs/heads/master | 2021-05-20T22:31:16.361338 | 2020-04-02T11:39:28 | 2020-04-02T11:39:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,328 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 10:38:24 2019
@author: Pieter
"""
import numpy as np
from mne import parallel as par
from scipy import signal as sig
##################
# Functions #
##################
def phase_updating(Neurons=[], Radius=1, Damp=0.3, Coupling=0.3, multiple=True):
"""
Returns updated value of the inhibitory (I) and excitatory (E) phase neurons
@Param Neurons, list containing the current activation of the phase code units
@Param Radius, bound of maximum amplitude
@Param Damp, strength of the attraction towards the Radius, e.g. OLM cells reduce pyramidal cell activation
@Param Coupling, frequency*(2*pi)/sampling rate
@Param multiple, True or false statement whether the Neurons array includes more than one node or not
Formula (2) and (3) from Verguts (2017)
"""
# updating the phase neurons from the processing module
if multiple:
Phase = np.zeros ((len(Neurons[:,0]),2)) # Creating variable to hold the updated phase values ( A zero for each E and I phase neuron of each phase code unit)
r2 = np.sum(Neurons * Neurons, axis = 1) # calculating the amplitude depending on the activation of the E and I phase neurons
# updating the E phase neurons, the higher the value of the I neurons (Neurons[:, 1]), the lower the value of the E neurons
Phase[:,0] = Neurons[:,0] -Coupling * Neurons[:,1] - Damp *((r2>Radius).astype(int)) * Neurons[:,0]
# updating the I phase neurons, the higher the value of the E neurons (Neuron[:, 0]), the higher the value of the I neurons
Phase[:,1] = Neurons[:,1] +Coupling * Neurons[:,0] - Damp * ((r2>Radius).astype(int)) * Neurons[:,1]
# updating the phase neurons of the MFC
else:
Phase = np.zeros((2))
r2 = np.sum(Neurons[0] * Neurons[1], axis = 0)
Phase[0] = Neurons[0] -Coupling*Neurons[1] - Damp * ((r2>Radius).astype(int)) * Neurons[0]
Phase[1] = Neurons[1] +Coupling*Neurons[0] - Damp * ((r2>Radius).astype(int)) * Neurons[1]
return Phase
# Model function
def Model_sim(Threshold=5, drift=2, Nsubjects=1, theta_freq = 5, save_eeg = False):
# Threshold = 5
# drift = 2
# Nsubjects = 1
"""
Model simulation that writes away two files
@Param Threshold, the response threshold
@Param drift, drift rate of Neurons in different layers, Note! this is actually the inverse of drift rate in the drift diffusion model
@Param Nsubjects, how many subjects you want to simulate data for
@File csv-file, should ressemble the behavioral data file you get after testing a participant
@File npy-file, contains simulated EEG data
"""
# timing of the experiment
srate = 500 # sampling rate per second
Preinstr_time = int(.2 * srate) # pre-instruction time (1s)
Instr_time = int(.2 * srate) # instruction presentation (200 ms)
Prep_time = (np.arange(1.7,2.2,.05) * srate).astype(int) # ISI ranging from 1700 to 2200 ms, multiplying it by the sampling rate to get how many samples we have for each ISI
Stim_time = int(.05 * srate) # Stimulus presentation of 50 ms
Resp_time = .7 * srate # max response time of 1s
FB_time = int(.1 * srate) # Feedback presentation of 500 ms
ITI=(np.arange(1,1.9,.25) * srate).astype(int) # ITI ranging from 1000 ms to 1900 ms
Response_deadline = .7 * srate # Response deadline
# max trial time
TotT = (Preinstr_time + Instr_time + max(Prep_time) + Stim_time + Resp_time + FB_time + max(ITI)).astype(int)
# variables for randomization
nInstr = 4 # number of instructions
nTilts = 2 # number of tilt directions
nSides = 2 # number of stimuli locations
nStim = nTilts * nSides # number of stimuli in total
nResp = 4 # number of responses
nReps = 5 # number of replications
UniqueTrials = nInstr * nStim * len(Prep_time) # number of different unique trials
Tr = UniqueTrials * nReps # Total amount of trials
###########################
# Processing Module #
##########################
nNodes = nStim + nResp # total model nodes = stimulus nodes + response nodes
r2max = 1 # max amplitude
Cg_1 = (30/srate) * 2 * np.pi # Coupling gamma waves, for the stimulus nodes
Cg_2 = Cg_1 + (drift/srate) * 2 * np.pi # Coupling gamma waves with frequency difference of 2 Hz, for the response nodes
damp = 0.3 # damping parameter, e.g. OLM cells that damp the gamma amplitude
decay = 0.9 # decay parameter
noise = 0.05 # noise parameter
Phase = np.zeros((nNodes,2,TotT,Tr)) # phase neurons, each node has two phase neurons, we update it each timestep, based on the sample rate of each trial
Rate = np.zeros((nNodes, TotT, Tr)) # rate neurons, each node has one rate neuron
# Weights initialization
W = np.ones((nStim,nResp))*0.5
W[(0,2),1] = 0.1
W[(0,2),3] = 0.1
W[(1,3),0] = 0.1
W[(1,3),2] = 0.1
#########################
# Integrator Module #
#########################
Integr = np.zeros(shape = [nResp, TotT, Tr]); # inhibitory weights inducing competition
inh = np.ones((nResp,nResp))*-0.01
for i in range(nResp):
inh[i,i] = 0
cumul = 1
#Threshold=4
#######################
# Control Module #
######################
# theta_freq = 5
r2_MFC=1 #radius MFC
Ct=(theta_freq/srate)*2*np.pi #coupling theta waves
damp_MFC=.03 #damping parameter MFC
acc_slope=10 #MFC slope parameter, is set to -5 in equation (7) of Verguts (2017)
#(steepness of burst threshold)
MFC = np.zeros((2,TotT,Tr)) # MFC phase units, two phase neurons
Be=0 #bernoulli (rate code MFC)
LFC = np.zeros((nInstr,Tr)) # LFC stores information for each instruction for each trial
LFC_sync = np.zeros((nInstr,4))
LFC_sync[0,:]=[0,1,4,5] # LL sync left stimulus nodes with left hand nodes
LFC_sync[1,:]=[2,3,6,7] # RR sync right stimulus nodes with right hand nodes
LFC_sync[2,:]=[0,1,6,7] # LR sync left stimulus nodes with right hand nodes
LFC_sync[3,:]=[2,3,4,5] # RL sync right stimulus nodes with left hand nodes
tiltrate=.1 # mean tilt ~1.8 degrees =.2*90/10
#Instr_activation=np.diag(np.ones((4))) #Instruction activation matrix
Stim_activation=np.zeros((nStim,nResp)) #Stimulus activation matrix
Stim_activation[0,:]=np.array([1,0,1,0])*tiltrate #Activate 2 stimuli with left tilt (LL)
Stim_activation[1,:]=np.array([0,1,0,1])*tiltrate #Activate 2 stimuli with right tilt(RR)
Stim_activation[2,:]=np.array([1,0,0,1])*tiltrate #Activate left stimulus with left tilt and right with right tilt
Stim_activation[3,:]=np.array([0,1,1,0])*tiltrate #Activate left stimulus with right tilt and right stimulus with left tilt
for sub in range(Nsubjects):
# Randomization for instructions, tilt of stimuli and ISI's
# let's say: 1 = LL left stim, left resp | two times left tilt
# 2 = RR right stim, right resp | two time right tilt
# 3 = LR left stim, right resp | left tilt (left) and right tilt (right)
# 4 = RL right stim, left resp | right tilt (left) and left tilt (right)
##################################
# Create a factorial design #
#################################
Instr = np.repeat(range(nInstr), nStim * len(Prep_time)) # Repeat the instructions (nInstr: 0-4) for the ISI's of each stimulus and put it into an array
Stim = np.tile(range(nStim), nInstr * len(Prep_time)) # Repeat the stimuli for each instruction, total amount of stimuli
"""
TODO: No idea what preparation is doing
"""
Preparation = np.floor(np.array(range(UniqueTrials))/(nStim))%len(Prep_time) # Preparation Period, 11 levels
Design = np.column_stack([Instr, Stim, Preparation]) # Create an array that has a stack of lists, each list contains instruction, stimulus and a preparation period
Design = np.tile(Design,(nReps,1)) # Repeat the design nReps
np.random.shuffle(Design) # shuffle the design making it have a random order
Design = Design.astype(int)
#####################################################
# Oscillations start point of the phase neurons #
#####################################################
start = np.random.random((nNodes,2)) # Draw random starting points for the two phase neurons of each node
"""
# TODO: MFC = ACC? Or ACC is a part of the MFC?
"""
start_MFC = np.random.random((2)) # Acc phase neurons starting point
# assign starting values
Phase[:,:,0,0] = start
MFC[:,0,0] = start_MFC
#################################
# Records #
################################
Hit = np.zeros((TotT,Tr)) # Hit record, check for the sampeling points of each trial
RT = np.zeros((Tr)) # RT record,
accuracy = np.zeros((Tr)) # Accuracy record
Instruct_lock = np.zeros((Tr)) # Instruction onset record
Stim_lock = np.zeros((Tr)) # Stimulus onset record
Response_lock = np.zeros((Tr)) # Response onset record
resp = np.ones((Tr)) * -1 # Response record
preparatory_period = np.zeros((Tr))
sync = np.zeros((nStim, nResp, Tr)) # Sync record between the stimuli and the responses on each trial
############################################
# Preinstruction Period #
###########################################
time = 0
for trial in range(Tr): # for every trial in total amount of trials
# FIRST STEP: copying over the phase values of the previous trial to the current trial
if trial > 0: ### index 0 of the phase neurons are already assigned random starting values, starting points are end points of previous trials
Phase[:,:,0,trial] = Phase[:,:,time,trial-1] ### Taking both phase neurons of all the nodes of the current trial and setting it equal to the phase neurons of the previous triaL
MFC[:,0,trial] = MFC[:,time,trial-1] ### Taking both phase neurons of the MFC of the current trial and setting it equal to the phase neurons of the previous trial
# SECOND STEP: updating phase code units each sample point in the preinstruction period
## Pre-instruction time = no stimulation and no bursts
for time in range(Preinstr_time): # looping across the sample points of the pre-instruction time
## Cg_1 and Cg_2 are the oscillating gamma frequencies, stimulus and response nodes have different gamma frequencies!
## Ct the oscillating frequency of the MFC phase neurons
## r2max is the radius (amplitude) of a pair of inhibitory and excitatory neurons
## damp is the damping value acting on the excitatory (e.g. OLM cells)
Phase[0:nStim, : , time + 1, trial] = phase_updating (Neurons=Phase[0:nStim, :, time, trial], Radius=r2max, Damp=damp, Coupling=Cg_1, multiple=True) ### updating the stimulus nodes
Phase[nStim:nNodes, : , time + 1, trial] = phase_updating(Neurons=Phase[nStim:nNodes, : , time,trial], Radius=r2max, Damp=damp, Coupling=Cg_2, multiple=True) ### updating the response nodes
MFC[:, time+1, trial] = phase_updating(Neurons = MFC[:, time, trial], Radius=r2_MFC, Damp=damp_MFC, Coupling=Ct, multiple=False) ### updating the MFC node
# THIRD STEP: Showing the instructions --> Phase reset
t = time ### setting the current sample point of the preinstruction time on the current trial
Instruct_lock[trial] = t ### setting the instruction onset of the current trial
## phase reset of the MFC phase neurons due to the instruction
MFC[: , t, trial] = np.ones((2)) * r2_MFC
##########################################
# Preparatory Period #
##########################################
## Instruction presentation and preparation period
## start syncing but no stimulation yet
preparatory_period[trial] = Prep_time[Design[trial,2]] ### Set the preparatory period (ISI), use the Preparation variable (which is randomized and 11 levels) to select a Prep_time
for time in range(t , int(t + Instr_time + int(preparatory_period[trial]))): ### looping of the sample points of the ISI + instruction time period
# FIRST Step: set the LFC for the current trial
LFC[Design[trial,0], trial] = 1 ### set the LFC to 1 for the instruction that is shown on this trial
# SECOND STEP: updating phase code units each sample point in the preparatory period
Phase[0:nStim, :, time + 1, trial] = phase_updating(Neurons=Phase[0:nStim, :, time, trial], Radius=r2max, Damp=damp, Coupling=Cg_1, multiple=True) ### updating the stimulus nodes
Phase[nStim:nNodes, :, time + 1, trial]=phase_updating(Neurons=Phase[nStim:nNodes, :, time, trial], Radius=r2max, Damp=damp, Coupling=Cg_2, multiple=True) ### updating the response nodes
MFC[:, time+1, trial] = phase_updating(Neurons=MFC[:, time, trial], Radius=r2_MFC, Damp=damp_MFC, Coupling=Ct, multiple=False) ### updating the MFC node
# THIRD STEP: Rate code MFC neuron activation is calculated by a bernoulli process, start syncing
Be = 1 / (1 + np.exp(-acc_slope * (MFC[0,time,trial]-1))) ### Equation (7) in Verguts (2017)
prob = np.random.random()
# FOURTH STEP:
if prob < Be:
Hit[time, trial] = 1
Gaussian = np.random.normal(size=[1,2])
for Ins in range(nInstr):
if LFC[Ins,trial]: ### Checks which of the 4 instruction is set to 1 in the LFC
for nodes in LFC_sync[Ins,:]: ### take the 4 nodes associated with the current instruction
Phase[int(nodes), :, time + 1, trial] = decay * Phase[int(nodes), :, time, trial] + Gaussian ### Update the nodes that the LFC selected
t=time
Stim_lock[trial]=t
##########################################
# Responds Period #
##########################################
# Response period: syncing bursts and rate code stimulation
while resp[trial] == -1 and time < t + Response_deadline: ### while the response of this trial is still equal to -1 (no answer has been given)
time += 1
# FIRST STEP: updating phase code units of processing module
Phase[0:nStim,:,time+1,trial]=phase_updating(Neurons=Phase[0:nStim,:,time,trial], Radius=r2max, Damp=damp, Coupling=Cg_1, multiple=True)
Phase[nStim:nNodes,:,time+1,trial]=phase_updating(Neurons=Phase[nStim:nNodes,:,time,trial], Radius=r2max, Damp=damp, Coupling=Cg_2, multiple=True)
MFC[:,time+1,trial]=phase_updating(Neurons=MFC[:,time,trial], Radius=r2_MFC, Damp=damp_MFC, Coupling=Ct, multiple=False)
# SECOND STEP: bernoulli process in MFC rate
Be = 1/(1+np.exp(-acc_slope*(MFC[0,time,trial]-1)))
prob = np.random.random()
# THIRD STEP: Burst
if prob<Be:
Hit[time,trial]=1;
Gaussian=np.random.normal(size=[1,2])
for Ins in range(nInstr):
if LFC[Ins,trial]:
for nodes in LFC_sync[Ins,:]:
Phase[int(nodes),:,time+1,trial] = decay * Phase[int(nodes), :, time, trial] + Gaussian
# FOURTH STEP: updating rate code units
Rate[0:nStim, time, trial] = Stim_activation[Design[trial,1],:]*(1/(1+np.exp(-5*Phase[0:nStim,0,time,trial]-0.6))) ### Updating ratecode units for the stimulus nodes
Rate[nStim:nNodes, time, trial] = np.matmul(Rate[0:nStim, time, trial],W)*(1/(1+np.exp(-5*Phase[nStim:nNodes,0,time,trial]-0.6))) ### Updating ratecode units for the response nodes
Integr[:, time+1, trial] = np.maximum(0, Integr[:, time, trial]+cumul*Rate[nStim:nNodes, time, trial]+np.matmul(inh,Integr[:, time, trial]))+noise*np.random.random((nResp))
for i in range(nResp):
if Integr[i, time+1, trial]>Threshold:
resp[trial]=i
Integr[:, time+1, trial] = np.zeros((nResp))
RT[trial]=(time-t)*(1000/srate)
t=time
Response_lock[trial]=t
if Design[trial,0]==0:
if (Design[trial,1]==0 or Design[trial,1]==2 ) and resp[trial]==0:
accuracy[trial]=1
elif (Design[trial,1]==1 or Design[trial,1]==3 ) and resp[trial]==1:
accuracy[trial]=1
else:
accuracy[trial]=0
if Design[trial,0]==1:
if (Design[trial,1]==0 or Design[trial,1]==3 ) and resp[trial]==2:
accuracy[trial]=1
elif (Design[trial,1]==1 or Design[trial,1]==2 ) and resp[trial]==3:
accuracy[trial]=1
else:
accuracy[trial]=0
if Design[trial,0]==2:
if (Design[trial,1]==0 or Design[trial,1]==2 ) and resp[trial]==2:
accuracy[trial]=1
elif (Design[trial,1]==1 or Design[trial,1]==3 ) and resp[trial]==3:
accuracy[trial]=1
else:
accuracy[trial]=0
if Design[trial,0]==3:
if (Design[trial,1]==0 or Design[trial,1]==3 ) and resp[trial]==0:
accuracy[trial]=1
elif (Design[trial,1]==1 or Design[trial,1]==2 ) and resp[trial]==1:
accuracy[trial]=1
else:
accuracy[trial]=0
for time in range(t, t+ FB_time+ ITI[int(np.round(np.random.random()*3))]):
#updating phase code units of processing module
Phase[0:nStim,:,time+1,trial]=phase_updating(Neurons=Phase[0:nStim,:,time,trial], Radius=r2max, Damp=damp, Coupling=Cg_1, multiple=True)
Phase[nStim:nNodes,:,time+1,trial]=phase_updating(Neurons=Phase[nStim:nNodes,:,time,trial], Radius=r2max, Damp=damp, Coupling=Cg_2, multiple=True)
MFC[:,time+1,trial]=phase_updating(Neurons=MFC[:,time,trial], Radius=r2_MFC, Damp=damp_MFC, Coupling=Ct, multiple=False)
for st in range(nStim):
for rs in range(nResp):
sync[st,rs, trial]=np.corrcoef(Phase[st,0,int(Stim_lock[trial]):int(Response_lock[trial]),trial],Phase[nStim+rs,0,int(Stim_lock[trial]):int(Response_lock[trial]),trial])[0,1]
Trials=np.arange(Tr)
Design=np.column_stack((Trials, Design, resp, accuracy, RT, Instruct_lock, Stim_lock, Response_lock))
Column_list='trial,instr,stim,isi,response,accuracy,rt,instr_onset,stim_onset,resp_onset'
#Column_list_2='Visual 1, Visual 2, Visual 3, Visual 4, Motor 1, Motor 2, Motor 3, Motor 4, MFC'
filename_behavioral='Behavioral_Data_simulation_sub%i_thetaFreq%.2fHz_thresh%i_drift%.1f' % (sub, theta_freq, Threshold, drift)
np.savetxt(filename_behavioral+'.csv', Design, header=Column_list, delimiter=',',fmt='%.2f')
if save_eeg:
Phase_ds = sig.resample(Phase, int(TotT/2.), axis = 2)
MFC_ds = sig.resample(MFC, int(TotT/2.), axis = 1)
Rate_ds = sig.resample(Rate, int(TotT/2.), axis = 1)
Integr_ds = sig.resample(Integr, int(TotT/2.), axis = 1)
EEG_data = {'Phase':Phase_ds[:,0,:,:], 'MFC':MFC_ds[0,:,:], 'Rate':Rate_ds, 'Integr':Integr_ds}
Integr_data = Integr_ds
filename_EEG='EEG_Data_simulation_sub%i_thetaFreq%.2fHz_thresh%i_drift%.1f_256Hz' % (sub, theta_freq, Threshold, drift)
filename_Integr='Integr_Data_simulation_sub%i_thetaFreq%.2fHz_thresh%i_drift%.1f_256Hz' % (sub, theta_freq, Threshold, drift)
np.savez(filename_EEG+'.npz', EEG_data)
np.savez(filename_Integr+'.npz', Integr_data)
#return np.mean(accuracy)
# compute surrogates
import time
###########################
# Generate Model Behavior #
###########################
drifts = np.arange(1, 11)
threshs = np.arange(3, 7)
parallel, my_cvstime, _ = par.parallel_func(Model_sim, n_jobs = -1, verbose = 40)
for d in drifts:
for thr in threshs:
print('thresh %i, drift %.1f' % (thr, d))
t = time.time()
parallel(my_cvstime(Threshold = thr, drift = d, Nsubjects=1, theta_freq = theta, save_eeg = False) for theta in np.arange(1, 21))
print('\ttime taken: %.2fmin' % ((time.time() - t) / 60.))
| [
"rs7115@nyu.edu"
] | rs7115@nyu.edu |
758613f5a6a971ee9c7da88463b1731b29accd18 | befab60aa7d857ab4e0dbee3499016c454b0f64f | /xmlExtract.py | bbbddffd09a342d6997e7510636d50e9eddafeda | [] | no_license | naruto0323/smai | 260506263f76842f020db4d0446bf0f8625eeed2 | 824617aaa3205daa55910871a6b9151886102763 | refs/heads/master | 2021-01-20T12:35:05.186305 | 2017-05-05T14:21:39 | 2017-05-05T14:21:39 | 90,383,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | import xml.etree.ElementTree as ET
import pickle
import sys
from nltk.stem.porter import *
stemmer = PorterStemmer()
reload(sys)
sys.setdefaultencoding('utf-8')
from nltk.corpus import stopwords
tree = ET.parse('subtask1-heterographic-test.xml')
root = tree.getroot()
sentences = []
words = {}
stop = set(stopwords.words('english'))
scrap = ['"', "'", ":" , ",", "?", "!", "/", "_", ".", "-"]
wordset = set()
def extractXml():
for sentence in root:
line = ""
for word in sentence:
word = (word.text).lower()
if((word not in stop) and (word[0] not in scrap)):
#print word
word=stemmer.stem(word).encode('utf-8')
wordset.add(word)
#line += word.text + " "
#sentences.append(line)
def saveData():
f = open('mainWords.p','w')
pickle.dump(wordset, f)
f.close()
# print mydict
extractXml()
saveData()
f = open('mainWords.p', 'rb') # 'rb' for reading binary file
mydict = pickle.load(f)
f.close()
mydict=list(mydict)
# print mydict
| [
"ramchandra.0323@gmail.com"
] | ramchandra.0323@gmail.com |
5b47d1be832090f2f26a57dc68231670db71362b | 8add030c13eb431bb71259a92ce99bb1e783665c | /dynamo/tools/dimension_reduction.py | f6373e9ee5b58618f5573a79a2ffd6deb0baaa42 | [
"BSD-3-Clause"
] | permissive | pythseq/dynamo-release | 4d00f37ef7a721148649fdc66606cef72a9a86a0 | 1ed7a32b8f88790e4a6f063f9e3bf017c34c1214 | refs/heads/master | 2020-12-06T16:34:03.661901 | 2020-01-07T19:05:14 | 2020-01-07T19:05:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,585 | py | from sklearn.decomposition import TruncatedSVD
import scipy
import warnings
from copy import deepcopy
from .psl import *
def extract_indices_dist_from_graph(graph, n_neighbors):
    """Extract per-cell neighbor-index and neighbor-distance matrices from a kNN sparse graph.

    Arguments
    ---------
    graph: sparse matrix (`.X`, dtype `float32`)
        Sparse matrix of the kNN graph (n_cell x n_cell); each stored entry is the
        distance between two cells.
    n_neighbors: 'int' (optional, default 15)
        The number of nearest neighbors of the kNN graph.

    Returns
    -------
    ind_mat: :class:`~numpy.ndarray`
        (n_cell x n_neighbors) matrix of neighbor indices for each cell.
    dist_mat: :class:`~numpy.ndarray`
        (n_cell x n_neighbors) matrix of neighbor distances for each cell.
    """
    n_cells = graph.shape[0]
    ind_mat = np.zeros((n_cells, n_neighbors), dtype=int)
    dist_mat = np.zeros((n_cells, n_neighbors), dtype=graph.dtype)

    for cell in range(n_cells):
        # column indices of the stored (non-zero) entries in this cell's row
        neighbor_cols = graph[cell, :].nonzero()[1]

        # every cell is its own nearest neighbor at distance 0
        ind_mat[cell, 0] = cell
        dist_mat[cell, 0] = 0

        if len(neighbor_cols) == n_neighbors - 1:
            # exactly the expected count: keep the stored order as-is
            ind_mat[cell, 1:] = neighbor_cols
            dist_mat[cell, 1:] = graph[cell][:, neighbor_cols].toarray()
        else:
            # approximate search may return more/fewer neighbors; keep the
            # (n_neighbors - 1) closest ones, ordered by distance
            row_dists = graph[cell][:, neighbor_cols].toarray()
            keep = np.argsort(row_dists)[0][:(n_neighbors - 1)]
            ind_mat[cell, 1:] = neighbor_cols[keep]
            dist_mat[cell, 1:] = graph[cell][0, neighbor_cols[keep]].toarray()

    return ind_mat, dist_mat
def umap_conn_indices_dist_embedding(X,
                                     n_neighbors=15,
                                     n_components=2,
                                     metric="cosine",
                                     min_dist=0.1,
                                     random_state=0,
                                     verbose=False):
    """Compute connectivity graph, matrices for kNN neighbor indices, distance matrix and low dimension embedding with UMAP.
    This code is adapted from umap-learn (https://github.com/lmcinnes/umap/blob/97d33f57459de796774ab2d7fcf73c639835676d/umap/umap_.py)

    Arguments
    ---------
    X: sparse matrix (`.X`, dtype `float32`)
        expression matrix (n_cell x n_genes)
    n_neighbors: 'int' (optional, default 15)
        The number of nearest neighbors to compute for each sample in ``X``.
    n_components: 'int' (optional, default 2)
        The dimension of the space to embed into.
    metric: 'str' or `callable` (optional, default `cosine`)
        The metric to use for the computation.
    min_dist: 'float' (optional, default `0.1`)
        The effective minimum distance between embedded points. Smaller values will result in a more clustered/clumped
        embedding where nearby points on the manifold are drawn closer together, while larger values will result on a
        more even dispersal of points. The value should be set relative to the ``spread`` value, which determines the
        scale at which embedded points will be spread out.
    random_state: `int`, `RandomState` instance or `None`, optional (default: 0)
        If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is
        the random number generator; If None, the random number generator is the RandomState instance used by `numpy.random`.
    verbose: `bool` (optional, default False)
        Controls verbosity of logging.

    Returns
    -------
    graph, knn_indices, knn_dists, embedding_
        A tuple of kNN graph (`graph`), indices of nearest neighbors of each cell (knn_indicies), distances of nearest
        neighbors (knn_dists) and finally the low dimensional embedding (embedding_).
    """
    from sklearn.utils import check_random_state
    from sklearn.metrics import pairwise_distances
    from umap.umap_ import nearest_neighbors, fuzzy_simplicial_set, simplicial_set_embedding, find_ab_params

    # Bug fix: the seed used to be hard-coded as check_random_state(42), which
    # silently ignored the documented `random_state` argument. Honor it instead.
    random_state = check_random_state(random_state)

    _raw_data = X

    if X.shape[0] < 4096:
        # Small dataset: an exact, dense pairwise distance matrix is affordable,
        # so build the fuzzy simplicial set from precomputed distances.
        dmat = pairwise_distances(X, metric=metric)
        graph = fuzzy_simplicial_set(
            X=dmat,
            n_neighbors=n_neighbors,
            random_state=random_state,
            metric="precomputed",
            verbose=verbose
        )

        # Extract knn_indices / knn_dists: copy the graph and overwrite its
        # stored values (connectivities) with the true distances, then pull
        # out the per-cell neighbor index/distance matrices.
        g_tmp = deepcopy(graph)
        g_tmp[graph.nonzero()] = dmat[graph.nonzero()]
        knn_indices, knn_dists = extract_indices_dist_from_graph(g_tmp, n_neighbors=n_neighbors)
    else:
        # Standard case: approximate nearest-neighbor search (random projection
        # forests + NN-descent), as done inside umap-learn itself.
        (knn_indices, knn_dists, rp_forest) = nearest_neighbors(
            X=X,
            n_neighbors=n_neighbors,
            metric=metric,
            metric_kwds={},
            angular=False,
            random_state=random_state,
            verbose=verbose
        )

        graph = fuzzy_simplicial_set(
            X=X,
            n_neighbors=n_neighbors,
            random_state=random_state,
            metric=metric,
            knn_indices=knn_indices,
            knn_dists=knn_dists,
            angular=rp_forest,
            verbose=verbose
        )

        _raw_data = X
        _transform_available = True
        # Build the search graph umap-learn uses to support transforming new
        # data; NOTE(review): it is constructed here but never returned or
        # stored, so it currently has no effect — confirm whether it can be
        # dropped or should be returned to the caller.
        # NOTE(review): this relies on `scipy.sparse` being importable via the
        # module-level `import scipy` (i.e. scipy.sparse already loaded by a
        # prior import) — verify.
        _search_graph = scipy.sparse.lil_matrix(
            (X.shape[0], X.shape[0]), dtype=np.int8
        )
        _search_graph.rows = knn_indices # An array (self.rows) of rows, each of which is a sorted list of column indices of non-zero elements.
        _search_graph.data = (knn_dists != 0).astype(np.int8) # The corresponding nonzero values are stored in similar fashion in self.data.
        _search_graph = _search_graph.maximum( # Element-wise maximum between this and another matrix.
            _search_graph.transpose()
        ).tocsr()

    if verbose:
        print("Construct embedding")

    # Translate min_dist / spread into the (a, b) parameters of UMAP's
    # differentiable approximation to the membership-strength curve.
    a, b = find_ab_params(1, min_dist)

    embedding_ = simplicial_set_embedding(
        data=_raw_data,
        graph=graph,
        n_components=n_components,
        initial_alpha=1.0, # learning_rate
        a=a,
        b=b,
        gamma=1.0,
        negative_sample_rate=5,
        n_epochs=0, # 0 lets umap-learn pick its default epoch count
        init="spectral",
        random_state=random_state,
        metric=metric,
        metric_kwds={},
        verbose=verbose
    )

    return graph, knn_indices, knn_dists, embedding_
def reduceDimension(adata, n_pca_components=25, n_components=2, n_neighbors=10, reduction_method='trimap', velocity_key='velocity_S', cores=1):
    """Compute a low dimension reduction projection of an annodata object first with PCA, followed by non-linear
    dimension reduction methods.

    Arguments
    ---------
        adata: :class:`~anndata.AnnData`
            an Annodata object
        n_pca_components: 'int' (optional, default 25)
            Number of PCA components.
        n_components: 'int' (optional, default 2)
            The dimension of the space to embed into.
        n_neighbors: 'int' (optional, default 10)
            Number of nearest neighbors when constructing adjacency matrix.
        reduction_method: 'str' (optional, default trimap)
            Non-linear dimension reduction method to further reduce dimension based on the top n_pca_components PCA
            components. Currently, PSL (probablistic structure learning, a new dimension reduction by us), tSNE
            (fitsne instead of traditional tSNE used) or umap are supported.
        velocity_key: 'str' (optional, default velocity_S)
            The dictionary key that corresponds to the estimated velocity values.
        cores: `int` (optional, default `1`)
            Number of cores. Used only when the tSNE reduction_method is used.

    Returns
    -------
        Returns an updated `adata` with reduced dimension data for spliced counts, projected future transcript counts
        'Y_dim' and adjacency matrix when possible.
    """
    n_obs = adata.shape[0]

    # Restrict to the genes flagged for dynamo, when that annotation exists.
    if 'use_for_dynamo' in adata.var.keys():
        X = adata.X[:, adata.var.use_for_dynamo.values]
        if velocity_key is not None:
            X_t = adata.X[:, adata.var.use_for_dynamo.values] + adata.layers[velocity_key][:, adata.var.use_for_dynamo.values]
    else:
        X = adata.X
        if velocity_key is not None:
            X_t = adata.X + adata.layers[velocity_key]

    # Bug fix: all string comparisons below previously used `is`, which tests
    # object identity rather than equality and is not reliable for strings.
    if ('X_pca' not in adata.obsm.keys()) or ('pca_fit' not in adata.uns.keys()) or reduction_method == "pca":
        transformer = TruncatedSVD(n_components=n_pca_components + 1, random_state=0)
        X_fit = transformer.fit(X)
        X_pca = X_fit.transform(X)[:, 1:]  # drop the first component
        adata.obsm['X_pca'] = X_pca
        if velocity_key is not None and "_velocity_pca" not in adata.obsm.keys():
            X_t_pca = X_fit.transform(X_t)[:, 1:]
            adata.obsm['_velocity_pca'] = X_t_pca - X_pca
    else:
        X_pca = adata.obsm['X_pca'][:, :n_pca_components]
        if velocity_key is not None and "_velocity_pca" not in adata.obsm.keys():
            X_t_pca = adata.uns['pca_fit'].fit_transform(X_t)
            adata.obsm['_velocity_pca'] = X_t_pca[:, 1:(n_pca_components + 1)] - X_pca
        adata.obsm['X_pca'] = X_pca

    if reduction_method == "trimap":
        import trimap
        triplemap = trimap.TRIMAP(n_inliers=20,
                                  n_outliers=10,
                                  n_random=10,
                                  distance='angular',  # cosine
                                  weight_adj=1000.0,
                                  apply_pca=False)
        X_dim = triplemap.fit_transform(X_pca)

        adata.obsm['X_trimap'] = X_dim
        adata.uns['neighbors'] = {'params': {'n_neighbors': n_neighbors, 'method': reduction_method}, 'connectivities': None, \
                                  'distances': None, 'indices': None}
    elif reduction_method == 'tSNE':
        try:
            from fitsne import FItSNE
        except ImportError:
            # Bug fix: the original only printed a message here and then crashed
            # with a NameError when calling the missing FItSNE; raise instead.
            raise ImportError('Please first install fitsne to perform accelerated tSNE method. Install instruction is '
                              'provided here: https://pypi.org/project/fitsne/')
        X_dim = FItSNE(X_pca, nthreads=cores)  # use FitSNE

        adata.obsm['X_tSNE'] = X_dim
        adata.uns['neighbors'] = {'params': {'n_neighbors': n_neighbors, 'method': reduction_method}, 'connectivities': None, \
                                  'distances': None, 'indices': None}
    elif reduction_method == 'umap':
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            graph, knn_indices, knn_dists, X_dim = umap_conn_indices_dist_embedding(X_pca)

        adata.obsm['X_umap'] = X_dim
        adata.uns['neighbors'] = {'params': {'n_neighbors': n_neighbors, 'method': reduction_method}, 'connectivities': graph, \
                                  'distances': knn_dists, 'indices': knn_indices}
    elif reduction_method == 'psl':
        adj_mat, X_dim = psl_py(X_pca, d=n_components, K=n_neighbors)  # this need to be updated
        adata.obsm['X_psl'] = X_dim
        adata.uns['PSL_adj_mat'] = adj_mat
    else:
        raise Exception('reduction_method {} is not supported.'.format(reduction_method))

    return adata
| [
"xqiu@tenzing.ucsf.edu"
] | xqiu@tenzing.ucsf.edu |
f101630ef1084e8a5ba07a65d0eadfae679205d2 | bc3b842fd9e4e0645dea30cdf3ff8d39e745eb09 | /melody/melody_types.py | 1e7e16620bd68e9f8912574bdae40d334bb72e56 | [] | no_license | rajansaini691/algorithmic_music | 66e53b0a35452ac851ecaaf2d10e081a349a1ce9 | 7a48269d334d89322ec859d218592ee2a6a5e5e8 | refs/heads/main | 2023-05-09T23:00:42.817117 | 2021-06-03T20:37:30 | 2021-06-03T20:37:30 | 358,388,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,311 | py | """
Defines all of a song's layers of abstraction. If this file
is unwieldy to go through, please split it up.
"""
from dataclasses import dataclass
import enum
from helpers import note_name_to_midi, midi_to_note_name
from scale import Scale
# The direction the notes in a segment should go
class SegmentDirection(enum.Enum):
    """Melodic contour of a segment: rising, arch, flat, valley or falling."""
    UP = 1
    UPDOWN = 2
    STRAIGHT = 3
    DOWNUP = 4
    DOWN = 5
class DynamicChange(enum.Enum):
    """Gradual change in loudness over a span of notes."""
    CRESCENDO = 1
    DECRESCENDO = 2
    # TODO Add more
class Dynamic(enum.Enum):
    """Absolute loudness level (mezzo-piano / mezzo-forte)."""
    MP = 1
    MF = 2
    # TODO Add more
class Articulation(enum.Enum):
    """How a note is attacked and held."""
    STACCATO = 1
    TENUTO = 2
class Mood(enum.Enum):
    """Tonal mood of a phrase."""
    MAJOR = 1
    MINOR = 2
@dataclass
class PhraseElement:
    """Base class for anything that occupies time inside a phrase."""
    duration: int = 2  # length in atomic units (see Phrase.atomic_unit) -- presumably; confirm
@dataclass
class Rest(PhraseElement):
    """A silence; only the inherited duration matters."""
    pass
@dataclass
class Note:
    """A single melodic note; `pitch` is a MIDI note number."""
    # TODO Dynamic stuff
    pitch: int
    articulation: Articulation = None
    grace: bool = False  # True if this is a grace note
    duration: int = 2
    importance: float = 0.5  # [0, 1]  (annotation fixed: default is a float)
    new: bool = False  # marks a newly introduced note; rendered with a '*'
    def get_final_dynamic(self):
        """Return the note's effective dynamic.

        Bug fix: `self` was missing from the signature, so calling this on an
        instance raised a TypeError.
        """
        # TODO Stub
        return 1
    def to_string(self):
        """Render the note as its name, with a trailing '*' when `new` is set."""
        note = midi_to_note_name(self.pitch)
        star = "*" if self.new else ""
        return note + star
@dataclass
class LandingNote(PhraseElement):
    """A target note the melody should land on."""
    pitch: int = 'A7' # Can be a note name or midi number
    duration: int = 2
    articulation: Articulation = None
    def __post_init__(self):
        # Normalize a note-name string (e.g. 'A7') to its MIDI number.
        if type(self.pitch) == str:
            self.pitch = note_name_to_midi(self.pitch)
@dataclass
class Segment(PhraseElement):
    """A run of notes sharing one contour and dynamic; a phrase building block."""
    duration: int
    direction: SegmentDirection = None
    dynamic_change: DynamicChange = None
    dynamic: Dynamic = None
    new_note: int = None # If the segment contains a new/special note, stores its pitch
    notes: list[Note] = None
    scale_constraints: list[int] = None # Pair, [low, high]
    scale_width: int = None # Number of note pitches allowed in segment, based on constraints
# TODO Make HarmonyElement class and inherit from that
@dataclass
class HarmonyNote(PhraseElement): # A single note played in the harmony.
    """One note of the accompaniment/harmony."""
    pitch: int = 'A7' # Can be a note name or midi number
    duration: int = 2
    dynamic: Dynamic = None
    articulation: Articulation = None
    dynamic_change: DynamicChange = None
    def __post_init__(self):
        # Normalize a note-name string to its MIDI number.
        if type(self.pitch) == str:
            self.pitch = note_name_to_midi(self.pitch)
@dataclass
class Chord(PhraseElement): # Holds a collection of HarmonyNotes; played simultaneously
    """A set of HarmonyNotes sounded at the same time."""
    notes: list[HarmonyNote] = None
    dynamic_change: DynamicChange = None # Overrides dynamic change of individual notes
@dataclass
class HarmonyLine: # Stores multiple harmonic lines to be played simultaneously
    """An ordered sequence of harmony elements played by one instrument."""
    harmony_elements: list[PhraseElement]
    instrument: str = "Vib"
@dataclass
class Phrase:
    """A musical phrase: a scale plus an ordered list of phrase elements.

    Durations throughout are expressed in multiples of `atomic_unit`.
    """
    # TODO Add option to inherit scale from Song
    scale: Scale
    phrase_elements: list[PhraseElement]
    time_signature: list[int] # Should be [upper, lower]
    atomic_unit: float # Fraction of a whole note
    tempo: int = 120  # presumably beats per minute -- confirm
    chords: (str, int) = None # Contains a list of chords/arpegiatted notes; NOTE(review): annotation is the tuple (str, int), probably meant tuple[str, int]
    harmony: list[HarmonyLine] = None # Contains a list of chords/arpegiatted notes
    harmonic_level: int = 0
    mood: Mood = None
@dataclass
class Song:
    """Top-level container: an ordered list of phrases."""
    phrases: list[Phrase]
| [
"rajansaini@cs.ucsb.edu"
] | rajansaini@cs.ucsb.edu |
4972b713381119c240516285a229e2e89c1080c3 | 068e5980d40b4806446d649d5e76fa278314703c | /TheAlgorithms/data_structures/python/linked_list/test.py | 61939f7d03b609e9359fee7f4dcf8735ccdb6644 | [] | no_license | zonkisa/leezyer | a4807a515a6ddb46b07b130cbbc4be28d3ed0de7 | 83d453f34734e99fb5c6b559444912b64d6057c7 | refs/heads/master | 2020-05-15T22:29:38.635712 | 2019-08-05T14:10:41 | 2019-08-05T14:10:41 | 182,528,267 | 0 | 0 | null | 2019-04-21T12:09:51 | 2019-04-21T12:09:50 | null | UTF-8 | Python | false | false | 526 | py | from __future__ import print_function
from TheAlgorithms.data_structures.python.linked_list import linked_list
from TheAlgorithms.data_structures.python.linked_list import circular_linked_list
def testHasCircle(lst):
    # Print the result of has_circular(): does the list contain a cycle?
    print(lst.has_circular())
def testIsCircle(lst):
    # Print the result of is_circular(): is the list fully circular?
    print(lst.is_circular())
if __name__ == '__main__':
    # Smoke test: build a 3-node circular list and run both cycle checks.
    lst = linked_list.make_circular_list([1, 2, 3])
    # lst = linked_list.make_lst([1, 2, 3, 1])
    lst.printList()
    print("------")
    testHasCircle(lst)
    testIsCircle(lst)
pass | [
"271169495@qq.com"
] | 271169495@qq.com |
acc31c814ea75d936d6f304ff2a25b5a76085f86 | 78af322dd1e3b29fc627aa6817eed164c2c751c3 | /iraclis/_3objects.py | bced125ae65cedf4cb053b948d2b5586a293e3d5 | [
"CC-BY-4.0"
] | permissive | nespinoza/Iraclis | 8e676ffe16dbda6caf8f32fb7dde464b75540c65 | 3b5dd8d6bc073f6d2c24ad14341020694255bf65 | refs/heads/master | 2022-10-18T09:55:32.818159 | 2020-06-17T19:39:03 | 2020-06-17T19:39:03 | 273,058,797 | 0 | 0 | NOASSERTION | 2020-06-17T19:14:20 | 2020-06-17T19:14:19 | null | UTF-8 | Python | false | false | 9,141 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ._2variables import *
class DataSet:
    """A set of spectroscopic FITS images plus their single direct image.

    `input_data` may be:
      * 'empty'                -- a placeholder data set with no images,
      * a FITS file            -- a single spectroscopic image,
      * a flat directory       -- one visit: all *.fits files inside,
      * a directory of subdirs -- a "splitted" data set, one visit per subdir.

    Spectroscopic exposures are sorted by their exposure-start header keyword,
    and exposures whose number of samples differs from the median value are
    discarded as outliers.
    """
    def __init__(self, input_data, direct_image=None):
        # Optionally open an explicitly supplied direct image.
        if direct_image is not None:
            if isinstance(direct_image, str):
                if os.path.isfile(direct_image):
                    direct_image = pf.open(direct_image)
                else:
                    # Bug fix: the message previously formatted `input_data`
                    # instead of the missing direct-image path.
                    raise IraclisFileError('No such file {0}'.format(direct_image))
            else:
                raise IraclisFileError('Please give a file name for the direct image or leave it empty.')
        if not isinstance(input_data, str):
            raise IraclisFileError('Please give a file or directory name for the input data.')
        elif input_data == 'empty':
            self.file_names = []
            self.spectroscopic_images = []
            self.direct_image = [1]  # placeholder so the final sanity check passes
            self.splitted = False
            self._data_set_directory_path = None
        elif os.path.isfile(input_data):
            # Single spectroscopic frame.
            self.file_names = []
            self.spectroscopic_images = [pf.open(input_data)]
            if not direct_image:
                self.direct_image = None
            else:
                self.direct_image = direct_image
            self.splitted = False
            self._data_set_directory_path = None
            if self.spectroscopic_images[0][0].header[variables.observation_type.keyword] != 'SPECTROSCOPIC':
                # Typo fix in the message ("not s valid" -> "not a valid").
                raise IraclisFileError('A single direct image is not a valid input dataset.')
        elif os.path.isdir(input_data) and len(glob.glob(os.path.join(input_data, '*', ''))) == 0:
            # Flat directory: collect every FITS file; the first
            # non-spectroscopic frame found becomes the direct image.
            nsamp = []
            final_list = []
            direct_image = None
            files = sorted(glob.glob(os.path.join(input_data, '*.fits')))
            for i in files:
                with pf.open(i) as j:
                    if j[0].header[variables.observation_type.keyword] == 'SPECTROSCOPIC':
                        final_list.append([j[0].header[variables.exposure_start.keyword],
                                           os.path.split(i)[1], plc.copy_fits(j)])
                        nsamp.append(j[0].header[variables.total_samples.keyword])
                    elif not direct_image:
                        direct_image = pf.open(i, mode='update')
            # Nominal number of samples = median over all exposures.
            nsamps = [int(np.median(np.array(nsamp)))]
            final_list.sort()  # chronological: exposure start is the first element
            list_of_times, list_of_files, list_of_fits = np.swapaxes(final_list, 0, 1)
            # Drop exposures whose sample count deviates from the nominal one.
            outliers = True
            while outliers:
                outliers = False
                for i in range(len(list_of_fits)):
                    if list_of_fits[i][0].header[variables.total_samples.keyword] not in nsamps:
                        list_of_fits = np.delete(list_of_fits, i)
                        list_of_files = np.delete(list_of_files, i)
                        outliers = True
                        break
            self.file_names = list_of_files
            self.spectroscopic_images = list_of_fits
            self.direct_image = direct_image
            self.splitted = False
            self._data_set_directory_path = input_data
        elif os.path.isdir(input_data) and len(glob.glob(os.path.join(input_data, '*', ''))) > 0:
            # Splitted data set: repeat the flat-directory logic per sub-directory.
            self.file_names = []
            self.spectroscopic_images = []
            self.direct_image = []
            self.splitted = True
            self._data_set_directory_path = input_data
            for input_data in sorted(glob.glob(os.path.join(input_data, '*', ''))):
                nsamp = []
                final_list = []
                direct_image = None
                files = sorted(glob.glob(os.path.join(input_data, '*.fits')))
                for i in files:
                    with pf.open(i) as j:
                        if j[0].header[variables.observation_type.keyword] == 'SPECTROSCOPIC':
                            final_list.append([j[0].header[variables.exposure_start.keyword],
                                               os.path.split(i)[1], plc.copy_fits(j)])
                            nsamp.append(j[0].header[variables.total_samples.keyword])
                        elif not direct_image:
                            direct_image = pf.open(i, mode='update')
                nsamps = [int(np.median(np.array(nsamp)))]
                final_list.sort()
                list_of_times, list_of_files, list_of_fits = np.swapaxes(final_list, 0, 1)
                outliers = True
                while outliers:
                    outliers = False
                    for i in range(len(list_of_fits)):
                        if list_of_fits[i][0].header[variables.total_samples.keyword] not in nsamps:
                            list_of_fits = np.delete(list_of_fits, i)
                            list_of_files = np.delete(list_of_files, i)
                            outliers = True
                            break
                self.file_names = list_of_files
                self.spectroscopic_images.append(list_of_fits)
                # NOTE(review): only the last split's direct image is kept -- confirm intended.
                self.direct_image = direct_image
        else:
            raise IraclisFileError('No such file or directory: {0}'.format(input_data))
        if not self.direct_image:
            raise IraclisFileError('A direct image is necessary.')
    def save(self, export_directory, arrange=True, export_pipeline_variables_file='variables.txt'):
        """Write the data set to `export_directory`.

        An existing directory is first backed up to a time-stamped copy and
        removed.  With `arrange=True`, file names are prefixed with each
        exposure's DATE-OBS/TIME-OBS.  Header cards that fail FITS
        verification are deleted and the write is retried once.
        """
        if os.path.isdir(export_directory):
            backup = '{0}_{1}'.format(export_directory, time.strftime('%y-%m-%d_%H-%M-%S'))
            shutil.copytree(export_directory, backup)
            shutil.rmtree(export_directory)
        os.mkdir(export_directory)
        if arrange:
            for i in range(len(self.file_names)):
                date = str(self.spectroscopic_images[i][0].header['DATE-OBS'])
                obs_time = str(self.spectroscopic_images[i][0].header['TIME-OBS'])
                obs_time = '-'.join(obs_time.split(':'))
                if self.file_names[i].split('_')[0] != date or self.file_names[i].split('_')[1] != obs_time:
                    self.file_names[i] = '{0}_{1}_{2}'.format(date, obs_time, os.path.split(self.file_names[i])[1])
        for i in range(len(self.file_names)):
            copy_of_file = plc.copy_fits(self.spectroscopic_images[i])
            try:
                copy_of_file.writeto(os.path.join(export_directory, self.file_names[i]), output_verify='fix')
            except pf.VerifyError as e:
                # Parse the failing HDU/card indices out of the error text,
                # drop the offending card and retry once.
                hdu = int(str(e.args)[4:-4].split('\\n')[1].replace('HDU ', '').replace(':', ''))
                card = int(str(e.args)[4:-4].split('\\n')[2].replace('Card ', '').replace(':', ''))
                del copy_of_file[hdu].header[card]
                copy_of_file.writeto(os.path.join(export_directory, self.file_names[i]), output_verify='fix')
        copy_of_file = plc.copy_fits(self.direct_image)
        try:
            copy_of_file.writeto(os.path.join(export_directory, 'direct_image.fits'), output_verify='fix')
        except pf.VerifyError as e:
            hdu = int(str(e.args)[4:-4].split('\\n')[1].replace('HDU ', '').replace(':', ''))
            card = int(str(e.args)[4:-4].split('\\n')[2].replace('Card ', '').replace(':', ''))
            del copy_of_file[hdu].header[card]
            copy_of_file.writeto(os.path.join(export_directory, 'direct_image.fits'), output_verify='fix')
        if export_pipeline_variables_file:
            variables.save(os.path.join(export_directory, export_pipeline_variables_file))
    def copy_split(self, split_number):
        """Return a new DataSet holding only split `split_number`'s images."""
        x = DataSet('empty')
        x.spectroscopic_images = self.spectroscopic_images[split_number]
        return x
# pipeline counter
class PipelineCounter:
    """Console progress reporter for a pipeline stage.

    Prints "<task>: <i> / <total>  time left  total time" on a single,
    continuously rewritten terminal line.
    """

    def __init__(self, task, total_iterations, show_every=1):
        # Pad the task label with dots to a fixed width of 15 characters.
        self.task = task + '.' * (15 - len(task))
        self.current_iteration = 0
        self.total_iterations = int(total_iterations)
        self.start_time = time.time()
        self.show = 0
        self.show_every = int(show_every)
        # Single-iteration tasks: effectively suppress the progress line.
        if self.total_iterations == 1:
            self.show_every = 10

    def update(self):
        """Advance by one iteration and (every `show_every` calls) redraw."""
        self.current_iteration += 1
        self.show += 1.0 / self.show_every
        padding = ' ' * (len(str(self.total_iterations)) - len(str(self.current_iteration)))
        out_of = '{0}{1} / {2}'.format(padding, str(self.current_iteration), str(self.total_iterations))
        elapsed = time.time() - self.start_time
        remaining = (self.total_iterations - self.current_iteration) * elapsed / self.current_iteration
        time_left = str(datetime.timedelta(seconds=int(remaining)))
        total_time = str(datetime.timedelta(seconds=int(elapsed)))
        if int(self.show):
            # Carriage return + clear-line escape, then the refreshed status.
            sys.stdout.write('\r\033[K')
            sys.stdout.write('{0}: {1} time left: {2} total time: {3}'.format(
                self.task, out_of, time_left, total_time))
            sys.stdout.flush()
            self.show = 0
        if self.current_iteration == self.total_iterations and self.total_iterations > 1:
            print('')
| [
"aggelostsiaras@gmail.com"
] | aggelostsiaras@gmail.com |
d2f3608b843b0561afcdc922cf838154a72669ae | cf494b5f507d2b887f9a37a679fcbd3facb63243 | /pytorch_version/preprocess.py | 2a88ef1e07328b3aa34703b1da248759cb6eb2f9 | [] | no_license | w904292841/Voiceprint_lock | 43828be2388c30a4b2408e4457a9858b6730602e | 2fab15d1f7a9c3b9d2dc68bbfa65220df4eafac3 | refs/heads/main | 2023-02-06T02:06:29.777680 | 2020-12-25T07:54:07 | 2020-12-25T07:54:07 | 323,316,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,811 | py | import csv
import os
import os.path
import torch
import mfcc_reader
father_path = "H:/科研/vox_data/vox1_dev_wav/"  # root of the raw VoxCeleb1 wav data
sub_path = ""  # per-speaker sub-directory name, filled inside the loop below
write_path = "H:/科研/vox_data_pre/"  # destination for the preprocessed .pt tensors
all_list = []
class_head = []  # start offset of each speaker's samples in all_list
file_head = []
file_name = []
def exisit_equal(lis):
    """Return True if the last element of `lis` also occurs earlier in the list.

    Bug fix: the original compared the loop *index* to the last value
    (``if i == lis[-1]``), so the answer depended on positions rather than on
    the list contents, and the loop fell through returning None.
    """
    if not lis:
        return False
    return lis[-1] in lis[:-1]
# Walk every speaker directory (id10001 .. id10256), convert each wav to MFCC
# features and save every sample as an individual torch tensor (.pt file).
real_i = 0  # running index of saved samples for the current speaker
fold_list = os.listdir(father_path)
min = 8  # NOTE(review): shadows the builtin `min` and is never used afterwards
for i in range(1,257):
    done = 0  # NOTE(review): assigned but never used
    # Build the zero-padded speaker id ("id10001"-style); only the last branch
    # is reachable for i in [1, 257).
    if(i>= 1000):
        sub_path = "id1" + str(i)
    elif(i>=100):
        sub_path = "id10" + str(i)
    elif(i>=10):
        sub_path = "id100" + str(i)
    else:
        sub_path = "id1000" + str(i)
    # print(sub_path)
    # father_path+sub_path # F:/Vox_data/vox1_dev_wav/wav/id10001
    id_path = father_path+sub_path
    filelist = os.listdir(id_path) # all session folders under e.g. id10001
    filelist.sort()
    class_head.append(len(all_list))
    real_i = 0
    # Last session of each speaker goes to the "dev" split, the rest to "train".
    for id, filename in enumerate(filelist):
        if id < len(filelist) - 1:
            mode = "train"
        else:
            mode = "dev"
        all_vox_path = os.path.join(id_path, filename)
        all_vox_list = os.listdir(all_vox_path)
        for ID, vox_filename in enumerate(all_vox_list):
            this_vox_path = os.path.join(all_vox_path, vox_filename)
            # NOTE(review): a trailing space is appended to the path before use --
            # presumably consumed by WavtoMfcc's own parsing; confirm.
            this_vox_path += " "
            this_vox_path = this_vox_path.replace("\\", "/")
            mfcc = mfcc_reader.WavtoMfcc(this_vox_path,16,mode)
            data = mfcc.readwav()
            # Save each MFCC frame chunk as its own tensor file.
            for j in range(data.size(0)):
                this_data = data[j]
                torch.save(this_data,write_path + mode + "/" + sub_path + "_" + str(id) + "_" + str(real_i) + ".pt")
                real_i += 1
    print(sub_path," ",real_i)
# print(len(all_list))
# print(class_head)
| [
"904292841@qq.com"
] | 904292841@qq.com |
b9e6149164b87a1472585e824d937adcc220d393 | c0cb1559188be071f1cd91d0adbad0ca850be0e1 | /problème dame dans un échéquier.py | 2dd481736794d0a2e70c1647a6960e629ebc9a32 | [] | no_license | mines-nancy-tcss5ac-2018/td1-cordel8u | d7299e661082d57ddf9ed2652c6ba345696d6641 | a766d874042bae2394aa84fded2ff683647d6ea5 | refs/heads/master | 2020-03-31T22:59:50.329544 | 2018-10-11T18:46:45 | 2018-10-11T18:46:45 | 152,639,007 | 0 | 0 | null | null | null | null | ISO-8859-1 | Python | false | false | 2,590 | py | from scipy import *
def case_libre(n, L, M):
    """Return a copy of board M with every square attacked by a queen at L set to 1.

    n is the board size, L = [row, col] is the new queen's position and M is the
    current n x n board (0 = free, 1 = unavailable).  M itself is not modified.
    """
    S = array(M)  # independent copy: the caller's board must stay untouched
    row, col = L
    S[row, :] = 1  # whole row, including the queen's own square
    S[:, col] = 1  # whole column
    # Walk the four diagonals, one step at a time, until falling off the board.
    for dr, dc in ((1, 1), (1, -1), (-1, -1), (-1, 1)):
        r, c = row + dr, col + dc
        while 0 <= r < n and 0 <= c < n:
            S[r, c] = 1
            r += dr
            c += dc
    return S
def verif(M):
    """Return True while at least one square of board M is still free (== 0)."""
    return any(cell == 0 for row in M for cell in row)
def indice(M):
    """Return [row, col] of the first free square (value 0), scanning row-major.

    Assumes at least one free square exists; callers always check with verif()
    first (the original looped past the end of M otherwise).
    """
    for r, row in enumerate(M):
        for c, value in enumerate(row):
            if value == 0:
                return [r, c]
#M=array([[1,2,2],[1,4,0]])
#print(indice(M))
def iteration(d,n,L,N,compte):
    # Depth-first backtracking over queen placements (comments translated from
    # the original French: explores all possible combinations, adding 1 to the
    # counter every time a complete placement of d queens is found).
    #
    # Arguments (the stacks are mutated in place):
    #   d      - queens still to place
    #   n      - board size
    #   L      - stack of queen positions [row, col] placed so far
    #   N      - stack of boards; N[-1] has 1 on every attacked/occupied square
    #   compte - number of complete placements found so far
    #
    # Case 1: queens remain and the current board still has a free square ->
    # place a queen on the first free square and recurse one level deeper.
    if d!=0 and verif(N[-1]):
        L.append(indice(N[-1]))
        N.append(case_libre(n,L[-1],N[-1]))
        d-=1
        return(iteration(d,n,L,N,compte))
    # Case 2: every queen is placed -> count the solution, then backtrack:
    # undo the last placement and mark that square unavailable so the search
    # moves on to the next candidate square.
    if d==0:
        compte+=1
        a=L[-1]
        del L[-1]
        del N[-1]
        N[-1][a[0]][a[1]]=1
        d+=1
        return(iteration(d,n,L,N,compte))
    # Case 3: queens remain but no free square is left -> dead end; either the
    # whole search space is exhausted (only the root board remains) or
    # backtrack exactly as in case 2.
    if d!=0 and not(verif(N[-1])):
        if len(N)==1:
            return(compte)
        else:
            a=L[-1]
            del L[-1]
            del N[-1]
            N[-1][a[0]][a[1]]=1
            d+=1
            return(iteration(d,n,L,N,compte))
def solve(d, n):
    """Count the placements of d mutually non-attacking queens on an n x n board."""
    empty_board = array(zeros((n, n)))
    return iteration(d, n, [], [empty_board], 0)
print(solve(4,4))
| [
"noreply@github.com"
] | noreply@github.com |
e39cc095116928a7ff373879228fa4f661896bc3 | 27fe9c0fb5f5459241d11a205bfa7c887d643db1 | /2022/day13a.py | 0a55387ebdd83c8981bb04d4d0f43c86e4c44b0a | [] | no_license | johnrozmaryn/adventofcode | 21c5fdd8d3a95882ce04103484538a215c49491c | 80d67371cfedb71eda0e35cd9130ee25b6c39091 | refs/heads/master | 2023-01-05T19:21:46.034612 | 2022-12-27T15:13:14 | 2022-12-27T15:13:14 | 225,509,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | f = open("day13.in")
contents = f.read()
pairs = contents.split('\n\n')
import ast
from functools import cmp_to_key
#ahh! cmp is all weird in python 3!
#it was handy, because if l == r it's 0, if l>r postive, if l<r negative
def compare(l, r):
    """Packet comparator: negative if l < r, zero if equal, positive if l > r.

    Integers compare numerically; a lone integer is promoted to a one-element
    list when compared against a list; lists compare element-wise, with length
    breaking ties (shorter list first).
    """
    left_is_num = isinstance(l, int)
    right_is_num = isinstance(r, int)
    if left_is_num and right_is_num:
        return l - r
    if left_is_num:
        return compare([l], r)
    if right_is_num:
        return compare(l, [r])
    for left_item, right_item in zip(l, r):
        outcome = compare(left_item, right_item)
        if outcome != 0:
            return outcome
    return len(l) - len(r)
# Part 2: order ALL packets (plus the two dividers) and multiply the dividers'
# 1-based positions.  The original if/else here had two identical branches, so
# the per-pair compare() call did nothing and has been removed.
goodpackets = []
for item in pairs:
    left, right = item.split('\n')
    left = ast.literal_eval(left)
    right = ast.literal_eval(right)
    goodpackets.append(left)
    goodpackets.append(right)

# Add the divider packets, sort with the packet comparator, read off the answer.
goodpackets.extend(([[2]], [[6]]))
goodpackets.sort(key=cmp_to_key(compare))
print((goodpackets.index([[2]]) + 1) * (goodpackets.index([[6]]) + 1))
"john.rozmaryn@gmail.com"
] | john.rozmaryn@gmail.com |
c43a60ed1b624520dfdb98277f2787d7bba25aa9 | 97d6715e6ca961a1ce98171559057d76ee304a26 | /Character-recognition/siamese_with_bg/alexnet.py | b4b2e9f6d6fb97e135d971c9d5a7126feeb3be8c | [] | no_license | abhaydoke09/Siamese-Network-For-Text-Recognition-In-Maps | 566d4e8894d98122f7fc1fbfca12f0b8959e7235 | d8d7364569e913f8af21577b51cf015a420ba708 | refs/heads/master | 2021-09-17T22:08:47.356052 | 2018-07-05T22:29:45 | 2018-07-05T22:29:45 | 104,268,509 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,431 | py | """This is an TensorFLow implementation of AlexNet by Alex Krizhevsky at all.
Paper:
(http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)
Explanation can be found in my blog post:
https://kratzert.github.io/2017/02/24/finetuning-alexnet-with-tensorflow.html
"""
import tensorflow as tf
import numpy as np
class AlexNet(object):
    """Implementation of the AlexNet."""

    def __init__(self, x, keep_prob, num_classes, skip_layer,
                 weights_path='DEFAULT'):
        """Create the graph of the AlexNet model.
        Args:
            x: Placeholder for the input tensor.
            keep_prob: Dropout probability.
            num_classes: Number of classes in the dataset.
            skip_layer: List of names of the layer, that get trained from
                scratch
            weights_path: Complete path to the pretrained weight file, if it
                isn't in the same folder as this code
        """
        # Parse input arguments into class variables
        self.X = x
        self.NUM_CLASSES = num_classes
        self.KEEP_PROB = keep_prob
        self.SKIP_LAYER = skip_layer
        if weights_path == 'DEFAULT':
            self.WEIGHTS_PATH = 'bvlc_alexnet.npy'
        else:
            self.WEIGHTS_PATH = weights_path
        # Call the create function to build the computational graph of AlexNet
        self.create()

    def create(self):
        """Create the network graph."""
        # 1st Layer: Conv (w ReLu) -> Lrn -> Pool
        conv1 = conv(self.X, 11, 11, 96, 4, 4, padding='VALID', name='conv1')
        norm1 = lrn(conv1, 2, 2e-05, 0.75, name='norm1')
        pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1')
        # 2nd Layer: Conv (w ReLu) -> Lrn -> Pool with 2 groups
        conv2 = conv(pool1, 5, 5, 256, 1, 1, groups=2, name='conv2')
        norm2 = lrn(conv2, 2, 2e-05, 0.75, name='norm2')
        pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID', name='pool2')
        # 3rd Layer: Conv (w ReLu)
        conv3 = conv(pool2, 3, 3, 384, 1, 1, name='conv3')
        # 4th Layer: Conv (w ReLu) splitted into two groups
        conv4 = conv(conv3, 3, 3, 384, 1, 1, groups=2, name='conv4')
        # 5th Layer: Conv (w ReLu) -> Pool splitted into two groups
        conv5 = conv(conv4, 3, 3, 256, 1, 1, groups=2, name='conv5')
        pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID', name='pool5')
        # 6th Layer: Flatten -> FC (w ReLu) -> Dropout
        flattened = tf.reshape(pool5, [-1, 6*6*256])
        fc6 = fc(flattened, 6*6*256, 4096, name='fc6')
        dropout6 = dropout(fc6, self.KEEP_PROB)
        # 7th Layer: FC (w ReLu) -> Dropout
        fc7 = fc(dropout6, 4096, 4096, name='fc7')
        dropout7 = dropout(fc7, self.KEEP_PROB)
        # 8th Layer: FC and return unscaled activations
        self.fc8 = fc(dropout7, 4096, self.NUM_CLASSES, relu=False, name='fc8')

    def load_initial_weights(self, session):
        """Load weights from file into network.
        As the weights from http://www.cs.toronto.edu/~guerzhoy/tf_alexnet/
        come as a dict of lists (e.g. weights['conv1'] is a list) and not as
        dict of dicts (e.g. weights['conv1'] is a dict with keys 'weights' &
        'biases') we need a special load function
        """
        # Load the weights into memory.  Bug fix: `allow_pickle=True` is
        # required because the .npy file stores a pickled dict and numpy
        # 1.16.3+ changed the default to False, which raises a ValueError.
        weights_dict = np.load(self.WEIGHTS_PATH, encoding='bytes', allow_pickle=True).item()
        # Loop over all layer names stored in the weights dict
        for op_name in weights_dict:
            # Check if layer should be trained from scratch
            if op_name not in self.SKIP_LAYER:
                with tf.variable_scope(op_name, reuse=True):
                    # Assign weights/biases to their corresponding tf variable
                    for data in weights_dict[op_name]:
                        # Biases
                        if len(data.shape) == 1:
                            var = tf.get_variable('biases', trainable=False)
                            session.run(var.assign(data))
                        # Weights
                        else:
                            var = tf.get_variable('weights', trainable=False)
                            session.run(var.assign(data))
def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name,
         padding='SAME', groups=1):
    """Create a convolution layer.
    Adapted from: https://github.com/ethereon/caffe-tensorflow

    With ``groups > 1`` the input channels and the filters are split into
    `groups` equal parts that are convolved independently and concatenated
    again (AlexNet's original two-GPU trick).
    """
    # Get number of input channels
    input_channels = int(x.get_shape()[-1])

    # Idiom fix: a named function instead of a lambda assigned to a name.
    def convolve(i, k):
        return tf.nn.conv2d(i, k,
                            strides=[1, stride_y, stride_x, 1],
                            padding=padding)

    with tf.variable_scope(name) as scope:
        # Create tf variables for the weights and biases of the conv layer
        weights = tf.get_variable('weights', shape=[filter_height,
                                                    filter_width,
                                                    input_channels/groups,
                                                    num_filters])
        biases = tf.get_variable('biases', shape=[num_filters])
        if groups == 1:
            conv = convolve(x, weights)
        # In the cases of multiple groups, split inputs & weights and
        else:
            # Split input and weights and convolve them separately
            input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x)
            weight_groups = tf.split(axis=3, num_or_size_splits=groups,
                                     value=weights)
            output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)]
            # Concat the convolved output together again
            conv = tf.concat(axis=3, values=output_groups)
        # Add biases
        bias = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(conv))
        # Apply relu function
        relu = tf.nn.relu(bias, name=scope.name)
        return relu
def fc(x, num_in, num_out, name, relu=True):
    """Create a fully connected layer.

    Args:
        x: Input tensor of shape [batch, num_in].
        num_in: Number of input units.
        num_out: Number of output units.
        name: Variable scope name for the layer.
        relu: If True, apply a ReLU to the activations (note: the parameter is
            shadowed by the local `relu` tensor below).
    """
    with tf.variable_scope(name) as scope:
        # Create tf variables for the weights and biases
        weights = tf.get_variable('weights', shape=[num_in, num_out],
                                  trainable=True)
        biases = tf.get_variable('biases', [num_out], trainable=True)
        # Matrix multiply weights and inputs and add bias
        act = tf.nn.xw_plus_b(x, weights, biases, name=scope.name)
        if relu:
            # Apply ReLu non linearity
            relu = tf.nn.relu(act)
            return relu
        else:
            return act
def max_pool(x, filter_height, filter_width, stride_y, stride_x, name,
             padding='SAME'):
    """Create a max pooling layer.

    Thin wrapper around tf.nn.max_pool; kernel and stride lists keep the
    batch and channel dimensions fixed at 1.
    """
    return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1],
                          strides=[1, stride_y, stride_x, 1],
                          padding=padding, name=name)
def lrn(x, radius, alpha, beta, name, bias=1.0):
    """Create a local response normalization layer (AlexNet-style LRN)."""
    return tf.nn.local_response_normalization(x, depth_radius=radius,
                                              alpha=alpha, beta=beta,
                                              bias=bias, name=name)
def dropout(x, keep_prob):
    """Create a dropout layer; `keep_prob` is the probability a unit is kept."""
    return tf.nn.dropout(x, keep_prob)
| [
"abhaydoke09@gmail.com"
] | abhaydoke09@gmail.com |
43125388e7f13fb3f397da7be3da1133ae9fbb3d | 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | /alipay/aop/api/response/KoubeiServindustryPortfolioDataCreateResponse.py | e5a08c2fa59d81fa90b88ce8d9d521a12247d995 | [
"Apache-2.0"
] | permissive | hipacloud/alipay-sdk-python-all | e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | refs/heads/master | 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 | Apache-2.0 | 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null | UTF-8 | Python | false | false | 778 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class KoubeiServindustryPortfolioDataCreateResponse(AlipayResponse):
    """Response wrapper for the koubei.servindustry.portfolio.data.create API."""
    def __init__(self):
        super(KoubeiServindustryPortfolioDataCreateResponse, self).__init__()
        # Identifier of the created portfolio; filled by parse_response_content.
        self._portfolio_id = None
    @property
    def portfolio_id(self):
        """Portfolio id returned by the gateway (None until parsed)."""
        return self._portfolio_id
    @portfolio_id.setter
    def portfolio_id(self, value):
        self._portfolio_id = value
    def parse_response_content(self, response_content):
        """Parse the gateway payload, then extract the `portfolio_id` field if present."""
        response = super(KoubeiServindustryPortfolioDataCreateResponse, self).parse_response_content(response_content)
        if 'portfolio_id' in response:
            self.portfolio_id = response['portfolio_id']
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
874f90cfe5e2c9677d1ea430a98f1b30c398836e | 2259cee70a57c00e089d699aea257e6325b1ccfc | /urls.py | 2fd17de4a8f4e461f37337a26789d945d521ccdd | [] | no_license | SudhanshuSahil/vsm-django-app | ee62950c7bbf02aecdc8aace5e42890336323ebc | 86ddf8f8f47623b2161d3e1febef5e194a77d115 | refs/heads/master | 2023-03-21T18:28:57.770339 | 2021-03-03T17:57:08 | 2021-03-03T17:57:08 | 344,215,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
from vsm import views
router = routers.DefaultRouter()
router.register(r'instruction', views.InstructionViewSet)
router.register(r'faq', views.FaqViewSet)
router.register(r'profiles', views.VSMProfileViewSet)
router.register(r'companies', views.CompanyViewSet)
router.register(r'holdings', views.HoldingViewSet)
router.register(r'leaders', views.LeaderViewSet)
router.register(r'leaders-iitb', views.IITBLViewSet)
router.register(r'tokens', views.ACTViewSet)
urlpatterns = [
path('', include(router.urls)),
path('me/', views.current_user),
path('my-holdings/', views.my_holdings),
path('trans/', views.make_transaction),
] | [
"sudhanshusahil.19@gmail.com"
] | sudhanshusahil.19@gmail.com |
07dcfbdea494e3e6503f63b1498e94bdd1fee207 | 5f535b35375d68f407ee2f1153b97b686c9a8365 | /aircraft_scanning_control/scripts/uav_manual_scanning.py | 7cdedf4107cbfaac8750f76c38d30624fcb97902 | [
"MIT"
] | permissive | suneric/aircraft_scanning | d32a0ba3e44a0954a1a6a4a283615ca142a4cee8 | 18c7deb8405eabecab643e7ebbda5f3a61e78393 | refs/heads/master | 2022-06-04T11:17:27.210208 | 2022-05-18T22:49:32 | 2022-05-18T22:49:32 | 239,586,464 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | #!/usr/bin/env python
import rospy
import numpy as np
import transform
from uav_controller import uav_cam_controller
from sensor.pointcloud import data_capture
from sensor.camera import realsense_d435
import os
import glob
import struct
import ctypes
if __name__ == '__main__':
# clean folder for save point cloud file
temp_folder = "/home/yufeng/Temp/Scanning/"
files = glob.glob(temp_folder+"*")
for f in files:
print("remove ", f)
os.remove(f)
rospy.init_node("uav_manual_scannig", anonymous=True, log_level=rospy.INFO)
rospy.sleep(2)
controller = uav_cam_controller()
camera = realsense_d435()
pc_capture = data_capture(camera,temp_folder)
rate = rospy.Rate(10)
try:
while not rospy.is_shutdown():
key_input = raw_input("please enter 'space' for cature data:\n")
if (key_input == ''):
mat = controller.transform_q2c()
pc_capture.scan_and_save(mat)
rate.sleep()
except rospy.ROSInterruptException:
pass
| [
"syf_more@163.com"
] | syf_more@163.com |
0e36360a91fe459fa173375c9026a8911f50919c | 5f0787ed95f3858e3e72e0e19bde89ba9d5faaf2 | /pyrapidparser/run/wobj.py | f00f3169a762e20032357326506b05c6b0dd5f99 | [] | no_license | jf---/pyrapidparser | 1e07826b885ab524310a90c7bef98a83b3a1f644 | 87f41d2c6a3544cf335bd8088bd33e77f47bd04e | refs/heads/master | 2020-01-23T21:04:09.624440 | 2016-11-23T16:15:26 | 2016-11-23T16:15:26 | 74,579,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,092 | py | #this file is an automatic traslation of rapid code
import sys
sys.path.append('..')
import pyrapid
from collections import OrderedDict
import copy
import sys
# Line test/wobj_issue.prg:1
################### Start module ODC ####################
pyrapid.Symbols().PushScope('ODC', None)
##################################################################
############################# start declaration
# Line test/wobj_issue.prg:2
pyrapid.Symbols().AddGlobalSym('hotwirez', pyrapid.Symbol.CreatePers(pyrapid.Symbols().GetSymbol(symbolname='tooldata', modulename='ODC', functionname=None).data([True, [[0.0, 0.0, 733.0], [1.0, 0.0, 0.0, 0.0]], [10.0, [0.04, 0.0, 733.0], [1, 0, 0, 0], 0, 0, 0]])))
# end declaration
############################# start declaration
# Line test/wobj_issue.prg:3
pyrapid.Symbols().AddGlobalSym('Pos_Offset', pyrapid.Symbol.CreatePers(pyrapid.Symbols().GetSymbol(symbolname='pos', modulename='ODC', functionname=None).data([0, 0, 0])))
# end declaration
############################# start declaration
# Line test/wobj_issue.prg:4
pyrapid.Symbols().AddGlobalSym('cur_vel', pyrapid.Symbol.CreatePers(pyrapid.Symbols().GetSymbol(symbolname='speeddata', modulename='ODC', functionname=None).data([200.0, 200.0, 1000.0, 1000.0])))
# end declaration
############################# start declaration
# Line test/wobj_issue.prg:5
pyrapid.Symbols().AddGlobalSym('cur_zone', pyrapid.Symbol.CreatePers(pyrapid.Symbols().GetSymbol(symbolname='zonedata', modulename='ODC', functionname=None).data([False, 1, 1, 50, 0.1, 50, 0.1])))
# end declaration
# Line test/wobj_issue.prg:6
# ## COMMENT:PERS wobjdata cur_wobj := [ FALSE, TRUE, "", [ [ -0.000, -0.000, -1400.000 ], [ 1.000000000, 0.000000000, 0.000000000, 0.000000000 ] ], [ [0, 0, 0], [1, 0, 0 ,0] ] ]; ##
############################# start declaration
# Line test/wobj_issue.prg:8
pyrapid.Symbols().AddGlobalSym('cnstNoStepIn', pyrapid.Symbol.CreatePers(pyrapid.Symbols().GetSymbol(symbolname='string', modulename='ODC', functionname=None).data('ohno')))
# end declaration
# Line test/wobj_issue.prg:10
# ## COMMENT: shell name: shell_000 ##
############################# start routine declaration
# Line test/wobj_issue.prg:11
# Start Procedure Definition
def error_rapid_proc_shell_000 (local_exception):
return 0
def undo_rapid_proc_shell_000 ():
pass
def rapid_proc_shell_000 (local_args_scope):
with pyrapid.Symbols().Scope(modulename='ODC', functionname='shell_000', invalue=local_args_scope):
pass
# data definitions
# Line test/wobj_issue.prg:12
# ## COMMENT: <<< group name: grouped_faces_000 >>> ##
# Line test/wobj_issue.prg:13
# ## COMMENT: ruled surface 001 ##
# stantments
#StatementAST shell_000
retry_stantment = True
while retry_stantment:
retry_stantment = False
try:
# Call procedure MoveL
# end routine declaration
##################################################################
pyrapid.Symbols().PopScope()
################### End module ODC ####################
| [
"jelleferinga@gmail.com"
] | jelleferinga@gmail.com |
22c7dca88ffb0a39939a59965d2870f48a2959c2 | bef807fa68ce7b4c77ddf4f4a030f9a5b798e419 | /GEN-SIM/configs/BulkGravToWW_narrow_M-3000_13TeV-madgraph-herwigpp.py | 3f3ea6ebfefea9d8c1a228279ebfc87446360a65 | [] | no_license | clelange/DibosonSignalProductionHerwig | 97cf93963eb27f450e9ad95c549622a1b24d2cdd | f1477a61a709f5338d07b3c8a04131a8bc301783 | refs/heads/master | 2021-01-11T14:19:09.108870 | 2017-02-08T16:26:21 | 2017-02-08T16:26:21 | 81,343,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,886 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: Configuration/GenProduction/python/ThirteenTeV/Grav_Hadronizer_TuneEE5C_13TeV_madgraph_differentPDF_herwigpp_cff.py --filein dbs:/BulkGravToWW_narrow_M-3000_13TeV-madgraph/RunIIWinter15wmLHE-MCRUN2_71_V1-v1/LHE --fileout file:output.root --mc --eventcontent RAWSIM --customise SLHCUpgradeSimulations/Configuration/postLS1Customs.customisePostLS1,Configuration/DataProcessing/Utils.addMonitoring --datatier GEN-SIM --conditions MCRUN2_71_V1::All --beamspot Realistic50ns13TeVCollision --step GEN,SIM --magField 38T_PostLS1 --python_filename BulkGravToWW_narrow_M-3000_13TeV-madgraph-herwigpp_cfg.py --no_exec -n 29
import FWCore.ParameterSet.Config as cms
process = cms.Process('SIM')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.Geometry.GeometrySimDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic50ns13TeVCollision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(29)
)
# Input source
process.source = cms.Source("PoolSource",
secondaryFileNames = cms.untracked.vstring(),
fileNames = cms.untracked.vstring('/store/mc/RunIIWinter15wmLHE/BulkGravToWW_narrow_M-3000_13TeV-madgraph/LHE/MCRUN2_71_V1-v1/00000/2632A0E8-C404-E511-9CFE-0025905C431A.root'),
inputCommands = cms.untracked.vstring('keep *',
'drop LHEXMLStringProduct_*_*_*'),
dropDescendantsOfDroppedBranches = cms.untracked.bool(False)
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.19 $'),
annotation = cms.untracked.string('Configuration/GenProduction/python/ThirteenTeV/Grav_Hadronizer_TuneEE5C_13TeV_madgraph_differentPDF_herwigpp_cff.py nevts:29'),
name = cms.untracked.string('Applications')
)
# Output definition
process.RAWSIMoutput = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = process.RAWSIMEventContent.outputCommands,
fileName = cms.untracked.string('file:output.root'),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('GEN-SIM')
),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
)
)
# Additional output definition
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'MCRUN2_71_V1::All', '')
process.generator = cms.EDFilter("ThePEGHadronizerFilter",
hwpp_cmsDefaults = cms.vstring('+hwpp_basicSetup',
'+hwpp_setParticlesStableForDetector'),
run = cms.string('LHC'),
repository = cms.string('HerwigDefaults.rpo'),
dataLocation = cms.string('${HERWIGPATH}'),
hwpp_setParticlesStableForDetector = cms.vstring('set /Herwig/Particles/mu-:Stable Stable',
'set /Herwig/Particles/mu+:Stable Stable',
'set /Herwig/Particles/Sigma-:Stable Stable',
'set /Herwig/Particles/Sigmabar+:Stable Stable',
'set /Herwig/Particles/Lambda0:Stable Stable',
'set /Herwig/Particles/Lambdabar0:Stable Stable',
'set /Herwig/Particles/Sigma+:Stable Stable',
'set /Herwig/Particles/Sigmabar-:Stable Stable',
'set /Herwig/Particles/Xi-:Stable Stable',
'set /Herwig/Particles/Xibar+:Stable Stable',
'set /Herwig/Particles/Xi0:Stable Stable',
'set /Herwig/Particles/Xibar0:Stable Stable',
'set /Herwig/Particles/Omega-:Stable Stable',
'set /Herwig/Particles/Omegabar+:Stable Stable',
'set /Herwig/Particles/pi+:Stable Stable',
'set /Herwig/Particles/pi-:Stable Stable',
'set /Herwig/Particles/K+:Stable Stable',
'set /Herwig/Particles/K-:Stable Stable',
'set /Herwig/Particles/K_S0:Stable Stable',
'set /Herwig/Particles/K_L0:Stable Stable'),
generatorModule = cms.string('/Herwig/Generators/LHCGenerator'),
eventHandlers = cms.string('/Herwig/EventHandlers'),
hwpp_basicSetup = cms.vstring('create ThePEG::RandomEngineGlue /Herwig/RandomGlue',
'set /Herwig/Generators/LHCGenerator:RandomNumberGenerator /Herwig/RandomGlue',
'set /Herwig/Generators/LHCGenerator:NumberOfEvents 10000000',
'set /Herwig/Generators/LHCGenerator:DebugLevel 1',
'set /Herwig/Generators/LHCGenerator:UseStdout 0',
'set /Herwig/Generators/LHCGenerator:PrintEvent 0',
'set /Herwig/Generators/LHCGenerator:MaxErrors 10000'),
herwigNewPhysics = cms.vstring('cd /Herwig/Particles',
'create ThePEG::ParticleData graviton',
'setup graviton 39 graviton 1000 0.0 0.0 0.0 0 0 5 0',
'cd /'),
hwpp_ue_EE5CEnergyExtrapol = cms.vstring('set /Herwig/UnderlyingEvent/MPIHandler:EnergyExtrapolation Power',
'set /Herwig/UnderlyingEvent/MPIHandler:ReferenceScale 7000.*GeV',
'set /Herwig/UnderlyingEvent/MPIHandler:Power 0.33',
'set /Herwig/UnderlyingEvent/MPIHandler:pTmin0 3.91*GeV'),
hwpp_ue_EE5C = cms.vstring('+hwpp_ue_EE5CEnergyExtrapol',
'set /Herwig/Hadronization/ColourReconnector:ColourReconnection Yes',
'set /Herwig/Hadronization/ColourReconnector:ReconnectionProbability 0.49',
'set /Herwig/Partons/RemnantDecayer:colourDisrupt 0.80',
'set /Herwig/UnderlyingEvent/MPIHandler:InvRadius 2.30',
'set /Herwig/UnderlyingEvent/MPIHandler:softInt Yes',
'set /Herwig/UnderlyingEvent/MPIHandler:twoComp Yes',
'set /Herwig/UnderlyingEvent/MPIHandler:DLmode 2'),
hwpp_pdf_CTEQ6LL_Hard_CUETHS1 = cms.vstring('+hwpp_pdf_CTEQ6L1_Hard_CUETHS1'),
hwpp_pdf_CTEQ6LL_Hard = cms.vstring('+hwpp_pdf_CTEQ6L1_Hard'),
hwpp_pdf_CTEQ6L1_Hard = cms.vstring('+hwpp_pdf_CTEQ6L1_Hard_Common',
'+hwpp_ue_EE5C'),
hwpp_pdf_CTEQ6L1_Common = cms.vstring('create ThePEG::LHAPDF /Herwig/Partons/cmsPDFSet ThePEGLHAPDF.so',
'set /Herwig/Partons/cmsPDFSet:PDFName cteq6ll.LHpdf',
'set /Herwig/Partons/cmsPDFSet:RemnantHandler /Herwig/Partons/HadronRemnants',
'set /Herwig/Particles/p+:PDF /Herwig/Partons/cmsPDFSet',
'set /Herwig/Particles/pbar-:PDF /Herwig/Partons/cmsPDFSet'),
hwpp_pdf_CTEQ6L1_CUETHS1 = cms.vstring('+hwpp_pdf_CTEQ6L1_Common',
'+hwpp_ue_CUETHS1'),
hwpp_pdf_CTEQ6L1 = cms.vstring('+hwpp_pdf_CTEQ6L1_Common',
'+hwpp_ue_EE5C'),
hwpp_pdf_CTEQ6LL_CUETHS1 = cms.vstring('+hwpp_pdf_CTEQ6L1_CUETHS1'),
hwpp_pdf_CTEQ6L1_Hard_Common = cms.vstring('create ThePEG::LHAPDF /Herwig/Partons/cmsHardPDFSet ThePEGLHAPDF.so',
'set /Herwig/Partons/cmsHardPDFSet:PDFName cteq6ll.LHpdf',
'set /Herwig/Partons/cmsHardPDFSet:RemnantHandler /Herwig/Partons/HadronRemnants'),
hwpp_pdf_CTEQ6L1_Hard_CUETHS1 = cms.vstring('+hwpp_pdf_CTEQ6L1_Hard_Common',
'+hwpp_ue_CUETHS1'),
hwpp_pdf_CTEQ6LL = cms.vstring('+hwpp_pdf_CTEQ6L1'),
hwpp_pdf_NNPDF30LO_Hard = cms.vstring('create ThePEG::LHAPDF /Herwig/Partons/cmsHardPDFSet ThePEGLHAPDF.so',
'set /Herwig/Partons/cmsHardPDFSet:PDFName NNPDF30_lo_as_0130.LHgrid',
'set /Herwig/Partons/cmsHardPDFSet:RemnantHandler /Herwig/Partons/HadronRemnants'),
hwpp_pdf_NNPDF30LO = cms.vstring('create ThePEG::LHAPDF /Herwig/Partons/cmsPDFSet ThePEGLHAPDF.so',
'set /Herwig/Partons/cmsPDFSet:PDFName NNPDF30_lo_as_0130.LHgrid',
'set /Herwig/Partons/cmsPDFSet:RemnantHandler /Herwig/Partons/HadronRemnants',
'set /Herwig/Particles/p+:PDF /Herwig/Partons/cmsPDFSet',
'set /Herwig/Particles/pbar-:PDF /Herwig/Partons/cmsPDFSet'),
hwpp_cm_13TeV = cms.vstring('set /Herwig/Generators/LHCGenerator:EventHandler:LuminosityFunction:Energy 13000.0',
'set /Herwig/Shower/Evolver:IntrinsicPtGaussian 2.2*GeV'),
hwpp_LHE_Powheg_Common = cms.vstring('+hwpp_LHE_Common',
'set /Herwig/Shower/Evolver:HardVetoMode Yes',
'set /Herwig/Shower/Evolver:HardVetoReadOption PrimaryCollision'),
hwpp_LHE_Powheg = cms.vstring('+hwpp_LHE_Powheg_Common',
'set /Herwig/EventHandlers/LHEReader:PDFA /Herwig/Partons/cmsPDFSet',
'set /Herwig/EventHandlers/LHEReader:PDFB /Herwig/Partons/cmsPDFSet'),
hwpp_LHE_MadGraph = cms.vstring('+hwpp_LHE_Common',
'set /Herwig/EventHandlers/LHEReader:PDFA /Herwig/Partons/cmsPDFSet',
'set /Herwig/EventHandlers/LHEReader:PDFB /Herwig/Partons/cmsPDFSet'),
hwpp_LHE_Common = cms.vstring('create ThePEG::Cuts /Herwig/Cuts/NoCuts',
'create ThePEG::LesHouchesInterface /Herwig/EventHandlers/LHEReader',
'set /Herwig/EventHandlers/LHEReader:Cuts /Herwig/Cuts/NoCuts',
'set /Herwig/EventHandlers/LHEReader:MomentumTreatment RescaleEnergy',
'set /Herwig/EventHandlers/LHEReader:WeightWarnings 0',
'set /Herwig/EventHandlers/LHEReader:InitPDFs 0',
'create ThePEG::LesHouchesEventHandler /Herwig/EventHandlers/LHEHandler',
'insert /Herwig/EventHandlers/LHEHandler:LesHouchesReaders 0 /Herwig/EventHandlers/LHEReader',
'set /Herwig/EventHandlers/LHEHandler:WeightOption VarNegWeight',
'set /Herwig/EventHandlers/LHEHandler:PartonExtractor /Herwig/Partons/QCDExtractor',
'set /Herwig/EventHandlers/LHEHandler:CascadeHandler /Herwig/Shower/ShowerHandler',
'set /Herwig/EventHandlers/LHEHandler:HadronizationHandler /Herwig/Hadronization/ClusterHadHandler',
'set /Herwig/EventHandlers/LHEHandler:DecayHandler /Herwig/Decays/DecayHandler',
'insert /Herwig/EventHandlers/LHEHandler:PreCascadeHandlers 0 /Herwig/NewPhysics/DecayHandler',
'set /Herwig/Generators/LHCGenerator:EventHandler /Herwig/EventHandlers/LHEHandler',
'set /Herwig/Shower/Evolver:MaxTry 100',
'set /Herwig/Shower/Evolver:HardVetoScaleSource Read',
'set /Herwig/Shower/KinematicsReconstructor:ReconstructionOption General',
'set /Herwig/Shower/KinematicsReconstructor:InitialInitialBoostOption LongTransBoost',
'+hwpp_MECorr_Common'),
hwpp_LHE_MadGraph_DifferentPDFs = cms.vstring('+hwpp_LHE_Common',
'set /Herwig/EventHandlers/LHEReader:PDFA /Herwig/Partons/cmsHardPDFSet',
'set /Herwig/EventHandlers/LHEReader:PDFB /Herwig/Partons/cmsHardPDFSet'),
hwpp_LHE_Powheg_DifferentPDFs = cms.vstring('+hwpp_LHE_Powheg_Common',
'set /Herwig/EventHandlers/LHEReader:PDFA /Herwig/Partons/cmsHardPDFSet',
'set /Herwig/EventHandlers/LHEReader:PDFB /Herwig/Partons/cmsHardPDFSet'),
hwpp_MECorr_On = cms.vstring('+hwpp_MECorr_Common',
'set /Herwig/Shower/Evolver:MECorrMode Yes'),
hwpp_MECorr_SoftOn = cms.vstring('+hwpp_MECorr_Common',
'set /Herwig/Shower/Evolver:MECorrMode Soft'),
hwpp_MECorr_Common = cms.vstring('set /Herwig/Shower/Evolver:MECorrMode No'),
hwpp_MECorr_HardOn = cms.vstring('+hwpp_MECorr_Common',
'set /Herwig/Shower/Evolver:MECorrMode Hard'),
hwpp_MECorr_Off = cms.vstring('+hwpp_MECorr_Common'),
configFiles = cms.vstring(),
crossSection = cms.untracked.double(-1),
parameterSets = cms.vstring('hwpp_cmsDefaults',
'herwigNewPhysics',
'hwpp_ue_EE5C',
'hwpp_cm_13TeV',
'hwpp_pdf_CTEQ6L1',
'hwpp_pdf_NNPDF30LO_Hard',
'hwpp_LHE_MadGraph_DifferentPDFs',
'hwpp_MECorr_Off'),
filterEfficiency = cms.untracked.double(1.0)
)
process.ProductionFilterSequence = cms.Sequence(process.generator)
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step,process.simulation_step,process.endjob_step,process.RAWSIMoutput_step)
# filter all path with the production filter sequence
for path in process.paths:
getattr(process,path)._seq = process.ProductionFilterSequence * getattr(process,path)._seq
# customisation of the process.
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# Automatic addition of the customisation function from SLHCUpgradeSimulations.Configuration.postLS1Customs
from SLHCUpgradeSimulations.Configuration.postLS1Customs import customisePostLS1
#call to customisation function customisePostLS1 imported from SLHCUpgradeSimulations.Configuration.postLS1Customs
process = customisePostLS1(process)
# End of customisation functions
| [
"clemens.lange@cern.ch"
] | clemens.lange@cern.ch |
cf5419266d22c7d8979c1cc0e6bef7b74e75950f | 4819ed98a31fce25ea53ba8368e817a3c4165768 | /Estructuras repetitivas/complementarios/TP7.py | a793c48ca41a2e7591b0f19c278462bd7761ac19 | [] | no_license | Lusius045/LucioRP | 155e929549cd528e7bc9eda82567555ca1295734 | 0df07f596887e8743426317cd468aafff4e4a9eb | refs/heads/master | 2022-10-10T18:36:12.728135 | 2022-09-13T14:03:35 | 2022-09-13T14:03:35 | 243,602,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | print("Ingrese un número:")
num = int(input())
const = 0
for i in range(2,num):
if num % i == 0:
const = const+1
if const == 0:
print(num," es un número primo.")
else:
print(num," no es un número primo.") | [
"noreply@github.com"
] | noreply@github.com |
3938108372cd25c831552fd33498eec3bf088db5 | e214193fdbc342ce1b84ad4f35bd6d64de7a8767 | /bsn/template_app/conifg/default/id.py | 363401ba409373ea501487650145067b731965a0 | [] | no_license | bsn069/py | 78f791dab87c3246a1a173263a703c63c543c8ad | 3b6c2070d38f61eb8511495d38b1cec522ad6de7 | refs/heads/master | 2020-03-10T04:30:00.282303 | 2018-10-07T15:29:45 | 2018-10-07T15:29:45 | 129,193,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from bsn.common import file_import_tree
file_import_tree.file_begin(__name__)
import os
f_strFileName = os.path.split(__file__)[1]
f_strFileBaseName = os.path.splitext(f_strFileName)[0]
from .config import f_mapConfig
f_mapConfig['id'] = 1
file_import_tree.file_end(__name__) | [
"513026809@qq.com"
] | 513026809@qq.com |
0e272ec741ac296e70453d134d506c54d53c8c38 | cbd5be8ec36d68dce7ebbfd78277ffe80bf0e50d | /mult13.py | f366f60098c300deb2b74512802ef766f9e5e614 | [] | no_license | Gaya3balu0509/program | 56afa3451da71d7e434b3a1c10d22cef71e63acb | 9b1c5c42b9f57368ecc2ee274e56d2ee094a58c8 | refs/heads/master | 2020-05-28T03:19:43.368488 | 2019-08-12T11:35:30 | 2019-08-12T11:35:30 | 188,865,443 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | numb1=int(input())
if numb1%13==0:
print("yes")
else:
print("no")
| [
"noreply@github.com"
] | noreply@github.com |
8f3b0581124daddbc9643e5f936c5d6e1a07bd9a | 053a340d04b17126fb9ce19d69b098c0c1204417 | /yxm/yxm/apps/area/migrations/0001_initial.py | f45404d904a2bc628c6bac9d0f8014b7b70aa7a4 | [] | no_license | H-H1/django-mall | 2e729a1cbc4c1772f2a9b0b9c139f80d07fd9915 | e8e6c7f90cb21ee0f80609997d4e19b13f737ec4 | refs/heads/main | 2023-06-06T19:41:35.848869 | 2021-07-13T04:23:16 | 2021-07-13T04:23:16 | 385,458,429 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | # Generated by Django 2.2.17 on 2021-05-04 10:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='名称')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='subs', to='area.Area', verbose_name='上级行政区划')),
],
options={
'verbose_name': '行政区划',
'verbose_name_plural': '行政区划',
'db_table': 'tb_areas',
},
),
]
| [
"2353367985H-H1@users.noreply.github.com"
] | 2353367985H-H1@users.noreply.github.com |
100aae853ebdf97fc26c93f4179915db3c17836d | 5024b82798ddeeb216b1e67d12a6dba97f886d32 | /Basic/ejer01.py | 4fd002bfbcd51315de2533a8de587268b52cd562 | [] | no_license | Gomez5sh/PyLearn | d85878ef299c556b188d35d53a3396adacd9caa9 | 869ddb72a2d09ba862dc7b1732c44fd75419bb5e | refs/heads/master | 2023-02-18T01:45:10.380095 | 2020-09-04T05:28:48 | 2020-09-04T05:28:48 | 292,753,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | # Declaraciones, sentencias y multilinea
valor = 10*5
print(valor)
valor = 10 * 5 + 7 * 3 \
/ 11 * 8 + 34 \
+ 12 - 10 * 40 - 25
print(valor) | [
"eagomez2387@gmail.com"
] | eagomez2387@gmail.com |
2e772f55c2c27f68f6544766d08317741c917946 | 0529196c4d0f8ac25afa8d657413d4fc1e6dd241 | /runnie0427/17675/17675.pypy3.py | 7063ffb72b39e59872c20046c6617499be8e6fde | [] | no_license | riyuna/boj | af9e1054737816ec64cbef5df4927c749808d04e | 06420dd38d4ac8e7faa9e26172b30c9a3d4e7f91 | refs/heads/master | 2023-03-17T17:47:37.198570 | 2021-03-09T06:11:41 | 2021-03-09T06:11:41 | 345,656,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,372 | py | <!DOCTYPE html>
<html lang="ko">
<head>
<title>Baekjoon Online Judge</title><meta name="viewport" content="width=device-width, initial-scale=1.0"><meta charset="utf-8"><meta name="author" content="스타트링크 (Startlink)"><meta name="keywords" content="ACM-ICPC, ICPC, 프로그래밍, 온라인 저지, 정보올림피아드, 코딩, 알고리즘, 대회, 올림피아드, 자료구조"><meta http-equiv="X-UA-Compatible" content="IE=edge"><meta property="og:type" content="website"><meta property="og:image" content="http://onlinejudgeimages.s3-ap-northeast-1.amazonaws.com/images/boj-og-1200.png"><meta property="og:site_name" content="Baekjoon Online Judge"><meta name="format-detection" content = "telephone=no"><meta name="msapplication-config" content="none"><link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon.png"><link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png"><link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png"><link rel="manifest" href="/site.webmanifest"><link rel="mask-icon" href="/safari-pinned-tab.svg" color="#0076c0"><meta name="msapplication-TileColor" content="#00aba9"><meta name="theme-color" content="#ffffff"><link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.2.0/css/bootstrap.min.css"><link rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/unify/css/style.css?version=20210107"><link href="https://fonts.googleapis.com/css?family=Noto+Sans+KR:400,700|Open+Sans:400,400i,700,700i|Source+Code+Pro&subset=korean" rel="stylesheet"><link rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/css/connect.css?version=20210107"><link rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/css/result.css?version=20210107"><link rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/unify/css/custom.css?version=20210107"><link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.6.3/css/font-awesome.css"><link rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/unify/css/theme-colors/blue.css?version=20210107"><link 
rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/css/pace.css">
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-10874097-3"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-10874097-3');
</script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/noty/3.1.4/noty.min.css" /><meta name="username" content="">
<link rel="stylesheet" href="https://ddo7jzca0m2vt.cloudfront.net/unify/css/pages/page_404_error.css">
</head>
<body>
<div class="wrapper">
<div class="header no-print"><div class="topbar"><div class="container"><ul class="loginbar pull-right"><li><a href = "/register">회원가입</a></li><li class="topbar-devider"></li><li><a href = "/login?next=%2Fsource%2Fdownload%2F22645369">로그인</a></li></ul></div></div><div class="navbar navbar-default mega-menu" role="navigation"><div class="container"><div class="navbar-header"><button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-responsive-collapse"><span class="sr-only">Toggle navigation</span><span class="fa fa-bars"></span></button><a class="navbar-brand" href="/"><img id="logo-header" src="https://d2gd6pc034wcta.cloudfront.net/images/logo@2x.png" alt="Logo" data-retina></a></div><div class="collapse navbar-collapse navbar-responsive-collapse"><ul class="nav navbar-nav"><li class="dropdown mega-menu-fullwidth "><a href="javascript:void(0);" class="dropdown-toggle" data-toggle="dropdown">문제</a><ul class="dropdown-menu"><li><div class="mega-menu-content"><div class="container"><div class="row equal-height"><div class="col-md-3 equal-height-in"><ul class="list-unstyled equal-height-list"><li><h3>문제</h3></li><li><a href = "/problemset">전체 문제</a></li><li><a href = "/category">문제 출처</a></li><li><a href = "/step">단계별로 풀어보기</a></li><li><a href = "/problem/tags">알고리즘 분류</a></li><li><a href = "/problem/added">새로 추가된 문제</a></li><li><a href = "/problem/added/1">새로 추가된 영어 문제</a></li><li><a href = "/problem/ranking">문제 순위</a></li></ul></div><div class="col-md-3 equal-height-in"><ul class="list-unstyled equal-height-list"><li><h3>문제</h3></li><li><a href="/problem/only">푼 사람이 한 명인 문제</a></li><li><a href="/problem/nobody">아무도 못 푼 문제</a></li><li><a href="/problem/recent/submit">최근 제출된 문제</a></li><li><a href="/problem/recent/accepted">최근 풀린 문제</a></li><li><a href="/problem/random">랜덤</a></li></ul></div><div class="col-md-3 equal-height-in"><ul class="list-unstyled equal-height-list"><li><h3>출처</h3></li><li><a href = "/category/1">ICPC</a></li><li><a 
href = "/category/2">Olympiad</a></li><li><a href = "/category/55">한국정보올림피아드</a></li><li><a href = "/category/57">한국정보올림피아드시․도지역본선</a></li><li><a href = "/category/318">전국 대학생 프로그래밍 대회 동아리 연합</a></li><li><a href = "/category/5">대학교 대회</a></li><li><a href = "/category/428">카카오 코드 페스티벌</a></li><li><a href = "/category/215">Coder's High</a></li></ul></div><div class="col-md-3 equal-height-in"><ul class="list-unstyled equal-height-list"><li><h3>ICPC</h3></li><li><a href = "/category/7">Regionals</a></li><li><a href = "/category/4">World Finals</a></li><li><a href = "/category/211">Korea Regional</a></li><li><a href = "/category/34">Africa and the Middle East Regionals</a></li><li><a href = "/category/10">Europe Regionals</a></li><li><a href = "/category/103">Latin America Regionals</a></li><li><a href = "/category/8">North America Regionals</a></li><li><a href = "/category/92">South Pacific Regionals</a></li></ul></div></div></div></div></li></ul></li><li><a href = "/workbook/top">문제집</a></li><li><a href = "/contest/official/list">대회<span class='badge badge-red rounded-2x'>2</span></a></li><li><a href = "/status">채점 현황</a></li><li><a href = "/ranklist">랭킹</a></li><li><a href = "/board/list/all">게시판</a></li><li><a href = "/group/list/all">그룹</a></li><li><a href = "/blog/list">블로그</a></li><li><a href = "/lectures">강의</a></li><li><a href = "/search"><i class="fa fa-search search-btn"></i></a></li></ul></div></div></div></div><form action="/logout" method="post" id="logout_form"><input type='hidden' value='%2Fsource%2Fdownload%2F22645369' name="next"></form>
<div class="container content">
<div class="col-md-8 col-md-offset-2">
<div class="error-v1">
<span class="error-v1-title">404</span>
<span>Not found</span>
<div class="margin-bottom-20"></div>
</div>
<div class="text-center">
<span style="font-size:18px;">강의 슬라이드의 첨부 소스 코드가 404 에러가 뜨는 경우에는 링크를 복사/붙여넣기 해주세요.</span>
</div>
<div class="margin-bottom-40"></div>
</div>
</div>
<div class="footer-v3 no-print"><div class="footer"><div class="container"><div class="row"><div class="col-sm-3 md-margin-bottom-40"><div class="thumb-headline"><h2>Baekjoon Online Judge</h2></div><ul class="list-unstyled simple-list margin-bottom-10"><li><a href="/about">소개</a></li><li><a href="/news">뉴스</a></li><li><a href="/live">생중계</a></li><li><a href="/poll">설문조사</a></li><li><a href="/blog">블로그</a></li><li><a href="/calendar">캘린더</a></li><li><a href="/donate">기부하기</a></li><li><a href="https://github.com/Startlink/BOJ-Feature-Request">기능 추가 요청</a></li><li><a href="https://github.com/Startlink/BOJ-spj">스페셜 저지 제작</a></li><li><a href="/labs">실험실</a></li></ul><div class="thumb-headline"><h2>채점 현황</h2></div><ul class="list-unstyled simple-list"><li><a href="/status">채점 현황</a></li></ul></div><div class="col-sm-3 md-margin-bottom-40"><div class="thumb-headline"><h2>문제</h2></div><ul class="list-unstyled simple-list margin-bottom-10"><li><a href="/problemset">문제</a></li><li><a href="/step">단계별로 풀어보기</a></li><li><a href="/problem/tags">알고리즘 분류</a></li><li><a href="/problem/added">새로 추가된 문제</a></li><li><a href="/problem/added/1">새로 추가된 영어 문제</a></li><li><a href="/problem/ranking">문제 순위</a></li><li><a href="/problem/recent/submit">최근 제출된 문제</a></li><li><a href="/problem/recent/accepted">최근 풀린 문제</a></li><li><a href="/change">재채점 및 문제 수정</a></li></ul><div class="thumb-headline"><h2>유저 대회 / 고등학교 대회</h2></div><ul class="list-inline simple-list margin-bottom"><li><a href="/category/353">FunctionCup</a></li><li><a href="/category/319">kriiicon</a></li><li><a href="/category/420">구데기컵</a></li><li><a href="/category/358">꼬마컵</a></li><li><a href="/category/421">네블컵</a></li><li><a href="/category/413">소프트콘</a></li><li><a href="/category/416">웰노운컵</a></li><li><a href="/category/detail/1743">HYEA Cup</a></li><li><a href="/category/364">경기과학고등학교</a></li><li><a href="/category/417">대구과학고등학교</a></li><li><a href="/category/429">부산일과학고</a></li><li><a 
href="/category/435">서울과학고등학교</a></li><li><a href="/category/394">선린인터넷고등학교</a></li></ul></div><div class="col-sm-3 md-margin-bottom-40"><div class="thumb-headline"><h2>출처</h2></div><ul class="list-unstyled simple-list margin-bottom-10"><li><a href="/category/1">ICPC</a></li><li><a href="/category/211">ICPC Korea Regional</a></li><li><a href="/category/2">Olympiad</a></li><li><a href="/category/55">한국정보올림피아드</a></li><li><a href="/category/57">한국정보올림피아드시․도지역본선</a></li><li><a href="/category/318">전국 대학생 프로그래밍 대회 동아리 연합</a></li><li><a href="/category/5">대학교 대회</a></li><li><a href="/category/428">카카오 코드 페스티벌</a></li><li><a href="/category/215">Coder's High</a></li></ul><div class="thumb-headline"><h2>대학교 대회</h2></div><ul class="list-inline simple-list"><li><a href="/category/320">KAIST</a></li><li><a href="/category/426">POSTECH</a></li><li><a href="/category/341">고려대학교</a></li><li><a href="/category/434">광주과학기술원</a></li><li><a href="/category/361">국민대학교</a></li><li><a href="/category/83">서강대학교</a></li><li><a href="/category/354">서울대학교</a></li><li><a href="/category/352">숭실대학교</a></li><li><a href="/category/408">아주대학교</a></li><li><a href="/category/334">연세대학교</a></li><li><a href="/category/336">인하대학교</a></li><li><a href="/category/347">전북대학교</a></li><li><a href="/category/400">중앙대학교</a></li><li><a href="/category/402">충남대학교</a></li><li><a href="/category/418">한양대 ERICA</a></li><li><a href="/category/363">홍익대학교</a></li><li><a href="/category/409">경인지역 6개대학 연합 프로그래밍 경시대회</a></li></ul></div><div class="col-sm-3 md-margin-bottom-40"><div class="thumb-headline"><h2>도움말</h2></div><ul class="list-unstyled simple-list margin-bottom-10"><li><a href="/help/judge">채점 도움말 및 채점 환경</a></li><li><a href="/help/rejudge">재채점 안내</a></li><li><a href="/help/rte">런타임 에러 도움말</a></li><li><a href="/help/problem">문제 스타일 안내</a></li><li><a href="/help/language">컴파일 또는 실행 옵션, 컴파일러 버전, 언어 도움말</a></li><li><a href="/help/workbook">문제집 도움말</a></li><li><a href="/help/contest">대회 개최 안내</a></li><li><a 
href="/help/problem-add">문제 출제 안내</a></li><li><a href="/help/rule">이용 규칙</a></li><li><a href="/help/stat">통계 도움말</a></li><li><a href="/help/question">질문 도움말</a></li><li><a href="/help/faq">자주묻는 질문</a></li><li><a href="/help/lecture">강의 안내</a></li><li><a href="/help/short">짧은 주소 안내</a></li><li><a href="/help/ad">광고 안내</a></li></ul></div></div></div><div class="copyright"><div class="container"><div class="row"><div class="col-md-9 col-sm-12"><p>© 2021 All Rights Reserved. <a href="https://startlink.io">주식회사 스타트링크</a> | <a href="/terms">서비스 약관</a> | <a href="/privacy">개인정보 보호</a> | <a href="/terms/payment">결제 이용 약관</a> | <a href="https://boj.startlink.help/hc/ko">도움말</a> | <a href="http://startl.ink/2pmlJaY">광고 문의</a> | <a href="https://github.com/Startlink/update-note/blob/master/boj.md">업데이트 노트</a> | <a href="https://github.com/Startlink/update-note/blob/master/boj-issues.md">이슈</a> | <a href="https://github.com/Startlink/update-note/blob/master/boj-todo.md">TODO</a></p></div><div class="col-md-3 col-sm-12"><ul class="social-icons pull-right"><li><a href="https://www.facebook.com/onlinejudge" data-original-title="Facebook" class="rounded-x social_facebook"></a></li><li><a href="https://startlink.blog" data-original-title="Wordpress" class="rounded-x social_wordpress"></a></li></ul></div></div><div class="row"><div class="col-sm-12"><a href="https://startlink.io" class="hidden-xs"><img src="https://d2gd6pc034wcta.cloudfront.net/logo/startlink-logo-white-only.png" class="pull-right startlink-logo"></a><ul class="list-unstyled simple-list"><li>사업자 등록 번호: 541-88-00682</li><li>대표자명: 최백준</li><li>주소: 서울시 서초구 서초대로74길 29 서초파라곤 412호</li><li>전화번호: 02-521-0487 (이메일로 연락 주세요)</li><li>이메일: <a href="mailto:contacts@startlink.io">contacts@startlink.io</a></li><li>통신판매신고번호: 제 2017-서울서초-2193 호</li></ul></div><div class="col-xs-9"><p id="no-acm-icpc"></p></div><div class="col-xs-3"></div></div></div></div></div>
</div>
<div id="fb-root"></div><script>
window.fbAsyncInit = function() {
FB.init({
appId : '322026491226049',
cookie : true,
xfbml : true,
version : 'v2.8'
});
};
(function(d, s, id) {
var js, fjs = d.getElementsByTagName(s)[0];
if (d.getElementById(id)) return;
js = d.createElement(s); js.id = id;
js.src = "//connect.facebook.net/ko_KR/sdk.js";
fjs.parentNode.insertBefore(js, fjs);
}(document, 'script', 'facebook-jssdk'));
</script>
<script>
!function(f,b,e,v,n,t,s){ if(f.fbq)return;n=f.fbq=function(){ n.callMethod?
n.callMethod.apply(n,arguments):n.queue.push(arguments) };if(!f._fbq)f._fbq=n;
n.push=n;n.loaded=!0;n.version='2.0';n.queue=[];t=b.createElement(e);t.async=!0;
t.src=v;s=b.getElementsByTagName(e)[0];s.parentNode.insertBefore(t,s) }(window,
document,'script','//connect.facebook.net/en_US/fbevents.js');
fbq('init', '1670563073163149');
fbq('track', 'PageView');
</script>
<noscript><img height="1" width="1" style="display:none" src="https://www.facebook.com/tr?id=1670563073163149&ev=PageView&noscript=1"/></noscript><script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script><script src="https://cdnjs.cloudflare.com/ajax/libs/jquery-migrate/3.0.1/jquery-migrate.min.js"></script><script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.2.0/js/bootstrap.min.js"></script><script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.21.0/moment.min.js"></script><script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.21.0/locale/ko.js"></script><script type="text/javascript" src="https://ddo7jzca0m2vt.cloudfront.net/unify/js/app.min.js?version=20210107"></script><script type="text/javascript">jQuery(document).ready(function() {App.init(0);});</script><!--[if lt IE 9]><script src="https://ddo7jzca0m2vt.cloudfront.net/unify/plugins/respond.js"></script><script src="https://ddo7jzca0m2vt.cloudfront.net/unify/plugins/html5shiv.js"></script><script src="https://ddo7jzca0m2vt.cloudfront.net/unify/js/plugins/placeholder-IE-fixes.js"></script><![endif]--><script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/pace/1.0.2/pace.min.js"></script><script src="https://js.pusher.com/4.2/pusher.min.js"></script><script src="https://cdnjs.cloudflare.com/ajax/libs/noty/3.1.4/noty.min.js"></script>
<script>
window.MathJax = {
tex: {
inlineMath: [ ['$', '$'], ['\\(', '\\)'] ],
displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
processEscapes: true,
tags: "ams",
autoload: {
color: [],
colorv2: ['color']
},
packages: { '[+]': ['noerrors'] }
},
options: {
ignoreHtmlClass: "no-mathjax|redactor-editor",
processHtmlClass: 'mathjax',
enableMenu: false
},
chtml: {
scale: 0.9
},
loader: {
load: ['input/tex', 'output/chtml', '[tex]/noerrors'],
}
};
</script><script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script><script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
</body>
</html> | [
"riyuna0427@gmail.com"
] | riyuna0427@gmail.com |
f58c308b30604b42295c8c1c08aba4f6488df298 | fc0dc72673e8867f71dfd94122fa4bce99ae224d | /udemy/lesson12_data_structures/dict_word_count.py | f8944d7b269fa0f078f10b910f7281e476dc06a9 | [] | no_license | rzuniga64/python | 2fec4a8a3cb2397849004b46312db885c41321b9 | dbdf65d282477803f57c3a35a4d50491f7943805 | refs/heads/master | 2021-01-17T13:57:51.632635 | 2017-02-15T05:28:55 | 2017-02-15T05:28:55 | 11,941,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | sentence = "now is the time for all good people to come"
# Extend the sample sentence (begun on the previous line), then show its
# words in sorted order.
sentence += " to the aid of their party"
words = sorted(sentence.split(' '))
print("Sentence in sorted order:\n")
print(words)
# Tally how often each word occurs; insertion order follows the sorted list.
numWords = {}
for word in words:
    numWords[word] = numWords.get(word, 0) + 1
print("Word list and count: \n")
for key in numWords.keys():
    print(key, numWords[key])
| [
"rzuniga64@gmail.com"
] | rzuniga64@gmail.com |
8d935ff69d7f2dfcb453d956d57f2d6fcd6f448c | 234727cb15c3415e1a090078df6fb8e1092b77bb | /ichecksum.py | fcaeb26f1d65ed48cdc4f1f5679702bdcf89fa39 | [] | no_license | berkantbayraktar/ComputerNetworking-RDT | e13d0a6a36e129953274616cde6180d0bad3b6c2 | 3ec012f48f103a2f23187f41347a428403e09c6c | refs/heads/master | 2023-03-25T18:11:39.248224 | 2018-12-27T11:15:45 | 2018-12-27T11:15:45 | 351,861,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | # An Internet checksum algorithm using Python.
# This program is licensed under the GPL; see LICENSE for details.
# This procedure can be used to calculate the Internet checksum of
# some data. It is adapted from RFC 1071:
#
# ftp://ftp.isi.edu/in-notes/rfc1071.txt
#
# See also:
#
# http://www.netfor2.com/ipsum.htm
# http://www.netfor2.com/checksum.html
import sys
def ichecksum(data, sum=0):
    """Return the Internet Checksum (RFC 1071) of the supplied data.

    Place the return value in the checksum field of a packet.  When the
    packet is received, call this again passing the received checksum as
    *sum*; a result of zero means the checksum detected no error.
    (*sum* keeps its historical name for backward compatibility even
    though it shadows the builtin.)
    """
    total = sum
    length = len(data)
    # Fold consecutive character pairs into big-endian 16 bit words.
    for i in range(0, length - 1, 2):
        total += ((ord(data[i]) << 8) & 0xFF00) + (ord(data[i + 1]) & 0xFF)
    # An odd trailing byte is added unshifted (low byte of a final word).
    if length % 2:
        total += ord(data[-1]) & 0xFF
    # Fold the carries above bit 16 back into the low 16 bits.
    while (total >> 16) > 0:
        total = (total & 0xFFFF) + (total >> 16)
    # One's complement, truncated to 16 bits.
    return ~total & 0xFFFF
# Self-test (Python 2 print statements): compute the checksum of 'list\n';
# feeding it back in as the initial sum should verify to 0.
check = ichecksum('list\n')
print check
print ichecksum('list\n',check) | [
"berkantbayraktar06@gmail.com"
] | berkantbayraktar06@gmail.com |
ca99edf3dd22c0cdb19b5da960df29c915f60306 | 96ac7dd0b18d61e57f0e51efdabf89e344576b56 | /lib/imapwatch/filelikelogger.py | 8e93f9f9a6910dd83b2ffd049467a460cd22a605 | [] | no_license | kennonb/imapwatch | c87c4cf10a52c850a8e4312e3a86f84c7e8da4d7 | 04153d50f03fff77eaa726f6b7cd3e55d962e6b3 | refs/heads/master | 2020-03-16T15:37:55.501312 | 2018-07-07T00:58:31 | 2018-07-07T00:58:31 | 132,752,348 | 0 | 0 | null | 2018-05-09T12:17:29 | 2018-05-09T12:17:29 | null | UTF-8 | Python | false | false | 593 | py | import logging
import sys
class FileLikeLogger(object):
    """
    Fake file-like stream object that redirects writes to a logger instance.
    """

    def __init__(self, logger, log_level=logging.INFO):
        # Target logger and the level every redirected line is logged at.
        self.logger = logger
        self.log_level = log_level

    def _each_handler(self, method_name):
        # Invoke the named zero-argument method on every attached handler.
        for handler in self.logger.handlers:
            getattr(handler, method_name)()

    def write(self, buf):
        # One log record per line; trailing whitespace/newlines are dropped,
        # so a whitespace-only buffer produces no records at all.
        lines = buf.rstrip().splitlines()
        for line in lines:
            self.logger.log(self.log_level, line.rstrip())

    def flush(self):
        self._each_handler('flush')

    def close(self):
        self._each_handler('close')
| [
"m@mdbraber.com"
] | m@mdbraber.com |
9147d66431be067846952b62a328897b2aa98c60 | 56152b48fab2a537442e9302819c14323cde09e3 | /htmlDownloader.py | 714b20e1b97c4ed3b89b5f230f8c0c29a97f1eac | [] | no_license | opyqp/simpleCrawler | dc002595500c27f3fb6435585192cf4f851742dc | 17563cdf2f641c4de89368b2a91064bdadded1f0 | refs/heads/master | 2020-04-14T22:42:53.929748 | 2019-01-05T02:41:48 | 2019-01-05T02:41:48 | 164,174,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | #!/usr/bin/env python
# coding : utf-8
import requests
class htmlDownloader(object):
    """Downloads HTML pages for the crawler via HTTP GET."""

    def download(self, url):
        """Fetch *url* and return its body decoded as UTF-8.

        Returns None when *url* is None or the server does not answer
        with HTTP status 200.
        """
        if url is None:
            print("the url is None:",url)
            return None
        # Present a browser-like User-Agent; some sites reject unknown clients.
        request_headers = {"User-Agent": 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
        response = requests.get(url, headers=request_headers)
        if response.status_code == 200:
            # Force UTF-8 before reading .text so decoding is deterministic.
            response.encoding = 'utf-8'
            return response.text
        print("the resq is None")
        return None
| [
"yuanqinpeng@gmail.com"
] | yuanqinpeng@gmail.com |
6132b5f00b82f3de6048da7a2d0eff2d86c1215e | 22cb6eb3272649e45424b91c0a52a7b2ed588d79 | /Thesis/figures/Kilbinger_banana/plot_first_proposals_2500samples_before_mc_adapt.py | d00bd45b67d72589fe0f708c3d6bb55c1a67749b | [] | no_license | jPhy/master_thesis | 63df053af84fbf852720ceb547af31e1119d25d8 | e466a2a3363b3dfbd684d24079581048c68ab22b | refs/heads/master | 2016-09-05T13:38:55.665091 | 2015-03-17T17:26:33 | 2015-03-17T17:26:33 | 18,561,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,406 | py | #! /usr/bin/python
# Prerun: sample the 20D banana-shaped target with adaptive Markov chains,
# then build first importance-sampling proposals with PMC, variational
# Bayes (VB) and hierarchical clustering (HC), saving a figure for each.
import pypmc
from pypmc.tools import plot_mixture
import numpy as np
from matplotlib import pyplot as plt
# choose a dimension
dim = 20
# load target function
from banana_function import LogTarget; log_target = LogTarget(dim)
print 'running Markov Chains'
# Markov chain prerun --> run 10 Markov chains with different initial points
# define a proposal for the initial Markov chain run:
# local Gauss with 0.1 * "unit matrix" as covariance
mc_prop = pypmc.density.gauss.LocalGauss(np.eye(dim)*.1)
# define initial points for the Markov chain run
# we will draw samples from the uniform distribution between [-5]*dim and [+5]*dim
starts = np.random.uniform(-5.,5.,size=10*dim).reshape((10,dim))
# log_target(starts[i]) must not be -inf!
# (resample any start that lands outside the target's support)
for start in starts:
    while log_target(start) == -np.inf:
        start[:] = np.random.uniform(-5.,5.,size=dim)
# define the Markov chains
mcs = [pypmc.sampler.markov_chain.AdaptiveMarkovChain(log_target, mc_prop, start) for start in starts]
# run and delete burn-in
for mc in mcs:
    mc.run(10**3)
    mc.history.clear()
# run chains and use self adaptation
for mc in mcs:
    for i in range(20):
        mc.run(2500) # TODO less samples to see non prune in PMC
        mc.adapt()
# get the Markov chain data
mcmc_data = [mc.history[:] for mc in mcs]
stacked_data = np.vstack(mcmc_data)
# plot stacked_data (pure Markov Chain data)
plt.figure(); plt.hexbin(stacked_data[:,0], stacked_data[:,1], cmap='gray_r', extent=(-30, 30, -20, 6))
# optional: plot autocorrelation of first chain
# plt.figure(); plt.acorr(mcmc_data[0][:,0] - mcmc_data[0][:,0].mean(), maxlags=1000)
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.savefig('plain_mc_data_2500.png') # not .svg because there too many hexagin objects
print 'Markov Chains done'
# form the "long_patches"
long_patches = pypmc.mix_adapt.r_value.make_r_gaussmix(mcmc_data)
# form first proposal with PMC
print 'running PMC'
pmcmix = pypmc.mix_adapt.pmc.gaussian_pmc(stacked_data[::100], long_patches, copy=True)
# drop components whose weight fell below half the average initial weight
pmcmix.prune(.5/len(long_patches))
for i in range(1000-1):
    print i
    pypmc.mix_adapt.pmc.gaussian_pmc(stacked_data[::100], pmcmix, copy=False)
    pmcmix.prune(.5/len(long_patches))
plt.figure()
plot_mixture(pmcmix)
plt.xlim(-30,30)
plt.ylim(-20,6)
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.savefig('first_prop_pmc_2500.pdf')
print 'PMC done'
# form first proposal with Variational Bayes
# run variational bayes with samples --> use the long patches as initial guess
vb = pypmc.mix_adapt.variational.GaussianInference(stacked_data[::100], initial_guess=long_patches)
print 'running VB...'
vb.run(1000, abs_tol=1e-5, rel_tol=1e-10, prune=.5*len(vb.data)/vb.K, verbose=False)
print 'VB done'
vbmix = vb.make_mixture()
plt.figure()
plot_mixture(vbmix)
plt.xlim(-30,30)
plt.ylim(-20,6)
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.savefig('first_prop_vb_2500.pdf')
# form first proposal with Hierarchical Clustering
# for hierarchical clustering (and VBMerge) form "short_patches"
short_patches = pypmc.tools.patch_data(stacked_data, L=100)
# run hierarchical clustering
hc = pypmc.mix_adapt.hierarchical.Hierarchical(short_patches, long_patches, verbose=True)
print 'running HC...'
hc.run()
print 'HC done'
hcmix = hc.g
plt.figure()
plot_mixture(hcmix)
plt.xlim(-30,30)
plt.ylim(-20,6)
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.savefig('first_prop_hc_2500.pdf')
| [
"gu92biv@mytum.de"
] | gu92biv@mytum.de |
f72ac11ac30be910cc63471b926300b8dfc75327 | 75081787fb06264d4d90b08a18a17bdb9db811e8 | /yaql/cli/__init__.py | 207fa154d6a98115679be25792cd53808a14d3c4 | [
"Apache-2.0"
] | permissive | istalker2/yaql | f39b5e7462544a41a9adcc5f18272ea2bde011bb | 0c1d28c2023b506a9c601b42b1dd6b7714dc0b06 | refs/heads/master | 2021-01-21T16:38:19.408562 | 2014-01-21T15:55:39 | 2014-01-21T15:55:39 | 16,107,265 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | # Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License. | [
"ativelkov@mirantis.com"
] | ativelkov@mirantis.com |
c9e386e60c7f223b7ad420acc9e034ad0036f3e4 | df2bd677efcb8e1afa14b1c7111dd3fb9923dae6 | /app.py | c9474dc4da972b69ad284e918013d1214c5e9960 | [] | no_license | Tenoke/tensorflowjs-rps | 055872f73b43d54388013fa3b617c502261a30b5 | 7c675f869cf2e2969d3b6fbd0650b7419f5a68f0 | refs/heads/master | 2020-03-24T14:58:31.201111 | 2018-07-30T18:17:44 | 2018-07-30T18:31:55 | 142,782,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | from flask import Flask, render_template, request, Response
app = Flask(__name__)
# GET /rps serves the rock-paper-scissors game page.
@app.route('/rps')
def rps():
    return render_template('rps.html')
# POST /save appends the raw request body (one game record) to a local file.
@app.route('/save', methods=['POST'])
def save():
    """
    Optional endpoint for saving user games
    """
    data = request.get_data().decode("utf-8")
    with open("collected-data-others.txt", "a") as file:
        file.write(data + '\n')
    return Response(status=200)
# Start the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| [
"svilen.todorov@nexiona.com"
] | svilen.todorov@nexiona.com |
2b1898bef6d4ef7736e6be2e09c1edf815ece78a | fc3831f9c674fbe6f082be96914f9c7baac1bd6f | /algorithms/find_sums.py | 99424243972c0ef9ea17e78046c4a3aafdc761c4 | [] | no_license | RhysJMartin/algorithms | 872436347d52c6d247ccd40350dfd84c06c2a9cb | b64aa20ce1b3cd63ffbf8a67211762f3556ab5c3 | refs/heads/master | 2020-03-18T16:05:12.991476 | 2018-09-29T09:54:44 | 2018-09-29T09:54:44 | 134,945,572 | 0 | 0 | null | 2018-09-29T09:54:44 | 2018-05-26T08:58:44 | Python | UTF-8 | Python | false | false | 780 | py | import os
from tqdm import tqdm
def load_data(file_name):
    """Read one integer per line from ../data/<file_name> (relative to this
    module) and return them as a list.

    Raises ValueError on a non-numeric line and OSError/IOError when the
    file cannot be opened.
    """
    data_path = os.path.join(os.path.dirname(__file__), '..', 'data', file_name)
    print('loading file: {}'.format(data_path))
    # Context manager guarantees the handle is closed even if int() raises
    # (the original opened the file and never closed it).
    with open(data_path, 'rb') as data_file:
        return [int(line) for line in data_file]
def count_2_sums(data, interval):
    """Count how many targets t in [interval[0], interval[1]] can be written
    as t = x + y with x and y distinct values taken from *data*.

    Parameters:
        data     - iterable of integers (duplicates are irrelevant)
        interval - two-element sequence [low, high], both ends inclusive
    Returns:
        int - number of representable targets
    """
    data_set = set(data)
    count = 0
    for target in tqdm(range(interval[0], interval[1] + 1)):
        # Iterating the deduplicated set (instead of the raw list, as the
        # original did) skips redundant probes for repeated values.
        if any(target - x in data_set and target - x != x for x in data_set):
            count += 1
    return count
if __name__ == '__main__':
    # Count the targets in [-10000, 10000] expressible as a 2-sum of the input.
    _data = load_data('2sum.txt')
    _interval = [-10000, 10000]
print(count_2_sums(_data, _interval)) | [
"rhysjmartin@gmail.com"
] | rhysjmartin@gmail.com |
739c240b7a18b467eba9b58892e797d8eea4f3cb | 5d51470d5c9d7d951170810a7150fbe50ae340db | /print_results_hints.py | d1e1dae1dc0ff135e806190bee56f4298877fb89 | [] | no_license | ElenaM10/Udacity-Project1 | ff42aede5f2645477dd0a7bda4d5dbab2f247238 | 61a39b0e4f0d23fb2a0fbc83a4635398d873df02 | refs/heads/master | 2022-06-05T08:12:01.801874 | 2020-05-05T12:30:15 | 2020-05-05T12:30:15 | 260,978,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,667 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/print_results_hints.py
#
# PROGRAMMER: Elena M
# DATE CREATED: 05/03/2020
# REVISED DATE:
# PURPOSE: This is a *hints* file to help guide students in creating the
# function print_results that prints the results statistics from the
# results statistics dictionary (results_stats_dic). It should also
# allow the user to be able to print out cases of misclassified
# dogs and cases of misclassified breeds of dog using the Results
# dictionary (results_dic).
# This function inputs:
# -The results dictionary as results_dic within print_results
# function and results for the function call within main.
# -The results statistics dictionary as results_stats_dic within
# print_results function and results_stats for the function call within main.
# -The CNN model architecture as model wihtin print_results function
# and in_arg.arch for the function call within main.
# -Prints Incorrectly Classified Dogs as print_incorrect_dogs within
# print_results function and set as either boolean value True or
# False in the function call within main (defaults to False)
# -Prints Incorrectly Classified Breeds as print_incorrect_breed within
# print_results function and set as either boolean value True or
# False in the function call within main (defaults to False)
# This function does not output anything other than printing a summary
# of the final results.
##
# TODO 6: EDIT and ADD code BELOW to do the following that's stated in the
# comments below that start with "TODO: 6" for the print_results function.
# Specifically edit and add code below within the the print_results function.
# Notice that this function doesn't return anything because it prints
# a summary of the results using results_dic and results_stats_dic
#
def print_results(results_dic, results_stats_dic, model,
                  print_incorrect_dogs = False, print_incorrect_breed = False):
    """
    Prints summary results on the classification and then prints incorrectly
    classified dogs and incorrectly classified dog breeds if user indicates
    they want those printouts (use non-default values)
    Parameters:
      results_dic - Dictionary with key as image filename and value as a List
             (index)idx 0 = pet image label (string)
                    idx 1 = classifier label (string)
                    idx 2 = 1/0 (int)  where 1 = match between pet image and
                            classifer labels and 0 = no match between labels
                    idx 3 = 1/0 (int)  where 1 = pet image 'is-a' dog and
                            0 = pet Image 'is-NOT-a' dog.
                    idx 4 = 1/0 (int)  where 1 = Classifier classifies image
                            'as-a' dog and 0 = Classifier classifies image
                            'as-NOT-a' dog.
      results_stats_dic - Dictionary that contains the results statistics (either
                     a percentage or a count) where the key is the statistic's
                     name (starting with 'pct' for percentage or 'n' for count)
                     and the value is the statistic's value
      model - Indicates which CNN model architecture was used by the
              classifier function to classify the pet images,
              values must be either: resnet alexnet vgg (string)
      print_incorrect_dogs - True prints incorrectly classified dog images and
                             False doesn't print anything(default) (bool)
      print_incorrect_breed - True prints incorrectly classified dog breeds and
                              False doesn't print anything(default) (bool)
    Returns:
           None - simply printing results.
    """
    # Prints summary statistics (counts) over the run
    print("\n\n*** Results Summary for CNN Model Architecture",model.upper(),
          "***")
    print("{:20}: {:3d}".format('N Images', results_stats_dic['n_images']))
    print("{:20}: {:3d}".format('N Dog Images', results_stats_dic['n_dogs_img']))
    print("{:20}: {:3d}".format('N Not-Dog Images', results_stats_dic['n_notdogs_img']))

    # Prints summary statistics (percentages) on Model Run
    # NOTE(review): '{:3d}' assumes these statistics are ints — confirm that
    # calculates_results_stats doesn't store floats for the pct_* keys.
    print("\n\n*** Results Summary for percentages for CNN Model Architecture", model.upper(),"***")
    print("{:20}: {:3d}".format('Pct Match', results_stats_dic['pct_match']))
    print("{:20}: {:3d}".format('Pct Correct Dogs', results_stats_dic['pct_correct_dogs']))
    print("{:20}: {:3d}".format('Pct Correct Breed', results_stats_dic['pct_correct_breed']))
    print("{:20}: {:3d}".format('Pct Correct Not Dogs', results_stats_dic['pct_correct_notdogs']))

    # Print every percentage statistic: percentage KEYS start with the letter
    # 'p' (e.g. 'pct_match'), count keys start with 'n'.  The original tested
    # results_stats_dic[key][0], i.e. indexed the numeric VALUE, which raises
    # TypeError — the key itself must be inspected (per TODO 6b).
    for key in results_stats_dic:
        if key[0] == 'p':
            print("{}, {}".format(key, results_stats_dic[key]))

    # IF print_incorrect_dogs == True AND there were images incorrectly
    # classified as dogs or vice versa - print out these cases
    if (print_incorrect_dogs and
        ( (results_stats_dic['n_correct_dogs'] + results_stats_dic['n_correct_notdogs'])
          != results_stats_dic['n_images'] )
       ):
        print("\nINCORRECT Dog/NOT Dog Assignments:")

        # A dog/not-dog misclassification is when exactly one of idx 3
        # (pet 'is-a' dog) and idx 4 (classifier says dog) is set.
        for key in results_dic:
            if ( (results_dic[key][3] == 1 and results_dic[key][4] == 0) or
                 (results_dic[key][3] == 0 and results_dic[key][4] == 1) ):
                # Per TODO 6c: show the pet label and the classifier label
                # (the original printed only a fixed placeholder string).
                print("Real: {:>26}   Classifier: {:>30}".format(results_dic[key][0],
                                                                 results_dic[key][1]))

    # IF print_incorrect_breed == True AND there were dogs whose breeds
    # were incorrectly classified - print out these cases
    if (print_incorrect_breed and
        (results_stats_dic['n_correct_dogs'] != results_stats_dic['n_correct_breed'])
       ):
        print("\nINCORRECT Dog Breed Assignment:")

        # Pet Image Label is-a-Dog, classified as-a-dog but is WRONG breed
        for key in results_dic:
            if ( sum(results_dic[key][3:]) == 2 and
                 results_dic[key][2] == 0 ):
                print("Real: {:>26}   Classifier: {:>30}".format(results_dic[key][0],
                                                                 results_dic[key][1]))
| [
"noreply@github.com"
] | noreply@github.com |
621018c2068da4101237b017b6120c6bc57e8dca | 22cd4bbf1ef7cd4d9e9ec24d2929691ae2e7b07f | /ansible/roles/vm_set/library/vm_network.py | 5fdddb28d83d022c95f3522966cef3b1aaa1640b | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | wadelnn/sonic-mgmt | 64c0285ea7225b42899572bb2241bffef0ff0345 | a2ab261dbadf1884e081c99b9f3f815a2f3b0844 | refs/heads/master | 2021-01-22T02:34:25.054685 | 2017-02-03T22:32:07 | 2017-02-03T22:32:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,629 | py | #!/usr/bin/python
import subprocess
import re
from docker import Client
from ansible.module_utils.basic import *
DOCUMENTATION = '''
---
module: vm_network_create
version_added: "0.1"
author: Pavel Shirshov (pavelsh@microsoft.com)
short_description: Generate virtual network for a set of VMs
description:
- With cmd: 'create' the module:
- creates 32*8 ovs bridges with name template "br-vs{{ vm_set_id }}-vm{{ vm_set_dict[]['num']}}-{{ 0..7 }}" which will be used by FP port of VMs
- creates a linux bridge with name {{ port1_bridge }} for backplane connectivity between VMs
- With cmd: 'destroy' the module:
- destroys 32*8 ovs bridges with name template "br-vs{{ vm_set_id }}-vm{{ vm_set_dict[]['num']}}-{{ 0..7 }}" which were used by FP port of VMs
- destroys a linux bridge with name {{ port1_bridge }} for backplane connectivity between VMs
- With cmd: 'bind' the module:
- creates 32 vlan interfaces on the external interface
- bind this interfaces to the ovs bridges which were created by 'create' command
- bind corresponing interface from ptf_injected container to the ovs bridges
- With cmd: 'unbind' the module:
- destroys 32 vlan interfaces from the external interface
Parameters:
- cmd: One of the commands: 'create', 'bind', 'unbind', 'destroy'
- vm_set_id: identifier for the VM set, a number
- port1_bridge: name of the bridge which will be created for the backplane connectivity
- vm_set_dict: dictionary with VM parameters. Check host_vars/STR-ACS-SERV-0x.yml for details
- fp_mtu: MTU for FP ports
- ext_iface: physical interface which will be used for for vlan creation
- vlan_base: the first vlan for the network
'''
EXAMPLES = '''
- name: Create VM set network. vm set {{ id }}
vm_network:
cmd: 'create'
vm_set_id: "{{ id }}"
port1_bridge: "{{ port1_bridge }}"
vm_set_dict: "{{ VMs }}"
fp_mtu: "{{ fp_mtu_size }}"
ext_iface: "{{ external_iface }}"
vlan_base: "{{ vlan_base }}"
'''
# Sentinel meaning "leave the interface MTU untouched".
DEFAULT_MTU = 0
# Each VM exposes this many front-panel vlan interfaces.
NUM_FP_VLANS_PER_FP = 8
# OVS bridge name: (vm_set_id, vm number, vlan index 0..7).
OVS_BRIDGE_TEMPLATE = 'br-vs%d-vm%d-%d'
# Injected (ptf container) interface name: (vm_set_id, vlan offset).
INJECTED_INTERFACES_TEMPLATE = "inje-%d-%d"
class VMNetwork(object):
    def __init__(self, vm_set_id, vm_set_dict, port1_bridge, vm_set_dict_, ext_iface, vlan_base, fp_mtu=DEFAULT_MTU):
        """Capture the VM-set topology parameters and snapshot the host's
        current interfaces for later idempotency checks.

        fp_mtu of DEFAULT_MTU (0) means "do not change the MTU".
        """
        self.vm_set_id = vm_set_id        # numeric id of this VM set
        self.port1_bridge = port1_bridge  # linux bridge name for the backplane
        self.vm_set_dict = vm_set_dict    # per-VM parameters (num, vlans, ...)
        self.ext_iface = ext_iface        # physical iface carrying the vlans
        self.vlan_base = vlan_base        # first vlan id of this set's range
        self.fp_mtu = fp_mtu              # MTU for front-panel ports
        # Presumably the interface names currently present on the host,
        # parsed by VMNetwork.ifconfig (defined later in this file) — the
        # create_*/destroy_* methods test membership before acting.
        self.host_ifaces = VMNetwork.ifconfig('ifconfig -a')
        return
    def create_port1_bridge(self):
        """Create the backplane linux bridge (if missing) and bring it up."""
        if self.port1_bridge not in self.host_ifaces:
            VMNetwork.cmd('brctl addbr %s' % self.port1_bridge)
        # Bring the bridge up unconditionally — it may exist but be down.
        VMNetwork.cmd('ifconfig %s up' % self.port1_bridge)
        return
    def destroy_port1_bridge(self):
        """Tear down the backplane linux bridge if it currently exists."""
        if self.port1_bridge in self.host_ifaces:
            VMNetwork.cmd('ifconfig %s down' % self.port1_bridge)
            VMNetwork.cmd('brctl delbr %s' % self.port1_bridge)
        return
    def create_fp_bridges(self):
        """Create all 8 front-panel OVS bridges for every VM of the set."""
        for vm in self.vm_set_dict.itervalues():
            for vlan_num in xrange(NUM_FP_VLANS_PER_FP):
                self.create_fp_bridge(vm["num"], vlan_num)
        return
    def create_fp_bridge(self, vm_num, vlan_num):
        """Create one front-panel OVS bridge (if missing), optionally set
        its MTU, and bring it up."""
        vlan_name = OVS_BRIDGE_TEMPLATE % (self.vm_set_id, int(vm_num), vlan_num)
        if vlan_name not in self.host_ifaces:
            VMNetwork.cmd('ovs-vsctl add-br %s' % vlan_name)
        # DEFAULT_MTU (0) means "leave the MTU alone".
        if self.fp_mtu != DEFAULT_MTU:
            VMNetwork.cmd('ifconfig %s mtu %d' % (vlan_name, self.fp_mtu))
        VMNetwork.cmd('ifconfig %s up' % vlan_name)
        return
    def destroy_fp_bridges(self):
        """Destroy all 8 front-panel OVS bridges for every VM of the set."""
        for vm in self.vm_set_dict.itervalues():
            for vlan_num in xrange(NUM_FP_VLANS_PER_FP):
                self.destroy_fp_bridge(vm["num"], vlan_num)
        return
    def destroy_fp_bridge(self, vm_num, vlan_num):
        """Down and delete one front-panel OVS bridge if it exists."""
        vlan_name = OVS_BRIDGE_TEMPLATE % (self.vm_set_id, int(vm_num), vlan_num)
        if vlan_name in self.host_ifaces:
            VMNetwork.cmd('ifconfig %s down' % vlan_name)
            VMNetwork.cmd('ovs-vsctl del-br %s' % vlan_name)
        return
    def up_ext_iface(self):
        """Bring the external physical interface up if the host has it."""
        if self.ext_iface in self.host_ifaces:
            VMNetwork.cmd('ifconfig %s up' % self.ext_iface)
        return
def check_vlans(self, vlans_str, vlans):
if len(vlans) == 0:
return
if len(vlans) > 8:
raise Exception("Wrong vlans parameter. Too many vlans. Maximum is 8: %s" % vlans_str)
for vlan in vlans_str.split(','):
if not vlan.isdigit():
raise Exception("Wrong vlans parameter: %s" % vlans_str)
for vlan in vlans:
if int(vlan) > 31:
raise Exception("Vlan offset %s supposed to be not more then 31: %s" % (vlan, vlans_str))
return
    def bind(self):
        """For every VM: create one vlan sub-interface of the external iface
        per configured vlan offset and wire it into the matching front-panel
        OVS bridge together with the injected (ptf) interface."""
        for vm in self.vm_set_dict.itervalues():
            vm_num = vm['num']
            vlans_str = vm['vlans']
            vlans = [int(vlan) for vlan in vlans_str.split(',')]
            self.check_vlans(vlans_str, vlans)
            for vlan_num, vlan in enumerate(vlans):
                # Absolute vlan id = base of this VM set + per-VM offset.
                vlan_id = self.vlan_base + vlan
                vlan_iface = "%s.%d" % (self.ext_iface, vlan_id)
                injected_iface = INJECTED_INTERFACES_TEMPLATE % (self.vm_set_id, vlan)
                port0_bridge = OVS_BRIDGE_TEMPLATE % (self.vm_set_id, int(vm_num), vlan_num)
                self.create_phys_vlan(vlan_iface, vlan_id)
                self.bind_phys_vlan(port0_bridge, vlan_iface, injected_iface)
        return
    def create_phys_vlan(self, vlan_iface, vlan_id):
        """Create vlan sub-interface vlan_iface (tag vlan_id) on the external
        interface if missing, then bring it up."""
        if vlan_iface not in self.host_ifaces:
            VMNetwork.cmd('vconfig add %s %d' % (self.ext_iface, vlan_id))
        VMNetwork.cmd('ifconfig %s up' % vlan_iface)
        return
    def bind_phys_vlan(self, br_name, vlan_iface, injected_iface):
        """Attach the physical vlan sub-interface and the injected (ptf)
        interface to the bridge, then install OpenFlow rules wiring
        OF port 1 (assumed to be the VM side -- TODO confirm) and
        OF port 2 (the ptf container side) to the vlan interface."""
        ports = VMNetwork.get_ovs_br_ports(br_name)
        if injected_iface not in ports:
            VMNetwork.cmd('ovs-vsctl add-port %s %s' % (br_name, injected_iface))
        if vlan_iface not in ports:
            VMNetwork.cmd('ovs-vsctl add-port %s %s' % (br_name, vlan_iface))
        # Look up the OpenFlow port number OVS assigned to the vlan iface
        bindings = VMNetwork.get_ovs_port_bindings(br_name)
        vlan_iface_id = bindings[vlan_iface]
        # clear old bindings
        VMNetwork.cmd('ovs-ofctl del-flows %s' % br_name)
        # Add flow from a VM to an external iface
        VMNetwork.cmd("ovs-ofctl add-flow %s table=0,in_port=1,action=output:%s" % (br_name, vlan_iface_id))
        # Add flow from external iface to a VM and a ptf container
        VMNetwork.cmd("ovs-ofctl add-flow %s table=0,in_port=%s,action=output:1,2" % (br_name, vlan_iface_id))
        # Add flow from a ptf container to an external iface
        VMNetwork.cmd("ovs-ofctl add-flow %s table=0,in_port=2,action=output:%s" % (br_name, vlan_iface_id))
        return
    def unbind(self):
        """Reverse bind(): detach and destroy the tagged sub-interfaces.

        Two passes: first the vlans declared in host_vars are removed;
        then the OVS database is swept for any leftover port whose name
        contains the external iface (catches stale state from earlier runs).
        """
        # try vlans from the host_vars
        for vm in self.vm_set_dict.itervalues():
            vm_num = vm['num']
            vlans_str = vm['vlans']
            vlans = [int(vlan) for vlan in vlans_str.split(',')]
            self.check_vlans(vlans_str, vlans)
            for vlan_num, vlan in enumerate(vlans):
                vlan_id = self.vlan_base + vlan
                vlan_iface = "%s.%d" % (self.ext_iface, vlan_id)
                injected_iface = INJECTED_INTERFACES_TEMPLATE % (self.vm_set_id, vlan)
                port0_bridge = OVS_BRIDGE_TEMPLATE % (self.vm_set_id, int(vm_num), vlan_num)
                self.unbind_phys_vlan(port0_bridge, vlan_iface)
                self.destroy_phys_vlan(vlan_iface)
        # try vlans from the ovs db
        for vm in self.vm_set_dict.itervalues():
            vm_num = vm['num']
            for vlan_num in xrange(NUM_FP_VLANS_PER_FP):
                bridge_name = OVS_BRIDGE_TEMPLATE % (self.vm_set_id, int(vm_num), vlan_num)
                ports = VMNetwork.get_ovs_port_bindings(bridge_name)
                for port in ports.iterkeys():
                    # substring match: any port derived from the external iface
                    if self.ext_iface in port:
                        self.unbind_phys_vlan(bridge_name, port)
                        self.destroy_phys_vlan(port)
        return
def destroy_phys_vlan(self, vlan_iface):
if vlan_iface in self.host_ifaces:
VMNetwork.cmd('ifconfig %s down' % vlan_iface)
VMNetwork.cmd('vconfig rem %s' % vlan_iface)
return
def unbind_phys_vlan(self, br_name, vlan_iface):
ports = VMNetwork.get_ovs_br_ports(br_name)
if vlan_iface in ports:
VMNetwork.cmd('ovs-vsctl del-port %s %s' % (br_name, vlan_iface))
return
@staticmethod
def cmd(cmdline):
cmd = cmdline.split(' ')
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
ret_code = process.returncode
if ret_code != 0:
raise Exception("ret_code=%d, error message=%s. cmd=%s" % (ret_code, stderr, cmdline))
return stdout
@staticmethod
def get_ovs_br_ports(bridge):
out = VMNetwork.cmd('ovs-vsctl list-ports %s' % bridge)
return set(out.split('\n'))
@staticmethod
def get_ovs_port_bindings(bridge):
out = VMNetwork.cmd('ovs-ofctl show %s' % bridge)
lines = out.split('\n')
result = {}
for line in lines:
matched = re.match(r'^\s+(\S+)\((\S+)\):\s+addr:.+$', line)
if matched:
port_id = matched.group(1)
iface_name = matched.group(2)
result[iface_name] = port_id
return result
@staticmethod
def ifconfig(cmdline):
out = VMNetwork.cmd(cmdline)
ifaces = set()
rows = out.split('\n')
for row in rows:
if len(row) == 0:
continue
terms = row.split()
if not row[0].isspace():
ifaces.add(terms[0])
return ifaces
def main():
    """Ansible module entry point: dispatch one of 'create', 'bind',
    'unbind' or 'destroy' on the VM set's OVS bridge network and report
    the result back to Ansible via exit_json/fail_json."""
    module = AnsibleModule(
        argument_spec=dict(
            cmd=dict(required=True, choices=['create', 'bind', 'unbind', 'destroy']),
            vm_set_id=dict(required=True, type='int'),
            port1_bridge=dict(required=True, type='str'),
            vm_set_dict=dict(required=True, type='dict'),
            fp_mtu=dict(required=False, type='int', default=DEFAULT_MTU),
            ext_iface=dict(required=True, type='str'),
            vlan_base=dict(required=True, type='int')),
        supports_check_mode=False)
    cmd = module.params['cmd']
    vm_set_id = module.params['vm_set_id']
    port1_bridge = module.params['port1_bridge']
    vm_set_dict = module.params['vm_set_dict']
    fp_mtu = module.params['fp_mtu']
    ext_iface = module.params['ext_iface']
    vlan_base = module.params['vlan_base']
    try:
        net = VMNetwork(vm_set_id, port1_bridge, vm_set_dict, ext_iface, vlan_base, fp_mtu)
        if cmd == 'create':
            net.create_port1_bridge()
            net.create_fp_bridges()
        elif cmd == 'destroy':
            net.destroy_port1_bridge()
            net.destroy_fp_bridges()
        elif cmd == 'bind':
            net.up_ext_iface()
            net.bind()
        elif cmd == 'unbind':
            net.unbind()
        else:
            # argument_spec 'choices' should make this unreachable
            raise Exception("Got wrong cmd: %s. Ansible bug?" % cmd)
    except Exception as error:
        module.fail_json(msg=str(error))
    module.exit_json(changed=True)
# Ansible runs the module file as a script
if __name__ == "__main__":
    main()
| [
"shuche@microsoft.com"
] | shuche@microsoft.com |
41f0b061faa865135f2f57ad59a1ae0f19e92f19 | 0b6f7b0c0915f5ac9e18690e28dfa76c331ea55b | /rivet/local/bin/make-plots | e6ccfd4e7b75491f5c776ce8dd6b3a4ac53648b0 | [] | no_license | Spudmeister/tuningAna | bb81148187528121a8d027512ac0792a12ee937e | 4bdfff74391ba478f0aaecdee78b9c56c929a402 | refs/heads/master | 2020-05-21T16:43:33.386264 | 2016-10-28T08:04:48 | 2016-10-28T08:04:48 | 65,187,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125,706 | #! /usr/bin/env python
"""\
Usage: %prog [options] file.dat [file2.dat ...]
TODO
* Optimise output for e.g. lots of same-height bins in a row
* Add a RatioFullRange directive to show the full range of error bars + MC envelope in the ratio
* Tidy LaTeX-writing code -- faster to compile one doc only, then split it?
* Handle boolean values flexibly (yes, no, true, false, etc. as well as 1, 0)
"""
##
## This program is copyright by Hendrik Hoeth <hoeth@linta.de> and
## the Rivet team https://rivet.hepforge.org. It may be used
## for scientific and private purposes. Patches are welcome, but please don't
## redistribute changed versions yourself.
##
## Check the Python version
# NB. Python 2 only: the script uses print statements and py2 dict APIs throughout
import sys
if sys.version_info[:3] < (2,6,0):
    print "make-plots requires Python version >= 2.6.0... exiting"
    sys.exit(1)
## Try to rename the process on Linux
try:
    import ctypes
    libc = ctypes.cdll.LoadLibrary('libc.so.6')
    # prctl(PR_SET_NAME=15, ...): shows 'make-plots' in ps/top instead of 'python'
    libc.prctl(15, 'make-plots', 0, 0, 0)
except Exception, e:
    # best-effort only; silently skipped on non-Linux platforms
    pass
import os, logging, re
import tempfile
import getopt
import string
from math import *
## Regex patterns
# '# BEGIN <BLOCKNAME> [<path>]' section opener (path optional)
pat_begin_block = re.compile(r'^#+\s*BEGIN ([A-Z0-9_]+) ?(\S+)?')
# '# END <BLOCKNAME>' section closer
pat_end_block =   re.compile('^#+\s*END ([A-Z0-9_]+)')
# comment or blank line
pat_comment = re.compile('^#|^\s*$')
# 'key=value' property line
pat_property = re.compile('^(\w+?)=(.*)$')
# '<path-regex>::key=value' path-scoped property line
pat_path_property  = re.compile('^(\S+?)::(\w+?)=(.*)$')
def fuzzyeq(a, b, tolerance=1e-6):
    """Fuzzy equality comparison function for floats, with given fractional tolerance.

    Values within ~1e-12 of zero compare equal to zero. The fractional test
    is written multiplicatively so that a == -b (vanishing mean) safely
    compares unequal instead of raising ZeroDivisionError.
    """
    if (a == 0 and abs(b) < 1e-12) or (b == 0 and abs(a) < 1e-12):
        return True
    # Equivalent to 2*|a-b|/|a+b| < tolerance, but safe when a + b == 0
    return 2.0*abs(a-b) < tolerance*abs(a+b)
def is_end_marker(line, blockname):
    "Does this line close the named '# END <blockname>' section?"
    match = pat_end_block.match(line)
    return match and match.group(1) == blockname
def is_comment(line):
    "True if the line is a comment or blank line."
    return bool(pat_comment.match(line))
class InputData(object):
    """Parsed representation of a make-plots .dat file.

    Holds the plot-level settings (self.description) plus the contained
    histograms, functions and SPECIAL sections, keyed by their paths.
    """
    def __init__(self, filename):
        # Parse '<filename>.dat' into BEGIN/END sections, apply any config
        # files from the command line, then normalise the plot-level
        # settings (sizes, log flags, draw order).
        self.filename = filename+".dat"
        self.histos = {}
        self.special = {}
        self.functions = {}
        self.description = {}
        self.pathdescriptions = []
        self.is2dim = False
        f = open(self.filename)
        for line in f:
            m = pat_begin_block.match(line)
            if m:
                name, path = m.group(1,2)
                if path is None and name != 'PLOT':
                    raise Exception('BEGIN sections need a path name.')
                ## Pass the reading of the block to separate functions
                if name == 'PLOT':
                    self.read_input(f);
                elif name == 'SPECIAL':
                    self.special[path] = Special(f)
                elif name == 'HISTOGRAM' or name == 'HISTOGRAM2D':
                    self.histos[path] = Histogram(f, p=path)
                    # self.histos[path].path = path
                    self.description['is2dim'] = self.histos[path].is2dim
                elif name == 'HISTO1D':
                    self.histos[path] = Histo1D(f, p=path)
                elif name == 'HISTO2D':
                    self.histos[path] = Histo2D(f, p=path)
                    self.description['is2dim'] = True
                elif name == 'FUNCTION':
                    self.functions[path] = Function(f)
            # elif is_comment(line):
            #     continue
            # else:
            #     self.read_path_based_input(line)
        f.close()
        self.apply_config_files(opts.CONFIGFILES)
        ## Plot (and subplot) sizing
        self.description.setdefault('PlotSizeX', 10.)
        if self.description['is2dim']:
            # 2D plots reserve space for the colour scale and have no ratio panel
            self.description['PlotSizeX'] -= 1.7
            self.description['MainPlot'] = '1'
            self.description['RatioPlot'] = '0'
        if self.description.has_key('PlotSize') and self.description['PlotSize']!='':
            plotsizes = self.description['PlotSize'].split(',')
            self.description['PlotSizeX'] = float(plotsizes[0])
            self.description['PlotSizeY'] = float(plotsizes[1])
            if len(plotsizes) == 3:
                self.description['RatioPlotSizeY'] = float(plotsizes[2])
            del self.description['PlotSize']
        if self.description.get('MainPlot', '1') == '0':
            ## Ratio, no main
            self.description['RatioPlot'] = '1' #< don't allow both to be zero!
            self.description['PlotSizeY'] = 0.
            self.description.setdefault('RatioPlotSizeY', 9.)
        else:
            if self.description.get('RatioPlot', '0') == '1':
                ## Main and ratio
                self.description.setdefault('PlotSizeY', 6.)
                self.description.setdefault('RatioPlotSizeY', self.description.get('RatioPlotYSize', 3.))
            else:
                ## Main, no ratio
                self.description.setdefault('PlotSizeY', self.description.get('PlotYSize', 9.))
                self.description['RatioPlotSizeY'] = 0.
        ## Ensure numbers, not strings
        self.description['PlotSizeX'] = float(self.description['PlotSizeX'])
        self.description['PlotSizeY'] = float(self.description['PlotSizeY'])
        self.description['RatioPlotSizeY'] = float(self.description['RatioPlotSizeY'])
        # self.description['TopMargin'] = float(self.description['TopMargin'])
        # self.description['BottomMargin'] = float(self.description['BottomMargin'])
        self.description['LogX'] = self.description.has_key('LogX') and self.description['LogX']=='1'
        self.description['LogY'] = self.description.has_key('LogY') and self.description['LogY']=='1'
        self.description['LogZ'] = self.description.has_key('LogZ') and self.description['LogZ']=='1'
        if self.description.has_key('Rebin'):
            for i in self.histos:
                self.histos[i].description['Rebin'] = self.description['Rebin']
        # Build the final draw order: restrict to DrawOnly (if given), then
        # stable-sort by each histogram's integer PlotOrder (default 0)
        histoordermap = {}
        histolist = self.histos.keys()
        if self.description.has_key('DrawOnly'):
            histolist = filter(self.histos.keys().count, self.description['DrawOnly'].strip().split())
        for histo in histolist:
            order = 0
            if self.histos[histo].description.has_key('PlotOrder'):
                order = int(self.histos[histo].description['PlotOrder'])
            if not order in histoordermap:
                histoordermap[order] = []
            histoordermap[order].append(histo)
        sortedhistolist = []
        for i in sorted(histoordermap.keys()):
            sortedhistolist.extend(histoordermap[i])
        self.description['DrawOnly'] = sortedhistolist
        ## Inherit various values from histograms if not explicitly set
        for k in ['LogX', 'LogY', 'LogZ',
                  'XLabel', 'YLabel', 'ZLabel',
                  'XCustomMajorTicks', 'YCustomMajorTicks', 'ZCustomMajorTicks']:
            self.inherit_from_histos(k)
        return
    def has_attr(self, key):
        "True if the plot-level property is set."
        return self.description.has_key(key)
    def set_attr(self, key, val):
        "Set a plot-level property."
        self.description[key] = val
    def attr(self, key, default=None):
        "Get a plot-level property, with an optional default."
        return self.description.get(key, default)
    def attr_bool(self, key, default=None):
        "Property coerced to bool (1/true/yes/on vs 0/false/no/off); None if unrecognised."
        x = self.attr(key, default)
        if x is None: return None
        if str(x).lower() in ["1", "true", "yes", "on"]: return True
        if str(x).lower() in ["0", "false", "no", "off"]: return False
        return None
    def attr_int(self, key, default=None):
        "Property coerced to int; None on failure."
        x = self.attr(key, default)
        try:
            x = int(x)
        except:
            x = None
        return x
    def attr_float(self, key, default=None):
        # NOTE(review): unlike attr_int, the default is NOT passed through to
        # self.attr here, so an unset key always yields None -- possibly unintended
        x = self.attr(key)
        try:
            x = float(x)
        except:
            x = None
        return x
    @property
    def is2dim(self):
        return self.attr_bool("is2dim", False)
    @is2dim.setter
    def is2dim(self, val):
        self.set_attr("is2dim", val)
    @property
    def drawonly(self):
        x = self.attr("DrawOnly")
        if type(x) is str:
            self.drawonly = x #< use setter to listify
        return x if x else []
    @drawonly.setter
    def drawonly(self, val):
        if type(val) is str:
            val = val.strip().split()
        self.set_attr("DrawOnly", val)
    @property
    def stacklist(self):
        x = self.attr("Stack")
        if type(x) is str:
            self.stacklist = x #< use setter to listify
        return x if x else []
    @stacklist.setter
    def stacklist(self, val):
        if type(val) is str:
            val = val.strip().split()
        self.set_attr("Stack", val)
    @property
    def plotorder(self):
        x = self.attr("PlotOrder")
        if type(x) is str:
            self.plotorder = x #< use setter to listify
        return x if x else []
    @plotorder.setter
    def plotorder(self, val):
        if type(val) is str:
            val = val.strip().split()
        self.set_attr("PlotOrder", val)
    @property
    def plotsizex(self):
        return self.attr_float("PlotSizeX")
    @plotsizex.setter
    def plotsizex(self, val):
        self.set_attr("PlotSizeX", val)
    @property
    def plotsizey(self):
        return self.attr_float("PlotSizeY")
    @plotsizey.setter
    def plotsizey(self, val):
        self.set_attr("PlotSizeY", val)
    @property
    def plotsize(self):
        return [self.plotsizex, self.plotsizey]
    @plotsize.setter
    def plotsize(self, val):
        if type(val) is str:
            val = [float(x) for x in val.split(",")]
        assert len(val) == 2
        self.plotsizex = val[0]
        self.plotsizey = val[1]
    @property
    def ratiosizey(self):
        return self.attr_float("RatioPlotSizeY")
    @ratiosizey.setter
    def ratiosizey(self, val):
        self.set_attr("RatioPlotSizeY", val)
    @property
    def scale(self):
        return self.attr_float("Scale")
    @scale.setter
    def scale(self, val):
        self.set_attr("Scale", val)
    @property
    def xmin(self):
        return self.attr_float("XMin")
    @xmin.setter
    def xmin(self, val):
        self.set_attr("XMin", val)
    @property
    def xmax(self):
        return self.attr_float("XMax")
    @xmax.setter
    def xmax(self, val):
        self.set_attr("XMax", val)
    @property
    def xrange(self):
        return [self.xmin, self.xmax]
    @xrange.setter
    def xrange(self, val):
        if type(val) is str:
            val = [float(x) for x in val.split(",")]
        assert len(val) == 2
        self.xmin = val[0]
        self.xmax = val[1]
    @property
    def ymin(self):
        return self.attr_float("YMin")
    @ymin.setter
    def ymin(self, val):
        self.set_attr("YMin", val)
    @property
    def ymax(self):
        return self.attr_float("YMax")
    @ymax.setter
    def ymax(self, val):
        self.set_attr("YMax", val)
    @property
    def yrange(self):
        return [self.ymin, self.ymax]
    @yrange.setter
    def yrange(self, val):
        if type(val) is str:
            val = [float(y) for y in val.split(",")]
        assert len(val) == 2
        self.ymin = val[0]
        self.ymax = val[1]
    # TODO: add more rw properties for plotsize(x,y), ratiosize(y),
    # show_mainplot, show_ratioplot, show_legend, log(x,y,z), rebin,
    # drawonly, legendonly, plotorder, stack,
    # label(x,y,z), majorticks(x,y,z), minorticks(x,y,z),
    # min(x,y,z), max(x,y,z), range(x,y,z)
    def inherit_from_histos(self, k):
        """Note: this will inherit the key from a random histogram:
        only use if you're sure all histograms have this key!"""
        if not self.description.has_key(k):
            h = list(self.histos.itervalues())[0]
            if h.description.has_key(k):
                self.description[k] = h.description[k]
    def read_input(self, f):
        """Consume the body of a 'BEGIN PLOT' section: key=value lines
        become entries in self.description until the END marker."""
        for line in f:
            if is_end_marker(line, 'PLOT'):
                break
            elif is_comment(line):
                continue
            m = pat_property.match(line)
            if m:
                prop, value = m.group(1,2)
                if prop in self.description:
                    logging.debug("Overwriting property %s = %s -> %s" % (prop, self.description[prop], value))
                ## Use strip here to deal with DOS newlines containing \r
                self.description[prop.strip()] = value.strip()
    def apply_config_files(self, conffiles):
        """Overlay settings from external config files: PLOT sections whose
        path-regex matches this filename update self.description, and
        '<path-regex>::key=value' lines update matching contained objects."""
        if conffiles is not None:
            for filename in conffiles:
                cf = open(filename,'r')
                lines = cf.readlines()
                for i in range(0, len(lines)):
                    ## First evaluate PLOT sections
                    m = pat_begin_block.match(lines[i])
                    if m and m.group(1) == 'PLOT' and re.match(m.group(2),self.filename):
                        while i<len(lines)-1:
                            i = i+1
                            if is_end_marker(lines[i], 'PLOT'):
                                break
                            elif is_comment(lines[i]):
                                continue
                            m = pat_property.match(lines[i])
                            if m:
                                prop, value = m.group(1,2)
                                if prop in self.description:
                                    logging.debug("Overwriting from conffile property %s = %s -> %s" % (prop, self.description[prop], value))
                                ## Use strip here to deal with DOS newlines containing \r
                                self.description[prop.strip()] = value.strip()
                    elif is_comment(lines[i]):
                        continue
                    else:
                        ## Then evaluate path-based settings, e.g. for HISTOGRAMs
                        m = pat_path_property.match(lines[i])
                        if m:
                            regex, prop, value = m.group(1,2,3)
                            for obj_dict in [self.special, self.histos, self.functions]:
                                for path, obj in obj_dict.iteritems():
                                    if re.match(regex, path):
                                        ## Use strip here to deal with DOS newlines containing \r
                                        obj.description.update({prop.strip() : value.strip()})
                cf.close()
class Plot(object):
    """Base class for the plot panels.

    Provides normalisation/stacking of the input histograms, computation of
    the axis borders (x/y/z min/max), and the LaTeX/PSTricks document header
    and footer shared by the main and ratio panels.
    """
    def __init__(self,inputdata):
        pass
    def set_normalization(self,inputdata):
        """Apply Normalize*/Scale settings to each drawn histogram, then let
        each histogram mangle (normalise/scale) its data."""
        for method in ['NormalizeToIntegral', 'NormalizeToSum']:
            if inputdata.description.has_key(method):
                for i in inputdata.drawonly:
                    if not inputdata.histos[i].has_attr(method):
                        inputdata.histos[i].set_attr(method, inputdata.attr(method))
        if inputdata.scale:
            for i in inputdata.drawonly:
                inputdata.histos[i].scale = inputdata.scale
        for i in inputdata.drawonly:
            inputdata.histos[i].mangle_input()
    def stack_histograms(self,inputdata):
        """Cumulatively add the histograms listed in 'Stack', in order."""
        if inputdata.description.has_key('Stack'):
            stackhists = [h for h in inputdata.attr('Stack').strip().split() if h in inputdata.histos]
            previous = ''
            for i in stackhists:
                if previous != '':
                    inputdata.histos[i].add(inputdata.histos[previous])
                previous = i
    def set_histo_options(self,inputdata):
        """Propagate the plot-level 'ConnectGaps' setting to histograms
        that do not set it themselves."""
        if inputdata.description.has_key('ConnectGaps'):
            for i in inputdata.histos.keys():
                if not inputdata.histos[i].description.has_key('ConnectGaps'):
                    inputdata.histos[i].description['ConnectGaps'] = inputdata.description['ConnectGaps']
    def set_borders(self,inputdata):
        """Compute all six axis limits and publish them as 'Borders'.
        The *max setters run before the *min setters: the log-axis branches
        of set_ymin/set_zmin read self.ymax/self.zmax."""
        self.set_xmax(inputdata)
        self.set_xmin(inputdata)
        self.set_ymax(inputdata)
        self.set_ymin(inputdata)
        self.set_zmax(inputdata)
        self.set_zmin(inputdata)
        inputdata.description['Borders'] = (self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax)
    def set_xmin(self,inputdata):
        """Lower x border: explicit XMin, else smallest histogram edge."""
        self.xmin = inputdata.attr_float("XMin")
        if self.xmin is None:
            self.xmin = min(inputdata.histos[h].getXMin() for h in inputdata.description['DrawOnly'])
    def set_xmax(self,inputdata):
        """Upper x border: explicit XMax, else largest histogram edge."""
        self.xmax = inputdata.attr_float("XMax")
        if self.xmax is None:
            # BUGFIX: was min(...), which clipped the axis at the *smallest*
            # histogram's upper edge; mirror set_xmin with max()
            self.xmax = max(inputdata.histos[h].getXMax() for h in inputdata.description['DrawOnly'])
    def set_ymin(self,inputdata):
        """Lower y border, honouring YMin/ShowZero/FullRange/LogY."""
        if inputdata.description.has_key('YMin'):
            self.ymin = inputdata.attr_float('YMin')
        else:
            ymins = [inputdata.histos[i].getYMin(self.xmin, self.xmax, inputdata.description['LogY']) for i in inputdata.attr('DrawOnly')]
            minymin = min(ymins)
            if inputdata.description['is2dim']:
                self.ymin = minymin
            else:
                showzero = inputdata.attr_bool("ShowZero", True)
                if showzero:
                    self.ymin = 0. if minymin > -1e-4 else 1.1*minymin
                else:
                    # BUGFIX: was '1.1*min(foo)' with undefined name 'foo'
                    # (NameError); use minymin, as in the parallel set_zmin code
                    self.ymin = 1.1*minymin if minymin < -1e-4 else 0 if minymin < 1e-4 else 0.9*minymin
                if inputdata.description['LogY']:
                    # only positive values can appear on a log axis
                    ymins = [ymin for ymin in ymins if ymin > 0.0]
                    if not ymins:
                        if self.ymax == 0:
                            self.ymax = 1
                        ymins.append(2e-7*self.ymax)
                    minymin = min(ymins)
                    fullrange = opts.FULL_RANGE
                    if inputdata.has_attr('FullRange'):
                        fullrange = inputdata.attr_bool('FullRange')
                    self.ymin = minymin/1.7 if fullrange else max(minymin/1.7, 2e-7*self.ymax)
                if self.ymin == self.ymax:
                    self.ymin -= 1
                    self.ymax += 1
    def set_ymax(self,inputdata):
        """Upper y border: explicit YMax, else max bin height plus headroom."""
        if inputdata.has_attr('YMax'):
            self.ymax = inputdata.attr_float('YMax')
        else:
            self.ymax = max(inputdata.histos[h].getYMax(self.xmin, self.xmax) for h in inputdata.attr('DrawOnly'))
            if not inputdata.is2dim:
                self.ymax *= (1.7 if inputdata.attr_bool('LogY') else 1.1)
    def set_zmin(self,inputdata):
        """Lower z border (2D plots), honouring ZMin/ShowZero/FullRange/LogZ."""
        if inputdata.has_attr('ZMin'):
            self.zmin = inputdata.attr_float('ZMin')
        else:
            zmins = [inputdata.histos[i].getZMin(self.xmin, self.xmax, self.ymin, self.ymax) for i in inputdata.attr('DrawOnly')]
            minzmin = min(zmins)
            self.zmin = minzmin
            if zmins:
                showzero = inputdata.attr_bool('ShowZero', True)
                if showzero:
                    self.zmin = 0 if minzmin > -1e-4 else 1.1*minzmin
                else:
                    self.zmin = 1.1*minzmin if minzmin < -1e-4 else 0. if minzmin < 1e-4 else 0.9*minzmin
                if inputdata.attr_bool('LogZ', False):
                    zmins = [zmin for zmin in zmins if zmin > 0]
                    if not zmins:
                        if self.zmax == 0:
                            self.zmax = 1
                        zmins.append(2e-7*self.zmax)
                    minzmin = min(zmins)
                    fullrange = inputdata.attr_bool("FullRange", opts.FULL_RANGE)
                    self.zmin = minzmin/1.7 if fullrange else max(minzmin/1.7, 2e-7*self.zmax)
                if self.zmin == self.zmax:
                    self.zmin -= 1
                    self.zmax += 1
    def set_zmax(self,inputdata):
        """Upper z border: explicit ZMax, else largest histogram z value."""
        self.zmax = inputdata.attr_float('ZMax')
        if self.zmax is None:
            zmaxs = [inputdata.histos[h].getZMax(self.xmin, self.xmax, self.ymin, self.ymax) for h in inputdata.attr('DrawOnly')]
            self.zmax = max(zmaxs) if zmaxs else 1
    def draw(self):
        # overridden by the concrete panel classes
        pass
    def write_header(self,inputdata):
        """Return the LaTeX preamble: fonts, colours, geometry sized to the
        plot + margins, and the opening pspicture environment."""
        if inputdata.description.has_key('LeftMargin') and inputdata.description['LeftMargin']!='':
            inputdata.description['LeftMargin'] = float(inputdata.description['LeftMargin'])
        else:
            inputdata.description['LeftMargin'] = 1.4
        if inputdata.description.has_key('RightMargin') and inputdata.description['RightMargin']!='':
            inputdata.description['RightMargin'] = float(inputdata.description['RightMargin'])
        else:
            inputdata.description['RightMargin'] = 0.35
        if inputdata.description.has_key('TopMargin') and inputdata.description['TopMargin']!='':
            inputdata.description['TopMargin'] = float(inputdata.description['TopMargin'])
        else:
            inputdata.description['TopMargin'] = 0.65
        if inputdata.description.has_key('BottomMargin') and inputdata.description['BottomMargin']!='':
            inputdata.description['BottomMargin'] = float(inputdata.description['BottomMargin'])
        else:
            inputdata.description['BottomMargin'] = 0.95
        if inputdata.description['is2dim']:
            # extra room for the colour scale
            inputdata.description['RightMargin'] += 1.7
        papersizex = inputdata.description['PlotSizeX'] + 0.1 + \
                     inputdata.description['LeftMargin'] + inputdata.description['RightMargin']
        papersizey = inputdata.description['PlotSizeY'] + inputdata.description['RatioPlotSizeY'] + 0.1 + \
                     inputdata.description['TopMargin'] + inputdata.description['BottomMargin']
        #
        out = ""
        out += '\\documentclass{article}\n'
        if opts.OUTPUT_FONT == "MINION":
            out += ('\\usepackage{minion}\n')
        elif opts.OUTPUT_FONT == "PALATINO_OSF":
            out += ('\\usepackage[osf,sc]{mathpazo}\n')
        elif opts.OUTPUT_FONT == "PALATINO":
            out += ('\\usepackage{mathpazo}\n')
        elif opts.OUTPUT_FONT == "TIMES":
            out += ('\\usepackage{mathptmx}\n')
        elif opts.OUTPUT_FONT == "HELVETICA":
            out += ('\\renewcommand{\\familydefault}{\\sfdefault}\n')
            out += ('\\usepackage{sfmath}\n')
            out += ('\\usepackage{helvet}\n')
            out += ('\\usepackage[symbolgreek]{mathastext}\n')
        for pkg in opts.LATEXPKGS:
            out += ('\\usepackage{%s}\n' % pkg)
        out += ('\\usepackage{pst-all}\n')
        out += ('\\usepackage{xcolor}\n')
        out += ('\\selectcolormodel{rgb}\n')
        out += ('\\definecolor{red}{HTML}{EE3311}\n') # (Google uses 'DC3912')
        # BUGFIX: the following four lines were missing their trailing '\n',
        # running all colour definitions together on one output line
        out += ('\\definecolor{blue}{HTML}{3366FF}\n')
        out += ('\\definecolor{green}{HTML}{109618}\n')
        out += ('\\definecolor{orange}{HTML}{FF9900}\n')
        out += ('\\definecolor{lilac}{HTML}{990099}\n')
        out += ('\\usepackage{amsmath}\n')
        out += ('\\usepackage{amssymb}\n')
        out += ('\\usepackage{relsize}\n')
        out += ('\\usepackage[dvips,\n')
        out += (' left=%4.3fcm, right=0cm,\n' %(inputdata.description['LeftMargin']-0.45,))
        out += (' top=%4.3fcm, bottom=0cm,\n' %(inputdata.description['TopMargin']-0.30,))
        out += (' paperwidth=%scm,paperheight=%scm\n' %(papersizex,papersizey))
        out += (']{geometry}\n')
        out += ('\\begin{document}\n')
        out += ('\\pagestyle{empty}\n')
        out += ('\\SpecialCoor\n')
        out += ('\\begin{pspicture}(0,0)(0,0)\n')
        out += ('\\psset{xunit=%scm}\n' %(inputdata.description['PlotSizeX']))
        if inputdata.description['is2dim']:
            if inputdata.description.has_key('ColorSeries') and inputdata.description['ColorSeries']!='':
                colorseries = inputdata.description['ColorSeries']
            else:
                colorseries = '{hsb}{grad}[rgb]{0,0,1}{-.700,0,0}'
            out += ('\\definecolorseries{gradientcolors}%s\n' % colorseries)
            out += ('\\resetcolorseries[130]{gradientcolors}\n')
        return out
    def write_footer(self):
        """Return the LaTeX epilogue closing the pspicture and document."""
        out = ""
        out += ('\\end{pspicture}\n')
        out += ('\\end{document}\n')
        return out
class MainPlot(Plot):
    """The main (upper) plot panel: normalises/stacks the histograms,
    computes the axis borders, and emits the PSTricks drawing commands,
    ticks, labels, legend and (for 2D) colour scale."""
    def __init__(self, inputdata):
        self.set_normalization(inputdata)
        self.stack_histograms(inputdata)
        # NOTE(review): 'and' binds tighter than 'or' here, so this reads as
        # GofLegend or (GofFrame set and not TaylorPlot) -- confirm intended
        if (inputdata.description.has_key('GofLegend') and inputdata.description['GofLegend']=='1') or \
           (inputdata.description.has_key('GofFrame') and inputdata.description['GofFrame']!='') and not \
           (inputdata.description.has_key('TaylorPlot') and inputdata.description['TaylorPlot']=='1'):
            self.calculate_gof(inputdata)
        self.set_histo_options(inputdata)
        self.set_borders(inputdata)
        self.yoffset = inputdata.description['PlotSizeY']
        self.coors = Coordinates(inputdata)
    def draw(self, inputdata):
        """Return the PSTricks code of the whole panel, shifted into place."""
        out = ""
        out += ('\n%\n% MainPlot\n%\n')
        out += ('\\psset{yunit=%scm}\n' %(self.yoffset))
        out += ('\\rput(0,-1){%\n')
        out += ('\\psset{yunit=%scm}\n' %(inputdata.description['PlotSizeY']))
        out += self._draw(inputdata)
        out += ('}\n')
        return out
    def _draw(self, inputdata):
        """Emit the panel contents. Drawing order of specials/functions/histos
        is controlled by DrawSpecialFirst and DrawFunctionFirst; then come
        legend, colour scale, frame, ticks and labels."""
        out = ""
        if inputdata.description.has_key('DrawSpecialFirst') and inputdata.description['DrawSpecialFirst']=='1':
            for i in inputdata.special.keys():
                out += inputdata.special[i].draw(self.coors)
            if inputdata.description.has_key('DrawFunctionFirst') and inputdata.description['DrawFunctionFirst']=='1':
                for i in inputdata.functions.keys():
                    out += inputdata.functions[i].draw(self.coors)
                for i in inputdata.description['DrawOnly']:
                    out += inputdata.histos[i].draw(self.coors)
            else:
                for i in inputdata.description['DrawOnly']:
                    out += inputdata.histos[i].draw(self.coors)
                for i in inputdata.functions.keys():
                    out += inputdata.functions[i].draw(self.coors)
        else:
            if inputdata.description.has_key('DrawFunctionFirst') and inputdata.description['DrawFunctionFirst']=='1':
                for i in inputdata.functions.keys():
                    out += inputdata.functions[i].draw(self.coors)
                for i in inputdata.description['DrawOnly']:
                    out += inputdata.histos[i].draw(self.coors)
            else:
                for i in inputdata.description['DrawOnly']:
                    out += inputdata.histos[i].draw(self.coors)
                for i in inputdata.functions.keys():
                    out += inputdata.functions[i].draw(self.coors)
            for i in inputdata.special.keys():
                out += inputdata.special[i].draw(self.coors)
        if inputdata.description.has_key('Legend') and inputdata.description['Legend']=='1':
            legend = Legend(inputdata.description,inputdata.histos,inputdata.functions)
            out += legend.draw()
        if inputdata.description['is2dim']:
            colorscale = ColorScale(inputdata.description,self.coors)
            out += colorscale.draw()
        frame = Frame()
        out += frame.draw(inputdata)
        # X ticks: counts of auto tick marks and/or explicit value[/label] lists
        if inputdata.description.has_key('XMajorTickMarks') and inputdata.description['XMajorTickMarks']:
            xcustommajortickmarks=int(inputdata.description['XMajorTickMarks'])
        else:
            xcustommajortickmarks = -1
        if inputdata.description.has_key('XMinorTickMarks') and inputdata.description['XMinorTickMarks']:
            xcustomminortickmarks = int(inputdata.description['XMinorTickMarks'])
        else:
            xcustomminortickmarks = -1
        xcustommajorticks = None
        xcustomminorticks = None
        if inputdata.description.has_key('XCustomMajorTicks'): # and inputdata.description['XCustomMajorTicks']!='':
            xcustommajorticks = []
            x_label_pairs = inputdata.description['XCustomMajorTicks'].strip().split() #'\t')
            if len(x_label_pairs) % 2 == 0:
                for i in range(0, len(x_label_pairs), 2):
                    xcustommajorticks.append({'Value': float(x_label_pairs[i]), 'Label': x_label_pairs[i+1]})
            else:
                print "Warning: XCustomMajorTicks requires an even number of alternating pos/label entries"
        if inputdata.description.has_key('XCustomMinorTicks'): # and inputdata.description['XCustomMinorTicks']!='':
            xs = inputdata.description['XCustomMinorTicks'].strip().split() #'\t')
            xcustomminorticks = [{'Value': float(x)} for x in xs]
        xticks = XTicks(inputdata.description, self.coors)
        # x tick labels are suppressed when a ratio panel will draw them instead
        if (inputdata.description.has_key('RatioPlot') and inputdata.description['RatioPlot'] == '1') or \
           (inputdata.description.has_key('PlotXTickLabels') and inputdata.description['PlotXTickLabels'] == '0'):
            drawxlabels = False
        else:
            drawxlabels = True
        out += xticks.draw(custommajortickmarks=xcustommajortickmarks,\
                           customminortickmarks=xcustomminortickmarks,\
                           custommajorticks=xcustommajorticks,\
                           customminorticks=xcustomminorticks,\
                           drawlabels=drawxlabels)
        # Y ticks: same scheme as the x axis
        if inputdata.description.has_key('YMajorTickMarks') and inputdata.description['YMajorTickMarks'] != '':
            ycustommajortickmarks = int(inputdata.description['YMajorTickMarks'])
        else:
            ycustommajortickmarks = -1
        if inputdata.description.has_key('YMinorTickMarks') and inputdata.description['YMinorTickMarks']!='':
            ycustomminortickmarks = int(inputdata.description['YMinorTickMarks'])
        else:
            ycustomminortickmarks = -1
        ycustommajorticks = None
        ycustomminorticks = None
        if inputdata.description.has_key('YCustomMajorTicks'): # and inputdata.description['YCustomMajorTicks']!='':
            ycustommajorticks = []
            y_label_pairs = inputdata.description['YCustomMajorTicks'].strip().split() #'\t')
            if len(y_label_pairs) % 2 == 0:
                for i in range(0, len(y_label_pairs), 2):
                    ycustommajorticks.append({'Value': float(y_label_pairs[i]), 'Label': y_label_pairs[i+1]})
            else:
                print "Warning: YCustomMajorTicks requires an even number of alternating pos/label entries"
        if inputdata.description.has_key('YCustomMinorTicks'): # and inputdata.description['YCustomMinorTicks']!='':
            ys = inputdata.description['YCustomMinorTicks'].strip().split() #'\t')
            ycustomminorticks = [{'Value': float(y)} for y in ys]
        yticks = YTicks(inputdata.description, self.coors)
        if inputdata.description.has_key('PlotYTickLabels') and inputdata.description['PlotYTickLabels'] == '0':
            drawylabels = False
        else:
            drawylabels = True
        out += yticks.draw(custommajortickmarks=ycustommajortickmarks,
                           customminortickmarks=ycustomminortickmarks,
                           custommajorticks=ycustommajorticks,
                           customminorticks=ycustomminorticks,
                           drawlabels=drawylabels)
        labels = Labels(inputdata.description)
        # the x label moves to the ratio panel when one is drawn
        if inputdata.description.has_key('RatioPlot') and inputdata.description['RatioPlot']=='1':
            out += labels.draw(['Title','YLabel'])
        else:
            if not inputdata.description['is2dim']:
                out += labels.draw(['Title','XLabel','YLabel'])
            else:
                out += labels.draw(['Title','XLabel','YLabel','ZLabel'])
        return out
    def calculate_gof(self, inputdata):
        """Compute a chi2/n goodness-of-fit of each drawn histogram against
        the Gof/RatioPlot reference, append it to the histogram titles, and
        pick a frame colour from the GofFrameColor thresholds."""
        refdata = inputdata.description.get('GofReference')
        if refdata is None:
            refdata = inputdata.description.get('RatioPlotReference')
        if refdata is None:
            inputdata.description['GofLegend'] = '0'
            inputdata.description['GofFrame'] = ''
            return
        def pickcolor(gof):
            # largest threshold not exceeding gof wins
            color = None
            colordefs = {}
            for i in inputdata.description.setdefault('GofFrameColor', '0:green 3:yellow 6:red!70').strip().split():
                foo = i.split(':')
                if len(foo)!=2: continue
                colordefs[float(foo[0])] = foo[1]
            for i in sorted(colordefs.keys()):
                if gof>=i:
                    color=colordefs[i]
            return color
        inputdata.description.setdefault('GofLegend','0')
        inputdata.description.setdefault('GofFrame','')
        inputdata.description.setdefault('FrameColor',None)
        for i in inputdata.description['DrawOnly']:
            if i==refdata: continue
            if inputdata.description['GofLegend']!='1' and i!=inputdata.description['GofFrame']: continue
            # only the chi2 measure is implemented
            if inputdata.description.has_key('GofType') and inputdata.description['GofType']!='chi2':
                return
            gof = inputdata.histos[i].getChi2(inputdata.histos[refdata])
            if i==inputdata.description['GofFrame'] and inputdata.description['FrameColor'] is None:
                inputdata.description['FrameColor']=pickcolor(gof)
            if inputdata.histos[i].description.setdefault('Title', '')!='':
                inputdata.histos[i].description['Title'] += ', '
            inputdata.histos[i].description['Title'] += '$\\chi^2/n={}$%1.2f' %gof
class TaylorPlot(Plot):
    """Diagnostic 'plot': moves the reference histogram to the end of the
    draw order and prints per-histogram Taylor-diagram statistics to stdout
    (no graphical output is produced here)."""
    def __init__(self, inputdata):
        self.refdata = inputdata.description['TaylorPlotReference']
        self.calculate_taylorcoordinates(inputdata)
    def calculate_taylorcoordinates(self,inputdata):
        # move the reference histogram to the end of DrawOnly
        foo=inputdata.description['DrawOnly'].pop(inputdata.description['DrawOnly'].index(self.refdata))
        inputdata.description['DrawOnly'].append(foo)
        for i in inputdata.description['DrawOnly']:
            print i
            print 'meanbinval = ', inputdata.histos[i].getMeanBinValue()
            print 'sigmabinval = ', inputdata.histos[i].getSigmaBinValue()
            print 'chi2/nbins = ', inputdata.histos[i].getChi2(inputdata.histos[self.refdata])
            print 'correlation = ', inputdata.histos[i].getCorrelation(inputdata.histos[self.refdata])
            print 'distance = ', inputdata.histos[i].getRMSdistance(inputdata.histos[self.refdata])
class RatioPlot(Plot):
    """Ratio (or deviation) panel drawn below the main plot.

    Rewrites the plot description in place so the generic Plot machinery
    renders every histogram divided by (or deviated from) the reference
    histogram named by 'RatioPlotReference'.  'RatioPlotMode' selects the
    transformation: 'deviation', 'datamc' (Data/MC), or the default MC/Data.
    """
    def __init__(self, inputdata):
        # Histogram id everything else is compared against.
        self.refdata = inputdata.description['RatioPlotReference']
        # Vertical offset: this panel sits directly below the main plot.
        self.yoffset = inputdata.description['PlotSizeY'] + inputdata.description['RatioPlotSizeY']
        inputdata.description['RatioPlotStage'] = True
        inputdata.description['PlotSizeY'] = inputdata.description['RatioPlotSizeY']
        # Ratios are always drawn on a linear y axis.
        inputdata.description['LogY'] = False
        # TODO: It'd be nice it this wasn't so MC-specific
        if inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode']=='deviation':
            inputdata.description['YLabel']='$(\\text{MC}-\\text{data})$'
            inputdata.description['YMin']=-3.5
            inputdata.description['YMax']=3.5
        elif inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode']=='datamc':
            inputdata.description['YLabel']='Data/MC'
            inputdata.description['YMin']=0.5
            inputdata.description['YMax']=1.5
        else:
            inputdata.description['YLabel']='MC/Data'
            inputdata.description['YMin']=0.5
            inputdata.description['YMax']=1.5
        if inputdata.description.has_key('RatioPlotYLabel'):
            inputdata.description['YLabel'] = inputdata.description['RatioPlotYLabel']
        # Shift the y label left so it clears the (narrower) ratio panel.
        inputdata.description['YLabel']='\\rput(-%s,0){%s}'%(0.5*inputdata.description['PlotSizeY']/inputdata.description['PlotSizeX'],inputdata.description['YLabel'])
        if inputdata.description.has_key('RatioPlotYMin'):
            inputdata.description['YMin']=inputdata.description['RatioPlotYMin']
        if inputdata.description.has_key('RatioPlotYMax'):
            inputdata.description['YMax']=inputdata.description['RatioPlotYMax']
        if not inputdata.description.has_key('RatioPlotErrorBandColor'):
            inputdata.description['RatioPlotErrorBandColor']='yellow'
        # Unless 'RatioPlotSameStyle' is set, restyle the reference histogram
        # as a solid-line error band (the unit band of the ratio).
        if not inputdata.description.has_key('RatioPlotSameStyle') or inputdata.description['RatioPlotSameStyle']=='0':
            inputdata.histos[self.refdata].description['ErrorBandColor']=inputdata.description['RatioPlotErrorBandColor']
            inputdata.histos[self.refdata].description['ErrorBands']='1'
            inputdata.histos[self.refdata].description['ErrorBars']='0'
            inputdata.histos[self.refdata].description['LineStyle']='solid'
            inputdata.histos[self.refdata].description['LineColor']='black'
            inputdata.histos[self.refdata].description['LineWidth']='0.3pt'
            inputdata.histos[self.refdata].description['PolyMarker']=''
            inputdata.histos[self.refdata].description['ConnectGaps']='1'
        self.calculate_ratios(inputdata)
        self.set_borders(inputdata)
        self.coors = Coordinates(inputdata)
    def draw(self, inputdata):
        """Return the PSTricks code for the ratio panel, shifted below the main plot."""
        out = ""
        out += ('\n%\n% RatioPlot\n%\n')
        out += ('\\psset{yunit=%scm}\n' %(self.yoffset))
        out += ('\\rput(0,-1){%\n')
        out += ('\\psset{yunit=%scm}\n' %(inputdata.description['PlotSizeY']))
        out += self._draw(inputdata)
        out += ('}\n')
        return out
    def calculate_ratios(self,inputdata):
        """Transform every histogram relative to the reference, in place."""
        # Draw the reference first if it is an error band (so other curves lie
        # on top of it), last otherwise.
        foo=inputdata.description['DrawOnly'].pop(inputdata.description['DrawOnly'].index(self.refdata))
        if inputdata.histos[self.refdata].description.has_key('ErrorBands') and inputdata.histos[self.refdata].description['ErrorBands']=='1':
            inputdata.description['DrawOnly'].insert(0,foo)
        else:
            inputdata.description['DrawOnly'].append(foo)
        for i in inputdata.description['DrawOnly']:
            if i!=self.refdata:
                if inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode']=='deviation':
                    inputdata.histos[i].deviation(inputdata.histos[self.refdata])
                elif inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode']=='datamc':
                    inputdata.histos[i].dividereverse(inputdata.histos[self.refdata])
                    inputdata.histos[i].description['ErrorBars']='1'
                else:
                    inputdata.histos[i].divide(inputdata.histos[self.refdata])
        # Transform the reference itself last: it becomes the unit line/band.
        if inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode']=='deviation':
            inputdata.histos[self.refdata].deviation(inputdata.histos[self.refdata])
        elif inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode']=='datamc':
            inputdata.histos[self.refdata].dividereverse(inputdata.histos[self.refdata])
        else:
            inputdata.histos[self.refdata].divide(inputdata.histos[self.refdata])
    def _draw(self, inputdata):
        """Draw the histograms, frame, tick marks and labels of the ratio panel."""
        out = ""
        for i in inputdata.description['DrawOnly']:
            if inputdata.description.has_key('RatioPlotMode') and inputdata.description['RatioPlotMode']=='datamc':
                # In Data/MC mode the reference itself is not drawn.
                if i!=self.refdata:
                    out += inputdata.histos[i].draw(self.coors)
            else:
                out += inputdata.histos[i].draw(self.coors)
        frame = Frame()
        out += frame.draw(inputdata)
        # TODO: so much duplication with MainPlot... yuck!
        # Custom x tick-mark counts (-1 means "use the automatic default").
        if inputdata.description.has_key('XMajorTickMarks') and inputdata.description['XMajorTickMarks']!='':
            xcustommajortickmarks=int(inputdata.description['XMajorTickMarks'])
        else:
            xcustommajortickmarks=-1
        if inputdata.description.has_key('XMinorTickMarks') and inputdata.description['XMinorTickMarks']!='':
            xcustomminortickmarks=int(inputdata.description['XMinorTickMarks'])
        else:
            xcustomminortickmarks=-1
        xcustommajorticks=None
        xcustomminorticks=None
        # Custom major ticks come as whitespace-separated "value label" pairs.
        if inputdata.description.has_key('XCustomMajorTicks'): # and inputdata.description['XCustomMajorTicks']!='':
            xcustommajorticks=[]
            FOO=inputdata.description['XCustomMajorTicks'].strip().split() #'\t')
            if not len(FOO)%2:
                for i in range(0,len(FOO),2):
                    xcustommajorticks.append({'Value': float(FOO[i]), 'Label': FOO[i+1]})
        # Custom minor ticks are bare values (no labels).
        if inputdata.description.has_key('XCustomMinorTicks'): # and inputdata.description['XCustomMinorTicks']!='':
            xcustomminorticks=[]
            FOO=inputdata.description['XCustomMinorTicks'].strip().split() #'\t')
            for i in range(len(FOO)):
                xcustomminorticks.append({'Value': float(FOO[i])})
        xticks = XTicks(inputdata.description, self.coors)
        if inputdata.description.has_key('RatioPlotTickLabels') and inputdata.description['RatioPlotTickLabels']=='0':
            drawlabels=False
        else:
            drawlabels=True
        out += xticks.draw(custommajortickmarks=xcustommajortickmarks,\
                           customminortickmarks=xcustomminortickmarks,\
                           custommajorticks=xcustommajorticks,\
                           customminorticks=xcustomminorticks,
                           drawlabels=drawlabels)
        # Same again for the y axis.
        if inputdata.description.has_key('YMajorTickMarks') and inputdata.description['YMajorTickMarks']!='':
            ycustommajortickmarks=int(inputdata.description['YMajorTickMarks'])
        else:
            ycustommajortickmarks=-1
        if inputdata.description.has_key('YMinorTickMarks') and inputdata.description['YMinorTickMarks']!='':
            ycustomminortickmarks=int(inputdata.description['YMinorTickMarks'])
        else:
            ycustomminortickmarks=-1
        ycustommajorticks=None
        ycustomminorticks=None
        if inputdata.description.has_key('YCustomMajorTicks'): # and inputdata.description['YCustomMajorTicks']!='':
            ycustommajorticks=[]
            FOO=inputdata.description['YCustomMajorTicks'].strip().split() #'\t')
            if not len(FOO)%2:
                for i in range(0,len(FOO),2):
                    ycustommajorticks.append({'Value': float(FOO[i]), 'Label': FOO[i+1]})
        if inputdata.description.has_key('YCustomMinorTicks'): # and inputdata.description['YCustomMinorTicks']!='':
            ycustomminorticks=[]
            FOO=inputdata.description['YCustomMinorTicks'].strip().split() #'\t')
            for i in range(len(FOO)):
                ycustomminorticks.append({'Value': float(FOO[i])})
        yticks = YTicks(inputdata.description, self.coors)
        out += yticks.draw(custommajortickmarks=ycustommajortickmarks,\
                           customminortickmarks=ycustomminortickmarks,\
                           custommajorticks=ycustommajorticks,\
                           customminorticks=ycustomminorticks)
        # If there is no main plot, this panel carries the legend and title.
        if inputdata.description.has_key('MainPlot') and inputdata.description['MainPlot']=='0':
            if inputdata.description.has_key('Legend') and inputdata.description['Legend']=='1':
                legend = Legend(inputdata.description,inputdata.histos,inputdata.functions)
                out += legend.draw()
        labels = Labels(inputdata.description)
        if inputdata.description.has_key('MainPlot') and inputdata.description['MainPlot']=='0':
            out += labels.draw(['Title','XLabel','YLabel'])
        else:
            out += labels.draw(['XLabel','YLabel'])
        return out
class Legend(object):
    """Draws the plot legend: one line/marker sample plus title per drawn object.

    Entries come from the 'DrawOnly' histogram list plus all functions, can be
    restricted via 'LegendOnly' and reordered via per-object 'LegendOrder'.
    """
    def __init__(self, description, histos, functions):
        self.histos = histos
        self.functions = functions
        self.description = description
    def draw(self):
        """Return the PSTricks code for the legend box."""
        out = ""
        out += '\n%\n% Legend\n%\n'
        out += '\\rput[tr](%s,%s){%%\n' % (self.getLegendXPos(), self.getLegendYPos())
        # Vertical step per entry, scaled to the plot height.
        ypos = -0.05*6/self.description['PlotSizeY']
        legendordermap = {}
        legendlist = self.description['DrawOnly']+self.functions.keys()
        # 'LegendOnly' restricts the legend to an explicit subset of ids.
        if self.description.has_key('LegendOnly'):
            legendlist = []
            for legend in self.description['LegendOnly'].strip().split():
                if legend in self.histos.keys() or legend in self.functions.keys():
                    legendlist.append(legend)
        # Group entries by their 'LegendOrder' (default 0), then flatten in order.
        for legend in legendlist:
            order = 0
            if self.histos.has_key(legend) and self.histos[legend].description.has_key('LegendOrder'):
                order = int(self.histos[legend].description['LegendOrder'])
            if self.functions.has_key(legend) and self.functions[legend].description.has_key('LegendOrder'):
                order = int(self.functions[legend].description['LegendOrder'])
            if not order in legendordermap:
                legendordermap[order] = []
            legendordermap[order].append(legend)
        foo=[]
        for i in sorted(legendordermap.keys()):
            foo.extend(legendordermap[i])
        # Mirror the sample-line x positions when the legend is right-aligned.
        rel_xpos_sign = 1.0
        if self.getLegendAlign()=='r':
            rel_xpos_sign = -1.0
        xpos1 = -0.10*rel_xpos_sign
        xpos2 = -0.02*rel_xpos_sign
        for i in foo:
            if self.histos.has_key(i):
                drawobject=self.histos[i]
            elif self.functions.has_key(i):
                drawobject=self.functions[i]
            else:
                continue
            title = drawobject.getTitle()
            # Objects without a title get no legend entry.
            if title == '':
                continue
            else:
                out += ('\\rput[B%s](%s,%s){%s}\n' %(self.getLegendAlign(),rel_xpos_sign*0.1,ypos,title))
                out += ('\\rput[B%s](%s,%s){%s\n' %(self.getLegendAlign(),rel_xpos_sign*0.1,ypos,'%'))
                # Error-band sample behind the line sample.
                if drawobject.getErrorBands():
                    out += ('\\psframe[linewidth=0pt,linestyle=none,fillstyle=solid,fillcolor=%s,opacity=%s]' %(drawobject.getErrorBandColor(),drawobject.getErrorBandOpacity()))
                    out += ('(%s, 0.033)(%s, 0.001)\n' %(xpos1, xpos2))
                out += ('\\psline[linestyle=' + drawobject.getLineStyle() \
                            + ', linecolor=' + drawobject.getLineColor() \
                            + ', linewidth=' + drawobject.getLineWidth() \
                            + ', strokeopacity=' + drawobject.getLineOpacity() \
                            + ', opacity=' + drawobject.getFillOpacity())
                if drawobject.getLineDash()!='':
                    out += (', dash=' + drawobject.getLineDash())
                # Filled objects get a closed sample box, plain ones a single line.
                if drawobject.getFillStyle()!='none':
                    out += (', fillstyle=' + drawobject.getFillStyle() \
                                + ', fillcolor=' + drawobject.getFillColor() \
                                + ', hatchcolor=' + drawobject.getHatchColor() \
                                + ']{C-C}(%s, 0.030)(%s, 0.030)(%s, 0.004)(%s, 0.004)(%s, 0.030)\n' \
                                %(xpos1, xpos2, xpos2, xpos1, xpos1))
                else:
                    out += ('](%s, 0.016)(%s, 0.016)\n' %(xpos1, xpos2))
                # Marker sample on top of the line sample.
                if drawobject.getPolyMarker() != '':
                    out += ('  \\psdot[dotstyle=' + drawobject.getPolyMarker() \
                                + ', dotsize=' + drawobject.getDotSize() \
                                + ', dotscale=' + drawobject.getDotScale() \
                                + ', linecolor=' + drawobject.getLineColor() \
                                + ', linewidth=' + drawobject.getLineWidth() \
                                + ', linestyle=' + drawobject.getLineStyle() \
                                + ', fillstyle=' + drawobject.getFillStyle() \
                                + ', fillcolor=' + drawobject.getFillColor() \
                                + ', strokeopacity=' + drawobject.getLineOpacity() \
                                + ', opacity=' + drawobject.getFillOpacity() \
                                + ', hatchcolor=' + drawobject.getHatchColor())
                    if drawobject.getFillStyle()!='none':
                        out += ('](%s, 0.028)\n' % (rel_xpos_sign*-0.06))
                    else:
                        out += ('](%s, 0.016)\n' % (rel_xpos_sign*-0.06))
                out += ('}\n')
                ypos -= 0.075*6/self.description['PlotSizeY']
        # Extra free-form lines, separated by '\\' in 'CustomLegend'.
        if self.description.has_key('CustomLegend'):
            for i in self.description['CustomLegend'].strip().split('\\\\'):
                out += ('\\rput[B%s](%s,%s){%s}\n' %(self.getLegendAlign(),rel_xpos_sign*0.1,ypos,i))
                ypos -= 0.075*6/self.description['PlotSizeY']
        out += ('}\n')
        return out
    def getLegendXPos(self):
        """X anchor of the legend box in frame coordinates (string)."""
        if self.description.has_key('LegendXPos'):
            return self.description['LegendXPos']
        else:
            if self.getLegendAlign()=='r':
                return '0.95'
            else:
                return '0.53'
    def getLegendYPos(self):
        """Y anchor of the legend box in frame coordinates (string)."""
        if self.description.has_key('LegendYPos'):
            return self.description['LegendYPos']
        else:
            return '0.93'
    def getLegendAlign(self):
        """Legend alignment: 'l' (default) or 'r'."""
        if self.description.has_key('LegendAlign'):
            return self.description['LegendAlign']
        else:
            return 'l'
class ColorScale(object):
    """Draws the z-axis colour bar (gradient strip + frame + z ticks) for 2-D plots."""
    def __init__(self, description, coors):
        self.description = description
        self.coors = coors
    def draw(self):
        """Return the PSTricks code for the colour scale, placed right of the frame."""
        out = ''
        out += '\n%\n% ColorScale\n%\n'
        out += '\\rput(1,0){\n'
        out += '  \\psset{xunit=4mm}\n'
        out += '  \\rput(0.5,0){\n'
        # 130 stacked frames sample the 'gradientcolors' palette bottom-to-top
        # (yunit 0.0076923 ~ 1/130 of the frame height).
        out += '    \\psset{yunit=0.0076923, linestyle=none, fillstyle=solid}\n'
        out += '    \\multido{\\ic=0+1,\\id=1+1}{130}{\n'
        out += '      \\psframe[fillcolor={gradientcolors!![\\ic]},dimen=inner,linewidth=0.1pt](0, \\ic)(1, \\id)\n'
        out += '    }\n'
        out += '  }\n'
        out += '  \\rput(0.5,0){\n'
        out += '    \\psframe[linewidth=0.3pt,dimen=middle](0,0)(1,1)\n'
        # Tick-mark counts: -1 means "use the automatic default".
        zcustommajortickmarks = int(self.description.get('ZMajorTickMarks', -1))
        zcustomminortickmarks = int(self.description.get('ZMinorTickMarks', -1))
        zcustommajorticks=[]
        zcustomminorticks=[]
        if self.description.has_key('ZCustomMajorTicks') and self.description['ZCustomMajorTicks']!='':
            # TODO: Would be nice to have less invisible separation of the custom ticks than split on tabs
            ticks = self.description['ZCustomMajorTicks'].strip().split('\t')
            # Major ticks are "value<TAB>label" pairs; odd-length input is ignored.
            if not len(ticks)%2:
                for i in range(0,len(ticks),2):
                    zcustommajorticks.append({'Value': float(ticks[i]), 'Label': ticks[i+1]})
        if self.description.has_key('ZCustomMinorTicks') and self.description['ZCustomMinorTicks']!='':
            # TODO: Would be nice to have less invisible separation of the custom ticks than split on tabs
            ticks = self.description['ZCustomMinorTicks'].strip().split('\t')
            for i in range(len(ticks)):
                zcustomminorticks.append({'Value': float(ticks[i])})
        if (self.description.has_key('PlotZTickLabels') and self.description['PlotZTickLabels']=='0'):
            drawzlabels=False
        else:
            drawzlabels=True
        zticks = ZTicks(self.description, self.coors)
        out += zticks.draw(custommajortickmarks=zcustommajortickmarks,\
                           customminortickmarks=zcustomminortickmarks,\
                           custommajorticks=zcustommajorticks,\
                           customminorticks=zcustomminorticks,
                           drawlabels=drawzlabels)
        out += '  }\n'
        out += '}\n'
        return out
class Labels(object):
    """Draws the plot title and the x/y/z axis labels around the frame."""
    def __init__(self, description):
        self.description = description
    def draw(self, axis=None):
        """Return PSTricks code for the requested labels.

        axis: list of label keys to draw ('Title', 'XLabel', 'YLabel',
        'ZLabel'); None or an empty list draws every label that is present
        in the description.  The separation options ('XLabelSep' etc.) are
        offsets in units of \\labelsep.
        """
        # Fix: the original used a mutable default argument (axis=[]);
        # None is the safe spelling and keeps the "empty means all" behavior.
        if axis is None:
            axis = []
        out = ""
        out += ('\n%\n% Labels\n%\n')
        if 'Title' in self.description and (axis.count('Title') or axis == []):
            out += ('\\rput(0,1){\\rput[lB](0, 1.7\\labelsep){\\normalsize '+self.description['Title']+'}}\n')
        if 'XLabel' in self.description and (axis.count('XLabel') or axis == []):
            xlabelsep = 4.7
            if 'XLabelSep' in self.description:
                xlabelsep = float(self.description['XLabelSep'])
            out += ('\\rput(1,0){\\rput[rB](0,-%4.3f\\labelsep){\\normalsize '%(xlabelsep) +self.description['XLabel']+'}}\n')
        if 'YLabel' in self.description and (axis.count('YLabel') or axis == []):
            ylabelsep = 6.5
            if 'YLabelSep' in self.description:
                ylabelsep = float(self.description['YLabelSep'])
            # Rotated 90 degrees, placed left of the frame.
            out += ('\\rput(0,1){\\rput[rB]{90}(-%4.3f\\labelsep,0){\\normalsize '%(ylabelsep) +self.description['YLabel']+'}}\n')
        if 'ZLabel' in self.description and (axis.count('ZLabel') or axis == []):
            zlabelsep = 5.3
            if 'ZLabelSep' in self.description:
                zlabelsep = float(self.description['ZLabelSep'])
            # Rotated 270 degrees, placed right of the colour scale.
            out += ('\\rput(1,1){\\rput(%4.3f\\labelsep,0){\\psset{xunit=4mm}\\rput[lB]{270}(1.5,0){\\normalsize '%(zlabelsep) +self.description['ZLabel']+'}}}\n')
        return out
class Special(object):
    """Verbatim LaTeX/PSTricks block copied into the output.

    Occurrences of \\physicscoor(x,y), \\physicsxcoor(x,y) and
    \\physicsycoor(x,y) are rewritten from physics coordinates into frame
    coordinates before the lines are emitted.
    """
    def __init__(self, f):
        self.description = {}
        self.data = []
        self.read_input(f)
    def read_input(self, f):
        """Collect raw lines until the END SPECIAL marker, skipping comments."""
        for line in f:
            if is_end_marker(line, 'SPECIAL'):
                break
            elif is_comment(line):
                continue
            else:
                self.data.append(line)
    def draw(self, coors):
        """Return the stored lines with all \\physics*coor coordinates converted.

        The converted lines are written back into self.data so repeated draws
        do not convert twice.
        """
        out = ""
        out += ('\n%\n% Special\n%\n')
        import re
        regex = re.compile(r'^(.*?)(\\physics[xy]?coor)\(\s?([0-9\.eE+-]+)\s?,\s?([0-9\.eE+-]+)\s?\)(.*)')
        # TODO: More precise number string matching, something like this:
        # num = r"-?[0-9]*(?:\.[0-9]*)(?:[eE][+-]?\d+]"
        # regex = re.compile(r'^(.*?)(\\physics[xy]?coor)\(\s?(' + num + ')\s?,\s?(' + num + ')\s?\)(.*)')
        for idx, line in enumerate(self.data):
            # Fix: the original index loop ran regex.search twice per iteration;
            # search once and reuse the match object.
            match = regex.search(line)
            while match:
                xcoor, ycoor = float(match.group(3)), float(match.group(4))
                if match.group(2)[1:] in ("physicscoor", "physicsxcoor"):
                    xcoor = coors.phys2frameX(xcoor)
                if match.group(2)[1:] in ("physicscoor", "physicsycoor"):
                    ycoor = coors.phys2frameY(ycoor)
                line = "%s(%f, %f)%s" % (match.group(1), xcoor, ycoor, match.group(5))
                match = regex.search(line)
            self.data[idx] = line
            out += line + '\n'
        return out
class DrawableObject(object):
    """Base class for drawable items (histograms, functions).

    Provides style accessors that read self.description (a dict of plot
    options, set by subclasses) with sensible defaults, plus PSTricks
    clipping and \\psset helpers.

    Fix: the deprecated dict.has_key() (removed in Python 3) is replaced by
    dict.get()/'in', which behave identically here and also run on Python 3.
    """
    def __init__(self, f):
        pass
    def getTitle(self):
        """Legend title; '' if unset (suppresses the legend entry)."""
        return self.description.get('Title', '')
    def getLineStyle(self):
        """Line style, default 'solid'.

        As a side effect, the 'dashdotted'/'dotdashed' synonyms are rewritten
        to 'dashed' with an explicit 'LineDash' pattern.
        """
        if 'LineStyle' in self.description:
            ## I normally like there to be "only one way to do it", but providing
            ## this dashdotted/dotdashed synonym just seems humane ;-)
            if self.description['LineStyle'] in ('dashdotted', 'dotdashed'):
                self.description['LineStyle'] = 'dashed'
                self.description['LineDash'] = '3pt 3pt .8pt 3pt'
            return self.description['LineStyle']
        else:
            return 'solid'
    def getLineDash(self):
        """Dash pattern, '' if unset."""
        if 'LineDash' in self.description:
            # Check if LineStyle=='dashdotted' before returning something
            self.getLineStyle()
            return self.description['LineDash']
        else:
            return ''
    def getLineWidth(self):
        return self.description.get('LineWidth', '0.8pt')
    def getLineColor(self):
        return self.description.get('LineColor', 'black')
    def getLineOpacity(self):
        return self.description.get('LineOpacity', '1.0')
    def getFillColor(self):
        return self.description.get('FillColor', 'white')
    def getFillOpacity(self):
        return self.description.get('FillOpacity', '1.0')
    def getHatchColor(self):
        return self.description.get('HatchColor', 'black')
    def getFillStyle(self):
        return self.description.get('FillStyle', 'none')
    def getPolyMarker(self):
        """Marker (dot) style; '' means no markers."""
        return self.description.get('PolyMarker', '')
    def getDotSize(self):
        return self.description.get('DotSize', '2pt 2')
    def getDotScale(self):
        return self.description.get('DotScale', '1')
    def getErrorBars(self):
        """True if error bars should be drawn ('ErrorBars' option, '0'/'1')."""
        return bool(int(self.description.get('ErrorBars', 0)))
    def getErrorBands(self):
        """True if an error band should be drawn ('ErrorBands' option, '0'/'1')."""
        return bool(int(self.description.get('ErrorBands', 0)))
    def getErrorBandColor(self):
        return self.description.get('ErrorBandColor', 'yellow')
    def getErrorBandOpacity(self):
        return self.description.get('ErrorBandOpacity', '1.0')
    def getSmoothLine(self):
        """True if the curve should be drawn smoothed ('SmoothLine' option)."""
        return bool(int(self.description.get('SmoothLine', 0)))
    def startclip(self):
        """Open a clipping region restricted to the unit plot frame."""
        return '\\psclip{\\psframe[linewidth=0, linestyle=none](0,0)(1,1)}\n'
    def stopclip(self):
        """Close the clipping region opened by startclip()."""
        return '\\endpsclip\n'
    def startpsset(self):
        """Set the global PSTricks drawing parameters to this object's style."""
        out = ""
        out += ('\\psset{linecolor='+self.getLineColor()+'}\n')
        out += ('\\psset{linewidth='+self.getLineWidth()+'}\n')
        out += ('\\psset{linestyle='+self.getLineStyle()+'}\n')
        out += ('\\psset{fillstyle='+self.getFillStyle()+'}\n')
        out += ('\\psset{fillcolor='+self.getFillColor()+'}\n')
        out += ('\\psset{hatchcolor='+self.getHatchColor()+'}\n')
        out += ('\\psset{strokeopacity='+self.getLineOpacity()+'}\n')
        out += ('\\psset{opacity='+self.getFillOpacity()+'}\n')
        if self.getLineDash()!='':
            out += ('\\psset{dash='+self.getLineDash()+'}\n')
        return out
    def stoppsset(self):
        """Reset the global PSTricks drawing parameters to their defaults."""
        out = ""
        out += ('\\psset{linecolor=black}\n')
        out += ('\\psset{linewidth=0.8pt}\n')
        out += ('\\psset{linestyle=solid}\n')
        out += ('\\psset{fillstyle=none}\n')
        out += ('\\psset{fillcolor=white}\n')
        out += ('\\psset{hatchcolor=black}\n')
        out += ('\\psset{strokeopacity=1.0}\n')
        out += ('\\psset{opacity=1.0}\n')
        return out
class Function(DrawableObject):
    """A user-defined function curve read from a FUNCTION block.

    The block's 'Code' property switches parsing into code mode: all
    following lines become the body of plotfunction(x).  Lines before it
    are key=value style options stored in self.description.
    """
    def __init__(self, f):
        self.description = {}
        self.read_input(f)
    def read_input(self, f):
        """Parse options and compile the inline code into self.plotfunction."""
        self.code='def plotfunction(x):\n'
        iscode=False
        for line in f:
            if is_end_marker(line, 'FUNCTION'):
                break
            elif is_comment(line):
                continue
            else:
                m = pat_property.match(line)
                if iscode:
                    # Indent user code so it forms the plotfunction body.
                    self.code+='    '+line
                elif m:
                    prop, value = m.group(1,2)
                    if prop=='Code':
                        iscode=True
                    else:
                        self.description[prop] = value
        if not iscode:
            print '++++++++++ ERROR: No code in function'
        else:
            # NOTE: relies on Python 2 exec semantics — exec'ing in this frame
            # makes the bare name `plotfunction` resolvable on the next line.
            foo = compile(self.code, '<string>', 'exec')
            exec(foo)
            self.plotfunction = plotfunction
    def draw(self,coors):
        """Sample plotfunction over the visible x range and emit a \\pscurve.

        'XMin'/'XMax' options override the frame's x range; 500 sample points
        are used.  For filled styles the path is closed down to the bottom
        frame edge on both sides.
        """
        out = ""
        out += self.startclip()
        out += self.startpsset()
        xmin = coors.xmin()
        if self.description.has_key('XMin') and self.description['XMin']:
            xmin = float(self.description['XMin'])
        xmax=coors.xmax()
        if self.description.has_key('XMax') and self.description['XMax']:
            xmax=float(self.description['XMax'])
        # TODO: Space sample points logarithmically if LogX=1
        dx = (xmax-xmin)/500.
        x = xmin-dx
        out += '\\pscurve'
        if self.description.has_key('FillStyle') and self.description['FillStyle']!='none':
            out += '(%s,%s)\n' % (coors.strphys2frameX(xmin),coors.strphys2frameY(coors.ymin()))
        while x < (xmax+2*dx):
            y = self.plotfunction(x)
            out += ('(%s,%s)\n' % (coors.strphys2frameX(x), coors.strphys2frameY(y)))
            x += dx
        if self.description.has_key('FillStyle') and self.description['FillStyle']!='none':
            out += '(%s,%s)\n' % (coors.strphys2frameX(xmax),coors.strphys2frameY(coors.ymin()))
        out += self.stoppsset()
        out += self.stopclip()
        return out
class Histogram(DrawableObject):
def __init__(self, f, p=None):
self.description = {}
self.is2dim = False
self.data = []
self.read_input_data(f)
self.sigmabinvalue = None
self.meanbinvalue = None
self.path = p
def read_input_data(self, f):
for line in f:
if is_end_marker(line, 'HISTOGRAM'):
break
elif is_comment(line):
continue
else:
line = line.rstrip()
m = pat_property.match(line)
if m:
prop, value = m.group(1,2)
self.description[prop] = value
else:
## Detect symm errs
linearray = line.split()
if len(linearray) == 4:
self.data.append({'LowEdge': float(linearray[0]),
'UpEdge': float(linearray[1]),
'Content': float(linearray[2]),
'Error': [float(linearray[3]),float(linearray[3])]})
## Detect asymm errs
elif len(linearray) == 5:
self.data.append({'LowEdge': float(linearray[0]),
'UpEdge': float(linearray[1]),
'Content': float(linearray[2]),
'Error': [float(linearray[3]),float(linearray[4])]})
## Detect two-dimensionality
elif len(linearray) in [6,7]:
self.is2dim = True
# If asymm z error, use the max or average of +- error
err = float(linearray[5])
if len(linearray) == 7:
if self.description.get("ShowMaxZErr", 1):
err = max(err, float(linearray[6]))
else:
err = 0.5 * (err + float(linearray[6]))
self.data.append({'LowEdge': [float(linearray[0]), float(linearray[2])],
'UpEdge': [float(linearray[1]), float(linearray[3])],
'Content': float(linearray[4]),
'Error': err})
## Unknown histo format
else:
raise RuntimeError("Unknown HISTOGRAM data line format with %d entries" % len(linearray))
    def mangle_input(self):
        """Apply in-place transformations requested by the description:
        normalisation ('NormalizeToIntegral'/'NormalizeToSum'), scaling
        ('Scale'), and rebinning ('Rebin', with 'ErrorType' 'stat' or 'env').
        """
        if (self.description.has_key('NormalizeToIntegral') and self.description['NormalizeToIntegral']=='1') or \
           (self.description.has_key('NormalizeToSum') and self.description['NormalizeToSum']=='1'):
            if (self.description.has_key('NormalizeToIntegral') and self.description['NormalizeToIntegral']=='1') and \
               (self.description.has_key('NormalizeToSum') and self.description['NormalizeToSum']=='1'):
                print 'Can\'t normalize to Integral and to Sum at the same time. Will normalize to the Sum.'
            # Accumulate either the plain sum of contents or the integral
            # (content times bin width), then divide everything by it.
            foo = 0
            for i in range(len(self.data)):
                if self.description.has_key('NormalizeToSum') and self.description['NormalizeToSum']=='1':
                    foo += self.data[i]['Content']
                else:
                    foo += self.data[i]['Content']*(self.data[i]['UpEdge']-self.data[i]['LowEdge'])
            for i in range(len(self.data)):
                self.data[i]['Content'] /= foo
                self.data[i]['Error'][0] /= foo
                self.data[i]['Error'][1] /= foo
        if self.description.has_key('Scale') and self.description['Scale']!='':
            scale = float(self.description['Scale'])
            for i in range(len(self.data)):
                self.data[i]['Content'] *= scale
                self.data[i]['Error'][0] *= scale
                self.data[i]['Error'][1] *= scale
        if self.description.has_key('Rebin') and self.description['Rebin']!='':
            rebin=int(self.description['Rebin'])
            errortype = "stat"
            if self.description.has_key('ErrorType') and self.description['ErrorType']!='':
                errortype = self.description['ErrorType']
            newdata=[]
            if rebin>=2:
                # Merge groups of `rebin` adjacent bins; a trailing incomplete
                # group is dropped.
                for i in range(0,(len(self.data)/rebin)*rebin,rebin):
                    foo=0.      # integral of the merged bin
                    barl=0.     # lower error accumulator
                    baru=0.     # upper error accumulator
                    for j in range(rebin):
                        binwidth=self.data[i+j]['UpEdge']-self.data[i+j]['LowEdge']
                        foo +=self.data[i+j]['Content']*binwidth
                        if errortype=="stat":
                            # Statistical errors add in quadrature.
                            barl+=(binwidth*self.data[i+j]['Error'][0])**2
                            baru+=(binwidth*self.data[i+j]['Error'][1])**2
                        elif errortype=="env":
                            # Envelope errors: integrate the band edges.
                            barl+=(self.data[i+j]['Content']-self.data[i+j]['Error'][0])*binwidth
                            baru+=(self.data[i+j]['Content']+self.data[i+j]['Error'][1])*binwidth
                        else:
                            logging.error("Rebinning for ErrorType not implemented.")
                            sys.exit(1)
                    newbinwidth=self.data[i+rebin-1]['UpEdge']-self.data[i]['LowEdge']
                    newcentral=foo/newbinwidth
                    if errortype=="stat":
                        newerror=[sqrt(barl)/newbinwidth,sqrt(baru)/newbinwidth]
                    elif errortype=="env":
                        newerror=[(foo-barl)/newbinwidth,(baru-foo)/newbinwidth]
                    newdata.append({'LowEdge': self.data[i]['LowEdge'],
                                    'UpEdge': self.data[i+rebin-1]['UpEdge'],
                                    'Content': newcentral,
                                    'Error': newerror})
                self.data=newdata
    def add(self, name):
        """Add histogram `name` to this one bin-by-bin, in place.

        Errors are combined in quadrature.  Bins whose edges differ (beyond
        fuzzyeq tolerance) are skipped with a warning; bin counts should match.
        """
        if len(self.data) != len(name.data):
            print '+++ Error in Histogram.add() for %s: different numbers of bins' % self.path
        for i in range(len(self.data)):
            if fuzzyeq(self.data[i]['LowEdge'], name.data[i]['LowEdge']) and \
               fuzzyeq(self.data[i]['UpEdge'], name.data[i]['UpEdge']):
                self.data[i]['Content'] += name.data[i]['Content']
                self.data[i]['Error'][0] = sqrt(self.data[i]['Error'][0]**2 + name.data[i]['Error'][0]**2)
                self.data[i]['Error'][1] = sqrt(self.data[i]['Error'][1]**2 + name.data[i]['Error'][1]**2)
            else:
                print '+++ Error in Histogram.add() for %s: binning of histograms differs' % self.path
    def divide(self, name):
        """Divide this histogram by `name` bin-by-bin, in place (self/name).

        Errors are simply scaled by the divisor content (no error propagation
        from `name`).  Division by zero yields content 1 and errors 0.
        """
        #print name.path, self.path
        if len(self.data) != len(name.data):
            print '+++ Error in Histogram.divide() for %s: different numbers of bins' % self.path
        for i in range(len(self.data)):
            if fuzzyeq(self.data[i]['LowEdge'], name.data[i]['LowEdge']) and \
               fuzzyeq(self.data[i]['UpEdge'], name.data[i]['UpEdge']):
                try:
                    self.data[i]['Error'][0] /= name.data[i]['Content']
                except ZeroDivisionError:
                    self.data[i]['Error'][0]=0.
                try:
                    self.data[i]['Error'][1] /= name.data[i]['Content']
                except ZeroDivisionError:
                    self.data[i]['Error'][1]=0.
                try:
                    self.data[i]['Content'] /= name.data[i]['Content']
                except ZeroDivisionError:
                    self.data[i]['Content']=1.
#                self.data[i]['Error'][0] = sqrt(self.data[i]['Error'][0]**2 + name.data[i]['Error'][0]**2)
#                self.data[i]['Error'][1] = sqrt(self.data[i]['Error'][1]**2 + name.data[i]['Error'][1]**2)
            else:
                print '+++ Error in Histogram.divide() for %s: binning of histograms differs' % self.path
    def dividereverse(self, name):
        """Replace this histogram with name/self bin-by-bin, in place.

        Used for the Data/MC ratio mode: the errors become `name`'s errors
        scaled by this histogram's content.  Division by zero yields content 1
        and errors 0.
        """
        if len(self.data) != len(name.data):
            print '+++ Error in Histogram.dividereverse() for %s: different numbers of bins' % self.path
        for i in range(len(self.data)):
            if fuzzyeq(self.data[i]['LowEdge'], name.data[i]['LowEdge']) and \
               fuzzyeq(self.data[i]['UpEdge'], name.data[i]['UpEdge']):
                try:
                    self.data[i]['Error'][0] = name.data[i]['Error'][0]/self.data[i]['Content']
                except ZeroDivisionError:
                    self.data[i]['Error'][0]=0.
                try:
                    self.data[i]['Error'][1] = name.data[i]['Error'][1]/self.data[i]['Content']
                except ZeroDivisionError:
                    self.data[i]['Error'][1]=0.
                try:
                    self.data[i]['Content'] = name.data[i]['Content']/self.data[i]['Content']
                except ZeroDivisionError:
                    self.data[i]['Content']=1.
            else:
                print '+++ Error in Histogram.dividereverse(): binning of histograms differs'
    def deviation(self, name):
        """Replace contents with the deviation (self-name)/combined-error, in place.

        The combined error is half the quadrature sum of both histograms'
        summed (lower+upper) errors; this histogram's errors are divided by
        `name`'s.  Any division by zero yields 0.
        """
        if len(self.data) != len(name.data):
            print '+++ Error in Histogram.deviation() for %s: different numbers of bins' % self.path
        for i in range(len(self.data)):
            if fuzzyeq(self.data[i]['LowEdge'], name.data[i]['LowEdge']) and \
               fuzzyeq(self.data[i]['UpEdge'], name.data[i]['UpEdge']):
                self.data[i]['Content'] -= name.data[i]['Content']
                try:
                    self.data[i]['Content'] /= 0.5*sqrt((name.data[i]['Error'][0] + name.data[i]['Error'][1])**2 + \
                                                        (self.data[i]['Error'][0] + self.data[i]['Error'][1])**2)
                except ZeroDivisionError:
                    self.data[i]['Content'] = 0.0
                try:
                    self.data[i]['Error'][0] /= name.data[i]['Error'][0]
                except ZeroDivisionError:
                    self.data[i]['Error'][0] = 0.0
                try:
                    self.data[i]['Error'][1] /= name.data[i]['Error'][1]
                except ZeroDivisionError:
                    self.data[i]['Error'][1] = 0.0
            else:
                print '+++ Error in Histogram.deviation() for %s: binning of histograms differs' % self.path
def getChi2(self, name):
chi2 = 0.
for i in range(len(self.data)):
if fuzzyeq(self.data[i]['LowEdge'], name.data[i]['LowEdge']) and \
fuzzyeq(self.data[i]['UpEdge'], name.data[i]['UpEdge']):
try:
chi2 += (self.data[i]['Content']-name.data[i]['Content'])**2/((0.5*self.data[i]['Error'][0]+0.5*self.data[i]['Error'][1])**2 + (0.5*name.data[i]['Error'][0]+0.5*name.data[i]['Error'][1])**2)
except ZeroDivisionError:
pass
else:
print '+++ Error in Histogram.getChi2() for %s: binning of histograms differs' % self.path
return chi2/len(self.data)
def getSigmaBinValue(self):
if self.sigmabinvalue==None:
self.sigmabinvalue = 0.
sumofweights = 0.
for i in range(len(self.data)):
if self.is2dim:
binwidth = abs( (self.data[i]['UpEdge'][0] - self.data[i]['LowEdge'][0])
*(self.data[i]['UpEdge'][1] - self.data[i]['LowEdge'][1]))
else:
binwidth = abs(self.data[i]['UpEdge'] - self.data[i]['LowEdge'])
self.sigmabinvalue += binwidth*(self.data[i]['Content']-self.getMeanBinValue())**2
sumofweights += binwidth
self.sigmabinvalue = sqrt(self.sigmabinvalue/sumofweights)
return self.sigmabinvalue
def getMeanBinValue(self):
if self.meanbinvalue==None:
self.meanbinvalue = 0.
sumofweights = 0.
for i in range(len(self.data)):
if self.is2dim:
binwidth = abs( (self.data[i]['UpEdge'][0] - self.data[i]['LowEdge'][0])
*(self.data[i]['UpEdge'][1] - self.data[i]['LowEdge'][1]))
else:
binwidth = abs(self.data[i]['UpEdge'] - self.data[i]['LowEdge'])
self.meanbinvalue += binwidth*self.data[i]['Content']
sumofweights += binwidth
self.meanbinvalue /= sumofweights
return self.meanbinvalue
def getCorrelation(self, name):
correlation = 0.
sumofweights = 0.
for i in range(len(self.data)):
if fuzzyeq(self.data[i]['LowEdge'], name.data[i]['LowEdge']) and \
fuzzyeq(self.data[i]['UpEdge'], name.data[i]['UpEdge']):
if self.is2dim:
binwidth = abs( (self.data[i]['UpEdge'][0] - self.data[i]['LowEdge'][0])
* (self.data[i]['UpEdge'][1] - self.data[i]['LowEdge'][1]) )
else:
binwidth = abs(self.data[i]['UpEdge'] - self.data[i]['LowEdge'])
correlation += binwidth * ( self.data[i]['Content'] - self.getMeanBinValue() ) \
* ( name.data[i]['Content'] - name.getMeanBinValue() )
sumofweights += binwidth
else:
print '+++ Error in Histogram.getCorrelation(): binning of histograms differs' % self.path
correlation /= sumofweights
try:
correlation /= self.getSigmaBinValue()*name.getSigmaBinValue()
except ZeroDivisionError:
correlation = 0
return correlation
    def getRMSdistance(self,name):
        """Return the bin-width-weighted RMS distance between the two
        mean-subtracted histograms (used for Taylor diagrams).
        """
        distance = 0.
        sumofweights = 0.
        for i in range(len(self.data)):
            if fuzzyeq(self.data[i]['LowEdge'], name.data[i]['LowEdge']) and \
               fuzzyeq(self.data[i]['UpEdge'], name.data[i]['UpEdge']):
                if self.is2dim:
                    # 2-D bins are weighted by their cell area.
                    binwidth = abs( (self.data[i]['UpEdge'][0] - self.data[i]['LowEdge'][0])
                                  * (self.data[i]['UpEdge'][1] - self.data[i]['LowEdge'][1]) )
                else:
                    binwidth = abs(self.data[i]['UpEdge'] - self.data[i]['LowEdge'])
                distance += binwidth * ( (self.data[i]['Content'] - self.getMeanBinValue())
                                        -(name.data[i]['Content'] - name.getMeanBinValue()))**2
                sumofweights += binwidth
            else:
                print '+++ Error in Histogram.getRMSdistance() for %s: binning of histograms differs' % self.path
        distance = sqrt(distance/sumofweights)
        return distance
    def draw(self,coors):
        """Render this histogram as a PSTricks/LaTeX string, using *coors*
        to map physical coordinates into the unit plot frame.

        2D histograms become one filled \\psframe per bin, coloured from the
        'gradientcolors' palette by z-content.  1D histograms get (in order):
        optional error bands, optional error bars, the bin outline as a
        \\psline (or \\psbezier if SmoothLine), and optional poly markers.
        Bins with NaN content/errors are skipped and a warning is printed.
        """
        seen_nan = False
        out = ""
        out += self.startclip()
        out += self.startpsset()
        if self.data:
            out += "% START DATA\n"
            if self.is2dim:
                for i in xrange(len(self.data)):
                    out += ('\\psframe')
                    ## Map content to a palette index in [0,129]; clamp overflows
                    color=int(129*coors.phys2frameZ(self.data[i]['Content']))
                    if self.data[i]['Content']>coors.zmax():
                        color=129
                    if self.data[i]['Content']<coors.zmin():
                        color=0
                    ## Bins at or below zmin are drawn white (background)
                    if self.data[i]['Content']<=coors.zmin():
                        out += ('[linewidth=0pt, linestyle=none, fillstyle=solid, fillcolor=white]')
                    else:
                        out += ('[linewidth=0pt, linestyle=none, fillstyle=solid, fillcolor={gradientcolors!!['+str(color)+']}]')
                    out += ('(' + coors.strphys2frameX(self.data[i]['LowEdge'][0]) + ', ' \
                                + coors.strphys2frameY(self.data[i]['LowEdge'][1]) + ')(' \
                                + coors.strphys2frameX(self.data[i]['UpEdge'][0]) + ', ' \
                                + coors.strphys2frameY(self.data[i]['UpEdge'][1]) + ')\n')
            else:
                if self.getErrorBands():
                    ## Bands follow the binning, so line smoothing is disabled
                    self.description['SmoothLine']=0
                    for i in xrange(len(self.data)):
                        out += ('\\psframe[dimen=inner,linewidth=0pt,linestyle=none,fillstyle=solid,fillcolor=%s,opacity=%s]' %(self.getErrorBandColor(),self.getErrorBandOpacity()))
                        out += ('(' + coors.strphys2frameX(self.data[i]['LowEdge']) + ', ' \
                                    + coors.strphys2frameY(self.data[i]['Content']-self.data[i]['Error'][0]) + ')(' \
                                    + coors.strphys2frameX(self.data[i]['UpEdge']) + ', ' \
                                    + coors.strphys2frameY(self.data[i]['Content']+self.data[i]['Error'][1]) + ')\n')
                if self.getErrorBars():
                    for i in xrange(len(self.data)):
                        if isnan(self.data[i]['Content']) or isnan(self.data[i]['Error'][0]) or isnan(self.data[i]['Error'][1]):
                            seen_nan = True
                            continue
                        ## Skip empty bins entirely (no bar, no tick)
                        if self.data[i]['Content']==0. and self.data[i]['Error']==[0.,0.]:
                            continue
                        ## Horizontal tick across the bin at the content value
                        out += ('\\psline')
                        out += ('(' + coors.strphys2frameX(self.data[i]['LowEdge']) + ', ' \
                                    + coors.strphys2frameY(self.data[i]['Content']) + ')(' \
                                    + coors.strphys2frameX(self.data[i]['UpEdge']) + ', ' \
                                    + coors.strphys2frameY(self.data[i]['Content']) + ')\n')
                        ## Vertical error bar at the bin centre
                        out += ('\\psline')
                        bincenter = coors.strphys2frameX(.5*(self.data[i]['LowEdge']+self.data[i]['UpEdge']))
                        out += ('(' + bincenter + ', ' \
                                    + coors.strphys2frameY(self.data[i]['Content']-self.data[i]['Error'][0]) + ')(' \
                                    + bincenter + ', ' \
                                    + coors.strphys2frameY(self.data[i]['Content']+self.data[i]['Error'][1]) + ')\n')
                if self.getSmoothLine():
                    out += '\\psbezier'
                else:
                    out += '\\psline'
                if (self.getFillStyle() != 'none'): # make sure that filled areas go all the way down to the x-axis
                    if (coors.phys2frameX(self.data[0]['LowEdge']) > 1e-4):
                        out += '(' + coors.strphys2frameX(self.data[0]['LowEdge']) + ', -0.1)\n'
                    else:
                        out += '(-0.1, -0.1)\n'
                for i in xrange(len(self.data)):
                    if isnan(self.data[i]['Content']):
                        seen_nan = True
                        continue
                    if self.getSmoothLine():
                        ## Smooth curve goes through bin centres only
                        out += ('(' + coors.strphys2frameX(0.5*(self.data[i]['LowEdge']+self.data[i]['UpEdge'])) + ', ' \
                                    + coors.strphys2frameY(self.data[i]['Content']) + ')\n')
                    else:
                        ## Step outline: flat top across each bin
                        out += ('(' + coors.strphys2frameX(self.data[i]['LowEdge']) + ', ' \
                                    + coors.strphys2frameY(self.data[i]['Content']) + ')(' \
                                    + coors.strphys2frameX(self.data[i]['UpEdge']) + ', ' \
                                    + coors.strphys2frameY(self.data[i]['Content']) + ')\n')
                    ## Join/separate data points, with vertical/diagonal lines
                    if (i+1 < len(self.data)): #< If this is not the last point
                        if self.description.get('ConnectBins', '1') != '1':
                            out += ('\\psline')
                        else:
                            ## If bins are joined, but there is a gap in binning, choose whether to fill the gap
                            if (abs(coors.phys2frameX(self.data[i]['UpEdge']) - coors.phys2frameX(self.data[i+1]['LowEdge'])) > 1e-4):
                                if self.description.get('ConnectGaps', '0') != '1':
                                    out += ('\\psline')
                                    # TODO: Perhaps use a new dashed line to fill the gap?
                if self.getFillStyle() != 'none': # make sure that filled areas go all the way down to the x-axis
                    if (coors.phys2frameX(self.data[-1]['UpEdge']) < 1-1e-4):
                        out += '(' + coors.strphys2frameX(self.data[-1]['UpEdge']) + ', -0.1)\n'
                    else:
                        out += '(1.1, -0.1)\n'
            #
            if self.getPolyMarker() != '':
                for i in xrange(len(self.data)):
                    if isnan(self.data[i]['Content']):
                        seen_nan = True
                        continue
                    if self.data[i]['Content']==0. and self.data[i]['Error']==[0.,0.]:
                        continue
                    ## One dot per bin, placed at the bin centre
                    out += ('\\psdot[dotstyle=%s,dotsize=%s,dotscale=%s](' %(self.getPolyMarker(),self.getDotSize(),self.getDotScale()) \
                            + coors.strphys2frameX(.5*(self.data[i]['LowEdge']+self.data[i]['UpEdge'])) + ', ' \
                            + coors.strphys2frameY(self.data[i]['Content']) + ')\n')
            out += "% END DATA\n"
        out += self.stoppsset()
        out += self.stopclip()
        if seen_nan:
            print "WARNING: NaN-valued value or error bar!"
        return out
# def is2dimensional(self):
# return self.is2dim
def getXMin(self):
if not self.data:
return 0
elif self.is2dim:
return min([self.data[i]['LowEdge'][0] for i in range(len(self.data))])
else:
return min([self.data[i]['LowEdge'] for i in range(len(self.data))])
def getXMax(self):
if not self.data:
return 1
elif self.is2dim:
return max([self.data[i]['UpEdge'][0] for i in range(len(self.data))])
else:
return max([self.data[i]['UpEdge'] for i in range(len(self.data))])
def getYMin(self, xmin, xmax, logy):
if not self.data:
return 0
elif self.is2dim:
return min([self.data[i]['LowEdge'][1] for i in range(len(self.data))])
else:
yvalues = []
for i in range(len(self.data)):
if ((self.data[i]['UpEdge'] > xmin or self.data[i]['LowEdge'] >= xmin) and \
(self.data[i]['LowEdge'] < xmax or self.data[i]['UpEdge'] <= xmax)):
foo = 0
if self.getErrorBars() or self.getErrorBands():
foo = self.data[i]['Content']-self.data[i]['Error'][0]
else:
foo = self.data[i]['Content']
if logy:
if foo>0: yvalues.append(foo)
else:
yvalues.append(foo)
if len(yvalues) > 0:
return min(yvalues)
else:
return self.data[0]['Content']
def getYMax(self, xmin, xmax):
if not self.data:
return 1
elif self.is2dim:
return max([self.data[i]['UpEdge'][1] for i in range(len(self.data))])
else:
yvalues = []
for i in range(len(self.data)):
if ((self.data[i]['UpEdge'] > xmin or self.data[i]['LowEdge'] >= xmin) and \
(self.data[i]['LowEdge'] < xmax or self.data[i]['UpEdge'] <= xmax)):
if self.getErrorBars() or self.getErrorBands():
yvalues.append(self.data[i]['Content']+self.data[i]['Error'][1])
else:
yvalues.append(self.data[i]['Content'])
if len(yvalues) > 0:
return max(yvalues)
else:
return self.data[0]['Content']
def getZMin(self, xmin, xmax, ymin, ymax):
if not self.is2dim:
return 0
zvalues = []
for i in range(len(self.data)):
if (self.data[i]['UpEdge'][0] > xmin and self.data[i]['LowEdge'][0] < xmax) and \
(self.data[i]['UpEdge'][1] > ymin and self.data[i]['LowEdge'][1] < ymax):
zvalues.append(self.data[i]['Content'])
return min(zvalues)
def getZMax(self, xmin, xmax, ymin, ymax):
if not self.is2dim:
return 0
zvalues = []
for i in range(len(self.data)):
if (self.data[i]['UpEdge'][0] > xmin and self.data[i]['LowEdge'][0] < xmax) and \
(self.data[i]['UpEdge'][1] > ymin and self.data[i]['LowEdge'][1] < ymax):
zvalues.append(self.data[i]['Content'])
return max(zvalues)
class Histo1D(Histogram):
    """One-dimensional histogram parsed from a make-plots .dat HISTO1D section."""
    def read_input_data(self, f):
        """Read properties and bins from *f* until the HISTO1D end marker.

        Accepted data-line formats:
          4 columns: low, high, content, symmetric error
          5 columns: low, high, content, minus-error, plus-error
          8 columns: YODA-style row; only the first four values are used
        Property lines (Key=Value) go into self.description.
        Raises Exception on any other column count.
        """
        for line in f:
            if is_end_marker(line, 'HISTO1D'):
                break
            elif is_comment(line):
                continue
            else:
                line = line.rstrip()
                m = pat_property.match(line)
                if m:
                    prop, value = m.group(1,2)
                    self.description[prop] = value
                else:
                    linearray = line.split()
                    ## Detect symm errs
                    if len(linearray) == 4:
                        self.data.append({'LowEdge': float(linearray[0]),
                                          'UpEdge': float(linearray[1]),
                                          'Content': float(linearray[2]),
                                          'Error': [float(linearray[3]),float(linearray[3])]})
                    ## Detect asymm errs
                    elif len(linearray) == 5:
                        self.data.append({'LowEdge': float(linearray[0]),
                                          'UpEdge': float(linearray[1]),
                                          'Content': float(linearray[2]),
                                          'Error': [float(linearray[3]),float(linearray[4])]})
                    ## Auto-compatibility with the YODA format: treat column 4 as a symm err
                    elif len(linearray) == 8:
                        self.data.append({'LowEdge': float(linearray[0]),
                                          'UpEdge': float(linearray[1]),
                                          'Content': float(linearray[2]),
                                          'Error': [float(linearray[3]),float(linearray[3])]})
                    else:
                        ## BUGFIX: the old message claimed "8 columns" although
                        ## 4- and 5-column lines are accepted as well
                        raise Exception('Histo1D data line does not have 4, 5 or 8 columns: '+line)
class Histo2D(Histogram):
    """Two-dimensional histogram parsed from a make-plots .dat HISTO2D section."""
    def read_input_data(self, f):
        """Read properties and bins from *f* until the HISTO2D end marker.

        Data lines have 6 columns (xlow xhigh ylow yhigh content err) or
        7 columns (separate plus/minus z errors, combined into one value:
        the max by default, the mean if ShowMaxZErr is falsy).
        Property lines (Key=Value) go into self.description.
        Raises Exception on any other column count.
        """
        self.is2dim = True #< Should really be done in a constructor, but this is easier for now...
        for line in f:
            if is_end_marker(line, 'HISTO2D'):
                break
            elif is_comment(line):
                continue
            else:
                line = line.rstrip()
                m = pat_property.match(line)
                if m:
                    prop, value = m.group(1,2)
                    self.description[prop] = value
                else:
                    linearray = line.split()
                    if len(linearray) in [6,7]:
                        # If asymm z error, use the max or average of +- error
                        err = float(linearray[5])
                        if len(linearray) == 7:
                            # NOTE(review): a string value "0" read from the .dat
                            # file is truthy here, so ShowMaxZErr=0 would still
                            # take the max -- confirm intended behaviour
                            if self.description.get("ShowMaxZErr", 1):
                                err = max(err, float(linearray[6]))
                            else:
                                err = 0.5 * (err + float(linearray[6]))
                        self.data.append({'LowEdge': [float(linearray[0]), float(linearray[2])],
                                          'UpEdge': [float(linearray[1]), float(linearray[3])],
                                          'Content': float(linearray[4]),
                                          'Error': err})
                    else:
                        ## BUGFIX: the old message wrongly referred to "Histo1D"
                        raise Exception('Histo2D data line does not have 6 or 7 columns: '+line)
class Frame(object):
    """Draws the rectangular frame around the plot area, optionally masking
    the margins with thick coloured lines (FrameColor) so that content
    clipped outside the axes is covered up."""
    def __init__(self):
        # Line width of the visible frame rectangle
        self.framelinewidth = '0.3pt'
    def draw(self,inputdata):
        """Return the PSTricks code for the frame (and optional margin
        masking bars) of *inputdata*.  Frame coordinates are in the unit
        frame; margin bars are positioned in cm offsets around it."""
        out = ('\n%\n% Frame\n%\n')
        if inputdata.description.has_key('FrameColor') and inputdata.description['FrameColor']!=None:
            color = inputdata.description['FrameColor']
            # We want to draw this frame only once, so set it to False for next time:
            inputdata.description['FrameColor']=None
            # Calculate how high and wide the overall plot is
            # height[0] = extent above y=0, height[1] = extent below (ratio plot)
            height = [0,0]
            width = inputdata.description['PlotSizeX']
            if inputdata.description.has_key('RatioPlot') and inputdata.description['RatioPlot']=='1':
                height[1] = -inputdata.description['RatioPlotSizeY']
            if not (inputdata.description.has_key('MainPlot') and inputdata.description['MainPlot']=='0'):
                height[0] = inputdata.description['PlotSizeY']
            else:
                # Ratio plot only: it takes the place of the main plot
                height[0] = -height[1]
                height[1] = 0
            # Get the margin widths
            left = inputdata.description['LeftMargin']+0.1
            right = inputdata.description['RightMargin']+0.1
            top = inputdata.description['TopMargin']+0.1
            bottom = inputdata.description['BottomMargin']+0.1
            #
            # Four margin bars: top, bottom, left, right (line width = margin size)
            out += ('\\rput(0,1){\\psline[linewidth=%scm,linecolor=%s](%scm,%scm)(%scm,%scm)}\n' %(top, color, -left, top/2, width+right, top/2))
            out += ('\\rput(0,%scm){\\psline[linewidth=%scm,linecolor=%s](%scm,%scm)(%scm,%scm)}\n' %(height[1], bottom, color, -left, -bottom/2, width+right, -bottom/2))
            out += ('\\rput(0,0){\\psline[linewidth=%scm,linecolor=%s](%scm,%scm)(%scm,%scm)}\n' %(left, color, -left/2, height[1]-0.05, -left/2, height[0]+0.05))
            out += ('\\rput(1,0){\\psline[linewidth=%scm,linecolor=%s](%scm,%scm)(%scm,%scm)}\n' %(right, color, right/2, height[1]-0.05, right/2, height[0]+0.05))
        # The visible frame rectangle itself (always drawn)
        out += ('\\psframe[linewidth='+self.framelinewidth+',dimen=middle](0,0)(1,1)\n')
        return out
class Ticks(object):
    """Base class for axis tick drawing.  Computes tick positions/labels for
    linear and log axes; subclasses (XTicks/YTicks/ZTicks) supply the actual
    PSTricks drawing primitives via the draw_* hooks."""
    def __init__(self, description, coors):
        self.majorticklinewidth = '0.3pt'
        self.minorticklinewidth = '0.3pt'
        self.majorticklength = '9pt'
        self.minorticklength = '4pt'
        self.description = description
        self.coors = coors
    def draw_ticks(self, min, max, plotlog=False, custommajorticks=None, customminorticks=None, custommajortickmarks=-1, customminortickmarks=-1, drawlabels=True, twosided=False):
        """Generate tick marks (and optionally labels) for the physical axis
        range [min, max].  Log axes place major ticks at powers of ten and
        minor ticks at 2..9 multiples; linear axes choose a tick spacing
        from the range's leading digit, overridable via custommajortickmarks
        / customminortickmarks, or fully custom tick lists.

        NOTE(review): the parameters min/max shadow the builtins, and the
        local 'xrange' below shadows the Python 2 builtin -- intentional-looking
        but fragile.
        """
        out = ""
        if plotlog:
            if min <= 0 or max <= 0:
                raise Exception("Cannot place log axis min or max tick <= 0")
            if custommajorticks is None:
                # Walk decades from below min up past max
                x=int(log10(min))
                n_labels=0
                while (x<log10(max)+1):
                    if 10**x>=min:
                        ticklabel=10**x
                        if ticklabel>min and ticklabel<max:
                            out += self.draw_majortick(ticklabel,twosided)
                            if drawlabels:
                                out += self.draw_majorticklabel(ticklabel)
                                n_labels+=1
                        if ticklabel==min or ticklabel==max:
                            if drawlabels:
                                out += self.draw_majorticklabel(ticklabel)
                                n_labels+=1
                    # Minor ticks at 2..9 times the previous decade
                    for i in range(2,10):
                        ticklabel=i*10**(x-1)
                        if ticklabel>min and ticklabel<max:
                            out += self.draw_minortick(ticklabel,twosided)
                            if drawlabels and n_labels==0:
                                # No major label fitted: label the minor ticks instead
                                if (i+1)*10**(x-1)<max: # some special care for the last minor tick
                                    out += self.draw_minorticklabel(ticklabel)
                                else:
                                    out += self.draw_minorticklabel(ticklabel, last=True)
                    x+=1
            else:
                print "Warning: custom major ticks not currently supported on log axes -- please contact the developers to request!"
        elif custommajorticks is not None or customminorticks is not None:
            # Fully custom tick positions/labels (linear axes only)
            if custommajorticks:
                for i in range(len(custommajorticks)):
                    value=custommajorticks[i]['Value']
                    label=custommajorticks[i]['Label']
                    if value>=min and value<=max:
                        out += self.draw_majortick(value,twosided)
                        if drawlabels:
                            out += self.draw_majorticklabel(value, label=label)
            if customminorticks:
                for i in range(len(customminorticks)):
                    value=customminorticks[i]['Value']
                    if value>=min and value<=max:
                        out += self.draw_minortick(value,twosided)
        else:
            # Automatic linear ticks: derive spacing from the leading digit
            xrange = max-min
            digits = int(log10(xrange))+1
            if (xrange < 1):
                digits -= 1
            foo = int(xrange/(10**(digits-1)))
            # NOTE(review): if foo/9. <= 0.1 none of these branches fires and
            # 'tickmarks' stays unbound (NameError below) -- confirm foo's range.
            if (foo/9. > 0.5):
                tickmarks = 10
            elif (foo/9. > 0.2):
                tickmarks = 5
            elif (foo/9. > 0.1):
                tickmarks = 2
            if (custommajortickmarks>-1):
                if custommajortickmarks not in [1, 2, 5, 10, 20]:
                    print '+++ Error in Ticks.draw_ticks(): MajorTickMarks must be in [1, 2, 5, 10, 20]'
                else:
                    #if custommajortickmarks==1:  custommajortickmarks=10
                    tickmarks = custommajortickmarks
            if (tickmarks == 2 or tickmarks == 20):
                minortickmarks = 3
            else:
                minortickmarks = 4
            if (customminortickmarks>-1):
                minortickmarks = customminortickmarks
            #
            # Start just below the axis minimum and step upward in tick units
            x = 0
            while (x > min*10**digits):
                x -= tickmarks*100**(digits-1)
            while (x <= max*10**digits):
                if (x >= min*10**digits-tickmarks*100**(digits-1)):
                    ticklabel = 1.*x/10**digits
                    # Use int labels where the value is integral
                    if (int(ticklabel) == ticklabel):
                        ticklabel = int(ticklabel)
                    if (float(ticklabel-min)/xrange >= -1e-5):
                        # Suppress ticks sitting exactly on the axis ends
                        if (fabs(ticklabel-min)/xrange > 1e-5 and fabs(ticklabel-max)/xrange > 1e-5):
                            out += self.draw_majortick(ticklabel,twosided)
                            if drawlabels:
                                out += self.draw_majorticklabel(ticklabel)
                    # Evenly spaced minor ticks between this major tick and the next
                    xminor = x
                    for i in range(minortickmarks):
                        xminor += 1.*tickmarks*100**(digits-1)/(minortickmarks+1)
                        ticklabel = 1.*xminor/10**digits
                        if (ticklabel > min and ticklabel < max):
                            if (fabs(ticklabel-min)/xrange > 1e-5 and fabs(ticklabel-max)/xrange > 1e-5):
                                out += self.draw_minortick(ticklabel,twosided)
                x += tickmarks*100**(digits-1)
        return out
    # The following hooks are overridden by the axis-specific subclasses:
    def draw(self):
        pass
    def draw_minortick(self, ticklabel, twosided):
        pass
    def draw_majortick(self, ticklabel, twosided):
        pass
    def draw_majorticklabel(self, ticklabel):
        pass
    def draw_minorticklabel(self, value, label='', last=False):
        return ''
    def get_ticklabel(self, value, plotlog=False, minor=False, lastminor=False):
        """Format *value* as a LaTeX tick label.  On log axes, major labels
        are powers of ten (plain '1' at 10^0) and minor labels are the bare
        prefactor, with the power of ten attached only to the last one.
        On linear axes, scientific-notation floats are typeset as a*10^b."""
        label=''
        prefix = ''
        if plotlog:
            bar = int(log10(value))
            if bar < 0:
                sign='-'
            else:
                sign='\\,'
            if minor: # The power of ten is only to be added to the last minor tick label
                if lastminor:
                    label = str(int(value/(10**bar))) + "\\cdot" + '10$^{'+sign+'\\text{'+str(abs(bar))+'}}$'
                else:
                    label = str(int(value/(10**bar))) # The naked prefactor
            else:
                if bar==0:
                    label = '1'
                else:
                    label = '10$^{'+sign+'\\text{'+str(abs(bar))+'}}$'
        else:
            # Snap tiny values to zero to avoid labels like 1e-17
            if fabs(value) < 1e-10:
                value = 0
            label = str(value)
            if "e" in label:
                a, b = label.split("e")
                astr = "%2.1f" % float(a)
                bstr = str(int(b))
                label = "\\smaller{%s $\\!\\cdot 10^{%s} $}" % (astr, bstr)
        return label
class XTicks(Ticks):
    """Tick drawing for the x-axis: vertical tick marks along the bottom
    frame edge (mirrored to the top if XTwosidedTicks), labels below."""
    def draw(self, custommajorticks=[], customminorticks=[], custommajortickmarks=-1, customminortickmarks=-1,drawlabels=True):
        """Return the PSTricks code for all x-axis ticks and labels."""
        twosided = bool(int(self.description.get('XTwosidedTicks', '0')))
        out = ""
        out += ('\n%\n% X-Ticks\n%\n')
        out += ('\\def\\majortickmarkx{\\psline[linewidth='+self.majorticklinewidth+'](0,0)(0,'+self.majorticklength+')}%\n')
        out += ('\\def\\minortickmarkx{\\psline[linewidth='+self.minorticklinewidth+'](0,0)(0,'+self.minorticklength+')}%\n')
        # Log ticks only make sense for a strictly positive axis range
        uselog = self.description['LogX'] and (self.coors.xmin() > 0 and self.coors.xmax() > 0)
        out += self.draw_ticks(self.coors.xmin(), self.coors.xmax(),\
                               plotlog=uselog,\
                               custommajorticks=custommajorticks,\
                               customminorticks=customminorticks,\
                               custommajortickmarks=custommajortickmarks,\
                               customminortickmarks=customminortickmarks,\
                               drawlabels=drawlabels,\
                               twosided=twosided)
        return out
    def draw_minortick(self, ticklabel, twosided):
        """Place a minor tick at the frame-x position of *ticklabel*."""
        out = ''
        out += '\\rput('+self.coors.strphys2frameX(ticklabel)+', 0){\\minortickmarkx}\n'
        if twosided:
            out += '\\rput{180}('+self.coors.strphys2frameX(ticklabel)+', 1){\\minortickmarkx}\n'
        return out
    def draw_minorticklabel(self, value, label='', last=False):
        """Label a minor tick (used on log axes when no major label fits)."""
        if not label:
            label=self.get_ticklabel(value, int(self.description['LogX']), minor=True, lastminor=last)
        if last: # Some more indentation for the last minor label
            return ('\\rput('+self.coors.strphys2frameX(value)+', 0){\\rput[B](1.9\\labelsep,-2.3\\labelsep){\\strut{}'+label+'}}\n')
        else:
            return ('\\rput('+self.coors.strphys2frameX(value)+', 0){\\rput[B](0,-2.3\\labelsep){\\strut{}'+label+'}}\n')
    def draw_majortick(self, ticklabel, twosided):
        """Place a major tick at the frame-x position of *ticklabel*."""
        out = ''
        out += '\\rput('+self.coors.strphys2frameX(ticklabel)+', 0){\\majortickmarkx}\n'
        if twosided:
            out += '\\rput{180}('+self.coors.strphys2frameX(ticklabel)+', 1){\\majortickmarkx}\n'
        return out
    def draw_majorticklabel(self, value, label=''):
        """Place a major tick label below the axis; multi-line labels
        (separated by literal \\n) are stacked with \\shortstack."""
        if not label:
            label = self.get_ticklabel(value, int(self.description['LogX']) and (self.coors.xmin() > 0 and self.coors.xmax() > 0))
        labelparts = label.split("\\n")
        labelcode = label if len(labelparts) == 1 else ("\\shortstack{" + "\\\\ ".join(labelparts) + "}")
        rtn = "\\rput(" + self.coors.strphys2frameX(value) + ", 0){\\rput[t](0,-\\labelsep){" + labelcode + "}}\n"
        return rtn
class YTicks(Ticks):
    """Tick drawing for the y-axis: horizontal tick marks along the left
    frame edge (mirrored to the right if YTwosidedTicks), labels to the left."""
    def draw(self, custommajorticks=[], customminorticks=[], custommajortickmarks=-1, customminortickmarks=-1, drawlabels=True):
        """Return the PSTricks code for all y-axis ticks and labels."""
        twosided = bool(int(self.description.get('YTwosidedTicks', '0')))
        out = ""
        out += ('\n%\n% Y-Ticks\n%\n')
        out += ('\\def\\majortickmarky{\\psline[linewidth='+self.majorticklinewidth+'](0,0)('+self.majorticklength+',0)}%\n')
        out += ('\\def\\minortickmarky{\\psline[linewidth='+self.minorticklinewidth+'](0,0)('+self.minorticklength+',0)}%\n')
        # Log ticks only make sense for a strictly positive axis range
        uselog = self.description['LogY'] and (self.coors.ymin() > 0 and self.coors.ymax() > 0)
        out += self.draw_ticks(self.coors.ymin(), self.coors.ymax(),
                               plotlog=uselog,
                               custommajorticks=custommajorticks,
                               customminorticks=customminorticks,
                               custommajortickmarks=custommajortickmarks,
                               customminortickmarks=customminortickmarks,
                               twosided=twosided,
                               drawlabels=drawlabels)
        return out
    def draw_minortick(self, ticklabel, twosided):
        """Place a minor tick at the frame-y position of *ticklabel*."""
        out = ''
        out += '\\rput(0, '+self.coors.strphys2frameY(ticklabel)+'){\\minortickmarky}\n'
        if twosided:
            out += '\\rput{180}(1, '+self.coors.strphys2frameY(ticklabel)+'){\\minortickmarky}\n'
        return out
    def draw_majortick(self, ticklabel, twosided):
        """Place a major tick at the frame-y position of *ticklabel*."""
        out = ''
        out += '\\rput(0, '+self.coors.strphys2frameY(ticklabel)+'){\\majortickmarky}\n'
        if twosided:
            out += '\\rput{180}(1, '+self.coors.strphys2frameY(ticklabel)+'){\\majortickmarky}\n'
        return out
    def draw_majorticklabel(self, value, label=''):
        """Place a major tick label left of the axis.  In deviation-mode
        ratio plots the label is suffixed with a sigma sign; multi-line
        labels (separated by literal \\n) are stacked with \\shortstack."""
        if not label:
            label = self.get_ticklabel(value, int(self.description['LogY']) and (self.coors.ymin() > 0 and self.coors.ymax() > 0))
        if self.description.has_key('RatioPlotMode') and self.description['RatioPlotMode'] == 'deviation' and \
           self.description.has_key('RatioPlotStage') and self.description['RatioPlotStage'] == '1':
            rtn = '\\uput[180]{0}(0, '+self.coors.strphys2frameY(value)+'){\\strut{}'+label+'\\,$\\sigma$}\n'
        else:
            labelparts = label.split("\\n")
            labelcode = label if len(labelparts) == 1 else ("\\shortstack{" + "\\\\ ".join(labelparts) + "}")
            rtn = "\\rput(0, " + self.coors.strphys2frameY(value) + "){\\rput[r](-\\labelsep,0){" + labelcode + "}}\n"
        return rtn
class ZTicks(Ticks):
    """Tick drawing for the z (colour-scale) axis: ticks on the right-hand
    colour bar, always one-sided, with shorter tick lengths than x/y."""
    def __init__(self, description, coors):
        # Overrides Ticks.__init__ to use shorter tick marks
        self.majorticklinewidth = '0.3pt'
        self.minorticklinewidth = '0.3pt'
        self.majorticklength = '6pt'
        self.minorticklength = '2.6pt'
        self.description = description
        self.coors = coors
    def draw(self, custommajorticks=[], customminorticks=[],
             custommajortickmarks=-1, customminortickmarks=-1,
             drawlabels=True):
        """Return the PSTricks code for all z-axis ticks and labels."""
        out = ""
        out += ('\n%\n% Z-Ticks\n%\n')
        out += ('\\def\\majortickmarkz{\\psline[linewidth='+self.majorticklinewidth+'](0,0)('+self.majorticklength+',0)}%\n')
        out += ('\\def\\minortickmarkz{\\psline[linewidth='+self.minorticklinewidth+'](0,0)('+self.minorticklength+',0)}%\n')
        out += self.draw_ticks(self.coors.zmin(), self.coors.zmax(),\
                               plotlog=self.description['LogZ'],\
                               custommajorticks=custommajorticks,\
                               customminorticks=customminorticks,\
                               custommajortickmarks=custommajortickmarks,\
                               customminortickmarks=customminortickmarks,\
                               twosided=False,\
                               drawlabels=drawlabels)
        return out
    def draw_minortick(self, ticklabel, twosided):
        # twosided is ignored: z ticks are only drawn on the right edge
        return '\\rput{180}(1, '+self.coors.strphys2frameZ(ticklabel)+'){\\minortickmarkz}\n'
    def draw_majortick(self, ticklabel, twosided):
        # twosided is ignored: z ticks are only drawn on the right edge
        return '\\rput{180}(1, '+self.coors.strphys2frameZ(ticklabel)+'){\\majortickmarkz}\n'
    def draw_majorticklabel(self, value, label=''):
        """Place a major tick label right of the colour bar; deviation-mode
        ratio plots get a sigma suffix."""
        if label=='':
            label = self.get_ticklabel(value, int(self.description['LogZ']))
        if self.description.has_key('RatioPlotMode') and self.description['RatioPlotMode']=='deviation' \
                and self.description.has_key('RatioPlotStage') and self.description['RatioPlotStage']:
            return ('\\uput[0]{0}(1, '+self.coors.strphys2frameZ(value)+'){\\strut{}'+label+'\\,$\\sigma$}\n')
        else:
            return ('\\uput[0]{0}(1, '+self.coors.strphys2frameZ(value)+'){\\strut{}'+label+'}\n')
class Coordinates(object):
    """Maps physical (data) coordinates onto the normalised [0,1] plot
    frame, using the axis borders and Log{X,Y,Z} flags stored in the plot
    description.  Out-of-range results are clamped to [-10, 10]."""
    def __init__(self, inputdata):
        self.description = inputdata.description
    def phys2frameX(self, x):
        """Frame x-coordinate of physical *x* (clamped; -10 for x<=0 on a log axis)."""
        if self.description['LogX']:
            if x <= 0:
                return -10
            lo, hi = log10(self.xmin()), log10(self.xmax())
            frac = 1.*(log10(x) - lo)/(hi - lo)
        else:
            frac = 1.*(x - self.xmin())/(self.xmax() - self.xmin())
        # Snap near-zero results exactly onto the frame edge
        if fabs(frac) < 1e-4:
            return 0
        return min(max(frac, -10), 10)
    def phys2frameY(self, y):
        """Frame y-coordinate of physical *y*; log mode additionally requires
        strictly positive axis borders (else -10)."""
        if self.description['LogY']:
            if y <= 0 or self.ymin() <= 0 or self.ymax() <= 0:
                return -10
            lo, hi = log10(self.ymin()), log10(self.ymax())
            frac = 1.*(log10(y) - lo)/(hi - lo)
        else:
            frac = 1.*(y - self.ymin())/(self.ymax() - self.ymin())
        if fabs(frac) < 1e-4:
            return 0
        return min(max(frac, -10), 10)
    def phys2frameZ(self, z):
        """Frame z-coordinate of physical *z* (clamped; -10 for z<=0 on a log axis)."""
        if self.description['LogZ']:
            if z <= 0:
                return -10
            lo, hi = log10(self.zmin()), log10(self.zmax())
            frac = 1.*(log10(z) - lo)/(hi - lo)
        else:
            frac = 1.*(z - self.zmin())/(self.zmax() - self.zmin())
        if fabs(frac) < 1e-4:
            return 0
        return min(max(frac, -10), 10)
    # TODO: Add frame2phys functions (to allow linear function sampling in the frame space rather than the physical space)
    def strphys2frameX(self, x):
        """String form of phys2frameX, for direct use in PSTricks output."""
        return str(self.phys2frameX(x))
    def strphys2frameY(self, y):
        """String form of phys2frameY, for direct use in PSTricks output."""
        return str(self.phys2frameY(y))
    def strphys2frameZ(self, z):
        """String form of phys2frameZ, for direct use in PSTricks output."""
        return str(self.phys2frameZ(z))
    # Physical axis borders, stored as [xmin, xmax, ymin, ymax, zmin, zmax]:
    def xmin(self):
        return self.description['Borders'][0]
    def xmax(self):
        return self.description['Borders'][1]
    def ymin(self):
        return self.description['Borders'][2]
    def ymax(self):
        return self.description['Borders'][3]
    def zmin(self):
        return self.description['Borders'][4]
    def zmax(self):
        return self.description['Borders'][5]
####################
def try_cmd(args):
    """Run the given command + args and return True/False if it succeeds or not.

    The AttributeError branch keeps compatibility with Python < 2.7, where
    subprocess has no check_output: we optimistically report success there.
    """
    import subprocess
    try:
        subprocess.check_output(args, stderr=subprocess.STDOUT)
        return True
    except AttributeError:
        # No check_output available (ancient Python): assume the command is fine
        return True
    except Exception:
        # BUGFIX: was a bare "except:", which also swallowed KeyboardInterrupt
        # and SystemExit; now only genuine failures (non-zero exit, missing
        # executable, ...) report False.
        return False
def have_cmd(cmd):
    """Return True if *cmd* can be located on the PATH (checked via `which`)."""
    return try_cmd(["which", cmd])
import shutil, subprocess
def process_datfile(datfile):
    """Process a single .dat plot file into the requested output format(s).

    Parses the file into InputData, writes the LaTeX/PSTricks source into a
    private temp directory, then (unless only TEX output is requested) runs
    latex + dvips and the relevant converters (ps2pdf/ps2eps/convert) there.
    Finished files are copied back next to the input file.  Uses the global
    `opts` for OUTPUT_FORMAT, OUTPUT_FONT and NO_CLEANUP.
    Raises Exception if *datfile* is unreadable.
    """
    global opts
    if not os.access(datfile, os.R_OK):
        raise Exception("Could not read data file '%s'" % datfile)
    dirname = os.path.dirname(datfile)
    datfile = os.path.basename(datfile)
    filename = datfile.replace('.dat','')
    ## Create a temporary directory
    cwd = os.getcwd()
    datpath = os.path.join(cwd, dirname, datfile)
    tempdir = tempfile.mkdtemp('.make-plots')
    tempdatpath = os.path.join(tempdir, datfile)
    shutil.copy(datpath, tempdir)
    ## Make TeX file
    inputdata = InputData(os.path.join(dirname,filename))
    texpath = os.path.join(tempdir, '%s.tex' % filename)
    texfile = open(texpath, 'w')
    p = Plot(inputdata)
    texfile.write(p.write_header(inputdata))
    # Main plot panel (on unless MainPlot=0)
    if inputdata.description.get('MainPlot', '1') == '1':
        mp = MainPlot(inputdata)
        texfile.write(mp.draw(inputdata))
    # Ratio panel: 1D only, needs a reference histogram and RatioPlot!=0
    if not inputdata.description.get('is2dim', False) and \
            inputdata.description.get('RatioPlot', '1') == '1' and \
            inputdata.description.get('RatioPlotReference') is not None:
        rp = RatioPlot(inputdata)
        texfile.write(rp.draw(inputdata))
    texfile.write(p.write_footer())
    texfile.close()
    if opts.OUTPUT_FORMAT != ["TEX"]:
        ## Check for the required programs
        latexavailable = have_cmd("latex")
        dvipsavailable = have_cmd("dvips")
        convertavailable = have_cmd("convert")
        ps2pnmavailable = have_cmd("ps2pnm")
        pnm2pngavailable = have_cmd("pnm2png")
        # TODO: It'd be nice to be able to control the size of the PNG between thumb and full-size...
        #  currently defaults (and is used below) to a size suitable for thumbnails
        # NOTE(review): mkpng is only referenced from the commented-out
        # PSPNG/PDFPNG/EPSPNG code paths below -- currently unused.
        def mkpng(infile, outfile, density=100):
            if convertavailable:
                pngcmd = ["convert", "-flatten", "-density", str(density), infile, "-quality", "100", "-sharpen", "0x1.0", outfile]
                logging.debug(" ".join(pngcmd))
                pngproc = subprocess.Popen(pngcmd, stdout=subprocess.PIPE, cwd=tempdir)
                pngproc.wait()
            else:
                raise Exception("Required PNG maker program (convert) not found")
            # elif ps2pnmavailable and pnm2pngavailable:
            #     pstopnm = "pstopnm -stdout -xsize=461 -ysize=422 -xborder=0.01 -yborder=0.01 -portrait " + infile
            #     p1 = subprocess.Popen(pstopnm.split(), stdout=subprocess.PIPE, stderr=open("/dev/null", "w"), cwd=tempdir)
            #     p2 = subprocess.Popen(["pnmtopng"], stdin=p1.stdout, stdout=open("%s/%s.png" % (tempdir, outfile), "w"), stderr=open("/dev/null", "w"), cwd=tempdir)
            #     p2.wait()
            # else:
            #     raise Exception("Required PNG maker programs (convert, or ps2pnm and pnm2png) not found")
        ## Run LaTeX (in no-stop mode)
        logging.debug(os.listdir(tempdir))
        # NOTE: backslashes below are literal characters passed to latex
        # (\s and \i are not recognised Python escape sequences)
        texcmd = ["latex", "\scrollmode\input", texpath]
        logging.debug("TeX command: " + " ".join(texcmd))
        texproc = subprocess.Popen(texcmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tempdir)
        logging.debug(texproc.communicate()[0])
        logging.debug(os.listdir(tempdir))
        ## Run dvips
        dvcmd = ["dvips", filename]
        if not logging.getLogger().isEnabledFor(logging.DEBUG):
            dvcmd.append("-q")
        ## Handle Minion Font
        if opts.OUTPUT_FONT == "MINION":
            dvcmd.append('-Pminion')
        ## Choose format
        # TODO: Rationalise... this is a mess! Maybe we can use tex2pix?
        if "PS" in opts.OUTPUT_FORMAT:
            dvcmd += ["-o", "%s.ps" % filename]
            logging.debug(" ".join(dvcmd))
            dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
            dvproc.wait()
        if "PDF" in opts.OUTPUT_FORMAT:
            # dvips -f writes PS to stdout, which is piped into ps2pdf
            dvcmd.append("-f")
            logging.debug(" ".join(dvcmd))
            dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
            cnvproc = subprocess.Popen(["ps2pdf", "-"], stdin=dvproc.stdout, stdout=subprocess.PIPE, cwd=tempdir)
            f = open(os.path.join(tempdir, "%s.pdf" % filename), "w")
            f.write(cnvproc.communicate()[0])
            f.close()
        if "EPS" in opts.OUTPUT_FORMAT:
            dvcmd.append("-f")
            logging.debug(" ".join(dvcmd))
            dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
            cnvproc = subprocess.Popen(["ps2eps"], stdin=dvproc.stdout, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=tempdir)
            f = open(os.path.join(tempdir, "%s.eps" % filename), "w")
            f.write(cnvproc.communicate()[0])
            f.close()
        if "PNG" in opts.OUTPUT_FORMAT:
            dvcmd.append("-f")
            logging.debug(" ".join(dvcmd))
            dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
            pngcmd = ["convert", "-flatten", "-density", "100", "-", "-quality", "100", "-sharpen", "0x1.0", "%s.png" % filename]
            logging.debug(" ".join(pngcmd))
            pngproc = subprocess.Popen(pngcmd, stdin=dvproc.stdout, stdout=subprocess.PIPE, cwd=tempdir)
            pngproc.wait()
        # if opts.OUTPUT_FORMAT == "PSPNG":
        #     dvcmd += ["-o", "%s.ps" % filename]
        #     logging.debug(" ".join(dvcmd))
        #     dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
        #     dvproc.wait()
        #     assert os.path.exists("%s.ps" % filename)
        #     mkpng("%s.ps" % filename, "%s.png" % filename)
        # if opts.OUTPUT_FORMAT == "PDFPNG":
        #     dvcmd.append("-f")
        #     logging.debug(" ".join(dvcmd))
        #     dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
        #     cnvproc = subprocess.Popen(["ps2pdf", "-"], stdin=dvproc.stdout, stdout=subprocess.PIPE, cwd=tempdir)
        #     f = open(os.path.join(tempdir, "%s.pdf" % filename), "w")
        #     f.write(cnvproc.communicate()[0])
        #     f.close()
        #     logging.debug(os.listdir(tempdir))
        #     assert os.path.exists("%s.pdf" % filename)
        #     mkpng("%s.pdf" % filename, "%s.png" % filename)
        # if opts.OUTPUT_FORMAT == "EPSPNG":
        #     dvcmd.append("-f")
        #     logging.debug(" ".join(dvcmd))
        #     dvproc = subprocess.Popen(dvcmd, stdout=subprocess.PIPE, cwd=tempdir)
        #     cnvproc = subprocess.Popen(["ps2eps"], stdin=dvproc.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=tempdir)
        #     f = open(os.path.join(tempdir, "%s.eps" % filename), "w")
        #     f.write(cnvproc.communicate()[0])
        #     f.close()
        #     mkpng("%s.eps" % filename, "%s.png" % filename)
        # else:
        #     logging.error("Unknown format: %s" % opts.OUTPUT_FORMAT)
        #     sys.exit(1)
        logging.debug(os.listdir(tempdir))
    ## Copy results back to main dir
    for fmt in opts.OUTPUT_FORMAT:
        outname = "%s.%s" % (filename, fmt.lower())
        outpath = os.path.join(tempdir, outname)
        if os.path.exists(outpath):
            shutil.copy(outpath, os.path.join(cwd,dirname))
        else:
            logging.error("No output file '%s' from processing %s" % (outname, datfile))
    ## Clean up
    if opts.NO_CLEANUP:
        logging.info('Keeping temp-files in %s' % tempdir)
    else:
        shutil.rmtree(tempdir, ignore_errors=True)
####################
if __name__ == '__main__':
    # NOTE(review): this script uses Python 2 syntax (bare print statements,
    # `except Exception, e`), so it must run under a Python 2 interpreter.
    ## Try to rename the process on Linux
    try:
        import ctypes
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
        # prctl(15, ...) is PR_SET_NAME: label the process "make-plots" in ps/top
        libc.prctl(15, 'make-plots', 0, 0, 0)
    except Exception:
        pass
    ## Try to use Psyco optimiser
    try:
        import psyco
        psyco.full()
    except ImportError:
        pass
    ## Find number of (virtual) processing units
    import multiprocessing
    try:
        numcores = multiprocessing.cpu_count()
    except:
        numcores = 1
    ## Parse command line options
    from optparse import OptionParser, OptionGroup
    parser = OptionParser(usage=__doc__)
    parser.add_option("-n", "-j", "--num-threads", dest="NUM_THREADS", type="int",
                      default=numcores, help="max number of threads to be used [%s]" % numcores)
    parser.add_option("--font", dest="OUTPUT_FONT", choices="palatino,cm,times,helvetica,minion".split(","),
                      default="palatino", help="choose the font to be used in the plots")
    # The individual --palatino/--cm/... switches below are kept for backward
    # compatibility; --font is the supported spelling.
    parser.add_option("--palatino", dest="OUTPUT_FONT", action="store_const", const="palatino", default="palatino",
                      help="Use Palatino as font (default). DEPRECATED: Use --font")
    parser.add_option("--cm", dest="OUTPUT_FONT", action="store_const", const="cm", default="palatino",
                      help="Use Computer Modern as font. DEPRECATED: Use --font")
    parser.add_option("--times", dest="OUTPUT_FONT", action="store_const", const="times", default="palatino",
                      help="Use Times as font. DEPRECATED: Use --font")
    parser.add_option("--minion", dest="OUTPUT_FONT", action="store_const", const="minion", default="palatino",
                      help="Use Adobe Minion Pro as font. Note: You need to set TEXMFHOME first. DEPRECATED: Use --font")
    parser.add_option("--helvetica", dest="OUTPUT_FONT", action="store_const", const="helvetica", default="palatino",
                      help="Use Helvetica as font. DEPRECATED: Use --font")
    parser.add_option("--format", dest="OUTPUT_FORMAT", default="PDF",
                      help="Choose plot format, perhaps multiple comma-separated formats e.g. 'pdf' or 'tex,pdf,png' (default = PDF).")
    parser.add_option("--ps", dest="OUTPUT_FORMAT", action="store_const", const="PS", default="PDF",
                      help="Create PostScript output (default). DEPRECATED")
    parser.add_option("--pdf", dest="OUTPUT_FORMAT", action="store_const", const="PDF", default="PDF",
                      help="Create PDF output. DEPRECATED")
    parser.add_option("--eps", dest="OUTPUT_FORMAT", action="store_const", const="EPS", default="PDF",
                      help="Create Encapsulated PostScript output. DEPRECATED")
    parser.add_option("--png", dest="OUTPUT_FORMAT", action="store_const", const="PNG", default="PDF",
                      help="Create PNG output. DEPRECATED")
    parser.add_option("--pspng", dest="OUTPUT_FORMAT", action="store_const", const="PS,PNG", default="PDF",
                      help="Create PS and PNG output. DEPRECATED")
    parser.add_option("--pdfpng", dest="OUTPUT_FORMAT", action="store_const", const="PDF,PNG", default="PDF",
                      help="Create PDF and PNG output. DEPRECATED")
    parser.add_option("--epspng", dest="OUTPUT_FORMAT", action="store_const", const="EPS,PNG", default="PDF",
                      help="Create EPS and PNG output. DEPRECATED")
    parser.add_option("--tex", dest="OUTPUT_FORMAT", action="store_const", const="TEX", default="PDF",
                      help="Create TeX/LaTeX output.")
    parser.add_option("--no-cleanup", dest="NO_CLEANUP", action="store_true", default=False,
                      help="Keep temporary directory and print its filename.")
    parser.add_option("--full-range", dest="FULL_RANGE", action="store_true", default=False,
                      help="Plot full y range in LogY plots.")
    parser.add_option("-c", "--config", dest="CONFIGFILES", action="append", default=None,
                      help="Plot config file to be used. Overrides internal config blocks.")
    verbgroup = OptionGroup(parser, "Verbosity control")
    verbgroup.add_option("-v", "--verbose", action="store_const", const=logging.DEBUG, dest="LOGLEVEL",
                         default=logging.INFO, help="print debug (very verbose) messages")
    verbgroup.add_option("-q", "--quiet", action="store_const", const=logging.WARNING, dest="LOGLEVEL",
                         default=logging.INFO, help="be very quiet")
    parser.add_option_group(verbgroup)
    opts, args = parser.parse_args()
    logging.basicConfig(level=opts.LOGLEVEL, format="%(message)s")
    # Normalise user input: fonts are matched upper-case, formats become a list
    opts.OUTPUT_FONT = opts.OUTPUT_FONT.upper()
    opts.OUTPUT_FORMAT = opts.OUTPUT_FORMAT.upper().split(",")
    ## Check for no args
    if len(args) == 0:
        logging.error(parser.get_usage())
        sys.exit(2)
    ## Check that the files exist
    for f in args:
        if not os.access(f, os.R_OK):
            print "Error: cannot read from %s" % f
            sys.exit(1)
    ## Test for external programs (kpsewhich, latex, dvips, ps2pdf/ps2eps, and convert)
    opts.LATEXPKGS = []
    # Pure TeX output needs no external toolchain, so only probe otherwise
    if opts.OUTPUT_FORMAT != ["TEX"]:
        try:
            ## latex
            if not have_cmd("latex"):
                logging.error("ERROR: required program 'latex' could not be found. Exiting...")
                sys.exit(1)
            ## dvips
            if not have_cmd("dvips"):
                logging.error("ERROR: required program 'dvips' could not be found. Exiting...")
                sys.exit(1)
            ## ps2pdf / ps2eps
            if "PDF" in opts.OUTPUT_FORMAT:
                if not have_cmd("ps2pdf"):
                    logging.error("ERROR: required program 'ps2pdf' (for PDF output) could not be found. Exiting...")
                    sys.exit(1)
            elif "EPS" in opts.OUTPUT_FORMAT:
                if not have_cmd("ps2eps"):
                    logging.error("ERROR: required program 'ps2eps' (for EPS output) could not be found. Exiting...")
                    sys.exit(1)
            ## PNG output converter
            if "PNG" in opts.OUTPUT_FORMAT:
                if not have_cmd("convert"):
                    logging.error("ERROR: required program 'convert' (for PNG output) could not be found. Exiting...")
                    sys.exit(1)
            ## kpsewhich: required for LaTeX package testing
            if not have_cmd("kpsewhich"):
                logging.warning("WARNING: required program 'kpsewhich' (for LaTeX package checks) could not be found")
            else:
                ## Check minion font
                if opts.OUTPUT_FONT == "MINION":
                    p = subprocess.Popen(["kpsewhich", "minion.sty"], stdout=subprocess.PIPE)
                    p.wait()
                    if p.returncode != 0:
                        logging.warning('Warning: Using "--minion" requires minion.sty to be installed. Ignoring it.')
                        opts.OUTPUT_FONT = "PALATINO"
                ## Check for HEP LaTeX packages
                # TODO: remove HEP-specifics/non-standards?
                for pkg in ["hepnames", "hepunits", "underscore"]:
                    p = subprocess.Popen(["kpsewhich", "%s.sty" % pkg], stdout=subprocess.PIPE)
                    p.wait()
                    if p.returncode == 0:
                        opts.LATEXPKGS.append(pkg)
                ## Check for Palatino old style figures and small caps
                if opts.OUTPUT_FONT == "PALATINO":
                    p = subprocess.Popen(["kpsewhich", "ot1pplx.fd"], stdout=subprocess.PIPE)
                    p.wait()
                    if p.returncode == 0:
                        opts.OUTPUT_FONT = "PALATINO_OSF"
        except Exception, e:
            logging.warning("Problem while testing for external packages. I'm going to try and continue without testing, but don't hold your breath...")
    ## Set up signal handling
    import signal
    RECVD_KILL_SIGNAL = None
    def handleKillSignal(signum, frame):
        "Declare us as having been signalled, and return to default handling behaviour"
        global RECVD_KILL_SIGNAL
        logging.critical("Signal handler called with signal " + str(signum))
        RECVD_KILL_SIGNAL = signum
        signal.signal(signum, signal.SIG_DFL)
    ## Signals to handle
    # signal.signal(signal.SIGINT, handleKillSignal)
    # signal.signal(signal.SIGTERM, handleKillSignal)
    # signal.signal(signal.SIGHUP, handleKillSignal)
    # signal.signal(signal.SIGUSR2, handleKillSignal)
    def init_worker():
        # Workers ignore SIGINT so the parent alone handles Ctrl-C and can
        # terminate the pool cleanly (see KeyboardInterrupt handler below).
        signal.signal(signal.SIGINT, signal.SIG_IGN)
    ## Run threads
    datfiles = args
    plotword = "plots" if len(datfiles) > 1 else "plot"
    logging.info("Making %d %s" % (len(datfiles), plotword))
    pool = multiprocessing.Pool(opts.NUM_THREADS, init_worker)
    try:
        for i, _ in enumerate(pool.imap(process_datfile, datfiles)):
            logging.info("Plotting %s (%d/%d remaining)" % (datfiles[i], len(datfiles)-i, len(datfiles)))
        pool.close()
    except KeyboardInterrupt:
        print "Caught KeyboardInterrupt, terminating workers"
        pool.terminate()
    pool.join()
| [
"aevans@physics.umn.edu"
] | aevans@physics.umn.edu | |
0c0ded10cd6bd928c1a0139a00ea914d63bbfe18 | e8d7951f1f37c337326166219d9ab67a6103ce8e | /casemgmt_example/auth/__init__.py | f62386ce132b61e3fb95f5b8f323960e237544d0 | [
"MIT"
] | permissive | saolsen/oso-casemgmt-django | 8eb29431c26086a0dc76cf2056487bab04873d09 | 05e7e1d54c0ca274341df3fa53c82b9735c377c6 | refs/heads/main | 2023-03-04T18:24:33.791662 | 2021-01-27T19:39:18 | 2021-01-27T19:39:18 | 339,831,889 | 0 | 0 | MIT | 2021-02-17T19:22:05 | 2021-02-17T19:22:04 | null | UTF-8 | Python | false | false | 289 | py | from django_oso import Oso
from . import oso_extensions
def register_extensions():
    """Register the project's helper types with the Oso policy engine."""
    # Register extensions/types into Oso
    # Expose PermissionHelpers to policies under the constant name "PermissionHelper".
    Oso.register_constant(oso_extensions.PermissionHelpers, name="PermissionHelper")
    # Make PermissionInfo usable as a class inside policy rules.
    Oso.register_class(oso_extensions.PermissionInfo, name="PermissionInfo")
| [
"5084545+devmonkey22@users.noreply.github.com"
] | 5084545+devmonkey22@users.noreply.github.com |
ffa649ffc9007d2d7beafeb7e371b5ef568458a2 | fb8b513842138f2251c18ee751a9341a004b76d2 | /solutions/0-20/problem_6_sum_square_difference.py | 94543aa1beca134e9dbdfc4d6e42ecc087be0002 | [] | no_license | den4kox/project_euler | ad041371711d0880d2742038cc529aa0db4ffdfb | 920813e6b9f61cfe27558ef3caffae92555896cc | refs/heads/master | 2021-01-01T05:22:14.190063 | 2016-05-25T12:52:21 | 2016-05-25T12:52:21 | 57,980,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | """The sum of the squares of the first ten natural numbers is,
12 + 22 + ... + 102 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)2 = 552 = 3025
Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 − 385 = 2640.
Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum."""
from decorators_k import writeResult
from functools import reduce
import sys
sys.path.insert(0, "..")
@writeResult
def solution(x=None):
return pow(sum(list(range(1, x + 1))), 2) - reduce(lambda sum, x: sum + x, list(
map(lambda a: pow(a, 2), list(range(1, x + 1)))))
solution(100)
## result: 25164150. Time: 0.0 | [
"den4kox@gmail.com"
] | den4kox@gmail.com |
4d0f962c3d5b7d6166bccada19f8017d05660af3 | 945c1f0642c7abbeafbf5a103d58a41050173a92 | /h5_to_pb.py | b456ea82608d0bf7b74efc4dc3dc5158a09149ba | [] | no_license | Jayanie/Sinhala-Speech-Recognition-Module | e128910d54c92953b1f526a69d69e8f4833cc858 | c288752b8f38f11510eb3d6cf2a8bcd313a45a7c | refs/heads/master | 2023-06-22T20:28:58.970843 | 2023-06-19T03:40:03 | 2023-06-19T03:40:03 | 246,554,222 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | from keras.models import model_from_config
| [
"jayanieb@gmail.com"
] | jayanieb@gmail.com |
7a8025dcb81d5b1fc065dd9d0723bdbbe3e61a4c | 1b4dc714cd5f337260747d112cb123e1a54fd5a9 | /skeletons/head_remap.py | 24559b27db1541b5f387fdc3c109682b515fffb7 | [] | no_license | darrickyee/daz | 0fab97314c51b4151f1b8d608978f7ef79c78bb3 | 4a5c559e83aa87c4b048db199d52add2dc58a98d | refs/heads/master | 2020-04-21T15:46:07.060350 | 2019-11-13T02:45:54 | 2019-11-13T02:45:54 | 169,679,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | HEAD_MAP = {'Neck_M': 'neckLower',
'Head_M': 'head',
'Jaw_M': 'lowerJaw',
'JawEnd_M': 'Chin',
'Tongue1_M': 'tongue01',
'Tongue2_M': 'tongue02',
'Tongue3_M': 'tongue03',
'Tongue4_M': 'tongue04',
'Eye_R': 'rEye',
'Eye_L': 'lEye'
}
for jnt in HEAD_MAP:
sk_jnt = pm.ls(jnt)[0]
tgt_jnt = pm.ls(HEAD_MAP[jnt])[0]
pm.move(sk_jnt, tgt_jnt.getTranslation(space='world'), ws=True, pcp=True)
| [
"dyee@air.org"
] | dyee@air.org |
640e21954409059be57ffe645324d501deee5f19 | 1354fc288e8bb87fad660fce3004b7db471c6a7c | /utils/global_count.py | d3760475a72121a6d5a35f5025631691e34ab3f9 | [] | no_license | zhangjunwang/drawing_and_paintings_network_make_photo_retouching_easier | a89cc60aff2dd5d1b377973c48d37cd530625dd7 | 600a760044d873ab34d3e3e24278c858fa8fe415 | refs/heads/master | 2020-09-19T03:50:02.291802 | 2019-10-28T03:04:08 | 2019-10-28T03:04:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py |
# -----------------------------------global_count--------------------------------
def _init_train_count():
global EPISODE_COUNT
EPISODE_COUNT = 0
def _add_train_count():
global EPISODE_COUNT
EPISODE_COUNT = EPISODE_COUNT + 1
return EPISODE_COUNT
def _get_train_count():
global EPISODE_COUNT
return EPISODE_COUNT
#------------------------------------global_reward-----------------------------------
def _init_reward_list():
global GLOBAL_REWARD
GLOBAL_REWARD = []
def _append_reward_list(reward):
global GLOBAL_REWARD
GLOBAL_REWARD.append(reward)
def _get_reward_list():
global GLOBAL_REWARD
return GLOBAL_REWARD
#------------------------------------global_trajectory-----------------------------------
def _init_trajectory_dict():
global GLOBAL_TRAJECTORY
GLOBAL_TRAJECTORY = dict()
def _append_trajectory_dict(epi_count,trajectory):
global GLOBAL_TRAJECTORY
GLOBAL_TRAJECTORY[epi_count] = trajectory
def _get_trajectory_dict():
global GLOBAL_TRAJECTORY
return GLOBAL_TRAJECTORY
| [
"459239181@qq.om"
] | 459239181@qq.om |
1cbd1b138e8c9056778b071e1abb5923ae0f5581 | fe28e7d49df19dfc62c169214c6e9554664f8a76 | /day1/calc.py | 90a8332fcfcc2561a5431ccf8e9831f54c84cff4 | [] | no_license | jackmooooo/Week-3 | 13b07dbc9c51b569f4cbec2baeb077f23d685fca | 37373a59b3248046d72f8256865b9260ea63a795 | refs/heads/master | 2020-03-24T08:19:50.767394 | 2018-07-27T15:18:51 | 2018-07-27T15:18:51 | 142,592,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py |
import wx
import sys, os
def add(a, b):
    """Return a + b (works for numbers and for concatenable sequences)."""
    total = a + b
    return total
def sub(a, b):
    """Return the difference a - b."""
    difference = a - b
    return difference
def mult(a, b):
    """'Multiply' a by b via repeated addition.

    b must be a non-negative int; a may be a number or a sequence,
    e.g. mult("ab", 3) == "ababab".
    """
    total = 0
    for _ in range(b):
        # Seed with `a` on the first pass so sequence operands never hit 0 + seq.
        total = a if total == 0 else total + a
    return total
def div(a, b):
    """Return the true-division quotient a / b."""
    quotient = a / b
    return quotient
def pwr(a, b):
    """Return a raised to the non-negative integer power b.

    Fixes two defects in the original: it multiplied the accumulator by the
    exponent `b` instead of the base `a` (so pwr(2, 3) returned 18), and it
    returned 0 instead of 1 for b == 0.
    """
    result = 1
    for _ in range(b):
        result = result * a
    return result
def remain(a, b):
    """Return the remainder of a divided by b as a float.

    Computed from the fractional part of the quotient using the module's own
    arithmetic helpers. NOTE(review): relies on float division, so the result
    is subject to floating-point rounding; appears to assume b is a positive
    int (mult iterates range(b)) — confirm with callers.
    """
    c = div(a, b)   # full quotient (float)
    d = int(c)      # integer part (truncates toward zero)
    e = sub(c, d)   # fractional part of the quotient
    f = mult(e, b)  # scale the fraction back up: the remainder
    return f
def bintodec(a):
    """Convert a binary string with an optional fractional part to a number.

    Example: bintodec("101.01") == 5.25.

    Fixes: the original weighted the fractional bits in *reverse* order (the
    first bit after the point received the smallest weight), so e.g. "0.10"
    evaluated to 0.25 instead of 0.5.
    """
    whole, _, frac = a.partition('.')
    value = 0
    for i, bit in enumerate(whole):
        # Bit i of the whole part weighs 2**(len-1-i).
        value += int(bit) * 2 ** (len(whole) - 1 - i)
    for i, bit in enumerate(frac):
        # First fractional bit weighs 2**-1, then 2**-2, ...
        value += int(bit) * 2 ** -(i + 1)
    return value
def bintoOct(a):
    """Convert a binary string with a fractional part to its octal form.

    Example: bintoOct("111.101") == "7.5".

    Rewritten because the original did not even compile (missing colons on
    its `if` statements) and relied on empty slices like b[0][x:x-2] plus a
    broken padding formula, so it could never produce a correct result.
    """
    whole, _, frac = a.partition(".")
    # Pad each side to a multiple of 3 bits: zeros go on the left of the
    # whole part and on the right of the fractional part.
    if len(whole) % 3:
        whole = "0" * (3 - len(whole) % 3) + whole
    if len(frac) % 3:
        frac = frac + "0" * (3 - len(frac) % 3)
    # Each 3-bit group maps to exactly one octal digit.
    oct_whole = "".join(str(int(whole[i:i + 3], 2)) for i in range(0, len(whole), 3))
    oct_frac = "".join(str(int(frac[i:i + 3], 2)) for i in range(0, len(frac), 3))
    return oct_whole + "." + oct_frac


print(bintoOct("111.111"))  # demo: binary 111.111 -> octal "7.7"
| [
"stephanielam@comcast.net"
] | stephanielam@comcast.net |
0584d64e008097ac104a0042de0a040dc2de4dd7 | fc9e71fda5a6bfd16614705b537f2ebe5d91b16c | /core/src/autogluon/core/utils/loaders/_utils.py | 7509eec7a2cb2365f40ce9afe99f8f1765649707 | [
"Apache-2.0"
] | permissive | sackoh/autogluon | e8f9d9d7bdedea302cb999aea1a5ef33435a55bb | e9c95345303f930195a74ecd7fcde5c7cace3add | refs/heads/master | 2023-07-16T13:42:29.573260 | 2021-08-24T20:42:52 | 2021-08-24T20:42:52 | 295,719,004 | 0 | 0 | Apache-2.0 | 2020-09-15T12:20:35 | 2020-09-15T12:20:34 | null | UTF-8 | Python | false | false | 10,168 | py | import zipfile
from typing import Optional
import os
import warnings
import tqdm
import sys
import uuid
import logging
import hashlib
import functools
import requests
import numpy as np
import boto3
S3_PREFIX = 's3://'
INT_TYPES = (int, np.uint8, np.int8, np.int32, np.int64)
FLOAT_TYPES = (float, np.float16, np.float32, np.float64)
BOOL_TYPES = (bool, np.bool_)
# Platform-specific atomic file replacement: POSIX uses os.rename (atomic on
# the same filesystem); Windows uses MoveFileExW via ctypes.
if not sys.platform.startswith('win32'):
    # refer to https://github.com/untitaker/python-atomicwrites
    def replace_file(src, dst):
        """Implement atomic os.replace with linux and OSX.

        On failure the temporary source file is removed before re-raising.

        Parameters
        ----------
        src : source file path
        dst : destination file path
        """
        try:
            os.rename(src, dst)
        except OSError:
            try:
                os.remove(src)
            except OSError:
                pass
            finally:
                raise OSError(
                    'Moving downloaded temp file - {}, to {} failed. \
                    Please retry the download.'.format(src, dst))
else:
    import ctypes
    _MOVEFILE_REPLACE_EXISTING = 0x1
    # Setting this value guarantees that a move performed as a copy
    # and delete operation is flushed to disk before the function returns.
    # The flush occurs at the end of the copy operation.
    _MOVEFILE_WRITE_THROUGH = 0x8
    _windows_default_flags = _MOVEFILE_WRITE_THROUGH
    def _str_to_unicode(x):
        """Handle text decoding. Internal use only"""
        if not isinstance(x, str):
            return x.decode(sys.getfilesystemencoding())
        return x
    def _handle_errors(rv, src):
        """Handle WinError. Internal use only"""
        # rv is the raw BOOL returned by MoveFileExW; 0 means failure.
        if not rv:
            msg = ctypes.FormatError(ctypes.GetLastError())
            # if the MoveFileExW fails(e.g. fail to acquire file lock), removes the tempfile
            try:
                os.remove(src)
            except OSError:
                pass
            finally:
                raise OSError(msg)
    def replace_file(src, dst):
        """Implement atomic os.replace with windows.
        refer to https://docs.microsoft.com/en-us/windows/desktop/api/winbase/nf-winbase-movefileexw
        The function fails when one of the process(copy, flush, delete) fails.
        Parameters
        ----------
        src : source file path
        dst : destination file path
        """
        _handle_errors(ctypes.windll.kernel32.MoveFileExW(
            _str_to_unicode(src), _str_to_unicode(dst),
            _windows_default_flags | _MOVEFILE_REPLACE_EXISTING
        ), src)
def sha1sum(filename):
    """Calculate the sha1sum of a file.

    Parameters
    ----------
    filename
        Name of the file

    Returns
    -------
    ret
        The sha1sum as a hex string
    """
    digest = hashlib.sha1()
    # Stream the file in 100 KiB chunks so large files don't load into memory.
    with open(filename, mode='rb') as fh:
        while True:
            chunk = fh.read(1024 * 100)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def download(url: str,
             path: Optional[str] = None,
             overwrite: Optional[bool] = False,
             sha1_hash: Optional[str] = None,
             retries: Optional[int] = 5,
             verify_ssl: Optional[bool] = True) -> str:
    """Download a given URL

    Supports both plain HTTP(S) urls and `s3://bucket/key` urls. The file is
    first written to a uuid-suffixed temp file and then atomically moved into
    place via replace_file, so concurrent downloads don't corrupt each other.

    Parameters
    ----------
    url
        URL to download
    path
        Destination path to store downloaded file. By default stores to the
        current directory with same name as in url.
    overwrite
        Whether to overwrite destination file if already exists.
    sha1_hash
        Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
        but doesn't match.
    retries
        The number of times to attempt the download in case of failure or non 200 return codes
    verify_ssl
        Verify SSL certificates.

    Returns
    -------
    fname
        The file path of the downloaded file.
    """
    is_s3 = url.startswith(S3_PREFIX)
    if is_s3:
        s3 = boto3.resource('s3')
        # Without credentials fall back to anonymous (unsigned) S3 access.
        if boto3.session.Session().get_credentials() is None:
            from botocore.handlers import disable_signing
            s3.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
        components = url[len(S3_PREFIX):].split('/')
        if len(components) < 2:
            raise ValueError('Invalid S3 url. Received url={}'.format(url))
        s3_bucket_name = components[0]
        s3_key = '/'.join(components[1:])
    if path is None:
        fname = url.split('/')[-1]
        # Empty filenames are invalid
        assert fname, 'Can\'t construct file-name from this URL. ' \
                      'Please set the `path` option manually.'
    else:
        path = os.path.expanduser(path)
        if os.path.isdir(path):
            fname = os.path.join(path, url.split('/')[-1])
        else:
            fname = path
    assert retries >= 0, "Number of retries should be at least 0, currently it's {}".format(
        retries)
    if not verify_ssl:
        warnings.warn(
            'Unverified HTTPS request is being made (verify_ssl=False). '
            'Adding certificate verification is strongly advised.')
    # Skip the download entirely when a matching file is already present.
    if overwrite or not os.path.exists(fname) or (sha1_hash and not sha1sum(fname) == sha1_hash):
        dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
        if not os.path.exists(dirname):
            os.makedirs(dirname, exist_ok=True)
        while retries + 1 > 0:
            # Disable pyling too broad Exception
            # pylint: disable=W0703
            try:
                print('Downloading {} from {}...'.format(fname, url))
                if is_s3:
                    response = s3.meta.client.head_object(Bucket=s3_bucket_name,
                                                          Key=s3_key)
                    total_size = int(response.get('ContentLength', 0))
                    random_uuid = str(uuid.uuid4())
                    tmp_path = '{}.{}'.format(fname, random_uuid)
                    if tqdm is not None:
                        # Progress-bar callback invoked by boto3 per transferred chunk.
                        def hook(t_obj):
                            def inner(bytes_amount):
                                t_obj.update(bytes_amount)
                            return inner
                        with tqdm.tqdm(total=total_size, unit='iB', unit_scale=True) as t:
                            s3.meta.client.download_file(s3_bucket_name, s3_key, tmp_path,
                                                         Callback=hook(t))
                    else:
                        s3.meta.client.download_file(s3_bucket_name, s3_key, tmp_path)
                else:
                    r = requests.get(url, stream=True, verify=verify_ssl)
                    if r.status_code != 200:
                        raise RuntimeError('Failed downloading url {}'.format(url))
                    # create uuid for temporary files
                    random_uuid = str(uuid.uuid4())
                    total_size = int(r.headers.get('content-length', 0))
                    chunk_size = 1024
                    if tqdm is not None:
                        t = tqdm.tqdm(total=total_size, unit='iB', unit_scale=True)
                    with open('{}.{}'.format(fname, random_uuid), 'wb') as f:
                        for chunk in r.iter_content(chunk_size=chunk_size):
                            if chunk: # filter out keep-alive new chunks
                                if tqdm is not None:
                                    t.update(len(chunk))
                                f.write(chunk)
                    if tqdm is not None:
                        t.close()
                # if the target file exists(created by other processes)
                # and have the same hash with target file
                # delete the temporary file
                if not os.path.exists(fname) or (sha1_hash and not sha1sum(fname) == sha1_hash):
                    # atomic operation in the same file system
                    replace_file('{}.{}'.format(fname, random_uuid), fname)
                else:
                    try:
                        os.remove('{}.{}'.format(fname, random_uuid))
                    except OSError:
                        pass
                    finally:
                        warnings.warn(
                            'File {} exists in file system so the downloaded file is deleted'.format(fname))
                if sha1_hash and not sha1sum(fname) == sha1_hash:
                    raise UserWarning(
                        'File {} is downloaded but the content hash does not match.'
                        ' The repo may be outdated or download may be incomplete. '
                        'If the "repo_url" is overridden, consider switching to '
                        'the default repo.'.format(fname))
                break
            except Exception as e:
                retries -= 1
                if retries <= 0:
                    raise e
                print('download failed due to {}, retrying, {} attempt{} left'
                      .format(repr(e), retries, 's' if retries > 1 else ''))
    return fname
def path_expander(path, base_folder):
    """Prefix each ';'-separated entry of *path* with *base_folder*."""
    expanded = [os.path.join(base_folder, part) for part in path.split(';')]
    return ';'.join(expanded)
def protected_zip_extraction(zipfile_path, sha1_hash, folder):
    """Extract *zipfile_path* into *folder*, skipping work already done.

    A hidden signature file named ".<first-6-hash-chars>.sig" marks a
    completed extraction; if it already exists the archive is not
    re-extracted.

    Returns
    -------
    folder
        The directory the zipfile was extracted into.
    """
    os.makedirs(folder, exist_ok=True)
    sig_name = None
    if sha1_hash:
        sig_name = '.{}.sig'.format(sha1_hash[:6])
        if os.path.exists(os.path.join(folder, sig_name)):
            # Signature present: a previous extraction finished successfully.
            return folder
    logging.info('Extract files...')
    with zipfile.ZipFile(zipfile_path, 'r') as archive:
        archive.extractall(folder)
    if sig_name:
        # Touch the signature file to mark the extraction as complete.
        open(os.path.join(folder, sig_name), 'w').close()
    return folder
| [
"noreply@github.com"
] | noreply@github.com |
c00366144af3c60cf5607f0d5fc17155c15ffd32 | 2623a7d5002c78c8dbe571db931394a61b9abca0 | /register/pymote_env/Scripts/pilprint.py | 6ed994857db0f5bb98a48cebb386be1a122f8cfe | [] | no_license | harshit23897/IIITV-ECAMPUS-FINAL | 41cdc4948b09c342afa5a2e2250fec13027bf08b | 87df78e5ffb3363d8ce496c6fc5041eb4e2e71e6 | refs/heads/master | 2021-09-09T01:51:07.458945 | 2018-03-13T07:47:56 | 2018-03-13T07:47:56 | 113,251,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,665 | py | #!c:\users\dell-pc\desktop\iiitv-ecampus-master\register\pymote_env\scripts\python.exe
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
import subprocess
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
    """Build a one-line caption: "<basename> (<FORMAT> WxH MODE)".

    The format name is omitted when the image reports no format.
    """
    title = os.path.splitext(os.path.split(filepath)[1])[0]
    # Avoid shadowing the builtin `format`.
    fmt = " (%dx%d "
    if image.format:
        fmt = " (" + image.format + " %dx%d "
    return title + fmt % image.size + image.mode + ")"
# Command-line entry: print usage when called without arguments, then parse
# the -c / -d / -p / -P options.
if len(sys.argv) == 1:
    print("PIL Print 0.3/2003-05-05 -- print image files")
    print("Usage: pilprint files...")
    print("Options:")
    print(" -c colour printer (default is monochrome)")
    print(" -d debug (show available drivers)")
    print(" -p print via lpr (default is stdout)")
    print(" -P <printer> same as -p but use given printer")
    sys.exit(1)
try:
    opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
    print(v)
    sys.exit(1)
printerArgs = [] # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
    if o == "-d":
        # debug: show available drivers
        Image.init()
        print(Image.ID)
        sys.exit(1)
    elif o == "-c":
        # colour printer
        monochrome = 0
    elif o == "-p":
        # default printer channel
        printerArgs = ["lpr"]
    elif o == "-P":
        # printer channel (lpr -P<name>)
        printerArgs = ["lpr", "-P%s" % a]
# Render each image as a one-page PostScript document, either to stdout or
# piped into lpr when a printer channel was selected.
for filepath in argv:
    try:
        im = Image.open(filepath)
        title = description(filepath, im)
        if monochrome and im.mode not in ["1", "L"]:
            # draft mode converts cheaply before the full L conversion
            im.draft("L", im.size)
            im = im.convert("L")
        if printerArgs:
            p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
            fp = p.stdin
        else:
            fp = sys.stdout
        ps = PSDraw.PSDraw(fp)
        ps.begin_document()
        ps.setfont("Helvetica-Narrow-Bold", 18)
        ps.text((letter[0], letter[3]+24), title)
        ps.setfont("Helvetica-Narrow-Bold", 8)
        ps.text((letter[0], letter[1]-30), VERSION)
        ps.image(letter, im)
        ps.end_document()
        if printerArgs:
            fp.close()
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt — Ctrl-C during a batch was reported as a failed
        # print job instead of aborting. Now only real errors are caught.
        print("cannot print image", end=' ')
        print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
"harshitjain23897@gmail.com"
] | harshitjain23897@gmail.com |
f92cbe0802128443f0b9e5f1da8b25c2405691ea | 164e18ee979cc134e31212cb6d5af4f31cb37f60 | /Homework_3/I_polyglots/main.py | cd79f2db7804ac6f07b5bb9b61b706b1360bc134 | [] | no_license | IlyasDevelopment/Yandex-Algorithm-Training | 626649f2bca6392858d9411a1b4ffea1d028a2ba | 2a7539dcb58b3c69b804635855997ecd86f0ea0a | refs/heads/main | 2023-07-13T21:23:37.501005 | 2021-08-06T11:04:35 | 2021-08-06T11:04:35 | 376,364,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | def get_languages(m):
languages = set()
for i in range(m):
languages.add(input())
return languages
n = int(input())
all_languages = set()
common_languages = set()
for i in range(n):
m = int(input())
new_languages = get_languages(m)
all_languages |= new_languages
if i == 0:
common_languages = new_languages
common_languages &= new_languages
print(len(common_languages))
print('\n'.join(common_languages))
print(len(all_languages))
print('\n'.join(all_languages))
| [
"noreply@github.com"
] | noreply@github.com |
b49a2361ae55bc3867dea12e98fd98daf22d2e36 | d26c44a84a824f63cfdcf7c45d26f14940179701 | /core/utils.py | 30423ef39200ea368629d1ea5cad2a3e49f7a893 | [
"MIT"
] | permissive | TebogoNakampe/transfer-learning-for-sign-language-recognition | 5c8871b5f0207bacefcc2f516d69f090e16d3f91 | e0627115e6b68d6b85244d484011bb3895ccf4ee | refs/heads/master | 2022-12-05T13:31:22.665213 | 2020-08-29T18:42:02 | 2020-08-29T18:42:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | from pathlib import Path
import pandas as pd
from pandas import DataFrame
def package_path(package):
"""Returns the location of the passed package.
Arguments:
package: A package object.
Returns:
An absolute Path object pointing to the package's location.
"""
return Path(package.__path__[0])
def save_dataframe(df: DataFrame, path):
"""Saves a DataFrame as a pickled object.
Arguments:
df: The DataFrame to be saved.
path: A string representing the path where the DataFrame will be saved.
"""
df.to_pickle(path)
def load_dataframe(path):
"""Loads a DataFrame from a saved pickled object.
Arguments:
path: A string representing the path where the DataFrame will be loaded from.
Returns:
The loaded DataFrame.
"""
return pd.read_pickle(path)
| [
"roman.toengi@gmail.com"
] | roman.toengi@gmail.com |
064076ab815d5e9e1cec65c82a13e5039b2571f3 | a5f879e45d54eccee4ef5e5eb229dfeabb7711d4 | /otros/imgclas_lo/data_utils.py | 8cb1e3b795efad991ae6eebd685fd31085f4337e | [
"Apache-2.0"
] | permissive | lmc00/tfg_en_desarrollo | ae64fc280ca2f957ec54b5c63cf7ea63c43da6f0 | 30e61f4bb3f060f7468b1bb94930fcbe0d0f92ae | refs/heads/master | 2021-03-08T14:51:53.639367 | 2020-03-10T16:42:07 | 2020-03-10T16:42:07 | 246,352,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,977 | py | """
Miscellaneous functions manage data.
Date: September 2018
Author: Ignacio Heredia
Email: iheredia@ifca.unican.es
Github: ignacioheredia
"""
import os
import threading
from multiprocessing import Pool
import queue
import subprocess
import warnings
import base64
import numpy as np
import requests
from tqdm import tqdm
from tensorflow.keras.utils import to_categorical, Sequence
import cv2
import albumentations
from albumentations.augmentations import transforms
from albumentations.imgaug import transforms as imgaug_transforms
def load_data_splits(splits_dir, im_dir,use_location, split_name='train'):
"""
Load the data arrays from the [train/val/test].txt files.
Lines of txt files have the following format:
'relative_path_to_image' 'image_label_number' 'image_location_label_number'
Parameters
----------
im_dir : str
Absolute path to the image folder.
split_name : str
Name of the data split to load
use_location : boolean
to work properly with or without location data
Returns
-------
X : Numpy array of strs
First colunm: Contains 'absolute_path_to_file' to images.
y : Numpy array of int32
Image label number
"""
if use_location :
print("con location")
#Usual workflow with extra stuff in order to return location labels properly
if '{}.txt'.format(split_name) not in os.listdir(splits_dir):
raise ValueError("Invalid value for the split_name parameter: there is no `{}.txt` file in the `{}` "
"directory.".format(split_name, splits_dir))
# Loading splits
print("Loading {} data...".format(split_name))
split = np.genfromtxt(os.path.join(splits_dir, '{}.txt'.format(split_name)), dtype='str', delimiter=' ')
X = np.array([os.path.join(im_dir, i) for i in split[:, 0]])
#TODO Check this part of the code
if len(split.shape) == 2:
y = split[:, 1].astype(np.int32)
location = split[:, 2].astype(np.int32)
else: # maybe test file has not labels
y = None
return X, y, location
else:
print("sin location")
#If no location data, the workflow resumes as usual
if '{}.txt'.format(split_name) not in os.listdir(splits_dir):
raise ValueError("Invalid value for the split_name parameter: there is no `{}.txt` file in the `{}` "
"directory.".format(split_name, splits_dir))
# Loading splits
print("Loading {} data...".format(split_name))
split = np.genfromtxt(os.path.join(splits_dir, '{}.txt'.format(split_name)), dtype='str', delimiter=' ')
X = np.array([os.path.join(im_dir, i) for i in split[:, 0]])
#TODO Check this part of the code
if len(split.shape) == 2:
y = split[:, 1].astype(np.int32)
else: # maybe test file has not labels
y = None
return X, y
def mount_nextcloud(frompath, topath):
    """
    Mount a NextCloud folder in your local machine or viceversa.

    Runs `rclone copy frompath topath` and warns on any stderr output.
    Returns the (stdout, stderr) byte strings of the rclone process.
    """
    rclone_cmd = ['rclone', 'copy', frompath, topath]
    proc = subprocess.Popen(rclone_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = proc.communicate()
    if error:
        warnings.warn("Error while mounting NextCloud: {}".format(error))
    return output, error
def load_class_names(splits_dir):
    """
    Load list of class names from `classes.txt` in *splits_dir*.

    Returns
    -------
    Numpy array of shape (N) containing strs with class names
    """
    print("Loading class names...")
    classes_file = os.path.join(splits_dir, 'classes.txt')
    return np.genfromtxt(classes_file, dtype='str', delimiter='/n')
def load_location_names(splits_dir):
    """
    Load list of location names from `locations.txt` in *splits_dir*.

    Returns
    -------
    Numpy array of shape (N) containing strs with location names
    """
    print("Loading location names...")
    locations_file = os.path.join(splits_dir, 'locations.txt')
    return np.genfromtxt(locations_file, dtype='str', delimiter='/n')
def load_class_info(splits_dir):
    """
    Load per-class info strings from `info.txt` in *splits_dir*.

    Returns
    -------
    Numpy array of shape (N) containing strs with class info
    """
    print("Loading class info...")
    info_file = os.path.join(splits_dir, 'info.txt')
    return np.genfromtxt(info_file, dtype='str', delimiter='/n')
def load_image(filename, filemode='local'):
    """
    Function to load a local image path (or an url) into a numpy array.

    Parameters
    ----------
    filename : str
        Path or url to the image
    filemode : {'local','url'}
        - 'local': filename is absolute path in local disk.
        - 'url': filename is internet url (plain url or base64 data-uri).

    Returns
    -------
    A numpy array in RGB channel order.

    Raises
    ------
    ValueError
        If the path/url is invalid or does not decode to an image.
    """
    if filemode == 'local':
        image = cv2.imread(filename, cv2.IMREAD_COLOR)
        if image is None:
            raise ValueError('The local path does not exist or does not correspond to an image: \n {}'.format(filename))
    elif filemode == 'url':
        try:
            if filename.startswith('data:image'):  # base64 encoded string
                data = base64.b64decode(filename.split(';base64,')[1])
            else:  # normal url
                data = requests.get(filename).content
            data = np.frombuffer(data, np.uint8)
            image = cv2.imdecode(data, cv2.IMREAD_COLOR)
            if image is None:
                raise Exception
        except Exception as e:
            # Fix: was a bare `except:` that also swallowed SystemExit and
            # KeyboardInterrupt; narrowed and chained so the real cause of
            # the failure is preserved in the traceback.
            raise ValueError('Incorrect url path: \n {}'.format(filename)) from e
    else:
        raise ValueError('Invalid value for filemode.')
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # change from default BGR OpenCV format to Python's RGB format
    return image
def preprocess_batch(batch, mean_RGB, std_RGB, mode='tf', channels_first=False):
    """
    Standardize batch to feed the net. Adapted from [1] to take replace the default imagenet mean and std.
    [1] https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py
    Parameters
    ----------
    batch : list of numpy arrays
    mean_RGB, std_RGB : list of floats, len=3
        Mean/std RGB values for your dataset.
    mode : str, {'tf', 'caffe', 'torch'}
        Normalization convention applied after mean-centering:
        'caffe' flips RGB->BGR, 'tf' scales by 1/127.5, 'torch' divides by std.
    channels_first : bool
        Use batch of shape (N, C, H, W) instead of (N, H, W, C)
    Returns
    -------
    Numpy array (float32)
    """
    # FIX: isinstance instead of `type(batch) is list` (idiomatic, same intent).
    assert isinstance(batch, list), "Your batch must be a list of numpy arrays"
    mean_RGB, std_RGB = np.array(mean_RGB), np.array(std_RGB)
    batch = np.array(batch) - mean_RGB[None, None, None, :]  # mean centering
    if mode == 'caffe':
        batch = batch[:, :, :, ::-1]  # switch from RGB to BGR
    if mode == 'tf':
        batch /= 127.5  # scaling between [1, -1]
    if mode == 'torch':
        batch /= std_RGB
    if channels_first:
        batch = batch.transpose(0, 3, 1, 2)  # shape(N, 3, 224, 224)
    return batch.astype(np.float32)
def augment(im, params=None):
    """
    Perform data augmentation on some image using the albumentations package.
    Parameters
    ----------
    im : Numpy array
    params : dict or None
        Contains the data augmentation parameters
        Mandatory keys:
        - h_flip ([0,1] float): probability of performing an horizontal left-right mirroring.
        - v_flip ([0,1] float): probability of performing an vertical up-down mirroring.
        - rot ([0,1] float): probability of performing a rotation to the image.
        - rot_lim (int): max degrees of rotation.
        - stretch ([0,1] float): probability of randomly stretching an image.
        - crop ([0,1] float): randomly take an image crop.
        - zoom ([0,1] float): random zoom applied to crop_size.
            --> Therefore the effective crop size at each iteration will be a
                random number between 1 and crop*(1-zoom). For example:
                * crop=1, zoom=0: no crop of the image
                * crop=1, zoom=0.1: random crop of random size between 100% image and 90% of the image
                * crop=0.9, zoom=0.1: random crop of random size between 90% image and 80% of the image
                * crop=0.9, zoom=0: random crop of always 90% of the image
            Image size refers to the size of the shortest side.
        - blur ([0,1] float): probability of randomly blurring an image.
        - pixel_noise ([0,1] float): probability of randomly adding pixel noise to an image.
        - pixel_sat ([0,1] float): probability of randomly using HueSaturationValue in the image.
        - cutout ([0,1] float): probability of using cutout in the image.
    Returns
    -------
    Numpy array
    """
    ## 1) Crop the image
    # The crop is applied unconditionally (and before all pixel transforms);
    # its size is a random fraction of the shortest side, see docstring above.
    effective_zoom = np.random.rand() * params['zoom']
    crop = params['crop'] - effective_zoom
    ly, lx, channels = im.shape
    crop_size = int(crop * min([ly, lx]))
    rand_x = np.random.randint(low=0, high=lx - crop_size + 1)
    rand_y = np.random.randint(low=0, high=ly - crop_size + 1)
    crop = transforms.Crop(x_min=rand_x,
                           y_min=rand_y,
                           x_max=rand_x + crop_size,
                           y_max=rand_y + crop_size)
    im = crop(image=im)['image']
    ## 2) Now add the transformations for augmenting the image pixels
    # Each transform below is appended with its own probability; albumentations
    # decides per-call whether it fires.
    transform_list = []
    # Add random stretching
    # NOTE(review): IAA* transforms were removed in albumentations >= 1.0 --
    # confirm the pinned albumentations version still ships imgaug wrappers.
    if params['stretch']:
        transform_list.append(
            imgaug_transforms.IAAPerspective(scale=0.1, p=params['stretch'])
        )
    # Add random rotation
    if params['rot']:
        transform_list.append(
            transforms.Rotate(limit=params['rot_lim'], p=params['rot'])
        )
    # Add horizontal flip
    if params['h_flip']:
        transform_list.append(
            transforms.HorizontalFlip(p=params['h_flip'])
        )
    # Add vertical flip
    if params['v_flip']:
        transform_list.append(
            transforms.VerticalFlip(p=params['v_flip'])
        )
    # Add some blur to the image
    if params['blur']:
        transform_list.append(
            albumentations.OneOf([
                transforms.MotionBlur(blur_limit=7, p=1.),
                transforms.MedianBlur(blur_limit=7, p=1.),
                transforms.Blur(blur_limit=7, p=1.),
            ], p=params['blur'])
        )
    # Add pixel noise
    if params['pixel_noise']:
        transform_list.append(
            albumentations.OneOf([
                transforms.CLAHE(clip_limit=2, p=1.),
                imgaug_transforms.IAASharpen(p=1.),
                imgaug_transforms.IAAEmboss(p=1.),
                transforms.RandomBrightnessContrast(contrast_limit=0, p=1.),
                transforms.RandomBrightnessContrast(brightness_limit=0, p=1.),
                transforms.RGBShift(p=1.),
                transforms.RandomGamma(p=1.)#,
                # transforms.JpegCompression(),
                # transforms.ChannelShuffle(),
                # transforms.ToGray()
            ], p=params['pixel_noise'])
        )
    # Add pixel saturation
    if params['pixel_sat']:
        transform_list.append(
            transforms.HueSaturationValue(p=params['pixel_sat'])
        )
    # Remove randomly remove some regions from the image
    if params['cutout']:
        ly, lx, channels = im.shape
        scale_low, scale_high = 0.05, 0.25  # min and max size of the squares wrt the full image
        scale = np.random.uniform(scale_low, scale_high)
        transform_list.append(
            transforms.Cutout(num_holes=8, max_h_size=int(scale*ly), max_w_size=int(scale*lx), p=params['cutout'])
        )
    # Compose all image transformations and augment the image
    augmentation_fn = albumentations.Compose(transform_list)
    im = augmentation_fn(image=im)['image']
    return im
def resize_im(im, height, width):
    """Resize *im* to (height, width) with albumentations' Resize transform."""
    return transforms.Resize(height=height, width=width)(image=im)['image']
def data_generator(inputs, targets, batch_size, mean_RGB, std_RGB, preprocess_mode, aug_params, num_classes,
                   im_size=224, shuffle=True):
    """
    Generator to feed Keras fit function
    Parameters
    ----------
    inputs : Numpy array, shape (N, H, W, C)
    targets : Numpy array, shape (N)
    batch_size : int
    shuffle : bool
    aug_params : dict
    im_size : int
        Final image size to feed the net's input (eg. 224 for Resnet).
    Returns
    -------
    Generator of inputs and labels
    """
    assert len(inputs) == len(targets)
    assert len(inputs) >= batch_size
    # Create list of indices
    idxs = np.arange(len(inputs))
    if shuffle:
        np.random.shuffle(idxs)
    # # Reshape targets to the correct shape
    # if len(targets.shape) == 1:
    #     print('reshaping targets')
    #     targets = targets.reshape(-1, 1)
    # NOTE: the step-range drops the final partial batch (unlike data_sequence,
    # whose __len__ uses ceil and keeps it).
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        excerpt = idxs[start_idx:start_idx + batch_size]
        batch_X = []
        for i in excerpt:
            im = load_image(inputs[i], filemode='local')
            # Augmentation is applied unconditionally here; data_sequence only
            # augments when aug_params is truthy.
            im = augment(im, params=aug_params)
            im = resize_im(im, height=im_size, width=im_size)
            batch_X.append(im)  # shape (N, 224, 224, 3)
        batch_X = preprocess_batch(batch=batch_X, mean_RGB=mean_RGB, std_RGB=std_RGB, mode=preprocess_mode)
        batch_y = to_categorical(targets[excerpt], num_classes=num_classes)
        yield batch_X, batch_y
def buffered_generator(source_gen, buffer_size=10):
    """
    Generator that runs a slow source generator in a separate thread. Beware of the GIL!
    Author: Benanne (github-kaggle/benanne/ndsb)
    Parameters
    ----------
    source_gen : generator
    buffer_size: the maximal number of items to pre-generate (length of the buffer)
    Returns
    -------
    Buffered generator
    Raises
    ------
    RuntimeError
        If buffer_size < 2.
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer size is 2!")
    # FIX: use a unique sentinel object instead of None, so a source generator
    # that legitimately yields None is no longer silently truncated.
    end_marker = object()
    buffer = queue.Queue(maxsize=buffer_size - 1)
    # the effective buffer size is one less, because the generation process
    # will generate one extra element and block until there is room in the buffer.

    def _buffered_generation_thread(source_gen, buffer):
        for data in source_gen:
            buffer.put(data, block=True)
        buffer.put(end_marker)  # sentinel: signal the end of the iterator

    thread = threading.Thread(target=_buffered_generation_thread, args=(source_gen, buffer))
    thread.daemon = True  # don't keep the interpreter alive for a half-drained buffer
    thread.start()
    while True:
        data = buffer.get()
        if data is end_marker:
            break
        yield data
class data_sequence(Sequence):
    """
    Instance of a Keras Sequence that is safer to use with multiprocessing than a standard generator.
    Check https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
    TODO: Add sample weights on request
    """
    def __init__(self, inputs, targets, batch_size, mean_RGB, std_RGB, preprocess_mode, aug_params, num_classes,
                 im_size=224, shuffle=True):
        """
        Parameters are the same as in the data_generator function
        """
        assert len(inputs) == len(targets)
        assert len(inputs) >= batch_size
        self.inputs = inputs
        self.targets = targets
        self.batch_size = batch_size
        self.mean_RGB = mean_RGB
        self.std_RGB = std_RGB
        self.preprocess_mode = preprocess_mode
        self.aug_params = aug_params
        self.num_classes = num_classes
        self.im_size = im_size
        self.shuffle = shuffle
        # Build (and optionally shuffle) the index order for the first epoch.
        self.on_epoch_end()
    def __len__(self):
        # ceil: the last (possibly smaller) batch is kept, unlike data_generator.
        return int(np.ceil(len(self.inputs) / float(self.batch_size)))
    def __getitem__(self, idx):
        batch_idxs = self.indexes[idx*self.batch_size: (idx+1)*self.batch_size]
        batch_X = []
        for i in batch_idxs:
            im = load_image(self.inputs[i])
            # Augment only when aug_params is truthy (e.g. skip at validation).
            if self.aug_params:
                im = augment(im, params=self.aug_params)
            im = resize_im(im, height=self.im_size, width=self.im_size)
            batch_X.append(im)  # shape (N, 224, 224, 3)
        batch_X = preprocess_batch(batch=batch_X, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
        batch_y = to_categorical(self.targets[batch_idxs], num_classes=self.num_classes)
        return batch_X, batch_y
    def on_epoch_end(self):
        """Updates indexes after each epoch"""
        self.indexes = np.arange(len(self.inputs))
        if self.shuffle:
            np.random.shuffle(self.indexes)
class data_sequence_lo(Sequence):
    """
    Variant of ``data_sequence`` that also yields a one-hot encoded location
    per sample, so the network receives (images, labels, locations).
    Instance of a Keras Sequence that is safer to use with multiprocessing
    than a standard generator.
    Check https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
    TODO: Add sample weights on request
    """
    def __init__(self, inputs, locations, targets, batch_size, mean_RGB, std_RGB, preprocess_mode, aug_params, num_classes, num_locations,
                 im_size=224, shuffle=True):
        """
        Same parameters as data_sequence, plus the per-sample list of
        locations and an int with the number of distinct locations.
        """
        assert len(inputs) == len(targets)
        assert len(inputs) >= batch_size
        self.inputs = inputs
        self.locations = locations
        self.targets = targets
        self.batch_size = batch_size
        self.mean_RGB = mean_RGB
        self.std_RGB = std_RGB
        self.preprocess_mode = preprocess_mode
        self.aug_params = aug_params
        self.num_classes = num_classes
        self.num_locations = num_locations
        self.im_size = im_size
        self.shuffle = shuffle
        self.on_epoch_end()
    def __len__(self):
        # ceil: keep the final (possibly smaller) batch
        return int(np.ceil(len(self.inputs) / float(self.batch_size)))
    def __getitem__(self, idx):
        batch_idxs = self.indexes[idx*self.batch_size: (idx+1)*self.batch_size]
        batch_X = []
        for i in batch_idxs:
            im = load_image(self.inputs[i])
            if self.aug_params:
                im = augment(im, params=self.aug_params)
            im = resize_im(im, height=self.im_size, width=self.im_size)
            batch_X.append(im)  # shape (N, 224, 224, 3)
        batch_X = preprocess_batch(batch=batch_X, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
        batch_y = to_categorical(self.targets[batch_idxs], num_classes=self.num_classes)
        # FIX: to_categorical has no ``num_locations`` kwarg (that call raised
        # TypeError); the number of categories is always passed as num_classes.
        batch_locations = to_categorical(self.locations[batch_idxs], num_classes=self.num_locations)
        return batch_X, batch_y, batch_locations
    def on_epoch_end(self):
        """Updates indexes after each epoch"""
        self.indexes = np.arange(len(self.inputs))
        if self.shuffle:
            np.random.shuffle(self.indexes)
def standard_tencrop_batch(im, crop_prop=0.9):
    """
    Returns an ordered ten crop batch of images from an original image (corners, center + mirrors).
    Parameters
    ----------
    im : numpy array, type np.uint8
    crop_prop: float, [0, 1]
        Size of the crop with respect to the whole image
    Returns
    -------
    List of 10 numpy arrays
    """
    batch = []
    min_side = np.amin(im.shape[:2])
    # NOTE: resizing to (min_side, min_side) makes the image square first, so
    # the four corner crops below are taken from a distorted image if the
    # original was not square.
    im = resize_im(im, height=min_side, width=min_side)  # resize to shorter border
    h, w = min_side, min_side  # height, width (square)
    crop_size = int(crop_prop * min_side)
    # Crops
    c1 = transforms.Crop(x_min=0,
                         y_min=0,
                         x_max=crop_size,
                         y_max=crop_size)(image=im)['image']  # top-left
    c2 = transforms.Crop(x_min=0,
                         y_min=h-crop_size,
                         x_max=crop_size,
                         y_max=h)(image=im)['image']  # bottom-left
    c3 = transforms.Crop(x_min=w-crop_size,
                         y_min=0,
                         x_max=w,
                         y_max=crop_size)(image=im)['image']  # top-right
    c4 = transforms.Crop(x_min=w-crop_size,
                         y_min=h-crop_size,
                         x_max=w,
                         y_max=h)(image=im)['image']  # bottom-right
    c5 = transforms.Crop(x_min=np.round((w-crop_size)/2).astype(int),
                         y_min=np.round((h-crop_size)/2).astype(int),
                         x_max=np.round((w+crop_size)/2).astype(int),
                         y_max=np.round((h+crop_size)/2).astype(int))(image=im)['image']  # center
    # Save crop and its mirror
    lr_aug = albumentations.HorizontalFlip(p=1)
    for image in [c1, c2, c3, c4, c5]:
        batch.append(image)
        batch.append(lr_aug(image=image)['image'])
    return batch
class k_crop_data_sequence(Sequence):
    """
    Data sequence generator for test time to feed to predict_generator.
    Each batch delivered is composed by multiple crops (default=10) of the same image.
    """
    def __init__(self, inputs, mean_RGB, std_RGB, preprocess_mode, aug_params, crop_number=10, crop_mode='random',
                 filemode='local', im_size=224):
        """
        Parameters are the same as in the data_generator function except for:
        Parameters
        ----------
        crop_number : int
            Number of crops of each image to take.
        mode :str, {'random', 'standard'}
            If 'random' data augmentation is performed randomly.
            If 'standard' we take the standard 10 crops (corners +center + mirrors)
        filemode : {'local','url'}
            - 'local': filename is absolute path in local disk.
            - 'url': filename is internet url.
        """
        self.inputs = inputs
        self.mean_RGB = mean_RGB
        self.std_RGB = std_RGB
        self.preprocess_mode = preprocess_mode
        self.aug_params = aug_params
        self.crop_number = crop_number
        self.crop_mode = crop_mode
        self.filemode = filemode
        self.im_size = im_size
    def __len__(self):
        # One batch per input image (the batch holds the image's crops).
        return len(self.inputs)
    def __getitem__(self, idx):
        batch_X = []
        im = load_image(self.inputs[idx], filemode=self.filemode)
        if self.crop_mode == 'random':
            for _ in range(self.crop_number):
                if self.aug_params:
                    im_aug = augment(im, params=self.aug_params)
                else:
                    im_aug = np.copy(im)
                im_aug = resize_im(im_aug, height=self.im_size, width=self.im_size)
                batch_X.append(im_aug)  # shape (N, 224, 224, 3)
        # NOTE(review): in 'standard' mode crop_number is ignored (always 10
        # crops) and the crops are NOT resized to im_size -- confirm intended.
        if self.crop_mode == 'standard':
            batch_X = standard_tencrop_batch(im)
        batch_X = preprocess_batch(batch=batch_X, mean_RGB=self.mean_RGB, std_RGB=self.std_RGB, mode=self.preprocess_mode)
        return batch_X
def im_stats(filename):
    """Per-channel mean and std of a single local image; helper for compute_meanRGB."""
    pixels = load_image(filename, filemode='local')
    channel_mean = np.mean(pixels, axis=(0, 1))
    channel_std = np.std(pixels, axis=(0, 1))
    return channel_mean.tolist(), channel_std.tolist()
def compute_meanRGB(im_list, verbose=False, workers=4):
    """
    Returns the mean and std RGB values for the whole dataset.
    For example in the plantnet dataset we have:
        mean_RGB = np.array([ 107.59348955,  112.1047813 ,   80.9982362 ])
        std_RGB = np.array([ 52.78326119,  50.56163087,  50.86486131])
    Parameters
    ----------
    im_list : array of strings
        Array where the first column is image_path (or image_url). Shape (N,).
    verbose : bool
        Show progress bar
    workers: int
        Numbers of parallel workers to perform the computation with.
    References
    ----------
    https://stackoverflow.com/questions/41920124/multiprocessing-use-tqdm-to-display-a-progress-bar
    """
    print('Computing mean RGB pixel with {} workers...'.format(workers))
    with Pool(workers) as p:
        r = list(tqdm(p.imap(im_stats, im_list),
                      total=len(im_list),
                      disable=not verbose))  # FIX: tqdm's `disable` hides the bar, so verbose=True must map to disable=False
    r = np.asarray(r)
    mean, std = r[:, 0], r[:, 1]
    # NOTE(review): the dataset std is approximated as the mean of per-image
    # stds (not the true pooled std) -- confirm this is acceptable downstream.
    mean, std = np.mean(mean, axis=0), np.mean(std, axis=0)
    print('Mean RGB pixel: {}'.format(mean.tolist()))
    print('Standard deviation of RGB pixel: {}'.format(std.tolist()))
    return mean.tolist(), std.tolist()
def compute_classweights(labels, max_dim=None, mode='balanced'):
    """
    Compute the class weights for a set of labels to account for label imbalance.
    Parameters
    ----------
    labels : numpy array, type (ints), shape (N)
    max_dim : int
        Maximum number of classes. Default is the max value in labels.
    mode : str, {'balanced', 'log'}
    Returns
    -------
    Numpy array, type (float32), shape (N)
    """
    if mode is None:
        return None
    counts = np.bincount(labels)
    weights = np.sum(counts) / counts  # inverse-frequency weighting
    # Pad with zeros when high-numbered labels are absent from the sample
    if max_dim is not None:
        missing = max_dim - len(weights)
        if missing != 0:
            weights = np.pad(weights, pad_width=(0, missing), mode='constant', constant_values=0)
    # Apply the requested transformation
    if mode == 'balanced':
        pass
    elif mode == 'log':
        # do not use --> produces numerical instabilities at inference when transferring weights trained on GPU to CPU
        weights = np.log(weights)  # + 1
    else:
        raise ValueError('{} is not a valid option for parameter "mode"'.format(mode))
    return weights.astype(np.float32)
def json_friendly(d):
    """
    Return a json friendly dictionary (mainly remove numpy data types).
    Scalars np.float32/64 become Python floats; numpy arrays and lists become
    plain (possibly nested) Python lists; everything else is kept as-is.
    """
    new_d = {}
    for k, v in d.items():
        if isinstance(v, (np.float32, np.float64)):
            v = float(v)
        elif isinstance(v, (np.ndarray, list)):
            # FIX: guard empty sequences -- v[0] raised IndexError on [] before.
            if len(v) == 0:
                v = np.array(v).tolist()
            elif isinstance(v[0], (np.float32, np.float64)):
                v = np.array(v).astype(float).tolist()
            else:
                v = np.array(v).tolist()
        new_d[k] = v
    return new_d
| [
"lmc00@alumnos.unican.es"
] | lmc00@alumnos.unican.es |
c91a7b5364ed05d94b915ad3edca42e51af1ea75 | f11600b9a256bf6a2b584d127faddc27a0f0b474 | /normal/662.py | df18e5f15146532ef8f12376b46b3043f70c7355 | [] | no_license | longhao54/leetcode | 9c1f0ce4ca505ec33640dd9b334bae906acd2db5 | d156c6a13c89727f80ed6244cae40574395ecf34 | refs/heads/master | 2022-10-24T07:40:47.242861 | 2022-10-20T08:50:52 | 2022-10-20T08:50:52 | 196,952,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | class Solution:
def widthOfBinaryTree(self, root: TreeNode) -> int:
ans = 1
dp = [(root,1)]
while dp:
s, m = float('inf'), float('-inf')
l = len(dp)
for i in range(l):
t, i = dp.pop(0)
if t.left:
dp.append((t.left, i*2))
if t.right:
dp.append((t.right, i*2+1))
s = min(s, i)
m = max(m, i)
ans = max(m-s+1, ans)
return ans
| [
"jinlha@jiedaibao.com"
] | jinlha@jiedaibao.com |
3fd3878a08b3f0b3f00dac287d62c71984f01380 | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_available_waf_rule_sets_result_py3.py | 1d90cb1f1470bffbefbb643312ec48f97b2613b3 | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 1,104 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationGatewayAvailableWafRuleSetsResult(Model):
    """Response for ApplicationGatewayAvailableWafRuleSets API service call.

    :param value: The list of application gateway rule sets.
    :type value:
     list[~azure.mgmt.network.v2017_09_01.models.ApplicationGatewayFirewallRuleSet]
    """

    # msrest (de)serialization map: wire key 'value' holds a list of rule sets.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationGatewayFirewallRuleSet]'},
    }

    def __init__(self, *, value=None, **kwargs) -> None:
        super(ApplicationGatewayAvailableWafRuleSetsResult, self).__init__(**kwargs)
        self.value = value
| [
"noreply@github.com"
] | noreply@github.com |
2a2366415fcc370032093aed684574abb337bee2 | 3b290545c52066c8ff7a2ae6c677c7782bddcb24 | /app.py | 9c51980e54b9f2aeafb01f95e25fbc68900f450b | [] | no_license | munira4x/ModelDeploy | 8beac73b370db7a3f07180ec4e6e7ce26e768af5 | 781930602ca72c37e9a24c30b48d9267140584a3 | refs/heads/main | 2023-07-21T07:14:08.225886 | 2021-08-22T20:06:04 | 2021-08-22T20:06:04 | 398,889,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | from flask import *
import pandas as pd
# Flask application instance shared by all route handlers below.
app = Flask(__name__)
@app.route("/")
def hello():
    # NOTE(review): this returns the literal string "index.html" to the
    # browser rather than rendering the template -- presumably
    # render_template("index.html") was intended; confirm.
    return("index.html")
# FIX: Flask's route() takes `methods` (plural); `method=['POST']` raised a
# TypeError at import time, so the app could never start.
@app.route("/index", methods=['POST'])
def submit():
    """Handle the form POST: load the toy dataset and render it as an HTML table.

    The original handler re-registered a nested route on every request (which
    raises on the second POST) and returned None; flattened here instead.
    """
    if request.method == "POST":
        # NOTE(review): a .csv read via read_excel, and the filename looks
        # misspelled ('toy_datadase') -- kept verbatim, confirm the real file.
        data = pd.read_excel('toy_datadase.csv')
        data.set_index(['Name'], inplace=True)
        data.index.name = None
        return data.to_html()
    return ''
# Run the development server when executed directly (not under a WSGI host).
if __name__ == "__main__":
    app.run(debug=True)
"noreply@github.com"
] | noreply@github.com |
f9647ae5295cd9029bbc3b670e2bb13a08a4ecbb | fbbb58d9d8386c694c2787464bca7753448098d3 | /mmdet/models/necks/fpg.py | c8e0d163ccf8cef6211530ba6c1b4d558ff6403f | [
"Apache-2.0"
] | permissive | krishnatejakk/mmdetection | ab61e6280c87e9939f56a1c673b592f2698c25c4 | 9c6143950d19e0d694c4d14b0ca10e9011e7f404 | refs/heads/master | 2023-04-08T14:37:28.334393 | 2021-04-15T03:54:07 | 2021-04-15T03:54:07 | 350,409,149 | 1 | 0 | Apache-2.0 | 2021-04-02T14:59:35 | 2021-03-22T16:14:44 | Python | UTF-8 | Python | false | false | 15,923 | py | import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, caffe2_xavier_init, constant_init, is_norm
from ..builder import NECKS
class Transition(nn.Module):
    """Base class for transition.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

    def forward(self, x):
        # FIX: the stub was declared as ``def forward(x)`` (missing ``self``),
        # which broke any direct call on the base class. Subclasses override.
        pass
class UpInterpolationConv(Transition):
    """A transition used for up-sampling.
    Up-sample the input by interpolation then refines the feature by
    a convolution layer.
    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Up-sampling factor. Default: 2.
        mode (int): Interpolation mode. Default: nearest.
        align_corners (bool): Whether align corners when interpolation.
            Default: None.
        kernel_size (int): Kernel size for the conv. Default: 3.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor=2,
                 mode='nearest',
                 align_corners=None,
                 kernel_size=3,
                 **kwargs):
        super().__init__(in_channels, out_channels)
        self.mode = mode
        self.scale_factor = scale_factor
        self.align_corners = align_corners
        # 'same' padding for odd kernels; **kwargs forwards conv/norm/act
        # configs to mmcv's ConvModule.
        self.conv = ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            **kwargs)
    def forward(self, x):
        # Interpolate first, then refine the up-sampled feature with a conv.
        x = F.interpolate(
            x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners)
        x = self.conv(x)
        return x
class LastConv(Transition):
    """A transition used for refining the output of the last stage.
    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        num_inputs (int): Number of inputs of the FPN features.
        kernel_size (int): Kernel size for the conv. Default: 3.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_inputs,
                 kernel_size=3,
                 **kwargs):
        super().__init__(in_channels, out_channels)
        self.num_inputs = num_inputs
        self.conv_out = ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            **kwargs)
    def forward(self, inputs):
        assert len(inputs) == self.num_inputs
        # Only the feature from the last stacked stage is refined; the earlier
        # stage outputs in `inputs` are ignored by design.
        return self.conv_out(inputs[-1])
@NECKS.register_module()
class FPG(nn.Module):
    """FPG.
    Implementation of `Feature Pyramid Grids (FPG)
    <https://arxiv.org/abs/2004.03580>`_.
    This implementation only gives the basic structure stated in the paper.
    But users can implement different type of transitions to fully explore the
    the potential power of the structure of FPG.
    Args:
        in_channels (int): Number of input channels (feature maps of all levels
            should have the same channels).
        out_channels (int): Number of output channels (used at each scale)
        num_outs (int): Number of output scales.
        stack_times (int): The number of times the pyramid architecture will
            be stacked.
        paths (list[str]): Specify the path order of each stack level.
            Each element in the list should be either 'bu' (bottom-up) or
            'td' (top-down).
        inter_channels (int): Number of inter channels.
        same_up_trans (dict): Transition that goes down at the same stage.
        same_down_trans (dict): Transition that goes up at the same stage.
        across_lateral_trans (dict): Across-pathway same-stage
        across_down_trans (dict): Across-pathway bottom-up connection.
        across_up_trans (dict): Across-pathway top-down connection.
        across_skip_trans (dict): Across-pathway skip connection.
        output_trans (dict): Transition that trans the output of the
            last stage.
        start_level (int): Index of the start input backbone level used to
            build the feature pyramid. Default: 0.
        end_level (int): Index of the end input backbone level (exclusive) to
            build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool): It decides whether to add conv
            layers on top of the original feature maps. Default to False.
            If True, its actual mode is specified by `extra_convs_on_inputs`.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
    """
    # Maps the 'type' key of a transition config dict to its implementation.
    transition_types = {
        'conv': ConvModule,
        'interpolation_conv': UpInterpolationConv,
        'last_conv': LastConv,
    }
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 stack_times,
                 paths,
                 inter_channels=None,
                 same_down_trans=None,
                 same_up_trans=dict(
                     type='conv', kernel_size=3, stride=2, padding=1),
                 across_lateral_trans=dict(type='conv', kernel_size=1),
                 across_down_trans=dict(type='conv', kernel_size=3),
                 across_up_trans=None,
                 across_skip_trans=dict(type='identity'),
                 output_trans=dict(type='last_conv', kernel_size=3),
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False,
                 norm_cfg=None,
                 skip_inds=None):
        super(FPG, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        # Normalize inter_channels to one value per output level.
        if inter_channels is None:
            self.inter_channels = [out_channels for _ in range(num_outs)]
        elif isinstance(inter_channels, int):
            self.inter_channels = [inter_channels for _ in range(num_outs)]
        else:
            assert isinstance(inter_channels, list)
            assert len(inter_channels) == num_outs
            self.inter_channels = inter_channels
        self.stack_times = stack_times
        self.paths = paths
        assert isinstance(paths, list) and len(paths) == stack_times
        for d in paths:
            assert d in ('bu', 'td')
        self.same_down_trans = same_down_trans
        self.same_up_trans = same_up_trans
        self.across_lateral_trans = across_lateral_trans
        self.across_down_trans = across_down_trans
        self.across_up_trans = across_up_trans
        self.output_trans = output_trans
        self.across_skip_trans = across_skip_trans
        self.with_bias = norm_cfg is None
        # skip inds must be specified if across skip trans is not None
        if self.across_skip_trans is not None:
            # NOTE(review): this bare expression has no effect -- it was most
            # likely meant to be ``assert skip_inds is not None``.
            skip_inds is not None
        self.skip_inds = skip_inds
        assert len(self.skip_inds[0]) <= self.stack_times
        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            # if end_level < inputs, no extra level is allowed
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        # build lateral 1x1 convs to reduce channels
        self.lateral_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = nn.Conv2d(self.in_channels[i],
                               self.inter_channels[i - self.start_level], 1)
            self.lateral_convs.append(l_conv)
        # Extra pyramid levels beyond the backbone: strided convs or max-pool.
        extra_levels = num_outs - self.backbone_end_level + self.start_level
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            if self.add_extra_convs:
                fpn_idx = self.backbone_end_level - self.start_level + i
                extra_conv = nn.Conv2d(
                    self.inter_channels[fpn_idx - 1],
                    self.inter_channels[fpn_idx],
                    3,
                    stride=2,
                    padding=1)
                self.extra_downsamples.append(extra_conv)
            else:
                self.extra_downsamples.append(nn.MaxPool2d(1, stride=2))
        self.fpn_transitions = nn.ModuleList()  # stack times
        for s in range(self.stack_times):
            stage_trans = nn.ModuleList()  # num of feature levels
            for i in range(self.num_outs):
                # same, across_lateral, across_down, across_up
                trans = nn.ModuleDict()
                # Levels marked in skip_inds get an empty dict for this stage.
                if s in self.skip_inds[i]:
                    stage_trans.append(trans)
                    continue
                # build same-stage down trans (used in bottom-up paths)
                if i == 0 or self.same_up_trans is None:
                    same_up_trans = None
                else:
                    same_up_trans = self.build_trans(
                        self.same_up_trans, self.inter_channels[i - 1],
                        self.inter_channels[i])
                trans['same_up'] = same_up_trans
                # build same-stage up trans (used in top-down paths)
                if i == self.num_outs - 1 or self.same_down_trans is None:
                    same_down_trans = None
                else:
                    same_down_trans = self.build_trans(
                        self.same_down_trans, self.inter_channels[i + 1],
                        self.inter_channels[i])
                trans['same_down'] = same_down_trans
                # build across lateral trans
                across_lateral_trans = self.build_trans(
                    self.across_lateral_trans, self.inter_channels[i],
                    self.inter_channels[i])
                trans['across_lateral'] = across_lateral_trans
                # build across down trans
                if i == self.num_outs - 1 or self.across_down_trans is None:
                    across_down_trans = None
                else:
                    across_down_trans = self.build_trans(
                        self.across_down_trans, self.inter_channels[i + 1],
                        self.inter_channels[i])
                trans['across_down'] = across_down_trans
                # build across up trans
                if i == 0 or self.across_up_trans is None:
                    across_up_trans = None
                else:
                    across_up_trans = self.build_trans(
                        self.across_up_trans, self.inter_channels[i - 1],
                        self.inter_channels[i])
                trans['across_up'] = across_up_trans
                if self.across_skip_trans is None:
                    across_skip_trans = None
                else:
                    across_skip_trans = self.build_trans(
                        self.across_skip_trans, self.inter_channels[i - 1],
                        self.inter_channels[i])
                trans['across_skip'] = across_skip_trans
                # build across_skip trans
                stage_trans.append(trans)
            self.fpn_transitions.append(stage_trans)
        self.output_transition = nn.ModuleList()  # output levels
        for i in range(self.num_outs):
            trans = self.build_trans(
                self.output_trans,
                self.inter_channels[i],
                self.out_channels,
                num_inputs=self.stack_times + 1)
            self.output_transition.append(trans)
        self.relu = nn.ReLU(inplace=True)
    def build_trans(self, cfg, in_channels, out_channels, **extra_args):
        # Instantiate a transition module from its config dict ('type' selects
        # the class from transition_types; remaining keys become kwargs).
        cfg_ = cfg.copy()
        trans_type = cfg_.pop('type')
        trans_cls = self.transition_types[trans_type]
        return trans_cls(in_channels, out_channels, **cfg_, **extra_args)
    def init_weights(self):
        # Caffe2-style Xavier for convs; unit init for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                caffe2_xavier_init(m)
            elif is_norm(m):
                constant_init(m, 1.0)
    def fuse(self, fuse_dict):
        # Element-wise sum of all non-None tensors in the dict (None if all are None).
        out = None
        for item in fuse_dict.values():
            if item is not None:
                if out is None:
                    out = item
                else:
                    out = out + item
        return out
    def forward(self, inputs):
        assert len(inputs) == len(self.in_channels)
        # build all levels from original feature maps
        feats = [
            lateral_conv(inputs[i + self.start_level])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]
        for downsample in self.extra_downsamples:
            feats.append(downsample(feats[-1]))
        # outs[s] holds the per-level features after stage s (outs[0] = laterals).
        outs = [feats]
        for i in range(self.stack_times):
            current_outs = outs[-1]
            next_outs = []
            direction = self.paths[i]
            for j in range(self.num_outs):
                # Skipped (stage, level) pairs pass the previous feature through.
                if i in self.skip_inds[j]:
                    next_outs.append(outs[-1][j])
                    continue
                # feature level
                if direction == 'td':
                    lvl = self.num_outs - j - 1
                else:
                    lvl = j
                # get transitions
                if direction == 'td':
                    same_trans = self.fpn_transitions[i][lvl]['same_down']
                else:
                    same_trans = self.fpn_transitions[i][lvl]['same_up']
                across_lateral_trans = self.fpn_transitions[i][lvl][
                    'across_lateral']
                across_down_trans = self.fpn_transitions[i][lvl]['across_down']
                across_up_trans = self.fpn_transitions[i][lvl]['across_up']
                across_skip_trans = self.fpn_transitions[i][lvl]['across_skip']
                # init output
                to_fuse = dict(
                    same=None, lateral=None, across_up=None, across_down=None)
                # same downsample/upsample
                if same_trans is not None:
                    to_fuse['same'] = same_trans(next_outs[-1])
                # across lateral
                if across_lateral_trans is not None:
                    to_fuse['lateral'] = across_lateral_trans(
                        current_outs[lvl])
                # across downsample
                if lvl > 0 and across_up_trans is not None:
                    to_fuse['across_up'] = across_up_trans(current_outs[lvl -
                                                                        1])
                # across upsample
                if (lvl < self.num_outs - 1 and across_down_trans is not None):
                    to_fuse['across_down'] = across_down_trans(
                        current_outs[lvl + 1])
                if across_skip_trans is not None:
                    to_fuse['across_skip'] = across_skip_trans(outs[0][lvl])
                x = self.fuse(to_fuse)
                next_outs.append(x)
            # Top-down stages were built high-to-low; restore low-to-high order.
            if direction == 'td':
                outs.append(next_outs[::-1])
            else:
                outs.append(next_outs)
        # output trans
        final_outs = []
        for i in range(self.num_outs):
            lvl_out_list = []
            for s in range(len(outs)):
                lvl_out_list.append(outs[s][i])
            lvl_out = self.output_transition[i](lvl_out_list)
            final_outs.append(lvl_out)
        return final_outs
| [
"noreply@github.com"
] | noreply@github.com |
a12a2f37cb428903860417ef23a04a630d2274bd | 766c83688c5288c7bd4f862e686eb2de65808e83 | /src/simapi_simulation/fmu_simulator/simulator_api/sim_api.py | 6e84b69fcbf6a886900898276d89c075c7891328 | [
"MIT"
] | permissive | fabianoP/SimApi-Python | 086172d4416b29f503dbaec41462e9a5dbb19738 | 78548056b12aa96fea1a2a52b95b03ec22602199 | refs/heads/master | 2021-05-26T03:32:45.221631 | 2020-04-05T22:49:42 | 2020-04-05T22:49:42 | 254,034,535 | 0 | 0 | null | 2020-04-08T08:52:15 | 2020-04-08T08:52:15 | null | UTF-8 | Python | false | false | 1,824 | py | import subprocess
from bottle import request, route, run, response
import os.path
import json
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
def write_json(data, filename):
    """Serialize *data* to *filename* as pretty-printed, non-ASCII-safe JSON."""
    with open(filename, 'w') as out_file:
        json.dump(data, out_file, ensure_ascii=False, indent=4)
def write_time_step(t_step, filename):
    """Overwrite *filename* with the given time-step string.

    Mode 'w' already truncates the file and positions at offset 0, so the
    original's explicit ``f.seek(0)`` was redundant and has been removed.
    """
    with open(filename, 'w') as f:
        f.write(t_step)
@route('/model_input', method='POST')
def get_input():
    # Receive a simulation input payload and persist only its time step.
    print("RECEIVED INPUT: " + str(request.json))
    # The request body is a JSON-encoded *string*, so it is decoded a
    # second time here to get the actual dict.
    data = json.loads(request.json)
    t_step = data['time_step']
    # NOTE(review): hard-coded container path -- confirm it exists at runtime.
    write_time_step(t_step, '/home/deb/code/store_incoming_json/time_step.txt')
@route('/upload/<model_name>', method='POST')
def do_upload(model_name):
    """Receive a model upload: exactly two files plus a 'json' form field.

    On success the files are stored under /home/deb/code/volume/<model_name>
    and the form's JSON payload is written next to them as model_params.json.
    Returns a 400 response without touching the model directory when the
    file count is wrong (the original saved the files before validating).
    """
    upload = request.files
    save_path = '/home/deb/code/volume/' + model_name
    print('UPLOAD SIM')
    with open('/home/deb/code/isSwarm.txt', 'w') as f:
        f.write("isSwarm?")
    json_data = request.forms.pop('json')
    # Validate first so a bad request leaves no half-written directory.
    if len(upload) != 2:
        response.status = 400
        return 'Number of Files required = 2. Uploaded Files = {0}'.format(len(upload))
    try:
        os.mkdir(save_path)
    except OSError:
        print("Creation of the directory %s failed" % save_path)
    else:
        print("Successfully created the directory %s " % save_path)
    # 'upload_file' avoids shadowing the builtin name 'file'.
    for name, upload_file in upload.iteritems():
        upload_file.save(save_path)
    # Make the shared volume writable for other containers.
    subprocess.getoutput('chmod -R a+rw /home/deb/code/volume/ *')
    j_dict = {'model_params': []}
    j_dict['model_params'].append(json.loads(json_data))
    write_json(j_dict, save_path + '/model_params.json')
    response.status = 200
    return 'File upload success in sim container for model_name = {0}'.format(model_name)
# Start bottle's built-in development server on all interfaces;
# debug/reloader are development-only settings.
run(host='0.0.0.0', port=8000, debug=True, reloader=True)
| [
"richard.brady@ucdconnect.ie"
] | richard.brady@ucdconnect.ie |
56052fc5690dc0fbd9529a96cbe1b602c35676a9 | dfc827bf144be6edf735a8b59b000d8216e4bb00 | /CODE/postprocessing/Thesis/GaussBump/SimpleRead.py | d19224edb9ae2717dba2faecbed8532dbb7825c9 | [] | no_license | jordanpitt3141/ALL | c5f55e2642d4c18b63b4226ddf7c8ca492c8163c | 3f35c9d8e422e9088fe096a267efda2031ba0123 | refs/heads/master | 2020-07-12T16:26:59.684440 | 2019-05-08T04:12:26 | 2019-05-08T04:12:26 | 94,275,573 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,952 | py | import csv
from numpy.linalg import norm
from scipy import *
import os
from pylab import plot, show, legend,xlim,ylim,savefig,title,xlabel,ylabel,clf, loglog
from numpy import ones
def write_series(path, xs, ys):
    """Write paired (x, y) samples to *path*, one fixed-width line each."""
    with open(path, 'w') as out:
        for xv, yv in zip(xs, ys):
            out.write("%3.8f%5s%1.20f\n" % (xv, " ", yv))


wdir = "/home/jp/Documents/PhD/project/data/ThesisRedo2019/DryForced/FEVM2NoRegTol/12/"
sdir = "/home/jp/Documents/PhD/project/master/FigureData/ThesisRedo/DryForced/FEVM2/Ex/"

if not os.path.exists(sdir):
    os.makedirs(sdir)

ts = "10.0"
gap = 8  # keep every 8th sample to thin the output

# Read x, h, G, u, b, w columns; j starts at -1 so the header row is skipped.
s = wdir + "outList" + ts + "s.txt"
with open(s, 'r') as file1:
    readfile = csv.reader(file1, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
    b = []
    w = []
    h = []
    u = []
    G = []
    x = []
    j = -1
    for row in readfile:
        if j >= 0:
            x.append(float(row[0]))
            h.append(float(row[1]))
            G.append(float(row[2]))
            u.append(float(row[3]))
            b.append(float(row[4]))
            w.append(float(row[5]))
        j = j + 1

x = array(x[::gap])
b = array(b[::gap])
w = array(w[::gap])
h = array(h[::gap])
u = array(u[::gap])
G = array(G[::gap])

# One two-column .dat file per quantity, all sharing the x column.
# (Replaces five duplicated with/for blocks in the original.)
write_series(sdir + "Stage" + ts + "s.dat", x, w)
write_series(sdir + "Bed" + ts + "s.dat", x, b)
write_series(sdir + "h" + ts + "s.dat", x, h)
write_series(sdir + "u" + ts + "s.dat", x, u)
write_series(sdir + "G" + ts + "s.dat", x, G)
"jordanpitt3141@github.com"
] | jordanpitt3141@github.com |
6ff9a5a62dbdaa0426570202d6055465fd0c676f | 8b1af009f41fe550379f0380f7cc4d024646da3c | /usePackage.py | e82027e39a587bf84d7f8cec6e2c1c08582f5e58 | [] | no_license | jgk98f/Python-3-Fun | b976b9a4d3335fbbc8456f77e410b4e4b269fd51 | bf532c08c530751f304d9b998f56551933eeb80c | refs/heads/master | 2021-07-22T23:44:59.900597 | 2017-11-02T01:32:13 | 2017-11-02T01:32:13 | 109,201,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | #!/usr/bin/python3
##
# Author: Jason Klamert
# Date: 11/1/2017
# Purpose: My first package use in Python 3!
##
import Package
# Exercise the two demo functions exported by the local Package module.
Package.hi()
Package.hello()
"noreply@github.com"
] | noreply@github.com |
3e306235707e7aa903ba5a43be32b5ea1def345c | 3535bac8b6641d7e6742e39de178508a208dbd0c | /checkout/migrations/0005_auto_20210227_1256.py | 83e32758c10a03e725cbb2142de3e28dc3b55f32 | [] | no_license | Lindsaykerr1994/Onwards-And-Upwards-Backend | a5f397fdcc7e19c8d99b6087734a5786e475c53a | 3fc2e6862f1f62667c6c3daacdea12862034f681 | refs/heads/master | 2023-03-28T17:08:08.946017 | 2021-04-06T13:36:21 | 2021-04-06T13:36:21 | 336,790,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # Generated by Django 3.1.6 on 2021-02-27 12:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('checkout', '0004_auto_20210227_1250'),
]
operations = [
migrations.RenameField(
model_name='payment',
old_name='appointment_no',
new_name='appointment',
),
]
| [
"lindsaykerr1994@gmail.com"
] | lindsaykerr1994@gmail.com |
352497cdfd93bf546ea40d18ab959251d15042f2 | f199dbeb753133634e7e26bd54807fde96f36db6 | /src/ggrc/utils/query_helpers.py | 6cc5a5789a1dc077f3aeb64d212413a1c697be67 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | gaurav46/ggrc-core | 40a86f7a182c81f6902bcd26ea63487e028fbfc8 | 5f4ea7173ec1da7763bd5b4fef39858c8be07df2 | refs/heads/develop | 2023-02-06T05:15:17.866451 | 2016-12-01T14:17:27 | 2016-12-01T14:17:27 | 75,402,854 | 0 | 0 | Apache-2.0 | 2023-02-02T01:56:55 | 2016-12-02T14:32:52 | Python | UTF-8 | Python | false | false | 10,111 | py | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""This module helper query builder for my dashboard page."""
from sqlalchemy import and_
from sqlalchemy import case
from sqlalchemy import literal
from sqlalchemy import or_
from sqlalchemy import true
from sqlalchemy import union
from sqlalchemy import alias
from sqlalchemy.orm import aliased
from ggrc import db
from ggrc.models import all_models
from ggrc.models.object_person import ObjectPerson
from ggrc.models.object_owner import ObjectOwner
from ggrc.models.relationship import Relationship
from ggrc.models.custom_attribute_value import CustomAttributeValue
from ggrc.rbac import permissions as pr
from ggrc_basic_permissions import backlog_workflows
from ggrc_basic_permissions.models import UserRole, Role
from ggrc_workflows.models import Cycle
def get_type_select_column(model):
  """Return a selectable column holding the model's type name.

  For single-type models this is a constant literal; for polymorphic
  models a CASE expression maps each discriminator value to the matching
  class name.
  """
  mapper = model._sa_class_manager.mapper
  if mapper.polymorphic_on is None:
    return literal(mapper.class_.__name__)
  whens = {discriminator: sub_mapper.class_.__name__
           for discriminator, sub_mapper in mapper.polymorphic_map.items()}
  return case(value=mapper.polymorphic_on, whens=whens)
def _types_to_type_models(types):
  """Resolve a collection of type names to their model classes.

  ``None`` means "all registered models".
  """
  if types is None:
    return all_models.all_models
  return list(filter(lambda model: model.__name__ in types,
                     all_models.all_models))
def get_myobjects_query(types=None, contact_id=None, is_creator=False):  # noqa
  """Filters by "myview" for a given person.

  Finds all objects which might appear on a user's Profile or Dashboard
  pages.

  This method only *limits* the result set -- Contexts and Roles will still
  filter out forbidden objects.

  Args:
    types: optional iterable of model names to restrict the search to
      (None means all models).
    contact_id: id of the person whose objects are being collected.
    is_creator: True when the user only has the Creator role; plain
      object mappings are then excluded, since being mapped does not
      grant a Creator view permissions.

  Returns:
    An aliased UNION of (id, type, context_id) rows.
  """
  type_models = _types_to_type_models(types)
  model_names = [model.__name__ for model in type_models]
  type_union_queries = []

  def _get_people():
    """Get all the people w/o any restrictions."""
    all_people = db.session.query(
        all_models.Person.id.label('id'),
        literal(all_models.Person.__name__).label('type'),
        literal(None).label('context_id')
    )
    return all_people

  def _get_object_people():
    """Objects to which the user is 'mapped'."""
    object_people_query = db.session.query(
        ObjectPerson.personable_id.label('id'),
        ObjectPerson.personable_type.label('type'),
        literal(None).label('context_id')
    ).filter(
        and_(
            ObjectPerson.person_id == contact_id,
            ObjectPerson.personable_type.in_(model_names)
        )
    )
    return object_people_query

  def _get_object_owners():
    """Objects for which the user is an 'owner'."""
    object_owners_query = db.session.query(
        ObjectOwner.ownable_id.label('id'),
        ObjectOwner.ownable_type.label('type'),
        literal(None).label('context_id')
    ).filter(
        and_(
            ObjectOwner.person_id == contact_id,
            ObjectOwner.ownable_type.in_(model_names),
        )
    )
    return object_owners_query

  def _get_object_mapped_ca():
    """Objects to which the user is mapped via a custom attribute."""
    ca_mapped_objects_query = db.session.query(
        CustomAttributeValue.attributable_id.label('id'),
        CustomAttributeValue.attributable_type.label('type'),
        literal(None).label('context_id')
    ).filter(
        and_(
            CustomAttributeValue.attribute_value == "Person",
            CustomAttributeValue.attribute_object_id == contact_id,
            CustomAttributeValue.attributable_type.in_(model_names)
        )
    )
    return ca_mapped_objects_query

  def _get_objects_user_assigned():
    """Objects for which the user is assigned."""
    # A Person<->object Relationship can point either way, so both
    # directions are collected and unioned.
    dst_assignee_query = db.session.query(
        Relationship.destination_id.label('id'),
        Relationship.destination_type.label('type'),
        literal(None).label('context_id'),
    ).filter(
        and_(
            Relationship.source_type == "Person",
            Relationship.source_id == contact_id,
            Relationship.destination_type.in_(model_names)
        ),
    )
    src_assignee_query = db.session.query(
        Relationship.source_id.label('id'),
        Relationship.source_type.label('type'),
        literal(None).label('context_id'),
    ).filter(
        and_(
            Relationship.destination_type == "Person",
            Relationship.destination_id == contact_id,
            Relationship.source_type.in_(model_names)
        ),
    )
    return dst_assignee_query.union(src_assignee_query)

  def _get_results_by_context(model):
    """Objects based on the context of the current model.

    Return the objects that are in private contexts via UserRole.
    """
    context_query = db.session.query(
        model.id.label('id'),
        literal(model.__name__).label('type'),
        literal(None).label('context_id'),
    ).join(
        UserRole,
        and_(
            UserRole.context_id == model.context_id,
            UserRole.person_id == contact_id,
        )
    )
    return context_query

  def _get_assigned_to_records(model):
    """Get query by models contacts fields.

    Objects for which the user is the 'contact' or 'secondary contact'.
    Control also has 'principal_assessor' and 'secondary_assessor'.
    """
    model_type_queries = []
    for attr in ('contact_id', 'secondary_contact_id',
                 'principal_assessor_id', 'secondary_assessor_id'):
      if hasattr(model, attr):
        model_type_queries.append(getattr(model, attr) == contact_id)
    return model_type_queries

  def _get_tasks_in_cycle(model):
    """Filter tasks with particular statuses and cycle.

    Filtering tasks with statuses "Assigned", "InProgress" and "Finished".
    Where the task is in current users cycle.
    """
    task_query = db.session.query(
        model.id.label('id'),
        literal(model.__name__).label('type'),
        literal(None).label('context_id'),
    ).join(Cycle, Cycle.id == model.cycle_id).filter(
        and_(
            Cycle.is_current == true(),
            model.contact_id == contact_id,
            model.status.in_(
                all_models.CycleTaskGroupObjectTask.ACTIVE_STATES
            )
        )
    )
    return task_query

  def _get_model_specific_query(model):
    """Prepare query specific for a particular model."""
    model_type_query = None
    if model is all_models.CycleTaskGroupObjectTask:
      model_type_query = _get_tasks_in_cycle(model)
    else:
      model_type_queries = _get_assigned_to_records(model)
      if model_type_queries:
        type_column = get_type_select_column(model)
        model_type_query = db.session.query(
            model.id.label('id'),
            type_column.label('type'),
            literal(None).label('context_id')
        ).filter(or_(*model_type_queries)).distinct()
    return model_type_query

  def _get_context_relationships():
    """Load list of objects related on contexts and objects types.

    This code handles the case when user is added as `Auditor` and should be
    able to see objects mapped to the `Program` on `My Work` page.

    Returns:
      objects (list((id, type, None))): Related objects
    """
    user_role_query = db.session.query(UserRole.context_id).join(
        Role, UserRole.role_id == Role.id).filter(and_(
            UserRole.person_id == contact_id, Role.name == 'Auditor')
    )
    _ct = aliased(all_models.Context, name="c")
    _rl = aliased(all_models.Relationship, name="rl")
    context_query = db.session.query(
        _rl.source_id.label('id'),
        _rl.source_type.label('type'),
        literal(None)).join(_ct, and_(
            _ct.id.in_(user_role_query),
            _rl.destination_id == _ct.related_object_id,
            _rl.destination_type == _ct.related_object_type,
            _rl.source_type.in_(model_names),
        )).union(db.session.query(
            _rl.destination_id.label('id'),
            _rl.destination_type.label('type'),
            literal(None)).join(_ct, and_(
                _ct.id.in_(user_role_query),
                _rl.source_id == _ct.related_object_id,
                _rl.source_type == _ct.related_object_type,
                _rl.destination_type.in_(model_names),)))
    return context_query

  # Note: We don't return mapped objects for the Creator because being mapped
  # does not give the Creator necessary permissions to view the object.
  if not is_creator:
    type_union_queries.append(_get_object_people())
  type_union_queries.extend((_get_object_owners(),
                             _get_object_mapped_ca(),
                             _get_objects_user_assigned(),
                             _get_context_relationships(),))
  for model in type_models:
    query = _get_model_specific_query(model)
    if query:
      type_union_queries.append(query)

    if model is all_models.Workflow:
      type_union_queries.append(backlog_workflows())
    if model is all_models.Person:
      type_union_queries.append(_get_people())
    if model in (all_models.Program, all_models.Audit, all_models.Workflow):
      type_union_queries.append(_get_results_by_context(model))

  return alias(union(*type_union_queries))
def get_context_resource(model_name, permission_type='read',
                         permission_model=None):
  """Get allowed contexts and resources.

  Looks up the context/resource getter pair for *permission_type* and
  applies it to *permission_model* (falling back to *model_name*).  When a
  permission model is given, the resulting contexts are additionally
  intersected with the read contexts of *model_name*.
  """
  getters = {
      "create": (pr.create_contexts_for, pr.create_resources_for),
      "read": (pr.read_contexts_for, pr.read_resources_for),
      "update": (pr.update_contexts_for, pr.update_resources_for),
      "delete": (pr.delete_contexts_for, pr.delete_resources_for),
  }
  contexts_for, resources_for = getters[permission_type]
  target = permission_model or model_name
  contexts = contexts_for(target)
  resources = resources_for(target)
  if permission_model and contexts:
    contexts = set(contexts) & set(pr.read_contexts_for(model_name))
  return contexts, resources
| [
"egor.hamaliy@gmail.com"
] | egor.hamaliy@gmail.com |
b2262ac385c5fdf6442a2e8d4893d66427960a22 | b54d6a18bc5e86462c1f085386bc48065db5851c | /RandLinkVelDist.py | e43b4b1d5dc7d36505295ad323282e22a34e50c3 | [] | no_license | zoshs2/Percolation_Seoul | 5b5b8ebabe186fbc9e265fc190c3d0641e196517 | 69c0aa99d1f7a2fb9259681a1ed63794cbe5ea5c | refs/heads/main | 2023-07-28T20:50:13.393765 | 2021-09-28T13:25:31 | 2021-09-28T13:25:31 | 390,687,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,764 | py | import os
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from datetime import datetime
from statsmodels.nonparametric.kernel_regression import KernelReg
def RandLinkVelDist(date_dataset, sample=20, reg=False, time_step=5, savefig:'file_name'=False):
    '''
    Display the circadian velocity distribution of randomly-selected road samples.

    Parameters
    ----------
    date_dataset : one day of link records; assumed to carry columns
        LINK_ID, PRCS_YEAR/MON/DAY/HH/MIN and PRCS_SPD -- TODO confirm.
    sample : number of random links to draw (np.random.choice may repeat
        ids, so fewer distinct links can be plotted).
    reg : if True, plot a kernel-regression smoothing of each series.
    time_step : plotting interval in minutes (multiple of the 5-minute
        data resolution).
    savefig : False, or a file-name prefix; when given, the sampled link
        ids (.txt) and the figure (.png) are written to the CWD.
    '''
    VEL_RESOLUTION = 5  # underlying data is sampled every 5 minutes
    timestep = int(time_step / VEL_RESOLUTION)
    # Build the day's timestamp from the first record's date fields.
    TIME = date_dataset.loc[0, ['PRCS_YEAR', 'PRCS_MON', 'PRCS_DAY', 'PRCS_HH', 'PRCS_MIN']].astype(np.int64).values
    TIME = datetime(TIME[0], TIME[1], TIME[2], TIME[3], TIME[4])
    filename_date = "s" + str(sample) + "_" + str(TIME.strftime("%Y%m%d"))
    RandData = date_dataset[date_dataset['LINK_ID'].isin(np.random.choice(date_dataset['LINK_ID'].unique(), sample))].reset_index(drop=True)
    TimeIdx = RandData.groupby(['PRCS_HH', 'PRCS_MIN'])['PRCS_SPD'].mean().index # mean() is just used to get a groupy time('Hour', 'Min') index.
    # Format (hour, minute) pairs as 'HH:MM' datetimes for the x-axis.
    time_xaxis = list(map(lambda x : str(format(x[0], '02d'))+':'+str(format(x[1], '02d')), TimeIdx))
    time_xaxis = [datetime.strptime(i, '%H:%M') for i in time_xaxis]
    RandIDs = RandData['LINK_ID'].unique()
    fig = plt.figure(facecolor='w', figsize=(15, 8))
    ax = plt.gca() # Get the Current Axes (GCA)
    cmap = plt.get_cmap('gnuplot')
    colors = [cmap(i) for i in np.linspace(0, 1, sample)]
    for i, ID in enumerate(RandIDs):
        RandOne = RandData[RandData['LINK_ID']==ID].sort_values(by=['PRCS_HH', 'PRCS_MIN'])
        VelHist = RandOne['PRCS_SPD'].values
        if reg is True:
            # Kernel regression over sample index with a fixed bandwidth of
            # 5 samples to smooth the raw velocity series.
            VelShape = VelHist.shape[0]
            kde = KernelReg(endog=VelHist, exog=np.arange(VelShape), var_type='c', bw=[5])
            estimator = kde.fit(np.arange(VelShape))
            estimator = np.reshape(estimator[0], VelShape)
            plt.plot(time_xaxis, estimator, c=colors[i], label=str(ID))
            continue
        plt.plot(time_xaxis[::timestep], VelHist[::timestep], c=colors[i], label=str(ID))
    fmt = mpl.dates.DateFormatter('%H:%M')
    ax.xaxis.set_major_formatter(fmt)
    fig.autofmt_xdate()
    ax.set_ylabel('Velocity (km/h)', fontsize=18)
    ax.set_xlabel('Time', fontsize=18)
    if savefig is not False:
        filename = savefig + "_RandLinkVelDist_" + filename_date
        if reg is True:
            filename = "(Reg)" + filename
        # Record which link ids were sampled alongside the figure.
        with open(filename+'.txt', 'w') as f:
            for ID in RandIDs:
                f.write("{}\n".format(ID))
        print(filename, ".txt saved on ", os.getcwd())
        print(filename, ".png saved on ", os.getcwd())
        plt.savefig(filename + ".png")
    plt.show()
    return
"zoshs27@gmail.com"
] | zoshs27@gmail.com |
e3683f15f1c1f8d0bfab9cf5561642086cd81dd5 | 824b494a9d789cb2afd1397cacb50b91efeb6fb9 | /redfox.py | 003ae78a290bf4a3acbbc3174266b1a8cbfb902d | [] | no_license | zymil/RubberDucky-Hunter | 6cada22b9d68fddf59dd079434e8a5bf7a343d04 | d363b3cd35cfcbdc3994ffca47df04a0224819e8 | refs/heads/main | 2023-08-30T20:18:01.519497 | 2021-11-16T19:32:15 | 2021-11-16T19:32:15 | 322,376,724 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | from pynput.keyboard import Key, Listener
import logging
import time
import ctypes
# Save with a .pyw extension to run without a visible console window.
prevTime = 0
def timeElapsed(start, end):
    """Return the elapsed time (end - start), in the caller's units."""
    return end - start
def stop(key):
    """pynput release callback: returning False on Esc ends the listener."""
    if key == Key.esc:
        return False
    return None
def keypress(Key):
    """Lock the workstation when key presses arrive faster than 20 ms apart
    (the signature of a rubber-ducky-style keystroke injector)."""
    global prevTime
    now = time.time()
    elapsed = timeElapsed(prevTime, now)
    if prevTime == 0:
        prevTime = now
    if elapsed != 0 and elapsed <= 0.020:
        ctypes.windll.user32.LockWorkStation()
    prevTime = now
# Block on the keyboard listener; it ends when stop() returns False (Esc).
with Listener(on_press = keypress, on_release=stop) as listener:
    listener.join()
| [
"noreply@github.com"
] | noreply@github.com |
1d568fd6979787b84243352a436927ec1d71fbed | 18772c6c85f9dc50ebde176d03a586bc4c7c031c | /apps/course/models.py | d96292a6ce8ebc513bad0ec5f24c77ab88035de3 | [] | no_license | hanjiangxue20/Online | 220d9ad919fc34ceacdf5c0b2aa1771f7a6f84c3 | 2ee25d1ea84b7e1c103ccd7829412781d21eb50d | refs/heads/master | 2020-03-13T20:17:35.139561 | 2019-01-28T10:03:04 | 2019-01-28T10:03:04 | 131,270,828 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | from datetime import datetime
from django.db import models
# Create your models here.
class Course(models.Model):
    """
    Course (课程): a learnable unit with difficulty, counters and cover image.
    """
    # Difficulty levels: cj = beginner (初级), zj = intermediate (中级),
    # gj = advanced (高级).
    DEGREE_CHOICES =(
        ('cj','初级'),
        ('zj','中级'),
        ('gj','高级'),
    )
    name = models.CharField("课程名", max_length=50)
    desc = models.CharField('课程描述',max_length=300)
    detail = models.TextField('课程详情')
    degree = models.CharField('难度', choices=DEGREE_CHOICES, max_length=2)
    learn_times = models.IntegerField('学习时长(分钟数)',default=0)
    students = models.IntegerField('学习人数',default=0)
    fav_nums = models.IntegerField("收藏人数",default=0)
    image = models.ImageField("封面图", upload_to="course/%Y%m",max_length=100)
    click_nums = models.IntegerField("点击数",default=0)
    add_time = models.DateTimeField('添加时间', default=datetime.now)

    class Meta:
        verbose_name = "课程"
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.name
class Lesson(models.Model):
    """
    Chapter (章节) within a course.
    """
    course = models.ForeignKey(Course, verbose_name="课程", on_delete=models.CASCADE)
    name = models.CharField("章节名", max_length=100)
    add_time = models.DateTimeField('添加时间', default=datetime.now)

    class Meta:
        # NOTE(review): "课程资源" (course resource) looks copy-pasted from
        # CourseResource; "章节" (chapter) was probably intended -- confirm.
        verbose_name = "课程资源"
        verbose_name_plural = verbose_name

    def __str__(self):
        # Added for consistency with Course.__str__ (readable admin output).
        return self.name
class Video(models.Model):
    """Video (视频) attached to a lesson."""
    lesson = models.ForeignKey(Lesson, verbose_name="章节", on_delete=models.CASCADE)
    name = models.CharField("视频名", max_length=100)
    add_time = models.DateTimeField('添加时间', default=datetime.now)

    class Meta:
        verbose_name = "视频"
        verbose_name_plural = verbose_name

    def __str__(self):
        # Added for consistency with Course.__str__ (readable admin output).
        return self.name
class CourseResource(models.Model):
    """Downloadable resource (课程资源) belonging to a course."""
    course = models.ForeignKey(Course, verbose_name="课程", on_delete=models.CASCADE)
    name = models.CharField("名称", max_length=100)
    download = models.FileField("资源文件", upload_to="course/resource/%Y/%m", max_length=100)
    add_time = models.DateTimeField("添加时间", default=datetime.now)

    class Meta:
        verbose_name = "课程资源"
        verbose_name_plural = verbose_name

    def __str__(self):
        # Added for consistency with Course.__str__ (readable admin output).
        return self.name
"2038770992@qq.com"
] | 2038770992@qq.com |
74be85ae36daaeda3ba64f5a7f31c7a1529b699e | 1c41c592389d45d1d933d5063cbbf08d02128816 | /parse.py | 9268d64a6f169992ede36775aa3d41f5421ff269 | [] | no_license | oscarkraemer/monitorsite | d2dba3f84fb2b7674e8d1f43772bdf829b2d7a4d | 024ed61e1b088f87fdf00ee4f81a5bb7de88d2ec | refs/heads/master | 2021-01-10T22:00:40.817990 | 2014-08-13T13:58:44 | 2014-08-13T13:58:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | #!/usr/bin/python
import os
# Extract the href targets of '</li> <li> <a href="...' entries from
# source.html and print them as a JavaScript array literal (Python 2 script).
worda = '</li> <li> <a href="'
wordb = '" tar'
wordc = worda + "http"
wordNot = "://dashb-ssb"
print "var links =["
with open("source.html") as file:
    for line in file:
        # Keep only absolute http(s) links, excluding the dashb-ssb host.
        if (wordc in line) and (not wordNot in line) :
            # The URL sits between the anchor prefix and the '" tar...' suffix.
            x = line.index(worda)
            y = line.index(wordb)
            print '"' + line[x+len(worda):y] + '", '
        else:
            continue
print "];"
| [
"carloscarkraemer@gmail.com"
] | carloscarkraemer@gmail.com |
a7bb04d8878e8e6537e23b80cfc69d208b58298b | a317b83f20068b15086276046344c92db4743d4d | /test/08_manylinuxXXXX_only/cibuildwheel_test.py | 69c28fcdf413a2effc5e7a9ab8e511d549bff471 | [
"BSD-2-Clause"
] | permissive | mtreinish/cibuildwheel | 13b5e010d70847ed45aeab760d67b318d09c28d6 | 973ecf53b3e654ec998928ddd5f39169c1fd6ead | refs/heads/master | 2020-12-19T08:20:44.573759 | 2020-01-19T01:07:33 | 2020-01-19T01:07:33 | 235,678,862 | 0 | 0 | NOASSERTION | 2020-01-22T22:19:02 | 2020-01-22T22:19:01 | null | UTF-8 | Python | false | false | 1,194 | py | import os, pytest
import utils
@pytest.mark.parametrize('manylinux_image', ['manylinux1', 'manylinux2010', 'manylinux2014'])
def test(manylinux_image):
    """Build wheels against each manylinux image and check the produced set."""
    project_dir = os.path.dirname(__file__)

    if utils.platform != 'linux':
        pytest.skip('the docker test is only relevant to the linux build')

    # build the wheels
    # CFLAGS environment variable is necessary to fail on 'malloc_info' (on manylinux1)
    # during compilation/linking, rather than when dynamically loading the Python
    add_env = {
        'CIBW_ENVIRONMENT': 'CFLAGS="$CFLAGS -Werror=implicit-function-declaration"',
        'CIBW_MANYLINUX_X86_64_IMAGE': manylinux_image,
        'CIBW_MANYLINUX_I686_IMAGE': manylinux_image,
    }
    if manylinux_image == 'manylinux2014':
        add_env['CIBW_SKIP'] = 'cp27*'  # not available on manylinux2014
    actual_wheels = utils.cibuildwheel_run(project_dir, add_env=add_env)

    # list(...) replaces the original's redundant identity comprehension.
    expected_wheels = list(utils.expected_wheels('spam', '0.1.0', manylinux_versions=[manylinux_image]))
    if manylinux_image == 'manylinux2014':
        expected_wheels = [w for w in expected_wheels if '-cp27' not in w]
    assert set(actual_wheels) == set(expected_wheels)
| [
"noreply@github.com"
] | noreply@github.com |
cd388b1fa34c8b7c139387d2f9da86e2be08a184 | bf9c1aa7ac16d467921affa7381dae301e0a1308 | /apps/articles/urls.py | 4ba8201ad1d278b04f4c849955da1484c39b3dd6 | [] | no_license | clincher/ecigar | ec12223bbbcad383e30ea588babee0a89b15db9d | f534bee7ede5c3af882792616c440c7736193fd0 | refs/heads/master | 2020-12-24T15:14:18.688748 | 2016-12-26T00:29:44 | 2016-12-26T00:29:44 | 2,352,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | from django.conf.urls.defaults import patterns, url
from django.views.generic import ListView, DetailView
from models import Article
# Map /stat.html to the article list and /stat<slug>.html to a detail page.
urlpatterns = patterns('',
    # Products
    url(r'^stat.html$',
        ListView.as_view(model=Article),
        name='article_list'
    ),
    # NOTE(review): the unescaped '.' before 'html' matches any character and
    # '//' inside the character class is redundant -- confirm the intended
    # pattern before tightening it.
    url(r'^stat(?P<slug>[0-9A-Za-z-_.//]+).html$',
        DetailView.as_view(model=Article),
        name='article_detail'
    ),
)
| [
"va.bolshakov@gmail.com"
] | va.bolshakov@gmail.com |
876ed83ba6bdb4eb06e312ab60f046c7fccfd56f | d29f24a74972a4f25d5e64beaf973c20fcff9f15 | /Task8(Statistics Questions)/Binomial Distribution II.py | 38625e5e00338a08418c0f7984d6ba6841f77fdd | [] | no_license | jadhavmayur/innomatics_internship_APR_21 | c5490126ca49c6b7838c537defd73620cd988c88 | 1ee6d6c3df907f96a4fa13ec8bd2c95d61316b7a | refs/heads/main | 2023-05-30T17:33:07.631679 | 2021-06-30T05:20:57 | 2021-06-30T05:20:57 | 363,411,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
def fact(a):
    """Return a! (the factorial of a non-negative integer)."""
    result = 1
    for factor in range(2, a + 1):
        result *= factor
    return result
def comb(a, b):
    """Return the binomial coefficient C(a, b) as an exact integer.

    Uses the multiplicative formula with exact integer division, so no
    precision is lost for large inputs (the original computed it with
    float true-division over factorials).
    """
    result = 1
    for i in range(1, b + 1):
        # Each step multiplies in one numerator term; the division is exact.
        result = result * (a - b + i) // i
    return result
# Read f (percent chance of rejection) and g (batch size) from stdin.
f, g = map(int, input().split())
d = 2  # threshold number of rejects
p_s = f / 100        # probability a single piston is rejected
p_g = 1 - (f / 100)  # probability a single piston is accepted
fact_ans = 0
fact_ans2 = 0
# P(X >= d): sum the binomial pmf for i = d..g.
for i in range(d, g + 1):
    fact_ans += comb(g, i) * pow(p_s, i) * pow(p_g, (g - i))
# P(X <= d): sum the binomial pmf for i = 0..d.
for i in range(0, d + 1):
    fact_ans2 += comb(g, i) * pow(p_s, i) * pow(p_g, (g - i))
print("%.3f" % fact_ans2)
print("%.3f" % fact_ans)
| [
"noreply@github.com"
] | noreply@github.com |
afdd51818b626fec3bf43a0127ea2a00c1b514e9 | 977209e7e742e0e72d854e290ed74118a6f5565f | /dcard.py | 9b4492e648aeaf071a726e1eaeb4e3c6ddd39e3e | [] | no_license | willlaiwk/python-learning | 4a660f24bf7f2082248343fb72ded20591ebf874 | a8234f2f801abde816069636210ef6cce0f4b2fb | refs/heads/master | 2020-03-13T04:51:41.069273 | 2018-04-25T08:17:00 | 2018-04-25T08:17:00 | 130,971,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | import re
import requests
from bs4 import BeautifulSoup
url = 'https://www.dcard.tw/f'
resp = requests.get(url)
soup = BeautifulSoup(resp.text, 'html.parser')
dcard_title = soup.find_all('h3', re.compile('PostEntry_title_'))
print('Dcard 熱門前十文章標題:')
for index, item in enumerate(dcard_title[:10]):
print("{0:2d}. {1}".format(index+1, item.text.strip())) # strip() 移除字串前後的空白
| [
"will.lai.wk@gmail.com"
] | will.lai.wk@gmail.com |
67609da91082ac8c072982adcffd6dfb9232253c | 15f7c52c87376c62ff79cb0b5955bd09e471b119 | /LearnCython/Week1/Examples/animal_list.py | e4757e6ba94f80ea1a9025be91b5ad77b958cecb | [] | no_license | rkillam/LearnPython | 6a7d7be75f44ab4f008ee88b174521a39453b06e | ebf8f1ed150d4ad1e9cf8f1ad464ecbcc41c3c7d | refs/heads/master | 2021-01-22T01:05:40.035312 | 2015-11-02T19:16:34 | 2015-11-02T19:16:34 | 41,547,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | import animal_class
def make_animal():
    """Prompt the user for a name and a noise and return a new Animal."""
    animal_name = input('Name > ')
    animal_noise = input('Noise > ')
    return animal_class.Animal(animal_name, animal_noise)
def get_query():
    """Prompt for the (name, noise) pair to search by."""
    # Arguments evaluate left-to-right, so the prompt order is preserved.
    return input('Name > '), input('Noise > ')
def menu():
    """Print the available commands, one per line."""
    for command in ('add to add an animal',
                    'find to find an added animal',
                    'q to quit'):
        print(command)
# Interactive loop: keep offering the menu until the user enters 'q'.
animals = []
choice = ''
while choice != 'q':
    menu()
    choice = input('> ')
    if choice == 'add':
        animals.append(make_animal())
    elif choice == 'find':
        # Match on either field; every hit is reported.
        name, noise = get_query()
        for animal in animals:
            if animal.name == name or animal.noise == noise:
                print('Found {} who makes a {} noise'.format(animal.name, animal.noise))
    print('')
| [
"richard.killam@gmail.com"
] | richard.killam@gmail.com |
9f6271f117c0ab0c85097b6f105c3ca916c22e44 | d02509b92518ed5f8346881b694e991a83ff53ea | /userauth/views.py | 5e3443eaf633855f7f121d9e3d91e25e667841dd | [] | no_license | navneet35371/nasa | 5d2e611f8e586c62101b01202ea24f6d350144cf | 1f4920c5eea4e0fa17c55dccbd3a18fb2c2ac52d | refs/heads/master | 2020-04-14T23:47:56.643495 | 2015-01-22T11:50:10 | 2015-01-22T11:50:10 | 29,232,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | from django.shortcuts import redirect, render
from forms import UserProfileForm, UserForm
from django.http import HttpResponse, HttpResponseRedirect
from userauth.models import UserProfile
from django.contrib.auth import authenticate, login, logout
def register(request):
    """Handle user sign-up: validate the form, hash the password, save.

    An invalid POST falls through and re-renders the bound form with
    its errors; a GET renders an empty form.
    """
    if request.method == "POST":
        form = UserForm(request.POST)
        if form.is_valid():
            # Removed a leftover 'print request.POST' debug statement that
            # dumped credentials (including the raw password) to stdout.
            my_model = form.save(commit=False)
            # Store the hashed password instead of the raw POST value.
            my_model.set_password(request.POST['password'])
            my_model.save()
            # NOTE(review): relative redirect resolves against the current
            # URL -- confirm '/accounts/login/' was not intended.
            return redirect('accounts/login/')
    else:
        form = UserForm()
    c = {'form' : form}
    return render(request,'register.html',c)
def user_login(request):
    """Authenticate and log the user in, honouring an optional ?next= URL.

    On success redirects to ?next= (or '/'); on failure returns a plain
    error response; on GET renders the login form.
    """
    # .get() avoids the original's KeyError when the query string exists
    # but has no 'next' parameter; also renamed from 'next' to stop
    # shadowing the builtin.  (A leftover 'print next' debug statement
    # was removed.)
    next_url = request.GET.get('next', "")
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(username=username, password=password)
        if user is not None:
            login(request, user)
            if next_url == "":
                return HttpResponseRedirect('/')
            else:
                return HttpResponseRedirect(next_url)
        else:
            return HttpResponse('Invalid user name or password')
    else:
        x = {'next': next_url}
        return render(request, 'login.html', x)
def logout_view(request):
    """Terminate the current session and return to the home page."""
    logout(request)
    return redirect('/')
| [
"navneet.anand@hindustantimes.com"
] | navneet.anand@hindustantimes.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.