blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b8f8772cff4eeb35ff9ba3081fb0b24b8b2e87a0 | 8ce656578e04369cea75c81b529b977fb1d58d94 | /bank_guarantee/migrations/0091_auto_20201007_0933.py | 5d8276a6bb98c3327024da8649fbcbcccb77f944 | [] | no_license | JJvzd/django_exp | f9a08c40a6a7535777a8b5005daafe581d8fe1dc | b1df4681e67aad49a1ce6426682df66b81465cb6 | refs/heads/master | 2023-05-31T13:21:24.178394 | 2021-06-22T10:19:43 | 2021-06-22T10:19:43 | 379,227,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | # Generated by Django 2.1.7 on 2020-10-07 06:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django 2.1.7: alter `Request.tmp_manager` to a nullable FK on the user model."""
    dependencies = [
        ('bank_guarantee', '0090_auto_20201007_0931'),
    ]
    operations = [
        migrations.AlterField(
            model_name='request',
            name='tmp_manager',
            # SET_NULL keeps the request row alive when the referenced user is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='request_managers', to=settings.AUTH_USER_MODEL, verbose_name='Временный менеджер'),
        ),
    ]
| [
"javad@MacBook-Pro-Namig.local"
] | javad@MacBook-Pro-Namig.local |
bd8ef562dc94c9982961eec05e59a91be8026841 | 74152b9ee97de2657214a1497fd87ac982574d5d | /Lesson5/reverse.py | edc2c281f247a5c1bf232c1708eef4d038163218 | [
"Apache-2.0"
] | permissive | shinkai-tester/python_beginner | 8b7dc0189a1615e86778d1712bf5ca03b0bbce47 | a934328c9a50241cc3f02a423060e16aab53b425 | refs/heads/main | 2023-05-12T20:25:36.470754 | 2021-06-01T13:23:32 | 2021-06-01T13:23:32 | 371,542,145 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | x = input()
# Collect input tokens until a literal '0' sentinel is read.
# (`x` is primed by the input() call immediately before this block.)
a= []
while x != '0':
    a.append(x)
    x = input()
length = len(a)
# Print each collected value prefixed by its negative index, last-entered first.
for i in range(length):
    print(-i - 1, end = ' ')
    print(a[- i - 1])
"alinlaegnor@gmail.com"
] | alinlaegnor@gmail.com |
6ee0bf2fa7e401ec7786686e7172e1fa9174dc67 | 03778e9985613ffccd8e2076ed51e86e86f997b6 | /pysad/models/loda.py | 087d84871d730b4752c502f6b81edea44e79645d | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | isci1102/pysad | e3fe263a075fc51ee2bce548da458222ec5a3b7a | dff2ff38258eb8a85c9d34cf5f0b876fc1dc9ede | refs/heads/master | 2023-03-18T03:33:19.345541 | 2020-09-08T01:32:29 | 2020-09-08T01:32:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,018 | py | from pysad.core.base_model import BaseModel
import numpy as np
class LODA(BaseModel):
    """The LODA model :cite:`pevny2016loda`. The implementation is adapted to the
    streaming framework from the `PyOD framework
    <https://pyod.readthedocs.io/en/latest/_modules/pyod/models/loda.html#LODA>`_.

    Args:
        num_bins (int): The number of bins of the histogram.
        num_random_cuts (int): The number of random cuts.
    """

    def __init__(self, num_bins=10, num_random_cuts=100):
        self.to_init = True  # projections are initialized lazily on the first fit
        self.n_bins = num_bins
        self.n_random_cuts = num_random_cuts

    def fit_partial(self, X, y=None):
        """Fits the model to the next instance.

        Args:
            X (np.float array of shape (num_features,)): The instance to fit.
            y (int): Ignored since the model is unsupervised (Default=None).

        Returns:
            object: Returns the self.
        """
        if self.to_init:
            self.num_features = X.shape[0]
            # NOTE: the original used the `np.float`/`np.int` aliases, which were
            # deprecated in NumPy 1.20 and removed in 1.24; the builtin `float`
            # and `int` are exactly equivalent and work on every NumPy version.
            self.weights = np.ones(
                self.n_random_cuts,
                dtype=float) / self.n_random_cuts
            self.projections_ = np.random.randn(
                self.n_random_cuts, self.num_features)
            self.histograms_ = np.zeros((self.n_random_cuts, self.n_bins))
            self.limits_ = np.zeros((self.n_random_cuts, self.n_bins + 1))
            # Each random cut keeps roughly sqrt(d) non-zero components.
            n_nonzero_components = np.sqrt(self.num_features)
            self.n_zero_components = self.num_features - \
                int(n_nonzero_components)
            self.to_init = False
        X = X.reshape(1, -1)
        for i in range(self.n_random_cuts):
            # Zero out a fresh random subset of this projection's components,
            # then refresh the histogram of the projected data.
            rands = np.random.permutation(self.num_features)[
                :self.n_zero_components]
            self.projections_[i, rands] = 0.
            projected_data = self.projections_[i, :].dot(X.T)
            self.histograms_[i, :], self.limits_[i, :] = np.histogram(
                projected_data, bins=self.n_bins, density=False)
            self.histograms_[i, :] += 1e-12  # avoid log(0) when scoring
            self.histograms_[i, :] /= np.sum(self.histograms_[i, :])
        return self

    def score_partial(self, X):
        """Scores the anomalousness of the next instance.

        Args:
            X (np.float array of shape (num_features,)): The instance to score.
                Higher scores represent more anomalous instances.

        Returns:
            float: The anomalousness score of the input instance.
        """
        X = X.reshape(1, -1)
        pred_scores = np.zeros([X.shape[0], 1])
        for i in range(self.n_random_cuts):
            projected_data = self.projections_[i, :].dot(X.T)
            inds = np.searchsorted(self.limits_[i, :self.n_bins - 1],
                                   projected_data, side='left')
            # Weighted negative log-likelihood under each cut's histogram.
            pred_scores[:, 0] += -self.weights[i] * np.log(
                self.histograms_[i, inds])
        pred_scores /= self.n_random_cuts
        return pred_scores.ravel()
| [
"yilmazselimfirat@gmail.com"
] | yilmazselimfirat@gmail.com |
6fe2ed83133466acc1e94ac82eec4c91681d4f48 | deda909b6f5797a05bbaf4f8e56edbc9b2a6de09 | /devops/settings.py | 42a3a2c2691e773ff13a0184c7d93b158bd3bed4 | [
"MIT"
] | permissive | cnbillow/devops | 628a892962a17cf5e99865dc4bffe6b895d7038a | 9883ba547e400758e8743456ceb3655345f9263f | refs/heads/master | 2020-08-30T14:54:44.023237 | 2019-10-25T09:09:40 | 2019-10-25T09:09:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,202 | py | """
Django settings for devops project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# app统一放到apps目录,方便管理
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
if not os.path.isdir(MEDIA_ROOT):
os.makedirs(MEDIA_ROOT)
MEDIA_URL = '/media/'
RECORD_DIR = 'record' # 存放终端结果文件
RECORD_ROOT = os.path.join(MEDIA_ROOT, RECORD_DIR)
if not os.path.isdir(RECORD_ROOT):
os.makedirs(RECORD_ROOT)
SCRIPT_DIR = 'script' # 存放脚本
SCRIPT_ROOT = os.path.join(MEDIA_ROOT, SCRIPT_DIR)
if not os.path.isdir(SCRIPT_ROOT):
os.makedirs(SCRIPT_ROOT)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
TMP_DIR = 'tmp'
TMP_ROOT = os.path.join(MEDIA_ROOT, TMP_DIR)
if not os.path.isdir(TMP_ROOT):
os.makedirs(TMP_ROOT)
FILE_UPLOAD_MAX_MEMORY_SIZE = 27262976 # 上传的文件保存在内存中的大小限制 26MB
DATA_UPLOAD_MAX_MEMORY_SIZE = 27262976 # 上传的数据保存在内存中的大小限制 26MB
FILE_UPLOAD_TEMP_DIR = os.path.join(MEDIA_ROOT, 'tmp') # 上传的文件大于FILE_UPLOAD_MAX_MEMORY_SIZE时临时保存目录
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jp9kd-^-)93ke1^4i6)sd^+kovh2)197m83+^q+o_6^+dz^3xb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
# 'simpleui',
# 'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
'server',
'user',
'webssh',
'webtelnet',
'webguacamole',
'tasks',
'batch',
]
MIDDLEWARE = [
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'util.middleware.NewNextMiddleware',
'util.middleware.GetRealClientMiddleware', # 前端有代理,获取真实 IP
'util.middleware.BlackListMiddleware', # IP 黑名单
'util.middleware.LockScreenMiddleware', # 锁屏
'util.middleware.DebugMiddleware', # 管理员显示 DEBUG 页面
]
FILE_UPLOAD_HANDLERS = [
# 'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# 添加 websocket 支持
ASGI_APPLICATION = 'devops.routing.application'
ROOT_URLCONF = 'devops.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'devops.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
DATABASES_mysql = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'devops',
'USER': 'devops',
'PASSWORD': 'devops',
'HOST': '192.168.223.111',
'PORT': '3306',
'OPTIONS': {
# 'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
'init_command': "SET sql_mode=''",
},
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
# simpleui 使用本地css和js
# SIMPLEUI_STATIC_OFFLINE = True
# Session idle timeout: log out after this long with no activity (stock Django
# only supports a fixed expiry or expire-on-browser-close).
CUSTOM_SESSION_EXIPRY_TIME = 60 * 120 # NOTE(review): value is 7200 s (120 min); original comment said 30 min
# Terminal expiry time; ideally less than or equal to CUSTOM_SESSION_EXIPRY_TIME.
CUSTOM_TERMINAL_EXIPRY_TIME = 60 * 120
redis_setting = {
    'host': '192.168.223.111',
    'port': 6379,
}
# Celery broker backed by redis.
CELERY_BROKER_URL = 'redis://{0}:{1}/0'.format(redis_setting['host'], redis_setting['port'])
# channel_layers backed by redis.
CHANNEL_LAYERS = {
    "default": {
        "BACKEND": "channels_redis.core.RedisChannelLayer",
        "CONFIG": {
            "hosts": [(redis_setting['host'], redis_setting['port'])],
        },
    },
}
# Cache backed by redis.
CACHES = {
    'default': {
        'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': 'redis://{0}:{1}'.format(redis_setting['host'], redis_setting['port']),
        'OPTIONS': {
            # 'DB': 10,
            # 'PASSWORD': '123456',
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            'PARSER_CLASS': 'redis.connection.HiredisParser',
            'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
            'CONNECTION_POOL_CLASS_KWARGS': {
                'max_connections': 500,
                'timeout': 15,
            },
            "COMPRESSOR": "django_redis.compressors.zlib.ZlibCompressor", # enable compression
        },
        'KEY_PREFIX': 'devops',
    }
}
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
SESSION_COOKIE_HTTPONLY = True
# proxy_sshd settings.
PROXY_SSHD = {
    'listen_host': '0.0.0.0',
    'listen_port': 2222,
    'cons': 500,
}
# guacd (Guacamole daemon) settings.
GUACD = {
    'host': '192.168.223.111',
    'port': 4822,
    'timeout': 15,
}
# Access blacklist; requires util.middleware.BlackListMiddleware to be enabled.
BLACKLIST = ['192.168.223.220', '192.168.223.221']
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
a8ebdb184a5ca437c1e15382aa65fcad9bb81aa1 | 20b2d61c0959023cb51be92fafe54877aecb9887 | /pabi_asset_management/wizard/account_asset_remove.py | c89442cfc3df27f67907d79bc50b0ba929f6e8dc | [] | no_license | BTCTON/pb2_addons | 6841a23554054f859b0c4acafb4e91bd0c3a14e4 | a5bfd90c202cea894690c96d74a74fa96eb79468 | refs/heads/master | 2021-09-07T16:55:41.195667 | 2018-02-26T11:27:01 | 2018-02-26T11:27:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | # -*- encoding: utf-8 -*-
from openerp import fields, models, api
class AccountAssetRemove(models.TransientModel):
    """Wizard extension: record a target status on the asset when it is removed."""
    _inherit = 'account.asset.remove'
    # Status to stamp onto the asset after removal; limited to statuses that
    # map to the 'removed' state.
    target_status = fields.Many2one(
        'account.asset.status',
        string='Asset Status',
        domain="[('map_state', '=', 'removed')]",
        required=True,
    )
    @api.multi
    def remove(self):
        """Run the standard removal, then set the chosen status on the asset."""
        self.ensure_one()
        active_id = self._context.get('active_id')
        asset = self.env['account.asset'].browse(active_id)
        # If no_depreciation, no early_removal
        if asset.no_depreciation:
            self = self.with_context(early_removal=False)
        res = super(AccountAssetRemove, self).remove()
        asset.status = self.target_status
        return res
| [
"kittiu@gmail.com"
] | kittiu@gmail.com |
1168edfb4618b69f4fffd2ff9fe9d85e6abe5787 | 87e62af4768c0f594e675551f4c7c1c81ce7f7d9 | /lawyer/pipelines/mongo_pipeline.py | 24655000c001f65246bd768b972da53ad41ff3b0 | [] | no_license | dulei001/Spider | 78d12adbef1d865da6978704fe146cc21a8d2d3e | 628d468501c6502763ce453a58a09813b2a46b8c | refs/heads/master | 2021-01-18T17:27:11.771434 | 2019-05-14T02:14:33 | 2019-05-14T02:14:33 | 86,802,130 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,689 | py | #!/usr/bin/python
#-*- coding: utf-8 -*-
import os
import uuid
from datetime import datetime
import pymongo
class MongoPipeline(object):
    """Scrapy item pipeline that persists items into a MongoDB replica set.

    Items carrying a ``collection`` key are stored in that collection (the key
    itself is stripped from the stored document); all other items are stored
    in a collection named after the spider.
    """

    def __init__(self, mongo_uri, mongo_db, mongo_replicat_set, mongo_username, mongo_password):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
        self.mongo_replicat_set = mongo_replicat_set
        self.mongo_username = mongo_username
        self.mongo_password = mongo_password

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from the crawler's project settings."""
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE', 'spider'),
            mongo_replicat_set=crawler.settings.get('MONGO_REPLICAT_SET'),
            mongo_username=crawler.settings.get('MONGO_USERNAME'),
            mongo_password=crawler.settings.get('MONGO_PASSWORD'),
        )

    def open_spider(self, spider):
        """Connect and authenticate against the configured replica set."""
        self.client = pymongo.MongoClient(self.mongo_uri, replicaSet=self.mongo_replicat_set)
        self.client[self.mongo_db].authenticate(self.mongo_username, self.mongo_password)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        """Release the MongoDB connection."""
        self.client.close()

    def process_item(self, item, spider):
        """Store the item and return it unchanged for the next pipeline stage.

        The original used ``dict(item).has_key('collection')``, which only
        exists on Python 2 and converted the item to a dict twice; a plain
        membership test works on dicts and scrapy Items, on Python 2 and 3.
        NOTE(review): ``insert`` is deprecated in pymongo 3 and removed in 4;
        consider ``insert_one`` when the driver is upgraded.
        """
        if 'collection' in item:
            model = dict(item)
            del model['collection']
            self.db[item['collection']].insert(model)
        else:
            self.db[spider.name].insert(dict(item))
        return item
"280680441@qq.com"
] | 280680441@qq.com |
21cf3a893bbeda22a11b0aeac631f7175b4a9162 | d609e607a7ff6eecfcab196b546201f4d2452746 | /blog/migrations/0001_initial.py | 768601f77d97cdf42cbcadbde4e8d04b8fef8847 | [] | no_license | anhdhbn/django-mqtt | 417b345ea123721ad9faa8950fa3e23a9392b7ad | 2660341dd6c3d57c9704ae66da7a91da2b363aa2 | refs/heads/master | 2022-12-28T09:12:23.881762 | 2020-10-16T14:07:16 | 2020-10-16T14:07:16 | 304,644,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | # Generated by Django 3.1.1 on 2020-09-28 02:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django 3.1.1: initial migration for the blog app, creating `Post`."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, unique=True)),
                ('slug', models.SlugField(max_length=200, unique=True)),
                ('updated_on', models.DateTimeField(auto_now=True)),
                ('content', models.TextField()),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                # 0 = Draft, 1 = Publish.
                ('status', models.IntegerField(choices=[(0, 'Draft'), (1, 'Publish')], default=0)),
                # Deleting a user cascades to their posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created_on'],
            },
        ),
    ]
| [
"anhdhbn@gmail.com"
] | anhdhbn@gmail.com |
3ea96be9668d58a79cad9531613994cad129db1d | c582efcb41f04c77f4fa7d204c109c22e27e93e2 | /18sep/day3/excel/excel1.py | be8baca7d9e3f776578cd884a78be05969120bcb | [] | no_license | shobhit-nigam/infineon | dddeaa4b212eb67ce0f130ff1d8d58dbd0dacbdf | b2830abc7485428b1aeba7abd13bfb18528ce61b | refs/heads/master | 2021-06-22T13:17:52.886845 | 2021-02-26T12:04:29 | 2021-02-26T12:04:29 | 195,349,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,633 | py | #!/usr/bin/env python
# coding: utf-8
"""Notebook export: reading and writing Excel workbooks with openpyxl and pandas.

The original cell order used `dfa` before it was created (cell In[15] ran
`type(dfa)` before In[18] defined it, a NameError when run top to bottom);
the statements below are reordered so the script runs start to finish.
"""

# --- openpyxl: inspect an existing workbook ---------------------------------
from openpyxl import load_workbook

wba = load_workbook('beforelunch.xlsx')
print(type(wba))
print(wba.sheetnames)
# `workbook[name]` is the supported spelling; get_sheet_by_name() is deprecated.
sheeta = wba['first']
print(sheeta['B3'].value)
var = sheeta['B3']
for i in range(1, 6):
    print(sheeta.cell(row=i, column=1).value)

# --- pandas: read a sheet into a DataFrame ----------------------------------
import pandas as pd

x1 = pd.ExcelFile('beforelunch.xlsx')
print(x1.sheet_names)
dfa = x1.parse('second')
print(type(dfa))  # now defined before use, unlike the original notebook order
print(dfa)

# --- pandas: write a DataFrame to a new workbook ----------------------------
dicta = {'Data': [6, 7, 8, 90, 12]}
dfb = pd.DataFrame(dicta)
print(dfb)
writer = pd.ExcelWriter('awake.xlsx', engine='xlsxwriter')
dfb.to_excel(writer, sheet_name='one')
writer.save()

# --- openpyxl: write a formula ----------------------------------------------
from openpyxl import Workbook

wba = Workbook()
wsa = wba.active
wsa['A3'] = 22
wsa['B3'] = 24
wsa['C3'] = "=SUM(A3, B3)"
wba.save('formula.xlsx')

# --- openpyxl: embed an image ------------------------------------------------
from openpyxl.drawing.image import Image

wbb = Workbook()
wsb = wbb.active
img = Image('one.jpg')
wsb.add_image(img, 'B2')
wbb.save('image.xlsx')

# --- openpyxl: add a chart ----------------------------------------------------
from openpyxl.chart import BarChart, PieChart, Reference

wbb = Workbook()
wsb = wbb.active
for i in range(10):
    wsb.append([i])
values = Reference(wsb, min_col=1, min_row=1, max_col=1, max_row=10)
chart1 = BarChart()
chart1.add_data(values)
wsb.add_chart(chart1, 'C3')
wbb.save('chart.xlsx')
"noreply@github.com"
] | shobhit-nigam.noreply@github.com |
400dcb020040b3841ef08c80eb55318d7b8d7aa5 | 785f5a4bfd97ac77559110fb831f18a3822b4e17 | /01-python_crash_course/01-ejercicios_teoria/chapter_10_files/remember_me.py | 24cc799b4417700d26b0dae882c652e7a9a0f532 | [] | no_license | lionelherrerobattista/practica_python | c552ae06336eb805172decd0d527b26a17c09cb9 | 44f09bae58748c2c7c2449adc5117591abd2828d | refs/heads/master | 2020-03-28T02:28:09.910563 | 2020-03-14T21:31:31 | 2020-03-14T21:31:31 | 147,572,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | import json
def get_stored_username():
    """Return the username stored in username.json, or None if no file exists."""
    filename = 'username.json'
    try:
        f_obj = open(filename)
    except FileNotFoundError:
        # First run: nothing stored yet.
        return None
    with f_obj:
        return json.load(f_obj)
def get_new_username():
    """Prompt for a new username, persist it to username.json, and return it."""
    username = input("What is your name? ")
    filename = 'username.json'
    with open(filename, 'w') as f_obj: # save it for the next run
        json.dump(username, f_obj)
    return username
def greet_user():
    """Greet the user by name, prompting for and storing it on the first run."""
    username = get_stored_username()
    if username:
        print("Welcome back, " + username + "!")
    else:
        # No stored name: ask for one and remember it.
        username = get_new_username()
        print("We'll remember you when you come back, " + username + "!")
greet_user()
| [
"-"
] | - |
340462bf30c2ed5cbb3fb0c3dd921c6b94303a12 | 308c7d5dd37ca24096081edea6f73e79ce089da4 | /201-250/203. 删除链表中的节点.py | f097e301332908911f079b841ef1f295937d0a77 | [] | no_license | fengges/leetcode | 4953ca038b085cdb772054fa1483bf816dccb578 | 5d592440b214024cad342c497b381dbce19d8a70 | refs/heads/master | 2022-09-18T17:52:49.525234 | 2022-09-03T08:45:07 | 2022-09-03T08:45:07 | 132,389,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
    def removeElements(self, head, val):
        """Delete every node whose value equals ``val``; return the new head."""
        # Strip matching nodes from the front so `head` is valid or None.
        while head is not None and head.val == val:
            head = head.next
        # Splice out matching nodes in the remainder of the list.
        node = head
        while node is not None and node.next is not None:
            if node.next.val == val:
                node.next = node.next.next
            else:
                node = node.next
        return head
| [
"1059387928@qq.com"
] | 1059387928@qq.com |
7016a4710ff4da144f8a58cfb31202c38c667a98 | 37e87b3d5e1ee9009f0ea0671bc0c6edf0e233b7 | /027.py | c7672604738dad2bb9d5985ba4194e99573ec277 | [] | no_license | Jane11111/Leetcode2021 | d9f4987792938597bf89ff72ba6bbcb4a3f9d081 | a95b871578aae0103066962c33b8c0f4ec22d0f2 | refs/heads/master | 2023-07-14T21:29:41.196752 | 2021-08-23T03:28:02 | 2021-08-23T03:28:02 | 344,804,297 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | # -*- coding: utf-8 -*-
# @Time : 2021-03-02 11:30
# @Author : zxl
# @FileName: 027.py
class Solution(object):
    def swap(self, nums, i, j):
        """Exchange nums[i] and nums[j] in place."""
        nums[i], nums[j] = nums[j], nums[i]

    def removeElement(self, nums, val):
        """Remove all occurrences of `val` from `nums` in place (LeetCode 27).

        Returns the count k of kept elements; on return the first k slots of
        `nums` hold every element that is not `val` (relative order preserved).
        The original swap-from-the-end bookkeeping was correct but fragile;
        a single forward copy pass is simpler and O(n) with no swaps.

        :type nums: List[int]
        :type val: int
        :rtype: int
        """
        k = 0  # next write position == number of kept elements so far
        for x in nums:
            if x != val:
                nums[k] = x
                k += 1
        return k
obj = Solution()
nums = [4,5]
val = 5
res = obj.removeElement(nums,val)
print(res)
print(nums)
| [
"791057615@qq.com"
] | 791057615@qq.com |
2f54a80a00dd9de3108b4b616405f5b7a05bd8a3 | 1577e1cf4e89584a125cffb855ca50a9654c6d55 | /pyobjc/pyobjc/pyobjc-framework-WebKit-2.5.1/PyObjCTest/test_webplugin.py | c34363bdcc0ed21a2ac82af352e13a22c15cf1d2 | [
"MIT"
] | permissive | apple-open-source/macos | a4188b5c2ef113d90281d03cd1b14e5ee52ebffb | 2d2b15f13487673de33297e49f00ef94af743a9a | refs/heads/master | 2023-08-01T11:03:26.870408 | 2023-03-27T00:00:00 | 2023-03-27T00:00:00 | 180,595,052 | 124 | 24 | null | 2022-12-27T14:54:09 | 2019-04-10T14:06:23 | null | UTF-8 | Python | false | false | 324 | py |
from PyObjCTools.TestSupport import *
from WebKit import *
class TestWebPluginHelper (NSObject):
    # Minimal plug-in stub: only the selector's signature matters to the test.
    def webPlugInSetIsSelected_(self, v): pass
class TestWebPlugin (TestCase):
    def testConstants(self):
        # Verify PyObjC exposes argument 0 of webPlugInSetIsSelected: as a BOOL.
        self.assertArgIsBOOL(TestWebPluginHelper.webPlugInSetIsSelected_, 0)
if __name__ == "__main__":
main()
| [
"opensource@apple.com"
] | opensource@apple.com |
7f2e9f67a922ae73ed06152cd0babe4ed8aa4e8e | 1749147fb24b13803d3437e0ae94250d67d618bd | /AE/a05_CAE.py | ed687530d4470574deb493be5383e72896d77586 | [] | no_license | MJK0211/bit_seoul | 65dcccb9336d9565bf9b3bc210b1e9c1c8bd840e | 44d78ce3e03f0a9cf44afafc95879e4e92d27d54 | refs/heads/master | 2023-02-06T00:45:52.999272 | 2020-12-26T07:47:30 | 2020-12-26T07:47:30 | 311,308,648 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | #a02 카피
# Convolutional autoencoder on MNIST (deep CNN encoder, dense decoder).
import numpy as np
from tensorflow.keras.datasets import mnist

# Labels are unused: the autoencoder reconstructs its own input.
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.reshape(60000, 28, 28, 1).astype('float32') / 255.
x_test = x_test.reshape(10000, 28, 28, 1).astype('float32') / 255.

from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input, Conv2D, MaxPooling2D, Flatten


def autoencoder(hidden_layer_size):
    """Build the conv encoder + dense decoder and return a keras Model.

    Fixes over the original: Input used shape (28, 28), which cannot feed
    Conv2D on (28, 28, 1) images, and the function returned the final output
    *tensor* instead of a Model, so the later .compile() call crashed.
    """
    input_img = Input(shape=(28, 28, 1))
    x = Conv2D(filters=hidden_layer_size, kernel_size=(3, 3), activation='relu', padding='same')(input_img)
    x = MaxPooling2D()(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D()(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D()(x)
    x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D()(x)
    x = Flatten()(x)
    decoded = Dense(units=784, activation='sigmoid')(x)
    return Model(inputs=input_img, outputs=decoded)


model = autoencoder(hidden_layer_size=154)
model.compile(optimizer='adam', loss='mse', metrics=['acc'])
# The decoder emits flat 784-vectors, so train against flattened images.
model.fit(x_train, x_train.reshape(-1, 784), epochs=10)
output = model.predict(x_test)

from matplotlib import pyplot as plt
import random

fig, ((ax1, ax2, ax3, ax4, ax5), (ax6, ax7, ax8, ax9, ax10)) = plt.subplots(2, 5, figsize=(20, 7))
# Pick five test images at random.
random_images = random.sample(range(output.shape[0]), 5)
# Original (input) images on the top row.
for i, ax in enumerate([ax1, ax2, ax3, ax4, ax5]):
    ax.imshow(x_test[random_images[i]].reshape(28, 28), cmap='gray')
    if i == 0:
        ax.set_xlabel("INPUT", size=40)
    ax.grid(False)
    ax.set_xticks([])
    ax.set_yticks([])
# Autoencoder reconstructions on the bottom row.
for i, ax in enumerate([ax6, ax7, ax8, ax9, ax10]):
    ax.imshow(output[random_images[i]].reshape(28, 28), cmap='gray')
    if i == 0:
        ax.set_ylabel("OUTPUT", size=40)
    ax.grid(False)
    ax.set_xticks([])
    ax.set_yticks([])
plt.tight_layout()
plt.show()
| [
"kimminjong0211@gmail.com"
] | kimminjong0211@gmail.com |
820029127213c4a647d7f8f1e54cc772960af4a9 | 0c325cf7a68ef51067ed8db566d525a20de5b635 | /other/panda365/panda365/migrations/versions/dbf55d071d34_add_admin_votes_num.py | d1fd1247b26e0de8a22962b576839ecbb1dde1b1 | [] | no_license | alinzel/NOTES | 2ab6aa1ef1d601a9ae8c0d23c0df2bca7e1aa241 | 3e0594641a605580e920d0b08a251fbc99f34e2f | refs/heads/master | 2023-01-08T22:48:30.762625 | 2020-01-17T09:14:47 | 2020-01-17T09:14:47 | 175,339,492 | 0 | 0 | null | 2022-12-27T15:01:19 | 2019-03-13T03:28:08 | HTML | UTF-8 | Python | false | false | 1,000 | py | """add admin_votes_num
Revision ID: dbf55d071d34
Revises: 9cc8e9322d7e
Create Date: 2017-06-28 11:47:03.044287
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dbf55d071d34'
down_revision = '9cc8e9322d7e'
branch_labels = None
depends_on = None
def upgrade():
    """
    Rename column `votes_num` to `real_votes_num` and
    add column `admin_votes_num`.
    """
    op.add_column(
        'wish', sa.Column(
            'admin_votes_num', sa.Integer(), server_default='0',
            nullable=True))
    op.add_column(
        'wish', sa.Column('real_votes_num', sa.Integer(), nullable=True))
    # Backfill the renamed column before dropping the original.
    op.execute('UPDATE wish SET real_votes_num = votes_num')
    op.drop_column('wish', 'votes_num')
def downgrade():
    """Reverse upgrade(): restore `votes_num` and drop the two new columns."""
    op.add_column('wish', sa.Column('votes_num', sa.INTEGER(), nullable=True))
    # Copy the counts back before removing `real_votes_num`.
    op.execute('UPDATE wish SET votes_num = real_votes_num')
    op.drop_column('wish', 'real_votes_num')
    op.drop_column('wish', 'admin_votes_num')
| [
"944951481@qq.com"
] | 944951481@qq.com |
3bb168d46e93c5e71ecaeb35497dc68401a176f6 | 9a1169f3d008d81f046578994c7607c2670c0476 | /tests/chainer_tests/distributions_tests/test_gumbel.py | 50cbbd88174542e261faba926c4f892da8e5ec19 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | ishanrai05/chainer | d77d61e481c7c0a9d4837582595bc98b3c7156dd | 758c74122f4463f3b0de61c6609b8688576f59a6 | refs/heads/master | 2020-04-23T16:11:28.496584 | 2019-02-18T09:52:23 | 2019-02-18T09:52:23 | 171,289,639 | 1 | 0 | MIT | 2019-02-18T13:30:46 | 2019-02-18T13:30:46 | null | UTF-8 | Python | false | false | 1,210 | py | import numpy
from chainer import distributions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
    'shape': [(2, 3), ()],
    'is_variable': [True, False],
    'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestGumbel(testing.distribution_unittest):
    """Compare chainer.distributions.Gumbel against scipy.stats.gumbel_r."""
    # presumably makes the harness compare scipy results one element at a
    # time -- see testing.distribution_unittest for the exact semantics.
    scipy_onebyone = True
    def setUp_configure(self):
        """Configure the distribution under test, its scipy reference, and parameters."""
        from scipy import stats
        self.dist = distributions.Gumbel
        self.scipy_dist = stats.gumbel_r
        # Properties/methods the base harness will check against scipy.
        self.test_targets = set([
            "batch_shape", "entropy", "event_shape", "log_prob", "mean",
            "sample", "support", "variance"])
        # loc drawn from [-1, 1); scale = exp(U(-1, 1)) so it is strictly positive.
        loc = utils.force_array(
            numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32))
        scale = utils.force_array(numpy.exp(
            numpy.random.uniform(-1, 1, self.shape)).astype(numpy.float32))
        self.params = {"loc": loc, "scale": scale}
        self.scipy_params = {"loc": loc, "scale": scale}
    def sample_for_test(self):
        """Return float32 normal samples of shape sample_shape + shape."""
        smp = numpy.random.normal(
            size=self.sample_shape + self.shape).astype(numpy.float32)
        return smp
testing.run_module(__name__, __file__)
| [
"yoshikawa@preferred.jp"
] | yoshikawa@preferred.jp |
829e0449fb5db433b31272b5900ca1b144331321 | 6e5ab77fee1fb4a0310213dd8c6dd8601828b1b9 | /Algorithm/문제/수업/D-13t/AD/[TST] 치킨배달2.py | 41a4c73adabf114f7dac858dd407e941d456012d | [] | no_license | hongyong3/TIL | 36d031c0da9e3e6db3eebb977bd3e12df00a849f | 7f1492128e957a78fc95b255f4f7f2978161e471 | refs/heads/master | 2023-08-19T09:16:03.231757 | 2023-08-18T09:38:47 | 2023-08-18T09:38:47 | 162,100,258 | 1 | 0 | null | 2023-02-11T00:52:32 | 2018-12-17T08:42:42 | Jupyter Notebook | UTF-8 | Python | false | false | 3,564 | py | import sys
sys.stdin = open("[TST] 치킨배달2_input.txt", "r")
# def solve():
# sums = 0
# for i in range(home_count): # 현재 집에서 고른 치킨집과 최소인 거리 찾기
# dist_min = 20*20
# for j in range(chicken_count): # 치킨집
# if not check[j]: continue # 선택안한 치킨집이면 스킵
# dist_min = min(dist_min, arr[j][i]) # 최소거리
# sums += dist_min
# return sums
#
#
# def DFS(no, count):
# global minn
# # M개 골랐을때 고른 치킨거리의 최소의 합 비교
# if count == M:
# # for i in range(chicken_count): print(check[i], end=" ")
# # print()
# sums = solve()
# if sums < minn:
# minn = sums
# return
#
# if no > chicken_count:
# return
#
# # 현재 치킨집을 고르거나 고르지 않는 경우 시도
# check[no] = 1
# DFS(no + 1, count + 1)
# check[no] = 0
# DFS(no + 1, count)
#
# # main ------------------------------------------------------
# N, M = map(int, input().split())
# data = [list(map(int, input().split())) for _ in range(N)]
# home = []
# chicken = []
# minn = 20*20
#
# for i in range(N):
# for j in range(N):
# if data[i][j] == 1:
# home.append([i, j])
# if data[i][j] == 2:
# chicken.append([i, j])
#
# chicken_count = len(chicken) # 치킨집의 개수
# home_count = len(home) # 집의 개수
# arr = [[0] * home_count for _ in range(chicken_count)]
# check = [0] * home_count
#
# for i in range(chicken_count):
# for j in range(home_count):
# dist = abs(chicken[i][0] - home[j][0]) + abs(chicken[i][1] - home[j][1]) # 치킨집과 집의 거리
# arr[i][j] = dist
#
# DFS(0, 0) # 0행(첫번째 치킨)부터 시작, 개수는 0개
# print(minn)
# 선생님 풀이
def solve(): # must sweep per house (by column of arr), hence the j-over-i order
    """Return the total distance from every house to its nearest selected shop."""
    sums = 0
    for i in range(HN): # for each house, find the nearest *selected* chicken shop
        dist_min = 20*20
        for j in range(CN): # chicken shops
            if not sel[j]: continue # skip chicken shops that were not selected
            dist_min = min(dist_min,arr[j][i]) # minimum distance so far
        sums += dist_min
    return sums
def DFS(no,cnt):
    """Choose or skip chicken shop `no`; once M shops are chosen, update the best total."""
    global sol
    # With M shops picked, compare the resulting total chicken distance.
    if cnt == M:
        # for i in range(CN): print(sel[i], end=' ')
        # print()
        sums = solve()
        if sums < sol: sol=sums
        return
    if no>= CN:
        return
    # Try both branches: select the current shop, then skip it.
    sel[no] = 1 # picking M shops out of CN (a combination)
    DFS(no+1, cnt+1)
    sel[no] = 0
    DFS(no+1, cnt)
#main----------------
N,M = list(map(int,input().split()))
temp = []
for i in range(N):
temp.append(list(map(int,input().split())))
house = []
chk = []
# 치킨집과 집 정보 정리하기
for i in range(N):
for j in range(N):
if temp[i][j] == 2: # 치킨집
chk.append((i,j))
elif temp[i][j] == 1:# 집
house.append((i,j))
CN = len(chk) # 치킨집갯수
HN = len(house) # 집 갯수
arr= [[0]*HN for _ in range(CN)]
for i in range(CN): # 행을 치킨집
for j in range(HN):
# 치킨집과 집 거리 계산
dist = abs(chk[i][0] - house[j][0]) + abs(chk[i][1] - house[j][1])
arr[i][j] = dist
sel = [0] * HN # 고른 치킨집
rec = [0] * CN
sol = 20 * 20
DFS(0,0) # 0행(첫번째 치킨)부터 시작, 개수는 0개
print(sol)
| [
"chy66822495@gmail.com"
] | chy66822495@gmail.com |
7125a29ff2c0e1eecfb3db2d2366cbe9fde9ce28 | 006ae2f32313b59e9f2a637631d981ccfd1c3430 | /setup.py | 15fd1c9ac595ba0ec642b2c4ae90dbf1388eff44 | [] | no_license | yourlabs/starlette-cli | 03a941f998c7dead5ff4d439dbfde67aa17f1cb1 | 3c9be7519d32d28e665a0a86b638c27ad845d622 | refs/heads/master | 2023-03-06T03:28:17.117784 | 2021-02-06T19:42:11 | 2021-02-07T00:26:38 | 336,623,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | from setuptools import setup
setup(
name='starlette-cli',
versioning='dev',
setup_requires='setupmeta',
modules=['starlette_cli'],
author='James Pic',
author_email='jamespic@gmail.com',
url='https://yourlabs.io/oss/starlette-cli',
include_package_data=True,
license='MIT',
keywords='starlette cli',
python_requires='>=3',
install_requires=['starlette-apps>=0.1.6'],
)
| [
"jpic@yourlabs.org"
] | jpic@yourlabs.org |
0d68beccc9cba0e3f18c7002e64278071a90aa98 | ec93fc95f583991bf2dd958f34ce3409f5d6b0a8 | /vouchers/migrations/0009_batch_user.py | cbc1e2a57fe199f16a77a41a00aad901c38cf87c | [] | no_license | deone/vms | b45fcd50535397bebceb6f50dd55bff363bdb200 | 82c4cfafbde96d18776c7a6136f67acad173f520 | refs/heads/master | 2021-06-15T06:00:22.833997 | 2019-06-17T13:36:44 | 2019-06-17T13:36:44 | 44,692,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('vouchers', '0008_auto_20160106_2323'),
]
operations = [
migrations.AddField(
model_name='batch',
name='user',
field=models.ForeignKey(default=2, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| [
"alwaysdeone@gmail.com"
] | alwaysdeone@gmail.com |
e59655ec1a9e6e1c9dd715db5fc0da5c6cf31946 | 0822d36728e9ed1d4e91d8ee8b5ea39010ac9371 | /robo/pages/rio_grande_do_sul/correiodopovo.py | 889bebdead1f9794b8e7615dff426e0eec6e12f7 | [] | no_license | diegothuran/blog | 11161e6f425d08bf7689190eac0ca5bd7cb65dd7 | 233135a1db24541de98a7aeffd840cf51e5e462e | refs/heads/master | 2022-12-08T14:03:02.876353 | 2019-06-05T17:57:55 | 2019-06-05T17:57:55 | 176,329,704 | 0 | 0 | null | 2022-12-08T04:53:02 | 2019-03-18T16:46:43 | Python | UTF-8 | Python | false | false | 1,356 | py | # coding: utf-8
import sys
sys.path.insert(0, '../../../blog')
from bs4 import BeautifulSoup
import requests
GLOBAL_RANK = 35259
RANK_BRAZIL = 1087
NAME = 'correiodopovo.com.br'
def get_urls():
try:
root = 'http://www.correiodopovo.com.br'
urls = []
links = [
'http://www.correiodopovo.com.br/busca/?Sessao=Noticias&ED=Cidades',
'http://www.correiodopovo.com.br/busca/?Sessao=Noticias&ED=Economia',
'http://www.correiodopovo.com.br/busca/?Sessao=Noticias&ED=Geral',
'http://www.correiodopovo.com.br/busca/?Sessao=Noticias&ED=Mundo',
'http://www.correiodopovo.com.br/busca/?Sessao=Noticias&ED=Policia',
'http://www.correiodopovo.com.br/busca/?Sessao=Noticias&ED=Politica',
'http://www.correiodopovo.com.br/busca/?Sessao=Noticias&ED=Rural'
]
for link in links:
req = requests.get(link)
noticias = BeautifulSoup(req.text, "html.parser").find_all('h1')
for noticia in noticias:
href = noticia.find_all('a', href=True)[0]['href']
full_link = root + href
urls.append(full_link)
return urls
except:
raise Exception('Exception in correiodopovo')
| [
"diego.thuran@gmail.com"
] | diego.thuran@gmail.com |
84d6040fc0ff051397d0aea6be5fa73dd87c25bf | 2befb6f2a5f1fbbd5340093db43a198abdd5f53b | /pythonProject/customAuth/CustomAuthApp/views.py | 34fcdfebcc1ba80f731777cf404be22738ac9833 | [] | no_license | JanardanPandey/RestAPI | 1956d3529782d18ef2118961f6286e3213665aad | 654933a4d9687076a00c6f4c57fc3dfee1a2c567 | refs/heads/master | 2023-06-14T07:02:31.702000 | 2021-07-02T07:50:59 | 2021-07-02T07:50:59 | 382,357,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from django.shortcuts import render
from rest_framework import viewsets
from .models import Student
from .serializers import StudentSerializers
# Create your views here.
class studentapi(viewsets.ModelViewSet):
queryset = Student.objects.all()
serializer_class = StudentSerializers | [
"janardanpandey0510@gmail.com"
] | janardanpandey0510@gmail.com |
a2a3f5ea87b79717745563396a14f4d4ee143753 | 550d999981ccaaa32e734fd4c963181c4d5ab6f8 | /core/rest/wscdn.py | 4baa6af0e24fc1f4638f43397d1b4b1bab7096a4 | [
"MIT"
] | permissive | macdaliot/Osmedeus | 4709289aa2791473073241fda2275aa22370569a | cd8020416af81a745af7b06c8c5a1c5881911234 | refs/heads/master | 2023-03-01T22:03:24.438090 | 2019-07-22T09:40:17 | 2019-07-22T09:40:17 | 198,321,076 | 0 | 0 | MIT | 2021-02-09T18:44:54 | 2019-07-23T00:25:33 | Python | UTF-8 | Python | false | false | 1,669 | py | import os
import glob
import json
from flask_restful import Resource, reqparse
from flask_jwt_extended import jwt_required
from flask import send_from_directory
import utils
from pathlib import Path
BASE_DIR = Path(os.path.dirname(os.path.abspath(__file__)))
# incase you can't install ansi2html it's won't break the api
try:
from ansi2html import Ansi2HTMLConverter
except:
pass
'''
render stdout content
'''
class Wscdn(Resource):
def verify_file(self, filename):
option_files = glob.glob(
str(BASE_DIR) + '/storages/**/options.json', recursive=True)
# loop though all options avalible
for option in option_files:
json_option = utils.reading_json(option)
stdout_path = json_option.get('WORKSPACES') + "/" + filename
if utils.not_empty_file(stdout_path):
return json_option.get('WORKSPACES'), os.path.normpath(filename)
# get real path
p = Path(filename)
ws = p.parts[0]
if ws != utils.url_encode(ws):
# just replace the first one
filename_encode = filename.replace(ws, utils.url_encode(ws), 1)
stdout_path_encode = json_option.get('WORKSPACES') + filename_encode
if utils.not_empty_file(stdout_path_encode):
return json_option.get('WORKSPACES'), os.path.normpath(filename_encode)
return False, False
def get(self, filename):
ws_path, stdout_path = self.verify_file(filename)
if not stdout_path:
return 'Custom 404 here', 404
return send_from_directory(ws_path, stdout_path)
| [
"jeromej3m@gmail.com"
] | jeromej3m@gmail.com |
343068211e01bf5569aef84eb8a3e7c9ded11ecc | 90b2b50be27da77b6680f4c7b9cfea53267f2f6d | /leetcode/math/48. 旋转图像.py | 2595164dd966a7485695e73f7d34bd7e3f6887ab | [
"Apache-2.0"
] | permissive | xuhuasheng/algorithm-python | 22cd3007d0b63d169d8f84b9b88787d6883e9c76 | 9c47c1add8da1ccfbee8882057883d90615d27b5 | refs/heads/master | 2023-01-18T22:52:22.027636 | 2020-11-20T09:48:23 | 2020-11-20T09:48:23 | 313,158,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,023 | py | # 给定一个 n × n 的二维矩阵表示一个图像。
# 将图像顺时针旋转 90 度。
# 说明:
# 你必须在原地旋转图像,这意味着你需要直接修改输入的二维矩阵。请不要使用另一个矩阵来旋转图像。
# 示例 1:
# 给定 matrix =
# [
# [1,2,3],
# [4,5,6],
# [7,8,9]
# ],
# 原地旋转输入矩阵,使其变为:
# [
# [7,4,1],
# [8,5,2],
# [9,6,3]
# ]
# 来源:力扣(LeetCode)
# 链接:https://leetcode-cn.com/problems/rotate-image
# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
class Solution:
def rotate(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
# 1.上下翻转
matrix.reverse()
# 2.主对角线翻转
for i in range(len(matrix)):
for j in range(i+1, len(matrix)):
matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
| [
"xu_watson@163.com"
] | xu_watson@163.com |
22f096a774eed7bab451ce2bf6694d1729482d86 | 82fca16c707946faecf6eb2b2f03fa5799b8d9d5 | /student/urls.py | e507a40948619d671373a5a8b214fa2b3ef8adfc | [] | no_license | riteshh101/CrudTest | 487ef5d3337064343c5cf2de7a40bfaaa0363e99 | f0ad5e67e3f6c71c3b89295cf79f6a3f43253e01 | refs/heads/main | 2023-06-15T19:44:33.853855 | 2021-07-14T08:15:29 | 2021-07-14T08:15:29 | 385,862,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | """student URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from app import views
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
TokenVerifyView,
)
urlpatterns = [
path('admin/', admin.site.urls),
path('login/',views.login_student,name="login_student"),
path('signup/',views.register_student,name="register_student"),
path('all_student/',views.all_student,name="all_student"),
path('student<int:id>/',views.student_remove,name='student_remove'),
path('logout/',views.student_logout,name='logout'),
# path('token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
# path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
# path('token/verify/',TokenVerifyView.as_view(),name="token_verify"),
path('student_edit<int:id>/',views.student_edit,name='student_edit'),
]
| [
"83176095+riteshh101@users.noreply.github.com"
] | 83176095+riteshh101@users.noreply.github.com |
d540f96d5385b1438d96992cc5fd6eb17fe6230c | 2d4380518d9c591b6b6c09ea51e28a34381fc80c | /CIM16/CDPSM/Geographical/IEC61970/Wires/SynchronousMachine.py | 333e1ee1450fdc72f375c18920597ce6b88c110a | [
"MIT"
] | permissive | fran-jo/PyCIM | 355e36ae14d1b64b01e752c5acd5395bf88cd949 | de942633d966bdf2bd76d680ecb20517fc873281 | refs/heads/master | 2021-01-20T03:00:41.186556 | 2017-09-19T14:15:33 | 2017-09-19T14:15:33 | 89,480,767 | 0 | 1 | null | 2017-04-26T12:57:44 | 2017-04-26T12:57:44 | null | UTF-8 | Python | false | false | 1,709 | py | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.CDPSM.Geographical.IEC61970.Core.ConductingEquipment import ConductingEquipment
class SynchronousMachine(ConductingEquipment):
"""An electromechanical device that operates synchronously with the network. It is a single machine operating either as a generator or synchronous condenser or pump.
"""
def __init__(self, *args, **kw_args):
"""Initialises a new 'SynchronousMachine' instance.
"""
super(SynchronousMachine, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = []
_many_refs = []
| [
"fran_jo@hotmail.com"
] | fran_jo@hotmail.com |
25442ed8370e9c2c4552f217bfd5a5cf12faf416 | b510f89dd40c0ed6bda55230fe4615d16de07251 | /picmodels/migrations/0040_auto_20170808_1541.py | ada816c33f11f1a6c27a281e5fa1dcd47fbca85f | [
"MIT"
] | permissive | bbcawodu/careadvisors-backend | 42676d269679c487a97a8870339bbded27a0ccf7 | 5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838 | refs/heads/master | 2020-03-25T12:47:05.267866 | 2018-08-14T16:29:02 | 2018-08-14T16:29:02 | 143,793,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('picmodels', '0039_careadvisorcustomer'),
]
operations = [
migrations.RemoveField(
model_name='careadvisorcustomer',
name='first_name',
),
migrations.RemoveField(
model_name='careadvisorcustomer',
name='last_name',
),
migrations.AddField(
model_name='careadvisorcustomer',
name='full_name',
field=models.TextField(default='No name'),
preserve_default=False,
),
]
| [
"awodubradley@gmail.com"
] | awodubradley@gmail.com |
d306eb9cf443b85405b6d6c0b9b89c5ebb7a02ac | ec0dc0fbfe110e50420ecd76e052dcda33bd2ad4 | /functionalprogramming/map_reduce_filter/functionalprgm.py | fae8a188504fc9437ffde660bfe57c99de981f15 | [] | no_license | aishAMZK/luminardjangonew | 88383a64a60f85e2cba5ee99bede5c04af82ec77 | 745ee4409984ae07e70aeeab8632cf8f7754df12 | refs/heads/master | 2023-03-22T11:03:42.458417 | 2021-03-22T16:45:45 | 2021-03-22T16:45:45 | 315,669,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | #lambda function
f=lambda no1,no2:no1+no2
print(f(10,20))
cube=lambda no:no**3
print(cube(3))
sub=lambda no1,no2:no1-no2
print(sub(20,10))
mul=lambda no1,no2:no1*no2
print(mul(10,20))
div=lambda no1,no2:no1/no2
print(div(20,10)) | [
"aiswaryakrishna46@gmail.com"
] | aiswaryakrishna46@gmail.com |
f6db0d878c673ee37cb07285f33ea9925400fb0b | fed0736dcd16356a74aa405aa730dec14d95fbec | /controller/apps/tinc/migrations/0009_auto__add_field_host_island.py | 9449a35c1d589f203f59162a6d14cb9a336ed63c | [] | no_license | m00dy/vct-controller | 78e233930c17487d736dccdd6f62ca964a4907ad | dd798dc9bd3321b17007ff131e7b1288a2cd3c36 | refs/heads/master | 2020-12-28T22:01:17.251131 | 2015-01-22T14:16:52 | 2015-01-22T14:16:52 | 29,681,858 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,109 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Host.island'
db.add_column(u'tinc_host', 'island',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tinc.Island'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Host.island'
db.delete_column(u'tinc_host', 'island_id')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'tinc.gateway': {
'Meta': {'object_name': 'Gateway'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'tinc.host': {
'Meta': {'object_name': 'Host'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'island': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tinc.Island']", 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tinc_hosts'", 'to': u"orm['users.User']"})
},
u'tinc.island': {
'Meta': {'object_name': 'Island'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
u'tinc.tincaddress': {
'Meta': {'object_name': 'TincAddress'},
'addr': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'island': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tinc.Island']", 'null': 'True', 'blank': 'True'}),
'port': ('django.db.models.fields.SmallIntegerField', [], {'default': "'655'"}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'addresses'", 'to': u"orm['tinc.TincServer']"})
},
u'tinc.tincclient': {
'Meta': {'unique_together': "(('content_type', 'object_id'),)", 'object_name': 'TincClient'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'island': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tinc.Island']", 'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'pubkey': ('controller.models.fields.RSAPublicKeyField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'tinc.tincserver': {
'Meta': {'unique_together': "(('content_type', 'object_id'),)", 'object_name': 'TincServer'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'pubkey': ('controller.models.fields.RSAPublicKeyField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'users.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'allow_nodes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_slices': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
u'users.roles': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'Roles'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'roles'", 'to': u"orm['users.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_researcher': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_technician': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'roles'", 'to': u"orm['users.User']"})
},
u'users.user': {
'Meta': {'ordering': "['name']", 'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'users'", 'blank': 'True', 'through': u"orm['users.Roles']", 'to': u"orm['users.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('controller.models.fields.TrimmedCharField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('controller.models.fields.NullableCharField', [], {'db_index': 'True', 'max_length': '30', 'unique': 'True', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['tinc']
| [
"santiago@pangea.org"
] | santiago@pangea.org |
5ecf543cff70919e446533708646fbf6a7ca738e | e5329001263e67a4d3c13d57bb91f2502280e206 | /InvTL/lm_py/pypy/objspace/std/test/test_celldict.py | ceb5a64c290b62c57e64125a367e5a051aebe433 | [] | no_license | yanhongliu/DARLAB | d9432db6e005a39e33501d7ffffe6e648b95b3fc | f739318c9620b44ef03d155f791c7ed4111d80fa | refs/heads/master | 2021-05-27T19:58:58.458846 | 2014-02-04T12:09:26 | 2014-02-04T12:09:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | import py
from pypy.conftest import gettestobjspace, option
from pypy.objspace.std.celldict import ModuleCell, ModuleDictImplementation
from pypy.objspace.std.test.test_dictmultiobject import FakeSpace
from pypy.interpreter import gateway
space = FakeSpace()
class TestCellDict(object):
def test_basic_property(self):
d = ModuleDictImplementation(space)
d.setitem("a", 1)
assert d.getcell("a", False) is d.getcell("a", False)
acell = d.getcell("a", False)
d.setitem("b", 2)
assert d.getcell("b", False) is d.getcell("b", False)
assert d.getcell("c", True) is d.getcell("c", True)
assert d.getitem("a") == 1
assert d.getitem("b") == 2
d.delitem("a")
py.test.raises(KeyError, d.delitem, "a")
assert d.getitem("a") is None
assert d.getcell("a", False) is acell
assert d.length() == 1
d.clear()
assert d.getitem("a") is None
assert d.getcell("a", False) is acell
assert d.length() == 0
| [
"mickg10@gmail.com"
] | mickg10@gmail.com |
7bcea3bd28d9367111a7471451575f50f311e78a | dd573ed68682fd07da08143dd09f6d2324f51345 | /swea/summer/1249_보급로.py | 1cf4f39224b7b34a81ed73ff883fe93da2456bdc | [] | no_license | chelseashin/My-Algorithm | 0f9fb37ea5c6475e8ff6943a5fdaa46f0cd8be61 | db692e158ebed2d607855c8e554fd291c18acb42 | refs/heads/master | 2021-08-06T12:05:23.155679 | 2021-07-04T05:07:43 | 2021-07-04T05:07:43 | 204,362,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,504 | py | import sys
sys.stdin = open("1249_input.txt")
# 상 하 좌 우
dr = (-1, 1, 0, 0)
dc = (0, 0, -1, 1)
def bfs(sr, sc):
global arr, N, visited
Q = [(sr, sc)]
visited[sr][sc] = 0
while Q:
r, c = Q.pop(0)
for i in range(4):
nr = r + dr[i]
nc = c + dc[i]
if not (0 <= nr < N and 0 <= nc < N):
continue
time = arr[nr][nc]
if visited[nr][nc] > visited[r][c] + time:
visited[nr][nc] = visited[r][c] + time
Q.append((nr, nc))
T = int(input())
for tc in range(T):
N = int(input())
arr = [list(map(int, input())) for _ in range(N)]
visited = [[float('inf')] * N for _ in range(N)]
bfs(0, 0)
print("#{} {}".format(tc+1, visited[N-1][N-1]))
# 2
# dx = (-1, 1, 0, 0)
# dy = (0, 0, -1, 1)
#
# def dfs():
# V[0][0] = A[0][0]
# q = [[0, 0]]
#
# while q:
# a = q.pop(0)
# y, x = a[0], a[1]
# for i in range(4):
# nx = x + dx[i]
# ny = y + dy[i]
# if not (0 <= nx < N and 0 <= ny < N):
# continue
# if V[ny][nx] > V[y][x] + A[ny][nx]:
# V[ny][nx] = V[y][x] + A[ny][nx]
# q.append([ny, nx])
#
# T = int(input())
# for tc in range(1, T + 1):
# N = int(input())
# A = [list(map(int, (input()))) for i in range(N)]
# V = [[100000] * N for i in range(N)]
# dfs()
#
# print("#{} {}".format(tc, V[N - 1][N - 1]))
| [
"chaewonshin95@gmail.com"
] | chaewonshin95@gmail.com |
4de6d624e297737b79f34f2d263f711951474974 | 522d93a2829470f2d5ddd16c8784b29e8cff2189 | /static_files/admin/global/plugins/clockface/CHANGELOG.txt.py | 4fe83cb2e827277e3e3658a872e1c279e11344b7 | [
"MIT"
] | permissive | omorapatterson/ontraveline | 7c4d5d75466b60469279b7ee041ad2a7d4afa637 | b3248431fd56c932a1d23a67034315b19787b1a0 | refs/heads/master | 2023-02-19T22:40:52.666021 | 2018-02-27T17:30:29 | 2018-02-27T17:30:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | XXXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXX XXX XXX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXX XXXX XXXXXXX XX XXXX XX XXX XXXXXX XXXXXXXXXX
XXXXXXX XXXXX XXX XXX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXX | [
"contact@magestree.com"
] | contact@magestree.com |
00fe0c4fb019f721213defd68567862d5fd31ed8 | bfd68f439aff62ccba40c1d4e57f2e7a9f99c523 | /docs/scripts/gauss.b.py | b77f5d57a88c9ad28f1e1b2d09d4df05ea90d526 | [
"MIT"
] | permissive | ivan-pi/RBF | 6225c7ea267929f4866e62b227495985a6d62828 | 675f841bc325848f019276a980773145783d7ca4 | refs/heads/master | 2020-06-20T15:20:58.519211 | 2019-09-09T01:42:06 | 2019-09-09T01:42:06 | 197,162,365 | 1 | 1 | MIT | 2019-07-16T09:22:50 | 2019-07-16T09:22:50 | null | UTF-8 | Python | false | false | 1,581 | py | '''
This script demonstrates how to make a custom *GaussianProcess* by
combining *GaussianProcess* instances. The resulting Gaussian process
has two distinct length-scales.
'''
import numpy as np
import matplotlib.pyplot as plt
from rbf.basis import se
from rbf.gauss import gpiso
np.random.seed(1)
dx = np.linspace(0.0, 5.0, 1000)[:,None]
x = np.linspace(-5, 5.0, 1000)[:,None]
gp_long = gpiso(se, (0.0, 1.0, 2.0))
gp_short = gpiso(se, (0.0, 0.5, 0.25))
gp = gp_long + gp_short
# compute the autocovariances
acov_long = gp_long.covariance(dx, [[0.0]])
acov_short = gp_short.covariance(dx, [[0.0]])
acov = gp.covariance(dx, [[0.0]])
# draw 3 samples
sample = gp.sample(x)
# mean and uncertainty of the new gp
mean,sigma = gp(x)
# plot the autocovariance functions
fig,axs = plt.subplots(2,1,figsize=(6,6))
axs[0].plot(dx,acov_long,'r--',label='long component')
axs[0].plot(dx,acov_short,'b--',label='short component')
axs[0].plot(dx,acov,'k-',label='sum')
axs[0].set_xlabel('$\mathregular{\Delta x}$',fontsize=10)
axs[0].set_ylabel('auto-covariance',fontsize=10)
axs[0].legend(fontsize=10)
axs[0].tick_params(labelsize=10)
axs[0].set_xlim((0,4))
axs[0].grid(True)
# plot the samples
axs[1].plot(x,sample,'k--',label='sample')
axs[1].plot(x,mean,'k-',label='mean')
axs[1].fill_between(x[:,0],mean-sigma,mean+sigma,color='k',alpha=0.2,edgecolor='none',label='std. dev.')
axs[1].set_xlabel('x',fontsize=10)
axs[1].legend(fontsize=10)
axs[1].tick_params(labelsize=10)
axs[1].set_xlim((-5,5))
axs[1].grid(True)
plt.tight_layout()
plt.savefig('../figures/gauss.b.png')
plt.show()
| [
"treverhines@gmail.com"
] | treverhines@gmail.com |
def0cf113000143b32c2bf32544a4b798364f28a | 1b1804ad30b0103f56db7a4130ddc264d7269c29 | /mga_graphql/mgagraphql.py | eef7140a83553bb5357573ca7b3b9f15d85f4c08 | [
"MIT"
] | permissive | metamorph-inc/mga-graphql | 2127b33e9f0c77d17d0be9e9cc3e72912a1a27ca | bb715bba75f5b0798389e4955a2c20aa91fc2a7a | refs/heads/master | 2021-09-02T03:22:16.685235 | 2017-12-29T22:35:59 | 2017-12-29T22:35:59 | 115,758,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,929 | py | import xml.etree.ElementTree as ET
import _winreg as winreg
import os
import sys
import udm
import collections
import functools
class MgaGraphQlSchemaConverter(object):
TEMPLATE_CLASS = """# auto-generated by mga-graphql
from mga_graphql.mgaclasses import *
from graphene import String, Int, Field, Boolean, List
{imports}
class {classname}({baseclasses}):
{attributes}
"""
TEMPLATE_IMPORT = """from .{classname} import {classname}"""
RELPATH_DSML_CLASSES = 'dsmlclasses'
MGA_CLASSES = ['MgaObject', 'MgaFco',
'MgaModel', 'MgaAtom',
'MgaReference', 'MgaSet',
'MgaFolder', 'MgaConnection']
def __init__(self, udm_xml=None):
self.parse_metamodel(udm_xml)
def build_class_file(self, classname, baseclasses, attributes):
if baseclasses:
imports = '\n'.join([self.TEMPLATE_IMPORT.format(classname=bc)
for bc in baseclasses
if bc not in self.MGA_CLASSES])
list_baseclasses = ', '.join(baseclasses)
else:
imports = ''
list_baseclasses = ''
if attributes:
code_attributes = '\n '.join(['{name} = {type}()'.format(name=k,
type=v)
for k, v in attributes.iteritems()])
else:
code_attributes = 'pass'
code_class = self.TEMPLATE_CLASS.format(classname=classname,
baseclasses=list_baseclasses,
attributes=code_attributes,
imports=imports)
print (code_class)
print ('\n')
path_classfile = os.path.join(self.RELPATH_DSML_CLASSES, classname + '.py')
with open(path_classfile, 'w') as cf:
cf.writelines(code_class)
TEMPLATE_QUERY_FILE = """# auto-generated by mga-graphql
import graphene
from mga_graphql.mgaclasses import *
from dsmlclasses import *
import udm
def load_data(path_mga, path_udm_xml):
uml_diagram = udm.uml_diagram()
meta_dn = udm.SmartDataNetwork(uml_diagram)
meta_dn.open(path_udm_xml.encode('utf-8'), b'')
dn = udm.SmartDataNetwork(meta_dn.root)
dn.open(path_mga.encode('utf-8'), b'')
# Need to make our own data structure for now.
models = {{}}
root = dn.root
def visit(obj):
type = obj.type.name
if type == 'Compound':
model = Compound(id=obj.id,
name=obj.name)
else:
model = MgaObject(id=obj.id,
name=obj.name)
models[str(obj.id)] = model
child_ids = []
for child in obj.children():
child_ids.append(visit(child))
if type == 'Compound':
model.children = child_ids
return obj.id
visit(dn.root)
return models
class Query(graphene.ObjectType):
{objects}
def run_server(d_models):
schema = graphene.Schema(query=Query)
from flask import Flask, render_template
from flask_graphql import GraphQLView
app = Flask(__name__)
app.debug = False
app.add_url_rule('/graphql',
view_func=GraphQLView.as_view('graphql',
schema=schema,
graphiql=True,
context={{
'session': Query,
'data': d_models
}}))
@app.route('/')
def index():
return "Go to /graphql"
app.run()
"""
def build_query_file_entry(self, l_classes):
template_objects = """
{lowercase} = graphene.Field({classname}, id=graphene.String(),)
def resolve_{lowercase}(self, info, id):
return info.context['data'][id]
all_{lowercase} = graphene.List({classname}, )
def resolve_all_{lowercase}(self, info):
return [v for k, v in info.context['data'].iteritems()
if isinstance(v, {classname})]
"""
code_schema = self.TEMPLATE_QUERY_FILE.format(objects='\n'
.join([template_objects.format(lowercase=c.lower(),
classname=c)
for c in l_classes]))
print (code_schema)
path_schemafile = os.path.join('schema.py')
with open(path_schemafile, 'w') as cf:
cf.writelines(code_schema)
def parse_metamodel(self, udm_xml):
tree = ET.parse(udm_xml)
root = tree.getroot()
# Build classes for everything
path_dsml_classes = self.RELPATH_DSML_CLASSES
# if os.path.exists(path_dsml_classes):
# os.remove(path_dsml_classes)
# os.mkdir(path_dsml_classes)
# Build class dict
m_classes = {clazz.get('_id'): clazz.get('name') for clazz in root.iter('Class')}
# Build class code files
for clazz in root.iter('Class'):
name_class = clazz.get('name')
# Skip Mga classes (already included)
if name_class in self.MGA_CLASSES:
continue
# Get baseclasses
baseclass_names = []
basetypes = clazz.get('baseTypes')
if basetypes:
baseclass_names = [m_classes[id] for id in basetypes.split(' ')
if m_classes[id] not in self.MGA_CLASSES]
# Also flag the MGA basetype
baseclass_names.append('Mga' + clazz.attrib['stereotype'])
m_attr = {}
for attr in clazz.iter('Attribute'):
name_attr = attr.get('name')
type_attr = attr.get('type')
if type_attr == 'Integer':
m_attr[name_attr] = 'Int'
else:
m_attr[name_attr] = type_attr
if not baseclass_names:
baseclass_names = ['MgaObject']
self.build_class_file(name_class, baseclass_names, m_attr)
l_dsml_classes = [v for k, v in m_classes.iteritems()
if v not in self.MGA_CLASSES]
# Build dsmlclasses/__init__.py
code_init = '\n'.join(["from .{cname} import {cname}".format(cname=cname)
for cname in l_dsml_classes])
with open(os.path.join('dsmlclasses', '__init__.py'), 'w') as init:
init.writelines(code_init)
# Build schema file
self.build_query_file_entry(l_dsml_classes)
| [
"adam.nagel+git@gmail.com"
] | adam.nagel+git@gmail.com |
579f6723b9a9226201392f80914589a5b8098d09 | ced6ca53b222eff6cfd13c4786cec32a7f5a393e | /exam05_Blue_Color_detect.py | 37b03a134e868cd2918b483a66321a6dbc99e923 | [] | no_license | ykiseong303/Image_detect_openCV | b48867b3d34b58675d4ec9e74ed8d3b65641c1f0 | 7c4beae9db46d23c5dd7ba9c57efc881ba2f2b3f | refs/heads/master | 2021-08-08T13:41:46.465522 | 2020-04-24T04:17:31 | 2020-04-24T04:17:31 | 162,654,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,757 | py | import numpy as np
import cv2
# HSV ranges for cv2.inRange (OpenCV hue is 0-179, S/V are 0-255).
lower_blue = np.array([90,100,100])
upper_blue = np.array([130,255,255])
# NOTE(review): the red range below is defined but never used in this script.
lower_red = np.array([6,100,100])
upper_red = np.array([10,255,255])
# Open the default camera; property 3/4 are frame width/height.
cap = cv2.VideoCapture(0)
cap.set(3,480)
cap.set(4,320)
# Consecutive-detection counters; a side is announced after 3 hits.
right_count = 0
left_count = 0
while True :
    ret, frame = cap.read()
    cv_image_input = frame
    hsv = cv2.cvtColor(cv_image_input,cv2.COLOR_BGR2HSV)
    #Line for dividing left and right area
    #cv2.line(cv_image_input,(90,0),(90,240),(255,0,0),3)
    #cv2.line(cv_image_input,(162,0),(162,240),(255,0,0),3)
    #cv2.line(cv_image_input,(235,0),(235,240),(255,0,0),3)
    # Draw the vertical split at x=160 used to classify left vs right.
    cv2.line(cv_image_input,(160,0),(160,480),(255,0,0),3)
    #cv2.line(cv_image_input,(162,0),(162,240),(255,0,0),3)
    #cv2.line(cv_image_input,(235,0),(235,240),(255,0,0),3)
    # NOTE(review): gray/median/gray_blurred/threshold are computed but the
    # result `threshold` is never used afterwards.
    gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    median = cv2.medianBlur(gray,3)
    gray_blurred = cv2.GaussianBlur(median,(3,3),0)
    ret, threshold = cv2.threshold(gray_blurred,210,255,cv2.THRESH_BINARY)
    # Isolate blue pixels, then binarize for contour detection.
    mask_blue = cv2.inRange(hsv,lower_blue,upper_blue)
    blue = cv2.bitwise_and(cv_image_input,cv_image_input,mask=mask_blue)
    blue_gray = cv2.cvtColor(blue,cv2.COLOR_BGR2GRAY)
    blue_gray_blurred = cv2.GaussianBlur(blue_gray,(5,5),0)
    ret_b, thresh_b = cv2.threshold(blue_gray_blurred,0,255,cv2.THRESH_BINARY)
    _, center_blue_contours, hierarchy = cv2.findContours(thresh_b, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    blue_x = []  # NOTE(review): blue_x/blue_y are never populated or read.
    blue_y = []
    center_blue_x = []
    center_blue_y = []
    for c in center_blue_contours:
        peri = cv2.arcLength(c,True)
        approx = cv2.approxPolyDP(c,0.04*peri,True)
        (x,y,w,h) = cv2.boundingRect(approx)
        end=x+w
        # Size/position/shape heuristics for a valid blue marker --
        # presumably tuned for the target object; triangles (3 vertices)
        # and hexagons (6) are rejected. TODO confirm thresholds.
        if w>20 and w<100 and h<60 and x<250 and end<280 and len(approx)!= 3 and len(approx)!= 6 :
            center_blue_x.append(x)
            center_blue_y.append(y)
            cv2.drawContours(cv_image_input, [c], -1, (255,0,0),3)
    # Classify the first accepted contour by which side of x=160 it is on.
    if not center_blue_x:
        pass
    elif center_blue_x[0] > 160 :
        # NOTE(review): print() does not apply %-formatting here; the literal
        # "%d" string is printed together with the two values.
        print("right accracy : %d, count : %d ", center_blue_x[0], right_count+1)
        right_count +=1
        if right_count == 3:
            print("====================================")
            print("final destination : right")
            print("====================================")
            right_count = 0
    elif center_blue_x[0] < 160 :
        print("left accuracy : %d, count : %d ", center_blue_x[0], left_count+1)
        left_count +=1
        if left_count == 3:
            print("====================================")
            print("final destination : left")
            print("====================================")
            left_count = 0
    # Show the annotated frame; waitKey(1) keeps the HighGUI window alive.
    cv2.imshow('blud_area',cv_image_input), cv2.waitKey(1) & 0xFF
| [
"noreply@github.com"
] | ykiseong303.noreply@github.com |
0da101c0b34a4e6865d62f8973d1159415244fd5 | 02b2af5696f83bc0bdc9b24e4ca187413313bea4 | /payment/settings.py | 80534b47672b22b5e8d3f0599b36cf4fa530ec44 | [] | no_license | Jonolsh/djangoPayment | e6051de9b36a27cc6b687635d5b6d7a0e7880600 | 5b654c96bb6b10f436ad98a36997ced431cb6717 | refs/heads/main | 2023-07-15T20:06:22.637176 | 2021-08-21T08:24:42 | 2021-08-21T08:24:42 | 398,502,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,896 | py | """
Django settings for payment project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before deploying anywhere real.
SECRET_KEY = 'django-insecure-ellh7_00=u*#@v$85ztheho65a7-rsqyfnys)=_$dlp$z2h6d3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'payments'  # project app -- presumably the Stripe payment views; confirm
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'payment.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'payment.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',  # development-only database
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
if DEBUG:
    # STRIPE_PUBLISHABLE_KEY = 'test_publishable_key'
    # STRIPE_SECRET_KEY = 'test_secret_key'
    # NOTE(review): even test-mode Stripe keys are better kept out of the
    # repository (read them from environment variables instead).
    STRIPE_PUBLISHABLE_KEY = 'pk_test_51JGRrHCTZDAGFneNjy1sEJh985JHltkuUFdlfa3gsmlN7SfPFwaiPgtK6sk2NLeoyJqIK6I1oWNkSi7VtnUjB2jr00tGhgihur'
    STRIPE_SECRET_KEY = 'sk_test_51JGRrHCTZDAGFneN9UjYxvICZqDbDo44w4rCFGIobWOJI1ltdvx5B4C7imgwmtDK5aZVSGzDJBZaSSLSopEqVQWU00MOv7HwB9'
# Uncomment these lines if you have a live keys
# else:
#     STRIPE_PUBLISHABLE_KEY = 'production_publishable_key'
#     STRIPE_SECRET_KEY = 'production_secret_key'
# 4242 4242 4242 4242
# card number 4242 4242 4242 4242
# card number 4242 4242 4242 4242
"johndoe@example.com"
] | johndoe@example.com |
a64dd6a6a11bc09c23986bfbffcc98651ae24a13 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayDataPrinterStatusGetModel.py | c43c09a37737dbcb0c58721d4739802f642f64b0 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 2,461 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayDataPrinterStatusGetModel(object):
    """Request model for querying the status of an Alipay IoT printer.

    Carries the OAuth access token, the ISV client credentials and the
    serial number of the target printer device.
    """

    # Request fields, in the order they are serialized.
    _FIELD_NAMES = ('access_token', 'client_id', 'client_secret', 'device_sn')

    def __init__(self):
        self._access_token = None
        self._client_id = None
        self._client_secret = None
        self._device_sn = None

    @property
    def access_token(self):
        return self._access_token

    @access_token.setter
    def access_token(self, value):
        self._access_token = value

    @property
    def client_id(self):
        return self._client_id

    @client_id.setter
    def client_id(self, value):
        self._client_id = value

    @property
    def client_secret(self):
        return self._client_secret

    @client_secret.setter
    def client_secret(self, value):
        self._client_secret = value

    @property
    def device_sn(self):
        return self._device_sn

    @device_sn.setter
    def device_sn(self, value):
        self._device_sn = value

    def to_alipay_dict(self):
        """Serialize the populated (truthy) fields into a plain dict."""
        params = dict()
        for field in self._FIELD_NAMES:
            value = getattr(self, field)
            if not value:
                continue
            # Nested API objects know how to serialize themselves.
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayDataPrinterStatusGetModel()
        for field in AlipayDataPrinterStatusGetModel._FIELD_NAMES:
            if field in d:
                setattr(o, field, d[field])
        return o
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
802845374a8d2e914fa600a6b1fc6df2037e16bd | dd586139c891041a4e84fdda3d3e6180b44f3c72 | /TTH/MEAnalysis/gc/hadd_local.py | c74482f68482d098726baa24a1fe6fdccebda835 | [] | no_license | mmeinhard/CodeThesis | e92c734dacad4e3bfc9065b98b8a8ae00908aae3 | 7792c96048e250e0008861062d4f63069204efeb | refs/heads/master | 2023-07-15T02:53:39.503146 | 2021-08-14T16:44:35 | 2021-08-14T16:44:35 | 396,058,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | #!/usr/bin/env python
#This script recursively adds root files in directories
# e.g. /a/b/c/output_*.root -> /a/b/c.root
from TTH.MEAnalysis.ParHadd import par_hadd
import glob
import sys
import os, fnmatch
#recurse over the given path
for path, dirs, files in os.walk(sys.argv[1]):
#allow alternative destination
if(len(sys.argv)>2):
destination = sys.argv[2]
else:
destination = ""
#Check if there are root files in this path
rootfiles = filter(lambda x: x.endswith("root"), files)
#If yes, this is a sample directory
isSample = False
if len(rootfiles)>0:
isSample = True
if not isSample:
continue
#Add the full path
rootfiles = map(lambda f: os.path.join(path, f), rootfiles)
print "adding", path
#do the hadd (in parallel)
if(destination == ""):
print ".....", path
par_hadd(path + ".root", rootfiles, 250, 10, 3)
else:
name = path.split("/")[-1]
par_hadd(name + ".root", rootfiles, 250, 10, 3)
| [
"marenm@student.ethz.ch"
] | marenm@student.ethz.ch |
8d08f430112106a5799b7d4f02ad5511ef9e4ea2 | 34148545a20f0b9fe07860d1107e6aab2ec1f75d | /info_spider/Scrapy_E_info_V1_01/build/lib/Scrapy_E_info_V1_01/spiders/chinaLuqiao_V1.py | 28d1c2fbc615c7928c830b6777c66d9cac893c7d | [] | no_license | tangzhutao/chf | 9bb9fa9b6ad75f1b587364e1005922c5bdddb4ca | 4b249aee9689d3669306bbf020ad7fbb7e6b92bc | refs/heads/master | 2022-12-03T03:55:17.308231 | 2020-08-21T09:57:47 | 2020-08-21T09:57:47 | 288,969,437 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,774 | py | # -*- coding: utf-8 -*-
import scrapy
import re
import time
from scrapy.utils import request
from Scrapy_E_info_V1_01.items import InfoItem
class ChinaluqiaoV1Spider(scrapy.Spider):
    """Crawl industry-news articles from news.9to.com (中国路桥网, 'China
    Luqiao') listing pages and yield one ``InfoItem`` per article."""
    name = 'chinaLuqiao_V1'
    allowed_domains = ['news.9to.com']
    # start_urls = ['http://news.9to.com/']
    base_url = 'http://news.9to.com/'
    url_name = '中国路桥网'
    url1 = {
        # key format: '<industry category>-<information category>-<page count>'
        # (original note gave the example 50-市场行情-772);
        # value is the listing-URL prefix the page number is appended to.
        '481-行业资讯-2': 'http://news.9to.com/list.php/catid-232-page-',
    }
    def start_requests(self):
        """Emit one request per listing page for every configured category."""
        for c, u in self.url1.items():
            cs = c.split('-')
            for i in range(int(cs[2])):  # cs[2] is the number of listing pages
                url = u + f'{i + 1}/'
                # dont_filter=True: listing pages are re-fetched on every run.
                req = scrapy.Request(url=url, callback=self.parse, meta={'industry_Lcategories': cs[0], 'information_categories': cs[1]}, dont_filter=True)
                yield req
    def parse(self, response):
        """Parse a listing page: build a partially-filled InfoItem per entry
        and request each article page to complete the remaining fields."""
        config_list = response.xpath('//div[@class="catlist"]/ul/li')
        # Indices skipped below -- presumably separator/ad <li> rows in the
        # listing markup. TODO confirm against the live page.
        num = [5, 11, 17, 23]
        for i in range(len(config_list)):
            if i not in num:
                item = InfoItem()
                link = config_list[i].xpath('./a/@href').extract_first()
                title = config_list[i].xpath('./a/text()').extract_first()
                issue_time = config_list[i].xpath('./i/text()').extract_first()
                # print(title, link, issue_time)
                req = scrapy.Request(url=link, callback=self.parse_detail, meta={'item': item}, dont_filter=True)
                # Stable id derived from the request fingerprint (dedup key).
                item['id'] = request.request_fingerprint(req)
                item['title'] = title
                item['title_images'] = None
                item['content_url'] = link
                item['issue_time'] = issue_time[:10] if issue_time else None  # keep the date part only
                item['industry_categories'] = 'E'
                item['industry_Lcategories'] = response.meta['industry_Lcategories'][:2]
                item['industry_Mcategories'] = response.meta['industry_Lcategories']
                item['industry_Scategories'] = None
                item['information_categories'] = response.meta['information_categories']
                yield req
    def parse_detail(self, response):
        """Parse an article page: fill content, source/author, image URLs and
        bookkeeping fields on the item from ``meta``, then yield it."""
        item = response.meta['item']
        content = response.xpath('//div[@id="article"]').extract_first()
        info = response.xpath('//div[@class="info"]/text()').extract_first()
        # print(info)
        # '来源:' labels the source and '作者:' the author; the source regex
        # only captures non-ASCII (CJK) characters after the label.
        source = re.search(r'来源:([^\x00-\xff]+)', info.strip())
        author = re.search(r'作者:(.+)', info.strip())
        item['source'] = source.group(1) if source else '中国路桥网'
        item['author'] = author.group(1).strip() if author else None
        item['information_source'] = '中国路桥网'
        item['content'] = content
        # Lazy-loaded images keep the real URL in the 'original' attribute.
        images = response.xpath('//div[@id="article"]//img/@original').extract()
        if images:
            images_url = []
            for img in images:
                if 'http' in img:
                    images_url.append(img)
                else:
                    # Relative path: prefix with the site root.
                    image = f'{self.base_url}{img}'
                    images_url.append(image)
            images_urls = '; '.join(images_url)
            item['images'] = images_urls if images_urls else None
        else:
            item['images'] = None
        item['attachments'] = None
        item['area'] = None
        item['address'] = None
        item['tags'] = None
        item['sign'] = '19'  # NOTE(review): magic site/source code; meaning not visible here
        item['update_time'] = str(int(time.time() * 1000))  # epoch milliseconds
        if content:
            yield item
            self.logger.info("title({}), issue_time({})".format(item['title'], item['issue_time']))
if __name__ == '__main__':
    # Local debugging entry point: run this spider through the Scrapy CLI.
    from scrapy import cmdline
    cmdline.execute('scrapy crawl chinaLuqiao_V1'.split())
| [
"18819492919@163.com"
] | 18819492919@163.com |
50629f2f60951afbc01158af0f3bec30d4730cb3 | 2bdc01012fbcf479e841c97508e1765cf9848100 | /3_13_setcomps.py | 59a7f4e2bae25ff83a02528cb2555fdd82602842 | [] | no_license | xushubo/fluent_python | 81a5000185a47fdecd93c33b1ea3419bef36ffa1 | b4b4951a46b78e8c07047e6c1d5375cdb4d9a469 | refs/heads/master | 2021-09-05T12:15:19.408321 | 2018-01-27T12:40:31 | 2018-01-27T12:40:31 | 104,754,039 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | '''新建一个 Latin-1 字符集合,该集合里的每个字符的 Unicode 名字里都有“SIGN”这个单词'''
# Build the set of Latin-1 characters whose Unicode name contains the word
# "SIGN", then demonstrate symmetric difference on two small integer sets.
from unicodedata import name

sign = set()
for code in range(32, 256):
    char = chr(code)
    # name(..., '') returns '' for unnamed code points instead of raising.
    if 'SIGN' in name(char, ''):
        sign.add(char)
print(sign)

s = {1, 2, 3, 4}
z = {3, 4, 5, 6}
# The method form and the ^ operator are equivalent, and ^ is commutative.
print(s.symmetric_difference(z))
print(s ^ z)
print(z ^ s)
"tmac523@163.com"
] | tmac523@163.com |
22fcddf32c183ea18e65ac064a58a0099dca38de | a8544cedbec52f929e91e617a5f484d987352525 | /src/api/technique/definitions/transitive/definition.py | ef98df74da87a328b0af622356b6db76a2495a0d | [] | no_license | thearod5/Tracer | 75df513ee2500bffc32c29139983990867239369 | 67ee3d7296fb4c788c111387b335ab9804815655 | refs/heads/master | 2023-05-31T13:53:34.640103 | 2021-06-18T01:00:10 | 2021-06-18T01:00:10 | 349,507,401 | 1 | 0 | null | 2021-06-18T01:00:10 | 2021-03-19T17:41:37 | Python | UTF-8 | Python | false | false | 2,787 | py | """
TODO
"""
from typing import List
from api.technique.definitions.direct.technique import DirectTechnique
from api.technique.parser.itechnique_definition import ITechniqueDefinition
from api.technique.variationpoints.aggregation.aggregation_method import (
AggregationMethod,
)
from api.technique.variationpoints.algebraicmodel.models import AlgebraicModel
from api.technique.variationpoints.scalers.scaling_method import ScalingMethod
TRANSITIVE_COMMAND_SYMBOL = "x"
def get_algebraic_model(techniques: "List[DirectTechnique]"):
    """Return the algebraic model shared by all component techniques.

    :param techniques: the direct techniques composing a transitive technique
    :return: the common algebraic model, or None if *techniques* is empty
    :raises ValueError: if the techniques disagree on their algebraic model
    """
    # Original annotation was the list literal ``[DirectTechnique]``; a string
    # forward reference avoids evaluating an invalid (non-type) annotation.
    algebraic_model = None
    for technique in techniques:
        if algebraic_model is None:
            algebraic_model = technique.definition.algebraic_model
        elif algebraic_model != technique.definition.algebraic_model:
            raise ValueError(
                "Transitive technique expected algebraic models to be the same"
            )
    return algebraic_model
class TransitiveTechniqueDefinition(ITechniqueDefinition):
    """Definition of a transitive technique.

    A transitive technique chains two or more direct techniques so that
    traces flow from the source level of the first component to the target
    level of the last one, combining intermediate similarities with a
    configurable aggregation method.
    """

    def __init__(self, parameters: List[str], components: List[str]):
        self.algebraic_model: AlgebraicModel = None
        self.scaling_method: ScalingMethod = None
        self.transitive_aggregation: AggregationMethod = None
        self._component_techniques: List[DirectTechnique] = []
        super().__init__(parameters, components)

    def parse(self):
        """Populate the definition from ``self.parameters``/``self.components``.

        ``parameters[0]`` names the transitive aggregation method and
        ``parameters[1]`` the scaling method; each component is a triple whose
        last two entries define one direct technique.
        """
        # Both parameters[0] and parameters[1] are read below, so two entries
        # are required (the original ``>= 1`` bound allowed an IndexError).
        assert len(self.parameters) >= 2
        self.transitive_aggregation = AggregationMethod(self.parameters[0])
        self.scaling_method = ScalingMethod(self.parameters[1])
        assert len(self.components) >= 2
        for component in self.components:
            assert len(component) == 3
            technique = DirectTechnique(component[1], component[2])
            self._component_techniques.append(technique)
        self.algebraic_model = get_algebraic_model(self._component_techniques)
        # add source and target levels
        self.source_level = self._component_techniques[0].definition.source_level
        self.target_level = self._component_techniques[-1].definition.target_level

    def validate(self):
        """Check that parsing produced a complete, consistent definition."""
        super().validate()
        assert self.algebraic_model is not None
        assert self.transitive_aggregation is not None
        assert len(self._component_techniques) >= 2, self._component_techniques

    def get_component_techniques(self) -> List[DirectTechnique]:
        """Return the direct techniques composing this transitive technique."""
        return self._component_techniques

    @staticmethod
    def get_symbol() -> str:
        """Return the command symbol identifying transitive techniques."""
        return TRANSITIVE_COMMAND_SYMBOL
| [
"vhsalbertorodriguez@gmail.com"
] | vhsalbertorodriguez@gmail.com |
cf126456d54febb62fdd455046e65ad4da51e205 | 26c5f6bb53331f19e2a0ef0797b752aca9a89b19 | /caffe2/python/tutorials/py_gen/Training_a_Model.py | e8719578536b5a72e5522a148d32b4d6238cb093 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | Maratyszcza/caffe2 | 4c68baedbdaf5378f9da0ebf58b232478f689ae4 | f4794ac7629e6825b2c8be99950ea130b69c4840 | refs/heads/master | 2023-06-20T18:23:06.774651 | 2018-03-26T07:41:33 | 2018-03-26T18:22:53 | 122,715,434 | 1 | 0 | Apache-2.0 | 2018-02-24T07:28:21 | 2018-02-24T07:28:21 | null | UTF-8 | Python | false | false | 1,011 | py | #########################################################
#
# DO NOT EDIT THIS FILE. IT IS GENERATED AUTOMATICALLY. #
# PLEASE LOOK INTO THE README FOR MORE INFORMATION. #
#
#########################################################
# coding: utf-8
# ### Dataset Formats
#
# When you look at a model and its dataset, one of the things that will be specified is how the dataset is organized. Additionally, within Caffe2 when you load the data you will need to relay this specification. When trying to optimize training and increase its speed you may find discussions related to changing this format. For the purposes of this tutorial you don't need to worry about that, but it is good to recognize the different flavors and the fact that the raw data is loaded into temporary databases to facilitate the network's training and testing.
#
# #### Data Ordering
#
# * NCHW: [description]
# * Others: [description]
#
# #### Databases
#
# * minidb: [description]
# * leveldb: [description]
# * others...
#
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
811001ee821bfc40f0daa9b620bbaf67d35be28f | 163973e9a6bd5a0c422bfc37c9eba09f51f86a96 | /surveygizmo/api/survey.py | 9dca2b7ebaecfd7623d0b16f86a1ad8c351404d9 | [
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hellofresh/SurveyGizmo | 191bdb859409d42599190badcdf415cdd2c38d6a | 998ea795cc74e12db86bc7686ffa8278702090e8 | refs/heads/master | 2023-07-09T10:33:09.710834 | 2021-05-25T12:03:38 | 2021-05-25T12:03:38 | 47,969,483 | 0 | 1 | BSD-3-Clause | 2023-06-16T10:53:54 | 2015-12-14T10:58:20 | Python | UTF-8 | Python | false | false | 888 | py |
from surveygizmo.api import base
class Survey(base.Resource):
    """REST resource wrapper for SurveyGizmo surveys.

    Each method injects the survey id (when one is required) into the keyword
    arguments and defers to the generic base.Resource implementation, which
    formats ``resource_fmt_str`` with ``survey_id``.
    """

    resource_fmt_str = 'survey/%(survey_id)s'
    resource_id_keys = ['survey_id']

    def list(self, **kwargs):
        """List all surveys on the account."""
        return super(Survey, self).list(**kwargs)

    def get(self, survey_id, **kwargs):
        """Fetch a single survey."""
        kwargs['survey_id'] = survey_id
        return super(Survey, self).get(**kwargs)

    def create(self, **kwargs):
        """Create a new survey."""
        return super(Survey, self).create(**kwargs)

    def update(self, survey_id, **kwargs):
        """Update an existing survey."""
        kwargs['survey_id'] = survey_id
        return super(Survey, self).update(**kwargs)

    def copy(self, survey_id, **kwargs):
        """Duplicate an existing survey."""
        kwargs['survey_id'] = survey_id
        return super(Survey, self).copy(**kwargs)

    def delete(self, survey_id, **kwargs):
        """Delete a survey."""
        kwargs['survey_id'] = survey_id
        return super(Survey, self).delete(**kwargs)
| [
"rpkilby@ncsu.edu"
] | rpkilby@ncsu.edu |
56320efd4df4f8e74fa05e6b1494c05cc8fbf300 | c9500ad778b8521aaa85cb7fe3239989efaa4799 | /plugins/palo_alto_cortex_xdr/icon_palo_alto_cortex_xdr/actions/isolate_endpoint/schema.py | 7dd69ef97caa5030f535de5ecdf10180343c94c7 | [
"MIT"
] | permissive | rapid7/insightconnect-plugins | 5a6465e720f114d71b1a82fe14e42e94db104a0b | 718d15ca36c57231bb89df0aebc53d0210db400c | refs/heads/master | 2023-09-01T09:21:27.143980 | 2023-08-31T10:25:36 | 2023-08-31T10:25:36 | 190,435,635 | 61 | 60 | MIT | 2023-09-14T08:47:37 | 2019-06-05T17:05:12 | Python | UTF-8 | Python | false | false | 2,512 | py | # GENERATED BY INSIGHT-PLUGIN - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
    # Human-readable action description shown in the InsightConnect UI.
    DESCRIPTION = "Isolate or unisolate an endpoint"
class Input:
    # Keys of the action's input parameters (must match the schema below).
    ENDPOINT = "endpoint"
    ISOLATION_STATE = "isolation_state"
    WHITELIST = "whitelist"
class Output:
    # Key of the action's single output value.
    RESULT = "result"
class IsolateEndpointInput(insightconnect_plugin_runtime.Input):
    """Generated input-schema wrapper for the Isolate Endpoint action."""
    # JSON schema used to validate and document the action's input.
    schema = json.loads(r"""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "endpoint": {
      "type": "string",
      "title": "Endpoint",
      "description": "Endpoint to isolate or unisolate. This can be an IPv4 address, hostname, or endpoint ID",
      "order": 1
    },
    "isolation_state": {
      "type": "string",
      "title": "Isolation State",
      "description": "Isolation state to set",
      "default": "Isolate",
      "enum": [
        "Isolate",
        "Unisolate"
      ],
      "order": 2
    },
    "whitelist": {
      "type": "array",
      "title": "Whitelist",
      "description": "This list contains a set of devices that should not be blocked. This can be a combination of IPv4 addresses, hostnames, or endpoint IDs",
      "default": [],
      "items": {
        "type": "string"
      },
      "order": 3
    }
  },
  "required": [
    "endpoint",
    "isolation_state"
  ],
  "definitions": {}
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
class IsolateEndpointOutput(insightconnect_plugin_runtime.Output):
    """Generated output-schema wrapper for the Isolate Endpoint action."""
    # JSON schema describing the action's result payload.
    schema = json.loads(r"""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "result": {
      "$ref": "#/definitions/isolation_result",
      "title": "Result",
      "description": "The result of the isolation request",
      "order": 1
    }
  },
  "required": [
    "result"
  ],
  "definitions": {
    "isolation_result": {
      "type": "object",
      "title": "isolation_result",
      "properties": {
        "action_id": {
          "type": "integer",
          "title": "Action ID",
          "description": "Action ID",
          "order": 1
        },
        "status": {
          "type": "string",
          "title": "Status",
          "description": "Status",
          "order": 2
        },
        "endpoints_count": {
          "type": "integer",
          "title": "Endpoints Count",
          "description": "Endpoints count",
          "order": 3
        }
      }
    }
  }
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
| [
"noreply@github.com"
] | rapid7.noreply@github.com |
c18d33226b8769a2870153f029f63698bbf582c3 | b87f66b13293782321e20c39aebc05defd8d4b48 | /maps/build/mayavi/enthought/mayavi/core/null_engine.py | 7a2d139d5fd772b3197810db19517fa9f064ac91 | [] | no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,488 | py | """
A Null engine for Mayavi.
The `NullEngine` class lets you create a full-fledged (almost) Mayavi
engine without the need for it popping up a window.
It is useful for testing or for using VTK as numerical engine. It does
not allow for rendering.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
from enthought.traits.api import HasTraits, Any, Event, Callable
from enthought.mayavi.core.engine import Engine
def dummy_viewer_factory():
    """Factory function for the dummy viewer.

    Returns a fresh `DummyViewer`, which mimics a viewer's API without
    creating any window or scene.
    """
    return DummyViewer()
################################################################################
# `NullEngine` class.
################################################################################
class NullEngine(Engine):
    """
    This class represents a NullEngine which creates a DummyViewer with
    a scene set to None. This allows us to write full mayavi scripts
    without the need for a UI and this is perfect for testing, or to
    use Mayavi (and VTK) as a numerical engine.
    This engine does not allow for rendering.
    """
    # The Engine calls this factory whenever it needs a "viewer"; the
    # DummyViewer it produces has no real scene, so nothing is displayed.
    scene_factory = Callable(dummy_viewer_factory)
################################################################################
# `DummyViewer` class.
################################################################################
class DummyViewer(HasTraits):
    """Mimics the API of a viewer."""
    scene = Any        # placeholder for the scene; left as None by NullEngine
    closing = Event    # events a real viewer would fire; declared so that
    activated = Event  # engine/listener wiring keeps working
| [
"fspaolo@gmail.com"
] | fspaolo@gmail.com |
d14066ade435cd095ff9bd332a3426563ab52023 | a3387fbcc918acb55d289ffb61b9fb603203dc11 | /Puzzles/2021-05/purdy_sol.py | ca220a40c3b870fb1379c630b321d0f1513b6474 | [] | no_license | fbhs-cs/purdys-puzzles | 13e970ff909ff2e093b3b9d9777faac47c099913 | 1cf3f9c52677843fad781e46304e1485a91aae58 | refs/heads/master | 2023-08-17T06:28:06.659751 | 2023-08-09T14:45:43 | 2023-08-09T14:45:43 | 212,085,565 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | # https://stackoverflow.com/questions/2049582/how-to-determine-if-a-point-is-in-a-2d-triangle
# Basic idea -- get area of triangle ABC, then get areas of ABD, BCD, and ACD and add them. If their sum is the same is ABC, the D must be inside of ABC.
def area(a, b, c):
return abs(0.5*(a[0]*(b[1]-c[1])+b[0]*(c[1]-a[1])+c[0]*(a[1]-b[1])))
def solve_area(d,a,b,c):
orig_area = area(a,b,c)
t1 = area(d,a,b)
t2 = area(d,a,c)
t3 = area(d,b,c)
return round(orig_area,3) == round(t1+t2+t3,3)
def solve_puzzle():
contains = 0
with open('triangles.txt') as f:
for line in f:
points = line.strip().split(",")
a = [int(x) for x in points[:2]]
b = [int(x) for x in points[2:4]]
c = [int(x) for x in points[4:6]]
d = [int(x) for x in points[6:]]
if solve_area(d,a,b,c):
contains += 1
print(line)
print(contains)
from random import randint
def generate_points():
this_line = ""
for i in range(7):
this_line += str(randint(-1000,1000)) +","
this_line += str(randint(-1000,1000))
return this_line
def create_data():
with open("triangles.txt","w") as f:
for i in range(10000):
f.write(generate_points() + "\n")
#create_data()
solve_puzzle()
# ANSWER 786 | [
"cpurdy@flourbluffschools.net"
] | cpurdy@flourbluffschools.net |
868d26e6139cacb59be4c08cda180637caaa1c22 | d9aba5a9c2011acfc9e5693481db01df7c6f816b | /app_cbv/views.py | df27e15be4a66fcf80f9263fa99a4239848f348c | [] | no_license | Kaalesson/webinar_drf | 479f0907d1908c8b78ce76ac9d251dfad32964e7 | 3d5f7de8fae5d88f2d510d2aa1bb02c605757cee | refs/heads/master | 2022-04-18T03:23:26.824931 | 2020-04-19T07:42:46 | 2020-04-19T07:42:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,857 | py | import json
from django.core import serializers
from django.views.generic.base import View
from django.views.generic.list import MultipleObjectMixin
from django.views.generic.detail import SingleObjectMixin
from django.http import HttpResponse
from posts_models.models import Post
from app_cbv.forms import PostForm
class PostListView(MultipleObjectMixin, View):
    """Hand-rolled JSON list/create endpoint for Post objects (no DRF)."""
    model = Post
    def get(self, request, *args, **kwargs):
        """Return all posts serialized as a JSON array."""
        posts_qs = self.get_queryset()
        return HttpResponse(
            serializers.serialize('json', posts_qs, indent=4),
            content_type='application/json'
        )
    def post(self, request, *args, **kwargs):
        """Create a post from a JSON request body; respond 201 on success.

        NOTE(review): validation errors are returned with the default 200
        status -- consider ``status=400`` for invalid payloads.
        """
        post_data = json.loads(request.body)
        post_form = PostForm(post_data)
        if post_form.is_valid():
            post_form.save()
            return HttpResponse('OK', status=201)
        return HttpResponse(
            json.dumps(post_form.errors),
            content_type='application/json'
        )
class PostDetailView(SingleObjectMixin, View):
    """Hand-rolled JSON retrieve/update/delete endpoint for a single Post."""
    model = Post
    def get(self, request, *args, **kwargs):
        """Return one post (looked up by URL kwargs) as JSON."""
        post_obj = self.get_object()
        return HttpResponse(
            serializers.serialize('json', [post_obj], indent=4),
            content_type='application/json'
        )
    def put(self, request, *args, **kwargs):
        """Update the post from a JSON request body.

        NOTE(review): validation errors are returned with the default 200
        status -- consider ``status=400`` for invalid payloads.
        """
        post_obj = self.get_object()
        post_data = json.loads(request.body)
        post_form = PostForm(instance=post_obj, data=post_data)
        if post_form.is_valid():
            post_form.save()
            return HttpResponse('OK')
        return HttpResponse(
            json.dumps(post_form.errors),
            content_type='application/json'
        )
    def delete(self, request, *args, **kwargs):
        """Delete the post and respond 204 No Content."""
        post_obj = self.get_object()
        post_obj.delete()
        return HttpResponse(status=204)
| [
"roman.oxenuk@gmail.com"
] | roman.oxenuk@gmail.com |
257cedc5795f405fed2da26aa6941b3872080efa | ee92057a8ebc91ba90d8055a9bece25d24211499 | /kattis/lowest-common-ancestor-of-a-binary-tree-iv/lowest-common-ancestor-of-a-binary-tree-iv.py | 1f9eec47869c00c4a5d00445d7f95caa871ad059 | [] | no_license | KendrickAng/competitive-programming | ce0a4f44f592f295c2f8cd7e854139f18fb8853a | f9768a2020f801b8e4787cc853398b8258a0bf09 | refs/heads/master | 2022-05-29T07:21:32.607089 | 2022-04-24T16:35:14 | 2022-04-24T16:35:14 | 254,402,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def lowestCommonAncestor(self, root: 'TreeNode', nodes: 'List[TreeNode]') -> 'TreeNode':
        """Return the lowest common ancestor of all *nodes* in the binary
        tree rooted at *root* (every node in *nodes* is assumed present)."""
        targets = set(nodes)

        def search(cur):
            # Post-order walk: returns a found target (or sub-LCA) from this
            # subtree, or None when the subtree contains no target.
            if cur is None:
                return None
            if cur in targets:
                return cur
            left_hit = search(cur.left)
            right_hit = search(cur.right)
            # Targets found on both sides -> this node is their ancestor.
            if left_hit and right_hit:
                return cur
            return left_hit if left_hit else right_hit

        return search(root)
"kendrick.wh@outlook.com"
] | kendrick.wh@outlook.com |
eef8ab803b4bc224b1cad11a87afdc4adf310e16 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part004780.py | b1840bc7bac055d347c1e02723ae759ff69ebc92 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,935 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher140331(CommutativeMatcher):
    """Auto-generated matchpy many-to-one matcher for a commutative Add
    pattern; the numeric state comments below come from the generator."""
    _instance = None
    # Pattern 0: one fixed subpattern plus a collect-the-rest Add variable.
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.2.0', 1, 1, None), Add)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Add
    max_optional_count = 0
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily-created process-wide singleton.
        if CommutativeMatcher140331._instance is None:
            CommutativeMatcher140331._instance = CommutativeMatcher140331()
        return CommutativeMatcher140331._instance
    @staticmethod
    def get_match_iter(subject):
        # Generated decision tree: yields (pattern_index, substitution) pairs.
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 140330
        subst1 = Substitution(subst0)
        try:
            # Branch 1: bind the coefficient variable to the implicit factor 1.
            subst1.try_add_variable('i2.2.1.2.1.0_1', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 140332
            if len(subjects) >= 1:
                tmp2 = subjects.popleft()
                subst2 = Substitution(subst1)
                try:
                    subst2.try_add_variable('i2.2.1.2.1.0', tmp2)
                except ValueError:
                    pass
                else:
                    pass
                    # State 140333
                    if len(subjects) == 0:
                        pass
                        # 0: x*d
                        yield 0, subst2
                subjects.appendleft(tmp2)
        if len(subjects) >= 1 and isinstance(subjects[0], Mul):
            # Branch 2: the subject is a product; delegate its factors to the
            # nested commutative matcher.
            tmp4 = subjects.popleft()
            associative1 = tmp4
            associative_type1 = type(tmp4)
            subjects5 = deque(tmp4._args)
            matcher = CommutativeMatcher140335.get()
            tmp6 = subjects5
            subjects5 = []
            for s in tmp6:
                matcher.add_subject(s)
            for pattern_index, subst1 in matcher.match(tmp6, subst0):
                pass
                if pattern_index == 0:
                    pass
                    # State 140336
                    if len(subjects) == 0:
                        pass
                        # 0: x*d
                        yield 0, subst1
            subjects.appendleft(tmp4)
        return
        yield
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from multiset import Multiset
from matchpy.utils import VariableWithCount
from .generated_part004781 import * | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
5f2ba76d460fc6e0ce11508e7aa131f3bd5d91a8 | b33ddc7b89d05e19fdeb69593872fd174fab9f4f | /URI-py/2129.py | e19d317ca7e5f9e76f9a91d89125795d6880fcfc | [] | no_license | ThiagoCComelli/URI-Online-Judge | 8b8d609d880342b39ba0d396c0610ecb7e01a5af | 5348f736b2d683f4b857232c22cccb7c1d8b8d65 | refs/heads/master | 2020-07-23T15:14:05.353948 | 2020-03-10T19:42:12 | 2020-03-10T19:42:12 | 207,606,956 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | # -*- coding: utf-8 -*-
instancia = 1
while True:
    # One test case per line; the judge closes stdin when input is exhausted.
    try:
        n = int(input())
    except EOFError:
        break
    # n! computed iteratively (Python ints are arbitrary precision).
    fatorial = 1
    for fator in range(2, n + 1):
        fatorial *= fator
    print("Instancia %d" % instancia)
    instancia += 1
    # Last non-zero digit of n!: strip trailing zeros, take the final digit.
    print(str(fatorial).rstrip("0")[-1])
    print()
| [
"thiago.comelli@outlook.com"
] | thiago.comelli@outlook.com |
c6155b355152d4d1086fb83a5604c3b17cf973b4 | 99e1a15d8f605be456f17608843c309dd8a3260f | /src/Battle/Attack/Steps/remove_pp_step.py | 98b488b3986495a6c7f6620716ce5a0796cfbabf | [] | no_license | sgtnourry/Pokemon-Project | e53604096dcba939efca358e4177374bffcf0b38 | 3931eee5fd04e18bb1738a0b27a4c6979dc4db01 | refs/heads/master | 2021-01-17T23:02:25.910738 | 2014-04-12T17:46:27 | 2014-04-12T17:46:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | from Battle.Attack.Steps.attack_step import AttackStep
from Battle.Attack.HitDelegates.hitself_delegate import HitSelfDelegate
class RemovePPStep(AttackStep):
    """ Attack step that deducts Power Points when an attack is used """

    def perform(self, user, target, environment):
        """ Deduct PP from the parent attack, honoring the target's Pressure """
        cost = self.getPressure(target)
        if self.parent.currPowerPoints > 0:
            self.parent.currPowerPoints -= cost
        return []

    def getPressure(self, target):
        """ PP cost per use; self-targeted attacks always cost exactly 1 """
        if isinstance(self.parent.hitDelegate, HitSelfDelegate):
            return 1
        return target.getAbility().powerPointsPressure()
"cloew123@gmail.com"
] | cloew123@gmail.com |
59677c65e22cff6907dbc6fdb170ea5e6560d5ed | d87483a2c0b50ed97c1515d49d62c6e9feaddbe0 | /.history/buyTopStocks_20210202221338.py | 6521c8fe240b688235b1831cfb54ed0095b24ba1 | [
"MIT"
] | permissive | HopperKremer/hoptrader | 0d36b6e33922414003cf689fb81f924da076a54b | 406793c10bc888648290fd15c7c2af62cf8c6c67 | refs/heads/main | 2023-06-12T15:51:00.910310 | 2021-07-06T16:15:41 | 2021-07-06T16:15:41 | 334,754,936 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | # from excel import OpenExcel
from tda import auth, client
import os, sys
import time
import json

currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config
from selenium import webdriver

DRIVER_PATH = "/home/hopper/chromedriver"
redirect_uri = "https://localhost"

# Reuse one browser session for both the OAuth login flow and the scraping
# below (the original created a second driver with an undefined PATH name).
driver = webdriver.Chrome(DRIVER_PATH)

try:
    c = auth.client_from_token_file(config.token_path, config.api_key)
except FileNotFoundError:
    c = auth.client_from_login_flow(
        driver, config.api_key, redirect_uri, config.token_path
    )

# Scrape the Financhill stock-score screener page.
# Fixed from the original: added the missing `import time`, removed references
# to the undefined names `stock`, `soldFile` and `soldStocks`, and removed a
# stray HTML fragment that made the file a SyntaxError.
driver.get('https://financhill.com/screen/stock-score')
time.sleep(2)  # give the page time to render before reading the score
score = int(driver.find_element_by_tag_name('h2').text)
print(score)
driver.quit()
"hopperkremer@gmail.com"
] | hopperkremer@gmail.com |
6ef7f16d0a9d61f86c6b5fdd9aa5e6ba6c961aa6 | cdfc6b79b770b0f8120bdde772b3a3f25a3cb1aa | /src/pulsarvoip/settings.py | 321256abf1c9e6c6349a39c42e0ddb62e561355a | [] | no_license | avkpol/pulsar | fcd47f5f1ad94e346db01867bfa7c1d15c26c8a5 | a3a6997f12a0dfb6eca9929813723a36eea045bc | refs/heads/master | 2021-01-10T14:01:50.576638 | 2016-01-23T20:42:01 | 2016-01-23T20:42:01 | 47,421,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,272 | py | """
Django settings for pulsarvoip project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
#BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DEBUG = True
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#6r8l+_0@inqq_n_vv80n28fu_r3*s^7d21+&y!4^#)p8oxcq8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'auth.User'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
#'bootstrap3',
#'treemenus',
'crispy_forms',
'bootstrapform',
'pulsarvpn',
)
SITE_ID = 1
LOGIN_URL ='/accounts/login/'
LOGIN_REDIRECT_URL = '/'
ACCOUNT_AUTHENTICATION_METHOD = "username_email" #(="username" | "email" | "username_email")
ACCOUNT_CONFIRM_EMAIL_ON_GET = True # (=False)
ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = LOGIN_URL
ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = LOGIN_REDIRECT_URL
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 10
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = None #choices are: "mandatory", "optional", or None
ACCOUNT_EMAIL_SUBJECT_PREFIX = "Subject is: "
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "http" #if secure use https
ACCOUNT_LOGOUT_ON_GET = False #log user out right away.
ACCOUNT_LOGOUT_REDIRECT_URL = LOGIN_URL
ACCOUNT_SIGNUP_FORM_CLASS =None # add a custom sign up form
ACCOUNT_SIGNUP_PASSWORD_VERIFICATION =True # use False if you don't want double password fields
ACCOUNT_UNIQUE_EMAIL= True #enforces emails are unique to all accounts
ACCOUNT_USER_MODEL_USERNAME_FIELD = "username" # If you're using a Custom Model, maybe it's "email"
ACCOUNT_USER_MODEL_EMAIL_FIELD ="email"
#ACCOUNT_USER_DISPLAY (=a callable returning user.username)
ACCOUNT_USERNAME_MIN_LENGTH = 4
ACCOUNT_USERNAME_BLACKLIST =['some_username_youdon\'t_want']
ACCOUNT_USERNAME_REQUIRED =True #do you want them to have a user name?
ACCOUNT_PASSWORD_INPUT_RENDER_VALUE =False #don't show the password
ACCOUNT_PASSWORD_MIN_LENGTH =6 #min length of password
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION =True #login the user after confirming email, if required.
# CRISPY_TEMPLATE_PACK = 'uni_form'
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'pulsarvoip.urls'
TEMPLATE_DIRS = (os.path.join(BASE_DIR, "templates"), )
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates"),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# 'django.core.context_processors.csrf',
# `allauth` needs this from django
'django.template.context_processors.request',
],
},
},
]
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
ROOT_URLCONF = 'pulsarvoip.urls'
WSGI_APPLICATION = 'pulsarvoip.wsgi.application'
CSRF_FAILURE_VIEW = True
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
#}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': '/etc/mysql/my.cnf',
},
}
}
# NOTE(review): hard-coded database credentials committed to source control —
# move these into environment variables or the my.cnf file referenced by
# DATABASES above, and rotate the exposed password/host.
DATABASE_ENGINE = 'mysql'
DATABASE_NAME = 'pulsarvoip_db'
DATABASE_USER = 'avkpol'
DATABASE_PASSWORD = '1963'
DATABASE_HOST = '84.22.98.170'
DATABASE_PORT = ''
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
#if DEBUG:
# STATICFILES_DIRS = ('/var/www/pulsarvoip/src/static/static-admin/',)
# #STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"), )
# STATIC_URL = '/static/'
# STATIC_ROOT = '/var/www/pulsarvoip/src/static/static-admin'
# #STATIC_ROOT = os.path.join(BASE_DIR, "static", "static-admin")
# MEDIA_URL = '/media/'
# MEDIA_ROOT = os.path.join(BASE_DIR, 'static', 'media')
TEMPLATE_DIRS = (os.path.join(BASE_DIR,"templates"),)
STATICFILES_DIRS = (os.path.join(BASE_DIR,"static"), )
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static", "admin")
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'static', 'media')
| [
"root@localhost.(none)"
] | root@localhost.(none) |
27b0102a936d518b5d3a99821b8bd379f946a615 | 6d33f5f65e211a48df8b5ff1622f9750ae374962 | /project-2-master/website/migrations/0003_auto_20200612_1807.py | b59a3fa21a233471b9bd7f54e691a6f1186b9108 | [] | no_license | mohammednew1999/project-2 | 0cf190550b40e5cb90cb1af466eb9cf09fda997d | 980373e02081cba6cdbdaf11f62bb8818ffe8b55 | refs/heads/master | 2022-11-09T19:56:32.946642 | 2020-06-19T16:19:17 | 2020-06-19T16:19:17 | 273,076,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | # Generated by Django 3.0.3 on 2020-06-12 15:07
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the `website` app: drops the
    # Job.image field and makes Job.status a nullable boolean (default False).

    dependencies = [
        ('website', '0002_auto_20200612_0027'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='job',
            name='image',
        ),
        migrations.AlterField(
            model_name='job',
            name='status',
            field=models.BooleanField(default=False, null=True),
        ),
    ]
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
89e82e39587e9370be620e2ff5f9be5c2acc71f9 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /iuenzEsAejQ4ZPqzJ_5.py | 97f4733e0b5371b8e19ec4dd126b2c0a0a240586 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | """
This is a **reverse coding challenge**. Normally you're given explicit
directions with how to create a function. Here, you must generate your own
function to satisfy the relationship between the inputs and outputs.
Your task is to create a function that, when fed the inputs below, produce the
sample outputs shown.
### Examples
3 ➞ 21
9 ➞ 2221
17 ➞ 22221
24 ➞ 22228
### Notes
If you get stuck, check the **Comments** for help.
"""
from math import sqrt
def mystery_func(num):
    """Return int('2' * k + str(num - 2**k)) where k = isqrt(num).

    Reverse-engineered from the challenge examples:
    3 -> 21, 9 -> 2221, 17 -> 22221, 24 -> 22228.

    Uses math.isqrt instead of int(sqrt(num)) so the integer square root is
    exact for arbitrarily large num (float sqrt can round k*k down to k-1).
    Assumes num >= 2**isqrt(num); otherwise the remainder is negative and a
    '-' would appear inside the digit string (e.g. num = 25..31).
    """
    from math import isqrt  # local import: exact integer square root (3.8+)
    k = isqrt(num)
    return int('2' * k + str(num - 2 ** k))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
6be22e5d4c80b5a55250d03739fe50e7880025b9 | 45c170fb0673deece06f3055979ece25c3210380 | /toontown/safezone/DistributedTreasureAI.py | 2770a6c849035a4cea106f46e2f2f2521f2627d9 | [] | no_license | MTTPAM/PublicRelease | 5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f | 825f562d5021c65d40115d64523bb850feff6a98 | refs/heads/master | 2021-07-24T09:48:32.607518 | 2018-11-13T03:17:53 | 2018-11-13T03:17:53 | 119,129,731 | 2 | 6 | null | 2018-11-07T22:10:10 | 2018-01-27T03:43:39 | Python | UTF-8 | Python | false | false | 1,423 | py | #Embedded file name: toontown.safezone.DistributedTreasureAI
from otp.ai.AIBase import *
from direct.distributed.ClockDelta import *
from toontown.toonbase import ToontownGlobals
from direct.distributed import DistributedObjectAI
class DistributedTreasureAI(DistributedObjectAI.DistributedObjectAI):
    """Server-side treasure object that toons can attempt to grab."""

    def __init__(self, air, treasurePlanner, treasureType, x, y, z):
        DistributedObjectAI.DistributedObjectAI.__init__(self, air)
        self.treasurePlanner = treasurePlanner
        self.treasureType = treasureType
        self.pos = (x, y, z)

    def requestGrab(self):
        # Forward the grab attempt to the planner, which accepts or rejects it.
        grabberId = self.air.getAvatarIdFromSender()
        self.treasurePlanner.grabAttempt(grabberId, self.getDoId())

    def validAvatar(self, av):
        return 1

    def getTreasureType(self):
        return self.treasureType

    def d_setGrab(self, avId):
        self.sendUpdate('setGrab', [avId])
        av = simbase.air.doId2do.get(avId)
        if hasattr(av, 'addStat'):
            av.addStat(ToontownGlobals.STATS_TREASURES)

    def d_setReject(self):
        self.sendUpdate('setReject', [])

    def getPosition(self):
        return self.pos

    def setPosition(self, x, y, z):
        self.pos = (x, y, z)

    def b_setPosition(self, x, y, z):
        # Broadcast variant: update locally, then distribute to clients.
        self.setPosition(x, y, z)
        self.d_setPosition(x, y, z)

    def d_setPosition(self, x, y, z):
        self.sendUpdate('setPosition', [x, y, z])
| [
"linktlh@gmail.com"
] | linktlh@gmail.com |
0304c1f7833d124961c9afefa02b257e43263123 | c4feb6227cc68e96c7454ee7682a91f6f6afd164 | /supervised_learning/0x03-optimization/10-Adam.py | 5bc85b9653a0d029547beba99c525bacc3833130 | [] | no_license | Karenahv/holbertonschool-machine_learning | 4b7ae5ad4cd1f06f8bae87a509d11b5c8069f8c9 | 884db3d605c2d0eee968f03ce7f525f2a557f261 | refs/heads/master | 2022-12-24T16:17:34.753055 | 2020-09-30T02:09:08 | 2020-09-30T02:09:08 | 255,319,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | #!/usr/bin/env python3
"""Adam Optimization algorithm tf"""
import tensorflow as tf
def create_Adam_op(loss, alpha, beta1, beta2, epsilon):
    """Create a TensorFlow training op minimizing ``loss`` with Adam.

    ``alpha`` is the learning rate, ``beta1``/``beta2`` the moment decay
    rates and ``epsilon`` the numerical-stability constant.
    """
    optimizer = tf.train.AdamOptimizer(learning_rate=alpha,
                                       beta1=beta1,
                                       beta2=beta2,
                                       epsilon=epsilon)
    return optimizer.minimize(loss)
| [
"you@example.com"
] | you@example.com |
8d5db242ef9162a0ad9be8390ea3e6f54acef215 | 965fe92b03b37d2e6fa700281c4ef383fb104ada | /sciencebeam_trainer_delft/sequence_labelling/tools/install_models.py | ce91c1cee4d59f190c4138263475aa26058770a5 | [
"MIT"
] | permissive | elifesciences/sciencebeam-trainer-delft | 1591bebb7f5b9ed178329f4e9e02a9d893ab228d | 2413fe7f0801869208741e4ab6c4096db8d53b5e | refs/heads/develop | 2022-05-20T21:55:13.210432 | 2022-03-28T17:32:31 | 2022-03-28T17:32:31 | 192,557,708 | 5 | 1 | MIT | 2022-03-28T17:33:14 | 2019-06-18T14:34:50 | Python | UTF-8 | Python | false | false | 6,052 | py | import argparse
import logging
import os
import pickle
from pathlib import Path
from shutil import rmtree
from typing import Dict, List
from sciencebeam_trainer_delft.utils.misc import parse_dict, merge_dicts
from sciencebeam_trainer_delft.utils.io import (
get_compression_wrapper,
FileContainer,
open_file_container
)
from sciencebeam_trainer_delft.utils.cli import (
add_default_arguments,
process_default_args,
initialize_and_call_main
)
LOGGER = logging.getLogger(__name__)
SOURCE_URL_META_FILENAME = '.source-url'


def get_source_url_meta_file_path(target_directory: str) -> Path:
    """Return the path of the hidden marker file recording a model's source URL."""
    return Path(target_directory) / SOURCE_URL_META_FILENAME
def copy_file_container_with_source_meta(
        file_container: FileContainer,
        target_directory: str):
    """Copy every file from ``file_container`` into ``target_directory``.

    The target directory is wiped and recreated first. Compressed files are
    written under their uncompressed name (compression extension stripped).
    Returns the list of written file paths.
    Raises FileNotFoundError when the container holds no files.
    """
    files = file_container.list_files()
    LOGGER.debug('files: %s', files)
    if not files:
        raise FileNotFoundError('no files found in %s' % file_container)
    # Start from a clean target so stale files from a previous model are gone.
    if os.path.exists(target_directory):
        rmtree(target_directory)
    os.makedirs(target_directory, exist_ok=True)
    target_filepath_list = []
    for file_ref in files:
        relative_filename = file_ref.basename
        # e.g. "model.pkl.gz" is stored as "model.pkl" after decompression.
        relative_output_filename = get_compression_wrapper(
            relative_filename
        ).strip_compression_filename_ext(relative_filename)
        # source_filepath = os.path.join(source_url, relative_filename)
        target_filepath = os.path.join(target_directory, relative_output_filename)
        target_filepath_list.append(target_filepath)
        LOGGER.debug('copying %s to %s', file_ref, target_filepath)
        file_ref.copy_to(target_filepath)
    return target_filepath_list
def copy_directory_with_source_meta(source_url: str, target_directory: str, force: bool = False):
    """Copy model files from ``source_url`` into ``target_directory``.

    The copy is skipped (returning ``[]``) when the ``.source-url`` marker in
    the target already records the same source, unless ``force`` is set.
    After a successful copy the marker file is rewritten with ``source_url``.
    Returns the list of copied file paths.
    """
    LOGGER.debug('source_url: %s, target_directory: %s', source_url, target_directory)
    source_url_meta_file = get_source_url_meta_file_path(target_directory)
    current_source_url = (
        source_url_meta_file.read_text().strip()
        if source_url_meta_file.exists()
        else None
    )
    if not force and current_source_url == source_url:
        LOGGER.debug(
            'current source_url of %s already (skipping): %s',
            target_directory, current_source_url
        )
        return []
    with open_file_container(source_url) as file_container:
        result = copy_file_container_with_source_meta(
            file_container,
            target_directory
        )
    # Record where this copy came from so future installs can be skipped.
    LOGGER.debug('setting %s to %s', source_url_meta_file, source_url)
    source_url_meta_file.write_text(source_url)
    return result
def validate_pickle_file(pickle_file: str):
    """Raise if ``pickle_file`` cannot be unpickled (e.g. moved/renamed classes)."""
    with open(pickle_file, 'rb') as stream:
        pickle.load(stream)
    LOGGER.info('validated pickle file: %s', pickle_file)
def validate_pickle_files(pickle_files: List[str]):
    """Validate every file in ``pickle_files`` by unpickling each one."""
    for pickle_path in pickle_files:
        validate_pickle_file(pickle_path)
def is_pickle_file(filename: str) -> bool:
    """Return True when ``filename`` carries the ``.pkl`` extension."""
    return filename[-4:] == '.pkl'


def filter_pickle_files(filenames: List[str]) -> List[str]:
    """Keep only the pickle files from ``filenames`` (order preserved)."""
    return list(filter(is_pickle_file, filenames))
def install_model(
        model_base_path: str, model_name: str, model_source_url: str,
        force: bool = False, validate_pickles: bool = False):
    """Install one model from ``model_source_url`` into ``<model_base_path>/<model_name>``.

    When ``validate_pickles`` is set, any copied ``.pkl`` files are unpickled
    once to ensure they still load (package layouts may have changed).
    """
    LOGGER.debug(
        'model_base_path: %s, model_name: %s, model_source_url: %s',
        model_base_path, model_name, model_source_url
    )
    target_directory = os.path.join(model_base_path, model_name)
    target_files = copy_directory_with_source_meta(
        model_source_url, target_directory, force=force
    )
    if validate_pickles:
        validate_pickle_files(filter_pickle_files(target_files))
    LOGGER.info('copied model %s to %s (%s)', model_source_url, target_directory, target_files)
def install_models(
        model_base_path: str, model_source_dict: Dict[str, str],
        force: bool = False, validate_pickles: bool = False):
    """Install every ``model_name -> source_url`` entry of ``model_source_dict``."""
    LOGGER.debug('model_base_path: %s, model_source_dict: %s', model_base_path, model_source_dict)
    for model_name, model_source_url in model_source_dict.items():
        install_model(
            model_base_path, model_name, model_source_url,
            force=force, validate_pickles=validate_pickles
        )
def parse_model_source_expr(model_source_expr: str) -> Dict[str, str]:
    """Parse ``name=url`` pairs separated by ``|`` into a mapping."""
    LOGGER.debug('model_source_expr: %s', model_source_expr)
    return parse_dict(model_source_expr, delimiter='|')
def parse_args(argv: List[str] = None) -> argparse.Namespace:
    """Build and parse the command line arguments of the install-models tool."""
    parser = argparse.ArgumentParser(
        description="Install model(s)"
    )
    parser.add_argument(
        "--model-base-path",
        required=True,
        help=(
            "The base path for the local models. It will install the models to a"
            " sub-directory using the model name."
        )
    )
    parser.add_argument(
        "--install",
        nargs='+',
        required=True,
        type=parse_model_source_expr,
        help=(
            "The models to install, in the form: <model name>=<url>"
            "\n (multiple models can be specified by using the pipe ('|') separator"
            " or using the --install parameter multiple times"
        )
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Force install model even if already installed from the source url"
    )
    parser.add_argument(
        "--validate-pickles",
        action="store_true",
        help="Validate .pkl files after copying (e.g. package structure may have changed)"
    )
    add_default_arguments(parser)
    return parser.parse_args(argv)
def run(args: argparse.Namespace):
    """Install the requested models; multiple --install dicts are merged."""
    install_models(
        model_base_path=args.model_base_path,
        model_source_dict=merge_dicts(args.install),
        force=args.force,
        validate_pickles=args.validate_pickles
    )
def main(argv: List[str] = None):
    """CLI entry point: parse arguments, apply defaults, run the install."""
    args = parse_args(argv)
    process_default_args(args)
    run(args)
if __name__ == "__main__":
    # Sets up logging etc. before dispatching to main().
    initialize_and_call_main(main)
| [
"noreply@github.com"
] | elifesciences.noreply@github.com |
1cde8882a2a466f8d468ea50b918d236f0b5300f | 14b44aa2b73fb3df08c9c085219ebfd320d5d63a | /counter_service/DockerImageCreator.py | 7a4355e1e2ea308b82438c9ad92d2640aa4df143 | [] | no_license | sande2jm/CI-CD-Service | c46f95f380872e9aca02d5c5e5a88578ba6e88b0 | 34535e69a3c39a78cd1d1ca785587d5e78a03580 | refs/heads/master | 2020-03-27T02:16:00.843764 | 2018-08-25T00:28:32 | 2018-08-25T00:28:32 | 145,778,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | from subprocess import call
class DockerImageCreator():
    """Builds and pushes the counter_service Docker image, tagged by version."""

    def __init__(self):
        self.directory_path = "?"
        self.serviceName = 'counter_service'
        self.repository = 'https://sande2jm@github.com/sande2jm/workflow_configuration.git'
        # Command strings are filled in by getVersion() once the version is known.
        self.cmdList = {'build image': None, 'push image': None}
        self.version_filepath = 'config/counter_service_version.txt'
        self.version = None

    def getVersion(self):
        """Read the current version and derive the docker commands from it."""
        with open(self.version_filepath, 'r+') as version_file:
            self.version = version_file.readline().rstrip()
        tag = 'sande2jm/counter_service:' + self.version
        self.cmdList['build image'] = [
            'docker build -t counter_service WebFront/.',
            'docker tag counter_service ' + tag,
        ]
        self.cmdList['push image'] = 'docker push ' + tag

    def updateVersion(self):
        """Bump the stored version by 0.1 (kept to one decimal place)."""
        bumped = round(float(self.version) + 0.1, 1)
        with open(self.version_filepath, 'w') as version_file:
            version_file.write(str(bumped))

    def runCommand(self, cmd):
        """Run a space-separated command string as a subprocess."""
        call(cmd.split(' '))

    def createImage(self):
        for build_cmd in self.cmdList['build image']:
            self.runCommand(build_cmd)

    def pushToDockerHub(self):
        self.runCommand(self.cmdList['push image'])
if __name__ == '__main__':
    # Release flow: read the version, bump it, build the image, push it.
    d = DockerImageCreator()
    d.getVersion()
    d.updateVersion()
    d.createImage()
    d.pushToDockerHub()
    print(d.version)
| [
"sande2jm@gmail.com"
] | sande2jm@gmail.com |
44b964b89723e25678bfac030bab339c5473d12a | 915e056a1b633fd693eebeaa4b5ecedbe31c52e4 | /conftest.py | e797fba98d0b3d5bc3418dd82468cbb7cb30ea07 | [
"Apache-2.0"
] | permissive | mszarski/robot | 1336b8d776c08ba16779f8c3d485f7850ba03316 | 5d6e8b591963cf5bfe45c95b7b431e8b312ab5e0 | refs/heads/master | 2023-07-11T08:20:56.669397 | 2021-08-20T12:28:20 | 2021-08-20T12:28:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,943 | py | import ivy
import pytest
import ivy.numpy
import ivy.jax
import ivy.tensorflow
import ivy.torch
import ivy.mxnd
from typing import List, Dict
import itertools
from ivy_tests import helpers
FW_STRS = ['numpy', 'jax', 'tensorflow', 'tensorflow_graph', 'torch', 'mxnd']
def get_test_devices() -> Dict[str, List[str]]:
    # For every framework string, build the device list to test against:
    # None (framework default) and 'cpu:0' always; 'gpu:0'/'tpu:0' only when
    # the framework reports them available.
    # (Annotation corrected: keys are the fw_str strings, not ivy.Framework.)
    devices_dict: Dict[str, List[str]] = dict()
    for fw_str, (f, _) in zip(FW_STRS, helpers.f_n_calls()):
        new_list = list()
        new_list.append(None)
        new_list.append('cpu:0')
        if f.gpu_is_available():
            new_list.append('gpu:0')
        if f.tpu_is_available():
            new_list.append('tpu:0')
        devices_dict[fw_str] = new_list
    return devices_dict
# setup the global containers to test the source code
TEST_DEV_STRS: Dict[ivy.Framework, List[str]] = get_test_devices()
TEST_FRAMEWORKS: Dict[str, ivy.Framework] = {'numpy': ivy.numpy,
'jax': ivy.jax,
'tensorflow': ivy.tensorflow,
'tensorflow_graph': ivy.tensorflow,
'torch': ivy.torch,
'mxnd': ivy.mxnd}
TEST_CALL_METHODS: Dict[str, callable] = {'numpy': helpers.np_call,
'jax': helpers.jnp_call,
'tensorflow': helpers.tf_call,
'tensorflow_graph': helpers.tf_graph_call,
'torch': helpers.torch_call,
'mxnd': helpers.mx_call}
@pytest.fixture(autouse=True)
def run_around_tests(f):
    # Run every test inside the selected framework's context manager.
    with f.use:
        yield
def pytest_generate_tests(metafunc):
    # Parametrize tests over the requested device strings and backends,
    # taken from the --dev_str and --backend command line options.
    dev_strs = None
    f_strs = None
    if 'dev_str' in metafunc.fixturenames:
        raw_value = metafunc.config.getoption('--dev_str')
        if raw_value == 'all':
            dev_strs = TEST_DEV_STRS
        else:
            dev_strs = raw_value.split(',')
    if 'f' in metafunc.fixturenames:
        raw_value = metafunc.config.getoption('--backend')
        if raw_value == 'all':
            f_strs = TEST_FRAMEWORKS.keys()
        else:
            f_strs = raw_value.split(',')
    if dev_strs is not None and f_strs is not None:
        # Cross product of (device, framework, call method), restricted to
        # devices actually available for each framework.
        params = list(itertools.chain.from_iterable(
            [[(item, TEST_FRAMEWORKS[f_str], TEST_CALL_METHODS[f_str])
              for item in TEST_DEV_STRS[f_str] if item in dev_strs] for f_str in f_strs]))
        metafunc.parametrize('dev_str,f,call', params)
    # ToDo: add full support for partial arguments later
    elif dev_strs is not None:
        metafunc.parametrize('dev_str', dev_strs)
def pytest_addoption(parser):
    # Custom CLI options: device defaults to cpu:0, backends default to 'all'.
    parser.addoption('--dev_str', action="store", default="cpu:0")
    parser.addoption('--backend', action="store", default="all")
| [
"daniel.lenton11@imperial.ac.uk"
] | daniel.lenton11@imperial.ac.uk |
2bb754e080d87f35ef161a5936456a0b5c889146 | 2b502aae9bc33bac6c4b28d1e702591f2cbed690 | /terrascript/circonus/d.py | c5e3b67ae0afa9dd35360d2680eab949aa494bc5 | [
"Python-2.0",
"BSD-2-Clause"
] | permissive | LeeroyC710/python-terrascript | 4c8fbe032e9b7dd8844d962f888c28f87a26ff77 | b8f3c3549b149c124e3e48e0cea0396332ad1a1d | refs/heads/master | 2020-12-28T03:58:04.502969 | 2020-01-19T21:46:52 | 2020-01-19T21:46:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | # terrascript/circonus/d.py
import terrascript
class circonus_account(terrascript.Data):
pass
class circonus_collector(terrascript.Data):
pass
| [
"markus@juenemann.net"
] | markus@juenemann.net |
490af94ef458e80333f925dec5be122d96e40d53 | 2d9cedf0ed36dadca1ca2f696290c8261ef7851f | /000006/DataJoint/DJ-NWB-Economo-2018/pipeline/behavior.py | 7bb2e62796665945b1f735a3871f0e3bed1d8aea | [
"MIT",
"Apache-2.0"
] | permissive | dandi/example-notebooks | 4365285697d41fd383110b5af5c30860d72fad22 | be3a8b345dfa9c0145692a30087647bc47f865e8 | refs/heads/master | 2023-08-30T20:41:41.323355 | 2023-08-16T21:21:12 | 2023-08-16T21:21:12 | 231,629,025 | 5 | 8 | Apache-2.0 | 2023-09-12T19:53:10 | 2020-01-03T16:55:02 | Jupyter Notebook | UTF-8 | Python | false | false | 605 | py | '''
Schema of behavioral information.
'''
import re
import os
from datetime import datetime
import sys
import numpy as np
import scipy.io as sio
import datajoint as dj
import h5py as h5
from . import utilities, acquisition, analysis
schema = dj.schema(dj.config['custom'].get('database.prefix', '') + 'behavior')
@schema
class LickTimes(dj.Manual):
    # Manually-inserted DataJoint table: per-session lick-contact onset times
    # (seconds) for the left and right lick ports.
    definition = """
    -> acquisition.Session
    ---
    lick_left_times: longblob # (s), lick left onset times (based on contact of lick port)
    lick_right_times: longblob # (s), lick right onset times (based on contact of lick port)
    """
| [
"ben.dichter@gmail.com"
] | ben.dichter@gmail.com |
fb039eaf9df6671dafaff1cd53de5f7fe1230764 | 2481cde6506743565dff2b405a2396daf208ab3e | /src/ranking/migrations/0068_auto_20220522_1139.py | d2f21b149b0871c941a73ecc32a502daabc59c77 | [
"Apache-2.0"
] | permissive | aropan/clist | 4819a3036d179595e4df8c646aff2ed593b9dad3 | 5c805b2af71acee97f993f19d8d4e229f7f5b411 | refs/heads/master | 2023-08-31T11:15:17.987776 | 2023-08-27T21:51:14 | 2023-08-27T21:52:16 | 187,111,853 | 276 | 35 | Apache-2.0 | 2023-09-06T18:42:53 | 2019-05-16T22:57:03 | Python | UTF-8 | Python | false | false | 466 | py | # Generated by Django 3.1.14 on 2022-05-22 11:39
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration for the `ranking` app: drops two flag fields
    # from the Module model.

    dependencies = [
        ('ranking', '0067_auto_20220514_1748'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='module',
            name='has_accounts_infos_update',
        ),
        migrations.RemoveField(
            model_name='module',
            name='multi_account_allowed',
        ),
    ]
| [
"nap0rbl4@gmail.com"
] | nap0rbl4@gmail.com |
b939786deb554e3bd4a972736f0169755a079824 | 5f5c052aa6a42e7492daf940c9561f5ce84ecb1c | /geatpy/source-code/templets/awGA_templet.py | 8db23bc65aa142fa6c7f217fc9cdac5cf2e4bc6b | [] | no_license | siuyincheung/geatpy | 8b343087c506cef39a7dc377a667ae9f1392acd4 | 48d41c8835004d9b0c36060881ed9cfb07483f1e | refs/heads/master | 2020-03-27T08:15:57.973576 | 2018-08-27T00:21:00 | 2018-08-27T00:21:00 | 146,237,904 | 1 | 0 | null | 2018-08-27T02:39:41 | 2018-08-27T02:39:41 | null | UTF-8 | Python | false | false | 5,445 | py | # -*- coding: utf-8 -*-
import numpy as np
import geatpy as ga # 导入geatpy库
import time
def awGA_templet(AIM_M, AIM_F, PUN_M, PUN_F, ranges, borders, precisions, maxormin, MAXGEN, MAXSIZE, NIND, SUBPOP, GGAP, selectStyle, recombinStyle, recopt, pm, drawing = 1):
    """
    awGA_templet.py - multi-objective optimization template based on awGA
    (adaptive weighted aggregation genetic algorithm).

    Usage:
        Apart from ``drawing``, no parameter has a default value. Pass None
        to omit an optional argument; e.g. when there is no penalty function,
        pass None for the 3rd and 4th arguments:
            awGA_templet(AIM_M, 'aimfuc', None, None, ..., maxormin)

    Parameters:
        AIM_M          - module holding the objective function, usually
                         obtained via AIM_M = __import__('<objective module>')
        AIM_F : str    - objective function name
        PUN_M          - module holding the penalty function (same mechanism)
        PUN_F : str    - penalty function name
        ranges : array - range matrix of the decision variables; each upper
                         bound must exceed its lower bound. For example
                         [[1, 2, 3], [3, 4, 5]] declares three variables
                         ranging over 1-3, 2-4 and 3-5 respectively.
        borders : list - (optional) 1 where a bound is inclusive, 0 where it
                         is exclusive; None means all bounds are inclusive.
                         E.g. [[1, 0, 1], [0, 1, 1]] gives [1, 3), (2, 4], [3, 5].
        precisions : list - (optional) decimal precision per variable (e.g. 4
                         means the encoding resolves 4 decimal places); None
                         means a zero vector (discrete encoding). Entries
                         must be >= 0.
        maxormin : int - 1 minimizes the objectives, -1 maximizes them
        MAXGEN : int   - maximum number of generations
        MAXSIZE : int  - maximum size of the Pareto-optimal set
        NIND : int     - population size (number of individuals)
        SUBPOP : int   - number of subpopulations the population is split into
        GGAP : float   - generation gap (probability offspring differ from parents)
        selectStyle : str - low-level selection operator name, e.g. 'rws'
                         (roulette-wheel selection)
        recombinStyle : str - low-level recombination operator name, e.g.
                         'xovsp' (single-point crossover)
        recopt : float - crossover probability
        pm : float     - mutation probability
        drawing : int  - (optional) 0 = no plot, 1 = plot the final result,
                         2 = animate the evolution; defaults to 1

    Algorithm:
        Implements a multi-objective search based on adaptive weight
        aggregation (awGA). A global Pareto-optimal set is maintained, so the
        population itself does not have to be entirely non-dominated.
    """
    #========================== initialisation ===========================
    # fetch the objective (and penalty) functions
    aimfuc = getattr(AIM_M, AIM_F) # objective function
    FieldDR = ga.crtfld(ranges, borders, precisions)
    #========================= start the evolution =======================
    Chrom = ga.crtrp(NIND, FieldDR) # create the initial real-coded population
    ObjV = aimfuc(Chrom) # objective values of the population
    NDSet = np.zeros((0, Chrom.shape[1])) # recorder for Pareto-optimal solutions
    NDSetObjV = np.zeros((0, ObjV.shape[1])) # recorder for their objective values
    ax = None
    start_time = time.time() # start timing
    # evolve!
    for gen in range(MAXGEN):
        if NDSet.shape[0] > MAXSIZE:
            break
        [CombinObjV, weight] = ga.awGA(maxormin * ObjV) # adaptive weights and aggregated single objective
        FitnV = ga.ranking(maxormin * CombinObjV) # fitness from the aggregated objective
        # update the Pareto set and the fitness of non-dominated individuals
        [FitnV, NDSet, NDSetObjV, repnum] = ga.upNDSet(Chrom, maxormin * ObjV, FitnV, NDSet, maxormin * NDSetObjV)
        # apply the genetic operators
        SelCh=ga.selecting(selectStyle, Chrom, FitnV, GGAP, SUBPOP) # selection
        SelCh=ga.recombin(recombinStyle, SelCh, recopt, SUBPOP) # crossover
        SelCh=ga.mutbga(SelCh, FieldDR, pm) # mutation
        if repnum > Chrom.shape[0] * 0.1: # too many duplicates: apply one gaussian mutation pass
            SelCh=ga.mutgau(SelCh, FieldDR, pm) # gaussian mutation
        ObjVSel = aimfuc(SelCh) # objective values of the offspring
        [CombinObjV, weight] = ga.awGA(maxormin * ObjVSel)
        FitnVSel = ga.ranking(maxormin * CombinObjV)
        [Chrom,ObjV] = ga.reins(Chrom,SelCh,SUBPOP,1,0.9,FitnV,FitnVSel,ObjV,ObjVSel) # reinsertion
        if drawing == 2:
            ax = ga.frontplot(NDSetObjV, False, ax, gen + 1) # animated plot
    end_time = time.time() # stop timing
    #========================= plotting and reporting =========================
    if drawing != 0:
        ga.frontplot(NDSetObjV,True)
    times = end_time - start_time
    print('用时:', times, '秒')
    print('帕累托前沿点个数:', NDSet.shape[0], '个')
    print('单位时间找到帕累托前沿点个数:', int(NDSet.shape[0] // times), '个')
    # return the Pareto-optimal set and the elapsed time
    return [ObjV, NDSet, NDSetObjV, end_time - start_time]
"jazzbin@geatpy.com"
] | jazzbin@geatpy.com |
7a6e8a9cc8568967a2e559fb21c2f931c3151863 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/43/usersdata/64/14191/submittedfiles/mediaLista.py | bd0fc8aa030b09fbb9672685bc889d7ec9e5f44a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # -*- coding: utf-8 -*-
from __future__ import division
n = input("Digite a quantidade de valores: ")
a = []
i = 1
media = 0
while i<=n:
a.append(input("Digite o valor da lista: "))
media = sum(a)/n
i = i + 1
print(("2.%f" %a[0])
print (media) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
9689b853fc20ed4974723099e3aa75aa73c7a94f | bcc7e80589acfb08924fa3650c56229eadb0b686 | /root/migrations/0004_auto_20170907_1756.py | fa28e79486c37e4de81709e8913b02f242d06f82 | [
"MIT"
] | permissive | lubchenko05/django_test | 91146f10e2ed205437ca2c7db8db64b40aae8772 | 3b07813e2e9abb68ee568dd62d312b3e6b543e84 | refs/heads/master | 2021-01-23T17:44:13.173264 | 2017-09-07T19:39:15 | 2017-09-07T19:39:15 | 102,777,274 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-07 17:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('root', '0003_auto_20170907_1755'),
]
operations = [
migrations.RenameField(
model_name='profile',
old_name='email',
new_name='owner',
),
]
| [
"lubchenko05@gmail.com"
] | lubchenko05@gmail.com |
5b029b43b980a90f876e059544e08e5d6873dc90 | 4b68243d9db908945ee500174a8a12be27d150f9 | /pogoprotos/data/telemetry/rpc_socket_response_telemetry_pb2.py | de2dd50a00da9e38e948f8764f1ed834eee90cfd | [] | no_license | ykram/pogoprotos-py | 7285c86498f57dcbbec8e6c947597e82b2518d80 | a045b0140740625d9a19ded53ece385a16c4ad4a | refs/heads/master | 2020-04-20T10:19:51.628964 | 2019-02-02T02:58:03 | 2019-02-02T02:58:03 | 168,787,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,327 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/data/telemetry/rpc_socket_response_telemetry.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.data.telemetry import rpc_socket_response_time_pb2 as pogoprotos_dot_data_dot_telemetry_dot_rpc__socket__response__time__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/data/telemetry/rpc_socket_response_telemetry.proto',
package='pogoprotos.data.telemetry',
syntax='proto3',
serialized_pb=_b('\n=pogoprotos/data/telemetry/rpc_socket_response_telemetry.proto\x12\x19pogoprotos.data.telemetry\x1a\x38pogoprotos/data/telemetry/rpc_socket_response_time.proto\"\x81\x01\n\x1aRpcSocketResponseTelemetry\x12\x17\n\x0fwindow_duration\x18\x01 \x01(\x02\x12J\n\x10response_timings\x18\x02 \x03(\x0b\x32\x30.pogoprotos.data.telemetry.RpcSocketResponseTimeb\x06proto3')
,
dependencies=[pogoprotos_dot_data_dot_telemetry_dot_rpc__socket__response__time__pb2.DESCRIPTOR,])
_RPCSOCKETRESPONSETELEMETRY = _descriptor.Descriptor(
name='RpcSocketResponseTelemetry',
full_name='pogoprotos.data.telemetry.RpcSocketResponseTelemetry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='window_duration', full_name='pogoprotos.data.telemetry.RpcSocketResponseTelemetry.window_duration', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_timings', full_name='pogoprotos.data.telemetry.RpcSocketResponseTelemetry.response_timings', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=151,
serialized_end=280,
)
_RPCSOCKETRESPONSETELEMETRY.fields_by_name['response_timings'].message_type = pogoprotos_dot_data_dot_telemetry_dot_rpc__socket__response__time__pb2._RPCSOCKETRESPONSETIME
DESCRIPTOR.message_types_by_name['RpcSocketResponseTelemetry'] = _RPCSOCKETRESPONSETELEMETRY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RpcSocketResponseTelemetry = _reflection.GeneratedProtocolMessageType('RpcSocketResponseTelemetry', (_message.Message,), dict(
DESCRIPTOR = _RPCSOCKETRESPONSETELEMETRY,
__module__ = 'pogoprotos.data.telemetry.rpc_socket_response_telemetry_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.telemetry.RpcSocketResponseTelemetry)
))
_sym_db.RegisterMessage(RpcSocketResponseTelemetry)
# @@protoc_insertion_point(module_scope)
| [
"mark@noffle.net"
] | mark@noffle.net |
81236ad98965e7fb469904c3265f320f6fef3a0a | a8248250b783d8b034110d26016f75dea53ca35a | /pypsi/commands/xargs.py | 3795c59d8419c15aadb54531790824d21be0d958 | [
"BSD-3-Clause"
] | permissive | jnashold/pypsi | ce5daf81e8b516e5e43fed8c7838120de53ad26f | d480d6377182d649ed342f6bcc67c33b73e4f1e1 | refs/heads/master | 2020-04-03T11:47:19.971869 | 2014-06-13T20:58:04 | 2014-06-13T20:58:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py |
from pypsi.base import Command, PypsiArgParser
import sys
import argparse
XArgsUsage = """{name} [-h] [-I repstr] command"""
class XArgsCommand(Command):
'''
Execute a command for each line of input from :data:`sys.stdin`.
'''
def __init__(self, name='xargs', topic='shell', **kwargs):
self.parser = PypsiArgParser(
prog=name,
description='build and execute command lines from stdin',
usage=XArgsUsage.format(name=name)
)
self.parser.add_argument(
'-I', default='{}', action='store',
metavar='repstr', help='string token to replace',
dest='token'
)
self.parser.add_argument(
'command', nargs=argparse.REMAINDER, help="command to execute"
)
super(XArgsCommand, self).__init__(
name=name, topic=topic, usage=self.parser.format_help(),
brief='build and execute command lines from stdin', **kwargs
)
def run(self, shell, args, ctx):
ns = self.parser.parse_args(args)
if self.parser.rc is not None:
return self.parser.rc
if not ns.command:
self.error(shell, "missing command")
return 1
base = ' '.join([
'"{}"'.format(c.replace('"', '\\"')) for c in ns.command
])
child = ctx.fork()
for line in sys.stdin:
cmd = base.replace(ns.token, line.strip())
shell.execute(cmd, child)
return 0
| [
"meily.adam@gmail.com"
] | meily.adam@gmail.com |
2c87277c3fe7d5a97fb11322f4f3e8b02f18286b | 4e5141121d8b4015db233cbc71946ec3cfbe5fe6 | /samples/basic/crud/gnmi/models/cisco-ios-xr/Cisco-IOS-XR-ethernet-lldp-cfg/gn-delete-xr-ethernet-lldp-cfg-10-ydk.py | 422f102411d15f82fa3282f22a5170de88122b0a | [
"Apache-2.0"
] | permissive | itbj/ydk-py-samples | 898c6c9bad9d6f8072892300d42633d82ec38368 | c5834091da0ebedbb11af7bbf780f268aad7040b | refs/heads/master | 2022-11-20T17:44:58.844428 | 2020-07-25T06:18:02 | 2020-07-25T06:18:02 | 282,382,442 | 1 | 0 | null | 2020-07-25T06:04:51 | 2020-07-25T06:04:50 | null | UTF-8 | Python | false | false | 2,688 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Delete all config data for model Cisco-IOS-XR-ethernet-lldp-cfg.
usage: gn-delete-xr-ethernet-lldp-cfg-10-ydk.py [-h] [-v] device
positional arguments:
device gNMI device (http://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.path import Repository
from ydk.services import CRUDService
from ydk.gnmi.providers import gNMIServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ethernet_lldp_cfg \
as xr_ethernet_lldp_cfg
import os
import logging
YDK_REPO_DIR = os.path.expanduser("~/.ydk/")
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="gNMI device (http://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create gNMI provider
repository = Repository(YDK_REPO_DIR+device.hostname)
provider = gNMIServiceProvider(repo=repository,
address=device.hostname,
port=device.port,
username=device.username,
password=device.password)
# create CRUD service
crud = CRUDService()
lldp = xr_ethernet_lldp_cfg.Lldp() # create object
# delete configuration on gNMI device
# crud.delete(provider, lldp)
exit()
# End of script
| [
"deom119@gmail.com"
] | deom119@gmail.com |
657c0b94772a99cfc7f1bd6b61435581ae75725c | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /ACL_PyTorch/contrib/cv/detection/FOTS/FOTS_postprocess.py | 4fd136fe05dcca70e28aeebd1f0374133505db99 | [
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,862 | py | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import numpy as np
import torch
from modules.parse_polys import parse_polys
import re
import tqdm
import os
import sys
import struct
# bin文件格式转为numpy
def bin2np(binName, binShape):
size = os.path.getsize(binName) # size 是字节大小
binfile = open(binName, 'rb')
Len = int(size / 4) # 4个字节=float32 类型
res = []
for i in range(Len):
data = binfile.read(4) # 将4个字节取出作为 float
num = struct.unpack('f', data)
res.append(num[0])
binfile.close()
dim_res = np.array(res).reshape(binShape)
return dim_res
# bin 文件转回 tensor
def postprocess(bin_folder, output_folder):
preNum = 1
while (preNum < 501):
scale_x = 2240 / 1280
scale_y = 1248 / 720
preName = "img_" + str(preNum)
confBin = bin_folder + preName + "_0.bin"
disBin = bin_folder + preName + "_1.bin"
angleBin = bin_folder + preName + "_2.bin"
preNum += 1
confidence = torch.tensor(bin2np(confBin, (1, 1, 312, 560)))
distances = torch.tensor(bin2np(disBin, (1, 4, 312, 560)))
angle = torch.tensor(bin2np(angleBin, (1, 1, 312, 560)))
confidence = torch.sigmoid(confidence).squeeze().data.cpu().numpy()
distances = distances.squeeze().data.cpu().numpy()
angle = angle.squeeze().data.cpu().numpy()
polys = parse_polys(confidence, distances, angle, 0.95, 0.3)
with open('{}'.format(os.path.join(output_folder, 'res_{}.txt'.format(preName))), 'w') as f:
for id in range(polys.shape[0]):
f.write('{}, {}, {}, {}, {}, {}, {}, {}\n'.format(
int(polys[id, 0] / scale_x), int(polys[id, 1] /
scale_y), int(polys[id, 2] / scale_x),
int(polys[id, 3] / scale_y),
int(polys[id, 4] / scale_x), int(polys[id, 5] /
scale_y), int(polys[id, 6] / scale_x),
int(polys[id, 7] / scale_y)
))
if __name__ == '__main__':
output_folder = sys.argv[1]
bin_folder = sys.argv[2]
postprocess(bin_folder, output_folder)
| [
"liuzhuheng@huawei.com"
] | liuzhuheng@huawei.com |
3516ad539063e8ff67e0bb14b76e8a42e5a5b2eb | 9b30b51a919ca531a836efa230b2bacfdf9b5c70 | /app.py | dc8db0f050a3b49a178b8cbc0715a16d9820964c | [] | no_license | bharathjinka09/flask-realtime-dashboard | 2c6ed52fb775040e0843f5d0eba2ba3274d1a14d | e963ee205863495635ab6c698de0fa7645a25c50 | refs/heads/master | 2022-11-30T04:16:13.461812 | 2020-08-05T14:26:45 | 2020-08-05T14:26:45 | 285,311,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | from flask import Flask, render_template, request
from pusher import Pusher
app = Flask(__name__)
pusher = Pusher(app_id=u'1050289', key=u'8c3492c4c6666ee54c33', secret=u'e393c3c007e4945da628', cluster=u'ap2')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/dashboard')
def dashboard():
return render_template('dashboard.html')
@app.route('/orders', methods=['POST'])
def order():
data = request.form
pusher.trigger(u'order', u'place', {
u'units': data['units']
})
return "units logged"
@app.route('/message', methods=['POST'])
def message():
data = request.form
pusher.trigger(u'message', u'send', {
u'name': data['name'],
u'message': data['message']
})
return "message sent"
@app.route('/customer', methods=['POST'])
def customer():
data = request.form
pusher.trigger(u'customer', u'add', {
u'name': data['name'],
u'position': data['position'],
u'office': data['office'],
u'age': data['age'],
u'salary': data['salary'],
})
return "customer added"
if __name__ == '__main__':
app.run(debug=True)
| [
"bharathjinka09@gmail.com"
] | bharathjinka09@gmail.com |
013fa03087e0d2197e9d15100d78ab20d480dce7 | bf15a97a377bc49495a8c278cd247387a08361fd | /intersight/models/vnic_eth_network_policy_ref.py | 67418a69847320cddf71c347b40d31a6afb946a9 | [
"Apache-2.0"
] | permissive | movinalot/intersight-python | ffcb434e5fdf3f6e857dd967c794a64b2d2e05de | cdc3b082d75eac93b74029ab610e16d3008fdd8c | refs/heads/master | 2020-12-18T15:46:06.780834 | 2019-10-29T00:39:49 | 2019-10-29T00:39:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,714 | py | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-961
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VnicEthNetworkPolicyRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'moid': 'str',
'object_type': 'str',
'selector': 'str'
}
attribute_map = {
'moid': 'Moid',
'object_type': 'ObjectType',
'selector': 'Selector'
}
def __init__(self, moid=None, object_type=None, selector=None):
"""
VnicEthNetworkPolicyRef - a model defined in Swagger
"""
self._moid = None
self._object_type = None
self._selector = None
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if selector is not None:
self.selector = selector
@property
def moid(self):
"""
Gets the moid of this VnicEthNetworkPolicyRef.
The Moid of the referenced REST resource.
:return: The moid of this VnicEthNetworkPolicyRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this VnicEthNetworkPolicyRef.
The Moid of the referenced REST resource.
:param moid: The moid of this VnicEthNetworkPolicyRef.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this VnicEthNetworkPolicyRef.
The Object Type of the referenced REST resource.
:return: The object_type of this VnicEthNetworkPolicyRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this VnicEthNetworkPolicyRef.
The Object Type of the referenced REST resource.
:param object_type: The object_type of this VnicEthNetworkPolicyRef.
:type: str
"""
self._object_type = object_type
@property
def selector(self):
"""
Gets the selector of this VnicEthNetworkPolicyRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:return: The selector of this VnicEthNetworkPolicyRef.
:rtype: str
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this VnicEthNetworkPolicyRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:param selector: The selector of this VnicEthNetworkPolicyRef.
:type: str
"""
self._selector = selector
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, VnicEthNetworkPolicyRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"ategaw@cisco.com"
] | ategaw@cisco.com |
8babf3a3d1553cfe9b0d3f343311bba9ce05fd57 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r8/Gen/DecFiles/options/13266006.py | d9ba20eadbb64604ed1b5f1bed42ff0502c3c6b0 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,960 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/13266006.py generated: Fri, 27 Mar 2015 15:48:13
#
# Event Type: 13266006
#
# ASCII decay Descriptor: {[[B_s0]nos -> (D_s- => K+ K- pi-) pi+ pi- pi+]cc, [[B_s0]os -> (D_s+ => K- K+ pi+) pi- pi+ pi-]cc}
#
from Gaudi.Configuration import *
importOptions( "$DECFILESROOT/options/TracksInAccWithMinP.py" )
from Configurables import Generation
Generation().EventType = 13266006
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bs_Dspipipi-withf2=DecProdCut,bis_pCut1600MeV.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCbAndWithMinP"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 531,-531 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 531
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 531,-531 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_531.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 13266006
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
55d74c22f4a72511c7ff700bbe8115ece19dee85 | 358aaf68f3c60ebbbd86b3bc66d4e6c098bcb39e | /fonts/svga141_8x16.py | dda0b24ea4f067938a52a5ce2b3eb9946a955e3b | [
"MIT"
] | permissive | ccccmagicboy/st7735_mpy | d2de0046abd81978d5176dace45a40758377af82 | b15f1bde69fbe6e0eb4931c57e71c136d8e7f024 | refs/heads/master | 2022-08-28T23:18:04.353733 | 2020-05-28T04:19:21 | 2020-05-28T04:19:21 | 254,869,035 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,755 | py | """converted from ..\fonts\SVGA141__8x16.bin """
WIDTH = 8
HEIGHT = 16
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x3c\x3c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x66\x66\x66\x24\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x6c\x6c\xfe\x6c\x6c\x6c\xfe\x6c\x6c\x00\x00\x00\x00'\
b'\x18\x18\x7c\xc6\xc2\xc0\x7c\x06\x06\x86\xc6\x7c\x18\x18\x00\x00'\
b'\x00\x00\x00\x00\xc2\xc6\x0c\x18\x30\x60\xc6\x86\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x6c\x38\x76\xdc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x30\x30\x30\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x18\x30\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x66\x3c\xff\x3c\x66\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x18\x30\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\xc6\xc6\xd6\xd6\xc6\xc6\x6c\x38\x00\x00\x00\x00'\
b'\x00\x00\x18\x38\x78\x18\x18\x18\x18\x18\x18\x7e\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x0c\x18\x30\x60\xc0\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x06\x3c\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x1c\x3c\x6c\xcc\xfe\x0c\x0c\x0c\x1e\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc0\xc0\xc0\xfc\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x38\x60\xc0\xc0\xfc\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x06\x06\x0c\x18\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7e\x06\x06\x06\x0c\x78\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x06\x0c\x18\x30\x60\x30\x18\x0c\x06\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7e\x00\x00\x7e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x0c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x7c\xc6\xc6\xde\xde\xde\xdc\xc0\x7c\x00\x00\x00\x00'\
b'\x00\x00\x10\x38\x6c\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00\x00'\
b'\x00\x00\xf8\x6c\x66\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xde\xc6\xc6\x66\x3a\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x1e\x0c\x0c\x0c\x0c\x0c\xcc\xcc\xcc\x78\x00\x00\x00\x00'\
b'\x00\x00\xe6\x66\x66\x6c\x78\x78\x6c\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\xf0\x60\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xc6\xee\xfe\xfe\xd6\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xe6\xf6\xfe\xde\xce\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xd6\xde\x7c\x0c\x0e\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x6c\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x60\x38\x0c\x06\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7e\x7e\x5a\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xd6\xd6\xd6\xfe\xee\x6c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\x6c\x7c\x38\x38\x7c\x6c\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x66\x66\x66\x66\x3c\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x86\x0c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x3c\x30\x30\x30\x30\x30\x30\x30\x30\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x80\xc0\xe0\x70\x38\x1c\x0e\x06\x02\x00\x00\x00\x00'\
b'\x00\x00\x3c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x3c\x00\x00\x00\x00'\
b'\x10\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00'\
b'\x30\x30\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\xe0\x60\x60\x78\x6c\x66\x66\x66\x66\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x0c\x0c\x3c\x6c\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xfe\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x64\x60\xf0\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\xcc\x7c\x0c\xcc\x78\x00'\
b'\x00\x00\xe0\x60\x60\x6c\x76\x66\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x06\x06\x00\x0e\x06\x06\x06\x06\x06\x06\x66\x66\x3c\x00'\
b'\x00\x00\xe0\x60\x60\x66\x6c\x78\x78\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x38\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xec\xfe\xd6\xd6\xd6\xd6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x66\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x7c\x60\x60\xf0\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00'\
b'\x00\x00\x00\x00\x00\xdc\x76\x66\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\x60\x38\x0c\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x10\x30\x30\xfc\x30\x30\x30\x30\x36\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x66\x66\x66\x66\x66\x3c\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xd6\xfe\x6c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\x6c\x38\x38\x38\x6c\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\x7e\x06\x0c\xf8\x00'\
b'\x00\x00\x00\x00\x00\xfe\xcc\x18\x30\x60\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x18\x0e\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x00\x18\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x18\x70\x00\x00\x00\x00'\
b'\x00\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x38\x6c\xc6\xc6\xc6\xfe\x00\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
| [
"cuiwei_cv@163.com"
] | cuiwei_cv@163.com |
55f2ec00b233606f261eb492176f308e1725b9ae | 8cca481c8dd508012aa794e2f9a07e11c3706a87 | /presidio-analyzer/tests/mocks/__init__.py | 3c5c37a7be8f1f7edfdd06c11f5ac8c83c171b4a | [
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"BSD-3-Clause",
"Unlicense",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-warranty-disclaimer",
"CNRI-Python",
"MIT",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla"
] | permissive | microsoft/presidio | 174472891e241e292982eee26a666d71ca263d42 | 3effc1467b8714714d5112ef7b627889507ea83d | refs/heads/main | 2023-08-15T20:14:00.962803 | 2023-08-14T19:13:49 | 2023-08-14T19:13:49 | 132,129,752 | 2,092 | 412 | MIT | 2023-09-13T18:17:58 | 2018-05-04T11:08:58 | Python | UTF-8 | Python | false | false | 219 | py | from .nlp_engine_mock import NlpEngineMock
from .app_tracer_mock import AppTracerMock
from .recognizer_registry_mock import RecognizerRegistryMock
__all__ = ["NlpEngineMock", "AppTracerMock", "RecognizerRegistryMock"]
| [
"noreply@github.com"
] | microsoft.noreply@github.com |
afc9478b33f0384681f7c30005fc5f311c6f9d5e | df042e5934ae53a50396593585f33ec94454d7b0 | /ex5.py | e3a39eb398da55edeac87daf6cfa00739b88a0da | [] | no_license | ialeksandrov/Python | 897649bb6784dbf87da9752281d7e52c0c7c0bd8 | ea6f998cc42c65e15f3925a73729079caa829699 | refs/heads/master | 2020-12-07T02:16:15.848599 | 2017-06-28T14:52:34 | 2017-06-28T14:52:34 | 95,467,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | my_name = 'Zed A. Shaw'
my_age = 35 # not a lie
my_height = 74 # inches
my_weight = 180 # lbs
my_eyes = 'Blue'
my_teeth = 'White'
my_hair = 'Brown'
print "Lets talk about %s." % my_name
print "He`s %d inches tall." % my_height
print "He`s %d punds heavy." % my_weight
print "Actually that`s not too heavy."
print "He`s got %s eyes and %s hair." % (my_eyes, my_hair)
print "His teeth are usually %s depending on the coffee." % my_teeth
# this line is tricky, try to get it exacly right
print "If I add %d, %d, and %d I get %d." % (my_age, my_height, my_weight, my_age + my_height + my_weight)
| [
"ialeksandrov1990@abv.bg"
] | ialeksandrov1990@abv.bg |
b29e5f3d7ee4b2ab0a4763919ec99367e13d3e68 | 60834953e6bfdd8469996f09b9aa290319afc0e3 | /uf/task/__init__.py | dcc739cef9676a653e24b51ae0910f745bc365f6 | [
"Apache-2.0"
] | permissive | zoukaifa/unif | e8c4913a250da597ebe8e911b87ee52c2ab94958 | a6c9c94f60a7b906d9bd410bb446c4e3f2540ffc | refs/heads/master | 2023-06-18T21:28:24.770145 | 2021-07-18T13:55:50 | 2021-07-18T13:55:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | # coding:=utf-8
# Copyright 2021 Tencent. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import Training
from .adversarial import AdversarialTraining
from .base import Inference
from .base import Scoring
from .base import Initialization
from .base import Exportation
__all__ = [
'Training',
'AdversarialTraining',
'Inference',
'Scoring',
'Initialization',
'Exportation',
]
| [
"luv_dusk@163.com"
] | luv_dusk@163.com |
e92ac7af4076b062a628790607331d4861bdc044 | 0fb0dba210ff0f63515c464d7acc95ae32d7603c | /Application/Change Time Zone and Run a .bat and .exe File from Network Location/change-time-zone-and-run-a-bat-and-exe-file-from-network-location.py | bd48285676c17116d3f17432047e38abff7dabb4 | [] | no_license | slad99/pythonscripts | 7cbe6b8bb27c8c06e140c46e7c8cf286cbc56d8e | 4e0ebb023899a602cb041ef6f153fd3b7ab032e9 | refs/heads/master | 2022-01-04T21:49:10.486758 | 2019-06-28T14:29:28 | 2019-06-28T14:29:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,811 | py | name='"Syria Standard Time"' #provide the name of the time zone to be changed
#If name is in string, enclose it in '" "'- eg:'"Syria Standard Time"'
#If name is in String+number, enclose it in " " - eg: "UTC+13"
Filepath=r'\\Audi\c\Users\audiR7\Desktop' ##Provide the network share file path
share_user="XXXXX" ## Provide the user name for the shared path
share_pass="YYYYY" ## Provide the password for the shared path
Setup_Path_X64=r"qbittorrent_4.1.0_x64_setup.exe" ## Enter the .exe file name for 64 bit
Setup_Path_X86=r"qbittorrent_4.0.4_setup.exe" ## Enter the .exe file name for 32 bit
Bat_file=r"Sample.bat" ## Enter the .bat file name
silent_commnad ="/S" ## Enter the silent command to install the .exe file
import os
import shutil
import platform
import ctypes
import re
import subprocess
print"-------- TIME ZONE--------\n"
print("The current time zone is")
cur_zon=os.popen("TZUTIL /g ").read()
print cur_zon
print("------CHANGING TIME ZONE------------\n")
change=os.popen("TZUTIL /s "+name).read()
print change
print("The Changed current time zone is")
cur_zone=os.popen("TZUTIL /g ").read()
print cur_zone
path=r"C:\Program Files (x86)"
if os.path.exists(path):
print "64"
NFN=Setup_Path_X64
else:
NFN=Setup_Path_X86
print "32"
CP=os.path.join(Filepath,NFN)
SP=os.path.join(Filepath,Bat_file)
workdir=os.environ["TEMP"]
class disable_file_system_redirection:
_disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
_revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection
def __enter__(self):
self.old_value = ctypes.c_long()
self.success = self._disable(ctypes.byref(self.old_value))
def __exit__(self, type, value, traceback):
if self.success:
self._revert(self.old_value)
def login(cmd,Filepath,CP):
with disable_file_system_redirection():
print 'Login to network share'
print os.popen(cmd).read()
print 'Copying file from Network share....'
print os.popen('copy "'+CP+'" '+workdir).read()
print os.popen('copy "'+SP+'" '+workdir).read()
cmd= 'NET USE "'+Filepath+'" /USER:'+share_user+' "'+share_pass+'"'
login(cmd,Filepath,CP)
PTI=os.path.join(workdir,NFN)
print "Executing .exe file"
os.chdir(workdir)
CMD = '"'+NFN+'"'+" "+silent_commnad
print CMD
a=os.popen(CMD).read()
ATI=os.path.join(workdir,Bat_file)
print "Excuting .bat File"
process = subprocess.Popen([ATI],stdout=subprocess.PIPE)
stdout = process.communicate()[0]
print "---------------------------"
print stdout
| [
"noreply@github.com"
] | slad99.noreply@github.com |
bd4b8d8a77f5abb07d761ccd0a8f0be9e2964e06 | 923ae97768992cd8e183133a64cd92a1e423b1a4 | /app/equity/screener_eqs/equity_stats.py | 60ac3138a8c07348582ab5de8f49c111f50b78f2 | [] | no_license | webclinic017/finance_hub | 618cc55110842f63c67127e8cbf06743fb360133 | c61a2170eef444eb77ac7f1e1363173d31f19a16 | refs/heads/master | 2022-01-01T00:26:21.992825 | 2018-02-12T22:58:25 | 2018-02-12T22:58:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,712 | py | import sys
sys.path.append("/home/ubuntu/workspace/finance")
sys.path.append("/usr/local/lib/python2.7/dist-packages")
import datetime
import re, os, string, json, types, pdb
import pandas as pd
import numpy as np
import requests
from app import app
from app.utils.db_utils import DBHelper
from bs4 import BeautifulSoup
column_opts = []
class EquityStats():
"""holds all the individual stats for a stock
like P/E, dividend yield, etc.
"""
def __init__(self, stats, col_list, source, write=False, date=None):
self._date = date or datetime.datetime.now().strftime('%Y-%m-%d')
self._source = source
if self._source == "API1":
self._stats = dict(zip(col_list, stats))
self._ticker = self._stats['s']
self._stats['date'] = self._date
# Change the name of the column
self._stats['ticker'] = self._stats['s']
del self._stats['s']
elif self._source == "API2":
self._stats = stats
self._ticker = self._stats['ticker']
self._stats['date'] = self._date
if write:
self.write_to_db()
def write_to_db(self):
with DBHelper() as db:
db.connect()
if self._source == "API1":
table = 'eq_screener'
self._stats['n'] = self._stats['n'].replace("'", "''")
prim_keys = ['date', 'ticker']
elif self._source == "API2":
table = 'key_stats_yahoo'
self._stats['ticker'] = self._stats['ticker'].replace("'", "''")
prim_keys = ['date', 'ticker']
db.upsert(table, self._stats, prim_keys)
@staticmethod
def setColumns(source):
# TODO set the columns and set the favorites here from the file, lets get them out of the code
column_map = {}
if source == "API1":
file = "/home/ubuntu/workspace/finance/app/equity/screener_eqs/yahoo_api1_notes.txt"
elif source == "API2":
file = "/home/ubuntu/workspace/finance/app/equity/screener_eqs/yahoo_api2_notes.txt"
with open(file, "r") as f:
for line in f:
if line.strip() == 'EOF':
break
t_tup = line.split(' ')
column_map[t_tup[0]] = " ".join(t_tup[1:]).strip()
EquityStats.cols = column_map
class ES_Dataframe:
"""HOlds the dataframe of a call to the eq_screener DB
and preforms all the filters"""
test_filters = [('r', '<', 15), ('y', '>', 2), ('m6', '<', 0), ('m8', '<', 0), ('r5', '<', 1)]
def __init__(self, date=None, filters=None, favs=False):
self._favs = favs
self._filters = filters or ES_Dataframe.test_filters
self._colmap = self.setColumns()
self._date = date or datetime.datetime.now().strftime('%Y-%m-%d')
pdb.set_trace()
self._df = self.read_from_db(table='key_stats_yahoo')
self.readOther()
self.clean_data()
self.apply_filters()
self.cleanForPresentation()
def cleanForPresentation(self):
df = self._df
# Removing NaNs so it can be put in a JSON
df = df.replace(np.nan,' ', regex=True)
# order columns
df = df.reindex_axis(sorted(df.columns), axis=1)
self._df = df
def readOther(self):
file = "/home/ubuntu/workspace/finance/app/equity_screener/yahoo_api2_notes.txt"
other = False
with open(file, "r") as f:
for line in f:
if other:
nn = line.strip()
date_to_string = f.readline().strip()
break
if line.strip() == 'Other':
other = True
self._nonnumeric = nn.split(",")
self._date_to_string = date_to_string.split(",")
def read_from_db(self, table):
with DBHelper() as db:
db.connect()
return db.select(table, where="date='{0}'".format(self._date))
@staticmethod
def setColumns():
column_map = {}
with open("/home/ubuntu/workspace/finance/app/equity/screener_eqs/screen_info.csv", "r") as f:
cols = str.split(f.readline(), ",")[1:]
cols_desc = str.split(f.readline(), ",")[1:]
return dict(zip(cols, cols_desc))
def clean_data(self):
"""moves around data in the dataframe for screening purposes"""
# self.removePunctuation()
self.numberfy()
app.logger.info("Done cleaning data")
def removePunctuation(self):
"""replacing punctiation in all the columns"""
# Doesnt really work, lot of errors on None values
# Also might not be necessary
df = self._df
for col in df.columns:
if not isinstance(df[col][0], str):
continue
for pct in ['+', '%']:
try:
df[col] = df[col].apply(lambda x: x.replace(pct,""))
except Exception as e:
print("column prob doesnt need punctiation cleaning" + e)
break
# exc_type, exc_obj, exc_tb = sys.exc_info()
# app.logger.info("PANDAS DATA CLEAN ERROR: {0}, {1}, {2}".format(exc_type, exc_tb.tb_lineno, exc_obj))
self._df = df
def numberfy(self):
"""sets all the numeric columns to numbers"""
df = self._df
for col in df.columns:
if col not in self._nonnumeric:
df[col] = df[col].apply(pd.to_numeric, errors='coerce')
if col in self._date_to_string:
# Need this to convert certain datetimes to strings
df[col] = df[col].apply(lambda x: x.strftime("%Y%m%d"))
self._df = df
def apply_filters(self):
df = self._df
for filt in self._filters:
try:
if filt[1] == "=":
df = df[df[filt[0]] == filt[2]]
elif filt[1] == ">":
df = df[df[filt[0]] > filt[2]]
elif filt[1] == "<":
df = df[df[filt[0]] < filt[2]]
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
app.logger.info("COULD NOT APPLY FILTER: {0}, {1}, {2}".format(exc_type, exc_tb.tb_lineno, exc_obj))
self._df = df
app.logger.info("Filters applied")
if __name__ == '__main__':
# import pdb; pdb.set_trace()
d = datetime.datetime(2016, 10, 25).strftime('%Y-%m-%d')
es_df = ES_Dataframe(date=d) | [
"mccarviks@gmail.com"
] | mccarviks@gmail.com |
b6caeb01e9fc8fe3a5608d1a5a0cea804283620e | a16feb303b7599afac19a89945fc2a9603ae2477 | /Simple_Python/standard/doctest/doctest_14.py | 941442b0f2e103cb3fa96e6818128b466d90e799 | [] | no_license | yafeile/Simple_Study | d75874745ce388b3d0f9acfa9ebc5606a5745d78 | c3c554f14b378b487c632e11f22e5e3118be940c | refs/heads/master | 2021-01-10T22:08:34.636123 | 2015-06-10T11:58:59 | 2015-06-10T11:58:59 | 24,746,770 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | #! /usr/bin/env/python
# -*- coding:utf-8 -*-
"""Tests can appear in any docstring within the module.
Module-level tests cross class and function boundaries.
>>> A('a') == B('b')
False
"""
class A(object):
"""Simple class.
>>> A('instance_name').name
'instance_name'
"""
def __init__(self,name):
self.name = name
def method(self):
"""Returns an unusual value.
>>> A('name').method()
'eman'
"""
return ''.join(reversed(list(self.name)))
class B(A):
"""Another simple class.
>>> B('different_name').name
'different_name'
""" | [
"zhuzhulang@126.com"
] | zhuzhulang@126.com |
71c957d58b0b858f9dc446e5717fdd8d635d2104 | 52be02c1ddf1610356d4818a5fd06e8d9ee98a73 | /algorithms/gps/algorithm/policy/tf_policy.py | bd581a1e71abe192c25b05bf7e8fd20582e8e2b1 | [] | no_license | DengYuelin/multiple_assembly_old | f32e34a2e6633c99639489fb02b2e28edb1db180 | 696c55de23bb217b4471324bf3c3246a1bfcd5d8 | refs/heads/master | 2020-12-22T07:12:06.749844 | 2020-02-03T18:27:39 | 2020-02-03T18:27:39 | 236,664,915 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,965 | py | import pickle
import os
import uuid
import numpy as np
import tensorflow as tf
from gps.algorithm.policy.policy import Policy
class TfPolicy(Policy):
"""
A neural network policy implemented in tensor flow. The network output is
taken to be the mean, and Gaussian noise is added on top of it.
U = net.forward(obs) + noise, where noise ~ N(0, diag(var))
Args:
obs_tensor: tensor representing tf observation. Used in feed dict for forward pass.
act_op: tf op to execute the forward pass. Use sess.run on this op.
var: Du-dimensional noise variance vector.
sess: tf session.
device_string: tf device string for running on either gpu or cpu.
"""
def __init__(self, dU, obs_tensor, act_op, feat_op, var, sess, device_string, copy_param_scope=None):
Policy.__init__(self)
self.dU = dU
self.obs_tensor = obs_tensor
self.act_op = act_op
self.feat_op = feat_op
self.sess = sess
self.device_string = device_string
self.chol_pol_covar = np.diag(np.sqrt(var))
self.scale = None # must be set from elsewhere based on observations
self.bias = None
self.x_idx = None
if copy_param_scope:
self.copy_params = tf.get_collection(tf.GraphKeys.VARIABLES, scope=copy_param_scope)
self.copy_params_assign_placeholders = [tf.placeholder(tf.float32, shape=param.get_shape()) for
param in self.copy_params]
self.copy_params_assign_ops = [tf.assign(self.copy_params[i],
self.copy_params_assign_placeholders[i])
for i in range(len(self.copy_params))]
def act(self, x, obs, t, noise):
"""
Return an action for a state.
Args:
x: State vector.
obs: Observation vector.
t: Time step.
noise: Action noise. This will be scaled by the variance.
"""
# Normalize obs.
if len(obs.shape) == 1:
obs = np.expand_dims(obs, axis=0)
obs[:, self.x_idx] = obs[:, self.x_idx].dot(self.scale) + self.bias
with tf.device(self.device_string):
action_mean = self.sess.run(self.act_op, feed_dict={self.obs_tensor: obs})
if noise is None:
u = action_mean
else:
u = action_mean + self.chol_pol_covar.T.dot(noise)
return u[0] # the DAG computations are batched by default, but we use batch size 1.
def get_features(self, obs):
"""
Return the image features for an observation.
Args:
obs: Observation vector.
"""
if len(obs.shape) == 1:
obs = np.expand_dims(obs, axis=0)
# Assume that features don't depend on the robot config, so don't normalize by scale and bias.
with tf.device(self.device_string):
feat = self.sess.run(self.feat_op, feed_dict={self.obs_tensor: obs})
return feat[0] # the DAG computations are batched by default, but we use batch size 1.
def get_copy_params(self):
param_values = self.sess.run(self.copy_params)
return {self.copy_params[i].name:param_values[i] for i in range(len(self.copy_params))}
def set_copy_params(self, param_values):
value_list = [param_values[self.copy_params[i].name] for i in range(len(self.copy_params))]
feeds = {self.copy_params_assign_placeholders[i]:value_list[i] for i in range(len(self.copy_params))}
self.sess.run(self.copy_params_assign_ops, feed_dict=feeds)
def pickle_policy(self, deg_obs, deg_action, checkpoint_path, goal_state=None, should_hash=False):
"""
We can save just the policy if we are only interested in running forward at a later point
without needing a policy optimization class. Useful for debugging and deploying.
"""
if should_hash is True:
hash_str = str(uuid.uuid4())
checkpoint_path += hash_str
os.mkdir(checkpoint_path + '/')
checkpoint_path += '/_pol'
pickled_pol = {'deg_obs': deg_obs, 'deg_action': deg_action, 'chol_pol_covar': self.chol_pol_covar,
'checkpoint_path_tf': checkpoint_path + '_tf_data', 'scale': self.scale, 'bias': self.bias,
'device_string': self.device_string, 'goal_state': goal_state, 'x_idx': self.x_idx}
pickle.dump(pickled_pol, open(checkpoint_path, "wb"))
saver = tf.train.Saver()
saver.save(self.sess, checkpoint_path + '_tf_data')
@classmethod
def load_policy(cls, policy_dict_path, tf_generator, network_config=None):
"""
For when we only need to load a policy for the forward pass. For instance, to run on the robot from
a checkpointed policy.
"""
from tensorflow.python.framework import ops
ops.reset_default_graph() # we need to destroy the default graph before re_init or checkpoint won't restore.
pol_dict = pickle.load(open(policy_dict_path, "rb"))
tf_map = tf_generator(dim_input=pol_dict['deg_obs'], dim_output=pol_dict['deg_action'],
batch_size=1, network_config=network_config)
sess = tf.Session()
init_op = tf.initialize_all_variables()
sess.run(init_op)
saver = tf.train.Saver()
check_file = pol_dict['checkpoint_path_tf']
saver.restore(sess, check_file)
device_string = pol_dict['device_string']
cls_init = cls(pol_dict['deg_action'], tf_map.get_input_tensor(), tf_map.get_output_op(), np.zeros((1,)),
sess, device_string)
cls_init.chol_pol_covar = pol_dict['chol_pol_covar']
cls_init.scale = pol_dict['scale']
cls_init.bias = pol_dict['bias']
cls_init.x_idx = pol_dict['x_idx']
return cls_init
| [
"2539722953@qq.com"
] | 2539722953@qq.com |
b74a9b32a04ebaa820dc2c4a2920fe845e39dfed | 07ecc53b5be6b1a34914a0e02265e847f3ac1a65 | /Python/Back Track/126_Hard_单词接龙II.py | 6768248c554e77dae65689c2c4c7cf45f592ba66 | [] | no_license | JasmineRain/Algorithm | 764473109ad12c051f5337ed6f22b517ed9bff30 | 84d7e11c1a01b1994e04a3ab446f0a35eb3d362a | refs/heads/master | 2023-03-14T00:39:51.767074 | 2021-03-09T12:41:44 | 2021-03-09T12:41:44 | 289,603,630 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,425 | py | from collections import defaultdict, deque
from typing import List
class Solution:
# def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
# situations = []
# ans = []
# min_length = len(wordList)
# flag = [False] * len(wordList)
#
# def backtrack(num, trace):
# nonlocal min_length
# if trace[-1] == endWord:
# situation = [beginWord] + trace
# min_length = min(min_length, len(situation))
# situations.append([beginWord] + trace)
# return
#
# if num == min_length:
# return
#
# for i in range(0, len(wordList)):
# if not flag[i] and sum(1 for s1, s2 in zip(trace[-1], wordList[i]) if s1 != s2) == 1:
# flag[i] = True
# backtrack(num + 1, trace + [wordList[i]])
# flag[i] = False
#
# # early return
# if endWord not in wordList:
# return []
#
# # main process
# # search for the first transformable word
# for i in range(len(wordList)):
# if sum(1 for s1, s2 in zip(beginWord, wordList[i]) if s1 != s2) <= 1:
# flag[i] = True
# backtrack(1, [wordList[i]])
# flag[i] = False
#
# # select the shortest answers
# for i in range(len(situations)):
# if len(situations[i]) == min_length:
# ans.append(situations[i])
#
# return ans
def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
wordList.append(beginWord)
### 构建具有邻接关系的桶
buckets = defaultdict(list)
for word in wordList:
for i in range(len(beginWord)):
match = word[:i] + '_' + word[i + 1:]
buckets[match].append(word)
##### BFS遍历
preWords = defaultdict(list) # 前溯词列表
toSeen = deque([(beginWord, 1)]) # 待遍历词及深度
beFound = {beginWord: 1} # 已探测词列表
while toSeen:
curWord, level = toSeen.popleft()
for i in range(len(beginWord)):
match = curWord[:i] + '_' + curWord[i + 1:]
for word in buckets[match]:
if word not in beFound:
beFound[word] = level + 1
toSeen.append((word, level + 1))
if beFound[word] == level + 1: # 当前深度等于该词首次遍历深度,则仍应加入前溯词列表
preWords[word].append(curWord)
if endWord in beFound and level + 1 > beFound[endWord]: # 已搜索到目标词,且完成当前层遍历
break
#### 列表推导式输出结果
if endWord in beFound:
res = [[endWord]]
while res[0][0] != beginWord:
res = [[word] + r for r in res for word in preWords[r[0]]]
return res
else:
return []
if __name__ == "__main__":
S = Solution()
print(S.findLadders(beginWord="hit", endWord="cog", wordList=["hot", "dot", "dog", "lot", "log", "cog"]))
print(S.findLadders(beginWord="hit", endWord="cog", wordList=["hot", "dot", "dog", "lot", "log"]))
| [
"530781348@qq.com"
] | 530781348@qq.com |
e98c6affc7deb93b579e0bd0db54454aadddac23 | f256c98a3b3b6d79ad666c1631be6b6a7a702636 | /算法/滑动窗口万能模版/至多包含 K 个不同字符的最长子串.py | 6aa0f601cc492ebe107bff03d2826771ceafa9e4 | [] | no_license | Codewyf-top/python-learning | 87e1ec227da465f353c540b17a1e366e6ca714fa | afb5f7ac6689780892130cfd50c07c9f2cb27cbe | refs/heads/master | 2023-07-11T16:15:03.196402 | 2021-08-15T22:29:40 | 2021-08-15T22:29:40 | 255,283,790 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | # -*- coding: utf-8 -*-
"""
@Time : 04/12/2020 10:35
@Auth : Codewyf
@File :至多包含 K 个不同字符的最长子串.py
@IDE :PyCharm
@Motto:Go Ahead Instead of Heasitating
"""
class Solution:
def lengthOfLongestSubstringKDistinct(self, s: str, k: int) -> int:
from collections import defaultdict
lookup = defaultdict(int)
start = 0
end = 0
max_len = 0
counter = 0
while end < len(s):
if lookup[s[end]] == 0:
counter += 1
lookup[s[end]] += 1
end += 1
while counter > k:
if lookup[s[start]] == 1:
counter -= 1
lookup[s[start]] -= 1
start += 1
max_len = max(max_len, end - start)
return max_len
| [
"1090187390@qq.com"
] | 1090187390@qq.com |
06af30c9f731602907247521c2c51b2f90529315 | df716b2868b289a7e264f8d2b0ded52fff38d7fc | /tests/formatters/fseventsd.py | ede435e5c6fe75ff24430b29c96281b4f6fc6ff9 | [
"Apache-2.0"
] | permissive | ir4n6/plaso | 7dd3cebb92de53cc4866ae650d41c255027cf80a | 010f9cbdfc82e21ed6658657fd09a7b44115c464 | refs/heads/master | 2021-04-25T05:50:45.963652 | 2018-03-08T15:11:58 | 2018-03-08T15:11:58 | 122,255,666 | 0 | 0 | Apache-2.0 | 2018-02-20T21:00:50 | 2018-02-20T21:00:50 | null | UTF-8 | Python | false | false | 971 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the fseventsd record event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import fseventsd
from tests.formatters import test_lib
class FseventsdFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the fseventsd record event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = fseventsd.FSEventsdEventFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = fseventsd.FSEventsdEventFormatter()
expected_attribute_names = [
u'event_identifier', u'flag_values', u'hex_flags', u'path']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetSources.
if __name__ == '__main__':
unittest.main()
| [
"onager@deerpie.com"
] | onager@deerpie.com |
7c00d97b441ec38aae554fd8b6aa9ece59a945fb | 67ffddfd7e0ace7490c5d52325838b82644eb458 | /samsung_swea/intermediate/swea_4828.py | 8c8e194b3513abc1136fb48690dfac8781030073 | [] | no_license | ckdrjs96/algorithm | 326f353c5aa89a85ec86ce1aabb06cde341193ce | d5d09b047808b6fc2eeaabdbe7f32c83446b4a1b | refs/heads/main | 2023-08-20T05:12:50.671798 | 2021-10-23T04:20:05 | 2021-10-23T04:20:05 | 324,481,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | ## Bubble sort
def bubble_sort(length,arr):
for i in range(length-1,0,-1):
for j in range(i):
if arr[j] > arr[j+1]:
arr[j],arr[j+1]=arr[j+1],arr[j]
return arr
#T=1
T = int(input())
for test_case in range(1, T + 1):
length=int(input())
arr=list(map(int,input().split()))
arr_sort=bubble_sort(length,arr)
print(f'#{test_case}',arr_sort[-1]-arr_sort[0]) | [
"ckdrjs96@gmail.com"
] | ckdrjs96@gmail.com |
730e5123f88fd3f69026ea5170652c213985e902 | 1dcd99bb96d9c51b2b561e7c2e54615cf2bc5ced | /Question/190130/test2.py | c871fd2309810405fac0ac5b9cca53666dcead44 | [] | no_license | dongsik93/HomeStudy | 62bbcad93be49ed396fe9d50e840f921bb751d4e | 3a28ff8c0b522a546ea2ed07c939f49bac3699c7 | refs/heads/master | 2020-04-15T02:12:36.615830 | 2019-04-05T14:31:10 | 2019-04-05T14:31:10 | 164,306,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | # class Dele():
# def __init__ (self, **kargs):
# self.kargs = kargs
# def gogo(self):
# for k,v in self.kargs.items():
# if(len(v) <= 5):
# del self.kargs[k]
# return self.kargs
# fruit = Dele(사과="apple",바나나="banana")
# print(fruit.gogo())
class Fruit:
def __init__(self,**kwargs):
self.kwargs = kwargs
def delete(self):
for i in self.kwargs:
if len(self.kwargs.get(i)) > 5:
print({i:self.kwargs.get(i)})
fruit ={"사과":"apple","바나나":"banana"}
f1=Fruit(**fruit)
f1.delete() | [
"ehdtlr9376@naver.com"
] | ehdtlr9376@naver.com |
bddcc803a12f594f0b9490f5b5742354b8c5db80 | 20a9c98bb59304dd406353c0b5c675801081f056 | /ExcelCalculate-with-Django/ExcelCalculate/calculate/views.py | 1fe1617dca40086ec61fe515e5305fa1f388ab8c | [] | no_license | jungeun919/Django | b585fdbf75c973b7f57dc13320db5ab9f50fd10f | 71fe0449b7b849e7ea66a78d38104fa18d5a95f3 | refs/heads/master | 2023-07-21T13:49:36.632867 | 2021-09-04T14:39:42 | 2021-09-04T14:39:42 | 323,032,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,768 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse
import pandas as pd
from datetime import datetime
from .models import *
# Create your views here.
def calculate(request):
file = request.FILES['fileInput']
# 파일 저장하기
origin_file_name = file.name
user_name = request.session['user_name']
now_HMS = datetime.today().strftime('%H%M%S')
file_upload_name = now_HMS + '_' + user_name + '_' + origin_file_name
file.name = file_upload_name
document = Document(user_upload_file=file)
document.save()
df = pd.read_excel(file, sheet_name='Sheet1', header=0)
# grade별 value 리스트 만들기
grade_dic = {}
total_row_num = len(df.index)
for i in range(total_row_num):
data = df.loc[i]
if not data['grade'] in grade_dic.keys():
grade_dic[data['grade']] = [data['value']]
else:
grade_dic[data['grade']].append(data['value'])
# grade별 최솟값, 최댓값, 평균값 구하기
grade_calculate_dic = {}
for key in grade_dic.keys():
grade_calculate_dic[key] = {}
grade_calculate_dic[key]['min'] = min(grade_dic[key])
grade_calculate_dic[key]['max'] = max(grade_dic[key])
grade_calculate_dic[key]['avg'] = float(sum(grade_dic[key])) / len(grade_dic[key])
grade_list = list(grade_calculate_dic.keys())
grade_list.sort()
for key in grade_list:
print("grade:", key)
print("min:", grade_calculate_dic[key]['min'], end='')
print("/ max:", grade_calculate_dic[key]['max'], end='')
print("/ avg:", grade_calculate_dic[key]['avg'], end='\n\n')
# 이메일 주소 도메인별 인원 구하기
email_domain_dic = {}
for i in range(total_row_num):
data = df.loc[i]
email_domain = (data['email'].split("@"))[1]
if not email_domain in email_domain_dic.keys():
email_domain_dic[email_domain] = 1
else:
email_domain_dic[email_domain] += 1
print("도메인 별 사용 인원")
for key in email_domain_dic.keys():
print(key, email_domain_dic[key], "명")
grade_calculate_dic_to_session = {}
for key in grade_list:
grade_calculate_dic_to_session[int(key)] = {}
grade_calculate_dic_to_session[int(key)]['max'] = float(grade_calculate_dic[key]['max'])
grade_calculate_dic_to_session[int(key)]['avg'] = float(grade_calculate_dic[key]['avg'])
grade_calculate_dic_to_session[int(key)]['min'] = float(grade_calculate_dic[key]['min'])
request.session['grade_calculate_dic'] = grade_calculate_dic_to_session
request.session['email_domain_dic'] = email_domain_dic
return redirect('/result') | [
"renee8369@likelion.org"
] | renee8369@likelion.org |
45d2baa545df614395663f9299fa73660c2b2144 | d4093670913e1e77f896a39af226a241842c5fbc | /dataset/imdb.py | 4d3e808f28fbb9cfc610d2428cd3979e77dc8d5d | [] | no_license | ChuanleiGuo/SSD_Net | ece80251fc19ef311636a2c77c543579ee1a0423 | 0b632e90931e3aad38f2d93339952ac6c5ff79ca | refs/heads/master | 2021-09-04T20:33:16.090057 | 2018-01-22T07:17:38 | 2018-01-22T07:17:38 | 104,025,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,984 | py | # -*- coding: utf-8 -*-
import os
import numpy as np
class Imdb(object):
"""
Base class for dataset loading
# Parameters
name: str
name of dataset
"""
def __init__(self, name):
self.name = name
self.classes = []
self.num_classes = 0
self.image_set_index = None
self.num_images = 0
self.labels = None
self.padding = 0
def image_path_from_index(self, index):
"""
load image full path given specified index
# Parameters
index: int
index of image requested in dateset
# Returns
full path of specified image
"""
raise NotImplementedError
def label_from_index(self, index):
"""
load ground-truth of image given specified index
# Parameters
index: int
index of image requested in dataset
returns:
object ground-truths in format
numpy.array([id, xmin, ymin, xmax, ymax]...)
"""
raise NotImplementedError
def save_imglist(self, fname=None, root=None, shuffle=False):
"""
save imglist to disk
# Parameters
fname: str
saved filename
"""
def progress_bar(count, total, suffix=""):
import sys
bar_len = 24
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = "=" * filled_len + "-" * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix))
sys.stdout.flush()
str_list = []
for index in range(self.num_images):
progress_bar(index, self.num_images)
label = self.label_from_index(index)
if label.size < 1:
continue
path = self.image_path_from_index(index)
if root:
path = os.path.relpath(path, root)
str_list.append('\t'.join([str(index), str(2), str(label.shape[1])] \
+ ["{0:.4f}".format(x) for x in label.ravel()] + [path, ]) + '\n')
if str_list:
if shuffle:
import random
random.shuffle(str_list)
if not fname:
fname = self.name + '.lst'
with open(fname, 'w') as f:
for line in str_list:
f.write(line)
else:
raise RuntimeError("No image in imdb")
def _load_class_names(self, filename, dirname):
"""
load class names from text file
Parameters:
----------
filename: str
file stores class names
dirname: str
file directory
"""
full_path = os.path.join(dirname, filename)
classes = []
with open(full_path, 'r') as f:
classes = [l.strip() for l in f.readlines()]
return classes
| [
"chuanleiguo@gmail.com"
] | chuanleiguo@gmail.com |
2afe47fd4744b31114f59535086e8495d93f2108 | c7d91529db199322e39e54fe4051a75704ea843e | /竞赛/198场/换酒问题.py | 36be138d140252fd2a79a357886e2cea3edc3d40 | [] | no_license | 2226171237/Algorithmpractice | fc786fd47aced5cd6d96c45f8e728c1e9d1160b7 | 837957ea22aa07ce28a6c23ea0419bd2011e1f88 | refs/heads/master | 2020-12-26T07:20:37.226443 | 2020-09-13T13:31:05 | 2020-09-13T13:31:05 | 237,431,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | class Solution(object):
def numWaterBottles(self, numBottles, numExchange):
"""
:type numBottles: int
:type numExchange: int
:rtype: int
"""
if numExchange>numBottles:
return numBottles
cnt=numBottles # 一次喝完所有瓶
while numBottles>=numExchange:
cnt+=numBottles//numExchange # 空瓶可以换的数量
numBottles=numBottles//numExchange+numBottles%numExchange # 没喝的喝完+空瓶数量
# cnt+=numBottles
return cnt
if __name__ == '__main__':
s=Solution()
print(s.numWaterBottles(9,3)) | [
"2226171237@qq.com"
] | 2226171237@qq.com |
3cf0b9388acb7a74ee2c8f455a24d5645b67cfd6 | ded10c2f2f5f91c44ec950237a59225e8486abd8 | /.history/2/path_integral_naive_sampling_20200419015655.py | a6b940f0ba4278a1c6884a485cc85e53c25021d9 | [] | no_license | jearistiz/Statistical-Physics-Projects | 276a86407b32ded4e06b32efb2fadbd8eff8daed | d9c5b16a50856e148dc8604d92b6de3ea21fc552 | refs/heads/master | 2022-11-05T03:41:23.623050 | 2020-06-28T06:36:05 | 2020-06-28T06:36:05 | 254,909,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,098 | py | # -*- coding: utf-8 -*-
from __future__ import division
import os
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pandas as pd
# Author: Juan Esteban Aristizabal-Zuluaga
# date: 202004151200
def rho_free(x,xp,beta):
"""Uso: devuelve elemento de matriz dsnsidad para el caso de una partícula libre en un toro infinito."""
return (2.*np.pi*beta)**(-0.5) * np.exp(-(x-xp)**2 / (2 * beta) )
def harmonic_potential(x):
"""Devuelve valor del potencial armónico para una posición x dada"""
return 0.5* x**2
def anharmonic_potential(x):
"""Devuelve valor de potencial anarmónico para una posición x dada"""
# return np.abs(x)*(1+np.cos(x)) #el resultado de este potencial es interesante
return 0.5*x**2 - x**3 + x**4
def QHO_canonical_ensemble(x,beta):
"""
Uso: calcula probabilidad teórica cuántica de encontrar al oscilador armónico
(inmerso en un baño térmico a temperatura inversa beta) en la posición x.
Recibe:
x: float -> posición
beta: float -> inverso de temperatura en unidades reducidas beta = 1/T.
Devuelve:
probabilidad teórica cuántica en posición x para temperatura inversa beta.
"""
return (np.tanh(beta/2.)/np.pi)**0.5 * np.exp(- x**2 * np.tanh(beta/2.))
def path_naive_sampling( N_path = 10,beta = 4., N_iter = int(1e5), delta = 0.5,
potential = harmonic_potential, append_every = 1 ):
"""
Uso:
"""
dtau = beta/N_path
path_x = [0.] * N_path
pathss_x = [path_x[:]]
t_0 = time()
N_iter = int(N_iter)
for step in range(N_iter):
k = np.random.randint(0,N_path)
#Periodic boundary conditions
knext, kprev = (k+1) % N_path, (k-1) % N_path
x_new = path_x[k] + np.random.uniform(-delta,delta)
old_weight = ( rho_free(path_x[kprev],path_x[k],dtau) *
np.exp(- dtau * potential(path_x[k])) *
rho_free(path_x[k],path_x[knext],dtau) )
new_weight = ( rho_free(path_x[kprev],x_new,dtau) *
np.exp(- dtau * potential(x_new)) *
rho_free(x_new,path_x[knext],dtau) )
if np.random.uniform(0,1) < new_weight/old_weight:
path_x[k] = x_new
if step%append_every == 0:
pathss_x.append(path_x[:])
t_1 = time()
print('Path integral naive sampling: %d iterations -> %.2E seconds'%(N_iter,t_1-t_0))
pathss_x = np.array(pathss_x)
return pathss_x
def figures_fn(pathss_x, beta=4, N_plot=201, x_max=3, N_iter=int(1e5), append_every=1,
               N_beta_ticks=11, msq_file='file.csv', file_name='path-plot-prueba',
               show_theory=True, show_matrix_squaring=True, show_path=True, save_plot=True,
               show_plot=True, show_compare_hist=True, show_complete_path_hist=True):
    """Plot the sampled position distribution against reference curves.

    Parameters mirror ``path_naive_sampling``.  ``msq_file`` names a CSV
    (relative to this script) with columns 'position_x'/'prob_density'
    produced by a matrix-squaring run.  Figures are optionally saved as EPS
    next to this script.  Returns 0.
    """
    pathss_x = np.array(pathss_x)
    script_dir = os.path.dirname(os.path.abspath(__file__))
    x_plot = np.linspace(-x_max, x_max, N_plot)
    N_path = len(pathss_x[-1])
    # LaTeX text rendering with a larger font.
    # NOTE: the rcParam 'text.latex.unicode' was removed in matplotlib 3.0;
    # setting it raised KeyError, so only the font size is configured now.
    plt.rc('text', usetex=True)
    plt.rcParams.update({'font.size': 15})
    fig, ax1 = plt.subplots()
    ax1.set_xlabel(u'$x$')
    ax1.set_ylabel(u'$\pi^{(Q)} (x;\\beta)$')
    # Reference curves, if requested.
    if show_theory:
        lns1 = ax1.plot(x_plot, QHO_canonical_ensemble(x_plot, beta), label=u'Teórico')
    if show_matrix_squaring:
        msq_file = script_dir + '/' + msq_file
        matrix_squaring_data = pd.read_csv(msq_file, index_col=0, comment='#')
        lns2 = ax1.plot(matrix_squaring_data['position_x'], matrix_squaring_data['prob_density'],
                        label=u'Algoritmo Matrix\nSquaring')
    # 'normed' was removed from Axes.hist in matplotlib 3.1; 'density' is the
    # replacement (identical for the equal-width bins used here).
    lns3 = ax1.hist(pathss_x[:, 0], bins=int(np.sqrt(N_iter/append_every)), density=True,
                    label=u'Integral de camino\nnaive sampling', alpha=.40)
    if show_compare_hist:
        # Histogram of one randomly chosen bead k != 0, for comparison.
        lns5 = ax1.hist(pathss_x[:, np.random.choice(np.arange(1, N_path))],
                        bins=int(np.sqrt(N_iter/append_every)), density=True,
                        label=u'Comparación hist. $x[k]$', alpha=.40)
    if show_complete_path_hist:
        # Histogram over all beads of all saved paths.
        lns6 = ax1.hist(pathss_x.copy().flatten(),
                        bins=int(np.sqrt(N_iter*N_path/append_every)), density=True,
                        label=u'Comparación tomando\npath completo $\{x[k]\}_k$', alpha=.40)
    ax1.tick_params(axis='y')
    ax1.set_ylim(bottom=0)
    ax1.set_xlim(-x_max, x_max)
    if not show_path:
        plt.legend(loc='best', fontsize=12)
        if save_plot:
            plt.savefig(script_dir+'/'+file_name+'.eps')
        if show_plot:
            plt.show()
        plt.close()
    if show_path:
        ax2 = ax1.twinx()  # second y-axis (imaginary time tau) sharing x
        ax2.set_ylabel(u'$\\tau$')
        beta_plot = np.linspace(0, beta, N_path+1)
        # Close the trajectory periodically so the plotted path wraps around.
        path_plot = list(pathss_x[-1])
        path_plot.append(pathss_x[-1][0])
        lns4 = ax2.plot(path_plot, beta_plot, 'o-', c='k', label=u'Path')
        ax2.tick_params(axis='y')
        beta_ticks = np.linspace(0, beta, N_beta_ticks)
        ax2.set_yticks(beta_ticks)
        ax2.set_yticklabels(u'$%.2f$'%b for b in beta_ticks)
        ax2.set_ylim(bottom=0)
        ax2.set_xlim(-x_max, x_max)
        # Build one combined legend from artists that live on two axes.
        # Only artists that were actually drawn contribute; this fixes the
        # previous NameError (lns2 undefined when show_matrix_squaring=False)
        # and the IndexError on the [0] placeholders.
        leg = []
        if show_theory:
            leg += lns1
        if show_matrix_squaring:
            leg += lns2
        leg.append(lns3[2][0])        # hist returns (counts, bins, patches)
        leg += lns4
        if show_compare_hist:
            leg.append(lns5[2][0])
        if show_complete_path_hist:
            leg.append(lns6[2][0])
        labs = [artist.get_label() for artist in leg]
        ax1.legend(leg, labs, loc='best', title=u'$\\beta=%.2f$'%beta, fontsize=12)
        fig.tight_layout()  # otherwise the right y-label is slightly clipped
        if save_plot:
            plt.savefig(script_dir+'/'+file_name+'-path_true.eps')
        if show_plot:
            plt.show()
        plt.close()
    return 0
# --- Script driver: sample harmonic-oscillator paths and plot the results ---
# Discretization and Metropolis parameters.
N_path = 10
beta = 4.
N_iter = int(1e4)
delta = 0.5
potential, potential_string = harmonic_potential, 'harmonic_potential'
append_every = 1
# Reference data produced by a matrix-squaring run (read by figures_fn).
msq_file = 'pi_x-ms-harmonic_potential-x_max_5.000-nx_201-N_iter_7-beta_fin_4.000.csv'
# Plotting grid.
N_plot = 201
x_max = 3
x_plot = np.linspace(-x_max,x_max,N_plot)
# Output figure name encodes the run parameters.
plot_file_name = 'pi_x-pi-plot-%s-x_max_%.3f-N_path_%d-N_iter_%d-beta_fin_%.3f'\
                %(potential_string,x_max,N_path,N_iter,beta)
# Run the sampler, then produce the comparison figures.
pathss_x = path_naive_sampling( N_path = N_path, beta = beta, N_iter = N_iter, delta = 0.5,
                                potential = harmonic_potential, append_every = 1 )
figures_fn( pathss_x, beta = beta , N_plot = N_plot, x_max = x_max, N_iter=N_iter,
            append_every=1, N_beta_ticks = N_path+1, msq_file=msq_file,
            file_name=plot_file_name, show_theory=1 , show_matrix_squaring=True,
            show_path=True, save_plot=True, show_plot=True)
"jeaz.git@gmail.com"
] | jeaz.git@gmail.com |
bcadd443cd0d9a1dc93142ea5c9b19450c98dde2 | a4deea660ea0616f3b5ee0b8bded03373c5bbfa2 | /concrete_instances/register-variants/cmovpeq_r64_r64/instructions/cmovpeq_r64_r64/cmovpeq_r64_r64.gen.vex.py | da67fb87e1152638aada2e933b8f1cb059b3356f | [] | no_license | Vsevolod-Livinskij/x86-64-instruction-summary | 4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd | c276edab1b19e3929efb3ebe7514489f66087764 | refs/heads/master | 2022-02-02T18:11:07.818345 | 2019-01-25T17:19:21 | 2019-01-25T17:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | import angr
# Load the compiled instruction sample into an angr project.
proj = angr.Project('./instructions/cmovpeq_r64_r64/cmovpeq_r64_r64.o')
# print() with a single argument is valid in both Python 2 and Python 3;
# the original bare `print x` statements were Python-2-only syntax.
print(proj.arch)
print(proj.entry)
print(proj.filename)
# Lift the basic block at the entry point to VEX IR and pretty-print it.
irsb = proj.factory.block(proj.entry).vex
irsb.pp()
"sdasgup3@illinois.edu"
] | sdasgup3@illinois.edu |
ca1cf3a33e49903807f26cd1e9bf7bf47a970380 | d46cad930fc1174eded859ca9cb51ce17f17fbf6 | /Fundamentals/Mid-Exam-Preparation/Dec-2019-01-disneyland-journey.py | 1700e3191005c51e0b7a74d38c5396e85e7aa422 | [] | no_license | AntoniyaV/SoftUni-Exercises | 66f79fee065f510748350d629838db9291c71f91 | d7bd9ae6aa511b705a3b2e86018f6f79d4f7ab2b | refs/heads/main | 2023-07-09T10:49:29.456647 | 2021-08-13T18:59:12 | 2021-08-13T18:59:12 | 317,540,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | journey_cost = float(input())
months = int(input())
# Money accumulated toward the journey so far.
saved_money = 0
for month in range(1, months + 1):
    # Every odd month except the first, 16% of the savings is spent.
    if not month == 1 and not month % 2 == 0:
        saved_money -= 0.16 * saved_money
    # Every fourth month a 25% bonus on the current savings is added.
    if month % 4 == 0:
        bonus = 0.25 * saved_money
        saved_money += bonus
    # Each month a quarter of the journey cost is deposited.
    saved_money += 0.25 * journey_cost
if saved_money >= journey_cost:
    print(f"Bravo! You can go to Disneyland and you will have {saved_money - journey_cost:.2f}lv. for souvenirs.")
else:
    print(f"Sorry. You need {journey_cost - saved_money:.2f}lv. more.")
"antoniya.vladimirova@gmail.com"
] | antoniya.vladimirova@gmail.com |
f00fe3c0bcd52540243acb352de7d91f49e3ccda | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_lightens.py | 63c52972ea8d3d8914a23e132baa694c5beff6ce | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.verbs._lighten import _LIGHTEN
#calss header
class _LIGHTENS(_LIGHTEN):
    """Lexicon entry for the inflected verb form "LIGHTENS".

    Inherits all behaviour from the base form ``_LIGHTEN`` and only
    overrides the identifying attributes set in ``__init__``.
    """
    def __init__(self):
        _LIGHTEN.__init__(self)
        self.name = "LIGHTENS"
        self.specie = 'verbs'
        self.basic = "lighten"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
bf17a1475e5f1e7bb21eceabf09aebd90c2a6fbc | 37da8476c91c99a06251f7c394426d6a2024b228 | /part2/office_space/setup.py | 99ae96e3c0453a934b80f1f11ce7516d27cf71d0 | [] | no_license | codemation/eco | dc411bd50004ec32ec9a4e98dfd29d522b30b110 | a8befea4c2988d39393f7e890d2ce41cc38442b0 | refs/heads/main | 2022-12-28T03:49:10.513259 | 2020-10-12T07:47:46 | 2020-10-12T07:47:46 | 302,462,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | def run(server):
import os
import uvloop, asyncio
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
server.event_loop = asyncio.get_event_loop()
try:
cmddirPath = None
realPath = None
with open('./.cmddir', 'r') as cmddir:
for line in cmddir:
cmddirPath = line
realPath = str(os.path.realpath(cmddir.name)).split('.cmddir')[0]
if not realPath == cmddirPath:
print(f"NOTE: Project directory may have moved, updating project cmddir files from {cmddirPath} -> {realPath}")
import os
os.system("find . -name .cmddir > .proj_cmddirs")
with open('.proj_cmddirs', 'r') as projCmdDirs:
for f in projCmdDirs:
with open(f.rstrip(), 'w') as projCmd:
projCmd.write(realPath)
except Exception as e:
print("encountered exception when checking projPath")
print(repr(e))
async def setup():
from dbs import setup as dbsetup
await dbsetup.run(server)
from apps import setup
await setup.run(server)
server.event_loop.create_task(
setup()
)
| [
"joshjamison1@gmail.com"
] | joshjamison1@gmail.com |
aeb043e0851fc98a93b942e06dd5b42e2e37a7c0 | 4bb1a23a62bf6dc83a107d4da8daefd9b383fc99 | /contests/abc113/d.py | 4de619a07c685d182023b2ac038ef298f6e4e46f | [] | no_license | takushi-m/atcoder-work | 0aeea397c85173318497e08cb849efd459a9f6b6 | f6769f0be9c085bde88129a1e9205fb817bb556a | refs/heads/master | 2021-09-24T16:52:58.752112 | 2021-09-11T14:17:10 | 2021-09-11T14:17:10 | 144,509,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | # -*- coding: utf-8 -*-
# DP over h rows of a w-column ladder; `b` enumerates bitmasks of horizontal
# bars between adjacent columns (bit i set = bar between columns i and i+1).
h,w,k = map(int, input().split())
k -= 1
mod = 1000000007
if w==1:
    # Single column: exactly one trivial configuration.
    print(1)
    exit()
# dp[row][col] = number of configurations after `row` rows that bring the
# token from column 0 to `col`.
dp = [[0 for _ in range(w)] for _ in range(h+1)]
dp[0][0] = 1
for hi in range(1,h+1):
    for b in range(2**(w-1)):
        # Two adjacent bars may not touch the same column: skip masks with
        # two consecutive set bits.
        judge = False
        for i in range(w-2):
            if (b>>i)&3 == 3:
                judge = True
                break
        if judge:
            continue
        # Build the column permutation induced by this row's bars.
        perm = [i for i in range(w)]
        for i in range(w-1):
            if (b>>i)&1==1:
                perm[i],perm[i+1] = perm[i+1],perm[i]
        # Accumulate counts through the permutation, modulo 1e9+7.
        for i in range(w):
            dp[hi][perm[i]] = (dp[hi][perm[i]]+dp[hi-1][i])%mod
print(dp[h][k])
| [
"takushi-m@users.noreply.github.com"
] | takushi-m@users.noreply.github.com |
2474496d5a2b4653eb639bce76fc5ac8e2bb2b56 | 3de3dae722829727edfdd6cc3b67443a69043475 | /edexOsgi/com.raytheon.uf.tools.gfesuite/cli/src/siteActivation/deactivateSite.py | 669d7666739672f2a222526845caa7f024eb9d13 | [
"LicenseRef-scancode-public-domain",
"Apache-2.0"
] | permissive | Unidata/awips2 | 9aee5b7ec42c2c0a2fa4d877cb7e0b399db74acb | d76c9f96e6bb06f7239c563203f226e6a6fffeef | refs/heads/unidata_18.2.1 | 2023-08-18T13:00:15.110785 | 2023-08-09T06:06:06 | 2023-08-09T06:06:06 | 19,332,079 | 161 | 75 | NOASSERTION | 2023-09-13T19:06:40 | 2014-05-01T00:59:04 | Java | UTF-8 | Python | false | false | 3,587 | py | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
#
# Provides a command-line utility to deactivate a site
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 09/10/14 #3623 randerso Initial Creation.
#
##
import os
import sys
import time
from dynamicserialize.dstypes.com.raytheon.uf.common.site.requests import DeactivateSiteRequest
from awips import ThriftClient
from awips import UsageArgumentParser
from ActivationTopicListener import ActivationTopicListener
def main():
    """Send a site-deactivation request to EDEX and monitor JMS
    activation messages until interrupted or the listener finishes."""
    args = validateArgs()
    request = DeactivateSiteRequest(args.site, args.plugin)
    thriftClient = ThriftClient.ThriftClient(args.host, args.port, "/services")
    thread = ActivationTopicListener(args.jmsHost, args.jmsPort)
    try:
        thread.start()
        time.sleep(1) # sleep to allow thread to connect to JMS broker
        print "\nSending site deactivation request for "+args.site
        thriftClient.sendRequest(request)
        print "\nMonitoring site activation messages."
        thread.join()
    except KeyboardInterrupt:
        # Ctrl-C simply stops monitoring; cleanup happens in finally.
        pass
    except Exception, ex:
        import traceback
        traceback.print_exc()
    finally:
        # Always stop the JMS listener thread.
        thread.stop()
def validateArgs():
    """Parse and return the command-line arguments.

    Host/port defaults come from the DEFAULT_HOST/DEFAULT_PORT and
    JMS_HOST/JMS_PORT environment variables; only -s (site) is required.
    """
    # conflict_handler="resolve" lets -h be reused for "host" instead of help.
    parser = UsageArgumentParser.UsageArgumentParser(conflict_handler="resolve", prog='deactivateSite')
    parser.add_argument("-h", action="store", dest="host",
                        help="host name of edex request server",
                        default=str(os.getenv("DEFAULT_HOST", "localhost")),
                        metavar="hostname")
    parser.add_argument("-r", action="store", type=int, dest="port",
                        help="port number of edex request server",
                        default=int(os.getenv("DEFAULT_PORT", "9581")),
                        metavar="port")
    parser.add_argument("-j", action="store", dest="jmsHost",
                        help="host name of JMS broker",
                        default=str(os.getenv("JMS_HOST", "localhost")),
                        metavar="jmsHost")
    parser.add_argument("-q", action="store", type=int, dest="jmsPort",
                        help="port number of JMS broker",
                        default=int(os.getenv("JMS_PORT", "5672")),
                        metavar="jmsPort")
    parser.add_argument("-p", action="store", dest="plugin", required=False,
                        help="plugin",
                        default="gfe",
                        metavar="plugin")
    parser.add_argument("-s", action="store", dest="site", required=True,
                        help="site to deactivate",
                        metavar="site")
    args = parser.parse_args()
    return args
# Script entry point.
if __name__ == '__main__':
    main()
"mjames@unidata.ucar.edu"
] | mjames@unidata.ucar.edu |
68333ba74102bbf8decdbf1b7b793425a3c06483 | 67ffddfd7e0ace7490c5d52325838b82644eb458 | /programmers/ 2019 KAKAO BLIND RECRUITMENT/오픈채팅방.py | 433080e446bdbd0c11f942b07b4117de3fcef3bc | [] | no_license | ckdrjs96/algorithm | 326f353c5aa89a85ec86ce1aabb06cde341193ce | d5d09b047808b6fc2eeaabdbe7f32c83446b4a1b | refs/heads/main | 2023-08-20T05:12:50.671798 | 2021-10-23T04:20:05 | 2021-10-23T04:20:05 | 324,481,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | def solution(record):
def solution(record):
    """Return the chat-room join/leave messages with final nicknames applied.

    Each entry of `record` is "Enter <uid> <nick>", "Leave <uid>" or
    "Change <uid> <nick>".  A user's most recent nickname is applied
    retroactively to all of that user's Enter/Leave messages.

    (The `def` header of this function was mangled in the source dump and
    has been reconstructed here.)
    """
    latest_nick = {}
    events = []
    for entry in record:
        parts = entry.split()
        action, uid = parts[0], parts[1]
        if action != 'Leave':
            # Enter and Change both (re)define the user's nickname.
            latest_nick[uid] = parts[2]
        if action != 'Change':
            # Only Enter/Leave produce a visible message.
            events.append((action, uid))
    suffix = {'Enter': '님이 들어왔습니다.', 'Leave': '님이 나갔습니다.'}
    return [latest_nick[uid] + suffix[action] for action, uid in events]
"ckdrjs96@gmail.com"
] | ckdrjs96@gmail.com |
1b2238e7e1b2795515b7cb4c80b374b5c310daa4 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Trigger/TrigT1/TrigT1CaloSim/share/TrigT1CaloSim_Reprocess_Run2.py | 9a8549942c47e67f4054b7b45b05bd0b6cdcb4c2 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py | include('TrigT1CaloCalibConditions/L1CaloCalibConditions_jobOptions.py')
# Get the algorithm sequence
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
svcMgr.ToolSvc += CfgMgr.LVL1__L1DatabaseOverrideForDataTool('L1DatabaseOverrideForDataTool')
topSequence += CfgMgr.LVL1__TransientDatabaseOverride(
'TransientDatabaseOverride',
OverrideTools = [ svcMgr.ToolSvc.L1DatabaseOverrideForDataTool ],
InDeadChannelsFolder = '/TRIGGER/L1Calo/V1/Calibration/PpmDeadChannels',
InDisabledTowersFolder = '/TRIGGER/L1Calo/V1/Conditions/DisabledTowers',
OutPprChanDefaultsKey = 'ModifiedPprChanDefaults',
OutPprChanCalibKey = 'ModifiedPprChanCalib',
OutDeadChannelsKey = 'ModifiedPpmDeadChannels',
OutDisabledTowersKey = 'ModifiedDisabledTowers'
)
topSequence += CfgMgr.LVL1__Run2TriggerTowerMaker(
'Run2TriggerTowerMaker',
CellType=2, # Trigger Towers
TriggerTowerLocation = 'ReprocessedTriggerTowers',
ZeroSuppress = False,
ChanDefaultsFolderKey = 'ModifiedPprChanDefaults',
ChanCalibFolderKey = 'ModifiedPprChanCalib',
DeadChannelsFolderKey = 'ModifiedPpmDeadChannels',
DisabledTowersFolderKey = 'ModifiedDisabledTowers'
)
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
97a492b2bae7260471e1857e4f6da5f93a74ea6d | 5fcff46bf18de538dc01989cb906cd84e1f22c92 | /resources/user.py | 58e40841184636cf507bdd06b58952c383967979 | [] | no_license | a-soliman/flask-todo | b37c72f7757152a4a65c038893ebd271c298c98e | d6041cb56618a1e763ee9e3e2b5c5d112d859a2e | refs/heads/master | 2020-03-11T03:51:27.430736 | 2018-04-16T15:42:39 | 2018-04-16T15:42:39 | 128,657,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,411 | py | from flask_restful import Resource, reqparse
import sqlite3
from models.user import UserModel
class UsersList(Resource):
    """Collection endpoint: GET returns every registered user."""
    def get(self):
        all_users = UserModel.query.all()
        return {'users': [entry.json() for entry in all_users]}
class User(Resource):
    """Single-user lookup endpoint keyed by username."""
    def get(self, username):
        match = UserModel.find_by_username(username)
        if match is None:
            return {'message': 'user was not found!'}, 404
        return match.json(), 200
class RegisterUser(Resource):
    """Endpoint that registers a new user account via POST."""

    # Shared request parser: all three fields are mandatory strings.
    # `help` is the message returned to the client when a field is missing
    # (typos "fild"/"filed" fixed).
    parser = reqparse.RequestParser()
    parser.add_argument('username',
                        type = str,
                        required = True,
                        help = 'This field can not be blank'
                        )
    parser.add_argument('password',
                        type = str,
                        required = True,
                        help = 'This field can not be blank'
                        )
    parser.add_argument('email',
                        type = str,
                        required = True,
                        help = 'This field can not be blank'
                        )

    def post(self):
        """Create a user; 400 on duplicate username, 500 on storage failure."""
        data = RegisterUser.parser.parse_args()

        if UserModel.find_by_username(data['username']):
            return {'message': 'a user with the provided username already exists'}, 400

        user = UserModel(None, data['username'], data['password'], data['email'])
        try:
            user.save_to_db()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed by the request handler.
            return {'message': 'Something went wrong'}, 500

        return {'message': 'user created successfully'}, 201
"ahmed.soliman@programmer.net"
] | ahmed.soliman@programmer.net |
8c64e28efaaaa6fb4c06199d21b090b86206cd2a | 7b6c3876aec5f2660300ec67f7ee77a8374bd26d | /multi_tenant/context_processors.py | d6e40515a1b2730a214352d3c3396151c3ee7cdb | [
"MIT"
] | permissive | arineto/django-multi-tenant | 493bda06fd9bc6ae91efed4391d0fa5833750294 | 713d555831b35a487f5a91494a20ee3d955a4b63 | refs/heads/master | 2021-01-17T13:27:27.996692 | 2016-07-11T13:54:54 | 2016-07-11T13:54:54 | 58,878,519 | 7 | 5 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | def subdomain(request):
def subdomain(request):
    """Template context processor exposing the request's subdomain.

    (The `def` header of this function was mangled in the source dump and
    has been reconstructed here.)
    """
    return {'subdomain': request.subdomain}
def tenant(request):
    """Template context processor exposing the resolved tenant object."""
    return {'tenant': request.tenant}
def theme(request):
    """Template context processor exposing the tenant's theme.

    Yields None when no tenant is resolved on the request.
    """
    current = request.tenant
    return {'theme': current.theme if current else None}
| [
"arineto30@gmail.com"
] | arineto30@gmail.com |
fdd3da44b6c2f6dfb995e8abbb9aa96cd9112b9f | e42149d118d4104e7e3e3fad9e2c648e1ce02efd | /benchmarks/runner.py | c2be4022b9c69490bc4f5553cf68d14658de2b45 | [] | no_license | bocadilloproject/benchmarks | e9ab07de201b376167669adc46876dba186b50c4 | dabdda61613cc6825186dddb7a69b617d1f1d2c9 | refs/heads/master | 2020-04-11T18:04:55.631117 | 2019-01-01T18:51:10 | 2019-01-01T18:51:10 | 161,985,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,344 | py | import logging
import os
import shutil
import subprocess
from collections import defaultdict
from contextlib import contextmanager
from os.path import join, exists, dirname
from time import sleep
from typing import Tuple, List, NamedTuple
import pandas as pd
from psutil import Process
from benchmarks.config import Config, Framework, Bench
from benchmarks.utils import (
wait_online, kill_recursively, wait_offline, get_wrk_reqs_per_second,
)
class Runner:
    """Drive a benchmark matrix: for each bench/framework/test combination,
    create a per-framework virtualenv, start the framework's server script,
    hammer it with `wrk`, and collect requests/second scores."""

    def __init__(self, config: Config):
        """Store the configuration and set up a stream logger."""
        self.config = config
        self.frameworks = config.frameworks
        self.tests = config.tests
        self._logger = logging.getLogger("root")
        self._logger.setLevel(logging.INFO)
        handler = logging.StreamHandler()
        formatter = logging.Formatter(
            "%(asctime)s - %(levelname)s - %(message)s")
        handler.setFormatter(formatter)
        self._logger.addHandler(handler)

    def _run(
        self, command: str, timeout: int = 30, **kwargs
    ) -> subprocess.Popen:
        """Run a shell command, wait up to `timeout` seconds, return the
        Popen object (stdout discarded unless overridden via kwargs)."""
        self._logger.info(f"Running: %s", command)
        kwargs.setdefault("stdout", subprocess.DEVNULL)
        p = subprocess.Popen(command, shell=True, **kwargs)
        p.wait(timeout)
        return p

    def get_python(self, framework: Framework) -> Tuple[str, str]:
        """Return (python executable, PYTHONPATH) for the framework's
        virtualenv, creating the env and installing requirements on
        first use."""
        env_dir = framework.name.replace(" ", "_").lower()
        env_path = join(self.config.virtualenvs_dir, env_dir)
        python = join(env_path, "bin", "python3")
        site_packages = join(dirname(dirname(python)), "lib", "python3.6")
        path = ":".join([os.getcwd(), site_packages])
        if not exists(env_path):
            self._run(f"python -m venv {env_path}")
        # Install dependencies
        pip = join(dirname(python), "pip")
        if framework.requirements:
            packages = " ".join(framework.requirements)
            self._run(f"{pip} install -U {packages}", timeout=60)
        return python, path

    def wait(self, framework: Framework, up=True):
        """Sleep for the configured warmup/cooldown period, logging which."""
        seconds = self.config.warmup_seconds
        action = "Warming up" if up else "Cooling down"
        self._logger.info(f"%s %s for %ss", action, framework.name, seconds)
        sleep(seconds)

    @contextmanager
    def server(self, script: str, framework: Framework):
        """Spawn and manage a process for the framework server."""
        python, path = self.get_python(framework)
        host = self.config.host
        port = self.config.port
        env = f"PYTHONPATH={path}"
        command = " ".join([env, python, script, host, str(port)])
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
        try:
            # Block until the server accepts connections, then warm up.
            wait_online(host, port)
            self.wait(framework, up=True)
            yield
        except TimeoutError as e:
            self._logger.exception(e)
            raise Exception(
                f"{script} failed to start the server at "
                f"{host}:{port} (framework: {framework.name})"
            )
        except Exception as e:
            self._logger.exception(e)
            raise Exception(
                f"{script} encountered an unknown error "
                f"(framework: {framework.name})"
            )
        finally:
            # Kill the whole process tree (shell + server) and confirm the
            # port is released before cooling down.
            kill_recursively(Process(p.pid))
            try:
                wait_offline(self.config.host, self.config.port)
            except TimeoutError as e:
                self._logger.exception(e)
                raise Exception(
                    f"{script} failed to stop the server at "
                    f"{host}:{port} (framework: {framework.name})"
                )
            finally:
                self.wait(framework, up=False)

    def benchmark(self, script: str, framework: Framework, bench: Bench) -> int:
        """Run one `wrk` load test against the served script and return the
        parsed requests/second figure."""
        with self.server(script, framework):
            cmd = (
                f"wrk "
                f"-c {bench.concurrency} "
                f"-t {bench.threads} "
                f"http://{self.config.address}/ "
                f"-d {bench.duration}"
            )
            p = self._run(
                cmd, timeout=bench.duration + 2, stdout=subprocess.PIPE
            )
            output = p.stdout.read().decode()
            return get_wrk_reqs_per_second(output)

    def run(self) -> pd.DataFrame:
        """Execute every bench x framework x test combination, keeping the
        best score over the configured number of rounds, and return the
        results as a DataFrame."""
        scores = {}
        num_benches = len(self.config.benches)
        for i, bench in enumerate(self.config.benches):
            print(15 * "=", f"Bench {i + 1} of {num_benches}", 15 * "=")
            bench.show()
            bench_scores = defaultdict(defaultdict)
            for framework in self.frameworks:
                directory = join(self.config.frameworks_dir, framework.dirname)
                if not exists(directory):
                    # Skip frameworks with no script directory checked out.
                    continue
                print()
                print(5 * "-", framework.name, 5 * "-")
                print()
                for test in self.tests:
                    print(f"Starting test: {test.name}")
                    print()
                    test_scores = []
                    for r in range(self.config.rounds):
                        print("Round", r + 1, "of", self.config.rounds)
                        script_path = join(directory, test.filename)
                        score = self.benchmark(script_path, framework, bench)
                        test_scores.append(score)
                    # Keep the best round as the reported score.
                    score = max(test_scores)
                    print("Score:", score)
                    print()
                    bench_scores[test.name][framework.name] = score
            scores[bench] = bench_scores
        return self.as_df(scores)

    @staticmethod
    def as_df(scores: dict) -> pd.DataFrame:
        """Flatten the nested {bench: {test: {framework: score}}} mapping
        into a tidy DataFrame with one row per measurement."""
        records: List[dict] = []
        for bench, bench_scores in scores.items():
            for test, results in bench_scores.items():
                for framework, score in results.items():
                    records.append({
                        "concurrency": bench.concurrency,
                        "test": test,
                        "framework": framework,
                        "score": score,
                    })
        return pd.DataFrame(records)

    def clean(self):
        """Delete all created virtualenvs; a missing directory is fine."""
        self._logger.info("Cleaning up…")
        try:
            shutil.rmtree(self.config.virtualenvs_dir)
        except FileNotFoundError:
            pass
| [
"florimond.manca@gmail.com"
] | florimond.manca@gmail.com |
94f753ed608d3400a8f995ccd162749b0d16f1bf | 9c894d56f153156b82bc4bbde2db09fb04ec58cf | /17/data/ExoDiBosonResonances/EDBRTreeMaker/test/QCDHT200to300ext.py | 484fd80610066792500efce901ede596cf93a04e | [] | no_license | gqlcms/run2_ntuple | 023bb97238980e3d4e7b8c112bc11e63658f1844 | 196c90facf042a64fddfef1e1c69681ccb9ab71c | refs/heads/master | 2020-08-04T09:01:43.466814 | 2019-10-01T11:40:36 | 2019-10-01T11:40:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,344 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration for the QCD_HT200to300 (ext1) MiniAOD sample.
config = Configuration()

config.section_("General")
config.General.requestName = 'QCDHT200to300ext'
config.General.transferLogs = True

config.section_("JobType")
config.JobType.pluginName='Analysis'
config.JobType.sendExternalFolder=True# = 'Analysis'
# Jet-energy-correction text files shipped with every job (AK4/AK8, CHS/Puppi).
config.JobType.inputFiles = ['Summer16_23Sep2016V3_MC_L1FastJet_AK4PFchs.txt','Summer16_23Sep2016V3_MC_L2Relative_AK4PFchs.txt','Summer16_23Sep2016V3_MC_L3Absolute_AK4PFchs.txt','Summer16_23Sep2016V3_MC_L1FastJet_AK8PFchs.txt','Summer16_23Sep2016V3_MC_L2Relative_AK8PFchs.txt','Summer16_23Sep2016V3_MC_L3Absolute_AK8PFchs.txt','Summer16_23Sep2016V3_MC_L1FastJet_AK8PFPuppi.txt','Summer16_23Sep2016V3_MC_L2Relative_AK8PFPuppi.txt','Summer16_23Sep2016V3_MC_L3Absolute_AK8PFPuppi.txt','Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi.txt','Summer16_23Sep2016V3_MC_L2Relative_AK4PFPuppi.txt','Summer16_23Sep2016V3_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True

config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/QCD_HT200to300_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext1-v1/MINIAODSIM'
config.Data.inputDBS = 'global'
#config.Data.inputDBS = 'phys03'
# Split by input file, 5 files per job, over the whole dataset.
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
# This string is used to construct the output dataset name
name='WWW'
steam_dir='chench'
config.Data.outLFNDirBase='/store/user/chench/'#='/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/'+steam_dir+'/'+name+'/'
config.Data.publication = False
config.Data.outputDatasetTag = 'QCDHT200to300ext'

config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
"c.chen@cern.ch"
] | c.chen@cern.ch |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.