blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e713e3b650aa8bcd75898ce8b9478770267dcfe3 | af9d9043a83a751f00f7b805533d87ccce330d21 | /Portfolio/Datawiz/test_task/test_task/settings.py | 934b25bd3018fa1bfebe472ca3678cf757ebf827 | [] | no_license | HeCToR74/Python | e664b79593a92daa7d39d402f789812dfc59c19f | f448ec0453818d55c5c9d30aaa4f19e1d7ca5867 | refs/heads/master | 2023-03-08T13:44:19.961694 | 2022-07-03T19:23:25 | 2022-07-03T19:23:25 | 182,556,680 | 1 | 1 | null | 2023-02-28T15:30:01 | 2019-04-21T16:26:48 | HTML | UTF-8 | Python | false | false | 3,363 | py | """
Django settings for test_task project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_==i0*m4vvexpe%vrgp39%!ay9*^)*8)giv=ur671jbma*21o@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_task.urls'
TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_task.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
] | [
"noreply@github.com"
] | HeCToR74.noreply@github.com |
f6256f707e0944be2fc80c00428ef105ef18d54a | 44eb88d1e3d9aa4bab66eaa9e096082500906cf5 | /075.py | 8620cadcc6d21386ae6ae2f3e010ef04a049611c | [
"MIT"
] | permissive | souza-joao/cursoemvideo-python3 | 82f00d6ca7f9bfc201af796ebfc76eed500bc39d | b9f747d1083c1c779630b25b321b291d76611901 | refs/heads/main | 2023-07-16T09:28:58.441664 | 2021-08-31T15:41:59 | 2021-08-31T15:41:59 | 376,969,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | num = (int(input('Digite um número: ')),
int(input('Digite um número: ')),
int(input('Digite um número: ')),
int(input('Digite um número: ')),)
print(f'O número 9 apareceu {num.count(9)} vez(es).')
if 3 in num:
print(f'O número 3 aparece na {num.index(3) + 1}ª posição.')
else:
print('Você não digitou o número 3.')
print('Os números pares foram ', end='')
for c in num:
if c % 2 == 0:
print(c, end=' ')
'''n1 = int(input('Digite um número: '))
n2 = int(input('Digite outro número: '))
n3 = int(input('Digite outro número: '))
n4 = int(input('Digite mais um número: '))
tup = (n1, n2, n3, n4)
print(f'O número 9 apareceu {tup.count(9)} vezes.')
print('-' * 60)
if tup.count(3) > 0:
print(f'O número 3 está na posição {tup.index(3) + 1}.')
else:
print(f'O número 3 não foi digitado.')
print('-' * 60)
print('Os valores pares foram ', end='')
if tup[0] % 2 == 0:
print(f'{tup[0]} ', end='')
if tup[1] % 2 == 0:
print(f'{tup[1]} ', end='')
if tup[2] % 2 == 0:
print(f'{tup[2]} ', end='')
if tup[3] % 2 == 0:
print(f'{tup[3]}')''' | [
"85567361+souza-joao@users.noreply.github.com"
] | 85567361+souza-joao@users.noreply.github.com |
24bb0afa161eea35fad82d7423ec8767db87fdaf | a4cb0495dec84d433b00d275a63afcd3f5c296b8 | /ResNet/layers/IdentityBlock.py | 992b515223b88207f9b51268c65d2c3dd2395fb7 | [
"MIT"
] | permissive | panuthept/Models_Corpus | 370aa5054145ecb20312360e33217c7a743f327b | 6d9e91eb97829e73d88ecfc4754492f6324ef383 | refs/heads/master | 2023-05-25T10:09:40.600017 | 2020-01-11T14:17:44 | 2020-01-11T14:17:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | import tensorflow as tf
class IdentityBlock(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size, depth=2):
super().__init__()
if isinstance(filters, list):
assert len(filters) == depth, "Length if filter exceed the layer depth."
if isinstance(kernel_size, list):
assert len(kernel_size) == depth, "Length if kernel_size exceed the layer depth."
self.depth = depth
self.filters = filters if isinstance(filters, list) else [filters for _ in range(depth)]
self.kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size for _ in range(depth)]
self.conv2s = [tf.keras.layers.Conv2D(self.filters[i], self.kernel_size[i], padding="same") for i in range(depth)]
self.bns = [tf.keras.layers.BatchNormalization() for _ in range(depth)]
def call(self, input_tensor, training=False):
x = input_tensor
for i in range(len(self.conv2s) - 1):
x = self.conv2s[i](x)
x = self.bns[i](x, training=training)
x = tf.nn.relu(x)
x = self.conv2s[-1](x)
x = self.bns[-1](x, training=training)
x += input_tensor
x = tf.nn.relu(x)
return x
| [
"falcon_270394@hotmail.co.th"
] | falcon_270394@hotmail.co.th |
90feabc37881c6074fed4004e3e610421ad0bb4e | b76947d520fb1f867ed7165403cbcc47caabd5cb | /hivemind_powerball/hive/apps.py | 7c2876c4b0a631b48c502bb4c5060e5326ea5b64 | [] | no_license | peter-stratton/hivemind_powerball | abf3b0c21a3e92be45e6271d701ea1ce628960cc | f1e650bb26c23a0c42b05ac6826105bade0d8d3b | refs/heads/master | 2021-01-22T02:17:45.452114 | 2017-02-09T17:42:02 | 2017-02-09T17:42:02 | 81,042,442 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | from django.apps import AppConfig
class HiveConfig(AppConfig):
"""Config object for the hive app"""
name = 'hive'
| [
"pstratton@jornaya.com"
] | pstratton@jornaya.com |
925028b08297779546c047873b5ba67c870ad692 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_55/59.py | 09da2bcfaa4c411daa5449e6b502ef93033a8f6c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,319 | py | #!/usr/bin/env python
import sys
filename=sys.argv[1]
inputfile=file(filename, 'r')
numcases=int(inputfile.readline().strip())
for case in range(1,numcases+1):
R, k, N = map(long, inputfile.readline().strip().split())
g = map(long, inputfile.readline().strip().split())
y = 0
first_ride = [None] * N
ride_groups = [None] * N
ride_seats = [None] * N
ride = 0
start = 0
while ride < R:
if first_ride[start] is not None:
break
ridestart = start
seats = 0
groups = 0
while seats + g[start] <= k and groups < N:
seats += g[start]
groups += 1
start += 1
if start >= N:
start = 0
if start == ridestart:
break
first_ride[ridestart] = ride
ride_groups[ridestart] = groups
ride_seats[ridestart] = seats
ride += 1
y += seats
if ride < R:
cyclelen = ride - first_ride[start]
if R - ride >= cyclelen:
cycles = (R - ride) / cyclelen
cycle_euros = 0
cycle_start = start
while True:
cycle_euros += ride_seats[start]
start = (start + ride_groups[start]) % N
ride += 1
if start == cycle_start:
break
y += cycle_euros * cycles
ride += (cycles - 1) * cyclelen
while ride < R:
y += ride_seats[start]
start = (start + ride_groups[start]) % N
ride += 1
print "Case #%d: %d" % (case, y)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
856fd644e4807971a8bf3571c1cb7ce181e1716d | 72ae28eaff4a6f8239392409069782bf9a827a68 | /setup.py | 4ff5f8dc5cac880a51e4e283a489bfc630c258c1 | [] | no_license | GargiVyas31/Elena | d8030547475ca90a1cb745b5c762b51af12bfe92 | 98453b42219d16826a6461135bfdd7bbc4cf91ec | refs/heads/master | 2023-01-24T00:45:16.900630 | 2020-11-24T22:50:03 | 2020-11-24T22:50:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | import os
from setuptools import setup, find_packages
import src
pkg_scripts = []
setup(
name=src.__name__,
long_description='ELena',
version='1.0.0',
platforms=['Any'],
scripts=pkg_scripts,
provides=[],
namespace_packages=[],
packages=find_packages(),
include_package_data=True,
package_data={'src' : []},
zip_safe=False,
) | [
"sayantan@hp.com"
] | sayantan@hp.com |
6f990446cead9603dedfef48a68c2ec8c67a8073 | e970c48f83f74e6c28d51b556c47ae07f18f22fa | /code/sampling_methods.py | d0af413c62743488539e35d91194acfa1ad2c58d | [] | no_license | Loopdiloop/fys-stk4155-project2 | ddd812c3eaea525fa6b959d39391f1ece7877332 | 9efb4fb92581faa64fda188a1a600a47fc4bb594 | refs/heads/master | 2020-09-06T00:35:33.097309 | 2019-11-14T00:13:16 | 2019-11-14T00:13:16 | 220,260,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,147 | py | import numpy as np
import statistical_functions as statistics
from fit_matrix import fit
from functions import franke_function
import copy
class sampling():
def __init__(self, inst):
self.inst = inst
def kfold_cross_validation(self, k, method, deg=5, lambd=1):
"""Method that implements the k-fold cross-validation algorithm. It takes
as input the method we want to use. if "least squares" an ordinary OLS will be evaulated.
if "ridge" then the ridge method will be used, and respectively the same for "lasso"."""
inst = self.inst
lowest_mse = 1e5
self.mse = []
self.R2 = []
self.mse_train = []
self.R2_train = []
self.bias = []
self.variance = []
design_matrix = fit(inst)
whole_DM = design_matrix.create_design_matrix(deg = deg).copy() #design matrix for the whole dataset
whole_z = inst.z_1d.copy() #save the whole output
for i in range(self.inst.k):
#pick the i-th set as test
inst.sort_training_test_kfold(i)
inst.fill_array_test_training()
design_matrix.create_design_matrix(deg = deg) #create design matrix for the training set, and evaluate
if method == "least squares":
z_train, beta_train = design_matrix.fit_design_matrix_numpy()
elif method == "ridge":
z_train, beta_train = design_matrix.fit_design_matrix_ridge(lambd)
elif method == "lasso":
z_train, beta_train = design_matrix.fit_design_matrix_lasso(lambd)
else:
sys.exit("Wrongly designated method: ", method, " not found")
#Find out which values get predicted by the training set
X_test = design_matrix.create_design_matrix(x=inst.test_x_1d, y=inst.test_y_1d, z=inst.test_z_1d, N=inst.N_testing, deg=deg)
z_pred = design_matrix.test_design_matrix(beta_train, X=X_test)
#Take the real values from the dataset for comparison
z_test = inst.test_z_1d
#Calculate the prediction for the whole dataset
whole_z_pred = design_matrix.test_design_matrix(beta_train, X=whole_DM)
# Statistically evaluate the training set with test and predicted solution.
mse, calc_r2 = statistics.calc_statistics(z_test, z_pred)
# Statistically evaluate the training set with itself
mse_train, calc_r2_train = statistics.calc_statistics(inst.z_1d, z_train)
# Get the values for the bias and the variance
bias, variance = statistics.calc_bias_variance(z_test, z_pred)
self.mse.append(mse)
self.R2.append(calc_r2)
self.mse_train.append(mse_train)
self.R2_train.append(calc_r2_train)
self.bias.append(bias)
self.variance.append(variance)
# If needed/wanted:
if abs(mse) < lowest_mse:
lowest_mse = abs(mse)
self.best_predicting_beta = beta_train
| [
"mamoll@uio.no"
] | mamoll@uio.no |
eeb5073afecbaf0f35097a0d4970f139fc0282fd | 014e9a6f3d48ffa7b9ee759904d2e33284a6f4d6 | /api/caoloapi/model/auth.py | c73941f6992e52e8c9728cbae96791221e95e3a7 | [
"MIT"
] | permissive | kissmikijr/caolo-backend | 33c0262239182b96d1215677c45065b4ef90455b | efec05bb793bd40951cb4e5ae4e930d972f63d36 | refs/heads/master | 2023-09-04T01:09:50.068148 | 2021-10-18T22:00:59 | 2021-10-18T22:06:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | from datetime import datetime as dt, timedelta
from passlib.context import CryptContext
from jose import jwt
SECRET_KEY = "fe9fb923daa2a5c34a57b6da5d807a1e9cb48d4afee5c10095bab37bcf860059"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
PEPPER_RANGE = (128, 139, 3)
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
def __concatpw(pw: str, salt: str, pepper):
return f"{pw}{salt}{pepper}"
def verifypw(plain, salt, pepper, hashed_pw):
pw = __concatpw(plain, salt, pepper)
return pwd_context.verify(pw, hashed_pw)
def hashpw(pw: str, salt: str, pepper):
return pwd_context.hash(__concatpw(pw, salt, pepper))
def create_access_token(data: dict):
payload = data.copy()
payload.update({"exp": dt.utcnow() + timedelta(minutes=15)})
return jwt.encode(payload, SECRET_KEY, algorithm=ALGORITHM)
def decode_access_token(token: str):
"""
raises jose.JWTError or AssertionError on invalid token
"""
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
assert "sub" in payload
return payload
| [
"littlesnorrboy@gmail.com"
] | littlesnorrboy@gmail.com |
3e1738529ae55e62ae6843901eca2eb0d436e07a | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractIntheinkpotfictionWordpressCom.py | 5a22827f09f4623da612321d5379b4873ab2b614 | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 576 | py |
def extractIntheinkpotfictionWordpressCom(item):
'''
Parser for 'intheinkpotfiction.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| [
"something@fake-url.com"
] | something@fake-url.com |
c510739d0552b39eefb1a37c3464536b93358cc2 | e0c26c8e9ffc94dd5b1f1757230fd8779dc244bc | /cell/synapse/__init__.py | 21aed928dd6e1bfc544e87bc35ed7278a595e4e9 | [] | no_license | mpelko/neurovivo | 6804e381a78a03164785bf6fa24f84f123a1201f | 10923140da270693988313b36ad6d0ad42bf529a | refs/heads/master | 2016-09-09T18:55:59.331807 | 2014-03-25T18:01:38 | 2014-03-25T18:01:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | from exp2syn_AMPA_synapse import *
from exp2syn_GABA_synapse import * | [
"mpelko@gmail.com"
] | mpelko@gmail.com |
15487621d75896236eb3ebe106a4f8748a6a389b | e43b78db4ff598944e58e593610f537f3833d79c | /py-faster-rcnn/lib/roi_data_layer/roidb.py | 93f713e1f127d432736a654ce6fa292eef3b6c67 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | ZJUZQ/Net_caffe | 577e9b3e80a391d772a21c27639465d539fceb1f | bed3c7384a259339c5a0fb2ea34fa0cdd32ddd29 | refs/heads/master | 2021-09-08T12:19:37.039970 | 2018-03-09T14:44:24 | 2018-03-09T14:44:24 | 114,853,721 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,356 | py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Transform a roidb into a trainable roidb by adding a bunch of metadata."""
import numpy as np
from fast_rcnn.config import cfg
from fast_rcnn.bbox_transform import bbox_transform
from utils.cython_bbox import bbox_overlaps
import PIL
def prepare_roidb(imdb):
"""Enrich the imdb's roidb by adding some derived quantities that
are useful for training. This function precomputes the maximum
overlap, taken over ground-truth boxes, between each ROI and
each ground-truth box. The class with maximum overlap is also
recorded.
"""
sizes = [PIL.Image.open(imdb.image_path_at(i)).size
for i in xrange(imdb.num_images)]
roidb = imdb.roidb
# roidb is a list of dictionaries, each with the following keys:
# boxes
# gt_overlaps
# gt_classes
# flipped
for i in xrange(len(imdb.image_index)):
roidb[i]['image'] = imdb.image_path_at(i)
roidb[i]['width'] = sizes[i][0]
roidb[i]['height'] = sizes[i][1]
# need gt_overlaps as a dense array for argmax
gt_overlaps = roidb[i]['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
roidb[i]['max_classes'] = max_classes ## gt class that had the max overlap (columns)
roidb[i]['max_overlaps'] = max_overlaps ## max overlap with gt over classes (columns)
# sanity checks
# max overlap of 0 => class should be zero (background)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# max overlap > 0 => class should not be zero (must be a fg class)
nonzero_inds = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_inds] != 0)
def add_bbox_regression_targets(roidb):
"""Add information needed to train bounding-box regressors."""
assert len(roidb) > 0
assert 'max_classes' in roidb[0], 'Did you call prepare_roidb first?'
num_images = len(roidb)
# Infer number of classes from the number of columns in gt_overlaps
num_classes = roidb[0]['gt_overlaps'].shape[1]
for im_i in xrange(num_images):
rois = roidb[im_i]['boxes']
max_overlaps = roidb[im_i]['max_overlaps']
max_classes = roidb[im_i]['max_classes']
roidb[im_i]['bbox_targets'] = \
_compute_targets(rois, overlaps=max_overlaps, labels=max_classes) # Compute bounding-box regression targets for an image
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED: ## RPN can only use precomputed normalization because there are no fixed statistics to compute a priori
# Use fixed / precomputed "means" and "stds" instead of empirical values
means = np.tile(
np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS), (num_classes, 1)) # shape = (num_classes, 4)
stds = np.tile(
np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS), (num_classes, 1)) # shape = (num_classes, 4)
else:
# Compute values needed for means and stds
# var(x) = E(x^2) - E(x)^2
class_counts = np.zeros((num_classes, 1)) + cfg.EPS
sums = np.zeros((num_classes, 4))
squared_sums = np.zeros((num_classes, 4))
for im_i in xrange(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in xrange(1, num_classes):
cls_inds = np.where(targets[:, 0] == cls)[0]
if cls_inds.size > 0:
class_counts[cls] += cls_inds.size
sums[cls, :] += targets[cls_inds, 1:].sum(axis=0)
squared_sums[cls, :] += \
(targets[cls_inds, 1:] ** 2).sum(axis=0)
means = sums / class_counts
stds = np.sqrt(squared_sums / class_counts - means ** 2)
print 'bbox target means:'
print means
print means[1:, :].mean(axis=0) # ignore bg class
print 'bbox target stdevs:'
print stds
print stds[1:, :].mean(axis=0) # ignore bg class
# Normalize targets
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS:
print "Normalizing targets"
for im_i in xrange(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in xrange(1, num_classes):
cls_inds = np.where(targets[:, 0] == cls)[0]
roidb[im_i]['bbox_targets'][cls_inds, 1:] -= means[cls, :]
roidb[im_i]['bbox_targets'][cls_inds, 1:] /= stds[cls, :]
else:
print "NOT normalizing targets"
# These values will be needed for making predictions
# (the predicts will need to be unnormalized and uncentered)
return means.ravel(), stds.ravel() ## Return a contiguous flattened array
def _compute_targets(rois, overlaps, labels):
"""Compute bounding-box regression targets for an image."""
"""
overlaps: max_overlaps of rois
labels: max_classes of rois
return:
[[cls, dx, dy, dw, dh]
...
]
"""
# Indices of ground-truth ROIs
gt_inds = np.where(overlaps == 1)[0]
if len(gt_inds) == 0:
# Fail if the image has no ground-truth ROIs
return np.zeros((rois.shape[0], 5), dtype=np.float32)
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0] ## e.g., 0.5
# Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = bbox_overlaps(
np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
targets[ex_inds, 0] = labels[ex_inds]
targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois) # compute [dx, dy, dw, dh]
return targets
| [
"zhouqiang@zju.edu.cn"
] | zhouqiang@zju.edu.cn |
f0612efb0fad45a35627608742c836a5c0fd909d | ce7c86b8c9a193d421d7e67b794d81f65ef92c1c | /Day10/day_10.py | 4e9b016b695e356eaf9c9bea80808fcfd277dca2 | [] | no_license | Panagiotis-Zachos/advent-of-code-2020 | a00f9728c4e332940b23f0c3ab904183914971b5 | b305b900eed2d96a1997ee58450d9a11aa3137bf | refs/heads/main | 2023-02-11T04:11:41.908362 | 2021-01-02T18:46:11 | 2021-01-02T18:46:11 | 324,453,531 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | adapters = list(map(int, open('input.txt').read().split('\n')))
adapters.sort()
# Part 1
dif1 = 1
dif3 = 1
for i in range(1, len(adapters)):
dif = adapters[i] - adapters[i-1]
if dif == 1: dif1 += 1
elif dif == 3: dif3 += 1
print(dif3*dif1)
# Part 2.1
adapters.append(adapters[-1] + 3)
adapters.insert(0, 0)
paths = [0 for i in range(len(adapters))]
paths[0] = 1
for i in range(1, len(adapters)):
paths[i] = paths[i - 1]
if i > 1 and adapters[i] - adapters[i - 2] <= 3:
paths[i] += paths[i - 2]
if i > 2 and adapters[i] - adapters[ i - 3] <= 3:
paths[i] += paths[i - 3]
print(paths[-1])
# Part 2.2
curr = 0
p1 = 1
p2 = 0
p3 = 0
for i in range(1, len(adapters)):
curr = p1
if i > 1 and adapters[i] - adapters[i - 2] <= 3:
curr += p2
if i > 2 and adapters[i] - adapters[ i - 3] <= 3:
curr += p3
p3, p2, p1 = (p2, p1, curr)
print(curr)
| [
"noreply@github.com"
] | Panagiotis-Zachos.noreply@github.com |
6d8c2fe636110f1a7126213981d239f0d387291c | 185820587b6a0663f8aac706384fa7e00b236bb9 | /samples/python/clusqmgr_suspend.py | af9c8f5fe63ae203a40c03995e0ed6d08bba2407 | [
"MIT"
] | permissive | hlkiltas/mqweb | 9c70252d6af442e45de3a18726b69dcda65db94a | c651014765018d32b5d49054ff8c36618d50093d | refs/heads/master | 2020-03-27T01:51:56.858586 | 2017-09-22T12:11:20 | 2017-09-22T12:11:20 | 145,749,223 | 2 | 0 | MIT | 2018-08-22T18:46:21 | 2018-08-22T18:46:20 | null | UTF-8 | Python | false | false | 878 | py | '''
Supends a queuemanager in a cluster
MQWeb runs on localhost and is listening on port 8081.
'''
import sys
import json
import httplib
import socket
if len(sys.argv) < 3 :
print 'Please pass me the name of a queuemanager and a clustername as argument'
sys.exit(1)
url = "/api/clusqmgr/suspend/" + sys.argv[1] + '/' + sys.argv[2];
try:
conn = httplib.HTTPConnection('localhost', 8081)
conn.request('GET', url)
res = conn.getresponse()
result = json.loads(res.read())
if 'error' in result:
print ('Received a WebSphere MQ error: ' +
str(result['error']['reason']['code'])
)
else:
print "Suspend command succesfully send."
except httplib.HTTPException as e:
print ('An HTTP error occurred while suspending the queuemanager in the cluster: ' +
e.errno + e.strerror
)
except socket.error as e:
print e.strerror
print 'Is the MQWeb daemon running?'
| [
"franky.braem@gmail.com"
] | franky.braem@gmail.com |
ad73927538d2a6b51e3e9da4eaa96818ced5e08a | f714db4463dd37fc33382364dc4b1963a9053e49 | /tests/sentry/event_manager/interfaces/test_frame.py | 22dd3b8b5756050429bafb0bd12c3db6daa422ae | [
"BUSL-1.1",
"Apache-2.0"
] | permissive | macher91/sentry | 92171c2ad23564bf52627fcd711855685b138cbd | dd94d574403c95eaea6d4ccf93526577f3d9261b | refs/heads/master | 2021-07-07T08:23:53.339912 | 2020-07-21T08:03:55 | 2020-07-21T08:03:55 | 140,079,930 | 0 | 0 | BSD-3-Clause | 2020-05-13T11:28:35 | 2018-07-07T11:50:48 | Python | UTF-8 | Python | false | false | 1,366 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import pytest
from sentry import eventstore
from sentry.event_manager import EventManager
@pytest.fixture
def make_frames_snapshot(insta_snapshot):
def inner(data):
mgr = EventManager(data={"stacktrace": {"frames": [data]}})
mgr.normalize()
evt = eventstore.create_event(data=mgr.get_data())
frame = evt.interfaces["stacktrace"].frames[0]
insta_snapshot({"errors": evt.data.get("errors"), "to_json": frame.to_json()})
return inner
@pytest.mark.parametrize(
"input",
[
{"filename": 1},
{"filename": "foo", "abs_path": 1},
{"function": 1},
{"module": 1},
{"function": "?"},
],
)
def test_bad_input(make_frames_snapshot, input):
make_frames_snapshot(input)
@pytest.mark.parametrize(
"x", [float("inf"), float("-inf"), float("nan")], ids=["inf", "neginf", "nan"]
)
def test_context_with_nan(make_frames_snapshot, x):
make_frames_snapshot({"filename": "x", "vars": {"x": x}})
def test_address_normalization(make_frames_snapshot):
make_frames_snapshot(
{
"lineno": 1,
"filename": "blah.c",
"function": "main",
"instruction_addr": 123456,
"symbol_addr": "123450",
"image_addr": "0x0",
}
)
| [
"noreply@github.com"
] | macher91.noreply@github.com |
44891d7b9820b9884945e121c98a187821ebb72e | ef92b7c2ade5905ada8da7a55e7708f318017a9f | /test/__init__.py | 8c7f1088a0651c40bbc94c63dbc66ca8a0cad193 | [] | no_license | nolanbconaway/raspi-nyc-train-display | 1bba781b6420c1e96a29279caeb14b6ade04d918 | 4865629fb97788d649dfd75a27622b5cfa02638b | refs/heads/master | 2020-07-06T13:52:19.100199 | 2020-04-06T02:06:29 | 2020-04-06T02:06:29 | 203,038,726 | 0 | 0 | null | 2020-04-06T02:06:30 | 2019-08-18T17:56:57 | Python | UTF-8 | Python | false | false | 309 | py | """Test package."""
def epoch_to_datetime(epoch):
"""Convert epoch time into a datetime in NYC timezone."""
import datetime
import pytz
return (
pytz.timezone("UTC")
.localize(datetime.datetime.utcfromtimestamp(epoch))
.astimezone(pytz.timezone("US/Eastern"))
)
| [
"nolanbconaway@gmail.com"
] | nolanbconaway@gmail.com |
79ec05a20274e044cf8660379e72fb5c8f2319f4 | 405bc4aee79e340a3b41f0f8e3f143118caa7cf6 | /hcipy/atmosphere/standard_atmosphere.py | 1442788a1ad31dcd2827ffc4c897f06293d2af69 | [
"MIT"
] | permissive | rahulbhadani/hcipy | c6c2fd487974ef7534a68f2a5036dc418609fc0a | b52726cb9502b5225ddff9d7b1ff417f2350cda8 | refs/heads/master | 2020-07-06T07:23:43.688016 | 2019-08-02T12:00:28 | 2019-08-02T12:00:28 | 202,938,491 | 0 | 0 | MIT | 2019-08-17T22:58:06 | 2019-08-17T22:58:05 | null | UTF-8 | Python | false | false | 539 | py | from .infinite_atmospheric_layer import InfiniteAtmosphericLayer
from .atmospheric_model import MultiLayerAtmosphere
import numpy as np
def make_standard_atmospheric_layers(input_grid, L0=10):
heights = np.array([500, 1000, 2000, 4000, 8000, 16000])
velocities = np.array([10, 10, 10, 10, 10, 10])
Cn_squared = np.array([0.2283, 0.0883, 0.0666, 0.1458, 0.3350, 0.1350]) * 1e-12
layers = []
for h, v, cn in zip(heights, velocities, Cn_squared):
layers.append(InfiniteAtmosphericLayer(input_grid, cn, L0, v, h, 2))
return layers | [
"por@strw.leidenuniv.nl"
] | por@strw.leidenuniv.nl |
716beeecaa9af980ba1af4c2e9ae8ada8085d998 | 86fb27e01bb1f2d203252aad4e375fba853f30ee | /KNN.py | 5ad9ccbdbb3e3cd1351e56d405d81e1cc96aefcc | [] | no_license | Bhuvaneshwari-Anand/Kirana | e0e8a1c49732ae5e19bc2459db7efd423ce8b5ff | d3651b7e6c0e8390c1659e6d404f506e5201c797 | refs/heads/main | 2023-01-12T21:38:05.905860 | 2020-11-19T15:01:08 | 2020-11-19T15:01:08 | 314,262,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,102 | py | import math
import pandas
import numpy as np
#the dimensions of the item as input
x = float(input())
y = float(input())
#reading the data from the csv file
data = pandas.read_csv("F:\Contest\Infra Mind\Excel and CSV\DatasetFinal.csv")
#store the dataset items in separate arrays for each column
iden = data["ID"].values
item = data["Item"].values
height = data["Height"].values
width = data["Width"].values
#the array data are now stored in separate lists
pid = np.array(iden).tolist()
pitem = np.array(item).tolist()
h = np.array(height).tolist()
w = np.array(width).tolist()
# n is the number of items present in the dataset
n = len(pitem)
#declare a list to store the error range
RangeErr = []
#calculate the error range for each item with the given input and store in the list
for i in range(0,n):
temp = ((h[i] - x)**2) + ((w[i] - y)**2)
temp1 = math.sqrt(temp)
RangeErr.append(temp1)
#postion of the output
position = RangeErr.index(min(RangeErr))
#print the outputs
print("The item is ",end="")
print(pitem[position])
print("The product ID is ",end="")
print(pid[position])
| [
"bhuvaneshwari119anand@gmail.com"
] | bhuvaneshwari119anand@gmail.com |
3a46b739fdd3269370d45b82b4103d66bc0a5353 | 1718a0e60b3df6bb23ea50e57bc2a39e268c0d53 | /store_app/views.py | a452b60bee841fcbf43da93e842bf057b9cac01a | [] | no_license | ckizer86/final | 551be3fc3e0e6021a5103acc645238f0d5ddc905 | c6fd0fd8ffe46c23d9fe6f6b7138cce44b32fa1c | refs/heads/main | 2023-05-28T18:50:48.939996 | 2021-06-08T23:36:27 | 2021-06-08T23:36:27 | 374,507,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,172 | py | from django.db.models import fields
from django.shortcuts import render, redirect
from django.http.response import JsonResponse, HttpResponse
from django.views.generic import FormView
from django.urls import reverse
from django.conf import settings
from django.http.response import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import stripe
from django.contrib import messages
import bcrypt
from time import gmtime, localtime, strftime
from datetime import date, datetime
from .models import *
import ast
# payments/views.py
@csrf_exempt
def stripe_webhook(request):
    """Stripe webhook endpoint: verify the event signature, then ack.

    Returns 400 for a malformed payload or bad signature, 200 otherwise.
    CSRF is exempted because Stripe posts here without a Django session.
    """
    stripe.api_key = settings.STRIPE_SECRET_KEY
    endpoint_secret = settings.STRIPE_ENDPOINT_SECRET
    payload = request.body
    sig_header = request.META['HTTP_STRIPE_SIGNATURE']
    event = None
    try:
        # construct_event validates the payload against the signing secret
        event = stripe.Webhook.construct_event(
            payload, sig_header, endpoint_secret
        )
    except ValueError as e:
        # Invalid payload
        return HttpResponse(status=400)
    except stripe.error.SignatureVerificationError as e:
        # Invalid signature
        return HttpResponse(status=400)
    # Handle the checkout.session.completed event
    if event['type'] == 'checkout.session.completed':
        print("Payment was successful.")
        # TODO: run some custom code here
    return HttpResponse(status=200)
def SuccessView(request):
    """Landing page shown after a successful Stripe checkout."""
    return render(request, "success.html")
def CancelledView(request):
    """Landing page shown after a cancelled Stripe checkout."""
    return render(request, "cancelled.html")
@csrf_exempt
def create_checkout_session(request):
    """Create a Stripe Checkout session for a hard-coded test item and
    return its session id as JSON for the frontend redirect."""
    if request.method == 'GET':
        domain_url = 'http://localhost:8000/'
        stripe.api_key = settings.STRIPE_SECRET_KEY
        try:
            # Create new Checkout Session for the order
            # Other optional params include:
            # [billing_address_collection] - to display billing address details on the page
            # [customer] - if you have an existing Stripe Customer ID
            # [payment_intent_data] - capture the payment later
            # [customer_email] - prefill the email input in the form
            # For full details see https://stripe.com/docs/api/checkout/sessions/create

            # ?session_id={CHECKOUT_SESSION_ID} means the redirect will have the session ID set as a query param
            checkout_session = stripe.checkout.Session.create(
                client_reference_id=request.user.id if request.user.is_authenticated else None,
                success_url=domain_url + 'success?session_id={CHECKOUT_SESSION_ID}',
                cancel_url=domain_url + 'cancelled/',
                payment_method_types=['card'],
                mode='payment',
                line_items=[
                    {
                        'name': 'T-shirt',
                        'quantity': 1,
                        'currency': 'usd',
                        'amount': '2000',
                    }
                ]
            )
            return JsonResponse({'sessionId': checkout_session['id']})
        except Exception as e:
            # surfaced to the frontend; Stripe errors are user-visible here
            return JsonResponse({'error': str(e)})
# new
@csrf_exempt
def stripe_config(request):
    """Expose the Stripe publishable key as JSON for the checkout frontend."""
    if request.method == 'GET':
        return JsonResponse({'publicKey': settings.STRIPE_PUBLISHABLE_KEY}, safe=False)
# Create your views here.
def index(request):
    """Landing page listing every product, category and store."""
    return render(
        request,
        "index.html",
        {
            "all_products": Product.objects.all(),
            "all_categories": Category.objects.all(),
            "all_stores": Store.objects.all(),
        },
    )
def login_page(request):
    """Render the login form; already-authenticated users go to the dashboard."""
    if "user_id" not in request.session:
        return render(request, "login.html")
    return redirect('/dashboard')
def login(request):
    """Authenticate against the stored bcrypt hash and open a session."""
    if request.method == "POST":
        errors = User.objects.loginvalidation(request.POST)
        if errors:
            for error in errors.values():
                messages.error(request,error)
            return redirect('/login')
        email = request.POST['email']
        # loginvalidation is assumed to guarantee the email exists -- TODO confirm
        logged_user = User.objects.filter(email=email)
        logged_user = logged_user[0]
        if bcrypt.checkpw(request.POST['pw'].encode(), logged_user.password.encode()):
            request.session["user_id"] = logged_user.id
            request.session["username"] = f"{logged_user.first_name} {logged_user.last_name}"
            return redirect('/dashboard')
        else:
            messages.error(request, "Invalid password")
            return redirect('/login')
    return redirect('/login')
def register_page(request):
    """Render the account registration form."""
    return render(request, "register.html")
def register(request):
    """Create an account from the registration form and log it straight in."""
    if request.method == "POST":
        errors = User.objects.registervalidation(request.POST)
        if errors:
            for error in errors.values():
                messages.error(request,error)
            return redirect('/register')
        first_name = request.POST['first_name']
        last_name = request.POST['last_name']
        email = request.POST['email']
        # only the bcrypt hash is persisted, never the plain password
        password = bcrypt.hashpw(request.POST["pw"].encode(), bcrypt.gensalt()).decode()
        dob = request.POST['dob']
        address_1 = request.POST['address1']
        address_2 = request.POST['address2']
        city = request.POST['city']
        state = request.POST['state']
        zip = request.POST['zip']
        user = User.objects.create(first_name=first_name, last_name=last_name, email=email, password=password, dob=dob, address_1=address_1, address_2=address_2, city=city, state=state, zip=zip)
        request.session["user_id"] = user.id
        request.session["username"] = f"{user.first_name} {user.last_name}"
        return redirect('/dashboard')
    return redirect('/register')
def category(request, id):
    """List every product belonging to category `id`."""
    selected = Category.objects.get(id=id)
    return render(
        request,
        "category.html",
        {
            "catproducts": selected.product.all(),
            "all_categories": Category.objects.all(),
            "category": selected,
        },
    )
def product(request, id):
    """Product detail page.

    Anonymous visitors get only the product and category list; logged-in
    users additionally get their User row and their like status.
    Fix: the original duplicated the context/render in two branches.
    """
    productinfo = Product.objects.get(id=id)
    context = {
        "product": productinfo,
        "all_categories": Category.objects.all(),
    }
    if "user_id" in request.session:
        userid = request.session["user_id"]
        # queryset is non-empty iff this user already liked the product
        context["likes"] = productinfo.likes.filter(id=userid)
        context["user"] = User.objects.get(id=userid)
    return render(request, "product.html", context)
def addcat(request):
    """Create a new product category (admin level 3 only)."""
    if "user_id" not in request.session:
        return redirect ('/login')
    userid = request.session["user_id"]
    user = User.objects.get(id=userid)
    # level 3 marks administrators throughout this module
    if user.level != 3:
        return redirect('/dashboard')
    if request.method == "POST":
        # catvalidation is assumed to reject empty/duplicate names -- TODO confirm
        errors = User.objects.catvalidation(request.POST)
        if errors:
            for error in errors.values():
                messages.error(request,error)
            return redirect('/admin/add_product')
        name = request.POST['name']
        Category.objects.create(name=name)
        return redirect('/admin/add_product')
    return redirect('/admin')
def addcart(request):
    """Add `quantity` units of a product to the cart (one Cart row each).

    Fixes: drops the unused `total` local and the no-op `count += 1`, and
    hoists the per-iteration user.save() out of the loop (same end state:
    user.total grows by amount * quantity).
    """
    if "user_id" not in request.session:
        return redirect('/login')
    if request.method == "POST":
        user = User.objects.get(id=request.session["user_id"])
        pid = request.POST['pid']
        quantity = int(request.POST['quantity'])
        product = Product.objects.get(id=pid)
        # NOTE(review): stock is not validated and may go negative -- confirm intended
        product.stock = product.stock - quantity
        product.save()
        for _ in range(quantity):
            Cart.objects.create(
                user=user, pid=pid, pic=product.pic, name=product.name, amount=product.amount
            )
        user.total = user.total + product.amount * quantity
        user.save()
    return redirect('/cart')
def removecart(request,id):
    """Remove one unit of product `id` from the user's cart and restock it."""
    if "user_id" not in request.session:
        return redirect ('/login')
    pid = id
    userid = request.session["user_id"]
    user = User.objects.get(id=userid)
    cart = user.usecart.all()
    product = Product.objects.get(id=pid)
    for item in cart:
        # NOTE(review): item.pid was stored from POST data (a string in addcart);
        # this comparison assumes `id` from the URL arrives with the same type -- confirm.
        if item.pid == pid:
            rid = item.id
            removeitem = Cart.objects.get(id=rid)
            product.stock += 1
            product.save()
            user.total = user.total - product.amount
            user.save()
            removeitem.delete()
            # only the first matching cart row is removed per request
            return redirect('/cart')
    return redirect('/cart')
def cart(request):
    """Render the cart page with subtotal, 8.25% tax and a flat 5.00 shipping fee."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    subtotal = viewer.total
    tax = float(subtotal * .0825)
    shipping = float(5.00)
    return render(
        request,
        "cart.html",
        {
            "all_categories": Category.objects.all(),
            "cart_products": viewer.usecart.all(),
            "user": viewer,
            "subtotal": subtotal,
            "shipping": shipping,
            "tax": tax,
            "total": float(subtotal + tax + shipping),
        },
    )
def likeditems(request):
    """Show every product the logged-in user has liked."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    return render(
        request,
        "like.html",
        {
            "liked_products": viewer.userlike.all(),
            "all_categories": Category.objects.all(),
        },
    )
def likeitem(request):
    """Record a like by the logged-in user, then return to the product page."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    if request.method != "POST":
        return redirect('/')
    product_id = request.POST['postid']
    Product.objects.get(id=product_id).likes.add(viewer)
    return redirect(f'/product/{product_id}')
def unlikeitem(request):
    """Withdraw the logged-in user's like, then return to the product page."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    if request.method != "POST":
        return redirect('/')
    product_id = request.POST['postid']
    Product.objects.get(id=product_id).likes.remove(viewer)
    return redirect(f'/product/{product_id}')
def dashboard(request):
    """User dashboard; admins (level 3) are sent to the admin dashboard.

    Fix: removed the duplicated, redundant session check the original
    performed a second time after already fetching the user.
    """
    if "user_id" not in request.session:
        return redirect('/login')
    user = User.objects.get(id=request.session["user_id"])
    if user.level == 3:
        return redirect('/admin')
    return render(request, "dashboard.html")
def accountinfo(request):
    """Profile form, with birth month/day zero-padded for the date inputs."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    return render(
        request,
        "accountinfo.html",
        {
            "user": viewer,
            "month": '{:02d}'.format(viewer.dob.month),
            "day": '{:02d}'.format(viewer.dob.day),
        },
    )
def accountupdate(request):
    """Persist profile-form changes for the logged-in user.

    Fix: the original read request.session["user_id"] without the login
    guard every sibling view has, so an anonymous POST raised KeyError;
    redirect to /login instead.
    """
    if "user_id" not in request.session:
        return redirect('/login')
    if request.method == "POST":
        user = User.objects.get(id=request.session["user_id"])
        user.first_name = request.POST['first_name']
        user.last_name = request.POST['last_name']
        user.email = request.POST['email']
        # NOTE(review): the password is unconditionally re-hashed from
        # "new_pw", even when that field is empty -- confirm intended.
        user.password = bcrypt.hashpw(request.POST["new_pw"].encode(), bcrypt.gensalt()).decode()
        user.dob = request.POST['dob']
        user.address_1 = request.POST['address1']
        user.address_2 = request.POST['address2']
        user.city = request.POST['city']
        user.state = request.POST['state']
        user.zip = request.POST['zip']
        user.save()
        return redirect('/dashboard/account')
    return redirect('/')
def recentorders(request):
    """List the logged-in user's past orders."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    return render(request, "recentorders.html", {"userorders": viewer.userorders.all()})
def submitorder(request):
    """Turn the user's cart into an Order row, then empty the cart."""
    if "user_id" not in request.session:
        return redirect ('/login')
    if request.method == "POST":
        userid = request.session["user_id"]
        user = User.objects.get(id=userid)
        # literal_eval parses the client-posted numbers; it is safe against
        # code execution, but malformed input raises ValueError (500)
        subtotal = ast.literal_eval(request.POST['subtotal'])
        tax = ast.literal_eval(request.POST['tax'])
        shipping = ast.literal_eval(request.POST['shipping'])
        usercart = user.usecart.all()
        productlist = {"product":[]}
        total = float(subtotal + tax + shipping)
        for product in usercart:
            rid = product.id
            productid = Cart.objects.get(id=rid)
            pid = productid.pid
            orderproduct = Product.objects.get(id=pid)
            pamount = str("{:.2f}".format(orderproduct.amount))
            prodid = str(orderproduct.id)
            # the dict is stored as text on Order.product and parsed back
            # with ast.literal_eval in the vieworder views
            productlist["product"].append('Product ID: ' + prodid + ' - ' + orderproduct.name + " : " + pamount)
            destroyitem = Cart.objects.get(id=rid)
            destroyitem.delete()
        Order.objects.create(product=productlist, user=user, subtotal=subtotal, tax=tax, total=total, shipping=shipping)
        user.total = 0
        user.save()
        return redirect('/dashboard')
    return redirect('/')
def vieworder(request, id):
    """Detail view of one of the logged-in user's own orders.

    Iterating the user's orders doubles as the ownership check: foreign
    order ids fall through to the dashboard redirect.
    """
    if "user_id" not in request.session:
        return redirect ('/login')
    userid = request.session["user_id"]
    user = User.objects.get(id=userid)
    for order in user.userorders.all():
        if order.id == id:
            order = Order.objects.get(id=id)
            # the product column stores a repr of a dict; parse it back
            product_dict = ast.literal_eval(order.product)
            context = {
                "order":order,
                "productlist": product_dict,
            }
            return render(request, "vieworder.html", context)
    return redirect('/dashboard')
def admindash(request):
    """Admin (level 3) dashboard; other users bounce to their own dashboard."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    if viewer.level != 3:
        return redirect('/dashboard')
    return render(request, "admindashboard.html")
def adminneworders(request):
    """Admin-only list of new orders."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    if viewer.level != 3:
        return redirect('/dashboard')
    return render(request, "adminneworders.html", {"orders": Order.objects.all()})
def adminpastorders(request):
    """Admin-only list of past orders."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    if viewer.level != 3:
        return redirect('/dashboard')
    return render(request, "adminpastorders.html", {"orders": Order.objects.all()})
def adminvieworder(request, id):
    """Admin-only detail view of any single order."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    if viewer.level != 3:
        return redirect('/dashboard')
    order = Order.objects.get(id=id)
    return render(
        request,
        "adminvieworder.html",
        {
            "order": order,
            # the product column holds a repr of a dict; parse it back safely
            "productlist": ast.literal_eval(order.product),
        },
    )
def updatetracking(request):
    """Admin-only: attach a tracking number to an order."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    if viewer.level != 3:
        return redirect('/dashboard')
    if request.method != "POST":
        return redirect('/admin')
    order_id = request.POST['oid']
    order = Order.objects.get(id=order_id)
    order.tracking = request.POST['tracking']
    order.save()
    return redirect(f'/admin/order/{order_id}')
def products(request):
    """Admin-only listing of every product and category."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    if viewer.level != 3:
        return redirect('/dashboard')
    return render(
        request,
        "products.html",
        {
            "all_products": Product.objects.all(),
            "all_categories": Category.objects.all(),
        },
    )
def addprod(request):
    """Admin-only product creation form."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    if viewer.level != 3:
        return redirect('/dashboard')
    return render(request, "addproduct.html", {'all_categories': Category.objects.all()})
def addingprod(request):
    """Create a product from the admin form and attach its categories."""
    if "user_id" not in request.session:
        return redirect ('/login')
    userid = request.session["user_id"]
    user = User.objects.get(id=userid)
    if user.level != 3:
        return redirect('/dashboard')
    if request.method == "POST":
        errors = Product.objects.createproduct(request.POST)
        if errors:
            for error in errors.values():
                messages.error(request,error)
            return redirect('/admin/add_product')
        name = request.POST['name']
        desc = request.POST['desc']
        amount = request.POST['amt']
        pic = request.POST['pic']
        stock = request.POST['stock']
        product = Product.objects.create(name=name, desc=desc, amount=amount, pic=pic, stock=stock)
        # "categories" is a multi-select of category ids
        categories = request.POST.getlist('categories')
        for category in categories:
            product.categories.add(category)
        return redirect(f'/product/{product.id}')
    return redirect('/admin/products')
def editprod(request, id):
    """Admin-only edit form for one product, split into current vs. other categories."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    if viewer.level != 3:
        return redirect('/dashboard')
    product = Product.objects.get(id=id)
    return render(
        request,
        "editproduct.html",
        {
            "product": product,
            "excats": Category.objects.exclude(product=id),
            "currentcats": product.categories.all(),
        },
    )
def edittingprod(request):
    """Apply the product-edit form (admin only).

    Fix: the original had no login/admin guard, unlike editprod and every
    other admin view, so any request could rewrite a product.
    """
    if "user_id" not in request.session:
        return redirect('/login')
    user = User.objects.get(id=request.session["user_id"])
    if user.level != 3:
        return redirect('/dashboard')
    if request.method == "POST":
        product_id = request.POST['pid']
        product = Product.objects.get(id=product_id)
        # replace the whole category set with the newly-checked ones;
        # clear() is equivalent to removing every category one by one
        product.categories.clear()
        for category_id in request.POST.getlist('categories'):
            product.categories.add(category_id)
        product.name = request.POST['name']
        product.desc = request.POST['desc']
        product.amount = request.POST['amt']
        product.pic = request.POST['pic']
        product.stock = request.POST['stock']
        product.save()
        return redirect(f'/admin/product/edit/{product_id}')
    return redirect('/')
def storeinfo(request):
    """Admin-only page listing the store records."""
    if "user_id" not in request.session:
        return redirect('/login')
    viewer = User.objects.get(id=request.session["user_id"])
    if viewer.level != 3:
        return redirect('/dashboard')
    return render(request, "store.html", {"store": Store.objects.all()})
def createstore(request):
    """Create a new store record.

    Fix: added the login/admin guard the original omitted; every other
    store/product mutation view in this module requires level 3.
    """
    if "user_id" not in request.session:
        return redirect('/login')
    user = User.objects.get(id=request.session["user_id"])
    if user.level != 3:
        return redirect('/dashboard')
    if request.method == "POST":
        Store.objects.create(
            name=request.POST['storename'],
            address_1=request.POST['address1'],
            address_2=request.POST['address2'],
            city=request.POST['city'],
            state=request.POST['state'],
            zip=request.POST['zip'],
        )
        return redirect('/admin/store')
    return redirect('/')
def editstore(request):
    """Update an existing store record.

    Fix: added the login/admin guard the original omitted, matching
    storeinfo and the other admin mutation views.
    """
    if "user_id" not in request.session:
        return redirect('/login')
    user = User.objects.get(id=request.session["user_id"])
    if user.level != 3:
        return redirect('/dashboard')
    if request.method == "POST":
        store = Store.objects.get(id=request.POST['storeid'])
        store.name = request.POST['storename']
        store.address_1 = request.POST['address1']
        store.address_2 = request.POST['address2']
        store.city = request.POST['city']
        store.state = request.POST['state']
        store.zip = request.POST['zip']
        store.save()
        return redirect('/admin/store')
    return redirect('/')
def logout(request):
    """Clear the whole session and return to the landing page."""
    request.session.flush()
    return redirect('/')
"ckizer86@yahoo.com"
] | ckizer86@yahoo.com |
914431d0adb4b2c7286505f6d7737d82868ba8a8 | 699737150c95c92d2acbdf612e931ca6ccd8ba81 | /a116_buggy_image_ar_version_29-31.py | 86269fa5d5c1116396cd0cdcc4dd13e0c1e34d6e | [] | no_license | AntonioR0211/buggyimage | 25f25a3a8f03eb007231639ad6dfdc2295ad7cf2 | 899a1a843af4429acd5f5e686028c97448c79962 | refs/heads/main | 2023-01-13T20:50:45.582647 | 2020-11-14T07:08:01 | 2020-11-14T07:08:01 | 312,750,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | # a116_buggy_image.py
import turtle as trtl
# Draws a spider: a thick-pen circle for the body, `Leg` evenly spaced
# legs radiating from it, and two red eyes.
Spider = trtl.Turtle()
# A very thick pen turns the small circle into a filled-looking body.
Spider.pensize(40)
Spider.circle(20)
Spider.goto(20,20)
# Number of legs to draw.
Leg = 8
y1 = 70
y2 = y1  # NOTE(review): y2 is never used afterwards
# Angular spacing between consecutive legs (360 degrees / leg count).
leglength = 360 / Leg
print("leglength=", leglength)
Spider.pensize(5)
up = 0
# Draw each leg from the body centre, rotating by `leglength` each time.
while (up < Leg):
    Spider.goto(0,20)
    Spider.setheading(leglength*up - 45)
    Spider.forward(y1)
    print(Leg < y1)  # NOTE(review): leftover debug print (always True here)
    up = up + 1
Spider.hideturtle()
# Eyes: two tiny red circles on the body.
Spider.penup()
Spider.goto(20,20)
Spider.pendown()
Spider.color("Red")
Spider.circle(0.1)
Spider.penup()
Spider.goto(-20,20)
Spider.pendown()
Spider.circle(0.1)
wn = trtl.Screen()
wn.mainloop()
| [
"noreply@github.com"
] | AntonioR0211.noreply@github.com |
c18140fcd9465540c29e5bf57783ab8ebbab2f3c | 2cc5ad64d812b94508ac9d817d33072e633231ca | /Mission 2/Contest 2.2/Zhou-Xinming-3-2d.py | 51351a85c5457f3e68127e8c5171bd1b30a5995f | [] | no_license | xinmingzh/CS1010X | 6943c06dcd53730e23b54e1fa3df12734cb7c39a | d3613899e82321d06544a7ad6e15d759aaebc988 | refs/heads/master | 2021-04-27T03:00:53.173961 | 2018-02-24T05:45:16 | 2018-02-24T05:45:16 | 120,771,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | #
# CS1010X --- Programming Methodology
#
# Mission 2 - 2D Contest
#
# Note that written answers are commented out to allow us to run your
# code easily while grading your problem set.
from runes import *
########
# Task #
########
# You may submit up to 3 entries. Please update your entry number below.
# Entry 3 of 3
# ============
# Contest entry: layer repeated make_cross applications of rcross_bb,
# then overlay an upside-down corner_bb cross on a scaled-down copy.
bb = overlay_frac(0, make_cross(make_cross(rcross_bb)), make_cross(rcross_bb))
show(overlay_frac(0, make_cross(turn_upside_down(corner_bb)), scale(0.8, bb)))
| [
"xinming.zh@gmail.com"
] | xinming.zh@gmail.com |
0a77844aeb0ff0c47c3b8ac2e106430f4a5deb65 | 9e3eef34a21a60610c4dc9f5c5fec809a8cb7706 | /lab2/ex1.py | d1c06a877a652f35276089666fd5e8bca69c1930 | [] | no_license | toma-ungureanu/FII-Python | f7df993a985f82dfed793f7549852abbb191bdee | 60a9b657db7aa09b7e9d42402f4689ca7df615ac | refs/heads/master | 2020-08-28T23:40:22.382485 | 2020-01-22T04:28:35 | 2020-01-22T04:28:35 | 217,855,104 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | def printFibonacciNumbers(n):
    # Iteratively print the first n Fibonacci numbers (1, 1, 2, 3, ...)
    # separated by spaces; n < 1 prints nothing.
    f1 = 0
    f2 = 1
    if n < 1:
        return
    for x in range(0, n):
        print(f2, end=" ")
        next = f1 + f2
        f1 = f2
        f2 = next
# Driver code: read how many Fibonacci numbers to print.
printFibonacciNumbers(int(input("Primele x numere Fibonacci: ")))
| [
"toma.florin.ungureanu@gmail.com"
] | toma.florin.ungureanu@gmail.com |
a1e88c8713d1e262f21e9358122e6e5749a99712 | 6472d60797abbb0d2814495943d2aaabb2baf7d9 | /lecture/lecture.py | 641025b9fad157e83284bbacdec0cc8df25da8ed | [] | no_license | maya-salcedo/webcam-motion-detector | 4e23336b36a56138becd7b7a636fac38d4c9a8d4 | 8746cd3af917b5e6af74cc7cfea6a69a5a715a43 | refs/heads/master | 2023-01-06T17:38:38.566764 | 2020-11-10T19:15:32 | 2020-11-10T19:15:32 | 311,289,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | import cv2
img = cv2.imread("galaxy.jpg", 0) #if you want to read to the image as it is, pass 1,
# grayscale, 0 which is 1 band
# transparency capabilities, -1
print(type(img))
print(img)
print(img.shape)
print(img.ndim)
resized_image = cv2.resize(img, (int(img.shape[1]/2),int(img.shape[0]/2))) #the tuple parameter is the new dimension
cv2.imshow("Galaxy", resized_image)
cv2.imwrite("Galaxy_resized.jpg", resized_image) #method to save the new img
cv2.waitKey(0) # 0: when user click any button, the window will close
# 2000: 2 seconds
cv2.destroyAllWindows() # method to close the window
| [
"maya.salcedo07@gmail.com"
] | maya.salcedo07@gmail.com |
f4f46508d1a0f02512ff3ef04f883f5f7004be63 | 1bc2a635a93b5bc84606edf9ac2226851cac9e6d | /tests/unit/test_business.py | 99dba73500a5ba0dccb4d31c5d763654cfe9ff9d | [
"MIT"
] | permissive | coolkat64/rolling | 819149cbb1e11a455b93a030477f9da91e2f93e4 | 4c3ee2401128e993a52ac9b52cdbd32e17728129 | refs/heads/master | 2022-11-29T00:35:14.058665 | 2020-07-31T20:37:15 | 2020-07-31T20:37:15 | 285,312,272 | 0 | 0 | MIT | 2020-08-05T14:25:48 | 2020-08-05T14:25:47 | null | UTF-8 | Python | false | false | 37,466 | py | # coding: utf-8
import typing
from aiohttp import ClientResponse
from aiohttp.test_utils import TestClient
import pytest
from rolling.kernel import Kernel
from rolling.model.character import CharacterModel
from rolling.server.controller.business import ALL_OF_THEM
from rolling.server.controller.business import ONE_OF_THEM
from rolling.server.document.business import OfferDocument
from rolling.server.document.business import OfferItemDocument
from rolling.server.document.business import OfferItemPosition
from rolling.server.document.business import OfferOperand
from rolling.server.document.business import OfferStatus
from rolling.server.document.universe import UniverseStateDocument
from tests.fixtures import create_stuff
from tests.fixtures import description_serializer
EXPECTED_PLASTIC_BOTTLE_NAME = "Plastic bottle (1)"
EXPECTED_PLASTIC_BOTTLE_NAME_ = "(!) Plastic bottle (1)"
def _add_items(kernel: Kernel, offer_id: int) -> None:
    """Attach two requested items (red wine, stone haxe) and two offered
    items (wood, leather jacket) to the given offer."""
    specs = [
        (OfferItemPosition.REQUEST, {"resource_id": "RED_WINE"}, 1.5),
        (OfferItemPosition.REQUEST, {"stuff_id": "STONE_HAXE"}, 1),
        (OfferItemPosition.OFFER, {"resource_id": "WOOD"}, 0.5),
        (OfferItemPosition.OFFER, {"stuff_id": "LEATHER_JACKET"}, 1),
    ]
    for position, ident, quantity in specs:
        kernel.server_db_session.add(
            OfferItemDocument(
                offer_id=offer_id,
                position=position.value,
                quantity=quantity,
                **ident,
            )
        )
@pytest.fixture
def xena_permanent_or_offer(worldmapc_xena_model: CharacterModel, worldmapc_kernel: Kernel):
    """Open, permanent offer owned by xena whose request/offer lists use OR."""
    offer_doc = OfferDocument(
        character_id=worldmapc_xena_model.id,
        title="OfferTitle",
        request_operand=OfferOperand.OR.value,
        offer_operand=OfferOperand.OR.value,
        permanent=True,
        status=OfferStatus.OPEN.value,
    )
    worldmapc_kernel.server_db_session.add(offer_doc)
    # first commit assigns the offer id needed by the item rows
    worldmapc_kernel.server_db_session.commit()
    _add_items(worldmapc_kernel, offer_doc.id)
    worldmapc_kernel.server_db_session.commit()
    return offer_doc
@pytest.fixture
def xena_permanent_and_offer(worldmapc_xena_model: CharacterModel, worldmapc_kernel: Kernel):
    """Open, permanent offer owned by xena whose request/offer lists use AND."""
    offer_doc = OfferDocument(
        character_id=worldmapc_xena_model.id,
        title="OfferTitle",
        request_operand=OfferOperand.AND.value,
        offer_operand=OfferOperand.AND.value,
        permanent=True,
        status=OfferStatus.OPEN.value,
    )
    worldmapc_kernel.server_db_session.add(offer_doc)
    # first commit assigns the offer id needed by the item rows
    worldmapc_kernel.server_db_session.commit()
    _add_items(worldmapc_kernel, offer_doc.id)
    worldmapc_kernel.server_db_session.commit()
    return offer_doc
class TestBusiness:
    async def _assert_owned_offers(
        self,
        kernel: Kernel,
        web: TestClient,
        character: CharacterModel,
        count: int,
        names: typing.Optional[typing.List[str]] = None,
    ) -> None:
        """Assert the business page reports `count` owned offers and, when
        given, that each string in `names` appears in the offers listing."""
        names = names or []
        # main page
        resp: ClientResponse = await web.post(f"/business/{character.id}")
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label for i in descr.items]
        assert f"Voir les offres que vous proposez ({count} en cours)" in item_labels
        if not names:
            return
        # offers page
        resp: ClientResponse = await web.post(f"/business/{character.id}/offers")
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label for i in descr.items]
        for name in names:
            # substring match: titles may be decorated, e.g. "(V) My offer"
            assert next(l for l in item_labels if name in str(l))
    async def _assert_edit_offer(
        self,
        kernel: Kernel,
        web: TestClient,
        character: CharacterModel,
        offer_id: int,
        request_operand_str: str = ONE_OF_THEM,
        request_item_names: typing.Optional[typing.List[str]] = None,
        request_item_names_not: typing.Optional[typing.List[str]] = None,
        offer_operand_str: str = ONE_OF_THEM,
        offer_item_names: typing.Optional[typing.List[str]] = None,
        offer_item_names_not: typing.Optional[typing.List[str]] = None,
        open_: bool = False,
    ) -> None:
        """Assert the owner edit page of an offer: operand values, which item
        labels are present/absent, and the activate/deactivate button state."""
        request_item_names = request_item_names or []
        request_item_names_not = request_item_names_not or []
        offer_item_names = offer_item_names or []
        offer_item_names_not = offer_item_names_not or []
        resp = await web.post(f"/business/{character.id}/offers/{offer_id}")
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        # descr.items[0] is the edit form; descr.items[1] the toggle button
        form_item_by_name = {i.name: i for i in descr.items[0].items}
        form_item_labels = [i.label for i in descr.items[0].items]
        assert form_item_by_name["request_operand"].value == request_operand_str
        assert form_item_by_name["offer_operand"].value == offer_operand_str
        for request_item_name in request_item_names:
            assert request_item_name in form_item_labels
        for offer_item_name in offer_item_names:
            assert offer_item_name in form_item_labels
        for request_item_name_not in request_item_names_not:
            assert request_item_name_not not in form_item_labels
        for offer_item_name_not in offer_item_names_not:
            assert offer_item_name_not not in form_item_labels
        if not open_:
            assert "Activer" == descr.items[1].label
        else:
            assert "Désactiver" == descr.items[1].label
async def _assert_read_offer(
self,
kernel: Kernel,
web: TestClient,
owner: CharacterModel,
character: CharacterModel,
offer_id: int,
request_operand_str: str = ONE_OF_THEM,
have_not_item_names: typing.Optional[typing.List[str]] = None,
have_item_names: typing.Optional[typing.List[str]] = None,
offer_operand_str: str = ONE_OF_THEM,
offer_item_names: typing.Optional[typing.List[str]] = None,
owner_can_make_deal: bool = True,
can_make_deal: bool = False,
) -> None:
have_not_item_names = have_not_item_names or []
have_item_names = have_item_names or []
offer_item_names = offer_item_names or []
resp = await web.post(f"/business/{character.id}/see-offer/{owner.id}/{offer_id}")
assert 200 == resp.status
descr = description_serializer.load(await resp.json())
form_item_labels = [i.label or i.text for i in descr.items]
assert f"Eléments demandé(s) ({request_operand_str})" in form_item_labels
assert f"Eléments donné(s) ({offer_operand_str})" in form_item_labels
for have_not_item_name in have_not_item_names:
assert f"(X) {have_not_item_name}" in form_item_labels
for have_item_name in have_item_names:
assert f"(V) {have_item_name}" in form_item_labels
for offer_item_name in offer_item_names:
assert offer_item_name in form_item_labels
if owner_can_make_deal:
if can_make_deal:
assert "Effectuer une transaction" in form_item_labels
else:
assert "Vous ne possédez pas de quoi faire un marché" in form_item_labels
else:
assert f"{owner.name} ne peut pas assurer cette opération"
    async def test_create_offer__nominal_case(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
    ) -> None:
        """Creating a permanent offer redirects to its edit page, lists it,
        and leaves it empty and closed by default."""
        xena = worldmapc_xena_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        await self._assert_owned_offers(kernel, web, xena, count=0)
        # first call (no body) renders the creation form
        resp = await web.post(f"/business/{xena.id}/offers-create?permanent=1")
        assert 200 == resp.status
        resp = await web.post(
            f"/business/{xena.id}/offers-create?permanent=1", json={"title": "My offer"}
        )
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        assert descr.redirect == f"/business/{xena.id}/offers/1"
        await self._assert_owned_offers(kernel, web, xena, count=1, names=["My offer"])
        await self._assert_edit_offer(
            kernel,
            web,
            xena,
            offer_id=1,
            request_operand_str=ONE_OF_THEM,
            request_item_names=[],
            offer_operand_str=ONE_OF_THEM,
            offer_item_names=[],
            open_=False,
        )
    async def test_create_offer__change_operands(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
    ) -> None:
        """Posting new operand values switches both sides from OR to AND."""
        xena = worldmapc_xena_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        await self._assert_owned_offers(kernel, web, xena, count=0)
        # see test_create_offer__nominal_case if in error
        assert (
            await web.post(
                f"/business/{xena.id}/offers-create?permanent=1", json={"title": "My offer"}
            )
        ).status == 200
        assert (
            await web.post(
                f"/business/{xena.id}/offers/{1}",
                json={"request_operand": ALL_OF_THEM, "offer_operand": ALL_OF_THEM},
            )
        ).status == 200
        await self._assert_edit_offer(
            kernel,
            web,
            xena,
            offer_id=1,
            request_operand_str=ALL_OF_THEM,
            offer_operand_str=ALL_OF_THEM,
        )
    async def test_create_offer__open_close(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
    ) -> None:
        """Toggling an offer open/closed updates the edit page button and the
        (V)/(X) marker in the owner's offers listing."""
        xena = worldmapc_xena_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        await self._assert_owned_offers(kernel, web, xena, count=0)
        # see test_create_offer__nominal_case if in error
        assert (
            await web.post(
                f"/business/{xena.id}/offers-create?permanent=1", json={"title": "My offer"}
            )
        ).status == 200
        assert (await web.post(f"/business/{xena.id}/offers/{1}?open=1")).status == 200
        await self._assert_edit_offer(kernel, web, xena, offer_id=1, open_=True)
        await self._assert_owned_offers(kernel, web, xena, count=1, names=["(V) My offer"])
        assert (await web.post(f"/business/{xena.id}/offers/{1}?close=1")).status == 200
        await self._assert_edit_offer(kernel, web, xena, offer_id=1, open_=False)
        await self._assert_owned_offers(kernel, web, xena, count=1, names=["(X) My offer"])
    async def test_add_items__check_form(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
    ) -> None:
        """The add-item form offers every resource/stuff choice and a quantity field."""
        xena = worldmapc_xena_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        assert (
            await web.post(
                f"/business/{xena.id}/offers-create?permanent=1", json={"title": "My offer"}
            )
        ).status == 200
        resp = await web.post(f"/business/{xena.id}/offers/{1}/add-item?position=REQUEST")
        assert resp.status == 200
        descr = description_serializer.load(await resp.json())
        assert descr.items[0].is_form
        # first form field is the item selector, second the quantity input
        assert descr.items[0].items[0].name == "value"
        for name in [
            "Bois (mètre cubes)",
            "Vin rouge (litres)",
            "Plastic bottle (unité)",
            "Bouclier de bois (unité)",
            "Hache de pierre (unité)",
            "Veste de cuir (unité)",
            "Pierre (unités)",
            "Corps (unité)",
            "Petit bois (mètre cubes)",
        ]:
            assert name in descr.items[0].items[0].choices
        assert descr.items[0].items[1].name == "quantity"
    async def test_update_offer__have_some_required__request_and(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
    ) -> None:
        """Add one stuff then one resource to an offer's REQUEST side and check both are listed."""
        xena = worldmapc_xena_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        await self._assert_owned_offers(kernel, web, xena, count=0)
        # see test_create_offer__nominal_case if in error
        assert (
            await web.post(
                f"/business/{xena.id}/offers-create?permanent=1", json={"title": "My offer"}
            )
        ).status == 200
        # Add one stuff
        assert (
            await web.post(
                f"/business/{xena.id}/offers/{1}/add-item"
                f"?position=REQUEST&value=Plastic bottle (unité)&quantity=1"
            )
        ).status == 200
        await self._assert_edit_offer(
            kernel, web, xena, offer_id=1, request_item_names=[EXPECTED_PLASTIC_BOTTLE_NAME]
        )
        # Add one resource
        assert (
            await web.post(
                f"/business/{xena.id}/offers/{1}/add-item"
                f"?position=REQUEST&value=Petit bois (mètre cubes)&quantity=1.50"
            )
        ).status == 200
        await self._assert_edit_offer(
            kernel,
            web,
            xena,
            offer_id=1,
            request_item_names=[EXPECTED_PLASTIC_BOTTLE_NAME, "Petit bois (1.5 mètre cubes)"],
        )
    async def test_update_offer__have_some_required__remove_item(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
    ) -> None:
        """Remove a previously added request item and check it no longer appears in the offer."""
        xena = worldmapc_xena_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        await self._assert_owned_offers(kernel, web, xena, count=0)
        # see test_create_offer__nominal_case if in error
        assert (
            await web.post(
                f"/business/{xena.id}/offers-create?permanent=1", json={"title": "My offer"}
            )
        ).status == 200
        # Add one stuff
        assert (
            await web.post(
                f"/business/{xena.id}/offers/{1}/add-item?position=REQUEST&value=Plastic bottle (unité)&quantity=1"
            )
        ).status == 200
        await self._assert_edit_offer(
            kernel, web, xena, offer_id=1, request_item_names=[EXPECTED_PLASTIC_BOTTLE_NAME]
        )
        # remove it
        assert (await web.post(f"/business/{xena.id}/offers/{1}/remove-item/{1}")).status == 200
        await self._assert_edit_offer(
            kernel, web, xena, offer_id=1, request_item_names_not=[EXPECTED_PLASTIC_BOTTLE_NAME]
        )
    async def test_edit_offer__test_owner_have_display(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
        xena_permanent_and_offer: OfferDocument,
    ) -> None:
        """Offered items the owner does not carry show a "(X)" prefix; it disappears once carried."""
        xena = worldmapc_xena_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        await self._assert_edit_offer(
            kernel,
            web,
            xena,
            offer_id=1,
            offer_item_names=["(X) Bois (0.5 mètre cubes)", "(X) Veste de cuir (1)"],
            request_operand_str=ALL_OF_THEM,
            offer_operand_str=ALL_OF_THEM,
            open_=True,
        )
        # add one to offer owner
        kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
        await self._assert_edit_offer(
            kernel,
            web,
            xena,
            offer_id=1,
            offer_item_names=["Bois (0.5 mètre cubes)", "(X) Veste de cuir (1)"],
            request_operand_str=ALL_OF_THEM,
            offer_operand_str=ALL_OF_THEM,
            open_=True,
        )
        # add one to offer owner
        jacket = create_stuff(kernel, "LEATHER_JACKET")
        kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
        await self._assert_edit_offer(
            kernel,
            web,
            xena,
            offer_id=1,
            # NOTE(review): the two assertions above pass these same item names as
            # offer_item_names; this one uses request_item_names — looks like a
            # copy/paste slip, confirm against _assert_edit_offer's semantics.
            request_item_names=["Bois (0.5 mètre cubes)", "Veste de cuir (1)"],
            request_operand_str=ALL_OF_THEM,
            offer_operand_str=ALL_OF_THEM,
            open_=True,
        )
    async def test_read_offer__have_some_required_items__and(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
        xena_permanent_and_offer: OfferDocument,
    ) -> None:
        """AND offer: the deal becomes possible only once buyer and owner hold every listed item."""
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        offer = xena_permanent_and_offer
        # Initially arthur misses all requested items and xena cannot provide
        # her offered items (shown with the "(!)" prefix).
        await self._assert_read_offer(
            kernel,
            web,
            xena,
            arthur,
            offer_id=offer.id,
            request_operand_str=ALL_OF_THEM,
            offer_operand_str=ALL_OF_THEM,
            have_not_item_names=["Vin rouge (1.5 litres)", "Hache de pierre (1)"],
            offer_item_names=["(!) Bois (0.5 mètre cubes)", "(!) Veste de cuir (1)"],
            owner_can_make_deal=False,
        )
        kernel.resource_lib.add_resource_to("RED_WINE", 2.0, character_id=arthur.id)
        await self._assert_read_offer(
            kernel,
            web,
            xena,
            arthur,
            offer_id=offer.id,
            request_operand_str=ALL_OF_THEM,
            offer_operand_str=ALL_OF_THEM,
            have_not_item_names=["Hache de pierre (1)"],
            have_item_names=["Vin rouge (1.5 litres)"],
            owner_can_make_deal=False,
        )
        haxe = create_stuff(kernel, "STONE_HAXE")
        kernel.stuff_lib.set_carried_by(haxe.id, character_id=arthur.id)
        await self._assert_read_offer(
            kernel,
            web,
            xena,
            arthur,
            offer_id=offer.id,
            request_operand_str=ALL_OF_THEM,
            offer_operand_str=ALL_OF_THEM,
            have_item_names=["Vin rouge (1.5 litres)", "Hache de pierre (1)"],
            owner_can_make_deal=False,
        )
        # add wood to offer owner (remove the (!))
        kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
        await self._assert_read_offer(
            kernel,
            web,
            xena,
            arthur,
            offer_id=offer.id,
            request_operand_str=ALL_OF_THEM,
            offer_operand_str=ALL_OF_THEM,
            offer_item_names=["Bois (0.5 mètre cubes)", "(!) Veste de cuir (1)"],
            owner_can_make_deal=False,
        )
        # add jacket to offer owner (remove the (!))
        jacket = create_stuff(kernel, "LEATHER_JACKET")
        kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
        await self._assert_read_offer(
            kernel,
            web,
            xena,
            arthur,
            offer_id=offer.id,
            request_operand_str=ALL_OF_THEM,
            offer_operand_str=ALL_OF_THEM,
            offer_item_names=["Bois (0.5 mètre cubes)", "Veste de cuir (1)"],
            owner_can_make_deal=True,
            can_make_deal=True,
        )
    async def test_read_offer__have_some_required_items__or(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
        xena_permanent_or_offer: OfferDocument,
    ) -> None:
        """OR offer: owning any single requested item is enough for the deal to become possible."""
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        offer = xena_permanent_or_offer
        # ensure xena have all offered items
        kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
        jacket = create_stuff(kernel, "LEATHER_JACKET")
        kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
        await self._assert_read_offer(
            kernel,
            web,
            xena,
            arthur,
            offer_id=offer.id,
            request_operand_str=ONE_OF_THEM,
            offer_operand_str=ONE_OF_THEM,
            have_not_item_names=["Vin rouge (1.5 litres)", "Hache de pierre (1)"],
            offer_item_names=["Bois (0.5 mètre cubes)", "Veste de cuir (1)"],
            can_make_deal=False,
        )
        kernel.resource_lib.add_resource_to("RED_WINE", 2.0, character_id=arthur.id)
        await self._assert_read_offer(
            kernel,
            web,
            xena,
            arthur,
            offer_id=offer.id,
            request_operand_str=ONE_OF_THEM,
            offer_operand_str=ONE_OF_THEM,
            have_not_item_names=["Hache de pierre (1)"],
            have_item_names=["Vin rouge (1.5 litres)"],
            can_make_deal=True,
        )
        haxe = create_stuff(kernel, "STONE_HAXE")
        kernel.stuff_lib.set_carried_by(haxe.id, character_id=arthur.id)
        await self._assert_read_offer(
            kernel,
            web,
            xena,
            arthur,
            offer_id=offer.id,
            request_operand_str=ONE_OF_THEM,
            offer_operand_str=ONE_OF_THEM,
            have_item_names=["Vin rouge (1.5 litres)", "Hache de pierre (1)"],
            can_make_deal=True,
        )
    async def test_read_offer__make_transaction__missing_request_and(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
        xena_permanent_and_offer: OfferDocument,
    ) -> None:
        """AND offer: a buyer missing part of the request gets the refusal message on the deal page."""
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        offer = xena_permanent_and_offer
        # ensure xena have all offered items
        kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
        jacket = create_stuff(kernel, "LEATHER_JACKET")
        kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
        # Give just a part of necessary to arthur
        kernel.resource_lib.add_resource_to("RED_WINE", 2.0, character_id=arthur.id)
        resp = await web.post(
            f"/business/{arthur.id}/see-offer/{offer.character_id}/{offer.id}/deal"
        )
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label or i.text for i in descr.items]
        assert "Vous ne possédez pas ce qu'il faut pour faire ce marché" in item_labels
    async def test_read_offer__make_transaction__owner_missing_offer_and(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
        xena_permanent_and_offer: OfferDocument,
    ) -> None:
        """If the owner cannot honor the offered items, the deal page says the owner cannot proceed."""
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        offer = xena_permanent_and_offer
        # xena have just a part of offered items
        kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
        resp = await web.post(
            f"/business/{arthur.id}/see-offer/{offer.character_id}/{offer.id}/deal"
        )
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label or i.text for i in descr.items]
        assert f"{xena.name} ne peut pas assurer cette opération" in item_labels
    async def test_read_offer__make_transaction__request_and(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
        xena_permanent_and_offer: OfferDocument,
        initial_universe_state: UniverseStateDocument,
    ) -> None:
        """Complete an AND deal and verify every requested/offered item switched owner."""
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        offer = xena_permanent_and_offer
        # Give all necessary to arthur
        kernel.resource_lib.add_resource_to("RED_WINE", 2.0, character_id=arthur.id)
        haxe = create_stuff(kernel, "STONE_HAXE")
        kernel.stuff_lib.set_carried_by(haxe.id, character_id=arthur.id)
        # ensure xena have all offered items
        kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
        jacket = create_stuff(kernel, "LEATHER_JACKET")
        kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
        # Sanity-check the pre-deal inventories of both characters.
        assert kernel.resource_lib.have_resource(xena.id, "WOOD", 0.5)
        assert kernel.stuff_lib.have_stuff_count(xena.id, "LEATHER_JACKET")
        assert not kernel.resource_lib.have_resource(xena.id, "RED_WINE", 1.5)
        assert not kernel.stuff_lib.have_stuff_count(xena.id, "STONE_HAXE")
        assert not kernel.resource_lib.have_resource(arthur.id, "WOOD", 0.5)
        assert not kernel.stuff_lib.have_stuff_count(arthur.id, "LEATHER_JACKET")
        assert kernel.resource_lib.have_resource(arthur.id, "RED_WINE", 1.5)
        assert kernel.stuff_lib.have_stuff_count(arthur.id, "STONE_HAXE")
        resp = await web.post(
            f"/business/{arthur.id}/see-offer/{offer.character_id}/{offer.id}/deal"
        )
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label or i.text for i in descr.items]
        assert "Je confirme vouloir faire ce marché" in item_labels
        # Do the deal
        resp = await web.post(
            f"/business/{arthur.id}/see-offer/{offer.character_id}/{offer.id}/deal?confirm=1"
        )
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label or i.text for i in descr.items]
        assert "Marché effectué" in item_labels
        # All items must now be swapped between the two characters.
        assert not kernel.resource_lib.have_resource(xena.id, "WOOD", 0.5)
        assert not kernel.stuff_lib.have_stuff_count(xena.id, "LEATHER_JACKET")
        assert kernel.resource_lib.have_resource(xena.id, "RED_WINE", 1.5)
        assert kernel.stuff_lib.have_stuff_count(xena.id, "STONE_HAXE")
        assert kernel.resource_lib.have_resource(arthur.id, "WOOD", 0.5)
        assert kernel.stuff_lib.have_stuff_count(arthur.id, "LEATHER_JACKET")
        assert not kernel.resource_lib.have_resource(arthur.id, "RED_WINE", 1.5)
        assert not kernel.stuff_lib.have_stuff_count(arthur.id, "STONE_HAXE")
    async def test_read_offer__make_transaction__missing_all_request_or(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
        xena_permanent_or_offer: OfferDocument,
    ) -> None:
        """OR offer: owning none of the requested items yields the refusal message on the deal page."""
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        offer = xena_permanent_or_offer
        # ensure xena have all offered items
        kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
        jacket = create_stuff(kernel, "LEATHER_JACKET")
        kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
        resp = await web.post(
            f"/business/{arthur.id}/see-offer/{offer.character_id}/{offer.id}/deal"
        )
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label or i.text for i in descr.items]
        assert "Vous ne possédez pas ce qu'il faut pour faire ce marché" in item_labels
    async def test_read_offer__make_transaction__request_or(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
        xena_permanent_or_offer: OfferDocument,
        initial_universe_state: UniverseStateDocument,
    ) -> None:
        """OR offer: buyer picks which item to give, then which to take; owner stock limits choices."""
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        offer = xena_permanent_or_offer
        # ensure xena have one of offered items
        kernel.resource_lib.add_resource_to("WOOD", 0.5, character_id=xena.id)
        # Give all necessary to arthur
        kernel.resource_lib.add_resource_to("RED_WINE", 1.5, character_id=arthur.id)
        haxe = create_stuff(kernel, "STONE_HAXE")
        kernel.stuff_lib.set_carried_by(haxe.id, character_id=arthur.id)
        # Sanity-check the pre-deal inventories.
        assert kernel.resource_lib.have_resource(xena.id, "WOOD", 0.5)
        assert not kernel.resource_lib.have_resource(xena.id, "RED_WINE", 1.5)
        assert not kernel.stuff_lib.have_stuff_count(xena.id, "STONE_HAXE")
        assert not kernel.resource_lib.have_resource(arthur.id, "WOOD", 0.5)
        assert kernel.resource_lib.have_resource(arthur.id, "RED_WINE", 1.5)
        assert kernel.stuff_lib.have_stuff_count(arthur.id, "STONE_HAXE")
        resp = await web.post(
            f"/business/{arthur.id}/see-offer/{offer.character_id}/{offer.id}/deal"
        )
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label or i.text for i in descr.items]
        item_by_label = {i.label: i for i in descr.items}
        give_wine_str = "Faire ce marché et donner Vin rouge (1.5 litres)"
        assert give_wine_str in item_labels
        assert "Faire ce marché et donner Hache de pierre (1)" in item_labels
        give_wine_url = item_by_label[give_wine_str].form_action
        resp = await web.post(give_wine_url)
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label or i.text for i in descr.items]
        item_by_label = {i.label: i for i in descr.items}
        take_wood_str = "Faire ce marché et obtenir Bois (0.5 mètre cubes)"
        assert take_wood_str in item_labels
        # The jacket is not proposed because xena does not carry one yet.
        assert "Faire ce marché et obtenir Veste de cuir (1)" not in item_labels
        # Give jacket to xena to permit take it
        jacket = create_stuff(kernel, "LEATHER_JACKET")
        kernel.stuff_lib.set_carried_by(jacket.id, character_id=xena.id)
        resp = await web.post(give_wine_url)
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label or i.text for i in descr.items]
        item_by_label = {i.label: i for i in descr.items}
        take_wood_str = "Faire ce marché et obtenir Bois (0.5 mètre cubes)"
        assert take_wood_str in item_labels
        assert "Faire ce marché et obtenir Veste de cuir (1)" in item_labels
        take_wood_url = item_by_label[take_wood_str].form_action
        resp = await web.post(take_wood_url)
        assert 200 == resp.status
        # Wine moved to xena, wood moved to arthur; the haxe stayed put.
        assert not kernel.resource_lib.have_resource(xena.id, "WOOD", 0.5)
        assert kernel.resource_lib.have_resource(xena.id, "RED_WINE", 1.5)
        assert not kernel.stuff_lib.have_stuff_count(xena.id, "STONE_HAXE")
        assert kernel.resource_lib.have_resource(arthur.id, "WOOD", 0.5)
        assert not kernel.resource_lib.have_resource(arthur.id, "RED_WINE", 1.5)
        assert kernel.stuff_lib.have_stuff_count(arthur.id, "STONE_HAXE")
    async def test_create_with_character_transaction(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_web_app: TestClient,
        worldmapc_kernel: Kernel,
        initial_universe_state: UniverseStateDocument,
    ) -> None:
        """Full flow of an offer targeted at one character: also checks the pending-transaction
        counter on both main pages, the view from the target's side, and the "*" blinker."""
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        kernel = worldmapc_kernel
        web = worldmapc_web_app
        # Create an offer addressed specifically to arthur.
        assert (
            await web.post(
                f"/business/{xena.id}/offers-create?with_character_id={arthur.id}",
                json={"title": "My offer"},
            )
        ).status == 200
        assert (
            await web.post(
                f"/business/{xena.id}/offers/{1}/add-item"
                f"?position=REQUEST&value=Plastic bottle (unité)&quantity=1"
            )
        ).status == 200
        assert (
            await web.post(
                f"/business/{xena.id}/offers/{1}/add-item"
                f"?position=OFFER&value=Vin rouge (litres)&quantity=1.5"
            )
        ).status == 200
        assert (await web.post(f"/business/{xena.id}/offers/{1}?open=1")).status == 200
        await self._assert_edit_offer(
            kernel,
            web,
            character=xena,
            offer_id=1,
            request_operand_str=ONE_OF_THEM,
            offer_operand_str=ONE_OF_THEM,
            request_item_names=["Plastic bottle (1)"],
            offer_item_names=["(X) Vin rouge (1.5 litres)"],
            open_=True,
        )
        await self._assert_read_offer(
            kernel,
            web,
            owner=xena,
            character=arthur,
            offer_id=1,
            request_operand_str=ONE_OF_THEM,
            offer_operand_str=ONE_OF_THEM,
            have_not_item_names=["Plastic bottle (1)"],
            offer_item_names=["(!) Vin rouge (1.5 litres)"],
            can_make_deal=False,
        )
        # Give all necessary
        kernel.resource_lib.add_resource_to("RED_WINE", 1.5, character_id=xena.id)
        bottle = create_stuff(kernel, "PLASTIC_BOTTLE_1L")
        kernel.stuff_lib.set_carried_by(bottle.id, character_id=arthur.id)
        assert kernel.resource_lib.have_resource(xena.id, "RED_WINE", 1.5)
        assert not kernel.stuff_lib.have_stuff_count(xena.id, "PLASTIC_BOTTLE_1L")
        assert not kernel.resource_lib.have_resource(arthur.id, "RED_WINE", 1.5)
        assert kernel.stuff_lib.have_stuff_count(arthur.id, "PLASTIC_BOTTLE_1L")
        await self._assert_read_offer(
            kernel,
            web,
            owner=xena,
            character=arthur,
            offer_id=1,
            request_operand_str=ONE_OF_THEM,
            offer_operand_str=ONE_OF_THEM,
            have_item_names=["Plastic bottle (1)"],
            offer_item_names=["Vin rouge (1.5 litres)"],
            can_make_deal=True,
        )
        # xena main page
        resp: ClientResponse = await web.post(f"/business/{xena.id}")
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label for i in descr.items]
        assert "Voir les transactions en attente (1 en cours)" in item_labels
        # arthur main page
        resp: ClientResponse = await web.post(f"/business/{arthur.id}")
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label for i in descr.items]
        # The leading "*" is the blinker shown to the targeted character.
        assert "*Voir les transactions en attente (1 en cours)" in item_labels
        resp = await web.post(f"/business/{arthur.id}/see-offer/{xena.id}/{1}/deal")
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label or i.text for i in descr.items]
        item_by_label = {i.label: i for i in descr.items}
        deal_str = "Faire ce marché et donner Plastic bottle (1)"
        assert deal_str in item_labels
        go_url = item_by_label[deal_str].form_action
        resp = await web.post(go_url)
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label or i.text for i in descr.items]
        item_by_label = {i.label: i for i in descr.items}
        deal_str = "Faire ce marché et obtenir Vin rouge (1.5 litres)"
        assert deal_str in item_labels
        go_url = item_by_label[deal_str].form_action
        assert (await web.post(go_url)).status == 200
        # After the deal: wine and bottle have been exchanged.
        assert not kernel.resource_lib.have_resource(xena.id, "RED_WINE", 1.5)
        assert kernel.stuff_lib.have_stuff_count(xena.id, "PLASTIC_BOTTLE_1L")
        assert kernel.resource_lib.have_resource(arthur.id, "RED_WINE", 1.5)
        assert not kernel.stuff_lib.have_stuff_count(arthur.id, "PLASTIC_BOTTLE_1L")
        # xena main page
        resp: ClientResponse = await web.post(f"/business/{xena.id}")
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label for i in descr.items]
        assert "Voir les transactions en attente (0 en cours)" in item_labels
        # arthur main page
        resp: ClientResponse = await web.post(f"/business/{arthur.id}")
        assert 200 == resp.status
        descr = description_serializer.load(await resp.json())
        item_labels = [i.label for i in descr.items]
        assert "Voir les transactions en attente (0 en cours)" in item_labels
| [
"sevajol.bastien@gmail.com"
] | sevajol.bastien@gmail.com |
e259df553081c2a0843857a31971fbeb29ab02d1 | 8c9df3465ec7cab68b10e67823c1f9b475dab68e | /square__transverse_longitudinal_field_af_ising__static/square_ising.py | 12dad1d1699c6934cd3da33fb9d3ea8f37bdd5f5 | [
"BSD-3-Clause"
] | permissive | deyh2020/quspin_example | f86cf3cea2b8c04efc017e9618cb935494e94f82 | 931ca2ea5e6bbe02ebdd6d6a22d90db24d6c760c | refs/heads/master | 2023-02-07T21:27:12.913763 | 2020-12-30T08:00:57 | 2020-12-30T08:00:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,427 | py | ## http://weinbe58.github.io/QuSpin/generated/quspin.basis.spin_basis_general.html#quspin.basis.spin_basis_general
## https://doi.org/10.1103/PhysRevX.8.021069
## https://doi.org/10.1103/PhysRevX.8.021070
## consider nearest neighbor Ising
from __future__ import print_function, division
from quspin.operators import hamiltonian # operators
from quspin.basis import spin_basis_general # spin basis constructor
import numpy as np # general math functions
def exact_diag(J,Hx,Hz,Lx,Ly):
    """Exactly diagonalize the S=1/2 square-lattice Ising model with
    nearest-neighbor coupling J, transverse field Hx and longitudinal field Hz,
    restricted to the zero-momentum (kx=0, ky=0) translation sector.

    Returns a tuple (ene, mx, mz, mz0mz1):
        ene    -- the two lowest eigenvalues (Lanczos, which="SA", k=2)
        mx     -- ground-state uniform transverse magnetization per site
        mz     -- ground-state uniform longitudinal magnetization per site
        mz0mz1 -- ground-state nearest-neighbor Sz.Sz correlation per site
    """
    N_2d = Lx*Ly # number of sites
    ###### setting up user-defined symmetry transformations for 2d lattice ######
    s = np.arange(N_2d) # sites [0,1,2,....]
    x = s%Lx # x positions for sites
    y = s//Lx # y positions for sites
    T_x = (x+1)%Lx + Lx*y # translation along x-direction
    T_y = x +Lx*((y+1)%Ly) # translation along y-direction
    P_x = x + Lx*(Ly-y-1) # reflection about x-axis
    P_y = (Lx-x-1) + Lx*y # reflection about y-axis
    Z = -(s+1) # spin inversion
    ###### setting up bases ######
    # basis_2d = spin_basis_general(N=N_2d,S="1/2",pauli=0)
    basis_2d = spin_basis_general(N=N_2d,S="1/2",pauli=0,kxblock=(T_x,0),kyblock=(T_y,0))
    ###### setting up hamiltonian ######
    # setting up site-coupling lists
    Jzzs = [[J,i,T_x[i]] for i in range(N_2d)]+[[J,i,T_y[i]] for i in range(N_2d)]
    Hxs = [[-Hx,i] for i in range(N_2d)]
    Hzs = [[-Hz,i] for i in range(N_2d)]
    static = [["zz",Jzzs],["x",Hxs],["z",Hzs]]
    # build hamiltonian
    # H = hamiltonian(static,[],static_fmt="csr",basis=basis_2d,dtype=np.float64)
    # symmetry/hermiticity checks are skipped for speed
    no_checks = dict(check_symm=False, check_pcon=False, check_herm=False)
    H = hamiltonian(static,[],static_fmt="csr",basis=basis_2d,dtype=np.float64,**no_checks)
    # diagonalise H
    ene,vec = H.eigsh(time=0.0,which="SA",k=2)
    # ene = H.eigsh(time=0.0,which="SA",k=2,return_eigenvectors=False); ene = np.sort(ene)
    norm2 = np.linalg.norm(vec[:,0])**2
    # calculate uniform magnetization
    int_mx = [[1.0,i] for i in range(N_2d)]
    int_mz = [[1.0,i] for i in range(N_2d)]
    static_mx = [["x",int_mx]]
    static_mz = [["z",int_mz]]
    op_mx = hamiltonian(static_mx,[],static_fmt="csr",basis=basis_2d,dtype=np.float64,**no_checks).tocsr(time=0)
    op_mz = hamiltonian(static_mz,[],static_fmt="csr",basis=basis_2d,dtype=np.float64,**no_checks).tocsr(time=0)
    mx = (np.conjugate(vec[:,0]).dot(op_mx.dot(vec[:,0])) / norm2).real / N_2d
    mz = (np.conjugate(vec[:,0]).dot(op_mz.dot(vec[:,0])) / norm2).real / N_2d
    # calculate n.n. sz.sz correlation
    int_mz0mz1 = [[1.0,i,T_x[i]] for i in range(N_2d)]+[[1.0,i,T_y[i]] for i in range(N_2d)]
    static_mz0mz1 = [["zz",int_mz0mz1]]
    op_mz0mz1 = hamiltonian(static_mz0mz1,[],static_fmt="csr",basis=basis_2d,dtype=np.float64,**no_checks).tocsr(time=0)
    mz0mz1 = (np.conjugate(vec[:,0]).dot(op_mz0mz1.dot(vec[:,0])) / norm2).real / N_2d
    return ene, mx, mz, mz0mz1
def main():
    """Sweep the longitudinal field Hz and print ground-state observables per site."""
    lattice_x, lattice_y = 4, 4  # linear dimensions of the 2d lattice
    n_sites = lattice_x * lattice_y
    coupling = 1.0       # AF Ising coupling J
    transverse_field = 0.10
    # Scan Hz from 0 to 4 in 401 evenly spaced steps.
    for hz in np.linspace(0.0, 4.0, 401):
        ene, mx, mz, mz0mz1 = exact_diag(coupling, transverse_field, hz, lattice_x, lattice_y)
        # One line per field value: J Hz Hx Lx Ly e0/N mx mz mz0mz1
        print(coupling, hz, transverse_field, lattice_x, lattice_y, ene[0] / n_sites, mx, mz, mz0mz1)
if __name__ == "__main__":
    main()
| [
"27846552+ryuikaneko@users.noreply.github.com"
] | 27846552+ryuikaneko@users.noreply.github.com |
36f4077e738bcd0ad9b6fb9a000f343d5aa57385 | fe931e4eb729e010add72732889cfcfa0e82663d | /test/ParametricNLP_test.py | 7e32cc47eea227c73a524210f0191ac4c1677cb2 | [] | no_license | Duam/python-master-thesis-code | 14d7ae68d94475cc978717dbb2a9df691b5246e5 | bb1a800612a1f046d2184ae42e00ed5ec0425b06 | refs/heads/master | 2023-06-30T07:02:28.369372 | 2021-08-06T15:11:26 | 2021-08-06T15:11:26 | 385,187,111 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | import casadi as cas
from thesis_code.utils.ParametricNLP import ParametricNLP
# Create a test optimization problem
# Build a small parametric test problem: minimize x'x subject to box bounds
# passed in as parameters (lbx <= x <= ubx).
nlp = ParametricNLP(name='test_problem', verbose=True)
# Add decision variables and parameters to the problem
nlp.add_decision_var('x', (3,1))
nlp.add_parameter('lbx', (3,1))
nlp.add_parameter('ubx', (3,1))
nlp.bake_variables()
# Fetch symbolics
x_sym = nlp.get_decision_var('x')
lbx_sym = nlp.get_parameter('lbx')
ubx_sym = nlp.get_parameter('ubx')
print(nlp.struct_w['x'])
# Create a cost function
nlp.set_cost(cas.mtimes([x_sym.T, x_sym]))
# Create an inequality constraint
nlp.add_inequality('x_iq_lbx', x_sym - lbx_sym)
nlp.add_inequality('x_iq_ubx', ubx_sym - x_sym)
# Set the parameters and initial guess
lbx_scalar = 0.5
ubx_scalar = 1.0
params = nlp.struct_p(0)
params['lbx'] = lbx_scalar * cas.DM.ones((3,1))
params['ubx'] = ubx_scalar * cas.DM.ones((3,1))
winit = nlp.struct_w(0)
winit['x'] = cas.DM.zeros((3,1))
# Some options
# NOTE(review): opts is built but never passed to nlp.init()/nlp.solve() below —
# confirm whether the solver options are intentionally unused.
opts = {}
opts['ipopt.print_info_string'] = 'yes'
opts['ipopt.print_level'] = 3
opts['ipopt.max_iter'] = 1000
# Solve the problem..
# via ipopt
nlp.init(nlpsolver='ipopt')
res_ipopt, stats_ipopt, dum,dum,dum = nlp.solve(winit=winit, param=params)
# via sqpmethod
nlp.init(nlpsolver='sqpmethod')
res_sqp, stats_sqp, dum,dum,dum = nlp.solve(winit=winit, param=params)
# via qpoases
nlp.init(is_qp = True, nlpsolver='qpoases')
res_qp, stats_qp, dum,dum,dum = nlp.solve(winit=winit, param=params)
# Solve the problem
"""
print('x:', res['w']['x'])
print('lambda ubx:', res['lam_h']['x_iq_ubx'])
print('lambda lbx:', res['lam_h']['x_iq_lbx'])
print('Number of iterations:', stats['iter_count'])
""" | [
"paul.daum@posteo.de"
] | paul.daum@posteo.de |
81d48422f1d924c39d76dcbf258ebdb977195149 | af3840c306fa5eeb2f0d9e5ed779a582d3c7d5d7 | /구구단 11-16.py | e45c083721e6ad8260dc97b620622219cfb596a2 | [] | no_license | HanseamChung/prac_python | 8c6053402a456eb3c78941b0917fa1570c56bbea | d8fc32989ae9707261deb602bea8853d7f7a0aed | refs/heads/master | 2021-05-05T03:12:16.940521 | 2018-02-09T08:37:10 | 2018-02-09T08:37:10 | 119,792,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | i, k = 0, 0
# Print the multiplication tables for 11 through 16, each on its own line.
# print('\n') deliberately emits two newlines, separating tables with a blank line.
for i in range(11, 17):
    print('\n')
    # Products i*1 .. i*9 on one line, each followed by a space, no trailing newline.
    print("".join(str(i * k) + " " for k in range(1, 10)), end="")
| [
"noreply@github.com"
] | HanseamChung.noreply@github.com |
9eea9265e1ace539b7498d06a98811dc189c3578 | 13486ab3a7c9f4221b25236d11a44caadf9f90f0 | /programming-three/three134.py | 67abd3857686a1d4fc7033b00938fa1b80d3ce94 | [] | no_license | strawwhat/diary | f0ec0c86b32d3ba7bedba33c9c629ec41c9e9a7b | 2d925b42e61c8aebbe6d212bceb3de7e10bd58c3 | refs/heads/master | 2021-01-22T22:34:09.645638 | 2017-10-02T07:44:41 | 2017-10-02T07:44:41 | 85,550,383 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,269 | py | #!/usr/bin/python
# *-*coding:utf-8 *-*
"示例3-9 page134 redirect.py 重定向流到python对象"
"""
file-like objects that save standard output in a string and provide
standard input text a string ; redirect runs a passed-in function
with its output and input streams reset to these file-like class objects
类似文件的对象,用于在字符串中保存标准输出并提供
标准输入文本字符串; 重定向运行传入函数
其输出和输入流重新设置为这些类似文件的类对象
在Python中,任何在方法上与文件类似的对象都可以充当标准流。
它和对象数据类型无关,而取决于接口(有时被称为协议)即:
任何提供了类似于文件read方法的对象可以指定给sys.stdin,
以从该对象的read方法读取输入
任何定义了类似于文件write方法的对象可以指定给sys.stdout,
所有的标准输出将发送到该对象方法上
"""
import sys
class Output:
    """File-like object that accumulates everything written to it in a string.

    Suitable as a sys.stdout replacement: only write()/writelines() are needed.
    """
    def __init__(self):
        # All output collected so far.
        self.text = ''
    def write(self, string):
        # Append one chunk of output.
        self.text = self.text + string
    def writelines(self, lines):
        # Write every line from the list in order.
        for one_line in lines:
            self.write(one_line)
# Simulated input file: serves a preset string through read()/readline().
class Input:
    """File-like object that feeds a preset string as standard-input text."""
    def __init__(self, input=''):  # parameter name kept for keyword callers (shadows builtin)
        self.text = input  # text remaining to be consumed
    def read(self, size=None):
        """Return `size` characters, or all remaining text when size is None."""
        if size is None:  # fixed: identity test, not `== None` (PEP 8 E711)
            res, self.text = self.text, ''
        else:
            res, self.text = self.text[:size], self.text[size:]
        return res
    def readline(self):
        """Return the next line including its newline; the rest of the text if none."""
        eoln = self.text.find('\n')  # offset of the next end-of-line
        if eoln == -1:  # no newline left: hand back everything
            res, self.text = self.text, ''
        else:
            res, self.text = self.text[:eoln+1], self.text[eoln+1:]
        return res
def redirect(function, pargs, kargs, input): # redirect stdin/stdout around one call
    """Run function(*pargs, **kargs) with stdin fed from `input` and stdout captured.

    Returns (result, output): the function's return value and the text it printed.
    The real streams are always restored, even if the function raises.
    """
    savestreams = sys.stdin, sys.stdout # remember the real streams
    sys.stdin = Input(input) # serve `input` as standard input
    sys.stdout = Output() # capture all printed output
    try:
        result = function(*pargs, **kargs) # run the function with its arguments
        output = sys.stdout.text
    finally:
        sys.stdin, sys.stdout = savestreams # restore streams on success or error
    return (result, output) # no exception: hand back value and captured text
| [
"bnm1122@yeah.net"
] | bnm1122@yeah.net |
9d10240ecd698eca008640c0eabba53a44e88d15 | 3d8a2d2124c484a7ac81835296c0a8834af8df6e | /one/commands/shell.py | 1019bee8e283c55218e4903522be915893be2b46 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | DNXLabs/one-cli | e067a2a9d49c61494abcd9ba4b63626f656cdbb9 | 37265189ab184e6fa7569c201b181ba5d95a0a2a | refs/heads/master | 2022-12-14T09:15:32.052749 | 2021-09-16T00:08:35 | 2021-09-16T00:08:35 | 253,417,937 | 7 | 1 | Apache-2.0 | 2021-09-16T00:08:36 | 2020-04-06T06:51:25 | Python | UTF-8 | Python | false | false | 914 | py | import click
from one.docker.container import Container
from one.docker.image import Image
from one.utils.environment.aws import EnvironmentAws
# Module-level collaborators shared by the command below.
container = Container()
image = Image()
environment = EnvironmentAws()
SHELL_IMAGE = image.get_image('shell')  # default docker image for the shell command
@click.command(help='Shell container with awscli and terraform pre-installed.')
@click.argument('args', nargs=-1)
@click.option('-i', '--image', default=SHELL_IMAGE, type=str, help='Docker image to use.')
@click.option('-p', '--port', default=(), type=str, help='Ports to expose from the container.', multiple=True)
def shell(args, image, port):
    """Start an interactive shell container with the resolved AWS environment injected."""
    # Resolve AWS environment variables to pass into the container.
    envs = environment.build().get_env()
    # Re-assemble the free-form arguments into one command string,
    # each argument followed by a space (empty string when no args).
    command = ''.join('%s ' % arg for arg in args)
    container.create(
        image=image,
        command=command,
        ports=list(port),
        entrypoint='',
        volumes=['.:/work'],
        environment=envs
    )
| [
"arthurbdiniz@gmail.com"
] | arthurbdiniz@gmail.com |
64778fcbc1e5fce8a3fa62050a7bf3561afa5db0 | 5822e8bfd09ec98770966763a6637dfaeb0b6dbd | /TWLight/emails/views.py | 350215a5e68768e96691e19d9b15fa5c417b7043 | [
"MIT"
] | permissive | Jain-Aditya/TWLight | 997c01cab83b9af5073589808011c3cc3ecc6d51 | 4c005fb9346e262cc452509b029774bde6cff0dc | refs/heads/master | 2020-04-30T07:56:38.706751 | 2019-03-18T08:04:35 | 2019-03-18T08:04:35 | 176,700,371 | 0 | 0 | null | 2019-03-20T09:34:36 | 2019-03-20T09:34:36 | null | UTF-8 | Python | false | false | 2,763 | py | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse, reverse_lazy
from django.core.mail import BadHeaderError, send_mail
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import FormView
from TWLight.emails.forms import ContactUsForm
from TWLight.emails.signals import ContactUs
@method_decorator(login_required, name='post')
class ContactUsView(FormView):
    """Contact-us page: renders the contact form and relays submissions by email.

    POST requires an authenticated user (login_required applied to post only);
    the submission itself is additionally restricted to Wikipedia editors.
    """
    template_name = 'emails/contact.html'
    form_class = ContactUsForm
    success_url = reverse_lazy('contact')
    def get_initial(self):
        """Pre-fill the form: user's email (when set), an optional ?message= query
        value, and the post-submit redirect target."""
        initial = super(ContactUsView, self).get_initial()
        # @TODO: This sort of gets repeated in ContactUsForm.
        # We could probably be factored out to a common place for DRYness.
        if self.request.user.is_authenticated():
            if self.request.user.email:
                initial.update({
                    'email': self.request.user.email,
                })
        if ('message' in self.request.GET):
            initial.update({
                'message': self.request.GET['message'],
            })
        initial.update({
            'next': reverse_lazy('contact'),
        })
        return initial
    def form_valid(self, form):
        """Send the contact email via the ContactUs signal, or deny non-editors.

        NOTE(review): the `return self.request.user.editor` statement that follows
        this try/except in the original file is unreachable (both branches return
        or raise) — confirm and remove that dead line.
        """
        # Adding an extra check to ensure the user is a wikipedia editor.
        try:
            # AttributeError here (no .editor) is caught below and treated as "not an editor".
            assert self.request.user.editor
            email = form.cleaned_data['email']
            message = form.cleaned_data['message']
            carbon_copy = form.cleaned_data['cc']
            ContactUs.new_email.send(
                sender=self.__class__,
                user_email=email,
                cc=carbon_copy,
                editor_wp_username=self.request.user.editor.wp_username,
                body=message
            )
            messages.add_message(self.request, messages.SUCCESS,
                # Translators: Shown to users when they successfully submit a new message using the contact us form.
                _('Your message has been sent. We\'ll get back to you soon!'))
            return HttpResponseRedirect(reverse('contact'))
        except (AssertionError, AttributeError) as e:
            messages.add_message (self.request, messages.WARNING,
                # Translators: This message is shown to non-wikipedia editors who attempt to post data to the contact us form.
                _('You must be a Wikipedia editor to do that.'))
            raise PermissionDenied
return self.request.user.editor | [
"uyscuti.wiki@gmail.com"
] | uyscuti.wiki@gmail.com |
7534fdc5e9d0e271082d603c5c0a1ba2262d679e | 873d858b79a51a6a14e74e1a6fe4cc97809a69bc | /rosserial_ws/devel/lib/rosserial_client/make_library.py | eed0f221f32c99f4c790655eeb0d5132d20cacf2 | [] | no_license | nichoteloo/ROS-Noetic-devel | cf3058014fc491f38a23426c136cb8fbdee7a397 | 81e7090c5dc0e548aed4aa57b9579e355e9bcd25 | refs/heads/master | 2023-05-07T19:21:03.804523 | 2021-06-02T21:13:48 | 2021-06-02T21:13:48 | 373,293,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/script.py.in
# creates a relay to a python script source file, acting as that file.
# The purpose is that of a symlink
# Absolute path of the real script this generated relay stands in for
# (catkin's devel-space substitute for a symlink).
python_script = '/home/nichotelo/ros/rosserial_ws/src/rosserial/rosserial_client/src/rosserial_client/make_library.py'
with open(python_script, 'r') as fh:
    # Minimal module-like globals so the target runs as if executed directly.
    context = {
        '__builtins__': __builtins__,
        '__doc__': None,
        '__file__': python_script,
        '__name__': __name__,
        '__package__': None,
    }
    # Compile with the real filename so tracebacks point at the true source.
    exec(compile(fh.read(), python_script, 'exec'), context)
| [
"nicolaschristianto@mail.ugm.ac.id"
] | nicolaschristianto@mail.ugm.ac.id |
1faba7758fe0c892013372af65d23ba65c04f8b8 | f8f841bba1f3c05c69f3a43297b9d63016f95e2a | /format_w2v_file.py | f8577089ee49841b33990da82b63543e3fcb28ef | [] | no_license | valerie94/russian_nballs | 2246d764d16f3b0374193bb0ec14c49ad72df544 | c960de4a7bba40dcd3a7c6e723b1bbea272969f8 | refs/heads/master | 2020-05-18T09:13:10.098864 | 2019-06-04T20:31:29 | 2019-06-04T20:31:29 | 184,317,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,991 | py | '''This .py program converts initial file with word2vec features to standard format such as
word_1 feature1 feature2 ... feature256 /n '''
'''This script creates file ru_w2v.txt which is w2v file which is required for constracting nballs '''
import re
def change_format(file_name):#input is the initial file
    """Flatten the raw embeddings dump into a list of tokens.

    Tokens are returned interleaved with single-space separators so the
    caller can simply ''.join() them into "word f1 f2 ... /n" lines.
    NOTE(review): assumes the ru.tsv layout from Kyubyong/wordvectors
    (word, then a '['...']'-bracketed feature vector) -- confirm.
    """
    line_array = []
    intial_file = open(file_name)
    for line in intial_file:
        #replace all special symbols and split by separator
        if ("[" in line):
            # Vector-opening line: strip tabs/newline and the '[' bracket.
            line = line.replace('\t', ' ')
            line = line.replace('\n', '')
            line = re.sub('\t', ' ', line)  # NOTE(review): redundant after the replace above
            line = line.replace('[', '')
            line = line.split(" ")
            # Drop the first token of the line -- presumably the row label;
            # TODO confirm against the source file.
            line.pop(0)
            line = list(filter(None, line))
            for x in line:
                line_array.append(x)
                line_array.append(' ')
        elif ("]" in line):
            # Vector-closing line: strip the ']' bracket; embedded newlines
            # are kept deliberately so entries stay one-per-line on output.
            line = line.replace('\t', ' ')
            line = line.replace(']', '')
            line = line.split(" ")
            line = list(filter(None, line))
            for x in line:
                line_array.append(x)
                if ("\n" not in x):
                    line_array.append(' ')
        else:
            # Continuation line inside a multi-line vector.
            line = line.replace('\n', '')
            line = line.split(" ")
            line = list(filter(None, line))
            for x in line:
                line_array.append(x)
                line_array.append(" ")
    intial_file.close()
    return line_array
def write_to_output_file(lines, file_name):
    """Concatenate *lines* (tokens + separators) and write them to *file_name*."""
    joined = "".join(lines)
    with open(file_name, "w") as out:
        out.write(joined)
if __name__ == "__main__":
    w2v_file = "ru.tsv" # initial source file from https://github.com/Kyubyong/wordvectors, put this file in the project directory or specify the path
    output_w2v_file = "ru_w2v.txt" #the name of output file with w2v features
    # Convert the raw dump, then write the standard "word f1 ... f256" format.
    formatted_line = change_format(w2v_file)
    write_to_output_file(formatted_line, output_w2v_file)
"noreply@github.com"
] | valerie94.noreply@github.com |
e23c70a6f0bf97c57a6a3211e8ce4ee4c23a4b01 | 22d3d698edfa66d071a77b98d9d293087e699d87 | /casanova/cli.py | 1ac34d646aac0c3f4e2ac82fc186be538414654c | [
"MIT"
] | permissive | medialab/casanova | 8b880b1848f8f1ea785fdba483395a7d7085b87f | fcd449df0fba33a48693bea4919c81e1654a6866 | refs/heads/master | 2023-07-30T04:16:33.079309 | 2023-07-13T12:41:43 | 2023-07-13T12:41:43 | 254,628,154 | 13 | 2 | MIT | 2023-01-17T16:00:31 | 2020-04-10T12:23:34 | Python | UTF-8 | Python | false | false | 14,540 | py | from typing import Optional, List
import re
import sys
import gzip
import json
import math
import random
import statistics
from itertools import islice
from types import GeneratorType
from os.path import join
from urllib.parse import urlsplit, urljoin
from multiprocessing import Pool as MultiProcessPool
from dataclasses import dataclass
from collections import Counter, defaultdict, deque, OrderedDict
from collections.abc import Mapping, Iterable
from casanova import (
Reader,
Enricher,
CSVSerializer,
RowWrapper,
Headers,
Writer,
InferringWriter,
)
from casanova.utils import import_target, flatmap
@dataclass
class InitializerOptions:
    """Everything a pool worker needs to set up its per-process globals."""
    # User-supplied expression, or an import path when `module` is True.
    code: str
    # True when `code` names a function to import rather than an expression.
    module: bool
    # Number of columns, used to fabricate headers for headerless files.
    row_len: int
    # Names of the arguments to pass to the imported function.
    args: List[str]
    # Code snippets run once per process, before any row.
    init_codes: List[str]
    # Code snippets run before / after each row's evaluation.
    before_codes: List[str]
    after_codes: List[str]
    fieldnames: Optional[List[str]] = None
    # Indices of the columns targeted by --select, if any.
    selected_indices: Optional[List[int]] = None
    # Root directory for the `read()` helper exposed to user code.
    base_dir: Optional[str] = None
# NOTE: just a thin wrapper to make sure we catch KeyboardInterrupt in
# child processes gracefully.
class WorkerWrapper(object):
    """Wrap a worker callable so a KeyboardInterrupt in a child process
    exits that process gracefully instead of spewing a traceback."""

    __slots__ = ("fn",)

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        try:
            result = self.fn(*args, **kwargs)
        except KeyboardInterrupt:
            sys.exit(1)
        return result
class SingleProcessPool(object):
    """Drop-in stand-in for multiprocessing.Pool that runs tasks inline."""

    def imap(self, worker, tasks, chunksize=1):
        # `chunksize` is accepted for Pool API compatibility but is
        # meaningless when everything runs in the current process.
        yield from map(worker, tasks)

    def imap_unordered(self, *args, **kwargs):
        # Order is trivially preserved when running inline.
        return self.imap(*args, **kwargs)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        return None
def get_pool(n: int, options: InitializerOptions):
    """Return an inline pool for n < 2, else a multiprocessing pool of *n*
    workers, each initialized with *options*."""
    if n < 2:
        # Single-process mode: initialize the current process directly.
        multiprocessed_initializer(options)
        return SingleProcessPool()

    return MultiProcessPool(
        n,
        initializer=multiprocessed_initializer,
        initargs=(options,),
    )
def get_csv_serializer(cli_args):
    """Build a CSVSerializer configured from the CLI serialization flags."""
    options = {
        "plural_separator": cli_args.plural_separator,
        "none_value": cli_args.none_value,
        "true_value": cli_args.true_value,
        "false_value": cli_args.false_value,
    }
    return CSVSerializer(**options)
def get_inferring_writer(output_file, cli_args):
    """Build an InferringWriter on *output_file* from the CLI flags."""
    options = {
        "fieldnames": cli_args.fieldnames,
        "plural_separator": cli_args.plural_separator,
        "none_value": cli_args.none_value,
        "true_value": cli_args.true_value,
        "false_value": cli_args.false_value,
    }
    return InferringWriter(output_file, **options)
# Global multiprocessing variables
# Populated per-process by multiprocessed_initializer so workers can reach
# their configuration without re-pickling state with every task.
CODE = None  # expression to eval (expression mode)
FUNCTION = None  # imported callable (module mode)
ARGS = None  # names of the arguments to pass to FUNCTION
SELECTION = None  # selected column indices when --select was given
BEFORE_CODES = []  # snippets exec'd before each row
AFTER_CODES = []  # snippets exec'd after each row
EVALUATION_CONTEXT = {}  # namespace handed to user eval/exec
ROW = None  # shared RowWrapper rebound in place to each processed row
BASE_DIR = None  # root for the read() helper, if configured
def read(path, encoding: str = "utf-8") -> Optional[str]:
    """Read a text file and return its contents, or None if it is missing.

    The path is resolved relative to the global BASE_DIR when one was
    configured, and ".gz" files are transparently decompressed.
    """
    global BASE_DIR

    if BASE_DIR is not None:
        path = join(BASE_DIR, path)

    try:
        if path.endswith(".gz"):
            with gzip.open(path, encoding=encoding, mode="rt") as f:
                return f.read()

        # BUG FIX: honor the caller's encoding here as well (it used to be
        # hard-coded to utf-8 for plain files, ignoring the parameter).
        with open(path, encoding=encoding, mode="r") as f:
            return f.read()
    except FileNotFoundError:
        return None
# Read-only names exposed to user-supplied expressions and snippets.
EVALUATION_CONTEXT_LIB = {
    # lib
    "join": join,
    "math": math,
    "mean": statistics.mean,
    "median": statistics.median,
    "random": random,
    "re": re,
    "read": read,
    "urljoin": urljoin,
    "urlsplit": urlsplit,
    # classes
    "Counter": Counter,
    "defaultdict": defaultdict,
    "deque": deque,
}
def initialize_evaluation_context():
    """Reset EVALUATION_CONTEXT to the library bindings plus blank row state."""
    global EVALUATION_CONTEXT

    blank_state = {
        "fieldnames": None,
        "headers": None,
        "index": 0,
        "row": None,
        "cell": None,
        "cells": None,
    }
    EVALUATION_CONTEXT = {**EVALUATION_CONTEXT_LIB, **blank_state}
def multiprocessed_initializer(options: InitializerOptions):
    """Per-process setup: install code/function and row state into globals.

    Runs once in every pool worker (and once in the main process when
    single-processed) so the worker functions can rely on module globals
    instead of serializing configuration with each task.
    """
    global CODE
    global FUNCTION
    global ARGS
    global BEFORE_CODES
    global AFTER_CODES
    global ROW
    global SELECTION
    global BASE_DIR
    # Reset in case of multiple execution from same process
    CODE = None
    FUNCTION = None
    ARGS = None
    SELECTION = None
    BEFORE_CODES = []
    AFTER_CODES = []
    ROW = None
    BASE_DIR = options.base_dir
    initialize_evaluation_context()
    # Module mode imports a callable; expression mode keeps the raw code.
    if options.module:
        FUNCTION = import_target(options.code)
        ARGS = options.args
    else:
        CODE = options.code
    BEFORE_CODES = options.before_codes
    AFTER_CODES = options.after_codes
    if options.selected_indices is not None:
        SELECTION = options.selected_indices
    # Headerless files get integer "headers" spanning the row length.
    if options.fieldnames is not None:
        EVALUATION_CONTEXT["fieldnames"] = options.fieldnames
        EVALUATION_CONTEXT["headers"] = Headers(options.fieldnames)
        headers = EVALUATION_CONTEXT["headers"]
    else:
        headers = Headers(range(options.row_len))
    # User --init snippets run once, with the context as local namespace.
    for init_code in options.init_codes:
        exec(init_code, None, EVALUATION_CONTEXT)
    # A single wrapper is reused and rebound in place for every row.
    EVALUATION_CONTEXT["row"] = RowWrapper(headers, None)
    ROW = EVALUATION_CONTEXT["row"]
def select(row):
    """Refresh the 'cell'/'cells' context entries from the selected columns."""
    if SELECTION is None:
        return

    picked = [row[idx] for idx in SELECTION]
    EVALUATION_CONTEXT["cells"] = picked
    EVALUATION_CONTEXT["cell"] = picked[0]
def multiprocessed_worker_using_eval(payload):
    """Evaluate the user's expression for one (index, row) payload.

    Returns an (error, index, value) triple; exactly one of error/value is
    non-None so the parent process can decide to raise or ignore.
    """
    global EVALUATION_CONTEXT
    i, row = payload
    EVALUATION_CONTEXT["index"] = i
    # Rebind the shared wrapper and selection to the current row in place.
    ROW._replace(row)
    select(row)
    try:
        for before_code in BEFORE_CODES:
            exec(before_code, EVALUATION_CONTEXT, None)
        value = eval(CODE, EVALUATION_CONTEXT, None)
        for after_code in AFTER_CODES:
            exec(after_code, EVALUATION_CONTEXT, None)
        return None, i, value
    except Exception as e:
        return e, i, None
def collect_args(i, row):
    """Yield the positional arguments for FUNCTION, in ARGS order.

    Note that "cells" contributes one value per selected column, so the
    number of yielded arguments can exceed len(ARGS).
    """
    for arg_name in ARGS:
        if arg_name == "row":
            # The shared wrapper, already rebound to the current row.
            yield ROW
        elif arg_name == "index":
            yield i
        elif arg_name == "fieldnames":
            yield EVALUATION_CONTEXT["fieldnames"]
        elif arg_name == "headers":
            yield EVALUATION_CONTEXT["headers"]
        elif arg_name == "cell":
            # NOTE: we know SELECTION is relevant because it's validated by CLI
            yield row[SELECTION[0]]
        elif arg_name == "cells":
            # NOTE: we know SELECTION is relevant because it's validated by CLI
            for idx in SELECTION:
                yield row[idx]
        else:
            raise TypeError("unknown arg_name: %s" % arg_name)
def multiprocessed_worker_using_function(payload):
    """Call the imported FUNCTION on one (index, row) payload.

    Returns the same (error, index, value) triple as the eval-based worker.
    """
    i, row = payload
    ROW._replace(row)
    args = tuple(collect_args(i, row))
    try:
        value = FUNCTION(*args)
        # NOTE: consuming generators
        if isinstance(value, GeneratorType):
            value = list(value)
        return None, i, value
    except Exception as e:
        return e, i, None
# TODO: go to minet for progress bar and rich?
# TODO: write proper cli documentation
def mp_iteration(cli_args, reader: Reader):
    """Yield (index, row, result) for every row, possibly multiprocessed.

    Results follow input order unless --unordered was given; worker errors
    are re-raised here unless --ignore-errors maps them to None results.
    """
    worker = (
        multiprocessed_worker_using_eval
        if not cli_args.module
        else multiprocessed_worker_using_function
    )
    # Only wrap for real child processes; inline execution keeps tracebacks.
    if cli_args.processes > 1:
        worker = WorkerWrapper(worker)
    selected_indices = None
    if cli_args.select:
        if reader.headers is not None:
            selected_indices = reader.headers.select(cli_args.select)
        else:
            selected_indices = Headers.select_no_headers(cli_args.select)
    init_options = InitializerOptions(
        code=cli_args.code,
        module=cli_args.module,
        args=cli_args.args,
        init_codes=cli_args.init,
        before_codes=cli_args.before,
        after_codes=cli_args.after,
        row_len=reader.row_len,
        fieldnames=reader.fieldnames,
        selected_indices=selected_indices,
        base_dir=cli_args.base_dir,
    )
    with get_pool(cli_args.processes, init_options) as pool:
        # NOTE: we keep track of rows being worked on from the main process
        # to avoid serializing them back with worker result.
        worked_rows = {}
        def payloads():
            # Stash each row by index before handing it to the pool.
            for t in reader.enumerate():
                worked_rows[t[0]] = t[1]
                yield t
        mapper = pool.imap if not cli_args.unordered else pool.imap_unordered
        for exc, i, result in mapper(worker, payloads(), chunksize=cli_args.chunk_size):
            row = worked_rows.pop(i)
            if exc is not None:
                if cli_args.ignore_errors:
                    result = None
                else:
                    raise exc
            yield i, row, result
def map_action(cli_args, output_file):
    """Append one serialized result column to every row of the input file."""
    serialize = get_csv_serializer(cli_args)
    with Enricher(
        cli_args.file,
        output_file,
        add=[cli_args.new_column],
        delimiter=cli_args.delimiter,
    ) as enricher:
        for _, row, result in mp_iteration(cli_args, enricher):
            enricher.writerow(row, [serialize(result)])
def flatmap_action(cli_args, output_file):
    """Like map_action, but emit one output row per flattened result value."""
    serialize = get_csv_serializer(cli_args)
    with Enricher(
        cli_args.file,
        output_file,
        add=[cli_args.new_column],
        delimiter=cli_args.delimiter,
    ) as enricher:
        for _, row, result in mp_iteration(cli_args, enricher):
            # A single input row may fan out into several output rows.
            for value in flatmap(result):
                enricher.writerow(row, [serialize(value)])
def filter_action(cli_args, output_file):
    """Keep only rows whose evaluated result is truthy (flipped by --invert-match)."""
    with Enricher(cli_args.file, output_file, delimiter=cli_args.delimiter) as enricher:
        for _, row, verdict in mp_iteration(cli_args, enricher):
            keep = not verdict if cli_args.invert_match else verdict

            if keep:
                enricher.writerow(row)
def map_reduce_action(cli_args, output_file):
    """Fold all per-row results into a single value and print/serialize it.

    The accumulator is either an expression (with `acc` and `current` in
    scope) or an imported binary function when --module is given. Without
    --init-value, the first result seeds the accumulator.
    """
    acc_fn = None
    if cli_args.module:
        acc_fn = import_target(cli_args.accumulator)
    with Reader(
        cli_args.file,
        delimiter=cli_args.delimiter,
    ) as enricher:
        acc_context = EVALUATION_CONTEXT_LIB.copy()
        acc = None
        initialized = False
        if cli_args.init_value is not None:
            initialized = True
            acc = eval(cli_args.init_value, acc_context, None)
        acc_context["acc"] = acc
        for _, row, result in mp_iteration(cli_args, enricher):
            if not initialized:
                # First result becomes the seed when no --init-value given.
                acc_context["acc"] = result
                initialized = True
                continue
            if acc_fn is None:
                acc_context["current"] = result
                acc_context["acc"] = eval(cli_args.accumulator, acc_context, None)
            else:
                acc_context["acc"] = acc_fn(acc_context["acc"], result)
        final_result = acc_context["acc"]
        # Output format: --json, --csv, or plain repr on its own line.
        if cli_args.json:
            json.dump(
                final_result,
                output_file,
                indent=2 if cli_args.pretty else None,
                ensure_ascii=False,
            )
            print(file=output_file)
        elif cli_args.csv:
            writer = get_inferring_writer(output_file, cli_args)
            writer.writerow(final_result)
        else:
            print(final_result, file=output_file)
class GroupWrapper:
    """Read-only view over one group of rows handed to aggregators.

    A single instance is reused across groups via _replace; iterating it
    yields a shared RowWrapper rebound to each row in turn, so consumers
    must not hold on to yielded rows.
    """
    # Name-mangled slots keep the internals away from user aggregator code.
    __slots__ = ("__name", "__rows", "__wrapper")
    def __init__(self, fieldnames):
        self.__wrapper = RowWrapper(Headers(fieldnames), range(len(fieldnames)))
    def _replace(self, name, rows):
        # Rebind this wrapper to the next (group name, rows) pair.
        self.__name = name
        self.__rows = rows
    @property
    def name(self):
        return self.__name
    def __len__(self):
        return len(self.__rows)
    def __iter__(self):
        for row in self.__rows:
            self.__wrapper._replace(row)
            yield self.__wrapper
def groupby_action(cli_args, output_file):
    """Group rows by their evaluated result, then aggregate each group.

    The aggregator is an expression over `group` (a GroupWrapper) or an
    imported function with --module. The output header is emitted lazily:
    its shape depends on whether the first aggregate is a mapping, an
    iterable, or a scalar (unless --fieldnames fixes it upfront).
    """
    agg_fn = None
    if cli_args.module:
        agg_fn = import_target(cli_args.aggregator)
    with Reader(
        cli_args.file,
        delimiter=cli_args.delimiter,
    ) as enricher:
        # NOTE: using an ordered dict to guarantee stability for all python versions
        groups = OrderedDict()
        # Grouping
        for _, row, result in mp_iteration(cli_args, enricher):
            l = groups.get(result)
            if l is None:
                l = [row]
                groups[result] = l
            else:
                l.append(row)
        # Aggregating
        agg_context = EVALUATION_CONTEXT_LIB.copy()
        header_emitted = False
        writer = Writer(output_file)
        fieldnames = ["group"]
        mapping_fieldnames = None
        serializer = get_csv_serializer(cli_args)
        if cli_args.fieldnames is not None:
            mapping_fieldnames = cli_args.fieldnames
            fieldnames += cli_args.fieldnames
            header_emitted = True
            writer.writerow(fieldnames)
        group_wrapper = GroupWrapper(enricher.fieldnames)
        for name, rows in groups.items():
            group_wrapper._replace(name, rows)
            if agg_fn is not None:
                result = agg_fn(group_wrapper)
            else:
                agg_context["group"] = group_wrapper
                result = eval(cli_args.aggregator, agg_context, None)
            name = serializer(name)
            if isinstance(result, Mapping):
                # Mapping aggregate: columns come from the first result's keys.
                if not header_emitted:
                    mapping_fieldnames = list(result.keys())
                    fieldnames += mapping_fieldnames
                    writer.writerow(fieldnames)
                    header_emitted = True
                writer.writerow(
                    [name] + serializer.serialize_dict_row(result, mapping_fieldnames)
                )
            elif isinstance(result, Iterable) and not isinstance(result, (bytes, str)):
                # Iterable aggregate: synthesize col1..colN column names.
                if not header_emitted:
                    fieldnames += ["col%i" % i for i in range(1, len(result) + 1)]
                    writer.writerow(fieldnames)
                    header_emitted = True
                writer.writerow([name] + serializer.serialize_row(result))
            else:
                # Scalar aggregate: a single "value" column.
                if not header_emitted:
                    writer.writerow(fieldnames + ["value"])
                    header_emitted = True
                writer.writerow([name, serializer(result)])
def reverse_action(cli_args, output_file):
    """Emit input rows in reverse order, optionally capped by --lines."""
    with Enricher(
        cli_args.file, output_file, delimiter=cli_args.delimiter, reverse=True
    ) as enricher:
        rows = enricher if cli_args.lines is None else islice(enricher, cli_args.lines)

        for row in rows:
            enricher.writerow(row)
| [
"guillaumeplique@gmail.com"
] | guillaumeplique@gmail.com |
f7af16fa31198ad465290cab948bded706b51328 | b716ab7cc296ba20f23bbc2aed5af6bef29923fc | /2.py | 0f72894c2caf328cb32d74883ee59561b00848b4 | [] | no_license | widyamellysa/dumbways-test | efea011988b962240af3a5f13fa9fc029737e0e7 | ec5a9d09afd12af35d8cd8073376f9727d6c3d25 | refs/heads/master | 2022-11-23T16:27:10.859000 | 2020-08-01T14:49:12 | 2020-08-01T14:49:12 | 284,274,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | total_belanja = float(input("Total belanja : "))
# Amount of money tendered by the customer.
bayar = float(input("Jumlah uang : "))
#DumbwaysJos
# Totals from 50,000 up to 80,000 earn a flat 20,000 discount.
if total_belanja >= 50000 and total_belanja <= 80000:
    diskon1 = 20000
    hasil1 = total_belanja - diskon1
    sisa1 = bayar - hasil1
    print("Diskon : ", diskon1)
    print("Kembalian : ", sisa1)
# Totals below 50,000 get no discount.
elif total_belanja < 50000:
    diskon2 = 0
    hasil2 = total_belanja - diskon2
    sisa2 = bayar - hasil2
    print("Diskon : ", diskon2)
    print("Kembalian : ", sisa2)
#DumbwaysMantap
# Totals above 80,000 earn a flat 40,000 discount.
elif total_belanja > 80000:
    diskon3 = 40000
    hasil3 = total_belanja - diskon3
    sisa3 = bayar - hasil3
    print("Diskon : ", diskon3)
    print("Kembalian : ", sisa3)
"noreply@github.com"
] | widyamellysa.noreply@github.com |
19947694ca5e83f139404b18e79c23211e055d99 | 74c776f1a9a059bbc530fe5d6b12165425d3f954 | /run_cn.py | 33e53e3cce1647da732faa01a74054d3bb265f5a | [
"MIT"
] | permissive | Lyttonkeepfoing/SANER | 0d2ea7df305e0599511bb349dc2b05af59b64425 | ac30f441be615de5224411816935283eddfbe330 | refs/heads/master | 2023-08-25T05:23:01.412133 | 2021-10-19T14:58:11 | 2021-10-19T14:58:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | import os
# the number of similar words
num = 10
# dataset name
dataset = "WB"
seed = 14
attn_type = "dot"
fusion_type = "gate-concat"
# Path of bert model
bert_model = "data/bert-base-chinese"
# Path of the pre-trained word embeddings for getting similar words for each token
glove_path = "data/tencent_unigram.txt"
pool_method = "first"
# Path of the ZEN model
zen_model = "zen_base/"
# Log file name encodes dataset, pooling method and context size.
log = "log/{}_zen_{}_{}.txt".format(dataset, pool_method, num)
# Launch the training script with all hyper-parameters defined above.
os.system("python3 train_zen_cn.py --dataset {} "
          "--seed {} --kv_attn_type {} --fusion_type {} --context_num {} "
          "--bert_model {} --pool_method {} --glove_path {} "
          "--zen_model {} "
          "--lr 0.0001 --trans_dropout 0.2 --fc_dropout 0.4 --memory_dropout 0.2 "
          "--fusion_dropout 0.2 --log {}".format(dataset, seed, attn_type, fusion_type,
          num, bert_model, pool_method, glove_path, zen_model, log))
| [
"nyy477@qq.com"
] | nyy477@qq.com |
1c02be31ccbc204a6af0d465965f54c16f3cf3bf | fd2e10285787a4001665aae69c3c28e09a84ee75 | /breakout.py | 46687816234c78a6b1667f801c73be2fbd37b025 | [] | no_license | laflechejonathan/breakout | 967c60d2141bfd15730baaf1308f56d59cefa267 | be72348db2e1450cfb0c83325e4001d09c674bdd | refs/heads/master | 2021-01-18T20:24:01.702072 | 2017-04-05T04:21:45 | 2017-04-05T04:21:45 | 86,960,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,991 | py | import random
import pygame
import math
import geometry
import constants as const
class BrickGrid:
    """The grid of destructible bricks plus any bullets they have fired."""

    def __init__(self):
        # Spread NUM_COLUMNS x NUM_ROWS bricks evenly over the top half of
        # the screen, with equal gaps between them.
        empty_horizontal_space = const.SCREEN_WIDTH - (2 + const.NUM_COLUMNS) * const.BRICK_WIDTH
        brick_horizontal_space = empty_horizontal_space / const.NUM_COLUMNS
        # BUG FIX: vertical free space was computed with BRICK_WIDTH even
        # though rows are BRICK_HEIGHT tall (see brick_y below).
        empty_vertical_space = const.SCREEN_HEIGHT / 2 - const.NUM_ROWS * const.BRICK_HEIGHT
        brick_vertical_space = empty_vertical_space / const.NUM_ROWS
        self.bullet_set = set()
        self.brick_set = set()
        for i in range(const.NUM_COLUMNS):
            for j in range(const.NUM_ROWS):
                brick_x = (i + 1) * (brick_horizontal_space + const.BRICK_WIDTH)
                brick_y = (j + 1) * (brick_vertical_space + const.BRICK_HEIGHT)
                rect = geometry.Rect(brick_x, brick_y, const.BRICK_WIDTH, const.BRICK_HEIGHT)
                self.brick_set.add(rect)
        self.original_brick_count = len(self.brick_set)

    def get_num_cleared(self):
        """Number of bricks destroyed so far."""
        return self.original_brick_count - len(self.brick_set)

    def reset(self):
        """Discard all in-flight bullets."""
        self.bullet_set = set()

    def render(self, screen):
        """Draw the remaining bricks and the bullets."""
        for b in self.brick_set:
            pygame.draw.rect(screen, const.GREEN, [b.x, b.y, b.width, b.height])
        for b in self.bullet_set:
            pygame.draw.rect(screen, const.GREY, [b.x, b.y, b.width, b.height])

    def interact(self):
        """Advance all bullets one tick and occasionally fire a new one.

        Always returns True (the grid itself never ends the game).
        """
        if random.uniform(0.0, 1.0) < const.PERCENT_BULLET:
            # Only the bottom-most brick of each column may fire.
            candidates = [
                b for b in self.brick_set
                if not any(other.x == b.x and other.y > b.y for other in self.brick_set)
            ]
            if candidates:  # guard: no brick left to fire from
                fires_bullet = random.choice(candidates)
                # BUG FIX: the original referenced the stale comprehension
                # variable `b` here (a NameError on Python 3); use the brick
                # that actually fires.
                bullet_x = fires_bullet.x + (fires_bullet.width + const.BULLET_WIDTH) / 2
                bullet_y = fires_bullet.y + fires_bullet.height
                self.bullet_set.add(geometry.Rect(bullet_x, bullet_y, const.BULLET_WIDTH, const.BULLET_HEIGHT))
        # Move bullets down and drop the ones that left the screen.
        remove_set = set()
        for b in self.bullet_set:
            b.y += const.BULLET_SPEED
            if b.y >= const.SCREEN_HEIGHT:
                remove_set.add(b)
        self.bullet_set -= remove_set
        return True
class Paddle:
    """The player-controlled paddle at the bottom of the screen."""
    def __init__(self):
        # Start horizontally centered, just above the bottom edge.
        x = const.SCREEN_WIDTH / 2
        y = const.SCREEN_HEIGHT - const.PADDLE_HEIGHT - const.PADDLE_SPACING
        self.rect = geometry.Rect(x, y, const.PADDLE_WIDTH, const.PADDLE_HEIGHT)
        self.speed = const.PADDLE_SPEED
    def interact(self):
        """Move according to the held arrow keys. Always returns True."""
        if pygame.key.get_pressed()[pygame.K_LEFT] != 0:
            self.rect.x -= self.speed
        if pygame.key.get_pressed()[pygame.K_RIGHT] != 0:
            self.rect.x += self.speed
        return True
    def render(self, screen):
        """Draw the paddle."""
        pygame.draw.rect(screen, const.RED, [self.rect.x, self.rect.y, self.rect.width, self.rect.height])
    def get_angle_for_x(self, x):
        '''
        TODO - currently all collisions are 45 degrees which is a bit boring
        With this code, depending on point of contact, rotation angle will
        vary between min and max angle
        '''
        # NOTE(review): not called by collision_check in this file; appears
        # to be a work in progress (note the Python 2 debug print below).
        x = float(x)
        delta = x - self.rect.x
        # Fraction of the paddle width at which the ball made contact.
        percentage_of_paddle = delta / self.rect.width
        degree_range = const.PADDLE_MAX_ANGLE - const.PADDLE_MIN_ANGLE
        angle = degree_range * percentage_of_paddle + const.PADDLE_MIN_ANGLE
        print 'For x={} in range {}/{}, got angle={}'.format(x, self.rect.x, self.rect.max_x, angle)
        return angle
class Ball:
    """The bouncing ball: position, heading vector, and playfield bounds."""

    def __init__(self):
        self.radius = const.BALL_RADIUS
        self.speed = const.BALL_SPEED
        # Spawn just above the paddle at a random horizontal position,
        # heading upward at 45 degrees toward a random side.
        self.x = random.randint(0, const.SCREEN_WIDTH)
        self.y = const.SCREEN_HEIGHT - const.PADDLE_HEIGHT - const.PADDLE_SPACING - 4 * const.BALL_RADIUS
        self.heading = (random.choice([-0.5, 0.5]), -0.5)
        # Bounce limits, inset by the radius on the sides and the top.
        self.min_x = const.BALL_RADIUS
        self.max_x = const.SCREEN_WIDTH - const.BALL_RADIUS
        self.min_y = const.BALL_RADIUS
        self.max_y = const.SCREEN_HEIGHT
        self.paddle_y = const.SCREEN_HEIGHT - const.PADDLE_HEIGHT - const.PADDLE_SPACING - const.BALL_RADIUS

    def line_of_movement(self):
        """Return (previous, current) positions spanning this tick's motion."""
        step_x = int(const.BALL_SPEED * self.heading[0])
        step_y = int(const.BALL_SPEED * self.heading[1])
        return ((self.x - step_x, self.y - step_y), (self.x, self.y))

    def interact(self):
        """Advance one tick; False means the ball fell off the bottom."""
        self.x += int(const.BALL_SPEED * self.heading[0])
        self.y += int(const.BALL_SPEED * self.heading[1])
        return self.y < self.max_y

    def rotate(self, angle):
        """Rotate the heading vector by `angle` degrees."""
        theta = float(angle) * math.pi / 180.0
        dx, dy = self.heading
        rotated_x = dx * math.cos(theta) - dy * math.sin(theta)
        rotated_y = dx * math.sin(theta) + dy * math.cos(theta)
        self.heading = (rotated_x, rotated_y)

    def render(self, screen):
        """Draw the ball as a filled circle."""
        pygame.draw.circle(screen, const.BLUE, (self.x, self.y), self.radius)
def collision_check(ball, paddle, brick_grid):
    """Resolve this frame's collisions; returns False when a bullet hits the paddle.

    Mutates ball.heading for wall/paddle/brick bounces and removes at most
    one brick per frame.
    """
    # Side walls flip the horizontal component; the ceiling flips vertical.
    if ball.x <= ball.min_x or ball.x >= ball.max_x:
        ball.heading = (-ball.heading[0], ball.heading[1])
    if ball.y <= ball.min_y:
        ball.heading = (ball.heading[0], -ball.heading[1])
    if paddle.rect.intersect(ball.line_of_movement(), const.BALL_RADIUS) != geometry.Intersection.NONE:
        ball.heading = (ball.heading[0], -ball.heading[1])
        # Extra tick after the bounce -- presumably to clear the paddle and
        # avoid re-colliding next frame; TODO confirm.
        ball.interact()
    for brick in brick_grid.brick_set:
        intersection = brick.intersect(ball.line_of_movement(), const.BALL_RADIUS)
        if intersection == geometry.Intersection.HORIZONTAL:
            ball.heading = (ball.heading[0], -ball.heading[1])
        elif intersection == geometry.Intersection.VERTICAL:
            ball.heading = (-ball.heading[0], ball.heading[1])
        if intersection != geometry.Intersection.NONE:
            # Break right away: the set is mutated during iteration.
            brick_grid.brick_set.remove(brick)
            break
    # A bullet at paddle depth within the paddle's horizontal span is a hit.
    for bullet in brick_grid.bullet_set:
        if bullet.y >= paddle.rect.y and bullet.x >= paddle.rect.x and bullet.x <= paddle.rect.max_x:
            return False
    return True
| [
"jlafleche@zenefits.com"
] | jlafleche@zenefits.com |
dde7d82754424f14d0b28a6142c13333535560f6 | e3adbec6cd8d0b50880b3b606352a1c751d4ac79 | /functions/singly_linked_list.py | 7cadf3954044adea1f9fcd0cccd0b5268d96d8b1 | [] | no_license | ZiyaoGeng/LeetCode | 3cc5b553df5eac2e5bbb3ccd0f0ed4229574fa2f | c4c60b289c0bd9d9f228d04abe948d6287e70ea8 | refs/heads/master | 2022-04-07T08:19:58.647408 | 2020-03-12T08:56:13 | 2020-03-12T08:56:13 | 218,981,503 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class ListNode:
    """A node in a singly linked list."""

    def __init__(self, x):
        """Store value *x*; the node starts detached (next is None)."""
        self.val, self.next = x, None
| [
"593947521@qq.com"
] | 593947521@qq.com |
07a8d38422d30557d6ed10da8a9d60d2ec141308 | 3c397042e7fa0d7d4fa25cd75f0d10babd9f933f | /lab_8/mysite/dishes/urls.py | 5290d59685f5e3ad3fc3fb854489224b5e6a6100 | [] | no_license | StepanIonov/RIP_lab | f34f2a95fb8ddcfeeb703efd7088320f40ac1fc5 | 0fefaf77d664ed404d791422658a062fc3e9201c | refs/heads/master | 2023-02-20T12:38:33.389360 | 2021-01-18T10:13:24 | 2021-01-18T10:13:24 | 295,768,234 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from django.urls import path
from . import views
urlpatterns = [
path('<int:dish_id>/', views.detail, name='detail'),
path('', views.index, name='index'),
] | [
"42943755+StepanIonov@users.noreply.github.com"
] | 42943755+StepanIonov@users.noreply.github.com |
8658832a7dcc4eabcb30779bae2a6408403a2d6a | 3f5e290adefe73d55c4170029629c9f4794920f1 | /rewrite_verify.py | 6362a48d1f9e92fa3daea97e8de31e2f69f66ab9 | [] | no_license | fengmu/mana1 | d318b36294ab17f6ee8d4b4536a5ddad94e0dce5 | 7d64c33580e2fdc2cbd81e17124dec44d1d8858a | refs/heads/master | 2020-12-24T13:28:28.937308 | 2012-12-11T09:30:43 | 2012-12-11T09:30:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,377 | py | # -*- coding: utf-8 -*-
'''
#=============================================================================
# FileName: rewrite_verify.py
# Desc:
# Author: solomon
# Email: 253376634@qq.com
# HomePage:
# Version: 0.0.1
# LastChange: 2012-10-28 18:31:19
# History:
#=============================================================================
'''
from django.template.loader import get_template
from django.template import Context
from django.http import HttpResponse
import confsql,datetime,memcache
from django.utils import simplejson
#import writemc
import functions
from mylog import log
def rewrite_verify(request): # review step: rewrite (recompute) store data
    """Rewrite cached store data for the stores checked in the posted
    CSV-like table (or for all stores when none are checked), then return
    an HTML <tr> listing of per-store summaries from `brainfo`.
    NOTE(review): `writemc` is used below but its import is commented out
    at the top of this file -- verify before relying on this view.
    (Python 2 code: note the `<>` operator below.)
    """
    #os.system("cmd /c D:/django/mysite/mana1/writemc.py")
    myjson = simplejson.loads(request.POST["myjson"])
    rs1=functions.trim_csv(myjson["table"],7)
    s="" # accumulates quoted store codes for the SQL IN clause
    for rs in rs1:
        if rs[0]=="1": # store row is checked
            s+="'"+rs[1]+"',"
    s=s[0:-1] # drop the trailing comma
    #os.system('cmd.exe /c c:/ZSW/memcached/memcached.exe -m 256 -p 11211') # start the memcached service
    html=""
    if s<>"":
        writemc.Writemc(sqlstr=s)
        result=confsql.runquery(u"select * from brainfo where 门店代码 in("+s+")") # fetch summaries for the checked stores
        html="<tr><th>重算</th><th>门店代码</th><th>门店名称</th><th>品项数</th><th>库存数量</th><th>建议订货总量</th><th>建议订货总额</th></tr>"
        for rs in result:
            html+="<tr><td><input type='checkbox'></td><td>"+str(rs[0])+"</td><td>"+str(rs[1].encode("utf8"))+"</td><td>"+str(rs[2])+"</td><td>"+str(rs[3])+"</td><td>"+str(rs[4])+"</td><td>"+str(rs[5])+"</td></tr>"
        return HttpResponse(html)
    else:
        writemc.Writemc() # rewrite everything
        result=confsql.runquery("select * from brainfo") # fetch all store summaries
        html="<tr><th>重算</th><th>门店代码</th><th>门店名称</th><th>品项数</th><th>库存数量</th><th>建议订货总量</th><th>建议订货总额</th></tr>"
        for rs in result:
            html+="<tr><td><input type='checkbox'></td><td>"+str(rs[0])+"</td><td>"+str(rs[1].encode("utf8"))+"</td><td>"+str(rs[2])+"</td><td>"+str(rs[3])+"</td><td>"+str(rs[4])+"</td><td>"+str(rs[5])+"</td></tr>"
        return HttpResponse(html)
| [
"fengmu1225@qq.com"
] | fengmu1225@qq.com |
1baef92bd7d86cf90bd97918be9c5310d34eab69 | 6ca2b1b16fce00730bc47a5e6d724aad7698a1f4 | /settings.py | 6b962dbdae1cfd07d515a8220fc6f1cfbe6390ea | [] | no_license | eddy-suiyun/02_Alien-invasion | 72ef3801fb1c1f88743e92de75cf52ea94186b0a | 4ba470369f033d788a890a739d0d608842eb8035 | refs/heads/master | 2021-05-25T17:17:26.037632 | 2020-04-12T13:43:17 | 2020-04-12T13:43:17 | 253,839,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | class Settings(object):
"""存储《外星人入侵》的初始化类"""
def __init__(self):
"""初始化游戏的设置"""
# 屏幕设置
self.screen_width = 1200
self.screen_height = 800
self.bg_color = (230,230,230)
# 飞船设置
self.ship_speed_factor = 1.5
# 子弹设置
self.bullet_speed_factor = 1
self.bullet_width = 3
self.bullet_height = 15
self.bullet_color = 250,250,250
self.bullets_allowed = 6
| [
"125443673@qq.com"
] | 125443673@qq.com |
1e44102c3d0503284237207d931a11eff57d5169 | 11805f3f0ca426ae21b754526578fe6b69c9fd52 | /emotion-analysis-3-master/read_excel/read_oneStu_allWeek.py | 852b464b524bf620e6f7eedd64b3284516bced5e | [] | no_license | chensheng1/NLP_emotion | 106838156224cfc1ab9491df908d92bfdfac2915 | f43760e7493aa4d360dd0c4d0bca8ba100614612 | refs/heads/master | 2023-03-22T15:54:16.107975 | 2021-03-08T15:00:46 | 2021-03-08T15:00:46 | 345,604,118 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,740 | py | '''
读取某同学n的课前、课后和每周总结数据
'''
# -*- coding: utf-8 -*-
import xlrd
xlsfile1 = r"课前预习.xls"# pre-class preparation workbook
xlsfile2 = r"课后作业.xls"# homework workbook
xlsfile3 = r"每周总结.xls"# weekly summary workbook
xlsfile4 = r"实验练习.xls"# lab exercise workbook
book1 = xlrd.open_workbook(xlsfile1)# open each workbook as an xlrd Book object
book2 = xlrd.open_workbook(xlsfile2)
book3 = xlrd.open_workbook(xlsfile3)
book4 = xlrd.open_workbook(xlsfile4)
sheet1 = book1.sheet_by_index(0) # first sheet of each workbook
sheet2 = book2.sheet_by_index(0)
sheet3 = book3.sheet_by_index(0)
sheet4 = book4.sheet_by_index(0)
nrows1 = sheet1.nrows # total row count
ncols1 = sheet1.ncols # total column count
nrows2 = sheet2.nrows
ncols2 = sheet2.ncols
nrows3 = sheet3.nrows
ncols3 = sheet3.ncols
print ("课前预习总行/列数",nrows1,ncols1)
print ("课后作业总行/列数",nrows2,ncols2)
print ("每周总结总行/列数",nrows3,ncols3)
# Output file accumulating one comment (or "null") per line.
f0 = open('./oneStudent/oneStu_allWeek.txt', 'w', encoding='utf-8')
#筛选同学X在n周内的所有课前、课后和每周总结评论,空的补“null”
def choiceAllText(name,week):
    """Collect student `name`'s pre-class, homework and weekly-summary
    comments for weeks 1..`week`, printing each one and appending it (or
    "null" when missing) to the module-level output file `f0`.
    """
    print("学生姓名:",name)
    for k in range(1,week+1):
        # "false"/"true" strings are used as found-a-row flags.
        flag1 = "false"
        flag2 = "false"
        flag3 = "false"
        # Pre-class preparation sheet.
        for i in range(nrows1):
            a = sheet1.cell_value(i,2)# student name
            b = sheet1.cell_value(i,4)# week number
            c = sheet1.cell_value(i,6)# comment text
            text = "null"  # NOTE(review): unused local
            if(a == name and b == k):
                flag1 = "true"
                print(k,"周课前:",c)
                f0.write(c)
                f0.write('\n')
                break
        if flag1 == "false":
            print(k,"周课前:null")
            f0.write("null")
            f0.write('\n')
        # Homework sheet.
        for i in range(nrows2):
            a = sheet2.cell_value(i,2)# student name
            b = sheet2.cell_value(i,4)# week number
            c = sheet2.cell_value(i,6)# comment text
            if(a == name and b == k):
                flag2 = "true"
                print(k,"周课后:",c)
                f0.write(c)
                f0.write('\n')
                break
        if flag2 == "false":
            print(k,"周课后:null")
            f0.write("null")
            f0.write('\n')
        # Weekly summary sheet (comment text is in column 5 here).
        for i in range(nrows3):
            a = sheet3.cell_value(i,2)# student name
            b = sheet3.cell_value(i,4)# week number
            c = sheet3.cell_value(i,5)# comment text
            if(a == name and b == k):
                flag3 = "true"
                print(k,"周每周:",c)
                f0.write(c)
                f0.write('\n')
                break
        if flag3 == "false":
            print(k,"周每周:null")
            f0.write("null")
            f0.write('\n')
# Students available for extraction (class id noted where known).
name1 = "文习尚"#m1701
name2 = "林雨钦"#m1702
name3 = "赵华源"#m1703
name4 = "徐海标"#m1704
name5 = "李宵"
name6 = "朱智"
name7 = "刘晓稳"
name8 = "朱浩杰"
# Number of weeks to scan.
week = 13
choiceAllText(name4,week) | [
"544740618@qq.com"
] | 544740618@qq.com |
d8e85972fade73cbb7841a166d847c90f11b5bd4 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/operations/_virtual_machine_extensions_operations.py | 9e1af3df025f8ded444c980807a5abb4416ee1ed | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 44,661 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
# ``Literal`` moved into ``typing`` in Python 3.8; fall back to
# typing_extensions on older interpreters.
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Optional caller-supplied post-processing hook: receives the raw pipeline
# response, the deserialized model, and the response headers dict.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-wide serializer for URL/query/header encoding; validation is the
# service's job, so client-side validation is disabled.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
    resource_group_name: str, vm_name: str, vm_extension_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates or updates a VM extension."""
    header_map = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    query_map = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Defaults are evaluated eagerly, so the pops from query_map/header_map
    # always run — exactly mirroring the generated one-liner form.
    default_api_version = query_map.pop("api-version", "2021-04-01")
    api_version: Literal["2021-04-01"] = kwargs.pop("api_version", default_api_version)
    default_content_type = header_map.pop("Content-Type", None)
    content_type: Optional[str] = kwargs.pop("content_type", default_content_type)
    accept = header_map.pop("Accept", "application/json")
    # Resolve the URL template with URL-encoded path segments.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}",
    )  # pylint: disable=line-too-long
    path_args = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "vmName": _SERIALIZER.url("vm_name", vm_name, "str"),
        "vmExtensionName": _SERIALIZER.url("vm_extension_name", vm_extension_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    final_url: str = _format_url_section(url_template, **path_args)  # type: ignore
    # Query string.
    query_map["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Headers: Content-Type only when one was supplied.
    if content_type is not None:
        header_map["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    header_map["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PUT", url=final_url, params=query_map, headers=header_map, **kwargs)
def build_update_request(
    resource_group_name: str, vm_name: str, vm_extension_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PATCH request that updates an existing VM extension."""
    hdrs = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    qry = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Inner pops run unconditionally (eager default evaluation) so the
    # caller-supplied maps are always drained of these keys.
    api_version: Literal["2021-04-01"] = kwargs.pop("api_version", qry.pop("api-version", "2021-04-01"))
    content_type: Optional[str] = kwargs.pop("content_type", hdrs.pop("Content-Type", None))
    accept = hdrs.pop("Accept", "application/json")
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}",
    )  # pylint: disable=line-too-long
    # URL-encode every path segment before substitution into the template.
    segments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "vmName": _SERIALIZER.url("vm_name", vm_name, "str"),
        "vmExtensionName": _SERIALIZER.url("vm_extension_name", vm_extension_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    patched_url: str = _format_url_section(template, **segments)  # type: ignore
    qry["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        hdrs["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    hdrs["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PATCH", url=patched_url, params=qry, headers=hdrs, **kwargs)
def build_delete_request(
    resource_group_name: str, vm_name: str, vm_extension_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request that removes a VM extension.

    Note: unlike the other builders this one sends no Accept/Content-Type
    headers; any ``headers`` kwarg is forwarded untouched via ``**kwargs``.
    """
    qparams = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: Literal["2021-04-01"] = kwargs.pop("api_version", qparams.pop("api-version", "2021-04-01"))
    template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}",
    )  # pylint: disable=line-too-long
    # Fill the URL template with URL-encoded path segments.
    encoded = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "vmName": _SERIALIZER.url("vm_name", vm_name, "str"),
        "vmExtensionName": _SERIALIZER.url("vm_extension_name", vm_extension_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    resolved_url: str = _format_url_section(template, **encoded)  # type: ignore
    qparams["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    return HttpRequest(method="DELETE", url=resolved_url, params=qparams, **kwargs)
def build_get_request(
    resource_group_name: str,
    vm_name: str,
    vm_extension_name: str,
    subscription_id: str,
    *,
    expand: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that retrieves one VM extension."""
    headers_out = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params_out = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: Literal["2021-04-01"] = kwargs.pop("api_version", params_out.pop("api-version", "2021-04-01"))
    accept = headers_out.pop("Accept", "application/json")
    raw_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}",
    )  # pylint: disable=line-too-long
    # URL-encode the path segments, then resolve the template.
    encoded_parts = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "vmName": _SERIALIZER.url("vm_name", vm_name, "str"),
        "vmExtensionName": _SERIALIZER.url("vm_extension_name", vm_extension_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    resolved: str = _format_url_section(raw_template, **encoded_parts)  # type: ignore
    # "$expand" is optional — only serialized into the query when supplied.
    if expand is not None:
        params_out["$expand"] = _SERIALIZER.query("expand", expand, "str")
    params_out["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers_out["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=resolved, params=params_out, headers=headers_out, **kwargs)
def build_list_request(
    resource_group_name: str, vm_name: str, subscription_id: str, *, expand: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists all extensions on a VM."""
    out_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    out_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: Literal["2021-04-01"] = kwargs.pop("api_version", out_params.pop("api-version", "2021-04-01"))
    accept = out_headers.pop("Accept", "application/json")
    list_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions",
    )  # pylint: disable=line-too-long
    # Three path segments here — no extension name, since this lists them all.
    url_parts = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "vmName": _SERIALIZER.url("vm_name", vm_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    list_url: str = _format_url_section(list_template, **url_parts)  # type: ignore
    if expand is not None:
        out_params["$expand"] = _SERIALIZER.query("expand", expand, "str")
    out_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    out_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=list_url, params=out_params, headers=out_headers, **kwargs)
class VirtualMachineExtensionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2021_04_01.ComputeManagementClient`'s
:attr:`virtual_machine_extensions` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    def _create_or_update_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        vm_extension_name: str,
        extension_parameters: Union[_models.VirtualMachineExtension, IO],
        **kwargs: Any
    ) -> _models.VirtualMachineExtension:
        """Issue the initial PUT of the create-or-update LRO and return the
        first-response payload; polling is handled by the public wrapper.
        """
        # Map HTTP statuses to azure-core exceptions; callers may extend or
        # override via the ``error_map`` keyword.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.VirtualMachineExtension] = kwargs.pop("cls", None)
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw IO/bytes bodies are sent verbatim; model input is serialized.
        # NOTE(review): ``isinstance(..., typing.IO)`` matches few concrete
        # stream types — confirm stream inputs actually take this branch.
        if isinstance(extension_parameters, (IO, bytes)):
            _content = extension_parameters
        else:
            _json = self._serialize.body(extension_parameters, "VirtualMachineExtension")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            vm_extension_name=vm_extension_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 (updated) and 201 (created) both carry the same payload shape.
        if response.status_code == 200:
            deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore
        return deserialized  # type: ignore
    _create_or_update_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
    }
    # Overload: typed-model input.
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        vm_name: str,
        vm_extension_name: str,
        extension_parameters: _models.VirtualMachineExtension,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.VirtualMachineExtension]:
        """The operation to create or update the extension.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine where the extension should be created or
         updated. Required.
        :type vm_name: str
        :param vm_extension_name: The name of the virtual machine extension. Required.
        :type vm_extension_name: str
        :param extension_parameters: Parameters supplied to the Create Virtual Machine Extension
         operation. Required.
        :type extension_parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualMachineExtension or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Overload: raw IO/bytes request body.
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        vm_name: str,
        vm_extension_name: str,
        extension_parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.VirtualMachineExtension]:
        """The operation to create or update the extension.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine where the extension should be created or
         updated. Required.
        :type vm_name: str
        :param vm_extension_name: The name of the virtual machine extension. Required.
        :type vm_extension_name: str
        :param extension_parameters: Parameters supplied to the Create Virtual Machine Extension
         operation. Required.
        :type extension_parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualMachineExtension or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        vm_name: str,
        vm_extension_name: str,
        extension_parameters: Union[_models.VirtualMachineExtension, IO],
        **kwargs: Any
    ) -> LROPoller[_models.VirtualMachineExtension]:
        """The operation to create or update the extension.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine where the extension should be created or
         updated. Required.
        :type vm_name: str
        :param vm_extension_name: The name of the virtual machine extension. Required.
        :type vm_extension_name: str
        :param extension_parameters: Parameters supplied to the Create Virtual Machine Extension
         operation. Is either a model type or a IO type. Required.
        :type extension_parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension or
         IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualMachineExtension or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.VirtualMachineExtension] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # ``cls=lambda x, y, z: x`` makes the initial call return the raw
            # PipelineResponse so the poller can drive the LRO itself.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                vm_extension_name=vm_extension_name,
                extension_parameters=extension_parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        # error_map was consumed by the initial call; drop it before polling.
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Final deserialization once the LRO reaches a terminal state.
            deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # polling: True -> default ARM poller; False -> no polling;
        # anything else -> caller-supplied PollingMethod instance.
        if polling is True:
            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
    begin_create_or_update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
    }
    def _update_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        vm_extension_name: str,
        extension_parameters: Union[_models.VirtualMachineExtensionUpdate, IO],
        **kwargs: Any
    ) -> _models.VirtualMachineExtension:
        """Issue the initial PATCH of the update LRO and return the
        first-response payload; polling is handled by the public wrapper.
        """
        # Map HTTP statuses to azure-core exceptions; extendable via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.VirtualMachineExtension] = kwargs.pop("cls", None)
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw IO/bytes bodies are sent verbatim; model input is serialized.
        if isinstance(extension_parameters, (IO, bytes)):
            _content = extension_parameters
        else:
            _json = self._serialize.body(extension_parameters, "VirtualMachineExtensionUpdate")
        request = build_update_request(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            vm_extension_name=vm_extension_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # PATCH only ever returns 200 on success.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
    }
    # Overload: typed-model input.
    @overload
    def begin_update(
        self,
        resource_group_name: str,
        vm_name: str,
        vm_extension_name: str,
        extension_parameters: _models.VirtualMachineExtensionUpdate,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.VirtualMachineExtension]:
        """The operation to update the extension.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine where the extension should be updated.
         Required.
        :type vm_name: str
        :param vm_extension_name: The name of the virtual machine extension. Required.
        :type vm_extension_name: str
        :param extension_parameters: Parameters supplied to the Update Virtual Machine Extension
         operation. Required.
        :type extension_parameters:
         ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtensionUpdate
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualMachineExtension or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Overload: raw IO/bytes request body.
    @overload
    def begin_update(
        self,
        resource_group_name: str,
        vm_name: str,
        vm_extension_name: str,
        extension_parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.VirtualMachineExtension]:
        """The operation to update the extension.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine where the extension should be updated.
         Required.
        :type vm_name: str
        :param vm_extension_name: The name of the virtual machine extension. Required.
        :type vm_extension_name: str
        :param extension_parameters: Parameters supplied to the Update Virtual Machine Extension
         operation. Required.
        :type extension_parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualMachineExtension or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def begin_update(
        self,
        resource_group_name: str,
        vm_name: str,
        vm_extension_name: str,
        extension_parameters: Union[_models.VirtualMachineExtensionUpdate, IO],
        **kwargs: Any
    ) -> LROPoller[_models.VirtualMachineExtension]:
        """The operation to update the extension.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine where the extension should be updated.
         Required.
        :type vm_name: str
        :param vm_extension_name: The name of the virtual machine extension. Required.
        :type vm_extension_name: str
        :param extension_parameters: Parameters supplied to the Update Virtual Machine Extension
         operation. Is either a model type or a IO type. Required.
        :type extension_parameters:
         ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtensionUpdate or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualMachineExtension or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.VirtualMachineExtension] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # ``cls=lambda x, y, z: x`` returns the raw PipelineResponse so the
            # poller can drive the LRO itself.
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                vm_extension_name=vm_extension_name,
                extension_parameters=extension_parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        # error_map was consumed by the initial call; drop it before polling.
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Final deserialization once the LRO reaches a terminal state.
            deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # polling: True -> default ARM poller; False -> no polling;
        # anything else -> caller-supplied PollingMethod instance.
        if polling is True:
            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
    begin_update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
    }
    def _delete_initial(  # pylint: disable=inconsistent-return-statements
        self, resource_group_name: str, vm_name: str, vm_extension_name: str, **kwargs: Any
    ) -> None:
        """Issue the initial DELETE of the delete LRO; returns nothing unless
        a custom ``cls`` callback is supplied.
        """
        # Map HTTP statuses to azure-core exceptions; extendable via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
        cls: ClsType[None] = kwargs.pop("cls", None)
        request = build_delete_request(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            vm_extension_name=vm_extension_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self._delete_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 200/202 accepted, 204 already gone — all treated as success.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
    }
    @distributed_trace
    def begin_delete(
        self, resource_group_name: str, vm_name: str, vm_extension_name: str, **kwargs: Any
    ) -> LROPoller[None]:
        """The operation to delete the extension.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine where the extension should be deleted.
         Required.
        :type vm_name: str
        :param vm_extension_name: The name of the virtual machine extension. Required.
        :type vm_extension_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # ``cls=lambda x, y, z: x`` returns the raw PipelineResponse so the
            # poller can drive the LRO itself.
            raw_result = self._delete_initial(  # type: ignore
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                vm_extension_name=vm_extension_name,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        # error_map was consumed by the initial call; drop it before polling.
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Delete yields no body; only invoke the optional callback.
            if cls:
                return cls(pipeline_response, None, {})
        # polling: True -> default ARM poller; False -> no polling;
        # anything else -> caller-supplied PollingMethod instance.
        if polling is True:
            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
    begin_delete.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
    }
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        vm_name: str,
        vm_extension_name: str,
        expand: Optional[str] = None,
        **kwargs: Any
    ) -> _models.VirtualMachineExtension:
        """The operation to get the extension.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine containing the extension. Required.
        :type vm_name: str
        :param vm_extension_name: The name of the virtual machine extension. Required.
        :type vm_extension_name: str
        :param expand: The expand expression to apply on the operation. Default value is None.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualMachineExtension or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtension
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP failures to azure-core exception types; callers
        # may extend/override the mapping via the "error_map" kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
        cls: ClsType[_models.VirtualMachineExtension] = kwargs.pop("cls", None)
        # Build the GET request from the operation metadata URL template.
        request = build_get_request(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            vm_extension_name=vm_extension_name,
            subscription_id=self._config.subscription_id,
            expand=expand,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
        if cls:
            # Custom response hook wins over the default deserialized model.
            return cls(pipeline_response, deserialized, {})
        return deserialized

    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"
    }
    @distributed_trace
    def list(
        self, resource_group_name: str, vm_name: str, expand: Optional[str] = None, **kwargs: Any
    ) -> _models.VirtualMachineExtensionsListResult:
        """The operation to get all extensions of a Virtual Machine.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine containing the extension. Required.
        :type vm_name: str
        :param expand: The expand expression to apply on the operation. Default value is None.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualMachineExtensionsListResult or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineExtensionsListResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Same error-translation table as get(); extensible via "error_map".
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2021-04-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))
        cls: ClsType[_models.VirtualMachineExtensionsListResult] = kwargs.pop("cls", None)
        # NOTE: this operation returns a single result object, not a paged iterator.
        request = build_list_request(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            expand=expand,
            api_version=api_version,
            template_url=self.list.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("VirtualMachineExtensionsListResult", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    list.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions"
    }
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
ee714b917523630f20db920691fa7adf93352b79 | b373081e3dd8ddc7520ca57841a0c17d2682ad74 | /Source/systemrl/environments/cartpole.py | 7dc0691b9031afdb006b8d35bac9120361147e0a | [
"MIT"
] | permissive | aarsheem/696-ds | f8e680cf2d2acef6dd04aa14f406d31cce946dfe | 2d74b1e3f430e369202982d7ad8c56f362b00f76 | refs/heads/master | 2020-12-26T22:14:06.200655 | 2020-07-08T22:08:04 | 2020-07-08T22:08:04 | 237,663,363 | 2 | 2 | MIT | 2020-04-24T15:16:49 | 2020-02-01T19:05:05 | Python | UTF-8 | Python | false | false | 5,216 | py | import numpy as np
from typing import Tuple
from .skeleton import Environment
class Cartpole(Environment):
    """
    The cart-pole environment as described in the 687 course material. This
    domain is modeled as a pole balancing on a cart. The agent must learn to
    move the cart forwards and backwards to keep the pole from falling.
    Actions: left (0) and right (1)
    Reward: cos(12 * theta) -- see R(); it reaches -1 at |theta| = 15 degrees.
    Environment Dynamics: See the work of Florian 2007
    (Correct equations for the dynamics of the cart-pole system) for the
    observation of the correct dynamics.
    """
    def __init__(self):
        self._name = "Cartpole"
        # TODO: properly define the variables below
        self._action = None   # last action taken (None before the first step)
        self._reward = 0      # reward produced by the most recent step
        self._isEnd = False   # True once terminal() has triggered
        self._gamma = 1.0     # undiscounted episodic task
        # define the state # NOTE: you must use these variable names
        self._x = 0.  # horizontal position of cart
        self._v = 0.  # horizontal velocity of the cart
        self._theta = 0.  # angle of the pole
        self._dtheta = 0.  # angular velocity of the pole
        # dynamics
        self._g = 9.8  # gravitational acceleration (m/s^2)
        self._mp = 0.1  # pole mass
        self._mc = 1.0  # cart mass
        self._l = 0.5  # (1/2) * pole length
        self._dt = 0.02  # timestep
        self._t = 0.0  # total time elapsed NOTE: you must use this variable
        # Bounds used only by normState() for feature scaling.
        self.xMin = -2.4
        self.xMax = 2.4
        self.vMin = -10
        self.vMax = 10
        self.thetaMin = -np.pi / 12.0
        self.thetaMax = np.pi / 12.0
        self.omegaMin = -np.pi
        self.omegaMax = np.pi

    @property
    def name(self) -> str:
        return self._name

    @property
    def reward(self) -> float:
        return self._reward

    @property
    def gamma(self) -> float:
        return self._gamma

    @property
    def action(self) -> int:
        return self._action

    @property
    def isEnd(self) -> bool:
        return self._isEnd

    @property
    def state(self) -> np.ndarray:
        # State vector ordering: [x, v, theta, dtheta].
        return np.array((self._x, self._v, self._theta, self._dtheta))

    def nextState(self, state: np.ndarray, action: int) -> np.ndarray:
        """
        Compute the next cart-pole state using the Euler approximation to the
        dynamics (one self._dt step). `state` is [x, v, theta, dtheta];
        `action` 0 applies force -10 N, action 1 applies +10 N.
        """
        dstate = np.zeros(4)
        dstate[0] = state[1]   # dx/dt = v
        dstate[2] = state[3]   # dtheta/dt = omega
        F = action * 20.0 - 10.0  # maps {0, 1} -> {-10, +10} Newtons
        # Angular acceleration per Florian 2007 -- presumably eq. (23); verify.
        cos_multiplier = (-F - self._mp * self._l * (state[3]**2) * np.sin(state[2])) / (self._mp + self._mc)
        denominator = self._l * (4.0/3.0 - (self._mp * (np.cos(state[2])**2))/(self._mp + self._mc))
        dstate[3] = (self._g * np.sin(state[2]) + np.cos(state[2]) * cos_multiplier) / denominator
        # Cart acceleration derived from the angular acceleration above.
        ml_multipier = (dstate[2]**2) * np.sin(state[2]) - dstate[3] * np.cos(state[2])
        dstate[1] = (F + self._mp * self._l * ml_multipier) / (self._mp + self._mc)
        return state + dstate * self._dt

    def R(self, state: np.ndarray, action: int, nextState: np.ndarray) -> float:
        # note the new reward
        # at 15 degrees reward will be -1
        return np.cos(12 * state[2])

    def step(self, action: int) -> Tuple[np.ndarray, float, bool]:
        """
        Takes one step in the environment and returns the next state, reward,
        and whether the episode has reached a terminal state.
        """
        next_state = self.nextState(self.state, action)
        # Reward is computed from the *current* state, before the transition.
        self._reward = self.R(self.state, action, next_state)
        self._action = action
        self._x = next_state[0]
        self._v = next_state[1]
        self._theta = next_state[2]
        self._dtheta = next_state[3]
        self._t += self._dt
        self._isEnd = self.terminal()
        return next_state, self._reward, self._isEnd

    def reset(self) -> None:
        """
        Resets the state of the environment to the initial configuration
        (everything zeroed, episode clock restarted).
        """
        self._isEnd = False
        self._x = 0.  # horizontal position of cart
        self._v = 0.  # horizontal velocity of the cart
        self._theta = 0.  # angle of the pole
        self._dtheta = 0.  # angular velocity of the pole
        self._t = 0
        self._action = None

    def normState(self):
        """
        Normalize state values in range 0 -- 1
        (x and v are additionally re-spread around 0.5; they can leave [0, 1]).
        """
        x = (self._x - self.xMin)/(self.xMax - self.xMin)
        v = (self._v - self.vMin)/(self.vMax - self.vMin)
        # to spread out the distribution
        x = (x - 0.5) * 10 + 0.5
        v = (v - 0.5) * 5 + 0.5
        theta = (self._theta - self.thetaMin)/(self.thetaMax - self.thetaMin)
        dtheta = (self._dtheta - self.omegaMin)/(self.omegaMax - self.omegaMin)
        return np.array([x, v, theta, dtheta])

    def terminal(self) -> bool:
        """
        The episode is at an end if:
        time is greater than 20 seconds
        pole falls |theta| > (pi/12.0)
        cart hits the sides |x| >= 3
        """
        if self._t > 20:
            return True
        if np.abs(self._theta) > np.pi/12.0:
            return True
        if np.abs(self._x) >= 3.0:
            return True
        return False

    def numActions(self):
        # Two discrete actions: push left (0) or push right (1).
        return 2

    def numFeatures(self):
        # Four state features: x, v, theta, dtheta.
        return 4
| [
"aarsheemishra@1x-nat-vl931-172-30-152-76.wireless.umass.edu"
] | aarsheemishra@1x-nat-vl931-172-30-152-76.wireless.umass.edu |
def to_camel_case(text):
    """Convert whitespace-separated words to camelCase.

    The first word is fully lower-cased; every following word is title-cased,
    and the results are joined without separators.  Returns "" for empty or
    whitespace-only input (the previous version raised IndexError there).

    :param text: the phrase to convert, e.g. "hello world foo"
    :return: camelCased string, e.g. "helloWorldFoo"
    """
    words = text.split()
    if not words:
        # Guard: empty/whitespace-only input used to crash on words[0].
        return ""
    parts = [word.title() for word in words]
    parts[0] = parts[0].lower()
    return "".join(parts)


if __name__ == "__main__":
    # Read a single line from stdin and print its camelCase form.
    print(to_camel_case(input()))
| [
"florentbrassart31@gmail.com"
] | florentbrassart31@gmail.com |
4e9df8b1a88f0a2c470ad97f303a25e010fa60f9 | 60a93f1c34617bd08f1862ef076dc77f766d37b2 | /thirdParty/bullet3-2.88/docs/pybullet_quickstart_guide/WordpressPreview/BuildMarkdeepUtility.py | 0738cdafd74b4278ae299b75c2dec90554cca285 | [
"MIT",
"Zlib"
] | permissive | dantros/MonaEngine | 4f4fdbd2344f553f9bfd0189a4872a0581022f40 | e3d0048c2fe2dd282b84686f0e31e5741714222b | refs/heads/master | 2023-07-27T10:51:18.565020 | 2021-09-17T00:11:39 | 2021-09-17T00:11:39 | 395,851,979 | 0 | 1 | MIT | 2021-08-15T03:23:15 | 2021-08-14T01:32:20 | null | UTF-8 | Python | false | false | 2,291 | py | import re
if __name__ == "__main__":
    # Assemble the script which embeds the Markdeep page into the preview blog.
    # Fix over the previous version: every open() is now wrapped in a `with`
    # block so file handles are closed deterministically (they were leaked).
    with open("PreviewBlogPage.htm", "rb") as f:
        PreviewBlogPage = f.read().decode("utf-8")
    # Split the preview page into <head> and <body> pieces.
    HeadMatch = re.search("<head(.*?)>(.*?)</head>", PreviewBlogPage, re.DOTALL)
    HeadAttributes = HeadMatch.group(1)
    FullDocumentHead = HeadMatch.group(2)
    BodyMatch = re.search("<body(.*?)>(.*?)</body>", PreviewBlogPage, re.DOTALL)
    BodyAttributes = BodyMatch.group(1)
    FullPreviewBody = BodyMatch.group(2)
    # The body is split around the article placeholder macro.
    ArticleHTMLCodeMacro = "$(ARTICLE_HTML_CODE)"
    iArticleHTMLCodeMacro = FullPreviewBody.find(ArticleHTMLCodeMacro)
    DocumentBodyPrefix = FullPreviewBody[0:iArticleHTMLCodeMacro]
    DocumentBodySuffix = FullPreviewBody[iArticleHTMLCodeMacro + len(ArticleHTMLCodeMacro):]
    with open("PrepareHTML.js", "rb") as f:
        FullPrepareHTMLCode = f.read().decode("utf-8")
    ReplacementList = [
        ("$(FULL_DOCUMENT_HEAD)", FullDocumentHead),
        ("$(DOCUMENT_BODY_PREFIX)", DocumentBodyPrefix),
        ("$(DOCUMENT_BODY_SUFFIX)", DocumentBodySuffix)
    ]
    for Macro, Replacement in ReplacementList:
        # Escape the HTML for embedding inside a single-quoted,
        # line-continued JavaScript string literal.
        FullPrepareHTMLCode = FullPrepareHTMLCode.replace(Macro, Replacement.replace("\r\n", "\\r\\n\\\r\n").replace("'", "\\'"))
    # Generate code which sets body and head attributes appropriately
    for Element, AttributeCode in [("head", HeadAttributes), ("body", BodyAttributes)]:
        FullPrepareHTMLCode += "\r\n// Setting " + Element + " attributes\r\n"
        for Match in re.finditer("(\\w+)=\\\"(.*?)\\\"", AttributeCode):
            FullPrepareHTMLCode += "document." + Element + ".setAttribute(\"" + Match.group(1) + "\",\"" + Match.group(2) + "\");\r\n"
    with open("PrepareHTML.full.js", "wb") as f:
        f.write(FullPrepareHTMLCode.encode("utf-8"))
    # Concatenate all the scripts together
    SourceFileList = [
        "PrepareHTML.full.js",
        "SetMarkdeepMode.js",
        "markdeep.min.js",
        "DisplayMarkdeepOutput.js",
        "InvokeMathJax.js"
    ]
    ScriptParts = []
    for SourceFile in SourceFileList:
        with open(SourceFile, "rb") as f:
            ScriptParts.append("// " + SourceFile + "\r\n\r\n" + f.read().decode("utf-8"))
    OutputCode = "\r\n\r\n".join(ScriptParts)
    with open("MarkdeepUtility.js", "wb") as f:
        f.write(OutputCode.encode("utf-8"))
    print("Done.")
| [
"byronaaronb@gmail.com"
] | byronaaronb@gmail.com |
811e650b58eaf4337be5d070b3152062620dfaa4 | 1d1a21b37e1591c5b825299de338d18917715fec | /Mathematics/Data science/Mathmatics/02/inverse_matrix.py | 5531c0cc7924c0fa9e1eb9313e95e425439086b8 | [] | no_license | brunoleej/study_git | 46279c3521f090ebf63ee0e1852aa0b6bed11b01 | 0c5c9e490140144caf1149e2e1d9fe5f68cf6294 | refs/heads/main | 2023-08-19T01:07:42.236110 | 2021-08-29T16:20:59 | 2021-08-29T16:20:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | import numpy as np
# Build a small integer matrix and compute its inverse with numpy.linalg.
rows = [[1, 1, 0],
        [0, 1, 1],
        [1, 1, 1]]
A = np.array(rows)
print(A)
'''
[[1 1 0]
 [0 1 1]
 [1 1 1]]
'''
# Inverse-matrix computation
Ainv = np.linalg.inv(A)
print(Ainv)
'''
[[ 0. -1.  1.]
 [ 1.  1. -1.]
 [-1.  0.  1.]]
'''
| [
"jk04059@naver.com"
] | jk04059@naver.com |
f327af434bdb44b8db26624273fa576fedb584a9 | 371fe9a1fdeb62ad1142b34d732bde06f3ce21a0 | /scripts/compute_path_pair_distances.py | 32499ed5d2cd2871d18a77acc24343b70b16f798 | [] | no_license | maickrau/rdna_resolution | 971f3b7e803565c9432be69b8e2a2852f55b8b79 | aab42310c31e655cbbc318331082fa3436d69075 | refs/heads/master | 2023-03-03T05:14:33.966930 | 2021-02-17T20:45:20 | 2021-02-17T20:45:20 | 339,851,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,426 | py | #!/usr/bin/python
import sys
# CLI: compute_path_pair_distances.py <graph.gfa> <max_diff> <modulo> <moduloindex>
graphfile = sys.argv[1]          # GFA graph supplying node sequences ('S' lines)
max_diff = int(sys.argv[2])      # edit-distance cap read by the distance helpers
modulo = int(sys.argv[3])        # number of shards for parallel runs
moduloindex = int(sys.argv[4])   # index of the shard handled by this process
# name \t path from stdin
def revcomp(s):
    """Return the reverse complement of DNA string s (uppercase ACGT only)."""
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return "".join(pairs[base] for base in reversed(s))
def pathseq(p):
    """Spell out the DNA sequence of a node path.

    Each step in `p` is an orientation character plus a node id: ">" takes the
    node's forward sequence, "<" its reverse complement.  Node sequences come
    from the module-level `nodeseqs` table parsed from the GFA file.
    (A homopolymer-compression variant used to live here but is disabled.)
    """
    global nodeseqs
    chunks = []
    for step in p:
        seq = nodeseqs[step[1:]]
        chunks.append(seq if step[0] == '>' else revcomp(seq))
    return "".join(chunks)
def edit_distance_simple(p1, p2, cap=None):
    """Classic Wagner-Fischer edit distance between strings p1 and p2.

    Returns None when the length difference alone already reaches the cap,
    mirroring the other edit-distance helpers in this script.  `cap` is a new
    optional parameter; when omitted it falls back to the module-level
    `max_diff` global, so the existing two-argument call sites are unchanged.

    Bug fix: the previous version looped `for i in range(1, len(p1))` and
    compared `p1[i]`/`p2[j]`, which both skipped p1's first character and
    dropped the final DP row -- e.g. it reported 1 for ("ab", "ab").
    """
    limit = cap if cap is not None else max_diff  # falls back to CLI global
    if len(p1) - len(p2) <= -limit or len(p1) - len(p2) >= limit:
        return None
    # last_row[j] = distance between the current p1 prefix and p2[:j].
    last_row = list(range(len(p2) + 1))
    for i in range(1, len(p1) + 1):
        next_row = [i]
        for j in range(1, len(p2) + 1):
            best = min(next_row[j - 1] + 1,   # insertion
                       last_row[j] + 1)       # deletion
            if p1[i - 1] == p2[j - 1]:
                best = min(best, last_row[j - 1])       # match
            else:
                best = min(best, last_row[j - 1] + 1)   # substitution
            next_row.append(best)
        last_row = next_row
    return last_row[-1]
def edit_distance_wfa(p1, p2):
    """Banded edit distance via the wavefront algorithm (WFA).

    Returns the edit distance if it is < max_diff, else None.  Each wavefront
    column stores, per diagonal, the furthest-reaching match position after i
    edits.  NOTE(review): the band is indexed with a plain list and an offset;
    behavior is preserved byte-for-byte -- do not reorder these updates.
    """
    global max_diff
    # use wfa because new and fancy
    # https://academic.oup.com/bioinformatics/advance-article/doi/10.1093/bioinformatics/btaa777/5904262?rss=1
    if len(p1) - len(p2) < -max_diff or len(p1) - len(p2) > max_diff: return None
    # Extend the initial exact match along the main diagonal.
    start_match = -1
    while start_match+1 < len(p1) and start_match+1 < len(p2) and p1[start_match+1] == p2[start_match+1]:
        start_match += 1
    if start_match == len(p1) and start_match == len(p2): return 0
    last_column = [start_match]
    # sys.stderr.write("0" + "\n")
    for i in range(1, max_diff):
        offset = i-1
        # sys.stderr.write(str(i) + "\n")
        next_column = []
        # Lowest diagonal of the new wavefront (insertion into p1 only).
        last_match = last_column[-i+offset+1]
        while last_match+1-i < len(p1) and last_match+1 < len(p2) and p1[last_match+1-i] == p2[last_match+1]:
            last_match += 1
        if last_match+1-i >= len(p1) and last_match+1 >= len(p2):
            return i
        next_column.append(last_match)
        # Interior diagonals: best of substitution / the two gap moves,
        # then greedy match extension along the diagonal.
        for j in range(-i+1, +i):
            last_match = last_column[j+offset]+1
            if j > -i+1:
                last_match = max(last_match, last_column[j+offset-1]-1)
            if j < i-1:
                last_match = max(last_match, last_column[j+offset+1])
            while last_match+1+j < len(p1) and last_match+1 < len(p2) and p1[last_match+1+j] == p2[last_match+1]:
                last_match += 1
            if last_match+1+j >= len(p1) and last_match+1 >= len(p2):
                return i
            next_column.append(last_match)
        # Highest diagonal of the new wavefront (deletion from p2 only).
        last_match = last_column[i+offset-1]-1
        while last_match+1+i < len(p1) and last_match+1 < len(p2) and p1[last_match+1+i] == p2[last_match+1]:
            last_match += 1
        if last_match+1+i >= len(p1) and last_match+1 >= len(p2):
            return i
        next_column.append(last_match)
        last_column = next_column
    # Distance is >= max_diff: report "too far" to the caller.
    return None
def edit_distance(p1, p2):
    """WFA-style edit distance with an extra heuristic for 2-bp tandem repeats.

    Dict-based variant of edit_distance_wfa: `last_column` maps diagonal ->
    furthest-reaching p1 position after i edits.  Returns the distance if it
    is < max_diff, else None.  NOTE(review): the two `while True` blocks look
    like a special case that lets runs of a repeated 2-character unit expand
    cheaply across diagonals (common in rDNA) -- confirm against the paper
    before touching; code is preserved byte-for-byte.
    """
    global max_diff
    # use wfa because new and fancy
    # https://academic.oup.com/bioinformatics/advance-article/doi/10.1093/bioinformatics/btaa777/5904262?rss=1
    if len(p1) - len(p2) < -max_diff or len(p1) - len(p2) > max_diff: return None
    # Greedy exact-match extension along the main diagonal.
    start_match = -1
    while start_match+1 < len(p1) and start_match+1 < len(p2) and p1[start_match+1] == p2[start_match+1]:
        start_match += 1
    if start_match == len(p1) and start_match == len(p2): return 0
    last_column = {0: start_match}
    for i in range(1, max_diff):
        offset = i-1
        next_column = {}
        for column in last_column:
            # Standard WFA expansion: substitution, insertion, deletion.
            if column not in next_column: next_column[column] = 0
            next_column[column] = max(next_column[column], last_column[column]+1)
            if column+1 not in next_column: next_column[column+1] = 0
            next_column[column+1] = max(next_column[column+1], last_column[column])
            if column-1 not in next_column: next_column[column-1] = 0
            next_column[column-1] = max(next_column[column-1], last_column[column]-1)
            p1_pos = last_column[column]
            p2_pos = last_column[column] + column
            if p1_pos >= 4 and p2_pos >= 4:
                # Both strings end in the same repeated 2-char unit here.
                if p1[p1_pos-4:p1_pos] == p2[p2_pos-4:p2_pos] and p1[p1_pos-4:p1_pos-2] == p1[p1_pos-2:p1_pos]:
                    if p1_pos+2 <= len(p1) and p1[p1_pos:p1_pos+2] == p1[p1_pos-2:p1_pos]:
                        # Repeat continues in p1: seed lower diagonals cheaply.
                        extend_until = 0
                        while True:
                            if column-extend_until not in next_column: next_column[column-extend_until] = 0
                            next_column[column-extend_until] = max(next_column[column-extend_until], last_column[column]+extend_until)
                            if p1_pos+extend_until+2 <= len(p1) and p1[p1_pos+extend_until:p1_pos+extend_until+2] == p1[p1_pos-2:p1_pos]:
                                extend_until += 2
                            else:
                                break
                    if p2_pos+2 <= len(p2) and p2[p2_pos:p2_pos+2] == p2[p2_pos-2:p2_pos]:
                        # Repeat continues in p2: seed higher diagonals cheaply.
                        extend_until = 0
                        while True:
                            if column+extend_until+2 not in next_column: next_column[column+extend_until+2] = 0
                            next_column[column+extend_until+2] = max(next_column[column+extend_until+2], last_column[column])
                            if p2_pos+extend_until+2 <= len(p2) and p2[p2_pos+extend_until:p2_pos+extend_until+2] == p2[p2_pos-2:p2_pos]:
                                extend_until += 2
                            else:
                                break
        # Greedy match extension on every surviving diagonal.
        for column in next_column:
            p1_pos = next_column[column]
            p2_pos = next_column[column] + column
            while p1_pos+1 < len(p1) and p2_pos+1 < len(p2) and p1[p1_pos+1] == p2[p2_pos+1]:
                next_column[column] += 1
                p1_pos += 1
                p2_pos += 1
            if p1_pos+1 >= len(p1) and p2_pos+1 >= len(p2): return i
        last_column = next_column
    # Distance is >= max_diff.
    return None
nodeseqs = {}
# Load node sequences from the GFA graph: 'S' lines are "S <name> <sequence>".
with open(graphfile) as f:
    for l in f:
        parts = l.strip().split('\t')
        if parts[0] == 'S':
            nodeseqs[parts[1]] = parts[2]
num = 0
pathnum = {}
paths = {}
# Read "name<TAB>path" records from stdin; a path looks like ">n1<n2>n3".
for l in sys.stdin:
    parts = l.strip().split('\t')
    name = parts[0]
    last_break = 0
    path = []
    pathstr = parts[1] + '>'  # sentinel '>' so the final step gets flushed
    for i in range(1, len(pathstr)):
        if pathstr[i] == '<' or pathstr[i] == '>':
            path.append(pathstr[last_break:i])
            last_break = i
    if name in paths: print(name)
    assert name not in paths  # duplicate path names are fatal
    paths[name] = pathseq(path)
    pathnum[name] = num
    num += 1
    # print(name + "\t" + paths[name])
# Pairwise distances: this process only handles pairs whose first path falls
# in its shard (pathnum % modulo == moduloindex); path1 > path2 avoids dupes.
for path1 in paths:
    if pathnum[path1] % modulo != moduloindex: continue
    for path2 in paths:
        if path1 <= path2: continue
        value = max_diff + 1  # sentinel meaning "further than max_diff apart"
        edit_dist = edit_distance(paths[path1], paths[path2])
        # edit_dist = edit_distance_simple(paths[path1], paths[path2])
        # NOTE(review): the `continue` below means the sentinel value is
        # never printed for capped pairs -- presumably intentional filtering.
        if edit_dist is None: continue
        if edit_dist is not None: value = edit_dist
        print(path1 + "\t" + path2 + "\t" + str(value))
| [
"m_rautiainen@hotmail.com"
] | m_rautiainen@hotmail.com |
823cfd503c40f9c544e77c62fdbcc4bb86ec03d6 | dd28aede0d492d265e27d491eb46be5bda03e26c | /experimento mnist nuevo/experiment.py | 6fdcd91bdd6ac6aaa2e16f69a3c049eb628d7613 | [] | no_license | beeva-ricardoguerrero/Floydhub_experiments | db008ccb75ca8e7a2ff952440c56076adfe69b2a | 015b59b7d798e6c468d09ff5dead45edb108750c | refs/heads/master | 2021-01-01T04:13:25.298975 | 2017-07-13T15:53:30 | 2017-07-13T15:53:30 | 97,140,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,925 | py | import json
import logging
import os
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.core.protobuf import meta_graph_pb2
import mnist_model
import mnist
flags = tf.app.flags
FLAGS = flags.FLAGS
# Command-line configuration for the MNIST trainer (TF1 flags API).
flags.DEFINE_integer('batch_size', 100,
                     'Batch size. Must divide evenly into the dataset sizes.')
flags.DEFINE_integer('max_steps', 10000, 'Number of steps to run trainer.')
flags.DEFINE_integer('checkpoint', 100, 'Interval steps to save checkpoint.')
flags.DEFINE_string('log_dir', '/tmp/logs',
                    'Directory to store checkpoints and summary logs')
flags.DEFINE_string('model_dir', '/tmp/model',
                    'Directory to store trained model')
flags.DEFINE_string('data_dir', '/tmp/data',
                    'Directory to store training data')
# Typo fix in user-facing help text: "If ture" -> "If true".
flags.DEFINE_boolean('local_data', False,
                     'If true, don\'t fetch training data from the web')
# Global flags
BATCH_SIZE = FLAGS.batch_size
MODEL_DIR = FLAGS.model_dir
LOG_DIR = FLAGS.log_dir
DATA_DIR = FLAGS.data_dir
LOCAL_DATA = FLAGS.local_data
MAX_STEPS = FLAGS.max_steps
CHECKPOINT = FLAGS.checkpoint
def run_training():
    """Train the MNIST model for MAX_STEPS, logging test metrics every
    CHECKPOINT steps via a tf.train.Supervisor (TF1-style training loop)."""
    with tf.Graph().as_default() as graph:
        # Prepare training data
        mnist_data = mnist.read_data_sets(DATA_DIR, one_hot=True,
                                          local_only=LOCAL_DATA)
        # Create placeholders (28x28 images flattened to 784; 10 classes).
        x = tf.placeholder(tf.float32, [None, 784])
        t = tf.placeholder(tf.float32, [None, 10])
        keep_prob = tf.placeholder(tf.float32, [])
        global_step = tf.Variable(0, trainable=False)  # This is a useless variable (in this code) but it's use to not brake the API
        # Add test loss and test accuracy to summary
        test_loss = tf.placeholder(tf.float32, [])
        test_accuracy = tf.placeholder(tf.float32, [])
        tf.summary.scalar('Test_loss', test_loss)
        tf.summary.scalar('Test_accuracy', test_accuracy)
        # Define a model
        p = mnist_model.get_model(x, keep_prob, training=True)
        train_step, loss, accuracy = mnist_model.get_trainer(p, t, global_step)
        init_op = tf.global_variables_initializer()
        saver = tf.train.Saver()
        summary = tf.summary.merge_all()
        # Create a supervisor (save_model_secs=0: no periodic auto-save).
        sv = tf.train.Supervisor(is_chief=True, logdir=LOG_DIR,
                                 init_op=init_op, saver=saver, summary_op=None,
                                 global_step=global_step, save_model_secs=0)
        # Create a session and start a training loop
        with sv.managed_session() as sess:
            reports, step = 0, 0
            start_time = time.time()
            while not sv.should_stop() and step < MAX_STEPS:
                images, labels = mnist_data.train.next_batch(BATCH_SIZE)
                feed_dict = {x: images, t: labels, keep_prob: 0.5}
                _, loss_val, step = sess.run([train_step, loss, global_step], feed_dict=feed_dict)
                if step > CHECKPOINT * reports:
                    # Every CHECKPOINT steps: log train loss + full test sweep.
                    reports += 1
                    logging.info('Step: %d, Train loss: %f', step, loss_val)
                    # Evaluate the test loss and test accuracy
                    loss_vals, acc_vals = [], []
                    for _ in range(len(mnist_data.test.labels) // BATCH_SIZE):
                        images, labels = mnist_data.test.next_batch(BATCH_SIZE)
                        feed_dict = {x: images, t: labels, keep_prob: 1.0}
                        loss_val, acc_val = sess.run([loss, accuracy], feed_dict=feed_dict)
                        loss_vals.append(loss_val)
                        acc_vals.append(acc_val)
                    loss_val, acc_val = np.sum(loss_vals), np.mean(acc_vals)
                    # Save summary (test metrics fed through the placeholders).
                    feed_dict = {test_loss: loss_val, test_accuracy: acc_val}
                    sv.summary_computed(sess, sess.run(summary, feed_dict=feed_dict), step)
                    sv.summary_writer.flush()
                    logging.info('Time elapsed: %d', (time.time() - start_time))
                    logging.info('Step: %d, Test loss: %f, Test accuracy: %f',
                                 step, loss_val, acc_val)
            sv.stop()

def main(_):
    # tf.app.run() entry point; the unused arg is argv.
    run_training()

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    tf.app.run()
| [
"ricardo.guerrero@beeva.com"
] | ricardo.guerrero@beeva.com |
582f4dc70b8e50a416444935ec568175df8bd5e6 | c0f5512aa25f8a3ead1933d1faeaa1593716bc6c | /files/Python/10791756-djcasing-4.py | e73ce7d93b288a3a1f8842b1249da4844164fa98 | [] | no_license | harthur/detect-indent | 89cc56d02257fad57f76c32f57091503932ce7a9 | c5764cc72a32722adc9f3667f6565c8529a19557 | refs/heads/master | 2016-09-05T12:58:59.686824 | 2014-09-08T05:42:39 | 2014-09-08T05:42:39 | 18,378,730 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | import string
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
def case_insensitive(func, case='lower', code=301):
"""
Django view function decorator which can enforce the case of a URL path by
redirecting to the properly cased URL. This *allows* for case insensitive
matches while ensuring that only a commonly cased-URL is used and seen.
"""
def inner(request, *args, **kwargs):
if case not in ['lower', 'upper']:
raise ValueError("{0} is not a valid case function: use 'lower' or 'upper'".format(case))
if code not in [301, 302]:
raise ValueError("{0} is not a valid HTTP redirect code".format(code))
redirect_klass = HttpResponseRedirect if code == 301 else HttpResponsePermanentRedirect
cased_path = getattr(string, case)(request.path)
if request.path != cased_path:
url = cased_path
if 'QUERY_STRING' in request.META:
url = "{0}?{1}".format(url, request.META['QUERY_STRING'])
return redirect_klass(url)
return func(request, *args, **kwargs)
return inner | [
"fayearthur@gmail.com"
] | fayearthur@gmail.com |
def answer(x, y):
    """Return, as a string, the ID at column x, row y of the diagonally
    numbered grid (Google Foobar "bunny worker locations" layout).

    Cell (x, y) sits on diagonal d = x + y - 1; the last ID of the previous
    d - 1 diagonals is (d-1)d/2, and (x, y) is the x-th cell on its diagonal,
    giving the closed form (d-1)d/2 + x.  This replaces the old O(x + y)
    sum(range(...)) computation and drops the leftover debug print() calls
    that polluted stdout.

    :param x: 1-based column index
    :param y: 1-based row index
    :return: the cell's ID as a decimal string
    """
    diagonal = x + y - 2  # number of completed diagonals before this cell's
    return str(diagonal * (diagonal + 1) // 2 + x)
"dpatel0698@gmail.com"
] | dpatel0698@gmail.com |
1459e00c12efcf943450d0d9fbb1d34e6ba7db4b | 93d78f2dd852b90d295bd523fd0bc09a644ee0d2 | /test/sql/test_operators.py | e8ad88511482f9009137ee1ea40257fb924e0846 | [
"MIT"
] | permissive | mrocklin/sqlalchemy | ff13d4d07ba46a049da9611d356d07498e95337d | 156f473de00024688404d73aea305cd4fc452638 | refs/heads/master | 2020-12-03T09:30:34.956612 | 2014-12-01T18:31:48 | 2014-12-01T18:31:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60,743 | py | from sqlalchemy.testing import fixtures, eq_, is_, is_not_
from sqlalchemy import testing
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.sql import column, desc, asc, literal, collate, null, true, false
from sqlalchemy.sql.expression import BinaryExpression, \
ClauseList, Grouping, \
UnaryExpression, select, union, func, tuple_
from sqlalchemy.sql import operators, table
import operator
from sqlalchemy import String, Integer, LargeBinary
from sqlalchemy import exc
from sqlalchemy.engine import default
from sqlalchemy.sql.elements import _literal_as_text
from sqlalchemy.schema import Column, Table, MetaData
from sqlalchemy.types import TypeEngine, TypeDecorator, UserDefinedType, Boolean
from sqlalchemy.dialects import mysql, firebird, postgresql, oracle, \
sqlite, mssql
from sqlalchemy import util
import datetime
import collections
from sqlalchemy import text, literal_column
from sqlalchemy import and_, not_, between, or_
from sqlalchemy.sql import true, false, null
class LoopOperate(operators.ColumnOperators):
    """Minimal ColumnOperators stub whose operate() returns the operator
    object itself, letting tests assert via is_() that an operator dispatched
    through the ColumnOperators protocol round-trips unchanged."""
    def operate(self, op, *other, **kwargs):
        # Echo the operator back instead of building an expression tree.
        return op
class DefaultColumnComparatorTest(fixtures.TestBase):
def _do_scalar_test(self, operator, compare_to):
left = column('left')
assert left.comparator.operate(operator).compare(
compare_to(left)
)
self._loop_test(operator)
def _do_operate_test(self, operator, right=column('right')):
left = column('left')
assert left.comparator.operate(
operator,
right).compare(
BinaryExpression(
_literal_as_text(left),
_literal_as_text(right),
operator))
assert operator(
left,
right).compare(
BinaryExpression(
_literal_as_text(left),
_literal_as_text(right),
operator))
self._loop_test(operator, right)
def _loop_test(self, operator, *arg):
l = LoopOperate()
is_(
operator(l, *arg),
operator
)
def test_desc(self):
self._do_scalar_test(operators.desc_op, desc)
def test_asc(self):
self._do_scalar_test(operators.asc_op, asc)
def test_plus(self):
self._do_operate_test(operators.add)
def test_is_null(self):
self._do_operate_test(operators.is_, None)
def test_isnot_null(self):
self._do_operate_test(operators.isnot, None)
def test_is_null_const(self):
self._do_operate_test(operators.is_, null())
def test_is_true_const(self):
self._do_operate_test(operators.is_, true())
def test_is_false_const(self):
self._do_operate_test(operators.is_, false())
def test_equals_true(self):
self._do_operate_test(operators.eq, True)
def test_notequals_true(self):
self._do_operate_test(operators.ne, True)
def test_is_true(self):
self._do_operate_test(operators.is_, True)
def test_isnot_true(self):
self._do_operate_test(operators.isnot, True)
def test_is_false(self):
self._do_operate_test(operators.is_, False)
def test_isnot_false(self):
self._do_operate_test(operators.isnot, False)
def test_like(self):
self._do_operate_test(operators.like_op)
def test_notlike(self):
self._do_operate_test(operators.notlike_op)
def test_ilike(self):
self._do_operate_test(operators.ilike_op)
def test_notilike(self):
self._do_operate_test(operators.notilike_op)
def test_is(self):
self._do_operate_test(operators.is_)
def test_isnot(self):
self._do_operate_test(operators.isnot)
def test_no_getitem(self):
assert_raises_message(
NotImplementedError,
"Operator 'getitem' is not supported on this expression",
self._do_operate_test, operators.getitem
)
assert_raises_message(
NotImplementedError,
"Operator 'getitem' is not supported on this expression",
lambda: column('left')[3]
)
def test_in(self):
left = column('left')
assert left.comparator.operate(operators.in_op, [1, 2, 3]).compare(
BinaryExpression(
left,
Grouping(ClauseList(
literal(1), literal(2), literal(3)
)),
operators.in_op
)
)
self._loop_test(operators.in_op, [1, 2, 3])
    def test_notin(self):
        """``notin_op`` mirrors ``in_op``: a BinaryExpression with a
        Grouping of literals on the right."""
        left = column('left')
        assert left.comparator.operate(operators.notin_op, [1, 2, 3]).compare(
            BinaryExpression(
                left,
                Grouping(ClauseList(
                    literal(1), literal(2), literal(3)
                )),
                operators.notin_op
            )
        )
        self._loop_test(operators.notin_op, [1, 2, 3])
    def test_in_no_accept_list_of_non_column_element(self):
        """``in_()`` rejects a list whose members are not expressions."""
        left = column('left')
        foo = ClauseList()  # a clause list is not an individual expression
        assert_raises_message(
            exc.InvalidRequestError,
            r"in_\(\) accepts either a list of expressions or a selectable:",
            left.in_, [foo]
        )
    def test_in_no_accept_non_list_non_selectable(self):
        """``in_()`` rejects a bare column (neither list nor selectable)."""
        left = column('left')
        right = column('right')
        assert_raises_message(
            exc.InvalidRequestError,
            r"in_\(\) accepts either a list of expressions or a selectable:",
            left.in_, right
        )
    def test_in_no_accept_non_list_thing_with_getitem(self):
        """``in_()`` must not treat a __getitem__-capable expression as a
        sequence of values.  See [ticket:2726]."""
        class HasGetitem(String):
            class comparator_factory(String.Comparator):
                def __getitem__(self, value):
                    return value
        left = column('left')
        right = column('right', HasGetitem)
        assert_raises_message(
            exc.InvalidRequestError,
            r"in_\(\) accepts either a list of expressions or a selectable:",
            left.in_, right
        )
def test_collate(self):
left = column('left')
right = "some collation"
left.comparator.operate(operators.collate, right).compare(
collate(left, right)
)
    def test_concat(self):
        """``concat_op`` dispatches through the comparator."""
        self._do_operate_test(operators.concat_op)
class CustomUnaryOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Test UnaryExpression with custom_op as operator (prefix) and as
    modifier (postfix), including error cases for missing/duplicate ops."""
    __dialect__ = 'default'
    def _factorial_fixture(self):
        """Return an Integer subtype whose comparator builds postfix ``!``,
        prefix ``!!`` and (via ``~``) prefix ``!!!`` unary expressions."""
        class MyInteger(Integer):
            class comparator_factory(Integer.Comparator):
                def factorial(self):
                    # modifier= renders AFTER the operand: "somecol !"
                    return UnaryExpression(self.expr,
                                           modifier=operators.custom_op("!"),
                                           type_=MyInteger)
                def factorial_prefix(self):
                    # operator= renders BEFORE the operand: "!! somecol"
                    return UnaryExpression(self.expr,
                                           operator=operators.custom_op("!!"),
                                           type_=MyInteger)
                def __invert__(self):
                    return UnaryExpression(self.expr,
                                           operator=operators.custom_op("!!!"),
                                           type_=MyInteger)
        return MyInteger
    def test_factorial(self):
        """Postfix custom operator renders after the column."""
        col = column('somecol', self._factorial_fixture())
        self.assert_compile(
            col.factorial(),
            "somecol !"
        )
    def test_double_factorial(self):
        """Chained postfix operators stack without parens."""
        col = column('somecol', self._factorial_fixture())
        self.assert_compile(
            col.factorial().factorial(),
            "somecol ! !"
        )
    def test_factorial_prefix(self):
        """Prefix custom operator renders before the column."""
        col = column('somecol', self._factorial_fixture())
        self.assert_compile(
            col.factorial_prefix(),
            "!! somecol"
        )
    def test_factorial_invert(self):
        """``~`` maps to the comparator's __invert__ custom op."""
        col = column('somecol', self._factorial_fixture())
        self.assert_compile(
            ~col,
            "!!! somecol"
        )
    def test_double_factorial_invert(self):
        """Nested invert parenthesizes the inner unary expression."""
        col = column('somecol', self._factorial_fixture())
        self.assert_compile(
            ~(~col),
            "!!! (!!! somecol)"
        )
    def test_unary_no_ops(self):
        """Compiling a UnaryExpression with neither operator nor modifier
        is an error."""
        assert_raises_message(
            exc.CompileError,
            "Unary expression has no operator or modifier",
            UnaryExpression(literal("x")).compile
        )
    def test_unary_both_ops(self):
        """Compiling a UnaryExpression with both operator and modifier
        is an error."""
        assert_raises_message(
            exc.CompileError,
            "Unary expression does not support operator and "
            "modifier simultaneously",
            UnaryExpression(literal("x"),
                            operator=operators.custom_op("x"),
                            modifier=operators.custom_op("y")).compile
        )
class _CustomComparatorTests(object):
    """Mixin: verify that a type's custom comparator (supplied by the
    subclass via ``_add_override_factory``) overrides ``+`` and ``&``,
    and that the override propagates through column proxies, aliases and
    binary expressions — but not through boolean comparisons."""
    def test_override_builtin(self):
        c1 = Column('foo', self._add_override_factory())
        self._assert_add_override(c1)
    def test_column_proxy(self):
        # the comparator must survive proxying through a select()
        t = Table('t', MetaData(),
                  Column('foo', self._add_override_factory())
                  )
        proxied = t.select().c.foo
        self._assert_add_override(proxied)
        self._assert_and_override(proxied)
    def test_alias_proxy(self):
        # ... and through an alias()
        t = Table('t', MetaData(),
                  Column('foo', self._add_override_factory())
                  )
        proxied = t.alias().c.foo
        self._assert_add_override(proxied)
        self._assert_and_override(proxied)
    def test_binary_propagate(self):
        # arithmetic results retain the custom comparator
        c1 = Column('foo', self._add_override_factory())
        self._assert_add_override(c1 - 6)
        self._assert_and_override(c1 - 6)
    def test_reverse_binary_propagate(self):
        c1 = Column('foo', self._add_override_factory())
        self._assert_add_override(6 - c1)
        self._assert_and_override(6 - c1)
    def test_binary_multi_propagate(self):
        c1 = Column('foo', self._add_override_factory())
        self._assert_add_override((c1 - 6) + 5)
        self._assert_and_override((c1 - 6) + 5)
    def test_no_boolean_propagate(self):
        # a comparison yields a boolean expression, which must NOT carry
        # the custom comparator
        c1 = Column('foo', self._add_override_factory())
        self._assert_not_add_override(c1 == 56)
        self._assert_not_and_override(c1 == 56)
    def _assert_and_override(self, expr):
        assert (expr & text("5")).compare(
            expr.op("goofy_and")(text("5"))
        )
    def _assert_add_override(self, expr):
        assert (expr + 5).compare(
            expr.op("goofy")(5)
        )
    def _assert_not_add_override(self, expr):
        assert not (expr + 5).compare(
            expr.op("goofy")(5)
        )
    def _assert_not_and_override(self, expr):
        assert not (expr & text("5")).compare(
            expr.op("goofy_and")(text("5"))
        )
class CustomComparatorTest(_CustomComparatorTests, fixtures.TestBase):
    """Run _CustomComparatorTests against a plain Integer subclass."""
    def _add_override_factory(self):
        # Integer subtype whose comparator reroutes ``+`` and ``&`` to
        # the custom "goofy" / "goofy_and" operators.
        class GoofyInteger(Integer):
            class comparator_factory(TypeEngine.Comparator):
                def __init__(self, expr):
                    self.expr = expr
                def __and__(self, other):
                    return self.expr.op("goofy_and")(other)
                def __add__(self, other):
                    return self.expr.op("goofy")(other)
        return GoofyInteger
class TypeDecoratorComparatorTest(_CustomComparatorTests, fixtures.TestBase):
    """Run _CustomComparatorTests with the comparator declared on a
    TypeDecorator rather than a direct Integer subclass."""
    def _add_override_factory(self):
        class MyInteger(TypeDecorator):
            impl = Integer
            class comparator_factory(TypeDecorator.Comparator):
                def __init__(self, expr):
                    self.expr = expr
                def __add__(self, other):
                    return self.expr.op("goofy")(other)
                def __and__(self, other):
                    return self.expr.op("goofy_and")(other)
        return MyInteger
class TypeDecoratorWVariantComparatorTest(
        _CustomComparatorTests,
        fixtures.TestBase):
    """Run _CustomComparatorTests on a TypeDecorator that carries a
    dialect variant; the decorator's own comparator (not the variant's)
    must win on the default dialect."""
    def _add_override_factory(self):
        # the variant's comparator uses different operator names; if it
        # leaked through, the mixin's "goofy" assertions would fail
        class SomeOtherInteger(Integer):
            class comparator_factory(TypeEngine.Comparator):
                def __init__(self, expr):
                    self.expr = expr
                def __add__(self, other):
                    return self.expr.op("not goofy")(other)
                def __and__(self, other):
                    return self.expr.op("not goofy_and")(other)
        class MyInteger(TypeDecorator):
            impl = Integer
            class comparator_factory(TypeDecorator.Comparator):
                def __init__(self, expr):
                    self.expr = expr
                def __add__(self, other):
                    return self.expr.op("goofy")(other)
                def __and__(self, other):
                    return self.expr.op("goofy_and")(other)
        return MyInteger().with_variant(SomeOtherInteger, "mysql")
class CustomEmbeddedinTypeDecoratorTest(
        _CustomComparatorTests,
        fixtures.TestBase):
    """Run _CustomComparatorTests where the comparator lives on the
    *impl* type wrapped by a plain TypeDecorator — the decorator must
    inherit the impl's comparator behavior."""
    def _add_override_factory(self):
        class MyInteger(Integer):
            class comparator_factory(TypeEngine.Comparator):
                def __init__(self, expr):
                    self.expr = expr
                def __add__(self, other):
                    return self.expr.op("goofy")(other)
                def __and__(self, other):
                    return self.expr.op("goofy_and")(other)
        class MyDecInteger(TypeDecorator):
            impl = MyInteger
        return MyDecInteger
class NewOperatorTest(_CustomComparatorTests, fixtures.TestBase):
    """Run _CustomComparatorTests with a brand-new comparator method
    (``foob``) instead of overriding an existing operator; the ``&``
    assertions from the mixin are neutralized below."""
    def _add_override_factory(self):
        class MyInteger(Integer):
            class comparator_factory(TypeEngine.Comparator):
                def __init__(self, expr):
                    self.expr = expr
                def foob(self, other):
                    return self.expr.op("foob")(other)
        return MyInteger
    def _assert_add_override(self, expr):
        # "add" here means the new foob() method rather than "+"
        assert (expr.foob(5)).compare(
            expr.op("foob")(5)
        )
    def _assert_not_add_override(self, expr):
        # boolean expressions must not expose the new method at all
        assert not hasattr(expr, "foob")
    def _assert_and_override(self, expr):
        # no "&" override in this fixture; nothing to assert
        pass
    def _assert_not_and_override(self, expr):
        pass
class ExtensionOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Test UserDefinedType comparators hooking contains(), __getitem__,
    __lshift__ and __rshift__ into SQL compilation."""
    __dialect__ = 'default'
    def test_contains(self):
        class MyType(UserDefinedType):
            class comparator_factory(UserDefinedType.Comparator):
                def contains(self, other, **kw):
                    return self.op("->")(other)
        self.assert_compile(
            Column('x', MyType()).contains(5),
            "x -> :x_1"
        )
    def test_getitem(self):
        class MyType(UserDefinedType):
            class comparator_factory(UserDefinedType.Comparator):
                def __getitem__(self, index):
                    return self.op("->")(index)
        self.assert_compile(
            Column('x', MyType())[5],
            "x -> :x_1"
        )
    def test_op_not_an_iterator(self):
        # a __getitem__-capable column must not look like an iterable
        # see [ticket:2726]
        # NOTE(review): collections.Iterable moved to collections.abc in
        # Python 3.3 and the alias was removed in 3.10 — this line will
        # break on modern Pythons; kept as-is since this file still
        # supports Python 2 (see util.py3k usage below).
        class MyType(UserDefinedType):
            class comparator_factory(UserDefinedType.Comparator):
                def __getitem__(self, index):
                    return self.op("->")(index)
        col = Column('x', MyType())
        assert not isinstance(col, collections.Iterable)
    def test_lshift(self):
        class MyType(UserDefinedType):
            class comparator_factory(UserDefinedType.Comparator):
                def __lshift__(self, other):
                    return self.op("->")(other)
        self.assert_compile(
            Column('x', MyType()) << 5,
            "x -> :x_1"
        )
    def test_rshift(self):
        class MyType(UserDefinedType):
            class comparator_factory(UserDefinedType.Comparator):
                def __rshift__(self, other):
                    return self.op("->")(other)
        self.assert_compile(
            Column('x', MyType()) >> 5,
            "x -> :x_1"
        )
class BooleanEvalTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """test standalone booleans being wrapped in an AsBoolean, as well
    as true/false compilation.

    Each test compiles the same expression under a dialect with and/or
    without native boolean support: native dialects render the bare
    column, non-native dialects fall back to ``x = 1`` / ``x = 0``.
    """
    def _dialect(self, native_boolean):
        """Return a default dialect with supports_native_boolean forced
        to the given value."""
        d = default.DefaultDialect()
        d.supports_native_boolean = native_boolean
        return d
    def test_one(self):
        c = column('x', Boolean)
        self.assert_compile(
            select([c]).where(c),
            "SELECT x WHERE x",
            dialect=self._dialect(True)
        )
    def test_two_a(self):
        c = column('x', Boolean)
        self.assert_compile(
            select([c]).where(c),
            "SELECT x WHERE x = 1",
            dialect=self._dialect(False)
        )
    def test_two_b(self):
        # same as test_two_a via the whereclause= keyword
        c = column('x', Boolean)
        self.assert_compile(
            select([c], whereclause=c),
            "SELECT x WHERE x = 1",
            dialect=self._dialect(False)
        )
    def test_three_a(self):
        c = column('x', Boolean)
        self.assert_compile(
            select([c]).where(~c),
            "SELECT x WHERE x = 0",
            dialect=self._dialect(False)
        )
    def test_three_b(self):
        c = column('x', Boolean)
        self.assert_compile(
            select([c], whereclause=~c),
            "SELECT x WHERE x = 0",
            dialect=self._dialect(False)
        )
    def test_four(self):
        c = column('x', Boolean)
        self.assert_compile(
            select([c]).where(~c),
            "SELECT x WHERE NOT x",
            dialect=self._dialect(True)
        )
    def test_five_a(self):
        # same coercion applies in HAVING
        c = column('x', Boolean)
        self.assert_compile(
            select([c]).having(c),
            "SELECT x HAVING x = 1",
            dialect=self._dialect(False)
        )
    def test_five_b(self):
        c = column('x', Boolean)
        self.assert_compile(
            select([c], having=c),
            "SELECT x HAVING x = 1",
            dialect=self._dialect(False)
        )
    def test_six(self):
        # OR of constants short-circuits to the true constant
        self.assert_compile(
            or_(false(), true()),
            "1 = 1",
            dialect=self._dialect(False)
        )
    def test_eight(self):
        # AND of constants short-circuits to the false constant
        self.assert_compile(
            and_(false(), true()),
            "false",
            dialect=self._dialect(True)
        )
    def test_nine(self):
        self.assert_compile(
            and_(false(), true()),
            "0 = 1",
            dialect=self._dialect(False)
        )
    def test_ten(self):
        # explicit comparison compiles as an ordinary binary, not AsBoolean
        c = column('x', Boolean)
        self.assert_compile(
            c == 1,
            "x = :x_1",
            dialect=self._dialect(False)
        )
    def test_eleven(self):
        c = column('x', Boolean)
        self.assert_compile(
            c.is_(true()),
            "x IS true",
            dialect=self._dialect(True)
        )
    def test_twelve(self):
        c = column('x', Boolean)
        # I don't have a solution for this one yet,
        # other than adding some heavy-handed conditionals
        # into compiler
        self.assert_compile(
            c.is_(true()),
            "x IS 1",
            dialect=self._dialect(False)
        )
class ConjunctionTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Test interaction of and_() / or_() with boolean and null
    constants: short-circuiting, constant folding, and NULL handling.
    """
    __dialect__ = default.DefaultDialect(supports_native_boolean=True)
    def test_one(self):
        # NOT (AND of a single true) folds to false
        self.assert_compile(~and_(true()), "false")
    def test_two(self):
        self.assert_compile(or_(~and_(true())), "false")
    def test_three(self):
        # fully-empty conjunction renders as nothing
        self.assert_compile(or_(and_()), "")
    def test_four(self):
        # single-element or_() collapses to its element
        x = column('x')
        self.assert_compile(
            and_(or_(x == 5), or_(x == 7)),
            "x = :x_1 AND x = :x_2")
    def test_five(self):
        # a true constant "ifnone'd" away drops out of the AND
        x = column("x")
        self.assert_compile(
            and_(true()._ifnone(None), x == 7),
            "x = :x_1"
        )
    def test_six(self):
        # true() short-circuits OR regardless of position
        x = column("x")
        self.assert_compile(or_(true(), x == 7), "true")
        self.assert_compile(or_(x == 7, true()), "true")
        self.assert_compile(~or_(x == 7, true()), "false")
    def test_six_pt_five(self):
        x = column("x")
        self.assert_compile(select([x]).where(or_(x == 7, true())),
                            "SELECT x WHERE true")
        self.assert_compile(
            select(
                [x]).where(
                or_(
                    x == 7,
                    true())),
            "SELECT x WHERE 1 = 1",
            dialect=default.DefaultDialect(
                supports_native_boolean=False))
    def test_seven(self):
        # true() elements are pruned from AND
        x = column("x")
        self.assert_compile(
            and_(true(), x == 7, true(), x == 9),
            "x = :x_1 AND x = :x_2")
    def test_eight(self):
        # false() elements are pruned from OR
        x = column("x")
        self.assert_compile(
            or_(false(), x == 7, false(), x == 9),
            "x = :x_1 OR x = :x_2")
    def test_nine(self):
        # a false() inside AND collapses the whole conjunction
        x = column("x")
        self.assert_compile(
            and_(x == 7, x == 9, false(), x == 5),
            "false"
        )
        self.assert_compile(
            ~and_(x == 7, x == 9, false(), x == 5),
            "true"
        )
    def test_ten(self):
        # None operands render as SQL NULL, not as booleans
        self.assert_compile(
            and_(None, None),
            "NULL AND NULL"
        )
    def test_eleven(self):
        x = column("x")
        self.assert_compile(
            select([x]).where(None).where(None),
            "SELECT x WHERE NULL AND NULL"
        )
    def test_twelve(self):
        x = column("x")
        self.assert_compile(
            select([x]).where(and_(None, None)),
            "SELECT x WHERE NULL AND NULL"
        )
    def test_thirteen(self):
        x = column("x")
        self.assert_compile(
            select([x]).where(~and_(None, None)),
            "SELECT x WHERE NOT (NULL AND NULL)"
        )
    def test_fourteen(self):
        x = column("x")
        self.assert_compile(
            select([x]).where(~null()),
            "SELECT x WHERE NOT NULL"
        )
    def test_constant_non_singleton(self):
        # each call returns a fresh object; not module-level singletons
        is_not_(null(), null())
        is_not_(false(), false())
        is_not_(true(), true())
    def test_constant_render_distinct(self):
        # distinct constants get distinct anon labels in the columns clause
        self.assert_compile(
            select([null(), null()]),
            "SELECT NULL AS anon_1, NULL AS anon_2"
        )
        self.assert_compile(
            select([true(), true()]),
            "SELECT true AS anon_1, true AS anon_2"
        )
        self.assert_compile(
            select([false(), false()]),
            "SELECT false AS anon_1, false AS anon_2"
        )
class OperatorPrecedenceTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Verify where the compiler inserts (and omits) parentheses for
    mixed-precedence expressions, COLLATE, NOT, and custom .op()
    operators with explicit precedence."""
    __dialect__ = 'default'
    # shared fixture tables; never mutated by the tests
    table1 = table('mytable',
                   column('myid', Integer),
                   column('name', String),
                   column('description', String),
                   )
    table2 = table('op', column('field'))
    def test_operator_precedence_1(self):
        # comparison result compared to NULL needs parens
        self.assert_compile(
            self.table2.select((self.table2.c.field == 5) == None),
            "SELECT op.field FROM op WHERE (op.field = :field_1) IS NULL")
    def test_operator_precedence_2(self):
        # + binds tighter than =, so no parens needed
        self.assert_compile(
            self.table2.select(
                (self.table2.c.field + 5) == self.table2.c.field),
            "SELECT op.field FROM op WHERE op.field + :field_1 = op.field")
    def test_operator_precedence_3(self):
        # + under * keeps its parens
        self.assert_compile(
            self.table2.select((self.table2.c.field + 5) * 6),
            "SELECT op.field FROM op WHERE (op.field + :field_1) * :param_1")
    def test_operator_precedence_4(self):
        # * under + needs no parens
        self.assert_compile(
            self.table2.select(
                (self.table2.c.field * 5) + 6),
            "SELECT op.field FROM op WHERE op.field * :field_1 + :param_1")
    def test_operator_precedence_5(self):
        self.assert_compile(self.table2.select(
            5 + self.table2.c.field.in_([5, 6])),
            "SELECT op.field FROM op WHERE :param_1 + "
            "(op.field IN (:field_1, :field_2))")
    def test_operator_precedence_6(self):
        self.assert_compile(self.table2.select(
            (5 + self.table2.c.field).in_([5, 6])),
            "SELECT op.field FROM op WHERE :field_1 + op.field "
            "IN (:param_1, :param_2)")
    def test_operator_precedence_7(self):
        self.assert_compile(self.table2.select(
            not_(and_(self.table2.c.field == 5,
                      self.table2.c.field == 7))),
            "SELECT op.field FROM op WHERE NOT "
            "(op.field = :field_1 AND op.field = :field_2)")
    def test_operator_precedence_8(self):
        # NOT of an equality negates the operator itself (!=)
        self.assert_compile(
            self.table2.select(
                not_(
                    self.table2.c.field == 5)),
            "SELECT op.field FROM op WHERE op.field != :field_1")
    def test_operator_precedence_9(self):
        # NOT of BETWEEN becomes NOT BETWEEN
        self.assert_compile(self.table2.select(
            not_(self.table2.c.field.between(5, 6))),
            "SELECT op.field FROM op WHERE "
            "op.field NOT BETWEEN :field_1 AND :field_2")
    def test_operator_precedence_10(self):
        # NOT applied to the column, then compared, keeps its parens
        self.assert_compile(
            self.table2.select(
                not_(
                    self.table2.c.field) == 5),
            "SELECT op.field FROM op WHERE (NOT op.field) = :param_1")
    def test_operator_precedence_11(self):
        self.assert_compile(self.table2.select(
            (self.table2.c.field == self.table2.c.field).
            between(False, True)),
            "SELECT op.field FROM op WHERE (op.field = op.field) "
            "BETWEEN :param_1 AND :param_2")
    def test_operator_precedence_12(self):
        # standalone between() construct, same rendering as method form
        self.assert_compile(self.table2.select(
            between((self.table2.c.field == self.table2.c.field),
                    False, True)),
            "SELECT op.field FROM op WHERE (op.field = op.field) "
            "BETWEEN :param_1 AND :param_2")
    def test_operator_precedence_13(self):
        self.assert_compile(
            self.table2.select(
                self.table2.c.field.match(
                    self.table2.c.field).is_(None)),
            "SELECT op.field FROM op WHERE (op.field MATCH op.field) IS NULL")
    def test_operator_precedence_collate_1(self):
        self.assert_compile(
            self.table1.c.name == literal('foo').collate('utf-8'),
            "mytable.name = (:param_1 COLLATE utf-8)"
        )
    def test_operator_precedence_collate_2(self):
        # COLLATE applied to the whole comparison: no parens
        self.assert_compile(
            (self.table1.c.name == literal('foo')).collate('utf-8'),
            "mytable.name = :param_1 COLLATE utf-8"
        )
    def test_operator_precedence_collate_3(self):
        self.assert_compile(
            self.table1.c.name.collate('utf-8') == 'foo',
            "(mytable.name COLLATE utf-8) = :param_1"
        )
    def test_operator_precedence_collate_4(self):
        self.assert_compile(
            and_(
                (self.table1.c.name == literal('foo')).collate('utf-8'),
                (self.table2.c.field == literal('bar')).collate('utf-8'),
            ),
            "mytable.name = :param_1 COLLATE utf-8 "
            "AND op.field = :param_2 COLLATE utf-8"
        )
    def test_operator_precedence_collate_5(self):
        # COLLATE in ORDER BY renders unparenthesized
        self.assert_compile(
            select([self.table1.c.name]).order_by(
                self.table1.c.name.collate('utf-8').desc()),
            "SELECT mytable.name FROM mytable "
            "ORDER BY mytable.name COLLATE utf-8 DESC"
        )
    def test_operator_precedence_collate_6(self):
        self.assert_compile(
            select([self.table1.c.name]).order_by(
                self.table1.c.name.collate('utf-8').desc().nullslast()),
            "SELECT mytable.name FROM mytable "
            "ORDER BY mytable.name COLLATE utf-8 DESC NULLS LAST"
        )
    def test_operator_precedence_collate_7(self):
        self.assert_compile(
            select([self.table1.c.name]).order_by(
                self.table1.c.name.collate('utf-8').asc()),
            "SELECT mytable.name FROM mytable "
            "ORDER BY mytable.name COLLATE utf-8 ASC"
        )
    def test_commutative_operators(self):
        # string + is concatenation (||); * still multiplies
        self.assert_compile(
            literal("a") + literal("b") * literal("c"),
            ":param_1 || :param_2 * :param_3"
        )
    def test_op_operators(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid.op('hoho')(12) == 14),
            "SELECT mytable.myid, mytable.name, mytable.description FROM "
            "mytable WHERE (mytable.myid hoho :myid_1) = :param_1"
        )
    def test_op_operators_comma_precedence(self):
        # a custom op inside a function argument needs no parens
        self.assert_compile(
            func.foo(self.table1.c.myid.op('hoho')(12)),
            "foo(mytable.myid hoho :myid_1)"
        )
    def test_op_operators_comparison_precedence(self):
        self.assert_compile(
            self.table1.c.myid.op('hoho')(12) == 5,
            "(mytable.myid hoho :myid_1) = :param_1"
        )
    def test_op_operators_custom_precedence(self):
        # explicit precedence= controls parenthesization between two
        # custom operators: lower-precedence inner op gets no parens,
        # higher-precedence inner op does
        op1 = self.table1.c.myid.op('hoho', precedence=5)
        op2 = op1(5).op('lala', precedence=4)(4)
        op3 = op1(5).op('lala', precedence=6)(4)
        self.assert_compile(op2, "mytable.myid hoho :myid_1 lala :param_1")
        self.assert_compile(op3, "(mytable.myid hoho :myid_1) lala :param_1")
class OperatorAssociativityTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Verify parenthesization driven by left-associativity of ``-`` and
    ``/``: a right-hand nested same/lower-precedence expression keeps
    parens, a left-hand one does not.  ``.label()`` wrapping must not
    change the result."""
    __dialect__ = 'default'
    def test_associativity_1(self):
        f = column('f')
        self.assert_compile(f - f, "f - f")
    def test_associativity_2(self):
        f = column('f')
        self.assert_compile(f - f - f, "(f - f) - f")
    def test_associativity_3(self):
        f = column('f')
        self.assert_compile((f - f) - f, "(f - f) - f")
    def test_associativity_4(self):
        f = column('f')
        self.assert_compile((f - f).label('foo') - f, "(f - f) - f")
    def test_associativity_5(self):
        f = column('f')
        self.assert_compile(f - (f - f), "f - (f - f)")
    def test_associativity_6(self):
        f = column('f')
        self.assert_compile(f - (f - f).label('foo'), "f - (f - f)")
    def test_associativity_7(self):
        f = column('f')
        # because - less precedent than /
        self.assert_compile(f / (f - f), "f / (f - f)")
    def test_associativity_8(self):
        f = column('f')
        self.assert_compile(f / (f - f).label('foo'), "f / (f - f)")
    def test_associativity_9(self):
        f = column('f')
        self.assert_compile(f / f - f, "f / f - f")
    def test_associativity_10(self):
        f = column('f')
        self.assert_compile((f / f) - f, "f / f - f")
    def test_associativity_11(self):
        f = column('f')
        self.assert_compile((f / f).label('foo') - f, "f / f - f")
    def test_associativity_12(self):
        f = column('f')
        # because / more precedent than -
        self.assert_compile(f - (f / f), "f - f / f")
    def test_associativity_13(self):
        f = column('f')
        self.assert_compile(f - (f / f).label('foo'), "f - f / f")
    def test_associativity_14(self):
        f = column('f')
        self.assert_compile(f - f / f, "f - f / f")
    def test_associativity_15(self):
        f = column('f')
        self.assert_compile((f - f) / f, "(f - f) / f")
    def test_associativity_16(self):
        f = column('f')
        self.assert_compile(((f - f) / f) - f, "(f - f) / f - f")
    def test_associativity_17(self):
        f = column('f')
        # - lower precedence than /
        self.assert_compile((f - f) / (f - f), "(f - f) / (f - f)")
    def test_associativity_18(self):
        f = column('f')
        # / higher precedence than -
        self.assert_compile((f / f) - (f / f), "f / f - f / f")
    def test_associativity_19(self):
        f = column('f')
        self.assert_compile((f / f) - (f - f), "f / f - (f - f)")
    def test_associativity_20(self):
        f = column('f')
        self.assert_compile((f / f) / (f - f), "(f / f) / (f - f)")
    def test_associativity_21(self):
        f = column('f')
        self.assert_compile(f / (f / (f - f)), "f / (f / (f - f))")
class InTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Compilation tests for in_() / notin_(): plain values, literals,
    expressions, columns, subqueries, text, unions, NULL, and the
    empty-sequence degenerate case."""
    __dialect__ = 'default'
    # shared fixture tables; never mutated by the tests
    table1 = table('mytable',
                   column('myid', Integer),
                   )
    table2 = table(
        'myothertable',
        column('otherid', Integer),
        column('othername', String)
    )
    def test_in_1(self):
        self.assert_compile(self.table1.c.myid.in_(['a']),
                            "mytable.myid IN (:myid_1)")
    def test_in_2(self):
        self.assert_compile(~self.table1.c.myid.in_(['a']),
                            "mytable.myid NOT IN (:myid_1)")
    def test_in_3(self):
        self.assert_compile(self.table1.c.myid.in_(['a', 'b']),
                            "mytable.myid IN (:myid_1, :myid_2)")
    def test_in_4(self):
        # an arbitrary iterator, not just a list, is accepted
        self.assert_compile(self.table1.c.myid.in_(iter(['a', 'b'])),
                            "mytable.myid IN (:myid_1, :myid_2)")
    def test_in_5(self):
        # pre-made literal() uses param_N naming instead of myid_N
        self.assert_compile(self.table1.c.myid.in_([literal('a')]),
                            "mytable.myid IN (:param_1)")
    def test_in_6(self):
        self.assert_compile(self.table1.c.myid.in_([literal('a'), 'b']),
                            "mytable.myid IN (:param_1, :myid_1)")
    def test_in_7(self):
        self.assert_compile(
            self.table1.c.myid.in_([literal('a'), literal('b')]),
            "mytable.myid IN (:param_1, :param_2)")
    def test_in_8(self):
        self.assert_compile(self.table1.c.myid.in_(['a', literal('b')]),
                            "mytable.myid IN (:myid_1, :param_1)")
    def test_in_9(self):
        # expressions are allowed as IN members
        self.assert_compile(self.table1.c.myid.in_([literal(1) + 'a']),
                            "mytable.myid IN (:param_1 + :param_2)")
    def test_in_10(self):
        self.assert_compile(self.table1.c.myid.in_([literal('a') + 'a', 'b']),
                            "mytable.myid IN (:param_1 || :param_2, :myid_1)")
    def test_in_11(self):
        self.assert_compile(
            self.table1.c.myid.in_(
                [
                    literal('a') +
                    literal('a'),
                    literal('b')]),
            "mytable.myid IN (:param_1 || :param_2, :param_3)")
    def test_in_12(self):
        self.assert_compile(self.table1.c.myid.in_([1, literal(3) + 4]),
                            "mytable.myid IN (:myid_1, :param_1 + :param_2)")
    def test_in_13(self):
        self.assert_compile(self.table1.c.myid.in_([literal('a') < 'b']),
                            "mytable.myid IN (:param_1 < :param_2)")
    def test_in_14(self):
        # column objects are allowed as IN members
        self.assert_compile(self.table1.c.myid.in_([self.table1.c.myid]),
                            "mytable.myid IN (mytable.myid)")
    def test_in_15(self):
        self.assert_compile(self.table1.c.myid.in_(['a', self.table1.c.myid]),
                            "mytable.myid IN (:myid_1, mytable.myid)")
    def test_in_16(self):
        self.assert_compile(self.table1.c.myid.in_([literal('a'),
                                                    self.table1.c.myid]),
                            "mytable.myid IN (:param_1, mytable.myid)")
    def test_in_17(self):
        self.assert_compile(
            self.table1.c.myid.in_(
                [
                    literal('a'),
                    self.table1.c.myid +
                    'a']),
            "mytable.myid IN (:param_1, mytable.myid + :myid_1)")
    def test_in_18(self):
        self.assert_compile(
            self.table1.c.myid.in_(
                [
                    literal(1),
                    'a' +
                    self.table1.c.myid]),
            "mytable.myid IN (:param_1, :myid_1 + mytable.myid)")
    def test_in_19(self):
        self.assert_compile(self.table1.c.myid.in_([1, 2, 3]),
                            "mytable.myid IN (:myid_1, :myid_2, :myid_3)")
    def test_in_20(self):
        # a select() renders as a subquery
        self.assert_compile(self.table1.c.myid.in_(
            select([self.table2.c.otherid])),
            "mytable.myid IN (SELECT myothertable.otherid FROM myothertable)")
    def test_in_21(self):
        self.assert_compile(~self.table1.c.myid.in_(
            select([self.table2.c.otherid])),
            "mytable.myid NOT IN (SELECT myothertable.otherid FROM myothertable)")
    def test_in_22(self):
        # a text() construct is trusted as the subquery body
        self.assert_compile(
            self.table1.c.myid.in_(
                text("SELECT myothertable.otherid FROM myothertable")
            ),
            "mytable.myid IN (SELECT myothertable.otherid "
            "FROM myothertable)"
        )
    @testing.emits_warning('.*empty sequence.*')
    def test_in_23(self):
        # empty IN renders an always-false comparison (with a warning)
        self.assert_compile(self.table1.c.myid.in_([]),
                            "mytable.myid != mytable.myid")
    def test_in_24(self):
        # IN as a column expression gets an anon label
        self.assert_compile(
            select([self.table1.c.myid.in_(select([self.table2.c.otherid]))]),
            "SELECT mytable.myid IN (SELECT myothertable.otherid "
            "FROM myothertable) AS anon_1 FROM mytable"
        )
    def test_in_25(self):
        self.assert_compile(
            select([self.table1.c.myid.in_(
                select([self.table2.c.otherid]).as_scalar())]),
            "SELECT mytable.myid IN (SELECT myothertable.otherid "
            "FROM myothertable) AS anon_1 FROM mytable"
        )
    def test_in_26(self):
        self.assert_compile(self.table1.c.myid.in_(
            union(
                select([self.table1.c.myid], self.table1.c.myid == 5),
                select([self.table1.c.myid], self.table1.c.myid == 12),
            )
        ), "mytable.myid IN ("
            "SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1 "
            "UNION SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_2)")
    def test_in_27(self):
        # test that putting a select in an IN clause does not
        # blow away its ORDER BY clause
        self.assert_compile(
            select([self.table1, self.table2],
                   self.table2.c.otherid.in_(
                select([self.table2.c.otherid],
                       order_by=[self.table2.c.othername],
                       limit=10, correlate=False)
            ),
                from_obj=[self.table1.join(self.table2,
                                           self.table1.c.myid == self.table2.c.otherid)],
                order_by=[self.table1.c.myid]
            ),
            "SELECT mytable.myid, "
            "myothertable.otherid, myothertable.othername FROM mytable "
            "JOIN myothertable ON mytable.myid = myothertable.otherid "
            "WHERE myothertable.otherid IN (SELECT myothertable.otherid "
            "FROM myothertable ORDER BY myothertable.othername "
            "LIMIT :param_1) ORDER BY mytable.myid",
            {'param_1': 10}
        )
    def test_in_28(self):
        # None becomes SQL NULL inside the IN list
        self.assert_compile(
            self.table1.c.myid.in_([None]),
            "mytable.myid IN (NULL)"
        )
    @testing.emits_warning('.*empty sequence.*')
    def test_in_29(self):
        # empty NOT IN renders an always-true comparison
        self.assert_compile(self.table1.c.myid.notin_([]),
                            "mytable.myid = mytable.myid")
    @testing.emits_warning('.*empty sequence.*')
    def test_in_30(self):
        self.assert_compile(~self.table1.c.myid.in_([]),
                            "mytable.myid = mytable.myid")
class MathOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Verify +, *, -, / compile correctly for every combination of
    column / literal / plain Python value on either side."""
    __dialect__ = 'default'
    table1 = table('mytable',
                   column('myid', Integer),
                   )
    def _test_math_op(self, py_op, sql_op):
        """Compile *py_op* over each lhs/rhs pairing and compare against
        *res* with the SQL operator substituted for ``%s``."""
        for (lhs, rhs, res) in (
            (5, self.table1.c.myid, ':myid_1 %s mytable.myid'),
            (5, literal(5), ':param_1 %s :param_2'),
            (self.table1.c.myid, 'b', 'mytable.myid %s :myid_1'),
            (self.table1.c.myid, literal(2.7), 'mytable.myid %s :param_1'),
            (self.table1.c.myid, self.table1.c.myid,
             'mytable.myid %s mytable.myid'),
            (literal(5), 8, ':param_1 %s :param_2'),
            (literal(6), self.table1.c.myid, ':param_1 %s mytable.myid'),
            (literal(7), literal(5.5), ':param_1 %s :param_2'),
        ):
            self.assert_compile(py_op(lhs, rhs), res % sql_op)
    def test_math_op_add(self):
        self._test_math_op(operator.add, '+')
    def test_math_op_mul(self):
        self._test_math_op(operator.mul, '*')
    def test_math_op_sub(self):
        self._test_math_op(operator.sub, '-')
    def test_math_op_div(self):
        # operator.div exists only on Python 2; truediv on Python 3
        if util.py3k:
            self._test_math_op(operator.truediv, '/')
        else:
            self._test_math_op(operator.div, '/')
class ComparisonOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Verify comparison operators compile in either forward or reversed
    form, and that operator expressions survive pickling.

    Fix: ``test_comparison_operators_lt`` had a stray trailing comma
    after the call, creating a discarded one-element tuple; removed.
    """
    __dialect__ = 'default'
    table1 = table('mytable',
                   column('myid', Integer),
                   )
    def test_pickle_operators_one(self):
        clause = (self.table1.c.myid == 12) & \
            self.table1.c.myid.between(15, 20) & \
            self.table1.c.myid.like('hoho')
        eq_(str(clause), str(util.pickle.loads(util.pickle.dumps(clause))))
    def test_pickle_operators_two(self):
        clause = tuple_(1, 2, 3)
        eq_(str(clause), str(util.pickle.loads(util.pickle.dumps(clause))))
    def _test_comparison_op(self, py_op, fwd_op, rev_op):
        """Compile *py_op* over each lhs/rhs pairing; accept either the
        forward (*fwd_op*) or the reversed (*rev_op*) rendering, since
        Python may invoke the reflected operator."""
        dt = datetime.datetime(2012, 5, 10, 15, 27, 18)
        for (lhs, rhs, l_sql, r_sql) in (
            ('a', self.table1.c.myid, ':myid_1', 'mytable.myid'),
            ('a', literal('b'), ':param_2', ':param_1'),  # note swap!
            (self.table1.c.myid, 'b', 'mytable.myid', ':myid_1'),
            (self.table1.c.myid, literal('b'), 'mytable.myid', ':param_1'),
            (self.table1.c.myid, self.table1.c.myid,
             'mytable.myid', 'mytable.myid'),
            (literal('a'), 'b', ':param_1', ':param_2'),
            (literal('a'), self.table1.c.myid, ':param_1', 'mytable.myid'),
            (literal('a'), literal('b'), ':param_1', ':param_2'),
            (dt, literal('b'), ':param_2', ':param_1'),
            (literal('b'), dt, ':param_1', ':param_2'),
        ):
            # the compiled clause should match either (e.g.):
            # 'a' < 'b' -or- 'b' > 'a'.
            compiled = str(py_op(lhs, rhs))
            fwd_sql = "%s %s %s" % (l_sql, fwd_op, r_sql)
            rev_sql = "%s %s %s" % (r_sql, rev_op, l_sql)
            self.assert_(compiled == fwd_sql or compiled == rev_sql,
                         "\n'" + compiled + "'\n does not match\n'" +
                         fwd_sql + "'\n or\n'" + rev_sql + "'")
    def test_comparison_operators_lt(self):
        self._test_comparison_op(operator.lt, '<', '>')
    def test_comparison_operators_gt(self):
        self._test_comparison_op(operator.gt, '>', '<')
    def test_comparison_operators_eq(self):
        self._test_comparison_op(operator.eq, '=', '=')
    def test_comparison_operators_ne(self):
        self._test_comparison_op(operator.ne, '!=', '!=')
    def test_comparison_operators_le(self):
        self._test_comparison_op(operator.le, '<=', '>=')
    def test_comparison_operators_ge(self):
        self._test_comparison_op(operator.ge, '>=', '<=')
class NonZeroTest(fixtures.TestBase):
    """Test Python truth-value evaluation of expression objects:
    ``==`` between identical/compatible expressions yields a usable
    bool; any other boolean coercion raises TypeError."""
    def _raises(self, expr):
        """Assert that bool(expr) is undefined for this expression."""
        assert_raises_message(
            TypeError,
            "Boolean value of this clause is not defined",
            bool, expr
        )
    def _assert_true(self, expr):
        is_(bool(expr), True)
    def _assert_false(self, expr):
        is_(bool(expr), False)
    def test_column_identity_eq(self):
        # same column compared to itself is True
        c1 = column('c1')
        self._assert_true(c1 == c1)
    def test_column_identity_gt(self):
        # ordering comparisons never have a boolean value
        c1 = column('c1')
        self._raises(c1 > c1)
    def test_column_compare_eq(self):
        c1, c2 = column('c1'), column('c2')
        self._assert_false(c1 == c2)
    def test_column_compare_gt(self):
        c1, c2 = column('c1'), column('c2')
        self._raises(c1 > c2)
    def test_binary_identity_eq(self):
        c1 = column('c1')
        expr = c1 > 5
        self._assert_true(expr == expr)
    def test_labeled_binary_identity_eq(self):
        c1 = column('c1')
        expr = (c1 > 5).label(None)
        self._assert_true(expr == expr)
    def test_annotated_binary_identity_eq(self):
        # an annotated copy still compares equal to the original
        c1 = column('c1')
        expr1 = (c1 > 5)
        expr2 = expr1._annotate({"foo": "bar"})
        self._assert_true(expr1 == expr2)
    def test_labeled_binary_compare_gt(self):
        # two separately-created anonymous labels are distinct
        c1 = column('c1')
        expr1 = (c1 > 5).label(None)
        expr2 = (c1 > 5).label(None)
        self._assert_false(expr1 == expr2)
class NegationTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Test negation (``~`` / NOT) rendering: operator inversion for
    comparisons and BETWEEN, NOT-wrapping for conjunctions and plain
    columns."""
    __dialect__ = 'default'
    table1 = table('mytable',
                   column('myid', Integer),
                   column('name', String),
                   )
    def test_negate_operators_1(self):
        # unary minus and NOT both prefix the rendered expression
        for (py_op, op) in (
            (operator.neg, '-'),
            (operator.inv, 'NOT '),
        ):
            for expr, expected in (
                (self.table1.c.myid, "mytable.myid"),
                (literal("foo"), ":param_1"),
            ):
                self.assert_compile(py_op(expr), "%s%s" % (op, expected))
    def test_negate_operators_2(self):
        # ~(x == y) renders as != rather than NOT (...)
        self.assert_compile(
            self.table1.select((self.table1.c.myid != 12) &
                               ~(self.table1.c.name == 'john')),
            "SELECT mytable.myid, mytable.name FROM "
            "mytable WHERE mytable.myid != :myid_1 "
            "AND mytable.name != :name_1"
        )
    def test_negate_operators_3(self):
        # ~between renders as NOT BETWEEN
        self.assert_compile(
            self.table1.select((self.table1.c.myid != 12) &
                               ~(self.table1.c.name.between('jack', 'john'))),
            "SELECT mytable.myid, mytable.name FROM "
            "mytable WHERE mytable.myid != :myid_1 AND "
            "mytable.name NOT BETWEEN :name_1 AND :name_2"
        )
    def test_negate_operators_4(self):
        # ~and_() wraps the whole conjunction in NOT (...)
        self.assert_compile(
            self.table1.select((self.table1.c.myid != 12) &
                               ~and_(self.table1.c.name == 'john',
                                     self.table1.c.name == 'ed',
                                     self.table1.c.name == 'fred')),
            "SELECT mytable.myid, mytable.name FROM "
            "mytable WHERE mytable.myid != :myid_1 AND "
            "NOT (mytable.name = :name_1 AND mytable.name = :name_2 "
            "AND mytable.name = :name_3)"
        )
    def test_negate_operators_5(self):
        # ~column renders as NOT column
        self.assert_compile(
            self.table1.select(
                (self.table1.c.myid != 12) & ~self.table1.c.name),
            "SELECT mytable.myid, mytable.name FROM "
            "mytable WHERE mytable.myid != :myid_1 AND NOT mytable.name")
class LikeTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Tests rendering of like()/ilike() with escapes, negation and the
    PostgreSQL native ILIKE operator."""
    __dialect__ = 'default'
    table1 = table('mytable',
                   column('myid', Integer),
                   column('name', String),
                   )
    def test_like_1(self):
        self.assert_compile(
            self.table1.c.myid.like('somstr'),
            "mytable.myid LIKE :myid_1")
    def test_like_2(self):
        self.assert_compile(
            ~self.table1.c.myid.like('somstr'),
            "mytable.myid NOT LIKE :myid_1")
    def test_like_3(self):
        self.assert_compile(
            self.table1.c.myid.like('somstr', escape='\\'),
            "mytable.myid LIKE :myid_1 ESCAPE '\\'")
    def test_like_4(self):
        self.assert_compile(
            ~self.table1.c.myid.like('somstr', escape='\\'),
            "mytable.myid NOT LIKE :myid_1 ESCAPE '\\'")
    def test_like_5(self):
        # default dialect emulates ILIKE via lower() on both sides
        self.assert_compile(
            self.table1.c.myid.ilike('somstr', escape='\\'),
            "lower(mytable.myid) LIKE lower(:myid_1) ESCAPE '\\'")
    def test_like_6(self):
        self.assert_compile(
            ~self.table1.c.myid.ilike('somstr', escape='\\'),
            "lower(mytable.myid) NOT LIKE lower(:myid_1) ESCAPE '\\'")
    def test_like_7(self):
        # PostgreSQL has a native ILIKE operator; escape char is doubled for %-format
        self.assert_compile(
            self.table1.c.myid.ilike('somstr', escape='\\'),
            "mytable.myid ILIKE %(myid_1)s ESCAPE '\\\\'",
            dialect=postgresql.dialect())
    def test_like_8(self):
        self.assert_compile(
            ~self.table1.c.myid.ilike('somstr', escape='\\'),
            "mytable.myid NOT ILIKE %(myid_1)s ESCAPE '\\\\'",
            dialect=postgresql.dialect())
    def test_like_9(self):
        self.assert_compile(
            self.table1.c.name.ilike('%something%'),
            "lower(mytable.name) LIKE lower(:name_1)")
    def test_like_10(self):
        self.assert_compile(
            self.table1.c.name.ilike('%something%'),
            "mytable.name ILIKE %(name_1)s",
            dialect=postgresql.dialect())
    def test_like_11(self):
        self.assert_compile(
            ~self.table1.c.name.ilike('%something%'),
            "lower(mytable.name) NOT LIKE lower(:name_1)")
    def test_like_12(self):
        self.assert_compile(
            ~self.table1.c.name.ilike('%something%'),
            "mytable.name NOT ILIKE %(name_1)s",
            dialect=postgresql.dialect())
class BetweenTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Tests rendering of BETWEEN / NOT BETWEEN, including SYMMETRIC,
    via both the column method and the standalone between() function."""
    __dialect__ = 'default'
    table1 = table('mytable',
                   column('myid', Integer),
                   column('name', String),
                   )
    def test_between_1(self):
        self.assert_compile(
            self.table1.c.myid.between(1, 2),
            "mytable.myid BETWEEN :myid_1 AND :myid_2")
    def test_between_2(self):
        self.assert_compile(
            ~self.table1.c.myid.between(1, 2),
            "mytable.myid NOT BETWEEN :myid_1 AND :myid_2")
    def test_between_3(self):
        self.assert_compile(
            self.table1.c.myid.between(1, 2, symmetric=True),
            "mytable.myid BETWEEN SYMMETRIC :myid_1 AND :myid_2")
    def test_between_4(self):
        self.assert_compile(
            ~self.table1.c.myid.between(1, 2, symmetric=True),
            "mytable.myid NOT BETWEEN SYMMETRIC :myid_1 AND :myid_2")
    def test_between_5(self):
        self.assert_compile(
            between(self.table1.c.myid, 1, 2, symmetric=True),
            "mytable.myid BETWEEN SYMMETRIC :myid_1 AND :myid_2")
    def test_between_6(self):
        self.assert_compile(
            ~between(self.table1.c.myid, 1, 2, symmetric=True),
            "mytable.myid NOT BETWEEN SYMMETRIC :myid_1 AND :myid_2")
class MatchTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Tests the per-dialect rendering of Column.match() full-text search."""
    __dialect__ = 'default'
    table1 = table('mytable',
                   column('myid', Integer),
                   column('name', String),
                   )
    def test_match_1(self):
        # SQLite: plain MATCH operator
        self.assert_compile(self.table1.c.myid.match('somstr'),
                            "mytable.myid MATCH ?",
                            dialect=sqlite.dialect())
    def test_match_2(self):
        # MySQL: MATCH ... AGAINST (... IN BOOLEAN MODE)
        self.assert_compile(
            self.table1.c.myid.match('somstr'),
            "MATCH (mytable.myid) AGAINST (%s IN BOOLEAN MODE)",
            dialect=mysql.dialect())
    def test_match_3(self):
        # MSSQL: CONTAINS()
        self.assert_compile(self.table1.c.myid.match('somstr'),
                            "CONTAINS (mytable.myid, :myid_1)",
                            dialect=mssql.dialect())
    def test_match_4(self):
        # PostgreSQL: tsvector @@ to_tsquery()
        self.assert_compile(self.table1.c.myid.match('somstr'),
                            "mytable.myid @@ to_tsquery(%(myid_1)s)",
                            dialect=postgresql.dialect())
    def test_match_5(self):
        # Oracle: CONTAINS()
        self.assert_compile(self.table1.c.myid.match('somstr'),
                            "CONTAINS (mytable.myid, :myid_1)",
                            dialect=oracle.dialect())
class ComposedLikeOperatorsTest(fixtures.TestBase, testing.AssertsCompiledSQL):
    """Tests contains()/startswith()/endswith(), which compose LIKE with
    string concatenation; covers the default ``||`` form, MySQL's
    concat(), and Firebird's STARTING WITH."""
    __dialect__ = 'default'
    # --- contains() -------------------------------------------------------
    def test_contains(self):
        self.assert_compile(
            column('x').contains('y'),
            "x LIKE '%%' || :x_1 || '%%'",
            checkparams={'x_1': 'y'}
        )
    def test_contains_escape(self):
        self.assert_compile(
            column('x').contains('y', escape='\\'),
            "x LIKE '%%' || :x_1 || '%%' ESCAPE '\\'",
            checkparams={'x_1': 'y'}
        )
    def test_contains_literal(self):
        # literal_column() is rendered inline, with no bound parameter
        self.assert_compile(
            column('x').contains(literal_column('y')),
            "x LIKE '%%' || y || '%%'",
            checkparams={}
        )
    def test_contains_text(self):
        self.assert_compile(
            column('x').contains(text('y')),
            "x LIKE '%%' || y || '%%'",
            checkparams={}
        )
    def test_not_contains(self):
        self.assert_compile(
            ~column('x').contains('y'),
            "x NOT LIKE '%%' || :x_1 || '%%'",
            checkparams={'x_1': 'y'}
        )
    def test_not_contains_escape(self):
        self.assert_compile(
            ~column('x').contains('y', escape='\\'),
            "x NOT LIKE '%%' || :x_1 || '%%' ESCAPE '\\'",
            checkparams={'x_1': 'y'}
        )
    def test_contains_concat(self):
        # MySQL uses concat() instead of the || operator
        self.assert_compile(
            column('x').contains('y'),
            "x LIKE concat(concat('%%', %s), '%%')",
            checkparams={'x_1': 'y'},
            dialect=mysql.dialect()
        )
    def test_not_contains_concat(self):
        self.assert_compile(
            ~column('x').contains('y'),
            "x NOT LIKE concat(concat('%%', %s), '%%')",
            checkparams={'x_1': 'y'},
            dialect=mysql.dialect()
        )
    def test_contains_literal_concat(self):
        self.assert_compile(
            column('x').contains(literal_column('y')),
            "x LIKE concat(concat('%%', y), '%%')",
            checkparams={},
            dialect=mysql.dialect()
        )
    def test_contains_text_concat(self):
        self.assert_compile(
            column('x').contains(text('y')),
            "x LIKE concat(concat('%%', y), '%%')",
            checkparams={},
            dialect=mysql.dialect()
        )
    # --- startswith() -----------------------------------------------------
    def test_startswith(self):
        self.assert_compile(
            column('x').startswith('y'),
            "x LIKE :x_1 || '%%'",
            checkparams={'x_1': 'y'}
        )
    def test_startswith_escape(self):
        self.assert_compile(
            column('x').startswith('y', escape='\\'),
            "x LIKE :x_1 || '%%' ESCAPE '\\'",
            checkparams={'x_1': 'y'}
        )
    def test_not_startswith(self):
        self.assert_compile(
            ~column('x').startswith('y'),
            "x NOT LIKE :x_1 || '%%'",
            checkparams={'x_1': 'y'}
        )
    def test_not_startswith_escape(self):
        self.assert_compile(
            ~column('x').startswith('y', escape='\\'),
            "x NOT LIKE :x_1 || '%%' ESCAPE '\\'",
            checkparams={'x_1': 'y'}
        )
    def test_startswith_literal(self):
        self.assert_compile(
            column('x').startswith(literal_column('y')),
            "x LIKE y || '%%'",
            checkparams={}
        )
    def test_startswith_text(self):
        self.assert_compile(
            column('x').startswith(text('y')),
            "x LIKE y || '%%'",
            checkparams={}
        )
    def test_startswith_concat(self):
        self.assert_compile(
            column('x').startswith('y'),
            "x LIKE concat(%s, '%%')",
            checkparams={'x_1': 'y'},
            dialect=mysql.dialect()
        )
    def test_not_startswith_concat(self):
        self.assert_compile(
            ~column('x').startswith('y'),
            "x NOT LIKE concat(%s, '%%')",
            checkparams={'x_1': 'y'},
            dialect=mysql.dialect()
        )
    def test_startswith_firebird(self):
        # Firebird has a dedicated STARTING WITH operator
        self.assert_compile(
            column('x').startswith('y'),
            "x STARTING WITH :x_1",
            checkparams={'x_1': 'y'},
            dialect=firebird.dialect()
        )
    def test_not_startswith_firebird(self):
        self.assert_compile(
            ~column('x').startswith('y'),
            "x NOT STARTING WITH :x_1",
            checkparams={'x_1': 'y'},
            dialect=firebird.dialect()
        )
    def test_startswith_literal_mysql(self):
        self.assert_compile(
            column('x').startswith(literal_column('y')),
            "x LIKE concat(y, '%%')",
            checkparams={},
            dialect=mysql.dialect()
        )
    def test_startswith_text_mysql(self):
        self.assert_compile(
            column('x').startswith(text('y')),
            "x LIKE concat(y, '%%')",
            checkparams={},
            dialect=mysql.dialect()
        )
    # --- endswith() -------------------------------------------------------
    def test_endswith(self):
        self.assert_compile(
            column('x').endswith('y'),
            "x LIKE '%%' || :x_1",
            checkparams={'x_1': 'y'}
        )
    def test_endswith_escape(self):
        self.assert_compile(
            column('x').endswith('y', escape='\\'),
            "x LIKE '%%' || :x_1 ESCAPE '\\'",
            checkparams={'x_1': 'y'}
        )
    def test_not_endswith(self):
        self.assert_compile(
            ~column('x').endswith('y'),
            "x NOT LIKE '%%' || :x_1",
            checkparams={'x_1': 'y'}
        )
    def test_not_endswith_escape(self):
        self.assert_compile(
            ~column('x').endswith('y', escape='\\'),
            "x NOT LIKE '%%' || :x_1 ESCAPE '\\'",
            checkparams={'x_1': 'y'}
        )
    def test_endswith_literal(self):
        self.assert_compile(
            column('x').endswith(literal_column('y')),
            "x LIKE '%%' || y",
            checkparams={}
        )
    def test_endswith_text(self):
        self.assert_compile(
            column('x').endswith(text('y')),
            "x LIKE '%%' || y",
            checkparams={}
        )
    def test_endswith_mysql(self):
        self.assert_compile(
            column('x').endswith('y'),
            "x LIKE concat('%%', %s)",
            checkparams={'x_1': 'y'},
            dialect=mysql.dialect()
        )
    def test_not_endswith_mysql(self):
        self.assert_compile(
            ~column('x').endswith('y'),
            "x NOT LIKE concat('%%', %s)",
            checkparams={'x_1': 'y'},
            dialect=mysql.dialect()
        )
    def test_endswith_literal_mysql(self):
        self.assert_compile(
            column('x').endswith(literal_column('y')),
            "x LIKE concat('%%', y)",
            checkparams={},
            dialect=mysql.dialect()
        )
    def test_endswith_text_mysql(self):
        self.assert_compile(
            column('x').endswith(text('y')),
            "x LIKE concat('%%', y)",
            checkparams={},
            dialect=mysql.dialect()
        )
class CustomOpTest(fixtures.TestBase):
    """Tests that the is_comparison flag of op() is tracked on the operator."""
    def test_is_comparison(self):
        c = column('x')
        c2 = column('y')
        # custom operators created via op() carry the is_comparison flag
        op1 = c.op('$', is_comparison=True)(c2).operator
        op2 = c.op('$', is_comparison=False)(c2).operator
        assert operators.is_comparison(op1)
        assert not operators.is_comparison(op2)
class TupleTypingTest(fixtures.TestBase):
    """Tests that tuple_() comparisons coerce plain Python values on the
    right-hand side to the types of the corresponding left-hand columns."""
    def _assert_types(self, expr):
        # each element of the RHS tuple picks up the matching column's type
        eq_(expr.clauses[0].type._type_affinity, Integer)
        eq_(expr.clauses[1].type._type_affinity, String)
        eq_(expr.clauses[2].type._type_affinity, LargeBinary()._type_affinity)
    def test_type_coersion_on_eq(self):
        a, b, c = column(
            'a', Integer), column(
            'b', String), column(
            'c', LargeBinary)
        t1 = tuple_(a, b, c)
        expr = t1 == (3, 'hi', 'there')
        self._assert_types(expr.right)
    def test_type_coersion_on_in(self):
        a, b, c = column(
            'a', Integer), column(
            'b', String), column(
            'c', LargeBinary)
        t1 = tuple_(a, b, c)
        expr = t1.in_([(3, 'hi', 'there'), (4, 'Q', 'P')])
        eq_(len(expr.right.clauses), 2)
        # every candidate tuple in the IN list is coerced the same way
        for elem in expr.right.clauses:
            self._assert_types(elem)
| [
"mike_mp@zzzcomputing.com"
] | mike_mp@zzzcomputing.com |
7d3a565b843d3a511283b8290b2e3e98f9f02a74 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/soisson.py | 2f90d49960b18e683a39c2e7e75ccc653b9bb91e | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 178 | py | ii = [('WilbRLW4.py', 1), ('CarlTFR.py', 6), ('CookGHP2.py', 1), ('KiddJAE.py', 1), ('ClarGE.py', 2), ('BuckWGM.py', 2), ('WadeJEB.py', 1), ('GodwWLN.py', 1), ('BuckWGM2.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
598aa5789fc89d20614a949df27117f073692147 | b2c780661aec8076a0b6d00bf8ea0d443a117df6 | /Popularity/DCAFPilot/test/utils_t.py | b5af29934995578af40c4def334385a5c2d302eb | [] | no_license | maitdaoud/DMWMAnalytics | 894fa2afb8d83a5275f0abd61b74f4f839150cb0 | fec7ef3e5240973db96ba53179940950002adbd8 | refs/heads/master | 2020-04-11T03:33:43.164136 | 2017-04-01T14:07:42 | 2017-04-01T14:07:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | #!/usr/bin/env python
#pylint: disable-msg=C0301,C0103
"""
Unit test for StorageManager class
"""
import os
import re
import time
import unittest
from pymongo import MongoClient
from DCAF.utils.utils import popdb_date, ndays
class testStorageManager(unittest.TestCase):
    """
    Unit tests for the DCAF.utils date helpers (popdb_date, ndays).

    NOTE(review): despite the historical class name, no StorageManager is
    exercised here; the module-level MongoClient import also appears unused
    by these tests.
    """
    def setUp(self):
        "set up connection"
        pass
    def tearDown(self):
        "Perform clean-up"
        pass
    def test_popdb_date(self):
        "Test popdb_date method"
        # YYYYMMDD is converted to the dash form without zero padding
        result = popdb_date('20140105')
        expect = '2014-1-5'
        self.assertEqual(expect, result)
        # an already-converted date passes through unchanged
        result = popdb_date(expect)
        self.assertEqual(expect, result)
    def test_ndays(self):
        "Test ndays function"
        # number of whole days between two YYYYMMDD dates
        time1, time2 = '20141120', '20141124'
        result = ndays(time1, time2)
        expect = 4
        self.assertEqual(expect, result)
#
# main
#
if __name__ == '__main__':
unittest.main()
| [
"vkuznet@gmail.com"
] | vkuznet@gmail.com |
201f20209bdbb0451b07c576336b8ce2de92ec95 | 786de89be635eb21295070a6a3452f3a7fe6712c | /DataSummary/tags/V00-00-05/src/cspad.py | 12f8661db596a4ad88019f31c52e224d8b2a0a18 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,160 | py | import os
import psana
import numpy
import logging
import event_process
import pylab
from mpi4py import MPI
from common import strtype
class cspad(event_process.event_process):
    """Event processor that accumulates CSPAD detector frames per MPI rank,
    merges the partial sums at end of job, and saves a mean image plus a
    pixel-value histogram figure on the reducer rank."""
    def __init__(self):
        self.logger = logging.getLogger(__name__+'.cspad')
        self.output = event_process.event_process_output()
        self.output['in_report'] = None
        self.output['in_report_title'] = None
        # running per-rank sum of frames; allocated lazily on the first frame
        self.frame = None
        # frame counter kept as a 1-element ndarray so it can travel over MPI
        self.nframes = numpy.array([0])
        self.reducer_rank = 0
        return
    def beginJob(self):
        return
    def add_frame(self, frame):
        """Accumulate *frame* (2-D array) into the running sum."""
        # BUG FIX: was ``self.frame == None``.  Once self.frame holds an
        # ndarray that performs an elementwise comparison whose truth value
        # raises ValueError under modern numpy.
        if self.frame is None:
            self.frame = numpy.zeros_like(frame, dtype='float64')
        self.frame += frame
        self.nframes[0] += 1
        return
    def set_stuff(self, psana_src, psana_device, in_report=None, in_report_title=None):
        """Configure the psana source/device and where the result goes in the report."""
        self.src = psana.Source(psana_src)
        self.dev = psana_device
        self.output['in_report'] = in_report
        self.output['in_report_title'] = in_report_title
    def replicate_info(self):
        """Return the (method, args, kwargs) needed to reproduce set_stuff() elsewhere."""
        args = (str(self.src), strtype(self.dev))
        kwargs = {'in_report': self.output['in_report'], 'in_report_title': self.output['in_report_title']}
        self.logger.info('args: {:}'.format(repr(args)))
        self.logger.info('kwargs: {:}'.format(repr(kwargs)))
        return ('set_stuff', args, kwargs)
    def event(self, evt):
        """Assemble the 4 quadrants (8 sections each) of one event into a single
        2-D frame and accumulate it."""
        det = evt.get(self.dev, self.src)  # renamed: local used to shadow the class name
        a = []
        for i in range(0, 4):
            quad = det.quads(i)
            d = quad.data()
            a.append(numpy.vstack([d[j] for j in range(0, 8)]))
        frame_raw = numpy.hstack(a)
        self.add_frame(frame_raw)
        return
    def reduce(self, comm, ranks=None, reducer_rank=None, tag=None):
        """Merge per-rank sums/counts into mergedframe/mergednframes.

        With no reducer/tag the local data is simply copied; otherwise the
        reducer rank receives from every other rank in *ranks* and the
        non-reducer ranks send their partial sums.
        """
        # FIX: avoid the shared mutable default argument (was ``ranks=[]``)
        if ranks is None:
            ranks = []
        self.mergedframe = numpy.zeros_like(self.frame, dtype='float64')
        self.mergednframes = numpy.array([0])
        if reducer_rank is None and tag is None:
            self.mergedframe += self.frame
            self.mergednframes[0] += self.nframes[0]
        elif reducer_rank == comm.Get_rank() and tag is not None:
            for r in ranks:
                if r == reducer_rank:
                    self.mergedframe += self.frame
                    self.mergednframes[0] += self.nframes[0]
                else:
                    # tag+1 carries the frame sum, tag+2 the frame count
                    self.mergedframe += comm.recv(source=r, tag=tag+1)
                    self.mergednframes[0] += comm.recv(source=r, tag=tag+2)
        elif reducer_rank != comm.Get_rank() and tag is not None:
            comm.send(self.frame, dest=reducer_rank, tag=tag+1)
            comm.send(self.nframes, dest=reducer_rank, tag=tag+2)
        return
    def endJob(self):
        """Reduce over MPI, then (reducer rank only) write the figures."""
        self.logger.info('mpi reducing cspad')
        self.reduce(self.parent.comm, ranks=self.reduce_ranks, reducer_rank=self.reducer_rank, tag=66)
        if self.parent.rank == self.reducer_rank:
            self.output['figures'] = {'mean': {}, 'mean_hist': {}, }
            fig = pylab.figure()
            self.avg = self.mergedframe/float(self.mergednframes[0])
            pylab.imshow(self.avg)
            pylab.colorbar()
            self.flat = self.avg.flatten()
            # clip color scale / histogram range to mean +/- 2 sigma
            pylab.clim(self.flat.mean()-2.*self.flat.std(), self.flat.mean()+2.*self.flat.std())
            # NOTE(review): the title reports the local self.nframes, not the
            # merged count; kept as-is to preserve output, but verify intent.
            pylab.title('CSPAD average of {:} frames'.format(self.nframes))
            pylab.savefig( os.path.join( self.output_dir, 'figure_cspad.png' ))
            self.output['figures']['mean']['png'] = os.path.join( self.output_dir, 'figure_cspad.png')
            fig.clear()
            pylab.hist(self.flat, 1000)
            pylab.xlim(self.flat.mean()-2.*self.flat.std(), self.flat.mean()+2.*self.flat.std())
            pylab.title('histogram')
            pylab.savefig( os.path.join( self.output_dir, 'figure_cspad_hist.png' ))
            self.output['figures']['mean_hist']['png'] = os.path.join( self.output_dir, 'figure_cspad_hist.png')
            del fig
        self.parent.output.append(self.output)
        return
| [
"justing@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] | justing@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7 |
ad790c2993b23a15711a5f19aa89999275d6fc8c | 32986e2c8aa585699f810534444c9a3ff1e4f269 | /task4_testing.py | 64f2871585898cfc8150f3430f8545e79e0eb39e | [] | no_license | mingYi-ch/aml_task4 | 5d24a3107a8ca00083c9c4b71a06fdfc7eb5947b | 438050dd3f7e09cc34d34f5db65a5ea9b72e8c6d | refs/heads/master | 2022-03-22T19:45:18.707357 | 2019-12-09T14:02:22 | 2019-12-09T14:02:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | import numpy as np
import biosppy.signals.eeg as eeg
import pandas as pd
def test_eeg(eeg1, eeg2):
    """Smoke-test biosppy EEG band-power extraction on one pair of rows.

    eeg1, eeg2 -- 1 x N arrays (one recording row per channel); they are
    stacked as two columns and processed at a 128 Hz sampling rate.
    Prints the flattened feature-vector shape.
    """
    # testing plot
    x_sample = np.concatenate((eeg1, eeg2), axis = 0)
    # transpose to put the signals into column
    x_sample = np.transpose(x_sample)
    # print theta
    # print(x_sample)
    # x_sample = np.concatenate((x_sample, x_sample), axis=0)
    signal_processed = eeg.eeg(signal=x_sample, sampling_rate=128, show=False)
    # NOTE(review): index -> frequency-band mapping below is assumed from the
    # variable names; confirm against the biosppy.signals.eeg return tuple.
    theta = signal_processed[3]
    alow = signal_processed[4]
    ahigh = signal_processed[5]
    beta = signal_processed[6]
    gamma = signal_processed[7]
    features = np.concatenate((theta, alow, ahigh, beta, gamma), axis=0).ravel()
    print(features.shape)
def read_from_file(eeg1, eeg2, nrows = 10):
    """Load the first *nrows* rows of two EEG CSV files.

    Both files must have an 'Id' column, which is used as the index and
    therefore excluded from the returned arrays.

    Returns a pair of numpy arrays (one per file).
    """
    signals_one = pd.read_csv(eeg1, index_col='Id', nrows=nrows).to_numpy()
    signals_two = pd.read_csv(eeg2, index_col='Id', nrows=nrows).to_numpy()
    return signals_one, signals_two
if __name__ == '__main__':
train_part = read_from_file("train_eeg1.csv", "train_eeg2.csv", 4)
eeg1s = train_part[0]
eeg2s = train_part[1]
# for mat in zip(eeg1s, eeg2s):
# print(mat)
# print(mat[0])
# break
eeg1 = eeg1s[3, :].reshape(1, -1)
eeg2 = eeg1s[3, :].reshape(1, -1)
# print(eeg1.shape) # size 1 * 512
test_eeg(eeg1, eeg2) | [
"myi@student.ethz.ch"
] | myi@student.ethz.ch |
3c699961c03db0286e4b397de0a722d189504754 | 30e2a85fc560165a16813b0486a862317c7a486a | /datastruct_algorithm/jan.py | bb5cbcfb654440320b08cce91cc4251879eb8dfd | [] | no_license | muryliang/python_prac | 2f65b6fdb86c3b3a44f0c6452a154cd497eb2d01 | 0301e8f523a2e31e417fd99a968ad8414e9a1e08 | refs/heads/master | 2021-01-21T11:03:48.397178 | 2017-09-18T04:13:27 | 2017-09-18T04:13:27 | 68,801,688 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,328 | py | import time
import sys
def perform(a, b, goal, failset, trueset):
    """Depth-first search over jug states for the two-jug water puzzle.

    a, b    -- current volumes in jug A and jug B
    goal    -- target volume; reached when either jug, or their sum, equals it
    failset -- visited-state list of (a, b) pairs, mutated in place
    trueset -- output action list; names are appended while unwinding a
               successful recursion, so it ends up in reverse order
    Relies on module-level globals ``lima`` / ``limb`` for the jug capacities.
    Returns True when a sequence of moves reaching *goal* was found.
    """
    # time.sleep(1)
    # print(a, b)
    global lima
    global limb
    res = False
    if a == goal or b == goal or a + b == goal:
        return True
    # pour A into B (as much as fits)
    if res is False and a > 0 and b < limb:
        ares = max(a - (limb-b), 0)
        bres = min(limb, b + a)
        if (ares , bres) not in failset:
            failset.append((ares, bres) )
            res = perform(ares, bres, goal, failset, trueset)
            if res:
                trueset.append("rmove")
    # pour B into A (as much as fits)
    if res is False and b > 0 and a < lima:
        ares = min(lima, a + b)
        bres = max(b - (lima-a), 0)
        if (ares , bres) not in failset:
            failset.append((ares, bres))
            res = perform(ares, bres, goal, failset, trueset)
            if res:
                trueset.append("lmove")
    # empty jug B
    if res is False and b > 0:
        ares = a
        bres = 0
        if (ares , bres) not in failset:
            failset.append((ares, bres))
            res = perform(ares, bres, goal, failset, trueset)
            if res:
                trueset.append("drop b")
    # empty jug A
    if res is False and a > 0:
        ares = 0
        bres = b
        if (ares , bres) not in failset:
            failset.append((ares, bres))
            res = perform(ares, bres, goal, failset, trueset)
            if res:
                trueset.append("drop a")
    # fill jug A to capacity
    if res is False and a < lima:
        ares = lima
        bres = b
        if (ares , bres) not in failset:
            failset.append((ares, bres))
            res = perform(ares, bres, goal, failset, trueset)
            if res:
                trueset.append("fill a")
    # fill jug B to capacity
    if res is False and b < limb:
        ares = a
        bres = limb
        if (ares , bres) not in failset:
            failset.append((ares, bres))
            res = perform(ares, bres, goal, failset, trueset)
            if res:
                trueset.append("fill b")
    # if res is False:
    #     print ("nothing true, return")
    return res
# Command-line driver: python jan.py <capacity_a> <capacity_b> <goal>
failset = [(0,0)]
trueset = list()
lima = int(sys.argv[1])
limb = int(sys.argv[2])
goal = int(sys.argv[3])
if perform(0, 0, goal, failset, trueset):
    print ("success")
else:
    print ("fail")
# actions were appended while unwinding the recursion, hence the reversal
print (list(reversed(trueset)))
| [
"muryliang@gmail.com"
] | muryliang@gmail.com |
44e327bc73b3bedf3e3fae4de740348f8067cb99 | 671a669cc862f68d736a98b3d95bedf96cd7b09e | /Coreference/PDT/pdt_clusterization.py | b66174d38841135639be5bd2405124be4e692b1d | [] | no_license | Jankus1994/Coreference | e258b68c0a75ee3102614220f27c5d163e745c41 | 41b13ce6422ac6c3d139474641e75e502c446162 | refs/heads/master | 2021-01-23T01:55:56.732336 | 2018-05-03T18:06:40 | 2018-05-03T18:06:40 | 85,945,883 | 0 | 1 | null | 2017-03-23T13:08:14 | 2017-03-23T12:15:12 | Python | UTF-8 | Python | false | false | 2,610 | py | # Jan Faryad
# 23. 3. 2017
#
# pdt module to conversion of the coreferennce information from chains to clusters
class PDT_clusterization:
    """Converts PDT coreference chains into coreference clusters.

    Every coreference record links a word (own_ID) to its antecedent
    (coref_ID); both endpoints of a link receive the same cluster id, and
    links whose antecedent already belongs to a cluster join that cluster.
    """
    def __init__(self, list_of_corefs, init_cluster_id):
        self.list_of_corefs = list_of_corefs
        self.list_of_cluster_tuples = []
        # running counter used to hand out fresh cluster ids
        self.cluster_id = init_cluster_id
    def convert_chains_to_clusters(self):
        """Entry point (called from outside): build sorted Cluster_record objects."""
        for link in self.list_of_corefs:
            cid = self.find_coref_cluster(link.coref_ID)
            if cid is None:
                # chain head: its antecedent has no record of its own yet,
                # so open a brand-new cluster for it
                cid = self.new_cluster()
            antecedent_row = (link.coref_ID[0], link.coref_ID[1],
                              link.coref_ID[2], cid, link.coref_dropped)
            anaphor_row = (link.own_ID[0], link.own_ID[1],
                           link.own_ID[2], cid, link.own_dropped)
            self.list_of_cluster_tuples.append(antecedent_row)
            self.list_of_cluster_tuples.append(anaphor_row)
        # rows are (paragraph ID, sentence ID, word ID, cluster ID, dropped)
        self.list_of_cluster_tuples = sorted(self.list_of_cluster_tuples)
        return [Cluster_record(row) for row in self.list_of_cluster_tuples]
    def find_coref_cluster(self, ids):
        """Return the cluster id already recorded for position *ids*, or None
        (the caller then opens a new cluster)."""
        hits = (row[3] for row in self.list_of_cluster_tuples
                if (row[0], row[1], row[2]) == ids)
        return next(hits, None)
    def new_cluster(self):
        """Allocate and return the next unused cluster id."""
        self.cluster_id = self.cluster_id + 1
        return self.cluster_id
    def get_cluster_id(self):
        """Current value of the cluster-id counter."""
        return self.cluster_id
class Cluster_record:
    """One row of the cluster table: a word position plus its cluster id."""
    def __init__(self, tuple):
        # NOTE: the parameter name shadows the ``tuple`` builtin; it is kept
        # unchanged for backward compatibility with existing callers.
        para, sent, word, cluster, dropped = (tuple[0], tuple[1], tuple[2],
                                              tuple[3], tuple[4])
        self.para_ID = para
        self.sent_ID = sent
        self.word_ID = word
        self.cluster_ID = cluster
        self.dropped = dropped
| [
"noreply@github.com"
] | Jankus1994.noreply@github.com |
8bae87091c7930ff91f08ddc1c7770485a685c56 | cb27eabebe1a0b53ff16909f832780203cff063e | /script/script_template.py | 25902ee4cb47ae732d3c541f10776ffe5c8abeb3 | [
"MIT"
] | permissive | yota-p/kaggle_titanic | e31dfc3196d038c61336d2cbd814c76e14ace352 | 36d2c53711482195f519d9280abadf0d6afa9a15 | refs/heads/master | 2023-03-30T10:30:44.790817 | 2021-03-23T15:06:34 | 2021-03-23T15:06:34 | 343,035,079 | 0 | 0 | MIT | 2021-03-23T15:06:35 | 2021-02-28T06:14:06 | Python | UTF-8 | Python | false | false | 427 | py | import os
import gzip
import base64
from pathlib import Path
from typing import Dict
def main():
    """Unpack the embedded gzip+base64-encoded source files onto disk,
    recreating each file's directory structure relative to the CWD."""
    # this is base64 encoded source code
    file_data: Dict = {file_data}  # template placeholder: replaced by a {path: encoded} dict at generation time
    for path, encoded in file_data.items():
        print(path)
        path = Path(path)
        os.makedirs(str(path.parent), exist_ok=True)
        path.write_bytes(gzip.decompress(base64.b64decode(encoded)))
if __name__ == '__main__':
main()
| [
"930713yh@gmail.com"
] | 930713yh@gmail.com |
4853fa85b7e28aec71a8c26dce4015d65e359101 | 401ad378a3c5cf2faaa692680dba29726c7193d5 | /preprocess_data.py | ba74d189f25e56c88448b036e1f9eb406d51dff5 | [] | no_license | thaophung/ASL-recognition-Hololens- | 75293a214dbf067ec799b5b0852dfcca840714b0 | e90c546a6f27838be29e7f173baf685ef090dc92 | refs/heads/master | 2021-09-14T00:56:06.164850 | 2018-05-06T20:30:52 | 2018-05-06T20:30:52 | 113,247,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,202 | py | import numpy as np
import scipy.misc
import os, cv2, random
import shutil
def combine_list_txt(data_path):
    """Read trainlist.txt and vallist.txt found under *data_path*.

    Each line is expected to look like "<clip path> <label>"; everything
    after the last space (the label) is stripped off.

    Returns (trainlist, vallist) as lists of clip paths.
    """
    def _read_clip_list(list_name):
        clips = []
        with open(os.path.join(data_path, list_name)) as handle:
            for line in handle:
                # keep everything before the last space (drops " <label>\n")
                clips.append(line[:line.rfind(' ')])
        return clips
    return _read_clip_list('trainlist.txt'), _read_clip_list('vallist.txt')
def regenerate_data(data_path):
    """Rebuild the fixed-length frame-sequence dataset under npy_dataset_2."""
    seq_len = 10
    frame_shape = (224, 224, 3)
    npy_dir = os.path.join(data_path, 'npy_dataset_2')
    # generate sequences for optical flow: every augmentation switched off,
    # frames taken as one continuous run of seq_len frames
    preprocessing(data_path, npy_dir, seq_len, frame_shape, overwrite=True,
                  normalization=False, mean_subtraction=False, horizontal_flip=False,
                  random_crop=False, consistent=False, continuous_seq=True)
def preprocessing(data_path, dest_dir, seq_len, img_size, overwrite=False,
                  normalization=False, mean_subtraction=False, horizontal_flip=False,
                  random_crop=False, consistent=False, continuous_seq=True):
    '''
    Extract every clip listed in trainlist.txt into a fixed-length frame
    sequence and save it as an .npy file under dest_dir.

    :param data_path: dataset root containing trainlist.txt/vallist.txt and
        the train/ and val/ video directories
    :param dest_dir: output root; a parameters.txt audit file is written here
    :param seq_len: number of frames per sequence (None keeps all frames)
    :param img_size: target (H, W, C) frame size
    :param overwrite: recorded in parameters.txt (not otherwise used here)
    :param normalization: normalize pixel values to (0,1)
    :param mean_subtraction: subtract mean of RGB channels
    :param horizontal_flip: randomly flip frames
    :param random_crop: crop frames at a random location
    :param consistent: whether flip/crop choices are shared across a sequence
    :param continuous_seq: whether the extracted frames are consecutive
    :return: None
    '''
    # write a txt file to keep parameter information for reproducibility
    txt_file = os.path.join(dest_dir,'parameters.txt')
    with open(txt_file,'w') as fo:
        fo.write('seq_len: ' + str(seq_len) +
                 '\noverwrite: ' + str(overwrite) +
                 '\nnormalization: ' + str(normalization) +
                 '\nmean_subtraction: ' + str(mean_subtraction) +
                 '\nhorizontal_flip: ' + str(horizontal_flip) +
                 '\nrandom_crop: ' + str(random_crop) +
                 '\nconsistent: ' + str(consistent) +
                 '\ncontinuous_seq: ' + str(continuous_seq))
    trainlist, vallist = combine_list_txt(data_path)
    train_src = os.path.join(data_path, 'train')
    val_src = os.path.join(data_path, 'val')
    train_dir = os.path.join(dest_dir, 'train')
    val_dir = os.path.join(dest_dir, 'val')
    #os.mkdir(train_dir)
    #os.mkdir(val_dir)
    if mean_subtraction:
        # NOTE(review): calc_mean and UCF_dir are not defined or imported in
        # this module; this branch raises NameError as written — confirm the
        # intended source of these names before enabling mean_subtraction.
        mean = calc_mean(UCF_dir, img_size).astype(dtype='float16')
        np.save(os.path.join(dest_dir, 'mean.npy'), mean)
    else:
        mean = None
    print('Preprocessing ASL data ....')
    # only the training list is processed; the validation pass is disabled
    for clip_list, sub_dir in [(trainlist, train_dir)]: #, (vallist, val_dir)]:
        for clip in clip_list:
            clip_name = os.path.basename(clip)
            clip_category = os.path.dirname(clip)
            category_dir = os.path.join(sub_dir, clip_category)
            if sub_dir == train_dir:
                src_dir = os.path.join(train_src, clip)
            else:
                src_dir = os.path.join(val_src, clip)
            dst_dir = os.path.join(category_dir, clip_name)
            if not os.path.exists(category_dir):
                os.mkdir(category_dir)
            process_clip(clip_category, src_dir, dst_dir, seq_len, img_size, mean=mean,
                         normalization=normalization, horizontal_flip=horizontal_flip,
                         random_crop=random_crop, consistent=consistent,
                         continuous_seq=continuous_seq)
    print("Processing done...")
# down sample image resolution to 216*216, and make sequence length 10
def process_clip(clip_category, src_dir, dst_dir, seq_len, img_size, mean=False, normalization=False,
                 horizontal_flip=False, random_crop=False, consistent=False, continuous_seq=False):
    """Read one video clip, sample/augment its frames, and save them as .npy.

    Short clips (<= 10 frames) and the motion letters 'j'/'z' keep all their
    frames; otherwise exactly seq_len frames are sampled, either as one
    continuous run (continuous_seq) or one frame per equal-sized bin.

    :param clip_category: class label (directory name) of the clip
    :param src_dir: path of the source video file
    :param dst_dir: destination path; the extension is replaced by .npy
    :param seq_len: frames per sequence, or None to keep everything
    :param img_size: (H, W, C) target size forwarded to process_frame
    :param mean: mean frame to subtract, or a falsy value for no subtraction
    :param normalization / horizontal_flip / random_crop / consistent:
        augmentation switches forwarded to process_frame
    :param continuous_seq: sample consecutive frames instead of binned ones
    """
    all_frames = []
    cap = cv2.VideoCapture(src_dir)
    while cap.isOpened():
        succ, frame = cap.read()
        if not succ:
            break
        # append frame that is not all zeros
        if frame.any():
            all_frames.append(frame)
    clip_length = len(all_frames)
    # save all frames unchanged for short clips and for 'j'/'z'
    if seq_len is None or clip_length <= 10 or clip_category == 'j' or clip_category == 'z':
        print(src_dir)
        all_frames = np.stack(all_frames, axis=0)
        dst_dir = os.path.splitext(dst_dir)[0] + '.npy'
        np.save(dst_dir, all_frames)
    else:
        step_size = int(clip_length / (seq_len))
        frame_sequence = []
        # select random first frame index for a continuous sequence
        if continuous_seq:
            start_index = random.randrange(clip_length - seq_len + 1)
        # choose whether to flip or not, once for the whole sequence
        if not horizontal_flip:
            flip = False
        elif horizontal_flip and consistent:
            flip = random.randrange(2) == 1
        if not random_crop:
            x, y = None, None
        xy_set = False
        for i in range(seq_len):
            if continuous_seq:
                index = start_index + i
            else:
                # one random frame per equal-sized bin
                index = i * step_size + random.randrange(step_size)
            frame = all_frames[index]
            # compute flip per frame when not consistent
            if horizontal_flip and not consistent:
                flip = random.randrange(2) == 1
            if random_crop and consistent and not xy_set:
                x = random.randrange(frame.shape[0] - img_size[0])
                y = random.randrange(frame.shape[1] - img_size[1])
                xy_set = True
            elif random_crop and not consistent:
                x = random.randrange(frame.shape[0] - img_size[0])
                y = random.randrange(frame.shape[1] - img_size[1])
            frame = process_frame(frame, img_size, x, y, mean=mean,
                                  normalization=normalization, flip=flip,
                                  random_crop=random_crop)
            frame_sequence.append(frame)
        frame_sequence = np.stack(frame_sequence, axis=0)
        # BUG FIX: the original assigned the '.npy' path to a misspelled
        # variable (dst_dis) and then saved to the unmodified dst_dir,
        # producing files like 'clip.avi.npy' instead of 'clip.npy'.
        dst_dir = os.path.splitext(dst_dir)[0] + '.npy'
        np.save(dst_dir, frame_sequence)
    cap.release()
def process_frame(frame, img_size, x, y, mean=None, normalization=True, flip=True,
                  random_crop=False):
    """Crop (or resize) a single frame, optionally flip it horizontally,
    convert it to float16, and optionally mean-subtract and normalize to (0,1).

    x, y give the top-left corner of the crop when random_crop is True;
    otherwise the frame is resized to img_size.
    """
    if random_crop:
        frame = frame[x:x + img_size[0], y:y + img_size[1], :]
    else:
        frame = scipy.misc.imresize(frame, img_size)
    if flip:
        # mirror along the horizontal axis
        frame = frame[:, ::-1, :]
    frame = frame.astype(dtype='float16')
    if mean is not None:
        frame -= mean
    if normalization:
        frame /= 255
    return frame
if __name__ == '__main__':
'''
extract frames from videos as npy files
'''
#sequence_length = None
#image_size = (216,216,3)
data_path = '/Users/thaophung/workspace/senior_design/dataset'
regenerate_data(data_path)
| [
"noreply@github.com"
] | thaophung.noreply@github.com |
1df3e72e11e7eb14e23c5cb608c39717d22fb745 | bacd03dbc158458d3766c94655950b2de553089e | /dappx/migrations/0003_remove_userprofileinfo_city.py | f59a90eda0014ffe6786fa06578bc59de6ca5295 | [] | no_license | abhifindatution/django | bdad14b35da3d55a912b37be1d48cc337d30f5b4 | 7ac66fcbc0a6975c23bda6e8f53d176c2dbcae6d | refs/heads/master | 2023-05-03T21:33:44.124392 | 2019-08-04T11:58:53 | 2019-08-04T11:58:53 | 200,459,562 | 0 | 0 | null | 2023-04-21T20:35:32 | 2019-08-04T06:47:26 | CSS | UTF-8 | Python | false | false | 333 | py | # Generated by Django 2.1.1 on 2019-05-14 06:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dappx', '0002_auto_20190510_1323'),
]
operations = [
migrations.RemoveField(
model_name='userprofileinfo',
name='city',
),
]
| [
"rajjo@findatuton.com"
] | rajjo@findatuton.com |
3724941a22eb118782c4c142d7dc6097e8d37e35 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-fractionDigits-1-3.py | 32add0c922d5342c7b50eaabb85bc7ee39adc0d0 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 297 | py | from output.models.nist_data.atomic.integer.schema_instance.nistschema_sv_iv_atomic_integer_fraction_digits_1_xsd.nistschema_sv_iv_atomic_integer_fraction_digits_1 import NistschemaSvIvAtomicIntegerFractionDigits1
obj = NistschemaSvIvAtomicIntegerFractionDigits1(
value=825606520242485152
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
e5fef46757ac950b98d7bcd278ede4619963d734 | 11baa71016e83145d00f4b35316c77d40c205b6f | /lib/GA.py | ec1dbb10445469fc91ce3a859b60e4f2f8daefdb | [] | no_license | Syndorik/Genetic-Algorithm | 5b61b5fea39e8042c66f669d7b24fbf3b9c8e43e | bb8899cba83489576ef61da1d5284ddea4a9a2a4 | refs/heads/master | 2020-04-25T16:53:02.096595 | 2019-03-10T19:34:36 | 2019-03-10T19:34:36 | 172,927,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,215 | py | import random
import copy
import os
import time
import math
import csv
from Tree import Tree
from TreePop import TreePop
import copy
from joblib import Parallel, delayed
import joblib
"""
try:
from tkinter import *
from tkinter.ttk import *
except Exception as e:
print("[ERROR]: {0}".format(e))
from Tkinter import *
"""
class GA:
    """Genetic-algorithm engine that evolves a population of Tree individuals.

    Each Tree chromosome is a Prufer sequence (a list of node ids) whose
    quality is computed by ``Tree.calc_fit()`` (lower fitness is better).
    Selection is tournament based, crossover is k-point, and mutation is a
    randomized local search: pairwise swaps (``method == "swap"``) or 2-opt
    style segment reversals (``method == "opt"``).
    """

    def __init__(self, list_files, k_mut_prob = 0.4, k_crossover = 3, tournament_size=7, elitism =True, method = "swap"):
        """
        :param list_files: parsed problem data; ``list_files[0][0][0]`` holds the node count
        :param k_mut_prob: probability that an individual gets mutated
        :param k_crossover: number of cut points used by :meth:`crossover_kpoint`
        :param tournament_size: number of individuals drawn per tournament
        :param elitism: if True, the fittest tree is carried unchanged into the next generation
        :param method: mutation strategy, ``"swap"`` or ``"opt"``
        """
        self.k_mut_prob = k_mut_prob
        self.tournament_size = tournament_size
        self.elitism = elitism
        self.list_files = list_files
        self.k_crossover = k_crossover
        self.nodeNum = list_files[0][0][0]
        self.method = method

    @staticmethod
    def swap(tree, mut_pos1, mut_pos2):
        """Return a copy of *tree* with the Prufer genes at the two indices exchanged.

        The input tree is never modified.
        """
        tmp_tree = copy.deepcopy(tree)
        # Same index twice: nothing to exchange.
        if mut_pos1 == mut_pos2:
            return tmp_tree
        tmp_tree.prufer[mut_pos1], tmp_tree.prufer[mut_pos2] = \
            tmp_tree.prufer[mut_pos2], tmp_tree.prufer[mut_pos1]
        return tmp_tree

    @staticmethod
    def swap_2opt(tree, mut_pos1, mut_pos2):
        """Return a copy of *tree* whose Prufer segment [mut_pos1..mut_pos2] is reversed.

        Bug fix: the original created a deep copy but then reversed the
        segment on (and returned) the *input* tree, so the tree shared by all
        parallel workers in :meth:`mutate_2opt` was mutated concurrently.
        The reversal is now applied to the copy only; the input is untouched.
        """
        tmp_tree = copy.deepcopy(tree)
        if mut_pos1 == mut_pos2:
            return tmp_tree
        # Normalize so that mut_pos1 <= mut_pos2.
        if mut_pos1 > mut_pos2:
            mut_pos1, mut_pos2 = mut_pos2, mut_pos1
        prufer = tmp_tree.prufer
        head = prufer[:mut_pos1]
        middle = prufer[mut_pos1:mut_pos2 + 1][::-1]
        tail = prufer[mut_pos2 + 1:]
        tmp_tree.prufer = head + middle + tail
        return tmp_tree

    def tournament_select(self, population):
        '''
        TreePop() --> Tree(), TreePop()

        Draws ``tournament_size - 1`` random individuals from *population*
        (the same tree may be drawn twice) and returns the fittest of that
        sample together with the sample itself.  This gives weaker trees a
        chance to reproduce while still favouring good ones.

        NOTE(review): TreePop is built here with positional arguments
        ``(tournament_size, list_files)`` while :meth:`evolve_population`
        uses ``(list_files=..., size=...)`` keywords, and only ``size - 1``
        trees are appended -- confirm both against the TreePop constructor.
        '''
        tournament_pop = TreePop(self.tournament_size, self.list_files)
        for _ in range(self.tournament_size - 1):
            tournament_pop.tree_pop.append(random.choice(population.tree_pop))
        return tournament_pop.get_fittest(), tournament_pop

    def crossover_kpoint(self, parent1, parent2):
        """Breed two parents with a k-point crossover and return the child Tree.

        The child's Prufer sequence alternates between parent1 and parent2 at
        ``k_crossover`` distinct, randomly chosen cut points; e.g. for k = 3
        the layout is parent1 | parent2 | parent1 | parent2.
        """
        child_Tree = Tree(self.list_files, prufer=[None for _ in range(self.nodeNum - 2)])
        # k distinct cut points in [0, len]; random.sample replaces the
        # original draw-until-unique loop with the same uniform selection.
        k_rd_point = sorted(random.sample(range(len(parent1.prufer) + 1), self.k_crossover))
        start = 0
        cpt = 0
        parent_to_choose = [parent1, parent2]
        # Fill the child segment by segment, alternating the source parent.
        for end in k_rd_point:
            for i in range(start, end):
                child_Tree.prufer[i] = parent_to_choose[cpt % 2].prufer[i]
            start = end
            cpt += 1
        # Fill the remaining tail (all positions after the last cut point).
        for i in range(len(parent2.prufer)):
            if child_Tree.prufer[i] is None:
                child_Tree.prufer[i] = parent_to_choose[cpt % 2].prufer[i]
        child_Tree.calc_fit()
        return child_Tree

    def crossover_random(self, parent1, parent2):
        '''
        Tree(), Tree() --> Tree()

        Single-segment crossover: a random slice of parent1 is copied into an
        empty child at the same positions and the gaps are filled position-wise
        from parent2.  Example (* = None):

            parent1: 0123456789
            parent2: 5487961320
            cut 0..4 -> child 01234*****  -> filled 0123458796
        '''
        child_Tree = Tree(self.list_files, prufer=[None for _ in range(self.nodeNum - 2)])
        # Two random cut indices of parent1; order does not matter.
        start_pos = random.randint(0, len(parent1.prufer))
        end_pos = random.randint(0, len(parent1.prufer))
        if start_pos > end_pos:
            start_pos, end_pos = end_pos, start_pos
        # Copy the parent1 segment into the child.
        for x in range(start_pos, end_pos):
            child_Tree.prufer[x] = parent1.prufer[x]
        # Complete the remaining positions from parent2.
        for i in range(len(parent2.prufer)):
            if child_Tree.prufer[i] is None:
                child_Tree.prufer[i] = parent2.prufer[i]
        child_Tree.calc_fit()
        return child_Tree

    def mutate(self, tree_mut):
        '''
        Tree() --> Tree()

        With probability ``k_mut_prob``, swaps two random genes of the Prufer
        sequence; otherwise returns an unchanged copy.  Fitness is refreshed
        either way.  (Debug ``print`` calls of the chosen indices removed.)
        '''
        tmp_tree = copy.deepcopy(tree_mut)
        if random.random() < self.k_mut_prob:
            mut_pos1 = random.randint(0, len(tmp_tree.prufer) - 1)
            mut_pos2 = random.randint(0, len(tmp_tree.prufer) - 1)
            tmp_tree = GA.swap(tmp_tree, mut_pos1, mut_pos2)
        # Keep the cached fitness consistent with the (possibly new) sequence.
        tmp_tree.calc_fit()
        return tmp_tree

    def mutate_swap(self, tree_mut):
        '''
        Tree() --> Tree()

        First-improvement local search: with probability ``k_mut_prob``,
        scans gene pairs (i, j) in order and applies the first swap that
        strictly lowers the fitness, then stops.  Returns a copy; the input
        tree is untouched.
        '''
        tree = copy.deepcopy(tree_mut)
        tree.calc_fit()
        if random.random() < self.k_mut_prob:
            lenn = len(tree.prufer)
            improved = False
            for i in range(lenn):
                for j in range(lenn):
                    tmp_tree = GA.swap(tree, i, j)
                    tmp_tree.calc_fit()
                    if tree.fitness > tmp_tree.fitness:
                        tree = tmp_tree
                        improved = True
                        break
                if improved:
                    break
        tree.calc_fit()
        return tree

    def mutate_swap_nerfed(self, tree_mut):
        '''
        Tree() --> Tree()

        Cheaper variant of :meth:`mutate_swap`: only about ``nodeNum / 3``
        random source indices are explored.  Every candidate swap is recorded
        and the fittest candidate is returned, stopping the scan early once a
        swap improves on the current tree.  Unlike :meth:`mutate_swap`, the
        result may be *worse* than the input when no candidate improves it,
        which keeps some diversity in the population.
        '''
        tree = copy.deepcopy(tree_mut)
        tree.calc_fit()
        lenn = len(tree.prufer)
        if random.random() < self.k_mut_prob:
            list_indices = list(set(random.choices(list(range(lenn)), k=int(self.nodeNum / 3))))
            allposs = []
            found = False
            for i in list_indices:
                for j in range(lenn):
                    tmp_tree = GA.swap(tree, i, j)
                    tmp_tree.calc_fit()
                    allposs.append(tmp_tree)
                    if tree.fitness > tmp_tree.fitness:
                        found = True
                        break
                if found:
                    break
            # The fittest candidate is at least as good as the first improving
            # swap (which is itself part of ``allposs``).  Guard against an
            # empty sample (tiny instances) instead of crashing on [][0].
            if allposs:
                tree = min(allposs, key=lambda x: x.fitness)
        tree.calc_fit()
        return tree

    def fittest_swap(self, tree_mut):
        '''
        Tree() --> Tree()

        Exhaustive best-improvement local search used to polish the best
        trees of a generation: for every index i, all swaps (i, j) are
        evaluated in parallel and the fittest resulting tree is kept before
        moving to the next index.  Runs with probability ``k_mut_prob``.
        '''
        tree = copy.deepcopy(tree_mut)
        tree.calc_fit()
        lenn = len(tree.prufer)

        def toparall(i, j):
            # Evaluate a single candidate swap (executed in a joblib worker).
            tmp_tree = GA.swap(tree, i, j)
            tmp_tree.calc_fit()
            return tmp_tree

        if random.random() < self.k_mut_prob:
            for i in range(lenn):
                possibilities = joblib.Parallel(n_jobs=-1)(delayed(toparall)(i, j) for j in range(lenn))
                # Keep the best tree found so far; ``toparall`` closes over
                # ``tree`` so the next row starts from this improved tree.
                tree = min(possibilities, key=lambda x: x.fitness)
        return tree

    def mutate_2opt(self, tree_mut):
        '''
        Tree() --> Tree()

        2-opt style mutation: for about ``nodeNum / 4`` random indices i,
        every segment reversal (i, j) is evaluated in parallel and the
        overall fittest tree is kept.  The best reversal is taken even when
        it is not an improvement, so the result may be worse than the input.
        Runs with probability ``k_mut_prob``.
        '''
        tree = copy.deepcopy(tree_mut)
        tree.calc_fit()
        lenn = len(tree.prufer)

        def parallel(i, j):
            # Evaluate one candidate reversal (executed in a joblib worker).
            # Relies on swap_2opt working on a copy -- see its bug-fix note.
            tmp_tree = GA.swap_2opt(tree, i, j)
            tmp_tree.calc_fit()
            return tmp_tree

        if random.random() < self.k_mut_prob:
            list_indices = list(set(random.choices(list(range(lenn)), k=int(self.nodeNum / 4))))
            allposs = []
            for i in list_indices:
                possibilities = joblib.Parallel(n_jobs=-1)(delayed(parallel)(i, j) for j in range(lenn))
                allposs.append(min(possibilities, key=lambda x: x.fitness))
            # Guard against an empty sample instead of crashing on [][0].
            if allposs:
                tree = min(allposs, key=lambda x: x.fitness)
        tree.calc_fit()
        return tree

    def change_three_bests(self, population):
        """
        TreePop() --> TreePop()

        Runs the exhaustive :meth:`fittest_swap` search on the best trees of
        the (sorted) population until five *distinct* improved trees were
        produced or the population is exhausted.

        NOTE(review): the name says "three" but the loop targets five
        distinct trees (``cpt < 5``) -- kept as-is, confirm the intent.
        """
        population.sort_treepop()
        cpt = 0
        k = 0
        lenn = len(population.tree_pop)
        done = []
        while cpt < 5 and k < lenn:
            population.tree_pop[k] = self.fittest_swap(population.tree_pop[k])
            # Only count trees we have not already produced.
            if population.tree_pop[k] not in done:
                cpt += 1
                done.append(population.tree_pop[k])
            k += 1
        return population

    def evolve_population(self, init_pop):
        '''
        TreePop() --> TreePop()

        Produces the next generation: optional elitism, tournament selection,
        k-point crossover, then parallel mutation of every individual
        (including the elite) with probability ``k_mut_prob`` each.

        :raises ValueError: if ``self.method`` is neither "swap" nor "opt"
        '''
        descendant_pop = TreePop(list_files=self.list_files, size=init_pop.size, initialise=True)
        # Elitism offset: number of trees carried over unchanged.
        elitismOffset = 0
        if self.elitism:
            descendant_pop.tree_pop[0] = init_pop.fittest
            elitismOffset = 1
        # Fill the rest of the population with children of tournament winners.
        for x in range(elitismOffset, descendant_pop.size):
            tournament_parent1 = self.tournament_select(init_pop)[0]
            tournament_parent2 = self.tournament_select(init_pop)[0]
            # Make sure the two parents are different individuals.
            while tournament_parent2 == tournament_parent1:
                tournament_parent2 = self.tournament_select(init_pop)[0]
            descendant_pop.tree_pop[x] = self.crossover_kpoint(tournament_parent1, tournament_parent2)
        # Pick the mutation operator; fail loudly on an unknown method
        # (the original raised UnboundLocalError further down instead).
        if self.method == "swap":
            func = self.mutate_swap_nerfed
        elif self.method == "opt":
            func = self.mutate_2opt
        else:
            raise ValueError("unknown mutation method: %r" % self.method)
        descendant_pop.tree_pop = Parallel(n_jobs=-1)(
            delayed(func)(descendant_pop.tree_pop[tre_ind])
            for tre_ind in range(len(descendant_pop.tree_pop))
        )
        # Update the cached fittest tree of the new population.
        descendant_pop.get_fittest()
        return descendant_pop
| [
"alexandre.allani@telecom-bretagne.eu"
] | alexandre.allani@telecom-bretagne.eu |
bb8c7aede0462de9cd8180f39a0e1b02e5216735 | d3c4848338fe8a36a307c955e8a96f32fc880019 | /tests/test_selenium_common.py | f430bf2ca7c458bc29d650063b205594ee3e569e | [
"MIT"
] | permissive | panarahc/product-database | d111555f5f801c18a7a46c7fd3a2173149d8acd3 | af48bc3e580e3bd7b499990bb7c51aabed242f71 | refs/heads/master | 2023-04-19T16:09:08.115666 | 2021-01-17T22:23:45 | 2021-01-17T22:23:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,371 | py | """
Test suite for the selenium test cases
"""
import os
import pytest
import time
import re
from django.urls import reverse
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from tests import BaseSeleniumTest
@pytest.mark.online
@pytest.mark.selenium
class TestCommonFunctions(BaseSeleniumTest):
    """End-to-end checks for global site behavior: login-only mode and password change."""
    def test_login_only_mode(self, browser, liveserver):
        """Enable the "login only" setting as admin, verify anonymous access is
        blocked everywhere except shared product lists, then revert the setting
        and clean up the product list created during the test."""
        self.api_helper.drop_all_data(liveserver)
        self.api_helper.load_base_test_data(liveserver)
        # open the homepage
        browser.get(liveserver + reverse("productdb:home"))
        expected_homepage_text = "This database contains information about network equipment like routers and " \
                                 "switches from multiple vendors."
        assert expected_homepage_text in browser.find_element_by_tag_name("body").text
        # Login as superuser - verify, that the "continue without login" button is visible
        browser.find_element_by_id("navbar_login").click()
        time.sleep(3)
        expected_login_continue_text = "continue without login"
        assert expected_login_continue_text in browser.find_element_by_tag_name("body").text
        # login as superuser
        browser.find_element_by_id("username").send_keys(self.ADMIN_USERNAME)
        browser.find_element_by_id("password").send_keys(self.ADMIN_PASSWORD)
        browser.find_element_by_id("login_button").click()
        time.sleep(3)
        # change settings to login only mode and save settings
        browser.find_element_by_id("navbar_admin").click()
        browser.find_element_by_id("navbar_admin_settings").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "Settings")
        browser.find_element_by_id("id_login_only_mode").click()
        browser.find_element_by_id("submit").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "Settings saved successfully")
        # go to the Product Database Homepage - it must be visible
        browser.get(liveserver + reverse("productdb:home"))
        self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_homepage_text)
        # create the product list for the test case
        test_pl_name = "LoginOnly Product List"
        test_pl_description = "A sample description for the Product List."
        test_pl_product_list_ids = "C2960X-STACK;CAB-ACE\nWS-C2960-24TT-L;WS-C2960-24TC-S"
        test_pl_product_list_id = "C2960X-STACK"
        browser.find_element_by_id("product_list_link").click()
        WebDriverWait(browser, 10).until(EC.presence_of_element_located((
            By.XPATH,
            "id('product_list_table_wrapper')")
        ))
        browser.find_element_by_xpath("//button[span='Add New']").click()
        WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.ID, "id_name")))
        browser.find_element_by_id("id_name").send_keys(test_pl_name)
        browser.find_element_by_id("id_description").send_keys(test_pl_description)
        browser.find_element_by_id("id_string_product_list").send_keys(test_pl_product_list_ids)
        browser.find_element_by_id("id_vendor").send_keys("C")
        browser.find_element_by_id("submit").click()
        WebDriverWait(browser, 10).until(EC.presence_of_element_located((
            By.XPATH,
            "id('product_list_table_wrapper')")
        ))
        # logout - the login screen is visible
        browser.find_element_by_id("navbar_loggedin").click()
        browser.find_element_by_id("navbar_loggedin_logout").click()
        expected_login_text = "Please enter your credentials below."
        self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_login_text)
        # go manually to the Product Database Homepage - you must be redirected to the login screen
        browser.get(liveserver + reverse("productdb:home"))
        self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_login_text)
        # verify, that the "continue without login" button is not visible
        assert expected_login_continue_text not in browser.find_element_by_tag_name("body").text
        # the product list must be reachable, even when in login only mode
        pl = self.api_helper.get_product_list_by_name(liveserver, test_pl_name)
        browser.get(liveserver + reverse("productdb:share-product_list", kwargs={"product_list_id": pl["id"]}))
        # verify some basic attributes of the page
        body = browser.find_element_by_tag_name("body").text
        assert test_pl_name in body
        assert test_pl_description in body
        assert test_pl_product_list_id in body
        assert "maintained by %s" % self.ADMIN_DISPLAY_NAME in body
        assert "%s</a>" % test_pl_product_list_id not in body, \
            "Link to Product Details should not be available"
        # login as API user
        browser.get(liveserver + reverse("productdb:home"))
        browser.find_element_by_id("username").send_keys(self.API_USERNAME)
        browser.find_element_by_id("password").send_keys(self.API_PASSWORD)
        browser.find_element_by_id("login_button").click()
        time.sleep(3)
        # the Product Database Homepage must be visible
        assert expected_homepage_text in browser.find_element_by_tag_name("body").text
        # disable the login only mode (requires the admin account again)
        browser.find_element_by_id("navbar_loggedin").click()
        browser.find_element_by_id("navbar_loggedin_logout").click()
        browser.find_element_by_id("username").send_keys(self.ADMIN_USERNAME)
        browser.find_element_by_id("password").send_keys(self.ADMIN_PASSWORD)
        browser.find_element_by_id("login_button").click()
        time.sleep(3)
        browser.find_element_by_id("navbar_admin").click()
        browser.find_element_by_id("navbar_admin_settings").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "Settings")
        assert "Settings" in browser.find_element_by_tag_name("body").text
        browser.find_element_by_id("id_login_only_mode").click()
        browser.find_element_by_id("submit").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "Settings saved successfully")
        # delete the new product list (cleanup so the test leaves no state behind)
        browser.get(liveserver + reverse("productdb:list-product_lists"))
        browser.find_element_by_xpath("id('product_list_table')/tbody/tr[1]/td[2]").click()
        time.sleep(1)
        browser.find_element_by_xpath("//button[span='Delete Selected']").click()
        time.sleep(3)
        body = browser.find_element_by_tag_name("body").text
        assert "Delete Product List" in body
        browser.find_element_by_name("really_delete").click()
        browser.find_element_by_id("submit").click()
        time.sleep(3)
        # verify that the product list is deleted
        body = browser.find_element_by_tag_name("body").text
        assert test_pl_description not in body
        assert "Product List %s successfully deleted." % test_pl_name in body
        # end session
        self.logout_user(browser)
    def test_change_password(self, browser, liveserver):
        """
        test change password procedure with a different user (part of the selenium_tests fixture)
        """
        self.api_helper.drop_all_data(liveserver)
        self.api_helper.load_base_test_data(liveserver)
        # login as the default API user
        browser.get(liveserver + reverse("login"))
        browser.find_element_by_id("username").send_keys("testpasswordchange")
        browser.find_element_by_id("password").send_keys("api")
        browser.find_element_by_id("login_button").click()
        time.sleep(3)
        # go to the change password dialog
        browser.find_element_by_id("navbar_loggedin").click()
        browser.find_element_by_id("navbar_loggedin_change_password").click()
        time.sleep(3)
        assert "Old password" in browser.find_element_by_tag_name("body").text
        # change the password to api1234
        browser.find_element_by_id("id_old_password").send_keys("api")
        browser.find_element_by_id("id_new_password1").send_keys("api1234")
        browser.find_element_by_id("id_new_password2").send_keys("api1234")
        browser.find_element_by_id("submit").click()
        time.sleep(3)
        assert "Password change successful" in browser.find_element_by_tag_name("body").text
        # logout
        browser.find_element_by_id("navbar_loggedin").click()
        browser.find_element_by_id("navbar_loggedin_logout").click()
        time.sleep(3)
        expected_login_text = "Please enter your credentials below."
        assert expected_login_text in browser.find_element_by_tag_name("body").text
        # login with new password
        browser.find_element_by_id("username").send_keys("testpasswordchange")
        browser.find_element_by_id("password").send_keys("api1234")
        browser.find_element_by_id("login_button").click()
        time.sleep(3)
        # the Product Database Homepage must be visible
        expected_text = "This database contains information about network equipment like routers and " \
                        "switches from multiple vendors."
        assert expected_text in browser.find_element_by_tag_name("body").text
        # end session
        self.logout_user(browser)
@pytest.mark.selenium
class TestUserProfile(BaseSeleniumTest):
    """End-to-end checks for the user profile page (preferred vendor, eMail, regex search)."""
    def test_preferred_vendor_user_profile(self, browser, liveserver):
        """Verify the default preferred vendor and change it via the user profile."""
        self.api_helper.drop_all_data(liveserver)
        self.api_helper.load_base_test_data(liveserver)
        browser.get(liveserver + reverse("productdb:home"))
        # verify the vendor selection if the user is not logged in
        browser.find_element_by_id("nav_browse").click()
        browser.find_element_by_id("nav_browse_all_vendor_products").click()
        assert "Browse Products by Vendor" in browser.find_element_by_class_name("page-header").text, \
            "Should view the Browse Product by Vendor page"
        # login
        browser.find_element_by_id("navbar_login").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "Please enter your credentials below.")
        homepage_message = "Browse Products by Vendor"
        self.login_user(browser, self.API_USERNAME, self.API_PASSWORD, homepage_message)
        # verify the selected default vendor
        pref_vendor_select = browser.find_element_by_id("vendor_selection")
        assert "Cisco Systems" in pref_vendor_select.text, "selected by default"
        # view the edit settings page
        browser.find_element_by_id("navbar_loggedin").click()
        browser.find_element_by_id("navbar_loggedin_user_profile").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "Edit User Profile")
        # verify that the vendor with the ID 1 is selected
        pref_vendor_select = browser.find_element_by_id("id_preferred_vendor")
        assert "Cisco Systems" in pref_vendor_select.text
        pref_vendor_select = Select(pref_vendor_select)
        # change the vendor selection
        changed_vendor_name = "Juniper Networks"
        pref_vendor_select.select_by_visible_text(changed_vendor_name)
        browser.find_element_by_id("submit").send_keys(Keys.ENTER)
        # redirect to the Browse Products by Vendor
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "Browse Products by Vendor")
        # verify that the new default vendor is selected
        pref_vendor_select = browser.find_element_by_id("vendor_selection")
        assert changed_vendor_name in pref_vendor_select.text
        # end session
        self.logout_user(browser)
    def test_email_change_in_user_profile(self, browser, liveserver):
        """
        use separate user from the selenium_tests fixture
        """
        self.api_helper.drop_all_data(liveserver)
        self.api_helper.load_base_test_data(liveserver)
        browser.get(liveserver + reverse("productdb:home"))
        # login
        browser.find_element_by_id("navbar_login").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "Please enter your credentials below.")
        homepage_message = "This database contains information about network equipment like routers and switches " \
                           "from multiple vendors."
        self.login_user(browser, "testuserprofilemail", self.API_PASSWORD, homepage_message)
        # view the edit settings page
        browser.find_element_by_id("navbar_loggedin").click()
        browser.find_element_by_id("navbar_loggedin_user_profile").click()
        assert "api@localhost.localhost" in browser.find_element_by_id("id_email").get_attribute('value')
        # change email
        new_email = "a@b.com"
        browser.find_element_by_id("id_email").clear()
        browser.find_element_by_id("id_email").send_keys(new_email)
        browser.find_element_by_id("submit").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, homepage_message)
        # verify redirect to homepage
        assert "User Profile successful updated" in browser.find_element_by_tag_name("body").text, \
            "Should view a message that the user profile was saved"
        # verify new value in email address
        browser.find_element_by_id("navbar_loggedin").click()
        browser.find_element_by_id("navbar_loggedin_user_profile").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "Edit User Profile")
        assert new_email in browser.find_element_by_id("id_email").get_attribute('value'), \
            "Show view the correct email address of the user (%s)" % new_email
        # end session
        self.logout_user(browser)
    def test_search_option_in_user_profile(self, browser, liveserver):
        """
        use separate user from the selenium_tests fixture
        """
        self.api_helper.drop_all_data(liveserver)
        self.api_helper.load_base_test_data(liveserver)
        search_term = "WS-C2960X-24T(D|S)"
        browser.get(liveserver + reverse("productdb:home"))
        # login
        homepage_message = "This database contains information about network equipment like routers and switches " \
                           "from multiple vendors."
        browser.find_element_by_id("navbar_login").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "Please enter your credentials below.")
        self.login_user(browser, "testregexsession", self.API_PASSWORD, homepage_message)
        # go to the all products view
        expected_content = "On this page, you can view all products that are stored in the database."
        browser.find_element_by_id("nav_browse").click()
        browser.find_element_by_id("nav_browse_all_products").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_content)
        # try to search for the product (regex search is disabled by default)
        browser.find_element_by_id("column_search_Product ID").send_keys(search_term)
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "No matching records found")
        # enable the regular expression search feature in the user profile
        browser.find_element_by_id("navbar_loggedin").click()
        browser.find_element_by_id("navbar_loggedin_user_profile").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "Contact eMail:")
        expected_content = "On this page, you can view all products that are stored in the database."
        browser.find_element_by_id("id_regex_search").click()
        browser.find_element_by_id("submit").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_content)
        browser.find_element_by_id("column_search_Product ID").send_keys(search_term)
        time.sleep(3)
        # with regex search enabled, both variants must match the pattern
        assert "WS-C2960X-24TS" in browser.find_element_by_tag_name("body").text, \
            "Should show no results (regular expression is used but by default not enabled)"
        assert "WS-C2960X-24TD" in browser.find_element_by_tag_name("body").text, \
            "Should show no results (regular expression is used but by default not enabled)"
        # end session
        self.logout_user(browser)
@pytest.mark.selenium
class TestProductLists(BaseSeleniumTest):
    """End-to-end CRUD check for the Product List feature."""
    def test_product_list(self, browser, liveserver):
        """Create, view, edit and delete a product list as admin; verify the
        add/edit/delete controls are hidden for anonymous visitors."""
        self.api_helper.drop_all_data(liveserver)
        self.api_helper.load_base_test_data(liveserver)
        add_button_xpath = "//button[span='Add New']"
        edit_button_xpath = "//button[span='Edit Selected']"
        delete_button_xpath = "//button[span='Delete Selected']"
        test_pl_name = "Test Product List"
        test_pl_description = "A sample description for the Product List."
        test_pl_product_list_ids = "C2960X-STACK;CAB-ACE\nWS-C2960-24TT-L;WS-C2960-24TC-S"
        test_pl_product_list_id = "C2960X-STACK"
        # open the homepage
        browser.get(liveserver + reverse("productdb:home"))
        # go to product list view
        browser.find_element_by_id("nav_browse").click()
        browser.find_element_by_id("nav_browse_all_product_lists").click()
        time.sleep(3)
        # verify that the add, edit and delete button is not visible
        body = browser.find_element_by_tag_name("body").text
        assert "Add New" not in body
        assert "Edit Selected" not in body
        assert "Delete Selected" not in body
        # login to the page as admin user
        browser.find_element_by_id("navbar_login").click()
        time.sleep(3)
        self.login_user(browser, self.ADMIN_USERNAME, self.ADMIN_PASSWORD, "All Product Lists")
        # verify that the add, edit and delete buttons are visible
        body = browser.find_element_by_tag_name("body").text
        assert "Add New" in body
        assert "Edit Selected" in body
        assert "Delete Selected" in body
        # create a new product list
        browser.find_element_by_xpath(add_button_xpath).click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "Add Product List")
        browser.find_element_by_id("id_name").send_keys(test_pl_name)
        browser.find_element_by_id("id_description").send_keys(test_pl_description)
        browser.find_element_by_id("id_string_product_list").send_keys(test_pl_product_list_ids)
        browser.find_element_by_id("id_vendor").send_keys("C")
        browser.find_element_by_id("submit").click()
        self.wait_for_text_to_be_displayed_in_body_tag(browser, "All Product Lists")
        assert test_pl_name in browser.find_element_by_tag_name("body").text
        # view the newly created product list
        browser.find_element_by_link_text(test_pl_name).click()
        time.sleep(3)
        body = browser.find_element_by_tag_name("body").text
        assert test_pl_name in body
        assert test_pl_description in body
        assert test_pl_product_list_id in body
        assert "maintained by %s" % self.ADMIN_DISPLAY_NAME in body
        assert browser.find_element_by_link_text(test_pl_product_list_id) is not None, \
            "Link to Product Details should be available"
        # go back to the product list overview
        browser.find_element_by_id("_back").click()
        # edit the new product list (select the first table row)
        browser.find_element_by_xpath("id('product_list_table')/tbody/tr[1]/td[2]").click()
        time.sleep(3)
        browser.find_element_by_xpath(edit_button_xpath).click()
        time.sleep(3)
        browser.find_element_by_id("id_description").send_keys(" EDITED")
        test_pl_description += " EDITED"
        browser.find_element_by_id("submit").click()
        time.sleep(3)
        body = browser.find_element_by_tag_name("body").text
        assert test_pl_description in body
        # delete the new product list
        browser.find_element_by_xpath("id('product_list_table')/tbody/tr[1]/td[2]").click()
        time.sleep(1)
        browser.find_element_by_xpath(delete_button_xpath).click()
        time.sleep(3)
        body = browser.find_element_by_tag_name("body").text
        assert "Delete Product List" in body
        browser.find_element_by_name("really_delete").click()
        browser.find_element_by_id("submit").click()
        time.sleep(3)
        # verify that the product list is deleted
        body = browser.find_element_by_tag_name("body").text
        assert test_pl_description not in body
        assert "Product List %s successfully deleted." % test_pl_name in body
@pytest.mark.selenium
class TestProductDatabaseViews(BaseSeleniumTest):
def test_search_on_homepage(self, browser, liveserver):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# navigate to the homepage
browser.get(liveserver + reverse("productdb:home"))
browser.find_element_by_id("search_text_field").send_keys("WS-C2960X-24")
browser.find_element_by_id("submit_search").click()
# verify page by page title
assert "All Products" in browser.find_element_by_tag_name("body").text
time.sleep(2)
# test table content
expected_table_content = """Vendor Product ID Description List Price Lifecycle State"""
contain_table_rows = [
"Cisco Systems WS-C2960X-24PD-L Catalyst 2960-X 24 GigE PoE 370W, 2 x 10G SFP+, LAN Base 4595.00 USD",
"Cisco Systems WS-C2960X-24PS-L Catalyst 2960-X 24 GigE PoE 370W, 4 x 1G SFP, LAN Base 3195.00 USD",
]
not_contain_table_rows = [
"Juniper Networks"
]
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
for r in contain_table_rows:
assert r in table.text
for r in not_contain_table_rows:
assert r not in table.text
def test_product_group_view(self, browser, liveserver):
# End-to-end flow: browse product groups, filter by column, drill down
# into a group, then into a product detail page, and verify that the
# optional "Internal Product ID" only appears once it is set via the API.
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# navigate to the homepage
browser.get(liveserver + reverse("productdb:home"))
# go to the "All Product Groups" view
browser.find_element_by_id("nav_browse").click()
browser.find_element_by_id("nav_browse_all_product_groups").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "All Product Groups")
# test table content
expected_table_content = """Vendor\nName"""
table_rows = [
'Cisco Systems Catalyst 3850',
'Cisco Systems Catalyst 2960X',
'Cisco Systems Catalyst 2960',
'Juniper Networks EX2200',
]
table = browser.find_element_by_id('product_group_table')
self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_table_content)
for r in table_rows:
assert r in table.text
# search product group by vendor column
table_rows = [
'Juniper Networks EX2200',
]
browser.find_element_by_id("column_search_Vendor").send_keys("Juni")
table = browser.find_element_by_id('product_group_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
browser.find_element_by_id("column_search_Vendor").clear()
# search product group by name column
table_rows = [
'Cisco Systems Catalyst 3850',
'Cisco Systems Catalyst 2960X',
'Cisco Systems Catalyst 2960',
]
browser.find_element_by_id("column_search_Name").send_keys("yst")
time.sleep(2)
table = browser.find_element_by_id('product_group_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
browser.find_element_by_id("column_search_Name").clear()
time.sleep(2)
# click on the "Catalyst 2960X" link
browser.find_element_by_partial_link_text("Catalyst 2960X").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Catalyst 2960X Product Group details")
# verify table content
expected_table_content = """Product ID\nDescription\nList Price Lifecycle State"""
table_rows = [
'C2960X-STACK',
'CAB-ACE',
'CAB-STK-E-0.5M',
]
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
# search products by description column
table_rows = [
'WS-C2960X-24PD-L',
'WS-C2960X-24TD-L',
]
browser.find_element_by_id("column_search_Description").send_keys("2 x")
table = browser.find_element_by_id('product_table')
assert expected_table_content in table.text
for r in table_rows:
assert r in table.text
browser.find_element_by_id("column_search_Description").clear()
time.sleep(2)
# open detail page
browser.find_element_by_partial_link_text("C2960X-STACK").click()
detail_link = browser.current_url
self.wait_for_text_to_be_displayed_in_body_tag(browser, "C2960X-STACK Product details")
# verify that the "Internal Product ID" is not visible (because not set)
assert "Internal Product ID" not in browser.find_element_by_tag_name("body").text
# add an internal product ID and verify that it is visible
test_internal_product_id = "123456789-abcdef"
p = self.api_helper.update_product(liveserver_url=liveserver, product_id="C2960X-STACK",
internal_product_id=test_internal_product_id)
browser.get(liveserver + reverse("productdb:product-detail", kwargs={"product_id": p["id"]}))
page_text = browser.find_element_by_tag_name("body").text
assert "Internal Product ID" in page_text
assert test_internal_product_id in page_text
# end session
# NOTE(review): no explicit login happens in this test; logout_user()
# presumably tolerates an anonymous session — confirm in BaseSeleniumTest.
self.logout_user(browser)
def test_add_notification_message(self, browser, liveserver):
# An admin logs in, creates a notification message through the admin
# menu and verifies that the message is shown on the site afterwards.
# go to the Product Database Homepage
browser.get(liveserver + reverse("productdb:home"))
browser.find_element_by_id("navbar_login").click()
time.sleep(3)
expected_homepage_text = "This database contains information about network equipment like routers and " \
"switches from multiple vendors."
self.login_user(
browser,
expected_content=expected_homepage_text,
username=self.ADMIN_USERNAME,
password=self.ADMIN_PASSWORD
)
# add a new notification message
browser.find_element_by_id("navbar_admin").click()
browser.find_element_by_id("navbar_admin_notification_message").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Add Notification Message")
# add content
title = "My message title"
summary_message = "summary message"
detailed_message = "detailed message"
browser.find_element_by_id("id_title").send_keys(title)
browser.find_element_by_id("id_summary_message").send_keys(summary_message)
browser.find_element_by_id("id_detailed_message").send_keys(detailed_message)
browser.find_element_by_id("submit").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, title)
assert summary_message in browser.find_element_by_tag_name("body").text
# end session
self.logout_user(browser)
def test_browse_products_view(self, browser, liveserver):
    """Browse the per-vendor product table and switch between vendors.

    Checks the export buttons, the paging footer, the default
    (Cisco Systems) table content, navigation to a product detail page
    and switching the vendor selection to Juniper Networks.
    """
    self.api_helper.drop_all_data(liveserver)
    self.api_helper.load_base_test_data(liveserver)
    expected_cisco_row = "C2960X-STACK Catalyst 2960-X FlexStack Plus Stacking Module 1195.00 USD"
    expected_juniper_row = "EX-SFP-1GE-LX SFP 1000Base-LX Gigabit Ethernet Optics, 1310nm for " \
                           "10km transmission on SMF 1000.00 USD"
    default_vendor = "Cisco Systems"
    # a user hits the browse product list url
    browser.get(liveserver + reverse("productdb:browse_vendor_products"))
    time.sleep(5)
    # check that the user sees a table
    page_text = browser.find_element_by_tag_name('body').text
    assert "Showing 1 to" in page_text
    # the user sees a selection field, where the value "Cisco Systems" is selected
    pl_selection = browser.find_element_by_id("vendor_selection")
    assert default_vendor in pl_selection.text
    # the table provides Copy/CSV/PDF/Excel export buttons
    dt_buttons = browser.find_element_by_class_name("dt-buttons")
    assert "PDF" == dt_buttons.find_element_by_xpath("//button[span='PDF']").text
    assert "Copy" == dt_buttons.find_element_by_xpath("//button[span='Copy']").text
    assert "CSV" == dt_buttons.find_element_by_xpath("//button[span='CSV']").text
    assert "Excel" == dt_buttons.find_element_by_xpath("//button[span='Excel']").text
    # the paging footer shows "Showing 1 to 10 of \d+ entries"
    dt_wrapper = browser.find_element_by_id("product_table_info")
    assert re.match(r"Showing 1 to \d+ of \d+ entries", dt_wrapper.text) is not None
    # the table contains the expected Cisco row
    table = browser.find_element_by_id('product_table')
    rows = table.find_elements_by_tag_name('tr')
    assert expected_cisco_row in [row.text for row in rows]
    # navigate to a detail view
    link = browser.find_element_by_link_text("PWR-C1-350WAC")
    browser.execute_script("return arguments[0].scrollIntoView();", link)
    time.sleep(1)
    test_product_id = "WS-C2960-24LT-L"
    browser.find_element_by_link_text(test_product_id).click()
    self.wait_for_text_to_be_displayed_in_body_tag(browser, "%s Product details" % test_product_id)
    # reopen the browse vendor products table
    browser.get(liveserver + reverse("productdb:browse_vendor_products"))
    time.sleep(5)
    # the vendor selection still defaults to "Cisco Systems"
    pl_selection = browser.find_element_by_id("vendor_selection")
    assert default_vendor in pl_selection.text
    pl_selection = Select(pl_selection)
    # the user chooses the list named "Juniper Networks" and submits the form
    pl_selection.select_by_visible_text("Juniper Networks")
    browser.find_element_by_id("submit").send_keys(Keys.ENTER)
    self.wait_for_text_to_be_displayed_in_body_tag(browser, "EX-SFP-1GE-LX")
    # Retry a few times: the table is re-rendered asynchronously, so the
    # rows are re-read on every attempt before giving up.
    # Bug fix: the previous code assigned a *tuple* to `match`
    # (always truthy), so the membership test was never performed and the
    # retry/failure path was dead code.
    match = False
    for _ in range(3):
        table = browser.find_element_by_id('product_table')
        rows = table.find_elements_by_tag_name('tr')
        match = expected_juniper_row in [row.text for row in rows]
        if match:
            break
        time.sleep(3)
    if not match:
        pytest.fail("Element not found")
def test_browse_products_view_csv_export(self, browser, liveserver, test_download_dir):
# The CSV export of the per-vendor product table must download a
# semicolon-delimited file with the expected header row.
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# a user hits the browse product list url
browser.get(liveserver + reverse("productdb:browse_vendor_products"))
# the user sees a selection field, where the value "Cisco Systems" is selected
vendor_name = "Cisco Systems"
pl_selection = browser.find_element_by_id("vendor_selection")
assert vendor_name in pl_selection.text
# the user hits the button CSV
dt_buttons = browser.find_element_by_class_name("dt-buttons")
dt_buttons.find_element_by_xpath("//button[span='CSV']").click()
# the file should download automatically (firefox is configured this way)
time.sleep(2)
# verify that the file is a CSV formatted field (with ";" as delimiter)
file = os.path.join(test_download_dir, "export products - %s.csv" % vendor_name)
with open(file, "r+", encoding="utf-8") as f:
assert "Product ID;Description;List Price;Lifecycle State\n" == f.readline()
def test_search_function_on_browse_vendor_products_view(self, browser, liveserver):
    """Exercise the global search box and every per-column search field
    on the per-vendor products table (with the optional "Internal
    Product ID" and "Product Group" columns enabled)."""
    self.api_helper.drop_all_data(liveserver)
    self.api_helper.load_base_test_data(liveserver)
    # a user hits the browse product list url
    browser.get(liveserver + reverse("productdb:browse_vendor_products"))
    time.sleep(5)
    # he enters a search term in the search box
    search_term = "WS-C2960X-24P"
    search_xpath = '//div[@class="col-sm-4"]/div[@id="product_table_filter"]/label/input[@type="search"]'
    search = browser.find_element_by_xpath(search_xpath)
    search.send_keys(search_term)
    time.sleep(3)
    # show the optional "Internal Product ID" and "Product Group" columns
    dt_buttons = browser.find_element_by_class_name("dt-buttons")
    dt_buttons.find_element_by_xpath("//button[span='show additional columns ']").click()
    browser.find_element_by_link_text("Internal Product ID").click()
    browser.find_element_by_link_text("Product Group").click()
    # the table performs the search function and a defined amount of rows is displayed
    expected_table_content = "Product ID Product Group Description " \
                             "List Price Lifecycle State Internal Product ID"
    table_rows = [
        "WS-C2960X-24PD-L Catalyst 2960X Catalyst 2960-X 24 GigE PoE 370W, 2 x 10G SFP+, "
        "LAN Base 4595.00 USD 2960x-24pd-l",
        "WS-C2960X-24PS-L Catalyst 2960X Catalyst 2960-X 24 GigE PoE 370W, 4 x 1G SFP, "
        "LAN Base 3195.00 USD 2960x-24ps-l"
    ]
    table = browser.find_element_by_id('product_table')
    assert expected_table_content in table.text
    for r in table_rows:
        assert r in table.text
    browser.find_element_by_xpath(search_xpath).clear()
    time.sleep(1)
    # search product by column (contains)
    browser.find_element_by_id("column_search_Product ID").send_keys("WS-C2960X-24P")
    time.sleep(2)
    table = browser.find_element_by_id('product_table')
    assert expected_table_content in table.text
    for r in table_rows:
        assert r in table.text
    browser.find_element_by_id("column_search_Product ID").clear()
    # search product group by column (contains)
    browser.find_element_by_id("column_search_Product Group").send_keys("2960X")
    time.sleep(2)
    table = browser.find_element_by_id('product_table')
    assert expected_table_content in table.text
    for r in table_rows:
        assert r in table.text
    browser.find_element_by_id("column_search_Product Group").clear()
    # search description by column
    browser.find_element_by_id("column_search_Description").send_keys("10G SFP")
    time.sleep(2)
    table = browser.find_element_by_id('product_table')
    assert expected_table_content in table.text
    assert table_rows[0] in table.text
    browser.find_element_by_id("column_search_Description").clear()
    # search list price by column
    browser.find_element_by_id("column_search_List Price").send_keys("3195")
    time.sleep(2)
    table = browser.find_element_by_id('product_table')
    assert expected_table_content in table.text
    # Bug fix: this previously asserted `r[1]` — the second *character* of
    # the leftover loop variable — which is trivially contained in any
    # table text; the intended check is the second expected row.
    assert table_rows[1] in table.text
    browser.find_element_by_id("column_search_List Price").clear()
def test_browse_all_products_view(self, browser, liveserver):
    """Browse the all-products table (both vendors visible) and open a
    product detail view from it."""
    self.api_helper.drop_all_data(liveserver)
    self.api_helper.load_base_test_data(liveserver)
    expected_cisco_row = "Cisco Systems C2960X-STACK Catalyst 2960-X FlexStack Plus Stacking Module 1195.00 USD"
    expected_juniper_row = "Juniper Networks EX-SFP-1GE-LX SFP 1000Base-LX Gigabit Ethernet Optics, 1310nm for " \
                           "10km transmission on SMF 1000.00 USD"
    # a user hits the browse product list url
    browser.get(liveserver + reverse("productdb:all_products"))
    # check that the user sees a table
    time.sleep(5)
    page_text = browser.find_element_by_tag_name('body').text
    assert "Showing 1 to" in page_text
    # the table provides Copy/CSV/PDF/Excel export buttons
    dt_buttons = browser.find_element_by_class_name("dt-buttons")
    assert "PDF" == dt_buttons.find_element_by_xpath("//button[span='PDF']").text
    assert "Copy" == dt_buttons.find_element_by_xpath("//button[span='Copy']").text
    assert "CSV" == dt_buttons.find_element_by_xpath("//button[span='CSV']").text
    assert "Excel" == dt_buttons.find_element_by_xpath("//button[span='Excel']").text
    # the paging footer shows "Showing 1 to 10 of \d+ entries"
    dt_wrapper = browser.find_element_by_id("product_table_info")
    assert re.match(r"Showing 1 to \d+ of \d+ entries", dt_wrapper.text) is not None
    # the table contains the expected Cisco row
    table = browser.find_element_by_id('product_table')
    rows = table.find_elements_by_tag_name('tr')
    assert expected_cisco_row in [row.text for row in rows]
    # Retry a few times for the Juniper row, re-reading the table on each
    # attempt because it may still be rendering.
    # Bug fix: the previous code assigned a *tuple* to `match`
    # (always truthy), so the membership test was never performed and the
    # retry/failure path was dead code.
    match = False
    for _ in range(3):
        table = browser.find_element_by_id('product_table')
        rows = table.find_elements_by_tag_name('tr')
        match = expected_juniper_row in [row.text for row in rows]
        if match:
            break
        time.sleep(3)
    if not match:
        pytest.fail("Element not found")
    # navigate to a detail view
    test_product_id = "GLC-LH-SMD="
    browser.find_element_by_link_text(test_product_id).click()
    self.wait_for_text_to_be_displayed_in_body_tag(browser, "%s Product details" % test_product_id)
def test_browse_all_products_view_csv_export(self, browser, liveserver, test_download_dir):
# The CSV export of the all-products table must download a
# semicolon-delimited file including the "Vendor" column in the header.
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# a user hits the browse product list url
browser.get(liveserver + reverse("productdb:all_products"))
# the user hits the button CSV
dt_buttons = browser.find_element_by_class_name("dt-buttons")
dt_buttons.find_element_by_xpath("//button[span='CSV']").click()
# the file should download automatically (firefox is configured this way)
time.sleep(2)
# verify that the file is a CSV formatted field (with ";" as delimiter)
file = os.path.join(test_download_dir, "export products.csv")
with open(file, "r+", encoding="utf-8") as f:
assert "Vendor;Product ID;Description;List Price;Lifecycle State\n" == f.readline()
def test_search_function_on_all_products_view(self, browser, liveserver):
    """Exercise the global search box and every per-column search field
    on the all-products table."""
    self.api_helper.drop_all_data(liveserver)
    self.api_helper.load_base_test_data(liveserver)
    # a user hits the browse product list url
    browser.get(liveserver + reverse("productdb:all_products"))
    # he enters a search term in the search box
    search_term = "WS-C2960X-24P"
    search_xpath = '//div[@class="col-sm-4"]/div[@id="product_table_filter"]/label/input[@type="search"]'
    search = browser.find_element_by_xpath(search_xpath)
    search.send_keys(search_term)
    time.sleep(3)
    # the table performs the search function and a defined amount of rows is displayed
    expected_table_content = """Vendor Product ID Description List Price Lifecycle State"""
    table_rows = [
        'WS-C2960X-24PD-L Catalyst 2960-X 24 GigE PoE 370W, 2 x 10G SFP+, LAN Base 4595.00 USD',
        'WS-C2960X-24PS-L Catalyst 2960-X 24 GigE PoE 370W, 4 x 1G SFP, LAN Base 3195.00 USD',
    ]
    table = browser.find_element_by_id('product_table')
    assert expected_table_content in table.text
    for r in table_rows:
        assert r in table.text
    browser.find_element_by_xpath(search_xpath).clear()
    time.sleep(1)
    # search vendor by column
    browser.find_element_by_id("column_search_Vendor").send_keys("Cisco")
    time.sleep(2)
    table = browser.find_element_by_id('product_table')
    assert expected_table_content in table.text
    for r in table_rows:
        assert r in table.text
    browser.find_element_by_id("column_search_Vendor").clear()
    # search product by column
    browser.find_element_by_id("column_search_Product ID").send_keys("WS-C2960X-24P")
    time.sleep(2)
    table = browser.find_element_by_id('product_table')
    assert expected_table_content in table.text
    for r in table_rows:
        assert r in table.text
    browser.find_element_by_id("column_search_Product ID").clear()
    # search description by column
    browser.find_element_by_id("column_search_Description").send_keys("10G SFP")
    time.sleep(2)
    table = browser.find_element_by_id('product_table')
    assert expected_table_content in table.text
    assert table_rows[0] in table.text
    browser.find_element_by_id("column_search_Description").clear()
    # search list price by column
    browser.find_element_by_id("column_search_List Price").send_keys("3195")
    time.sleep(2)
    table = browser.find_element_by_id('product_table')
    assert expected_table_content in table.text
    # Bug fix: this previously asserted `r[1]` — the second *character* of
    # the leftover loop variable — which is trivially contained in any
    # table text; the intended check is the second expected row.
    assert table_rows[1] in table.text
    browser.find_element_by_id("column_search_List Price").clear()
| [
"henry@codingnetworker.com"
] | henry@codingnetworker.com |
cca9f2e5ed6c7cd9fe744913449f05e61d1ed854 | 8a47ab47a101d4b44dd056c92a1763d5fac94f75 | /力扣/简单练习/300-最长上升子序列.py | edecfbee733ea3c1f051716235583aa67c1a5524 | [] | no_license | Clint-cc/Leecode | d5528aa7550a13a5bcf2f3913be2d5db2b5299f3 | 8befe73ab3eca636944800e0be27c179c45e1dbf | refs/heads/master | 2020-09-14T07:35:41.382377 | 2020-07-01T01:27:18 | 2020-07-01T01:27:18 | 223,066,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | # !D:/Code/python
# -*- coding:utf-8 -*-
# @Author : Clint
# @Question : 给定一个无序的整数数组,找到其中最长上升子序列的长度。
def lengthOfLIS(nums):
    """Return the length of the longest strictly increasing subsequence.

    Bug fix: the previous implementation only measured the longest
    *contiguous* non-decreasing run (comparing adjacent elements), not a
    subsequence — e.g. for [1, 2, 5, 3, 7, 11, 18] it returned 4 instead
    of the correct 6 ([1, 2, 5, 7, 11, 18]).  This version uses the
    classic O(n log n) patience-sorting approach: tails[k] holds the
    smallest possible tail value of an increasing subsequence of
    length k + 1, and each element either extends the longest
    subsequence or tightens an existing tail.

    :param nums: list of integers (may be empty)
    :return: length of the longest strictly increasing subsequence
    """
    from bisect import bisect_left
    tails = []
    for num in nums:
        pos = bisect_left(tails, num)
        if pos == len(tails):
            tails.append(num)      # num extends the longest subsequence
        else:
            tails[pos] = num       # num is a smaller tail for length pos+1
    return len(tails)
# Dynamic programming, O(n^2)
def lengthOfLIS(nums):
    """best[i] is the length of the longest strictly increasing
    subsequence that ends exactly at index i."""
    if not nums:
        return 0
    best = []
    for i, current in enumerate(nums):
        longest_before = 0
        for j, earlier in enumerate(nums[:i]):
            # Use '<=' instead of '<' for a non-strict (non-decreasing) variant.
            if earlier < current and best[j] > longest_before:
                longest_before = best[j]
        best.append(longest_before + 1)
    return max(best)
# Binary search (patience sorting), O(n log n)
def lengthOfLIS(nums):
    """tails[k] keeps the smallest possible tail value of a strictly
    increasing subsequence of length k + 1; its final length is the answer."""
    tails = []
    for value in nums:
        if not tails or value > tails[-1]:
            tails.append(value)
            continue
        # Find the leftmost slot whose tail is >= value and overwrite it.
        lo, hi = 0, len(tails) - 1
        slot = hi
        while lo <= hi:
            mid = (lo + hi) // 2
            if tails[mid] >= value:
                slot = mid
                hi = mid - 1
            else:
                lo = mid + 1
        tails[slot] = value
    return len(tails)
# Quick manual check; the last definition of lengthOfLIS above (binary
# search, strictly increasing) wins, so the expected output here is 6.
print(lengthOfLIS([1, 2, 5, 3, 7, 11, 18]))
| [
"clint1801@163.com"
] | clint1801@163.com |
3fb8bf3f113cfb1319ebe26fea72dc1f8a19c78b | c592c565b4f9259933738e7801e100f64227175a | /questions/migrations/0003_question_title.py | f247190d977f18d7fe37040ec80568f25e20ec86 | [] | no_license | ericak11/questions_app | 1eb5d3dbd886d2fa0aaab7080a689b8f8b575934 | 85fecce9f4113841a36518fd4c5e5916ff68e964 | refs/heads/master | 2016-09-06T01:53:48.477642 | 2015-01-05T15:23:21 | 2015-01-05T15:23:21 | 27,886,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
# Auto-generated schema migration: adds the required `title` CharField to
# the `question` model.  `default='exit'` only back-fills existing rows
# during the migration; `preserve_default=False` removes the default from
# the final field definition.
dependencies = [
('questions', '0002_auto_20141211_1832'),
]
operations = [
migrations.AddField(
model_name='question',
name='title',
field=models.CharField(default='exit', max_length=200),
preserve_default=False,
),
]
| [
"kantor.erica@gmail.com"
] | kantor.erica@gmail.com |
3039c444c18f0b492a472774de7ddcf70fefc723 | 01dd174a3a7d26226564711e32711f137513663f | /pyscf/grad/uks.py | cb2d2d1dd98fc530fe5f2ab8a3bcd9d5ad9f1214 | [
"Apache-2.0"
] | permissive | cherishyli/pyscf | 00cb09c873edc8890be8501414678cdfa54b177e | 468a4bfc4ce067eb7dab6f9289d71122b219609e | refs/heads/master | 2020-04-18T11:40:00.398066 | 2019-01-24T23:07:36 | 2019-01-24T23:07:36 | 167,508,739 | 1 | 0 | Apache-2.0 | 2019-01-25T08:00:12 | 2019-01-25T08:00:12 | null | UTF-8 | Python | false | false | 10,924 | py | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''Non-relativistic UKS analytical nuclear gradients'''
import time
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
from pyscf.grad import rhf as rhf_grad
from pyscf.grad import rks as rks_grad
from pyscf.grad import uhf as uhf_grad
from pyscf.dft import numint, gen_grid
from pyscf import __config__
def get_veff(ks_grad, mol=None, dm=None):
'''First-order derivatives of the effective potential: Coulomb + XC
functional (plus exact-exchange terms for hybrid / range-separated
functionals).

Returns the gradient integrals as a lib.tag_array whose `exc1_grid`
attribute carries the per-atom grid-response energy term (zeros unless
ks_grad.grid_response is enabled).
'''
if mol is None: mol = ks_grad.mol
if dm is None: dm = ks_grad.base.make_rdm1()
t0 = (time.clock(), time.time())
mf = ks_grad.base
ni = mf._numint
if ks_grad.grids is not None:
grids = ks_grad.grids
else:
grids = mf.grids
# Build the integration grids lazily, including the non-zero AO screening table.
if grids.coords is None:
grids.build(with_non0tab=True)
# Gradients for non-local correlation (NLC) functionals are not supported.
if mf.nlc != '':
raise NotImplementedError
#enabling range-separated hybrids
omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)
mem_now = lib.current_memory()[0]
max_memory = max(2000, ks_grad.max_memory*.9-mem_now)
# Choose between the plain XC gradient and the variant that also
# includes the response of the integration grids.
if ks_grad.grid_response:
exc, vxc = get_vxc_full_response(ni, mol, grids, mf.xc, dm,
max_memory=max_memory,
verbose=ks_grad.verbose)
logger.debug1(ks_grad, 'sum(grids response) %s', exc.sum(axis=0))
else:
exc, vxc = get_vxc(ni, mol, grids, mf.xc, dm,
max_memory=max_memory, verbose=ks_grad.verbose)
t0 = logger.timer(ks_grad, 'vxc', *t0)
if abs(hyb) < 1e-10:
# Pure functional: only the Coulomb term (sum of alpha/beta J).
vj = ks_grad.get_j(mol, dm)
vxc += vj[0] + vj[1]
else:
# Hybrid functional: subtract the scaled exact-exchange contribution.
vj, vk = ks_grad.get_jk(mol, dm)
vk *= hyb
if abs(omega) > 1e-10:  # For range separated Coulomb operator
with mol.with_range_coulomb(omega):
vk += ks_grad.get_k(mol, dm) * (alpha - hyb)
vxc += vj[0] + vj[1] - vk
return lib.tag_array(vxc, exc1_grid=exc)
def get_vxc(ni, mol, grids, xc_code, dms, relativity=0, hermi=1,
max_memory=2000, verbose=None):
'''Derivatives of the XC potential matrix for UKS (alpha/beta spin),
integrated over the fixed grids (no grid response).

Returns (exc, -vmat): exc is an all-zero (natm, 3) placeholder here —
the grid-response energy is only computed in get_vxc_full_response —
and vmat holds the (2, 3, nao, nao) derivative integrals.  The sign is
flipped because nabla_X = -nabla_x.
'''
xctype = ni._xc_type(xc_code)
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi)
ao_loc = mol.ao_loc_nr()
vmat = numpy.zeros((2,3,nao,nao))
if xctype == 'LDA':
# LDA needs AO values and first derivatives (deriv=1).
ao_deriv = 1
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho_a = make_rho(0, ao[0], mask, 'LDA')
rho_b = make_rho(1, ao[0], mask, 'LDA')
vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, relativity, 1, verbose)[1]
vrho = vxc[0]
# Accumulate weight*vrho contracted with AO gradients, per spin.
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho[:,0])
rks_grad._d1_dot_(vmat[0], mol, ao[1:4], aow, mask, ao_loc, True)
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho[:,1])
rks_grad._d1_dot_(vmat[1], mol, ao[1:4], aow, mask, ao_loc, True)
rho = vxc = vrho = aow = None
elif xctype == 'GGA':
# GGA additionally needs second AO derivatives (deriv=2).
ao_deriv = 2
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
rho_a = make_rho(0, ao[:4], mask, 'GGA')
rho_b = make_rho(1, ao[:4], mask, 'GGA')
vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, relativity, 1, verbose)[1]
wva, wvb = numint._uks_gga_wv0((rho_a,rho_b), vxc, weight)
rks_grad._gga_grad_sum_(vmat[0], mol, ao, wva, mask, ao_loc)
rks_grad._gga_grad_sum_(vmat[1], mol, ao, wvb, mask, ao_loc)
rho_a = rho_b = vxc = wva = wvb = None
elif xctype == 'NLC':
raise NotImplementedError('NLC')
else:
raise NotImplementedError('meta-GGA')
exc = numpy.zeros((mol.natm,3))
# - sign because nabla_X = -nabla_x
return exc, -vmat
def get_vxc_full_response(ni, mol, grids, xc_code, dms, relativity=0, hermi=1,
max_memory=2000, verbose=None):
'''Full response including the response of the grids

Like get_vxc, but iterates per-atom grid blocks (grids_response_cc)
and accumulates the energy response of the grid weights into excsum,
which is returned as the (natm, 3) first element instead of zeros.
'''
xctype = ni._xc_type(xc_code)
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi)
ao_loc = mol.ao_loc_nr()
aoslices = mol.aoslice_by_atom()
excsum = 0
vmat = numpy.zeros((2,3,nao,nao))
if xctype == 'LDA':
ao_deriv = 1
for atm_id, (coords, weight, weight1) \
in enumerate(rks_grad.grids_response_cc(grids)):
ngrids = weight.size
sh0, sh1 = aoslices[atm_id][:2]
mask = gen_grid.make_mask(mol, coords)
ao = ni.eval_ao(mol, coords, deriv=ao_deriv, non0tab=mask)
rho_a = make_rho(0, ao[0], mask, 'LDA')
rho_b = make_rho(1, ao[0], mask, 'LDA')
exc, vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, relativity, 1, verbose)[:2]
vrho = vxc[0]
# Alpha-spin contribution for this atom's grid block.
vtmp = numpy.zeros((3,nao,nao))
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho[:,0])
rks_grad._d1_dot_(vtmp, mol, ao[1:4], aow, mask, ao_loc, True)
vmat[0] += vtmp
# weight1 is the derivative of the grid weights w.r.t. atom positions.
excsum += numpy.einsum('r,r,nxr->nx', exc, rho_a+rho_b, weight1)
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms[0]) * 2
# Beta-spin contribution.
vtmp = numpy.zeros((3,nao,nao))
aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho[:,1])
rks_grad._d1_dot_(vtmp, mol, ao[1:4], aow, mask, ao_loc, True)
vmat[1] += vtmp
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms[1]) * 2
rho = vxc = vrho = aow = None
elif xctype == 'GGA':
ao_deriv = 2
for atm_id, (coords, weight, weight1) \
in enumerate(rks_grad.grids_response_cc(grids)):
ngrids = weight.size
sh0, sh1 = aoslices[atm_id][:2]
mask = gen_grid.make_mask(mol, coords)
ao = ni.eval_ao(mol, coords, deriv=ao_deriv, non0tab=mask)
rho_a = make_rho(0, ao[:4], mask, 'GGA')
rho_b = make_rho(1, ao[:4], mask, 'GGA')
exc, vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, relativity, 1, verbose)[:2]
wva, wvb = numint._uks_gga_wv0((rho_a,rho_b), vxc, weight)
vtmp = numpy.zeros((3,nao,nao))
rks_grad._gga_grad_sum_(vtmp, mol, ao, wva, mask, ao_loc)
vmat[0] += vtmp
# For GGA only the density component (rho[0]) enters the weight response.
excsum += numpy.einsum('r,r,nxr->nx', exc, rho_a[0]+rho_b[0], weight1)
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms[0]) * 2
vtmp = numpy.zeros((3,nao,nao))
rks_grad._gga_grad_sum_(vtmp, mol, ao, wvb, mask, ao_loc)
vmat[1] += vtmp
excsum[atm_id] += numpy.einsum('xij,ji->x', vtmp, dms[1]) * 2
rho_a = rho_b = vxc = wva = wvb = None
elif xctype == 'NLC':
raise NotImplementedError('NLC')
else:
raise NotImplementedError('meta-GGA')
# - sign because nabla_X = -nabla_x
return excsum, -vmat
class Gradients(uhf_grad.Gradients):
    """Analytical nuclear gradients for unrestricted Kohn-Sham DFT."""

    # Class-level default taken from the global config; NOTE(review): it is
    # unconditionally overridden to False in __init__ below.
    grid_response = getattr(__config__, 'grad_uks_Gradients_grid_response', False)

    def __init__(self, mf):
        uhf_grad.Gradients.__init__(self, mf)
        self.grids = None
        self.grid_response = False
        self._keys = self._keys.union(['grid_response', 'grids'])

    def dump_flags(self):
        uhf_grad.Gradients.dump_flags(self)
        logger.info(self, 'grid_response = %s', self.grid_response)
        return self

    get_veff = get_veff

    def extra_force(self, atom_id, envs):
        '''Hook for extra contributions in analytical gradients.

        Contributions like the response of auxiliary basis in density fitting
        method, the grid response in DFT numerical integration can be put in
        this function.
        '''
        if not self.grid_response:
            return 0
        vhf = envs['vhf']
        envs['log'].debug('grids response for atom %d %s',
                          atom_id, vhf.exc1_grid[atom_id])
        return vhf.exc1_grid[atom_id]


Grad = Gradients
# Self-test / demo: runs small UKS gradient calculations and prints the
# deviation from hard-coded reference fingerprints (expected output ~0).
if __name__ == '__main__':
from pyscf import gto
from pyscf import dft
mol = gto.Mole()
mol.atom = [
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
mol.basis = '631g'
mol.charge = 1
mol.spin = 1
mol.build()
mf = dft.UKS(mol)
mf.conv_tol = 1e-12
#mf.grids.atom_grid = (20,86)
e0 = mf.scf()
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.12090786243525126)
#[[-5.23195019e-16 -5.70291415e-16 5.32918387e-02]
# [ 1.33417513e-16 6.75277008e-02 -2.66519852e-02]
# [ 1.72274651e-16 -6.75277008e-02 -2.66519852e-02]]
g.grid_response = True
print(lib.finger(g.kernel()) - -0.12091122429043633)
#[[-2.95956939e-16 -4.22275612e-16 5.32998759e-02]
# [ 1.34532051e-16 6.75279140e-02 -2.66499379e-02]
# [ 1.68146089e-16 -6.75279140e-02 -2.66499379e-02]]
mf.xc = 'b88,p86'
e0 = mf.scf()
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.11509739136150157)
#[[ 2.58483362e-16 5.82369026e-16 5.17616036e-02]
# [-5.46977470e-17 6.39273304e-02 -2.58849008e-02]
# [ 5.58302713e-17 -6.39273304e-02 -2.58849008e-02]]
g.grid_response = True
print(lib.finger(g.kernel()) - -0.11507986316077731)
mf.xc = 'b3lypg'
e0 = mf.scf()
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.10202554999695367)
#[[ 6.47874920e-16 -2.75292214e-16 3.97215970e-02]
# [-6.60278148e-17 5.87909340e-02 -1.98650384e-02]
# [ 6.75500259e-18 -5.87909340e-02 -1.98650384e-02]]
mol = gto.Mole()
mol.atom = [
['H' , (0. , 0. , 1.804)],
['F' , (0. , 0. , 0. )], ]
mol.unit = 'B'
mol.basis = '631g'
mol.charge = -1
mol.spin = 1
mol.build()
mf = dft.UKS(mol)
mf.conv_tol = 1e-14
mf.kernel()
print(lib.finger(Gradients(mf).kernel()) - 0.10365160440876001)
# sum over z direction non-zero, due to meshgrid response
# H -0.0000000000 0.0000000000 -0.1481125370
# F -0.0000000000 0.0000000000 0.1481164667
mf = dft.UKS(mol)
mf.grids.prune = None
mf.grids.level = 6
mf.conv_tol = 1e-14
mf.kernel()
print(lib.finger(Gradients(mf).kernel()) - 0.10365040148752827)
# H 0.0000000000 0.0000000000 -0.1481124925
# F -0.0000000000 0.0000000000 0.1481122913
| [
"osirpt.sun@gmail.com"
] | osirpt.sun@gmail.com |
119da14a29035eb8a5b1c9ba0c64dc7cb316c170 | fab39aa4d1317bb43bc11ce39a3bb53295ad92da | /nncf/tensorflow/graph/pattern_operations.py | 23435d263c3de7adf57353e47709a005e220e0df | [
"Apache-2.0"
] | permissive | dupeljan/nncf | 8cdce27f25f01ce8e611f15e1dc3036fb8548d6e | 0abfd7103ca212888a946ba4d0fbdb9d436fdaff | refs/heads/develop | 2023-06-22T00:10:46.611884 | 2021-07-22T10:32:11 | 2021-07-22T10:32:11 | 388,719,455 | 0 | 0 | Apache-2.0 | 2021-07-23T07:46:15 | 2021-07-23T07:43:43 | null | UTF-8 | Python | false | false | 3,416 | py | """
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nncf.common.graph.patterns import merge_two_types_of_operations
from nncf.tensorflow.graph.metatypes.common import ELEMENTWISE_LAYER_METATYPES
from nncf.tensorflow.graph.metatypes.common import GENERAL_CONV_LAYER_METATYPES
from nncf.tensorflow.graph.metatypes.common import LAYER_METATYPES_AGNOSTIC_TO_DATA_PRECISION_WITH_ONE_INPUT
from nncf.tensorflow.graph.metatypes.common import LINEAR_LAYER_METATYPES
# Layer-type groups used to build fusion/quantization patterns.  Each entry
# maps 'type' to the de-duplicated framework aliases of the member metatypes
# and 'label' to the pattern-graph label.
LINEAR_OPERATIONS = {'type': list(
{
*{layer_name for m in GENERAL_CONV_LAYER_METATYPES for layer_name in m.get_all_aliases()},
*{layer_name for m in LINEAR_LAYER_METATYPES for layer_name in m.get_all_aliases()},
}
),
'label': 'LINEAR'
}
ELEMENTWISE_OPERATIONS = {'type': list(set(
layer_name for m in ELEMENTWISE_LAYER_METATYPES for layer_name in m.get_all_aliases()
)),
'label': 'ELEMENTWISE'
}
# NOTE(review): the label here is 'ELEMENTWISE', not 'QUANTIZATION_AGNOSTIC' —
# looks like a copy-paste leftover; confirm before relying on the label.
QUANTIZATION_AGNOSTIC_OPERATIONS = {
'type': list(set(
layer_name for m in LAYER_METATYPES_AGNOSTIC_TO_DATA_PRECISION_WITH_ONE_INPUT for layer_name in m.get_all_aliases()
)),
'label': 'ELEMENTWISE'
}
BATCH_NORMALIZATION_OPERATIONS = {'type': ['BatchNormalization',
'SyncBatchNormalization',],
'label': 'BATCH_NORMALIZATION'
}
KERAS_ACTIVATIONS_OPERATIONS = {
'type': ['ReLU',
'ThresholdedReLU',
'ELU',
'PReLU',
'LeakyReLU',
'Activation'],
'label': 'KERAS_ACTIVATIONS'
}
TF_ACTIVATIONS_OPERATIONS = {
'type': ['Relu'],
'label': 'TF_ACTIVATIONS'
}
# Union of the Keras-layer and raw-TF activation groups.
ATOMIC_ACTIVATIONS_OPERATIONS = merge_two_types_of_operations(KERAS_ACTIVATIONS_OPERATIONS,
TF_ACTIVATIONS_OPERATIONS,
'ATOMIC_ACTIVATIONS')
POOLING_OPERATIONS = {'type': ['AveragePooling2D',
'AveragePooling3D',
'GlobalAveragePooling2D',
'GlobalAveragePooling3D'],
'label': 'POOLING'}
SINGLE_OPS = merge_two_types_of_operations(POOLING_OPERATIONS,
{
'type': [
'Average',
'LayerNormalization',
'UpSampling2D'
]
}, label='SINGLE_OPS')
ARITHMETIC_OPERATIONS = {'type': ['__iadd__',
'__add__',
'__mul__',
'__rmul__'],
'label': 'ARITHMETIC'}
| [
"noreply@github.com"
] | dupeljan.noreply@github.com |
976828ea55563b1986da76957c19a1fc536486b2 | 6364bb727b623f06f6998941299c49e7fcb1d437 | /msgraph-cli-extensions/src/userscontacts/azext_userscontacts/vendored_sdks/userscontacts/aio/__init__.py | 03db4e735a0c8c4b412b41f6a92f232c27276d81 | [
"MIT"
] | permissive | kanakanaidu/msgraph-cli | 1d6cd640f4e10f4bdf476d44d12a7c48987b1a97 | b3b87f40148fb691a4c331f523ca91f8a5cc9224 | refs/heads/main | 2022-12-25T08:08:26.716914 | 2020-09-23T14:29:13 | 2020-09-23T14:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._users_contacts_async import UsersContacts
__all__ = ['UsersContacts']
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
a1be5212f3088beec374525a35b087342ba34213 | f269ca79b405a05a7a5b715b0fb6a416e78ddbea | /process_SimRNA_results.py | 3b932be33b95ca9350a81644b5876fa245427cbb | [] | no_license | fryzjergda/simrna_scripts | ccccc3aa5811d2925aa29e765a30ec4f4add7f93 | f6f81c586046d12d74ed592c7a5bbe9f3eed8178 | refs/heads/master | 2020-05-25T05:27:26.603044 | 2020-05-16T11:14:57 | 2020-05-16T11:14:57 | 187,649,955 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,882 | py | #!/usr/bin/python
#created, Michal Boniecki, for automatic processing results from SimRNA runs, 2015.12.14
#this script prepares processes output data from SimRNA runs
import sys, os, shutil
from glob import glob
# NOTE: this is a Python 2 script (print-chevron syntax throughout); run it
# with a Python 2 interpreter.
if(len(sys.argv) < 2):
    print >>sys.stderr, "usage: process_SimRNA_results.py job_id_name"
    print >>sys.stderr, "note: job_id_name should be legal string, to be used to set up working directory for this job"
    print >>sys.stderr, "note: job_id_name should be the same, as used in the script that launches simulations"
    sys.exit(1)
# Fixed layout of the job tree and processing tunables.
WORKING_DIR = "WORKING_SPACE"
RUN_MASK = "run_??"
PROCESSING_DIR = "processing_results"
JOB_ID_NAME = sys.argv[1]
JOB_PATH = WORKING_DIR+"/"+JOB_ID_NAME
ALL_TRAFL_filename = JOB_ID_NAME+"_ALL.trafl"
PDB_REFERENCE_FILE_FOR_TRAFL_CONVERSION = JOB_ID_NAME+"_run_01_01-000001.pdb"
PDB_REFERENCE_PATH_FOR_TRAFL_CONVERSION = "../run_01/"+PDB_REFERENCE_FILE_FOR_TRAFL_CONVERSION
ALL_TRAFL_low_size_thrs = 1024 # 1kb for the time being
FRACTION_LOWEST_ENERGY_FRAMES_TO_CLUSTER = 0.01
OUTPUT_PDBS_DIR = "output_PDBS"
if(os.path.exists(JOB_PATH) == False):
    print >>sys.stderr, "expected path: "+JOB_PATH+" doesn't exist"
    print >>sys.stderr, "this path should exist for given job_id_name: "+JOB_ID_NAME
    sys.exit(2)
# Collect the per-run directories produced by the launching script.
dir_list = glob(JOB_PATH+"/"+RUN_MASK)
dir_list.sort()
#print dir_list
n_runs = len(dir_list)
if(n_runs == 0):
    print >>sys.stderr, "inside directory: "+JOB_PATH+" there are no expected directories named: "+RUN_MASK+" where ?? is numbering field: 01, 02, 03 ..."
    sys.exit(3)
print >>sys.stderr, "number of run directories detected: "+str(n_runs)
os.chdir(JOB_PATH)
print >>sys.stderr, "making directory: "+JOB_PATH+"/"+PROCESSING_DIR
if(os.path.exists(PROCESSING_DIR) == False):
    os.mkdir(PROCESSING_DIR)
else:
    print >>sys.stderr, "requested subdirectory: "+PROCESSING_DIR+" already exists"
    print >>sys.stderr, "check it, maybe delete it, ... program termination"
    sys.exit(4)
os.chdir(PROCESSING_DIR)
# Concatenate every run's trajectory into a single .trafl file.
command = "cat ../"+RUN_MASK+"/*.trafl > "+ALL_TRAFL_filename
print >>sys.stderr, "being in "+JOB_PATH+"/"+PROCESSING_DIR+" running command:"
print >>sys.stderr, command
os.system(command)
if(os.path.isfile(ALL_TRAFL_filename) == False):
    print >>sys.stderr, "expected (from previous step) file: "+ALL_TRAFL_filename+" doesn't exist"
    sys.exit(5)
file_size = os.path.getsize(ALL_TRAFL_filename)
if(file_size < ALL_TRAFL_low_size_thrs):
    print >>sys.stderr, "file: "+ALL_TRAFL_filename+" is too small: "+str(file_size)+" bytes"
    print >>sys.stderr, "it seems the file contains no data, something went wrong before ..."
    sys.exit(6)
# some tests if ALL_TRAFL_filename is correct (sometimes there are problems, when there is no disk space during SimRNA running)
# if ALL_TRAFL_filename is not correct, it should be repaired here
inpfile = open(ALL_TRAFL_filename)
first_line = inpfile.readline()
second_line = inpfile.readline().rstrip()
inpfile.close()
#assuming that second line in file ALL_TRAFL_filename is first line containing coordinated, thus is possible to calculate the size of system (seq length)
#by dividing of number of items by 15 (3 coordinates x,y,z and 5 atoms per nucleotide)
coords_list = second_line.split()
seq_length = len(coords_list) / 15
if(seq_length < 4):
    print >>sys.stderr, "it seems that seq_lenght detected from file: "+ALL_TRAFL_filename+" is too low"
    print >>sys.stderr, "something went wrong"
    sys.exit(7)
print >>sys.stderr, "clustering ... assuming:"
print >>sys.stderr, "--- fraction of lowest energy frames to clustering: "+str(FRACTION_LOWEST_ENERGY_FRAMES_TO_CLUSTER)
# Clustering RMSD threshold scales with sequence length (0.1 A per residue).
rmsd_thrs = 0.1*float(seq_length)
rmsd_thrs_str = "%.1f" % rmsd_thrs
print >>sys.stderr, "--- rmsd thrs for clustering 0.1*seq_lenght which is: "+rmsd_thrs_str
command = "../../../bin/clustering "+ALL_TRAFL_filename+" "+str(FRACTION_LOWEST_ENERGY_FRAMES_TO_CLUSTER)+" "+rmsd_thrs_str+" > clustering.log 2>&1"
print >>sys.stderr, command
os.system(command)
# Reconstruct all-atom PDBs for the three biggest clusters.
clust_1_2_3_names = glob("*clust0[1-3].trafl")
clust_1_2_3_names.sort()
n_clusts = len(clust_1_2_3_names)
print >>sys.stderr, "number of clusters to process: "+str(n_clusts)
print >>sys.stderr, "extracting pdbs, reconstructing all atom representation"
print >>sys.stderr, "creating symlink 'data'"
os.symlink("../../../data","data")
if(os.path.exists(PDB_REFERENCE_PATH_FOR_TRAFL_CONVERSION) == False):
    print >>sys.stderr, "expected file at location: "+PDB_REFERENCE_PATH_FOR_TRAFL_CONVERSION+" doesn't exist"
    sys.exit(8)
for curr_clust_name in clust_1_2_3_names:
    clust_reconstr_log_name = curr_clust_name.replace(".trafl",".log")
    command = "../../../bin/SimRNA_trafl2pdbs "+PDB_REFERENCE_PATH_FOR_TRAFL_CONVERSION+" "+curr_clust_name+" 1 AA > "+clust_reconstr_log_name+" 2>&1"
    print >>sys.stderr, command
    os.system(command)
os.chdir("..")
print >>sys.stderr, "making directory: "+OUTPUT_PDBS_DIR
if(os.path.exists(OUTPUT_PDBS_DIR) == False):
    os.mkdir(OUTPUT_PDBS_DIR)
else:
    print >>sys.stderr, "directory already exists"
os.chdir(PROCESSING_DIR)
pdbs_list = glob("*.pdb")
#pdbs_AA_list = glob("*_AA.pdb")
ss_detected_list = glob("*.ss_detected")
# BUGFIX: the original tested "len(pdbs_list) < 0", which can never be true,
# so the "no pdb files" error path was unreachable; test for emptiness.
if(len(pdbs_list) == 0):
    print >>sys.stderr, "inside directory: "+PROCESSING_DIR+" there is no pdb files, something when wrong in previous step"
    sys.exit(9)
else:
    pdbs_list.sort()
    print >>sys.stderr, "detected pdb files in: "+PROCESSING_DIR+":"
    for curr_pdb_name in pdbs_list:
        print >>sys.stderr, curr_pdb_name
print >>sys.stderr, "copying pdb and ss_detected files to: "+OUTPUT_PDBS_DIR+" just to store them there"
for curr_pdb_name in pdbs_list:
    shutil.copy(curr_pdb_name, "../"+OUTPUT_PDBS_DIR)
for curr_ss_detected_name in ss_detected_list:
    shutil.copy(curr_ss_detected_name, "../"+OUTPUT_PDBS_DIR)
os.chdir("..")
print >>sys.stderr, "DONE :-)"
| [
"twirecki@genesilico.pl"
] | twirecki@genesilico.pl |
d4b3c37168303b568f64ff5fef401bc1cc1264b2 | 3400394303380c2510b17b95839dd4095abc55a4 | /src/py310/lesson02/comments.py | a4dca2ef7c776bd871c81c1adcdd13adb12c2fce | [
"MIT"
] | permissive | IBRAR21/py310_sp2021 | daf53b76decf060d72201a3db66f0f7c697876a7 | 584e37b9d96654c1241fc787d157c292301d5bf7 | refs/heads/master | 2023-05-30T16:43:09.614565 | 2021-06-09T21:41:14 | 2021-06-09T21:41:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,828 | py | # --------------------------------------------------------------------------------- #
# AQUABUTTON wxPython IMPLEMENTATION
#
# Andrea Gavana, @ 07 October 2008
# Latest Revision: 24 Nov 2011, 22.00 GMT
#
#
# TODO List
#
# 1) Anything to do?
#
#
# For all kind of problems, requests of enhancements and bug reports, please
# write to me at:
#
# andrea.gavana@gmail.com
# andrea.gavana@maerskoil.com
#
# Or, obviously, to the wxPython mailing list!!!
#
#
# End Of Comments
# --------------------------------------------------------------------------------- #
"""
:class:`AquaButton` is another custom-drawn button class which *approximatively* mimics
the behaviour of Aqua buttons on the Mac.
Description
===========
:class:`AquaButton` is another custom-drawn button class which *approximatively* mimics
the behaviour of Aqua buttons on the Mac. At the moment this class supports:
* Bubble and shadow effects;
* Customizable background, foreground and hover colours;
* Rounded-corners buttons;
* Text-only or image+text buttons;
* Pulse effect on gaining focus.
And a lot more. Check the demo for an almost complete review of the functionalities.
Usage
=====
Sample usage::
import wx
import wx.lib.agw.aquabutton as AB
app = wx.App(0)
frame = wx.Frame(None, -1, "AquaButton Test")
mainPanel = wx.Panel(frame)
mainPanel.SetBackgroundColour(wx.WHITE)
# Initialize AquaButton 1 (with image)
bitmap = wx.Bitmap("my_button_bitmap.png", wx.BITMAP_TYPE_PNG)
btn1 = AB.AquaButton(mainPanel, -1, bitmap, "AquaButton")
# Initialize AquaButton 2 (no image)
btn2 = AB.AquaButton(mainPanel, -1, None, "Hello World!")
frame.Show()
app.MainLoop()
Supported Platforms
===================
AquaButton has been tested on the following platforms:
* Windows (Windows XP);
* Linux Ubuntu (10.10).
Window Styles
=============
`No particular window styles are available for this class.`
Events Processing
=================
This class processes the following events:
================= ==================================================
Event Name Description
================= ==================================================
``wx.EVT_BUTTON`` Process a `wxEVT_COMMAND_BUTTON_CLICKED` event, when the button is clicked.
================= ==================================================
License And Version
===================
:class:`AquaButton` control is distributed under the wxPython license.
Latest Revision: Andrea Gavana @ 22 Nov 2011, 22.00 GMT
Version 0.4
"""
# Lesson snippet contrasting a magic number with a named constant.
# NOTE(review): `x` is never defined in this file; executing it would raise
# NameError — presumably intentional teaching material.
x = x + 1 # allow for border
BORDER = 1
x = x + BORDER
def allow_for_border(coordinate):
    """Return *coordinate* shifted by a one-unit border."""
    return 1 + coordinate
y = allow_for_border(y)
def calc(num1, num2):
    # NOTE(review): the original comment said "calc product 2 numbers", but
    # the body returns the SUM — this lesson file appears to use `calc` as a
    # deliberate example of a misleading name/comment (contrast with
    # `calculate_product` below). Code left unchanged.
    return num1 + num2
def calculate_product(left, right):
    """Return the product of *left* and *right*."""
    product = left * right
    return product
| [
"akmiles@icloud.com"
] | akmiles@icloud.com |
ccc5024e30b508be56f625b5cc2eb06203b0013e | f39d88e77080198681994a65fb0137523e23895c | /module_00.py | 847c34d9966b7b3fbee3d61c7dd2e52d80ffa7ed | [] | no_license | v-mail-81/Project_0 | 536901b1c8e517daa48982005f71a404f714aa7e | 8fcb01c1c6cb15e6502b918de93a23247ac6627b | refs/heads/master | 2022-11-11T19:01:12.419937 | 2020-07-05T14:21:34 | 2020-07-05T14:21:34 | 277,103,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,558 | py | #!/usr/bin/env python
# coding: utf-8
# In[79]:
'''Загружаем модуль numpy для работы со случайными числами и массивом из них'''
import numpy as np
'''Определяем функцию game_core_v2 с аргументом number (загаданное число), которая будет угадывать число'''
def game_core_v2(number):
    """Guess a hidden number in [1, 100] by a halving-step search.

    Start from 50 with an initial step of 51; after every miss the step is
    halved with Python's round() (banker's rounding, same as the original)
    and applied towards the hidden number.  Returns the attempt count.
    """
    attempts = 1
    guess = 50
    step = 51
    while guess != number:
        attempts += 1
        step = round(step / 2)
        if guess < number:
            guess += step
        elif guess > number:
            guess -= step
    return attempts
Функция принимает результат работы функции game_core_v2 и возвращает среднее количество попыток за заданное число повторов (1000)'''
def score_game(game_core_v2):
    """Run the guesser on 1000 reproducible random numbers in [1, 100] and
    report (print + return) the mean number of attempts, truncated to int."""
    attempts_log = []
    np.random.seed(1)  # fixed seed keeps the experiment reproducible
    for number in np.random.randint(1, 101, size=(1000)):
        attempts_log.append(game_core_v2(number))
    score = int(np.mean(attempts_log))
    print(f"Ваш алгоритм угадывает число в среднем за {score} попыток")
    return score
score_game(game_core_v2)
| [
"noreply@github.com"
] | v-mail-81.noreply@github.com |
7334b51d4d6dd97c7c79768d9c96ddcd4c916b09 | 4917860c0be0b281af29ee0f1f92dcddfffb3a6b | /faust_proj/wsgi.py | 7dc2c9726a9acaef51aef894bb4268df2d272b75 | [] | no_license | Dineshs91/faust-example | fdb8238e8464eb971b8d65025efe83dd25151c37 | 99e19260a0ab591d91bb40ebea1b1bfc901ca1fd | refs/heads/master | 2020-04-10T16:08:24.219541 | 2018-12-14T08:58:21 | 2018-12-14T08:58:21 | 161,134,463 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for faust_proj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'faust_proj.settings')
# Module-level WSGI callable discovered by WSGI servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| [
"dineshpy07@gmail.com"
] | dineshpy07@gmail.com |
2582af6476aaa57b460d1979815cf654b2556508 | 6ab022b549cfd105cedb3acd84679a663b770103 | /home/migrations/0002_auto_20210123_0010.py | 5a25e027d92b06540a1c45f6fc30104fb125e3bf | [
"Apache-2.0"
] | permissive | d-shaktiranjan/WebifyTask | 2b358d6b80bbe723f10a8ebabeb0ff4f701a2c53 | 188c9519dd5d9fe406ed7ce4e27dd491240302da | refs/heads/main | 2023-04-23T17:07:38.930584 | 2021-05-18T05:48:10 | 2021-05-18T05:48:10 | 331,989,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | # Generated by Django 3.1.4 on 2021-01-22 18:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the `home` app: replaces the
    # implicit auto `id` primary key on AllTask with an explicit integer
    # `task_id`, and tightens several field definitions.

    dependencies = [
        ('home', '0001_initial'),
    ]

    operations = [
        # Drop the auto-created primary key before introducing task_id.
        migrations.RemoveField(
            model_name='alltask',
            name='id',
        ),
        # New explicit integer PK; preserve_default=False because default=1
        # only back-fills existing rows during this migration.
        migrations.AddField(
            model_name='alltask',
            name='task_id',
            field=models.IntegerField(default=1, primary_key=True, serialize=False),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='alltask',
            name='about',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='alltask',
            name='subDateTime',
            field=models.DateTimeField(blank=True),
        ),
        migrations.AlterField(
            model_name='alltask',
            name='taskName',
            field=models.CharField(max_length=30),
        ),
    ]
| [
"debatashaktiranjan@gmail.com"
] | debatashaktiranjan@gmail.com |
d081c90ee2be7a970eccc901bbe411b6143cc227 | bb98adfcca0865092761eb3ce95da6d3e016bc42 | /wisdomgate/settings.py | 73b2b8a4fe8ca2927991df9382aeb125f1648ec8 | [] | no_license | iskenderserkan/wisdomgate | e2332afa3786e6440a046e65bdad4d5b48dd5cd9 | 258041aeefac7ae654726271fc1b1bc409fc7ddd | refs/heads/master | 2021-01-25T14:56:45.081826 | 2018-03-04T00:24:59 | 2018-03-04T00:24:59 | 123,740,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,249 | py | """
Django settings for wisdomgate project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'tickai_&nsyh#@!86bozw-_6of-fi(ri$_2v9!w!mj#uhnbvb-'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['127.0.0.1','pythonanywhere.com']

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'gb_knowledge',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'wisdomgate.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'debug': True,
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'wisdomgate.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
# NOTE(review): Django language codes are conventionally lowercase
# ('tr-tr'); confirm 'TR-tr' is accepted by this Django version.
LANGUAGE_CODE = 'TR-tr'

TIME_ZONE = 'Europe/Istanbul'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'

STATIC_ROOT = os.path.join(BASE_DIR, 'static')
"iskenderserkan@gmail.com"
] | iskenderserkan@gmail.com |
b6aedc802e87484a48e035f95f533be5d35b6c1d | 8d90e93d5c7c430bba840783efea760eb37d4f3c | /Sword Offer/31.py | bf77d964602d4c023a1a558211a271d34b9607f5 | [] | no_license | handsome-fish/Leetcode | a0639735d27979dc7c8c0a1e7fa381f17904b0ad | b3893a5cc6ff0f2311dcdef55766e3ba2a3ba812 | refs/heads/master | 2021-07-22T01:30:09.859679 | 2021-07-14T15:05:27 | 2021-07-14T15:05:27 | 178,343,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | """
剑指 Offer 31. 栈的压入、弹出序列
输入两个整数序列,第一个序列表示栈的压入顺序,请判断第二个序列是否为该栈的弹出顺序。
假设压入栈的所有数字均不相等。例如,序列 {1,2,3,4,5} 是某栈的压栈序列,序列 {4,5,3,2,1} 是该压栈序列对应的一个弹出序列,
但 {4,3,5,1,2} 就不可能是该压栈序列的弹出序列。
示例 1:
输入:pushed = [1,2,3,4,5], popped = [4,5,3,2,1]
输出:true
解释:我们可以按以下顺序执行:
push(1), push(2), push(3), push(4), pop() -> 4,
push(5), pop() -> 5, pop() -> 3, pop() -> 2, pop() -> 1
示例 2:
输入:pushed = [1,2,3,4,5], popped = [4,3,5,1,2]
输出:false
解释:1 不能在 2 之前弹出。
提示:
0 <= pushed.length == popped.length <= 1000
0 <= pushed[i], popped[i] < 1000
pushed 是 popped 的排列。
"""
from typing import List
class Solution:
    def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
        """Return True iff *popped* is a valid pop order for pushing *pushed*.

        Simulate the stack: push each value, then greedily pop whenever the
        top matches the next expected popped element.  The sequences are
        valid exactly when the simulated stack drains completely.
        """
        simulated = []
        next_pop = 0
        for value in pushed:
            simulated.append(value)
            while simulated and simulated[-1] == popped[next_pop]:
                simulated.pop()
                next_pop += 1
        return not simulated
| [
"gitfish@163.com"
] | gitfish@163.com |
1a8275ca53430b7a7de208eaccbad36efa3e3dac | 23e9b76b5702b364bba2e4565a1ab1b6eb5d1d21 | /4. Project/face_tracking/face_tracker.py | e3316b4b9d8cd1f41be0010e42835409afee7f4c | [
"MIT"
] | permissive | gjustin40/Pytorch-Cookbook | 5b346598019e540b05019e3fa465e9d1132c914a | 52a7f4e8c053c775d8c6c90ab0926540b2e1f7cf | refs/heads/master | 2023-02-27T19:42:49.179167 | 2023-02-14T06:22:13 | 2023-02-14T06:22:13 | 148,883,451 | 0 | 1 | MIT | 2022-12-09T05:49:21 | 2018-09-15T08:20:16 | Jupyter Notebook | UTF-8 | Python | false | false | 2,418 | py | from imutils.video import VideoStream, FPS
from centroidtracker import CentroidTracker
import numpy as np
import argparse
import imutils
import time
import cv2
# Face detection + tracking demo: detects faces with an OpenCV DNN SSD model
# and assigns persistent IDs to them via CentroidTracker.
prototxt = 'deploy.prototxt'
model = 'res10_300x300_ssd_iter_140000.caffemodel'
# model = 'opencv_face_detector.caffemodel'
confidence = 0.8  # minimum detection score for a face box to be kept
ct = CentroidTracker()
(H, W) = (None, None)  # frame size, filled in from the first frame
print("[INFO] loading model....")
net = cv2.dnn.readNetFromCaffe(prototxt, model)
parser = argparse.ArgumentParser()
parser.add_argument('--source', required=True, help='video or camera')
args = parser.parse_args()
if args.source == '0':
    # '0' selects the local webcam via imutils' threaded VideoStream.
    print("[INFO] starting video streams...")
    stream = VideoStream(src=0).start()
else:
    print("[INFO] starting video Capture...")
    stream = cv2.VideoCapture(args.source)
fps = FPS().start()
time.sleep(2.0)  # let the camera warm up
while True:
    try:
        # VideoStream.read() returns the frame directly, while
        # VideoCapture.read() returns a (grabbed, frame) pair.
        if args.source == '0':
            frame = stream.read()
            frame = imutils.resize(frame, width=500)
        else:
            (grabbed, frame) = stream.read()
            frame = imutils.resize(frame, width=500)
        if W is None or H is None:
            (H, W) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 1.0, (W, H), (104.0, 177.0, 123.0))
        net.setInput(blob)
        detections = net.forward()
        rects = []
        # detections = [[]]
        for i in range(0, detections.shape[2]):
            if detections[0, 0, i, 2] > confidence:
                # Detection coordinates are normalised; scale back to pixels.
                box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                rects.append(box.astype("int"))
                (startX, startY, endX, endY) = box.astype('int')
                cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)
        objects = ct.update(rects)
        # Draw the tracker's ID and centroid for every tracked face.
        for (objectID, centroid) in objects.items():
            text = f'ID {objectID}'
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] -10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0,255,0), -1)
    except Exception as e:
        # print('No Objects')
        print(e)
    cv2.imshow("frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
    fps.update()
fps.stop()
print(fps.fps())
# BUGFIX: the original called cv2.destropyAllWindows() (misspelled), which
# raised AttributeError once the loop exited.
cv2.destroyAllWindows()
# NOTE(review): cv2.VideoCapture exposes release(), not stop(); this call
# only works on the imutils VideoStream branch — confirm intended behaviour.
stream.stop()
"gjustin@naver.com"
] | gjustin@naver.com |
c8f2954d5848b31990dc4b5336d0ef62f6f1881e | 1bf03446a30dbc51c0249c41a7b457581725dfed | /client.py | 23473e0b776bb2dc84bf59cf407825c12e3dbbe1 | [] | no_license | ichbinkenny/redefined-destruction-networking | 567ca8ee643be53154d7f1072c95c70ca375b078 | fc92f4957e3734226324dd1678d0c9306c80771c | refs/heads/master | 2023-01-31T12:35:11.238835 | 2020-12-09T21:48:02 | 2020-12-09T21:48:02 | 273,066,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,725 | py | import socket
import threading
import sys
import select
# --- Connection state and protocol constants -------------------------------
client_socket = None  # created and connected by setupClient()
address = "asimplenerd.com"#"192.168.72.1" # This will be the server's address on the local network. It is imperative that the client is already on the hostapd network!
default_port = 1287
id = -1  # NOTE(review): shadows the builtin `id`; setupClient() rebinds a local instead of this global
end_flag = True  # True while the connection loop should be (or stay) stopped
close_message = "PLZCLOSENOW"  # server-initiated shutdown sentinel
ACK = 'e'
NACK = 'f'
game_in_progress = False
message_size = 256  # fixed recv buffer size in bytes
# Status codes shared with the server protocol.
READY = 1
BUSY = 2
DEV_ADDED = 3
DEV_REMOVED = 4
ENTER_COMBAT = 5
EXIT_COMBAT = 6
### setupClient test notes
# Clients trying to connect to the server improperly, i.e. on server close all terminate as appropriate.
# Received IDs are not used until verified by an ACK from the server!
# any non-ack status from the server simply disconnects this client! This is good!
# ACKs from client IDs allow for a bot connection and start a game loop for the client to receive messages
# and send updates!
def setupClient():
    """Connect to the game server, receive our id, and on ACK enter the main loop.

    On NACK or an unknown status the client simply reports and returns
    (i.e. disconnects) without starting the connection loop.
    """
    global client_socket
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print("Attempting to connect to server...")
    client_socket.connect((address, default_port))
    # First server message: our numeric id (big-endian); second: status byte.
    id = int.from_bytes(client_socket.recv(message_size), byteorder='big')
    status = client_socket.recv(message_size).decode('utf-8')
    if status == ACK:
        # Send id back to bluetooth device
        sys.stdout.write("id: %d\n" % id)
        sys.stdout.flush()
        beginConnLoop()
    elif status == NACK:
        print("Failed to get id!")
    else:
        print("UNKNOWN STATUS: {}".format(str(status)))
### readDevUpdates test notes
# until this client is instructed to stop running, updates are being read from BluetoothControl correctly.
# on client termination, this function cleans up its resources nicely!
# all four handled commands are properly recognized on the server end!
# I.e. DEV_ADDED is producing 3: device id, DEV_REMOVE is producing 4: device id
def readDevUpdates():
    """Forward device updates read from stdin to the server until end_flag is set.

    Lines look like "<status>:<payload>" where status is one of DEV_ADDED,
    DEV_REMOVED, ENTER_COMBAT or EXIT_COMBAT, but every line — recognised
    status or not — is forwarded to the server verbatim.
    """
    global end_flag
    while not end_flag:
        info = sys.stdin.readline().strip()
        if ':' in info:
            # Validate the numeric status prefix.  Kept for parity with the
            # original behaviour: a non-numeric prefix raises ValueError.
            int(info[:info.index(':')])
        # BUGFIX/cleanup: the original if/elif ladder sent exactly the same
        # bytes in all five branches (copy-paste); collapsed to one sendall.
        client_socket.sendall(bytes(info, 'utf-8'))
### Test notes
# On start, update thread launches successfully!
# Server registers components as 0:0:0:Sword to allow for proper updating!
# Reads are only happening when data is available to save resources!
# On receiving the close request from the server, this client is properly cleaned up!
# Data received is being registered in BluetoothControl from this file's pipe!
def beginConnLoop():
    """Run the receive loop: start the stdin-forwarding thread, register our
    loadout with the server, then relay server messages to stdout until the
    server sends close_message."""
    global end_flag
    end_flag = False
    # Daemon thread so it dies with the process.  NOTE(review): setDaemon()
    # is deprecated in favour of the `daemon` attribute/constructor arg.
    bot_updated_thread = threading.Thread(target=readDevUpdates)
    bot_updated_thread.setDaemon(True)
    bot_updated_thread.start()
    components = "0:0:0:Sword" # No armor and sword weapon
    client_socket.sendall(bytes(components, 'utf-8'))
    while not end_flag:
        # select() blocks until the socket is readable, avoiding busy-waiting.
        read_list, write_list, err = select.select([client_socket], [], [])
        for sock in read_list:
            msg = sock.recv(message_size).decode('utf-8')
            # The close sentinel from the server ends the loop and the socket.
            end_flag = msg == close_message
            if end_flag:
                sys.stdout.write("SOCKCLOSED")
                client_socket.close()
                break
            else:
                sys.stdout.write(msg + "\n")
                sys.stdout.flush()
### This works!
# Script entry point: connect to the server immediately when run directly.
if __name__ == "__main__":
    setupClient()
| [
"kennethdhunter@gmail.com"
] | kennethdhunter@gmail.com |
e5514210e89b80409333bef0bf14804be6c84f11 | 4546a96d55a2cc3736dbf668c45677b572735f08 | /get_focused_output.sh | 31c299ae5e1814901c72c08051bb5e66b3f4ee00 | [] | no_license | teunissenstefan/scripts | eea268faee617acd80f8700b47e47e50032a0497 | efab9151f4e93e616ec9846e841629b329be9596 | refs/heads/master | 2022-09-11T17:45:30.285304 | 2022-08-09T07:17:44 | 2022-08-09T07:17:44 | 244,189,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | sh | #!/usr/bin/env python3
import subprocess
import json
# Print the 1-based index of the focused sway output; print 0 on any failure.
try:
    outputs = subprocess.check_output(["swaymsg", "-t", "get_outputs"])
    # NOTE(review): swaymsg emits valid JSON; the quote replacement is kept
    # from the original, but it would corrupt JSON strings that contain
    # apostrophes — confirm whether it is actually needed.
    outputs = json.loads(outputs.decode('utf-8').replace("'", '"'))
    for idx, output in enumerate(outputs):
        if output["focused"]:
            print(idx + 1)
except Exception:
    # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    print(0)
| [
"stefanteunissen1@gmail.com"
] | stefanteunissen1@gmail.com |
766d894a674bb679015ec516a8fdc1796ea7ebc7 | 46ae325c342957bdeddf9e92b2fbb97f769237f6 | /Galois Counter Mode/Correc_TP1_AES.py | 7a0dc8a2fb088d25a98f3ae96e1a4599db014564 | [] | no_license | DenizSungurtekin/Cryptography-and-security | 557b029bb0a8dabc2b42176916da317478085f6a | 4050d5be02e7186ada87084008f60f515970c073 | refs/heads/main | 2023-03-17T14:56:18.055721 | 2021-03-05T12:10:28 | 2021-03-05T12:10:28 | 344,783,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,154 | py | # Here's the S-boxes as tuples. You can change their form if you prefer to
# work with lists or other structures.
# Forward AES S-box (FIPS-197), indexed directly by byte value.
S_box = (
    0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
    0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
    0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
    0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
    0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
    0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
    0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
    0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
    0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
    0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
    0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
    0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
    0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
    0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
    0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
    0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
)
# Inverse AES S-box: S_box_inv[S_box[x]] == x for every byte x.
S_box_inv = (
    0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
    0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
    0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
    0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
    0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
    0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
    0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
    0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
    0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
    0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
    0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
    0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
    0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
    0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
    0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
    0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D
)
# MixColumns coefficient matrix (rows of the GF(2^8) constant matrix).
MixColMatrix = [[0x02,0x03,0x01,0x01],
                [0x01,0x02,0x03,0x01],
                [0x01,0x01,0x02,0x03],
                [0x03,0x01,0x01,0x02]]
# Inverse MixColumns coefficient matrix, used for decryption.
MixColInverse = [[0x0e,0x0b,0x0d,0x09],
                 [0x09,0x0e,0x0b,0x0d],
                 [0x0d,0x09,0x0e,0x0b],
                 [0x0b,0x0d,0x09,0x0e]]
# Applies the S-Box on 32 bit blocks (lists of 4 bytes)
def Sbox(lst, box):
    """Substitute each of the four bytes of *lst* through the lookup table *box*."""
    return [box[lst[i]] for i in range(4)]
# xor on two lists of 4 bytes
def xor_lst(lst1, lst2):
    """Byte-wise XOR of two 4-byte words."""
    return [lst1[i] ^ lst2[i] for i in range(4)]
# Polynomial product in GF(2^8), with irreductible polynomial x^8+x^4+x^3+x+1
def poly_multiplication(p1, p2):
    """Multiply two GF(2) polynomials (given as ints) and reduce the product
    modulo the AES polynomial x^8 + x^4 + x^3 + x + 1 (0x11B).

    Rewritten with integer bit operations: the original converted p1 to a
    string of bits and reduced through recursive calls, which was both
    unidiomatic and slower.  Results are identical for all inputs.
    """
    # Carry-less (XOR) multiplication: add p2 shifted by each set bit of p1.
    product = 0
    a = p1
    shift = 0
    while a:
        if a & 1:
            product ^= p2 << shift
        a >>= 1
        shift += 1
    # Reduce modulo 0x11B, clearing set bits from the top down so the result
    # always ends up below 2^8 for byte-sized operands.
    for bit in range(product.bit_length() - 1, 7, -1):
        if product & (1 << bit):
            product ^= 0x11B << (bit - 8)
    return product
# Key expansion step, returns a list of keys (each key will be composed of four
# lists of 4 bytes). The given key is either 4x4, 6x4 or 8x4 bytes.
def key_expansion(key,box):
    """Expand *key* (a list of N 4-byte words, N in {4, 6, 8}) into the full
    round-key schedule of 4*(steps+1) words, using *box* as the S-box."""
    N = len(key)
    # The number of rounds is fixed by the key size (AES-128/192/256).
    if len(key) == 4:
        steps = 10
    elif len(key) == 6:
        steps = 12
    elif len(key) == 8:
        steps = 14
    else:
        raise ValueError("Invalid Key Length")
    # Round constants rc_i = x^(i-1) in GF(2^8).
    rc_table = [0b00000001, 0b00000010 , 0b00000100 ,
                0b00001000, 0b00010000 , 0b00100000 ,
                0b01000000, 0b10000000 , 0b00011011 ,
                0b00110110]
    # Each Rcon word is [rc_i, 0, 0, 0].
    rcon_table = []
    for i in range(10):
        rcon_table.append([rc_table[i], 0b0, 0b0, 0b0])
    # The first N words of the schedule are the key itself.
    Expanded_Key = []
    for i in range(N):
        Expanded_Key.append(key[i])
    for i in range(N,4*(steps+1)):
        if i % N == 0:
            # Every N words: RotWord, then SubWord, then XOR with Rcon.
            rotated = Expanded_Key[i-1][1:] + [Expanded_Key[i-1][0]]
            Expanded_Key.append(xor_lst(xor_lst(Expanded_Key[i-N],
                                Sbox(rotated,box)),rcon_table[int(i/N)-1]))
        elif N > 6 and i % N == 4:
            # Extra SubWord step specific to 256-bit keys.
            Expanded_Key.append(xor_lst(Expanded_Key[i-N],Sbox(Expanded_Key[i-1],box)))
        else:
            Expanded_Key.append(xor_lst(Expanded_Key[i-N],Expanded_Key[i-1]))
    return Expanded_Key
# ByteSub operation on a 4x4 matrix of bytes.
def ByteSub(matrix, sub_box):
    """Apply the S-box substitution to every 4-byte word of the state."""
    return [Sbox(matrix[i], sub_box) for i in range(4)]
# ShiftRow operation on a 4x4 matrix of bytes.
def ShiftRow(matrix, encrypt):
    """Cyclically shift row r of the state by r positions.

    The state is stored column-major (matrix[c][r] is row r of column c), so
    shifting row r left during encryption means taking the byte from column
    (c + r) mod 4; decryption shifts the other way, (c - r) mod 4.

    Improvements over the original: the two hand-unrolled 4x4 branches are
    collapsed into one indexed expression, and `encrypt` is tested by
    truthiness instead of the unidiomatic `== True` comparison.
    """
    direction = 1 if encrypt else -1
    return [[matrix[(c + direction * r) % 4][r] for r in range(4)]
            for c in range(4)]
# Vector Multiplication with polynomial multiplication for lists of 4 bytes,
# Returns an integer value (Addition is also polynomial).
def Vector_poly_multi(polylst, polylst2):
    """GF(2^8) dot product of two 4-byte vectors (XOR is the field addition)."""
    terms = (poly_multiplication(polylst[i], polylst2[i]) for i in range(4))
    acc = 0
    for t in terms:
        acc ^= t
    return acc
# MixColumn operation on a 4x4 matrix of bytes.
# We have to work with column instead of rows here, so we have to build the
# columns first
def MixColumn(matrix, mixcol):
    """Multiply every state column by the (Inv)MixColumns matrix in GF(2^8).

    Entry j of output word i is the GF(2^8) dot product of row j of *mixcol*
    with word i of the state.
    """
    return [[Vector_poly_multi(mixcol[j], matrix[i]) for j in range(4)]
            for i in range(4)]
# AddRoundKey operation on a 4x4 matrix of bytes.
def AddRoundKey(matrix, sub_key):
    """XOR the round key into the state, word by word."""
    return [xor_lst(matrix[i], sub_key[i]) for i in range(4)]
# Changes any message in a vector of 32-bit words, i.e. a matrix of bytes 4xN.
# In particular, this means a key will already be organised as words,
# And a 128 bit message will already we a 4x4 matrix of bytes.
# A byte is here just an integer value between 0 and 255.
def MessageToMatrix(string):
    """Split *string* into 4-byte words of character codes (last word may be short)."""
    matrice = []
    for index, char in enumerate(string):
        if index % 4 == 0:
            matrice.append([])
        matrice[-1].append(ord(char))
    return matrice
# Vice-versa : transforms a matrix 4xN of bytes into a string.
def MatrixToMessage(matrice):
    """Concatenate the characters of every 4-byte word back into a string."""
    chars = []
    for word in matrice:
        for j in range(4):
            chars.append(chr(word[j]))
    return "".join(chars)
# FINAL AES FUNCTION :
def AES(message,key,S_box, mixcol):
    """Encrypt one 16-character block *message* with *key* (16/24/32 chars).

    S_box is the 256-entry substitution table and mixcol the MixColumns
    matrix; both are assumed to be defined elsewhere in this file.
    Returns the ciphertext as a 16-character string.
    """
    message = MessageToMatrix(message)
    key = MessageToMatrix(key)
    All_Keys = key_expansion(key,S_box)
    Init_Key = All_Keys[0:4]
    # First xor
    message = AddRoundKey(message,Init_Key)
    N = len(key)
    # Round count depends on key size (AES-128/192/256).
    if N == 4:
        steps = 10
    elif N == 6:
        steps = 12
    elif N == 8 :
        steps = 14
    else:
        raise ValueError("Invalid Key Length")
    for i in range(1,steps+1):
        next_key = All_Keys[4*i:4*(i+1)]
        message = ByteSub(message,S_box)
        message = ShiftRow(message, True)
        # Per FIPS-197, the final round skips MixColumns.
        if i < steps:
            message = MixColumn(message, mixcol)
        message = AddRoundKey(message, next_key)
    message = MatrixToMessage(message)
    return message
# AES Decryption :
def AES_Inv(message,key,S_box, S_box_Inv, mixcol):
    """Decrypt one 16-character block by undoing the AES rounds in reverse.

    S_box is the forward box (needed for the key schedule), S_box_Inv the
    inverse box, and mixcol must be the InvMixColumns matrix.
    """
    message = MessageToMatrix(message)
    key = MessageToMatrix(key)
    # The key schedule is identical to encryption; only the order of use flips.
    All_Keys = key_expansion(key,S_box)
    Init_Key = All_Keys[0:4]
    N = len(key)
    if N == 4:
        steps = 10
    elif N == 6:
        steps = 12
    elif N == 8 :
        steps = 14
    else:
        raise ValueError("Invalid Key Length")
    # Walk the rounds backwards, inverting each encryption step.
    for i in range(steps,0,-1):
        next_key = All_Keys[4*i:4*(i+1)]
        message = AddRoundKey(message, next_key)
        # Mirror of encryption: the last encryption round had no MixColumns.
        if i < steps:
            message = MixColumn(message, mixcol)
        message = ShiftRow(message, False)
        message = ByteSub(message,S_box_Inv)
    # Reverse the First xor
    message = AddRoundKey(message,Init_Key)
    message = MatrixToMessage(message)
    return message
# Demo inputs (the well-known "Two One Nine Two" / "Thats my Kung Fu" AES-128 example).
message = "Two One Nine Two"
key = "Thats my Kung Fu"
#print(MessageToMatrix(message))
# NOTE(review): S_box, S_box_inv and MixColMatrix are referenced below but are
# not defined in this part of the file -- presumably defined elsewhere; confirm.
#ciphertext = AES(message,key,S_box,MixColMatrix)
#plaintext = AES_Inv(ciphertext, key, S_box, S_box_inv, MixColInverse)
#print(message)
#print(ciphertext)
#print(plaintext)
# This only does the minimum needed for TP2 : an AES_Box which uses blocks of
# 16 characters (i.e. 128 bits). To do exactly what was asked for TP1, it
# misses the part where you need to cut the message into 128 bits blocks.
| [
"noreply@github.com"
] | DenizSungurtekin.noreply@github.com |
821605a24dc98e2f6d96ac6410769972c52bcdd4 | fa5fb155ba4bc5f4335859b8a93b73be8c1a2abb | /tt.py | 7cb12887e5f30f3d9851b55e6de33ba0bb50c54c | [] | no_license | wujifan/test_allure | c9703a58d344bbb8825186f8c360595a5486506a | bf5cbb71b7a0781cba8d3a0da766592d680e8dc3 | refs/heads/master | 2023-03-31T09:44:10.391654 | 2021-04-07T09:26:44 | 2021-04-07T09:26:44 | 355,466,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from base.init__driver import ini_data
a = ini_data('test_data', ['name', 'exp_value']) | [
"wujifan4811@163.com"
] | wujifan4811@163.com |
31a6c319b7c69134890d2911c524ff347b6efc76 | 4109762775f6a465639550b6de36b50450a209ad | /strategy/blackjack/playercheatstrategy.py | 52da55b106d966347e46767a699c600de46c6a4c | [] | no_license | samuelcstewart/oosd | 7f8fb79f6cb5ea7a211d5f0ea6278a1eb475a534 | 5f15cdd34ec3db0a4fcf01be5e2cb00b7f6e221f | refs/heads/master | 2020-04-15T18:37:10.148250 | 2015-05-14T08:51:20 | 2015-05-14T08:51:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | import abc
from strategy import Strategy
class PlayerCheatStrategy(Strategy):
    """ Basic cheat strat, show the next card in deck """
    def hit(self):
        # Reveal the card that would be dealt next (end of the deck list).
        print("Next card: " + str(self.game_state.deck.cards[-1]))
        # Python 2 only: raw_input does not exist on Python 3.
        return raw_input("h to hit or s to stand: ") == 'h'
| [
"stewasc3@student.op.ac.nz"
] | stewasc3@student.op.ac.nz |
700e1bb227aa8d04f4608999e5cb91fc629ffc26 | 0bc1cf3ce50a035dc85b56e32bd930c91a8776c0 | /blog/migrations/0001_initial.py | 91ae6deba16376ca31124388427bb7421762a9a4 | [] | no_license | RiaLolwut/djangogirls | 7c2fa64a2824b48fbbafc2f980d654f511562063 | a81c14065bbf74ff04f5e8874b3acacc347651e9 | refs/heads/master | 2020-04-29T06:35:30.459881 | 2019-03-16T06:20:00 | 2019-03-16T06:20:00 | 175,921,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # Generated by Django 2.0.13 on 2019-03-16 03:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema: creates the blog ``Post`` model (auto-generated by Django)."""
    initial = True
    dependencies = [
        # Post.author is a FK to the user model, so that app must migrate first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"riaparishseo@gmail.com"
] | riaparishseo@gmail.com |
3b4b65765a6275e2b4fed60d9412aac3f7fb9665 | d12b59b33df5c467abf081d48e043dac70cc5a9c | /ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/passcriteria_985f11fda90dc3b8dac84a4a881b8740.py | 6920d6cfe8478b76037b42d0c156e50d2daa5519 | [
"MIT"
] | permissive | ajbalogh/ixnetwork_restpy | 59ce20b88c1f99f95a980ff01106bda8f4ad5a0f | 60a107e84fd8c1a32e24500259738e11740069fd | refs/heads/master | 2023-04-02T22:01:51.088515 | 2021-04-09T18:39:28 | 2021-04-09T18:39:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,492 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
# NOTE: auto-generated IxNetwork REST API binding -- keep edits to comments only;
# regeneration would overwrite manual changes.
class PassCriteria(Base):
    """This applies the Pass Criteria to each trial in the test and determines whether the trial passed or failed.
    The PassCriteria class encapsulates a required passCriteria resource which will be retrieved from the server every time the property is accessed.
    """

    __slots__ = ()
    _SDM_NAME = 'passCriteria'
    # Maps Python-side attribute names to the server-side SDM attribute names.
    _SDM_ATT_MAP = {
        'EnablePassFail': 'enablePassFail',
    }

    def __init__(self, parent):
        super(PassCriteria, self).__init__(parent)

    @property
    def EnablePassFail(self):
        """
        Returns
        -------
        - bool: If true, the pass fail criteria is set.
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnablePassFail'])
    @EnablePassFail.setter
    def EnablePassFail(self, value):
        self._set_attribute(self._SDM_ATT_MAP['EnablePassFail'], value)

    def update(self, EnablePassFail=None):
        """Updates passCriteria resource on the server.

        Args
        ----
        - EnablePassFail (bool): If true, the pass fail criteria is set.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def Apply(self):
        """Executes the apply operation on the server.

        Applies the specified Quick Test.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        return self._execute('apply', payload=payload, response_object=None)

    def ApplyAsync(self):
        """Executes the applyAsync operation on the server.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        return self._execute('applyAsync', payload=payload, response_object=None)

    def ApplyAsyncResult(self):
        """Executes the applyAsyncResult operation on the server.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        return self._execute('applyAsyncResult', payload=payload, response_object=None)

    def ApplyITWizardConfiguration(self):
        """Executes the applyITWizardConfiguration operation on the server.

        Applies the specified Quick Test.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)

    def GenerateReport(self):
        """Executes the generateReport operation on the server.

        Generate a PDF report for the last succesfull test run.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        return self._execute('generateReport', payload=payload, response_object=None)

    def Run(self, *args, **kwargs):
        """Executes the run operation on the server.

        Starts the specified Quick Test and waits for its execution to finish.

        The IxNetwork model allows for multiple method Signatures with the same name while python does not.

        run(InputParameters=string)list
        -------------------------------
        - InputParameters (str): The input arguments of the test.
        - Returns list(str): This method is synchronous and returns the result of the test.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Positional args become Arg2..ArgN of the server-side signature.
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('run', payload=payload, response_object=None)

    def Start(self, *args, **kwargs):
        """Executes the start operation on the server.

        Starts the specified Quick Test.

        The IxNetwork model allows for multiple method Signatures with the same name while python does not.

        start(InputParameters=string)
        -----------------------------
        - InputParameters (str): The input arguments of the test.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('start', payload=payload, response_object=None)

    def Stop(self):
        """Executes the stop operation on the server.

        Stops the currently running Quick Test.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        return self._execute('stop', payload=payload, response_object=None)

    def WaitForTest(self):
        """Executes the waitForTest operation on the server.

        Waits for the execution of the specified Quick Test to be completed.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        return self._execute('waitForTest', payload=payload, response_object=None)
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
f26a1a8661049360f37f28aae77619d1416b9783 | 25d8dd91bcdb2ae02e260d81a4c6e79417ee213e | /hw/project/text_classification/mapreduce/knn/map.py | fd155d3551f0310add2108b0b3053e14538b4875 | [] | no_license | huhuk/FBDP | 4b885c2cccda24ffea4edcaad90e4495a6870819 | 95a4d3271c95d13db9ff1f7396177e34148bc970 | refs/heads/master | 2021-04-25T15:37:28.086901 | 2018-02-28T07:25:50 | 2018-02-28T07:25:50 | 109,667,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | #!/usr/bin/python3
import sys
import pickle
import numpy as np
import scipy as sp
from sklearn.feature_extraction.text import TfidfVectorizer
k = 3  # number of nearest neighbours consulted for each prediction
def load(filename):
    """Unpickle and return the object stored at *filename*."""
    with open(filename, 'rb') as fh:
        return pickle.load(fh)
# count_vec = load('./tfidf.model')
# Pre-trained artefacts pickled by the training job; shipped alongside this mapper.
x_train = load('./x_train.model')
y_train = load('./y_train.model')
x_test = load('./x_test.model')
n = x_train.shape[0]
# Column of ones used to broadcast a single sparse test row against all n training rows.
one = sp.sparse.csr_matrix(np.ones(n).reshape((n,1)))
def get_dist(x):
    """Return the k smallest (distance, label) pairs between sparse row *x* and x_train."""
    # one * x replicates the single test row n times so the whole training
    # matrix can be subtracted in one vectorised operation.
    dists = sp.sparse.linalg.norm((x_train - one * x), axis=1)
    return sorted(zip(dists, y_train))[:k]
def get_cl(x):
    """Predict a class for sparse row *x* by majority vote among its k neighbours.

    With k == 3: if all three neighbours carry distinct labels, fall back to
    the label of the single closest neighbour; otherwise pick the label that
    appears more than once.
    NOTE(review): the logic assumes k == 3 -- for other k values `ret` can be
    left unassigned (UnboundLocalError); confirm before changing k.
    """
    aDict = dict()
    cls = get_dist(x)
    # Tally votes per label.
    for j, i in cls:
        if i not in aDict:
            aDict[i] = 1
        else:
            aDict[i] += 1
    if len(aDict) == 3:
        # Three-way tie: trust the nearest neighbour (cls is distance-sorted).
        ret = cls[0][1]
    else:
        for i, j in aDict.items():
            if int(j) > 1:
                ret = i
    return ret
# Hadoop-streaming mapper loop: each stdin line is the true label of the
# i-th test row; emit "<true_label> <predicted_label>" per row.
for i, line in enumerate(sys.stdin):
    y= line.strip()
    x = x_test[i]
    print(y, get_cl(x))
| [
"huhu_qs@163.com"
] | huhu_qs@163.com |
ec356e53c4d259f06b48074389ec9b57fb66f575 | 199522cb43b4e2c7e3bf034a0e604794258562b1 | /0x03-python-data_structures/7-add_tuple.py | 96d715528f3d23cdf3d725a9838247a97a8e4635 | [] | no_license | jormao/holbertonschool-higher_level_programming | a0fd92f2332f678e6fe496057c04f2995d24a4ac | 360b3a7294e9e0eadcadb57d4c48c22369c05111 | refs/heads/master | 2020-09-29T01:36:20.094209 | 2020-05-15T03:27:06 | 2020-05-15T03:27:06 | 226,915,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | #!/usr/bin/python3
def add_tuple(tuple_a=(), tuple_b=()):
    """Element-wise sum of two tuples treated as 2-D points.

    Missing coordinates count as 0; coordinates beyond the first two are
    ignored. Always returns a 2-tuple.
    """
    ax, ay = (tuple(tuple_a) + (0, 0))[:2]
    bx, by = (tuple(tuple_b) + (0, 0))[:2]
    return (ax + bx, ay + by)
| [
"jormao@gmail.com"
] | jormao@gmail.com |
4eff2793d09f4ab95e340f10730da9548630656b | f348191ea2ee92f4154f9f2819dbc11ba209dc8c | /app.py | 74b0f6f02805b342507aef2101c130969aa6cbb6 | [
"MIT"
] | permissive | ish-u/sparrow | f9be3e6bcd2df4a655fc18f4938fe7c95f2f09c6 | 18fb031a96c5443e6922c3b7d54f79cc4eb141bf | refs/heads/master | 2020-12-03T12:08:21.958397 | 2020-01-05T09:58:24 | 2020-01-05T09:58:24 | 231,310,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,732 | py | from hashlib import md5
import urllib, hashlib
from datetime import datetime
from flask import Flask, escape, request, render_template, redirect ,session ,flash
import sqlite3
from passlib.hash import sha256_crypt
from functools import wraps
from flask_avatars import Avatars
def login_required(f):
    """Decorator: send anonymous visitors to /login, otherwise run the view."""
    @wraps(f)
    def wrap(*args, **kwargs):
        if 'user_id' not in session:
            return redirect('/login')
        return f(*args, **kwargs)
    return wrap
app = Flask(__name__)
avatars = Avatars(app)
# NOTE(review): hard-coded secret key -- acceptable for a demo, unsafe in production.
app.secret_key = 'LOL'
@app.route('/')
@login_required
def hello():
    """Profile page for the logged-in user: their info row plus their own posts.

    Fix: the original left unreachable commit/close statements after the
    return, so the connection was never closed; cleanup now runs in finally.
    """
    conn = sqlite3.connect('users')
    try:
        c = conn.cursor()
        info = c.execute("SELECT * FROM info WHERE user=:user",{"user":session["user_id"]}).fetchone()
        user = session["user_id"]
        # NOTE(review): per-user table named after the username; the f-string is
        # injection-prone -- safe only if usernames were validated at signup.
        # Materialize rows so the template can iterate after the connection closes.
        status = list(c.execute(f"SELECT * FROM {user}"))
        return render_template("index.html", info=info, status=status)
    finally:
        conn.close()
@app.route('/register',methods=['GET', 'POST'])
def register():
    """Sign-up: validate the form, store the hashed password, create a per-user post table.

    Fixes over the original:
    - BUG: the duplicate-username check compared against the display name
      (``name``) instead of the submitted username (``u_name``).
    - BUG: the age was computed from ``dob`` before the empty-field check, so
      an empty date crashed before validation; it is now computed afterwards.
    - The DB connection leaked on every early return; cleanup is in finally.
    """
    if request.method == 'GET':
        return render_template("register.html")
    u_name = request.form.get("username")
    name = request.form.get("name")
    password = request.form.get("password")
    email = request.form.get("email")
    dob = request.form.get("age")
    if not name or not password or not email or not u_name or not dob:
        flash("ONE OR MORE FEILD ARE NOT LEFT EMPTY DURING SUBMISSION")
        return redirect('/register')
    if password != request.form.get("confirmation"):
        flash("PASSWORDS DON'T MATCH")
        return redirect('/register')
    age = datetime.now().year - int(dob[0:4])
    conn = sqlite3.connect('users')
    try:
        c = conn.cursor()
        if c.execute("SELECT * FROM info WHERE user =:user",{"user":u_name}).fetchone() is not None:
            flash("USERNAME ALREADY EXISTS")
            return redirect("/register")
        if c.execute("SELECT * FROM info WHERE email =:email",{"email":email}).fetchone() is not None:
            flash("EMAIL ALREADY EXISTS")
            return redirect("/register")
        passw = sha256_crypt.hash(password)
        # Gravatar-style avatar hash derived from the e-mail address.
        avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
        c.execute("INSERT INTO info(user,pass,email,avatar,dob,age,name) VALUES(?,?,?,?,?,?,?)", (u_name, passw,email,avatar_hash,dob,age,name))
        # NOTE(review): table name built from user input -- SQL-injection risk;
        # usernames should be restricted to identifier-safe characters.
        c.execute(f"CREATE TABLE {u_name} ('s_no' INTEGER PRIMARY KEY NOT NULL, 'post' TEXT NOT NULL, 'ddmmyy' DATETIME NOT NULL)")
        conn.commit()
        return redirect("/")
    finally:
        conn.close()
@app.route('/home')
def home():
    # Alias: /home is just the index page.
    return redirect("/")
@app.route('/login',methods=['POST','GET'])
def login():
    """Log a user in: verify the sha256-crypt password hash and store the session id.

    Fix: the DB connection leaked on the "unknown user" and "wrong password"
    early returns; cleanup now runs in finally.
    """
    if request.method != 'POST':
        return render_template("login.html")
    session.clear()
    name = request.form.get("username")
    password = request.form.get("password")
    if not name or not password:
        flash("ONE OR MORE FIELDS ARE LEFT EMPTY")
        return redirect('/login')
    conn = sqlite3.connect('users')
    try:
        c = conn.cursor()
        data = c.execute("SELECT * FROM info WHERE user = :name", {"name":name}).fetchone()
        if data is None:
            flash("BRU.. u have to register to login, that's how these things work i guess")
            return redirect('/login')
        passw = data[1]
        if not sha256_crypt.verify(password, passw):
            flash("INCORRECT PASSWORD")
            return redirect("/login")
        session["user_id"] = data[0]
        return redirect("/")
    finally:
        conn.close()
@app.route('/logout',methods=['POST','GET'])
def logout():
    # Dropping the session is all that is needed to log out.
    session.clear()
    return redirect('/')
@app.route('/feed' ,methods=['GET','POST'])
@login_required
def feed():
    """Global feed: on POST store the new post (per-user table + shared status
    table), then -- for both methods -- render all posts.

    Fixes: the original duplicated the render logic across branches and left
    unreachable commit/close statements after each return, leaking the
    connection; the branches are unified and cleanup runs in finally.
    """
    conn = sqlite3.connect('users')
    try:
        c = conn.cursor()
        if request.method == 'POST':
            status = request.form.get("status")
            user = session["user_id"]
            avatar = c.execute("SELECT avatar FROM info WHERE user=:user",{"user":user}).fetchone()[0]
            # NOTE(review): table name from the session username (f-string SQL).
            c.execute(f"INSERT INTO {user}(post,ddmmyy) VALUES(?,?)",(status,datetime.now()))
            c.execute("INSERT INTO status(user,post,ddmmyy,avatar) VALUES(?,?,?,?)",(user,status,datetime.now(),avatar))
            conn.commit()
        data = list(c.execute("SELECT * FROM status"))
        return render_template("feed.html", data=data)
    finally:
        conn.close()
@app.route('/people' ,methods=['GET','POST'])
@login_required
def people():
    """User directory: list everyone, or only the user matching the search term on POST.

    Fixes: branches unified; the original's commit/close statements after each
    return were unreachable, so the connection leaked -- cleanup is in finally.
    """
    conn = sqlite3.connect('users')
    try:
        c = conn.cursor()
        if request.method == 'POST':
            search = request.form.get("search")
            data = list(c.execute("SELECT * FROM info WHERE user= :user",{"user":search}))
        else:
            data = list(c.execute("SELECT * FROM info"))
        return render_template("find_people.html", data=data)
    finally:
        conn.close()
@app.route('/user' ,methods=['GET','POST'])
@login_required
def user():
    """Show another user's profile (info row plus their posts).

    Fix: the connection was never closed (statements after return were
    unreachable); cleanup now runs in finally.
    NOTE(review): only POST is handled -- a plain GET falls through and
    returns None (Flask 500); confirm all callers use the POST form.
    """
    if request.method == 'POST':
        target = request.form.get("button")
        if target == session["user_id"]:
            # Viewing yourself: use the index page instead.
            return redirect('/')
        conn = sqlite3.connect('users')
        try:
            c = conn.cursor()
            info = c.execute("SELECT * FROM info WHERE user=:user",{"user":target}).fetchone()
            # NOTE(review): per-user table lookup via f-string SQL.
            status = list(c.execute(f"SELECT * FROM {target}"))
            return render_template("user.html", info=info, status=status)
        finally:
            conn.close()
@app.route('/status',methods=['POST','GET'])
@login_required
def redirect_status():
    # Static page hosting the status-edit form (posted to /edit).
    return render_template('status.html')
@app.route('/edit',methods=['POST','GET'])
@login_required
def edit():
    """Persist the submitted profile status for the current user, then go home."""
    new_status = request.form.get("edit")
    db = sqlite3.connect('users')
    cur = db.cursor()
    cur.execute("UPDATE info SET status = :status WHERE user =:user",
                {"status": new_status, "user": session["user_id"]})
    db.commit()
    db.close()
    return redirect('/')
#if __name__ == "__main__":
# app.run(debug=True)
| [
"anmolgupta520@gmail.com"
] | anmolgupta520@gmail.com |
b9bb003ddc62e1d45453d22efe039b1eb758af9c | 20ae4d697181fb9810e13213313f97071e28e8ef | /parse/__main__.py | 5fa9d49de4d1330cc22a9f8513310b2c9a4cc402 | [] | no_license | software-opal/nz-local-election | 79afb0ad34a81f5be5018abe0062f0c159c4156a | 904cd985ef9b225bf3c92c82fcbd66c68b1aa43d | refs/heads/master | 2020-08-03T02:16:59.057271 | 2019-09-28T06:19:21 | 2019-09-28T06:19:21 | 211,593,768 | 0 | 0 | null | 2020-06-07T08:06:50 | 2019-09-29T02:57:20 | Python | UTF-8 | Python | false | false | 2,985 | py | import itertools
import json
import pathlib
from . import (
COUNCILLORS_URL_FORMATS,
DHB_URLS,
MAYOR_URLS,
REGIONAL_COUNCILLORS_URL_FORMATS,
)
from .load import InvalidPage, parse
from .visit import Requester
ROOT = pathlib.Path(__file__).parent.parent
DATA = ROOT / "public/data"
LOOKUP = ROOT / "src/assets/data_lookup.json"
COMBINED = DATA / "combined.json"
def candidates_json_safe(candidates):
return [c.as_dict() for c in candidates]
class Data:
def __init__(self):
self.r = Requester()
self.data = {}
self.grouped = {}
self.named = {}
def write(self):
LOOKUP.write_text(
json.dumps(
{"grouped": self.grouped, "named": self.named}, sort_keys=True, indent=2
)
)
COMBINED.write_text(json.dumps(self.data, sort_keys=True, indent=2))
def persist(self, url):
print(f"Requesting {url}")
base_url, response = self.r.request(url)
print(f" Parsing {len(response)} bytes of response")
election = parse(base_url, response)
fname = f"{election.id}.json"
self.data[election.id] = election.as_dict()
self.grouped.setdefault(election.type, {}).setdefault(election.region, {})[
election.electorate
] = election.id
self.named[election.id] = fname
(DATA / fname).write_text(
json.dumps(election.as_dict(), sort_keys=True, indent=2)
)
print(f" Written data to {fname}\n")
return election, fname
def main():
DATA.mkdir(parents=True, exist_ok=True)
d = Data()
for url_group in [DHB_URLS, MAYOR_URLS]:
for url in url_group:
try:
d.persist(url)
except InvalidPage:
print("Page didn't represent an election")
pass
d.write()
for url_format_group in [
COUNCILLORS_URL_FORMATS,
REGIONAL_COUNCILLORS_URL_FORMATS,
]:
for format in url_format_group:
old_election_region = None
for i in itertools.count(1):
url = format.format(i)
try:
election, _ = d.persist(url)
except InvalidPage:
print("Page didn't represent an election")
break
if old_election_region is not None:
assert (
old_election_region == election.region
), f"{old_election_region} != {election.region}"
old_election_region = election.region
d.write()
akl_local_board_format = "https://www.policylocal.nz/candidates/CB_076{:02}"
for i in itertools.count(3):
url = akl_local_board_format.format(i)
try:
d.persist(url)
except InvalidPage:
print("Page didn't represent an election")
break
d.write()
if __name__ == "__main__":
main()
| [
"leesdolphin@gmail.com"
] | leesdolphin@gmail.com |
4494b119785b3c4ad00149d3407d60b1f571ca22 | 4c7e44b17782f1f1823238cf874ff298900d98ef | /config.py | c2878ccab1deee984f877ba8569500e6dc40ab83 | [] | no_license | dynamodenis/blog-arena | c2195262f2dec199b159e6383a2b5f14a4046799 | 10be4fffe63c061b3c67c664458b45eee00e8b95 | refs/heads/master | 2022-06-29T18:36:06.261840 | 2020-05-09T07:10:48 | 2020-05-09T07:10:48 | 262,506,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | class Config:
pass
class ProdConfig(Config):
pass
class DevConfig(Config):
DEBUG=True
class TestConfig(Config):
pass
config_options={
'development':DevConfig,
'production':ProdConfig,
'test':TestConfig
} | [
"dmbugua66@gmail.com"
] | dmbugua66@gmail.com |
9a29224011fd3cca3b90735ed27fe0b1c8ed6bb9 | 699b767e854162231914f4bce177382a9880f221 | /LinkedList/AddTwoNumbers.py | 7f503cc9821d09b900119e244d002187998d27d2 | [] | no_license | XiwangLi/LeetcodeArchive | a66dc32f0a7248ff05cbe09dd0095191fefc602f | 3de4a48f9f7de2970cf02751c1620281bae0947d | refs/heads/master | 2021-04-28T02:57:55.604505 | 2019-02-28T01:07:51 | 2019-02-28T01:07:51 | 122,128,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
head=ListNode(0)
curr=head
val=0
ten=0
while l1 or l2 or val:
if l1:
val+=l1.val
l1=l1.next
if l2:
val+=l2.val
l2=l2.next
curr.next=ListNode(val%10)
curr=curr.next
val=val//10
return head.next | [
"xiwangli2010@gmail.com"
] | xiwangli2010@gmail.com |
954591c4ca9b4b8c04d67211df68bdaf6f07a24a | fdf616efcf505843621f830879ca3ff44e296772 | /myproject/accounts/tests/test_view_signup.py | 2c58c0ae28411268e1e00d0e5742581d73da65ba | [] | no_license | dym0080/learn-django | 5cbab1c9696638ffe47a3335cf33d7638fe77523 | 2d9450098f516ed887de0a953b0945ee5047d9f5 | refs/heads/master | 2022-05-05T12:00:23.127079 | 2020-01-09T07:52:37 | 2020-01-09T07:52:37 | 228,332,281 | 0 | 0 | null | 2022-04-22T22:58:00 | 2019-12-16T07:53:45 | Python | UTF-8 | Python | false | false | 2,749 | py | from django.urls import reverse, resolve
from django.contrib.auth.models import User
# from django.contrib.auth.forms import UserCreationForm
from django.test import TestCase
from ..views import signup
from ..forms import SignUpForm
class SignUpTests(TestCase):
    """GET /accounts/signup/ renders the signup form with the expected fields."""
    def setUp(self):
        url = reverse('signup')
        self.response = self.client.get(url)

    def test_signup_status_code(self):
        self.assertEqual(self.response.status_code, 200)

    def test_signup_url_resolves_signup_view(self):
        view = resolve('/accounts/signup/')
        self.assertEqual(view.func, signup)

    def test_csrf(self):
        self.assertContains(self.response, 'csrfmiddlewaretoken')

    def test_contains_form(self):
        form = self.response.context.get('form')
        self.assertIsInstance(form, SignUpForm)

    def test_form_inputs(self):
        '''
        The view must contain five inputs: csrf, username, email,
        password1, password2
        '''
        self.assertContains(self.response, '<input', 5)
        self.assertContains(self.response, 'type="text"', 1)
        self.assertContains(self.response, 'type="email"', 1)
        self.assertContains(self.response, 'type="password"', 2)
class SuccessfulSignUpTests(TestCase):
    """A valid signup POST creates the user, logs them in and redirects home."""
    def setUp(self):
        url = reverse('signup')
        data = {
            'username': 'john',
            'email': 'john@doe.com',
            'password1': 'abcdef123456',
            'password2': 'abcdef123456'
        }
        self.response = self.client.post(url, data)
        self.home_url = reverse('home')

    def test_redirection(self):
        '''
        A valid form submission should redirect the user to the home page
        '''
        self.assertRedirects(self.response, self.home_url)

    def test_user_creation(self):
        self.assertTrue(User.objects.exists())

    def test_user_authentication(self):
        '''
        Create a new request to an arbitrary page.
        The resulting response should now have a `user` to its context,
        after a successful sign up.
        '''
        response = self.client.get(self.home_url)
        user = response.context.get('user')
        self.assertTrue(user.is_authenticated)
class InvalidSignUpTests(TestCase):
    """An empty signup POST re-renders the form with errors and creates no user."""
    def setUp(self):
        url = reverse('signup')
        self.response = self.client.post(url, {})

    def test_signup_status_code(self):
        '''
        An invalid form submission should return to the same page
        '''
        self.assertEqual(self.response.status_code, 200)

    def test_form_errors(self):
        form = self.response.context.get('form')
        self.assertTrue(form.errors)

    def test_dont_create_user(self):
        self.assertFalse(User.objects.exists())
"308960474@qq.com"
] | 308960474@qq.com |
3d97b330f77024758577eaa3b15f7e554fac1016 | d880f73ae1b791f73789ab51916e5eb74c6c2a23 | /aiologger/filters.py | 87cbba86d9771928ce63e25b5a45686b14645b7b | [
"MIT"
] | permissive | decaz/aiologger | d16e651f1358416e51f84d4103fb037f39f085c8 | 94e9c126280d1ede315a6fc3531ac17c21a2c33a | refs/heads/master | 2020-05-01T05:00:02.343984 | 2019-03-03T16:57:51 | 2019-03-03T16:57:51 | 177,288,933 | 0 | 0 | MIT | 2019-03-23T12:56:44 | 2019-03-23T12:56:43 | null | UTF-8 | Python | false | false | 146 | py | import logging
class StdoutFilter(logging.Filter):
    """Pass only DEBUG and INFO records (the ones destined for stdout)."""

    def filter(self, record):
        # WARNING and above are expected to be routed elsewhere (stderr).
        return record.levelno in {logging.DEBUG, logging.INFO}
| [
"magalhaesmartins@icloud.com"
] | magalhaesmartins@icloud.com |
5e3d18247eb1b3e3f1789add50668361ee4ebffd | f894c0969d30437b27ef4d0d81a99660bab10716 | /learn_python/Day1/assignment1/quickpython.py | ee38744c52832b0503f6d108f472fea9ba41d649 | [] | no_license | AnandSankarR/Data_BootCamp_2018 | 364357e73a47e60990fd43dd37f427823f0b40ab | c5e4f818722864f8f56df5387a9f9cbe849ae3e4 | refs/heads/master | 2021-05-03T13:01:31.131736 | 2018-02-22T00:56:25 | 2018-02-22T00:56:25 | 120,506,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | print("This File Works")
| [
"anandsankar.r@gmail.com"
] | anandsankar.r@gmail.com |
23c8d69f239e68820d41f4185adcd5f0106ad42c | 799a1bbafe9ceb6fcf6530d176633a7f97980dad | /rosExploration/rrt_exploration/scripts/functions.py | ff7a90d07e7e15a5893aa3a916f02fcd97deeea5 | [
"MIT"
] | permissive | dingjianfeng/rosExplorationNew | e6598ed4b0907d7bc8740acf4ea05d8bae9a1524 | 53b8b6bcdd3372c5e6fbaecae9f66f266dcf70c0 | refs/heads/master | 2021-09-01T06:38:27.366634 | 2017-12-25T11:45:22 | 2017-12-25T11:45:22 | 115,333,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,206 | py | #!/usr/bin/env python
#coding=utf-8
import rospy
import tf
from numpy import array
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from nav_msgs.srv import GetPlan
from geometry_msgs.msg import PoseStamped
from numpy import floor
from numpy.linalg import norm
from numpy import inf
#________________________________________________________________________________
class robot:
    # Class-level (shared) ROS message objects reused by every instance.
    # NOTE(review): being class attributes, these are shared across instances --
    # with multiple robot objects one robot's goal/start/end would overwrite
    # another's. Confirm this is intended for the single-robot case only.
    goal = MoveBaseGoal()
    start = PoseStamped()
    end = PoseStamped()
def __init__(self,name):
self.assigned_point=[]
self.name=name
self.global_frame=rospy.get_param('~global_frame','/map') #by ding
self.listener=tf.TransformListener()
self.listener.waitForTransform(self.global_frame, '/base_footprint', rospy.Time(0),rospy.Duration(10.0)) #by ding 不知道这个self.name要不要去掉
cond=0;
while cond==0:
try:
(trans,rot) = self.listener.lookupTransform(self.global_frame, '/base_footprint', rospy.Time(0)) #by ding
cond=1
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
cond==0
self.position=array([trans[0],trans[1]])
self.assigned_point=self.position
self.client=actionlib.SimpleActionClient('/move_base', MoveBaseAction) #by ding
self.client.wait_for_server()
robot.goal.target_pose.header.frame_id=self.global_frame
robot.goal.target_pose.header.stamp=rospy.Time.now()
rospy.wait_for_service('/move_base_node/GlobalPlanner/make_plan') #by ding 学习move_base 了解/NavfnROS/make_plan
self.make_plan = rospy.ServiceProxy('/move_base_node/GlobalPlanner/make_plan', GetPlan) #by ding
robot.start.header.frame_id=self.global_frame
robot.end.header.frame_id=self.global_frame
#获取位置坐标
def getPosition(self):
cond=0;
while cond==0:
try:
(trans,rot) = self.listener.lookupTransform(self.global_frame, '/base_footprint', rospy.Time(0)) #by ding
cond=1
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
cond==0
self.position=array([trans[0],trans[1]])
return self.position
#发送目标点
def sendGoal(self,point):
robot.goal.target_pose.pose.position.x=point[0]
robot.goal.target_pose.pose.position.y=point[1]
robot.goal.target_pose.pose.orientation.w = 1.0
self.client.send_goal(robot.goal)
self.assigned_point=array(point)
rospy.loginfo("the functions.py send goal"+robot.goal) #by ding
#取消目标点
def cancelGoal(self):
self.client.cancel_goal()
self.assigned_point=self.getPosition()
def getState(self):
return self.client.get_state()
def makePlan(self,start,end):
robot.start.pose.position.x=start[0]
robot.start.pose.position.y=start[1]
robot.end.pose.position.x=end[0]
robot.end.pose.position.y=end[1]
start=self.listener.transformPose('/map', robot.start) #by ding
end=self.listener.transformPose('/map', robot.end) #by ding
#plan=self.make_plan(start = start, goal = end, tolerance = 0.0)
plan=self.make_plan(start = start, goal = end, tolerance = 0.1) #tolerance的单位是meter by ding
rospy.loginfo("the functions.py makeplan") #by ding
return plan.plan.poses
#________________________________________________________________________________
def index_of_point(mapData,Xp): #个人理解,可能有偏差,坐标点的索引
resolution=mapData.info.resolution
Xstartx=mapData.info.origin.position.x
Xstarty=mapData.info.origin.position.y
width=mapData.info.width
Data=mapData.data
index=int( ( floor((Xp[1]-Xstarty)/resolution)*width)+( floor((Xp[0]-Xstartx)/resolution) ))
return index
def point_of_index(mapData,i): #索引处点的坐标
y=mapData.info.origin.position.y+(i/mapData.info.width)*mapData.info.resolution
x=mapData.info.origin.position.x+(i-(i/mapData.info.width)*(mapData.info.width))*mapData.info.resolution
return array([x,y])
#________________________________________________________________________________
def informationGain(mapData,point,r): #计算点的信息量
infoGain=0;
index=index_of_point(mapData,point)
r_region=int(r/mapData.info.resolution)
init_index=index-r_region*(mapData.info.width+1)
for n in range(0,2*r_region+1):
start=n*mapData.info.width+init_index
end=start+2*r_region
limit=((start/mapData.info.width)+2)*mapData.info.width
for i in range(start,end+1):
if (i>=0 and i<limit and i<len(mapData.data)):
if(mapData.data[i]==-1 and norm(array(point)-point_of_index(mapData,i))<=r):
infoGain+=1
return infoGain*(mapData.info.resolution**2)
#________________________________________________________________________________
def discount(mapData,assigned_pt,centroids,infoGain,r):
index=index_of_point(mapData,assigned_pt)
r_region=int(r/mapData.info.resolution)
init_index=index-r_region*(mapData.info.width+1)
for n in range(0,2*r_region+1):
start=n*mapData.info.width+init_index
end=start+2*r_region
limit=((start/mapData.info.width)+2)*mapData.info.width
for i in range(start,end+1):
if (i>=0 and i<limit and i<len(mapData.data)):
for j in range(0,len(centroids)):
current_pt=centroids[j]
if(mapData.data[i]==-1 and norm(point_of_index(mapData,i)-current_pt)<=r and norm(point_of_index(mapData,i)-assigned_pt)<=r):
infoGain[j]-=1 #this should be modified, subtract the area of a cell, not 1
return infoGain
#________________________________________________________________________________
def pathCost(path):
if (len(path)>0):
i=len(path)/2
p1=array([path[i-1].pose.position.x,path[i-1].pose.position.y])
p2=array([path[i].pose.position.x,path[i].pose.position.y])
return norm(p1-p2)*(len(path)-1)
else:
return inf
#________________________________________________________________________________
def unvalid(mapData,pt):
index=index_of_point(mapData,pt)
r_region=5
init_index=index-r_region*(mapData.info.width+1)
for n in range(0,2*r_region+1):
start=n*mapData.info.width+init_index
end=start+2*r_region
limit=((start/mapData.info.width)+2)*mapData.info.width
for i in range(start,end+1):
if (i>=0 and i<limit and i<len(mapData.data)):
if(mapData.data[i]==1):
return True
return False
#________________________________________________________________________________
def Nearest(V,x):
n=inf
i=0
for i in range(0,V.shape[0]):
n1=norm(V[i,:]-x)
if (n1<n):
n=n1
result=i
return result
#________________________________________________________________________________
def Nearest2(V,x):
n=inf
result=0
for i in range(0,len(V)):
n1=norm(V[i]-x)
if (n1<n):
n=n1
return i
#________________________________________________________________________________
def gridValue(mapData,Xp):
resolution=mapData.info.resolution
Xstartx=mapData.info.origin.position.x
Xstarty=mapData.info.origin.position.y
width=mapData.info.width
Data=mapData.data
# returns grid value at "Xp" location
#map data: 100 occupied -1 unknown 0 free
index=( floor((Xp[1]-Xstarty)/resolution)*width)+( floor((Xp[0]-Xstartx)/resolution) )
if int(index) < len(Data):
return Data[int(index)]
else:
return 100
| [
"623395241@qq.com"
] | 623395241@qq.com |
b6061c81fb9c14cfe8a4b4a93e891fc90327de11 | 38444340385ab91a9148b10db3a981246b4496ff | /app/forms.py | cf23a56282b7fbe1de24973ded5f0f3faf55254e | [] | no_license | dannzii/info3180-lab3 | e27d7e82b18c27402f4e32ff2c8857f72b728b44 | 9a8c14c83ac8bad1c1895e05ebf03430caed6464 | refs/heads/master | 2020-04-22T06:15:11.017922 | 2019-02-14T20:28:16 | 2019-02-14T20:28:16 | 170,184,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField
from wtforms.validators import DataRequired, Email
class ContactForm(FlaskForm):
name = StringField('Name', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
subject = StringField('Subject', validators=[DataRequired()])
Text_Area = TextAreaField('Message', validators=[DataRequired()])
| [
"34076867+dannzii@users.noreply.github.com"
] | 34076867+dannzii@users.noreply.github.com |
80540c5cae67b520926b96635014c4e26aefccd2 | 2f1f7d0711e054a96f10e849bdac5efcb45c4f39 | /deps/v8/SConstruct | c7b6cdd9f20818e0de94545202ba89fd4188bb46 | [
"BSD-3-Clause",
"MIT"
] | permissive | skyformat99/lbtt | ef25d0054ec539d6d77e25e5b0ce8febc4fdc0e8 | 6cf6418a639cc339615c3ecc6e78e1939a9cc1cb | refs/heads/master | 2021-05-27T18:13:03.711188 | 2009-09-12T01:17:41 | 2009-09-12T01:17:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,173 | # Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import platform
import re
import sys
import os
from os.path import join, dirname, abspath
from types import DictType, StringTypes
root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
import js2c, utils
# ANDROID_TOP is the top of the Android checkout, fetched from the environment
# variable 'TOP'. You will also need to set the CXX, CC, AR and RANLIB
# environment variables to the cross-compiling tools.
ANDROID_TOP = os.environ.get('TOP')
if ANDROID_TOP is None:
ANDROID_TOP=""
# TODO: Sort these issues out properly but as a temporary solution for gcc 4.4
# on linux we need these compiler flags to avoid crashes in the v8 test suite
# and avoid dtoa.c strict aliasing issues
if os.environ.get('GCC_VERSION') == '44':
GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp']
GCC_DTOA_EXTRA_CCFLAGS = ['-fno-strict-aliasing']
else:
GCC_EXTRA_CCFLAGS = []
GCC_DTOA_EXTRA_CCFLAGS = []
ANDROID_FLAGS = ['-march=armv5te',
'-mtune=xscale',
'-msoft-float',
'-fpic',
'-mthumb-interwork',
'-funwind-tables',
'-fstack-protector',
'-fno-short-enums',
'-fmessage-length=0',
'-finline-functions',
'-fno-inline-functions-called-once',
'-fgcse-after-reload',
'-frerun-cse-after-loop',
'-frename-registers',
'-fomit-frame-pointer',
'-fno-strict-aliasing',
'-finline-limit=64',
'-MD']
ANDROID_INCLUDES = [ANDROID_TOP + '/bionic/libc/arch-arm/include',
ANDROID_TOP + '/bionic/libc/include',
ANDROID_TOP + '/bionic/libstdc++/include',
ANDROID_TOP + '/bionic/libc/kernel/common',
ANDROID_TOP + '/bionic/libc/kernel/arch-arm',
ANDROID_TOP + '/bionic/libm/include',
ANDROID_TOP + '/bionic/libm/include/arch/arm',
ANDROID_TOP + '/bionic/libthread_db/include',
ANDROID_TOP + '/frameworks/base/include',
ANDROID_TOP + '/system/core/include']
ANDROID_LINKFLAGS = ['-nostdlib',
'-Bdynamic',
'-Wl,-T,' + ANDROID_TOP + '/build/core/armelf.x',
'-Wl,-dynamic-linker,/system/bin/linker',
'-Wl,--gc-sections',
'-Wl,-z,nocopyreloc',
'-Wl,-rpath-link=' + ANDROID_TOP + '/out/target/product/generic/obj/lib',
ANDROID_TOP + '/out/target/product/generic/obj/lib/crtbegin_dynamic.o',
ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.2.1/lib/gcc/arm-eabi/4.2.1/interwork/libgcc.a',
ANDROID_TOP + '/out/target/product/generic/obj/lib/crtend_android.o'];
LIBRARY_FLAGS = {
'all': {
'CPPDEFINES': [''],
'CPPPATH': [join(root_dir, 'src')],
'regexp:native': {
'CPPDEFINES': ['V8_NATIVE_REGEXP']
},
'mode:debug': {
'CPPDEFINES': ['V8_ENABLE_CHECKS']
}
},
'gcc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
},
'mode:debug': {
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'],
'os:android': {
'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
'CCFLAGS': ['-mthumb']
}
},
'mode:release': {
'CCFLAGS': ['-O3', '-fomit-frame-pointer', '-fdata-sections',
'-ffunction-sections'],
'os:android': {
'CCFLAGS': ['-mthumb', '-Os'],
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG', 'ENABLE_DEBUGGER_SUPPORT']
}
},
'os:linux': {
'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS,
'library:shared': {
'CPPDEFINES': ['V8_SHARED'],
'LIBS': ['pthread']
}
},
'os:macos': {
'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
},
'os:freebsd': {
'CPPPATH' : ['/usr/local/include'],
'LIBPATH' : ['/usr/local/lib'],
'CCFLAGS': ['-ansi'],
},
'os:win32': {
'CCFLAGS': ['-DWIN32'],
'CXXFLAGS': ['-DWIN32'],
},
'os:android': {
'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
'CCFLAGS': ANDROID_FLAGS,
'WARNINGFLAGS': ['-Wall', '-Wno-unused', '-Werror=return-type',
'-Wstrict-aliasing=2'],
'CPPPATH': ANDROID_INCLUDES,
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32'],
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'arch:arm': {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM']
},
'simulator:arm': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64'],
},
'prof:oprofile': {
'CPPDEFINES': ['ENABLE_OPROFILE_AGENT']
}
},
'msvc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '/GR-', '/Gy'],
'CPPDEFINES': ['WIN32'],
'LINKFLAGS': ['/INCREMENTAL:NO', '/NXCOMPAT', '/IGNORE:4221'],
'CCPDBFLAGS': ['/Zi']
},
'verbose:off': {
'DIALECTFLAGS': ['/nologo'],
'ARFLAGS': ['/NOLOGO']
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32', '_USE_32BIT_TIME_T'],
'LINKFLAGS': ['/MACHINE:X86'],
'ARFLAGS': ['/MACHINE:X86']
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
'LINKFLAGS': ['/MACHINE:X64'],
'ARFLAGS': ['/MACHINE:X64']
},
'mode:debug': {
'CCFLAGS': ['/Od', '/Gm'],
'CPPDEFINES': ['_DEBUG', 'ENABLE_DISASSEMBLER', 'DEBUG'],
'LINKFLAGS': ['/DEBUG'],
'msvcrt:static': {
'CCFLAGS': ['/MTd']
},
'msvcrt:shared': {
'CCFLAGS': ['/MDd']
}
},
'mode:release': {
'CCFLAGS': ['/O2'],
'LINKFLAGS': ['/OPT:REF', '/OPT:ICF'],
'msvcrt:static': {
'CCFLAGS': ['/MT']
},
'msvcrt:shared': {
'CCFLAGS': ['/MD']
},
'msvcltcg:on': {
'CCFLAGS': ['/GL'],
'LINKFLAGS': ['/LTCG'],
'ARFLAGS': ['/LTCG'],
}
}
}
}
V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'CXXFLAGS': [], #['-fvisibility=hidden'],
'WARNINGFLAGS': ['-Wall',
'-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
},
'os:win32': {
'WARNINGFLAGS': ['-pedantic', '-Wno-long-long']
},
'os:linux': {
'WARNINGFLAGS': ['-pedantic'],
'library:shared': {
'soname:on': {
'LINKFLAGS': ['-Wl,-soname,${SONAME}']
}
}
},
'os:macos': {
'WARNINGFLAGS': ['-pedantic']
},
'disassembler:on': {
'CPPDEFINES': ['ENABLE_DISASSEMBLER']
}
},
'msvc': {
'all': {
'WARNINGFLAGS': ['/WX', '/wd4355', '/wd4800']
},
'library:shared': {
'CPPDEFINES': ['BUILDING_V8_SHARED'],
'LIBS': ['winmm', 'ws2_32']
},
'arch:ia32': {
'WARNINGFLAGS': ['/W3']
},
'arch:x64': {
'WARNINGFLAGS': ['/W2']
},
'arch:arm': {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM'],
# /wd4996 is to silence the warning about sscanf
# used by the arm simulator.
'WARNINGFLAGS': ['/wd4996']
},
'disassembler:on': {
'CPPDEFINES': ['ENABLE_DISASSEMBLER']
}
}
}
MKSNAPSHOT_EXTRA_FLAGS = {
'gcc': {
'os:linux': {
'LIBS': ['pthread'],
},
'os:macos': {
'LIBS': ['pthread'],
},
'os:freebsd': {
'LIBS': ['execinfo', 'pthread']
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32'],
},
},
'msvc': {
'all': {
'CPPDEFINES': ['_HAS_EXCEPTIONS=0'],
'LIBS': ['winmm', 'ws2_32']
}
}
}
DTOA_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Werror', '-Wno-uninitialized'],
'CCFLAGS': GCC_DTOA_EXTRA_CCFLAGS
}
},
'msvc': {
'all': {
'WARNINGFLAGS': ['/WX', '/wd4018', '/wd4244']
}
}
}
CCTEST_EXTRA_FLAGS = {
'all': {
'CPPPATH': [join(root_dir, 'src')],
'LIBS': ['$LIBRARY']
},
'gcc': {
'all': {
'LIBPATH': [abspath('.')]
},
'os:linux': {
'LIBS': ['pthread'],
},
'os:macos': {
'LIBS': ['pthread'],
},
'os:freebsd': {
'LIBS': ['execinfo', 'pthread']
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32']
},
'os:android': {
'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
'CCFLAGS': ANDROID_FLAGS,
'CPPPATH': ANDROID_INCLUDES,
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
'LIBS': ['log', 'c', 'stdc++', 'm'],
'mode:release': {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
},
},
'msvc': {
'all': {
'CPPDEFINES': ['_HAS_EXCEPTIONS=0'],
'LIBS': ['winmm', 'ws2_32']
},
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64']
},
}
}
SAMPLE_FLAGS = {
'all': {
'CPPPATH': [join(abspath('.'), 'include')],
'LIBS': ['$LIBRARY'],
},
'gcc': {
'all': {
'LIBPATH': ['.'],
'CCFLAGS': ['-fno-rtti', '-fno-exceptions']
},
'os:linux': {
'LIBS': ['pthread'],
},
'os:macos': {
'LIBS': ['pthread'],
},
'os:freebsd': {
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['execinfo', 'pthread']
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32']
},
'os:android': {
'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
'CCFLAGS': ANDROID_FLAGS,
'CPPPATH': ANDROID_INCLUDES,
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
'LIBS': ['log', 'c', 'stdc++', 'm'],
'mode:release': {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
},
'arch:ia32': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'arch:x64': {
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64']
},
'simulator:arm': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'mode:release': {
'CCFLAGS': ['-O2']
},
'mode:debug': {
'CCFLAGS': ['-g', '-O0']
},
'prof:oprofile': {
'LIBPATH': ['/usr/lib32', '/usr/lib32/oprofile'],
'LIBS': ['opagent']
}
},
'msvc': {
'all': {
'LIBS': ['winmm', 'ws2_32']
},
'verbose:off': {
'CCFLAGS': ['/nologo'],
'LINKFLAGS': ['/NOLOGO']
},
'verbose:on': {
'LINKFLAGS': ['/VERBOSE']
},
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
'prof:on': {
'LINKFLAGS': ['/MAP']
},
'mode:release': {
'CCFLAGS': ['/O2'],
'LINKFLAGS': ['/OPT:REF', '/OPT:ICF'],
'msvcrt:static': {
'CCFLAGS': ['/MT']
},
'msvcrt:shared': {
'CCFLAGS': ['/MD']
},
'msvcltcg:on': {
'CCFLAGS': ['/GL'],
'LINKFLAGS': ['/LTCG'],
}
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32'],
'LINKFLAGS': ['/MACHINE:X86']
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
'LINKFLAGS': ['/MACHINE:X64']
},
'mode:debug': {
'CCFLAGS': ['/Od'],
'LINKFLAGS': ['/DEBUG'],
'msvcrt:static': {
'CCFLAGS': ['/MTd']
},
'msvcrt:shared': {
'CCFLAGS': ['/MDd']
}
}
}
}
D8_FLAGS = {
'gcc': {
'console:readline': {
'LIBS': ['readline']
},
'os:linux': {
'LIBS': ['pthread'],
},
'os:macos': {
'LIBS': ['pthread'],
},
'os:freebsd': {
'LIBS': ['pthread'],
},
'os:android': {
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
'LIBS': ['log', 'c', 'stdc++', 'm'],
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32'],
},
},
'msvc': {
'all': {
'LIBS': ['winmm', 'ws2_32']
}
}
}
SUFFIXES = {
'release': '',
'debug': '_g'
}
def Abort(message):
print message
sys.exit(1)
def GuessToolchain(os):
tools = Environment()['TOOLS']
if 'gcc' in tools:
return 'gcc'
elif 'msvc' in tools:
return 'msvc'
else:
return None
OS_GUESS = utils.GuessOS()
TOOLCHAIN_GUESS = GuessToolchain(OS_GUESS)
ARCH_GUESS = utils.GuessArchitecture()
SIMPLE_OPTIONS = {
'toolchain': {
'values': ['gcc', 'msvc'],
'default': TOOLCHAIN_GUESS,
'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')'
},
'os': {
'values': ['freebsd', 'linux', 'macos', 'win32', 'android'],
'default': OS_GUESS,
'help': 'the os to build for (' + OS_GUESS + ')'
},
'arch': {
'values':['arm', 'ia32', 'x64'],
'default': ARCH_GUESS,
'help': 'the architecture to build for (' + ARCH_GUESS + ')'
},
'regexp': {
'values': ['native', 'interpreted'],
'default': 'native',
'help': 'Whether to use native or interpreted regexp implementation'
},
'snapshot': {
'values': ['on', 'off', 'nobuild'],
'default': 'off',
'help': 'build using snapshots for faster start-up'
},
'prof': {
'values': ['on', 'off', 'oprofile'],
'default': 'off',
'help': 'enable profiling of build target'
},
'library': {
'values': ['static', 'shared'],
'default': 'static',
'help': 'the type of library to produce'
},
'soname': {
'values': ['on', 'off'],
'default': 'off',
'help': 'turn on setting soname for Linux shared library'
},
'msvcrt': {
'values': ['static', 'shared'],
'default': 'static',
'help': 'the type of Microsoft Visual C++ runtime library to use'
},
'msvcltcg': {
'values': ['on', 'off'],
'default': 'on',
'help': 'use Microsoft Visual C++ link-time code generation'
},
'simulator': {
'values': ['arm', 'none'],
'default': 'none',
'help': 'build with simulator'
},
'disassembler': {
'values': ['on', 'off'],
'default': 'off',
'help': 'enable the disassembler to inspect generated code'
},
'sourcesignatures': {
'values': ['MD5', 'timestamp'],
'default': 'MD5',
'help': 'set how the build system detects file changes'
},
'console': {
'values': ['dumb', 'readline'],
'default': 'dumb',
'help': 'the console to use for the d8 shell'
},
'verbose': {
'values': ['on', 'off'],
'default': 'off',
'help': 'more output from compiler and linker'
}
}
def GetOptions():
result = Options()
result.Add('mode', 'compilation mode (debug, release)', 'release')
result.Add('sample', 'build sample (shell, process)', '')
result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '')
result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '')
for (name, option) in SIMPLE_OPTIONS.iteritems():
help = '%s (%s)' % (name, ", ".join(option['values']))
result.Add(name, help, option.get('default'))
return result
def GetVersionComponents():
MAJOR_VERSION_PATTERN = re.compile(r"#define\s+MAJOR_VERSION\s+(.*)")
MINOR_VERSION_PATTERN = re.compile(r"#define\s+MINOR_VERSION\s+(.*)")
BUILD_NUMBER_PATTERN = re.compile(r"#define\s+BUILD_NUMBER\s+(.*)")
PATCH_LEVEL_PATTERN = re.compile(r"#define\s+PATCH_LEVEL\s+(.*)")
patterns = [MAJOR_VERSION_PATTERN,
MINOR_VERSION_PATTERN,
BUILD_NUMBER_PATTERN,
PATCH_LEVEL_PATTERN]
source = open(join(root_dir, 'src', 'version.cc')).read()
version_components = []
for pattern in patterns:
match = pattern.search(source)
if match:
version_components.append(match.group(1).strip())
else:
version_components.append('0')
return version_components
def GetVersion():
version_components = GetVersionComponents()
if version_components[len(version_components) - 1] == '0':
version_components.pop()
return '.'.join(version_components)
def GetSpecificSONAME():
SONAME_PATTERN = re.compile(r"#define\s+SONAME\s+\"(.*)\"")
source = open(join(root_dir, 'src', 'version.cc')).read()
match = SONAME_PATTERN.search(source)
if match:
return match.group(1).strip()
else:
return ''
def SplitList(str):
return [ s for s in str.split(",") if len(s) > 0 ]
def IsLegal(env, option, values):
str = env[option]
for s in SplitList(str):
if not s in values:
Abort("Illegal value for option %s '%s'." % (option, s))
return False
return True
def VerifyOptions(env):
if not IsLegal(env, 'mode', ['debug', 'release']):
return False
if not IsLegal(env, 'sample', ["shell", "process"]):
return False
if not IsLegal(env, 'regexp', ["native", "interpreted"]):
return False
if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
Abort("Profiling on windows only supported for static library.")
if env['prof'] == 'oprofile' and env['os'] != 'linux':
Abort("OProfile is only supported on Linux.")
if env['os'] == 'win32' and env['soname'] == 'on':
Abort("Shared Object soname not applicable for Windows.")
if env['soname'] == 'on' and env['library'] == 'static':
Abort("Shared Object soname not applicable for static library.")
for (name, option) in SIMPLE_OPTIONS.iteritems():
if (not option.get('default')) and (name not in ARGUMENTS):
message = ("A value for option %s must be specified (%s)." %
(name, ", ".join(option['values'])))
Abort(message)
if not env[name] in option['values']:
message = ("Unknown %s value '%s'. Possible values are (%s)." %
(name, env[name], ", ".join(option['values'])))
Abort(message)
class BuildContext(object):
def __init__(self, options, env_overrides, samples):
self.library_targets = []
self.mksnapshot_targets = []
self.cctest_targets = []
self.sample_targets = []
self.d8_targets = []
self.options = options
self.env_overrides = env_overrides
self.samples = samples
self.use_snapshot = (options['snapshot'] != 'off')
self.build_snapshot = (options['snapshot'] == 'on')
self.flags = None
def AddRelevantFlags(self, initial, flags):
result = initial.copy()
toolchain = self.options['toolchain']
if toolchain in flags:
self.AppendFlags(result, flags[toolchain].get('all'))
for option in sorted(self.options.keys()):
value = self.options[option]
self.AppendFlags(result, flags[toolchain].get(option + ':' + value))
self.AppendFlags(result, flags.get('all'))
return result
def AddRelevantSubFlags(self, options, flags):
self.AppendFlags(options, flags.get('all'))
for option in sorted(self.options.keys()):
value = self.options[option]
self.AppendFlags(options, flags.get(option + ':' + value))
def GetRelevantSources(self, source):
result = []
result += source.get('all', [])
for (name, value) in self.options.iteritems():
source_value = source.get(name + ':' + value, [])
if type(source_value) == dict:
result += self.GetRelevantSources(source_value)
else:
result += source_value
return sorted(result)
def AppendFlags(self, options, added):
if not added:
return
for (key, value) in added.iteritems():
if key.find(':') != -1:
self.AddRelevantSubFlags(options, { key: value })
else:
if not key in options:
options[key] = value
else:
prefix = options[key]
if isinstance(prefix, StringTypes): prefix = prefix.split()
options[key] = prefix + value
def ConfigureObject(self, env, input, **kw):
if (kw.has_key('CPPPATH') and env.has_key('CPPPATH')):
kw['CPPPATH'] += env['CPPPATH']
if self.options['library'] == 'static':
return env.StaticObject(input, **kw)
else:
return env.SharedObject(input, **kw)
def ApplyEnvOverrides(self, env):
if not self.env_overrides:
return
if type(env['ENV']) == DictType:
env['ENV'].update(**self.env_overrides)
else:
env['ENV'] = self.env_overrides
def PostprocessOptions(options):
# Adjust architecture if the simulator option has been set
if (options['simulator'] != 'none') and (options['arch'] != options['simulator']):
if 'arch' in ARGUMENTS:
# Print a warning if arch has explicitly been set
print "Warning: forcing architecture to match simulator (%s)" % options['simulator']
options['arch'] = options['simulator']
def ParseEnvOverrides(arg, imports):
# The environment overrides are in the format NAME0:value0,NAME1:value1,...
# The environment imports are in the format NAME0,NAME1,...
overrides = {}
for var in imports.split(','):
if var in os.environ:
overrides[var] = os.environ[var]
for override in arg.split(','):
pos = override.find(':')
if pos == -1:
continue
overrides[override[:pos].strip()] = override[pos+1:].strip()
return overrides
def BuildSpecific(env, mode, env_overrides):
options = {'mode': mode}
for option in SIMPLE_OPTIONS:
options[option] = env[option]
PostprocessOptions(options)
context = BuildContext(options, env_overrides, samples=SplitList(env['sample']))
# Remove variables which can't be imported from the user's external
# environment into a construction environment.
user_environ = os.environ.copy()
try:
del user_environ['ENV']
except KeyError:
pass
library_flags = context.AddRelevantFlags(user_environ, LIBRARY_FLAGS)
v8_flags = context.AddRelevantFlags(library_flags, V8_EXTRA_FLAGS)
mksnapshot_flags = context.AddRelevantFlags(library_flags, MKSNAPSHOT_EXTRA_FLAGS)
dtoa_flags = context.AddRelevantFlags(library_flags, DTOA_EXTRA_FLAGS)
cctest_flags = context.AddRelevantFlags(v8_flags, CCTEST_EXTRA_FLAGS)
sample_flags = context.AddRelevantFlags(user_environ, SAMPLE_FLAGS)
d8_flags = context.AddRelevantFlags(library_flags, D8_FLAGS)
context.flags = {
'v8': v8_flags,
'mksnapshot': mksnapshot_flags,
'dtoa': dtoa_flags,
'cctest': cctest_flags,
'sample': sample_flags,
'd8': d8_flags
}
# Generate library base name.
target_id = mode
suffix = SUFFIXES[target_id]
library_name = 'v8' + suffix
version = GetVersion()
if context.options['soname'] == 'on':
# When building shared object with SONAME version the library name.
library_name += '-' + version
env['LIBRARY'] = library_name
# Generate library SONAME if required by the build.
if context.options['soname'] == 'on':
soname = GetSpecificSONAME()
if soname == '':
soname = 'lib' + library_name + '.so'
env['SONAME'] = soname
# Build the object files by invoking SCons recursively.
(object_files, shell_files, mksnapshot) = env.SConscript(
join('src', 'SConscript'),
build_dir=join('obj', target_id),
exports='context',
duplicate=False
)
context.mksnapshot_targets.append(mksnapshot)
# Link the object files into a library.
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
if context.options['library'] == 'static':
library = env.StaticLibrary(library_name, object_files)
else:
# There seems to be a glitch in the way scons decides where to put
# PDB files when compiling using MSVC so we specify it manually.
# This should not affect any other platforms.
pdb_name = library_name + '.dll.pdb'
library = env.SharedLibrary(library_name, object_files, PDB=pdb_name)
context.library_targets.append(library)
d8_env = Environment()
d8_env.Replace(**context.flags['d8'])
shell = d8_env.Program('d8' + suffix, object_files + shell_files)
context.d8_targets.append(shell)
for sample in context.samples:
sample_env = Environment(LIBRARY=library_name)
sample_env.Replace(**context.flags['sample'])
context.ApplyEnvOverrides(sample_env)
sample_object = sample_env.SConscript(
join('samples', 'SConscript'),
build_dir=join('obj', 'sample', sample, target_id),
exports='sample context',
duplicate=False
)
sample_name = sample + suffix
sample_program = sample_env.Program(sample_name, sample_object)
sample_env.Depends(sample_program, library)
context.sample_targets.append(sample_program)
cctest_program = env.SConscript(
join('test', 'cctest', 'SConscript'),
build_dir=join('obj', 'test', target_id),
exports='context object_files',
duplicate=False
)
context.cctest_targets.append(cctest_program)
return context
def Build():
opts = GetOptions()
env = Environment(options=opts)
Help(opts.GenerateHelpText(env))
VerifyOptions(env)
env_overrides = ParseEnvOverrides(env['env'], env['importenv'])
SourceSignatures(env['sourcesignatures'])
libraries = []
mksnapshots = []
cctests = []
samples = []
d8s = []
modes = SplitList(env['mode'])
for mode in modes:
context = BuildSpecific(env.Copy(), mode, env_overrides)
libraries += context.library_targets
mksnapshots += context.mksnapshot_targets
cctests += context.cctest_targets
samples += context.sample_targets
d8s += context.d8_targets
env.Alias('library', libraries)
env.Alias('mksnapshot', mksnapshots)
env.Alias('cctests', cctests)
env.Alias('sample', samples)
env.Alias('d8', d8s)
if env['sample']:
env.Default('sample')
else:
env.Default('library')
# We disable deprecation warnings because we need to be able to use
# env.Copy without getting warnings for compatibility with older
# version of scons. Also, there's a bug in some revisions that
# doesn't allow this flag to be set, so we swallow any exceptions.
# Lovely.
try:
SetOption('warn', 'no-deprecated')
except:
pass
Build()
| [
"lucas@l3f.org"
] | lucas@l3f.org | |
1a406e49cacab5c4dcd0d7e60c97c70a3b1a7a36 | bb05e1fafef1a62b85d5c97f10c9557cf7a240cc | /task_07_01.py | 5bf2d6e493ddc44f970bb650a0474271f3285a79 | [] | no_license | Nafani4/Home_work | 3bbc192dc6a43c40fd0358dfec863241e7b3ab96 | 69ce5d4321b3fc6b4bf4db7191912c3d4f4f8907 | refs/heads/master | 2021-05-16T03:31:23.457108 | 2017-11-26T21:11:30 | 2017-11-26T21:11:30 | 105,476,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | def fibonacci (num):
x, y = 0, 1
for i in range(num):
x, y = y, x + y
yield x
if __name__ == '__main__':
print(fibonacci(10))
for i in fibonacci(10):
print(i) | [
"grebennikov.mikhail@gmail.com"
] | grebennikov.mikhail@gmail.com |
0a884db25123476946a8d29963a9b291c969e426 | aa89d7e6dfcf1da91fd5dfed6b966f342c24cc16 | /p.py | b4757ebf5434fb383faf678e34598c858773c642 | [] | no_license | nanfeng-web/mine-pictures | 6aad709904b07acd288603f793759b707e9f4b04 | 7a7cd2f3de63feacac7fd6824b9fc030eb8e23b3 | refs/heads/main | 2023-08-07T09:52:05.801232 | 2021-10-01T09:02:44 | 2021-10-01T09:02:44 | 399,729,566 | 1 | 0 | null | 2021-09-23T23:58:45 | 2021-08-25T07:29:52 | null | UTF-8 | Python | false | false | 149 | py | import requests
response = requests.get("https://api.nmb.show/1985acg.php")
file = open("paqu","wb")
file.write(response.content)
file.close()
| [
"noreply@github.com"
] | nanfeng-web.noreply@github.com |
affeefebfe3fea12f782e19ec9b4436fcfec1e64 | 8489a961a13492fea2ef76f18b86fa2ecaec93c2 | /web_app_interface/marfSite/manage.py | 67388189c584ba2d010e526d7c135676b6f18c5e | [] | no_license | kavanomo/teamMarf | 84ff8496488cc8f27a997fddbd550798ee6218d4 | 461d23144d26e8836e04e6c930a961fccef28465 | refs/heads/master | 2020-03-30T05:14:15.681749 | 2019-03-15T03:33:35 | 2019-03-15T03:33:35 | 150,788,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys


def main():
    """Entry point: select this project's settings module, then hand the
    command line over to Django's management utility."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'marfSite.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the usual causes (missing install or
        # inactive virtualenv), keeping the original exception chained.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"robbie.windsor+git@gmail.com"
] | robbie.windsor+git@gmail.com |
796a852c4ccdd0bc598e0b373567c854094d0cfd | 45fb509bf21ac003a40fd404d7c0cc995e741672 | /perceptron_algorithm/perceptron_algo_2nd_method.py | 59807adb1a2c854110b8644f2b103f49899851f4 | [] | no_license | FarahAgha/MachineLearning | 0d17511f7495190dfd2368554428208c7d0eadf7 | cf385135e016a63fb16bd326586fcd8ecb3c4355 | refs/heads/master | 2021-01-04T01:03:08.810401 | 2020-03-15T18:42:16 | 2020-03-15T18:42:16 | 240,314,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | # Perceptron Algorithm perceptron_algo_2nd_method.py
# See https://medium.com/@thomascountz/19-line-line-by-line-python-perceptron-b6f113b161f3 for details.
import numpy as np
class Perceptron(object):
    """Rosenblatt perceptron: a single linear unit with a step activation.

    weights[0] holds the bias term; weights[1:] hold one weight per input.
    """
    def __init__(self, no_of_inputs, threshold=100, learning_rate=0.01):
        # NOTE(review): `threshold` is actually the number of training
        # epochs, not an activation threshold, despite its name.
        self.threshold = threshold
        self.learning_rate = learning_rate
        # One extra slot for the bias; all weights start at zero.
        self.weights = np.zeros(no_of_inputs + 1)
    def predict(self, inputs):
        # Step activation: 1 if the weighted sum plus bias is positive, else 0.
        summation = np.dot(inputs, self.weights[1:]) + self.weights[0]
        if summation > 0:
            activation = 1
        else:
            activation = 0
        return activation
    def train(self, training_inputs, labels):
        # Perceptron learning rule: for each of `threshold` epochs, nudge the
        # weights by learning_rate * (label - prediction) for every sample.
        # `training_inputs` is expected to be a sequence of numpy arrays so
        # the element-wise `* inputs` update below works.
        for _ in range(self.threshold):
            for inputs, label in zip(training_inputs, labels):
                prediction = self.predict(inputs)
                self.weights[1:] += self.learning_rate * (label - prediction) * inputs
                self.weights[0] += self.learning_rate * (label - prediction) | [
"you@example.com"
] | you@example.com |
f85417abce36ad169336e4c143d0ed7ba2a666ea | 1082d5cde908e101c51f69923212dcae4b4b60f4 | /examples/sub_menu.py | 9d7901f2f97b295d736199ba1b9a578957e289d8 | [
"MIT"
] | permissive | SaxonRah/UnrealEnginePython | 88039c62024d01672138c4adeecac0fc9729bed0 | d5f3b2e7a42209af31cbbd47377633e8d452439c | refs/heads/master | 2021-01-04T23:56:44.556697 | 2020-02-16T00:38:43 | 2020-02-16T00:38:43 | 240,805,780 | 7 | 1 | MIT | 2020-02-16T00:03:20 | 2020-02-16T00:03:19 | null | UTF-8 | Python | false | false | 1,313 | py | import unreal_engine as ue
def open_submenu001(builder):
    """Build the 'submenu001' section: two entries that write to the UE log."""

    def _log_first():
        ue.log('hello from submenu001')

    def _log_second():
        ue.log('hello again')

    builder.begin_section('submenu001', 'i am a tooltip')
    builder.add_menu_entry('sub_one', 'tooltip', _log_first)
    builder.add_menu_entry('sub_one_2', 'tooltip 2', _log_second)
    builder.end_section()
def open_sub_submenu(builder):
    """Build the nested 'sub_submenu003' section with a single log entry."""

    def _log_entry():
        ue.log('hello from sub_submenu003')

    builder.begin_section('sub_submenu003', 'i am a tooltip for the submenu')
    builder.add_menu_entry('sub_sub_three', 'tooltip', _log_entry)
    builder.end_section()
def open_submenu002(builder):
    """Build 'submenu002': one logging entry plus a nested sub-menu."""

    def _log_entry():
        ue.log('hello from submenu002')

    builder.begin_section('submenu002', 'i am a tooltip')
    builder.add_menu_entry('sub_two', 'tooltip', _log_entry)
    # The nested menu is populated lazily by open_sub_submenu when opened.
    builder.add_sub_menu('sub sub menu', 'tooltip !', open_sub_submenu)
    builder.end_section()
def open_menu(builder):
    """Top-level menu-bar callback: two entries interleaved with two sub-menus."""

    def _log_ciao_1():
        ue.log('ciao 1')

    def _log_ciao_2():
        ue.log('ciao 2')

    builder.begin_section('test1', 'test2')
    builder.add_menu_entry('one', 'two', _log_ciao_1)
    builder.add_sub_menu('i am a submenu', 'tooltip for the submenu', open_submenu001)
    builder.add_menu_entry('three', 'four', _log_ciao_2)
    builder.add_sub_menu('i am another submenu', 'tooltip for the second submenu', open_submenu002)
    builder.end_section()
# Register `open_menu` as the builder callback for a new editor menu-bar
# extension named 'SimpleMenuBarExtension'.
ue.add_menu_bar_extension('SimpleMenuBarExtension', open_menu) | [
"roberto.deioris@gmail.com"
] | roberto.deioris@gmail.com |
f9dd6d91e8aaee9919ed20cb74c14fc6f2d22c8b | 44c81d8cc9c148c93cf9a77faec345693059c973 | /fetch.py | 568adf1e9271c6ebe976f93a3b0c8306a2ea428a | [] | no_license | neoatlantis/currency-data | 26566a5131b814f324153db451ae9f879fda9b72 | c19bc94d6d6ba6706f625e94e176b77bee455b04 | refs/heads/master | 2020-06-10T19:02:58.973856 | 2016-12-08T06:35:46 | 2016-12-08T06:35:46 | 75,902,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | #!/usr/bin/env python
import os
import time
import requests
import shelve
import sys
# NOTE: Python 2 script (print statements throughout).
# All paths are resolved relative to the directory containing this script.
BASEPATH = os.path.realpath(os.path.dirname(sys.argv[0]))
filepath = lambda *i: os.path.join(BASEPATH, *i)
# check for api key
# NOTE(review): the bare `except:` hides every failure mode (missing file,
# permission error, ...) behind the single "no API key" message.
try:
    apikeyFilepath = filepath('apikey')
    apikey = open(apikeyFilepath).read().strip()
except:
    print "Put your API key at `openexchangerates.org` into file `apikey`."
    sys.exit(1)
# check for database
# Shelve keys are str(unix_timestamp) of each fetch; scan for the newest one.
db = shelve.open(filepath('currencies.db'), flag='c')
latest = 0
for key in db:
    timestamp = float(key)
    if timestamp > latest:
        latest = timestamp
# Rate limit: refuse to hit the API within 3000 s of the latest snapshot
# unless `force` appears on the command line.
if time.time() - latest < 3000 and 'force' not in sys.argv:
    print "You are requesting too frequent. Abandoned to prevent API",
    print "exhaustion. Use `force` in command line to force a request."
    db.close()
    sys.exit(2)
# fetch url
url = "https://openexchangerates.org/api/latest.json?app_id=%s" % apikey
try:
    req = requests.get(url)
    # Bare `raise` just forces control into the except block on a non-200
    # response (in Python 2 it re-raises the last exception, if any).
    if req.status_code != 200: raise
    json = req.json()
    # Keep only the fields we store. This rebinds the local name `json`
    # (the stdlib json module is never imported here, so no clash).
    json = {
        'rates': json['rates'],
        'timestamp': json['timestamp']
    }
except:
    # NOTE(review): the shelve handle is left open on this exit path.
    print "Failed fetching newest data. Abort."
    sys.exit(3)
print json
# Store the snapshot keyed by the current wall-clock time and persist it.
db[str(time.time())] = json
db.close()
sys.exit(0)
| [
"contact@chaobai.li"
] | contact@chaobai.li |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.