blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5d5cdb112efb4d4444cc8f3b8bea1c8cfc78507e | fe68647975427e507a21baf0ce59123d9c67cf46 | /myblog/settings.py | cb88e6d40fbf62c00e07a13fb3be986c75b1d694 | [] | no_license | ankitdevelops/blogforcoders | cf923fb1113286f678b86b0bf9a2619d124248a7 | 8bf1746713957afc98944ee7ad1855067c63babe | refs/heads/master | 2023-04-29T14:00:14.960962 | 2021-05-15T04:53:52 | 2021-05-15T04:53:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,217 | py | """
Django settings for myblog project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import environ
env = environ.Env()
environ.Env.read_env()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded and committed to source control;
# it should be loaded from the environment (e.g. env('SECRET_KEY')) like
# the database credentials below, and the committed value rotated.
SECRET_KEY = '-_uo_5ewoi$gd@9$32gfo5*0ip9g+648!$$@tw^lys((j%i&(1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Host header allow-list: only the Heroku deployment may serve this app.
ALLOWED_HOSTS = ['blogforcoders.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'post.apps.PostConfig',
'users.apps.UsersConfig',
'webpages.apps.WebpagesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'ckeditor',
'storages'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'myblog',
# 'USER': 'postgres',
# 'PASSWORD': "#ankit#",
# 'HOST': "localhost",
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': env('POSTGRES_DB_NAME'),
'USER': env('POSTGRES_USER'),
'PASSWORD': env('POSTGRES_PASSWORD'),
'HOST': env('POSTGRES_HOST'),
'PORT': env('POSTGRES_PORT'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'staticfiles')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedStaticFilesStorage'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL = 'home'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
| [
"heyankitandrel@gmail.com"
] | heyankitandrel@gmail.com |
fde928b59f07ba939d51ffedbd226a131c7d7bf2 | da496ed0de7c5a768ddd42241363c82e15133b7f | /api/coachApi.py | b9538d3618e3bfd32ec24b6dc352b4a14b776cca | [] | no_license | benwong9832/SoccerAnalysisAndPrediction | ffb5e592f267f1d15db5618488e4309a71f3a4f9 | b4e2bc584723a6c1cb5b82b89549a7923cb7dc70 | refs/heads/main | 2023-03-09T16:16:23.434848 | 2021-02-13T06:26:11 | 2021-02-13T06:26:11 | 329,558,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,163 | py | import api.config as cf
import requests
import json
import os
from datetime import datetime
import codecs
import time
# f = open("C:/Users/kenpe/PycharmProjects/soccersApi/data/teams/primite_leayg/teams_primite_leayg_2013.json", "r")
# print(f.read())
# Load API credentials plus the league/season lookup tables.
config = cf.config()
seasons = config.seasons
leagues = config.leagues
# Project root: drop the last component of the current working directory.
# NOTE(review): assumes Windows-style '\\' separators in os.getcwd() —
# confirm this script only runs on Windows.
path = '\\'.join(os.getcwd().split('\\')[:-1])
count = 0
# Walk every league/season and build a coach-info request URL per fixture.
for league_id, league_name in leagues.items():
    league_seasons = seasons[league_id]
    for season_id, season_name in league_seasons.items():
        # Previously downloaded fixture list for this league/season.
        file_path = path.replace('\\', '/') + '/data/fixtures/' + league_name + '/fixtures_{}_{}.json'.format(league_name, season_name)
        # NOTE(review): the file handle is never closed — consider `with open(...)`.
        json_file = open(file_path, 'r')
        data = json.load(json_file)
        for fixture in data['data']:
            fixture_home_coach_id = fixture['teams']['home']['coach_id']
            # fixture_id = fixture['id']
            # fixture_datatime = fixture['time']['date'][0:4] + fixture['time']['date'][5:7] + fixture['time']['date'][8:10]
            # Coach-info endpoint URL for the home coach. The actual HTTP
            # request and file dump are commented out below; as written the
            # loop only prints a blank line per fixture.
            url = config.endpoint + 'coaches/?' + 'user={}&token={}'.format(config.user, config.token)\
                  + '&t={}'.format('info')\
                  + '&id={}'.format(fixture_home_coach_id)
            print()
# payload = {}
# headers = {}
# response = requests.request("GET", url, headers = headers, data = payload)
#
# print(' 22235 / ' + str(count) + ' ' + url)
# print(response.text.encode('utf8'))
#
# responseData = json.loads(response.text)
# if responseData['meta']['requests_left'] < 5:
# time.sleep(2000)
#
# count += 1
# print('3000 / ' + str(responseData['meta']['requests_left']))
# # print(response.text.encode('utf8'))
#
# check_path = path + '/data/match/' + league_name
# if not os.path.exists(check_path):
# os.mkdir(check_path)
#
# check_path = check_path + '/' + season_name
# if not os.path.exists(check_path):
# os.mkdir(check_path)
#
# file_name = check_path + "/match_{}_{}_{}.json".format(league_name, fixture_datatime, fixture_id)
# f = codecs.open(file_name, "w+", 'utf-8')
# f.write(response.text)
# # f.write('{}')
# f.close()
| [
"kenpetere@gmail.com"
] | kenpetere@gmail.com |
b92646d2d8bb086be2c6d59e672848ab3b3050cf | 8036b9e0a3ba8c8042e5fb04f67d0b846fb05b1a | /spouts/tdms_spout.py | 4409f2e7f28349d84425b7b6633844bde4d6282d | [] | no_license | shouwang-sun/processing_node | 4b90632f36cf2f0ea0962dbc24820a80ac996ffd | a78ae07a513368fbd43fff4cf3a20ac63a12d3f3 | refs/heads/master | 2020-03-23T04:10:47.420093 | 2018-07-16T00:48:45 | 2018-07-16T00:48:50 | 141,068,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,435 | py | # -*- coding: utf-8 -*-
__author__ = 'SUN Shouwang'
import time
from os import listdir, path
import nptdms
class TdmsSpout(object):
    """Reads channel data from a folder of TDMS files and yields one tuple
    per (file, channel): [timestamp, *other_properties, data_list]."""

    def __init__(self, folder, channel_list):
        # Fixed "random" ordering over the first ten directory entries.
        # NOTE(review): assumes the folder holds at least 10 files — an
        # IndexError is raised otherwise; confirm against the data layout.
        random_index = [3, 0, 2, 5, 1, 7, 4, 8, 6, 9]
        self.file_list = [path.join(folder, listdir(folder)[ind]) for ind in random_index]
        # self.file_list = [path.join(folder, file_name) for file_name in listdir(folder)][:100]
        self.channel_list = channel_list

    def process(self):
        """Yield parsed tuples for every file/channel combination."""
        for tup in self._parse():
            yield tup

    def _parse(self):
        # Parse each TDMS file and emit one tuple per requested channel.
        for file_name in self.file_list:
            tdms_file = nptdms.TdmsFile(file_name)
            for channel_name in self.channel_list:
                # Group name is the LabVIEW default "未命名" ("Unnamed").
                channel_object = tdms_file.object(u'未命名', channel_name)
                # acquire this channel's 'wf_start_time' property
                # and get its timestamp value for JSON serialize
                # NOTE(review): time.mktime interprets the datetime as local
                # time — confirm the intended timezone for these timestamps.
                start_time = channel_object.property('wf_start_time')
                timestamp = time.mktime(start_time.timetuple())
                tup = [timestamp]
                # acquire this channel's other properties
                others = [v for k, v in channel_object.properties.items() if k != 'wf_start_time']
                tup.extend(others)
                # acquire channel data
                data = channel_object.data.tolist()
                tup.append(data)
                yield tup
| [
"shouwang.sun@aliyun.com"
] | shouwang.sun@aliyun.com |
b7c41240fa74e52ba4534e26961d3cbf7322a0d6 | 43ed422113d58b27d5012f5ccf405700a46fc0f2 | /MaskRCNN/model/loss.py | eb4cb8a2d3d03b016b3857b3071a40cc1977da99 | [] | no_license | wprazuch/DeepLearningPlayground | 99a86945818e8a42e77408369e566b793ac612b9 | 53859fb4fd7bfc314121c85870afabd47627ce73 | refs/heads/master | 2022-12-16T05:50:22.757434 | 2020-09-03T09:23:26 | 2020-09-03T09:23:26 | 187,896,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,227 | py | import tensorflow as tf
import tensorflow.keras.backend as K
from utils import batch_pack_graph
def smooth_l1_loss(y_true, y_pred):
    """Elementwise Smooth-L1 (Huber) loss.

    Quadratic for absolute errors below 1, linear above. *y_true* and
    *y_pred* are typically [N, 4] box deltas, but any shape works.
    """
    abs_err = K.abs(y_true - y_pred)
    # 1.0 where the error lies in the quadratic regime, 0.0 otherwise.
    quadratic = K.cast(K.less(abs_err, 1.0), 'float32')
    linear = 1 - quadratic
    return quadratic * 0.5 * abs_err ** 2 + linear * (abs_err - 0.5)
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
        -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
    """
    # Squeeze last dim to simplify: [batch, anchors].
    rpn_match = tf.squeeze(rpn_match, -1)
    # Convert the +1/-1 match values to 0/1 anchor classes (1 = foreground).
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and negative anchors contribute to the loss; neutral
    # anchors (match value 0) are excluded.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Keep only the contributing rows.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross-entropy on the raw logits.
    loss = K.sparse_categorical_crossentropy(target=anchor_class, output=rpn_class_logits,
                                             from_logits=True)
    # Guard against an empty selection (no non-neutral anchors).
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    configs: the model configs object (provides IMAGES_PER_GPU).
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unsed bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
        -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))
    # Keep only the predicted deltas of positive anchors.
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)
    # target_bbox is zero-padded per image; take the first N rows of each
    # image, where N is that image's positive-anchor count, so targets
    # line up with the gathered predictions.
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)
    loss = smooth_l1_loss(target_bbox, rpn_bbox)
    # Guard against an empty selection (no positive anchors in the batch).
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits, active_class_ids):
    """Loss for the classifier head of Mask RCNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.
    """
    # During model building, Keras calls this function with
    # target_class_ids of type float32. Unclear why. Cast it
    # to int to get around it.
    target_class_ids = tf.cast(target_class_ids, 'int64')
    # Predicted class per ROI, used to look up its active flag.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    # images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)
    # Loss
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits
    )
    # Zero out losses for predictions of classes not active in the image.
    loss = loss * pred_active
    # Mean over the ROIs that actually count.
    # NOTE(review): if no prediction is active the denominator is 0 —
    # verify upstream guarantees at least one active class.
    loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
    return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Reshape to merge batch and roi dimensions for simplicity.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))

    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
    positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
    # BUGFIX: gather the class ids with positive_roi_ix. The original
    # referenced positive_roi_class_ids inside its own initializer,
    # which raises NameError when the graph is built.
    positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64
    )
    indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)

    # Gather the deltas (predicted and true) that contribute to loss.
    target_bbox = tf.gather(target_bbox, positive_roi_ix)
    pred_bbox = tf.gather_nd(pred_bbox, indices)

    # Smooth-L1 loss; 0.0 when there are no positive ROIs.
    loss = K.switch(tf.size(target_bbox) > 0,
                    smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
        with values from 0 to 1.
    """
    # Reshape for simplicity. Merge first two dimensions into one.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    mask_shape = tf.shape(target_masks)
    target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
    pred_shape = tf.shape(pred_masks)
    pred_masks = K.reshape(pred_masks,
                           (-1, pred_shape[2], pred_shape[3], pred_shape[4]))
    # Permute predicted masks to [N, num_classes, height, width] so the
    # class axis can be indexed per ROI.
    pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
    # Only positive ROIs contribute to the loss, and only the mask
    # channel of each ROI's ground-truth class.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_ix), tf.int64
    )
    indices = tf.stack([positive_ix, positive_class_ids], axis=1)
    # Gather the masks (predicted and true) that contribute to the loss.
    y_true = tf.gather(target_masks, positive_ix)
    y_pred = tf.gather_nd(pred_masks, indices)
    # Binary cross-entropy; 0.0 when there are no positive ROIs.
    loss = K.switch(tf.size(y_true) > 0, K.binary_crossentropy(target=y_true, output=y_pred),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss
411d7c55267cfccce2048ad8cd1fd9304f605fe2 | 8a1a0a657a400ffac264bbf9eec55eb3c0a264c8 | /pyverilog-0.9.1/pyverilog/utils/scope.py | 1511633b52a9806ea8375b332551fbb3e0337bbf | [] | no_license | gayatri267/PyverilogTutorial | e82295f7ba329dda6b438e54c2d667e15ce015ce | a63fa9fe3acef4afc9d7bbe8a742996039560d8a | refs/heads/master | 2021-09-07T02:42:35.992051 | 2018-02-16T01:17:04 | 2018-02-16T01:17:04 | 112,436,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,129 | py | #-------------------------------------------------------------------------------
# scope.py
#
# classes for definition of scope
#
# Copyright (C) 2013, Shinya Takamaeda-Yamazaki
# License: Apache 2.0
#-------------------------------------------------------------------------------
import sys
import os
import copy
# Scope kinds omitted when a scope chain is rendered to code (control
# structures and callables that do not appear in hierarchical names).
scopetype_list_unprint = ('generate', 'always', 'function', #'functioncall',
                          'task', 'taskcall', 'initial', 'for', 'while', 'if')
# Scope kinds that are kept in the rendered name.
scopetype_list_print = ('module', 'block', 'signal', 'functioncall',)
# All valid kinds; 'any' is a wildcard that matches either group.
scopetype_list = scopetype_list_unprint + scopetype_list_print + ('any', )
class ScopeLabel(object):
    """One element of a hierarchical scope path (module, block, signal,
    ...), optionally tagged with a for-generate loop index."""

    def __init__(self, scopename, scopetype='any', scopeloop=None):
        self.scopename = scopename
        if scopetype not in scopetype_list:
            # NOTE(review): DefinitionError is not imported in this file,
            # so reaching this branch actually raises NameError. Upstream
            # pyverilog imports it from utils.verror — confirm.
            raise DefinitionError('No such Scope type')
        self.scopetype = scopetype
        self.scopeloop = scopeloop

    def __repr__(self):
        # Render as "name" or "name[loop]" when a loop index is attached.
        ret = []
        ret.append(self.scopename)
        if self.scopeloop is not None:
            ret.append('[')
            ret.append(str(self.scopeloop))
            ret.append(']')
        return ''.join(ret)

    def tocode(self):
        # Unprintable kinds (always/for/if/...) vanish from generated names.
        if self.scopetype in scopetype_list_unprint: return ''
        return self.scopename

    def __eq__(self, other):
        if type(self) != type(other): return False
        # 'any' acts as a wildcard: ignore scopetype in the comparison.
        if self.scopetype == 'any' or other.scopetype == 'any':
            return ((self.scopename, self.scopeloop) ==
                    (other.scopename, other.scopeloop))
        return (self.scopename, self.scopetype, self.scopeloop) == (other.scopename, other.scopetype, other.scopeloop)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        #return hash((self.scopename, self.scopetype, self.scopeloop))
        # scopetype is deliberately excluded so wildcard ('any') labels
        # hash equal to concrete ones when used as dict keys.
        return hash((self.scopename, self.scopeloop)) # to use for dict key with any scopetype

    def isPrintable(self):
        return self.scopetype in (scopetype_list_print + ('any',))
class ScopeChain(object):
    """An ordered sequence of ScopeLabel objects forming a full
    hierarchical path; supports concatenation, slicing, and rendering."""

    def __init__(self, scopechain=None):
        self.scopechain = []
        if scopechain is not None:
            self.scopechain = scopechain

    def __add__(self, r):
        # Returns a new chain; neither operand is mutated.
        new_chain = copy.deepcopy(self)
        if isinstance(r, ScopeLabel):
            new_chain.append(r)
        elif isinstance(r, ScopeChain):
            new_chain.extend(r.scopechain)
        else:
            # NOTE(review): `verror` is not imported in this file, so this
            # branch actually raises NameError — confirm against upstream.
            raise verror.DefinitionError('Can not add %s' % str(r))
        return new_chain

    def append(self, r):
        self.scopechain.append(r)

    def extend(self, r):
        self.scopechain.extend(r)

    def tocode(self):
        # Render the chain as an identifier: printable labels joined with
        # '_', with any pending for-loop index inserted after its label.
        ret = []
        it = None
        for scope in self.scopechain:
            l = scope.tocode()
            if l:
                ret.append(l)
            if it is not None:
                ret.append(it)
            if l:
                #ret.append('.')
                #ret.append('_dot_')
                ret.append('_')
            if scope.scopetype == 'for' and scope.scopeloop is not None:
                #it = '[' + str(scope.scopeloop) + ']'
                #it = '_L_' + str(scope.scopeloop) + '_R_'
                it = '_' + str(scope.scopeloop) + '_'
            else:
                it = None
        # Drop the trailing separator appended after the last label.
        ret = ret[:-1]
        return ''.join(ret)

    def __repr__(self):
        # Dotted form, e.g. "top.gen[1].sig".
        ret = ''
        for scope in self.scopechain:
            l = scope.__repr__()
            ret += l + '.'
        ret = ret[:-1]
        return ret

    def __len__(self):
        return len(self.scopechain)

    def __eq__(self, other):
        if type(self) != type(other): return False
        return self.scopechain == other.scopechain

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(tuple(self.scopechain))

    def __getitem__(self, key):
        # Slicing returns a new ScopeChain; an int index returns the label.
        if isinstance(key, slice):
            indices = key.indices(len(self))
            return ScopeChain([self.scopechain[x] for x in range(*indices)])
        return self.scopechain[key]

    def __iter__(self):
        for scope in self.scopechain:
            yield scope
"gayatri267@gmail.com"
] | gayatri267@gmail.com |
9e9bd761750fdacff2550f9144c914ddc1e8529c | 7bead245354e233f76fff4608938bf956abb84cf | /test/test_docx_table_cell.py | f1aec4840f9d6c266499020f55fa9f2df8b0c8a9 | [
"Apache-2.0"
] | permissive | Cloudmersive/Cloudmersive.APIClient.Python.Convert | 5ba499937b9664f37cb2700509a4ba93952e9d6c | dba2fe7257229ebdacd266531b3724552c651009 | refs/heads/master | 2021-10-28T23:12:42.698951 | 2021-10-18T03:44:49 | 2021-10-18T03:44:49 | 138,449,321 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | # coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_convert_api_client
from cloudmersive_convert_api_client.models.docx_table_cell import DocxTableCell # noqa: E501
from cloudmersive_convert_api_client.rest import ApiException
class TestDocxTableCell(unittest.TestCase):
    """Unit-test stubs for the DocxTableCell model."""

    def setUp(self):
        """No fixtures required yet."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testDocxTableCell(self):
        """Placeholder test for DocxTableCell.

        FIXME: construct object with mandatory attributes with example values
        model = cloudmersive_convert_api_client.models.docx_table_cell.DocxTableCell()  # noqa: E501
        """
        pass
unittest.main()
| [
"35204726+Cloudmersive@users.noreply.github.com"
] | 35204726+Cloudmersive@users.noreply.github.com |
8149ad53008be36f6ec49fd392e43c5a4ce75f24 | 36261c250e1715255b7a22df9b59f3da0d288968 | /app/forms.py | 3e684e1319591d4db5763ec15d03f9e16d00a79b | [] | no_license | karishay/tiny_teacher | 06017c044f7697488c4289d46580c54c4d489d75 | 3918b785511f3551ba67b3aeafae8b7c06d7a3f5 | refs/heads/master | 2021-01-19T20:29:56.700039 | 2015-05-03T00:59:45 | 2015-05-03T00:59:45 | 28,541,948 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | from wtforms import Form, TextField, TextAreaField, PasswordField, validators
class LoginForm(Form):
    """WTForms login form: a required, format-validated email field and a
    required password field."""
    email = TextField("Email", [validators.Required(), validators.Email()])
    password = PasswordField("Password", [validators.Required()])
| [
"kari.shay@gmail.com"
] | kari.shay@gmail.com |
b0852cf85d9083b3a78990c4c4ecb96b24190dc2 | 191d18fae52df2b10fc3c78676612ce0828c1ad8 | /essentials/multi_server.py | b345cfc321697ef93d206779c4ae5ae4b88e165c | [
"MIT"
] | permissive | yada-yoda/pollmaster | 1e44ef42f68bf971e67b75c84842556d2ef2d687 | c7431d6b952599671c6408209528dceaad19116e | refs/heads/master | 2021-10-19T14:32:24.222515 | 2019-02-21T18:57:07 | 2019-02-21T18:57:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,061 | py | import time
import discord
from essentials.settings import SETTINGS
from utils.paginator import embed_list_paginated
async def get_pre(bot, message):
    '''Gets the prefix for a message.

    In DMs there is no server, so the prefix is guessed from the servers
    shared with the author; in a server channel the server's own prefix
    is used.
    '''
    if str(message.channel.type) == 'private':
        shared_server_list = await get_servers(bot, message)
        if shared_server_list.__len__() == 0:
            # No shared server -> fall back to the default prefix.
            return 'pm!'
        elif shared_server_list.__len__() == 1:
            return await get_server_pre(bot, shared_server_list[0])
        else:
            # return a tuple of all prefixes.. this will check them all!
            return tuple([await get_server_pre(bot, s) for s in shared_server_list])
    else:
        return await get_server_pre(bot, message.server)
async def get_server_pre(bot, server):
    '''Gets the prefix for a server.

    Returns the cached prefix from bot.pre, falling back to the default
    'pm!' when the cache is missing, the server has no entry, or the
    stored prefix is empty.
    '''
    try:
        #result = await bot.db.config.find_one({'_id': str(server.id)})
        result = bot.pre[str(server.id)]
    except (AttributeError, KeyError):
        # AttributeError: bot.pre cache not built yet.
        # KeyError: no stored prefix for this server (BUGFIX — the
        # original caught only AttributeError, so a missing entry
        # crashed instead of falling back to the default).
        return 'pm!'
    if not result: #or not result.get('prefix'):
        return 'pm!'
    return result #result.get('prefix')
async def get_servers(bot, message, short=None):
    '''Get best guess of relevant shared servers.

    For a DM, returns the servers shared between bot and author; when a
    poll *short* label is given, narrows to shared servers where that
    poll exists (if any). For a server message, returns that server.
    '''
    if message.server is None:
        # DM: collect every server where the author is a member.
        list_of_shared_servers = []
        for s in bot.servers:
            if message.author.id in [m.id for m in s.members]:
                list_of_shared_servers.append(s)
        if short is not None:
            # Prefer shared servers that actually have the poll "short".
            query = bot.db.polls.find({'short': short})
            if query is not None:
                server_ids_with_short = [poll['server_id'] async for poll in query]
                servers_with_short = [bot.get_server(x) for x in server_ids_with_short]
                shared_servers_with_short = list(set(servers_with_short).intersection(set(list_of_shared_servers)))
                if shared_servers_with_short.__len__() >= 1:
                    return shared_servers_with_short
        # do this if no shared server with short is found
        if list_of_shared_servers.__len__() == 0:
            return []
        else:
            return list_of_shared_servers
    else:
        return [message.server]
async def ask_for_server(bot, message, short=None):
    '''Resolve which server a DM command refers to.

    Returns the server directly when unambiguous; otherwise prompts the
    author to pick one by number. Returns None when no shared server is
    found and False when the author starts a bot command instead of
    answering (or the reply wait ends without a valid choice).
    '''
    server_list = await get_servers(bot, message, short)
    if server_list.__len__() == 0:
        if short == None:
            await bot.say(
                'I could not find a common server where we can see eachother. If you think this is an error, please contact the developer.')
        else:
            await bot.say(f'I could not find a server where the poll {short} exists that we both can see.')
        return None
    elif server_list.__len__() == 1:
        return server_list[0]
    else:
        # Ambiguous: build a numbered menu of candidate servers.
        # NOTE(review): unlike ask_for_channel, this text is not split at
        # Discord's 2048-char embed limit — verify with many servers.
        text = 'I\'m not sure which server you are referring to. Please tell me by typing the corresponding number.\n'
        i = 1
        for name in [s.name for s in server_list]:
            text += f'\n**{i}** - {name}'
            i += 1
        embed = discord.Embed(title="Select your server", description=text, color=SETTINGS.color)
        server_msg = await bot.send_message(message.channel, embed=embed)
        valid_reply = False
        nr = 1
        while valid_reply == False:
            reply = await bot.wait_for_message(timeout=60, author=message.author)
            if reply and reply.content:
                if reply.content.startswith(await get_pre(bot, message)):
                    # await bot.say('You can\'t use bot commands while I am waiting for an answer.'
                    #               '\n I\'ll stop waiting and execute your command.')
                    return False
                if str(reply.content).isdigit():
                    nr = int(reply.content)
                    if 0 < nr <= server_list.__len__():
                        valid_reply = True
                        return server_list[nr - 1]
async def ask_for_channel(bot, server, message):
    '''Resolve which text channel of *server* a command refers to.

    Returns the message's own channel when invoked from one, the only
    text channel when there is exactly one, or prompts the author to
    pick by number. Returns False when no text channel is visible or
    when the author starts a bot command instead of answering.
    '''
    # if performed from a channel, return that channel
    if str(message.channel.type) == 'text':
        return message.channel
    # if exactly 1 channel, return it
    channel_list = [c for c in server.channels if str(c.type) == 'text']
    if channel_list.__len__() == 1:
        return channel_list[0]
    # if no channels, display error
    if channel_list.__len__() == 0:
        embed = discord.Embed(title="Select a channel", description='No text channels found on this server. Make sure I can see them.', color=SETTINGS.color)
        await bot.say(embed=embed)
        return False
    # otherwise ask for a channel
    i = 1
    text = 'Polls are bound to a specific channel on a server. Please select the channel for this poll by typing the corresponding number.\n'
    for name in [c.name for c in channel_list]:
        to_add = f'\n**{i}** - {name}'
        # Split into multiple messages at Discord's 2048-char embed limit.
        if text.__len__() + to_add.__len__() > 2048:
            embed = discord.Embed(title="Select a channel", description=text, color=SETTINGS.color)
            await bot.say(embed=embed)
            text = 'Polls are bound to a specific channel on a server. Please select the channel for this poll by typing the corresponding number.\n'
        # BUGFIX: always append the current entry. The original only
        # appended in the non-overflow branch, silently dropping the
        # channel entry that triggered a flush (it stayed selectable by
        # number but never appeared in any menu message).
        text += to_add
        i += 1
    embed = discord.Embed(title="Select a channel", description=text, color=SETTINGS.color)
    await bot.say(embed=embed)
    valid_reply = False
    nr = 1
    while valid_reply == False:
        reply = await bot.wait_for_message(timeout=60, author=message.author)
        if reply and reply.content:
            if reply.content.startswith(await get_pre(bot, message)):
                # await bot.say('You can\'t use bot commands while I am waiting for an answer.'
                #               '\n I\'ll stop waiting and execute your command.')
                return False
            if str(reply.content).isdigit():
                nr = int(reply.content)
                if 0 < nr <= channel_list.__len__():
                    valid_reply = True
                    return channel_list[nr - 1]
"matnad@gmail.com"
] | matnad@gmail.com |
30244110794a223bf5bf5b3dc1c1b5ebab399117 | 9ac19ccdfcb0de2b9ec4295e3291cfc2a597e70f | /bert_variant/datautils/configs/base.py | 32c5d241c1f384d54014014c424c9659757ba6df | [] | no_license | pdsxsf/AgriNER | 30ffac831861a3656e2b51e6ee4bc1823a643afe | 766c1e165c77d10956b5a262ebc4b5d75638bcc3 | refs/heads/master | 2023-03-24T03:59:36.687709 | 2021-03-24T06:50:59 | 2021-03-24T06:50:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py |
from pathlib import Path
# Root of the datautils package; every other location hangs off it.
BASE_DIR = Path('datautils')

# Logical location name -> project-relative path for datasets, logs,
# TensorBoard summaries, figures, checkpoints, cached models and results.
config = dict(
    data_dir=BASE_DIR / 'dataset',
    log_dir=BASE_DIR / 'output/log',
    writer_dir=BASE_DIR / 'output/TSboard',
    figure_dir=BASE_DIR / 'output/figure',
    checkpoint_dir=BASE_DIR / 'output/checkpoints',
    cache_dir=BASE_DIR / 'model/',
    result_dir=BASE_DIR / 'output/result',
)
| [
"2838588360@qq.com"
] | 2838588360@qq.com |
388a367dcb7a39b2bc2e59de0a6af9923be7e32e | 91a549716b68535644814e46f8d4fcf8b714b496 | /node_modules/sharp/binding.gyp | 3a1893849fa97075fce86029e22b2f48262dde25 | [
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | KishanMaurya/personal-portfolio | 837d4c0b075a414b5bf85477e39fe9a9134ad0c1 | 2df0f228e6fe494b795381d45ae39ddc18095c44 | refs/heads/master | 2023-02-19T15:28:24.326898 | 2020-08-01T16:56:47 | 2020-08-01T16:56:47 | 284,300,267 | 3 | 0 | MIT | 2021-01-06T08:28:21 | 2020-08-01T16:47:37 | JavaScript | UTF-8 | Python | false | false | 6,787 | gyp | {
'targets': [{
'target_name': 'libvips-cpp',
'conditions': [
['OS == "win"', {
# Build libvips C++ binding for Windows due to MSVC std library ABI changes
'type': 'shared_library',
'defines': [
'VIPS_CPLUSPLUS_EXPORTS',
'_ALLOW_KEYWORD_MACROS'
],
'sources': [
'src/libvips/cplusplus/VError.cpp',
'src/libvips/cplusplus/VInterpolate.cpp',
'src/libvips/cplusplus/VImage.cpp'
],
'include_dirs': [
'vendor/include',
'vendor/include/glib-2.0',
'vendor/lib/glib-2.0/include'
],
'libraries': [
'../vendor/lib/libvips.lib',
'../vendor/lib/libglib-2.0.lib',
'../vendor/lib/libgobject-2.0.lib'
],
'configurations': {
'Release': {
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': 1
}
},
'msvs_disabled_warnings': [
4275
]
}
}
}, {
# Ignore this target for non-Windows
'type': 'none'
}]
]
}, {
'target_name': 'sharp',
'dependencies': [
'libvips-cpp'
],
'variables': {
'runtime_link%': 'shared',
'conditions': [
['OS != "win"', {
'pkg_config_path': '<!(node -e "console.log(require(\'./lib/libvips\').pkgConfigPath())")',
'use_global_libvips': '<!(node -e "console.log(Boolean(require(\'./lib/libvips\').useGlobalLibvips()).toString())")'
}, {
'pkg_config_path': '',
'use_global_libvips': ''
}]
]
},
'sources': [
'src/common.cc',
'src/metadata.cc',
'src/stats.cc',
'src/operations.cc',
'src/pipeline.cc',
'src/sharp.cc',
'src/utilities.cc'
],
'include_dirs': [
'<!(node -e "require(\'nan\')")'
],
'conditions': [
['use_global_libvips == "true"', {
# Use pkg-config for include and lib
'include_dirs': ['<!@(PKG_CONFIG_PATH="<(pkg_config_path)" pkg-config --cflags-only-I vips-cpp vips glib-2.0 | sed s\/-I//g)'],
'conditions': [
['runtime_link == "static"', {
'libraries': ['<!@(PKG_CONFIG_PATH="<(pkg_config_path)" pkg-config --libs --static vips-cpp)']
}, {
'libraries': ['<!@(PKG_CONFIG_PATH="<(pkg_config_path)" pkg-config --libs vips-cpp)']
}],
['OS == "linux"', {
'defines': [
# Inspect libvips-cpp.so to determine which C++11 ABI version was used and set _GLIBCXX_USE_CXX11_ABI accordingly. This is quite horrible.
'_GLIBCXX_USE_CXX11_ABI=<!(if readelf -Ws "$(PKG_CONFIG_PATH="<(pkg_config_path)" pkg-config --variable libdir vips-cpp)/libvips-cpp.so" | c++filt | grep -qF __cxx11;then echo "1";else echo "0";fi)'
]
}]
]
}, {
# Use pre-built libvips stored locally within node_modules
'include_dirs': [
'vendor/include',
'vendor/include/glib-2.0',
'vendor/lib/glib-2.0/include'
],
'conditions': [
['OS == "win"', {
'defines': [
'_ALLOW_KEYWORD_MACROS',
'_FILE_OFFSET_BITS=64'
],
'libraries': [
'../vendor/lib/libvips.lib',
'../vendor/lib/libglib-2.0.lib',
'../vendor/lib/libgobject-2.0.lib'
]
}],
['OS == "mac"', {
'libraries': [
'../vendor/lib/libvips-cpp.42.dylib',
'../vendor/lib/libvips.42.dylib',
'../vendor/lib/libglib-2.0.0.dylib',
'../vendor/lib/libgobject-2.0.0.dylib',
# Ensure runtime linking is relative to sharp.node
'-rpath \'@loader_path/../../vendor/lib\''
]
}],
['OS == "linux"', {
'defines': [
'_GLIBCXX_USE_CXX11_ABI=0'
],
'libraries': [
'../vendor/lib/libvips-cpp.so',
'../vendor/lib/libvips.so',
'../vendor/lib/libglib-2.0.so',
'../vendor/lib/libgobject-2.0.so',
# Dependencies of dependencies, included for openSUSE support
'../vendor/lib/libcairo.so',
'../vendor/lib/libcroco-0.6.so',
'../vendor/lib/libexif.so',
'../vendor/lib/libexpat.so',
'../vendor/lib/libffi.so',
'../vendor/lib/libfontconfig.so',
'../vendor/lib/libfreetype.so',
'../vendor/lib/libfribidi.so',
'../vendor/lib/libgdk_pixbuf-2.0.so',
'../vendor/lib/libgif.so',
'../vendor/lib/libgio-2.0.so',
'../vendor/lib/libgmodule-2.0.so',
'../vendor/lib/libgsf-1.so',
'../vendor/lib/libgthread-2.0.so',
'../vendor/lib/libharfbuzz.so',
'../vendor/lib/libjpeg.so',
'../vendor/lib/liblcms2.so',
'../vendor/lib/liborc-0.4.so',
'../vendor/lib/libpango-1.0.so',
'../vendor/lib/libpangocairo-1.0.so',
'../vendor/lib/libpangoft2-1.0.so',
'../vendor/lib/libpixman-1.so',
'../vendor/lib/libpng.so',
'../vendor/lib/librsvg-2.so',
'../vendor/lib/libtiff.so',
'../vendor/lib/libwebp.so',
'../vendor/lib/libwebpdemux.so',
'../vendor/lib/libwebpmux.so',
'../vendor/lib/libxml2.so',
'../vendor/lib/libz.so',
# Ensure runtime linking is relative to sharp.node
'-Wl,--disable-new-dtags -Wl,-rpath=\'$${ORIGIN}/../../vendor/lib\''
]
}]
]
}]
],
'cflags_cc': [
'-std=c++0x',
'-fexceptions',
'-Wall',
'-O3'
],
'xcode_settings': {
'CLANG_CXX_LANGUAGE_STANDARD': 'c++11',
'CLANG_CXX_LIBRARY': 'libc++',
'MACOSX_DEPLOYMENT_TARGET': '10.7',
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'GCC_ENABLE_CPP_RTTI': 'YES',
'OTHER_CPLUSPLUSFLAGS': [
'-fexceptions',
'-Wall',
'-O3'
]
},
'configurations': {
'Release': {
'conditions': [
['OS == "linux"', {
'cflags_cc': [
'-Wno-cast-function-type'
]
}],
['OS == "win"', {
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': 1
}
},
'msvs_disabled_warnings': [
4275
]
}]
]
}
},
}]
}
| [
"kkmaurya.0095@gmail.com"
] | kkmaurya.0095@gmail.com |
6ec36cea031f3fe6e3bb6b5a147da2fb27a1a303 | f5599ceb7e04828bdd26cb4c27ffd76396d33a72 | /app/datasets.py | c2c1ae86b3f4d6d1fbd68fec7e4ec280d2414096 | [
"MIT"
] | permissive | TLMichael/Acc-SZOFW | 4d41436ff258d6678728fa70f070657632bdcf05 | ed1b0947bf43da02d2648c84e9dca0686fc41976 | refs/heads/main | 2023-01-09T13:57:25.829712 | 2020-10-15T06:32:25 | 2020-10-15T06:32:25 | 238,661,607 | 3 | 0 | MIT | 2020-09-06T03:13:38 | 2020-02-06T10:22:42 | Python | UTF-8 | Python | false | false | 6,972 | py | import numpy as np
import os.path as osp
from sklearn.datasets import load_svmlight_file
import torch
from torch.utils.data import Dataset
from torchvision import transforms, datasets
# DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
DEVICE = torch.device('cpu')
PHISHING_PATH = '~/datasets/phishing/phishing'
A9A_PATH = '~/datasets/a9a/a9a'
W8A_PATH = '~/datasets/w8a/w8a'
COVTYPE_PATH = '~/datasets/covtype/covtype.libsvm.binary.scale.bz2'
def unison_shuffled_copies(a, b):
assert len(a) == len(b)
np.random.seed(0)
p = np.random.permutation(len(a))
return a[p], b[p]
class Phishing(Dataset):
""" `Phishing <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary.html#phishing>`_ Dataset. """
def __init__(self, path=PHISHING_PATH, train=True):
self.path = path
self.split = 'Train' if train else 'Test'
data = load_svmlight_file(osp.expanduser(self.path))
X, y = data[0].toarray(), data[1]
X, y = unison_shuffled_copies(X, y)
y[y == 0] = -1
if train:
X, y = X[:len(y)//2], y[:len(y)//2]
else:
X, y = X[len(y)//2:], y[len(y)//2:]
self.data = X
self.targets = y
def __len__(self):
return len(self.targets)
def __getitem__(self, idx):
x = self.data[idx]
y = self.targets[idx]
x = torch.tensor(x, device=DEVICE)
y = torch.tensor(y, device=DEVICE)
return x, y
def __repr__(self):
head = self.__class__.__name__ + ' ' + self.split
body = ["Number of datapoints: {}".format(self.__len__())]
if self.path is not None:
body.append("File location: {}".format(self.path))
lines = [head] + [" " * 4 + line for line in body]
return '\n'.join(lines)
class A9A(Dataset):
""" `A9A <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary.html#a9a>`_ Dataset. """
def __init__(self, path=A9A_PATH, train=True):
self.path = path
self.split = 'Train' if train else 'Test'
data = load_svmlight_file(osp.expanduser(self.path))
X, y = data[0].toarray(), data[1]
X, y = unison_shuffled_copies(X, y)
if train:
X, y = X[:len(y)//2], y[:len(y)//2]
else:
X, y = X[len(y)//2:], y[len(y)//2:]
self.data = X
self.targets = y
def __len__(self):
return len(self.targets)
def __getitem__(self, idx):
x = self.data[idx]
y = self.targets[idx]
x = torch.tensor(x, device=DEVICE)
y = torch.tensor(y, device=DEVICE)
return x, y
def __repr__(self):
head = self.__class__.__name__ + ' ' + self.split
body = ["Number of datapoints: {}".format(self.__len__())]
if self.path is not None:
body.append("File location: {}".format(self.path))
lines = [head] + [" " * 4 + line for line in body]
return '\n'.join(lines)
class W8A(Dataset):
""" `W8A <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary.html#w8a>`_ Dataset. """
def __init__(self, path=W8A_PATH, train=True):
self.path = path
self.split = 'train' if train else 'test'
data = load_svmlight_file(osp.expanduser(self.path))
X, y = data[0].toarray(), data[1]
X, y = unison_shuffled_copies(X, y)
if train:
X, y = X[:len(y)//2], y[:len(y)//2]
else:
X, y = X[len(y)//2:], y[len(y)//2:]
self.data = X
self.targets = y
def __len__(self):
return len(self.targets)
def __getitem__(self, idx):
x = self.data[idx]
y = self.targets[idx]
x = torch.tensor(x, device=DEVICE)
y = torch.tensor(y, device=DEVICE)
return x, y
def __repr__(self):
head = self.__class__.__name__ + ' ' + self.split
body = ["Number of datapoints: {}".format(self.__len__())]
if self.path is not None:
body.append("File location: {}".format(self.path))
lines = [head] + [" " * 4 + line for line in body]
return '\n'.join(lines)
class CovtypeBinary(Dataset):
""" `Covtype.binary <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary.html#covtype.binary>`_ Dataset. """
def __init__(self, path=COVTYPE_PATH, train=True):
self.path = path
self.split = 'train' if train else 'test'
data = load_svmlight_file(osp.expanduser(self.path))
X, y = data[0].toarray(), data[1]
X, y = unison_shuffled_copies(X, y)
y[ y== 2] = -1
if train:
X, y = X[:len(y)//2], y[:len(y)//2]
else:
X, y = X[len(y)//2:], y[len(y)//2:]
self.data = X
self.targets = y
def __len__(self):
return len(self.targets)
def __getitem__(self, idx):
x = self.data[idx]
y = self.targets[idx]
x = torch.tensor(x, device=DEVICE)
y = torch.tensor(y, device=DEVICE)
return x, y
def __repr__(self):
head = self.__class__.__name__ + ' ' + self.split
body = ["Number of datapoints: {}".format(self.__len__())]
if self.path is not None:
body.append("File location: {}".format(self.path))
lines = [head] + [" " * 4 + line for line in body]
return '\n'.join(lines)
def get_dataset(dataset, train=True):
if dataset == 'phishing':
data = Phishing(train=train)
elif dataset == 'a9a':
data = A9A(train=train)
elif dataset == 'w8a':
data = W8A(train=train)
elif dataset == 'covtype':
data = CovtypeBinary(train=train)
else:
raise Exception('Unsupported dataset ({}) !'.format(dataset))
return data
if __name__ == '__main__':
def count(x, v):
return (x == v).sum()
# data = Phishing()
# print(data)
# print(count(data.targets, 1), count(data.targets, -1))
# print()
# data = Phishing(train=False)
# print(data)
# print(count(data.targets, 1), count(data.targets, -1))
# print()
# data = A9A()
# print(data)
# print(count(data.targets, 1), count(data.targets, -1))
# print()
# data = A9A(train=False)
# print(data)
# print(count(data.targets, 1), count(data.targets, -1))
# print()
# data = W8A()
# print(data)
# print(count(data.targets, 1), count(data.targets, -1))
# print()
# data = W8A(train=False)
# print(data)
# print(count(data.targets, 1), count(data.targets, -1))
# print()
data = CovtypeBinary()
print(data)
print(count(data.targets, 1), count(data.targets, -1))
print()
data = CovtypeBinary(train=False)
print(data)
print(count(data.targets, 1), count(data.targets, -1))
print()
from torch.utils.data import DataLoader
loader = DataLoader(data, batch_size=2)
print('Done')
| [
"tlmichael@nuaa.edu.cn"
] | tlmichael@nuaa.edu.cn |
aa75e48a6f029780cc486fc33fe08e38602e0b2b | eb1896359cd718ba86f2e0657872710e914c6161 | /app/admin/invite_view.py | 27f6c07094b2bc383d60e4f37371fb2999ae1793 | [
"MIT"
] | permissive | dwcaraway/govly | 575333c6a5cb4cad6c2615e5d4152330d85ff72b | c3a134c2d8ae911c0ab05d9b96014a7c18bfac45 | refs/heads/master | 2020-12-26T04:37:39.642750 | 2015-12-22T02:08:21 | 2015-12-22T02:08:21 | 34,965,531 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | py | __author__ = 'dave'
from . import MyModelView
from ..models.users import Invite
from wtforms import form, fields, validators, ValidationError
from flask_admin import expose
from flask import current_app, url_for, render_template
from flask_security import current_user
from urlparse import urljoin
from ..framework.utils import generate_invitation_token
from app.framework.utils import send_message
from flask_admin.helpers import get_form_data
class InviteForm(form.Form):
invitee_email = fields.StringField(u'Email To Invite',
validators=[validators.required(), validators.email()])
invitor_id = fields.HiddenField()
token = fields.HiddenField()
class InviteView(MyModelView):
def __init__(self, session):
"""
Creates a new view.
:param session: An SQLAlchemy session object e.g. db.session
:return: the created instance
"""
return super(InviteView, self).__init__(Invite, session)
def create_form(self, obj=None):
"""Overriding the default create form to add some hidden field values"""
form_data = get_form_data()
i = InviteForm()
if form_data:
i.invitee_email.data = form_data['invitee_email']
i.invitor_id.data = current_user.id
i.token.data = generate_invitation_token(current_user)
return i
def after_model_change(self, form, model, is_created):
"""
Override the default after_model_change to send notification email to the invitee.
called after the model is committed to the database
"""
if is_created:
invite_link = urljoin(current_app.config['CLIENT_DOMAIN'], '/#/register?token='+model.token)
#TODO this mail send should be performed asynchronously using celery, see issue #88850472
send_message(
subject="You've been given early access to FogMine",
sender="do-not-reply@fogmine.com",
recipients = [model.invitee_email],
html_body=render_template('email/invite.html', user=current_user, confirmation_link=invite_link),
text_body=render_template('email/invite.txt', user=current_user, confirmation_link=invite_link)
)
return super(InviteView, self).after_model_change(form, model, is_created)
| [
"dave@fogmine.com"
] | dave@fogmine.com |
aa5650cfa845d0f8a1a8b2048a907d06c2b3d36d | 1061216c2c33c1ed4ffb33e6211565575957e48f | /python-legacy/test/test_custom_profile_field.py | 9c780d683beda23dc85ae0a5a0c376b149184f96 | [] | no_license | MSurfer20/test2 | be9532f54839e8f58b60a8e4587348c2810ecdb9 | 13b35d72f33302fa532aea189e8f532272f1f799 | refs/heads/main | 2023-07-03T04:19:57.548080 | 2021-08-11T19:16:42 | 2021-08-11T19:16:42 | 393,920,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,526 | py | # coding: utf-8
"""
Zulip REST API
Powerful open source group chat # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.custom_profile_field import CustomProfileField # noqa: E501
from openapi_client.rest import ApiException
class TestCustomProfileField(unittest.TestCase):
"""CustomProfileField unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test CustomProfileField
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.custom_profile_field.CustomProfileField() # noqa: E501
if include_optional :
return CustomProfileField(
id = 56,
type = 56,
order = 56,
name = '',
hint = '',
field_data = ''
)
else :
return CustomProfileField(
)
def testCustomProfileField(self):
"""Test CustomProfileField"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"suyash.mathur@research.iiit.ac.in"
] | suyash.mathur@research.iiit.ac.in |
804db3ce7450bad877259edbd96312fba3b9df27 | 6cb9f81432fd3f4b3f98c7df293acd5bb6829cbf | /cookiesDemo.py | 774092f8c17cc38643221b936a8b0ec5eea098b3 | [] | no_license | ruivers/Flask_on_date | ae69b7c2c37020486e9a4adbafb3bcecfb372d1a | 19884abe2d5e1b3033771fd9e9d87f2a1e966bda | refs/heads/master | 2020-03-18T05:28:21.096446 | 2018-05-29T05:42:49 | 2018-05-29T05:42:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | from flask import Flask
from flask import request
app = Flask(__name__)
@app .route('/')
@app.route('/')
def index():
resp = make_response(render_template('home.html'))
resp.set_cookie('username', 'the username')
return resp
if __name__ == '__main__':
app.run(host='0.0.0.0')
| [
"ruiweilai@163.com"
] | ruiweilai@163.com |
a737506c9c92729017569ef7d60e7a6f191776fd | 06de6ed71aa33d99b11bb1176c2db8244e9a93f7 | /Driver/RightHand_NS.py | 14b4d2b51d46a17243a6cadaa490dbb78ff98524 | [] | no_license | janenie/MCM_c | b646b0586aaca6cee118a443d20053c53b454d23 | 6148bae0189949630329e3f9932a3d88650e3770 | refs/heads/master | 2021-01-22T07:27:25.542793 | 2014-02-10T21:08:12 | 2014-02-10T21:08:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | import random
from system import *
class FSA(object):
def __init__(self):
return
def decideLane(self,driver):
driver.lane = "Right"
def judge(self,driver):
velocity = driver.velocity
accelorate = max(1 , driver.maxa - driver.roadFc)
moderate = max(1 , driver.roadFc)
if driver.option == "crash":
driver.nextVelocity = max(0,velocity - moderate)
return "crash"
if driver.ThisBefore == None:
ThisBeforeDis = bigV
else:
ThisBeforeDis = driver.ThisBefore.journey - driver.journey
if driver.OtherBefore == None:
OtherBeforeDis = bigV
else:
OtherBeforeDis = driver.OtherBefore.journey - driver.journey
if driver.ThisAfter == None:
ThisAfterDis = bigV
else:
ThisAfterDis = driver.journey - driver.ThisAfter.journey
if driver.OtherAfter == None:
OtherAfterDis = bigV
else:
OtherAfterDis = driver.journey - driver.OtherAfter.journey
if ThisBeforeDis < 0:
print "ThisBefore"
if ThisAfterDis < 0:
print "ThisAfter"
if OtherBeforeDis < 0:
print "OtherBefore"
if OtherAfterDis < 0:
print "OtherAfter"
driver.nextVelocity = min(velocity + accelorate , driver.MaxV , ThisBeforeDis - 1)
#driver.nextVelocity = max(driver.nextVelocity , velocity - moderate)
lane = driver.lane
if driver.OtherAfter == None:
OtherAfterV = 0
else:
OtherAfterV = driver.OtherAfter.velocity
if OtherAfterV < OtherAfterDis - 1:
if lane == "Right":
if velocity > ThisBeforeDis - 1 and OtherBeforeDis > ThisBeforeDis:
driver.nextVelocity = min(velocity + accelorate , driver.MaxV , OtherBeforeDis - 1)
return "changeLane"
else:
if velocity < OtherBeforeDis - 1:
driver.nextVelocity = min(velocity + accelorate , driver.MaxV , OtherBeforeDis - 1)
return "changeLane"
return "move"
| [
"janlovefree@gmail.com"
] | janlovefree@gmail.com |
013a8c17bd649838df798ceb7233a19105545f6b | 1f269060150f19de1b123589037ca0cde82cbca6 | /task2.py | 6aa9db3b93a6e8bdace68504ec6032dbed93a64d | [] | no_license | ndk03/Image-Filtering-and-template-matching | d6b9298971f14e8428d015afd8156974990f70ef | a3c116db68a3196189bc7579080102aaf736f40b | refs/heads/master | 2022-12-10T03:22:18.783343 | 2020-08-31T18:32:35 | 2020-08-31T18:32:35 | 291,792,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,619 | py | import argparse
import json
import os
import utils
from task1 import *
def parse_args():
parser = argparse.ArgumentParser(description="cse 473/573 project 1.")
parser.add_argument(
"--img-path",
type=str,
default="./data/proj1-task2.jpg",
help="path to the image")
parser.add_argument(
"--template-path",
type=str,
default="./data/proj1-task2-template.jpg",
help="path to the template"
)
parser.add_argument(
"--result-saving-path",
dest="rs_path",
type=str,
default="./results/task2.json",
help="path to file which results are saved (do not change this arg)"
)
args = parser.parse_args()
return args
def norm_xcorr2d(patch, template):
"""Computes the NCC value between a image patch and a template.
The image patch and the template are of the same size. The formula used to compute the NCC value is:
sum_{i,j}(x_{i,j} - x^{m}_{i,j})(y_{i,j} - y^{m}_{i,j}) / (sum_{i,j}(x_{i,j} - x^{m}_{i,j}) ** 2 * sum_{i,j}(y_{i,j} - y^{m}_{i,j})) ** 0.5
This equation is the one shown in Prof. Yuan's ppt.
Args:
patch: nested list (int), image patch.
template: nested list (int), template.
Returns:
value (float): the NCC value between a image patch and a template.
"""
flipped_template = template
#print(flipped_template.shape)
#print(patch.shape)
rows = 0
cols = 0
mean_patch = 0
mean_template = 0
#calculating the mean of patch
for i in range(0,len(patch)):
for j in range(0,len(patch[1])):
mean_patch = mean_patch + patch[i][j]
mean_patch = mean_patch/(len(patch)*len(patch[1]))
#calculating the mean of template
for i in range(0,len(flipped_template)):
for j in range(0,len(flipped_template[1])):
mean_template = mean_template + flipped_template[i][j]
mean_template = mean_template/(len(flipped_template)*len(flipped_template[1]))
numerator = 0.0
denominator1 = 0.0
denominator2 = 0.0
for i in range(0,len(patch)):
for j in range(0,len(patch[1])):
numerator = numerator + (flipped_template[i][j]-mean_template)*(patch[i][j] - mean_patch)
denominator1 = denominator1 + (flipped_template[i][j]-mean_template)**2
denominator2 = denominator2 + (patch[i][j]-mean_patch)**2
denominator = (denominator1*denominator2)**(1/2)
return(numerator/denominator)
#raise NotImplementedError
def match(img, template):
"""Locates the template, i.e., a image patch, in a large image using template matching techniques, i.e., NCC.
Args:
img: nested list (int), image that contains character to be detected.
template: nested list (int), template image.
Returns:
x (int): row that the character appears (starts from 0).
y (int): column that the character appears (starts from 0).
max_value (float): maximum NCC value.
"""
position = []
ncc = []
for i in range(0,len(img)-len(template)):
for j in range(0,len(img[1])-len(template[1])):
patch = utils.crop(img,i,i+len(template),j,j+len(template[0]))
"""for ki in range(0,len(template)):
new_row = []
for kj in range(0,len(template[1])):
new_row.append(img[i+ki][j+kj])
patch.append(new_row)"""
ncc.append(norm_xcorr2d(patch,template))
position.append([i,j])
max_index = 0
max = ncc[0]
for i in range(1,len(ncc)):
if(ncc[i]>max):
max = ncc[i]
max_index = i
x = position[max_index][0]
y = position[max_index][1]
return x,y,max
def save_results(coordinates, template, template_name, rs_directory):
results = {}
results["coordinates"] = sorted(coordinates, key=lambda x: x[0])
results["templat_size"] = (len(template), len(template[0]))
with open(os.path.join(rs_directory, template_name), "w") as file:
json.dump(results, file)
def main():
args = parse_args()
img = read_image(args.img_path)
# template = utils.crop(img, xmin=10, xmax=30, ymin=10, ymax=30)
# template = np.asarray(template, dtype=np.uint8)
# cv2.imwrite("./data/proj1-task2-template.jpg", template)
template = read_image(args.template_path)
x, y, max_value = match(img, template)
with open(args.rs_path, "w") as file:
json.dump({"x": x, "y": y, "value": max_value}, file)
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com |
c3902da83d4bbf653f3646329166df8e9cb6ac8a | afe6819d7b22d1023ca05356ac16bbb059936a45 | /venv/bin/pytest | 85e15de5a4aacad50194ac120c96c29ad163de1c | [] | no_license | MaximkaKash/todo | 91a518fa314c232794292bf1dc299d646b7f2167 | ab5c1c5f045bfe0dd4cb778d67081888e67a4fe7 | refs/heads/main | 2023-07-23T13:34:37.169958 | 2021-09-11T08:29:06 | 2021-09-11T08:29:06 | 404,414,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | #!/home/maksim/python/todo/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pytest import console_main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(console_main())
| [
"kanashitsusu@gamil.com"
] | kanashitsusu@gamil.com | |
0f64588d8eabb24d126ca11a7baf5283462f158f | ebd1e49fc405d6711c36c1aa16450682176f622f | /Snake Water Gun.py | 7e48498b5554b0b51a248e0c32c873f40db05603 | [] | no_license | lokesh2509/Snake-Water-Gun | 332b775984cc5849cce35f97fd5be86cb71769f5 | cd5b6f53aa1bc1bb832c5b6612c8b2367585c846 | refs/heads/main | 2023-07-04T14:16:23.839595 | 2021-08-19T06:40:12 | 2021-08-19T06:40:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,686 | py | #Snake Water Gun
"""Create a program using while loop or for loop and use random module. take input from user and also use random. If
the inut and random value mathecs show you won. The game willl continue till 10 times and then shows the score"""
"""Following are the rules of the game:
Snake vs. Water: Snake drinks the water hence wins.
Water vs. Gun: The gun will drown in water, hence a point for water
Gun vs. Snake: Gun will kill the snake and win.
In situations where both players choose the same object, the result will be a draw."""
print("Welcome!! In the \"Snake Water Gun Game\"\n Choose any one out of Snake, Water and Gun\n")
rounds = 0
win = 0
lost = 0
draw = 0
while(True):
print("Round> ", rounds)
print("You won> ", win)
print("You lost> ", lost)
print("Draw between you and device> ", draw,"\n")
print("This game will continue till 10 rounds")
user = input("Enter Your Choice: ")
lst = ["Snake", "Water", "Gun"]
import random
ran = random.choice(lst)
if rounds >= 10:
print(f"10 Rounds are over\nYou won {win} times.\n You Lost {lost} times.\n Draw between you and device {draw} times")
print("Thanks for playing this game.")
exit()
if user == "Snake" and ran == "Snake":
print("Its a Draw.\n")
draw = draw + 1
rounds = rounds + 1
elif user == "Water" and ran == "Water":
print("Its a Draw.\n")
draw = draw + 1
rounds = rounds + 1
elif user == "Gun" and ran == "Gun":
print("Its a Draw.\n")
draw = draw + 1
rounds = rounds + 1
elif user == "Snake" and ran == "Water":
print("WOW!!!\n Congrats, You won this round.\n")
win = win + 1
rounds = rounds + 1
elif user == "Snake" and ran == "Gun":
print("OOPS!!!\n Sorry, You lost this round.\n")
lost = lost + 1
rounds = rounds + 1
elif user == "Water" and ran == "Snake":
print("OOPS!!!\n Sorry, You lost this round.\n")
lost = lost + 1
rounds = rounds + 1
elif user == "Water" and ran == "Gun":
print("WOW!!!\n Congrats, You won this round.\n")
win = win + 1
rounds = rounds + 1
elif user == "Gun" and ran == "Snake":
print("WOW!!!\n Congrats, You won this round.\n")
win = win + 1
rounds = rounds + 1
elif user == "Gun" and ran == "Water":
print("OOPS!!!\n Sorry, You lost this round.\n")
lost = lost + 1
rounds = rounds + 1
else:
print("Error\n Check before you type.\n")
break | [
"noreply@github.com"
] | noreply@github.com |
482518e8aa5babb705a3fb0fb72f1cd3ebccfe8f | 2f02494259ddd45ad0b961a14364a70eb78994a6 | /exam-review/9/clark-bains/problem9-tester.py | e333239558760ce6b5a9fb6979af73a4e116a3b6 | [] | no_license | malcolm-smith/1405-practice | 331a513795494d21d52597b54ab91e7c535f2f2e | 6265bf4a13f1b21b51c184c5c092f3b8557e0804 | refs/heads/master | 2022-03-24T15:15:11.948946 | 2019-12-18T04:55:40 | 2019-12-18T04:55:40 | 209,166,774 | 2 | 6 | null | 2019-12-23T19:41:37 | 2019-09-17T22:19:12 | Python | UTF-8 | Python | false | false | 3,247 | py |
import copy
import importlib
modname = "problem9"
funcname = "isvalidseries"
information = [[[[8, 4, 8, 3, 1, 2, 7, 9], 3, 19], False], [[[2, 4, 8, 3, 1, 2, 7, 9], 3, 19], True], [[[2, 4, 8, 3, 1, 2, 7, 9], 3, 16], False], [[[5, 5, 5, 5, 5, 5, 5, 5], 3, 19], True], [[[5, 5, 5, 5, 5, 5, 5, 5], 4, 19], False], [[[5, 5, 5, 5, 5, 5, 5, 5], 4, 20], True], [[[1, 4, 6, 6, 8, 10, 9, 2, 4, 8, 1, 2, 9, 9, 1], 5, 32], False], [[[1, 3, 8, 4, 8, 6, 5, 5], 4, 25], False], [[[8, 6, 6, 2, 10, 2, 7, 3, 6], 4, 27], True], [[[8, 4, 2, 8, 5, 5, 2, 9, 1, 2, 2, 6, 5, 7, 5, 1], 4, 22], False], [[[9, 10, 8, 6, 8, 3, 5, 10, 10], 4, 38], True], [[[2, 3, 7, 7, 9, 2, 3, 6, 6, 9, 3, 4, 7], 6, 36], True], [[[6, 7, 9, 3, 7, 9, 10, 7, 4, 3, 10, 10, 5, 7, 1, 5, 2, 5, 10, 9, 8, 2], 6, 44], False], [[[10, 8, 9, 8, 5, 8, 7, 7, 4, 5, 7, 4, 1, 8, 7, 6, 1], 6, 43], False], [[[8, 4, 10, 5, 8, 9, 4, 10, 9, 5, 6, 6], 6, 49], True], [[[1, 10, 4, 10, 10, 5, 1, 10], 5, 38], False], [[[7, 7, 1, 7, 3, 4, 9, 1, 6, 2], 3, 18], True], [[[6, 8, 6, 6, 7, 8, 5, 6, 7, 8], 4, 33], True], [[[8, 10, 7, 5, 5, 4, 4, 4, 5, 9, 6, 9, 2, 4, 4, 1, 3], 6, 37], False], [[[10, 2, 7, 5, 9, 3, 3, 7, 3, 6, 10, 5], 6, 40], True], [[[5, 6, 4, 2, 3, 4, 3, 5, 2, 10, 7, 3, 7, 9, 8, 5, 6, 7], 5, 32], False], [[[6, 2, 8, 3, 4, 2, 5, 5, 9, 6, 6, 4, 2, 9, 9], 6, 37], True], [[[6, 4, 2, 9, 8, 1, 3, 8, 4, 4, 2, 6, 7, 10], 4, 26], True], [[[7, 2, 7, 6, 4, 3, 5, 1, 3, 5, 4, 8, 4, 6, 5], 3, 18], True], [[[6, 9, 6, 2, 7, 6, 4, 2, 3], 3, 20], False]]
resulttype = "bool"
try:
module = importlib.import_module(modname)
func = getattr(module,funcname)
except:
print("Error loading module and/or function - check the names?")
quit()
correct = 0
incorrect = []
print("Checking function with test inputs...")
print()
for info in information:
inputs = copy.deepcopy(info[0])
goal = info[1]
print("Inputs:", str(inputs)[1:-1])
print("Goal:",goal)
result = func(*inputs)
print("Your Result:", result)
success = False
if resulttype == "int" and isinstance(result, int):
success = goal == result
elif resulttype == "bool" and isinstance(result, bool):
success = goal == result
elif resulttype == "float" and isinstance(result, (int,float)):
success = abs(goal - result) < 0.001
elif resulttype == "string" and isinstance(result, str):
success = goal.lower() == result.lower()
elif resulttype == "orderedlist" and isinstance(result, list):
success = False
if len(goal) == len(result):
success = True
for i in range(len(goal)):
if goal[i] != result[i]:
success = False
if success:
correct += 1
print("Good!")
else:
incorrect.append([inputs,goal,result])
print("Incorrect!")
print()
print()
print("Your code produced",correct,"out of", len(information),"correct results.")
print()
if len(incorrect) > 0:
input("Hit enter to see the incorrect cases...")
print("The inputs for which your program failed were:")
print()
for info in incorrect:
print("Inputs:", str(info[0])[1:-1])
print("Goal:", info[1])
print("Your Result:", info[2])
print()
| [
"clarkbains@gmail.com"
] | clarkbains@gmail.com |
bd51f7d2254adf5b66b9f623b6a902a082b95b74 | 52bd6040667388c1cb25802feeaa78260d870858 | /finalscripts/fit_lines_4triangles.py | 4456c45b406ef44ae98b5229d27ab8c10ea46bc7 | [] | no_license | mainCSG/DotTuning | 837bb418618b6df4e15db8190953a0456094f856 | b2cb52cdd343ea64d39c6e75fd8f340d2b709198 | refs/heads/master | 2020-07-02T08:32:20.558372 | 2019-08-09T19:43:01 | 2019-08-09T19:43:01 | 201,474,350 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,492 | py | # it takes 5 vertices as the input along with the cluster. It first finds the boundary points and
# puts the point into 5 groups based on which edge it is closest to. Then fits a line through these groups
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
# import csv
# from curr_thresh_filter import curr_thresh_filter
# from matplotlib import cm
# from pandas import DataFrame
# from find_Vgms import find_Vgms
# from find_Ecs import find_Ecs
# from find_Cgs import find_Cgs
# from find_Cratios import find_Cratios
from scipy.optimize import minimize
from numpy.linalg import inv
# from skimage import feature
# from DBSCAN import DBSCAN
# from scipy import ndimage
vertices=[[],[],[],[]]
def onpick1(event):
    # Matplotlib pick-event callback for the base triangle's plot:
    # records the first picked (x, y) point as a vertex in vertices[0].
    global vertices
    artist = event.artist
    xs = artist.get_xdata()
    ys = artist.get_ydata()
    picked = tuple(zip(xs[event.ind], ys[event.ind]))
    print('onpick points:', picked)
    vertices[0].append(picked[0])
def onpick2(event):
    # Matplotlib pick-event callback for the second triangle's plot:
    # records the first picked (x, y) point as a vertex in vertices[1].
    global vertices
    artist = event.artist
    xs = artist.get_xdata()
    ys = artist.get_ydata()
    picked = tuple(zip(xs[event.ind], ys[event.ind]))
    print('onpick points:', picked)
    vertices[1].append(picked[0])
def onpick3(event):
    # Matplotlib pick-event callback for the third triangle's plot:
    # records the first picked (x, y) point as a vertex in vertices[2].
    global vertices
    artist = event.artist
    xs = artist.get_xdata()
    ys = artist.get_ydata()
    picked = tuple(zip(xs[event.ind], ys[event.ind]))
    print('onpick points:', picked)
    vertices[2].append(picked[0])
def onpick4(event):
    # Matplotlib pick-event callback for the fourth triangle's plot:
    # records the first picked (x, y) point as a vertex in vertices[3].
    global vertices
    artist = event.artist
    xs = artist.get_xdata()
    ys = artist.get_ydata()
    picked = tuple(zip(xs[event.ind], ys[event.ind]))
    print('onpick points:', picked)
    vertices[3].append(picked[0])
def filter_grp(group, line, vertice):
    """Keep the points of *group* lying strictly on the same side of the
    line y = line[0]*x + line[1] as the reference point *vertice*.

    Points exactly on the line are discarded (the sign product is zero).
    Returns a new list of [x, y] pairs.
    """
    slope, intercept = line[0], line[1]
    # Signed vertical residual of the reference vertex w.r.t. the line.
    ref_side = vertice[1] - slope * vertice[0] - intercept
    kept = []
    for point in group:
        px, py = point[0], point[1]
        residual = py - slope * px - intercept
        # Same sign as the vertex residual => same side of the line.
        if ref_side * residual > 0:
            kept.append([px, py])
    return kept
def error_line(params, *args):
    """Sum of squared vertical residuals of four translated point groups
    against a single candidate line.

    params: (slope, intercept, dx1, dy1, dx2, dy2), where (dx1, dy1) and
        (dx2, dy2) are the translations of the two neighbouring triangles
        relative to the base triangle.
    args: the four point groups (lists of [x, y] pairs); group 0 belongs
        to the base triangle and is not shifted.
    """
    slope, intercept = params[0], params[1]
    dx1, dy1, dx2, dy2 = params[2], params[3], params[4], params[5]
    # Translation to undo for each group before measuring the residual:
    # the fourth triangle is shifted by both displacements combined.
    offsets = ((0.0, 0.0), (dx1, dy1), (dx2, dy2), (dx1 + dx2, dy1 + dy2))
    total = 0.0
    for grp, (ox, oy) in zip(args[:4], offsets):
        for point in grp:
            x = point[0] - ox
            y = point[1] - oy
            total += (y - slope * x - intercept) ** 2
    return total
def error_parallel_lines(params, *args):
    """Sum of squared residuals of four translated point groups against a
    fixed line, after sliding all points by a scalar shift along the
    direction of slope m.

    params: (shift,) -- the only fitted parameter.
    args: (pts1, pts2, pts3, pts4, slope, intercept, m,
           dx1, dy1, dx2, dy2); the line (slope, intercept), the shift
    direction m, and the inter-triangle translations are held fixed.
    """
    shift = params[0]
    slope, intercept = args[4], args[5]
    m = args[6]
    dx1, dy1, dx2, dy2 = args[7], args[8], args[9], args[10]
    # Decompose the scalar shift into x/y components along slope m.
    x_shift = shift / (1 + m ** 2) ** 0.5
    y_shift = shift * m / (1 + m ** 2) ** 0.5
    # Group 0 is unshifted; the others undo their triangle's translation.
    offsets = ((0.0, 0.0), (dx1, dy1), (dx2, dy2), (dx1 + dx2, dy1 + dy2))
    total = 0.0
    for grp, (ox, oy) in zip(args[:4], offsets):
        for point in grp:
            x = point[0] - ox + x_shift
            y = point[1] - oy + y_shift
            total += (y - slope * x - intercept) ** 2
    return total
def error_lines_together(params, *args):
    """Joint sum-of-squares error for three lines fitted simultaneously,
    all sharing one pair of inter-triangle translations.

    params: (m0, c0, m1, c1, m2, c2, dx1, dy1, dx2, dy2).
    args[k]: the four point groups belonging to line k (k = 0, 1, 2),
        ordered as (base group, neighbour 1, neighbour 2, fourth triangle).
    """
    dx1, dy1, dx2, dy2 = params[6], params[7], params[8], params[9]
    # Same offset scheme as error_line: the fourth triangle carries the
    # combined displacement of both neighbours.
    offsets = ((0.0, 0.0), (dx1, dy1), (dx2, dy2), (dx1 + dx2, dy1 + dy2))
    total = 0.0
    for k in range(3):
        slope = params[2 * k]
        intercept = params[2 * k + 1]
        for grp, (ox, oy) in zip(args[k][:4], offsets):
            for point in grp:
                x = point[0] - ox
                y = point[1] - oy
                total += (y - slope * x - intercept) ** 2
    return total
def line_slope(point1, point2, resolution):
    """Slope of the line through point1 and point2.

    A perfectly vertical pair would divide by zero, so the denominator is
    nudged by a shift much smaller than the data resolution, producing a
    very large (but finite) slope instead of raising ZeroDivisionError.
    """
    dy = point1[1] - point2[1]
    dx = point1[0] - point2[0]
    if dx == 0:
        # 0.001 * resolution is far below the sampling step, so the nudge
        # cannot be confused with a real horizontal separation.
        dx = -0.001 * resolution
    return dy / dx
def line_intercept(point1, point2, resolution):
    """y-intercept of the line through point1 and point2.

    *resolution* is forwarded to line_slope, which uses it to guard
    against vertical lines.
    """
    slope = line_slope(point1, point2, resolution)
    return point1[1] - slope * point1[0]
def line_intersection(m_1, c_1, m_2, c_2):
    """Intersection of the two lines y = m_1*x + c_1 and y = m_2*x + c_2.

    Returns a 2x1 column array [[x], [y]].

    Raises numpy.linalg.LinAlgError when the lines are parallel
    (m_1 == m_2), since the system is then singular.
    """
    # Solve the 2x2 linear system directly: np.linalg.solve is faster and
    # more numerically stable than forming the explicit inverse and
    # multiplying (the previous implementation).
    mat = np.array([[m_1, -1.0], [m_2, -1.0]])
    const = np.array([[-1 * c_1, -1 * c_2]])
    return np.linalg.solve(mat, np.transpose(const))
def clear_bulk(x, y, resolution, boundary_thickness_factor):
    """Return the boundary points of a cluster, discarding its interior.

    For each point, the centroid of all neighbours within a small radius is
    computed.  An interior point sits close to that centroid; a boundary
    point sees its centroid pulled towards the bulk.  Points whose centroid
    displacement exceeds a threshold (tightened by a larger
    boundary_thickness_factor) are kept as boundary points.

    Returns (boundary_pts, boundary_x, boundary_y) as numpy arrays.
    """
    radius = 3 * resolution
    # Seed the accumulators with a dummy (0, 0) entry so np.append has a
    # correctly shaped array to grow; the dummy is stripped before return.
    boundary_pts = np.zeros((1, 2))
    boundary_x = np.array([0])
    boundary_y = np.array([0])
    for i in range(len(x)):
        pt = np.array([[x[i], y[i]]])
        neighbour_count = 0.0
        centroid = np.zeros((1, 2))
        for j in range(len(x)):
            other = np.array([[x[j], y[j]]])
            if dist(pt, other) < radius:
                neighbour_count = neighbour_count + 1.0
                centroid = centroid + other
        # neighbour_count >= 1 always: the point is its own neighbour.
        centroid = centroid / neighbour_count
        # Large centroid displacement => the point lies on the boundary.
        if dist(centroid, pt) > (0.2 * radius) / boundary_thickness_factor:
            boundary_pts = np.append(boundary_pts, pt, 0)
            boundary_x = np.append(boundary_x, pt[0][0])
            boundary_y = np.append(boundary_y, pt[0][1])
    # Drop the dummy first entry from each accumulator.
    return boundary_pts[1:], boundary_x[1:], boundary_y[1:]
def dist(a, b):
    """Euclidean distance between two points stored as 1x2 nested
    arrays/lists, i.e. a[0] == [x, y]."""
    dx = a[0][0] - b[0][0]
    dy = a[0][1] - b[0][1]
    return (dx ** 2 + dy ** 2) ** 0.5
def grp_points(lines, x, y):
    """Assign each point to the closest of the five candidate lines.

    Parameters
    ----------
    lines : sequence of 5 pairs, lines[i] == [slope, intercept].
    x, y : sequences of point coordinates (same length).

    Returns
    -------
    numpy object array of length 5; element i is the list of [x, y]
    points closest to line i.
    """
    groups = [[] for _ in range(5)]
    for px, py in zip(x, y):
        # Squared vertical residual scaled by 1/sqrt(1 + m^2) for each
        # line; used only to rank the lines by closeness.
        # NOTE(review): the true squared perpendicular distance divides by
        # (1 + m^2), not its square root -- kept as-is to preserve the
        # original grouping behaviour; confirm intent.
        dist_lines = np.zeros(5)
        for q in range(5):
            m, c = lines[q][0], lines[q][1]
            dist_lines[q] = ((py - m * px - c) ** 2) / (1 + m ** 2) ** 0.5
        groups[int(np.argmin(dist_lines))].append([px, py])
    # Bug fix: np.array() on a ragged nested list (groups of unequal size,
    # the normal case here) raises ValueError on numpy >= 1.24.  Build an
    # explicit object array instead, matching the old ragged-case result.
    out = np.empty(len(groups), dtype=object)
    for i, grp in enumerate(groups):
        out[i] = grp
    return out
def fit_lines_4triangles(x1,y1,x2,y2,x3,y3,x4,y4,centroids,resolution,boundary_thickness_factor,Use_clear_bulk,guess_vertices):
    """Fit a five-sided outline to the base cluster and its three translated copies.

    Inputs: four clusters of (x, y) points, their centroids (ordered: base
    triangle, its two neighbours, then the fourth), the grid resolution, a
    boundary-thickness factor passed through to clear_bulk(), a flag to
    pre-filter each cluster to its boundary points, and the user's guessed
    five vertices for the base cluster.

    Returns (vertices_calc, lines_fit, avg_dx1, avg_dy1, avg_dx2, avg_dy2):
    the fitted vertices of the base outline, the fitted line parameters,
    and the averaged translations between outlines.

    NOTE(review): this function mutates the module-level ``vertices`` array
    (defined elsewhere in this file), relies on the module-level helpers
    line_slope / line_intercept / grp_points / clear_bulk / filter_grp /
    error_line / error_lines_together / error_parallel_lines and
    scipy.optimize.minimize, and shows a matplotlib figure as a side effect.
    """
    if Use_clear_bulk==True:
        #find boundary points
        boundary_pts1, boundary_x1,boundary_y1=clear_bulk(x1,y1,resolution,boundary_thickness_factor)
        boundary_pts2, boundary_x2,boundary_y2=clear_bulk(x2,y2,resolution,boundary_thickness_factor)
        boundary_pts3, boundary_x3,boundary_y3=clear_bulk(x3,y3,resolution,boundary_thickness_factor)
        boundary_pts4, boundary_x4,boundary_y4=clear_bulk(x4,y4,resolution,boundary_thickness_factor)
    else:
        boundary_x1,boundary_y1=x1,y1
        boundary_x2,boundary_y2=x2,y2
        boundary_x3,boundary_y3=x3,y3
        boundary_x4,boundary_y4=x4,y4
    # The triple-quoted block below is disabled interactive vertex-picking code.
    '''
    #pick vertices
    fig = plt.figure()
    plt.plot(boundary_x1,boundary_y1, 'ro',picker=5)
    fig.canvas.mpl_connect('pick_event', onpick1)
    print("pick 5 vertices and close graph")
    plt.show()
    fig = plt.figure()
    plt.plot(boundary_x2,boundary_y2, 'go',picker=5)
    fig.canvas.mpl_connect('pick_event', onpick2)
    print("pick 5 vertices and close graph")
    plt.show()
    fig = plt.figure()
    plt.plot(boundary_x3,boundary_y3, 'bo',picker=5)
    fig.canvas.mpl_connect('pick_event', onpick3)
    print("pick 5 vertices and close graph")
    plt.show()
    fig = plt.figure()
    plt.plot(boundary_x4,boundary_y4, 'go',picker=5)
    fig.canvas.mpl_connect('pick_event', onpick4)
    print("pick 5 vertices and close graph")
    plt.show()
    '''
    # Seed the shared module-level `vertices` array with the user's guess.
    vertices[0]= guess_vertices
    #guess vertices for other triangles
    #find guesses for dx, dy based on centroids of triangles (the order in which centroids have been given- base triangle, its 2 neighbours,4th triangle)
    if abs(centroids[0][0]-centroids[1][0])>abs(centroids[0][0]-centroids[2][0]):
        dy2= centroids[2][1]- centroids[0][1]
        dx2=centroids[2][0]- centroids[0][0]
        dx1= centroids[1][0]- centroids[0][0]
        dy1=centroids[1][1]- centroids[0][1]
        #guess vertices
        vertices[1]=vertices[0]+np.tile([dx1,dy1],(5,1))
        vertices[2]=vertices[0]+np.tile([dx2,dy2],(5,1))
    else:
        dy2= centroids[1][1]- centroids[0][1]
        dx2= centroids[1][0]- centroids[0][0]
        dx1= centroids[2][0]- centroids[0][0]
        dy1= centroids[2][1]- centroids[0][1]
        #guess vertices
        vertices[2]=vertices[0]+np.tile([dx1,dy1],(5,1))
        vertices[1]=vertices[0]+np.tile([dx2,dy2],(5,1))
    # The fourth outline is translated by the sum of both displacements.
    vertices[3]=vertices[0]+np.tile([dx2+dx1,dy2+dy1],(5,1))
    #find the slopes and intercepts of all lines
    lines=[[[],[],[],[],[]],[[],[],[],[],[]],[[],[],[],[],[]],[[],[],[],[],[]]]
    for s in range(0,4):
        for r in range(0,5):
            slope= line_slope(vertices[s][r],vertices[s][(r+1)%5],resolution)
            intercept= line_intercept(vertices[s][r],vertices[s][(r+1)%5],resolution)
            lines[s][r]= lines[s][r]+[slope,intercept]
    #group the points
    groups1= grp_points(lines[0],boundary_x1,boundary_y1)
    groups2= grp_points(lines[1],boundary_x2,boundary_y2)
    groups3= grp_points(lines[2],boundary_x3,boundary_y3)
    groups4= grp_points(lines[3],boundary_x4,boundary_y4)
    if abs(centroids[0][0]-centroids[1][0])<abs(centroids[0][0]-centroids[2][0]):
        #reorder groups 3 and 2 so that groups2 corresponds to dx1,dy1 and 3 to dx2,dy2
        tempg=groups2
        tempv=vertices[1] #these are vertices of groups2
        groups2=groups3
        groups3=tempg
        vertices[1]=vertices[2]
        vertices[2]=tempv
    #fit lines
    lines_fit=[[],[],[],[],[]]
    #fit line 0,1,4 separately. these are lines with many points and that are clear. Find shifts for 2,3 later
    #fit line 0. paramters to fit are slope, intercept of the line
    ans_0= minimize(error_line,x0=np.array([lines[0][0][0],lines[0][0][1],dx1,dy1,dx2,dy2]),args=(groups1[0],groups2[0],groups3[0],groups4[0]))
    #fit line 1. paramters to fit are slope, intercept of the line
    ans_1= minimize(error_line,x0=np.array([lines[0][1][0],lines[0][1][1],dx1,dy1,dx2,dy2]),args=(groups1[1],groups2[1],groups3[1],groups4[1]))
    #fit line 4. paramters to fit are slope, intercept of the line
    ans_4= minimize(error_line,x0=np.array([lines[0][4][0],lines[0][4][1],dx1,dy1,dx2,dy2]),args=(groups1[4],groups2[4],groups3[4],groups4[4]))
    #try to fit lines 0,1,4 together
    ans= minimize(error_lines_together,x0=np.array([lines[0][0][0],lines[0][0][1],lines[0][1][0],lines[0][1][1],lines[0][4][0],lines[0][4][1],dx1,dy1,dx2,dy2]),args=([groups1[0],groups2[0],groups3[0],groups4[0]],[groups1[1],groups2[1],groups3[1],groups4[1]],[groups1[4],groups2[4],groups3[4],groups4[4]]))
    # Overwrite the per-line fits with slices of the joint fit (slope,
    # intercept per line plus the shared dx1,dy1,dx2,dy2 translations).
    ans_0.x=np.array([ans.x[0],ans.x[1],ans.x[6],ans.x[7],ans.x[8],ans.x[9]])
    ans_1.x=np.array([ans.x[2],ans.x[3],ans.x[6],ans.x[7],ans.x[8],ans.x[9]])
    ans_4.x=np.array([ans.x[4],ans.x[5],ans.x[6],ans.x[7],ans.x[8],ans.x[9]])
    #fit lines 2 and 3. parameters to fit x,y shifts to locate them from lines 4 and 1 respectively
    # NOTE(review): the loop variable is never used before being rebound to a
    # slope below, and no input of the loop body changes between iterations,
    # so the two passes appear to recompute identical fits -- confirm intent.
    for m in range(0,2):
        #calculate guess for shift along line 0. A good guess could be distance between vertices 2 and 4
        shift=dist([vertices[0][2]],[vertices[0][4]])
        ans_3= minimize(error_parallel_lines,x0=shift,args=(groups1[3],groups2[3],groups3[3],groups4[3],ans_1.x[0],ans_1.x[1],ans_0.x[0],ans_1.x[2],ans_1.x[3],ans_1.x[4],ans_1.x[5]))
        ans_2= minimize(error_parallel_lines,x0=shift,args=(groups1[2],groups2[2],groups3[2],groups4[2],ans_4.x[0],ans_4.x[1],ans_0.x[0],ans_4.x[2],ans_4.x[3],ans_4.x[4],ans_4.x[5]))
        #put in final slopes and intercepts of lines 2 and 3 into lines_fit
        avg_shift= (abs(ans_3.x)+abs(ans_2.x))/2.0
        shift_1=ans_3.x*avg_shift/abs(ans_3.x)
        m=ans_0.x[0] #slope along which shift is taken
        x_shift_1= shift_1/(1+m**2)**0.5
        y_shift_1= shift_1*m/(1+m**2)**0.5
        lines_fit[3]=np.array([ans_1.x[0], (ans_1.x[1]-y_shift_1+(ans_1.x[0]*x_shift_1))[0]])
        shift_2=ans_2.x*avg_shift/abs(ans_2.x)
        x_shift_2= shift_2/(1+m**2)**0.5
        y_shift_2= shift_2*m/(1+m**2)**0.5
        lines_fit[2]=np.array([ans_4.x[0], (ans_4.x[1]-y_shift_2+(ans_4.x[0]*x_shift_2))[0]])
    '''
    #plot the points in groups
    plt.figure()
    for r in range(0,len(groups1[2])):
        plt.plot(groups1[2][r][0],groups1[2][r][1],'bo')
    for r in range(0,len(groups1[3])):
        plt.plot(groups1[3][r][0],groups1[3][r][1],'go')
    for r in range(0,len(groups2[2])):
        plt.plot(groups2[2][r][0],groups2[2][r][1],'bo')
    for r in range(0,len(groups2[3])):
        plt.plot(groups2[3][r][0],groups2[3][r][1],'go')
    for r in range(0,len(groups3[2])):
        plt.plot(groups3[2][r][0],groups3[2][r][1],'bo')
    for r in range(0,len(groups3[3])):
        plt.plot(groups3[3][r][0],groups3[3][r][1],'go')
    for r in range(0,len(groups4[2])):
        plt.plot(groups4[2][r][0],groups4[2][r][1],'bo')
    for r in range(0,len(groups4[3])):
        plt.plot(groups4[3][r][0],groups4[3][r][1],'go')
    plt.show()
    '''
    #filter out points in the groups 2 and 3 and refit lines. For group 2 keep points
    #on same side as vertice 1 and likewise vertice 0 for group 3
    groups1[2]=filter_grp(groups1[2],lines_fit[2],vertices[0][1])
    groups1[3]=filter_grp(groups1[3],lines_fit[3],vertices[0][0])
    groups2[2]=filter_grp(groups2[2],[lines_fit[2][0],lines_fit[2][1]+ans_4.x[2]],vertices[1][1])
    groups2[3]=filter_grp(groups2[3],[lines_fit[3][0],lines_fit[3][1]+ans_1.x[2]],vertices[1][0])
    groups3[2]=filter_grp(groups3[2],[lines_fit[2][0],lines_fit[2][1]+ans_4.x[4]],vertices[2][1])
    groups3[3]=filter_grp(groups3[3],[lines_fit[3][0],lines_fit[3][1]+ans_1.x[4]],vertices[2][0])
    groups4[2]=filter_grp(groups4[2],[lines_fit[2][0],lines_fit[2][1]+ans_4.x[2]+ans_4.x[4]],vertices[3][1])
    groups4[3]=filter_grp(groups4[3],[lines_fit[3][0],lines_fit[3][1]+ans_1.x[2]+ans_1.x[4]],vertices[3][0])
    #put in final slopes and intercepts of other lines into lines_fit
    # NOTE(review): lines_fit[0] keeps the full 6-parameter fit vector while
    # lines_fit[1..4] hold [slope, intercept]; only indices 0 and 1 are read
    # by line_intersection below, but the returned structure is heterogeneous.
    lines_fit[0]=ans_0.x
    lines_fit[1]=np.array([ans_1.x[0], ans_1.x[1]])
    lines_fit[4]=np.array([ans_4.x[0], ans_4.x[1]])
    #calculate the vertices from the intersections of lines and return it
    vertices_calc= np.zeros((5,2))
    for r in range(0,5):
        vertices_calc[r][0], vertices_calc[r][1]= line_intersection(lines_fit[r][0],lines_fit[r][1],lines_fit[(r-1)%5][0],lines_fit[(r-1)%5][1])
    #calculate average dx1,dy1,dx2,dy2
    avg_dx1= (ans_0.x[2]+ans_1.x[2]+ans_4.x[2])/3.0
    avg_dy1= (ans_0.x[3]+ans_1.x[3]+ans_4.x[3])/3.0
    avg_dx2= (ans_0.x[4]+ans_1.x[4]+ans_4.x[4])/3.0
    avg_dy2= (ans_0.x[5]+ans_1.x[5]+ans_4.x[5])/3.0
    # #plot them boundary points and lines
    plt.figure()
    plt.scatter(x1, y1, c='g', marker='o')
    plt.scatter(x2, y2, c='r', marker='o')
    plt.scatter(x3, y3, c='r', marker='o')
    plt.scatter(x4, y4, c='r', marker='o')
    x=np.array([vertices_calc[0][0],vertices_calc[1][0],vertices_calc[2][0],vertices_calc[3][0],vertices_calc[4][0],vertices_calc[0][0]])
    y=np.array([vertices_calc[0][1],vertices_calc[1][1],vertices_calc[2][1],vertices_calc[3][1],vertices_calc[4][1],vertices_calc[0][1]])
    x_1=x+np.tile(avg_dx1,(6,))
    y_1=y+np.tile(avg_dy1,(6,))
    x_2=x+np.tile(avg_dx2,(6,))
    y_2=y+np.tile(avg_dy2,(6,))
    x_3=x+np.tile(avg_dx2+avg_dx1,(6,))
    y_3=y+np.tile(avg_dy2+avg_dy1,(6,))
    plt.plot(x_3,y_3,'b-',x,y,'b-',x_2,y_2,'b-',x_1,y_1,'b-')
    #plt.plot([vertices[0][0],vertices[1][0],vertices[2][0],vertices[3][0],vertices[4][0],vertices[0][0]],[vertices[0][1],vertices[1][1],vertices[2][1],vertices[3][1],vertices[4][1],vertices[0][1]],'g-')
    plt.show()
    return vertices_calc, lines_fit,avg_dx1,avg_dy1,avg_dx2,avg_dy2
if __name__ == "__main__":
    # #check the code
    # Load the four clusters from spreadsheets exported elsewhere.
    data= pd.read_excel('cluster1.xlsx')
    x1=data['x1'].values
    y1=data['y1'].values
    data= pd.read_excel('cluster2.xlsx')
    x2=data['x2'].values
    y2=data['y2'].values
    data= pd.read_excel('cluster3.xlsx')
    x3=data['x3'].values
    y3=data['y3'].values
    data= pd.read_excel('cluster4.xlsx')
    x4=data['x4'].values
    y4=data['y4'].values
    # NOTE(review): this call passes 12 positional arguments, but
    # fit_lines_4triangles() declares 13 parameters (guess_vertices is
    # missing), so this raises TypeError at runtime as written. The
    # resolution is taken as the spacing of the first two y samples.
    fit_lines_4triangles(x1,y1,x2,y2,x3,y3,x4,y4,np. array([[1.70475207, 1.59290041],
    [1.72185748, 1.59276907],
    [1.70280256, 1.61168564],
    [1.72011364, 1.6114008 ]]),abs(y1[0]-y1[1]),1.0,True)
| [
"51793790+zachparrott@users.noreply.github.com"
] | 51793790+zachparrott@users.noreply.github.com |
65ef2c35a54d445ef9cd2c14e351b654783cbda1 | 8799db59af87cc332a26aabb7082e89d812d3c0d | /venv/bin/django-admin.py | f4ed5fa8883ae9e6a801faca212daaa66ab060e0 | [] | no_license | kureLeGrill/telegramBotGanna | fbbb7de887de3f9eb4c5319f512d3dfb551d65c2 | cff4be229ab57975d9dea22b3d1911394e844450 | refs/heads/master | 2022-12-13T22:01:59.490215 | 2020-09-07T17:24:50 | 2020-09-07T17:24:50 | 292,842,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | #!/home/ed/PycharmProjects/HannaTelegramBot/venv/bin/python
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
    # Present on Django 3.1-3.x; an ImportError here means Django >= 4.0,
    # where django-admin.py has been removed (per the message below).
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    # Warn about the deprecated entry point, then run the normal CLI.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"vetok88@gmail.com"
] | vetok88@gmail.com |
00f4f432b42195fe0d5718333d4fea31f17c3546 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /layout-blt/configs/bert_layout_publaynet_config.py | c468d18d59fde1f6a87c790cc4dbb6815ec3e80b | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,219 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default Hyperparameter configuration."""
import ml_collections
def get_config():
  """Gets the default hyperparameter configuration."""
  cfg = ml_collections.ConfigDict()
  # Experiment / dataset settings.
  cfg.dataset_path = "/path/to/publaynet/"
  cfg.dataset = "PubLayNet"
  cfg.vocab_size = 137
  cfg.experiment = "bert_layout"
  cfg.model_class = "bert_layout"
  cfg.image_size = 256
  # Training-loop settings.
  cfg.seed = 0
  cfg.log_every_steps = 100
  cfg.eval_num_steps = 1000
  cfg.max_length = 130
  cfg.batch_size = 64
  cfg.train_shuffle = True
  cfg.eval_pad_last_batch = False
  cfg.eval_batch_size = 64
  cfg.num_train_steps = 100_000
  cfg.checkpoint_every_steps = 5000
  cfg.eval_every_steps = 5000
  cfg.num_eval_steps = 100
  # Model architecture settings.
  cfg.layout_dim = 2
  cfg.dtype = "float32"
  cfg.autoregressive = False
  cfg.shuffle_buffer_size = 10
  cfg.use_vae = True
  cfg.share_embeddings = True
  cfg.num_layers = 4
  cfg.qkv_dim = 512
  cfg.emb_dim = 512
  cfg.mlp_dim = 2048
  cfg.num_heads = 8
  cfg.dropout_rate = 0.1
  cfg.attention_dropout_rate = 0.3
  cfg.restore_checkpoints = True
  cfg.label_smoothing = 0.
  cfg.sampling_method = "top-p"
  cfg.use_vertical_info = False
  # Optimizer settings (nested ConfigDict).
  cfg.optimizer = ml_collections.ConfigDict()
  cfg.optimizer.type = "adam"
  cfg.optimizer.warmup_steps = 4000
  cfg.optimizer.lr = 5e-3
  cfg.optimizer.beta1 = 0.9
  cfg.optimizer.beta2 = 0.98
  cfg.optimizer.weight_decay = 0.01
  cfg.beta_rate = 1 / 20_000
  return cfg
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
2bb7dd33ca94ab0010891f6b17ba99ddfad25061 | 0911ccd808776b1e1e1ebaffcf0b77162653621b | /1_Intro_to_Python/2_Variables.py | 3b6b06c14a15a0e907b197525fe06145be9bb9c1 | [] | no_license | harfordt/Python-Lessons | 088494e3eaa204705df7fb021ba74eb7eba6741b | 885c871f2192b4abbf52ea8f1159bc1b13158e0d | refs/heads/master | 2020-09-25T17:54:48.662540 | 2019-09-10T08:46:19 | 2019-09-10T08:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,332 | py | ####################################################################################################################################################
# In this lesson we store some information in a named location in memory called a variable; a variable's value can change over the course of the program. #
# We will then print the variable to the screen.                                                                                                          #
# ##################################################################################################################################################
x = 5 #Let x equal 5
print (x)
y = 10 #Let y equal 10
print (y)
print(x + y) # you can do basic math as long as all the variables are the same type, for example in this case, integers
print(x * y)
print(y / x) # note: in Python 3, / is true division, so this prints 2.0 (use // for integer division)
print('--------------------')
#############################################################################################
# Now let's try saving some text in a variable                                              #
#############################################################################################
x = 'My First ' # So as you can see, when I replace x this time with text, it overwrites the value of x which was 5
y = 'Python program' # and same here, y was 10 but is now replaced by a string value
print(x + y)
print('---------------------')
#############################################################################################
# Now let's try adding strings and integers                                                 #
#############################################################################################
x = 'My phone number is '
y = ' 001-4-555-6778' # This example is still adding string with string, as the number is inside quote marks
print(x + y)
print('---------------------')
#########################################################
# Assigning multiple variables at the same time         #
#########################################################
x, y, z = 'Audi ', 'Bentley ', 'Corvette '
print('My three favorite cars are ' + x + y + z)
# Key points about variables:
# 1- names must start with a letter or underscore, and cannot start with a number or other characters
# 2- names are case sensitive
"pravinvaz@gmail.com"
] | pravinvaz@gmail.com |
3e5c4ac18106af76ebc63078c8b44562469ecb48 | 91fbfa52c5eea1f3d0df8fc7c634eedf0fe67c68 | /Python/pythonREST.py | 719a206bcd12191a8748cdc4d2a541fd36f05dbc | [] | no_license | M-Anwar/ARGEL | 57e54e887ffc82f9abe712a33aa2822a4cf47aba | 2d779f5da65d043cd94b46822b619fd11259abdc | refs/heads/master | 2021-01-21T04:31:40.242700 | 2016-07-15T03:14:00 | 2016-07-15T03:14:00 | 50,461,572 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | import requests;
import json;
# NOTE: the bare `print key + ...` and `print myResponse.text` statements
# below are Python 2 syntax, so this script only runs under Python 2.
print("hello world gonna make a REST call\n");
# Replace with the correct URL
url = "http://localhost:3000/api/helloworld"
myResponse = requests.get(url)
print ("Response: {0}".format(myResponse.status_code))
# For successful API call, response code will be 200 (OK)
if(myResponse.ok):
    # Loading the response data into a dict variable
    # json.loads takes in only binary or string variables so using content to fetch binary content
    # Loads (Load String) takes a Json file and converts into python data structure (dict or list, depending on JSON)
    jData = json.loads(myResponse.content)
    print("The response contains {0} properties:".format(len(jData)))
    for key in jData:
        # assumes every value in the JSON object is a string -- TODO confirm
        print key + " : " + jData[key]
else:
    # If response code is not ok (200), print the resulting http error code with description
    myResponse.raise_for_status()
# Replace with the correct URL
# Second call: POST two arguments to the /api/add endpoint and echo the reply.
url = "http://localhost:3000/api/add"
postData = {'arg1':'5', 'arg2':'10'};
myResponse = requests.post(url, data = postData);
print ("Response: {0}".format(myResponse.status_code))
print myResponse.text
| [
"muhammedshabbeer@hotmail.com"
] | muhammedshabbeer@hotmail.com |
2ba209f565ab992e4ee4495511470584b0b781b0 | d19f6d677f1598f2840822d53f7217fbca0bc77c | /additional files/hand_rank.py | a832f1d1b71303c7ba87308d670340cd64d28677 | [] | no_license | molex/Python_Scripts | ec11f800e79ee515ed15d1929d29ddac726bf488 | 134ea6407e744fb5cf9f8b02f16ce612e52ebc19 | refs/heads/master | 2021-01-21T12:10:52.206371 | 2016-04-20T14:05:47 | 2016-04-20T14:05:47 | 5,499,911 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,929 | py | # -----------
# User Instructions
#
# Modify the hand_rank function so that it returns the
# correct output for the remaining hand types, which are:
# full house, flush, straight, three of a kind, two pair,
# pair, and high card hands.
#
# Do this by completing each return statement below.
#
# You may assume the following behavior of each function:
#
# straight(ranks): returns True if the hand is a straight.
# flush(hand): returns True if the hand is a flush.
# kind(n, ranks): returns the first rank that the hand has
# exactly n of. For A hand with 4 sevens
# this function would return 7.
# two_pair(ranks): if there is a two pair, this function
# returns their corresponding ranks as a
# tuple. For example, a hand with 2 twos
# and 2 fours would cause this function
# to return (4, 2).
# card_ranks(hand) returns an ORDERED tuple of the ranks
# in a hand (where the order goes from
# highest to lowest rank).
#
# Since we are assuming that some functions are already
# written, this code will not RUN. Clicking SUBMIT will
# tell you if you are correct.
def poker(hands):
    "Return the best hand: poker([hand,...]) => hand"
    # max() with a key keeps the first hand among equally-ranked ones.
    return max(hands, key=hand_rank)
def hand_rank(hand):
    """Return a sortable value describing the rank of `hand`.

    The first element of the returned tuple is the hand category
    (8 = straight flush down to 0 = high card); the remaining elements
    break ties within a category.

    Fix: five branches recomputed card_ranks(hand) even though `ranks`
    already holds exactly that value; they now reuse it.
    """
    ranks = card_ranks(hand)
    if straight(ranks) and flush(hand):            # straight flush
        return (8, max(ranks))
    elif kind(4, ranks):                           # 4 of a kind
        return (7, kind(4, ranks), kind(1, ranks))
    elif kind(3, ranks) and kind(2, ranks):        # full house
        return (6, kind(3, ranks), kind(2, ranks))
    elif flush(hand):                              # flush
        return (5, ranks)
    elif straight(ranks):                          # straight
        return (4, max(ranks))
    elif kind(3, ranks):                           # 3 of a kind
        return (3, kind(3, ranks), ranks)
    elif two_pair(ranks):                          # 2 pair
        return (2, two_pair(ranks), ranks)
    elif kind(2, ranks):                           # one pair
        return (1, kind(2, ranks), ranks)
    else:                                          # high card
        return (0, ranks)
def test():
    "Test cases for the functions in poker program"
    # Hand fixtures: each hand is a list of 'rank+suit' card strings.
    sf = "6C 7C 8C 9C TC".split() # Straight Flush
    fk = "9D 9H 9S 9C 7D".split() # Four of a Kind
    fh = "TD TC TH 7C 7D".split() # Full House
    assert poker([sf, fk, fh]) == sf
    assert poker([fk, fh]) == fk
    assert poker([fh, fh]) == fh
    # Degenerate inputs: a single hand, and one best hand among many copies.
    assert poker([sf]) == sf
    assert poker([sf] + 99*[fh]) == sf
    assert hand_rank(sf) == (8, 10)
    assert hand_rank(fk) == (7, 9, 7)
    assert hand_rank(fh) == (6, 10, 7)
    return 'tests pass'
# -----------
# User Instructions
#
# Modify the test() function to include three new test cases.
# These should assert that hand_rank gives the appropriate
# output for the given straight flush, four of a kind, and
# full house.
#
# For example, calling hand_rank on sf should output (8, 10)
#
# Since the program is still incomplete, clicking RUN won't do
# anything, but clicking SUBMIT will let you know if you
# have gotten the problem right.
def poker(hands):
    "Return the best hand: poker([hand,...]) => hand"
    # NOTE(review): duplicate definition -- this re-binds the identical
    # poker() defined earlier in this file; the later binding wins at import.
    return max(hands, key=hand_rank)
def test():
    """Test cases for the functions in poker program.

    Bug fix: the last line of this function used to be ``print test()``
    *inside* the function body, which would recurse without bound if test()
    were ever called. It now returns 'tests pass' like the earlier test()
    in this file.
    """
    sf = "6C 7C 8C 9C TC".split() # Straight Flush
    fk = "9D 9H 9S 9C 7D".split() # Four of a Kind
    fh = "TD TC TH 7C 7D".split() # Full House
    assert poker([sf, fk, fh]) == sf
    assert poker([fk, fh]) == fk
    assert poker([fh, fh]) == fh
    assert poker([sf]) == sf
    assert poker([sf] + 99*[fh]) == sf
    assert hand_rank(sf) == (8, 10)
    assert hand_rank(fk) == (7, 9, 7)
    assert hand_rank(fh) == (6, 10, 7)
    return 'tests pass'
| [
"molex333@gmail.com"
] | molex333@gmail.com |
e1606654ea93653cb2dce8ceff18357e12273bfa | 5112b951c8bf666a16c00f238a469a015453598a | /src/models/blog.py | 90ed237ebe726bda13addbb2205ae39e99402edc | [] | no_license | jushita/web-blog | c99172e5b3a4b05554565b84056cee8997deae69 | b0ff36036c66e145922be1ae8d546622391a4208 | refs/heads/master | 2021-01-01T20:21:08.127012 | 2017-07-30T20:41:41 | 2017-07-30T20:41:41 | 95,841,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | import uuid
import datetime
from src.models.post import Post
from src.common.database import Database
__author__ = 'jrahman'
class Blog(object):
    """A user's blog: a titled collection of posts persisted in MongoDB."""

    def __init__(self, author, title, description, author_id, _id=None):
        """Create a blog; a random 32-char hex id is generated when _id is None."""
        self.author = author
        self.author_id = author_id
        self.title = title
        self.description = description
        self._id = uuid.uuid4().hex if _id is None else _id

    def new_post(self, title, content, date=None):
        """Create a Post in this blog and save it to Mongo.

        `date` defaults to the current UTC time *at call time*. (Bug fix:
        the old default ``date=datetime.datetime.utcnow()`` was evaluated
        once at import time, so every post created without an explicit date
        silently shared that stale import-time timestamp.)
        """
        if date is None:
            date = datetime.datetime.utcnow()
        post = Post(blog_id=self._id,
                    title=title,
                    content=content,
                    author=self.author,
                    created_date=date)
        post.save_to_mongo()

    def get_posts(self):
        """Return all posts belonging to this blog."""
        return Post.from_blog(self._id)

    def save_to_mongo(self):
        """Insert this blog's JSON representation into the 'blogs' collection."""
        Database.insert(collection='blogs',
                        data=self.json())

    def json(self):
        """Return a plain-dict representation suitable for Mongo storage."""
        return {
            'author': self.author,
            'author_id': self.author_id,
            'title': self.title,
            'description': self.description,
            '_id': self._id
        }

    @classmethod
    def from_mongo(cls, id):
        """Load a single blog by its _id. (Parameter name shadows builtin id(),
        kept for backward compatibility with keyword callers.)"""
        blog_data = Database.find_one(collection='blogs',
                                      query={'_id': id})
        return cls(**blog_data)

    @classmethod
    def find_by_author_id(cls, author_id):
        """Return all blogs owned by the given author."""
        blogs = Database.find(collection='blogs',
                              query={'author_id': author_id})
        return [cls(**blog) for blog in blogs]
| [
"jushitaa@gmail.com"
] | jushitaa@gmail.com |
d65909b61cd0a46b411ee9e6d5f181c7f00dbd42 | 454c0564acc5d6b194603985a5dcb792651661dc | /manualDrive/__init__.py | 798ed7c79ce972c1abc398024d90e358beb9414c | [] | no_license | rsoome/Digi6RX2017 | 64eed9be3f2202e9d5bf00e96af232a1b3104563 | 26bcb2e6169c90b71cfa23f29e27f4c51c0936e1 | refs/heads/master | 2021-08-24T02:27:18.708506 | 2017-11-26T22:13:10 | 2017-11-26T22:13:10 | 104,649,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | import manualDrive.ManualDrive | [
"rsoome16@gmail.com"
] | rsoome16@gmail.com |
921548cdfb11ada7eb5d4be07398294bf09ce197 | b9963ffb80aad7e057bc375edb85ac7ed5a837d0 | /adventofcode2017/03b.py | 44f43305774184f644e62bce54dfc526c453e223 | [
"MIT"
] | permissive | matslindh/codingchallenges | a2db9f4579e9f35189f5cdf74590863cf84bdf95 | a846e522f7a31e988c470cda87955ee3ef20a274 | refs/heads/main | 2022-12-23T15:56:19.776354 | 2022-12-15T21:03:37 | 2022-12-15T21:03:37 | 76,491,177 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | from itertools import repeat
from math import floor
# Advent of Code 2017, day 3 part B: walk a square spiral outward, writing
# into each cell the sum of its already-filled neighbours, and stop at the
# first value larger than the puzzle input 289326.
# NOTE(review): `map` and `dir` below shadow the Python builtins of the same
# name for the rest of this script.
map = []
s_y = s_x = 1001
for y in range(0, s_y):
    map.append(list(repeat(0, s_x)))
# Start at the centre of the (odd-sized) grid; seed it with 1 and step right.
x = y = floor(s_x/2)
map[y][x] = 1
x += 1
dir = 'R'
written = 0
while written <= 289326:
    # Turn counter-clockwise (R -> U -> L -> D -> R) as soon as the cell on
    # the inside of the current direction is still empty; otherwise advance.
    if dir == 'R':
        if not map[y-1][x]:
            dir = 'U'
        else:
            x += 1
    elif dir == 'U':
        if not map[y][x-1]:
            dir = 'L'
        else:
            y -= 1
    elif dir == 'L':
        if not map[y+1][x]:
            dir = 'D'
        else:
            x -= 1
    elif dir == 'D':
        if not map[y][x+1]:
            dir = 'R'
        else:
            y += 1
    # Sum of all eight neighbours (unfilled cells contribute 0).
    written = map[y-1][x-1] + map[y-1][x] + map[y-1][x+1] + \
        map[y][x-1] + map[y][x+1] + \
        map[y+1][x-1] + map[y+1][x] + map[y+1][x+1]
    print(dir, x, y, written)
    map[y][x] = written
| [
"mats@lindh.no"
] | mats@lindh.no |
0a77cc0e157849e364d05eba2e50154cbdd20923 | 6db36a7bc7a45d8a5dfd53d3660d45ac475d5c03 | /mysite/main/migrations/0010_auto_20190612_1728.py | f9b7c212467546b24b3722a9fde6226caab4a0e7 | [] | no_license | matimontes/Grupo47 | f63c52a3533d8f5ad35f4ae2e2cbcd0dea32eb4e | 1ca077a2563aec8d5052565e8aa854ee15797758 | refs/heads/master | 2020-05-04T17:11:58.792406 | 2019-07-18T02:44:18 | 2019-07-18T02:44:18 | 179,302,112 | 2 | 1 | null | 2019-06-16T19:58:43 | 2019-04-03T14:00:37 | Python | UTF-8 | Python | false | false | 361 | py | # Generated by Django 2.2.1 on 2019-06-12 20:28
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated (see the header comment): updates the Meta options of
    # the 'puja' model so its default ordering is by subasta, then by
    # dinero_pujado descending. NOTE(review): AlterModelOptions changes only
    # the model options tracked by the migration state -- confirm no schema
    # change is expected here.
    dependencies = [
        ('main', '0009_auto_20190610_1706'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='puja',
            options={'ordering': ['subasta', '-dinero_pujado']},
        ),
    ]
| [
"mati.montes@hotmail.com"
] | mati.montes@hotmail.com |
1bd3328dc8166ab5d74439832d739adbdd69d664 | 206123d13078ae1d08aa20f98b76349210165c17 | /trees/binary_tree/main.py | 6613b96cc99d8342504d3b5b07d749f089d94455 | [] | no_license | Avinashgurugubelli/python_data_structures | b29e13bafd3190abe7c93102705d01f41a8d411f | 7141d237112e13fc90dc81702263d121779036d1 | refs/heads/master | 2022-12-18T23:49:56.137001 | 2019-08-01T19:04:07 | 2019-08-01T19:04:07 | 157,427,278 | 0 | 0 | null | 2022-06-21T21:37:53 | 2018-11-13T18:31:48 | Python | UTF-8 | Python | false | false | 629 | py | # Below os and sys imports required to match the custom imports
import os, sys
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(CURRENT_DIR))
from binary_tree_node import BinaryTreeNode
from binary_tree import BinaryTree
from .utils.binary_tree_traversal_types import BinaryTreeTraversalMethodType, BinaryTreeTraversalType
if __name__ == "__main__":
    # Demo: build a three-node tree (root 1, left child 2, right child 3)
    # for the traversal call that follows.
    # NOTE(review): the `from .utils...` relative import above fails when
    # this file is executed directly as a script -- confirm how it is run.
    node1 = BinaryTreeNode(1)
    node1.left = BinaryTreeNode(2)
    node1.right = BinaryTreeNode(3)
binaryTree.traverse(BinaryTreeTraversalType.PRE_ORDER, BinaryTreeTraversalMethodType.ITERATIVE, node1) | [
"avinashgurugubelli@gmail.com"
] | avinashgurugubelli@gmail.com |
5a04ed0ab197d53e561347947e8dc56c871128b9 | 7365ae430024c039e3079e9cc0cc2fcb6079ee22 | /zshprompt2.py | d5c18461a73ad013cee929509fa085472b5ceab6 | [] | no_license | jedamus/zsh-config | fef7757b9302ae45920948f4232829aea89ef61c | 0c6eda9a604095ea14493835bca0ad7dd5919114 | refs/heads/master | 2023-01-21T11:03:08.620219 | 2023-01-09T08:22:44 | 2023-01-09T08:22:44 | 42,779,306 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,418 | py | #!/usr/bin/env python2
# coding=utf-8
# erzeugt Mittwoch, 11. März 2015 21:01 2015 von Leander Jedamus
# modifiziert Samstag, 13. August 2022 08:49 von Leander Jedamus
# modifiziert Montag, 02. Mai 2022 20:27 von Leander Jedamus
# modifiziert Montag, 07. Mai 2018 22:24 von Leander Jedamus
# modifiziert Montag, 21. September 2015 17:01 von Leander Jedamus
# modifiziert Samstag, 19. September 2015 18:36 von Leander Jedamus
# modifiziert Mittwoch, 11. März 2015 21:03 von Leander Jedamus
"""Print out zsh prompts.
Based on: https://gist.github.com/seanh/5233082
Customized
"""
import os
import os.path
import subprocess
import socket
def get_username():
    """Return the login name of the user that owns this process."""
    import pwd
    return pwd.getpwuid(os.getuid()).pw_name
def get_machname():
    """Return this machine's short host name (first dot-separated label)."""
    name = socket.gethostname()
    if '.' in name:
        # Fully-qualified name: ask DNS for the canonical host name first.
        name = socket.gethostbyaddr(socket.gethostname())[0]
    return name.split(".")[0]
def _zero_width(s):
    '''Wrap *s* in zsh's %{...%} markers.

    zsh treats the wrapped text as occupying zero columns, which is how
    ANSI escape sequences must be embedded in a prompt so that alignment
    and cursor positioning stay correct.
    '''
    return "%%{%s%%}" % s
def _foreground(s, color):
    """Prefix *s* with the zero-width ANSI code for the given foreground color."""
    codes = {
        'black': '\x1b[30m',
        'red': '\x1b[31m',
        'green': '\x1b[32m',
        'yellow': '\x1b[33m',
        'blue': '\x1b[34m',
        'magenta': '\x1b[35m',
        # NOTE(review): unlike the other entries, 'cyan' also sets bold and
        # a black background -- looks like a theme choice, confirm intent.
        'cyan': '\x1b[1;34;40m',
        'white': '\x1b[37m',
        'gray': '\x1b[1;30m'
    }
    return _zero_width(codes[color]) + s
def _background(s, color):
    """Prefix *s* with the zero-width ANSI code for the given background color."""
    codes = {
        'red': '\x1b[41m',
        'black': '\x1b[40m',
        'green': '\x1b[42m',
        'yellow': '\x1b[43m',
        'blue': '\x1b[44m',
        'magenta': '\x1b[45m',
        'cyan': '\x1b[46m',
        'white': '\x1b[47m',
    }
    return _zero_width(codes[color]) + s
def _bold(s):
    """Prefix *s* with the zero-width ANSI bold code."""
    return _zero_width("\x1b[1m") + s
def _underline(s):
    """Prefix *s* with the zero-width ANSI underline code."""
    return _zero_width("\x1b[4m") + s
def _reverse(s):
    """Prefix *s* with the zero-width ANSI reverse-video code."""
    return _zero_width("\x1b[7m") + s
def _reset(s):
    """Append the zero-width ANSI reset code so later text is unstyled."""
    return s + _zero_width("\x1b[0m")
def color(s, foreground=None, background=None, bold=False, underline=False,
          reverse=False):
    '''Return the given string wrapped in the requested attributes.

    Foreground and background accept one of: black, red, green, yellow,
    blue, magenta, cyan, white (foreground additionally knows "gray").
    The reset code is always appended so following text is unaffected.
    Empty strings are returned untouched.
    '''
    if not s:
        return s
    # Apply the attributes in a fixed order, then reset.
    steps = [
        (foreground, lambda t: _foreground(t, foreground)),
        (background, lambda t: _background(t, background)),
        (bold, _bold),
        (underline, _underline),
        (reverse, _reverse),
    ]
    for enabled, wrap in steps:
        if enabled:
            s = wrap(s)
    return _reset(s)
def shorten_path(path, max_length=20):
    '''Return the given path, shortened if it's too long.

    The user's home directory is abbreviated to ~. If the result is still
    longer than max_length, every parent directory is collapsed fish-style
    to its first letter (the last component is never truncated, so the
    result may still exceed max_length). Examples:

        /home/seanh -> ~
        /home/seanh/Projects/ckan/ckan/ckan -> ~/P/c/c/ckan
        /home/seanh/Projects/ckan/ckan -> ~/Projects/ckan/ckan

    Bug fix: the original truncation loop never terminated when the
    leftmost component was already a single character (e.g. "~") but the
    path was still too long. Collapsing all parents in a single pass both
    matches the documented examples and always terminates.
    '''
    # Replace the user's homedir in path with ~
    homedir = os.path.expanduser('~')
    if path.startswith(homedir):
        path = '~' + path[len(homedir):]
    # Split and drop empty components (leading slash, doubled separators).
    parts = [part for part in path.split(os.sep) if part]
    path = os.sep.join(parts)
    if len(path) > max_length:
        parts = [part[0] for part in parts[:-1]] + parts[-1:]
        path = os.sep.join(parts)
    return path
def current_working_dir():
    '''Return the full absolute path to the current working directory.'''
    # Python 2's os.getcwdu() returns unicode; fall back to os.getcwd() on
    # Python 3, where getcwdu no longer exists. (Logic adapted from the
    # original, which was copied from powerline.)
    getcwd = getattr(os, 'getcwdu', os.getcwd)
    try:
        return getcwd()
    except OSError as err:
        if err.errno == 2:
            # The directory vanished underneath us (e.g. deleted while
            # removing files from a Mercurial repo); show a placeholder.
            return "[not found]"
        raise
def _is_root():
    """True when running as root, either directly or via sudo."""
    return os.getuid() == 0 or 'SUDO_UID' in os.environ
def virtualenv():
    """Return the active virtualenv's name as a colored '(name)', or ''."""
    venv_path = os.environ.get('VIRTUAL_ENV', '')
    if not venv_path:
        return venv_path
    return "{}{}{}".format(
        color("(", foreground="gray"),
        color(os.path.basename(venv_path), foreground="green"),
        color(")", foreground="gray"))
def git_branch():
    """Return the current git branch as a colored '[branch]', or '' outside a repo."""
    # Warning: subprocess.check_output() is new in Python 2.7.
    # NOTE(review): only CalledProcessError is caught, so a missing `git`
    # binary raises OSError here. Also, on Python 3 check_output returns
    # bytes and the str split below would fail -- this file targets
    # Python 2 (see the shebang).
    try:
        output = subprocess.check_output('git symbolic-ref HEAD'.split(),
                                         stderr=subprocess.PIPE)
    except subprocess.CalledProcessError:
        # Non-zero return code, assume the current working dir is not in a git
        # repo.
        return ''
    first_line = output.split('\n')[0]
    # 'refs/heads/<branch>' -> keep everything after the second '/'.
    branch_name = first_line.split('/', 2)[-1]
    branch = "{}{}{}".format(
        color("[", foreground="gray"),
        color(branch_name, foreground="red", background="yellow"),
        color("]", foreground="gray"))
    return branch
def host_name():
    '''Return this machine's hostname.'''
    return socket.gethostname()
def left_prompt():
    '''Return my zsh left prompt.
    '''
    if _is_root():
        root_status = '(^)'  # visual warning when running as root/sudo
    else:
        root_status = ''
    # NOTE(review): get_username(), get_machname(), color() and shorten_path()
    # are defined elsewhere in this file (not visible in this chunk).
    return "{zsh}{user} {cwd} {root}".format(
        zsh=color("z", foreground='yellow', background='blue'),
        user=color(get_username() + "@" + get_machname(), foreground='blue', background='yellow'),
        cwd=color(shorten_path(current_working_dir()), foreground='white', background='red'),
        root=color(root_status, foreground='red')
    )
def right_prompt(last_exit_status):
    '''Return my zsh right prompt.
    '''
    # Show nothing for a clean exit (None or 0); otherwise a sad face + code.
    if last_exit_status in (None, 0):
        last_exit_status = ''
    else:
        last_exit_status = ':( ' + str(last_exit_status)
    parts = [
        virtualenv(),
        git_branch(),
        color(last_exit_status, foreground='red'),
    ]
    # Remove empty strings from parts.
    parts = [part for part in parts if part]
    prompt = ' '.join(parts).strip()
    return prompt
def main():
    '''Parse CLI arguments and print the requested zsh prompt to stdout.'''
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('side', metavar='left|right',
                        choices=('left', 'right'),
                        help="which zsh prompt to print (the left- or right-side prompt)")
    parser.add_argument('--last-exit-status', dest='last_exit_status',
                        type=int,
                        help='the exit status (int) of the previous shell command (default: None)')
    args = parser.parse_args()
    if args.side == 'left':
        # Python 2 print statement: this script targets Python 2.
        print left_prompt()
    else:
        assert args.side == 'right'
        print right_prompt(args.last_exit_status)


if __name__ == '__main__':
    main()

# vim:ai sw=2 sts=4 expandtab
| [
"ljedamus@web.de"
] | ljedamus@web.de |
def listMonths():
    """Ask the user for a month name and print how many days it has.

    Bug fix: the month list used to contain the misspelling "octuber", so a
    user typing the correct name "october" was rejected.  Input is also
    trimmed and lower-cased so "March" / " may " match.
    """
    monthEntered = str(input("Enter a month \n")).strip().lower()
    months = ["january", "february", "march", "april", "may", "june", "july",
              "august", "september", "october", "november", "december"]
    days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if monthEntered in months:
        index = months.index(monthEntered)
        days = days[index]
        print("Month %s has %d days" % (monthEntered, days))
    else:
        print("Invalid value entered")
#listMonths()
def findURL(text):
    """Print the first http:// URL found in `text`; print nothing if absent."""
    start = text.find("http://")
    if start == -1:
        return
    tail = text[start:]
    end = tail.find(" ")
    # URL runs to the next space, or to the end of the string.
    print(tail if end == -1 else tail[:end])
stringWithURL = "this is a test with url http://www.google.com"
#findURL(stringWithURL)
# Quick str.count demo: prints the number of 'i' characters in the text (2).
test = "this is a text"
i = test.count("i")
print(i)
"Ariel Zurita@jalasoft.local"
] | Ariel Zurita@jalasoft.local |
f829374ecf93d80a724d38e00dff9ecc2cb9c16b | f68065baf489013c926dcfea9994878716d19586 | /accounts/views.py | 323deb2d9a062d75f066d39db1854285279ddd21 | [] | no_license | groyce/pots | 06667fdc686b74a897c42879cbed5803e9efb154 | ac839943c84c3135cb4596a8f734e4a061086e10 | refs/heads/master | 2020-04-10T01:42:55.863071 | 2018-12-06T19:47:18 | 2018-12-06T19:47:18 | 160,723,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,905 | py | from django.http import HttpResponse
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from .forms import LoginForm, UserRegistrationForm, UserEditForm, ProfileEditForm
from django.contrib.auth.decorators import login_required
from .models import Profile
def user_login(request):
    """Authenticate a user from the login form and report the outcome.

    GET renders an empty form; POST validates credentials and responds with
    a plain-text status.  An invalid form is re-rendered with its errors.
    """
    if request.method != 'POST':
        return render(request, 'accounts/login.html', {'form': LoginForm()})
    form = LoginForm(request.POST)
    if form.is_valid():
        cd = form.cleaned_data
        user = authenticate(request,
                            username=cd['username'],
                            password=cd['password'])
        if user is None:
            return HttpResponse('Invalid login')
        if user.is_active:
            login(request, user)
            return HttpResponse('Authenticated successfully')
        return HttpResponse('Disabled account')
    return render(request, 'accounts/login.html', {'form': form})
@login_required
def edit(request):
    """Let the logged-in user edit their account and profile details."""
    if request.method == 'POST':
        user_form = UserEditForm(instance=request.user, data=request.POST)
        profile_form = ProfileEditForm(instance=request.user.profile,
                                       data=request.POST,
                                       files=request.FILES)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
    else:
        user_form = UserEditForm(instance=request.user)
        profile_form = ProfileEditForm(instance=request.user.profile)
    context = {'user_form': user_form, 'profile_form': profile_form}
    return render(request, 'accounts/edit.html', context)
@login_required
def dashboard(request):
    """Render the dashboard page for the logged-in user."""
    context = {'section': 'dashboard'}
    return render(request, 'accounts/dashboard.html', context)
def register(request):
    """Create a new user account together with an empty Profile row."""
    if request.method != 'POST':
        return render(request, 'accounts/register.html',
                      {'user_form': UserRegistrationForm()})
    user_form = UserRegistrationForm(request.POST)
    if not user_form.is_valid():
        # Re-render the form with validation errors.
        return render(request, 'accounts/register.html',
                      {'user_form': user_form})
    # Build the user without hitting the database yet, so the password can
    # be hashed before the row is written.
    new_user = user_form.save(commit=False)
    new_user.set_password(user_form.cleaned_data['password'])
    new_user.save()
    # Every user gets a matching profile record.
    Profile.objects.create(user=new_user)
    return render(request, 'accounts/register_done.html',
                  {'new_user': new_user})
"groyce@unomaha.edu"
] | groyce@unomaha.edu |
140bcc017ac11e31a04350b4432b9f9da84b34d4 | a0b048e4f5c3bc8db7bf3ce2c39ae5387d74d99f | /nnet/nn_models/Parser_biaffine.py | d11f1b78f32df5e7e56e2661c6e0f321a25a1f59 | [] | no_license | AndreiC9/SRL_DEP | 49ce8d985b9290fb23a264b3b46b354be3138021 | 231a2533bb84e24d7eb0681b3d1190809faafeb8 | refs/heads/master | 2021-03-04T17:05:42.179139 | 2020-04-13T13:45:03 | 2020-04-13T13:45:03 | 246,051,575 | 0 | 0 | null | 2020-03-09T14:03:51 | 2020-03-09T14:03:50 | null | UTF-8 | Python | false | false | 12,986 | py | from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
from nnet.util import *
import nnet.decoder as decoder
import numpy as np
import torch
import math
import torch.nn as nn
import torch.autograd
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn
import torch.nn.init as init
from numpy import random as nr
from operator import itemgetter
# Large sentinel constant (typical use: additive masking); not used in this chunk.
_BIG_NUMBER = 10. ** 6.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# NOTE(review): the `if True` makes the `.cpu()` branch unconditional; the
# second lambda is dead code.
get_data = (lambda x: x.data.cpu()) if True else (lambda x: x.data)
def cat(l, dimension=-1):
    """Concatenate a list of tensors along `dimension`, resolving negatives
    against the rank of the first tensor."""
    if dimension < 0:
        dimension += l[0].dim()
    return torch.cat(l, dimension)
class BiLSTMTagger(nn.Module):
    """Biaffine dependency parser built from stacked BiLSTMs.

    Hyper-parameters arrive in the `hps` dict (sizes, embedding dims,
    pretrained word vectors, etc.).
    """
    #def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
    def __init__(self, hps, *_):
        """Build all embeddings, the stacked BiLSTMs and the biaffine scorer."""
        super(BiLSTMTagger, self).__init__()
        batch_size = hps['batch_size']
        lstm_hidden_dim = hps['sent_hdim']
        # DEP input: word + POS embedding; SRL input additionally carries
        # fixed embeddings, predicate lemma and a 16-dim region mark.
        sent_embedding_dim_DEP = 1*hps['sent_edim'] + 1*hps['pos_edim']
        sent_embedding_dim_SRL = 3 * hps['sent_edim'] + 1 * hps['pos_edim'] + 16
        ## for the region mark
        role_embedding_dim = hps['role_edim']
        frame_embedding_dim = role_embedding_dim
        vocab_size = hps['vword']
        self.tagset_size = hps['vbio']
        self.pos_size = hps['vpos']
        self.dep_size = hps['vdep']
        self.frameset_size = hps['vframe']
        self.num_layers = hps['rec_layers']
        self.batch_size = batch_size
        self.hidden_dim = lstm_hidden_dim
        self.word_emb_dim = hps['sent_edim']
        self.specific_dep_size = hps['svdep']
        # Trainable embedding tables.
        self.word_embeddings_SRL = nn.Embedding(vocab_size, hps['sent_edim'])
        self.word_embeddings_DEP = nn.Embedding(vocab_size, hps['sent_edim'])
        self.pos_embeddings = nn.Embedding(self.pos_size, hps['pos_edim'])
        self.pos_embeddings_DEP = nn.Embedding(self.pos_size, hps['pos_edim'])
        self.p_lemma_embeddings = nn.Embedding(self.frameset_size, hps['sent_edim'])
        self.dep_embeddings = nn.Embedding(self.dep_size, self.pos_size)
        self.region_embeddings = nn.Embedding(2, 16)
        #self.lr_dep_embeddings = nn.Embedding(self.lr_dep_size, hps[])
        # Fixed (pretrained) word embeddings, copied from hps.
        self.word_fixed_embeddings = nn.Embedding(vocab_size, hps['sent_edim'])
        self.word_fixed_embeddings.weight.data.copy_(torch.from_numpy(hps['word_embeddings']))
        self.word_fixed_embeddings_DEP = nn.Embedding(vocab_size, hps['sent_edim'])
        self.word_fixed_embeddings_DEP.weight.data.copy_(torch.from_numpy(hps['word_embeddings']))
        self.role_embeddings = nn.Embedding(self.tagset_size, role_embedding_dim)
        self.frame_embeddings = nn.Embedding(self.frameset_size, frame_embedding_dim)
        # Learned "virtual ROOT" token prepended to each sentence in forward().
        self.VR_word_embedding = nn.Parameter(torch.from_numpy(np.ones((1, self.word_emb_dim), dtype='float32')))
        self.VR_POS_embedding = nn.Parameter(
            torch.from_numpy(np.ones((1, 16), dtype='float32')))
        self.hidden2tag = nn.Linear(4*lstm_hidden_dim, 2*lstm_hidden_dim)
        self.MLP = nn.Linear(2*lstm_hidden_dim, self.dep_size)
        self.tag2hidden = nn.Linear(self.dep_size, self.pos_size)
        self.hidden2tag_spe = nn.Linear(2 * lstm_hidden_dim, 2 * lstm_hidden_dim)
        self.MLP_spe = nn.Linear(2 * lstm_hidden_dim, 4)
        self.tag2hidden_spe = nn.Linear(4, self.pos_size)
        #self.elmo_embeddings_0 = nn.Embedding(vocab_size, 1024)
        #self.elmo_embeddings_0.weight.data.copy_(torch.from_numpy(hps['elmo_embeddings_0']))
        #self.elmo_embeddings_1 = nn.Embedding(vocab_size, 1024)
        #self.elmo_embeddings_1.weight.data.copy_(torch.from_numpy(hps['elmo_embeddings_1']))
        self.elmo_emb_size = 200
        self.elmo_mlp_word = nn.Sequential(nn.Linear(1024, self.elmo_emb_size), nn.ReLU())
        self.elmo_word = nn.Parameter(torch.Tensor([0.5, 0.5]))
        self.elmo_gamma_word = nn.Parameter(torch.ones(1))
        self.elmo_mlp = nn.Sequential(nn.Linear(2 * lstm_hidden_dim, self.elmo_emb_size), nn.ReLU())
        self.elmo_w = nn.Parameter(torch.Tensor([0.5, 0.5]))
        self.elmo_gamma = nn.Parameter(torch.ones(1))
        self.SRL_input_dropout = nn.Dropout(p=0.3)
        self.DEP_input_dropout = nn.Dropout(p=0.3)
        self.hidden_state_dropout = nn.Dropout(p=0.3)
        self.label_dropout = nn.Dropout(p=0.5)
        self.link_dropout = nn.Dropout(p=0.5)
        #self.use_dropout = nn.Dropout(p=0.2)
        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim.
        self.num_layers = 1
        self.BiLSTM_0 = nn.LSTM(input_size=sent_embedding_dim_DEP , hidden_size=lstm_hidden_dim, batch_first=True,
                                bidirectional=True, num_layers=self.num_layers)
        # Orthogonal initialisation of both directions' weight matrices.
        init.orthogonal_(self.BiLSTM_0.all_weights[0][0])
        init.orthogonal_(self.BiLSTM_0.all_weights[0][1])
        init.orthogonal_(self.BiLSTM_0.all_weights[1][0])
        init.orthogonal_(self.BiLSTM_0.all_weights[1][1])
        self.num_layers = 1
        self.BiLSTM_1 = nn.LSTM(input_size=lstm_hidden_dim * 2, hidden_size=lstm_hidden_dim, batch_first=True,
                                bidirectional=True, num_layers=self.num_layers)
        init.orthogonal_(self.BiLSTM_1.all_weights[0][0])
        init.orthogonal_(self.BiLSTM_1.all_weights[0][1])
        init.orthogonal_(self.BiLSTM_1.all_weights[1][0])
        init.orthogonal_(self.BiLSTM_1.all_weights[1][1])
        self.num_layers = 4
        self.BiLSTM_SRL = nn.LSTM(input_size=sent_embedding_dim_SRL + self.elmo_emb_size * 1 , hidden_size=lstm_hidden_dim, batch_first=True,
                                  bidirectional=True, num_layers=self.num_layers)
        init.orthogonal_(self.BiLSTM_SRL.all_weights[0][0])
        init.orthogonal_(self.BiLSTM_SRL.all_weights[0][1])
        init.orthogonal_(self.BiLSTM_SRL.all_weights[1][0])
        init.orthogonal_(self.BiLSTM_SRL.all_weights[1][1])
        # non-linear map to role embedding
        self.role_map = nn.Linear(in_features=role_embedding_dim * 2, out_features=self.hidden_dim * 4)
        # Init hidden state
        self.hidden = self.init_hidden_spe()
        self.hidden_2 = self.init_hidden_spe()
        self.hidden_3 = self.init_hidden_spe()
        self.hidden_4 = self.init_hidden_share()
        self.ldims = lstm_hidden_dim
        # Biaffine attention pieces: head/dependent projections plus the
        # (d+1)x(d+1) scoring matrix W_R (the +1 rows/cols carry the bias).
        self.hidLayerFOH = nn.Linear(self.ldims * 2, self.ldims)
        self.hidLayerFOM = nn.Linear(self.ldims * 2, self.ldims)
        self.W_R = nn.Parameter(torch.rand(lstm_hidden_dim+1, 1+lstm_hidden_dim))
def init_hidden_share(self):
# Before we've done anything, we dont have any hidden state.
# Refer to the Pytorch documentation to see exactly
# why they have this dimensionality.
# The axes semantics are (num_layers, minibatch_size, hidden_dim)
#return (Variable(torch.zeros(1, self.batch_size, self.hidden_dim)),
# Variable(torch.zeros(1, self.batch_size, self.hidden_dim)))
return (torch.zeros(4 * 2, self.batch_size, self.hidden_dim, requires_grad=False).to(device),
torch.zeros(4 * 2, self.batch_size, self.hidden_dim, requires_grad=False).to(device))
def init_hidden_spe(self):
# Before we've done anything, we dont have any hidden state.
# Refer to the Pytorch documentation to see exactly
# why they have this dimensionality.
# The axes semantics are (num_layers, minibatch_size, hidden_dim)
#return (Variable(torch.zeros(1, self.batch_size, self.hidden_dim)),
# Variable(torch.zeros(1, self.batch_size, self.hidden_dim)))
return (torch.zeros(1 * 2, self.batch_size, self.hidden_dim, requires_grad=False).to(device),
torch.zeros(1 * 2, self.batch_size, self.hidden_dim, requires_grad=False).to(device))
    def forward(self, sentence, p_sentence, pos_tags, lengths, target_idx_in, region_marks,
                local_roles_voc, frames, local_roles_mask,
                sent_pred_lemmas_idx, dep_tags, dep_heads, targets, specific_dep_tags, specific_dep_relations, test=False):
        """Score dependency arcs with biaffine attention over two BiLSTM layers.

        Only the dependency-parsing path is active here; the SRL inputs are
        accepted but unused.  Returns the arc cross-entropy loss repeated to
        match the caller's expected 15-tuple arity.
        NOTE(review): `dep_heads` is treated as a numpy array (flatten /
        torch.from_numpy) with -1 marking padded positions — confirm at caller.
        """
        # Disabled ELMo mixing code, kept by the author as a string literal.
        """
        elmo_embedding_0 = self.elmo_embeddings_0(sentence).view(self.batch_size, len(sentence[0]), 1024)
        elmo_embedding_1 = self.elmo_embeddings_1(sentence).view(self.batch_size, len(sentence[0]), 1024)
        w = F.softmax(self.elmo_word, dim=0)
        elmo_emb = self.elmo_gamma_word * (w[0] * elmo_embedding_0 + w[1] * elmo_embedding_1)
        elmo_emb_word = self.elmo_mlp_word(elmo_emb)
        """
        #contruct input for DEP
        #torch.tensor(np.zeros((self.batch_size, 1)).astype('int64'), requires_grad=True).to(device)
        #sentence_cat = torch.cat((sentence[:, 0:1], sentence), 1)
        #log(sentence_cat.requires_grad)
        #log(sentence.requires_grad)
        embeds_DEP = self.word_embeddings_DEP(sentence)
        add_zero = torch.zeros((self.batch_size, 1, self.word_emb_dim)).to(device)
        embeds_DEP = embeds_DEP.view(self.batch_size, len(sentence[0]), self.word_emb_dim)
        # Prepend the learned virtual-ROOT word embedding to every sentence.
        embeds_DEP = torch.cat((self.VR_word_embedding+add_zero, embeds_DEP), 1)
        pos_embeds = self.pos_embeddings(pos_tags)
        add_zero = torch.zeros((self.batch_size, 1, 16)).to(device)
        pos_embeds = torch.cat((self.VR_POS_embedding+add_zero, pos_embeds), 1)
        embeds_forDEP = torch.cat((embeds_DEP, pos_embeds), 2)
        #embeds_forDEP = self.DEP_input_dropout(embeds_forDEP)
        #first layer
        # Lengths are +1 everywhere because of the prepended ROOT token.
        embeds_sort, lengths_sort, unsort_idx = self.sort_batch(embeds_forDEP, lengths+1)
        embeds_sort = rnn.pack_padded_sequence(embeds_sort, lengths_sort, batch_first=True)
        # hidden states [time_steps * batch_size * hidden_units]
        hidden_states, self.hidden = self.BiLSTM_0(embeds_sort, self.hidden)
        # it seems that hidden states is already batch first, we don't need swap the dims
        # hidden_states = hidden_states.permute(1, 2, 0).contiguous().view(self.batch_size, -1, )
        hidden_states, lens = rnn.pad_packed_sequence(hidden_states, batch_first=True)
        # hidden_states = hidden_states.transpose(0, 1)
        hidden_states_0 = hidden_states[unsort_idx]
        # second_layer
        embeds_sort, lengths_sort, unsort_idx = self.sort_batch(hidden_states_0, lengths+1)
        embeds_sort = rnn.pack_padded_sequence(embeds_sort, lengths_sort, batch_first=True)
        # hidden states [time_steps * batch_size * hidden_units]
        hidden_states, self.hidden_2 = self.BiLSTM_1(embeds_sort, self.hidden_2)
        # it seems that hidden states is already batch first, we don't need swap the dims
        # hidden_states = hidden_states.permute(1, 2, 0).contiguous().view(self.batch_size, -1, )
        hidden_states, lens = rnn.pad_packed_sequence(hidden_states, batch_first=True)
        #hidden_states = hidden_states.transpose(0, 1)
        hidden_states_1 = hidden_states[unsort_idx]
        ##########################################
        # Biaffine scoring: project head/dependent views, append a bias
        # column of ones, then score every (dependent, head) pair via W_R.
        Head_hidden = F.relu(self.hidLayerFOH(hidden_states_1))
        Dependent_hidden = F.relu(self.hidLayerFOM(hidden_states_1))
        bias_one = torch.ones((self.batch_size, len(sentence[0])+1, 1)).to(device)
        Head_hidden = torch.cat((Head_hidden, Variable(bias_one)), 2)
        bias_one = torch.ones((self.batch_size, len(sentence[0]) + 1, 1)).to(device)
        Dependent_hidden = torch.cat((Dependent_hidden, Variable(bias_one)), 2)
        left_part = torch.mm(Dependent_hidden.view(self.batch_size * (len(sentence[0])+1), -1), self.W_R)
        left_part = left_part.view(self.batch_size, (len(sentence[0])+1), -1)
        Head_hidden = Head_hidden.view(self.batch_size, (len(sentence[0])+1), -1).transpose(1,2)
        # tag_space[i, j]: score of head j for dependent i (flattened over batch).
        tag_space = torch.bmm(left_part, Head_hidden).view(
            (len(sentence[0])+1) * self.batch_size, len(sentence[0])+1)
        heads = np.argmax(tag_space.cpu().data.numpy(), axis=1)
        # Greedy head accuracy, ignoring -1 (padding) positions.
        nums = 0.0
        wrong_nums = 0.0
        log(heads)
        log(dep_heads.flatten())
        for a, b in zip(heads, dep_heads.flatten()):
            if b == -1:
                continue
            nums += 1
            if a != b:
                wrong_nums += 1
        loss_function = nn.CrossEntropyLoss(ignore_index=-1)
        DEPloss = loss_function(tag_space, torch.from_numpy(dep_heads).to(device).view(-1))
        log("loss : ", DEPloss)
        log("dep error rate:", wrong_nums/nums)
        # Placeholder tuple: callers expect 15 values from the full model.
        return DEPloss, DEPloss, DEPloss, DEPloss, 0, 1, 1, 1, 1, \
               1, 1, 1, \
               1, 1, 1
@staticmethod
def sort_batch(x, l):
l = torch.from_numpy(np.asarray(l))
l_sorted, sidx = l.sort(0, descending=True)
x_sorted = x[sidx]
_, unsort_idx = sidx.sort()
return x_sorted, l_sorted, unsort_idx | [
"Rui.Cai@ed.ac.uk"
] | Rui.Cai@ed.ac.uk |
c02f87454674133188c46ec524fb31ff09fa867f | 245ab7db202f911acdf3c8091c204c485f6745ae | /parse_players.py | e1bc63030cd746589a989c8c8a271ec0483acdd3 | [
"MIT"
] | permissive | AVirolainen/football_guess | 2c4fd54b41fe6be748940a3729bf72b890bfac68 | 86c8c5ef2b428a9bde4d98e00df70ed08b1f89fd | refs/heads/main | 2023-04-08T07:07:04.657425 | 2021-04-08T22:11:27 | 2021-04-08T22:11:27 | 356,186,003 | 0 | 0 | MIT | 2021-04-09T07:56:27 | 2021-04-09T07:56:26 | null | UTF-8 | Python | false | false | 340 | py | from bs4 import BeautifulSoup
url = 'example.html'

# Fix: the original passed a bare open(url) to BeautifulSoup and opened
# players.txt without a context manager, leaking both file handles.
with open(url, encoding='utf-8') as page:
    soup = BeautifulSoup(page, 'html.parser')

table_wiki = soup.find('table', {'id': 'playerTopList'})

# Append one underscore-joined player name per line.
with open('players.txt', 'a') as players:
    for td in table_wiki.find_all('td', {'class': 'name'}):
        players.write("_".join(td.text.replace('\n', '').split(' ')) + '\n')
| [
"jackshendrikov@gmail.com"
] | jackshendrikov@gmail.com |
48439c3e69fc5c2270951aa9ecbeed3a36dac1ef | e66e30711887a8bad38deeaa0da2558b333deb1c | /ship.py | 06d8fa89d7afa4fa043532042d692fea175fcaa5 | [] | no_license | Duanhs/Alien-Invasion | 9012de22fec46975f0ccefd734598b7572f327b5 | 8f44669c3e82e6e8ad4457ee7d8277c75f1d80f3 | refs/heads/master | 2020-04-10T12:46:57.514041 | 2018-12-09T11:29:37 | 2018-12-09T11:29:37 | 161,032,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | import pygame
class Ship():
    """Player ship sprite, positioned at the bottom centre of the screen."""

    def __init__(self, screen):
        self.screen = screen
        self.image = pygame.image.load('images/ship.bmp')
        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()
        # Place the ship at the bottom centre of the screen.
        self.rect.centerx = self.screen_rect.centerx
        self.rect.bottom = self.screen_rect.bottom

    def blitme(self):
        """Draw the ship at its current position."""
        self.screen.blit(self.image, self.rect)
"duanhongsi@meituan.com"
] | duanhongsi@meituan.com |
95e084e2db2796dd5bfa76335cfa156cdae7f351 | 71cea814dcf5ceb6d2b3a86f33273e532f5a9b77 | /simpa_tests/automatic_tests/TestPathManager.py | 554e50fb7053f0a7d56d466388f64b2150d14285 | [
"MIT"
] | permissive | mschllnbrg/simpa | 7744bba073f9c01b7a60f076a49fe80b5e6d12fa | 2ca2b81039a7252da7c618d8ba69527f2d6f240f | refs/heads/master | 2023-06-19T18:59:02.301717 | 2021-06-24T12:47:55 | 2021-06-24T12:47:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,862 | py | """
SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
SPDX-FileCopyrightText: 2021 VISION Lab, Cancer Research UK Cambridge Institute (CRUK CI)
SPDX-License-Identifier: MIT
"""
import unittest
import os, inspect
from simpa.utils import PathManager
from pathlib import Path
from dotenv import unset_key
class TestLogging(unittest.TestCase):
    """Tests for PathManager's config-file discovery order.

    NOTE(review): the class name says "Logging" but it exercises PathManager —
    presumably a copy/paste leftover; renaming would change the public name.
    The tests temporarily hide/restore real `path_config.env` files in the
    home dir, CWD and SIMPA base dir, so they mutate the local filesystem.
    """

    def setUp(self):
        # Expected values written into (and read back from) the config file.
        self.path = '/path_config.env'
        self.save_path = "/workplace/data/"
        self.mcx_path = "/workplace/mcx.exe"
        self.matlab_path = "/workplace/matlab.exe"
        self.file_content = (f"# Example path_config file. Please define all required paths for your simulation here.\n"
                             f"# Afterwards, either copy this file to your current working directory, to your home directory,\n"
                             f"# or to the SIMPA base directry.\n"
                             f"SAVE_PATH={self.save_path}\n"
                             f"MCX_BINARY_PATH={self.mcx_path}\n"
                             f"MATLAB_BINARY_PATH={self.matlab_path}")
        # The three locations PathManager searches, and whether a real
        # config file already exists there (so it can be restored later).
        self.home_file = str(Path.home()) + self.path
        self.home_file_exists = os.path.exists(self.home_file)
        self.cwd_file = os.getcwd() + "/" + self.path
        self.cwd_file_exists = os.path.exists(self.cwd_file)
        self.current_file_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
        self.simpa_home = self.current_file_path + "/../../" + self.path
        self.simpa_home_exists = os.path.exists(self.simpa_home)

    @unittest.expectedFailure
    def test_instantiate_path_manager_with_wrong_path(self):
        # A non-existent explicit path must raise.
        PathManager("rubbish/path/does/not/exist")

    def test_instantiate_when_file_is_in_home(self):
        if not self.home_file_exists:
            self.write_config_file(self.home_file)
        path_manager = PathManager()
        self.check_path_manager_correctly_loaded(path_manager)
        if not self.home_file_exists:
            self.delete_config_file(self.home_file)

    @unittest.expectedFailure
    def test_fail_if_no_default_directories_set(self):
        # Hide every config file; construction must fail.
        if self.home_file_exists:
            self.hide_config_file(self.home_file)
        if self.cwd_file_exists:
            self.hide_config_file(self.cwd_file)
        if self.simpa_home_exists:
            self.hide_config_file(self.simpa_home)
        try:
            PathManager()
        finally:
            # Always restore the user's real files.
            if self.home_file_exists:
                self.restore_config_file(self.home_file)
            if self.cwd_file_exists:
                self.restore_config_file(self.cwd_file)
            if self.simpa_home_exists:
                self.restore_config_file(self.simpa_home)

    def test_instantiate_when_file_is_in_cwd(self):
        # Only the CWD copy should be visible for this test.
        if self.home_file_exists:
            self.hide_config_file(self.home_file)
        if self.simpa_home_exists:
            self.hide_config_file(self.simpa_home)
        if not self.cwd_file_exists:
            self.write_config_file(self.cwd_file)
        path_manager = PathManager()
        self.check_path_manager_correctly_loaded(path_manager)
        if self.home_file_exists:
            self.restore_config_file(self.home_file)
        if self.simpa_home_exists:
            self.restore_config_file(self.simpa_home)
        if not self.cwd_file_exists:
            self.delete_config_file(self.cwd_file)

    def test_instantiate_when_file_is_in_simpa_home(self):
        # Only the SIMPA-base copy should be visible for this test.
        if self.home_file_exists:
            self.hide_config_file(self.home_file)
        if self.cwd_file_exists:
            self.hide_config_file(self.cwd_file)
        if not self.simpa_home_exists:
            self.write_config_file(self.simpa_home)
        path_manager = PathManager()
        self.check_path_manager_correctly_loaded(path_manager)
        if self.home_file_exists:
            self.restore_config_file(self.home_file)
        if self.cwd_file_exists:
            self.restore_config_file(self.cwd_file)
        if not self.simpa_home_exists:
            self.delete_config_file(self.simpa_home)

    def check_path_manager_correctly_loaded(self, path_manager: PathManager):
        """Assert all three configured paths round-trip through PathManager."""
        self.assertEqual(path_manager.get_hdf5_file_save_path(), self.save_path)
        self.assertEqual(path_manager.get_mcx_binary_path(), self.mcx_path)
        self.assertEqual(path_manager.get_matlab_binary_path(), self.matlab_path)

    def write_config_file(self, path):
        with open(path, "w") as write_path:
            write_path.writelines(self.file_content)

    def delete_config_file(self, path):
        os.remove(path)

    def hide_config_file(self, path: str):
        # Rename to *.backup so PathManager cannot find it.
        os.rename(path, path.replace("path_config.env", "path_config.env.backup"))

    def restore_config_file(self, path: str):
        os.rename(path.replace("path_config.env", "path_config.env.backup"), path)
"janek.grohl@cruk.cam.ac.uk"
] | janek.grohl@cruk.cam.ac.uk |
1055aa3e500f07c7be0eca6e0c57b8bee6333395 | 68329275022f3a392161b7746caf899e2516d547 | /10. HAFTA ODEVI AMIRAL BATTI.py | b6ac42ae3962c3e2410678e7b78ee2d2c2d19535 | [] | no_license | Osmandursunn/10.Hafta-Odev | 32e2f75870a0b4085190dc8443162987b90dd517 | 1f3a0ad4642a7818ac8aa3cec919b040bd178f52 | refs/heads/master | 2020-07-09T21:29:31.590476 | 2019-08-24T00:31:51 | 2019-08-24T00:31:51 | 204,088,061 | 0 | 0 | null | 2019-08-24T00:30:31 | 2019-08-24T00:30:31 | null | UTF-8 | Python | false | false | 8,894 | py | import random
import time
print('\u272A'*16,"AMIRAL BATTI",'\u272A'*16,sep="")
print("""
*******************************************************
*******************************************************
AMIRAL BATTI OYUNUNA HOSGELDINIZ
*******************************************************
*******************************************************
_________Oyunumuzda 2 adet4'lu Ucak gemimiz____________
______2 adet 3'lu, 2 adet 2'li, 2 adet de tekli________
_________Destroyerimiz 10X10 bir denizde yatay_________
___________veya dikey olarak konumlanmistir____________
________Lutfen tablodan atis tahmininizi yapiniz_______
*******************************************************
*******************************************************
""")
tahta = [''," 1", " 2", " 3"," 4", " 5", " 6"," 7", " 8", " 9","10\n",
"11", "12", "13","14", "15", "16","17", "18", "19","20\n",
"21", "22", "23","24", "25", "26","27", "28", "29","30\n",
"31", "32", "33","34", "35", "36","37", "38", "39","40\n",
"41", "42", "43","44", "45", "46","47", "48", "49","50\n",
"51", "52", "53","54", "55", "56","57", "58", "59","60\n",
"61", "62", "63","64", "65", "66","67", "68", "69","70\n",
"71", "72", "73","74", "75", "76","77", "78", "79","80\n",
"81", "82", "83","84", "85", "86","87", "88", "89","90\n",
"91", "92", "93","94", "95", "96","97", "98", "99","100\n",]
def ydortlu():
    """Place a horizontal 4-cell carrier; stores and returns its cells in `bos`."""
    # Valid anchors: columns 1-7 of every row, so the ship fits on the row.
    starts = [row + col for row in range(0, 100, 10) for col in range(1, 8)]
    global bos
    anchor = random.choice(starts)
    bos = [anchor + offset for offset in range(4)]
    return bos
ydortlu()
def ddortlu():
    """Place a vertical 4-cell carrier; stores and returns its cells in `dbos`."""
    starts = [x for x in range(1, 71)]  # anchor rows 1-7 so the ship fits
    global dbos
    anchor = random.choice(starts)
    dbos = [anchor + 10 * step for step in range(4)]
    return dbos
ddortlu()
def yuclu():
    """Place a horizontal 3-cell cruiser; stores and returns its cells in `ubos`."""
    # Valid anchors: columns 1-8 of every row.
    starts = [row + col for row in range(0, 100, 10) for col in range(1, 9)]
    global ubos
    anchor = random.choice(starts)
    ubos = [anchor + offset for offset in range(3)]
    return ubos
yuclu()
def duclu():
    """Place a vertical 3-cell cruiser; stores and returns its cells in `dubos`."""
    starts = [x for x in range(1, 81)]  # anchor rows 1-8 so the ship fits
    global dubos
    anchor = random.choice(starts)
    dubos = [anchor + 10 * step for step in range(3)]
    return dubos
duclu()
def yikili():
    """Place a horizontal 2-cell destroyer; stores and returns its cells in `yibos`."""
    # Valid anchors: columns 1-9 of every row.
    starts = [row + col for row in range(0, 100, 10) for col in range(1, 10)]
    global yibos
    anchor = random.choice(starts)
    yibos = [anchor, anchor + 1]
    return yibos
yikili()
def dikili():
    """Place a vertical 2-cell destroyer; stores and returns its cells in `dibos`."""
    starts = [x for x in range(1, 91)]  # anchor rows 1-9 so the ship fits
    global dibos
    anchor = random.choice(starts)
    dibos = [anchor, anchor + 10]
    return dibos
dikili()
def tekli():
    """Place two single-cell ships; stores and returns both cells in `tbos`."""
    cells = [x for x in range(1, 101)]
    global tbos
    # Two independent picks (the two cells may coincide, as in the original).
    tbos = [random.choice(cells), random.choice(cells)]
    return tbos
tekli()
# Known gap: overlapping ships are not prevented!
# NOTE(review): indentation below is reconstructed from a whitespace-stripped
# dump — verify nesting against the original file.
secim = []  # every guess made so far, plus all cells of ships already sunk
deneme = 0  # number of missed shots; the player gets 15 misses
while deneme < 15:  # outer loop ends when the misses are used up
    while True:  # re-prompt on bad input, repeats, and hits
        print("\n")
        # Draw the current board state.
        for i in tahta:
            print(i, end=" ")
        print("Kalan hakkiniz:", (15-deneme))
        secim1 = (input('Seciminiz:'))
        # Validate the guess: numeric, in range, and not already played.
        if secim1.isnumeric() == False:
            print("Lutfen sayi giriniz.")
            continue
        elif int(secim1) < 0 or int(secim1) > 101:
            print("0 ile 100 arasinda secim yapiniz.")
            continue
        elif secim1 in secim or int(secim1) in secim:
            print("Bu atisi zaten yaptiniz")
            continue
        else:
            secim.append(int(secim1))
            time.sleep(1)
            # Check the guess against every remaining ship list.
            for i in tahta:
                if not (bos == dbos == ubos == dubos == yibos == dibos == tbos == []):
                    if int(secim1) in bos:
                        # Reveal the whole ship once any cell is hit, and
                        # record its cells so they cannot be guessed again.
                        for i in bos:
                            secim.append(i)
                            print(i, "Ucak Gemisi Vurdunuz!", end="")
                            del tahta[int(i)]
                            tahta.insert(int(i), '\u272A')
                        bos.clear()
                        break
                    elif int(secim1) in dbos:
                        for i in dbos:
                            secim.append(i)
                            print(i, "Ucak Gemisi Vurdunuz!", end="")
                            del tahta[int(i)]
                            tahta.insert(int(i), '\u272A')
                        dbos.clear()
                        break
                    elif int(secim1) in ubos:
                        for i in ubos:
                            secim.append(i)
                            print(i, "Kruvazor Vurdunuz!", end="")
                            del tahta[int(i)]
                            tahta.insert(int(i), '\u272A')
                        ubos.clear()
                        break
                    elif int(secim1) in dubos:
                        for i in dubos:
                            secim.append(i)
                            print(i, "Kruvazor Vurdunuz!", end="")
                            del tahta[int(i)]
                            tahta.insert(int(i), '\u272A')
                        dubos.clear()
                        break
                    elif int(secim1) in yibos:
                        for i in yibos:
                            secim.append(i)
                            print(i, "Destroyer Vurdunuz!", end="")
                            del tahta[int(i)]
                            tahta.insert(int(i), '\u272A')
                        yibos.clear()
                        break
                    elif int(secim1) in dibos:
                        for i in dibos:
                            secim.append(i)
                            print(i, "Destroeyer Vurdunuz!", end="")
                            del tahta[int(i)]
                            tahta.insert(int(i), '\u272A')
                        dibos.clear()
                        break
                    elif int(secim1) in tbos:
                        # Single-cell ship: remove only the hit cell.
                        for i in tbos:
                            if int(i) == int(secim1):
                                print(i, "Hafif gemi Vurdunuz!", end="")
                                del tahta[int(i)]
                                tahta.insert(int(i), '\u272A')
                                tbos.remove(int(i))
                            else:
                                pass
                        break
                else:
                    # All ship lists empty: every ship has been sunk
                    # (game ends -- assuming ships never overlapped!).
                    print("Tebrikler...Tum gemileri batirdiniz")
                    quit()
            else:
                # The for-loop completed without a hit: a miss.
                print("Hoop! Karavana...")
                deneme += 1
                del tahta[int(secim1)]
                tahta.insert(int(secim1), "?")
                break
print("Hakkiniz bitti, Tekrar deneyin!")
| [
"noreply@github.com"
] | noreply@github.com |
f6066d060c195e6f9ef837b859b666ab4f30bdb8 | 096167807fa625681beae7e25919357c90b89e75 | /emails/models.py | 1fb86f349ab69c1489f2ef26d7c95be401ff5b2d | [] | no_license | bussiere/Sumomo | c849484fbae37490998bcc44e232bf6a252fe9d7 | ac3efc46014e66e193c5f852d121a25dd0a9ec5e | refs/heads/master | 2021-01-19T11:34:42.645970 | 2012-08-31T04:15:32 | 2012-08-31T04:15:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | from django.db import models
# Create your models here.
class Contact(models.Model):
    # Raw email-address text for this contact (field name suggests one or more).
    Emails = models.TextField(null=True, blank=True)
class Email(models.Model):
    """A stored email message: sender, recipients, body, attachments and tags.

    NOTE(review): ForeignKey/M2M without on_delete — pre-Django-2.0 style.
    """
    Sender = models.ForeignKey("Contact", related_name="Sender", null=True, blank=True)
    Recepter = models.ManyToManyField("Contact", related_name="Recepter", null=True, blank=True)
    Title = models.TextField(null=True, blank=True)
    Date = models.DateField(null=True, blank=True)
    Content = models.TextField(null=True, blank=True)
    File = models.ManyToManyField("attachments.File", null=True, blank=True)
    Tag = models.ManyToManyField("tags.Tag", null=True, blank=True)
Tag = models.ManyToManyField("tags.Tag", null=True, blank=True) | [
"bussiere@gmail.com"
] | bussiere@gmail.com |
2909402b00fb3a6e6b883535089989ab85eb7e84 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/graph_objs/mesh3d/legendgrouptitle/_font.py | 766e46a49d072ae53fadbf9814e540e8ef7dfdbb | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 8,471 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "mesh3d.legendgrouptitle"
_path_str = "mesh3d.legendgrouptitle.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.mesh3d.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.mesh3d.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.mesh3d.legendgrouptitle.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| [
"nicolas@plot.ly"
] | nicolas@plot.ly |
305a52c242ec94adeaaf52fb037f2f4072fe2272 | 8961efe29765a8093bcd669adb3fa6ca186eadfd | /toolbox/attacks/FGSM-Attack/run_all.py | de63948e966ad6a4eb5ec4441dfcdcc3b516de03 | [] | no_license | JayceeLee/adversarial-toolbox | 12bfe720fd0984b6dc1c10d61486b2e36f22fde9 | 01d624d995d3c55d220cdf570ca00510f32cc43a | refs/heads/master | 2020-03-27T04:22:55.631986 | 2018-02-06T01:25:27 | 2018-02-06T01:25:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | python fgsm_inception_v3.py
python fgsm_inception_resnet_v2.py
python fgsm_resnet_v2_101.py
| [
"nealeratzlaff@gmail.com"
] | nealeratzlaff@gmail.com |
119d5dd6903d69bdfa649d76ecd2c0552b636918 | 5099e029c341e6111a331c66a49c8ef2a295d441 | /common/encryption.py | c6c087cdd4eb2cc808303177e9ae97091aa4778a | [] | no_license | cherrishes/weilaiDemo | 6abfcd8260f0ca515fa8a92aef53546362f28b34 | c6e3a6341630de0f7906055e4120f1bb0f258d64 | refs/heads/master | 2021-01-10T13:40:43.437971 | 2016-04-05T11:06:11 | 2016-04-05T11:06:11 | 54,546,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # !/bash/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'rdy'
import hashlib
def md5(val):
"""
字符串MD5加密
:param val:
:return:
"""
if isinstance(val, str):
m = hashlib.md5()
m.update(val.encode('utf-8'))
return m.hexdigest()
else:
return ''
if __name__ == '__main__':
r = md5('123')
print(r) | [
"rendy@56iq.com"
] | rendy@56iq.com |
8c2130f6ba303911b5d6f295829119020fd3ec10 | f4927e1b2ad87cff185a9d6a5100c40ffcf85ffd | /blog/urls.py | 18a04bd99c32e5a1a9735ba30accbc1160739c29 | [] | no_license | abhitechno01/my-first-blog | e930d184b6ee6e4c3075be85a8a41c7e5a00d6f7 | a14163e98735674b6b6338d7daeea3893bd9240d | refs/heads/master | 2021-01-10T14:47:57.687645 | 2017-03-05T09:21:33 | 2017-03-05T09:21:33 | 47,018,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.post_list, name='post_list'),
url(r'^post/(?P<pk>[0-9]+)/$', views.post_detail, name='post_detail'),
url(r'^post/new/$', views.post_new, name='post_new'),
url(r'^post/(?P<pk>[0-9]+)/edit/$', views.post_edit, name='post_edit'),
]
| [
"abhi.techno01@gmail.com"
] | abhi.techno01@gmail.com |
2cf4e7801619ee837dad4b3378ec60eb0aef2667 | 444468f946f906063b79e381cba03f8027553024 | /tools/demo.py | 89fdcc9c70324c0b877086fb8b77e9f2a5508c19 | [
"MIT"
] | permissive | neuqgz/modify-faster-rcnn-tf | ea0af9f42b986373db64b723b5295c31aed3f047 | e935660d7645d405be309843d1778adc6cd57332 | refs/heads/master | 2020-03-21T23:49:16.571284 | 2018-06-30T10:29:27 | 2018-06-30T10:29:27 | 139,207,861 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,631 | py | #!/usr/bin/env python
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen, based on code from Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
from model.config import cfg
from model.test import im_detect
from model.nms_wrapper import nms
from utils.timer import Timer
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os, cv2
import argparse
from nets.vgg16 import vgg16
from nets.resnet_v1 import resnetv1
CLASSES = ('__background__',
'1','2')
# CLASSES = ('__background__',
# 'aeroplane', 'bicycle', 'bird', 'boat',
# 'bottle', 'bus', 'car', 'cat', 'chair',
# 'cow', 'diningtable', 'dog', 'horse',
# 'motorbike', 'person', 'pottedplant',
# 'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'vgg16': ('vgg16_faster_rcnn_iter_5000.ckpt',),'res101': ('res101_faster_rcnn_iter_110000.ckpt',)}
DATASETS= {'pascal_voc': ('voc_2007_trainval',),'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',)}
def vis_detections(im, class_name, dets, ax, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
# im = im[:, :, (2, 1, 0)]
# fig, ax = plt.subplots(figsize=(12, 12))
# ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
# plt.axis('off')
# plt.tight_layout()
# plt.draw()
def demo(sess, net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(sess, net, im)
timer.toc()
print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
vis_detections(im, cls, dets, ax, thresh=CONF_THRESH)
plt.axis('off')
plt.tight_layout()
plt.draw()
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Tensorflow Faster R-CNN demo')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16 res101]',
choices=NETS.keys(), default='vgg16')
parser.add_argument('--dataset', dest='dataset', help='Trained dataset [pascal_voc pascal_voc_0712]',
choices=DATASETS.keys(), default='pascal_voc')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
# model path
demonet = args.demo_net
dataset = args.dataset
tfmodel = os.path.join('output', demonet, DATASETS[dataset][0], 'default',
NETS[demonet][0])
if not os.path.isfile(tfmodel + '.meta'):
raise IOError(('{:s} not found.\nDid you download the proper networks from '
'our server and place them properly?').format(tfmodel + '.meta'))
# set config
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth=True
# init session
sess = tf.Session(config=tfconfig)
# load network
if demonet == 'vgg16':
net = vgg16()
elif demonet == 'res101':
net = resnetv1(num_layers=101)
else:
raise NotImplementedError
net.create_architecture("TEST", 3,
tag='default', anchor_scales=[8, 16, 32])
saver = tf.train.Saver()
saver.restore(sess, tfmodel)
print('Loaded network {:s}'.format(tfmodel))
# im_names = ['000001.jpg', '000002.jpg', '000003.jpg', '000004.jpg', '000005.jpg', '000006.jpg', '000007.jpg',
# '000008.jpg', '000009.jpg', '000010.jpg', '000011.jpg', '000012.jpg', '000013.jpg', '000014.jpg',
# '000015.jpg', '000016.jpg', '000017.jpg', '000018.jpg', '000019.jpg', '000020.jpg', '000021.jpg',
# '000022.jpg', '000023.jpg', '000024.jpg', '000025.jpg', '000026.jpg', '000027.jpg', '000028.jpg',
# '000029.jpg', '000030.jpg', '000031.jpg', '000032.jpg', '000033.jpg', '000034.jpg', '000035.jpg',
# '000036.jpg', '000037.jpg', '000038.jpg', '000039.jpg', '000040.jpg', '000041.jpg', '000042.jpg',
# '000043.jpg', '000044.jpg']
#
# im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
# '001763.jpg', '004545.jpg']
im_names = ['000101.jpg', '000102.jpg', '000103.jpg',
'000104.jpg', '000105.jpg', '000106.jpg',
'000107.jpg', '000108.jpg', '000109.jpg', '000110.jpg',
'000111.jpg', '000112.jpg', '000113.jpg', '000114.jpg','000115.jpg','000116.jpg']
for im_name in im_names:
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Demo for data/demo/{}'.format(im_name))
demo(sess, net, im_name)
plt.savefig("../Pictures/" + im_name)
#plt.show()
| [
"1072464610@qq.com"
] | 1072464610@qq.com |
7b971f292708ee7e0c19c8b4c0113acc5eaa8c75 | 07d110e3350b42982c493be12fbdce0d5b6acdd3 | /users/migrations/0002_auto_20200819_2130.py | bdcda405b91eeddfc0baf8726df33e2862574693 | [] | no_license | AlissonS47/django-challenge | 387f99067e4b478db20de27e7922abe96e79555b | 2ca67ac0696bd30f94236832514641374347a73e | refs/heads/master | 2022-12-08T07:19:16.718695 | 2020-09-05T00:09:22 | 2020-09-05T00:09:22 | 287,840,861 | 0 | 0 | null | 2020-08-15T23:29:22 | 2020-08-15T23:29:22 | null | UTF-8 | Python | false | false | 382 | py | # Generated by Django 3.1 on 2020-08-20 00:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='naver',
name='admission_date',
field=models.DateField(auto_now_add=True),
),
]
| [
"tec.alisson47@gmail.com"
] | tec.alisson47@gmail.com |
8cd85855d175d322e73f636de7aed0b6850bdf52 | 2f233b31ea7ffefad4b901b561f341fabe3bbb1f | /2017/02a.py | 77f9ee8c4d1e176ea1331fdbdd314eff205802e3 | [
"MIT"
] | permissive | cz-fish/advent-of-code | 066b63c3ac2e3b13bf88ae86843a7a9a7b687e96 | ecbcef544e8d89ec019464811760ce86f84dbc6e | refs/heads/master | 2023-08-03T19:41:23.186666 | 2023-03-14T08:59:04 | 2023-03-14T08:59:04 | 226,355,674 | 0 | 0 | MIT | 2023-07-20T02:51:13 | 2019-12-06T15:17:10 | Python | UTF-8 | Python | false | false | 593 | py | #!/usr/bin/env python3
grid = []
with open('input02.txt', 'rt') as f:
for ln in f.readlines():
grid.append([int(x) for x in ln.strip().split('\t')])
print(sum([max(l) - min(l) for l in grid]))
print('-----')
s = 0
for ln in grid:
srt = sorted(ln)
stop = False
for i in range(len(srt) - 1):
x = srt[i]
if x == 0:
continue
for j in range(i+1, len(srt)):
y = srt[j]
if y // x * x == y:
s += y // x
stop = True
break
if stop:
break
print(s) | [
"filip.simek@gmail.com"
] | filip.simek@gmail.com |
dc543898fee01c7ed58926a7c5f42df05801e873 | 9cf369ce8ea40142917e0fae6dd0dae7d60667ed | /Blog/apps/blog/models.py | c2ba5823d8771f31d2476275e125bf9e399a106c | [] | no_license | wcleonard/interview | ef60b5a2bec36bc3a077b54ceb88ea43a30ab3d2 | bf396556d1a65fbae536373967e2d1bf6de52b4d | refs/heads/master | 2022-11-30T17:12:18.545996 | 2019-07-10T08:51:53 | 2019-07-10T08:51:53 | 187,006,175 | 0 | 0 | null | 2022-11-22T03:49:12 | 2019-05-16T10:29:38 | JavaScript | UTF-8 | Python | false | false | 3,042 | py | from django.db import models
from django.utils.timezone import now
class Tag(models.Model):
name = models.CharField(verbose_name='标签名', max_length=64)
created_time = models.DateTimeField(verbose_name='创建时间', default=now)
last_mod_time = models.DateTimeField(verbose_name='修改时间', default=now)
# 使对象在后台显示更友好
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = '标签名称' # 指定后台显示模型名称
verbose_name_plural = '标签列表' # 指定后台显示模型复数名称
db_table = "tag" # 数据库表名
class Category(models.Model):
name = models.CharField(verbose_name='类别名称', max_length=64)
created_time = models.DateTimeField(verbose_name='创建时间', default=now)
last_mod_time = models.DateTimeField(verbose_name='修改时间', default=now)
class Meta:
ordering = ['name']
verbose_name = "类别名称"
verbose_name_plural = '分类列表'
db_table = "category" # 数据库表名
# 使对象在后台显示更友好
def __str__(self):
return self.name
class Article(models.Model):
STATUS_CHOICES = (
('d', '草稿'),
('p', '发表'),
)
title = models.CharField(verbose_name='标题', max_length=100)
content = models.TextField(verbose_name='正文', blank=True, null=True)
status = models.CharField(verbose_name='状态', max_length=1, choices=STATUS_CHOICES, default='p')
views = models.PositiveIntegerField(verbose_name='浏览量', default=0)
created_time = models.DateTimeField(verbose_name='创建时间', default=now)
pub_time = models.DateTimeField(verbose_name='发布时间', blank=True, null=True)
last_mod_time = models.DateTimeField(verbose_name='修改时间', default=now)
category = models.ForeignKey(Category, verbose_name='分类', on_delete=models.CASCADE, blank=False, null=False)
tags = models.ManyToManyField(Tag, verbose_name='标签集合', blank=True)
# 使对象在后台显示更友好
def __str__(self):
return self.title
# 更新浏览量
def viewed(self):
self.views += 1
self.save(update_fields=['views'])
# 下一篇
def next_article(self):
# id比当前id大,状态为已发布,发布时间不为空
return Article.objects.filter(id__gt=self.id, status='p', pub_time__isnull=False).first()
# 前一篇
def prev_article(self):
# id比当前id小,状态为已发布,发布时间不为空
return Article.objects.filter(id__lt=self.id, status='p', pub_time__isnull=False).first()
class Meta:
ordering = ['-pub_time'] # 按文章创建日期降序
verbose_name = '文章' # 指定后台显示模型名称
verbose_name_plural = '文章列表' # 指定后台显示模型复数名称
db_table = 'article' # 数据库表名
get_latest_by = 'created_time'
| [
"wancanin@163.com"
] | wancanin@163.com |
56b112ec026b96dc86c4812ccb51a04bbdb70086 | 9b9e4af541fdf3609fdcd4c4f880cfc04c2de610 | /utils/testing_fashionmnist.py | ace075520517204059a29f98648c2319b4b7a616 | [] | no_license | HeleneFabia/fashion-mnist | e1f279c62197ae7037c09ab39a1042699901aab2 | 3510dfa0af9708088ac17e912d9e4c61913f614b | refs/heads/master | 2023-01-08T02:29:16.187348 | 2020-11-04T07:14:18 | 2020-11-04T07:14:18 | 299,336,341 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,687 | py | from sklearn.metrics import accuracy_score
import torch
from torch.nn import functional as F
from torch import nn
from torch.utils import data
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot
import numpy as np
def test_model(net, test_ds, device):
"""
Testing a model with the test set
"""
net = net.to(device)
net.eval()
loss = nn.CrossEntropyLoss()
correct = 0
best_val_acc = 0
loss_examples = []
idx_false_preds = []
net.eval()
with torch.no_grad():
X_test = test_ds.images.to(device)
y_test = test_ds.labels.to(device)
test_preds = net(X_test)
test_loss = loss(test_preds, y_test).detach().item()
predicted = torch.max(test_preds.data, 1)[1]
correct += (predicted == y_test).sum()
correct = correct.float()
correct_epoch = (correct/(10000))
for i in range(len(predicted)):
if predicted[i] != y_test[i]:
idx_false_preds.append(i)
print(f'Test Loss: {test_loss:.4f}, Test Accuracy: {correct_epoch:.4f}')
return best_val_acc, test_preds, idx_false_preds
def get_results(model, test_ds_cnn, batch_size, verbose=False):
"""
Get predictions, true labels and visualization of images of a random batch of the test set.
"""
x, y = get_test_batch(batch_size=25, test_ds_cnn=test_ds_cnn)
with torch.no_grad():
pred = model(x)
pred = pred.cpu().detach().numpy()
predictions = []
probabilities = []
for i, pred in enumerate(pred):
pred_soft = softmax(pred)
pred_label = np.argmax(pred_soft)
pred_prob = pred_soft[pred_label]
predictions.append(pred_label)
probabilities.append(pred_prob)
if verbose:
print('Predicted Label:', pred_label)
print('Predicted Probability:', pred_soft[pred_label])
print('Actual Label:', y[i])
print('___')
show_predicted_images(x.reshape(batch_size,28,28), int(batch_size/5), 5, true=(get_label)(y), pred=get_label(predictions), probabilities=probabilities)
def get_test_batch(batch_size, test_ds_cnn):
"""
Get a random batch of size batch_size.
"""
assert (batch_size%5 == 0),"Choose batch_size that is multiple of 5."
test_dl = data.DataLoader(test_ds_cnn, batch_size=batch_size, shuffle=True)
for batch in test_dl:
x = batch[0].cuda()
y = batch[1].detach().cpu().numpy()
break
return x, y
def show_predicted_images(images, num_rows, num_cols, true=None, pred=None, probabilities=None, scale=1.5):
"""
Show image alongside the predicted and true label.
"""
figsize = (num_cols * 2, num_rows * 1.5)
figure, axes = plt.subplots(num_rows, num_cols, figsize=figsize)
axes = axes.flatten()
figure.tight_layout()
for i, (ax, images) in enumerate(zip(axes, images.cpu())):
ax.imshow(np.array(images), cmap='gray')
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
if true and pred:
ax.set_title(f'Label: {true[i]}\nPred: {pred[i]} ({probabilities[i]:.2f})')
plt.tight_layout()
return axes
def get_label(label):
"""
To get a label as a string when entering a numeric label.
"""
text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
return [text_labels[i] for i in label]
def softmax(x):
e_x = np.exp(x)
return e_x / e_x.sum() | [
"noreply@github.com"
] | noreply@github.com |
d39213359879393713f56cfeaab771126b841676 | ec205806d24c256cb276534b9fc2cdc80fb728b6 | /16_user_name.py | cd328a7bf9482113d4eeb6c49092a69c05729904 | [] | no_license | siddhusalvi/basic-python | a4094665f8c22fa164f749b8bc6884970922abb9 | 8b637a9a468110c95f02871f2bb947913b495014 | refs/heads/master | 2020-12-12T03:47:54.803601 | 2020-01-16T11:53:46 | 2020-01-16T11:53:46 | 234,035,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | """
Write a Python program to get the current username
"""
import getpass
print("Current username is :", getpass.getuser())
| [
"siddheshsalvi525@gmail.com"
] | siddheshsalvi525@gmail.com |
698b0601027480d0838e9463b081837db17caabc | 55631088b41f203027c399a501e9c344d99d7dae | /app/routes/route.py | 10c6bf047ce1e8f1b929d235addd7ce365157c0e | [] | no_license | subham2126/Rest-API-using-Flask-and-Cassandra | 89f5e4885b3b0ed28e35fc6f8eb472f1e341333d | 0fb8a9f828a0fcb39d2f99c97fbca79c8e96ef29 | refs/heads/master | 2021-08-22T03:38:50.863931 | 2017-11-29T05:12:06 | 2017-11-29T05:12:06 | 112,316,110 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,043 | py | from flask import Blueprint,Flask,request
import json
from cassandra.cluster import Cluster
cluster = Cluster(["127.0.0.1"])
api = Blueprint('api', __name__,url_prefix='/module');
class myClass:
@api.route('/login')
def login_in():
session = cluster.connect('tutorialspoint')
rows = session.execute('SELECT emp_id, emp_city, emp_name FROM emp')
rows_as_dict = []
for row in rows:
temp = {
'id' : row.emp_id,
'city' : row.emp_city,
'name' : row.emp_name}
rows_as_dict.append(temp)
#print (row.emp_id, row.emp_city, row.emp_name)
return ((json.dumps(rows_as_dict)));
@api.route('/signup')
def sign_up():
return 'signup!'
@api.route("/sumNumber",methods=['POST'])
def doSum():
a = int(json.loads(request.data)['a'])
b = int(json.loads(request.data)['b'])
return str(a+b)
@api.route("/insertData",methods=['post'])
def doInsert():
id = int(json.loads(request.data)['id'])
city= json.loads(request.data)['city']
name = json.loads(request.data)['name']
session = cluster.connect('tutorialspoint')
session.execute(
"""
INSERT INTO emp (emp_id, emp_city, emp_name)
VALUES (%s, %s, %s)
""",
(id, city, name)
)
return "SUCCESS"
@api.route('/search')
def search():
id_search = (request.args['x'])
session = cluster.connect('tutorialspoint')
rows = session.execute("SELECT emp_id, emp_city, emp_name FROM emp WHERE emp_id=" + id_search)
rows_as_dict = []
for row in rows:
temp = {
'id' : row.emp_id,
'city' : row.emp_city,
'name' : row.emp_name}
rows_as_dict.append(temp)
#print (row.emp_id, row.emp_city, row.emp_name)
return ((json.dumps(rows_as_dict)))
| [
"subham.gupta@medlife.com"
] | subham.gupta@medlife.com |
69e047b18d45718a8b61d0b513fee6d2958265d9 | 9c371b80b2b0cc610ba53c1d719f9ccf6150ea32 | /Task-2/codeforces/cf-71A.py | 55e74f433fc43d154cbb83ab9f037b4c9596ceef | [] | no_license | praveenjm2000/amfoss-tasks | cd329021fdd67cb86ceccceaa364c425bf2f3c40 | 3509c578b55e9f0154b2eeb646cac7de2473f4b9 | refs/heads/master | 2020-06-28T14:44:41.777644 | 2019-09-18T17:25:44 | 2019-09-18T17:25:44 | 200,258,896 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | s=input('Enter the word: ')
l=len(s)
print(s[0],l-2,s[l-1],sep='') if(l>10) else print(s)
| [
"noreply@github.com"
] | noreply@github.com |
c108208bc9db3256a41319f9146f6ee3f21eaa0b | 34edc8b21515817caa87aedeb07b87515c33ebd0 | /basket/migrations/0020_auto_20180522_2041.py | 2124680ee2ea695b05fe1a93fcea0fe017647183 | [] | no_license | waelbeso/Ftrina | b20c277030132b195af621d9e739040d42943a9b | 449868f8c095bb920a2aef2e2dc4cb80de8ec82a | refs/heads/master | 2022-09-06T16:34:40.391965 | 2018-05-27T12:19:05 | 2018-05-27T12:19:05 | 134,336,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-05-22 20:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('basket', '0019_auto_20180522_2035'),
]
operations = [
migrations.AlterField(
model_name='checkout',
name='currency',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='checkout',
name='rate',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=19, null=True),
),
]
| [
"waelabbas@live.com"
] | waelabbas@live.com |
0047d29abf94e16535d124ff7d4ef10ea63e1275 | 0bbddea89877506c12b62137ed77ff47d1bb2f05 | /manage.py | 78234b70a42c1efda4fafcd4a46620f18889214c | [] | no_license | TheHene/owlt | 93fa9df61fccbc5997f433ca1feeae157cf9bbab | c7f4eff9fca87f0116813b04311b7c1bf3052ef3 | refs/heads/master | 2020-03-18T15:24:34.458671 | 2018-05-29T20:32:11 | 2018-05-29T20:32:11 | 134,905,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "owlp.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"hendrik.setzer@hotmail.de"
] | hendrik.setzer@hotmail.de |
e9d942313fddaf59f9f83f27c7bc26137e53b214 | 8cd4e38f9cc69f384175458eb308c56a591a782d | /Pulso político por provincias en Ecuador/Script usados/sucumbiosAA.py | 38c0422e6a3dd233455ea3c5eb1ad60e57332c11 | [] | no_license | Jonathan141999/proyecto-analisis-de-datos-JEGY | cb328b57cd51b56c9d13de5da05832770f663437 | 42391badeb510e1a484774a3fd229abe4d5d55f3 | refs/heads/master | 2023-03-23T17:38:14.733267 | 2021-03-19T19:47:44 | 2021-03-19T19:47:44 | 348,162,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,506 | py |
import sys
import couchdb
from tweepy import Stream #autentica las credenciales
from tweepy import OAuthHandler #
from tweepy.streaming import StreamListener #hereda la clase
import json
###API ########################
ckey = "X1cHhuKlzjY6eUQ2J6i4MuVUR"
csecret = "kEd5yq95noDC726oYSGYSXkuF4S71kj2IFauS3qmOGIDQHJ7XC"
atoken = "1339966381102665730-uXm8t9BvPwJk6z2JYjIsD0f6RT3f3i"
asecret = "3AxhyMbXhL43J4cUFssbZAFpIIqS92tqOjSYLJbC4jqIi"
#####################################
class listener(StreamListener):
def on_data(self, data):
dictTweet = json.loads(data)
try:
dictTweet["_id"] = str(dictTweet['id'])
doc = db.save(dictTweet)
print ("SAVED" + str(doc) +"=>" + str(data))
except:
print ("Already exists")
pass
return True
def on_error(self, status):
print (status)
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream = Stream(auth, listener())
'''========couchdb'=========='''
server = couchdb.Server('http://Jonathan14:Familia141999@localhost:5984/') #('http://115.146.93.184:5984/')
try:
db = server.create('sucumbiosaa')
except:
db = server('sucumbiosaa')
'''===============LOCATIONS=============='''
#Filtro por geolocalización
twitterStream.filter(locations=[-77.9795,-0.6558,-75.2233,0.6621])
#Filtro por palabras
twitterStream.filter(track=['Elecciones Ecuador 2021','Andres Arauz','CENTRO'])
| [
"jona94alquinga@gmail.com"
] | jona94alquinga@gmail.com |
cb8a5a7e749c620eccbb32e9dae70307ab887439 | 6d5a85bb2347f06d5d23ced47fdbcd3966ec9db3 | /resources/tests/test_resource_api.py | 47d0857c50ab918096a48be26e691585124a7025 | [
"MIT"
] | permissive | City-of-Lappeenranta/Respa | e3224a469fb175b22f79cad4f4f96fe658f892bb | 6559359e4ad800d582bb19b9a421d78500b272e8 | refs/heads/master | 2022-12-12T12:10:14.323868 | 2022-04-04T06:43:21 | 2022-04-04T06:43:21 | 165,219,424 | 0 | 0 | MIT | 2022-12-07T23:55:22 | 2019-01-11T09:45:47 | Python | UTF-8 | Python | false | false | 22,999 | py | import datetime
import pytest
from copy import deepcopy
from django.core.urlresolvers import reverse
from django.contrib.gis.geos import Point
from django.utils import timezone
from freezegun import freeze_time
from guardian.shortcuts import assign_perm, remove_perm
from resources.models import (Day, Equipment, Period, Reservation, ReservationMetadataSet, ResourceEquipment,
ResourceType)
from .utils import assert_response_objects, check_only_safe_methods_allowed
@pytest.fixture
def list_url():
return reverse('resource-list')
def get_detail_url(resource):
return '%s%s/' % (reverse('resource-list'), resource.pk)
@pytest.mark.django_db
@pytest.fixture
def detail_url(resource_in_unit):
return reverse('resource-detail', kwargs={'pk': resource_in_unit.pk})
def _check_permissions_dict(api_client, resource, is_admin, can_make_reservations,
                            can_ignore_opening_hours):
    """
    Assert that the resource detail endpoint reports exactly the expected
    ``user_permissions`` values for the user authenticated on ``api_client``.
    """
    detail_url = reverse('resource-detail', kwargs={'pk': resource.pk})
    response = api_client.get(detail_url)
    assert response.status_code == 200
    # Equality against a literal dict checks both the exact key set and the
    # individual values in one go.
    assert response.data['user_permissions'] == {
        'is_admin': is_admin,
        'can_make_reservations': can_make_reservations,
        'can_ignore_opening_hours': can_ignore_opening_hours,
    }
@pytest.mark.django_db
def test_disallowed_methods(all_user_types_api_client, list_url, detail_url):
    """
    Tests that only safe methods are allowed to unit list and detail endpoints.
    """
    # Delegates the HTTP-method sweep to the shared helper imported from
    # .utils; it is run for every user type via the parametrized client.
    check_only_safe_methods_allowed(all_user_types_api_client, (list_url, detail_url))
@pytest.mark.django_db
def test_user_permissions_in_resource_endpoint(api_client, resource_in_unit, user, group):
    """
    Tests that resource endpoint returns a permissions dict with correct values.
    """
    api_client.force_authenticate(user=user)

    # normal user, reservable = True
    _check_permissions_dict(api_client, resource_in_unit, is_admin=False,
                            can_make_reservations=True, can_ignore_opening_hours=False)

    # normal user, reservable = False
    resource_in_unit.reservable = False
    resource_in_unit.save()
    _check_permissions_dict(api_client, resource_in_unit, is_admin=False,
                            can_make_reservations=False, can_ignore_opening_hours=False)

    # staff member, reservable = False: staff is admin and may both reserve
    # and ignore opening hours regardless of the reservable flag
    user.is_staff = True
    user.save()
    api_client.force_authenticate(user=user)
    _check_permissions_dict(api_client, resource_in_unit, is_admin=True,
                            can_make_reservations=True, can_ignore_opening_hours=True)
    user.is_staff = False
    user.save()

    # user has explicit permission to make reservation
    # (unit-level object permission granted via the user's group)
    user.groups.add(group)
    assign_perm('unit:can_make_reservations', group, resource_in_unit.unit)
    api_client.force_authenticate(user=user)
    _check_permissions_dict(api_client, resource_in_unit, is_admin=False,
                            can_make_reservations=True, can_ignore_opening_hours=False)
    remove_perm('unit:can_make_reservations', group, resource_in_unit.unit)

    # the same permission granted on a resource group the resource belongs to
    resource_group = resource_in_unit.groups.create(name='rg1')
    assign_perm('group:can_make_reservations', group, resource_group)
    api_client.force_authenticate(user=user)
    _check_permissions_dict(api_client, resource_in_unit, is_admin=False,
                            can_make_reservations=True, can_ignore_opening_hours=False)

    # opening hours can also be ignored via a unit-level permission
    assign_perm('unit:can_ignore_opening_hours', group, resource_in_unit.unit)
    api_client.force_authenticate(user=user)
    _check_permissions_dict(api_client, resource_in_unit, is_admin=False,
                            can_make_reservations=True, can_ignore_opening_hours=True)
@pytest.mark.django_db
def test_non_public_resource_visibility(api_client, resource_in_unit, user):
    """
    Tests that non-public resources are not returned for non-staff.
    """
    resource_in_unit.public = False
    resource_in_unit.save()

    # The detail endpoint pretends the resource does not exist (404, not 403).
    url = reverse('resource-detail', kwargs={'pk': resource_in_unit.pk})
    response = api_client.get(url)
    assert response.status_code == 404

    # Unauthenticated
    url = reverse('resource-list')
    response = api_client.get(url)
    assert response.status_code == 200
    assert response.data['count'] == 0

    # Authenticated as non-staff
    api_client.force_authenticate(user=user)
    response = api_client.get(url)
    assert response.status_code == 200
    assert response.data['count'] == 0

    # Authenticated as staff: the non-public resource becomes visible.
    # NOTE(review): this appears to rely on force_authenticate holding a
    # reference to ``user`` so that flipping is_staff takes effect without
    # re-authenticating -- confirm if the client behavior changes.
    user.is_staff = True
    user.save()
    response = api_client.get(url)
    assert response.status_code == 200
    assert response.data['count'] == 1

    url = reverse('resource-detail', kwargs={'pk': resource_in_unit.pk})
    response = api_client.get(url)
    assert response.status_code == 200
@pytest.mark.django_db
def test_api_resource_geo_queries(api_client, resource_in_unit):
    """Distance annotation and lat/lon/distance filtering of resources."""
    # Re-saving the same model instance under a new pk clones it, giving
    # four resources in total: the original (no location), r2 at (24,60),
    # r3 at (25,60) and r4 with no own location but in a unit at (24,61).
    id_base = resource_in_unit.pk
    res = resource_in_unit
    res.location = None
    res.save()
    res.pk = id_base + "r2"
    res.location = Point(24, 60, srid=4326)
    res.save()
    res.pk = id_base + "r3"
    res.location = Point(25, 60, srid=4326)
    res.save()

    # Clone the unit the same way and give only the clone a location.
    unit = resource_in_unit.unit
    unit.location = None
    unit.save()
    unit.pk = unit.pk + "u2"
    unit.location = Point(24, 61, srid=4326)
    unit.save()

    res.pk = id_base + "r4"
    res.location = None
    res.unit = unit
    res.save()

    # Without lat/lon no distances are annotated in the response.
    base_url = reverse('resource-list')
    response = api_client.get(base_url)
    assert response.data['count'] == 4
    results = response.data['results']
    assert 'distance' not in results[0]

    # With lat/lon the results come back ordered by distance; resources with
    # no resolvable location sort last and carry no 'distance' key.
    url = base_url + '?lat=60&lon=24'
    response = api_client.get(url)
    assert response.data['count'] == 4
    results = response.data['results']
    assert results[0]['id'].endswith('r2')
    assert results[0]['distance'] == 0
    assert results[1]['id'].endswith('r3')
    assert results[1]['distance'] == 55597  # ~1 degree of longitude at lat 60
    assert results[2]['distance'] == 111195  # ~1 degree of latitude
    assert 'distance' not in results[3]

    # Check that location is inherited from the resource's unit
    url = base_url + '?lat=61&lon=25&distance=100000'
    response = api_client.get(url)
    assert response.data['count'] == 1
    results = response.data['results']
    assert results[0]['id'].endswith('r4')
    assert results[0]['distance'] == 53907
@pytest.mark.django_db
def test_resource_favorite(staff_api_client, staff_user, resource_in_unit):
    """Favoriting returns 201 on first POST and 304 when already favorited."""
    url = get_detail_url(resource_in_unit) + 'favorite/'

    first = staff_api_client.post(url)
    assert first.status_code == 201
    assert resource_in_unit in staff_user.favorite_resources.all()

    again = staff_api_client.post(url)
    assert again.status_code == 304
    assert resource_in_unit in staff_user.favorite_resources.all()
@pytest.mark.django_db
def test_resource_favorite_non_official(user_api_client, user, resource_in_unit):
    """A regular (non-staff) user can favorite a resource the same way."""
    url = get_detail_url(resource_in_unit) + 'favorite/'

    first = user_api_client.post(url)
    assert first.status_code == 201
    assert resource_in_unit in user.favorite_resources.all()

    again = user_api_client.post(url)
    assert again.status_code == 304
    assert resource_in_unit in user.favorite_resources.all()
@pytest.mark.django_db
def test_resource_unfavorite(staff_api_client, staff_user, resource_in_unit):
    """Unfavoriting returns 304 when not favorited and 204 once it was."""
    url = get_detail_url(resource_in_unit) + 'unfavorite/'

    not_favorited = staff_api_client.post(url)
    assert not_favorited.status_code == 304

    staff_user.favorite_resources.add(resource_in_unit)
    removed = staff_api_client.post(url)
    assert removed.status_code == 204
    assert resource_in_unit not in staff_user.favorite_resources.all()
@pytest.mark.django_db
def test_resource_unfavorite_non_official(user_api_client, user, resource_in_unit):
    """A regular (non-staff) user can unfavorite a resource the same way."""
    url = get_detail_url(resource_in_unit) + 'unfavorite/'

    not_favorited = user_api_client.post(url)
    assert not_favorited.status_code == 304

    user.favorite_resources.add(resource_in_unit)
    removed = user_api_client.post(url)
    assert removed.status_code == 204
    assert resource_in_unit not in user.favorite_resources.all()
@pytest.mark.django_db
def test_is_favorite_field(api_client, staff_api_client, staff_user, resource_in_unit):
    """is_favorite reflects only the requesting user's own favorites."""
    url = get_detail_url(resource_in_unit)

    anonymous_response = api_client.get(url)
    assert anonymous_response.status_code == 200
    assert anonymous_response.data['is_favorite'] is False

    staff_response = staff_api_client.get(url)
    assert staff_response.status_code == 200
    assert staff_response.data['is_favorite'] is False

    staff_user.favorite_resources.add(resource_in_unit)
    staff_response = staff_api_client.get(url)
    assert staff_response.status_code == 200
    assert staff_response.data['is_favorite'] is True
@pytest.mark.django_db
def test_filtering_by_is_favorite(list_url, api_client, staff_api_client, staff_user, resource_in_unit,
                                  resource_in_unit2):
    """?is_favorite=true/false splits resources by the requesting user's favorites."""
    staff_user.favorite_resources.add(resource_in_unit)

    # anonymous users don't need the filter atm, just check that using the filter doesn't cause any errors
    response = api_client.get(list_url + '?is_favorite=true')
    assert response.status_code == 200
    assert response.data['count'] == 0

    response = staff_api_client.get(list_url + '?is_favorite=true')
    assert response.status_code == 200
    assert response.data['count'] == 1
    assert response.data['results'][0]['id'] == resource_in_unit.id

    response = staff_api_client.get(list_url + '?is_favorite=false')
    assert response.status_code == 200
    assert response.data['count'] == 1
    assert response.data['results'][0]['id'] == resource_in_unit2.id
@pytest.mark.django_db
def test_api_resource_terms_of_use(api_client, resource_in_unit, detail_url):
    """Generic and specific terms of use are serialized per language."""
    response = api_client.get(detail_url)
    assert response.status_code == 200

    generic_terms = response.data['generic_terms']
    assert set(generic_terms) == {'fi', 'en'}
    assert generic_terms['fi'] == 'kaikki on kielletty'
    assert generic_terms['en'] == 'everything is forbidden'

    specific_terms = response.data['specific_terms']
    assert set(specific_terms) == {'fi', 'en'}
    assert specific_terms['fi'] == 'spesifiset käyttöehdot'
    assert specific_terms['en'] == 'specific terms of use'
@pytest.mark.django_db
def test_price_per_hour_fields(api_client, resource_in_unit, detail_url):
    """min/max price per hour serialize as a decimal string and None."""
    resource_in_unit.min_price_per_hour = '5.05'
    resource_in_unit.max_price_per_hour = None
    resource_in_unit.save()

    response = api_client.get(detail_url)
    assert response.status_code == 200

    data = response.data
    assert data['min_price_per_hour'] == '5.05'
    assert data['max_price_per_hour'] is None
@freeze_time('2016-10-25')
def test_reservable_in_advance_fields(api_client, resource_in_unit, test_unit, detail_url):
    """Resolution of reservable_days_in_advance / reservable_before:
    a resource-level setting overrides the unit's; both default to None."""
    response = api_client.get(detail_url)
    assert response.status_code == 200
    # the unit and the resource both have days None, so expect None in the fields
    assert response.data['reservable_days_in_advance'] is None
    assert response.data['reservable_before'] is None

    test_unit.reservable_days_in_advance = 5
    test_unit.save()
    response = api_client.get(detail_url)
    assert response.status_code == 200
    # only the unit has days set, expect those on the resource
    assert response.data['reservable_days_in_advance'] == 5
    # reservable_before is midnight after the last reservable day (days + 1)
    before = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) + datetime.timedelta(days=6)
    assert response.data['reservable_before'] == before

    resource_in_unit.reservable_days_in_advance = 10
    resource_in_unit.save()
    response = api_client.get(detail_url)
    assert response.status_code == 200
    # both the unit and the resource have days set, expect the resource's days to override the unit's days
    assert response.data['reservable_days_in_advance'] == 10
    before = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0) + datetime.timedelta(days=11)
    assert response.data['reservable_before'] == before
@pytest.mark.django_db
def test_resource_group_filter(api_client, resource_in_unit, resource_in_unit2, resource_group, resource_group2,
                               list_url):
    """Resources can be filtered by one or several resource group identifiers."""
    # Clone resource_in_unit (clearing the pk makes save() insert a new row)
    # so that one resource belongs to no group at all.
    extra_resource = deepcopy(resource_in_unit)
    extra_resource.id = None
    extra_resource.save()

    # Without the filter every resource is returned.
    response = api_client.get(list_url)
    assert response.status_code == 200
    assert len(response.data['results']) == 3

    # A single group identifier.
    response = api_client.get('{}?resource_group={}'.format(list_url, resource_group.identifier))
    assert response.status_code == 200
    assert set(r['id'] for r in response.data['results']) == {resource_in_unit.id}

    # Multiple comma-separated group identifiers.
    response = api_client.get(
        '{}?resource_group={},{}'.format(list_url, resource_group.identifier, resource_group2.identifier)
    )
    assert response.status_code == 200
    assert set(r['id'] for r in response.data['results']) == {resource_in_unit.id, resource_in_unit2.id}
@pytest.mark.django_db
def test_reservation_extra_fields(api_client, resource_in_unit):
    """Supported/required reservation extra fields mirror the metadata set."""
    metadata_set = ReservationMetadataSet.objects.get(name='default')
    resource_in_unit.reservation_metadata_set = metadata_set
    resource_in_unit.save(update_fields=('reservation_metadata_set',))

    response = api_client.get(get_detail_url(resource_in_unit))
    assert response.status_code == 200

    expected_supported = set(metadata_set.supported_fields.values_list('field_name', flat=True))
    assert set(response.data['supported_reservation_extra_fields']) == expected_supported

    expected_required = set(metadata_set.required_fields.values_list('field_name', flat=True))
    assert set(response.data['required_reservation_extra_fields']) == expected_required
@pytest.mark.django_db
def test_resource_type_filter(api_client, resource_in_unit, resource_in_unit2, resource_in_unit3, list_url):
    """Resources can be filtered by one or several type ids."""
    space_type_1 = ResourceType.objects.create(name='type_1', main_type='space')
    space_type_2 = ResourceType.objects.create(name='type_2', main_type='space')
    other_type = ResourceType.objects.create(name='extra_type', main_type='space')

    resource_in_unit.type = space_type_1
    resource_in_unit.save()
    resource_in_unit2.type = space_type_2
    resource_in_unit2.save()
    resource_in_unit3.type = other_type
    resource_in_unit3.save()

    # A single type id matches only resources of that type.
    single = api_client.get('{}?type={}'.format(list_url, space_type_1.id))
    assert single.status_code == 200
    assert {resource['id'] for resource in single.data['results']} == {resource_in_unit.id}

    # Comma-separated ids are combined.
    combined = api_client.get('{}?type={},{}'.format(list_url, space_type_1.id, space_type_2.id))
    assert combined.status_code == 200
    assert {resource['id'] for resource in combined.data['results']} == {resource_in_unit.id, resource_in_unit2.id}
@pytest.mark.django_db
def test_resource_equipment_filter(api_client, resource_in_unit, resource_in_unit2, resource_in_unit3,
                                   equipment_category, resource_equipment, list_url):
    """Resources can be filtered by equipment id(s) via ?equipment=<id>[,<id>]."""
    equipment_1 = Equipment.objects.create(
        name='equipment 1',
        category=equipment_category,
    )
    ResourceEquipment.objects.create(
        equipment=equipment_1,
        resource=resource_in_unit,
        description='resource equipment 1',
    )
    equipment_2 = Equipment.objects.create(
        name='equipment 2',
        category=equipment_category,
    )
    ResourceEquipment.objects.create(
        equipment=equipment_2,
        resource=resource_in_unit2,
        description='resource equipment 2',
    )
    # FIX: direct assignment to a reverse relation
    # (``resource_in_unit3.resource_equipment = [...]``) was deprecated in
    # Django 1.10 and removed in 2.0; use the related manager's set().
    resource_in_unit3.resource_equipment.set([resource_equipment])

    # A single equipment id matches only the resource that has it.
    response = api_client.get(list_url + '?equipment=%s' % equipment_1.id)
    assert response.status_code == 200
    assert {resource['id'] for resource in response.data['results']} == {resource_in_unit.id}

    # Comma-separated ids are combined.
    response = api_client.get(list_url + '?equipment=%s,%s' % (equipment_1.id, equipment_2.id))
    assert response.status_code == 200
    assert {resource['id'] for resource in response.data['results']} == {resource_in_unit.id, resource_in_unit2.id}
@pytest.mark.parametrize('filtering, expected_resource_indexes', (
    ({}, [0, 1]),
    ({'available_between': '2115-04-08T08:00:00+02:00,2115-04-08T10:00:00+02:00'}, [0, 1]),
    ({'available_between': '2115-04-08T08:00:00+02:00,2115-04-08T10:00:01+02:00'}, [1]),
    ({'available_between': '2115-04-08T10:59:59+02:00,2115-04-08T12:00:00+02:00'}, [1]),
    ({'available_between': '2115-04-08T10:59:59+02:00,2115-04-08T12:00:01+02:00'}, []),
    ({'available_between': '2115-04-08T13:00:00+02:00,2115-04-08T18:00:00+02:00'}, [0, 1]),
))
@pytest.mark.django_db
def test_resource_available_between_filter_reservations(user_api_client, list_url, user, resource_in_unit,
                                                        resource_in_unit2, filtering, expected_resource_indexes):
    """available_between excludes resources with an overlapping reservation.

    resource_in_unit is reserved 10:00-11:00 and resource_in_unit2
    12:00-13:00; the parametrized ranges probe the overlap boundaries
    second by second.
    """
    resources = (resource_in_unit, resource_in_unit2)
    Reservation.objects.create(
        resource=resource_in_unit,
        begin='2115-04-08T10:00:00+02:00',
        end='2115-04-08T11:00:00+02:00',
        user=user,
    )
    Reservation.objects.create(
        resource=resource_in_unit2,
        begin='2115-04-08T12:00:00+02:00',
        end='2115-04-08T13:00:00+02:00',
        user=user,
    )

    # set resources open practically the whole day so that opening hours
    # don't intervene in this test
    for resource in resources:
        p1 = Period.objects.create(start=datetime.date(2115, 4, 1),
                                   end=datetime.date(2115, 4, 30),
                                   resource=resource)
        for weekday in range(0, 7):
            Day.objects.create(period=p1, weekday=weekday,
                               opens=datetime.time(0, 0),
                               closes=datetime.time(23, 59))
        resource.update_opening_hours()

    response = user_api_client.get(list_url, filtering)
    assert response.status_code == 200
    assert_response_objects(response, [resources[index] for index in expected_resource_indexes])
@pytest.mark.parametrize('filtering, expected_resource_indexes', (
    ({}, [0, 1]),
    ({'available_between': '2115-04-08T06:00:00+02:00,2115-04-08T07:00:00+02:00'}, []),
    ({'available_between': '2115-04-08T07:59:59+02:00,2115-04-08T16:00:00+02:00'}, []),
    ({'available_between': '2115-04-08T08:00:00+02:00,2115-04-08T16:00:00+02:00'}, [0]),
    ({'available_between': '2115-04-08T08:00:00+02:00,2115-04-08T16:00:01+02:00'}, []),
    ({'available_between': '2115-04-08T12:00:00+02:00,2115-04-08T14:00:00+02:00'}, [0, 1]),
    ({'available_between': '2115-04-14T12:00:00+02:00,2115-04-14T14:00:00+02:00'}, [0]),
))
@pytest.mark.django_db
def test_resource_available_between_filter_opening_hours(user_api_client, list_url, resource_in_unit, resource_in_unit2,
                                                         filtering, expected_resource_indexes):
    """available_between excludes resources closed for part of the range.

    Resource 0 is open 08-16 every weekday; resource 1 is open 12-14 but
    only for weekdays 0-5 (weekday 6 gets no Day row, so it stays closed --
    the 2115-04-14 case exercises that closed day).
    """
    resources = (resource_in_unit, resource_in_unit2)

    p1 = Period.objects.create(start=datetime.date(2115, 4, 1),
                               end=datetime.date(2115, 4, 30),
                               resource=resource_in_unit)
    for weekday in range(0, 7):
        Day.objects.create(period=p1, weekday=weekday,
                           opens=datetime.time(8, 0),
                           closes=datetime.time(16, 0))

    # Note: range(0, 6) deliberately skips the last weekday here.
    p1 = Period.objects.create(start=datetime.date(2115, 4, 1),
                               end=datetime.date(2115, 4, 30),
                               resource=resource_in_unit2)
    for weekday in range(0, 6):
        Day.objects.create(period=p1, weekday=weekday,
                           opens=datetime.time(12, 0),
                           closes=datetime.time(14, 0))

    resource_in_unit.update_opening_hours()
    resource_in_unit2.update_opening_hours()

    response = user_api_client.get(list_url, filtering)
    assert response.status_code == 200
    assert_response_objects(response, [resources[index] for index in expected_resource_indexes])
@pytest.mark.django_db
def test_resource_available_between_filter_constraints(user_api_client, list_url, resource_in_unit):
    """available_between must be two comma-separated timestamps on one day."""
    single_value = {'available_between': '2115-04-08T00:00:00+02:00'}
    response = user_api_client.get(list_url, single_value)
    assert response.status_code == 400
    assert 'available_between takes exactly two comma-separated values.' in str(response.data)

    different_days = {'available_between': '2115-04-08T00:00:00+02:00,2115-04-09T00:00:00+02:00'}
    response = user_api_client.get(list_url, different_days)
    assert response.status_code == 400
    assert 'available_between timestamps must be on the same day.' in str(response.data)
@pytest.mark.django_db
def test_resource_available_between_considers_inactive_reservations(user_api_client, user, list_url, resource_in_unit):
    """Only active reservation states block availability: confirmed and
    requested reservations do, cancelled and denied ones do not."""
    # Keep the resource open around the clock so only reservations matter.
    p1 = Period.objects.create(start=datetime.date(2115, 4, 1),
                               end=datetime.date(2115, 4, 30),
                               resource=resource_in_unit)
    for weekday in range(0, 7):
        Day.objects.create(period=p1, weekday=weekday,
                           opens=datetime.time(0, 0),
                           closes=datetime.time(23, 59))
    resource_in_unit.update_opening_hours()

    # First no reservations
    params = {'available_between': '2115-04-08T08:00:00+02:00,2115-04-08T16:00:00+02:00'}
    response = user_api_client.get(list_url, params)
    assert response.status_code == 200
    assert_response_objects(response, [resource_in_unit])

    # One confirmed reservation
    rv = Reservation.objects.create(
        resource=resource_in_unit,
        begin='2115-04-08T10:00:00+02:00',
        end='2115-04-08T11:00:00+02:00',
        user=user,
    )
    # Reload the reservation from database to make sure begin and end are
    # datetimes (not strings).
    rv = Reservation.objects.get(id=rv.id)
    params = {'available_between': '2115-04-08T08:00:00+02:00,2115-04-08T16:00:00+02:00'}
    response = user_api_client.get(list_url, params)
    assert response.status_code == 200
    assert_response_objects(response, [])

    # Cancelled reservations should be ignored
    rv.set_state(Reservation.CANCELLED, user)
    response = user_api_client.get(list_url, params)
    assert response.status_code == 200
    assert_response_objects(response, [resource_in_unit])

    # Requested should be taken into account
    rv.set_state(Reservation.REQUESTED, user)
    response = user_api_client.get(list_url, params)
    assert response.status_code == 200
    assert_response_objects(response, [])

    # Denied ignored
    rv.set_state(Reservation.DENIED, user)
    response = user_api_client.get(list_url, params)
    assert response.status_code == 200
    assert_response_objects(response, [resource_in_unit])
| [
"andrey.aleksandrov@octo3.fi"
] | andrey.aleksandrov@octo3.fi |
4a83f922a050991ffa8ce57cd3a8290bb54b295d | 95501489b913ca04f3756edff53ab1dd1eb22595 | /keras_contrib/backend/theano_backend.py | 8195937327b5621395fd14643009ac68e04c1c79 | [
"MIT"
] | permissive | mustgoplay/keras-contrib | 5c6965c923e3a579fbac3fc62b89445623c38bd8 | d1f0eed42441ec1e83bbf6b2d005588bab54c0f3 | refs/heads/master | 2020-05-30T03:18:02.649351 | 2017-02-21T03:16:01 | 2017-02-21T03:16:01 | 82,624,770 | 0 | 0 | null | 2017-02-21T02:06:22 | 2017-02-21T02:06:21 | null | UTF-8 | Python | false | false | 4,543 | py | import theano
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.sandbox.neighbours import images2neibs
from theano.tensor.signal import pool
from theano.tensor.nnet import conv3d2d
from theano.printing import Print
try:
import theano.sparse as th_sparse_module
except ImportError:
th_sparse_module = None
try:
from theano.tensor.nnet.nnet import softsign as T_softsign
except ImportError:
from theano.sandbox.softsign import softsign as T_softsign
from keras import backend as K
from keras.backend import theano_backend as KTH
import inspect
import numpy as np
from keras.backend.common import _FLOATX, floatx, _EPSILON, image_dim_ordering
from keras.backend.theano_backend import _preprocess_conv3d_input
from keras.backend.theano_backend import _preprocess_conv3d_kernel
from keras.backend.theano_backend import _preprocess_conv3d_filter_shape
from keras.backend.theano_backend import _preprocess_border_mode
from keras.backend.theano_backend import _postprocess_conv3d_output
py_all = all
def deconv3d(x, kernel, output_shape, strides=(1, 1, 1),
             border_mode='valid',
             dim_ordering='default',
             image_shape=None, filter_shape=None):
    '''3D deconvolution (transposed convolution).

    # Arguments
        x: input tensor.
        kernel: kernel tensor.
        output_shape: desired dimensions of output.
        strides: strides tuple.
        border_mode: string, "same" or "valid".
        dim_ordering: "tf" or "th".
            Whether to use Theano or TensorFlow dimension ordering
            in inputs/kernels/outputs.
        image_shape: unused here; kept for API parity with conv3d.
        filter_shape: optional kernel shape passed to the gradient op.

    # Returns
        The transposed-convolution output, converted back to ``dim_ordering``.
    '''
    flip_filters = False
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        # Reorder the requested output shape from NDHWC to NCDHW to match
        # the internal 'th' layout used below.
        output_shape = (output_shape[0], output_shape[4], output_shape[1],
                        output_shape[2], output_shape[3])

    x = _preprocess_conv3d_input(x, dim_ordering)
    kernel = _preprocess_conv3d_kernel(kernel, dim_ordering)
    # Swap the first two kernel axes; conv3d_grad_wrt_inputs expects the
    # transposed channel layout relative to a forward convolution.
    kernel = kernel.dimshuffle((1, 0, 2, 3, 4))
    th_border_mode = _preprocess_border_mode(border_mode)

    if hasattr(kernel, '_keras_shape'):
        kernel_shape = kernel._keras_shape
    else:
        # Will only work if `kernel` is a shared variable.
        kernel_shape = kernel.eval().shape

    filter_shape = _preprocess_conv3d_filter_shape(dim_ordering, filter_shape)
    # Mirror the axis swap performed on the kernel above.
    filter_shape = tuple(filter_shape[i] for i in (1, 0, 2, 3, 4))

    conv_out = T.nnet.abstract_conv.conv3d_grad_wrt_inputs(
        x, kernel, output_shape,
        filter_shape=filter_shape,
        border_mode=th_border_mode,
        subsample=strides,
        filter_flip=not flip_filters)

    conv_out = _postprocess_conv3d_output(conv_out, x, border_mode,
                                          kernel_shape, strides, dim_ordering)
    return conv_out
def extract_image_patches(X, ksizes, strides, border_mode="valid", dim_ordering="th"):
    '''
    Extract the patches from an image

    Parameters
    ----------
    X : The input image
    ksizes : 2-d tuple with the kernel size
    strides : 2-d tuple with the strides size
    border_mode : 'same' or 'valid'
    dim_ordering : 'tf' or 'th'

    Returns
    -------
    The (k_w,k_h) patches extracted
        TF ==> (batch_size,w,h,k_w,k_h,c)
        TH ==> (batch_size,w,h,c,k_w,k_h)
    '''
    patch_size = ksizes[1]
    if border_mode == "same":
        border_mode = "ignore_borders"
    if dim_ordering == "tf":
        # Work internally in 'th' (channels-first) layout.
        X = KTH.permute_dimensions(X, [0, 3, 1, 2])
    # Thanks to https://github.com/awentzonline for the help!
    # CLEANUP: the original computed KTH.shape(X) twice and unpacked
    # c/w/h that were never used; compute the symbolic shape once.
    xs = KTH.shape(X)
    batch = xs[0]
    num_channels = xs[-3]
    # NOTE(review): both counts use strides[1]; for non-square strides this
    # looks like it should be strides[0] for rows -- kept as-is to preserve
    # behavior, confirm before changing.
    num_rows = 1 + (xs[-2] - patch_size) // strides[1]
    num_cols = 1 + (xs[-1] - patch_size) // strides[1]
    patches = images2neibs(X, ksizes, strides, border_mode)
    # Theano is sorting by channel
    patches = KTH.reshape(patches, (batch, num_channels, num_rows * num_cols, patch_size, patch_size))
    patches = KTH.permute_dimensions(patches, (0, 2, 1, 3, 4))
    # arrange in a 2d-grid (rows, cols, channels, px, py)
    patches = KTH.reshape(patches, (batch, num_rows, num_cols, num_channels, patch_size, patch_size))
    if dim_ordering == "tf":
        patches = KTH.permute_dimensions(patches, [0, 1, 2, 4, 5, 3])
    return patches
| [
"noreply@github.com"
] | noreply@github.com |
1c9832b0b85c1b52d6843f79ec2dcb1fa84e81b1 | 68ab00c77312827e522151e6e9f2fff166e85b9c | /mypy_boto3_builder/structures/collection.py | 41b5f2550bb9a33bdb6cd53825ecc814e7734f48 | [
"MIT"
] | permissive | pyto86pri/mypy_boto3_builder | 2cdfb3ed55ea1ff23cdffd5a9ee5400e71562450 | e8132dc4632430e0abd4cd330af51a8b1c82028f | refs/heads/master | 2023-01-25T04:06:11.174287 | 2020-12-03T23:39:06 | 2020-12-03T23:39:06 | 319,283,736 | 0 | 0 | MIT | 2020-12-07T10:29:52 | 2020-12-07T10:29:51 | null | UTF-8 | Python | false | false | 1,280 | py | """
Boto3 ServiceResource or Resource collection.
"""
from typing import Set
from mypy_boto3_builder.import_helpers.import_string import ImportString
from mypy_boto3_builder.structures.class_record import ClassRecord
from mypy_boto3_builder.type_annotations.external_import import ExternalImport
from mypy_boto3_builder.type_annotations.fake_annotation import FakeAnnotation
class Collection(ClassRecord):
    """
    Boto3 ServiceResource or Resource collection.
    """

    def __init__(
        self,
        name: str,
        attribute_name: str,
        parent_name: str,
        type_annotation: FakeAnnotation,
        docstring: str = "",
    ):
        # Every collection class inherits from boto3's ResourceCollection.
        resource_collection_base = ExternalImport(
            source=ImportString("boto3", "resources", "collection"),
            name="ResourceCollection",
        )
        super().__init__(
            name=name,
            use_alias=True,
            docstring=docstring,
            bases=[resource_collection_base],
        )
        self.attribute_name = attribute_name
        self.parent_name = parent_name
        self.type_annotation = type_annotation

    def get_types(self) -> Set[FakeAnnotation]:
        """Return all type annotations used by this class and its item type."""
        collected = super().get_types()
        collected.update(self.type_annotation.get_types())
        return collected
| [
"volshebnyi@gmail.com"
] | volshebnyi@gmail.com |
5390d4b1ceb14885cc274564814a2e976a44a68f | b7716089291b9f915c326ad43b6df50e286860b9 | /nasa/__init__.py | 2a2ba71fe5345a2a0c07dd7b0015a295b5d71a4b | [
"MIT"
] | permissive | saturnfive050/aio-nasa | bf20110e3a26f9239dba097ded288e82339c6746 | 62fc6dcafb0417e2ec79709fd46d8500652d3ae5 | refs/heads/master | 2022-11-16T03:27:38.445440 | 2020-07-13T19:05:21 | 2020-07-13T19:05:21 | 279,712,917 | 0 | 0 | MIT | 2020-07-14T23:17:06 | 2020-07-14T23:17:06 | null | UTF-8 | Python | false | false | 150 | py |
from .nasa_client import NASA
# from .base_client import BaseClient
from .apod.apod_client import APOD
from .insight.insight_client import InSight
| [
"48489521+nwunderly@users.noreply.github.com"
] | 48489521+nwunderly@users.noreply.github.com |
c9272ab9ce25ad997cef0881159ae4bc9c13e0ef | e0d9844e123fa0706388814b9f29758258589487 | /torch/distributions/distribution.py | 33c48de39d91713f0e5c6f65b31cd98687ac56d8 | [] | no_license | pigpigman8686/seg | b5cf5261a5744e89ed5e5b145f60b0ccc3ba2c0c | 61c3816f7ba76243a872fe5c5fc0dede17026987 | refs/heads/master | 2023-04-10T22:22:35.035542 | 2021-04-22T06:24:36 | 2021-04-22T06:24:36 | 360,398,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,229 | py | import torch
import warnings
from torch.distributions import constraints
from torch.distributions.utils import lazy_property
from typing import Dict, Optional, Any
class Distribution(object):
r"""
Distribution is the abstract base class for probability distributions.
"""
has_rsample = False
has_enumerate_support = False
_validate_args = __debug__
@staticmethod
def set_default_validate_args(value):
"""
Sets whether validation is enabled or disabled.
The default behavior mimics Python's ``assert`` statement: validation
is on by default, but is disabled if Python is run in optimized mode
(via ``python -O``). Validation may be expensive, so you may want to
disable it once a model is working.
Args:
value (bool): Whether to enable validation.
"""
if value not in [True, False]:
raise ValueError
Distribution._validate_args = value
    def __init__(self, batch_shape=torch.Size(), event_shape=torch.Size(), validate_args=None):
        """
        Args:
            batch_shape (torch.Size): shape over which parameters are batched.
            event_shape (torch.Size): shape of a single sample (no batching).
            validate_args (bool, optional): overrides the class-level
                ``_validate_args`` default when not None.
        """
        self._batch_shape = batch_shape
        self._event_shape = event_shape
        if validate_args is not None:
            self._validate_args = validate_args
        if self._validate_args:
            # Check every declared constraint against the matching
            # constructor argument stored on the instance.
            try:
                arg_constraints = self.arg_constraints
            except NotImplementedError:
                arg_constraints = {}
                warnings.warn(f'{self.__class__} does not define `arg_constraints`. ' +
                              'Please set `arg_constraints = {}` or initialize the distribution ' +
                              'with `validate_args=False` to turn off validation.')
            for param, constraint in arg_constraints.items():
                if constraints.is_dependent(constraint):
                    continue  # skip constraints that cannot be checked
                if param not in self.__dict__ and isinstance(getattr(type(self), param), lazy_property):
                    continue  # skip checking lazily-constructed args
                if not constraint.check(getattr(self, param)).all():
                    raise ValueError("The parameter {} has invalid values".format(param))
        super(Distribution, self).__init__()
    def expand(self, batch_shape, _instance=None) -> 'Distribution':
        """
        Returns a new distribution instance (or populates an existing instance
        provided by a derived class) with batch dimensions expanded to
        `batch_shape`. This method calls :class:`~torch.Tensor.expand` on
        the distribution's parameters. As such, this does not allocate new
        memory for the expanded distribution instance. Additionally,
        this does not repeat any args checking or parameter broadcasting in
        `__init__.py`, when an instance is first created.

        Args:
            batch_shape (torch.Size): the desired expanded size.
            _instance: new instance provided by subclasses that
                need to override `.expand`.

        Returns:
            New distribution instance with batch dimensions expanded to
            `batch_size`.
        """
        # Abstract: concrete distributions implement expansion themselves.
        raise NotImplementedError
    @property
    def batch_shape(self) -> torch.Size:
        """
        Returns the shape over which parameters are batched.
        """
        return self._batch_shape
    @property
    def event_shape(self) -> torch.Size:
        """
        Returns the shape of a single sample (without batching).
        """
        return self._event_shape
    @property
    def arg_constraints(self) -> Dict[str, constraints.Constraint]:
        """
        Returns a dictionary from argument names to
        :class:`~torch.distributions.constraints.Constraint` objects that
        should be satisfied by each argument of this distribution. Args that
        are not tensors need not appear in this dict.
        """
        # Abstract: __init__ reads this mapping to validate constructor args.
        raise NotImplementedError
    @property
    def support(self) -> Optional[Any]:
        """
        Returns a :class:`~torch.distributions.constraints.Constraint` object
        representing this distribution's support.
        """
        # Abstract: concrete distributions define their support constraint.
        raise NotImplementedError
    @property
    def mean(self) -> torch.Tensor:
        """
        Returns the mean of the distribution.
        """
        raise NotImplementedError
    @property
    def variance(self) -> torch.Tensor:
        """
        Returns the variance of the distribution.
        """
        raise NotImplementedError
@property
def stddev(self):
"""
Returns the standard deviation of the distribution.
"""
return self.variance.sqrt()
    def sample(self, sample_shape=torch.Size()) -> torch.Tensor:
        """
        Generates a sample_shape shaped sample or sample_shape shaped batch of
        samples if the distribution parameters are batched.
        """
        # Default implementation: a reparameterized sample with gradient
        # tracking disabled. Subclasses without rsample() must override this.
        with torch.no_grad():
            return self.rsample(sample_shape)
    def rsample(self, sample_shape=torch.Size()) -> torch.Tensor:
        """
        Generates a sample_shape shaped reparameterized sample or sample_shape
        shaped batch of reparameterized samples if the distribution parameters
        are batched.
        """
        # Abstract: subclasses supporting the reparameterization trick
        # override this (cf. the class attribute ``has_rsample``).
        raise NotImplementedError
    def sample_n(self, n) -> torch.Tensor:
        """
        Generates n samples or n batches of samples if the distribution
        parameters are batched.

        .. deprecated:: use ``sample((n,))`` instead.
        """
        warnings.warn('sample_n will be deprecated. Use .sample((n,)) instead', UserWarning)
        return self.sample(torch.Size((n,)))
    def log_prob(self, value) -> torch.Tensor:
        """
        Returns the log of the probability density/mass function evaluated at
        `value`.

        Args:
            value (Tensor): the value(s) at which to evaluate the log
                probability.
        """
        raise NotImplementedError
    def cdf(self, value) -> torch.Tensor:
        """
        Returns the cumulative density/mass function evaluated at
        `value`.

        Args:
            value (Tensor): the value(s) at which to evaluate the CDF.
        """
        raise NotImplementedError
    def icdf(self, value) -> torch.Tensor:
        """
        Returns the inverse cumulative density/mass function evaluated at
        `value`.

        Args:
            value (Tensor): the probability value(s) at which to evaluate
                the inverse CDF.
        """
        raise NotImplementedError
    def enumerate_support(self, expand=True):
        """
        Returns tensor containing all values supported by a discrete
        distribution. The result will enumerate over dimension 0, so the shape
        of the result will be `(cardinality,) + batch_shape + event_shape`
        (where `event_shape = ()` for univariate distributions).
        Note that this enumerates over all batched tensors in lock-step
        `[[0, 0], [1, 1], ...]`. With `expand=False`, enumeration happens
        along dim 0, but with the remaining batch dimensions being
        singleton dimensions, `[[0], [1], ..`.
        To iterate over the full Cartesian product use
        `itertools.product(m.enumerate_support())`.
        Args:
            expand (bool): whether to expand the support over the
                batch dims to match the distribution's `batch_shape`.
        Returns:
            Tensor iterating over dimension 0.
        """
        # Abstract hook: only discrete distributions implement this.
        raise NotImplementedError
    def entropy(self):
        """
        Returns entropy of distribution, batched over batch_shape.
        Returns:
            Tensor of shape batch_shape.
        """
        # Abstract hook: concrete distributions must override.
        raise NotImplementedError
    def perplexity(self):
        """
        Returns perplexity of distribution, batched over batch_shape.
        Returns:
            Tensor of shape batch_shape.
        """
        # perplexity = exp(entropy); inherits entropy's batch shape.
        return torch.exp(self.entropy())
    def _extended_shape(self, sample_shape=torch.Size()):
        """
        Returns the size of the sample returned by the distribution, given
        a `sample_shape`. Note, that the batch and event shapes of a distribution
        instance are fixed at the time of construction. If this is empty, the
        returned shape is upcast to (1,).
        Args:
            sample_shape (torch.Size): the size of the sample to be drawn.
        """
        # Accept any iterable of ints; normalize to torch.Size first.
        if not isinstance(sample_shape, torch.Size):
            sample_shape = torch.Size(sample_shape)
        # Full sample shape is sample_shape + batch_shape + event_shape.
        return sample_shape + self._batch_shape + self._event_shape
    def _validate_sample(self, value):
        """
        Argument validation for distribution methods such as `log_prob`,
        `cdf` and `icdf`. The rightmost dimensions of a value to be
        scored via these methods must agree with the distribution's batch
        and event shapes.
        Args:
            value (Tensor): the tensor whose log probability is to be
                computed by the `log_prob` method.
        Raises:
            ValueError: when the rightmost dimensions of `value` do not match the
                distribution's batch and event shapes.
        """
        if not isinstance(value, torch.Tensor):
            raise ValueError('The value argument to log_prob must be a Tensor')
        # The trailing dims of `value` must equal event_shape exactly.
        event_dim_start = len(value.size()) - len(self._event_shape)
        if value.size()[event_dim_start:] != self._event_shape:
            raise ValueError('The right-most size of value must match event_shape: {} vs {}.'.
                             format(value.size(), self._event_shape))
        # The leading dims must broadcast against batch_shape + event_shape,
        # compared right-to-left; a size of 1 on either side is acceptable.
        actual_shape = value.size()
        expected_shape = self._batch_shape + self._event_shape
        for i, j in zip(reversed(actual_shape), reversed(expected_shape)):
            if i != 1 and j != 1 and i != j:
                raise ValueError('Value is not broadcastable with batch_shape+event_shape: {} vs {}.'.
                                 format(actual_shape, expected_shape))
        # Finally check the values lie within the distribution's support,
        # if the subclass defines one; otherwise warn instead of failing.
        try:
            support = self.support
        except NotImplementedError:
            warnings.warn(f'{self.__class__} does not define `support` to enable ' +
                          'sample validation. Please initialize the distribution with ' +
                          '`validate_args=False` to turn off validation.')
            return
        assert support is not None
        if not support.check(value).all():
            raise ValueError('The value argument must be within the support')
    def _get_checked_instance(self, cls, _instance=None):
        # Guard used by .expand(): a subclass with a custom __init__ cannot
        # be safely re-created via __new__, so it must override .expand().
        if _instance is None and type(self).__init__ != cls.__init__:
            raise NotImplementedError("Subclass {} of {} that defines a custom __init__ method "
                                      "must also define a custom .expand() method.".
                                      format(self.__class__.__name__, cls.__name__))
        # Reuse the provided instance, or allocate an uninitialized one.
        return self.__new__(type(self)) if _instance is None else _instance
    def __repr__(self):
        # Only show constructor args that are actually stored on the instance.
        param_names = [k for k, _ in self.arg_constraints.items() if k in self.__dict__]
        # Single-element tensors print their value; larger ones their size.
        args_string = ', '.join(['{}: {}'.format(p, self.__dict__[p]
                                if self.__dict__[p].numel() == 1
                                else self.__dict__[p].size()) for p in param_names])
        return self.__class__.__name__ + '(' + args_string + ')'
| [
"952361195@qq.com"
] | 952361195@qq.com |
033451d08b6344e87d7017977780ce8cefee7dea | 405c89cf9ca91b3bf72c030410afbd7e4ba797e8 | /odl_boke/my_app/migrations/0016_subproject_settime.py | 33eb4fb66496f7ccfd06f24b5d2f0255a6ccf4d1 | [] | no_license | andwang130/django_web | 11941c1cc43314399ab9d487a02836e718c70369 | 85f0adb0f142fdb4dbdd49cb01e0763e74624fec | refs/heads/master | 2021-01-25T01:07:55.892949 | 2018-06-14T01:57:32 | 2018-06-14T01:57:32 | 123,306,749 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | # Generated by Django 2.0.1 on 2018-01-20 15:53
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations: adds a creation-timestamp
    # column (`settime`) to the SubProject model.
    dependencies = [
        ('my_app', '0015_auto_20180120_2300'),
    ]
    operations = [
        migrations.AddField(
            model_name='subproject',
            name='settime',
            # auto_now_add stamps new rows at creation time; the explicit
            # default only backfills rows that already exist when this
            # migration runs (hence preserve_default=False).
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
| [
"627656470@qq.com"
] | 627656470@qq.com |
d81279866b63a11b890694dcb86e0d92f04ccedd | 26fcacb804565222c566e9e661ea3850c4c36f69 | /tmp/test_streaming_f1.py | 87a3fc85d00c2a848a3adff81733b64fa39a7ab3 | [] | no_license | entrepreneur-interet-general/tf-han | 84052f96f5abf3f1b7ce09c22dc2d9e29921b412 | f745942dd56209cf4b81b95ccc830fff8e7487ba | refs/heads/master | 2022-12-12T01:50:03.544882 | 2018-11-30T09:08:49 | 2018-11-30T09:08:49 | 128,797,777 | 2 | 1 | null | 2022-12-08T01:15:14 | 2018-04-09T16:00:13 | Python | UTF-8 | Python | false | false | 9,291 | py | import tensorflow as tf
import numpy as np
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import ops
from sklearn.metrics import f1_score
def metric_variable(shape, dtype, validate_shape=True, name=None):
    """Create a zero-initialized variable registered in the
    `GraphKeys.(LOCAL|METRIC)_VARIABLES` collections.

    Mirrors the private helper in
    tensorflow/python/ops/metrics_impl.py so the streaming counts can be
    reset with a local-variables initializer.
    """
    def _zeros_initializer():
        return array_ops.zeros(shape, dtype)

    return variable_scope.variable(
        _zeros_initializer,
        trainable=False,
        collections=[ops.GraphKeys.LOCAL_VARIABLES,
                     ops.GraphKeys.METRIC_VARIABLES],
        validate_shape=validate_shape,
        name=name,
    )
def streaming_counts(y_true, y_pred, num_classes):
    """Computes the TP, FP and FN counts for the micro and macro f1 scores.
    The weighted f1 score can be inferred from the macro f1 score provided
    we compute the weights also.
    This function also defines the update ops to these counts
    Args:
        y_true (Tensor): 2D Tensor representing the target labels
        y_pred (Tensor): 2D Tensor representing the predicted labels
        num_classes (int): number of possible classes
    Returns:
        tuple: the first element in the tuple is itself a tuple grouping the counts,
        the second element is the grouped update op.
    """
    # NOTE(review): y_true/y_pred are assumed to be 0/1 integer tensors of
    # shape (batch, num_classes) — this matches how __main__ feeds them.
    # Weights for the weighted f1 score
    weights = metric_variable(
        shape=[num_classes], dtype=tf.int64, validate_shape=False, name="weights"
    )
    # Counts for the macro f1 score (per-class, hence shape [num_classes])
    tp_mac = metric_variable(
        shape=[num_classes], dtype=tf.int64, validate_shape=False, name="tp_mac"
    )
    fp_mac = metric_variable(
        shape=[num_classes], dtype=tf.int64, validate_shape=False, name="fp_mac"
    )
    fn_mac = metric_variable(
        shape=[num_classes], dtype=tf.int64, validate_shape=False, name="fn_mac"
    )
    # Counts for the micro f1 score (scalars aggregated over all classes)
    tp_mic = metric_variable(
        shape=[], dtype=tf.int64, validate_shape=False, name="tp_mic"
    )
    fp_mic = metric_variable(
        shape=[], dtype=tf.int64, validate_shape=False, name="fp_mic"
    )
    fn_mic = metric_variable(
        shape=[], dtype=tf.int64, validate_shape=False, name="fn_mic"
    )
    # Update ops, as in the previous section:
    # - Update ops for the macro f1 score
    up_tp_mac = tf.assign_add(tp_mac, tf.count_nonzero(y_pred * y_true, axis=0))
    up_fp_mac = tf.assign_add(fp_mac, tf.count_nonzero(y_pred * (y_true - 1), axis=0))
    up_fn_mac = tf.assign_add(fn_mac, tf.count_nonzero((y_pred - 1) * y_true, axis=0))
    # - Update ops for the micro f1 score
    up_tp_mic = tf.assign_add(tp_mic, tf.count_nonzero(y_pred * y_true, axis=None))
    up_fp_mic = tf.assign_add(
        fp_mic, tf.count_nonzero(y_pred * (y_true - 1), axis=None)
    )
    up_fn_mic = tf.assign_add(
        fn_mic, tf.count_nonzero((y_pred - 1) * y_true, axis=None)
    )
    # Update op for the weights: per-class support (true instances per class)
    up_weights = tf.assign_add(weights, tf.reduce_sum(y_true, axis=0))
    # Grouping values
    counts = (tp_mac, fp_mac, fn_mac, tp_mic, fp_mic, fn_mic, weights)
    updates = tf.group(
        up_tp_mic, up_fp_mic, up_fn_mic, up_tp_mac, up_fp_mac, up_fn_mac, up_weights
    )
    return counts, updates
def streaming_f1(y_true, y_pred, num_classes):
    """Compute and update the F1 scores given target labels
    and predicted labels.

    Thin convenience wrapper: builds the streaming count variables and
    derives the three f1 tensors from them.

    Args:
        y_true (Tensor): 2D one-hot Tensor of the target labels.
            Possibly several ones for multiple labels
        y_pred (Tensor): 2D one-hot Tensor of the predicted labels.
            Possibly several ones for multiple labels
        num_classes (int): Number of possible classes labels can take

    Returns:
        tuple: f1s as tuple of three tensors: micro, macro and weighted F1;
        second element is the grouped update op for the counts.
    """
    streamed_counts, update_op = streaming_counts(y_true, y_pred, num_classes)
    return streaming_f1_from_counts(streamed_counts), update_op
def streaming_f1_from_counts(counts):
    """Computes the f1 scores from the TP, FP and FN counts
    Args:
        counts (tuple): macro and micro counts, and weights in the end
    Returns:
        tuple(Tensor): The 3 tensors representing the micro, macro and weighted
            f1 score
    """
    # unpacking values
    tp_mac, fp_mac, fn_mac, tp_mic, fp_mic, fn_mic, weights = counts
    # normalize weights; `/` is true division, so the int64 counts are
    # promoted to floats here.
    # NOTE(review): this rebinds the local name to a normalized tensor; the
    # underlying weights variable is presumably left unchanged — confirm
    # for the TF version in use.
    weights /= tf.reduce_sum(weights)
    # computing the micro f1 score
    prec_mic = tp_mic / (tp_mic + fp_mic)
    rec_mic = tp_mic / (tp_mic + fn_mic)
    f1_mic = 2 * prec_mic * rec_mic / (prec_mic + rec_mic)
    f1_mic = tf.reduce_mean(f1_mic)
    # computing the macro and weighted f1 score (per-class vectors first)
    prec_mac = tp_mac / (tp_mac + fp_mac)
    rec_mac = tp_mac / (tp_mac + fn_mac)
    f1_mac = 2 * prec_mac * rec_mac / (prec_mac + rec_mac)
    # weighted = support-weighted sum of per-class f1s; must be computed
    # before f1_mac is collapsed by reduce_mean below.
    f1_wei = tf.reduce_sum(f1_mac * weights)
    f1_mac = tf.reduce_mean(f1_mac)
    return f1_mic, f1_mac, f1_wei
def tf_f1_score(y_true, y_pred):
    """Computes 3 different f1 scores, micro macro
    weighted.
    micro: f1 score accross the classes, as 1
    macro: mean of f1 scores per class
    weighted: weighted average of f1 scores per class,
        weighted from the support of each class
    Args:
        y_true (Tensor): labels, with shape (batch, num_classes)
        y_pred (Tensor): model's predictions, same shape as y_true
    Returns:
        tupe(Tensor): (micro, macro, weighted)
                    tuple of the computed f1 scores
    """
    f1s = [0, 0, 0]
    y_true = tf.cast(y_true, tf.float64)
    y_pred = tf.cast(y_pred, tf.float64)
    # axis=None aggregates over everything (micro); axis=0 keeps a
    # per-class vector (macro).
    for i, axis in enumerate([None, 0]):
        TP = tf.count_nonzero(y_pred * y_true, axis=axis)
        FP = tf.count_nonzero(y_pred * (y_true - 1), axis=axis)
        FN = tf.count_nonzero((y_pred - 1) * y_true, axis=axis)
        precision = TP / (TP + FP)
        recall = TP / (TP + FN)
        f1 = 2 * precision * recall / (precision + recall)
        f1s[i] = tf.reduce_mean(f1)
    # After the loop `f1` still holds the per-class scores from the axis=0
    # pass; the weighted score below deliberately relies on that.
    weights = tf.reduce_sum(y_true, axis=0)
    weights /= tf.reduce_sum(weights)
    f1s[2] = tf.reduce_sum(f1 * weights)
    micro, macro, weighted = f1s
    return micro, macro, weighted
def alter_data(_data):
    """Simulate model predictions by randomly perturbing the true labels.

    Each individual label has a 20% chance of being flipped (0 <-> 1).
    Exactly one np.random.rand() draw is consumed per label, in row-major
    order, so results are reproducible under a fixed seed.

    Args:
        _data (np.array): 2D array of 0/1 target labels; not modified.

    Returns:
        np.array: perturbed copy with the same shape as the input.
    """
    perturbed = _data.copy()
    rows = []
    for row in perturbed:
        for idx in range(len(row)):
            if np.random.rand() < 0.2:
                row[idx] = (row[idx] + 1) % 2
        rows.append(row)
    return np.array(rows)
def get_data(multilabel=True):
    """Generate random multilabel data:
    y_true and y_pred are one-hot arrays, but since it's a multi-label setting,
    there may be several `1` per line.
    Args:
        multilabel (bool): allow up to 5 labels per sample when True,
            exactly 1 when False.
    Returns:
        tuple: y_true, y_pred (both int arrays of shape (10000, 10))
    """
    # Number of different classes
    num_classes = 10
    classes = list(range(num_classes))
    # Number of samples in the synthetic dataset
    examples = 10000
    # Max number of labels per sample. Minimum is 1
    max_labels = 5 if multilabel else 1
    # Exponentially decaying probabilities -> deliberate class imbalance.
    class_probabilities = np.array(
        list(6 * np.exp(-i * 5 / num_classes) + 1 for i in range(num_classes))
    )
    class_probabilities /= class_probabilities.sum()
    labels = [
        np.random.choice(
            classes,
            # number of labels for this sample
            size=np.random.randint(1, max_labels + 1),
            p=class_probabilities,  # Probability of drawing each class
            replace=False,  # A class can only occure once
        )
        for _ in range(examples)  # Do it `examples` times
    ]
    # One-hot encode the chosen label sets.
    y_true = np.zeros((examples, num_classes)).astype(np.int64)
    for i, l in enumerate(labels):
        y_true[i][l] = 1
    # Predictions are a noisy copy of the targets.
    y_pred = alter_data(y_true)
    return y_true, y_pred
if __name__ == "__main__":
    np.random.seed(0)
    # Single-label data (multilabel=False) keeps the comparison simple.
    y_true, y_pred = get_data(False)
    num_classes = y_true.shape[-1]
    bs = 100
    t = tf.placeholder(tf.int64, [None, None], "y_true")
    p = tf.placeholder(tf.int64, [None, None], "y_pred")
    tf_f1 = tf_f1_score(t, p)
    counts, update = streaming_counts(t, p, num_classes)
    streamed_f1 = streaming_f1_from_counts(counts)
    with tf.Session() as sess:
        # The count variables live in LOCAL_VARIABLES (see metric_variable),
        # so they need the local-variables initializer.
        tf.local_variables_initializer().run()
        # 1) exact scores computed over the whole dataset in one pass
        mic, mac, wei = sess.run(tf_f1, feed_dict={t: y_true, p: y_pred})
        print("{:40}".format("\nTotal, overall f1 scores: "), mic, mac, wei)
        # 2) the same scores accumulated batch by batch via the update op
        for i in range(len(y_true) // bs):
            y_t = y_true[i * bs : (i + 1) * bs].astype(np.int64)
            y_p = y_pred[i * bs : (i + 1) * bs].astype(np.int64)
            _ = sess.run(update, feed_dict={t: y_t, p: y_p})
        mic, mac, wei = [f.eval() for f in streamed_f1]
        print("{:40}".format("\nStreamed, batch-wise f1 scores:"), mic, mac, wei)
    # 3) scikit-learn reference values for both computations
    mic = f1_score(y_true, y_pred, average="micro")
    mac = f1_score(y_true, y_pred, average="macro")
    wei = f1_score(y_true, y_pred, average="weighted")
    print("{:40}".format("\nFor reference, scikit f1 scores:"), mic, mac, wei)
| [
"vsch@protonmail.com"
] | vsch@protonmail.com |
9b5798055f88b7a1a338ac59202cf9820a91f147 | 48bf18fd4c495aaec8c3c03cb5f56a0cd511dd70 | /todolist/urls.py | c7ad30b772ca89b1f4b40db77d77645b3a0cdc83 | [] | no_license | williampepple1/Todolistwithdjango | c1f587eba8e038f9008a5a444c24296ce4fc90e8 | 0533a775753be74c9b5e30a5c5d6627c68f92293 | refs/heads/main | 2023-04-22T15:37:53.033204 | 2021-05-08T15:35:16 | 2021-05-08T15:35:16 | 364,580,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | """todolist URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    # Django admin interface.
    path('admin/', admin.site.urls),
    # All remaining URLs are delegated to the `todo` app's URLconf.
    path('', include("todo.urls"))
]
| [
"36817380+williampepple1@users.noreply.github.com"
] | 36817380+williampepple1@users.noreply.github.com |
bbb2322ace2b612dcc069fc950c3e1896bb9a0ac | d6659785d05e35daa190470e55fd662bf0235497 | /models/sfrtrans.py | baeb6989667c5c76dd7701a6f1ec54ee6e45ee19 | [] | no_license | fahihdrh/wuliang | 2fc3aae1c729112a7ee85cf2b573ccb197ff29d1 | 949eadacb13856e1d39986404e9d717fe39282da | refs/heads/master | 2023-06-28T04:22:02.836370 | 2023-06-15T01:47:21 | 2023-06-15T01:47:21 | 76,614,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,763 | py | import torch
from models.utils import SATransformer, CATransformer
import torch.nn as nn
from mmcv.cnn.bricks.conv_module import ConvModule
from einops import rearrange
class SFRTrans(nn.Module):
    """ backbone_dim: The number of channels of feature maps output from the backbone (UNet Encoder).
        seq_dim: The dimension after the sequentialization.
        feature_map_size: The spatial size (int) of the feature map input to SFRTrans.
        patch_size: The patch size for the sequentialization.
        ffn_dim, head_dim, head_num, dropout, depth: See in utils.py """
    def __init__(self, backbone_dim, seq_dim, feature_map_size, patch_size,
                 ffn_dim, head_dim, head_num=4, dropout=0., depth=3):
        super(SFRTrans, self).__init__()
        self.feature_map_size = feature_map_size
        self.patch_size = patch_size
        # Patch embedding: a conv with kernel=stride=patch_size turns each
        # patch into one token (separate convs for encoder/decoder inputs).
        self.seq_conv_E = ConvModule(backbone_dim, seq_dim, kernel_size=patch_size, stride=patch_size,
                                     norm_cfg=dict(type='BN'))
        self.seq_conv_D = ConvModule(backbone_dim, seq_dim, kernel_size=patch_size, stride=patch_size,
                                     norm_cfg=dict(type='BN'))
        # Learned positional embedding shared by both token sequences.
        self.shared_pe = nn.Parameter(torch.zeros(1, (feature_map_size // patch_size) ** 2, seq_dim))
        self.catransformer = CATransformer(seq_dim, ffn_dim=ffn_dim, head_dim=head_dim,
                                           head_num=head_num, dropout=dropout)
        self.satransformer = SATransformer(head_dim * head_num, ffn_dim=ffn_dim, head_dim=head_dim,
                                           head_num=head_num, dropout=dropout, depth=depth)
        # 1x1 conv projects transformer output back to the backbone width.
        self.conv1_1 = ConvModule(head_dim * head_num, backbone_dim, kernel_size=1, norm_cfg=dict(type='BN'))
        if patch_size > 1:
            # Restore the original spatial resolution lost to patching.
            self.output_upsample = nn.Upsample(scale_factor=patch_size)
    def forward(self, E, D):
        # Flatten the (B, C, H, W) maps into token sequences and add the
        # shared positional embedding.
        seq_E = rearrange(self.seq_conv_E(E), 'b c h w -> b (h w) c') + self.shared_pe
        seq_D = rearrange(self.seq_conv_D(D), 'b c h w -> b (h w) c') + self.shared_pe
        # Cross-attention between the two sequences, then self-attention.
        Z_nplus1 = self.satransformer(self.catransformer(seq_E, seq_D))
        # Tokens back to a 2D feature map, then channel projection.
        output = self.conv1_1(rearrange(Z_nplus1, 'b (h w) c -> b c h w',
                                        h=self.feature_map_size // self.patch_size))
        if self.patch_size > 1:
            output = self.output_upsample(output)
        return output
if __name__ == '__main__':
    # Smoke test: run random encoder/decoder features through the module.
    # Requires a CUDA device.
    sfrtrans = SFRTrans(backbone_dim=512, seq_dim=256, feature_map_size=28, patch_size=1,
                        ffn_dim=256, head_dim=64).cuda()
    _E = torch.rand(4, 512, 28, 28).cuda()
    _D = torch.rand(4, 512, 28, 28).cuda()
    sfrtrans_output = sfrtrans(_E, _D)
    print(sfrtrans_output.shape)
    print('debugger')
| [
"237583937@qq.com"
] | 237583937@qq.com |
6fb95dced758154aad6d1a944de9536f17a0b428 | 1a2cc414889af45a08af88e30253761fc9a804b1 | /scripts/fastest-infra-wheel-mirror.py | 4c75acfa79263972d9b05dc3332a181089101e2b | [
"Apache-2.0"
] | permissive | jimmy42/openstack-ansible | eb098ff8ca360ce712c692084f7cbe34cd18e9b2 | 77cd6b1d3fc67bff4936b793e7949bf757a6f31c | refs/heads/master | 2023-01-08T18:20:11.333027 | 2016-06-02T15:25:50 | 2016-06-02T15:25:50 | 60,303,603 | 0 | 0 | Apache-2.0 | 2022-12-24T00:58:30 | 2016-06-02T23:17:57 | Python | UTF-8 | Python | false | false | 5,668 | py | #!/usr/bin/env python
#
# Copyright 2016, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2016, Jesse Pretorius <jesse.pretorius@rackspace.co.uk>
#
# Based on the mirror test script posted at
# http://code.activestate.com/recipes/284631-a-python-script-to-test-download-mirrors/
import platform
import Queue
import re
import threading
import time
import urllib
HTTP_TIMEOUT = 10.0  # Max. seconds to wait for a response
HTTP_TITLE = "Wheel Index"  # HTTP Title to look for to validate the page
MAX_THREADS = 10  # Upper bound on concurrent worker threads
# Candidate OpenStack infra wheel mirrors to race against each other.
MIRROR_LIST = ["http://mirror.dfw.rax.openstack.org/wheel/",
               "http://mirror.ord.rax.openstack.org/wheel/",
               "http://mirror.iad.rax.openstack.org/wheel/",
               "http://mirror.gra1.ovh.openstack.org/wheel/",
               "http://mirror.bhs1.ovh.openstack.org/wheel/",
               "http://mirror.sjc1.bluebox.openstack.org/wheel/",
               "http://mirror.nyj01.internap.openstack.org/wheel/",
               "http://mirror.cloud1.osic.openstack.org/wheel/"]
def TestUrl(workQueue, resultQueue):
    '''Worker thread procedure.

    Pull mirror URLs off workQueue, time how long each takes to return its
    index page, and push (url, milliseconds-or-error-string) tuples onto
    resultQueue.
    '''
    def SubthreadProc(url, result):
        '''Subthread procedure.

        Actually get the mirror index page in a subthread, so that we can time
        out using join rather than wait for a very slow server. Passing in a
        list for result lets us simulate pass-by-reference, since callers
        cannot get the return code from a Python thread.
        '''
        startTime = time.time()
        try:
            data = urllib.urlopen(url).read()
        except Exception:
            # Could be a socket error or an HTTP error--either way, we
            # don't care--it's a failure to us.
            result.append(-1)
        else:
            if not CheckTitle(data):
                result.append(-1)
            else:
                elapsed = int((time.time() - startTime) * 1000)
                result.append(elapsed)

    def CheckTitle(html):
        '''Check that the HTML title is the expected value.

        Returns True only when the page's <title> matches HTTP_TITLE. This
        guards against service providers redirecting DNS resolution
        failures to a web search page, or other invalid content.
        '''
        titleRegex = re.compile("<title>(.+?)</title>")
        try:
            title = titleRegex.search(html).group(1)
        except Exception:
            # BUG FIX: this branch used to append -1 to the *enclosing*
            # scope's `result` list and then fall through returning None,
            # so the caller recorded a second failure entry. Report the
            # failure through the return value only.
            return False
        return title == HTTP_TITLE

    while 1:
        # Continue pulling data from the work queue until it's empty
        try:
            url = workQueue.get(0)
        except Queue.Empty:
            # work queue is empty--exit the thread proc.
            return
        # Create a single subthread to do the actual work
        result = []
        subThread = threading.Thread(target=SubthreadProc, args=(url, result))
        # Daemonize the subthread so that even if a few are hanging
        # around when the process is done, the process will exit.
        subThread.setDaemon(True)
        # Run the subthread and wait for it to finish, or time out
        subThread.start()
        subThread.join(HTTP_TIMEOUT)
        if [] == result:
            # Subthread hasn't given a result yet. Consider it timed out.
            resultQueue.put((url, "TIMEOUT"))
        elif -1 == result[0]:
            # Subthread returned an error from geturl.
            resultQueue.put((url, "FAILED"))
        else:
            # Subthread returned a time. Store it.
            resultQueue.put((url, result[0]))
# Set the number of threads to use
numThreads = min(MAX_THREADS, len(MIRROR_LIST))
# Build a queue to feed the worker threads
workQueue = Queue.Queue()
for url in MIRROR_LIST:
    # Build the complete URL for this platform's wheel directory.
    # NOTE(review): platform.linux_distribution() was removed in
    # Python 3.8; fine on the Python 2 interpreter this script targets.
    distro = platform.linux_distribution()[0].lower()
    version = platform.linux_distribution()[1]
    architecture = platform.machine()
    fullUrl = url + distro + "-" + version + "-" + architecture + "/"
    workQueue.put(fullUrl)
workers = []
resultQueue = Queue.Queue()
# Create worker threads to load-balance the retrieval
for threadNum in range(0, numThreads):
    workers.append(threading.Thread(target=TestUrl,
                                    args=(workQueue, resultQueue)))
    workers[-1].start()
# Wait for all the workers to finish
for w in workers:
    w.join()
# Separate the successes (timings in ms) from failures (error strings)
timings = []
failures = []
while not resultQueue.empty():
    url, result = resultQueue.get(0)
    if isinstance(result, str):
        failures.append((result, url))
    else:
        timings.append((result, url))
# Sort by increasing time or result string
timings.sort()
failures.sort()
# If all results are failed, then exit silently
if len(timings) > 0:
    # Print out the fastest mirror URL
    print(timings[0][1])
| [
"jesse.pretorius@rackspace.co.uk"
] | jesse.pretorius@rackspace.co.uk |
a1c2a87889a40ad080a4ee6a9d04be8cc1ac71a4 | bd6bf1f587cce6ecf9088f6e97034c5a45ea968b | /write_file.py | b81b15175e6473c09d3585c802bcf7aa2c7dd9dc | [] | no_license | daisykha/pythonSimpleProgram | b4d6670c53b7dd36f4310fa149df3cacb3348d48 | 02835eaaeee473dbc6e2f47c081a05c5e759dd2c | refs/heads/master | 2022-03-21T17:24:11.531308 | 2022-02-08T19:40:14 | 2022-02-08T19:40:14 | 213,254,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | #Q1
#a_word="abc"
#file_name=open("text.txt","w")
#file_name.write(a_word)
#file_name.close()
#Q2
#def saveListToFile(sentences, filename):
# filename = open(filename,"w")
# for i in sentences:
# filename.write(i)
# filename.write("\n")
# filename.close()
#file_name=input("Enter file name: ")
#sentences = ["a","b","cd"]
#saveListToFile(sentences,file_name)
#Q3
#def saveToLog(sentences, filename):
# filename = open(filename,"a")
# filename.write(sentences)
# filename.close()
#string = "abcde"
#file_name=input("Enter file name: ")
#saveToLog(string,file_name)
| [
"noreply@github.com"
] | noreply@github.com |
082dec6b6de5e27366fa56a22a7a33984dbdfd45 | b851b6db51b0948d3a2a1c27d4616ecc451bcbe0 | /PetriDish/ConcurrentTransition.py | 8299dc50bd8347b66aece27f93d567607bec1d4a | [] | no_license | jomondogu/petri-dish | 321bb90d0084b357b6f8e52667d243bea45d7b0c | 5098e1baf4d35dd61e9eaad34777b7e292f614cd | refs/heads/master | 2020-04-11T10:17:49.442439 | 2018-12-14T00:22:08 | 2018-12-14T00:22:08 | 161,710,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,778 | py | import sys
import time
from threading import Thread
class Transition(Thread):
    """A Petri-net transition that runs as its own thread.

    Each loop iteration tries to lock every connected state, fires when
    all input states hold a token, then releases the locks again.
    """
    def __init__(self, id = 0, inputs = None, outputs = None, position = None):
        super(Transition, self).__init__()
        self.id = id
        if inputs is None:
            self.M = 0
            self.inputs = []
        else:
            self.M = len(inputs)
            self.inputs = inputs
        if outputs is None:
            self.N = 0
            self.outputs = []
        else:
            self.N = len(outputs)
            self.outputs = outputs
        # BUG FIX: the default used to be the mutable literal [0,0] shared
        # across all instances; build a fresh list per instance instead.
        self.position = [0, 0] if position is None else position
        self.locks = []  # locks currently held by this transition
        self.fires = 0   # number of times this transition has fired
    def __str__(self):
        return "Transition " + str(self.id)
    def add_input(self, input):
        """Connect an input state and refresh the input count M."""
        self.inputs.append(input)
        self.M = len(self.inputs)
    def add_output(self, output):
        """Connect an output state and refresh the output count N."""
        self.outputs.append(output)
        self.N = len(self.outputs)
    def set_position(self, pos):
        self.position = pos
    def release_locks(self):
        """Release and drop every held lock.

        BUG FIX: the previous implementation removed items from
        ``self.locks`` while iterating over it, which skipped every other
        lock and could leave states locked forever (deadlock).
        """
        while self.locks:
            self.locks.pop().release()
    def eligible(self):
        """Return True iff every input state currently holds a token."""
        print(str(self) + " is checking eligibility...",flush=True)
        elig = True
        for state in self.inputs:
            if not state.ready():
                elig = False
            else:
                print(str(self) + ": " + str(state) + " is ready!",flush=True)
        return elig
    def fire(self):
        """Consume one token from each input, produce one to each output."""
        print(str(self) + " fires! ",end="",flush=True)
        for state in self.inputs:
            state.output()
            print("1 token consumed from " + str(state) + ". ",end="",flush=True)
        for state in self.outputs:
            state.input()
            print("1 token produced to " + str(state) + ". ",end="",flush=True)
        self.fires += 1
    def run(self):
        """Thread main loop: 1000 attempts to lock all states and fire."""
        counter = 0
        while counter < 1000:
            time.sleep(0.001)
            locked = True
            for state in self.inputs + self.outputs:
                if not state.lock.acquire(False):
                    # Could not take every lock: back off completely so we
                    # never sit on a partial lock set (deadlock avoidance).
                    print(str(self) + ": " + str(state.id) + " is locked. Releasing all locks.",flush=True)
                    locked = False
                    self.release_locks()
                    break
                else:
                    print(str(self) + " locks " + str(state) + ".",flush=True)
                    self.locks.append(state.lock)
            if locked and self.eligible():
                self.fire()
            print(str(self) + " releases all locks.",flush=True)
            self.release_locks()
            counter += 1
        print("***" + str(self) + " fired " + str(self.fires) + " times!",flush=True)
| [
"noreply@github.com"
] | noreply@github.com |
d8bef9d2257e646945921eef2184ee0089672dc5 | dabc9c7ec7cce125a12c6243ff67fd91e620d636 | /tap/line.py | 2784be13d982350342f9cef81eb316a081153234 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | Mark-E-Hamilton/tappy | 7634209c2862c9e837b58602d4b59636fd9a8e89 | 62c1a4ef1d9e724d3c7bbb31361c17c3bf071d04 | refs/heads/master | 2021-01-15T09:04:09.813683 | 2016-03-21T04:51:45 | 2016-03-21T04:51:45 | 53,630,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,921 | py | # Copyright (c) 2016, Matt Layman
class Line(object):
    """Base type for TAP data.
    TAP is a line based protocol. Thus, the most primitive type is a line.
    """
    @property
    def category(self):
        # Abstract: each concrete line type returns its category label.
        raise NotImplementedError
class Result(Line):
    """Information about an individual test line."""
    def __init__(
            self, ok, number=None, description='', directive=None,
            diagnostics=None):
        self._ok = ok
        # The number may arrive as a numeric string; an empty string or
        # None both mean "no test number was given".
        self._number = int(number) if number else None
        self._description = description
        self.directive = directive
        self.diagnostics = diagnostics
    @property
    def category(self):
        """:returns: ``test``"""
        return 'test'
    @property
    def ok(self):
        """Get the ok status.
        :rtype: bool
        """
        return self._ok
    @property
    def number(self):
        """Get the test number.
        :rtype: int
        """
        return self._number
    @property
    def description(self):
        """Get the description."""
        return self._description
    @property
    def skip(self):
        """Check if this test was skipped.
        Raises AttributeError when no directive was attached.
        :rtype: bool
        """
        return self.directive.skip
    @property
    def todo(self):
        """Check if this test was a TODO.
        Raises AttributeError when no directive was attached.
        :rtype: bool
        """
        return self.directive.todo
    def __str__(self):
        status = 'ok' if self._ok else 'not ok'
        directive = ''
        if self.directive is not None:
            directive = ' # {0}'.format(self.directive.text)
        diagnostics = ''
        if self.diagnostics is not None:
            diagnostics = '\n' + self.diagnostics.rstrip()
        return '{0} {1} - {2}{3}{4}'.format(
            status, self.number, self.description, directive, diagnostics)
class Plan(Line):
    """A plan line to indicate how many tests to expect."""
    def __init__(self, expected_tests, directive=None):
        self._expected_tests = expected_tests
        self.directive = directive
    @property
    def category(self):
        """:returns: ``plan``"""
        return 'plan'
    @property
    def expected_tests(self):
        """Get the number of expected tests.
        :rtype: int
        """
        return self._expected_tests
    @property
    def skip(self):
        """Check if this plan should skip the file.
        :rtype: bool
        """
        # NOTE(review): raises AttributeError when directive is None;
        # callers appear to check for a directive first — confirm.
        return self.directive.skip
class Diagnostic(Line):
    """A diagnostic line (i.e. anything starting with a hash)."""
    def __init__(self, text):
        self._text = text
    @property
    def category(self):
        """:returns: ``diagnostic``"""
        return 'diagnostic'
    @property
    def text(self):
        """Get the text."""
        return self._text
class Bail(Line):
    """A bail out line (i.e. anything starting with 'Bail out!')."""
    def __init__(self, reason):
        self._reason = reason
    @property
    def category(self):
        """:returns: ``bail``"""
        return 'bail'
    @property
    def reason(self):
        """Get the reason."""
        return self._reason
class Version(Line):
    """A version line (i.e. of the form 'TAP version 13')."""
    def __init__(self, version):
        self._version = version
    @property
    def category(self):
        """:returns: ``version``"""
        return 'version'
    @property
    def version(self):
        """Get the version number.
        :rtype: int
        """
        return self._version
class Unknown(Line):
    """A line that represents something that is not a known TAP line.
    This exists for the purpose of a Null Object pattern.
    """
    @property
    def category(self):
        """:returns: ``unknown``"""
        return 'unknown'
| [
"matthewlayman@gmail.com"
] | matthewlayman@gmail.com |
97876c1143af3c1bbcf63ea5db171555c18fc239 | 242086b8c6a39cbc7af3bd7f2fd9b78a66567024 | /python/PP4E-Examples-1.4/Examples/PP4E/Gui/Intro/gui3.py | 6617d3e8edd2b088131c50e73653265dc000e795 | [] | no_license | chuzui/algorithm | 7537d0aa051ac4cbe9f6a7ca9a3037204803a650 | c3006b24c4896c1242d3ceab43ace995c94f10c8 | refs/heads/master | 2021-01-10T13:05:30.902020 | 2015-09-27T14:39:02 | 2015-09-27T14:39:02 | 8,404,397 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | import sys
from tkinter import *
def quit():                              # a custom callback handler
    print('Hello, I must be going...')   # farewell message before exiting
    sys.exit()                           # kill windows and process; note: shadows the builtin quit()
widget = Button(None, text='Hello event world', command=quit)  # button wired to the quit callback
widget.pack()       # lay the button out with the packer geometry manager
widget.mainloop()   # enter the Tk event loop; returns only when the app exits
| [
"zui"
] | zui |
4fd2db085bebdf0fb2594d570603ecce95d71f50 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03227/s459999028.py | 03236a4d5fabd077163769a4c50f0ed805cccd94 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | def main():
s = input().rstrip()
if len(s) == 2:
print(s)
else:
print(s[::-1])
if __name__ == "__main__":
    # Run only when executed as a script, not when imported.
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2784c3c98d088cdfd95b608f240655a8d740bc7e | f59e084cc1dc682cabd2ccb2bc33815f7163fcf7 | /modelo_oo.py | 756784a68d95351cda9a0d43535819734c5520ae | [] | no_license | jolubanco/locadora-filmes | 77614d737c39add79f4393ebf16997d3c9072a13 | ed9195702ed28e5d1841c41bd51445b8853acda0 | refs/heads/main | 2023-03-17T11:33:05.156560 | 2021-03-20T20:28:08 | 2021-03-20T20:28:08 | 348,409,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,538 | py | class Programa: #super classe ou classe mãe
    def __init__(self,nome,ano):
        """Store the title-cased name, the release year and a zeroed like counter."""
        self._nome = nome.title()
        self.ano = ano
        self._likes = 0
    @property
    def likes(self):
        """Read-only like counter; mutate it via dar_like()."""
        return self._likes
    def dar_like(self):
        """Register one like on this programme."""
        self._likes += 1
    @property
    def nome(self):
        """Title-cased display name."""
        return self._nome
    @nome.setter
    def nome(self, novo_nome):
        """Replace the name, normalising it to title case."""
        self._nome = novo_nome.title()
    def __str__(self): #textual representation of the object
        return f'{self._nome} - {self.ano} - {self._likes} Likes'
class Filme(Programa): #inherits name/year/likes handling from Programa
    """Movie: a Programa plus a running time in minutes."""
    def __init__(self, nome, ano, duracao):
        super().__init__(nome,ano) #super() calls the parent initialiser to set up shared state, then this class extends it
        self.duracao = duracao #movie-specific extension (duration in minutes)
    def __str__(self):
        return f'{self._nome} - {self.ano} - {self.duracao} min - {self._likes} Likes'
class Serie(Programa):
    """TV series: a Programa plus a season count."""
    def __init__(self, nome, ano, temporadas):
        super().__init__(nome,ano)
        self.temporadas = temporadas
    def __str__(self):
        return f'{self._nome} - {self.ano} - {self.temporadas} temporadas - {self._likes} Likes'
class Playlist():
    """A named, indexable collection of programmes.

    Implementing ``__getitem__`` makes instances iterable (and usable with
    ``in``) through the sequence protocol; ``__len__`` supports ``len()``.
    """

    def __init__(self, nome, programas):
        self.nome = nome
        self._programas = programas

    def __getitem__(self, item):
        # Delegate indexing (and slicing) straight to the backing sequence.
        return self._programas[item]

    @property
    def listagem(self):
        """The backing sequence of programmes."""
        return self._programas

    def __len__(self):
        return len(self._programas)
# Demo: build a few sample programmes, register likes, then exercise Playlist.
vingadores = Filme('vingadores - guerra infinita',2018,160)
atlanta = Serie('atlanta',2018,2)
tmep = Filme('Todo mundo em pânico', 1999,100)
demolidor = Serie('demolidor',2016,2)
vingadores.dar_like()
tmep.dar_like()
tmep.dar_like()
tmep.dar_like()
tmep.dar_like()
demolidor.dar_like()
demolidor.dar_like()
atlanta.dar_like()
atlanta.dar_like()
atlanta.dar_like()
filmes_e_series = [vingadores, atlanta,demolidor,tmep]
playlist_fim_de_semana = Playlist('fim de semana',filmes_e_series)
# Playlist is not a list, but because it implements __getitem__ (and __len__)
# it behaves like a sequence: it supports len(), for-in iteration, `in`
# membership tests and indexing below.
print(f'Tamanho da playlist: {len(playlist_fim_de_semana)}')
for programa in playlist_fim_de_semana:
    print(programa)
print(f'Tá ou não tá?: {demolidor in playlist_fim_de_semana}')
print(playlist_fim_de_semana[0])
| [
"jolubanco@gmail.com"
] | jolubanco@gmail.com |
0a18a36b0b379bb71c4592b344cbbe3866ef6924 | 23cdddefa2eaaed538e1b8131adb139299d29768 | /framebuffer.py | 48b357ee6fc1e44eae64f845c2fabf5323f47d12 | [] | no_license | samuelpetersson/photobooth | 02c46bdd22a975c2813da76d235e61f42bf2dc40 | 81c5c8de879a0bf2d3a90a5ca44fa0c57ae95e6c | refs/heads/master | 2021-01-17T07:33:57.888031 | 2016-06-07T10:16:58 | 2016-06-07T10:16:58 | 37,460,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | import os
import pygame
def createScreen():
    "Initializes a new pygame screen using the framebuffer (Python 2 code)."
    # Based on "Python GUI in Linux frame buffer"
    # http://www.karoltomala.com/blog/?p=679
    disp_no = os.getenv("DISPLAY")
    if disp_no:
        print "I'm running under X display = {0}".format(disp_no)
    # Check which frame buffer drivers are available
    # Start with fbcon since directfb hangs with composite output
    drivers = ['fbcon', 'directfb', 'svgalib']
    found = False
    for driver in drivers:
        # Make sure that SDL_VIDEODRIVER is set
        # NOTE(review): once SDL_VIDEODRIVER has been set for the first driver,
        # later iterations never overwrite it — confirm that is intended.
        if not os.getenv('SDL_VIDEODRIVER'):
            os.putenv('SDL_VIDEODRIVER', driver)
        try:
            pygame.display.init()
        except pygame.error:
            print 'Driver: {0} failed.'.format(driver)
            continue
        found = True
        break
    if not found:
        raise Exception('No suitable video driver found!')
    # Full-screen surface sized to the framebuffer resolution.
    size = (pygame.display.Info().current_w, pygame.display.Info().current_h)
    print "Framebuffer size: %d x %d" % (size[0], size[1])
    screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
    # Clear the screen to start
    screen.fill((0, 0, 0))
    # Initialise font support
    pygame.font.init()
    # Render the screen
    pygame.display.update()
    # Return the screen
return screen | [
"sam@f09.org"
] | sam@f09.org |
e2e77603dceef720d9b2f0d2b691e2c6403a7e9c | f5bb79f7925d53ccaf21d2c98334222a8aa9a377 | /Hackerrank/06. Itertools/maximize-it.py | 6b8675cd3f6adf041adbe3ad186ab97932d8929f | [] | no_license | StanislavRadkov/python-learning | 511b1d8b80fb1eff500739d0c24b42af84948e75 | 46268b3107510b4a7ad9fbc868372de22b7459ae | refs/heads/master | 2021-03-24T12:12:51.303476 | 2017-09-11T12:27:26 | 2017-09-11T12:27:26 | 101,641,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | import itertools
# HackerRank "Maximize It!": choose one element from each of the k lists so
# that (sum of squares) mod m is maximal.
k, m = map(int, input().split(" "))
# Each input line starts with the list's own length, hence the [1:] slice.
arrays = [map(int, input().split(" ")[1:]) for _ in range(k)]
def f(*nums):
    # Objective value for one combination: squared sum modulo m.
    return sum(x * x for x in nums) % m
print(max(itertools.starmap(f, itertools.product(*arrays)))) | [
"stanislavradkov@skyscanner.net"
] | stanislavradkov@skyscanner.net |
1412f35638ca0ea7b9a84f157d78d221431a2524 | 810ce1c1ac47743e253171ec7541c0e431d952c2 | /small_programme/crawler/crawling.py | e445437136947a14712e6ade780429dd6b18b819 | [] | no_license | hjlarry/practise-py | 91052c25dc7ab706c6234f6d657db76667a27124 | 871e06b9652d356f55e3888f1f7ea180ac2b1954 | refs/heads/master | 2022-09-11T17:47:48.557194 | 2022-08-10T02:07:24 | 2022-08-10T02:07:24 | 136,263,989 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,379 | py | import asyncio
import collections
import logging
import re
import time
import urllib
import cgi
import sys
import aiohttp
from reporting import report
# Module-wide logger; basicConfig at import time enables DEBUG output globally.
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
# Immutable record of one fetch attempt; Crawler collects these in `done`
# and the reporting module summarises them.
FetchStatistic = collections.namedtuple(
    "FetchStatistic",
    [
        "url",
        "next_url",
        "status",
        "exception",
        "size",
        "content_type",
        "encoding",
        "num_urls",
        "num_new_urls",
    ],
)
def lenient_host(host):
    """Collapse *host* to its last two dot-separated labels, joined without
    the dot — e.g. 'a.b.example.com' -> 'examplecom'.  Used for lenient
    (subdomain-insensitive) same-site comparisons.
    """
    head, _, tail = host.rpartition(".")
    if not head:
        # One label only (or empty string): nothing to join.
        return tail
    return head.rsplit(".", 1)[-1] + tail
def is_redirect(response):
    """Return True if *response* carries an HTTP redirect status code."""
    redirect_statuses = frozenset((300, 301, 302, 303, 307))
    return response.status in redirect_statuses
class Crawler:
    """Asynchronous web crawler for one or more root URLs.

    URLs travel through an asyncio queue consumed by `max_tasks` worker
    tasks; every fetch attempt is recorded as a FetchStatistic in `done`.

    NOTE(review): the `loop=` keyword passed to asyncio.Queue /
    aiohttp.ClientSession and urllib.parse.splitport used below are
    deprecated or removed in newer Python/aiohttp releases — this code
    targets older versions; verify before upgrading.
    """
    def __init__(
        self,
        roots,
        exclude=None,
        strict=True,
        max_redirect=10,
        max_tries=4,
        max_tasks=10,
        *,
        loop=None,
    ):
        self.roots = roots
        self.loop = loop or asyncio.get_event_loop()
        self.exclude = exclude          # optional regex of URLs to skip
        self.strict = strict            # strict vs lenient host matching
        self.max_redirect = max_redirect
        self.max_tries = max_tries
        self.max_tasks = max_tasks
        self.q = asyncio.Queue(loop=self.loop)
        self.session = aiohttp.ClientSession(loop=self.loop)
        self.seen_urls = set()
        self.done = []
        self.root_domains = set()
        for root in self.roots:
            parts = urllib.parse.urlparse(root)
            host, port = urllib.parse.splitport(parts.netloc)
            if not host:
                continue
            if re.match(r"\A[\d\.]*\Z", host):
                # NOTE(review): for a bare-IP host the full root URL (not the
                # host) is added to root_domains — confirm this is intended.
                self.root_domains.add(root)
            else:
                host = host.lower()
                if self.strict:
                    self.root_domains.add(host)
                else:
                    self.root_domains.add(lenient_host(host)) # non-strict mode stores the lenient form, e.g. 'a.bc.com' -> 'bccom'
        for root in self.roots:
            self.add_url(root)
        self.t0 = time.time()
        self.t1 = None
    def close(self):
        """Close the shared aiohttp client session."""
        self.session.close()
    def add_url(self, url, max_redirect=None):
        """Mark *url* as seen and enqueue it with a redirect budget."""
        if max_redirect is None:
            max_redirect = self.max_redirect
        LOGGER.info(f"adding url: {url}, {max_redirect}")
        self.seen_urls.add(url)
        self.q.put_nowait((url, max_redirect))
    def record_statistic(self, fetch_statistic):
        """Append one FetchStatistic to the crawl's result list."""
        self.done.append(fetch_statistic)
    def _host_okay_strictish(self, host):
        # Strict mode still accepts the 'www.'-toggled variant of a root host.
        host = host[4:] if host.startswith("www.") else "www." + host
        return host in self.root_domains
    def _host_okay_lenident(self, host):
        # Lenient mode: any host whose last-two-labels form matches a root.
        return lenient_host(host) in self.root_domains
    def host_okay(self, host):
        """Return True if *host* belongs to one of the crawl's root domains."""
        host = host.lower()
        if host in self.root_domains:
            return True
        if re.match(r"\A[\d\.]*\Z", host):
            return False  # unknown bare IP addresses are never followed
        if self.strict:
            return self._host_okay_strictish(host)
        else:
            return self._host_okay_lenident(host)
    def url_allowed(self, url):
        """Return True if *url* passes the exclude regex, scheme and host checks."""
        if self.exclude and re.search(self.exclude, url):
            return False
        parts = urllib.parse.urlparse(url)
        if parts.scheme not in ("http", "https"):
            LOGGER.debug(f"skip non http url: {url}")
            return False
        host, part = urllib.parse.splitport(parts.netloc)
        if not self.host_okay(host):
            LOGGER.debug(f"skip non-root host in {url}")
            return False
        return True
    async def parse_links(self, response):
        """Extract allowed links from a 200 HTML/XML response.

        Returns a (FetchStatistic, set_of_allowed_links) pair.
        """
        links = set()
        content_type = None
        encoding = None
        body = await response.read()
        if response.status == 200:
            content_type = response.headers.get("content-type")
            pdict = {}
            if content_type:
                content_type, pdict = cgi.parse_header(content_type)
            encoding = pdict.get("charset", "utf-8")
            if content_type in ("text/html", "application/xml"):
                text = await response.text()
                # replacing href with (?:href|src) here would also capture image links
                urls = set(re.findall(r"""(?i)href=["']([^\s"'<>]+)""", text))
                if urls:
                    LOGGER.info(f"got {len(urls)} distinct urls from {response.url}")
                for url in urls:
                    # Resolve relative links and strip fragments before filtering.
                    normalized = urllib.parse.urljoin(str(response.url), url)
                    defragmented, frag = urllib.parse.urldefrag(normalized)
                    if self.url_allowed(defragmented):
                        links.add(defragmented)
        stat = FetchStatistic(
            url=response.url,
            next_url=None,
            status=response.status,
            exception=None,
            size=len(body),
            content_type=content_type,
            encoding=encoding,
            num_urls=len(links),
            num_new_urls=len(links - self.seen_urls),
        )
        return stat, links
    async def fetch(self, url, max_redirect):
        """Fetch one URL with retries; enqueue redirects and extracted links."""
        tries = 0
        exception = None
        while tries < self.max_tries:
            try:
                response = await self.session.get(url, allow_redirects=False)
                if tries > 1:
                    LOGGER.info(f"try {tries} for {url} success")
                break
            except aiohttp.ClientError as client_err:
                LOGGER.info(f"try {tries} for {url} raise {client_err}")
                exception = client_err
            tries += 1
        else:
            # Every attempt failed: record the last exception and give up.
            LOGGER.error(f"{url} failed after {self.max_tries} tries")
            self.record_statistic(
                FetchStatistic(
                    url=url,
                    next_url=None,
                    status=None,
                    exception=exception,
                    size=0,
                    content_type=None,
                    encoding=None,
                    num_urls=0,
                    num_new_urls=0,
                )
            )
            return
        try:
            if is_redirect(response):
                location = response.headers["location"]
                next_url = urllib.parse.urljoin(url, location)
                self.record_statistic(
                    FetchStatistic(
                        url=url,
                        next_url=next_url,
                        status=response.status,
                        exception=None,
                        size=0,
                        content_type=None,
                        encoding=None,
                        num_urls=0,
                        num_new_urls=0,
                    )
                )
                if next_url in self.seen_urls:
                    return
                if max_redirect > 0:
                    LOGGER.info(f"redirect to {next_url} from {url}")
                    # Follow the redirect, consuming one unit of the budget.
                    self.add_url(next_url, max_redirect - 1)
                else:
                    LOGGER.error(f"redirect num limit for {next_url} from {url}")
            else:
                stat, links = await self.parse_links(response)
                self.record_statistic(stat)
                for link in links.difference(self.seen_urls):
                    self.q.put_nowait((link, self.max_redirect))
                self.seen_urls.update(links)
        finally:
            await response.release()
    async def worker(self):
        """Consume URLs from the queue until the task is cancelled."""
        try:
            while True:
                url, max_redirect = await self.q.get()
                assert url in self.seen_urls
                await self.fetch(url, max_redirect)
                self.q.task_done()
        except asyncio.CancelledError:
            pass
    async def crawl(self):
        """Run the worker tasks until the queue drains, then cancel them."""
        workers = [
            asyncio.Task(self.worker(), loop=self.loop) for _ in range(self.max_tasks)
        ]
        self.t0 = time.time()
        await self.q.join()
        self.t1 = time.time()
        for w in workers:
            w.cancel()
def main():
    """Crawl the hard-coded root URL and write a summary to report.txt."""
    loop = asyncio.get_event_loop()
    roots = ("http://doc.1.com/platform/realname/",)
    crawler = Crawler(roots)
    try:
        loop.run_until_complete(crawler.crawl())
    except KeyboardInterrupt:
        sys.stderr.flush()
        print("\nInterrupted\n")
    finally:
        # NOTE(review): the report file handle is opened but never closed.
        f = open("report.txt", "w+")
        report(crawler, file=f)
        crawler.close()
        # NOTE(review): stop()+run_forever()+close() — presumably to let the
        # loop process pending callbacks before closing; verify.
        loop.stop()
        loop.run_forever()
        loop.close()
if __name__ == "__main__":
    main()
| [
"hjlarry@163.com"
] | hjlarry@163.com |
8332c450f1334adc650c3da9b9cf8c44f36cac49 | 7e1079b46b08bbe60a66e105c73bb9ab10397743 | /src/bin/tlvfyrule | 8737cfe35a28c7e166a5d9446573079e2f67225b | [] | no_license | ppjsand/pyteal | f810697e59ecb393e3d7c3b9eb69b5150f7f7f70 | eba6c1489b503fdcf040a126942643b355867bcd | refs/heads/master | 2020-05-17T22:44:18.135207 | 2012-08-01T14:38:56 | 2012-08-05T02:02:56 | 4,961,237 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,150 | #!/usr/bin/env python
# begin_generated_IBM_copyright_prolog
#
# This is an automatically generated copyright prolog.
# After initializing, DO NOT MODIFY OR MOVE
# ================================================================
#
# (C) Copyright IBM Corp. 2010,2011
# Eclipse Public License (EPL)
#
# ================================================================
#
# end_generated_IBM_copyright_prolog
# locale setup
import os
import gettext
curdir = os.path.abspath(os.path.dirname(__file__))
localedir = os.path.join(curdir, '..', 'locale')
t = gettext.translation('messages', localedir, fallback=True)
_ = t.lgettext
import sys
import logging
from optparse import OptionParser
from ibm.teal import registry
from ibm.teal.registry import SERVICE_LOGGER, SERVICE_ALERT_METADATA
from ibm.teal.teal import TealLogger, Teal
from ibm.teal.analyzer.gear.ruleset import GearRuleset
from ibm.teal.teal_error import XMLParsingError
from ibm.teal.metadata import Metadata, META_TYPE_ALERT
# NOTE(review): this script is Python 2 code (print statements,
# `except XMLParsingError,e` syntax); it will not run under Python 3.
if __name__ == '__main__':
    # Start up teal in data-only mode since we don't need the pipeline set up
    my_teal = Teal(None, logFile='stderr', msgLevel='critical', data_only=True)
    # Parse the command line (the _() wrapper localises help strings)
    parser = OptionParser('usage: %prog [options] rule-file')
    parser.add_option('-m', '--metadata',
                      type='string',
                      action='store',
                      default=None,
                      help=_('verify the rule using this alert metadata specification'))
    # TODO: Add location support
    # parser.add_option('-l', '--location',
    #                  type='string',
    #                  action='store',
    #                  default=None,
    #                  help=_('verify the rule using this location specification'))
    alert_input = False # otherwise not defined error
    parser.add_option('-a', '--alert',
                      dest='alert_input',
                      action='store_true',
                      default=False,
                      help=_('verifying a rule that also processes alerts'))
    parser.add_option('-c', '--conf_attr',
                      type='string',
                      action='store',
                      default=None,
                      help=_('verify the rule assuming these configuration attributes'))
    parser.add_option('-x', '--cref',
                      type='string',
                      action='store',
                      default=None,
                      help=_('if valid provide a cross reference of id usage'))
    (options, args) = parser.parse_args()
    #print options,args
    if len(args) < 1:
        print >> sys.stderr, _('rule file to process must be specified')
        sys.exit(1)
    result = 0
    # process metadata
    if options.metadata is not None:
        #print 'metadata ', options.metadata
        # Wipe out existing metadata
        registry.unregister_service(SERVICE_ALERT_METADATA)
        # Create new metadata
        alert_metadata = Metadata(META_TYPE_ALERT, [])
        registry.register_service(SERVICE_ALERT_METADATA, alert_metadata)
        # TODO: Currently this is relative to the data dir ...
        # should we change to make relative to where we are running?
        alert_metadata.add_files([options.metadata], use_data_dir=False)
    # # process location
    # if options.location is not None:
    #     print 'location ', options.location
    # process configuration entries of the form "key:value,key:value"
    if options.conf_attr is None:
        config_dict = None
    else:
        #print 'conf_attr ', options.conf_attr
        config_dict = {}
        for entry in options.conf_attr.split(','):
            key, value = entry.split(':')
            config_dict[key.strip()] = value.strip()
        #print config_dict
    # NOTE(review): '-a' stores into options.alert_input (dest='alert_input'),
    # but the module-level `alert_input = False` is what is passed below, so
    # the flag appears to have no effect — verify intended behaviour.
    try:
        rs = GearRuleset(args[0], config_dict=config_dict, event_input=True, alert_input=alert_input, name=str(args[0]), use_checkpoint=False)
        if options.cref is not None:
            rs.print_cross_ref()
    except XMLParsingError,e:
        print >> sys.stderr, e
        result = 1
    sys.exit(result)
| [
"psanders@riven.rchland.ibm.com"
] | psanders@riven.rchland.ibm.com | |
e52c8ee0663d70cbdeced042476008e6cc432727 | 6a42ddc432ee0a62cf52df21b9306f24177cc3f9 | /planet_prop.py | c1c1b53aeecfb841ef1b747c107f3bf55ad1352a | [] | no_license | jo276/MESAplanet | a42b9a27f35b7aa156466abbd67a56f187862563 | 4da81e113f6c6466597256aded6ff51e722abf1c | refs/heads/master | 2023-03-16T13:29:16.708571 | 2021-02-27T09:40:21 | 2021-02-27T09:40:21 | 293,260,759 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | # python script containing info and functions for planetary calculations
import math as mt
# constants — astrophysical values in CGS units (grams, centimetres, erg/s)
msun=1.9891e33 #grams
rsun=6.955e10 # cm
lsun=3.846e33 # erg/s
mjup=1.8986e30 # grams
rjup=6.9911e9 # cm
mearth=5.97219e27 # grams
rearth=6.371e8 # cm
mneptune=1.0243e29 #grams
rneptune=2.4622e9 # cm
au_to_cm=1.49597871e13 #cm
# NOTE(review): this module-level `a` is shadowed by the local `a` defined
# inside get_core_prop and appears unused in this module's visible code.
a=[0.0912, 0.0592]
def get_core_prop(Mass,Xiron,Xice):
    # uses the Fortney et al. 2007ab fitting formula
    # Mass is the core mass in Earth masses
    # returns prop = [radius, density]:
    #   prop[0] is the core radius in Earth radii
    #   prop[1] is the mean core density in cgs units (g/cm^3)
    # Xiron / Xice are the iron and ice mass fractions (mutually exclusive)
    #fitting constants: index 0 = ice/rock mix, index 1 = rock/iron mix
    a=[0.0912, 0.0592]
    b=[0.1603, 0.0975]
    c=[0.3330, 0.2337]
    d=[0.7387, 0.4938]
    e=[0.4639, 0.3102]
    f=[1.1193, 0.7932]
    prop=[0]*2
    Xrock=1.0-Xiron
    if Xice > 0.0:
        if Xiron > 0.0:
            # Invalid input: formula supports ice+rock OR rock+iron, not both.
            # NOTE(review): returns [0, 0] after printing — callers get zeros
            # silently; raising ValueError may be preferable.
            print("Error both ice and iron frac cannot be > 0")
            return prop;
        else:
            prop[0]=(a[0]*Xice+b[0])*(mt.log10(Mass))**2.0+(c[0]*Xice+d[0])*(mt.log10(Mass))+(e[0]*Xice+f[0])
    else:
        prop[0]=(a[1]*Xrock+b[1])*(mt.log10(Mass))**2.0+(c[1]*Xrock+d[1])*(mt.log10(Mass))+(e[1]*Xrock+f[1])
    # now calculate the mean density from the mass and the fitted radius
    prop[1]=Mass*mearth/(4.0/3.0*mt.pi*(prop[0]*rearth)**3)
    return prop;
| [
"james.owen@imperial.ac.uk"
] | james.owen@imperial.ac.uk |
83924ae8dfe91ebcbd4034577b0faf83e5a30402 | 6a2cc7f9e8e0cbcfa85e81272bd507c5226534af | /profile.py | 50c5db32803fc549822f107dfee1837780112b6e | [] | no_license | 20171CSE0726/pytax | 348d83465c1bac1c4c85eef47e91c31333d8a81d | 188f6ac0c1dcc395620b3acd2fa3c832cf3064b7 | refs/heads/master | 2021-02-17T13:18:56.105583 | 2018-11-04T18:31:48 | 2018-11-04T18:31:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | import taxjar as jar
client = jar.Client(api_key='36e5eb69d62562d468b07da2ef8252e4')
class TaxProfile:
    """A (state, city, zip) location that can look up its sales-tax rate
    via the module-level TaxJar client."""

    def __init__(self, state, city, zip_code):
        self.zip_code = zip_code
        self.state = state
        self.city = city

    def get_rate(self):
        """Fetch the TaxJar rate object for this profile's location."""
        location = {
            'city': self.city,
            'state': self.state
        }
        return client.rates_for_location(self.zip_code, location)

    def print_profile(self):
        """Print a one-line summary including the combined tax rate."""
        rates = self.get_rate()
        print("User is from {0}, {1} their zip code is {2} and their tax rate is %{3}"
              .format(self.city, self.state, self.zip_code, rates.combined_rate))
| [
"brennengreen@outlook.com"
] | brennengreen@outlook.com |
b11986b3974295a315c63bf1ec08b07e1e0e3087 | dde9442399c78414c05f7f36803c861638065ca3 | /Multidimensional-Lists-Exercise/Radioactive-Mutant-Vampire-Bunnies.py | a22c9f63fe0ef1c68063385ce0f936bf2bfc334d | [] | no_license | Vigyrious/python_advanced | 6778eed9e951b5a11b22f6c6d8ea5b160c3aa00d | 67db470e78b194aea1f9a35283d5a88b0f6ab94c | refs/heads/main | 2023-03-23T12:24:59.688699 | 2021-03-12T20:53:04 | 2021-03-12T20:53:04 | 347,192,305 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,396 | py | row, col = map(int, input().split(" "))
# SoftUni "Radioactive Mutant Vampire Bunnies": the player 'P' walks the
# matrix following the movement string; after each move every bunny 'B'
# spreads to its four neighbours.  Touching a bunny kills the player;
# stepping off the board wins.
matrix = []
[matrix.append(list(input())) for _ in range(row)]
movements = list(input())
# Locate the single starting player cell.
player_row, player_col = [[row_index,col_index] for row_index in range(row) for col_index in range(col) if matrix[row_index][col_index] == "P"][0]
is_dead = False
has_won = False
while not is_dead and not has_won:
    # Snapshot of bunny positions BEFORE this turn's move and spread, so
    # newly-spawned bunnies do not spread again in the same turn.
    bunnies = [[bunny_row, bunny_col] for bunny_row in range(row) for bunny_col in range(col) if matrix[bunny_row][bunny_col] == "B"]
    current_movement = movements.pop(0)
    if current_movement == "U":
        if player_row-1 in range(row):
            if matrix[player_row-1][player_col] == "B":
                # Walking straight into a bunny: player dies on that cell.
                player_row -= 1
                matrix[player_row][player_col] = "B"
                is_dead = True
            else:
                matrix[player_row][player_col] = "."
                matrix[player_row - 1][player_col] = "P"
                player_row -= 1
        else:
            # Moving off the board wins; clear the player's last cell.
            matrix[player_row][player_col] = "."
            has_won = True
    elif current_movement == "D":
        if player_row+1 in range(row):
            if matrix[player_row+1][player_col] == "B":
                player_row += 1
                matrix[player_row][player_col] = "B"
                is_dead = True
            else:
                matrix[player_row][player_col] = "."
                matrix[player_row + 1][player_col] = "P"
                player_row += 1
        else:
            matrix[player_row][player_col] = "."
            has_won = True
    elif current_movement == "L":
        if player_col-1 in range(col):
            if matrix[player_row][player_col - 1] == "B":
                player_col -= 1
                matrix[player_row][player_col] = "B"
                is_dead = True
            else:
                matrix[player_row][player_col] = "."
                matrix[player_row][player_col - 1] = "P"
                player_col -= 1
        else:
            matrix[player_row][player_col] = "."
            has_won = True
    elif current_movement == "R":
        if player_col+1 in range(col):
            if matrix[player_row][player_col + 1] == "B":
                player_col += 1
                matrix[player_row][player_col] = "B"
                is_dead = True
            else:
                matrix[player_row][player_col] = "."
                matrix[player_row][player_col + 1] = "P"
                player_col += 1
        else:
            matrix[player_row][player_col] = "."
            has_won = True
    # Spread phase: each snapshotted bunny infects its four neighbours;
    # overwriting 'P' kills the player.
    for bunny in bunnies:
        bunny_row, bunny_col = bunny
        if bunny_row+1 in range(row):
            if matrix[bunny_row+1][bunny_col] == "P":
                is_dead = True
            matrix[bunny_row + 1][bunny_col] = "B"
        if bunny_row-1 in range(row):
            if matrix[bunny_row-1][bunny_col] == "P":
                is_dead = True
            matrix[bunny_row - 1][bunny_col] = "B"
        if bunny_col + 1 in range(col):
            if matrix[bunny_row][bunny_col+1] == "P":
                is_dead = True
            matrix[bunny_row][bunny_col+1] = "B"
        if bunny_col - 1 in range(col):
            if matrix[bunny_row][bunny_col-1] == "P":
                is_dead = True
            matrix[bunny_row][bunny_col-1] = "B"
[print(''.join(sub)) for sub in matrix]
print(f"won: {player_row} {player_col}") if has_won else print(f"dead: {player_row} {player_col}") | [
"73179295+Vigyrious@users.noreply.github.com"
] | 73179295+Vigyrious@users.noreply.github.com |
72002c248848b5d46a7f14d3d0f222a47809859d | 455885bbf49a83ae3e31f20bbd1bd1b8c7185f0a | /data/xlreaderweeks.py | 5017579d846451067a8553ed3972fa395c28d7bb | [] | no_license | eivankin/pulkovo-flex | 6acb22847f3a8338f41aa6c3ec56c6e0526f6cc9 | 6400eda9f7d5a01e77949b9b3cdcc8543992f30b | refs/heads/master | 2022-12-09T05:40:16.762301 | 2020-08-25T17:32:59 | 2020-08-25T17:32:59 | 287,484,853 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py |
from glob import glob
import os
import pandas as pd
if __name__ == '__main__':
    # Collect every sheet of the weekly-schedule workbook into one list of
    # DataFrames, then re-emit them as sheets 'Year N' of ExcelWeeks.xlsx.
    paths = glob(os.path.dirname(os.path.abspath(__file__)) + '/Расписание по неделям.xlsx', recursive=True)
    bigdata = []
    for path in paths:
        xl = pd.ExcelFile(path)
        for sheet_name in xl.sheet_names:
            minidata = xl.parse(sheet_name=sheet_name)
            bigdata.append(minidata)
    # NOTE(review): the triple-quoted block below is dead docx-parsing code
    # kept as a bare string literal (it references an undefined Document);
    # consider deleting it.
    """
    document = Document(path)
    table = document.tables[0]
    dictionary = dict()
    collumns = []
    minidata = []
    for cell in table.rows[1].cells:
        dictionary.update({cell.text:''})
        collumns.append(cell.text)
        minidata.append([])
    for row in enumerate(start=row[0], iterable=table.rows[2:]):
        for cell in enumerate(row[1].cells):
            minidata[cell[0]].append(cell[1].text)
    for col in enumerate(minidata):
        dictionary.update({collumns[col[0]]:tuple(col[1])})
    frame = pd.DataFrame(dictionary)
    bigdata.append(frame)
    """
    Excel = pd.ExcelWriter('ExcelWeeks.xlsx')
    for sheet_id in range(len(bigdata)):
        bigdata[sheet_id].to_excel(excel_writer=Excel ,sheet_name='Year '+ str(sheet_id))
Excel.save() | [
"69670642+DanteTemplar@users.noreply.github.com"
] | 69670642+DanteTemplar@users.noreply.github.com |
8d999821adab2f90c1385a6dd5e14875c3fc2216 | f085eeb8315b310032d93756f1fc60cb3b9539c4 | /Compare_Boolean/1.py | f02d6b8e0385cb12bc6ae489cd1330c689d3fd81 | [] | no_license | dongho108/python-ruby | 0fe538d8c70afe66bff256aecd33bf6bf306f6e4 | 158c34063fc8415310b27134994b329e62318728 | refs/heads/master | 2021-01-05T03:13:53.636301 | 2020-03-05T11:36:11 | 2020-03-05T11:36:11 | 240,858,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | print(1==1)
# Comparison operators evaluate to booleans; True/False literals print as-is.
print(1==2)
print(1>2)
print(True)
print(False)
| [
"noreply@github.com"
] | noreply@github.com |
3cb3be8a872fd8a7f21d7372025d4bd859d75b2a | d594ae226c00f78259520e5c9f4b7872c050359c | /day18/demo06.py | eb782b904bdf108af3f68322d0d0fb1cf5bf6982 | [] | no_license | mangodayup/month01-resource | abebc13827498b96257f83387f6d205f8f5c7c04 | b194de4a6affc651c5a631970adc02429f0b2b5c | refs/heads/master | 2023-03-11T13:58:05.920374 | 2021-03-02T09:27:25 | 2021-03-02T09:27:25 | 343,714,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | """
装饰器-使用方式
返回值
将旧功能返回值作为内函数的返回值
"""
def new_func(func):
    """Decorator: print a banner, then run *func* and pass its result through."""
    def wrapper():
        print("新功能1")
        # Forward the wrapped function's return value unchanged.
        return func()
    return wrapper


@new_func
def func01():
    print("功能1")
    return 100


@new_func
def func02():
    print("功能2")
    return 200


print(func01())  # the call is dispatched through wrapper(), the inner function
print(func02())
| [
"chenjingru@chenjingrudeMacBook-Pro.local"
] | chenjingru@chenjingrudeMacBook-Pro.local |
150e94de46fd36d8894916a2e55dd739f19740e3 | 7105658942c1fc03b2540f37f099e8e55c6ded85 | /28.implement-strstr.py | 08845cf34a071bfadeaecad2e683d50e7bf09338 | [] | no_license | luyao777/leetcode_python | a2c60f3df4688e8bd0209553d834fa68e1e0dc62 | 7df7bd1a6cb0c8590684f8600414fdcc9f0b8070 | refs/heads/master | 2021-07-04T17:11:46.932373 | 2020-09-21T10:03:20 | 2020-09-21T10:03:20 | 172,448,639 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | #
# @lc app=leetcode.cn id=28 lang=python3
#
# [28] 实现strStr()
#
# https://leetcode-cn.com/problems/implement-strstr/description/
#
# algorithms
# Easy (37.47%)
# Total Accepted: 35.1K
# Total Submissions: 93.7K
# Testcase Example: '"hello"\n"ll"'
#
# 实现 strStr() 函数。
#
# 给定一个 haystack 字符串和一个 needle 字符串,在 haystack 字符串中找出 needle 字符串出现的第一个位置
# (从0开始)。如果不存在,则返回 -1。
#
# 示例 1:
#
# 输入: haystack = "hello", needle = "ll"
# 输出: 2
#
#
# 示例 2:
#
# 输入: haystack = "aaaaa", needle = "bba"
# 输出: -1
#
#
# 说明:
#
# 当 needle 是空字符串时,我们应当返回什么值呢?这是一个在面试中很好的问题。
#
# 对于本题而言,当 needle 是空字符串时我们应当返回 0 。这与C语言的 strstr() 以及 Java的 indexOf() 定义相符。
#
#
class Solution:
    def strStr(self, haystack, needle):
        """Return the index of the first occurrence of *needle* in
        *haystack*, or -1 when absent.  An empty needle matches at 0.

        :type haystack: str
        :type needle: str
        :rtype: int
        """
        # str.find already returns 0 for an empty needle; the guard just
        # makes the contract explicit.
        return 0 if needle == "" else haystack.find(needle)
| [
"luyao777@vip.qq.com"
] | luyao777@vip.qq.com |
2439b71785d57c53f36a960340c033738e810909 | 198778d08e008e75407bb10bb01e49899311a6d6 | /utils/liquid_nitrogen.py | 04ceee00239872661af4ba716476d68901d1c157 | [] | no_license | kate-v-stepanova/B250_lab_monitor | febf6934a016e0ac1d05e061a57bb6320432aa85 | c2ab87999fdcfb13da091abbe7609eac526364fd | refs/heads/master | 2021-11-15T05:44:37.417199 | 2021-09-27T15:42:47 | 2021-09-27T15:42:47 | 186,001,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,710 | py | import os
import click
import pandas as pd
import redis
import json
# Redis host candidates.  The second remote_host assignment overrides the
# first, so only the "windows" address ever takes effect.
localhost = "172.22.24.88" # local
remote_host = "172.22.25.100" # remote
remote_host = "172.22.54.5" # windows
home = "192.168.0.157"
# NOTE(review): `home` and `port` are not referenced in this module's visible
# code; StrictRedis below is created without an explicit port.
port = "6379"
@click.group()
def cli():
    # Top-level click command group; subcommands register via @cli.command().
    # do nothing
    pass
def rows_from_range(row):
    """Expand a location row spanning slots x1..x2 into one row per slot.

    *row* must carry 'x1'/'x2' bounds and an 'x' field.  A copy of the row
    is emitted for every x in [x1, x2] with 'x' set to that slot; an empty
    DataFrame with the same columns is returned for an empty range.

    Unlike the original, this does not mutate the caller's *row* and avoids
    DataFrame.append, which was removed in pandas 2.0 (and was quadratic).
    """
    expanded = []
    if row['x1'] is not None:
        for i in range(row['x1'], row['x2'] + 1):
            current = row.copy()
            current['x'] = i
            expanded.append(current)
    if not expanded:
        return pd.DataFrame(columns=row.index)
    return pd.DataFrame(expanded, columns=row.index)
@cli.command()
@click.option('--remote/--local', default=False)
@click.option('--host', '-h', 'host')
@click.argument('filename')
@click.argument('tower', required=False) # example: tower7
def upload(remote, host, filename, tower=None):
    """Parse a semicolon-separated inventory export and push it into Redis.

    Stores the de-duplicated cell-line table under 'cell_lines' and one
    expanded location table per tower (keyed 'towerN'), registering each
    tower name in the 'towers' set.
    """
    if os.path.isfile(filename):
        df = pd.read_csv(filename, sep=";")
        # (removed leftover `import pdb; pdb.set_trace()` debug breakpoint)
        df = df.drop(['Unnamed: 4'], axis=1)
        if tower is None:
            # No explicit tower given: derive the tower list from the data.
            towers = ['tower{}'.format(tower) for tower in df['Tower'].dropna().astype(int).unique().tolist()]
        else:
            towers = [tower]
        # df1 = df.drop(['Rack', 'Drawer', 'Position', 'passage no.', 'Unnamed: 16', 'Date', 'Responsible person',
        #                'Comments'], axis='columns')
        # Cell-line table: one row per unique ID, location columns stripped.
        df1 = df.drop(['Rack', 'Position', 'Tower', 'Date', 'Responsible person', 'Comments'], axis='columns')
        df1 = df1.fillna('')
        df1 = df1.drop_duplicates('ID')
        data = df1.to_dict('list')
        data = json.dumps(data)
        if host is None:
            host = remote_host if remote else localhost
        rdb = redis.StrictRedis(host)
        rdb.set('cell_lines', data)
        # locations: split 'Position' like 'A3-5' into row letter y and slots x1..x2
        df2 = df[['ID', 'Rack', 'Position', 'Date', 'Responsible person', 'Comments']]
        pos = df2['Position'].str.split('-', expand=True)
        y = pos[0].str[0] # e.g. A, B, C...
        x1 = pos[0].str[1:] # first slot of the range
        x2 = pos[1]
        df2['y'] = y.fillna('')
        df2['x1'] = x1.fillna(0).astype(int)
        df2['x2'] = x2.fillna(0).astype(int)
        df2['x'] = 0
        for tower in towers:
            # Expand every position range into one row per slot.  Collect the
            # pieces and concatenate once: DataFrame.append was removed in
            # pandas 2.0 and the repeated-append loop was quadratic anyway.
            pieces = [rows_from_range(row) for i, row in df2.iterrows()]
            if pieces:
                df3 = pd.concat(pieces)
            else:
                df3 = pd.DataFrame(columns=df2.columns)
            df3 = df3.drop(['x1', 'x2', 'Position'], axis='columns')
            df3['Rack'] = df3['Rack'].fillna(0)
            df3['Rack'] = df3['Rack'].astype(int)
            df3['pos'] = df3['y'].astype(str) + df3['x'].astype(str)
            data = df3.to_dict('list')
            data = json.dumps(data)
            rdb.set(tower, data)
            rdb.sadd('towers', tower)
    else:
        print("File does not exist? {}".format(filename))
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()
| [
"kate-v-stepanova@github.com"
] | kate-v-stepanova@github.com |
d81ce2cfdfb74c318af7a42b753a9cef7c42c4b3 | eef542a3d8eddf7f9e496817f2346179ef45ff4b | /utils/__init__.py | 7b36f56ec2c5aeca576045143a222496527edca5 | [
"BSD-3-Clause"
] | permissive | cwheel/gather | 1b82b3fd421e209b7f384fef35074cc326911891 | b6eab474a0550f008a243cd5add074bff04086b4 | refs/heads/master | 2021-01-19T14:00:44.832430 | 2017-08-20T17:47:44 | 2017-08-20T17:47:44 | 100,875,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | import jsonPath
import resolve
| [
"cameron@scelos.com"
] | cameron@scelos.com |
7e7b3fe43e3b8ca055aef0f1d5fb529379d0015f | 49bf93024f1a4f501a627e1fe965fb99477de779 | /main.py | e6d42edfaa5ca1f52d92dba19f097cc6bc23c1b6 | [] | no_license | Nazmus-Sakib-1987/git-tutorial | 88a016176dbff7936d9e5d082c853b0e29f7ad80 | a33c709da6d7066cbe4049404d4f32425abe3815 | refs/heads/master | 2020-05-24T01:47:13.475718 | 2019-05-16T15:38:42 | 2019-05-16T15:38:42 | 187,041,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | day = 'friday'
if (day == 'friday')
print("Hell") | [
"sakib@tb-bd.com"
] | sakib@tb-bd.com |
d807abdc220d8649a4f546bf8715b4be597aec77 | 5a71ca1f5c964f803350e3c1238cb48986db565c | /coinlibbitfinex/tests/test_bitfinex_streamapi.py | 25331ab7bbc5c1144c8083305e500db7203b9b85 | [] | no_license | tetocode/coinliball | fd644cbc16039ecad7e43228ea4e287ead5c8e5f | 41ebbac13c1fbba98aedaa766b9a505cb157f374 | refs/heads/master | 2022-09-28T21:58:08.130006 | 2020-06-04T03:00:56 | 2020-06-04T03:00:56 | 269,247,318 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | import time
from queue import Queue, Empty
import pytest
from coinlib.datatypes.streamdata import StreamData
from coinlibbitbankcc.streamapi import StreamApi
WAIT = 3  # seconds to wait for each queued message
N = 10  # number of messages sampled per phase
def test_subscribe(stream_api: StreamApi):
    """Exercise subscribe/unsubscribe on two order-book channels.

    NOTE(review): the StreamApi fixture type is imported from
    coinlibbitbankcc although this test lives under coinlibbitfinex —
    confirm the intended module.
    """
    xrp_usd_params = {
        'event': 'subscribe',
        'channel': 'book',
        'pair': 'XRPUSD',
        'prec': 'P0',
        'freq': 'F0',
        'len': '25',
    }
    xrp_btc_params = xrp_usd_params.copy()
    xrp_btc_params['pair'] = 'XRPBTC'
    q = Queue()
    # Funnel every raw stream message into a thread-safe queue.
    stream_api.on_raw_data = q.put
    stream_api.subscribe(('xrp_usd', xrp_usd_params))
    stream_api.subscribe(('xrp_btc', xrp_btc_params))
    keys = set()
    time.sleep(1)
    for _ in range(N):
        d: StreamData = q.get(timeout=WAIT)
        k = d.key
        keys.add(k)
    assert keys == {'xrp_usd', 'xrp_btc'}
    stream_api.unsubscribe('xrp_usd')
    time.sleep(1)
    # Drain messages that were already in flight before the unsubscribe.
    for _ in range(q.qsize() + N):
        q.get(timeout=WAIT)
    keys = set()
    for _ in range(q.qsize() + N):
        d = q.get(timeout=WAIT)
        k = d.key
        keys.add(k)
    assert keys == {'xrp_btc'}
    stream_api.unsubscribe('xrp_btc')
    # With both channels unsubscribed the queue must eventually stay empty.
    with pytest.raises(Empty):
        for _ in range(q.qsize() + N):
            q.get(timeout=WAIT)
    # re-subscribe
    stream_api.subscribe(('xrp_usd', xrp_usd_params), ('xrp_btc', xrp_btc_params))
    keys = set()
    for _ in range(N):
        d = q.get(timeout=WAIT)
        k = d.key
        keys.add(k)
    assert keys == {'xrp_usd', 'xrp_btc'}
| [
"_"
] | _ |
48e1ea0a076abcf635500cd6452dcecf62b3ab6f | 65a18a1aec95c4691e7a35d5005e4bdc63994f0c | /node_modules/socket.io-client/node_modules/engine.io-client/node_modules/ws/node_modules/utf-8-validate/build/config.gypi | efecf0382afd8201b747d76c0df1ec8737899a8c | [
"MIT"
] | permissive | lizine/voting-client | a51e7d59f114c2012516df161ee9318080ee8b40 | 14bd0e931271f264ba578d7f617172c34e6f48da | refs/heads/master | 2021-01-10T08:11:37.926410 | 2015-11-09T15:08:27 | 2015-11-09T15:08:27 | 45,112,566 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,714 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"gas_version": "2.23",
"host_arch": "x64",
"icu_data_file": "icudt56l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt56l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "56",
"node_byteorder": "little",
"node_install_npm": "true",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"python": "/home/iojs/bin/python",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"nodedir": "/home/nina/.node-gyp/4.2.1",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/2.14.7 node/v4.2.1 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"userconfig": "/home/nina/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr/local",
"browser": "",
"cache_lock_wait": "10000",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"scope": "",
"searchopts": "",
"versions": "",
"cache": "/home/nina/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "0002",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "true",
"access": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "4.2.1",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/nina/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": "",
"spin": "true"
}
}
| [
"nina.liljeblad@gmail.com"
] | nina.liljeblad@gmail.com |
36c5182c5a293eab5a8bde749d483b6198bb717a | 75ba9b6d2f700061b936666cbc5c2db03c7712f2 | /p45_triangular_pentagonal_hexagonal.py | 3c4397b72ac819f2c0e1932734bbae99f310a847 | [] | no_license | yansenkeler/pyApp | 9f401a14fe3c10fd54b8bbc424c09d05130bf8a4 | 2666767b6c3d6a3a222cfbe27a2d2daad3f2d4f0 | refs/heads/master | 2021-01-12T10:20:58.380209 | 2016-12-21T01:50:48 | 2016-12-21T01:50:48 | 76,427,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | # Triangle, pentagonal, and hexagonal numbers are generated by the following formulae:
# Triangle Tn=n(n+1)/2 1, 3, 6, 10, 15, ...
# Pentagonal Pn=n(3n−1)/2 1, 5, 12, 22, 35, ...
# Hexagonal Hn=n(2n−1) 1, 6, 15, 28, 45, ...
# It can be verified that T285 = P165 = H143 = 40755.
# Find the next triangle number that is also pentagonal and hexagonal.
import time
import tools
start_time = time.time()
start_number = 286
flag = True
while flag:
t_number = int(start_number * (start_number + 1) / 2)
if tools.is_pentagon_number(t_number) and tools.is_hexagonal_number(t_number):
print(start_number, t_number)
flag = False
start_number += 1
print('result is ', '')
print('total time is ', time.time() - start_time, 'ms') | [
"qianyuxinjustone@gmail.com"
] | qianyuxinjustone@gmail.com |
369add1f2e8ed2f7a86b91b166f88feef21733e3 | 63b79eb44cf682ece74be1fc866f7651837db448 | /powerplay/models/game_content_media.py | cc2654fda4508741a4901f39caab2b020b8b674c | [] | no_license | bclark86/powerplay-py | c8cc4df8acd9ada91299706b7a7113ab9c963645 | 584d754629936a93d95157356ff806a5c68438dc | refs/heads/main | 2023-07-19T04:23:16.510338 | 2021-09-02T13:17:12 | 2021-09-02T13:17:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,861 | py | # coding: utf-8
"""
NHL API
Documenting the publicly accessible portions of the NHL API. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GameContentMedia(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'epg': 'list[AnyOfGameContentMediaEpgItems]',
'milestones': 'GameContentMediaMilestones'
}
attribute_map = {
'epg': 'epg',
'milestones': 'milestones'
}
def __init__(self, epg=None, milestones=None): # noqa: E501
"""GameContentMedia - a model defined in Swagger""" # noqa: E501
self._epg = None
self._milestones = None
self.discriminator = None
if epg is not None:
self.epg = epg
if milestones is not None:
self.milestones = milestones
@property
def epg(self):
"""Gets the epg of this GameContentMedia. # noqa: E501
:return: The epg of this GameContentMedia. # noqa: E501
:rtype: list[AnyOfGameContentMediaEpgItems]
"""
return self._epg
@epg.setter
def epg(self, epg):
"""Sets the epg of this GameContentMedia.
:param epg: The epg of this GameContentMedia. # noqa: E501
:type: list[AnyOfGameContentMediaEpgItems]
"""
self._epg = epg
@property
def milestones(self):
"""Gets the milestones of this GameContentMedia. # noqa: E501
:return: The milestones of this GameContentMedia. # noqa: E501
:rtype: GameContentMediaMilestones
"""
return self._milestones
@milestones.setter
def milestones(self, milestones):
"""Sets the milestones of this GameContentMedia.
:param milestones: The milestones of this GameContentMedia. # noqa: E501
:type: GameContentMediaMilestones
"""
self._milestones = milestones
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GameContentMedia, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GameContentMedia):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"saiem.gilani@gmail.com"
] | saiem.gilani@gmail.com |
b221bd8d3a7663132e3024e8df731bbba6327505 | eb1ff39da7264fb48274bb6f21d894317e69f499 | /TLS_Extended_Master_Checker.py | 66d3a958fe85a378677f82d3d613a1e64045d0ff | [
"Python-2.0"
] | permissive | princeofdarkness76/TLS_Extended_Master_Checker | fb0fe511a5cefcbdd20eaca98279c67c956d4bdf | 7a78dde5d5898ca8a29bf2a41d9184741f305459 | refs/heads/master | 2017-12-02T21:40:11.490326 | 2015-11-11T01:17:35 | 2015-11-11T01:17:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,914 | py | #!/usr/bin/env python
import sys
import socket
import time
import struct
strTitle = "Tripwire VERT TLS Triple Handshake Detection Tool (Extended Master Secret Extension Checker) v0.1"
if len(sys.argv)<2:
print "%s by Tripwire VERT (@TripwireVERT)\nUsage: %s <host> [port=443]" % (strTitle, sys.argv[0])
quit()
else:
strHost = sys.argv[1]
if len(sys.argv)>2:
try:
iPort = int(sys.argv[2])
except:
print "%s\nUsage: %s <host> [port=443]" % (strTitle,sys.argv[0])
quit()
else:
iPort = 443
print "***%s***\nBrought to you by Tripwire VERT (@TripwireVERT)" % (strTitle)
dSSL = {
"SSLv3" : "\x03\x00",
"TLSv1" : "\x03\x01",
"TLSv1.1" : "\x03\x02",
"TLSv1.2" : "\x03\x03",
}
# The following is a complete list of ciphers for the SSLv3 family up to TLSv1.2
ssl3_cipher = dict()
ssl3_cipher['\x00\x00'] = "TLS_NULL_WITH_NULL_NULL"
ssl3_cipher['\x00\x01'] = "TLS_RSA_WITH_NULL_MD5"
ssl3_cipher['\x00\x02'] = "TLS_RSA_WITH_NULL_SHA"
ssl3_cipher['\x00\x03'] = "TLS_RSA_EXPORT_WITH_RC4_40_MD5"
ssl3_cipher['\x00\x04'] = "TLS_RSA_WITH_RC4_128_MD5"
ssl3_cipher['\x00\x05'] = "TLS_RSA_WITH_RC4_128_SHA"
ssl3_cipher['\x00\x06'] = "TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5"
ssl3_cipher['\x00\x07'] = "TLS_RSA_WITH_IDEA_CBC_SHA"
ssl3_cipher['\x00\x08'] = "TLS_RSA_EXPORT_WITH_DES40_CBC_SHA"
ssl3_cipher['\x00\x09'] = "TLS_RSA_WITH_DES_CBC_SHA"
ssl3_cipher['\x00\x0a'] = "TLS_RSA_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\x00\x0b'] = "TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA"
ssl3_cipher['\x00\x0c'] = "TLS_DH_DSS_WITH_DES_CBC_SHA"
ssl3_cipher['\x00\x0d'] = "TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\x00\x0e'] = "TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA"
ssl3_cipher['\x00\x0f'] = "TLS_DH_RSA_WITH_DES_CBC_SHA"
ssl3_cipher['\x00\x10'] = "TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\x00\x11'] = "TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA"
ssl3_cipher['\x00\x12'] = "TLS_DHE_DSS_WITH_DES_CBC_SHA"
ssl3_cipher['\x00\x13'] = "TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\x00\x14'] = "TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA"
ssl3_cipher['\x00\x15'] = "TLS_DHE_RSA_WITH_DES_CBC_SHA"
ssl3_cipher['\x00\x16'] = "TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\x00\x17'] = "TLS_DH_anon_EXPORT_WITH_RC4_40_MD5"
ssl3_cipher['\x00\x18'] = "TLS_DH_anon_WITH_RC4_128_MD5"
ssl3_cipher['\x00\x19'] = "TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA"
ssl3_cipher['\x00\x1a'] = "TLS_DH_anon_WITH_DES_CBC_SHA"
ssl3_cipher['\x00\x1b'] = "TLS_DH_anon_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\x00\x1c'] = "SSL_FORTEZZA_KEA_WITH_NULL_SHA"
ssl3_cipher['\x00\x1d'] = "SSL_FORTEZZA_KEA_WITH_FORTEZZA_CBC_SHA"
ssl3_cipher['\x00\x1e'] = "SSL_FORTEZZA_KEA_WITH_RC4_128_SHA"
ssl3_cipher['\x00\x1E'] = "TLS_KRB5_WITH_DES_CBC_SHA"
ssl3_cipher['\x00\x1F'] = "TLS_KRB5_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\x00\x20'] = "TLS_KRB5_WITH_RC4_128_SHA"
ssl3_cipher['\x00\x21'] = "TLS_KRB5_WITH_IDEA_CBC_SHA"
ssl3_cipher['\x00\x22'] = "TLS_KRB5_WITH_DES_CBC_MD5"
ssl3_cipher['\x00\x23'] = "TLS_KRB5_WITH_3DES_EDE_CBC_MD5"
ssl3_cipher['\x00\x24'] = "TLS_KRB5_WITH_RC4_128_MD5"
ssl3_cipher['\x00\x25'] = "TLS_KRB5_WITH_IDEA_CBC_MD5"
ssl3_cipher['\x00\x26'] = "TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA"
ssl3_cipher['\x00\x27'] = "TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA"
ssl3_cipher['\x00\x28'] = "TLS_KRB5_EXPORT_WITH_RC4_40_SHA"
ssl3_cipher['\x00\x29'] = "TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5"
ssl3_cipher['\x00\x2A'] = "TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5"
ssl3_cipher['\x00\x2B'] = "TLS_KRB5_EXPORT_WITH_RC4_40_MD5"
ssl3_cipher['\x00\x2C'] = "TLS_PSK_WITH_NULL_SHA"
ssl3_cipher['\x00\x2D'] = "TLS_DHE_PSK_WITH_NULL_SHA"
ssl3_cipher['\x00\x2E'] = "TLS_RSA_PSK_WITH_NULL_SHA"
ssl3_cipher['\x00\x2F'] = "TLS_RSA_WITH_AES_128_CBC_SHA"
ssl3_cipher['\x00\x30'] = "TLS_DH_DSS_WITH_AES_128_CBC_SHA"
ssl3_cipher['\x00\x31'] = "TLS_DH_RSA_WITH_AES_128_CBC_SHA"
ssl3_cipher['\x00\x32'] = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA"
ssl3_cipher['\x00\x33'] = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA"
ssl3_cipher['\x00\x34'] = "TLS_DH_anon_WITH_AES_128_CBC_SHA"
ssl3_cipher['\x00\x35'] = "TLS_RSA_WITH_AES_256_CBC_SHA"
ssl3_cipher['\x00\x36'] = "TLS_DH_DSS_WITH_AES_256_CBC_SHA"
ssl3_cipher['\x00\x37'] = "TLS_DH_RSA_WITH_AES_256_CBC_SHA"
ssl3_cipher['\x00\x38'] = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA"
ssl3_cipher['\x00\x39'] = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA"
ssl3_cipher['\x00\x3A'] = "TLS_DH_anon_WITH_AES_256_CBC_SHA"
ssl3_cipher['\x00\x3B'] = "TLS_RSA_WITH_NULL_SHA256"
ssl3_cipher['\x00\x3C'] = "TLS_RSA_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\x00\x3D'] = "TLS_RSA_WITH_AES_256_CBC_SHA256"
ssl3_cipher['\x00\x3E'] = "TLS_DH_DSS_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\x00\x3F'] = "TLS_DH_RSA_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\x00\x40'] = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\x00\x41'] = "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA"
ssl3_cipher['\x00\x42'] = "TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA"
ssl3_cipher['\x00\x43'] = "TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA"
ssl3_cipher['\x00\x44'] = "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA"
ssl3_cipher['\x00\x45'] = "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA"
ssl3_cipher['\x00\x46'] = "TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA"
ssl3_cipher['\x00\x60'] = "TLS_RSA_EXPORT1024_WITH_RC4_56_MD5"
ssl3_cipher['\x00\x61'] = "TLS_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5"
ssl3_cipher['\x00\x62'] = "TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA"
ssl3_cipher['\x00\x63'] = "TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA"
ssl3_cipher['\x00\x64'] = "TLS_RSA_EXPORT1024_WITH_RC4_56_SHA"
ssl3_cipher['\x00\x65'] = "TLS_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA"
ssl3_cipher['\x00\x66'] = "TLS_DHE_DSS_WITH_RC4_128_SHA"
ssl3_cipher['\x00\x67'] = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\x00\x68'] = "TLS_DH_DSS_WITH_AES_256_CBC_SHA256"
ssl3_cipher['\x00\x69'] = "TLS_DH_RSA_WITH_AES_256_CBC_SHA256"
ssl3_cipher['\x00\x6A'] = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256"
ssl3_cipher['\x00\x6B'] = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256"
ssl3_cipher['\x00\x6C'] = "TLS_DH_anon_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\x00\x6D'] = "TLS_DH_anon_WITH_AES_256_CBC_SHA256"
ssl3_cipher['\x00\x80'] = "TLS_GOSTR341094_WITH_28147_CNT_IMIT"
ssl3_cipher['\x00\x81'] = "TLS_GOSTR341001_WITH_28147_CNT_IMIT"
ssl3_cipher['\x00\x82'] = "TLS_GOSTR341094_WITH_NULL_GOSTR3411"
ssl3_cipher['\x00\x83'] = "TLS_GOSTR341001_WITH_NULL_GOSTR3411"
ssl3_cipher['\x00\x84'] = "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA"
ssl3_cipher['\x00\x85'] = "TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA"
ssl3_cipher['\x00\x86'] = "TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA"
ssl3_cipher['\x00\x87'] = "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA"
ssl3_cipher['\x00\x88'] = "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA"
ssl3_cipher['\x00\x89'] = "TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA"
ssl3_cipher['\x00\x8A'] = "TLS_PSK_WITH_RC4_128_SHA"
ssl3_cipher['\x00\x8B'] = "TLS_PSK_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\x00\x8C'] = "TLS_PSK_WITH_AES_128_CBC_SHA"
ssl3_cipher['\x00\x8D'] = "TLS_PSK_WITH_AES_256_CBC_SHA"
ssl3_cipher['\x00\x8E'] = "TLS_DHE_PSK_WITH_RC4_128_SHA"
ssl3_cipher['\x00\x8F'] = "TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\x00\x90'] = "TLS_DHE_PSK_WITH_AES_128_CBC_SHA"
ssl3_cipher['\x00\x91'] = "TLS_DHE_PSK_WITH_AES_256_CBC_SHA"
ssl3_cipher['\x00\x92'] = "TLS_RSA_PSK_WITH_RC4_128_SHA"
ssl3_cipher['\x00\x93'] = "TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\x00\x94'] = "TLS_RSA_PSK_WITH_AES_128_CBC_SHA"
ssl3_cipher['\x00\x95'] = "TLS_RSA_PSK_WITH_AES_256_CBC_SHA"
ssl3_cipher['\x00\x96'] = "TLS_RSA_WITH_SEED_CBC_SHA"
ssl3_cipher['\x00\x97'] = "TLS_DH_DSS_WITH_SEED_CBC_SHA"
ssl3_cipher['\x00\x98'] = "TLS_DH_RSA_WITH_SEED_CBC_SHA"
ssl3_cipher['\x00\x99'] = "TLS_DHE_DSS_WITH_SEED_CBC_SHA"
ssl3_cipher['\x00\x9A'] = "TLS_DHE_RSA_WITH_SEED_CBC_SHA"
ssl3_cipher['\x00\x9B'] = "TLS_DH_anon_WITH_SEED_CBC_SHA"
ssl3_cipher['\x00\x9C'] = "TLS_RSA_WITH_AES_128_GCM_SHA256"
ssl3_cipher['\x00\x9D'] = "TLS_RSA_WITH_AES_256_GCM_SHA384"
ssl3_cipher['\x00\x9E'] = "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
ssl3_cipher['\x00\x9F'] = "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"
ssl3_cipher['\x00\xA0'] = "TLS_DH_RSA_WITH_AES_128_GCM_SHA256"
ssl3_cipher['\x00\xA1'] = "TLS_DH_RSA_WITH_AES_256_GCM_SHA384"
ssl3_cipher['\x00\xA2'] = "TLS_DHE_DSS_WITH_AES_128_GCM_SHA256"
ssl3_cipher['\x00\xA3'] = "TLS_DHE_DSS_WITH_AES_256_GCM_SHA384"
ssl3_cipher['\x00\xA4'] = "TLS_DH_DSS_WITH_AES_128_GCM_SHA256"
ssl3_cipher['\x00\xA5'] = "TLS_DH_DSS_WITH_AES_256_GCM_SHA384"
ssl3_cipher['\x00\xA6'] = "TLS_DH_anon_WITH_AES_128_GCM_SHA256"
ssl3_cipher['\x00\xA7'] = "TLS_DH_anon_WITH_AES_256_GCM_SHA384"
ssl3_cipher['\x00\xA8'] = "TLS_PSK_WITH_AES_128_GCM_SHA256"
ssl3_cipher['\x00\xA9'] = "TLS_PSK_WITH_AES_256_GCM_SHA384"
ssl3_cipher['\x00\xAA'] = "TLS_DHE_PSK_WITH_AES_128_GCM_SHA256"
ssl3_cipher['\x00\xAB'] = "TLS_DHE_PSK_WITH_AES_256_GCM_SHA384"
ssl3_cipher['\x00\xAC'] = "TLS_RSA_PSK_WITH_AES_128_GCM_SHA256"
ssl3_cipher['\x00\xAD'] = "TLS_RSA_PSK_WITH_AES_256_GCM_SHA384"
ssl3_cipher['\x00\xAE'] = "TLS_PSK_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\x00\xAF'] = "TLS_PSK_WITH_AES_256_CBC_SHA384"
ssl3_cipher['\x00\xB0'] = "TLS_PSK_WITH_NULL_SHA256"
ssl3_cipher['\x00\xB1'] = "TLS_PSK_WITH_NULL_SHA384"
ssl3_cipher['\x00\xB2'] = "TLS_DHE_PSK_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\x00\xB3'] = "TLS_DHE_PSK_WITH_AES_256_CBC_SHA384"
ssl3_cipher['\x00\xB4'] = "TLS_DHE_PSK_WITH_NULL_SHA256"
ssl3_cipher['\x00\xB5'] = "TLS_DHE_PSK_WITH_NULL_SHA384"
ssl3_cipher['\x00\xB6'] = "TLS_RSA_PSK_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\x00\xB7'] = "TLS_RSA_PSK_WITH_AES_256_CBC_SHA384"
ssl3_cipher['\x00\xB8'] = "TLS_RSA_PSK_WITH_NULL_SHA256"
ssl3_cipher['\x00\xB9'] = "TLS_RSA_PSK_WITH_NULL_SHA384"
ssl3_cipher['\x00\xBA'] = "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256"
ssl3_cipher['\x00\xBB'] = "TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256"
ssl3_cipher['\x00\xBC'] = "TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256"
ssl3_cipher['\x00\xBD'] = "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256"
ssl3_cipher['\x00\xBE'] = "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256"
ssl3_cipher['\x00\xBF'] = "TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256"
ssl3_cipher['\x00\xC0'] = "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256"
ssl3_cipher['\x00\xC1'] = "TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256"
ssl3_cipher['\x00\xC2'] = "TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256"
ssl3_cipher['\x00\xC3'] = "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256"
ssl3_cipher['\x00\xC4'] = "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256"
ssl3_cipher['\x00\xC5'] = "TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256"
ssl3_cipher['\x00\x00'] = "TLS_EMPTY_RENEGOTIATION_INFO_SCSV"
ssl3_cipher['\xc0\x01'] = "TLS_ECDH_ECDSA_WITH_NULL_SHA"
ssl3_cipher['\xc0\x02'] = "TLS_ECDH_ECDSA_WITH_RC4_128_SHA"
ssl3_cipher['\xc0\x03'] = "TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\xc0\x04'] = "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA"
ssl3_cipher['\xc0\x05'] = "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA"
ssl3_cipher['\xc0\x06'] = "TLS_ECDHE_ECDSA_WITH_NULL_SHA"
ssl3_cipher['\xc0\x07'] = "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA"
ssl3_cipher['\xc0\x08'] = "TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\xc0\x09'] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"
ssl3_cipher['\xc0\x0a'] = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA"
ssl3_cipher['\xc0\x0b'] = "TLS_ECDH_RSA_WITH_NULL_SHA"
ssl3_cipher['\xc0\x0c'] = "TLS_ECDH_RSA_WITH_RC4_128_SHA"
ssl3_cipher['\xc0\x0d'] = "TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\xc0\x0e'] = "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA"
ssl3_cipher['\xc0\x0f'] = "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA"
ssl3_cipher['\xc0\x10'] = "TLS_ECDHE_RSA_WITH_NULL_SHA"
ssl3_cipher['\xc0\x11'] = "TLS_ECDHE_RSA_WITH_RC4_128_SHA"
ssl3_cipher['\xc0\x12'] = "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\xc0\x13'] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"
ssl3_cipher['\xc0\x14'] = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"
ssl3_cipher['\xc0\x15'] = "TLS_ECDH_anon_WITH_NULL_SHA"
ssl3_cipher['\xc0\x16'] = "TLS_ECDH_anon_WITH_RC4_128_SHA"
ssl3_cipher['\xc0\x17'] = "TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\xc0\x18'] = "TLS_ECDH_anon_WITH_AES_128_CBC_SHA"
ssl3_cipher['\xc0\x19'] = "TLS_ECDH_anon_WITH_AES_256_CBC_SHA"
ssl3_cipher['\xC0\x1A'] = "TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\xC0\x1B'] = "TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\xC0\x1C'] = "TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\xC0\x1D'] = "TLS_SRP_SHA_WITH_AES_128_CBC_SHA"
ssl3_cipher['\xC0\x1E'] = "TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA"
ssl3_cipher['\xC0\x1F'] = "TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA"
ssl3_cipher['\xC0\x20'] = "TLS_SRP_SHA_WITH_AES_256_CBC_SHA"
ssl3_cipher['\xC0\x21'] = "TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA"
ssl3_cipher['\xC0\x22'] = "TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA"
ssl3_cipher['\xC0\x23'] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\xC0\x24'] = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384"
ssl3_cipher['\xC0\x25'] = "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\xC0\x26'] = "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384"
ssl3_cipher['\xC0\x27'] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\xC0\x28'] = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384"
ssl3_cipher['\xC0\x29'] = "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\xC0\x2A'] = "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384"
ssl3_cipher['\xC0\x2B'] = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"
ssl3_cipher['\xC0\x2C'] = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
ssl3_cipher['\xC0\x2D'] = "TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256"
ssl3_cipher['\xC0\x2E'] = "TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384"
ssl3_cipher['\xC0\x2F'] = "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
ssl3_cipher['\xC0\x30'] = "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
ssl3_cipher['\xC0\x31'] = "TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256"
ssl3_cipher['\xC0\x32'] = "TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384"
ssl3_cipher['\xC0\x33'] = "TLS_ECDHE_PSK_WITH_RC4_128_SHA"
ssl3_cipher['\xC0\x34'] = "TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\xC0\x35'] = "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA"
ssl3_cipher['\xC0\x36'] = "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA"
ssl3_cipher['\xC0\x37'] = "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256"
ssl3_cipher['\xC0\x38'] = "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384"
ssl3_cipher['\xC0\x39'] = "TLS_ECDHE_PSK_WITH_NULL_SHA"
ssl3_cipher['\xC0\x3A'] = "TLS_ECDHE_PSK_WITH_NULL_SHA256"
ssl3_cipher['\xC0\x3B'] = "TLS_ECDHE_PSK_WITH_NULL_SHA384"
ssl3_cipher['\xfe\xfe'] = "SSL_RSA_FIPS_WITH_DES_CBC_SHA"
ssl3_cipher['\xfe\xff'] = "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\xff\xe0'] = "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA"
ssl3_cipher['\xff\xe1'] = "SSL_RSA_FIPS_WITH_DES_CBC_SHA"
def getSSLRecords(strBuf):
lstRecords = []
if len(strBuf)>=9:
sslStatus = struct.unpack('>BHHI', strBuf[0:9])
iType = (sslStatus[3] & (0xFF000000))>>24
iRecordLen = sslStatus[3] & (0x00FFFFFF)
iShakeProtocol = sslStatus[0]
strRecord = strBuf[9:9+iRecordLen]
iSSLLen = sslStatus[2]
#log(2,"iSSLLen == %d, len(strBuf) == %d, iRecordLen == %d",iSSLLen,len(strBuf),iRecordLen)
if (iRecordLen + 5 < iSSLLen):
#log(2,"Multiple Handshakes")
lstRecords.append((iShakeProtocol,iType,strRecord))
iLoopStopper = 0
iNextOffset = iRecordLen + 9
while iNextOffset < len(strBuf):
iLoopStopper += 1
iCount = 0
while ((iNextOffset+4) > len(strBuf) and iCount < 5):
#log(2,"Need more data to fill buffer")
iCount += 1
rule.waitForData()
if len(rule.buffer) > 0:
strBuf += rule.buffer
if ((iNextOffset+4) > len(strBuf)):
#log(2,"End of message")
break
iTypeAndLen = struct.unpack(">I",strBuf[iNextOffset:iNextOffset+4])[0]
iRecordLen = iTypeAndLen & (0x00FFFFFF)
iType = (iTypeAndLen & (0xFF000000))>>24
strRecord = strBuf[iNextOffset+4:iNextOffset+4+iRecordLen]
lstRecords.append((iShakeProtocol,iType,strRecord))
iNextOffset += (iRecordLen + 4)
if iLoopStopper > 8:
break
return lstRecords
elif (iRecordLen + 9 < len(strBuf)):
#log(2,"Multiple Records")
lstRecords.append((iShakeProtocol,iType,strRecord))
iNextOffset = iRecordLen + 9
iLoopStopper = 0
while iNextOffset+6 < len(strBuf):
iLoopStopper += 1
iShakeProtocol = struct.unpack(">B",strBuf[iNextOffset])[0]
iRecordLen = struct.unpack(">H",strBuf[iNextOffset+3:iNextOffset+5])[0]
iType = struct.unpack(">B",strBuf[iNextOffset+5])[0]
strRecord = strBuf[iNextOffset+6:iRecordLen+iNextOffset+6]
#log(2,"iShakeProto == %d, iRecordLen == %d, iType == %d",iShakeProtocol,iRecordLen,iType)
lstRecords.append((iShakeProtocol,iType,strRecord))
iNextOffset += iRecordLen + 5
if iLoopStopper > 8:
break
return lstRecords
elif (iRecordLen + 9 == len(strBuf)):
#log(2,"Single record")
sslStatus = checkSSLHeader(strBuf)
lstRecords.append((sslStatus[0],sslStatus[2],strRecord))
return lstRecords
return None
def checkSSLHeader(strBuf):
if len(strBuf)>=6:
sslStatus = struct.unpack('>BHHI', strBuf[0:9])
iType = (sslStatus[3] & (0xFF000000))>>24
iRecordLen = sslStatus[3] & (0x00FFFFFF)
iShakeProtocol = sslStatus[0]
iSSLLen = sslStatus[2]
return (iShakeProtocol,iSSLLen,iType,iRecordLen)
return None
def makeHello(strSSLVer):
TLS_EXTENDED_MASTER = "\x00\x04\x00\x17\x00\x00"
r = "\x16" # Message Type 22
r += dSSL[strSSLVer]
strCiphers = ""
for c in ssl3_cipher.keys():
strCiphers += c
dLen = 43 + len(strCiphers) + len(TLS_EXTENDED_MASTER)
r += struct.pack("!H",dLen)
h = "\x01"
strPlen = struct.pack("!L",dLen-4)
h+=strPlen[1:]
h+= dSSL[strSSLVer]
rand = struct.pack("!L", int(time.time()))
rand += "\x36\x24\x34\x16\x27\x09\x22\x07\xd7\xbe\xef\x69\xa1\xb2"
rand += "\x37\x23\x14\x96\x27\xa9\x12\x04\xe7\xce\xff\xd9\xae\xbb"
h+=rand
h+= "\x00" # No Session ID
h+=struct.pack("!H",len(strCiphers))
h+=strCiphers
h+= "\x01\x00"
h+= TLS_EXTENDED_MASTER
return r+h
iVulnCount = 0
for strVer in ["TLSv1.2","TLSv1.1","TLSv1"]:
strHello = makeHello(strVer)
strLogPre = "[%s] %s:%d" % (strVer,strHost,iPort)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((strHost,iPort))
s.settimeout(5)
except:
print "Failure connecting to %s:%d." % (strHost,iPort)
quit()
s.send(strHello)
#print "Sending %s Client Hello" % (strVer)
iCount = 0
fServerHello = False
fCert = False
fKex = False
fHelloDone = False
while iCount<5:
iCount += 1
try:
recv = s.recv(2048)
except:
continue
lstRecords = getSSLRecords(recv)
#strLogMessage = "iCount = %d; lstRecords = %s" % (iCount,lstRecords)
#log(2,strLogMessage)
if lstRecords != None and len(lstRecords) > 0:
for (iShakeProtocol,iType,strRecord) in lstRecords:
if iShakeProtocol == 22:
if iType == 2:
fServerHello = True
strServerHello = strRecord
elif iType == 11:
fCert = True
elif iType == 12:
fKex = True
elif iType == 14:
fHelloDone = True
if (fServerHello and fCert):
break
else:
#log(2, "Handshake missing or invalid. Aborting.")
continue
if not (fServerHello and fCert):
print "%s Invalid handshake." % (strLogPre)
elif len(recv)>0:
if strServerHello.endswith("\x04\x00\x17\x00\x00"):
fVuln = False
else:
fVuln = True
try:
s.send('\x15' + dSSL[strVer] + '\x00\x02\x01\x00')
except socket.error:
print "Connection closed by server."
if fVuln:
print "[%s] %s:%d is vulnerable to TLS triple handshake (Extended ClientHello rejected)" % (strVer,strHost,iPort)
iVulnCount += 1
else:
print "[%s] %s:%d responded with support for Extended Master Secret TLS Extension" % (strVer,strHost,iPort)
else:
print "[%s] No response from %s:%d" % (strVer,strHost,iPort)
try:
s.close()
except:
pass
if iVulnCount > 0:
print "***This System Exhibits Potentially Vulnerable Behavior***"
quit(1)
else:
print "No need to patch. (Server indicates support for TLS Extended Master Secret)"
quit(0) | [
"cyoung@tripwire.com"
] | cyoung@tripwire.com |
38249fa8185cebfdb4c30d7dddd3e605bad8b96b | 5bf245e55b756ca3e664d857f36db092855c7a98 | /externals/mne/fixes.py | 399715ee615775de35fa7a26ddf1c8dde4d48d47 | [
"BSD-3-Clause"
] | permissive | kingjr/decoding_challenge_cortana_2016_3rd | b264fabbe8fb2f3788d11dc2c4deebcf217a64a5 | 26c2ebf5200b5a5cd268fa73ac3928d7257d08d3 | refs/heads/master | 2021-01-20T17:54:12.617430 | 2016-07-13T22:31:58 | 2016-07-13T22:31:58 | 63,120,115 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 33,605 | py | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
# XXX : originally copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD
from __future__ import division
import collections
from distutils.version import LooseVersion
from functools import partial
from gzip import GzipFile
import inspect
from math import ceil, log
from operator import itemgetter
import re
import warnings
import numpy as np
from numpy.fft import irfft
import scipy
from scipy import linalg, sparse
from .externals import six
from .externals.six.moves import copyreg, xrange
###############################################################################
# Misc
# helpers to get function arguments
if hasattr(inspect, 'signature'): # py35
def _get_args(function, varargs=False):
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
else:
def _get_args(function, varargs=False):
out = inspect.getargspec(function) # args, varargs, keywords, defaults
if varargs:
return out[:2]
else:
return out[0]
class gzip_open(GzipFile): # python2.6 doesn't have context managing
def __enter__(self):
if hasattr(GzipFile, '__enter__'):
return GzipFile.__enter__(self)
else:
return self
def __exit__(self, exc_type, exc_value, traceback):
if hasattr(GzipFile, '__exit__'):
return GzipFile.__exit__(self, exc_type, exc_value, traceback)
else:
return self.close()
class _Counter(collections.defaultdict):
"""Partial replacement for Python 2.7 collections.Counter."""
def __init__(self, iterable=(), **kwargs):
super(_Counter, self).__init__(int, **kwargs)
self.update(iterable)
def most_common(self):
return sorted(six.iteritems(self), key=itemgetter(1), reverse=True)
def update(self, other):
"""Adds counts for elements in other"""
if isinstance(other, self.__class__):
for x, n in six.iteritems(other):
self[x] += n
else:
for x in other:
self[x] += 1
try:
Counter = collections.Counter
except AttributeError:
Counter = _Counter
def _unique(ar, return_index=False, return_inverse=False):
"""A replacement for the np.unique that appeared in numpy 1.4.
While np.unique existed long before, keyword return_inverse was
only added in 1.4.
"""
try:
ar = ar.flatten()
except AttributeError:
if not return_inverse and not return_index:
items = sorted(set(ar))
return np.asarray(items)
else:
ar = np.asarray(ar).flatten()
if ar.size == 0:
if return_inverse and return_index:
return ar, np.empty(0, np.bool), np.empty(0, np.bool)
elif return_inverse or return_index:
return ar, np.empty(0, np.bool)
else:
return ar
if return_inverse or return_index:
perm = ar.argsort()
aux = ar[perm]
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if return_inverse:
iflag = np.cumsum(flag) - 1
iperm = perm.argsort()
if return_index:
return aux[flag], perm[flag], iflag[iperm]
else:
return aux[flag], iflag[iperm]
else:
return aux[flag], perm[flag]
else:
ar.sort()
flag = np.concatenate(([True], ar[1:] != ar[:-1]))
return ar[flag]
# numpy < 1.5 lacks the full-featured np.unique; fall back to our port.
if LooseVersion(np.__version__) < LooseVersion('1.5'):
    unique = _unique
else:
    unique = np.unique
def _bincount(X, weights=None, minlength=None):
"""Replacing np.bincount in numpy < 1.6 to provide minlength."""
result = np.bincount(X, weights)
if minlength is None or len(result) >= minlength:
return result
out = np.zeros(minlength, np.int)
out[:len(result)] = result
return out
# numpy < 1.6 has no ``minlength`` keyword on bincount; use the port.
if LooseVersion(np.__version__) < LooseVersion('1.6'):
    bincount = _bincount
else:
    bincount = np.bincount
def _copysign(x1, x2):
"""Slow replacement for np.copysign, which was introduced in numpy 1.4"""
return np.abs(x1) * np.sign(x2)
# np.copysign appeared in numpy 1.4; emulate it on older releases.
if not hasattr(np, 'copysign'):
    copysign = _copysign
else:
    copysign = np.copysign
def _in1d(ar1, ar2, assume_unique=False, invert=False):
"""Replacement for in1d that is provided for numpy >= 1.4"""
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
# Use the port when np.in1d is missing or lacks the ``invert`` kwarg (< 1.8).
if not hasattr(np, 'in1d') or LooseVersion(np.__version__) < '1.8':
    in1d = _in1d
else:
    in1d = np.in1d
def _digitize(x, bins, right=False):
"""Replacement for digitize with right kwarg (numpy < 1.7).
Notes
-----
This fix is only meant for integer arrays. If ``right==True`` but either
``x`` or ``bins`` are of a different type, a NotImplementedError will be
raised.
"""
if right:
x = np.asarray(x)
bins = np.asarray(bins)
if (x.dtype.kind not in 'ui') or (bins.dtype.kind not in 'ui'):
raise NotImplementedError("Only implemented for integer input")
return np.digitize(x - 1e-5, bins)
else:
return np.digitize(x, bins)
# numpy < 1.7 has no ``right`` keyword on digitize; use the port.
if LooseVersion(np.__version__) < LooseVersion('1.7'):
    digitize = _digitize
else:
    digitize = np.digitize
def _tril_indices(n, k=0):
"""Replacement for tril_indices that is provided for numpy >= 1.4"""
mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)
indices = np.where(mask)
return indices
# np.tril_indices appeared in numpy 1.4; emulate it on older releases.
if not hasattr(np, 'tril_indices'):
    tril_indices = _tril_indices
else:
    tril_indices = np.tril_indices
def _unravel_index(indices, dims):
"""Add support for multiple indices in unravel_index that is provided
for numpy >= 1.4"""
indices_arr = np.asarray(indices)
if indices_arr.size == 1:
return np.unravel_index(indices, dims)
else:
if indices_arr.ndim != 1:
raise ValueError('indices should be one dimensional')
ndims = len(dims)
unraveled_coords = np.empty((indices_arr.size, ndims), dtype=np.int)
for coord, idx in zip(unraveled_coords, indices_arr):
coord[:] = np.unravel_index(idx, dims)
return tuple(unraveled_coords.T)
# numpy < 1.4 cannot unravel several indices at once; use the port.
if LooseVersion(np.__version__) < LooseVersion('1.4'):
    unravel_index = _unravel_index
else:
    unravel_index = np.unravel_index
def _qr_economic_old(A, **kwargs):
    """
    Compat function for the QR-decomposition in economic mode
    Scipy 0.9 changed the keyword econ=True to mode='economic'
    """
    # ``econ=True`` emits a DeprecationWarning on newer 0.x scipy; swallow it.
    with warnings.catch_warnings(record=True):
        return linalg.qr(A, econ=True, **kwargs)
def _qr_economic_new(A, **kwargs):
    """QR decomposition in economic mode, scipy >= 0.9 keyword spelling."""
    return linalg.qr(A, mode='economic', **kwargs)
# Pick the implementation matching the installed scipy.
if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
    qr_economic = _qr_economic_old
else:
    qr_economic = _qr_economic_new
def savemat(file_name, mdict, oned_as="column", **kwargs):
    """MATLAB-format output routine that is compatible with SciPy 0.7's.
    0.7.2 (or .1?) added the oned_as keyword arg with 'column' as the default
    value. It issues a warning if this is not provided, stating that "This will
    change to 'row' in future versions."
    """
    import scipy.io
    save_kwargs = dict(kwargs)
    save_kwargs['oned_as'] = oned_as
    try:
        return scipy.io.savemat(file_name, mdict, **save_kwargs)
    except TypeError:
        # Very old scipy does not know the ``oned_as`` keyword at all.
        return scipy.io.savemat(file_name, mdict, **kwargs)
# np.count_nonzero appeared in numpy 1.6; emulate via flatnonzero otherwise.
if hasattr(np, 'count_nonzero'):
    from numpy import count_nonzero
else:
    def count_nonzero(X):
        """Return the number of non-zero elements of X."""
        return len(np.flatnonzero(X))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in _get_args(np.copy):
    def safe_copy(X):
        # Copy, but keep the order
        return np.copy(X, order='K')
else:
    # Before an 'order' argument was introduced, numpy wouldn't muck with
    # the ordering
    safe_copy = np.copy
def _meshgrid(*xi, **kwargs):
    """
    Return coordinate matrices from coordinate vectors.
    Make N-D coordinate arrays for vectorized evaluations of
    N-D scalar/vector fields over N-D grids, given
    one-dimensional coordinate arrays x1, x2,..., xn.
    .. versionchanged:: 1.9
    1-D and 0-D cases are allowed.
    Parameters
    ----------
    x1, x2,..., xn : array_like
    1-D arrays representing the coordinates of a grid.
    indexing : {'xy', 'ij'}, optional
    Cartesian ('xy', default) or matrix ('ij') indexing of output.
    See Notes for more details.
    .. versionadded:: 1.7.0
    sparse : bool, optional
    If True a sparse grid is returned in order to conserve memory.
    Default is False.
    .. versionadded:: 1.7.0
    copy : bool, optional
    If False, a view into the original arrays are returned in order to
    conserve memory. Default is True. Please note that
    ``sparse=False, copy=False`` will likely return non-contiguous
    arrays. Furthermore, more than one element of a broadcast array
    may refer to a single memory location. If you need to write to the
    arrays, make copies first.
    .. versionadded:: 1.7.0
    Returns
    -------
    X1, X2,..., XN : ndarray
    For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
    return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
    or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
    with the elements of `xi` repeated to fill the matrix along
    the first dimension for `x1`, the second for `x2` and so on.
    """
    ndim = len(xi)
    # kwargs are popped by hand because py2-compatible code cannot declare
    # keyword-only arguments after *xi.
    copy_ = kwargs.pop('copy', True)
    sparse = kwargs.pop('sparse', False)
    indexing = kwargs.pop('indexing', 'xy')
    if kwargs:
        raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
                        % (list(kwargs)[0],))
    if indexing not in ['xy', 'ij']:
        raise ValueError(
            "Valid values for `indexing` are 'xy' and 'ij'.")
    s0 = (1,) * ndim
    # Reshape every 1-D input so that it broadcasts along its own axis only.
    output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
              for i, x in enumerate(xi)]
    shape = [x.size for x in output]
    if indexing == 'xy' and ndim > 1:
        # switch first and second axis
        output[0].shape = (1, -1) + (1,) * (ndim - 2)
        output[1].shape = (-1, 1) + (1,) * (ndim - 2)
        shape[0], shape[1] = shape[1], shape[0]
    if sparse:
        if copy_:
            return [x.copy() for x in output]
        else:
            return output
    else:
        # Return the full N-D matrix (not only the 1-D vector)
        if copy_:
            mult_fact = np.ones(shape, dtype=int)
            return [x * mult_fact for x in output]
        else:
            return np.broadcast_arrays(*output)
# numpy < 1.7 meshgrid lacks sparse/copy/indexing keywords; use the port.
if LooseVersion(np.__version__) < LooseVersion('1.7'):
    meshgrid = _meshgrid
else:
    meshgrid = np.meshgrid
###############################################################################
# Back porting firwin2 for older scipy
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) # doctest: +SKIP
>>> print(taps[72:78]) # doctest: +SKIP
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s'
% (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from scipy.signal.signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
def get_firwin2():
    """Helper to get firwin2"""
    try:
        from scipy.signal import firwin2 as fw2
    except ImportError:
        # Old scipy (< 0.9) has no firwin2; use our backport.
        fw2 = _firwin2
    return fw2
def _filtfilt(*args, **kwargs):
"""wrap filtfilt, excluding padding arguments"""
from scipy.signal import filtfilt
# cut out filter args
if len(args) > 4:
args = args[:4]
if 'padlen' in kwargs:
del kwargs['padlen']
return filtfilt(*args, **kwargs)
def get_filtfilt():
    """Helper to get filtfilt from scipy"""
    from scipy.signal import filtfilt
    # If the installed filtfilt already understands ``padlen``, use it
    # directly; otherwise fall back to the wrapper that strips padding args.
    if 'padlen' in _get_args(filtfilt):
        return filtfilt
    return _filtfilt
def _get_argrelmax():
try:
from scipy.signal import argrelmax
except ImportError:
argrelmax = _argrelmax
return argrelmax
def _argrelmax(data, axis=0, order=1, mode='clip'):
"""Calculate the relative maxima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative maxima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
"""
comparator = np.greater
if((int(order) != order) or (order < 1)):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in xrange(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
if(~results.any()):
return results
return np.where(results)
###############################################################################
# Back porting matrix_rank for numpy < 1.7
def _matrix_rank(M, tol=None):
""" Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that
are greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for
linear least squares [2].
This default threshold is designed to detect rank deficiency accounting
for the numerical errors of the SVD computation. Imagine that there is a
column in `M` that is an exact (in floating point) linear combination of
other columns in `M`. Computing the SVD on `M` will not produce a
singular value exactly equal to 0 in general: any difference of the
smallest SVD value from 0 will be caused by numerical imprecision in the
calculation of the SVD. Our threshold for small SVD values takes this
numerical imprecision into account, and the default threshold will detect
such numerical rank deficiency. The threshold may declare a matrix `M`
rank deficient even if the linear combination of some columns of `M` is
not exactly equal to another column of `M` but only numerically very
close to another column of `M`.
We chose our default threshold because it is in wide use. Other
thresholds are possible. For example, elsewhere in the 2007 edition of
*Numerical recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance
values to detect *effective* rank deficiency. The most useful measure of
the tolerance depends on the operations you intend to use on your matrix.
For example, if your data come from uncertain measurements with
uncertainties greater than floating point epsilon, choosing a tolerance
near that uncertainty may be preferable. The tolerance may be absolute if
the uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documention, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = np.asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return np.int(not all(M == 0))
S = np.linalg.svd(M, compute_uv=False)
if tol is None:
tol = S.max() * np.max(M.shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
# numpy > 1.7.1 ships matrix_rank with this thresholding; else use the port.
if LooseVersion(np.__version__) > '1.7.1':
    from numpy.linalg import matrix_rank
else:
    matrix_rank = _matrix_rank
def _reconstruct_partial(func, args, kwargs):
"""Helper to pickle partial functions"""
return partial(func, *args, **(kwargs or {}))
def _reduce_partial(p):
"""Helper to pickle partial functions"""
return _reconstruct_partial, (p.func, p.args, p.keywords)
# This adds pickling functionality to older Python 2.6
# Please always import partial from here.
# Registers _reduce_partial as the reducer used when pickling partial objects.
copyreg.pickle(partial, _reduce_partial)
def normalize_colors(vmin, vmax, clip=False):
    """Helper to handle matplotlib API"""
    import matplotlib.pyplot as plt
    # Newer matplotlib exposes Normalize; very old releases only had the
    # lowercase ``plt.normalize`` spelling, hence the AttributeError fallback.
    try:
        return plt.Normalize(vmin, vmax, clip=clip)
    except AttributeError:
        return plt.normalize(vmin, vmax, clip=clip)
def assert_true(expr, msg='False is not True'):
    """Fake assert_true without message"""
    if not expr:
        raise AssertionError(msg)
def assert_is(expr1, expr2, msg=None):
    """Fake assert_is without message"""
    # Bug fix: this used to test ``expr2 is expr2`` (always True), so the
    # assertion could never fail.  Compare the two distinct arguments.
    assert_true(expr1 is expr2, msg)
def assert_is_not(expr1, expr2, msg=None):
    """Fake assert_is_not without message"""
    assert_true(expr1 is not expr2, msg)
# Cached implementation chosen on first call (nose flavor or local fallback).
assert_raises_regex_impl = None
# from numpy 1.9.1
def assert_raises_regex(exception_class, expected_regexp,
                        callable_obj=None, *args, **kwargs):
    """
    Fail unless an exception of class exception_class and with message that
    matches expected_regexp is thrown by callable when invoked with arguments
    args and keyword arguments kwargs.
    Name of this function adheres to Python 3.2+ reference, but should work in
    all versions down to 2.6.
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import nose
    global assert_raises_regex_impl
    if assert_raises_regex_impl is None:
        try:
            # Python 3.2+
            assert_raises_regex_impl = nose.tools.assert_raises_regex
        except AttributeError:
            try:
                # 2.7+
                assert_raises_regex_impl = nose.tools.assert_raises_regexp
            except AttributeError:
                # 2.6
                # This class is copied from Python2.7 stdlib almost verbatim
                class _AssertRaisesContext(object):
                    def __init__(self, expected, expected_regexp=None):
                        self.expected = expected
                        self.expected_regexp = expected_regexp
                    def failureException(self, msg):
                        # Build (not raise) the failure exception instance.
                        return AssertionError(msg)
                    def __enter__(self):
                        return self
                    def __exit__(self, exc_type, exc_value, tb):
                        if exc_type is None:
                            try:
                                exc_name = self.expected.__name__
                            except AttributeError:
                                exc_name = str(self.expected)
                            raise self.failureException(
                                "{0} not raised".format(exc_name))
                        if not issubclass(exc_type, self.expected):
                            # let unexpected exceptions pass through
                            return False
                        self.exception = exc_value  # store for later retrieval
                        if self.expected_regexp is None:
                            return True
                        expected_regexp = self.expected_regexp
                        # NOTE: ``basestring`` only exists on Python 2; this
                        # branch is reachable only on 2.6 where that is fine.
                        if isinstance(expected_regexp, basestring):
                            expected_regexp = re.compile(expected_regexp)
                        if not expected_regexp.search(str(exc_value)):
                            raise self.failureException(
                                '"%s" does not match "%s"' %
                                (expected_regexp.pattern, str(exc_value)))
                        return True
                def impl(cls, regex, callable_obj, *a, **kw):
                    mgr = _AssertRaisesContext(cls, regex)
                    if callable_obj is None:
                        return mgr
                    with mgr:
                        callable_obj(*a, **kw)
                assert_raises_regex_impl = impl
    return assert_raises_regex_impl(exception_class, expected_regexp,
                                    callable_obj, *args, **kwargs)
def _sparse_block_diag(mats, format=None, dtype=None):
"""An implementation of scipy.sparse.block_diag since old versions of
scipy don't have it. Forms a sparse matrix by stacking matrices in block
diagonal form.
Parameters
----------
mats : list of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g. "csr"). If not given, the
matrix is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of blocks.
Returns
-------
res : sparse matrix
"""
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None] * nmat
row[ia] = a
rows.append(row)
return sparse.bmat(rows, format=format, dtype=dtype)
# scipy >= 0.11 provides block_diag; emulate it on older releases.
try:
    from scipy.sparse import block_diag as sparse_block_diag
except Exception:
    sparse_block_diag = _sparse_block_diag
def _isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`isclose(a, b)` might be different from `isclose(b, a)` in
some rare cases.
Examples
--------
>>> isclose([1e10,1e-7], [1.00001e10,1e-8])
array([ True, False], dtype=bool)
>>> isclose([1e10,1e-8], [1.00001e10,1e-9])
array([ True, True], dtype=bool)
>>> isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan])
array([ True, False], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([ True, True], dtype=bool)
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
dt = np.core.multiarray.result_type(y, 1.)
y = np.array(y, dtype=dt, copy=False, subok=True)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if np.all(xfin) and np.all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = np.isnan(x) & np.isnan(y)
cond[both_nan] = both_nan[both_nan]
return cond
# np.isclose appeared in numpy 1.7; use the backport on older releases.
if LooseVersion(np.__version__) < LooseVersion('1.7'):
    isclose = _isclose
else:
    isclose = np.isclose
| [
"jeanremi.king+github@gmail.com"
] | jeanremi.king+github@gmail.com |
7db05f705d72bdf87180f6a7bff371d915d8b61e | 299e5934971f9de638692e2667d6e270bcab5cbd | /214.最短回文串.py | fd576408b90eb365d8d4759abcade422cdf7f582 | [] | no_license | ycj123/Leetcode-Python3 | 14bcd6c9f4d26191d5d40c77e923df4d0be4c0e5 | 1593960cdf2655ef1dcf68e3517e7121670c6ac3 | refs/heads/master | 2022-12-16T23:12:19.326702 | 2020-09-18T00:17:45 | 2020-09-18T00:17:45 | 295,302,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #
# @lc app=leetcode.cn id=214 lang=python3
#
# [214] 最短回文串
#
# https://leetcode-cn.com/problems/shortest-palindrome/description/
#
# algorithms
# Hard (36.30%)
# Likes: 262
# Dislikes: 0
# Total Accepted: 23.3K
# Total Submissions: 64.2K
# Testcase Example: '"aacecaaa"'
#
# 给定一个字符串 s,你可以通过在字符串前面添加字符将其转换为回文串。找到并返回可以用这种方式转换的最短回文串。
#
# 示例 1:
#
# 输入: "aacecaaa"
# 输出: "aaacecaaa"
#
#
# 示例 2:
#
# 输入: "abcd"
# 输出: "dcbabcd"
#
#
# @lc code=start
class Solution:
    def shortestPalindrome(self, s: str) -> str:
        """Prepend the fewest characters to ``s`` to make it a palindrome.

        Strategy: find the longest palindromic prefix of ``s``; the answer
        is the reverse of the remaining suffix, prepended to ``s``.
        """
        reversed_s = s[::-1]
        n = len(s)
        for prefix_len in range(n, -1, -1):
            # s[:prefix_len] is a palindrome iff it equals its own reverse,
            # which is the matching tail slice of reversed_s.
            if s[:prefix_len] == reversed_s[n - prefix_len:]:
                return reversed_s[:n - prefix_len] + s
# @lc code=end
| [
"yangchijiang@icloud.com"
] | yangchijiang@icloud.com |
b158c6b766b190a354696a9b56f4c4b8b3e4e9e8 | 7d146952331a5351b251c386335d23831499a4b1 | /Max_Sum_Subarray_of Size_K.py | cd4531d6617cd5fc7fe47ff720ced67fd3b83e2d | [] | no_license | angela-laien/leetcode_problems | 12c98921994044d90a8d80bf23deb5fe6d9af7a9 | 31fda85d694f79f429f429ac64f5c6baa8d4bbdb | refs/heads/main | 2023-05-30T17:31:49.551098 | 2021-06-27T17:16:48 | 2021-06-27T17:16:48 | 364,058,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | # Given an array of positive numbers and a positive number ‘k,’ find the maximum sum of any contiguous subarray of size ‘k’.
# Input: [2, 1, 5, 1, 3, 2], k=3
# Output: 9
# Explanation: Subarray with maximum sum is [5, 1, 3].
def max_sub_array_of_size_k(k, arr):
    """Return the maximum sum over all contiguous windows of size ``k``.

    Sliding-window approach: extend the window one element at a time and,
    once it holds ``k`` elements, record its sum and slide it forward by
    dropping the oldest element.  O(len(arr)) time, O(1) extra space.
    Returns 0 when ``arr`` holds fewer than ``k`` elements.
    """
    best_sum = 0
    running_sum = 0
    start = 0
    for end, value in enumerate(arr):
        running_sum += value
        if end >= k - 1:
            best_sum = max(best_sum, running_sum)
            running_sum -= arr[start]
            start += 1
    return best_sum
"laienxie@gmail.com"
] | laienxie@gmail.com |
16518bf24c2f192812f3d6ef3df3a7f824e8a741 | 264c19e525926110eaee703680eed9dddf0d948b | /untils/__init__.py | 952b02a1660763dfcc5e0022b1eb6f27298956a7 | [] | no_license | sunny-xiabo/Appium_py3 | 424b224cfa111fb994aa5405327e802c0a7df229 | 499fabc0db7727db748b3fd648ee09c0d467017f | refs/heads/master | 2023-05-27T14:31:06.758117 | 2021-06-23T03:59:40 | 2021-06-23T03:59:40 | 379,110,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | """
# coding: utf-8
# @Author:xiabo
# @File : __init__.py.py
# @Date :2021/6/16 下午5:17
"""
'''
公共的工具模块
''' | [
"xiabo@xiabo-MacBook-Pro.local"
] | xiabo@xiabo-MacBook-Pro.local |
f484cfaede76ca9e2378408414f754d0bdd1778b | 0cd5763307766ce063d01a497dc20a49ebb52215 | /mysite/settings.py | d0f801feefa66e16fabe08128f81f55dd697a8be | [] | no_license | kuzur/my-first-blog | 903b5a4ed344fa47eed1bb64b695d63f35063514 | b48f723cbd2db7f3be027ab82d5d3825a298a552 | refs/heads/master | 2020-03-21T05:37:24.696260 | 2018-07-09T08:54:53 | 2018-07-09T08:54:53 | 138,169,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,193 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control - rotate it and read
# it from an environment variable before any production deployment.
SECRET_KEY = 'g8hl-l&4d@tqhu%x$7e6w(0q0lws#ien2-^yelka4$1_%htien'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): 'pythonanywhere.com' matches only the bare domain; a
# '<user>.pythonanywhere.com' deployment needs its own entry - confirm.
ALLOWED_HOSTS = ['127.0.0.1', 'pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# SQLite file stored next to the project; fine for development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"a5816028@aoyama.jp"
] | a5816028@aoyama.jp |
1f433819a3c1afa8f824679e2a7ef418890153d1 | 5e671191f33500cf0f6f92e7795cfa5eb8fd528a | /UTAGL-AIML-Project5-Q5.py | e059cab652aa0f6048e55fb58fe92b6a8537f7d3 | [] | no_license | GreatLearningAIML1/gl-pgp-aiml-uta-intl-may20-dhavalbbhatt | 5b8dac4c6859d5ad8dd0172760e0da8c44702c17 | 25a540199ef3f280d1af089f04f0cbb9b1a2896f | refs/heads/master | 2022-12-17T20:17:47.400264 | 2020-09-19T15:26:22 | 2020-09-19T15:26:22 | 272,688,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,618 | py | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import zscore
from scipy.spatial.distance import cdist
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import silhouette_score
from scipy.cluster.hierarchy import fcluster
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster.hierarchy import fcluster
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
from sklearn.cluster import KMeans
import warnings
warnings.filterwarnings('ignore')
sns.set(color_codes=True)
pd.set_option('display.max_columns', 1500)
pd.set_option('display.max_rows', 1500)
# ###########################################
# ############# Read data ###################
data = pd.read_excel('CCCustData.xlsx')
data_ops = data.drop(['Sl_No', 'Customer Key'], axis=1)
data_ops = data_ops.apply(zscore)
# # FINAL MODEL KMEANS CLUSTERING AND SILHOUETTE SCORE ###
model = KMeans(n_clusters=3, n_init=15, random_state=1)
model.fit(data_ops)
prediction = model.predict(data_ops)
cluster_pred = model.fit_predict(data_ops)
final_score = silhouette_score(data_ops, cluster_pred)
print("FINAL Silhouette score is for KMEANS model is ", final_score)
print('*' * 100)
# # FINAL MODEL HIERARCHICAL CLUSTERING AND SILHOUETTE SCORE ###
z_ops = linkage(data_ops, method='ward', metric='euclidean')
ops_clusters = fcluster(z_ops, t=18, criterion='distance')
ops_silhouette_score = silhouette_score(data_ops, ops_clusters)
print("FINAL Silhouette Score for HIERARCHICAL model is {}".format(ops_silhouette_score))
print('*' * 100) | [
"noreply@github.com"
] | noreply@github.com |
d7b4e049f95736c4a3a270a0a6e326a8bc7e03d5 | 887b9fd5f4fd4b9448f32750788b138b2e94be3e | /stock/futu/import_requests _income.py | 6411f1232d8b5870521859d6a0da9b07f5f729fa | [] | no_license | hong0396/hotwind_git | 8fa11b3bc46aadd0b83b297cb6c6919102b7b920 | 544d984d8a8cdc42b422792a5064d19d24e0c831 | refs/heads/master | 2020-04-04T01:11:50.010424 | 2018-11-03T07:24:59 | 2018-11-03T07:24:59 | 136,184,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | import requests
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Cookie': 'UM_distinctid=165fa9285fb762-07c06f613d5cac-8383268-e1000-165fa9285fc20a; cipher_device_id=1537507232150902; tgw_l7_route=8d34ab350eb9a9772a5a0c377f34d47d',
'Host': 'finance.futunn.com',
'Origin': 'https://www.futunn.com',
'Referer': 'https://www.futunn.com/quote/stock-info?m=us&code=CYTXW&type=finance_analyse',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
}
url='https://finance.futunn.com/api/finance/balance-sheet?code=CYTXW&label=us&quarter=0&page=0'
r = requests.get(url,headers=headers).json()
print(r.get("data").get("list"))
print(r.get("data").get("pages"))
| [
"hong0396@126.com"
] | hong0396@126.com |
b457bebee7fa5e0ee5d2b272b11cfbb9d94bea0a | 71f202160b4b0655586334385d5534fef8de87a8 | /node_modules/watchpack-chokidar2/node_modules/fsevents/build/config.gypi | d5402abadccbf7d64c721b596d82741eb26bca6d | [
"MIT"
] | permissive | allisonwmoss/devdo-frontend | 03b7ae9a971660a9fb38dd4ab144eb84d5b9e526 | 9e295953f60b527fb72496544da667f60f0326d0 | refs/heads/main | 2023-08-24T04:16:42.218294 | 2021-10-25T13:46:08 | 2021-10-25T13:46:08 | 416,338,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,834 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_gyp_path": "tools/icu/icu-system.gyp",
"icu_small": "false",
"icu_ver_major": "69",
"is_debug": 0,
"llvm_version": "13.0",
"napi_build_version": "8",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "false",
"node_library_files": [
"lib/constants.js",
"lib/net.js",
"lib/trace_events.js",
"lib/events.js",
"lib/repl.js",
"lib/util.js",
"lib/dgram.js",
"lib/vm.js",
"lib/stream.js",
"lib/child_process.js",
"lib/assert.js",
"lib/_tls_wrap.js",
"lib/http2.js",
"lib/inspector.js",
"lib/os.js",
"lib/_http_server.js",
"lib/console.js",
"lib/perf_hooks.js",
"lib/readline.js",
"lib/punycode.js",
"lib/_http_incoming.js",
"lib/https.js",
"lib/_stream_wrap.js",
"lib/domain.js",
"lib/dns.js",
"lib/_http_client.js",
"lib/diagnostics_channel.js",
"lib/tty.js",
"lib/_http_agent.js",
"lib/timers.js",
"lib/_http_outgoing.js",
"lib/querystring.js",
"lib/_tls_common.js",
"lib/module.js",
"lib/_stream_passthrough.js",
"lib/_stream_transform.js",
"lib/worker_threads.js",
"lib/sys.js",
"lib/_stream_duplex.js",
"lib/path.js",
"lib/_http_common.js",
"lib/string_decoder.js",
"lib/cluster.js",
"lib/v8.js",
"lib/crypto.js",
"lib/wasi.js",
"lib/_stream_readable.js",
"lib/zlib.js",
"lib/url.js",
"lib/tls.js",
"lib/_stream_writable.js",
"lib/async_hooks.js",
"lib/process.js",
"lib/http.js",
"lib/buffer.js",
"lib/fs.js",
"lib/util/types.js",
"lib/timers/promises.js",
"lib/path/win32.js",
"lib/path/posix.js",
"lib/stream/consumers.js",
"lib/stream/promises.js",
"lib/stream/web.js",
"lib/internal/constants.js",
"lib/internal/abort_controller.js",
"lib/internal/net.js",
"lib/internal/v8_prof_processor.js",
"lib/internal/event_target.js",
"lib/internal/inspector_async_hook.js",
"lib/internal/validators.js",
"lib/internal/linkedlist.js",
"lib/internal/cli_table.js",
"lib/internal/repl.js",
"lib/internal/util.js",
"lib/internal/histogram.js",
"lib/internal/error_serdes.js",
"lib/internal/dgram.js",
"lib/internal/child_process.js",
"lib/internal/assert.js",
"lib/internal/fixed_queue.js",
"lib/internal/blocklist.js",
"lib/internal/v8_prof_polyfill.js",
"lib/internal/options.js",
"lib/internal/worker.js",
"lib/internal/dtrace.js",
"lib/internal/idna.js",
"lib/internal/watchdog.js",
"lib/internal/encoding.js",
"lib/internal/tty.js",
"lib/internal/freeze_intrinsics.js",
"lib/internal/timers.js",
"lib/internal/heap_utils.js",
"lib/internal/querystring.js",
"lib/internal/js_stream_socket.js",
"lib/internal/errors.js",
"lib/internal/priority_queue.js",
"lib/internal/freelist.js",
"lib/internal/blob.js",
"lib/internal/socket_list.js",
"lib/internal/socketaddress.js",
"lib/internal/stream_base_commons.js",
"lib/internal/url.js",
"lib/internal/async_hooks.js",
"lib/internal/http.js",
"lib/internal/buffer.js",
"lib/internal/trace_events_async_hooks.js",
"lib/internal/crypto/sig.js",
"lib/internal/crypto/rsa.js",
"lib/internal/crypto/aes.js",
"lib/internal/crypto/util.js",
"lib/internal/crypto/scrypt.js",
"lib/internal/crypto/random.js",
"lib/internal/crypto/keys.js",
"lib/internal/crypto/x509.js",
"lib/internal/crypto/certificate.js",
"lib/internal/crypto/ec.js",
"lib/internal/crypto/keygen.js",
"lib/internal/crypto/mac.js",
"lib/internal/crypto/diffiehellman.js",
"lib/internal/crypto/hkdf.js",
"lib/internal/crypto/cipher.js",
"lib/internal/crypto/hash.js",
"lib/internal/crypto/pbkdf2.js",
"lib/internal/crypto/webcrypto.js",
"lib/internal/crypto/dsa.js",
"lib/internal/crypto/hashnames.js",
"lib/internal/cluster/shared_handle.js",
"lib/internal/cluster/round_robin_handle.js",
"lib/internal/cluster/worker.js",
"lib/internal/cluster/primary.js",
"lib/internal/cluster/utils.js",
"lib/internal/cluster/child.js",
"lib/internal/webstreams/util.js",
"lib/internal/webstreams/writablestream.js",
"lib/internal/webstreams/readablestream.js",
"lib/internal/webstreams/queuingstrategies.js",
"lib/internal/webstreams/encoding.js",
"lib/internal/webstreams/transformstream.js",
"lib/internal/webstreams/transfer.js",
"lib/internal/bootstrap/loaders.js",
"lib/internal/bootstrap/pre_execution.js",
"lib/internal/bootstrap/node.js",
"lib/internal/bootstrap/environment.js",
"lib/internal/bootstrap/switches/does_not_own_process_state.js",
"lib/internal/bootstrap/switches/is_not_main_thread.js",
"lib/internal/bootstrap/switches/does_own_process_state.js",
"lib/internal/bootstrap/switches/is_main_thread.js",
"lib/internal/test/binding.js",
"lib/internal/test/transfer.js",
"lib/internal/util/types.js",
"lib/internal/util/inspector.js",
"lib/internal/util/comparisons.js",
"lib/internal/util/debuglog.js",
"lib/internal/util/inspect.js",
"lib/internal/util/iterable_weak_map.js",
"lib/internal/streams/add-abort-signal.js",
"lib/internal/streams/compose.js",
"lib/internal/streams/duplexify.js",
"lib/internal/streams/destroy.js",
"lib/internal/streams/legacy.js",
"lib/internal/streams/passthrough.js",
"lib/internal/streams/readable.js",
"lib/internal/streams/from.js",
"lib/internal/streams/writable.js",
"lib/internal/streams/state.js",
"lib/internal/streams/buffer_list.js",
"lib/internal/streams/end-of-stream.js",
"lib/internal/streams/utils.js",
"lib/internal/streams/transform.js",
"lib/internal/streams/lazy_transform.js",
"lib/internal/streams/duplex.js",
"lib/internal/streams/pipeline.js",
"lib/internal/readline/utils.js",
"lib/internal/readline/emitKeypressEvents.js",
"lib/internal/readline/callbacks.js",
"lib/internal/repl/history.js",
"lib/internal/repl/utils.js",
"lib/internal/repl/await.js",
"lib/internal/legacy/processbinding.js",
"lib/internal/assert/calltracker.js",
"lib/internal/assert/assertion_error.js",
"lib/internal/http2/util.js",
"lib/internal/http2/core.js",
"lib/internal/http2/compat.js",
"lib/internal/per_context/messageport.js",
"lib/internal/per_context/primordials.js",
"lib/internal/per_context/domexception.js",
"lib/internal/vm/module.js",
"lib/internal/tls/secure-pair.js",
"lib/internal/tls/parse-cert-string.js",
"lib/internal/tls/secure-context.js",
"lib/internal/child_process/serialization.js",
"lib/internal/debugger/inspect_repl.js",
"lib/internal/debugger/inspect_client.js",
"lib/internal/debugger/inspect.js",
"lib/internal/worker/io.js",
"lib/internal/worker/js_transferable.js",
"lib/internal/main/repl.js",
"lib/internal/main/print_help.js",
"lib/internal/main/eval_string.js",
"lib/internal/main/check_syntax.js",
"lib/internal/main/prof_process.js",
"lib/internal/main/worker_thread.js",
"lib/internal/main/inspect.js",
"lib/internal/main/eval_stdin.js",
"lib/internal/main/run_main_module.js",
"lib/internal/modules/run_main.js",
"lib/internal/modules/package_json_reader.js",
"lib/internal/modules/esm/module_job.js",
"lib/internal/modules/esm/get_source.js",
"lib/internal/modules/esm/translators.js",
"lib/internal/modules/esm/resolve.js",
"lib/internal/modules/esm/create_dynamic_module.js",
"lib/internal/modules/esm/module_map.js",
"lib/internal/modules/esm/get_format.js",
"lib/internal/modules/esm/transform_source.js",
"lib/internal/modules/esm/loader.js",
"lib/internal/modules/cjs/helpers.js",
"lib/internal/modules/cjs/loader.js",
"lib/internal/source_map/source_map.js",
"lib/internal/source_map/prepare_stack_trace.js",
"lib/internal/source_map/source_map_cache.js",
"lib/internal/dns/promises.js",
"lib/internal/dns/utils.js",
"lib/internal/fs/watchers.js",
"lib/internal/fs/promises.js",
"lib/internal/fs/read_file_context.js",
"lib/internal/fs/rimraf.js",
"lib/internal/fs/sync_write_stream.js",
"lib/internal/fs/dir.js",
"lib/internal/fs/streams.js",
"lib/internal/fs/utils.js",
"lib/internal/fs/cp/cp.js",
"lib/internal/fs/cp/cp-sync.js",
"lib/internal/perf/nodetiming.js",
"lib/internal/perf/usertiming.js",
"lib/internal/perf/performance_entry.js",
"lib/internal/perf/performance.js",
"lib/internal/perf/timerify.js",
"lib/internal/perf/utils.js",
"lib/internal/perf/observe.js",
"lib/internal/perf/event_loop_delay.js",
"lib/internal/perf/event_loop_utilization.js",
"lib/internal/policy/manifest.js",
"lib/internal/policy/sri.js",
"lib/internal/process/task_queues.js",
"lib/internal/process/per_thread.js",
"lib/internal/process/warning.js",
"lib/internal/process/policy.js",
"lib/internal/process/promises.js",
"lib/internal/process/signal.js",
"lib/internal/process/execution.js",
"lib/internal/process/esm_loader.js",
"lib/internal/process/report.js",
"lib/internal/process/worker_thread_only.js",
"lib/internal/console/constructor.js",
"lib/internal/console/global.js",
"lib/assert/strict.js",
"lib/dns/promises.js",
"lib/fs/promises.js"
],
"node_module_version": 93,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local/Cellar/node/16.10.0_1",
"node_release_urlbase": "",
"node_shared": "false",
"node_shared_brotli": "true",
"node_shared_cares": "true",
"node_shared_http_parser": "false",
"node_shared_libuv": "true",
"node_shared_nghttp2": "true",
"node_shared_nghttp3": "false",
"node_shared_ngtcp2": "false",
"node_shared_openssl": "true",
"node_shared_zlib": "true",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"openssl_quic": "false",
"ossfuzz": "false",
"shlib_suffix": "93.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_enable_webassembly": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"nodedir": "/Users/allisonmoss/Library/Caches/node-gyp/16.10.0",
"standalone_static_library": 1,
"metrics_registry": "https://registry.npmjs.org/",
"global_prefix": "/usr/local",
"local_prefix": "/Users/allisonmoss/dev/devdo/devdo-frontend/frontend",
"globalconfig": "/usr/local/etc/npmrc",
"userconfig": "/Users/allisonmoss/.npmrc",
"init_module": "/Users/allisonmoss/.npm-init.js",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"save_exact": "true",
"cache": "/Users/allisonmoss/.npm",
"user_agent": "npm/7.24.0 node/v16.10.0 darwin x64 workspaces/false",
"prefix": "/usr/local"
}
}
| [
"allisonwmoss@icloud.com"
] | allisonwmoss@icloud.com |
662555aca7c1d66e74f10738efcc2f5fb35f7b05 | c2284525657ab9793998f7c24c77f5771e71dcf5 | /python/schedule_table.py | fe1a5cda44c43e03dbe0deb80a2794f43c2029c4 | [
"MIT"
] | permissive | cebarbosa/spanet_gc2018 | 5663b4d139d4f38ca3c4ea9b2277f2a7b287865c | 73864099826cc906bf0ea708570381400fee079b | refs/heads/master | 2021-05-04T20:36:02.215227 | 2018-06-15T17:06:40 | 2018-06-15T17:06:40 | 119,824,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | # -*- coding: utf-8 -*-
"""
Created on 20/03/18
Author : Carlos Eduardo Barbosa
Produces table with schedule
"""
from __future__ import print_function, division
def build_schedule(text):
    """Convert tab-separated schedule rows into the JS ``schedule`` snippet.

    Each row is ``time<TAB>description[<TAB>name<TAB>company]``: rows with
    exactly two fields produce a simple time/name entry; rows with four or
    more fields produce a full presentation entry (name/company/title/
    description).  Rows with fewer than two fields are skipped.

    Returns the list of snippet parts; the caller joins them with blank
    lines and appends the closing bracket is already included.
    """
    schedule = ["\tschedule: ["]
    for line in text.split("\n"):
        fields = line.strip().split("\t")
        if len(fields) < 2:
            continue  # blank or malformed row
        if len(fields) == 2:
            entry = '\t{{\n\tname : "{0[1]}",\n\ttime: "{0[0]}"\n\t}},'.format(
                fields)
            schedule.append(entry)
        else:
            parts = ["{", 'name: "{}",'.format(fields[2]),
                     'company: "{}",'.format(fields[3]),
                     """link: {href: "", text: "" },""", "presentation: {",
                     'title: "{}",'.format(fields[2]),
                     'description: "{}",'.format(fields[1]),
                     'time: "{}"'.format(fields[0]),
                     '}', "},"]
            schedule.append("\n\t".join(parts))
    schedule.append("\t],")
    return schedule


if __name__ == "__main__":
    # BUG FIX: the file was opened in binary mode ("rb") but parsed with str
    # operations (text.split("\n")), which raises TypeError on Python 3.
    # Open it as text with the BOM-tolerant codec instead; the per-line debug
    # print was dropped as noise.
    with open("SPAnet_schedule.tsv", encoding="utf-8-sig") as f:
        text = f.read()
    with open("schedule.txt", "w") as f:
        f.write("\n\n".join(build_schedule(text)))
| [
"carlos.barbosa@usp.br"
] | carlos.barbosa@usp.br |
e7eb2234e064282efebfdb573efbd8e47c608a09 | 32a89f276e8e5784b41c652f2f6eded6cda0bf5a | /src/blog/urls.py | 0312104053fb8a8ab7b3fa70d9b0100e7e15b63f | [] | no_license | Chirag-py/Blog | 54c2d316fb104c888074f928a8cf5f5dae3ed5ca | b75449453cb765011a01ffad8cadb96e9b521a30 | refs/heads/master | 2022-12-07T20:30:48.593930 | 2020-09-01T19:07:50 | 2020-09-01T19:07:50 | 287,372,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py |
from django.urls import path
from .views import (
blog_post_detail_view,
blog_post_list_view,
blog_post_update_view,
blog_post_delete_view,
)
# Blog post CRUD routes, keyed by slug.  Names added so templates and views
# can use reverse()/{% url %} instead of hard-coding paths; the URLs
# themselves are unchanged.
# NOTE(review): the detail route has no trailing slash while edit/delete do —
# confirm whether that inconsistency is intentional before changing it.
urlpatterns = [
    path('', blog_post_list_view, name='blog-post-list'),
    path('<str:slug>', blog_post_detail_view, name='blog-post-detail'),
    path('<str:slug>/edit/', blog_post_update_view, name='blog-post-update'),
    path('<str:slug>/delete/', blog_post_delete_view, name='blog-post-delete'),
]
| [
"64911607+Chirag-py@users.noreply.github.com"
] | 64911607+Chirag-py@users.noreply.github.com |
11832a870cc5cd62a03c290b44260a6fe6dfca23 | b9a360c33aa6f2a33b3cc45a94667923aefb2d91 | /blueprints/puzzle/models.py | a18748f36d92714515423f401769d815cc23ff69 | [] | no_license | vkotek/web.kotek.co | 8068ab16926edee86c2bbb4829e52ca5d9707fb6 | d2f621fd3a16531efe4750f6969deb4a2e921690 | refs/heads/master | 2022-12-10T00:57:47.202437 | 2020-07-20T23:00:45 | 2020-07-20T23:00:45 | 192,185,513 | 0 | 0 | null | 2022-12-08T05:15:54 | 2019-06-16T11:55:59 | HTML | UTF-8 | Python | false | false | 3,177 | py | import datetime
class PuzzleCounter():
    """Parses a puzzle progress log (counter.txt) of ``ACTION;timestamp`` lines.

    Actions: ``STA`` starts a session, ``ADD`` records one placed piece,
    ``END`` closes the current session.
    """

    def __init__(self):
        # NOTE(review): hard-coded absolute path — consider making it a
        # constructor parameter so the class is usable outside this host.
        with open('/home/vojtech/web.kotek.co/myapp/counter.txt', 'r+', encoding="utf-8-sig") as f:
            self.errors = []          # parse problems collected by sessions()
            self.counter = f.readlines()
        # These assignments deliberately shadow the bound methods on this
        # instance (mirroring the original design): after construction,
        # count/sessions/state are plain values, not callables.
        self.count = self.count()
        self.sessions = self.sessions()
        # BUG FIX: guard against an empty log instead of raising IndexError.
        self.last = self.sessions[-1] if self.sessions else None
        self.state = self.state()
        # BUG FIX: the original read the nonexistent ``self.time`` attribute
        # here, raising AttributeError on every construction; removed.

    def add(self, action):
        """Validate an action code; persisting it is not implemented yet."""
        if action not in ('STA', 'END', 'ADD'):
            print("ERROR! Wrong action:", action)

    def count(self):
        """Return the total number of pieces added (count of ADD lines)."""
        return sum(1 for line in self.counter if 'ADD' in line)

    def elapsed_seconds(self):
        # NOTE(review): returns the per-session durations, not a cumulative
        # running total — confirm the intended semantics with callers.
        return [session['DURATION'] for session in self.sessions]

    def sessions(self):
        """Group the log into sessions.

        Each completed session is a dict with formatted START/END
        timestamps, DURATION in seconds, and the PIECES added.  Malformed
        lines are recorded in ``self.errors`` and echoed to stdout.
        """
        sessions = []
        for n, line in enumerate(self.counter):
            try:
                action, timestamp = line.split(';')
                if action == "STA":
                    session = {
                        "START": round(float(timestamp.strip())),
                        "PIECES": 0,
                    }
                elif action == "ADD":
                    session['PIECES'] += 1
                elif action == "END":
                    session["END"] = round(float(timestamp.strip()))
                    session["DURATION"] = session["END"] - session["START"]
                    session["START"] = datetime.datetime.fromtimestamp(session["START"]).strftime("%Y-%m-%d %H:%M:%S")
                    session["END"] = datetime.datetime.fromtimestamp(session["END"]).strftime("%Y-%m-%d %H:%M:%S")
                    sessions += [session]
            except Exception as e:
                error = "ERROR: {}\nLINE: {}\nVALUE: {}".format(e, n, line)
                self.errors += [error]
                print(error)
        return sessions

    def state(self):
        """Return (action, unix time, HH:MM:SS) of the most recent STA/END line."""
        state = None
        # BUG FIX: iterate a reversed *view*; the original called
        # lines.reverse() on self.counter itself, permanently corrupting the
        # log order for every later use of the instance.
        for line in reversed(self.counter):
            fields = line.strip().split(";")
            if fields[0] in ('STA', 'END'):
                state = fields
                break
        time = float(state[1])
        time_nice = datetime.datetime.fromtimestamp(time).strftime("%H:%M:%S")
        return (state[0], time, time_nice)

    def stats(self):
        """Return progress statistics and cache rate/remaining on the instance.

        BUG FIX: the original body did not compile (unterminated list
        literal) and referenced undefined names (total_seconds, count,
        time_nice); this is a reconstruction of its evident intent.
        """
        def duration(seconds):
            # Split a second count into day/hour/minute/second components.
            # (The original computed 'seconds' as ``% (60*60)``, which is
            # the wrong modulus for the seconds component.)
            return {
                'days': int(seconds // (60 * 60 * 24)),
                'hours': int((seconds % (60 * 60 * 24)) // (60 * 60)),
                'minutes': int((seconds % (60 * 60)) // 60),
                'seconds': int(seconds % 60),
            }
        total_seconds = sum(s['DURATION'] for s in self.sessions)
        count = self.count  # plain int after __init__
        self.rate = int(total_seconds / count) if count else 0
        # 9000 pieces in the full puzzle, per the original code.
        self.remaining_seconds = (9000 - count) * self.rate
        self.remaining_time = duration(self.remaining_seconds)
        return {
            'total_seconds': duration(total_seconds),
            'rate': self.rate,
            'remaining_time': self.remaining_time,
        }
| [
"kotek.vojtech@gmail.com"
] | kotek.vojtech@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.