blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
00164b917dd18b0a31f8dd2c58136718f475bcae | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/rna-transcription/73f4d972b2c74182aa5cae54291062e3.py | 78dc6c4a79f5f91885ccb23cd500e3eb79dd4a86 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 201 | py | conversion_dict = {
'G' : 'C',
'C' : 'G',
'T' : 'A',
'A' : 'U'
}
def to_rna(dna):
    """Transcribe a DNA strand into its RNA complement.

    Each nucleotide is mapped through the module-level ``conversion_dict``;
    an unknown nucleotide raises KeyError, as before.
    """
    return "".join(conversion_dict[nucleotide] for nucleotide in dna)
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
b454d8b153e5548dd92f8da3f9c6ae1b1b9b1b0c | 4ae7e4805e7b9ff0d949da276f59ec63a10b9fbb | /custom_stock_shipwire/__manifest__.py | ad304ece5ecbf8ff7908d58d65f2babd395c68b0 | [] | no_license | h3llopy/sasmar-addons12 | c94acb1994f0e17f245f0ff6b14d2d21b939c314 | 51a3ae074158fbc695711438888a5ec6c982a2fa | refs/heads/master | 2022-04-07T11:38:24.915350 | 2020-03-03T05:44:42 | 2020-03-03T05:44:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Odoo addon manifest: metadata describing this module to the Odoo Apps
# registry (the file is evaluated as a single dict literal).
{
    'name': "Stock Configuration Customization For Shipwire Shipping",
    'description': "Shipwire installation Option in Stock COnfiguration Screen",
    'author': "BrowseInfo",
    'website': "https://www.brwoseinfo.com",
    'category': 'Technical Settings',
    'version': '1.0',
    # Modules that must be installed before this one can load.
    'depends': ['delivery', 'mail', 'stock'],
    # XML data files loaded on install/update.
    'data': [
        'views/stock_config.xml'
    ],
    # Install automatically once all dependencies are installed.
    'auto_install' :True,
}
| [
"shubhajit.kushari@browseinfo.in"
] | shubhajit.kushari@browseinfo.in |
0cd10debc2702f4d604aafa5725e36ac4b73485f | 3dc3bbe607ab7b583eb52dbaae86636eb642960a | /tools/data/multisports/format_det_result.py | 84fd78811ee3267ab3beebc4ba497895779781a8 | [
"Apache-2.0"
] | permissive | open-mmlab/mmaction2 | 659c36c6083fd3d9d072e074a8d4b3a50342b9bd | 582b78fd6c3240500d5cacd292339d7d1ddbb056 | refs/heads/main | 2023-08-28T18:14:50.423980 | 2023-08-10T09:20:06 | 2023-08-10T09:20:06 | 278,810,244 | 3,498 | 1,028 | Apache-2.0 | 2023-09-07T06:50:44 | 2020-07-11T07:19:10 | Python | UTF-8 | Python | false | false | 2,169 | py | # Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser
import numpy as np
from mmengine import dump, load
from rich.progress import track
from mmaction.evaluation import link_tubes
def parse_args(argv=None):
    """Parse command-line arguments for result formatting.

    :param argv: Optional list of argument strings; defaults to ``sys.argv``
        (backward compatible — existing callers pass nothing).
    :return: Parsed namespace with ``test_result``, ``anno_path``,
        ``frm_out_path`` and ``tube_out_path`` filled in.
    """
    parser = ArgumentParser()
    # Bug fix: the positional was declared as 'test-result', which makes the
    # namespace attribute 'test-result' (argparse does not convert dashes for
    # positionals), so every later access to ``args.test_result`` raised
    # AttributeError.  Use a valid identifier as the positional name.
    parser.add_argument('test_result', help='path of dumped results')
    parser.add_argument(
        '--anno-path',
        default='data/multisports/videos/trainval/multisports_GT.pkl')
    parser.add_argument(
        '--frm_out_path',
        default=None,
        help='frame-level detection results output path')
    parser.add_argument(
        '--tube_out_path',
        default=None,
        help='tube-level detection results output path')
    args = parser.parse_args(argv)
    # Derive default output paths from the input name ('.pkl' stripped).
    if not args.frm_out_path:
        args.frm_out_path = args.test_result[:-4] + '-formated.pkl'
    if not args.tube_out_path:
        args.tube_out_path = args.test_result[:-4] + '_vid_dets.pkl'
    return args
def format_det_result():
    """Convert dumped test results to the formats required by the MultiSports
    competition: a frame-level detection array and linked video tubes.

    Reads the module-global ``args`` (set under ``__main__``).  Loads the
    dumped predictions and the ground-truth annotation pickle, scales each
    predicted box by the video resolution, then links frame detections into
    tubes and dumps both results.
    """
    test_results = load(args.test_result)
    annos = load(args.anno_path)
    # First element is the list of test-video keys; a key's position in this
    # list becomes the integer video index in the output records.
    test_videos = annos['test_videos'][0]
    resolutions = annos['resolution']
    frm_dets = []
    for pred in track(test_results, description='formating...'):
        video_key = pred['video_id'].split('.mp4')[0]
        frm_num = pred['timestamp']
        bboxes = pred['pred_instances']['bboxes']
        cls_scores = pred['pred_instances']['scores']
        for bbox, cls_score in zip(bboxes, cls_scores):
            video_idx = test_videos.index(video_key)
            # Keep only the top-scoring class for this box.
            pred_label = np.argmax(cls_score)
            score = cls_score[pred_label]
            h, w = resolutions[video_key]
            # Boxes are presumably normalised to [0, 1] and scaled here to
            # pixel coordinates -- TODO confirm against the dumper.
            bbox *= np.array([w, h, w, h])
            # Record layout: [video_idx, frame, label, score, x1, y1, x2, y2].
            instance_result = np.array(
                [video_idx, frm_num, pred_label, score, *bbox])
            frm_dets.append(instance_result)
    frm_dets = np.array(frm_dets)
    video_tubes = link_tubes(annos, frm_dets, K=1)
    dump(frm_dets, args.frm_out_path)
    dump(video_tubes, args.tube_out_path)
if __name__ == '__main__':
    # `args` is deliberately module-global: format_det_result() reads it.
    args = parse_args()
    format_det_result()
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
ce9d8dab3c03c95c47082aca01989154eb9e0553 | 7890d130081829a7c41302066dc1934badb5a474 | /students/lerastromtsova/3/constants.py | f4c293477a6f15e7ba48515caff4b96e7e39d337 | [
"MIT"
] | permissive | sobolevn/itmo-2019 | 3a05592ff8e2e62c1cadfb361914db42f0d39733 | cf39721874edf52deebc7f72f53455f69317f84a | refs/heads/master | 2020-07-21T06:23:08.581693 | 2020-01-24T12:49:59 | 2020-01-24T12:49:59 | 206,768,545 | 4 | 17 | MIT | 2020-04-12T12:52:50 | 2019-09-06T10:14:25 | Python | UTF-8 | Python | false | false | 557 | py | # -*- coding: utf-8 -*-
from contextlib import contextmanager
from datetime import datetime
import pytest
# Shared fixtures/constants for the filesystem tests.
TEST_DIR = 'test_dirs'
FORMAT_CONSTANT = '{0}/{1}'  # path template: '{directory}/{name}'
SCOPE = 'function'
FOLDER = 'folder'
FILE = 'file'
FOLDERS = 'folders'
FILES = 'files'
# Reusable pytest expectations for filesystem errors.
FILE_NOT_FOUND = pytest.raises(FileNotFoundError)
FILE_EXISTS = pytest.raises(FileExistsError)
NOW = datetime.now()
# Timestamp three years in the past.  ``replace(year=...)`` raises ValueError
# when NOW is Feb 29 and the target year is not a leap year, so fall back to
# Feb 28 in that case instead of crashing at import time.
try:
    _THREE_YEARS_AGO = NOW.replace(year=NOW.year - 3)
except ValueError:
    _THREE_YEARS_AGO = NOW.replace(year=NOW.year - 3, day=28)
TEST_DATE = _THREE_YEARS_AGO.strftime('%d.%m.%Y %H:%M:%S')
@contextmanager
def does_not_raise():
    """No-op context manager: the 'no exception expected' counterpart to
    ``pytest.raises`` in parametrised tests."""
    # Intentionally empty: yields None and lets any exception propagate.
    yield None
| [
"mail@sobolevn.me"
] | mail@sobolevn.me |
29c5b059d58988be82fa3c896883f93cde3c55ff | 9cf434b6ee59ab22496ee031fb4ab145bbaff1a2 | /tranque_v1.8.4_source/backend/src/alerts/tests/modules/ef/m2/escenarios_falla/test_eventos_problemas_instrumentacion.py | d1b22280b70a4619588a67b211e25032b7883730 | [] | no_license | oliverhernandezmoreno/SourcesOH | f2ff1a5e3377f0ac1fb8b3153d99d0ee703700b7 | 5d9ca5ab1caceafd4d11207139c9e56210156ef8 | refs/heads/master | 2023-01-05T02:51:25.172103 | 2020-08-27T14:39:34 | 2020-08-27T14:39:34 | 64,422,812 | 0 | 1 | null | 2022-12-30T17:25:10 | 2016-07-28T19:33:44 | JavaScript | UTF-8 | Python | false | false | 1,807 | py | from alerts.tests.modules.ef.ef_controller_base import EFControllerBase
from targets.models import Timeseries
class ProblemasInstrumentacionTestCase(EFControllerBase.TestCase):
    """Exercises the 'eventos_problemas_instrumentacion' EF controller for
    the fi-01 failure scenario across four instrument types."""

    def setUp(self):
        """Create one test timeseries per (instrument, suffix) pair and
        register the controller module hints the base-class tests expect."""
        super().setUp()
        target = self.target_object
        # Dotted path fragment of the controller under test.
        controller = '.ef.m2.escenarios_falla.eventos_problemas_instrumentacion.'
        # NOTE(review): despite the *_id names these appear to be instrument
        # objects (``.hardware_id`` is read below) -- confirm with the base class.
        piezometer_id = self.piezometers[0]
        accelerograph_id = self.accelerographs[0]
        flowmeter_id = self.flowmeters[0]
        turbidimeter_id = self.turbidimeters[0]
        self.base_name = '.ef-mvp.m2.failure_scenarios.fi-01.'
        # Alert-level suffixes covered by the scenario.
        self.suffixes = ["A1", "A2", "A3"]
        input_independent_ts = [[piezometer_id, self.suffixes],
                                [accelerograph_id, self.suffixes],
                                [flowmeter_id, self.suffixes],
                                [turbidimeter_id, self.suffixes]]
        # One controller hint per instrument/suffix combination.
        self.modules_hints = [(
            f's({instrument.hardware_id}){controller}{suffix}')
            for instrument, suffixes in input_independent_ts for suffix in suffixes]
        ts = []
        for instrument, suffixes in input_independent_ts:
            for suffix in suffixes:
                self.canonical_name = f'{target.canonical_name}.s-{instrument.hardware_id}{self.base_name}{suffix}'
                ts.append((
                    Timeseries.objects.create(
                        target=target,
                        name=suffix,
                        canonical_name=self.canonical_name,
                        data_source=instrument,
                        type=Timeseries.TimeseriesType.TEST
                    ),
                    suffix
                )
                )
        # All created series are treated as independent inputs.
        self.timeseries = ts
        self.independent = ts
| [
"oliverhernandezmoreno@gmail.com"
] | oliverhernandezmoreno@gmail.com |
c66332176917e79373d21e4ec4db1bc8890df8e4 | fc1141aabffe60455898b014fd8b4a2e8307ce85 | /chapter16_exporting_data/image_exporter.py | 1fdef438720f66da618ab6149297ce98a7ff80e9 | [] | no_license | Karagul/reportlabbookcode | b5bff1609d62fe2bcfb17bfd7b65777121ac175c | e271348d5562f4842b9d1628ef917539a8ebcd5d | refs/heads/master | 2020-09-21T14:58:43.427964 | 2018-12-19T17:40:46 | 2018-12-19T17:40:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # image_exporter.py
import os
import subprocess
def image_exporter(pdf_path, output_dir):
    """Extract all embedded images from a PDF into *output_dir*.

    Shells out to the poppler-utils ``pdfimages`` binary, which must be on
    PATH.  Extracted files are named ``prefix-NNN.<ext>``.

    :param pdf_path: path of the PDF to read
    :param output_dir: directory for extracted images (created if missing)
    """
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(output_dir, exist_ok=True)
    cmd = ['pdfimages', '-all', pdf_path,
           '{}/prefix'.format(output_dir)]
    # subprocess.run supersedes subprocess.call; like the original, the
    # return code is not checked (best-effort behaviour preserved).
    subprocess.run(cmd)
    print('Images extracted:')
    print(os.listdir(output_dir))
if __name__ == '__main__':
pdf_path = 'reportlab-sample.pdf'
image_exporter(pdf_path, output_dir='images') | [
"mike@pythonlibrary.org"
] | mike@pythonlibrary.org |
6a395b495a62beb5c2164f0226f0f60938285b99 | 0cf9bb9c50c6efc1bc4a7923f42f6fad79039598 | /Homeworks/HW 09_ Catching Hackers Starter Code/testbadlogindetector.py | 6306b04d23901933d31be7a09293687dcd663448 | [] | no_license | AlbMej/CSE-2050-Data-Structures-and-Object-Oriented-Design | c950bada185823c70370522e0735533b41bd726b | bfbe91d698e650d78c20fd535c45108a8dba1030 | refs/heads/master | 2020-04-25T13:20:57.537243 | 2019-03-12T19:54:04 | 2019-03-12T19:54:04 | 172,806,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,275 | py | import unittest
from badlogindetector import BadLoginDetector
from logentry import LogEntry
class TestBadLoginDetector(unittest.TestCase):
    """Unit tests for BadLoginDetector(k, window): process() should return
    False on the k-th FAIL from one IP within the window, and report() should
    list IPs that triggered the detector."""

    def testinit(self):
        # Construction alone should not raise for different thresholds/windows.
        BadLoginDetector(3,10)
        BadLoginDetector(30,10000)
    def testprocess_all_success(self):
        # A log of only successes never trips the detector.
        log = ['[%d][1.1.1.1][SUCCESS]' % i for i in range(1000)]
        d = BadLoginDetector(3,1000)
        for e in log:
            newentry = LogEntry.fromstring(e)
            self.assertTrue(d.process(newentry))
    def testprocess_somefails(self):
        # Three FAILs from 2.2.2.2 inside one window: only the third returns False.
        log = ['[%d][1.1.1.1][SUCCESS]' % i for i in range(1000)]
        log[100] = '[100][2.2.2.2][FAIL]'
        log[200] = '[200][2.2.2.2][FAIL]'
        log[300] = '[300][2.2.2.2][FAIL]'
        d = BadLoginDetector(3,1000)
        for e in log:
            newentry = LogEntry.fromstring(e)
            if newentry.time == 300:
                self.assertFalse(d.process(newentry))
            else:
                self.assertTrue(d.process(newentry))
    def testprocess_fails_far_apart(self):
        # Same three FAILs, but the window (200) never contains all of them.
        log = ['[%d][1.1.1.1][SUCCESS]' % i for i in range(1000)]
        log[100] = '[100][2.2.2.2][FAIL]'
        log[200] = '[200][2.2.2.2][FAIL]'
        log[300] = '[300][2.2.2.2][FAIL]'
        d = BadLoginDetector(3,200)
        for e in log:
            newentry = LogEntry.fromstring(e)
            self.assertTrue(d.process(newentry))
    def testprocess_allfails_far_apart(self):
        # Constant failures, but a window of 2 never holds 3 of them.
        log = ['[%d][1.1.1.1][FAIL]' % i for i in range(1000)]
        d = BadLoginDetector(3,2)
        for e in log:
            newentry = LogEntry.fromstring(e)
            self.assertTrue(d.process(newentry))
    def testreport_onefail(self):
        # Window 201 is just wide enough to catch the 100-apart failures.
        log = ['[%d][1.1.1.1][SUCCESS]' % i for i in range(1000)]
        log[100] = '[100][2.2.2.2][FAIL]'
        log[200] = '[200][2.2.2.2][FAIL]'
        log[300] = '[300][2.2.2.2][FAIL]'
        d = BadLoginDetector(3,201)
        for e in log:
            newentry = LogEntry.fromstring(e)
            d.process(newentry)
        self.assertEqual(d.report(), ['2.2.2.2'])
    def testreport_twofails_same_ip(self):
        # An IP that trips the detector twice must be reported only once.
        log = ['[%d][1.1.1.1][SUCCESS]' % i for i in range(1000)]
        log[100] = '[100][2.2.2.2][FAIL]'
        log[200] = '[200][2.2.2.2][FAIL]'
        log[300] = '[300][2.2.2.2][FAIL]'
        log[400] = '[400][2.2.2.2][FAIL]'
        d = BadLoginDetector(3,1000)
        for e in log:
            newentry = LogEntry.fromstring(e)
            d.process(newentry)
        self.assertEqual(d.report(), ['2.2.2.2'])
    def testreport_lots_of_fails(self):
        # 900 failures spread over 300 distinct IPs (3 consecutive per IP).
        log = ['[%d][1.1.1.%d][FAIL]' % (i, i//3) for i in range(900)]
        d = BadLoginDetector(3,3)
        for e in log:
            newentry = LogEntry.fromstring(e)
            d.process(newentry)
        self.assertEqual(len(d.report()), 300)
    def testreport_onefail_too_far_apart(self):
        # Window 150 is too narrow: nothing should be reported.
        log = ['[%d][1.1.1.1][SUCCESS]' % i for i in range(1000)]
        log[100] = '[100][2.2.2.2][FAIL]'
        log[200] = '[200][2.2.2.2][FAIL]'
        log[300] = '[300][2.2.2.2][FAIL]'
        d = BadLoginDetector(3,150)
        for e in log:
            newentry = LogEntry.fromstring(e)
            d.process(newentry)
        self.assertEqual(d.report(), [])
if __name__ == '__main__':
    # Allow running this test module directly (outside a test runner).
    unittest.main()
| [
"albertomejia295@gmail.com"
] | albertomejia295@gmail.com |
6b509628bae8d7e370d8a30f240ccb933c8a259b | bb5b63774924abe86c2cb0d8a09795fcf1a4d822 | /realtime_chat_app/settings.py | a993b4cb91db8e23f4b5bb0125e7902a99710b73 | [] | no_license | IdenTiclla/realtime_chat_app | 769bf432e993ee79cb93bd54489305db3526f4d5 | d2a5187bb9f257c5e8fefe6735d23e5d0eec64e6 | refs/heads/master | 2023-06-23T17:47:41.766605 | 2021-07-21T21:00:25 | 2021-07-21T21:00:25 | 387,920,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,305 | py | """
Django settings for realtime_chat_app project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed in source; move it to an environment
# variable before any production deployment.
SECRET_KEY = 'django-insecure-c4b8_#+0@)emex-0l&uq=)z7b91=6bu=y$*!8k^u#k(0%hcfkk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'chat'  # project app providing the chat feature
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'realtime_chat_app.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'realtime_chat_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"iden.ticlla@gmail.com"
] | iden.ticlla@gmail.com |
5ee5cb9bf9402ad2216dd4aa9568e06ed20148e8 | fc3ffd1a5f4f229bc585f62fe8ae0db55c8a435a | /ml4rt/jtech2021/make_site_figure.py | 4ec16416b1ffe3a449e986c5e9e41a278f8de660 | [] | no_license | thunderhoser/ml4rt | b587d96ae7094e672d0445458e7b812c33941fc6 | 517d7cb2008a0ff06014c81e158c13bf8e17590a | refs/heads/master | 2023-08-05T04:28:29.691564 | 2023-07-31T22:25:50 | 2023-07-31T22:25:50 | 270,113,792 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,692 | py | """Creates paneled figure with different views of sites."""
import os
import argparse
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.plotting import imagemagick_utils
# Panel images (relative to the input directory) concatenated into the figure.
PATHLESS_INPUT_FILE_NAMES = [
    'all_sites.jpg', 'tropical_sites.jpg', 'assorted2_sites.jpg'
]
# ImageMagick binary used to overlay the panel letter labels.
CONVERT_EXE_NAME = '/usr/bin/convert'
TITLE_FONT_SIZE = 100
TITLE_FONT_NAME = 'DejaVu-Sans-Bold'
# Target sizes (total pixel counts) for each panel and for the final figure.
PANEL_SIZE_PX = int(5e6)
CONCAT_FIGURE_SIZE_PX = int(2e7)
INPUT_DIR_ARG_NAME = 'input_dir_name'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'
INPUT_DIR_HELP_STRING = (
    'Name of input directory, containing images to be paneled together.'
)
OUTPUT_DIR_HELP_STRING = (
    'Name of output directory. Output images (paneled figure and temporary '
    'figures) will be saved here.'
)
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
    '--' + INPUT_DIR_ARG_NAME, type=str, required=True,
    help=INPUT_DIR_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
    '--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
    help=OUTPUT_DIR_HELP_STRING
)
def _overlay_text(
        image_file_name, x_offset_from_left_px, y_offset_from_top_px,
        text_string):
    """Overlays text on image.

    Shells out to ImageMagick `convert`, editing the image in place.

    :param image_file_name: Path to image file.
    :param x_offset_from_left_px: Left-relative x-coordinate (pixels).
    :param y_offset_from_top_px: Top-relative y-coordinate (pixels).
    :param text_string: String to overlay.
    :raises: ValueError: if ImageMagick command (which is ultimately a Unix
        command) fails.
    """
    # NOTE(review): the command is built by string interpolation and run via
    # os.system; quotes in text_string or the path would break the shell
    # command.  Callers here only pass '(a)'-style labels and known paths.
    command_string = (
        '"{0:s}" "{1:s}" -pointsize {2:d} -font "{3:s}" '
        '-fill "rgb(0, 0, 0)" -annotate {4:+d}{5:+d} "{6:s}" "{1:s}"'
    ).format(
        CONVERT_EXE_NAME, image_file_name, TITLE_FONT_SIZE, TITLE_FONT_NAME,
        x_offset_from_left_px, y_offset_from_top_px, text_string
    )
    exit_code = os.system(command_string)
    if exit_code == 0:
        return
    raise ValueError(imagemagick_utils.ERROR_STRING)
def _run(input_dir_name, output_dir_name):
    """Creates paneled figure with different views of sites.

    This is effectively the main method: each input panel is trimmed,
    labelled '(a)', '(b)', ... in order, resized, then all panels are
    concatenated vertically into one figure.

    :param input_dir_name: See documentation at top of file.
    :param output_dir_name: Same.
    """
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name
    )
    panel_file_names = [
        '{0:s}/{1:s}'.format(input_dir_name, p)
        for p in PATHLESS_INPUT_FILE_NAMES
    ]
    resized_panel_file_names = [
        '{0:s}/{1:s}'.format(output_dir_name, p)
        for p in PATHLESS_INPUT_FILE_NAMES
    ]
    letter_label = None
    for i in range(len(panel_file_names)):
        print('Resizing panel and saving to: "{0:s}"...'.format(
            resized_panel_file_names[i]
        ))
        # Trim, leaving a margin wide enough for the letter label.
        imagemagick_utils.trim_whitespace(
            input_file_name=panel_file_names[i],
            output_file_name=resized_panel_file_names[i],
            border_width_pixels=TITLE_FONT_SIZE + 75
        )
        # Advance the panel letter: a, b, c, ...
        if letter_label is None:
            letter_label = 'a'
        else:
            letter_label = chr(ord(letter_label) + 1)
        _overlay_text(
            image_file_name=resized_panel_file_names[i],
            x_offset_from_left_px=0, y_offset_from_top_px=TITLE_FONT_SIZE + 150,
            text_string='({0:s})'.format(letter_label)
        )
        imagemagick_utils.trim_whitespace(
            input_file_name=resized_panel_file_names[i],
            output_file_name=resized_panel_file_names[i]
        )
        imagemagick_utils.resize_image(
            input_file_name=resized_panel_file_names[i],
            output_file_name=resized_panel_file_names[i],
            output_size_pixels=PANEL_SIZE_PX
        )
    concat_figure_file_name = '{0:s}/sites_concat.jpg'.format(output_dir_name)
    print('Concatenating panels to: "{0:s}"...'.format(concat_figure_file_name))
    # One column: panels are stacked vertically.
    imagemagick_utils.concatenate_images(
        input_file_names=resized_panel_file_names,
        output_file_name=concat_figure_file_name,
        num_panel_rows=len(resized_panel_file_names), num_panel_columns=1
    )
    imagemagick_utils.trim_whitespace(
        input_file_name=concat_figure_file_name,
        output_file_name=concat_figure_file_name
    )
    imagemagick_utils.resize_image(
        input_file_name=concat_figure_file_name,
        output_file_name=concat_figure_file_name,
        output_size_pixels=CONCAT_FIGURE_SIZE_PX
    )
if __name__ == '__main__':
    # Parse CLI args and hand them to the main routine by argument name.
    INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
    _run(
        input_dir_name=getattr(INPUT_ARG_OBJECT, INPUT_DIR_ARG_NAME),
        output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)
    )
| [
"lagerqui@ualberta.ca"
] | lagerqui@ualberta.ca |
b32f5035cf85169d11f1cb0b73654819c498f5d6 | 15fae17aadc1ff83ad84ad2ee3db14ec40c6ffce | /app/articles/admin.py | c6bc56c32480f9df92b025a4cb61be49c96cb0c5 | [] | no_license | elmcrest/feincms3-example | 2eaaed3bd2bb68b9cfa6c21c9e60b190c193e08f | 3ec92b1bb23656d52c3cb46f4a0c8a138a088cbf | refs/heads/master | 2020-03-21T23:29:33.704979 | 2017-08-18T10:08:59 | 2017-08-18T10:08:59 | 139,190,393 | 0 | 0 | null | 2018-06-29T19:59:32 | 2018-06-29T19:59:32 | null | UTF-8 | Python | false | false | 954 | py | from __future__ import unicode_literals
from django.contrib import admin
from feincms3.plugins.versatileimage import AlwaysChangedModelForm
from . import models
class ImageInline(admin.TabularInline):
    """Tabular inline for editing an article's images on the article page."""
    # Form that always reports itself as changed, so inline rows are saved
    # even when only the file widget was touched.
    form = AlwaysChangedModelForm
    model = models.Image
    extra = 0  # no blank extra rows by default
@admin.register(models.Article)
class ArticleAdmin(admin.ModelAdmin):
    """Admin configuration for articles: list view, filters and edit form."""
    date_hierarchy = 'publication_date'
    inlines = [ImageInline]
    list_display = [
        'title', 'is_active', 'publication_date', 'category']
    # is_active can be toggled directly from the change list.
    list_editable = ['is_active']
    list_filter = ['is_active', 'category']
    # Slug is auto-filled from the title while typing.
    prepopulated_fields = {
        'slug': ('title',),
    }
    radio_fields = {
        'category': admin.HORIZONTAL,
    }
    fieldsets = [
        (None, {
            'fields': (
                ('is_active',),
                ('title', 'slug'),
                'publication_date',
                'category',
                'body',
            )
        }),
    ]
| [
"mk@feinheit.ch"
] | mk@feinheit.ch |
486fda6e1753ed2136f830174096f2c571d665ad | 29f830670675cea44bf3aad6e50e98e5b1692f70 | /scripts/import_permissions_and_roles.py | 867979ebca2c717d403bdf57b45d34d2dce26019 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | forksbot/byceps | 02db20149f1f0559b812dacad276e4210993e300 | ac29a0cb50e2ef450d4e5ebd33419ed490c96e4f | refs/heads/main | 2023-03-04T05:55:07.743161 | 2021-02-14T06:03:37 | 2021-02-14T06:19:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | #!/usr/bin/env python
"""Import permissions, roles, and their relations from a TOML file.
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
import click
from byceps.services.authorization import impex_service
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
@click.command()
@click.argument('data_file', type=click.File())
def execute(data_file):
    """Import permissions and roles from the given TOML file and report counts."""
    permission_count, role_count = impex_service.import_from_file(data_file)
    click.secho(
        f'Imported {permission_count} permissions and {role_count} roles.',
        fg='green',
    )
if __name__ == '__main__':
    # Resolve the config file from the environment (exits if missing), then
    # run the import inside an application context.
    config_filename = get_config_filename_from_env_or_exit()
    with app_context(config_filename):
        execute()
| [
"homework@nwsnet.de"
] | homework@nwsnet.de |
b936ffc9e6684b9c97da4d88a0f2e59e3e42aab1 | cc101e71d4b47e1ade22159bc3273aab5386a49e | /integration-tests/fake_spine/fake_spine/vnp_request_matcher_wrappers.py | 3591870d8d4679c49b285dbbc7a413cba93a0ceb | [
"Apache-2.0"
] | permissive | nhsconnect/integration-adaptors | 20f613f40562a79428e610df916835f4e3c3e455 | 8420d9d4b800223bff6a648015679684f5aba38c | refs/heads/develop | 2023-02-22T22:04:31.193431 | 2022-03-15T16:01:25 | 2022-03-15T16:01:25 | 179,653,046 | 15 | 7 | Apache-2.0 | 2023-08-23T14:52:10 | 2019-04-05T09:18:56 | Python | UTF-8 | Python | false | false | 803 | py | from fake_spine.request_matching import RequestMatcher
def _body_action_matcher(name, action_element):
    """Build a RequestMatcher that matches when the decoded request body
    contains *action_element*.

    Factored out of the four builders below, which differed only in the
    matcher name and the action element searched for.
    """
    return RequestMatcher(
        name, lambda request: action_element in request.body.decode())


def async_express():
    """Matcher for async-express pattern messages (QUPC_IN160101UK05)."""
    return _body_action_matcher('async-express-vnp',
                                '<eb:Action>QUPC_IN160101UK05</eb:Action>')


def async_reliable():
    """Matcher for async-reliable pattern messages (REPC_IN150016UK05)."""
    return _body_action_matcher('async-reliable-vnp',
                                '<eb:Action>REPC_IN150016UK05</eb:Action>')


def sync():
    """Matcher for synchronous PDS query messages (QUPA_IN040000UK32)."""
    return _body_action_matcher(
        'sync-vnp',
        '<wsa:Action>urn:nhs:names:services:pdsquery/QUPA_IN040000UK32</wsa:Action>')


def forward_reliable():
    """Matcher for forward-reliable pattern messages (COPC_IN000001UK01)."""
    return _body_action_matcher('forward-reliable-vnp',
                                '<eb:Action>COPC_IN000001UK01</eb:Action>')
| [
"noreply@github.com"
] | nhsconnect.noreply@github.com |
525929dc1eeca4dacff44536fcb21918ee9ee501 | 3f41bafb8012f264605724dbe9b1a6ee11a1f767 | /competitions/EMNIST/resize_380_B4.py | 80995b99e649cb8fcb0364b6c8c07df6932621e3 | [] | no_license | pervin0527/pervinco | 6d0c9aad8dbf6d944960b2e2c963054d1d91b29a | 9ced846438130341726e31954cc7e45a887281ef | refs/heads/master | 2022-11-26T02:11:00.848871 | 2022-11-24T00:56:14 | 2022-11-24T00:56:14 | 223,062,903 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 6,169 | py | import cv2, pathlib, datetime, os
import numpy as np
import pandas as pd
import tensorflow as tf
from matplotlib import pyplot as plt
from functools import partial
from tqdm import tqdm
from sklearn.model_selection import KFold
# GPU setup: pick a tf.distribute strategy based on how many GPUs are visible.
# Memory growth is enabled so TF does not grab all GPU memory up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 1:
    try:
        print("Activate Multi GPU")
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
    except RuntimeError as e:
        # set_memory_growth must run before GPUs are initialised.
        print(e)
else:
    try:
        print("Activate Sigle GPU")
        # NOTE(review): gpus[0] raises IndexError on a CPU-only machine.
        tf.config.experimental.set_memory_growth(gpus[0], True)
        strategy = tf.distribute.experimental.CentralStorageStrategy()
    except RuntimeError as e:
        print(e)
# Disable AutoShard.
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
def get_dataset(df):
    """Load images and multi-hot labels for every row of the answer dataframe.

    :param df: dataframe whose first column is the image index and whose
        remaining 26 columns are the per-letter labels.
    :return: (X, y, CLASSES) where X is (N, IMG_SIZE, IMG_SIZE, 3) uint8,
        y is (N, 26) uint8 and CLASSES is the list of label column names.
    """
    CLASSES = [c for c in df]
    # Drop the first column name (the image index) to keep only label names.
    CLASSES = CLASSES[1:]
    # print(len(df))
    X = np.zeros([len(df), IMG_SIZE, IMG_SIZE, 3], dtype=np.uint8)
    y = np.zeros([len(df), len(CLASSES)], dtype=np.uint8)
    for idx in tqdm(range(len(df))):
        # Image files are zero-padded five-digit names, e.g. 00042.png.
        file_name = str(df.iloc[idx, 0]).zfill(5)
        image = cv2.imread(f'{TRAIN_DS_PATH}/{file_name}.png')
        # Denoise: zero every pixel that is neither 0 nor >= 255
        # (presumably removes grey background noise -- TODO confirm).
        image2 = np.where((image <= 254) & (image != 0), 0, image)
        X[idx] = image2
        label = df.iloc[idx, 1:].values.astype('float')
        y[idx] = label
    return X, y, CLASSES
def normalize_image(image, label):
    """Resize to (RE_SIZE, RE_SIZE) and apply model preprocessing; cast label
    to float32.  Used as a tf.data map function."""
    image = tf.image.resize(image, [RE_SIZE, RE_SIZE])
    image = tf.cast(image, tf.float32)
    # NOTE(review): this applies the ResNet preprocessing while the model is
    # EfficientNetB4 (see get_model) -- confirm this mismatch is intentional.
    image = tf.keras.applications.resnet.preprocess_input(image)
    label = tf.cast(label, tf.float32)
    return image, label
def make_tf_dataset(images, labels):
    """Build an infinitely repeating, batched, prefetching tf.data pipeline
    over in-memory image/label arrays (AutoShard disabled via `options`)."""
    images = tf.data.Dataset.from_tensor_slices(images)
    labels = tf.data.Dataset.from_tensor_slices(labels)
    dataset = tf.data.Dataset.zip((images, labels))
    # repeat() before batch: epochs are bounded by steps_per_epoch in fit().
    dataset = dataset.repeat()
    dataset = dataset.map(normalize_image, num_parallel_calls=AUTOTUNE)
    dataset = dataset.batch(BATCH_SIZE)
    dataset = dataset.prefetch(AUTOTUNE)
    dataset = dataset.with_options(options)
    return dataset
def get_model():
    """Build and compile an EfficientNetB4-based multi-label classifier
    (26 sigmoid outputs, one per letter) inside the distribution strategy."""
    with strategy.scope():
        base_model = tf.keras.applications.EfficientNetB4(input_shape=(RE_SIZE, RE_SIZE, 3),
                                                          weights='imagenet', # noisy-student
                                                          include_top=False)
        # Fine-tune the full backbone, not just the head.
        base_model.trainable = True
        avg = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)
        output = tf.keras.layers.Dense(26, activation="sigmoid")(avg)
        model = tf.keras.Model(inputs=base_model.input, outputs=output)
        # binary_crossentropy: each of the 26 letters is an independent label.
        model.compile(optimizer='adam', loss = 'binary_crossentropy', metrics = ['binary_accuracy'])
    return model
def split_dataset():
    """Assign a fold number (0..N_FOLD-1) to every row of the answer CSV and
    write the result to custom_split/split_kfold.csv."""
    df = pd.read_csv(f'{DS_PATH}/dirty_mnist_2nd_answer.csv')
    kfold = KFold(n_splits=N_FOLD)
    for fold, (train, valid) in enumerate(kfold.split(df, df.index)):
        # Tag the validation rows of this split with the fold index.
        df.loc[valid, 'kfold'] = int(fold)
    if not(os.path.isdir(f'{DS_PATH}/custom_split')):
        os.makedirs(f'{DS_PATH}/custom_split')
    df.to_csv(f'{DS_PATH}/custom_split/split_kfold.csv', index=False)
def train_cross_validate():
    """Run N_FOLD-fold cross-validated training.

    For each fold: write the fold's train/valid CSVs, load images into
    memory, build a fresh model and train with checkpointing and early
    stopping, then save the final model under SAVED_PATH/LOG_TIME/<fold>.
    """
    split_dataset()
    df = pd.read_csv(f'{DS_PATH}/custom_split/split_kfold.csv')
    if not(os.path.isdir(f'/{SAVED_PATH}/{LOG_TIME}')):
        os.makedirs(f'/{SAVED_PATH}/{LOG_TIME}')
    os.system('clear')
    for i in range(N_FOLD):
        # Rows not in fold i train; rows in fold i validate.
        df_train = df[df['kfold'] != i].reset_index(drop=True)
        df_valid = df[df['kfold'] == i].reset_index(drop=True)
        df_train.drop(['kfold'], axis=1).to_csv(f'{DS_PATH}/custom_split/train-kfold-{i}.csv', index=False)
        df_valid.drop(['kfold'], axis=1).to_csv(f'{DS_PATH}/custom_split/valid-kfold-{i}.csv', index=False)
        # Re-read the just-written CSVs so the loop works from files on disk.
        df_train = pd.read_csv(f'{DS_PATH}/custom_split/train-kfold-{i}.csv')
        df_valid = pd.read_csv(f'{DS_PATH}/custom_split/valid-kfold-{i}.csv')
        train_x, train_y, _ = get_dataset(df_train)
        valid_x, valid_y, _ = get_dataset(df_valid)
        print('FOLD', i + 1)
        output_path = f'/{SAVED_PATH}/{LOG_TIME}/{i+1}'
        os.makedirs(output_path)
        print(train_x.shape, train_y.shape, valid_x.shape, valid_y.shape)
        # Checkpoint file name encodes epoch and validation accuracy.
        WEIGHT_FNAME = '{epoch:02d}-{val_binary_accuracy:.2f}.hdf5'
        checkpoint_path = f'{output_path}/{i+1}-{WEIGHT_FNAME}'
        cb_checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                             monitor='val_binary_accuracy',
                                                             save_best_only=True,
                                                             mode='max')
        cb_early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
        # Datasets repeat forever, so steps per epoch must be given explicitly.
        TRAIN_STEPS_PER_EPOCH = int(tf.math.ceil(len(train_x) / BATCH_SIZE).numpy())
        VALID_STEPS_PER_EPOCH = int(tf.math.ceil(len(valid_x) / BATCH_SIZE).numpy())
        model = get_model()
        model.fit(make_tf_dataset(train_x, train_y),
                  steps_per_epoch = TRAIN_STEPS_PER_EPOCH,
                  epochs = EPOCHS,
                  validation_data = make_tf_dataset(valid_x, valid_y),
                  validation_steps = VALID_STEPS_PER_EPOCH,
                  verbose=1,
                  callbacks = [cb_checkpointer, cb_early_stopping])
        model.save(f'{output_path}/{i+1}_dmnist.h5')
        # Free the fold's in-memory arrays before loading the next fold.
        del train_x, train_y
        del valid_x, valid_y
if __name__ == "__main__":
EPOCHS = 100
IMG_SIZE = 256
RE_SIZE = IMG_SIZE + 124
AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 5 * strategy.num_replicas_in_sync
N_FOLD = 5
DS_PATH = '/data/tf_workspace/datasets/dirty_mnist_2'
SAVED_PATH = '/data/tf_workspace/model/dirty_mnist'
TRAIN_DS_PATH = f'{DS_PATH}/dirty_mnist_2nd'
LOG_TIME = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M")
train_cross_validate()
| [
"zfbtldnz@gmail.com"
] | zfbtldnz@gmail.com |
1446845eeccf87263d870b37805acf3b3c96d21d | 4c8c0f857500b5f4b572f139602e46a6c813f6e3 | /Polymorhphism_and_Magic_methods_exercises/project/cat.py | 12c5083c335f08076e5581860eca49af0764f67d | [] | no_license | svetoslavastoyanova/Python_OOP | 3d21fb0480c088ecad11211c2d9a01139cde031f | 518f73ecc8a39e7085d4b8bf5657a1556da3dcfa | refs/heads/main | 2023-08-04T19:46:58.906739 | 2021-09-18T07:46:02 | 2021-09-18T07:46:02 | 352,304,158 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | from project.animal import Animal
class Cat(Animal):
def __repr__(self):
return f"This is {self.name}. {self.name} is a {self.age} year old {self.gender} {self.__class__.__name__}"
def make_sound(self):
return f"Meow meow!" | [
"svetoslava_stoyanova92@abv.bg"
] | svetoslava_stoyanova92@abv.bg |
5efcb7fb86370b09e864d1c00759871aabe142ae | 593b23cd61932e8206d89e43925f038c86758288 | /covid19_pipeline/engine/module.py | c9e80824df08107c21b3a61deabe4888864d0db2 | [] | no_license | HDU-DSRC-AI/HKBU_HPML_COVID-19 | ad4f311777176d469b07c155e252df26d57f5056 | 0f685312d26c0b50fffb433408a913243638a14a | refs/heads/master | 2022-10-12T09:32:15.635509 | 2020-06-09T13:53:42 | 2020-06-09T13:53:42 | 271,016,743 | 1 | 0 | null | 2020-06-09T13:55:31 | 2020-06-09T13:55:28 | null | UTF-8 | Python | false | false | 6,807 | py | import os
from collections import OrderedDict
import numpy as np
import torch
from sklearn import metrics
from torchline.engine import MODULE_REGISTRY, DefaultModule, build_module
from torchline.utils import AverageMeterGroup, topk_acc
from .utils import mixup_data, mixup_loss_fn
__all__ = [
'CTModule'
]
@MODULE_REGISTRY.register()
class CTModule(DefaultModule):
def __init__(self, cfg):
super(CTModule, self).__init__(cfg)
h, w = self.cfg.input.size
self.example_input_array = torch.rand(1, 3, 2, h, w)
self.crt_batch_idx = 0
self.inputs = self.example_input_array
def training_step_end(self, output):
self.print_log(self.trainer.batch_idx, True, self.inputs, self.train_meters)
return output
def validation_step_end(self, output):
self.crt_batch_idx += 1
self.print_log(self.crt_batch_idx, False, self.inputs, self.valid_meters)
return output
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop
:param batch:
:return:
"""
try:
# forward pass
inputs, gt_labels, paths = batch
self.crt_batch_idx = batch_idx
self.inputs = inputs
if self.cfg.mixup.enable:
inputs, gt_labels_a, gt_labels_b, lam = mixup_data(inputs, gt_labels, self.cfg.mixup.alpha)
mixup_y = [gt_labels_a, gt_labels_b, lam]
predictions = self.forward(inputs)
# calculate loss
if self.cfg.mixup.enable:
loss_val = mixup_loss_fn(self.loss, predictions, *mixup_y)
else:
loss_val = self.loss(predictions, gt_labels)
# acc
acc_results = topk_acc(predictions, gt_labels, self.cfg.topk)
tqdm_dict = {}
if self.on_gpu:
acc_results = [torch.tensor(x).to(loss_val.device.index) for x in acc_results]
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp or self.trainer.use_ddp2:
loss_val = loss_val.unsqueeze(0)
acc_results = [x.unsqueeze(0) for x in acc_results]
tqdm_dict['train_loss'] = loss_val
for i, k in enumerate(self.cfg.topk):
tqdm_dict[f'train_acc_{k}'] = acc_results[i]
output = OrderedDict({
'loss': loss_val,
'progress_bar': tqdm_dict,
'log': tqdm_dict
})
self.train_meters.update({key: val.item() for key, val in tqdm_dict.items()})
# can also return just a scalar instead of a dict (return loss_val)
return output
except Exception as e:
print(str(e))
print(batch_idx, paths)
pass
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop
:param batch:
:return:
"""
inputs, gt_labels, paths = batch
self.inputs = inputs
predictions = self.forward(inputs)
loss_val = self.loss(predictions, gt_labels)
# acc
val_acc_1, val_acc_k = topk_acc(predictions, gt_labels, self.cfg.topk)
if self.on_gpu:
val_acc_1 = val_acc_1.cuda(loss_val.device.index)
val_acc_k = val_acc_k.cuda(loss_val.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp or self.trainer.use_ddp2:
loss_val = loss_val.unsqueeze(0)
val_acc_1 = val_acc_1.unsqueeze(0)
val_acc_k = val_acc_k.unsqueeze(0)
output = OrderedDict({
'valid_loss': torch.tensor(loss_val),
'valid_acc_1': torch.tensor(val_acc_1),
f'valid_acc_{self.cfg.topk[-1]}': val_acc_k,
})
tqdm_dict = {k: v for k, v in dict(output).items()}
self.valid_meters.update({key: val.item() for key, val in tqdm_dict.items()})
# self.print_log(batch_idx, False, inputs, self.valid_meters)
if self.cfg.module.analyze_result:
output.update({
'predictions': predictions.detach(),
'gt_labels': gt_labels.detach(),
})
# can also return just a scalar instead of a dict (return loss_val)
return output
def validation_epoch_end(self, outputs):
"""
Called at the end of validation to aggregate outputs
:param outputs: list of individual outputs of each validation step
:return:
"""
# if returned a scalar from validation_step, outputs is a list of tensor scalars
# we return just the average in this case (if we want)
# return torch.stack(outputs).mean()
self.crt_batch_idx = 0
tqdm_dict = {key: val.avg for key, val in self.valid_meters.meters.items()}
valid_loss = torch.tensor(self.valid_meters.meters['valid_loss'].avg)
valid_acc_1 = torch.tensor(self.valid_meters.meters['valid_acc_1'].avg)
result = {'progress_bar': tqdm_dict, 'log': tqdm_dict,
'valid_loss': valid_loss,
'valid_acc_1': valid_acc_1}
if self.cfg.module.analyze_result:
predictions = []
gt_labels = []
for output in outputs:
predictions.append(output['predictions'])
gt_labels.append(output['gt_labels'])
predictions = torch.cat(predictions)
gt_labels = torch.cat(gt_labels)
analyze_result = self.analyze_result(gt_labels, predictions)
self.log_info(analyze_result)
result.update({'analyze_result': analyze_result, 'predictions': predictions, 'gt_labels': gt_labels})
return result
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def test_epoch_end(self, outputs):
result = self.validation_epoch_end(outputs)
predictions = result['predictions'].cpu().detach().numpy()
gt_labels = result['gt_labels'].cpu().detach().numpy()
path = self.cfg.log.path
np.save(os.path.join(path,'predictions.npy'), predictions)
np.save(os.path.join(path,'gt_labels.npy'), gt_labels)
result = {key:val for key, val in result.items() if key not in ['predictions', 'gt_labels']}
return result
def analyze_result(self, gt_labels, predictions):
'''
Args:
gt_lables: tensor (N)
predictions: tensor (N*C)
'''
return str(metrics.classification_report(gt_labels.cpu(), predictions.cpu().argmax(1), digits=4))
| [
"1435679023@qq.com"
] | 1435679023@qq.com |
80ff391e57858bfe6654bb74b3b2aad7a68da33c | ab5ef28065b0ad3f8d86fc894be569074a4569ea | /mirari/SV/migrations/0020_auto_20190321_1210.py | 063b11284a810c9cdddd97f51c5c6e61e556b605 | [
"MIT"
] | permissive | gcastellan0s/mirariapp | 1b30dce3ac2ee56945951f340691d39494b55e95 | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | refs/heads/master | 2023-01-22T22:21:30.558809 | 2020-09-25T22:37:24 | 2020-09-25T22:37:24 | 148,203,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | # Generated by Django 2.0.5 on 2019-03-21 18:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('SV', '0019_auto_20190315_1330'),
]
operations = [
migrations.AlterModelOptions(
name='ticket',
options={'default_permissions': [], 'ordering': ['-id'], 'permissions': [('Can_View__Ticket', 'Ve tickets'), ('Can_Delete__Ticket', 'Elimina tickets')], 'verbose_name': 'Ticket', 'verbose_name_plural': 'Tickets'},
),
]
| [
"g@gustavo-castellanos.com"
] | g@gustavo-castellanos.com |
ba6e5b9e1e9ff2e71c68568c7835e0414609b61d | 0062ceae0071aaa3e4e8ecd9025e8cc9443bcb3b | /solved/2579.py | 90890849ba15de80636cc12fe760fd9e34b65942 | [] | no_license | developyoun/AlgorithmSolve | 8c7479082528f67be9de33f0a337ac6cc3bfc093 | 5926924c7c44ffab2eb8fd43290dc6aa029f818d | refs/heads/master | 2023-03-28T12:02:37.260233 | 2021-03-24T05:05:48 | 2021-03-24T05:05:48 | 323,359,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | N = int(input())
arr = [int(input()) for _ in range(N)]
dp = [[0, 0] for _ in range(N)]
dp[0][0] = arr[0]
if N != 1:
dp[1][0] = arr[1]
dp[1][1] = arr[0] + arr[1]
for i in range(2, N):
dp[i][0] = max(dp[i-2]) + arr[i]
dp[i][1] = dp[i-1][0] + arr[i]
print(max(dp[N-1])) | [
"pyoun820@naver.com"
] | pyoun820@naver.com |
190cb5e443625842236cc6cbe8c93583f288a126 | 6c524d7c4114531dd0b9872090bd7389a3cd3fd8 | /poems/migrations/0003_auto_20200731_1245.py | ec917ae86031cdc2623eba1b2a2431d466946ae0 | [] | no_license | cement-hools/poems_project | e33bcd03ca8b2b1f1fa558d1036928aee73c87c9 | 493e6d517b65faab6b25a9fda485e165b6eea03d | refs/heads/master | 2022-11-28T02:11:50.837816 | 2020-08-01T10:12:16 | 2020-08-01T10:12:16 | 284,234,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | # Generated by Django 2.2.14 on 2020-07-31 09:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('poems', '0002_auto_20200731_1219'),
]
operations = [
migrations.AlterModelOptions(
name='poem',
options={'ordering': ('title',), 'verbose_name': 'Стихотворение'},
),
migrations.AddField(
model_name='poem',
name='author',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='poems.Poet', verbose_name='Автор(ша)'),
preserve_default=False,
),
migrations.AddField(
model_name='poem',
name='text',
field=models.TextField(default=1, verbose_name='Текст'),
preserve_default=False,
),
migrations.AddField(
model_name='poem',
name='title',
field=models.CharField(default=1, max_length=250, verbose_name='Название'),
preserve_default=False,
),
migrations.AddField(
model_name='poem',
name='year',
field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Год(ы)'),
),
]
| [
"cement-fan@ya.ru"
] | cement-fan@ya.ru |
a15de310a575dd2dfa1b63f03ee73cd8ee65edf5 | 13084338fa9d1c72fe32d323bcd2df1417b98e83 | /src/bxcommon/models/blockchain_peer_info.py | 7ad272ad39cc2493b806aa95ec618225f48830a7 | [
"MIT"
] | permissive | bloXroute-Labs/bxcommon | ad45e3a060a7d1afd119513248da036818c7f885 | 03c4cc5adab1ae182e59a609eff273957499ba5d | refs/heads/master | 2023-02-22T00:10:46.755175 | 2022-08-16T19:38:22 | 2022-08-16T19:38:22 | 220,556,144 | 14 | 7 | MIT | 2023-02-07T22:58:14 | 2019-11-08T22:16:37 | Python | UTF-8 | Python | false | false | 896 | py | from dataclasses import dataclass
from typing import Optional
from bxcommon.utils.blockchain_utils.eth import eth_common_constants
@dataclass
class BlockchainPeerInfo:
ip: str
port: int
node_public_key: Optional[str] = None
blockchain_protocol_version: int = eth_common_constants.ETH_PROTOCOL_VERSION
connection_established: bool = False
def __repr__(self):
return f"BlockchainPeerInfo(ip address: {self.ip}, " \
f"port: {self.port}, " \
f"node public key: {self.node_public_key}, " \
f"blockchain protocol version: {self.blockchain_protocol_version})"
def __eq__(self, other) -> bool:
return (
isinstance(other, BlockchainPeerInfo)
and other.port == self.port
and other.ip == self.ip
)
def __hash__(self):
return hash(f"{self.ip}:{self.port}")
| [
"vc.shane@gmail.com"
] | vc.shane@gmail.com |
b137928399ca34af62b565e767f81889f316ac21 | 2af28d499c4865311d7b350d7b8f96305af05407 | /model-optimizer/mo/front/mxnet/extractor.py | bce78e371dcb4aa4d043ad108a22efe1cbaf7f3d | [
"Apache-2.0"
] | permissive | Dipet/dldt | cfccedac9a4c38457ea49b901c8c645f8805a64b | 549aac9ca210cc5f628a63174daf3e192b8d137e | refs/heads/master | 2021-02-15T11:19:34.938541 | 2020-03-05T15:12:30 | 2020-03-05T15:12:30 | 244,893,475 | 1 | 0 | Apache-2.0 | 2020-03-04T12:22:46 | 2020-03-04T12:22:45 | null | UTF-8 | Python | false | false | 2,912 | py | """
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.mxnet.extractors.batchnorm import batch_norm_ext
from mo.front.mxnet.extractors.concat import concat_ext
from mo.front.mxnet.extractors.crop import crop_ext
from mo.front.mxnet.extractors.l2_normalization import l2_normalization_ext
from mo.front.mxnet.extractors.lrn import lrn_ext
from mo.front.mxnet.extractors.multibox_detection import multi_box_detection_ext
from mo.front.mxnet.extractors.multibox_prior import multi_box_prior_ext
from mo.front.mxnet.extractors.null import null_ext
from mo.front.mxnet.extractors.scaleshift import scale_shift_ext
from mo.front.mxnet.extractors.slice_axis import slice_axis_ext
from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from mo.graph.graph import Node
from mo.utils.error import Error
from mo.utils.utils import refer_to_faq_msg
def extractor_wrapper(mxnet_extractor):
return lambda node: mxnet_extractor(get_mxnet_layer_attrs(node.symbol_dict))
mxnet_op_extractors = {
'BatchNorm': extractor_wrapper(batch_norm_ext),
'ScaleShift': extractor_wrapper(scale_shift_ext),
'slice_axis': extractor_wrapper(slice_axis_ext),
'null': lambda node: null_ext(node.symbol_dict),
'Concat': extractor_wrapper(concat_ext),
'LRN': extractor_wrapper(lrn_ext),
'L2Normalization': extractor_wrapper(l2_normalization_ext),
'_contrib_MultiBoxPrior': extractor_wrapper(multi_box_prior_ext),
'_contrib_MultiBoxDetection': extractor_wrapper(multi_box_detection_ext),
}
def common_mxnet_fields(node: Node):
return {
'kind': 'op',
'name': node.id,
'type': node['symbol_dict']['op'],
'op': node['symbol_dict']['op'],
'infer': None,
'precision': 'FP32'
}
def mxnet_op_extractor(node: Node):
result = common_mxnet_fields(node)
op = result['op']
if op not in mxnet_op_extractors:
raise Error(
"Operation '{}' not supported. Please register it as custom op. " +
refer_to_faq_msg(86),
op)
result_attr = mxnet_op_extractors[op](node)
if result_attr is None:
raise Error('Model Optimizer does not support layer "{}". Please, implement extension. '.format(node.name) +
refer_to_faq_msg(45))
result.update(result_attr)
supported = bool(result_attr)
return supported, result
| [
"alexey.suhov@intel.com"
] | alexey.suhov@intel.com |
49881c0afd820a8d03c284c032931f34cb14c3ef | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /_use_in_my_scripts/switch/008_decorators_template_Simulating a simple Switch in Python with dict.py | 00900f8959a0911755babf3db801ea1c231eaa31 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 459 | py | def dow_switch_dict(dow):
dow_dict = {
1: lambda: print('Monday'),
2: lambda: print('Tuesday'),
3: lambda: print('Wednesday'),
4: lambda: print('Thursday'),
5: lambda: print('Friday'),
6: lambda: print('Saturday'),
7: lambda: print('Sunday'),
'default': lambda: print('Invalid day of week')
}
return dow_dict.get(dow, dow_dict['default'])()
dow_switch_dict(1)
dow_switch_dict(100) | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
16ca6ca8294bb2452eda7a18ad8e9c5ef5fc649e | a12bd907b26934978a09173039e7eed361d09670 | /nbs/models/supplier.py | c1c899d7caac32a40f373b6ffc86e1e8f7ad3a0f | [
"MIT"
] | permissive | coyotevz/nobix-app | 489e2dc8cafc40a3022ef02913461e324bc9f752 | 9523d150e0299b851779f42927992810184e862d | refs/heads/master | 2020-12-20T23:22:10.302025 | 2015-12-18T22:06:44 | 2015-12-18T22:06:44 | 32,998,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,051 | py | # -*- coding: utf-8 -*-
from sqlalchemy.ext.associationproxy import association_proxy
from nbs.models import db
from nbs.models.entity import Entity
class Supplier(Entity):
__tablename__ = 'supplier'
__mapper_args__ = {'polymorphic_identity': u'supplier'}
FREIGHT_SUPPLIER = 'FREIGHT_SUPPLIER'
FREIGHT_CUSTOMER = 'FREIGHT_CUSTOMER'
_freight_types = {
FREIGHT_SUPPLIER: 'Flete de proveedor',
FREIGHT_CUSTOMER: 'Flete de cliente',
}
supplier_id = db.Column(db.Integer, db.ForeignKey('entity.id'),
primary_key=True)
name = Entity._name_1
fiscal_data_id = db.Column(db.Integer, db.ForeignKey('fiscal_data.id'))
fiscal_data = db.relationship('FiscalData',
backref=db.backref('supplier',
uselist=False))
#: our number as customer with this supplier
customer_no = db.Column(db.Unicode)
payment_term = db.Column(db.Integer) # in days
freight_type = db.Column(db.Enum(*_freight_types.keys(),
name='freight_type'), default=FREIGHT_CUSTOMER)
leap_time = db.Column(db.Integer) # in days
supplier_contacts = db.relationship('SupplierContact',
cascade='all,delete-orphan',
backref='supplier')
contacts = association_proxy('supplier_contacts', 'contact')
#: 'bank_accounts' attribute added by BankAccount.supplier relation
#: 'purchases' attribute added by PurchaseDocument.supplier relation
#: 'orders' attribute added by PurchaseOrder.supplier relation
#: Inherited from Entity
#: - address (collection)
#: - phone (collection)
#: - email (collection)
#: - extrafield (collection)
@property
def freight_type_str(self):
return self._freight_types[self.freight_type]
def add_contact(self, contact, role):
self.supplier_contacts.append(SupplierContact(contact, role))
| [
"augusto@rioplomo.com.ar"
] | augusto@rioplomo.com.ar |
90ad9fcdb2334a3144853bebdcabed989714fc08 | d54e1b89dbd0ec5baa6a018464a419e718c1beac | /Python from others/文件/wk_03_分行读取文件.py | ae79c246fb71c837847b4312137a77ae4ae62097 | [] | no_license | cjx1996/vscode_Pythoncode | eda438279b7318e6cb73211e26107c7e1587fdfb | f269ebf7ed80091b22334c48839af2a205a15549 | refs/heads/master | 2021-01-03T19:16:18.103858 | 2020-05-07T13:51:31 | 2020-05-07T13:51:31 | 240,205,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | # 1. 打开文件
file = open("README")
# 2. 读取文件内容
while True:
text = file.readline()
# 判断时候有内容
if not text:
break
print(text)
# 3. 关闭
file.close()
| [
"1121287904@qq.com"
] | 1121287904@qq.com |
bfd6fab30015421b87ddfbb130b4c0fda5ced7dd | 8e474edd3954c4679061bb95970ba40e20c39c2d | /pre_analysis/observable_analysis/qtq0eff_mass_mc_intervals.py | 83a06fd90bb8b0503b0d5ac9de8309a5479a28ad | [
"MIT"
] | permissive | JinKiwoog/LatticeAnalyser | c12f5c11f2777c343a2e1e1cd4e70e91471b4e79 | 6179263e30555d14192e80d94121f924a37704c9 | refs/heads/master | 2020-04-17T18:35:24.240467 | 2019-01-21T11:25:19 | 2019-01-21T11:25:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,597 | py | from pre_analysis.observable_analysis import QtQ0EffectiveMassAnalyser
import copy
import numpy as np
import os
from tools.folderreadingtools import check_folder
import statistics.parallel_tools as ptools
class QtQ0EffectiveMassMCAnalyser(QtQ0EffectiveMassAnalyser):
"""Correlator of <QtQ0> in euclidean time analysis class."""
observable_name = r""
observable_name_compact = "qtq0effmc"
x_label = r"$t_e[fm]$"
y_label = r"$am_\textrm{eff} = \ln \frac{\langle Q_{t_e} Q_0 \rangle}{\langle Q_{t_e+1} Q_0 \rangle}$"
mark_interval = 1
error_mark_interval = 1
def __str__(self):
def info_string(s1, s2): return "\n{0:<20s}: {1:<20s}".format(s1, s2)
return_string = ""
return_string += "\n" + self.section_seperator
return_string += info_string("Data batch folder",
self.batch_data_folder)
return_string += info_string("Batch name", self.batch_name)
return_string += info_string("Observable",
self.observable_name_compact)
return_string += info_string("Beta", "%.2f" % self.beta)
return_string += info_string("Flow time t0",
"%.2f" % self.q0_flow_time)
return_string += info_string("MC-interval: ",
"[%d,%d)" % self.mc_interval)
return_string += "\n" + self.section_seperator
return return_string
def main():
exit("Module QtQ0EffectiveMassAnalyser not intended for standalone usage.")
if __name__ == '__main__':
main()
| [
"hmvege@ulrik.uio.no"
] | hmvege@ulrik.uio.no |
455467f723018a27dbe6a7830158e27d70b9d9a8 | 7a915ae2c07c652cb3abffccd3b1b54c04fd2918 | /main/views.py | 26333397c360b25743f2035cebddc66815dfc322 | [] | no_license | YUNKWANGYOU/healingWheel | 410135bd21f9a4f6922051e63bc50fcf090edc3c | 416434e5ee8f79b366cdee7b81d58382e073020e | refs/heads/master | 2022-10-18T17:59:33.272890 | 2020-06-14T04:10:36 | 2020-06-14T04:10:36 | 264,862,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | from django.shortcuts import render,redirect
from .forms import DrivingTimeForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from datetime import timedelta
def index(request):
return render(request,'main/index.html',)
def aboutus(request):
return render(request,'main/aboutus.html',)
def loc_ren(request):
return render(request,'main/loc_ren.html',)
def how_to(request):
return render(request,'main/how_to.html')
def contact(request):
return render(request,'main/contact.html')
@login_required
def charge(request):
if request.method == 'POST':
us = request.user
profile = us.profile
if request.POST['detail_menu'] == '10':
profile.duration += timedelta(minutes = 10)
elif request.POST['detail_menu'] == '30':
profile.duration += timedelta(minutes = 30)
else:
profile.duration += timedelta(minutes = 60)
profile.save()
return redirect('profile')
return render(request,'main/charge.html',{
})
@login_required
def profile(request):
us = request.user
return render(request,'main/profile.html',{
'user' : us
})
| [
"1996yyk@gmail.com"
] | 1996yyk@gmail.com |
abe9c326d87ac84ae84d1b1abc67baad6d8cd389 | 657d549ffa47c4ef599aa5e0f5760af8de77fec4 | /src/runner/predictors/base_predictor.py | 865569b96277b12446c2ca90d1e3595b99495a53 | [] | no_license | Tung-I/Incremental_Learning | 68357d3db5a646aa6b3df844b85e12fa45e3eb3e | 95602f404ab8dd627c5dd5fcc94a4a071ad330ab | refs/heads/master | 2021-01-14T15:18:21.941132 | 2020-03-30T04:04:13 | 2020-03-30T04:04:13 | 242,659,450 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,531 | py | import logging
import torch
from tqdm import tqdm
from src.runner.utils import EpochLog
LOGGER = logging.getLogger(__name__.split('.')[-1])
class BasePredictor:
"""The base class for all predictors.
Args:
device (torch.device): The device.
test_dataloader (Dataloader): The testing dataloader.
net (BaseNet): The network architecture.
loss_fns (LossFns): The loss functions.
loss_weights (LossWeights): The corresponding weights of loss functions.
metric_fns (MetricFns): The metric functions.
"""
def __init__(self, saved_dir, device, test_dataloader, net, loss_fns, loss_weights, metric_fns):
self.saved_dir = saved_dir
self.device = device
self.test_dataloader = test_dataloader
self.net = net.to(device)
self.loss_fns = loss_fns
self.loss_weights = loss_weights
self.metric_fns = metric_fns
def predict(self):
"""The testing process.
"""
self.net.eval()
dataloader = self.test_dataloader
pbar = tqdm(dataloader, desc='test', ascii=True)
epoch_log = EpochLog()
for i, batch in enumerate(pbar):
with torch.no_grad():
test_dict = self._test_step(batch)
loss = test_dict['loss']
losses = test_dict.get('losses')
metrics = test_dict.get('metrics')
if (i + 1) == len(dataloader) and not dataloader.drop_last:
batch_size = len(dataloader.dataset) % dataloader.batch_size
else:
batch_size = dataloader.batch_size
epoch_log.update(batch_size, loss, losses, metrics)
pbar.set_postfix(**epoch_log.on_step_end_log)
test_log = epoch_log.on_epoch_end_log
LOGGER.info(f'Test log: {test_log}.')
def _test_step(self, batch):
"""The user-defined testing logic.
Args:
batch (dict or sequence): A batch of the data.
Returns:
test_dict (dict): The computed results.
test_dict['loss'] (torch.Tensor)
test_dict['losses'] (dict, optional)
test_dict['metrics'] (dict, optional)
"""
raise NotImplementedError
def load(self, path):
"""Load the model checkpoint.
Args:
path (Path): The path to load the model checkpoint.
"""
checkpoint = torch.load(path, map_location='cpu')
self.net.load_state_dict(checkpoint['net'])
| [
"dong893610@gmail.com"
] | dong893610@gmail.com |
c3771eaa61b07b3cd184d7bdca9ff13270cbd4b8 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2577/60660/296683.py | 4283bd8bc19eca118b313934e87233f0cbfd48c0 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | def jobScheduling(startTime, endTime, profit):
n = len(startTime)
# 按结束时间排序
work = sorted(zip(startTime, endTime, profit))
# 计算OPT数组
dp = [0] * (n + 1)
pos=0#记录与当前不重合的最大区间序号,减少循环量
s=0
for i in range(n):
for j in range(pos, i+1):
# 区间不重合
if work[i][0] >= work[j][1]:
if j == pos:
pos += 1
s = max(s, dp[j])
dp[i]=s+work[i][2]
print(dp[n-1])
st=eval('['+input()+']')
et=eval('['+input()+']')
pf=eval('['+input()+']')
jobScheduling(st,et,pf) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
0569a5ee3ff6e13a8e55562b2dde689b330181d1 | 945b3c14b5a58f8d98955cdf27aef9469e21523c | /flod_matrikkel_address_restapi/matrikkel.py | 370a48d6bcdd2bec075df00225106e533e8fb18f | [
"BSD-2-Clause-Views"
] | permissive | Trondheim-kommune/Bookingbasen | 34e595e9c57ea6428406b2806559aab17e9a3031 | 58235a5a1fd6ad291cb237e6ec9a67bfe8c463c6 | refs/heads/master | 2022-11-29T00:20:18.681549 | 2017-05-29T19:33:43 | 2017-05-29T19:33:43 | 49,863,780 | 1 | 1 | NOASSERTION | 2022-11-22T00:27:34 | 2016-01-18T08:47:46 | JavaScript | UTF-8 | Python | false | false | 6,031 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from suds.client import Client
import suds
import re
import logging
import httplib
import ssl
import urllib2
import socket
import base64
logging.basicConfig(level=logging.INFO)
class HTTPSConnectionV3(httplib.HTTPSConnection):
def __init__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if self._tunnel_host:
self.sock = sock
self._tunnel()
try:
self.sock = ssl.wrap_socket(
sock, self.key_file,
self.cert_file,
ssl_version=ssl.PROTOCOL_SSLv3
)
except ssl.SSLError:
print("Trying SSLv23.")
self.sock = ssl.wrap_socket(
sock,
self.key_file,
self.cert_file,
ssl_version=ssl.PROTOCOL_SSLv23
)
class HTTPSHandlerV3(urllib2.HTTPSHandler):
def https_open(self, req):
return self.do_open(HTTPSConnectionV3, req)
class MatrikkelService(object):
def __init__(self, url, wsdl_url, username, password):
self.url = url
self.wsdl_url = wsdl_url
self.username = username
self.password = password
# install opener
opener = urllib2.build_opener(HTTPSHandlerV3())
self.transport = suds.transport.https.HttpAuthenticated(
username=username,
password=password
)
self.transport.urlopener = opener
self.client = self.create_client()
def create_client(self):
base64string = base64.encodestring(
'%s:%s' % (self.username, self.password)
).replace('\n', '')
authentication_header = {
"WWW-Authenticate": "https://www.test.matrikkel.no",
"Authorization": "Basic %s" % base64string
}
client = Client(
url=self.wsdl_url,
location=self.url,
transport=self.transport,
username=self.username,
password=self.password
)
client.set_options(headers=authentication_header)
return client
def serialize_ident(ident):
dict = {
"kommunenr": str(ident.kommunenr),
"gardsnr": ident.gardsnr,
"bruksnr": ident.bruksnr
}
try:
dict["festenr"] = ident.festenr
except AttributeError:
pass
try:
dict["seksjonsnr"] = ident.seksjonsnr
except AttributeError:
pass
return dict
def get_number_and_letter(query):
#finds out if query ends in number or number + character
match = re.search(r'\d+(\s+)?([A-Za-z]?)$', query)
number = None
letter = None
if match:
number_and_letter = match.group()
query = query.replace(number_and_letter, "")
number_match = re.search(r'\d+', number_and_letter)
if number_match:
number = number_match.group()
letter_match = re.search(r'[A-Za-z]$', number_and_letter)
if letter_match:
letter = letter_match.group()
return query, number, letter
class MatrikkelAdressService(MatrikkelService):
def search_address(self, query, municipality_number):
matrikkel_context = self.client.factory.create('ns2:MatrikkelContext')
query, search_number, search_letter = get_number_and_letter(query)
try:
adresses = self.client.service.findAdresserForVeg(
query,
municipality_number,
matrikkel_context
)
except Exception, e:
print type(e)
adresses = []
result = []
for address in adresses:
address_ident = address.vegadresseIdent
if search_number and int(search_number) != int(address_ident.nr):
continue
try:
letter = address_ident.bokstav
except AttributeError:
letter = None
if search_letter and letter.lower() != search_letter.lower():
continue
address_response = {
"name": "%s %s" % (address.adressenavn, address_ident.nr)
}
if letter:
address_response["name"] += letter
try:
address_response["matrikkel_ident"] = serialize_ident(
address.matrikkelenhetIdent
)
result.append(address_response)
except AttributeError:
pass
return result
def create_point_dict(point):
coord_string = point.point.coordinates.value.split(" ")
return {
"lon": float(coord_string[0]),
"lat": float(coord_string[1])
}
class MatrikkelBuildingService(MatrikkelService):
def find_buildings(self,
kommunenr,
gardsnr,
bruksnr,
festenr=None,
seksjonsnr=None):
matrikkelenhetident = self.client.factory.create('ns5:MatrikkelenhetIdent')
matrikkelenhetident.kommunenr = kommunenr
matrikkelenhetident.gardsnr = gardsnr
matrikkelenhetident.bruksnr = bruksnr
matrikkelenhetident.festenr = festenr
matrikkelenhetident.seksjonsnr = seksjonsnr
matrikkel_context = self.client.factory.create('ns2:MatrikkelContext')
#EPSG:4326
matrikkel_context.sosiKode = 84
buildings = self.client.service.findBygningerForMatrikkelenhet(
matrikkelenhetident,
matrikkel_context
)
return [
{
"position": create_point_dict(building.representasjonspunkt),
"building_number": building.bygningIdent.bygningsnr
}
for building in buildings if str(building.__class__) == "suds.sudsobject.Bygning"
]
| [
"teeejay@gmail.com"
] | teeejay@gmail.com |
aa8b90ef142a6c6eab8212204d6d4306724706ae | 19bc8a9343aa4120453abeff3deddda7d900f774 | /ProgrammingInterviewQuestions/24_DynamicProgrammingFibonacci.py | da078e3a21ff91ec68517a6074e80e997e529813 | [] | no_license | ArunkumarRamanan/CLRS-1 | 98643cde2f561d9960c26378ae29dd92b4c3fc89 | f085db885bcee8d09c1e4f036517acdbd3a0918e | refs/heads/master | 2020-06-28T08:30:44.029970 | 2016-11-19T15:27:55 | 2016-11-19T15:27:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 25 10:29:52 2016
@author: Rahul Patni
"""
# Dynamic Programming Fibonacci
def recursiveApproach(n):
if n == 0 or n == 1:
return 1
return recursiveApproach(n - 1) + recursiveApproach(n - 2)
def iterativeApproach(n):
x1 = 0
x2 = 1
for i in range(1, n + 1):
x3 = x1 + x2
x1 = x2
x2 = x3
return x2
def dynamicApproach(n):
fib = dict()
fib[0] = 1
fib[1] = 1
for i in range(2, n + 1):
fib[i] = fib[i - 1] + fib[i - 2]
# print fib
return fib[n]
def main():
x = 10
print recursiveApproach(x)
print iterativeApproach(x)
print dynamicApproach(x)
main() | [
"rahul20patni@gmail.com"
] | rahul20patni@gmail.com |
7bdbc7a11bdde9e5916deb7091b35bd212766c1d | 08db28fa3836c36433aa105883a762396d4883c6 | /spider/learning/day_01/01_url.py | 1359c9c42a557bf47dfc6cf4ab93d3ca22994db6 | [] | no_license | xieyipeng/FaceRecognition | 1127aaff0dd121319a8652abcfe8a59a7beaaf43 | dede5b181d6b70b87ccf00052df8056a912eff0f | refs/heads/master | 2022-09-19T07:02:33.624410 | 2020-06-02T03:03:58 | 2020-06-02T03:03:58 | 246,464,586 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | import urllib.request
# 学习网址 https://www.bilibili.com/video/av68030937?p=26
def load_data():
url = "http://www.baidu.com/"
# get请求
# http请求
# response:http相应对象
response = urllib.request.urlopen(url)
print(response)
# 读取内容 byte类型
data = response.read()
print(data)
# 将文件获取的内容转换成字符串
str_data = data.decode("utf-8")
print(str_data)
# 将数据写入文件
with open("01-baidu.html", "w", encoding="utf-8")as f:
f.write(str_data)
# 将字符串类型转换为bytes
str_name="baidu"
byte_name=str_name.encode("utf-8")
print(byte_name)
# 如果爬取bytes,类型,要写入str: decode
# 如果爬取str,类型,要写入bytes: encode
load_data()
| [
"3239202719@qq.com"
] | 3239202719@qq.com |
0d3a0639593a2f61d15a0d586b1eec308bd1662b | 933a4f98b3ab1df987bce525d20ca904b225140f | /scripts/slave/recipe_modules/chromium/tests/run_gn.py | 1fa3b5362367efa0f9276e1b2c2d605e9d943b9e | [
"BSD-3-Clause"
] | permissive | mcgreevy/chromium-build | 3881c489b4d9be2f113da755487808b3593f8156 | f8e42c70146c1b668421ee6358dc550a955770a3 | refs/heads/master | 2020-12-30T12:32:15.685191 | 2017-05-17T06:58:18 | 2017-05-17T06:58:18 | 91,419,271 | 0 | 2 | NOASSERTION | 2020-07-22T09:27:35 | 2017-05-16T05:52:45 | Python | UTF-8 | Python | false | false | 1,002 | py | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'chromium',
'recipe_engine/platform',
'recipe_engine/properties',
]
def RunSteps(api):
  """Apply the requested chromium config, then run GN with goma enabled."""
  props = api.properties
  config_name = props.get('chromium_config', 'chromium')
  build_config = props.get('build_config', 'Release')
  target_platform = props.get('target_platform', 'linux')
  api.chromium.set_config(
      config_name,
      BUILD_CONFIG=build_config,
      TARGET_PLATFORM=target_platform)
  api.chromium.run_gn(use_goma=True, gn_path=props.get('gn_path'))
def GenTests(api):
# Defaults: Release build, linux target, default gn path.
yield api.test('basic')
# Exercise the explicit gn binary path override.
yield (
api.test('custom_gn_path') +
api.properties(gn_path='some/other/path/gn')
)
# Mac needs both the simulated host platform and the target_platform property.
yield (
api.test('mac') +
api.platform('mac', 64) +
api.properties(target_platform='mac')
)
yield (
api.test('android') +
api.properties(target_platform='android')
)
yield (
api.test('debug') +
api.properties(build_config='Debug')
)
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
e146a9201f1636b25f025374ad8d9c41871ad505 | f0581fa08ef790606ca019890a2233f91b1c42a7 | /PythonSrc/Unused/Rotations/vector3d.py | 23e7968dd55e8668927443f19b0b467adbd8ada3 | [] | no_license | jaycoskey/IntroToPythonCourse | de758f0dd0a1b541edb2ef4dcc20950a8d8788bb | d1373ec6602584a6791fd48d37ae66ff5f104487 | refs/heads/master | 2023-02-22T16:32:50.533091 | 2021-01-27T08:22:14 | 2021-01-27T08:22:14 | 333,007,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py | package rotations
import unittest
import math


class Vector3d:
    """Simple 3D vector with component-wise arithmetic.

    Fixes relative to the previous revision:
      * the ZERO/XUNIT/YUNIT/ZUNIT constants referenced ``Vector3d`` inside
        its own class body (NameError at import time) -- they are now
        attached to the class after it is defined;
      * every operator method was missing its ``self`` parameter, and the
        scalar-multiplication methods referenced an undefined local ``v``;
      * the duplicated ``cross`` definitions shadowed each other and
        recursed forever -- there is now one instance method that also
        works when called unbound as ``Vector3d.cross(a, b)``;
      * ``norm`` used ``math`` without importing it.
    """

    def __init__(self, *args):
        """Accept no arguments (zero vector), one Vector3d (copy), or (x, y, z)."""
        if len(args) == 0:
            self.x = self.y = self.z = 0.0
        elif len(args) == 1:
            self.x = args[0].x
            self.y = args[0].y
            self.z = args[0].z
        elif len(args) == 3:
            self.x = args[0]
            self.y = args[1]
            self.z = args[2]
        else:
            raise ValueError('Vector3d() requires 0, 1, or 3 arguments')

    def __add__(self, other):
        return Vector3d(self.x + other.x, self.y + other.y, self.z + other.z)

    def __sub__(self, other):
        return Vector3d(self.x - other.x, self.y - other.y, self.z - other.z)

    def __mul__(self, other):
        # Scalar multiplication only (vector * scalar).
        return Vector3d(other * self.x, other * self.y, other * self.z)

    # scalar * vector delegates to the same component-wise scaling.
    __rmul__ = __mul__

    def __truediv__(self, other):
        return Vector3d(self.x / other, self.y / other, self.z / other)

    def __str__(self):
        return '({0:f}, {1:f}, {2:f})'.format(self.x, self.y, self.z)

    def as_quaternion(self):
        '''Identifies the imaginary subspace of quaternionic space with R^3.'''
        # NOTE(review): ``quaternion`` is not defined or imported in this
        # module; calling this still raises NameError, exactly as before.
        return quaternion(0.0, self.x, self.y, self.z)

    def cross(self, other):
        """Right-handed cross product; also callable as ``Vector3d.cross(a, b)``."""
        return Vector3d(
            self.y * other.z - self.z * other.y,
            self.z * other.x - self.x * other.z,
            self.x * other.y - self.y * other.x)

    @staticmethod
    def dot(a, b):
        """Scalar (dot) product of two vectors."""
        return a.x * b.x + a.y * b.y + a.z * b.z

    def interpolate(a, b, t):
        """Linear interpolation: returns ``a`` at t=0 and ``b`` at t=1.

        Deliberately not a staticmethod so both ``Vector3d.interpolate(a, b, t)``
        and ``a.interpolate(b, t)`` keep working, as in the original.
        """
        return (1 - t) * a + t * b

    def norm(self):
        """Euclidean length."""
        return math.sqrt(self.norm2())

    def norm2(self):
        """Squared Euclidean length (avoids the sqrt when comparing lengths)."""
        return self.x ** 2 + self.y ** 2 + self.z ** 2


# Class constants must be created after the class object exists.
Vector3d.ZERO = Vector3d(0.0, 0.0, 0.0)
Vector3d.XUNIT = Vector3d(1.0, 0.0, 0.0)
Vector3d.YUNIT = Vector3d(0.0, 1.0, 0.0)
Vector3d.ZUNIT = Vector3d(0.0, 0.0, 1.0)
| [
"jay.coskey@gmail.com"
] | jay.coskey@gmail.com |
7b8e5a0b99e1f8250761f4bebafb28c015e5515a | b47f2e3f3298388b1bcab3213bef42682985135e | /experiments/heat-3d/tmp_files/1618.py | 79d9cc6c1db159c2a66cbdad061c4da75f285850 | [
"BSD-2-Clause"
] | permissive | LoopTilingBenchmark/benchmark | 29cc9f845d323431e3d40e878cbfc6d1aad1f260 | 52a3d2e70216552a498fd91de02a2fa9cb62122c | refs/heads/master | 2020-09-25T09:45:31.299046 | 2019-12-04T23:25:06 | 2019-12-04T23:25:06 | 225,975,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | from chill import *
# Input kernel: the PolyBench heat-3d stencil.
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c')
# The transformed kernel is written to this experiment file.
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/heat-3d/tmp_files/1618.c')
procedure('kernel_heat_3d')
loop(0)
# Tile both statements (0 and 1) of the time-step loop nest with
# 16x16x32 tiles on loop levels 2, 4 and 6.
tile(0,2,16,2)
tile(0,4,16,4)
tile(0,6,32,6)
tile(1,2,16,2)
tile(1,4,16,4)
tile(1,6,32,6)
| [
"nashenruoyang@163.com"
] | nashenruoyang@163.com |
9395a2ed51c260190fff3c4e43d459356f20f233 | ce9d22c3e0e06d5543b404d0c254a582231a0f4b | /tensorflow_federated/python/aggregators/measurements.py | fbf0d5aae3e8289aff9de732e98c67698bdac830 | [
"Apache-2.0"
] | permissive | stjordanis/federated | d9da8c68072a4eb7871f8e293dafebd7584a00c4 | 6819c65eb823dcb7f3f5666051529b9e2346cb28 | refs/heads/master | 2021-09-08T21:41:56.552453 | 2021-09-02T23:45:17 | 2021-09-02T23:46:21 | 191,418,366 | 0 | 0 | Apache-2.0 | 2019-06-11T17:25:27 | 2019-06-11T17:25:26 | null | UTF-8 | Python | false | false | 7,531 | py | # Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Aggregation factory for adding custom measurements."""
import inspect
from typing import Any, Callable, Dict, Optional
from tensorflow_federated.python.aggregators import factory
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.templates import aggregation_process
from tensorflow_federated.python.core.templates import measured_process
def add_measurements(
inner_agg_factory: factory.AggregationFactory,
*,
client_measurement_fn: Optional[Callable[..., Dict[str, Any]]] = None,
server_measurement_fn: Optional[Callable[..., Dict[str, Any]]] = None,
) -> factory.AggregationFactory:
"""Wraps `AggregationFactory` to report additional measurements.
The function `client_measurement_fn` should be a Python callable that will be
called as `client_measurement_fn(value)` or `client_measurement_fn(value,
weight)` depending on whether `inner_agg_factory` is weighted or unweighted.
It must be traceable by TFF and expect `tff.Value` objects placed at `CLIENTS`
as inputs, and return a `collections.OrderedDict` mapping string names to
tensor values placed at `SERVER`, which will be added to the measurement dict
produced by the `inner_agg_factory`.
Similarly, `server_measurement_fn` should be a Python callable that will be
called as `server_measurement_fn(result)` where `result` is the result (on
server) of the inner aggregation.
One or both of `client_measurement_fn` and `server_measurement_fn` must be
specified.
Args:
inner_agg_factory: The factory to wrap and add measurements.
client_measurement_fn: A Python callable that will be called on `value`
(and/or `weight`) provided to the `next` function to compute additional
measurements of the client values/weights.
server_measurement_fn: A Python callable that will be called on the `result`
of aggregation at server to compute additional measurements of the result.
Returns:
An `AggregationFactory` that reports additional measurements.
"""
py_typecheck.check_type(inner_agg_factory,
factory.AggregationFactory.__args__)
if not (client_measurement_fn or server_measurement_fn):
raise ValueError('Must specify one or both of `client_measurement_fn` or '
'`server_measurement_fn`.')
# Validate the measurement callables' arity up front: the weighted case
# calls client_measurement_fn(value, weight), the unweighted case
# client_measurement_fn(value).
if client_measurement_fn:
py_typecheck.check_callable(client_measurement_fn)
if isinstance(inner_agg_factory, factory.UnweightedAggregationFactory):
if len(inspect.signature(client_measurement_fn).parameters) != 1:
raise ValueError(
'`client_measurement_fn` must take a single parameter if '
'`inner_agg_factory` is unweighted.')
elif isinstance(inner_agg_factory, factory.WeightedAggregationFactory):
if len(inspect.signature(client_measurement_fn).parameters) != 2:
# NOTE(review): message grammar ("a two parameters") is a candidate fix.
raise ValueError(
'`client_measurement_fn` must take a two parameters if '
'`inner_agg_factory` is weighted.')
if server_measurement_fn:
py_typecheck.check_callable(server_measurement_fn)
if len(inspect.signature(server_measurement_fn).parameters) != 1:
raise ValueError('`server_measurement_fn` must take a single parameter.')
# TF-side helper used to merge the extra measurements into the inner ones.
@computations.tf_computation()
def dict_update(orig_dict, new_values):
# An empty inner measurement structure is replaced outright.
if not orig_dict:
return new_values
orig_dict.update(new_values)
return orig_dict
# The weighted and unweighted wrappers only differ in whether a weight is
# threaded through `create`, `next` and the client measurement callable.
if isinstance(inner_agg_factory, factory.WeightedAggregationFactory):
class WeightedWrappedFactory(factory.WeightedAggregationFactory):
"""Wrapper for `WeightedAggregationFactory` that adds new measurements."""
def create(
self, value_type: factory.ValueType, weight_type: factory.ValueType
) -> aggregation_process.AggregationProcess:
py_typecheck.check_type(value_type, factory.ValueType.__args__)
py_typecheck.check_type(weight_type, factory.ValueType.__args__)
inner_agg_process = inner_agg_factory.create(value_type, weight_type)
# The wrapper reuses the inner process' initialize computation as-is.
init_fn = inner_agg_process.initialize
@computations.federated_computation(
init_fn.type_signature.result,
computation_types.at_clients(value_type),
computation_types.at_clients(weight_type))
def next_fn(state, value, weight):
inner_agg_output = inner_agg_process.next(state, value, weight)
measurements = inner_agg_output.measurements
if client_measurement_fn:
client_measurements = client_measurement_fn(value, weight)
measurements = intrinsics.federated_map(
dict_update, (measurements, client_measurements))
if server_measurement_fn:
server_measurements = server_measurement_fn(inner_agg_output.result)
measurements = intrinsics.federated_map(
dict_update, (measurements, server_measurements))
return measured_process.MeasuredProcessOutput(
state=inner_agg_output.state,
result=inner_agg_output.result,
measurements=measurements)
return aggregation_process.AggregationProcess(init_fn, next_fn)
return WeightedWrappedFactory()
else:
class UnweightedWrappedFactory(factory.UnweightedAggregationFactory):
"""Wrapper for `UnweightedAggregationFactory` that adds new measurements."""
def create(
self, value_type: factory.ValueType
) -> aggregation_process.AggregationProcess:
py_typecheck.check_type(value_type, factory.ValueType.__args__)
inner_agg_process = inner_agg_factory.create(value_type)
init_fn = inner_agg_process.initialize
@computations.federated_computation(
init_fn.type_signature.result,
computation_types.at_clients(value_type))
def next_fn(state, value):
inner_agg_output = inner_agg_process.next(state, value)
measurements = inner_agg_output.measurements
if client_measurement_fn:
client_measurements = client_measurement_fn(value)
measurements = intrinsics.federated_map(
dict_update, (measurements, client_measurements))
if server_measurement_fn:
server_measurements = server_measurement_fn(inner_agg_output.result)
measurements = intrinsics.federated_map(
dict_update, (measurements, server_measurements))
return measured_process.MeasuredProcessOutput(
state=inner_agg_output.state,
result=inner_agg_output.result,
measurements=measurements)
return aggregation_process.AggregationProcess(init_fn, next_fn)
return UnweightedWrappedFactory()
| [
"tensorflow.copybara@gmail.com"
] | tensorflow.copybara@gmail.com |
9b029ba1462be18dcc18bfc84ccc15d1ca07a792 | 0c2583011200a5bed73315fde7ef30678075fce7 | /modules/db/entities/US_TOIMP.py | c7bdec4c93c6cdf081aa1872ccff8557411b87aa | [] | no_license | enzococca/pyarchinit_3 | 3d3b5784a3b2e4b753581f28064748043f8c47fe | 00626ba5c24d447fc54c267071f0584a2962182c | refs/heads/master | 2020-03-09T16:59:21.853411 | 2018-03-12T14:49:51 | 2018-03-12T14:49:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,869 | py | '''
Created on 19 feb 2018
@author: Serena Sensini
'''
class US_TOIMP(object):
    """Row object for an imported stratigraphic-unit (US) record.

    Plain data holder: the constructor stores each of the 26 columns verbatim
    on an attribute of the same name, and ``__repr__`` renders all columns in
    declaration order.
    """

    # Column names, in constructor / representation order.
    _FIELDS = (
        'id_us', 'sito', 'area', 'us',
        'd_stratigrafica', 'd_interpretativa', 'descrizione',
        'interpretazione', 'periodo_iniziale', 'fase_iniziale',
        'periodo_finale', 'fase_finale', 'scavato', 'attivita',
        'anno_scavo', 'metodo_di_scavo', 'inclusi', 'campioni',
        'rapporti', 'data_schedatura', 'schedatore', 'formazione',
        'stato_di_conservazione', 'colore', 'consistenza', 'struttura',
    )

    def __init__(self,
                 id_us,
                 sito,
                 area,
                 us,
                 d_stratigrafica,
                 d_interpretativa,
                 descrizione,
                 interpretazione,
                 periodo_iniziale,
                 fase_iniziale,
                 periodo_finale,
                 fase_finale,
                 scavato,
                 attivita,
                 anno_scavo,
                 metodo_di_scavo,
                 inclusi,
                 campioni,
                 rapporti,
                 data_schedatura,
                 schedatore,
                 formazione,
                 stato_di_conservazione,
                 colore,
                 consistenza,
                 struttura):
        # Bind every argument to the attribute of the same name.
        values = locals()
        for field in self._FIELDS:
            setattr(self, field, values[field])

    def __repr__(self):
        # id_us and us are rendered with %d (integers), everything else with %s.
        template = "<US_TOIMP('%d', '%s', '%s', '%d','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')>"
        return template % tuple(getattr(self, field) for field in self._FIELDS)
"serena.sensini@gmail.com"
] | serena.sensini@gmail.com |
4dffd211b37de445ce2265d53a8f960213309ae9 | fc2d1f44ec35577b0e291f403907ccc8c7859edf | /docs/conf.py | d59a6ebf0dd656cf813d7cab8dbcd6f4446c78ff | [
"MIT"
] | permissive | sobolevn/python-humanfriendly | 35403b4e611f0f95ad474de8e8efd354f12b5369 | 03d1db48e8ab4539403a58d7dea7ef0bd6672ae3 | refs/heads/master | 2020-04-26T10:52:16.294536 | 2019-02-21T20:21:43 | 2019-02-21T20:21:43 | 173,498,753 | 0 | 0 | MIT | 2019-03-02T21:04:23 | 2019-03-02T21:04:23 | null | UTF-8 | Python | false | false | 2,325 | py | # -*- coding: utf-8 -*-
"""Documentation build configuration file for the `humanfriendly` package."""
import os
import sys
# Add the 'humanfriendly' source distribution's root directory to the module path.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# Sphinx extension module names.
extensions = [
'sphinx.ext.doctest',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'humanfriendly.sphinx',
]
# Configuration for the `autodoc' extension.
autodoc_member_order = 'bysource'
# Paths that contain templates, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'humanfriendly'
copyright = u'2018, Peter Odding'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Find the package version and make it the release.
from humanfriendly import __version__ as humanfriendly_version # noqa
# The short X.Y version.
version = '.'.join(humanfriendly_version.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = humanfriendly_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Refer to the Python standard library.
# From: http://twistedmatrix.com/trac/ticket/4582.
intersphinx_mapping = dict(
python2=('https://docs.python.org/2', None),
python3=('https://docs.python.org/3', None),
coloredlogs=('https://coloredlogs.readthedocs.io/en/latest/', None),
)
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
| [
"peter@peterodding.com"
] | peter@peterodding.com |
cb7af099f940f5ba112e7f53d43e231bfca9550a | 3b9b4049a8e7d38b49e07bb752780b2f1d792851 | /src/third_party/catapult/tracing/bin/run_py_tests | d5cf781888af6fb4e715bae9e5e21c30164b2f3a | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
] | permissive | webosce/chromium53 | f8e745e91363586aee9620c609aacf15b3261540 | 9171447efcf0bb393d41d1dc877c7c13c46d8e38 | refs/heads/webosce | 2020-03-26T23:08:14.416858 | 2018-08-23T08:35:17 | 2018-09-20T14:25:18 | 145,513,343 | 0 | 2 | Apache-2.0 | 2019-08-21T22:44:55 | 2018-08-21T05:52:31 | null | UTF-8 | Python | false | false | 1,115 | #!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import platform
import sys
# Repository root: two levels above this script (tracing/bin -> catapult).
_CATAPULT_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.path.pardir, os.path.pardir))
_TRACING_PATH = os.path.join(_CATAPULT_PATH, 'tracing')
def _RunTestsOrDie(top_level_dir):
"""Run the typ test runner on *top_level_dir* and exit the process on failure."""
# ``run_with_typ`` is imported lazily in the __main__ block (after sys.path
# setup), so the module-level global exists by the time this is called.
exit_code = run_with_typ.Run(top_level_dir, path=[_TRACING_PATH])
if exit_code:
sys.exit(exit_code)
def _AddToPathIfNeeded(path):
  """Prepend *path* to sys.path, unless it is already present."""
  if path in sys.path:
    return
  sys.path.insert(0, path)
if __name__ == '__main__':
# Make the catapult root importable before pulling in repo-local modules.
_AddToPathIfNeeded(_CATAPULT_PATH)
from hooks import install
if '--no-install-hooks' in sys.argv:
sys.argv.remove('--no-install-hooks')
else:
install.InstallHooks()
from catapult_build import run_with_typ
# The tracing/tracing suite is skipped on Windows, see:
# https://github.com/catapult-project/catapult/issues/2050
if platform.system() != 'Windows':
_RunTestsOrDie(os.path.join(_TRACING_PATH, 'tracing'))
_RunTestsOrDie(os.path.join(_TRACING_PATH, 'tracing_build'))
sys.exit(0)
"changhyeok.bae@lge.com"
] | changhyeok.bae@lge.com | |
ad40c75916ec2d3d84483c9477a39ee50804f258 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03089/s954098144.py | 4781af4d76b7517d6d52dd675494067d64a378b7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | n = int(input())
def solve(b):
    """Reconstruct an insertion order producing sequence ``b``, or return None.

    ``b`` is a sequence built by repeatedly choosing some j and inserting the
    value j at 1-based position j.  Working backwards, the last insertion must
    be at the largest index i where b[i-1] == i; remove that element and
    repeat.  Returns the removed values in reverse-chronological order (the
    caller prints the reversed list), or None when some intermediate state has
    no element sitting at its own 1-based index.

    Improvements over the previous inline script: the logic is a pure,
    testable function; only the last matching index is tracked instead of
    collecting every match into a throwaway list; the caller's list is not
    mutated; and no ``exit()`` call is needed.
    """
    b = list(b)  # work on a copy; do not mutate the caller's list
    removed = []
    while b:
        last_match = -1
        for i, v in enumerate(b, start=1):
            if v == i:
                last_match = i
        if last_match == -1:
            return None  # no element sits at its own 1-based index
        # The popped value equals last_match by construction.
        removed.append(b.pop(last_match - 1))
    return removed


if __name__ == '__main__':
    B = [int(i) for i in input().split()]
    ans = solve(B)
    if ans is None:
        print(-1)
    else:
        for i in ans[::-1]:
            print(i)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
75ca957fcc6b0ca47831ce5f73d697ab54cb8436 | 1d0a4750e216f301ec49a247bf7bf07cd61fa29f | /app/views/commuter/company_commuter_plan_view.py | ec9cd4dcec20664a89e39d135f1290141e2b2699 | [] | no_license | smoothbenefits/BenefitMY_Python | 52745a11db2cc9ab394c8de7954974e6d5a05e13 | b7e8474a728bc22778fd24fe88d1918945a8cfc8 | refs/heads/master | 2021-03-27T15:57:34.798289 | 2018-04-29T19:04:04 | 2018-04-29T19:04:04 | 24,351,568 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | from rest_framework.views import APIView
from django.http import Http404
from rest_framework.response import Response
from rest_framework import status
from app.models.commuter.company_commuter_plan import CompanyCommuterPlan
from app.serializers.commuter.company_commuter_plan_serializer import (
CompanyCommuterPlanSerializer,
CompanyCommuterPlanPostSerializer)
class CompanyCommuterPlanView(APIView):
"""CRUD endpoints for a single CompanyCommuterPlan addressed by primary key."""
def _get_object(self, pk):
"""Fetch the plan or translate a missing row into an HTTP 404."""
try:
return CompanyCommuterPlan.objects.get(pk=pk)
except CompanyCommuterPlan.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
"""Return the serialized plan."""
plan = self._get_object(pk)
serializer = CompanyCommuterPlanSerializer(plan)
return Response(serializer.data)
def delete(self, request, pk, format=None):
"""Delete the plan; responds 204 with no body."""
plan = self._get_object(pk)
plan.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
def put(self, request, pk, format=None):
"""Full update of an existing plan from the request payload."""
plan = self._get_object(pk)
serializer = CompanyCommuterPlanSerializer(plan, data=request.DATA)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def post(self, request, pk, format=None):
"""Create a new plan from the request payload.
NOTE(review): ``pk`` is accepted but unused here -- presumably kept for
URL-pattern symmetry; confirm against the route definitions.
"""
serializer = CompanyCommuterPlanPostSerializer(data=request.DATA)
if serializer.is_valid():
serializer.save()
response_serializer = CompanyCommuterPlanSerializer(serializer.object)
return Response(response_serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CompanyCommuterPlanByCompanyView(APIView):
"""Read-only endpoint listing all commuter plans belonging to one company."""
def _get_object(self, company_id):
# NOTE(review): QuerySet.filter() never raises DoesNotExist (it simply
# returns an empty queryset), so this except branch is dead code.
try:
return CompanyCommuterPlan.objects.filter(company=company_id)
except CompanyCommuterPlan.DoesNotExist:
raise Http404
def get(self, request, company_id, format=None):
"""Return the (possibly empty) list of plans for *company_id*."""
plans = self._get_object(company_id)
serializer = CompanyCommuterPlanSerializer(plans, many=True)
return Response(serializer.data)
| [
"jeffzhang_misc@hotmail.com"
] | jeffzhang_misc@hotmail.com |
a4b03be3990b0a990a9b9b5921833e1949890b55 | 27b4d1b7723845812111a0c6c659ef87c8da2755 | /PythonCookBook/1_数据结构和算法/查找最大或者最小的N个元素列表/03.py | de7b91bd9c47a32638d00d17ac4d93dab161ccb6 | [] | no_license | NAMEs/Python_Note | 59a6eff7b4287aaef04bd69fbd4af3faf56cccb4 | f560e00af37c4f22546abc4c2756e7037adcc40c | refs/heads/master | 2022-04-11T09:32:17.512962 | 2020-03-17T09:30:58 | 2020-03-17T09:30:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,882 | py | '''
如果你想在一个集合中查找最小或最大的 N 个元素,并且 N 小于集合元素数量,
那么这些函数提供了很好的性能。因为在底层实现里面,首先会先将集合数据进行堆排
序后放入一个列表中
堆数据结构最重要的特征是 heap[0] 永远是最小的元素。并且剩余的元素可以很
容易的通过调用 heapq.heappop() 方法得到,该方法会先将第一个元素弹出来,然后
用下一个最小的元素来取代被弹出元素(这种操作时间复杂度仅仅是 O(log N),N 是
堆大小)。比如,如果想要查找最小的 3 个元素,你可以这样做:
当要查找的元素个数相对比较小的时候,函数 nlargest() 和 nsmallest() 是很
合适的。如果你仅仅想查找唯一的最小或最大(N=1)的元素的话,那么使用 min() 和
max() 函数会更快些。类似的,如果 N 的大小和集合大小接近的时候,通常先排序这个
集合然后再使用切片操作会更快点(sorted(items)[:N] 或者是 sorted(items)[-N:]
)。需要在正确场合使用函数 nlargest() 和 nsmallest() 才能发挥它们的优势(如果
N 快接近集合大小了,那么使用排序操作会更好些)。
尽管你没有必要一定使用这里的方法,但是堆数据结构的实现是一个很有趣并且
值得你深入学习的东西。基本上只要是数据结构和算法书籍里面都会有提及到。heapq
模块的官方文档里面也详细的介绍了堆数据结构底层的实现细节。
'''
import heapq
# Sample data; duplicates (23, 2) are included on purpose.
nums = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2]
print("nums:",nums)
# Work on a copy so ``nums`` itself stays untouched.
heap = list(nums)
print("heap:",heap)
# Rearrange the list in place into a binary min-heap (heap[0] is the minimum).
heapq.heapify(heap)
print("heap:",heap)
# heappop() always yields the current minimum, so this prints the values in
# ascending order; the range is evaluated before any element is popped.
for i in range(1,len(heap)+1):
num = heapq.heappop(heap)
print("{0} --- {1}".format(i,num))
print(heap) | [
"1558255789@qq.com"
] | 1558255789@qq.com |
c22b288eeec61c012ac1e9fb29b0cd92193615b1 | 1adc05008f0caa9a81cc4fc3a737fcbcebb68995 | /hardhat/recipes/libsigc++.py | bd7663119f55fe29bf5236253c373c8e8888cf25 | [
"MIT",
"BSD-3-Clause"
] | permissive | stangelandcl/hardhat | 4aa995518697d19b179c64751108963fa656cfca | 1ad0c5dec16728c0243023acb9594f435ef18f9c | refs/heads/master | 2021-01-11T17:19:41.988477 | 2019-03-22T22:18:44 | 2019-03-22T22:18:52 | 79,742,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | from .base import GnuRecipe
class LibSigCppRecipe(GnuRecipe):
    """Build recipe for libsigc++, the GNOME typesafe callback library."""

    def __init__(self, *args, **kwargs):
        super(LibSigCppRecipe, self).__init__(*args, **kwargs)
        self.sha256 = ('774980d027c52947cb9ee4fac6ffe2ca'
                       '60cc2f753068a89dfd281c83dbff9651')
        self.name = 'libsigc++'
        self.version = '2.8.0'
        # GNOME mirrors shard tarballs into major.minor directories.
        major_minor = '.'.join(self.version.split('.')[:2])
        self.url = ('http://ftp.gnome.org/pub/gnome/sources/$name/'
                    '%s/$name-$version.tar.xz' % major_minor)
| [
"clayton.stangeland@gmail.com"
] | clayton.stangeland@gmail.com |
bdaca89a365a3445264646da386645b9b5fad002 | 03a2c1eb549a66cc0cff72857963eccb0a56031d | /acmicpc/14427.py | 133779d5c5f1c126eba92eb72a510a05dea57127 | [] | no_license | nobe0716/problem_solving | c56e24564dbe3a8b7093fb37cd60c9e0b25f8e59 | cd43dc1eddb49d6b5965419e36db708c300dadf5 | refs/heads/master | 2023-01-21T14:05:54.170065 | 2023-01-15T16:36:30 | 2023-01-15T16:36:30 | 80,906,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | import math
import sys
input = sys.stdin.readline
# Segment tree over (value, index) pairs answering "which position currently
# holds the minimum value" (ties resolve to the lexicographically smaller pair,
# i.e. the smaller index).
# Sentinel pair: the value component exceeds any expected input, so it never
# wins min() against a real leaf.
_DEFAULT = (10 ** 9 + 1, 1)
# Node combiner (min of two pairs) and leaf constructor (value, 1-based index).
_GET = lambda x, y: min(x, y)
_SET = lambda i, v: (v, i)
n = int(input())
a = [int(x) for x in input().split()]
a = [_SET(i, v) for i, v in enumerate(a, start=1)]
m = int(input())
# Round the leaf count up to a power of two so the tree is complete.
BASE = 2 ** math.ceil(math.log(n, 2))
st = [_DEFAULT] * BASE * 2
st[BASE:BASE + n] = a
# Build internal nodes bottom-up.
for i in range(BASE - 1, 0, -1):
st[i] = _GET(st[i * 2], st[i * 2 + 1])
def get(lo: int, hi: int) -> int:
"""Return the minimal (value, index) pair over the inclusive 1-based range [lo, hi]."""
lo += BASE - 1
hi += BASE - 1
v = _DEFAULT
while lo < hi:
if lo % 2 == 1:
v = _GET(v, st[lo])
lo += 1
if hi % 2 == 0:
v = _GET(v, st[hi])
hi -= 1
lo //= 2
hi //= 2
if lo == hi:
v = _GET(v, st[lo])
return v
# NOTE(review): shadows the built-in ``set`` -- harmless in this script but
# worth renaming.
def set(i: int, v: int):
"""Point-update leaf ``i`` (1-based) to value ``v`` and refresh its ancestors."""
st[BASE + i - 1] = _SET(i, v)
i = (i + BASE - 1) // 2
while i > 0:
st[i] = _GET(st[i * 2], st[i * 2 + 1])
i //= 2
# Query type 2 prints the index of the current minimum; any other type is a
# point update "set position line[1] to value line[2]".
for _ in range(m):
line = list(map(int, input().split()))
if line[0] == 2:
print(get(1, n)[1])
else:
set(line[1], line[2])
| [
"sunghyo.jung@navercorp.com"
] | sunghyo.jung@navercorp.com |
3e0fc28c46e9bd40233e17d0b10f99cee105f0c6 | b2ed893d04f04eeaf7209187133de7431c476a96 | /icc/merge_data.py | 7e76b3267853a20f5be07f2f3caa9dc3cd1a9150 | [] | no_license | liruikaiyao/workshop | 4b5221259f59ad504d87d73c31f5fa0e58d4a1f0 | 6dbde74e35ef02f5e92c76dcdd1909f1d0afb89e | refs/heads/master | 2021-01-17T16:09:13.248109 | 2015-08-05T09:43:21 | 2015-08-05T09:43:21 | 23,420,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | #coding:utf-8
from config.db import bda
# Destination collection that will hold the union of all cluster documents.
all_cluster=bda['all_cluster']
db_list=bda.collection_names()
# Keep only collections whose name is exactly 32 characters long
# (presumably md5-style cluster ids -- verify against the producer).
a=db_list
db_list=[]
for elem in a:
if len(elem)==32:
db_list.append(elem)
# Copy every document from each per-cluster collection into all_cluster.
for elem in db_list:
db=bda[elem]
for item in db.find():
all_cluster.insert(item)
| [
"liruikaiyao@gmail.com"
] | liruikaiyao@gmail.com |
aa63d3f03980b5759d81dab4f148f013d82a0cab | f62fd455e593a7ad203a5c268e23129473d968b6 | /python-watcher-1.0.1/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py | e0664158a07fed9442d6ba6ed109f10802e82eff | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | MinbinGong/OpenStack-Ocata | 5d17bcd47a46d48ff9e71e2055f667836174242f | 8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3 | refs/heads/master | 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 | Python | UTF-8 | Python | false | false | 5,643 | py | # -*- encoding: utf-8 -*-
#
# Authors: Vojtech CIMA <cima@zhaw.ch>
# Bruno GRAZIOLI <gaea@zhaw.ch>
# Sean MURPHY <murp@zhaw.ch>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from watcher.decision_engine.model.collector import base
from watcher.decision_engine.model import model_root as modelroot
class FakerModelCollector(base.BaseClusterDataModelCollector):
"""Test double that loads canned cluster models from XML fixture files."""
def __init__(self, config=None, osc=None):
# A mock config keeps the base-class constructor happy in unit tests.
if config is None:
config = mock.Mock()
super(FakerModelCollector, self).__init__(config)
@property
def notification_endpoints(self):
# The fake collector consumes no notifications.
return []
def execute(self):
# Default collection result is scenario 1.
return self.generate_scenario_1()
def load_data(self, filename):
"""Read the raw bytes of a fixture file from the sibling "data" folder."""
cwd = os.path.abspath(os.path.dirname(__file__))
data_folder = os.path.join(cwd, "data")
with open(os.path.join(data_folder, filename), 'rb') as xml_file:
xml_data = xml_file.read()
return xml_data
def load_model(self, filename):
"""Deserialize a fixture file into a ModelRoot cluster model."""
return modelroot.ModelRoot.from_xml(self.load_data(filename))
def generate_scenario_1(self):
"""Simulates cluster with 2 nodes and 2 instances using 1:1 mapping"""
return self.load_model('scenario_1_with_metrics.xml')
def generate_scenario_2(self):
"""Simulates a cluster
With 4 nodes and 6 instances all mapped to a single node
"""
return self.load_model('scenario_2_with_metrics.xml')
def generate_scenario_3(self):
"""Simulates a cluster
With 4 nodes and 6 instances all mapped to one node
"""
return self.load_model('scenario_3_with_metrics.xml')
def generate_scenario_4(self):
"""Simulates a cluster
With 4 nodes and 6 instances spread on all nodes
"""
return self.load_model('scenario_4_with_metrics.xml')
class FakeCeilometerMetrics(object):
    """Canned Ceilometer statistics for the fake cluster models."""

    def __init__(self, model):
        self.model = model

    def mock_get_statistics(self, resource_id, meter_name, period=3600,
                            aggregate='avg'):
        """Dispatch to the canned value matching *meter_name* (None if unknown)."""
        handlers = {
            "compute.node.cpu.percent": self.get_node_cpu_util,
            "cpu_util": self.get_instance_cpu_util,
            "memory.usage": self.get_instance_ram_util,
            "disk.root.size": self.get_instance_disk_root_size,
        }
        handler = handlers.get(meter_name)
        if handler is not None:
            return handler(resource_id)

    def get_node_cpu_util(self, r_id):
        """Derive a node's relative CPU utilization <0, 100> dynamically.

        The value is correlated with the actual instance-to-node mappings in
        the cluster model: each hosted instance contributes its vcpus weighted
        by its own canned CPU utilization, normalized by the node's vcpus.

        :param r_id: resource id
        """
        node_uuid = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1])
        node = self.model.get_node_by_uuid(node_uuid)
        instances = self.model.get_node_instances(node)
        util_sum = 0.0
        for instance_uuid in instances:
            instance = self.model.get_instance_by_uuid(instance_uuid)
            weighted = instance.vcpus * self.get_instance_cpu_util(
                instance.uuid)
            util_sum += weighted / 100.0
        util_sum /= node.vcpus
        return util_sum * 100.0

    @staticmethod
    def get_instance_cpu_util(r_id):
        """Canned per-instance CPU utilization (percent)."""
        cpu_util = {
            'INSTANCE_0': 10, 'INSTANCE_1': 30, 'INSTANCE_2': 60,
            'INSTANCE_3': 20, 'INSTANCE_4': 40, 'INSTANCE_5': 50,
            'INSTANCE_6': 100, 'INSTANCE_7': 100, 'INSTANCE_8': 100,
            'INSTANCE_9': 100,
        }
        return cpu_util[str(r_id)]

    @staticmethod
    def get_instance_ram_util(r_id):
        """Canned per-instance RAM usage."""
        ram_util = {
            'INSTANCE_0': 1, 'INSTANCE_1': 2, 'INSTANCE_2': 4,
            'INSTANCE_3': 8, 'INSTANCE_4': 3, 'INSTANCE_5': 2,
            'INSTANCE_6': 1, 'INSTANCE_7': 2, 'INSTANCE_8': 4,
            'INSTANCE_9': 8,
        }
        return ram_util[str(r_id)]

    @staticmethod
    def get_instance_disk_root_size(r_id):
        """Canned per-instance root disk size."""
        disk_util = {
            'INSTANCE_0': 10, 'INSTANCE_1': 15, 'INSTANCE_2': 30,
            'INSTANCE_3': 35, 'INSTANCE_4': 20, 'INSTANCE_5': 25,
            'INSTANCE_6': 25, 'INSTANCE_7': 25, 'INSTANCE_8': 25,
            'INSTANCE_9': 25,
        }
        return disk_util[str(r_id)]
| [
"gongwayne@hotmail.com"
] | gongwayne@hotmail.com |
75508448eb04949efc0a5950f4ce7749c1dfc7fe | 2b16a66bfc186b52ed585081ae987e97cab8223b | /script/document_classification/import_lr_wiki_classification_result.py | d1509efa93e4dbe0e8ffc75c8f82eb869f157495 | [] | no_license | OldPickles/SKnowledgeGraph | d334000c7a41dd5014fd59154bbe070fcc754e4c | 6d131ad6bf3a09a5ce6461fa03690117d703c9e8 | refs/heads/master | 2022-01-09T11:27:00.043712 | 2019-06-06T07:57:06 | 2019-06-06T07:57:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | from db_importer.wiki_classification_result import WikiClassificationResultDBImporter
if __name__ == "__main__":
importer = WikiClassificationResultDBImporter()
importer.import_lr_wiki_classification_result(result_json_file_name="lr_result.v2.json")
| [
"467701860@qq.com"
] | 467701860@qq.com |
b311deac9287395ba96912fa77bba8b7069189ba | 1e1c85d0d74bc1b111e77f082cd24c94219d7eb0 | /VE-Tests/tests/KD/test_device_logout.py | 4fcd4b6840117995b378d5aa39690bbfe93167a1 | [] | no_license | anshsaikia/GSSDeliverables-YesProject | b6f5e4de8d853ce21dfe7401c4b9179c40f32a89 | ed786ccfd7b8c344802c7ff6d0cfd4afbffe015e | refs/heads/master | 2020-04-06T04:07:49.034461 | 2017-02-24T13:39:48 | 2017-02-24T13:39:48 | 83,044,504 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,896 | py | import pytest
__author__ = 'srevg'
from tests_framework.ve_tests.ve_test import VeTestApi
from vgw_test_utils.IHmarks import IHmark
@IHmark.LV_L2
@IHmark.O_iOS
@IHmark.O_Android
@IHmark.MF1530
@IHmark.MF1342
@pytest.mark.commit
@pytest.mark.MF1530_LogOut
@pytest.mark.MF1342_LogOut
@pytest.mark.level2
def test_log_out():
ve_test = VeTestApi("log_out_feature")
ve_test.begin()
device_details_milestones = ve_test.milestones.getDeviceDetails()
deviceId_1 = device_details_milestones['drm-device-id']
hh_id = ve_test.configuration["he"]["generated_household"]
user_name = ve_test.configuration["he"]["generated_username"]
ve_test.wait(7)
ve_test.screens.settings.log_out()
# Re Sign In with Same User Name and verify if the device id which is used is same:
login_screen = ve_test.screens.login_screen
login_screen.sign_in(hh_id,user_name)
device_details_milestones = ve_test.milestones.getDeviceDetails()
deviceId_2 = device_details_milestones['drm-device-id']
ve_test.log_assert(deviceId_1 == deviceId_2, "Device ids are different")
ve_test.wait(7)
ve_test.screens.settings.log_out()
#Query from upm and see if the device id is still present in the household
d = ve_test.he_utils.getDeviceIdFromDeviceAndHH(deviceId_2, hh_id)
ve_test.log_assert(deviceId_2.upper() == d, "device id deleted in upm")
#Re Sign In with different User Name and verify if the device id which is used is different:
hhId, login = ve_test.he_utils.createTestHouseHold()
ve_test.he_utils.setHHoffers(hhId)
ve_test.screens.login_screen.sign_in(hhId, user_name=hhId, password='123')
device_details_milestones = ve_test.milestones.getDeviceDetails()
deviceId_3 = device_details_milestones['drm-device-id']
ve_test.log_assert(deviceId_3 is not deviceId_2,"Device ids are same")
ve_test.wait(7)
ve_test.end() | [
"anshuman.saikia@lnttechservices.com"
] | anshuman.saikia@lnttechservices.com |
fd30b078e6d7cffb844e3d1190637df352e04368 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /able_hand_and_little_case/own_group.py | b3f4f5a358d913248b2a972a20c5acd10078ea22 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py |
#! /usr/bin/env python
def be_point(str_arg):
do_new_part(str_arg)
print('right_case')
def do_new_part(str_arg):
print(str_arg)
if __name__ == '__main__':
be_point('eye_or_person')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
e07a8919ecdfb3638a538d4e5a1d875b6b48b2b3 | bf20548c143fdaecc1d8b5746dab142414b27786 | /galaxy-tool-BLAST/utilities/bold/add_taxonomy_bold.py | 1f03eabf49e5f4c97606b6edbf03d50fbf3cf580 | [] | no_license | zeromtmu/galaxy-tool-temp-2019 | e9f58956b014e2e4e9260b028c14549f90756f05 | 704c3b850e8ddf5420dc458a0282717ab2268c40 | refs/heads/master | 2021-10-25T05:02:55.328975 | 2019-04-01T11:40:41 | 2019-04-01T11:40:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,294 | py | """
"""
from Bio import SeqIO
import argparse
parser = argparse.ArgumentParser(description='Add taxonomy to BOLD fasta file')
parser.add_argument('-t', '--taxonomy', dest='taxonomy', type=str, required=True)
parser.add_argument('-g', '--gbif_taxonomy', dest='gbif', type=str, required=True)
parser.add_argument('-b', '--bold_fasta', dest='bold', type=str, required=True)
parser.add_argument('-o', '--output', dest='output', type=str, required=True)
args = parser.parse_args()
def make_taxon_dict():
taxonDict = {}
with open(args.taxonomy,"r") as taxonomy:
for x in taxonomy:
x = x.strip().split("\t")
unknowns = ["unknown kingdom", "unknown phylum", "unknown class", "unknown order", "unknown family", "unknown genus", "unknown species"]
for known in unknowns[len(x):]:
x.append(known)
valueCount = 0
for value in x:
if not value:
x[valueCount] = unknowns[valueCount]
valueCount += 1
taxonDict[x[0]] = x
return taxonDict
def make_kingdom_dict():
kingdomDict = {}
with open(args.gbif,"r") as gbif:
for x in gbif:
x = x.split("\t")
if x[1] not in kingdomDict:
kingdomDict[x[1]] = x[0]
if x[2] not in kingdomDict:
kingdomDict[x[2]] = x[0]
if x[3] not in kingdomDict:
kingdomDict[x[3]] = x[0]
if x[4] not in kingdomDict:
kingdomDict[x[4]] = x[0]
if x[5] not in kingdomDict:
kingdomDict[x[5]] = x[0]
return kingdomDict
def add_taxonomy(taxonDict, kingdomDict):
with open(args.bold, "r") as bold, open(args.output,"a") as output:
for record in SeqIO.parse(bold, "fasta"):
accession = str(record.description).split("|")[0]
if accession in taxonDict:
if taxonDict[accession][1] in kingdomDict:
kingdom = kingdomDict[taxonDict[accession][1]]
elif taxonDict[accession][2] in kingdomDict:
kingdom = kingdomDict[taxonDict[accession][2]]
elif taxonDict[accession][3] in kingdomDict:
kingdom = kingdomDict[taxonDict[accession][3]]
elif taxonDict[accession][4] in kingdomDict:
kingdom = kingdomDict[taxonDict[accession][4]]
elif taxonDict[accession][5] in kingdomDict:
kingdom = kingdomDict[taxonDict[accession][5]]
else:
#print accession+" no kingdom"
kingdom = "unknown kingdom"
output.write(">BOLD|"+accession+"|"+taxonDict[accession][-1]+"|"+kingdom+"|"+taxonDict[accession][1]+"|"+taxonDict[accession][2]+"|"+taxonDict[accession][3]+"|"+taxonDict[accession][4]+"|"+taxonDict[accession][5]+"|"+taxonDict[accession][-1]+"\n")
output.write(str(record.seq)+"\n")
else:
print accession+" no taxonomy"
def main():
taxonDict = make_taxon_dict()
kingdomDict = make_kingdom_dict()
add_taxonomy(taxonDict, kingdomDict)
if __name__=="__main__":
main()
| [
"martenhoogeveen@gmail.com"
] | martenhoogeveen@gmail.com |
a22cfb2be3ed7a20604c5c82392355d9e69ae696 | 008bc57ad937f0d76edbe29376220b33ff2fddc1 | /CRC/crc_report_regression_testing.py | b48f74725a8653f7462fc73b88bfeed1032599aa | [] | no_license | chetandg123/cQubeTesting-2.0 | f1b15d77401e677a6e4d2e9e497a364e3dd001b2 | bd3ab2b6c8be65bfc1aef3a42585360d70483bd5 | refs/heads/master | 2023-07-12T22:10:51.705709 | 2021-08-11T11:20:51 | 2021-08-11T11:20:51 | 374,532,154 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,482 | py | import unittest
from CRC.check_clusterwise_records import crc_schoolevel_records
from CRC.check_crc_tabledata_by_selecting_districts import districtwise_tabledata
from CRC.check_districtwise_records import test_crc_report_districtwise
from CRC.check_homebtn import Homeicon
from CRC.check_table_data_order import Check_order_of_tabledata
from CRC.check_xaxis_and_yaxis_from_selectbox import plot_values
from CRC.click_on_hyperlink import click_on_hyperlinks
from CRC.download_blockwise_csv import donwload_blockwise_csv
from CRC.download_clusterwise_csv import load_clusterwise_csv
from CRC.download_districtwise_csv import Districtwise_donwload
from CRC.download_schoolwise_csv import school_wise_download
from CRC.navigate_to_crc_and_click_on_logout import Logout_function
from CRC.navigate_to_crc_report import loading_crc
from reuse_func import GetData
class cQube_CRC_Report(unittest.TestCase):
@classmethod
def setUpClass(self):
self.data = GetData()
self.driver = self.data.get_driver()
self.data.open_cqube_appln(self.driver)
self.data.login_cqube(self.driver)
self.data.navigate_to_crc_report()
self.data.page_loading(self.driver)
self.driver.implicitly_wait(100)
def test_navigate_crc(self):
b = loading_crc(self.driver)
res = b.test_crc()
if "crc-report" in self.driver.current_url:
print("Navigated back to crc report")
else:
print("CRC report is not loaded ")
self.data.page_loading(self.driver)
def test_download_districtwise(self):
b = Districtwise_donwload(self.driver)
result = b.test_districtwise()
self.assertEqual(0, result, msg="File is not downloaded")
print("district wise csv file is downloaded ")
self.data.page_loading(self.driver)
def test_download_blockwise_csv(self):
b = donwload_blockwise_csv(self.driver)
result = b.test_blockwise()
self.assertEqual(0,result, msg="File is not downloaded")
print("blockwise csv file is downloaded ")
self.data.page_loading(self.driver)
def test_download_clusterwise_csv(self):
b = load_clusterwise_csv(self.driver)
result = b.test_clusterwise()
self.assertEqual(0, result, msg="File is not downloaded")
print("cluster wise csv file is downloaded ")
self.data.page_loading(self.driver)
def test_download_schoolwise(self):
b = school_wise_download(self.driver)
result = b.test_schoolwise()
self.assertEqual(0, result, msg="File is not downloaded")
print("district wise csv file is downloaded ")
self.data.page_loading(self.driver)
def test_crc_districtwise(self):
b = test_crc_report_districtwise(self.driver)
result = b.test_districtwise()
self.assertEqual(0, result, msg="File is not downloaded")
print('checked with districts records')
self.data.page_loading(self.driver)
def test_homeicon(self):
b = Homeicon(self.driver)
result = b.test_homeicon()
self.assertTrue(result, msg="Home button not working ")
print("checking with home icon and it is working ")
self.data.page_loading(self.driver)
def test_schools_per_cluster_csv_download1(self):
school = crc_schoolevel_records(self.driver)
result = school.check_csv_download()
self.assertEqual(result,0,msg='csv file is not downloaded')
self.data.page_loading(self.driver)
def test_districtwise_tabledata(self):
b = districtwise_tabledata(self.driver)
result = b.test_table_data()
if result != 0:
raise self.failureException('Data not found on table')
print("checked with districtwise table data")
self.data.page_loading(self.driver)
def test_logout(self):
b = Logout_function(self.driver)
res = b.test_logout()
if "crc-report" in self.driver.current_url:
print("Navigated back to crc report")
else:
print("CRC report is not loaded ")
self.data.page_loading(self.driver)
def test_crc_graph(self):
b = plot_values(self.driver)
res1, res2 = b.test_plots()
self.assertNotEqual(0, res1, msg="Xaxis options are not present")
self.assertNotEqual(0, res2, msg='Yaxis options are not present')
self.data.page_loading(self.driver)
print("checked graph x and y axis options")
def test_orderwise_tabledata(self):
b = Check_order_of_tabledata(self.driver)
result = b.test_order()
self.assertEqual(result, "menu", msg="Menu is not exist")
print("check order of table records is working ")
self.data.page_loading(self.driver)
def test_on_clusterlevel_to_hyperlinks(self):
b = click_on_hyperlinks(self.driver)
result = b.test_hyperlink()
print("checking hyperlink from cluster levels ")
self.data.page_loading(self.driver)
def test_homebutton(self):
b = Homeicon(self.driver)
result = b.test_homebutton()
self.assertEqual(0,result,msg="Home button is not working ")
print("checking with home icon and it is working ")
self.data.page_loading(self.driver)
@classmethod
def tearDownClass(cls):
cls.driver.close()
| [
"chetan.goudar@tibilsolutions.com"
] | chetan.goudar@tibilsolutions.com |
1c6210564d19565b0fb0d19d5a16faa49512c900 | f5c3841a08c3faa1818d3ee210c8b9921dc9499d | /parsing_JSON_1.py | e41a258c27dd0efb92e08a6fbfd4055cf60134e0 | [] | no_license | villancikos/realpython-book2 | a4e74b51fe1d3a8e5af206c2938ff4966ef00df6 | 6c9a2ef714531f1163f3c78c80fad335661dacf2 | refs/heads/master | 2016-09-06T10:06:49.227106 | 2014-09-22T18:56:58 | 2014-09-22T18:56:58 | 23,493,659 | 1 | 1 | null | 2014-09-19T23:35:40 | 2014-08-30T14:44:52 | Python | UTF-8 | Python | false | false | 193 | py | # JSON Parsing 1
import json
# decodes the json file
output = json.load(open('cars.json'))
# display output screen
print output
print " "
print json.dumps(output, indent=4, sort_keys=True)
| [
"villancikos@gmail.com"
] | villancikos@gmail.com |
681a5376528ffab913b8a88d30fc3a66a36752f2 | 5456502f97627278cbd6e16d002d50f1de3da7bb | /chrome/test/mini_installer/verifier_runner.py | 9f3f99f54ca5533d699a6f03c95fc058d6f53633 | [
"BSD-3-Clause"
] | permissive | TrellixVulnTeam/Chromium_7C66 | 72d108a413909eb3bd36c73a6c2f98de1573b6e5 | c8649ab2a0f5a747369ed50351209a42f59672ee | refs/heads/master | 2023-03-16T12:51:40.231959 | 2017-12-20T10:38:26 | 2017-12-20T10:38:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import file_verifier
import process_verifier
import registry_verifier
class VerifierRunner:
"""Runs all Verifiers."""
def __init__(self):
"""Constructor."""
# TODO(sukolsak): Implement other verifiers
self._verifiers = {
'Files': file_verifier.FileVerifier(),
'Processes': process_verifier.ProcessVerifier(),
'RegistryEntries': registry_verifier.RegistryVerifier(),
}
def VerifyAll(self, property, variable_expander):
"""Verifies that the current machine states match the property dictionary.
A property dictionary is a dictionary where each key is a verifier's name
and the associated value is the input to that verifier. For details about
the input format for each verifier, take a look at http://goo.gl/1P85WL
Args:
property: A property dictionary.
variable_expander: A VariableExpander object.
"""
for verifier_name, verifier_input in property.iteritems():
if verifier_name not in self._verifiers:
raise KeyError('Unknown verifier %s' % verifier_name)
self._verifiers[verifier_name].VerifyInput(verifier_input,
variable_expander)
| [
"lixiaodonglove7@aliyun.com"
] | lixiaodonglove7@aliyun.com |
e5d1941ea66a0350ed8fe6c0a7e0f6f1275e4f81 | 061c36c4b33dd0c47d9d62c2057559d4c5973681 | /hdfs_find_replication_factor_1.py | 8745848be2d4daa4237a72ad514a994daa990440 | [
"MIT"
] | permissive | ashkankamyab/DevOps-Python-tools | 0847f9e1b74d7864d17b0a9833beeef1f149e5a5 | dc4b1ce2b2fbee3797b66501ba3918a900a79769 | refs/heads/master | 2022-10-09T15:23:31.108086 | 2022-09-01T14:32:56 | 2022-09-01T14:32:56 | 189,855,037 | 1 | 0 | NOASSERTION | 2019-06-02T14:15:18 | 2019-06-02T14:15:18 | null | UTF-8 | Python | false | false | 5,894 | py | #!/usr/bin/env python
# coding=utf-8
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2018-11-28 16:37:00 +0000 (Wed, 28 Nov 2018)
#
# https://github.com/HariSekhon/DevOps-Python-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/HariSekhon
#
"""
Tool to find HDFS file with replication factor 1
These cause problems because taking a single datanode offline may result in alerts for files with missing blocks
Uses any arguments are directory tree paths to starting scanning down. If no argument paths are given, searches under
top level directory /
Uses Hadoop configuration files it expects to find in $HADOOP_HOME/conf to auto-detect
NameNodes HA, Kerberos etc (just kinit first)
Optionally resets such files back to replication factor 3 if specifying --set-replication-factor-3
Tested on Hadoop 2.7 on HDP 2.6 with Kerberos
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import traceback
import krbV
import snakebite
from snakebite.client import AutoConfigClient
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon.utils import log, log_option, validate_int
from harisekhon import CLI
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.3'
class HdfsFindReplicationFactor1(CLI):
def __init__(self):
# Python 2.x
super(HdfsFindReplicationFactor1, self).__init__()
# Python 3.x
# super().__init__()
self.path_list = None
self.replication_factor = None
def add_options(self):
super(HdfsFindReplicationFactor1, self).add_options()
self.add_opt('--hadoop-home',
help='Sets $HADOOP_HOME, expects to find config in $HADOOP_HOME/conf, ' + \
'otherwise inherits from environment or tries default paths')
self.add_opt('--set-replication', metavar='N', type=int,
help='Resets any files with replication factor 1 back to this replication factor (optional)')
def process_options(self):
super(HdfsFindReplicationFactor1, self).process_options()
self.path_list = self.args
if not self.path_list:
self.path_list = ['/']
self.replication_factor = self.get_opt('set_replication')
if self.replication_factor is not None:
validate_int(self.replication_factor, 'set replication', 2, 5)
hadoop_home = self.get_opt('hadoop_home')
if hadoop_home is not None:
os.environ['HADOOP_HOME'] = hadoop_home
hadoop_home_env = os.getenv('HADOOP_HOME')
log_option('HADOOP_HOME', hadoop_home_env)
if hadoop_home_env:
log.info('will search for Hadoop config in %s/conf', hadoop_home_env)
def run(self):
log.info('initiating snakebite hdfs client')
try:
client = AutoConfigClient()
except krbV.Krb5Error as _: # pylint: disable=no-member
if self.verbose:
print('', file=sys.stderr)
print(_, file=sys.stderr)
start_time = time.time()
dir_count = 0
file_count = 0
repl1_count = 0
for path in self.path_list:
try:
result_list = client.ls([path], recurse=True, include_toplevel=True, include_children=True)
for result in result_list:
if self.verbose and (dir_count + file_count) % 100 == 0:
print('.', file=sys.stderr, end='')
if result['block_replication'] == 0:
dir_count += 1
continue
file_count += 1
if result['block_replication'] == 1:
file_path = result['path']
repl1_count += 1
if self.verbose:
print('', file=sys.stderr)
print(file_path)
if self.replication_factor:
log.info('setting replication factor to %s on %s', self.replication_factor, file_path)
# returns a generator so must evaluate in order to actually execute
# otherwise you find there is no effect on the replication factor
for _ in client.setrep([file_path], self.replication_factor, recurse=False):
if 'result' not in _:
print('WARNING: result field not found in setrep result: {}'.format(_),
file=sys.stderr)
continue
if not _['result']:
print('WARNING: failed to setrep: {}'.format(_))
except (snakebite.errors.FileNotFoundException, snakebite.errors.RequestError) as _:
if self.verbose:
print('', file=sys.stderr)
print(_, file=sys.stderr)
if self.verbose:
print('', file=sys.stderr)
secs = int(time.time() - start_time)
print('\nCompleted in {} secs\n'.format(secs), file=sys.stderr)
print('{} files with replication factor 1 out of {} files in {} dirs'\
.format(repl1_count, file_count, dir_count), file=sys.stderr)
if __name__ == '__main__':
HdfsFindReplicationFactor1().main()
| [
"harisekhon@gmail.com"
] | harisekhon@gmail.com |
d7b27284fcf8e687c0ce5cdc8fc1586f625817db | 26aeec7c6571012e85cd6bdd42560988664dc845 | /0x04-python-more_data_structures/1-search_replace.py | bf02407fb835033abc2ea61c84569a868d1e5c89 | [] | no_license | KoeusIss/holbertonschool-higher_level_programming | 3d6ac70d9630c516fa95fcd2d6209d8591bf4169 | 446ca491156ac93134e5c15f3568cb684079d67e | refs/heads/master | 2022-12-11T15:22:58.164551 | 2020-09-24T09:51:45 | 2020-09-24T09:51:45 | 259,189,990 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | #!/usr/bin/python3
def search_replace(my_list, search, replace):
"""
Replaces all occurences of an element by another in a new list
"""
return [(replace if x == search else x) for x in my_list]
| [
"sebri.issam@gmail.com"
] | sebri.issam@gmail.com |
ca1da8e85b269c0081f63c09d3201e66d15324ae | 131921d5ed69ac5d470520a3fbb651d1374a668d | /accounts/models.py | e04dcc920bc257ac68ee4d62006da384f97c2532 | [] | no_license | SyedMaazHassan/temporary-one | 07fc31673b3eb8368014878a22c747d39b259cb3 | cc67107cabcb2a092b79fbc7d8b5369592a15241 | refs/heads/master | 2023-03-02T11:28:08.813659 | 2021-02-10T10:02:52 | 2021-02-10T10:02:52 | 337,535,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,924 | py | import uuid
from django.dispatch import receiver
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser
)
from django.db.models.signals import post_save
from saidatech_admin.models import saidatech_admin_profile
class MyUserManager(BaseUserManager):
def create_user(self, email, date_of_birth, password=None):
"""
Creates and saves a User with the given email, date of
birth and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
date_of_birth=date_of_birth,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, date_of_birth, password):
"""
Creates and saves a superuser with the given email, date of
birth and password.
"""
user = self.create_user(
email,
password=password,
date_of_birth=date_of_birth,
)
user.is_admin = True
user.save(using=self._db)
return user
class MyUser(AbstractBaseUser):
id=models.UUIDField( primary_key = True, editable = False,default=uuid.uuid4())
role=models.CharField(max_length=10,choices=[('Instructor', 'Instructor'), ('Student', 'Student')])
email = models.EmailField(
verbose_name='email address',
max_length=255,
unique=True,
)
date_of_birth = models.DateField(null =True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
objects = MyUserManager()
REQUIRED_FIELDS = ['date_of_birth']
USERNAME_FIELD = 'email'
is_active=models.BooleanField(default=True)
def get_full_name(self):
# The user is identified by their email address
return self.email
def get_short_name(self):
# The user is identified by their email address
return self.email
def __str__(self): # __unicode__ on Python 2
return self.email
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# Simplest possible answer: Yes, always
return True
@property
def is_staff(self):
"Is the user a member of staff?"
# Simplest possible answer: All admins are staff
return self.is_admin
@receiver(post_save, sender=MyUser)
def create_admin_profile(sender, instance, created, **kwargs):
if created:
if sender.is_admin:
print("Na me b IID", MyUser.id)
#saidatech_admin_profile.objects.create(saidatech_admin_id=instance)
| [
"hafizmaazhassan33@gmail.com"
] | hafizmaazhassan33@gmail.com |
6ca67602d21d354937356280ae7d8a91c75c5990 | 26be9ea17640d29d6a8a576cbf306f71675bdfb1 | /pyroprint/optedredger.py | c46bc80acf83ea548a2c338e430e1a6e746c1c47 | [] | no_license | meredithhitchcock/wabio | 076a69efa0e38da0cbba348114408e2354fdde76 | f3de4b8ca6f98d6ec2fa3989214871c2a3781c37 | refs/heads/master | 2021-01-18T14:50:52.630167 | 2014-10-04T04:28:58 | 2014-10-04T04:28:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,812 | py | import pymysql
import string
import sys
primer = "CGTGAGGCTTAACCTT"
revcomprimer = "AAGGTTAAGCCTCACG"
seqFile1 = ""
ratioSeq1 = ""
seqFile2 = ""
ratioSeq2 = ""
fileDir = "./Genome Sequences/rDNA plasmid sequences/23-5"
outfile = ""
# A quick tool to dredge the opterons from .seq files
# Takes in three params:
# - the ratio of sequences
# - the two sequence files
def main():
global seqFile1
global ratioSeq1
global seqFile2
global ratioSeq2
global fileDir
global outfile
if len(sys.argv) < 5 or sys.argv[1] == "help":
printUsage()
return
# Parse input params
i = 1
while i < len(sys.argv):
if sys.argv[i] == "-r":
try:
(r1, split, r2) = sys.argv[i + 1].partition(":")
ratioSeq1 = int(r1)
ratioSeq2 = int(r2)
i += 2
except ValueError:
print "exception"
printUsage()
return
elif sys.argv[i] == "-s":
(seqFile1, split, seqFile2) = sys.argv[i + 1].partition(":")
outfile = seqFile1 + "-" + seqFile2 + ".opts"
i += 2
elif sys.argv[i] == "-d":
fileDir = argv[i + 1]
i += 2
elif sys.argv[i] == "-o":
outfile = argv[i + 1]
i += 2
else:
printUsage()
return
seq1 = findSeq(seqFile1)
seq2 = findSeq(seqFile2)
# Print the found sequences to an output file using the ratios
fd = open(outfile, "w")
for i in range(ratioSeq1):
fd.write("# " + str(i + 1) + " - " + seqFile1 + "\n")
fd.write(seq1 + "\n")
for i in range(ratioSeq2):
fd.write("# " + str(i + 1) + " - " + seqFile2 + "\n")
fd.write(seq2 + "\n")
fd.close()
return
def findSeq(seqFile):
try:
fd = open(fileDir + "/23-5 " + seqFile + ".seq", "r")
except:
print ("File Not Found: " +
fileDir + "/23-5 " + seqFile + ".seq")
sys.exit()
seq = ""
for line in fd:
seq += line.strip()
# FIRST try to find the sequence after the original primer
(pre, p, end) = seq.partition(primer)
# IF didn't find using the original primer try the reverse compliment
if len(p) == 0 or len(end) == 0:
(pre, p, end) = seq.partition(revcomprimer)
if len(p) == 0 or len(end) == 0:
print ("Runtime Error: Could not find the primer or the reverse " +
" compliment of the primer")
sys.exit()
# REVERSE the string pre
pre = pre[::-1]
# Compliment the string
pre = compliment(pre)
return pre
else:
return end
def compliment(seg):
# Compliment the string
slen = len(seg)
for i in range(slen):
if seg[i] == "A":
seg = seg[0:i] + "T" + seg[i + 1:slen]
elif seg[i] == "T":
seg = seg[0:i] + "A" + seg[i + 1:slen]
elif seg[i] == "C":
seg = seg[0:i] + "G" + seg[i + 1:slen]
elif seg[i] == "G":
seg = seg[0:i] + "C" + seg[i + 1:slen]
return seg
def printUsage():
print ("Usage: " + sys.argv[0] + " -r <X:Y> -s " +
"<sequence 1 name>:<sequence 2 name> [-d <sequence file directory>] [-o <outfile>]")
print (" -r : The ratio of sequence 1 to sequence 2 in the final result")
print (" -s : A \"ratio\" of sequence names to be used.\n" +
" Example: Dg03-5:Hu01-3\n" +
" NOTE: \'23-5 \' and \'.seq\' are automatically added")
print (" -d : An optional parameter to change the default location to search " +
"for the sequence files.\n" +
" The default location is: " + fileDir)
print (" -o : Optional parameter to change the default output file.\n" +
" The default output file name is \"<seq 1 name>-<seq 2 name>.opts\"")
if __name__ == "__main__":
main()
| [
"elkingtonx@gmail.com"
] | elkingtonx@gmail.com |
e3bb6cda196d50d9aa9a0366a7e535e4d6cbe821 | 9fb7bc79fc3e9224de8a63189515044812a1ff41 | /scripts/general_utils.py | d490031b36efe0659d6834af14b5d1905f111691 | [
"MIT"
] | permissive | dciborow/amlsdummy | 2fb4c0ddb39ec40140d4811d23d832c009dee139 | 91d2732dd485002b16c28353263b62c674a69399 | refs/heads/master | 2022-06-19T11:28:46.961813 | 2020-02-09T20:51:15 | 2020-02-09T20:51:15 | 239,918,986 | 0 | 0 | MIT | 2020-02-12T03:30:20 | 2020-02-12T03:30:19 | null | UTF-8 | Python | false | false | 3,210 | py | import pickle
import os
from enum import Enum
from datetime import datetime, timedelta
class JobType(Enum):
real_time_scoring = "RealTimeScoring"
batch_scoring = "BatchScoring"
class JobLog:
step_start = "start"
step_end = "end"
logs_directory = "Logs"
general_stats = "Overview.csv"
def __init__(self, jobtype):
self.job_type = jobtype
self.job_directory = jobtype.value + JobLog.logs_directory
self.job_steps = {}
self.job_info = []
self.total_start = None
def startStep(self, step_name):
if len(self.job_steps) == 0:
self.total_start = datetime.now()
self.job_steps[step_name] = {}
self.job_steps[step_name][JobLog.step_start] = datetime.now()
def endStep(self, step_name):
if step_name in self.job_steps.keys():
self.job_steps[step_name][JobLog.step_end] = datetime.now()
def addInfo(self, info):
self.job_info.append(info)
def _dumpGeneral(self, log_path, total_time):
if os.path.exists(JobLog.logs_directory) == False:
os.makedirs(JobLog.logs_directory)
stats_file = os.path.join(JobLog.logs_directory, JobLog.general_stats)
log_entry = []
log_entry.append(self.job_type.value)
log_entry.append(log_path)
log_entry.append(str(total_time))
with open(stats_file, "a+") as general_stats:
general_stats.writelines("{}\n".format(",".join(log_entry)))
def dumpLog(self):
total_run_time = datetime.now() - self.total_start
log_path = os.path.join(JobLog.logs_directory, self.job_directory)
if os.path.exists(log_path) == False:
os.makedirs(log_path)
file_name = datetime.now().isoformat()
file_name = file_name.replace(":","-")
file_name = file_name.replace(".","-")
file_name += ".log"
file_path = os.path.join(log_path, file_name)
with open(file_path, "w") as log_output:
log_output.writelines("Job Type: {}\n".format(self.job_type.value))
log_output.writelines("Total Run Time: {} seconds\n".format(total_run_time.total_seconds()))
log_output.writelines("Job Info: \n")
for info in self.job_info:
log_output.writelines(" " + info + "\n")
log_output.writelines("Job Steps: \n")
for step in self.job_steps.keys():
if JobLog.step_start in self.job_steps[step].keys() and JobLog.step_end in self.job_steps[step].keys():
time_delt = self.job_steps[step][JobLog.step_end] - self.job_steps[step][JobLog.step_start]
log_output.writelines(" {} - {} seconds \n".format(step, time_delt.total_seconds()))
else:
log_output.writelines(" {} - {} \n".format(step, self.job_steps[step]))
self._dumpGeneral(file_path, total_run_time.total_seconds())
def createPickle(file_name):
'''
Create a dummy pickle file
'''
my_data = {"nothing" : "to see here"}
with open(file_name, 'wb') as model_file:
pickle.dump(my_data, model_file)
| [
"grecoe@microsoft.com"
] | grecoe@microsoft.com |
b8e53160b6e640f632a827ac6a40be6f7edb9e58 | 16450d59c820298f8803fd40a1ffa2dd5887e103 | /SWEA/2027_대각선출력.py | 33b1e9b766ac8201bf9d5378fb3649f247886e87 | [] | no_license | egyeasy/TIL_public | f78c11f81d159eedb420f5fa177c05d310c4a039 | e2f40eda09cb0a65cc064d9ba9b0e2fa7cbbcb38 | refs/heads/master | 2021-06-21T01:22:16.516777 | 2021-02-02T13:16:21 | 2021-02-02T13:16:21 | 167,803,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | """
주어진 텍스트를 그대로 출력하세요.
> 입력
> 출력
#++++
+#+++
++#++
+++#+
++++#
"""
print("""#++++
+#+++
++#++
+++#+
++++#""")
# 반성
# 1. multiline comment를 활용하는 방법
# 2. print()를 여러 방법으로 쓰는 법 | [
"dz1120@gmail.com"
] | dz1120@gmail.com |
c4f9837ca141aa95d0af984632f977212fccf8c7 | ab0315bcded75c10c591076b22ed8ff664ee76af | /fig4/config_scf_10mods_200213.py | 5a3c6829ca9d92207ab9ba95c94ca871a795916d | [] | no_license | mukamel-lab/BICCN-Mouse-MOp | 389f62492986a2ffe4278ed16f59fc17dc75b767 | 8058ab8ae827c6e019fff719903b0ba5b400931d | refs/heads/master | 2021-07-06T11:14:25.401628 | 2020-09-30T04:54:27 | 2020-09-30T04:54:27 | 189,758,115 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | #!/usr/bin/env python3
"""An example configuration file
"""
import sys
import os
# # Configs
name = 'mop_10mods_200213'
outdir = '/cndd/fangming/CEMBA/data/MOp_all/results'
output_pcX_all = outdir + '/pcX_all_{}.npy'.format(name)
output_cells_all = outdir + '/cells_all_{}.npy'.format(name)
output_imputed_data_format = outdir + '/imputed_data_{}_{{}}.npy'.format(name)
output_clst_and_umap = outdir + '/intg_summary_{}.tsv'.format(name)
output_figures = outdir + '/figures/{}_{{}}.{{}}'.format(name)
output_cluster_centroids = outdir + '/centroids_{}.pkl'.format(name)
DATA_DIR = '/cndd/fangming/CEMBA/data/MOp_all/data_freeze_l5pt'
# fixed dataset configs
sys.path.insert(0, DATA_DIR)
from __init__datasets import *
meta_f = os.path.join(DATA_DIR, '{0}_metadata.tsv')
hvftrs_f = os.path.join(DATA_DIR, '{0}_hvfeatures.{1}')
hvftrs_gene = os.path.join(DATA_DIR, '{0}_hvfeatures.gene')
hvftrs_cell = os.path.join(DATA_DIR, '{0}_hvfeatures.cell')
mods_selected = [
'snmcseq_gene',
'snatac_gene',
'smarter_cells',
'smarter_nuclei',
'10x_cells_v2',
'10x_cells_v3',
'10x_nuclei_v3',
'10x_nuclei_v3_macosko',
'merfish',
'epi_retro',
]
features_selected = ['epi_retro']
# check features
for features_modality in features_selected:
assert (features_modality in mods_selected)
# within modality
ps = {'mc': 0.9,
'atac': 0.1,
'rna': 0.7,
'merfish': 1,
}
drop_npcs = {
'mc': 0,
'atac': 0,
'rna': 0,
'merfish': 0,
}
# across modality
cross_mod_distance_measure = 'correlation' # cca
knn = 20
relaxation = 3
n_cca = 30
# PCA
npc = 50
# clustering
k = 30
resolutions = [0.1, 0.2, 0.4, 0.8]
# umap
umap_neighbors = 60
min_dist = 0.5
| [
"fmxie1993@gmail.com"
] | fmxie1993@gmail.com |
23be186718ed310752b58249fce51092af45e1c1 | 85f5dff291acf1fe7ab59ca574ea9f4f45c33e3b | /api/tacticalrmm/agents/migrations/0054_alter_agent_goarch.py | d2f26e1c4156e7ec3053a815a89ca418cf2919a9 | [
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sadnub/tacticalrmm | a4ecaf994abe39244a6d75ed2166222abb00d4f4 | 0af95aa9b1084973642da80e9b01a18dcacec74a | refs/heads/develop | 2023-08-30T16:48:33.504137 | 2023-04-10T22:57:44 | 2023-04-10T22:57:44 | 243,405,684 | 0 | 2 | MIT | 2020-09-08T13:03:30 | 2020-02-27T01:43:56 | Python | UTF-8 | Python | false | false | 498 | py | # Generated by Django 4.0.4 on 2022-06-06 04:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agents', '0053_remove_agenthistory_status'),
]
operations = [
migrations.AlterField(
model_name='agent',
name='goarch',
field=models.CharField(blank=True, choices=[('amd64', 'amd64'), ('386', '386'), ('arm64', 'arm64'), ('arm', 'arm')], max_length=255, null=True),
),
]
| [
"dcparsi@gmail.com"
] | dcparsi@gmail.com |
bf9079fb3d60a76e417d01ad38efd6a8b18c6bd4 | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/count-number-of-distinct-integers-after-reverse-operations.py | 24dbdf067e7e0c6f952bd0ae88007568b737487c | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 707 | py | # Time: O(nlogr), r = max(nums)
# Space: O(n)
# hash table
class Solution(object):
def countDistinctIntegers(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def reverse(n):
result = 0
while n:
result = result*10 + n%10
n //= 10
return result
return len({y for x in nums for y in (x, reverse(x))})
# Time: O(nlogr), r = max(nums)
# Space: O(n)
# hash table
class Solution2(object):
def countDistinctIntegers(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return len({y for x in nums for y in (x, int(str(x)[::-1]))})
| [
"noreply@github.com"
] | kamyu104.noreply@github.com |
cb51b6fcc0d3bf4a8423b790d3e33d50c46cfa76 | 80907e3f9e998abc375afcc6e6546c88ee023252 | /badgepad/cmdline.py | 4fb20087f1865f5d46be5d1143852f084c2adcdb | [] | no_license | toolness/badgepad | 5ac1eb21bf426335e81cb9400f5180b6542dea43 | 2c1e221efca12054b843aef066798dd03f6f2533 | refs/heads/master | 2021-01-10T20:03:45.630275 | 2013-04-08T19:46:43 | 2013-04-08T19:46:43 | 9,220,546 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,051 | py | import os
import sys
import shutil
import argparse
from . import pkg_path
from .project import Project
from .build import build_website
from .server import start_auto_rebuild_server
def nice_dir(path, cwd=None):
if cwd is None:
cwd = os.getcwd()
path = os.path.realpath(path)
cwd = os.path.realpath(cwd)
rel = os.path.relpath(path, cwd)
if rel.startswith('..'):
return path
return rel
def fail(text):
log(text)
sys.exit(1)
def log(text):
sys.stdout.write(text + '\n')
def cmd_serve(project, args):
"""
Serve website.
"""
start_auto_rebuild_server(project.ROOT, ip=args.ip, port=args.port)
def cmd_build(project, args):
"""
Build website.
"""
if args.base_url:
project.set_base_url(args.base_url)
if not args.output_dir:
args.output_dir = project.path('dist')
build_website(project, dest_dir=args.output_dir)
log("Done. Static website is in '%s'." % nice_dir(args.output_dir))
def cmd_init(project, args):
"""
Initialize new project directory.
"""
if project.exists('config.yml'):
fail("Directory already contains a project.")
log("Generating config.yml.")
shutil.copy(pkg_path('samples', 'config.yml'), project.ROOT)
log("Creating empty directories.")
os.mkdir(project.path('assertions'))
os.mkdir(project.path('badges'))
os.mkdir(project.path('static'))
log("Creating default templates.")
shutil.copytree(pkg_path('samples', 'templates'), project.TEMPLATES_DIR)
log("Done.")
def cmd_newbadge(project, args):
"""
Create a new badge type.
"""
filename = project.path('badges', '%s.yml' % args.name)
if os.path.exists(filename):
fail("That badge already exists.")
shutil.copy(pkg_path('samples', 'badge.yml'), filename)
log("Created %s." % project.relpath(filename))
pngfile = project.relpath('badges', '%s.png' % args.name)
log("To give the badge an image, copy a PNG file to %s." % pngfile)
def cmd_issue(project, args):
"""
Issue a badge to a recipient.
"""
basename = '%s.%s' % (args.recipient, args.badge)
filename = project.path('assertions', '%s.yml' % basename)
if not args.badge in project.badges:
fail("Badge '%s' does not exist." % args.badge)
if args.recipient not in project.recipients:
fail("Recipient '%s' does not exist." % args.recipient)
if os.path.exists(filename):
fail("Badge already issued.")
shutil.copy(pkg_path('samples', 'assertion.yml'), filename)
log("Created %s." % project.relpath(filename))
def main(arglist=None):
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--root-dir', help='root project directory',
default='.')
subparsers = parser.add_subparsers()
serve = subparsers.add_parser('serve', help=cmd_serve.__doc__)
serve.add_argument('-i', '--ip', help='ip address',
default='127.0.0.1')
serve.add_argument('-p', '--port', help='port', type=int, default=8000)
serve.set_defaults(func=cmd_serve)
build = subparsers.add_parser('build', help=cmd_build.__doc__)
build.add_argument('-u', '--base-url', help='alternate base URL')
build.add_argument('-o', '--output-dir', help='output directory')
build.set_defaults(func=cmd_build)
init = subparsers.add_parser('init', help=cmd_init.__doc__)
init.set_defaults(func=cmd_init)
newbadge = subparsers.add_parser('newbadge', help=cmd_newbadge.__doc__)
newbadge.add_argument('name')
newbadge.set_defaults(func=cmd_newbadge)
issue = subparsers.add_parser('issue', help=cmd_issue.__doc__)
issue.add_argument('recipient')
issue.add_argument('badge')
issue.set_defaults(func=cmd_issue)
args = parser.parse_args(arglist)
project = Project(args.root_dir)
if args.func is not cmd_init:
if not project.exists('config.yml'):
fail('Directory does not contain a project.')
args.func(project, args)
| [
"varmaa@gmail.com"
] | varmaa@gmail.com |
e2ee762af542c4e17ef39d01c46d47c5bbc7d2ab | 3af9425f048876de388d2a5dc4f361132d03a387 | /algorithms/source/최단경로/1.Shortest path(1753_Dijkstra).py | d4b5deea16f72a032b83b6eec204466c755aa8db | [] | no_license | hwanginbeom/TIL | 6fab0d06db9cb9d78c03e3b3392dedcdaf799df6 | 933348f08e5bd58527dcb3732c092a83581e471b | refs/heads/master | 2021-08-15T06:15:21.452951 | 2021-08-13T14:56:09 | 2021-08-13T14:56:09 | 146,391,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,284 | py | '''
문제
방향그래프가 주어지면 주어진 시작점에서 다른 모든 정점으로의 최단 경로를 구하는 프로그램을 작성하시오.
단, 모든 간선의 가중치는 10 이하의 자연수이다.
입력
첫째 줄에 정점의 개수 V와 간선의 개수 E가 주어진다. (1≤V≤20,000, 1≤E≤300,000) 모든 정점에는 1부터 V까지 번호가
매겨져 있다고 가정한다. 둘째 줄에는 시작 정점의 번호 K(1≤K≤V)가 주어진다. 셋째 줄부터 E개의 줄에 걸쳐 각 간선을
나타내는 세 개의 정수 (u, v, w)가 순서대로 주어진다. 이는 u에서 v로 가는 가중치 w인 간선이 존재한다는 뜻이다. u와 v는
서로 다르며 w는 10 이하의 자연수이다. 서로 다른 두 정점 사이에 여러 개의 간선이 존재할 수도 있음에 유의한다.
출력
첫째 줄부터 V개의 줄에 걸쳐, i번째 줄에 i번 정점으로의 최단 경로의 경로값을 출력한다. 시작점 자신은 0으로 출력하고,
경로가 존재하지 않는 경우에는 INF를 출력하면 된다.
'''
import sys
import heapq
input = sys.stdin.readline
# 노드의 개수, 간선의 개수를 입력받기
node, route = map(int, input().split())
# node, route = 5, 6
INF = int(1e9) # 무한을 의미하는 값으로 10억을 설정
distance = [int(1e9)] * (node + 1)
start = int(input())
# 각 노드에 연결되어 있는 노드에 대한 정보를 담는 리스트를 만들기
graph = [[]for i in range(node + 1)]
# 모든 간선 정보를 입력 받기
for _ in range(route):
a, b, c = map(int, input().split())
# a번 노드에서 b번 노드로 가는 비용이 c라는 의미
graph[a].append((b, c))
def dijkstra(start):
q = []
# 시작 노드에 대해서 초기화
heapq.heappush(q,(0,start))
distance[start] = 0
while q:
dist, now = heapq.heappop(q)
if distance[now] < dist:
continue
for i in graph[now]:
cost = dist + i[1]
if cost < distance[i[0]]:
distance[i[0]] = cost
heapq.heappush(q, (cost, i[0]))
dijkstra(start)
for i in range(1,node+1):
if distance[i] ==1000000000:
print('INF')
continue
print(distance[i]) | [
"rydn2002@gmail.com"
] | rydn2002@gmail.com |
f3bbe6e34b15c175b28daa543e9a87e025c79843 | b647129cb448b4991059dcfb44d8279b4c8f18dd | /pyEX/commodities/commodities.py | cfcfbb6c68aaaf35f81c2bbf3438af9571a42525 | [
"Apache-2.0"
] | permissive | jmailloux/pyEX | 433a9aeab3429edb5af1c2f18dc533011ab15c92 | 2101e8c53a9080ea8b00b28a758be441095d5048 | refs/heads/main | 2023-03-24T06:36:43.544611 | 2021-03-17T03:30:40 | 2021-03-17T03:30:40 | 345,503,430 | 0 | 0 | Apache-2.0 | 2021-03-08T02:06:50 | 2021-03-08T02:06:49 | null | UTF-8 | Python | false | false | 2,944 | py | # *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from enum import Enum
from functools import lru_cache
from ..points import points
class CommoditiesPoints(Enum):
"""Commodities data points
https://iexcloud.io/docs/api/#commodities
Attributes:
WTI; Crude oil West Texas Intermediate - in dollars per barrel, not seasonally adjusted
BRENT; Crude oil Brent Europe - in dollars per barrel, not seasonally adjusted
NATGAS; Henry Hub Natural Gas Spot Price - in dollars per million BTU, not seasonally adjusted
HEATOIL; No. 2 Heating Oil New York Harbor - in dollars per gallon, not seasonally adjusted
JET; Kerosense Type Jet Fuel US Gulf Coast - in dollars per gallon, not seasonally adjusted
DIESEL; US Diesel Sales Price - in dollars per gallon, not seasonally adjusted
GASREG; US Regular Conventional Gas Price - in dollars per gallon, not seasonally adjusted
GASMID; US Midgrade Conventional Gas Price - in dollars per gallon, not seasonally adjusted
GASPRM; US Premium Conventional Gas Price - in dollars per gallon, not seasonally adjusted
PROPANE; Propane Prices Mont Belvieu Texas - in dollars per gallon, not seasonally adjusted
"""
WTI = "DCOILWTICO"
BRENT = "DCOILBRENTEU"
NATGAS = "DHHNGSP"
HEATOIL = "DHOILNYH"
JET = "DJFUELUSGULF"
DIESEL = "GASDESW"
GASREG = "GASREGCOVW"
GASMID = "GASMIDCOVW"
GASPRM = "GASPRMCOVW"
PROPANE = "DPROPANEMBTX"
@staticmethod
@lru_cache(1)
def options():
"""Return a list of the available commodities points options"""
return list(map(lambda c: c.value, CommoditiesPoints))
def wti(token="", version="stable"):
return points("DCOILWTICO", token=token, version=version)
def brent(token="", version="stable"):
return points("DCOILBRENTEU", token=token, version=version)
def natgas(token="", version="stable"):
return points("DHHNGSP", token=token, version=version)
def heatoil(token="", version="stable"):
return points("DHOILNYH", token=token, version=version)
def jet(token="", version="stable"):
return points("DJFUELUSGULF", token=token, version=version)
def diesel(token="", version="stable"):
return points("GASDESW", token=token, version=version)
def gasreg(token="", version="stable"):
return points("GASREGCOVW", token=token, version=version)
def gasmid(token="", version="stable"):
return points("GASMIDCOVW", token=token, version=version)
def gasprm(token="", version="stable"):
return points("GASPRMCOVW", token=token, version=version)
def propane(token="", version="stable"):
return points("DPROPANEMBTX", token=token, version=version)
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
19b3d339d591f72252203b511ed7e437a59647f3 | 5955ea34fd72c719f3cb78fbb3c7e802a2d9109a | /ITERATOR_GENERATOR/ITERATOR/Sample/factorial_iterated.py | cbbabe042f1a81d9ed66d5bdd9dd2ac8668ab5d6 | [] | no_license | AndreySperansky/TUITION | 3c90ac45f11c70dce04008adc1e9f9faad840b90 | 583d3a760d1f622689f6f4f482c905b065d6c732 | refs/heads/master | 2022-12-21T21:48:21.936988 | 2020-09-28T23:18:40 | 2020-09-28T23:18:40 | 299,452,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | """Итеральное исчисление факториала работает быстрее рекурсивного"""
def iterative_factorial(n):
if n == 0 or n==1:
res = 1
return res
else:
res = 1
for i in range(2, n+1):
res = res * i
return res
num = abs(int(input("Введите целое число: ")))
print("Факториал числа %d равен: " % num, iterative_factorial(num)) | [
"andrey.speransky@gmail.com"
] | andrey.speransky@gmail.com |
917574d4ba87f4c467e7867dc9e6650e93fc9a21 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2164/60606/306710.py | 97804c69d658036e62e5f0f4c83a491771e81aae | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | test_num = int(input())
s = input()
if s=="aaaab":
print(3)
print(7)
elif test_num==3 and s=="aab":
print(1)
print(8)
print(0)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
caf2fecb194e07ce6842ae03cc70ae308b64a77b | 6444622ad4a150993955a0c8fe260bae1af7f8ce | /djangoenv/lib/python2.7/site-packages/django/contrib/redirects/middleware.py | 88bdfe488ab088fde0477ae06a0433d5a7196195 | [] | no_license | jeremyrich/Lesson_RestAPI_jeremy | ca965ef017c53f919c0bf97a4a23841818e246f9 | a44263e45b1cc1ba812059f6984c0f5be25cd234 | refs/heads/master | 2020-04-25T23:13:47.237188 | 2019-03-22T09:26:58 | 2019-03-22T09:26:58 | 173,138,073 | 0 | 0 | null | 2019-03-22T09:26:59 | 2019-02-28T15:34:19 | Python | UTF-8 | Python | false | false | 1,961 | py | from __future__ import unicode_literals
from django import http
from django.apps import apps
from django.conf import settings
from django.contrib.redirects.models import Redirect
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured
from django.utils.deprecation import MiddlewareMixin
class RedirectFallbackMiddleware(MiddlewareMixin):
# Defined as class-level attributes to be subclassing-friendly.
response_gone_class = http.HttpResponseGone
response_redirect_class = http.HttpResponsePermanentRedirect
def __init__(self, get_response=None):
if not apps.is_installed("django.contrib.sites"):
raise ImproperlyConfigured(
"You cannot use RedirectFallbackMiddleware when "
"django.contrib.sites is not installed."
)
super(RedirectFallbackMiddleware, self).__init__(get_response)
def process_response(self, request, response):
# No need to check for a redirect for non-404 responses.
if response.status_code != 404:
return response
full_path = request.get_full_path()
current_site = get_current_site(request)
r = None
try:
r = Redirect.objects.get(site=current_site, old_path=full_path)
except Redirect.DoesNotExist:
pass
if r is None and settings.APPEND_SLASH and not request.path.endswith("/"):
try:
r = Redirect.objects.get(
site=current_site,
old_path=request.get_full_path(force_append_slash=True),
)
except Redirect.DoesNotExist:
pass
if r is not None:
if r.new_path == "":
return self.response_gone_class()
return self.response_redirect_class(r.new_path)
# No redirect was found. Return the response.
return response
| [
"jeremyrich@free.fr"
] | jeremyrich@free.fr |
2575fcfda27f77fbd034558e815ed42659a70e22 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1711+564/sdB_pg_1711+564_coadd.py | a317f3971b126e1513af2c8cbe3cd9d5030ee583 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[258.155417,56.418942], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_pg_1711+564/sdB_pg_1711+564_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_pg_1711+564/sdB_pg_1711+564_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
b5653a6f9699da39d8c2d60fdac5941697f1abbc | afb2bdf8044e4c9ff09b1b8379efbc17867d8cc0 | /2parts/challenge/challenge2.py | cf8359ef6d86b17dfe0a1b0bb22f142f4a785437 | [] | no_license | ChenFu0420/leranpython | b2e364ff8d6730a3eb768b76f0369faa3367dfa2 | 52d0aa614d7fab19e17bbb696330a0330d3862b6 | refs/heads/master | 2020-05-29T19:46:24.020046 | 2019-09-25T09:17:10 | 2019-09-25T09:17:10 | 189,339,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | num1 = int(input("输入一个数"))
num2 = int(input("再输一个数"))
#两数之和
print("两个数的和是:", num1 + num2)
#两数之差
print("两个数的差是:", num1 - num2)
#两数乘积
print("两数乘积是:", num1 * num2) | [
"hiziying@hotmail.com"
] | hiziying@hotmail.com |
f6799deed3adf5955c953244b8e21ad2a510e6ff | 48b67d5a7149376b5949f12641fa14cb8404a359 | /accounts/migrations/0005_auto_20181018_1754.py | b8183ba6fad46552feeae9eae696552ad9b89ceb | [] | no_license | mishaukr7/simple_blog | 7f962dce438b9bab03b0ddabfc1ce47d57e9cb5b | c00aba56afe4caad77dfa5f058e3ab8e1e8919b1 | refs/heads/master | 2020-04-01T23:21:28.176890 | 2018-10-19T08:58:14 | 2018-10-19T08:58:14 | 153,754,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | # Generated by Django 2.1.2 on 2018-10-18 17:54
from django.db import migrations
import django.db.models.deletion
import smart_selects.db_fields
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_auto_20181018_1712'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='city',
field=smart_selects.db_fields.ChainedForeignKey(blank=True, chained_field='country', chained_model_field='country', null=True, on_delete=django.db.models.deletion.CASCADE, to='accounts.City'),
),
]
| [
"mishaukr22@gmail.com"
] | mishaukr22@gmail.com |
629c520da224ca08d1b463ba82b88210faa9c090 | 7248b86a0a882badb20f83be57748fae89311c7d | /case01/migrations/0001_initial.py | b1c221b705f2a710eea22d9f679f6ea50702fa6f | [] | no_license | daiyeyue/daiyeDRF | 2164ae4e6d611f577d1fac9e84dd8fcd83b3f909 | 884f0dcf4bbedf2c17842d7dc05dc3603cc95877 | refs/heads/master | 2020-12-03T12:48:02.551331 | 2020-01-02T06:38:12 | 2020-01-02T06:38:12 | 231,322,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ClassRoom',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('roomName', models.CharField(max_length=20)),
('loc', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('name', models.CharField(max_length=5)),
('age', models.IntegerField()),
],
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('course', models.CharField(max_length=20)),
('name', models.CharField(max_length=5)),
('age', models.IntegerField()),
],
),
]
| [
"150064328@qq.com"
] | 150064328@qq.com |
51605ef74b4cba3582dbd4a581b09e1dbf06ec52 | d491c11dc87a955c95a4e14a2feea19fe1fa859e | /python/Arcade/Python/P32WordPower.py | 8e59958099ab4e818bb696c758d23213474a33a7 | [] | no_license | Vagacoder/Codesignal | 0f6ea791b25716cad7c46ab7df73679fb18a9882 | 87eaf44555603dd5b8cf221fbcbae5421ae20727 | refs/heads/master | 2023-07-16T04:18:44.780821 | 2021-08-15T18:41:16 | 2021-08-15T18:41:16 | 294,745,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | #
# * Python 32, Word Power
# * Easy
# * You've heard somewhere that a word is more powerful than an action. You decided
# * to put this statement at a test by assigning a power value to each action and
# * each word. To begin somewhere, you defined a power of a word as the sum of
# * powers of its characters, where power of a character is equal to its 1-based
# * index in the plaintext alphabet.
# * Given a word, calculate its power.
# * Example
# For word = "hello", the output should be
# wordPower(word) = 52.
# Letters 'h', 'e', 'l' and 'o' have powers 8, 5, 12 and 15, respectively. Thus, the total power of the word is 8 + 5 + 12 + 12 + 15 = 52.
# * Input/Output
# [execution time limit] 4 seconds (py3)
# [input] string word
# A string consisting of lowercase English letters.
# Guaranteed constraints:
# 1 ≤ word.length ≤ 25.
# [output] integer
# Power of the given word.
#%%
# * Solution 1
import string
def wordPower(word):
num = {v:(i+1) for i, v in enumerate(string.ascii_lowercase)}
return sum([num[ch] for ch in word])
a1 = 'hello'
r1 = wordPower(a1)
print(r1)
# %%
| [
"qiruihu@gmail.com"
] | qiruihu@gmail.com |
6bf9fe564297c104e2d4a8e7f00109a5ee57bd30 | 14ff5ca733ce92c14dd347e32c7ad262026c48cf | /typeshed/rdflib/exceptions.pyi | 4066f6854d2ffd93b18fa4bd16a05f92764610bd | [
"Apache-2.0"
] | permissive | common-workflow-language/cwlprov-py | 6040bd1ea18fb58909bba9874f65e4edcc4ecd92 | 9b719c687484d3f888eb5f807ec3270e9081078a | refs/heads/main | 2023-08-17T06:03:39.274209 | 2022-07-19T18:09:15 | 2022-07-19T18:21:13 | 148,144,870 | 1 | 2 | Apache-2.0 | 2023-08-02T18:35:35 | 2018-09-10T11:27:31 | Python | UTF-8 | Python | false | false | 761 | pyi | from typing import Any
class Error(Exception):
msg: Any
def __init__(self, msg: Any | None = ...) -> None: ...
class TypeCheckError(Error):
type: Any
node: Any
def __init__(self, node) -> None: ...
class SubjectTypeError(TypeCheckError):
msg: Any
def __init__(self, node) -> None: ...
class PredicateTypeError(TypeCheckError):
msg: Any
def __init__(self, node) -> None: ...
class ObjectTypeError(TypeCheckError):
msg: Any
def __init__(self, node) -> None: ...
class ContextTypeError(TypeCheckError):
msg: Any
def __init__(self, node) -> None: ...
class ParserError(Error):
msg: Any
def __init__(self, msg) -> None: ...
class UniquenessError(Error):
def __init__(self, values) -> None: ...
| [
"1330696+mr-c@users.noreply.github.com"
] | 1330696+mr-c@users.noreply.github.com |
f3abfbb514fdc985c0c51949272f13c43a7c3730 | b799c3e1fe5a50b2babcfb2960af210dec434f49 | /354.russian-doll-envelopes.py | facf0e6c6903adc2cda1b3f18cbef9f6574cf968 | [] | no_license | Joecth/leetcode_3rd_vscode | 4619ef80632dec83cbcbcd090f74e043f436cc75 | 3c0943ee9b373e4297aa43a4813f0033c284a5b2 | refs/heads/master | 2022-12-02T19:30:34.572339 | 2020-08-18T15:21:15 | 2020-08-18T15:21:15 | 255,601,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,605 | py | #
# @lc app=leetcode id=354 lang=python3
#
# [354] Russian Doll Envelopes
#
# https://leetcode.com/problems/russian-doll-envelopes/description/
#
# algorithms
# Hard (34.98%)
# Likes: 981
# Dislikes: 37
# Total Accepted: 62.8K
# Total Submissions: 178.5K
# Testcase Example: '[[5,4],[6,4],[6,7],[2,3]]'
#
# You have a number of envelopes with widths and heights given as a pair of
# integers (w, h). One envelope can fit into another if and only if both the
# width and height of one envelope is greater than the width and height of the
# other envelope.
#
# What is the maximum number of envelopes can you Russian doll? (put one inside
# other)
#
# Note:
# Rotation is not allowed.
#
# Example:
#
#
#
# Input: [[5,4],[6,4],[6,7],[2,3]]
# Output: 3
# Explanation: The maximum number of envelopes you can Russian doll is 3 ([2,3]
# => [5,4] => [6,7]).
#
#
#
#
# @lc code=start
class Solution:
def maxEnvelopes(self, envelopes: List[List[int]]) -> int:
if not envelopes: return 0
elif len(envelopes) == 1: return 1
arr = sorted(envelopes, key=lambda envelope: (envelope[0], -envelope[1]))
# return self.helper_FAILED(arr)
# return self.O_NxN(arr)
return self.O_NxlogN(arr)
def O_NxlogN(self, envelopes):
dp = []
for i in range(len(envelopes)):
if not dp:
dp.append(envelopes[i][1])
continue
# if envelopes[i][0] > dp[-1] and envelopes[i][1] > dp[-1][1]:
if envelopes[i][1] > dp[-1]:
dp.append(envelopes[i][1])
else:
target = envelopes[i][1]
start, end = 0, len(dp)
# To find elem >= envelopes[i][1]
while start + 1 < end:
mid = start + (end-start)//2
if dp[mid] >= target:
end = mid
else:
start = mid
if dp[start] >= target:
dp[start] = target
elif dp[end] >= target:
dp[end] = target
# print(dp)
return len(dp)
def O_NxN(self, envelopes):
dp = []
for i in range(len(envelopes)):
if not dp:
dp.append(envelopes[i][1])
continue
# if envelopes[i][0] > dp[-1] and envelopes[i][1] > dp[-1][1]:
if envelopes[i][1] > dp[-1]:
dp.append(envelopes[i][1])
else:
for j in range(len(dp)):
if envelopes[i][1] <= dp[j]:
dp[j] = envelopes[i][1]
break
# print(dp)
return len(dp)
def helper_FAILED(self, envelopes):
dp = []
for envelope in envelopes:
if not dp:
dp.append(envelope)
continue
if envelope[0] > dp[-1][0] and envelope[1] > dp[-1][1] :
dp.append(envelope)
else:
for j in range(len(dp)):
# NO USE
# if envelope[0] <= dp[j][0] and envelope[1] > dp[j][1]:
# break
# elif envelope[0] > dp[j][0] and envelope[1] <= dp[j][1]:
# break
if envelope[0] <= dp[j][0] and envelope[1] <= dp[j][1]:
dp[j] = envelope
break
print(dp)
return len(dp)
# @lc code=end
| [
"huangjiancong@aidigger.com"
] | huangjiancong@aidigger.com |
9a042505df211682de5afa3eb04441642762d7ba | e402a0fbde47acb8903304a0fef11ec1de83b01f | /SecretColors/data/palettes/__init__.py | 0424d070c31874e136fbe0f9ba306d437a31b594 | [
"MIT"
] | permissive | Robinsondssantos/SecretColors | ad254a872d7bcc4ef1ac1914355d2f5c7ec73357 | eb19b8a1805eba812032b450d644aa8fc5c257e5 | refs/heads/master | 2023-01-25T00:34:25.849346 | 2020-12-06T11:35:45 | 2020-12-06T11:35:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | # Copyright (c) SecretBiology 2020.
#
# Library Name: SecretColors
# Author: Rohit Suratekar
# Website: https://github.com/secretBiology/SecretColors
#
# Most of these palettes are derived from various design systems. Few
# examples of such design systems can be found on following URL
# https://designsystemsrepo.com/design-systems
from SecretColors.data.palettes.parent import ParentPalette
from SecretColors.data.palettes.ibm import IBMPalette
from SecretColors.data.palettes.material import MaterialPalette
from SecretColors.data.palettes.clarity import ClarityPalette
from SecretColors.data.palettes.brewer import ColorBrewer
from SecretColors.data.palettes.tableau import TableauPalette
| [
"rohitsuratekar@gmail.com"
] | rohitsuratekar@gmail.com |
4973daa5f7033237dd93efc217a2a9c1532a74b5 | 90914b7d84d69a86652e69d1ad72888af363367b | /production_work_timesheet/timesheet.py | a4ebbc6d37d4d6bfb0c7cae074624939a479354c | [] | no_license | emperadorxp1/TrytonModules | 754a3ca92c0ac7b2db9165208b1bc5fda5fe4a73 | 33ef61752e1c5f490e7ed4ee8a3f0cff63a8fc89 | refs/heads/master | 2020-12-19T18:41:05.260174 | 2020-01-23T15:32:57 | 2020-01-23T15:32:57 | 235,815,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from trytond.pool import PoolMeta, Pool
__all__ = ['TimesheetWork']
class TimesheetWork(metaclass=PoolMeta):
__name__ = 'timesheet.work'
@classmethod
def _get_origin(cls):
return super(TimesheetWork, cls)._get_origin() + ['production.work']
def _validate_company(self):
pool = Pool()
ProductionWork = pool.get('production.work')
result = super(TimesheetWork, self)._validate_company()
if isinstance(self.origin, ProductionWork):
result &= self.company == self.origin.company
return result
| [
"joxua.1995@gmail.com"
] | joxua.1995@gmail.com |
0c8f151455b44f75723bef94d18ab3bf6b15805f | 97bf1824e9b299ae0c9b99dc1bcf83db321b20a5 | /secondProject/blog/models.py | 59f871477b1d2416ee8921cee812eabb7f5807ae | [] | no_license | shinhaeran/likelion_class | f2ed68f245e25a89313834876f001c4b35f5ffaa | 72c2d53cfedccde0062f46816449415131b2c332 | refs/heads/master | 2020-04-25T21:59:56.891042 | 2019-05-26T08:56:48 | 2019-05-26T08:56:48 | 173,097,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | from django.db import models
# Create your models here.
class Blog(models.Model):
title = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
body = models.TextField()
def __str__(self):
return self.title
def summary(self):
return self.body[:100] #100글자 까지만 보여조 | [
"haeran97@naver.com"
] | haeran97@naver.com |
f52b1ef6a6b56db0523211d934578ec0ef2a07e4 | 0377a4135f9e8940809a62186b229295bed9e9bc | /剑指offer/new2/判断一棵树是否为另一棵树的子结构.py | c14465c97106d88afb892a94b084094c2611b4b3 | [] | no_license | neko-niko/leetcode | 80f54a8ffa799cb026a7f60296de26d59a0826b0 | 311f19641d890772cc78d5aad9d4162dedfc20a0 | refs/heads/master | 2021-07-10T10:24:57.284226 | 2020-09-13T11:28:45 | 2020-09-13T11:28:45 | 198,792,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def HasSubtree(self, pRoot1, pRoot2):
# write code here
if pRoot1 == None or pRoot2 == None:
return False
else:
return self.panduan(pRoot1, pRoot2) or self.HasSubtree(pRoot1.left, pRoot2) or self.HasSubtree(pRoot1.right,
pRoot2)
def panduan(self, p1, p2):
if not p2:
return True
else:
if not p1 or p1.val != p2.val:
return False
return self.panduan(p1.right, p2.right) and self.panduan(p1.left, p2.left)
if __name__ == '__main__':
node1 = TreeNode(1)
node2 = TreeNode(2)
node1.left = node2
test1 = node1
test2 = node2
test1 = test1.left
print(test1 == test2)
| [
"2361253285@qq.com"
] | 2361253285@qq.com |
321182ac1c784cfc94356d065684340d14c0b1a1 | 3073677476a918720fb24a13961d6e9f5143627b | /console.py | dcd93ec5ed4ac74a56b3bf6c3dc042853d32cbe2 | [] | no_license | nemec/audibleMPD | 960fe2c358ac875936ceb23c1c7b19d74940012a | d214ac44e2411583db3f6cab835138747b6df6b1 | refs/heads/master | 2021-01-01T05:40:25.894785 | 2011-01-24T23:48:52 | 2011-01-24T23:48:52 | 983,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,193 | py | from os import getuid
import gobject
from input_header import input_header
if getuid() != 0:
raise ImportError, "Must be root to get keypresses."
from struct import unpack
# Code 0 = Mouse x axis, val = mouse accel (with +/-)
# Code 1 = Mouse y axis, val = mouse accel (with +/-)
KEY_PRESS = 1
KEY_RELEASE = 2
class keyevent(object):
def __init__(self, t_sec, t_usec, typ, code, val):
self.ih = input_header()
self.seconds = t_sec
self.microseconds = t_usec
self.type = typ
self.code = code
self.value = val
def __str__(self):
return "[%s.%s] type %s, code %s, value %s" % (self.seconds,
self.microseconds, self.type, self.code, self.value)
class reader(gobject.GObject):
    """Reads 16-byte input_event records from a /dev/input node and
    re-emits them as GObject signals.

    NOTE(review): emit_events() references the module-global `ih`, which is
    only created in the __main__ block below — using this class from a
    plain import would raise NameError there; confirm before reuse.
    """
    DEFAULT_EVENT_PATH = "/dev/input/event4"

    # Mouse signals carry an (x, y) tuple; key signals carry the keyevent.
    __gsignals__ = {
        "mouse_abs": (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
            (gobject.TYPE_PYOBJECT, )),
        "mouse_rel": (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
            (gobject.TYPE_PYOBJECT, )),
        "key_down": (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
            (gobject.TYPE_PYOBJECT, )),
        "key_up": (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
            (gobject.TYPE_PYOBJECT, )),
        "all": (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
            (gobject.TYPE_PYOBJECT, )),
    }

    def __init__(self, eventpath = DEFAULT_EVENT_PATH, keywatches = None):
        # keywatches is accepted but currently unused.
        gobject.GObject.__init__(self)
        self.eventpath = eventpath
        self.trap_repeats = False
        self.port = open(self.eventpath,"rb")

    def trap_repeat(self, on):
        # When True, auto-repeat events (value == 2) also emit "key_down".
        self.trap_repeats = on

    def emit_events(self, ev):
        """Translate one keyevent into the matching GObject signal(s)."""
        if ev.type == ih.EV_ABS:
            if ev.code == ih.ABS_X:
                self.emit("mouse_abs", (ev.value, 0))
            elif ev.code == ih.ABS_Y:
                self.emit("mouse_abs", (0, ev.value))
        elif ev.type == ih.EV_REL:
            if ev.code == ih.REL_X:
                self.emit("mouse_rel", (ev.value, 0))
            elif ev.code == ih.REL_Y:
                self.emit("mouse_rel", (0, ev.value))
        elif ev.type == ih.EV_KEY:
            if ev.value == 0:
                self.emit("key_up", ev)
            elif ev.value == 1:
                self.emit("key_down", ev)
            elif ev.value == 2 and self.trap_repeats:
                self.emit("key_down", ev)
        # Every event is also broadcast on the catch-all signal.
        self.emit("all", ev)

    def readkeyevent(self, emit = False):
        """Read one 16-byte input_event record; optionally emit signals."""
        ev = keyevent(*unpack("2I2Hi",self.port.read(16)))
        if emit:
            self.emit_events(ev)
        return ev

    def readkey(self):
        """Block until a key press and return its code.

        NOTE(review): readkeyevent() returns a keyevent object, not a
        (code, val) pair, so the tuple unpacking below raises TypeError —
        this looks like dead/legacy code.  self.last_key may also be read
        before it is ever assigned.
        """
        while True:
            code, val = self.readkeyevent()
            if val > 0:
                #lockcode = code
                #while True:
                #    code, val = self.readkeyevent()
                #    if code == lockcode and val != 1: # returns on key-release or key-hold
                #        return code
                # 0 is generated by a repeat keypress
                if code == 0:
                    return self.last_key
                else:
                    self.last_key = code
                    return code

    def run(self):
        """Read events forever, emitting a signal for each."""
        while 1:
            self.readkeyevent(True)
if __name__ == "__main__":
gobject.threads_init()
from input_header import input_header
ih = input_header()
r = reader("/dev/input/event9")
def print_event(obj, ev):
print ev
r.connect("key_down", print_event)
r.trap_repeat(True)
r.run()
| [
"djnemec@gmail.com"
] | djnemec@gmail.com |
e700a24a2a79345362880d9c61b0b979299289a8 | 086ff58e13978ef5fa771ffc44c3b002cfcf18cb | /froide/publicbody/widgets.py | 0e89162ad29d102f0a13c6e53207916a598ada39 | [
"MIT"
] | permissive | jdieg0/froide | 70b0de85eff09886919a838fe46b776467824dfb | 44a5d7e65b1678e0031e2cf01687c8834b2517e2 | refs/heads/master | 2020-04-27T22:51:45.343233 | 2019-03-09T16:46:34 | 2019-03-09T16:46:34 | 174,752,276 | 0 | 0 | null | 2019-03-09T22:19:20 | 2019-03-09T22:19:20 | null | UTF-8 | Python | false | false | 2,348 | py | import json
from django import forms
from django.urls import reverse
from django.utils.translation import ugettext as _
from django.templatetags.static import static
from froide.helper.content_urls import get_content_url
from .models import PublicBody
def get_widget_context():
    """Build the client-side config for the public-body chooser widget.

    Returns a dict with the API endpoints ('url'), the translated UI
    strings ('i18n') and static assets ('resources') the JS app needs.
    """
    urls = {
        'searchPublicBody': reverse('api:publicbody-search'),
        'listLaws': reverse('api:law-list'),
        'getPublicBody': reverse('api:publicbody-detail', kwargs={'pk': '0'}),
        'helpAbout': get_content_url('about')
    }
    translations = {
        'missingPublicBody': _('Are we missing a public body?'),
        'publicBodySearchPlaceholder': _('Ministry of...'),
        'search': _('Search'),
        'examples': _('Examples:'),
        'environment': _('Environment'),
        'ministryOfLabour': _('Ministry of Labour'),
        'or': _('or'),
        'noPublicBodiesFound': _('No Public Bodies found for this query.'),
        'letUsKnow': _('Please let us know!'),
    }
    resources = {
        'spinner': static('img/spinner.gif')
    }
    return {'url': urls, 'i18n': translations, 'resources': resources}
class PublicBodySelect(forms.Widget):
    """Form widget rendering the JS-based public-body chooser."""
    input_type = "text"
    template_name = 'publicbody/_chooser.html'
    initial_search = None

    class Media:
        extend = False
        js = ('js/publicbody.js',)

    def set_initial_search(self, search):
        # Pre-fill the chooser's search field before rendering.
        self.initial_search = search

    def get_context(self, name, value=None, attrs=None):
        """Extend the default widget context with the selected PublicBody
        (resolved from its pk in *value*) plus the JSON payload and config
        blob the client-side app expects."""
        pb, pb_desc = None, None
        if value is not None:
            try:
                pb = PublicBody.objects.get(pk=int(value))
                pb_desc = pb.get_label()
            except (ValueError, PublicBody.DoesNotExist):
                # Bad or unknown pk: render the widget without a preselection.
                pass
        context = super(PublicBodySelect, self).get_context(name, value, attrs)
        context['widget'].update({
            'value_label': pb_desc,
            'search': self.initial_search,
            'publicbody': pb,
            'json': json.dumps({
                'fields': {
                    name: {
                        'value': value,
                        'objects': [pb.as_data()] if pb is not None else None
                    }
                }
            })
        })
        context['config'] = json.dumps(get_widget_context())
        return context
| [
"mail@stefanwehrmeyer.com"
] | mail@stefanwehrmeyer.com |
fdb1438c63169f6ae42534f8c356819b6ced8614 | 3358f6fbfa39d4429f2a9fa3ba5416285fab5793 | /第3章 Django/第3章 Django/3、Django中的视图/kaige/project/myApp/models.py | 7128dbac959f269df33a0dddfa674349e6780c90 | [] | no_license | kmxz2016/PycharmProjects | 8ab79cd5ef87bba2a1af0fe9f035f87a18621407 | 631a792eb9b5f4121dc08849dded10c290ac2401 | refs/heads/master | 2020-03-13T18:21:11.364219 | 2018-05-16T10:06:42 | 2018-05-16T10:06:42 | 131,234,054 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | from django.db import models
# Create your models here.
class Grades(models.Model):
    """A school class (grade) with head-count bookkeeping."""
    gname = models.CharField(max_length=20)        # grade name
    gdate = models.DateTimeField()                 # creation / start date
    ggirlnum = models.IntegerField()               # number of girls
    gboynum = models.IntegerField()                # number of boys
    isDelete = models.BooleanField(default=False)  # soft-delete flag

    def __str__(self):
        return self.gname
class Students(models.Model):
    """A student row belonging to one Grades record."""
    sname = models.CharField(max_length=20)          # student name
    sgender = models.BooleanField(default=True)      # gender flag — which value means which gender is not documented; TODO confirm
    sage = models.IntegerField()                     # age in years
    scontend = models.CharField(max_length=20)
    isDelete = models.BooleanField(default=False)    # soft-delete flag
    # Foreign key linking the student to its grade.
    # NOTE(review): Django >= 2.0 requires an explicit on_delete argument
    # for ForeignKey; this positional form only works on older versions.
    sgrade = models.ForeignKey("Grades")

    def __str__(self):
        return self.sname
| [
"601985329@qq.com"
] | 601985329@qq.com |
055a0a056e4c316e4fb5425388900dc3ea5d47ba | 47a3a59288792f654309bfc9ceb6cbfa890720ef | /ramda/omit_test.py | 11fd6af8b845020962f91cbac8aa37846ca872a5 | [
"MIT"
] | permissive | jakobkolb/ramda.py | 9531d32b9036908df09107d2cc19c04bf9544564 | 982b2172f4bb95b9a5b09eff8077362d6f2f0920 | refs/heads/master | 2023-06-23T00:46:24.347144 | 2021-02-01T16:47:51 | 2021-02-01T16:48:25 | 388,051,418 | 0 | 0 | MIT | 2021-07-21T16:31:45 | 2021-07-21T08:40:22 | null | UTF-8 | Python | false | false | 352 | py | from .omit import omit
from ramda.private.asserts import assert_dicts_equal
def pick_nocurry_test():
    """omit() drops the listed keys; keys that are absent are ignored."""
    full = {"a": 1, "b": 2, "c": 3, "d": 4}
    assert_dicts_equal(omit(["a", "d"], dict(full)), {"b": 2, "c": 3})
    # Unknown key: the result is unchanged.
    assert_dicts_equal(omit(["there_is_no_key"], dict(full)), full)
| [
"slava.ganzin@gmail.com"
] | slava.ganzin@gmail.com |
12b2601062b88a0764fe4605d06bd9c8dd80a3cc | 1cb49fbe0e99d30869cd30e0ccec2c9477849374 | /Face Detection/face_classifier/backup-17-May-2018/face_classifier.py | cf14b6b276b159ee1e6eeaa77a929a30eec826c7 | [] | no_license | wuyx/Machine-Learning | ea0f5417669243a8d85cd8a5132dcffdc8e069e8 | 42a7c843289cc22592ad8e8331367f973ab0b7e4 | refs/heads/master | 2022-12-19T02:43:28.961837 | 2020-08-17T03:05:01 | 2020-08-17T03:05:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | import platform
print("Platform is", platform.system())
if platform.system() == "Darwin":
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
else:
import matplotlib.pyplot as plt
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.contrib.layers import flatten
from VGG16 import VGG16
from data_wrangling import PickleHelper
class Face_Classifier_With_Tensorflow(object):
    """Small helper around the face-classifier workflow (display utility)."""

    def __init__(self):
        self.__path = None  # reserved for a data path; currently unused

    def imshow(self, img):
        # img is indexed [..., ::-1] before display — presumably BGR
        # (OpenCV channel order) flipped to RGB for matplotlib; confirm
        # against the loader.
        plt.imshow(img[...,::-1], cmap="gray")
        plt.show()
def face_classifier_did_loaded():
    """Load pickled face features/labels and run VGG16 on the first 100.

    Expects the pickle files produced by the data-wrangling step under
    ../../../Data/Face/ (relative to the working directory).
    """
    fc = Face_Classifier_With_Tensorflow()

    img_cv = PickleHelper.load_pickle("../../../Data/Face/", "faces-obj-32x32-features-norm.pkl")
    img_label = PickleHelper.load_pickle("../../../Data/Face/", "faces-obj-32x32-labels-norm.pkl")

    print("\nFEATURE SHAPE: {0}, LABEL SHAPE: {1}\n".format(np.shape(img_cv), np.shape(img_label)))
    '''
    # JUST FOR TEST
    np.random.seed(32)
    img_label = np.random.randint(2, size=len(img_label))
    print(img_label)
    #fc.imshow(img_cv[0])
    '''
    '''
    test_idx = 2
    print("Label: {0} = < {1} | {2} >".format(img_label[test_idx], np.max(img_cv[test_idx]), np.min(img_cv[test_idx])))
    #print(img_cv[test_idx][:30, :30])
    fc.imshow(img_cv[test_idx])
    '''
    # NOTE(review): fc is unused while the debug blocks above stay disabled.
    # Only the first 100 samples are fed to the network here.
    vgg16 = VGG16(img_cv[:100], img_label[:100])
    vgg16.run_architecture()
| [
"liprin1129@gmail.com"
] | liprin1129@gmail.com |
9ba8cf12520d5f34fba58ef8cbe065bfe5b8a94b | 4b24e80dc274353ff9bd5a8fe62c50cc57444aeb | /fmcapi/api_objects/object_services/urls.py | 34c62846acb127a348bd1fd0054e7c645510516f | [
"BSD-3-Clause"
] | permissive | realvitya/fmcapi | ba4dede492238890fdfec1c3fd447906ffeab706 | 38fb9a8da2d9ebbadf4ed4cb559244646b3bf3be | refs/heads/master | 2020-09-13T23:14:53.177902 | 2019-12-09T03:17:37 | 2019-12-09T03:17:37 | 222,935,028 | 0 | 0 | BSD-3-Clause | 2019-11-20T12:42:42 | 2019-11-20T12:42:41 | null | UTF-8 | Python | false | false | 775 | py | from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
import warnings
class URLs(APIClassTemplate):
    """
    The URLs Object in the FMC.
    """
    # JSON fields accepted/emitted by the FMC /object/urls endpoint.
    VALID_JSON_DATA = ["id", "name", "url", "description"]
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    URL_SUFFIX = "/object/urls"
    REQUIRED_FOR_POST = ["name", "url"]

    def __init__(self, fmc, **kwargs):
        # fmc: authenticated FMC connection; kwargs map to VALID_FOR_KWARGS.
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for URLs class.")
        self.parse_kwargs(**kwargs)
class URL(URLs):
    """Dispose of this Class after 20210101."""

    def __init__(self, fmc, **kwargs):
        # NOTE(review): resetwarnings() clears *all* global warning filters,
        # not just the one for this message — a side effect on callers.
        warnings.resetwarnings()
        warnings.warn("Deprecated: URL() should be called via URLs().")
        super().__init__(fmc, **kwargs)
| [
"dmickels@cisco.com"
] | dmickels@cisco.com |
f811b9919ddd4e93906f3d2c8777defe2f814acb | 7a3c5280f07ceb5eabbb7c38fe864c3288397a26 | /pilgram/css/blending/tests/test_soft_light.py | a8da18c2c45f774217789bdcf8708c310e9ea4f7 | [
"Apache-2.0"
] | permissive | asharpc/pilgram | 17443db1bc4e51d07eb36680138806603cc1b839 | c585ca4f7f08549842befdcd05dd7d9972f7b0a2 | refs/heads/master | 2023-08-30T03:45:33.196127 | 2021-10-12T04:58:58 | 2021-10-12T04:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | # Copyright 2019 Akiomi Kamakura
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from PIL import Image
import pytest
from pilgram import css
from pilgram import util
from pilgram.css.blending.tests.helpers import assert_alpha_support
def test_soft_light():
    """Blend a constant backdrop with a 4-level gray source and compare
    the result pixel-by-pixel (tolerance of 1 per channel)."""
    backdrop = util.fill((2, 2), [0, 128, 255])

    gray_levels = np.array([
        [[0] * 3, [127] * 3],
        [[128] * 3, [255] * 3],
    ], dtype=np.uint8)
    source = Image.fromarray(gray_levels)

    blended = css.blending.soft_light(backdrop, source)

    expected_pixels = [
        (0, 64, 255), (0, 128, 255),
        (0, 128, 255), (0, 181, 255),
    ]
    approx_pixels = [pytest.approx(c, abs=1) for c in expected_pixels]
    assert list(blended.getdata()) == approx_pixels
def test_soft_light_alpha_support(mocker):
    # The mocker fixture is requested but unused here; the shared helper
    # performs the actual alpha-channel checks.
    assert_alpha_support(css.blending.soft_light)
| [
"akiomik@gmail.com"
] | akiomik@gmail.com |
824eaef60b468f8583973a13aa291b47f2721308 | e0597a144cfdcecc72e97b149ff5b1e4e90d1c6d | /src/metapack_build/package/__init__.py | 9b22d2a38b3487f8e07fda168b906cabcab03408 | [
"MIT"
] | permissive | Metatab/metapack-build | e7f684e924c732c1f25472d0b4ce70847260d154 | 5e47728c5fc41b9dd1d4d6ef82bbfb67a1d9f953 | refs/heads/master | 2022-11-13T17:09:44.338504 | 2022-11-10T21:10:07 | 2022-11-10T21:10:07 | 185,630,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE
""" """
from metapack.package import Downloader, open_package # NOQA
from .core import PackageBuilder # NOQA
from .csv import CsvPackageBuilder # NOQA
from .excel import ExcelPackageBuilder # NOQA
from .filesystem import FileSystemPackageBuilder # NOQA
from .s3 import S3CsvPackageBuilder # NOQA
from .zip import ZipPackageBuilder # NOQA
| [
"eric@civicknowledge.com"
] | eric@civicknowledge.com |
4bffd584589c2250a04486cee649559d807811e8 | 6e2487477765a38604cc94ca96af4774dbd88f58 | /hc/api/tests/test_admin.py | 86a7db1e3e00c9b71ff78700e6c3a8c088e52554 | [
"BSD-3-Clause"
] | permissive | herbertUG/healthchecks | e247428012ef8e0c79fe3fe44567b6f9eaf9b298 | 1cdb6e6d1d1a5bed7026132ed316213e60cb6be1 | refs/heads/master | 2020-09-28T08:56:17.415441 | 2019-12-06T06:58:32 | 2019-12-06T06:58:32 | 226,740,247 | 4 | 0 | BSD-3-Clause | 2019-12-08T22:20:23 | 2019-12-08T22:20:21 | null | UTF-8 | Python | false | false | 1,080 | py | from hc.api.models import Channel, Check
from hc.test import BaseTestCase
class ApiAdminTestCase(BaseTestCase):
    """Admin-site rendering tests for the Channel changelist."""

    def setUp(self):
        super(ApiAdminTestCase, self).setUp()
        self.check = Check.objects.create(project=self.project, tags="foo bar")

        # Promote alice so she can access the admin site.
        self.alice.is_staff = True
        self.alice.is_superuser = True
        self.alice.save()

    def test_it_shows_channel_list_with_pushbullet(self):
        """Pushbullet channels are listed by their kind label."""
        self.client.login(username="alice@example.org", password="password")

        Channel.objects.create(
            project=self.project, kind="pushbullet", value="test-token"
        )

        r = self.client.get("/admin/api/channel/")
        self.assertContains(r, "Pushbullet")

    def test_it_shows_channel_list_with_unverified_email(self):
        """Unconfirmed e-mail channels are flagged in the listing."""
        self.client.login(username="alice@example.org", password="password")

        Channel.objects.create(
            project=self.project, kind="email", value="foo@example.org"
        )

        r = self.client.get("/admin/api/channel/")
        self.assertContains(r, "Email <i>(unconfirmed)</i>")
| [
"cuu508@gmail.com"
] | cuu508@gmail.com |
4f3c2f3566543195b8bf42f7c494580c1a518f9d | fb744d622b947cba3f167bf8b8da810c34ded83d | /run.py | 5cbf62bfa20333878ba812ecfb2f043e28a24949 | [] | no_license | petres/septupus | 8f0f1e7ab7658ee03bd53d66c19db1a412ff1b0e | be83d3c5029ca6d2ce3f72a48c03114fb8535566 | refs/heads/master | 2023-05-28T20:52:09.223214 | 2023-02-23T22:46:03 | 2023-02-23T22:46:03 | 195,785,991 | 0 | 0 | null | 2023-05-24T01:31:32 | 2019-07-08T10:03:06 | Python | UTF-8 | Python | false | false | 1,274 | py | #!/usr/bin/env python
from web import WebManager
from flask import request
import threading
from lib.multiManager import MultiManager
from lib.camera.cameraManager import CameraManager
from lib.robot.robotManager import RobotManager
from lib.robot.serialManager import SerialManager
from lib.games.spaceInvadersManager import SpaceInvadersManager
import logging
import sys
import time
import curses
# setup log file to subdir
# setup log file to subdir
logging.basicConfig(filename='log/debug.log', level=logging.DEBUG,
    format='%(levelname)8s - %(name)s %(relativeCreated)d: %(message)s')
sys.stderr = open('log/error.log', 'w')  # capture tracebacks to a file


def main(screen = None):
    """Wire up all manager modules, start the background ones and run the
    Space Invaders loop in the foreground until it exits.

    screen is the curses window handed in by curses.wrapper().
    """
    modules = {
        'serial': SerialManager(),
        'camera': CameraManager(),
        'web': WebManager(),
        'robot': RobotManager(),
        'spaceInvaders': SpaceInvadersManager(screen)
    }

    # Give every manager a reference to its peers, then initialise it.
    for n, m in modules.items():
        m.setModules(modules)
        m.initSharedVars()
        m.load()

    #modules['serial'].start()
    modules['camera'].start()
    modules['web'].start()

    time.sleep(0.01)  # small grace period before the game loop starts

    modules['spaceInvaders'].run()  # blocks until the game ends

    # Shutdown: persist state, then stop any still-running managers.
    for n, m in modules.items():
        m.save()
        if isinstance(m, MultiManager) and m.isRunning():
            m.stop()

#main()
curses.wrapper(main)
| [
"peter.reschenhofer@gmail.com"
] | peter.reschenhofer@gmail.com |
e8930236dbe18fffb17fe6b8fe61950997e6c864 | 26bd175ffb3bd204db5bcb70eec2e3dfd55fbe9f | /exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/ironware/test_ironware_facts.py | e29b4db64b06c1907d83fd2341f728677407fc96 | [
"GPL-3.0-only",
"MIT",
"CC0-1.0",
"GPL-1.0-or-later"
] | permissive | tr3ck3r/linklight | 37814ed19173d893cdff161355d70a1cf538239b | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | refs/heads/master | 2021-04-11T04:33:02.727318 | 2020-03-25T17:38:41 | 2020-03-25T17:38:41 | 248,992,437 | 0 | 0 | MIT | 2020-03-21T14:26:25 | 2020-03-21T14:26:25 | null | UTF-8 | Python | false | false | 4,806 | py | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
from ansible_collections.community.general.plugins.modules.network.ironware import ironware_facts
from ..ironware_module import TestIronwareModule, load_fixture
class TestIronwareFacts(TestIronwareModule):
    """Tests for the ironware_facts module, with run_commands mocked to
    replay fixture files instead of talking to a device."""

    module = ironware_facts

    def setUp(self):
        super(TestIronwareFacts, self).setUp()
        self.mock_run_commands = patch(
            'ansible_collections.community.general.plugins.modules.network.ironware.ironware_facts.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestIronwareFacts, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()

            for item in commands:
                # Commands may arrive as plain strings or JSON blobs
                # with a 'command' key.
                try:
                    obj = json.loads(item)
                    command = obj['command']
                except ValueError:
                    command = item
                # Map the CLI command to a fixture filename: drop any
                # '| ...' filter, then make it filesystem-safe.
                filename = str(command).split(' | ')[0].replace(' ', '_').replace('/', '7')
                output.append(load_fixture(filename))
            return output

        self.run_commands.side_effect = load_from_file

    def test_ironware_facts_gather_subset_default(self):
        """Default subset collects hardware, interface and neighbor facts."""
        set_module_args(dict())
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
        self.assertEqual(['/flash/'], ansible_facts['ansible_net_filesystems'])
        self.assertIn('1/1', ansible_facts['ansible_net_interfaces'].keys())
        self.assertIn('10.69.1.6', ansible_facts['ansible_net_all_ipv4_addresses'])
        self.assertIn('2001:db8::1', ansible_facts['ansible_net_all_ipv6_addresses'])
        self.assertIn('ansible_net_neighbors', ansible_facts)
        self.assertIn('1/2', ansible_facts['ansible_net_neighbors'].keys())
        self.assertEqual(4096, ansible_facts['ansible_net_memtotal_mb'])
        self.assertEqual(3630, ansible_facts['ansible_net_memfree_mb'])
        self.assertEqual('5.8.0fT163', ansible_facts['ansible_net_version'])
        self.assertEqual('MLXe 4-slot Chassis', ansible_facts['ansible_net_model'])
        self.assertEqual('BGD2503J01F', ansible_facts['ansible_net_serialnum'])

    def test_ironware_facts_gather_subset_config(self):
        """'config' subset adds the running config to the facts."""
        set_module_args({'gather_subset': 'config'})
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('config', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('ansible_net_config', ansible_facts)

    def test_ironware_facts_gather_subset_mpls(self):
        """'mpls' subset adds LSP, VLL and VPLS facts."""
        set_module_args({'gather_subset': 'mpls'})
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('mpls', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('ansible_net_mpls_lsps', ansible_facts)
        self.assertIn('ansible_net_mpls_vll', ansible_facts)
        self.assertIn('ansible_net_mpls_vll_local', ansible_facts)
        self.assertIn('ansible_net_mpls_vpls', ansible_facts)
        self.assertIn('LSP1', ansible_facts['ansible_net_mpls_lsps'].keys())
        self.assertIn('TEST-VLL', ansible_facts['ansible_net_mpls_vll'].keys())
        self.assertIn('TEST-LOCAL', ansible_facts['ansible_net_mpls_vll_local'].keys())
        self.assertIn('TEST-VPLS', ansible_facts['ansible_net_mpls_vpls'].keys())
| [
"joshuamadison+gh@gmail.com"
] | joshuamadison+gh@gmail.com |
2e733eb8587c4253faf3b6e89cc778fd0e48feb9 | 8bccc05fcb3cfc6ed93991927a514a96f53f7ec0 | /old_version/candidate_selection/tensorflow_models/components/vector_encoders/multilayer_perceptron.py | 0ea714aca10a4f774d400220f89e347aca98eb8a | [
"MIT"
] | permissive | afcarl/QuestionAnsweringGCN | 54101c38549405d65ef22e38fed9e5bd58122ada | e9c1987b40a553f0619fa796f692c8880de32846 | refs/heads/master | 2020-03-20T10:35:55.729170 | 2018-06-07T11:45:12 | 2018-06-07T11:45:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,082 | py | import numpy as np
import tensorflow as tf
from candidate_selection.tensorflow_models.components.abstract_component import AbstractComponent
class MultilayerPerceptron(AbstractComponent):
    """A fully-connected feed-forward network (TF1-style graph code).

    transforms lists the layer widths, e.g. [in, hidden, out]; len-1
    weight matrices are created between consecutive entries.
    """
    transforms = None
    variable_prefix = None
    variables = None
    weights = None
    biases = None
    l2_scale = None
    dropout_rate=None

    def __init__(self, transforms, variables, variable_prefix="", l2_scale=0.0, dropout_rate=0.0):
        self.transforms = transforms
        self.variable_prefix = variable_prefix
        if self.variable_prefix != "":
            self.variable_prefix += "_"
        # NOTE(review): `variables` is stored but never used in this class.
        self.variables = variables
        self.weights = [None]*(len(transforms)-1)
        self.biases = [None]*(len(transforms)-1)
        self.l2_scale=l2_scale
        self.dropout_rate=dropout_rate

    def prepare_tensorflow_variables(self, mode="train"):
        """Create weight/bias tf.Variables with Glorot-uniform weights."""
        for i in range(len(self.transforms)-1):
            dim_1 = self.transforms[i]
            dim_2 = self.transforms[i+1]
            # Glorot/Xavier uniform bound: sqrt(6 / (fan_in + fan_out)).
            glorot_variance = np.sqrt(6)/np.sqrt(dim_1 + dim_2)
            weight_initializer = np.random.uniform(-glorot_variance, glorot_variance, size=(dim_1, dim_2)).astype(np.float32)
            bias_initializer = np.zeros(dim_2, dtype=np.float32)
            self.weights[i] = tf.Variable(weight_initializer, name=self.variable_prefix + "_W" + str(i))
            self.biases[i] = tf.Variable(bias_initializer, name=self.variable_prefix + "_b" + str(i))

    def transform(self, vectors, mode="train"):
        """Forward pass: dropout (train only) -> affine -> ReLU, with no
        activation after the final layer."""
        for i in range(len(self.transforms)-1):
            if mode == "train" and self.dropout_rate > 0:
                # TF1 dropout takes the *keep* probability.
                vectors = tf.nn.dropout(vectors, 1-self.dropout_rate)
            vectors = tf.matmul(vectors, self.weights[i]) + self.biases[i]
            if i < len(self.transforms) - 2:
                vectors = tf.nn.relu(vectors)
        return vectors

    def get_regularization_term(self):
        # L2 penalty over all weight matrices (biases excluded).
        return self.l2_scale * tf.reduce_sum([tf.reduce_sum(tf.square(w)) for w in self.weights])

    def handle_variable_assignment(self, batch, mode):
        # This component has no placeholders to feed.
        pass
pass | [
"michael.sejr@gmail.com"
] | michael.sejr@gmail.com |
c2530ceda5d20dbbc5a7f5eb0707c0cf79756021 | 7ac2ba90f973f5230a046cc7b72ad3056554e6d4 | /pyswagger/contrib/client/requests.py | 8e877e11173c27eed403ecefc6924002f3e45f9e | [
"MIT"
] | permissive | larscwallin/pyswagger | 1d38faef430dfc5c4ccb0e288541544248eb3326 | 1e07d52d0ce342b7183319ebc9c1ddc51a78577e | refs/heads/master | 2020-02-26T13:27:51.525526 | 2014-10-22T11:01:26 | 2014-10-22T11:01:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | from __future__ import absolute_import
from ...core import BaseClient
from requests import Session, Request
import six
class Client(BaseClient):
    """ Client implementation based on requests
    """

    def __init__(self, auth=None):
        """
        :param auth: optional auth object, forwarded to BaseClient.
        """
        super(Client, self).__init__(auth)
        self.__s = Session()

    def request(self, req_and_resp, opt={}):
        """Perform one Swagger operation over HTTP.

        :param req_and_resp: (SwaggerRequest, SwaggerResponse) pair
        :param opt: request options forwarded to BaseClient
        :return: the SwaggerResponse, populated from the HTTP reply

        Bug fix vs. the original: files opened here for multipart upload
        were never closed (file-descriptor leak); they are now closed in
        a finally block. Caller-supplied file objects (v.data) are left
        open, as before.
        """
        req, resp = super(Client, self).request(req_and_resp, opt)

        # apply request-related options before preparation.
        req.prepare(handle_files=False)

        # prepare for uploaded files
        file_obj = {}
        opened_files = []  # files *we* opened; must be closed after send
        try:
            for k, v in six.iteritems(req.files):
                f = v.data
                if not f:
                    f = open(v.filename, 'rb')
                    opened_files.append(f)
                if 'Content-Type' in v.header:
                    file_obj[k] = (v.filename, f, v.header['Content-Type'])
                else:
                    file_obj[k] = (v.filename, f)

            rq = Request(
                method=req.method,
                url=req.url,
                params=req.query,
                data=req.data,
                headers=req.header,
                files=file_obj
            )
            rq = self.__s.prepare_request(rq)
            rs = self.__s.send(rq)
        finally:
            for f in opened_files:
                f.close()

        resp.apply_with(
            status=rs.status_code,
            header=rs.headers,
            raw=rs.text
        )

        return resp
| [
"missionaryliao@gmail.com"
] | missionaryliao@gmail.com |
3e299aab3601833fdc7272c15795f37dbf3e61b9 | 255021fadf9f739db042809ca95f5b9f75609ec5 | /D3/3376 파도반 수열.py | f2c65b6a4faecf7ea3da169bf4c90873e1a40770 | [] | no_license | unsung107/Algorithm_study | 13bfff518fc1bd0e7a020bb006c88375c9ccacb2 | fb3b8563bae7640c52dbe9324d329ca9ee981493 | refs/heads/master | 2022-12-13T02:10:31.173333 | 2020-09-13T11:32:10 | 2020-09-13T11:32:10 | 295,137,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | padoban = []
for i in range(100):
if i <= 2:
padoban.append(1)
elif i <= 4:
padoban.append(2)
else:
padoban.append(padoban[i - 1] + padoban[i - 5])
results = []
for rounds in range(int(input())):
num = int(input())
results.append(f'#{rounds + 1} {padoban[num - 1]}')
print('\n'.join(results))
| [
"unsung102@naver.com"
] | unsung102@naver.com |
8c588d45717f6bd6101922802c938990ea431eff | 55a273347cb103fe2b2704cb9653956956d0dd34 | /code/tmp_rtrip/test/test_pkgimport.py | 792cf3d14675d94179d159006b840859501355be | [
"MIT"
] | permissive | emilyemorehouse/ast-and-me | 4af1bc74fc967ea69ac1aed92664f6428acabe6a | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | refs/heads/master | 2022-11-18T03:50:36.505882 | 2018-05-12T17:53:44 | 2018-05-12T17:53:44 | 115,035,148 | 25 | 1 | MIT | 2022-11-04T11:36:43 | 2017-12-21T18:27:19 | Python | UTF-8 | Python | false | false | 2,404 | py | import os
import sys
import shutil
import string
import random
import tempfile
import unittest
from importlib.util import cache_from_source
from test.support import create_empty_file
class TestImport(unittest.TestCase):
def __init__(self, *args, **kw):
self.package_name = 'PACKAGE_'
while self.package_name in sys.modules:
self.package_name += random.choose(string.ascii_letters)
self.module_name = self.package_name + '.foo'
unittest.TestCase.__init__(self, *args, **kw)
def remove_modules(self):
for module_name in (self.package_name, self.module_name):
if module_name in sys.modules:
del sys.modules[module_name]
def setUp(self):
self.test_dir = tempfile.mkdtemp()
sys.path.append(self.test_dir)
self.package_dir = os.path.join(self.test_dir, self.package_name)
os.mkdir(self.package_dir)
create_empty_file(os.path.join(self.package_dir, '__init__.py'))
self.module_path = os.path.join(self.package_dir, 'foo.py')
def tearDown(self):
shutil.rmtree(self.test_dir)
self.assertNotEqual(sys.path.count(self.test_dir), 0)
sys.path.remove(self.test_dir)
self.remove_modules()
def rewrite_file(self, contents):
compiled_path = cache_from_source(self.module_path)
if os.path.exists(compiled_path):
os.remove(compiled_path)
with open(self.module_path, 'w') as f:
f.write(contents)
def test_package_import__semantics(self):
self.rewrite_file('for')
try:
__import__(self.module_name)
except SyntaxError:
pass
else:
raise RuntimeError('Failed to induce SyntaxError')
self.assertNotIn(self.module_name, sys.modules)
self.assertFalse(hasattr(sys.modules[self.package_name], 'foo'))
var = 'a'
while var in dir(__builtins__):
var += random.choose(string.ascii_letters)
self.rewrite_file(var)
try:
__import__(self.module_name)
except NameError:
pass
else:
raise RuntimeError('Failed to induce NameError.')
self.rewrite_file('%s = 1' % var)
module = __import__(self.module_name).foo
self.assertEqual(getattr(module, var), 1)
if __name__ == '__main__':
unittest.main()
| [
"emily@cuttlesoft.com"
] | emily@cuttlesoft.com |
d8c3c5cdd9933763803433ce5b76cc6d0c2c328f | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/education/azure-mgmt-education/azure/mgmt/education/aio/operations/_operations.py | 369e23054c0cb208284134ebbe6f6929b0453fc8 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 4,564 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
from .._vendor import EducationManagementClientMixinABC
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.education.aio.EducationManagementClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Auto-generated plumbing: accept client/config/serializers either
        # positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace_async
    async def list(self, **kwargs: Any) -> _models.OperationListResult:
        """Lists all of the available Microsoft.Education API operations.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: OperationListResult or the result of cls(response)
        :rtype: ~azure.mgmt.education.models.OperationListResult
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Standard azure-core status-to-exception mapping; callers may
        # extend/override it via the error_map kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2021-12-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)

        request = build_list_request(
            api_version=api_version,
            template_url=self.list.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        # Send through the client's HTTP pipeline (no streaming needed).
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponseBody, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("OperationListResult", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list.metadata = {"url": "/providers/Microsoft.Education/operations"}
| [
"noreply@github.com"
] | Azure.noreply@github.com |
f8953b3dc628c752016e48ddd21004f39880c333 | 68f0fe638c9cab6fb01a21bbf5da9beb2833f9c7 | /zfrobisher-installer/src/viewer/viewer.py | fad12f94addb7da6db3ed7640a1398de8dab2e48 | [] | no_license | fedosu85nce/work | cd7a8545211cdeafba6ff820ce27cacdd52668d5 | 1c738fd5e6ee3f8fd4f47acf2207038f20868212 | refs/heads/master | 2021-01-15T17:45:31.878900 | 2015-04-09T10:02:55 | 2015-04-09T10:02:55 | 31,659,550 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,280 | py | #
# Code
#
class Viewer(object):
"""
Base interface for KoP installer viewer
"""
def getMenu(self):
"""
Creates and returns a Menu screen object
@rtype: Menu
@returns: screen object
"""
raise NotImplementedError("getMenu not implemented")
# getMenu()
def getDiskSelection(self, disks, diskData, lvmData, raidData):
"""
Creates and returns a Disk Selection screen object
@type disks: list
@param disks: disks found in the system
@type diskData: dict
@param diskData: detailed information about disks
@type lvmData: dict
@param lvmData: lvm metadata
@type raidData: dict
@param raidData: raid metadata
@rtype: SelectHardDisk
@returns: screen object
"""
raise NotImplementedError("getDiskSelection not implemented")
# getDiskSelection()
def getAddzFCP(self):
"""
Creates and returns a AddzFCP screen object
@rtype: getAddzFCP
@returns: screen object
"""
raise NotImplementedError("getAddzFCP not implemented")
# getAddzFCP()
def getConfirmation(self, device, diskData, lvmData):
"""
Creates and confirm the Confirmation screen object
@rtype: getConfirmation
@returns: screen object
"""
raise NotImplementedError("getConfirmation not implemented")
# getDiskSelection()
def getCheckHardDisk(self, diskSelected):
"""
Creates and returns a Check Hard Disk screen object
@type diskSelected: str
@param diskSelected: disk selected by user
@rtype: CheckHardDisk
@returns: screen object
"""
raise NotImplementedError("getCheckHardDisk not implemented")
# getCheckHardDisk()
def getInstallProgress(self):
"""
Creates and returns a Install Progress screen object
@rtype: InstallProgress
@returns: screen object
"""
raise NotImplementedError("getInstallProgress not implemented")
# getInstallProgress()
def getEntitlementError(self):
"""
Creates and returns a Entilement Error screen object
@rtype: EntitlementError
@returns: screen object
"""
raise NotImplementedError("getEntitlementError not implemented")
# getEntitlementError()
def getRebootSystem(self):
"""
Creates and returns a Reboot System screen object
@rtype: RebootSystem
@returns: screen object
"""
raise NotImplementedError("getRebootSystem not implemented")
# getRebootSystem()
def getUpgradeProgressScreen(self):
"""
Creates and returns a Upgrade Progress screen object
@rtype: UpgradeProgress
@returns: screen object
"""
raise NotImplementedError("getUpgradeProgressScreen not implemented")
# getUpgradeProgressScreen()
def getMessageWindow(self):
"""
Gets a generic message box
@rtype: message window box
@returns: screen object
"""
raise NotImplementedError("MessageWindow not implemented")
# getMessageWindow()
def getRootPasswdWindow(self):
"""
Creates and returns the Root Change Password screen object
@rtype: RootChangePassword
@returns: screen object
"""
raise NotImplementedError("RootChangePassword not implemented")
# getRootPasswdWindow()
def getTimezoneWindow(self):
"""
Creates and returns the Adjust Timezone screen object
@rtype: AdjustTimezone
@returns: screen object
"""
raise NotImplementedError("getTimezoneWindow not implemented")
# getTimezoneWindow()
def getChooseLanguage(self):
"""
Creates and returns the choose language screen object
@rtype: ChooseLanguage
@returns: screen object
"""
raise NotImplementedError("getChooseLanguage not implemented")
# getChooseLanguage()
def getListNetwork(self):
"""
Creates and returns the List of Network Interfaces screen object
@rtype: ListNetworkifaces
@returns: screen object
"""
raise NotImplementedError("getListNetwork not implemented")
# getListNetwork()
def getNetworkConfig(self, device, macaddr):
"""
Creates and returns the network config screen
@rtype: ConfigNetwork
@returns: screen object
"""
raise NotImplementedError("getNetworkConfig not implemented")
# getNetworkConfig()
def getDnsSetup(self):
"""
Creates and returns the dnssetup screen
@rtype: DnsSetup
@returns: screen object
"""
raise NotImplementedError("getDnsSetup not implemented")
# getDnsSetup()
def getDateTimeSetup(self):
"""
Creates and returns the datetime setup screen
@rtype: datetimesetup
@returns: screen object
"""
raise NotImplementedError("getDateTimeSetup not implemented")
# getDateTimeSetup()
def getSummary(self):
"""
Creates and returns the summary screen
@rtype: summary
@returns: screen object
"""
raise NotImplementedError("getSummary not implemented")
# getSummary()
def getFirstScreen(self):
"""
Creates and returns the first screen
@rtype: first
@returns: screen object
"""
raise NotImplementedError("getFirstScreen not implemented")
# getFirstScreen()
def getLicenseWindow(self):
"""
Creates and returns the license window screen
@rtype: license
@returns: screen object
"""
raise NotImplementedError("getLicenseWindow not implemented")
# getLicenseWindow()
def getIfaceConfig(self, address):
"""
Creates and returns the network interface configuration
@rtype: InterfaceConfig
@returns: screen object
"""
raise NotImplementedError("getIfaceConfig not implemented")
# getIfaceConfig
# Viewer
| [
"twu@gmail.com"
] | twu@gmail.com |
c3264ac3ec8ee90688ca8d7c237485a05abdffa0 | 490ffe1023a601760ae7288e86723f0c6e366bba | /kolla-docker/zun-ui/zun_ui/api/rest_api_providerregion.py | 057018392db5c00f90f8d641943c030ace74da17 | [
"Apache-2.0"
] | permissive | bopopescu/Cloud-User-Management | 89696a5ea5d2f95191327fbeab6c3e400bbfb2b8 | 390988bf4915a276c7bf8d96b62c3051c17d9e6e | refs/heads/master | 2022-11-19T10:09:36.662906 | 2018-11-07T20:28:31 | 2018-11-07T20:28:31 | 281,786,345 | 0 | 0 | null | 2020-07-22T21:26:07 | 2020-07-22T21:26:06 | null | UTF-8 | Python | false | false | 3,580 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.views import generic
from zun_ui.api import client
import logging
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
LOG = logging.getLogger(__name__)
def change_to_id(obj):
"""Change key named 'uuid' to 'id'
Zun returns objects with a field called 'uuid' many of Horizons
directives however expect objects to have a field called 'id'.
"""
obj['id'] = obj.pop('uuid')
return obj
@urls.register
class Providerregion(generic.View):
"""API for retrieving a single container"""
url_regex = r'zun/providerregions/(?P<id>[^/]+)$'
@rest_utils.ajax()
def get(self, request, id):
LOG.debug('restapi Providerregion xxxxxx get=%s, id=%s xxx' % (request, id))
"""Get a specific container"""
return change_to_id(client.providerregion_show(request, id).to_dict())
@rest_utils.ajax(data_required=True)
def delete(self, request, id):
"""Delete single Container forcely by id.
Returns HTTP 204 (no content) on successful deletion.
"""
return client.providerregion_delete(request, id, force=True)
@rest_utils.ajax(data_required=True)
def patch(self, request, id):
"""Update a Container.
Returns the Container object on success.
"""
args = client.providerregion_update(request, id, **request.DATA)
LOG.debug('restapi inside patch Providerregion xxxxxx args=%s, xxx' % (args))
return args
@urls.register
class Providerregions(generic.View):
"""API for Zun Containers"""
url_regex = r'zun/providerregions/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of the Containers for a project.
The returned result is an object with property 'items' and each
item under this is a Container.
"""
result = client.providerregion_list(request)
LOG.debug('restapi Get yyyzzxxxxxx result= %s xxxxxx' % (result))
return {'items': [change_to_id(n.to_dict()) for n in result]}
@rest_utils.ajax(data_required=True)
def delete(self, request):
"""Delete one or more Containers by id.
Returns HTTP 204 (no content) on successful deletion.
"""
for id in request.DATA:
client.providerregion_delete(request, id)
@rest_utils.ajax(data_required=True)
def post(self, request):
"""Create a new Container.
Returns the new Container object on success.
If 'run' attribute is set true, do 'run' instead 'create'
"""
LOG.debug('providerregions restapi POST xxxxxx %s xxxxx %s xxx' % (request, request.DATA))
new_container = client.providerregion_create(request, **request.DATA)
LOG.debug('zunclient post "%s" and url"%s"' % (new_container.uuid, new_container.to_dict()))
return rest_utils.CreatedResponse(
'/api/zun/providerregion/%s' % new_container.uuid,
new_container.to_dict())
| [
"Mr.Qinlichao@hotmail.com"
] | Mr.Qinlichao@hotmail.com |
cc965e537aaab0e3ead7413323732bd1789e1d8a | 49cb44cfe9b4cd382d8a7d10e1719de69e356ed9 | /scripts/ch6/doFindAnronovHopfBifucationPointINapIKLowThreshold.py | de1dca0d51e4920df4c52bdd56e6062cdca36559 | [] | no_license | joacorapela/figsResultsAndErrorsFromIzhikevich2007 | 913a25ff10479b04fa657cea013226766bef730c | 2c04cacbaa94485168926ddc7e343207beb033b9 | refs/heads/master | 2022-01-28T15:26:30.122964 | 2022-01-21T19:56:52 | 2022-01-21T19:56:52 | 150,012,910 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,280 | py |
import sys
import numpy as np
import pdb
import matplotlib.pyplot as plt
from INapIKModel import INapIKModel
def main(argv):
i0 = 0
v0 = -70.0
vf = -50.0
dv = 1e-3
deltaAnnotate = 0.3
def i(t): return(i0)
iNapIKModel = INapIKModel.getLowThresholdInstance(i=i)
vs = np.arange(v0, vf, dv)
lambdasCol = np.empty([len(vs),2], dtype=complex)
jacobians = []
vsAll = []
nsAll = []
isAll = []
for j in xrange(len(vs)):
v = vs[j]
n = iNapIKModel._nInf(v=vs[j])
i = iNapIKModel.getIInf(y=(v, n))
vsAll.append(v)
nsAll.append(n)
isAll.append(i)
_, lambdas, jacobian = iNapIKModel.checkStability(v0=v, n0=n)
# print(lambdas)
lambdasCol[j,:] = lambdas
jacobians.append(jacobian)
argminEigval0 = np.argmin(np.abs(lambdasCol[:,0].real))
argminEigval1 = np.argmin(np.abs(lambdasCol[:,1].real))
# pdb.set_trace()
if argminEigval0!=argminEigval1:
raise RuntimeError("Could not find I for which both eigenvalues are zero")
print("Andronov-Hopft bifurcation I=%f, V=%f, n=%f"%(isAll[argminEigval0],
vsAll[argminEigval0],
nsAll[argminEigval0]))
print("Jacobian at Andronov-Hopft bifurcation point")
print(jacobians[argminEigval0])
print("Eigenvalue0=%f+j%f"%(lambdasCol[argminEigval0,0].real,
lambdasCol[argminEigval0,0].imag))
print("Eigenvalue1=%f+j%f"%(lambdasCol[argminEigval1,1].real,
lambdasCol[argminEigval1,1].imag))
plt.plot(lambdasCol[:,0].real, lambdasCol[:,1].real)
plt.xlabel(r"Real($\lambda_0$)")
plt.ylabel(r"Real($\lambda_1$)")
plt.annotate('I=%.02f'%isAll[argminEigval0],
xy=(lambdasCol[argminEigval0,0].real,
lambdasCol[argminEigval0,1].real),
xytext=(lambdasCol[argminEigval0,0].real-deltaAnnotate,
lambdasCol[argminEigval0,1].real+deltaAnnotate),
arrowprops=dict(facecolor='black', shrink=0.05))
plt.grid()
plt.show()
pdb.set_trace()
if __name__ == "__main__":
main(sys.argv)
| [
"joacorapela@gmail.com"
] | joacorapela@gmail.com |
bd196701ca9cec3ecb32a2718c4300597e17e4ad | 461d6e951b9904a3248f974842e70b01da464afb | /0x02-python-import_modules/3-infinite_add.py | ed89ea180217ad3aff0375dd7cb083f45dc11bfa | [] | no_license | diego0096/holbertonschool-higher_level_programming | 76e27997aabb966488e8610837cbd39b2ee0ac8f | e89de4a988acac492670ada8c9c6d5f5d940a1d2 | refs/heads/master | 2020-09-29T00:58:11.432351 | 2020-05-15T02:15:08 | 2020-05-15T02:15:08 | 226,907,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | #!/usr/bin/python3
if __name__ == "__main__":
from sys import argv
sum = 0
for c in range(1, len(argv)):
sum += int(argv[c])
print("{}".format(sum))
| [
"dfqz93@hotmail.com"
] | dfqz93@hotmail.com |
0cc835788b39e16f4c18709bb19ea70716d6eccf | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/MybankPaymentTradeNormalpayOrderRefundResponse.py | 0704e0638c37464d8e3cb62748e4b75c1470ee65 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,651 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class MybankPaymentTradeNormalpayOrderRefundResponse(AlipayResponse):
def __init__(self):
super(MybankPaymentTradeNormalpayOrderRefundResponse, self).__init__()
self._operate_no = None
self._request_accept_time = None
self._request_no = None
self._retry = None
@property
def operate_no(self):
return self._operate_no
@operate_no.setter
def operate_no(self, value):
self._operate_no = value
@property
def request_accept_time(self):
return self._request_accept_time
@request_accept_time.setter
def request_accept_time(self, value):
self._request_accept_time = value
@property
def request_no(self):
return self._request_no
@request_no.setter
def request_no(self, value):
self._request_no = value
@property
def retry(self):
return self._retry
@retry.setter
def retry(self, value):
self._retry = value
def parse_response_content(self, response_content):
response = super(MybankPaymentTradeNormalpayOrderRefundResponse, self).parse_response_content(response_content)
if 'operate_no' in response:
self.operate_no = response['operate_no']
if 'request_accept_time' in response:
self.request_accept_time = response['request_accept_time']
if 'request_no' in response:
self.request_no = response['request_no']
if 'retry' in response:
self.retry = response['retry']
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
55601c8bb38d719b6102c5fb2c751d1dd5cd13d0 | 5bce1118b13289308d23510f323c79aa972ddc27 | /src/modules/irisv2/message/testAndPreviewDBAssertion.py | 4c27a9dcebe7eaf26d5833febc6fc2e93b8fee11 | [] | no_license | anupsl/pyApps | 62b64b90723de32684bbabee402220317a4fe817 | 2651d502c366b87449a0c977a9876cc32521c57c | refs/heads/master | 2022-07-03T05:49:12.828630 | 2020-05-10T17:25:26 | 2020-05-10T17:25:26 | 255,157,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | import time
from src.dbCalls.campaignShard import list_Calls
from src.dbCalls.messageInfo import message_calls
from src.modules.irisv2.list.createAudienceDBAssertion import CreateAudienceDBAssertion
from src.modules.irisv2.message.authorizeMessageDbAssertion import AuthorizeMessageDBAssertion
class PreviewDBAssertion():
def __init__(self, messageId, numberOfUsers):
self.messageId = messageId
self.listType = 'TEST_GROUP'
self.numberOfusers = numberOfUsers
self.getBasicInfoFromMessageId()
def check(self):
self.validateAudienceInfo()
self.validateMessageInfo()
def waitForListToBeUpdated(self):
for _ in range(12):
if list_Calls().getCustomerCountInGVD(self.listId) > 0:
break
time.sleep(5)
def getBasicInfoFromMessageId(self):
self.listId, self.campaignId = message_calls().getTargetAudienceForTestAndPreview(self.messageId)
self.waitForListToBeUpdated()
self.listInfo = {
'VID': list_Calls().getGroupVersionId(self.listId)
}
def validateAudienceInfo(self):
CreateAudienceDBAssertion(self.listId, self.listInfo, self.listType, self.numberOfusers).check()
def validateMessageInfo(self):
response = {
'json': {
'entity': {
'id': self.messageId
}
}
}
AuthorizeMessageDBAssertion(self.campaignId, response, {}, 'skip').check()
| [
"anup@CAP-LAP-450.local"
] | anup@CAP-LAP-450.local |
4721727fb57a7576274589538fe2043374a72c05 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_109/ch178_2020_08_14_14_36_55_463698.py | 0fdcafae5574c8b94fafe555c0e97a93b5af8c7a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | def junta_nomes(homens, mulheres, sobrenomes):
nomes_possiveis = []
for i in range(len(homens)):
for p in range(len(sobrenomes)):
nomes_possiveis.append(homens[i] + ' ' + sobrenomes[p])
for i in range(len(mulheres)):
for p in range(len(sobrenomes)):
nomes_possiveis.append(mulheres[i] + ' ' + sobrenomes[p])
return nomes_possiveis | [
"you@example.com"
] | you@example.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.