blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
03fadd49f9dca2f9d4388941de3945d8c5573421 | 94c1805df5a09c39159d502f420d19ad54b567fc | /runtime/deps/gyp/test/conditions/elseif/elseif.gyp | 6367ff7d7afef580e47d50e4ff8f632106c196f1 | [
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | tmikov/jscomp | 9805a5a4d06520549c57380f0df4a1c0aa0dab56 | 83828441cb38ec96603a6a60be06977d4852940a | refs/heads/develop | 2021-01-19T02:56:35.102659 | 2016-04-12T06:19:30 | 2016-04-12T06:19:30 | 36,981,674 | 237 | 13 | Apache-2.0 | 2018-10-14T09:48:12 | 2015-06-06T13:49:26 | C | UTF-8 | Python | false | false | 1,166 | gyp | # Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'variables': { 'test_var': 0 },
'target_name': 'program0',
'type': 'executable',
'sources': [ 'program.cc' ],
'includes': [ 'elseif_conditions.gypi' ],
},
{
'variables': { 'test_var': 1 },
'target_name': 'program1',
'type': 'executable',
'sources': [ 'program.cc' ],
'includes': [ 'elseif_conditions.gypi' ],
},
{
'variables': { 'test_var': 2 },
'target_name': 'program2',
'type': 'executable',
'sources': [ 'program.cc' ],
'includes': [ 'elseif_conditions.gypi' ],
},
{
'variables': { 'test_var': 3 },
'target_name': 'program3',
'type': 'executable',
'sources': [ 'program.cc' ],
'includes': [ 'elseif_conditions.gypi' ],
},
{
'variables': { 'test_var': 4 },
'target_name': 'program4',
'type': 'executable',
'sources': [ 'program.cc' ],
'includes': [ 'elseif_conditions.gypi' ],
},
],
}
| [
"tmikov@gmail.com"
] | tmikov@gmail.com |
9250e53135272125d12fc4a5fa7219ff11e1bb55 | 52c2ccb6fb55126a65bff2b4b7f653e4b0805759 | /tibiawikisql/models/achievement.py | 57c6a58645c5bebfb6f96a2e60037fab5bd67a48 | [
"Apache-2.0"
] | permissive | Galarzaa90/tibiawiki-sql | 4907236d518cdc6a53f32645efa3b22517e91f90 | 982be5ebd7905354b6c6a31c4247b2ee21bbe943 | refs/heads/master | 2022-08-09T09:18:46.533611 | 2022-07-23T13:56:07 | 2022-07-23T13:56:07 | 108,594,636 | 22 | 11 | Apache-2.0 | 2022-06-28T16:46:13 | 2017-10-27T20:52:55 | Python | UTF-8 | Python | false | false | 2,889 | py | # Copyright 2021 Allan Galarza
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tibiawikisql import schema
from tibiawikisql.models import abc
from tibiawikisql.utils import clean_links, parse_boolean, parse_integer
class Achievement(abc.Row, abc.Parseable, table=schema.Achievement):
"""Represents an Achievement.
Attributes
----------
article_id: :class:`int`
The id of the containing article.
title: :class:`str`
The title of the containing article.
timestamp: :class:`int`
The last time the containing article was edited.
name: :class:`str`
The achievement's name.
grade: :class:`int`
The achievement's grade, from 1 to 3. Also know as 'stars'.
points: :class:`int`
The amount of points given by this achievement.
description: :class:`str`
The official description shown for the achievement.
spoiler: :class:`str`
Instructions or information on how to obtain the achievement.
secret: :class:`bool`
Whether the achievement is secret or not.
premium: :class:`bool`
Whether a premium account is required to get this achievement.
achievement_id: :class:`int`
The internal ID of the achievement.
status: :class:`str`
The status of this achievement in the game.
version: :class:`str`
The client version where this was first implemented.
"""
_map = {
"name": ("name", str.strip),
"actualname": ("name", str.strip),
"grade": ("grade", lambda x: parse_integer(x, None)),
"points": ("points", lambda x: parse_integer(x, None)),
"premium": ("premium", parse_boolean),
"description": ("description", str.strip),
"spoiler": ("spoiler", clean_links),
"secret": ("secret", parse_boolean),
"achievementid": ("achievement_id", lambda x: parse_integer(x, None)),
"implemented": ("version", str.strip),
"status": ("status", str.lower),
}
_template = "Infobox_Achievement"
__slots__ = (
"article_id",
"title",
"timestamp",
"name",
"grade",
"points",
"premium",
"description",
"spoiler",
"secret",
"achievement_id",
"version",
"status",
)
def __init__(self, **kwargs):
super().__init__(**kwargs)
| [
"allan.galarza@gmail.com"
] | allan.galarza@gmail.com |
d13a0300c450dc6acb1da9fae5225eed974a8d3d | e84020108a7037d8d4867d95fada1b72cbcbcd25 | /django/nrega.libtech.info/src/nrega/migrations/0027_auto_20170416_1616.py | 48feb13bb49447e43ccfccc0b72571ee742d2321 | [] | no_license | rajesh241/libtech | 8384316051a2e8c2d4a925cd43216b855b82e4d9 | 0105e717357a3626106028adae9bf162a7f93fbf | refs/heads/master | 2022-12-10T03:09:00.048841 | 2020-06-14T09:39:04 | 2020-06-14T09:39:04 | 24,629,538 | 1 | 1 | null | 2022-12-08T02:26:11 | 2014-09-30T07:57:45 | Python | UTF-8 | Python | false | false | 1,322 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-16 10:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('nrega', '0026_auto_20170416_1531'),
]
operations = [
migrations.AddField(
model_name='muster',
name='downloadError',
field=models.CharField(blank=True, max_length=64, null=True),
),
migrations.AlterField(
model_name='block',
name='district',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nrega.District'),
),
migrations.AlterField(
model_name='district',
name='state',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nrega.State'),
),
migrations.AlterField(
model_name='muster',
name='block',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nrega.Block'),
),
migrations.AlterField(
model_name='panchayat',
name='block',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nrega.Block'),
),
]
| [
"togoli@gmail.com"
] | togoli@gmail.com |
bfed4c47e68ecae32060bd100cf106d926dee61e | 0285011aa0fe18c3007b968544bd1eedc56f1886 | /demozoo/settings/zxdemo_staging.py | 5f62c946a702fa32e45b7c53aa943f45cb178cf4 | [] | no_license | vigo/demozoo | ba09677e74aae9392a2c10791244e0cf0503078f | d62676fc4def152a197e3047e759ae50808dc5bb | refs/heads/master | 2020-05-18T13:47:56.932514 | 2019-02-10T17:44:14 | 2019-02-10T17:44:14 | 184,452,040 | 0 | 0 | null | 2019-05-01T17:12:57 | 2019-05-01T17:12:56 | null | UTF-8 | Python | false | false | 605 | py | from .staging import *
TEMPLATES[0]['OPTIONS']['context_processors'] = [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
'zxdemo.context_processors.zxdemo_context',
]
ROOT_URLCONF = 'zxdemo.urls'
ZXDEMO_PLATFORM_IDS = [2]
ALLOWED_HOSTS = ['staging.zxdemo.org', 'localhost']
| [
"matt@west.co.tt"
] | matt@west.co.tt |
9d1a2e30038e2974395a2b0241b03d622846d866 | 7a027e77be1b963d5097c17e650ca41abcc2dcaa | /lncrawl/assets/banner.py | fe0347e1550d78bef40d2f7ef47b8dcfe8392760 | [
"Apache-2.0"
] | permissive | MouA/lightnovel-crawler | 939a9fef0fa991ada1976a03ecb77b0f4205293c | 53b6d5cc8e4bebe4c0bd0a3f1f079e084e02b68f | refs/heads/master | 2021-08-27T20:56:03.403476 | 2021-07-28T01:05:36 | 2021-07-28T01:05:36 | 158,337,494 | 0 | 0 | Apache-2.0 | 2018-11-20T05:49:55 | 2018-11-20T05:49:55 | null | UTF-8 | Python | false | false | 2,524 | py | # -*- coding: utf-8 -*-
import re
from colorama.ansi import Fore, Style
from .icons import Icons
from .version import get_value
banner_text = r'''
╭╮╱╱╱╱╱╱╭╮╱╭╮╱╱╱╱╱╱╱╱╱╱╱╱╭╮╱╭━━━╮╱╱╱╱╱╱╱╱╱╭╮
┃┃╱╱╱╱╱╱┃┃╭╯╰╮╱╱╱╱╱╱╱╱╱╱╱┃┃╱┃╭━╮┃╱╱╱╱╱╱╱╱╱┃┃
┃┃╱╱╭┳━━┫╰┻╮╭╋━╮╭━━┳╮╭┳━━┫┃╱┃┃╱╰╋━┳━━┳╮╭╮╭┫┃╭━━┳━╮
┃┃╱╭╋┫╭╮┃╭╮┃┃┃╭╮┫╭╮┃╰╯┃┃━┫┃╱┃┃╱╭┫╭┫╭╮┃╰╯╰╯┃┃┃┃━┫╭╯
┃╰━╯┃┃╰╯┃┃┃┃╰┫┃┃┃╰╯┣╮╭┫┃━┫╰╮┃╰━╯┃┃┃╭╮┣╮╭╮╭┫╰┫┃━┫┃
╰━━━┻┻━╮┣╯╰┻━┻╯╰┻━━╯╰╯╰━━┻━╯╰━━━┻╯╰╯╰╯╰╯╰╯╰━┻━━┻╯
╱╱╱╱╱╭━╯┃ <version>
╱╱╱╱╱╰━━╯ <link>
'''
# banner_text = r'''
# __...--~~~~~-._ _.-~~~~~--...__
# // `V' \\
# // Lightnovel | Crawler \\
# //__...--~~~~~~-._ | _.-~~~~~~--...__\\
# //__.....----~~~~._\ | /_.~~~~----.....__\\
# ====================\\|//====================
# `---` <version>
# <link>
# '''
# banner_text = r'''
# _ _ _ _ _ ____ _ <version>
# | | (_) __ _| |__ | |_ _ __ _____ _____| | / ___|_ __ __ ___ _| | ___ _ __
# | | | |/ _` | '_ \| __| '_ \ / _ \ \ / / _ \ | | | | '__/ _` \ \ /\ / / |/ _ \ '__|
# | |___| | (_| | | | | |_| | | | (_) \ V / __/ | | |___| | | (_| |\ V V /| | __/ |
# |_____|_|\__, |_| |_|\__|_| |_|\___/ \_/ \___|_| \____|_| \__,_| \_/\_/ |_|\___|_|
# |___/ <link>
# '''
def get_color_banner():
text = banner_text.strip('\n')
#' Lightnovel Crawler v' +
version_text = Style.BRIGHT + 'v' + get_value() + Style.RESET_ALL
link_text = Icons.LINK + Fore.CYAN + ' https://github.com/dipu-bd/lightnovel-crawler' + Fore.RESET
text = text.replace('<version>', Fore.RESET + version_text + Fore.YELLOW)
text = text.replace('<link>', Fore.CYAN + link_text + Fore.YELLOW)
text = re.sub(r'(╱+)', Fore.RESET + Style.DIM + r'\1' + Style.RESET_ALL + Fore.YELLOW, text)
text = Fore.YELLOW + text + Fore.RESET
return text
| [
"dipu.sudipta@gmail.com"
] | dipu.sudipta@gmail.com |
0d84343042ee4c00e8fcd5a19bc6b7c2f20b4b10 | e3438a134f999d57c07e8d2085122fc53e464f2d | /blog/admin.py | 0c2d4848138ee34c441bd0555ffa3013cf503f1b | [] | no_license | jwalk843/django_site | 04385a405409731fe8ecf915eaf2b5d14cf0b6c0 | b08e7759dabf98e305a591dea7f589c9f66840f8 | refs/heads/master | 2023-06-27T19:22:00.293090 | 2021-07-30T13:51:27 | 2021-07-30T13:51:27 | 391,149,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from django.contrib import admin
from .models import Post
admin.site.register(Post) # this registers our db with admin site
# Register your models here.
| [
"me@me.com"
] | me@me.com |
1d0172ce41ee69355b675c23fdc5d391acc54e59 | 1ba58b17f33122abf4236e9e430a51d375e0eb53 | /km72/Popov_Daniil/3/Task2.py | e02cceea53c58bcb905ea52a3b48bb75fa302257 | [] | no_license | igortereshchenko/amis_python | c4f8d86b88ab036d08ff0ce35c9b42ebeabecc42 | c6f0f2a70c82d5f269b3078eb296f82271b5bb10 | refs/heads/master | 2021-10-22T16:21:19.990650 | 2017-11-01T07:26:54 | 2017-11-01T07:26:54 | 104,785,028 | 0 | 139 | null | 2020-04-21T21:27:09 | 2017-09-25T18:11:42 | Python | UTF-8 | Python | false | false | 129 | py | print('starting programm')
a=float(input('input length a: '))
b=float(input('input length b: '))
print('S =',float((a*b)/2))
| [
"noreply@github.com"
] | igortereshchenko.noreply@github.com |
b09b0b9e39e862c54e6df257e311c37e54f53091 | 8e31c8bded501c34ad829e1271bd6dc9263e4e3c | /dynamic_profile/migrations/0012_indicatorprofile_universe.py | 48ca050f74851caa580dc5076e66a0832552d6e6 | [
"MIT"
] | permissive | OpenUpSA/wazimap-dynamic-profile | 89f59d54e5eaea38784e52844519da7b4194fcd1 | 4a66878965b9f452262a41ef1a02c7da5e5b4341 | refs/heads/master | 2021-06-21T21:01:24.757307 | 2020-01-27T09:17:59 | 2020-01-27T09:17:59 | 199,457,626 | 1 | 1 | MIT | 2021-03-19T22:54:41 | 2019-07-29T13:24:08 | Python | UTF-8 | Python | false | false | 506 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-09-02 09:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dynamic_profile', '0011_indicatorprofile_disclaimer_text'),
]
operations = [
migrations.AddField(
model_name='indicatorprofile',
name='universe',
field=models.CharField(default='Population', max_length=20),
),
]
| [
"stumenz.complex@gmail.com"
] | stumenz.complex@gmail.com |
7c1fdb9a430050d2456394bcfcfb27bdd8f592c9 | a475692e93d85aece84da0158d9317d2be1e8fbe | /lfd/test/playback_push.py | ee89734833b087d78428cc7a8897f1c186d53737 | [] | no_license | warriorarmentaix/lfd | f83e20cd9b91a0ac719645669a1eb98f19fa9007 | ff07d53d8c7ed5a092ec05a03f57620d15bb98a0 | refs/heads/master | 2016-09-06T05:31:24.262555 | 2013-06-03T07:13:45 | 2013-06-03T07:13:45 | 10,400,184 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | import argparse
parser = argparse.ArgumentParser()
parser.add_argument("file")
parser.add_argument("group")
args = parser.parse_args()
import h5py
from brett2.PR2 import PR2,IKFail
from brett2 import trajectories
from kinematics import kinematics_utils
import rospy
from brett2.ros_utils import RvizWrapper,Marker
from numpy import asarray
import numpy as np
import geometry_msgs.msg as gm
from jds_utils import conversions
if rospy.get_name()=="/unnamed":
rospy.init_node("playback_demo")
rviz = RvizWrapper.create()
pr2 = PR2.create()
rospy.sleep(1)
traj = h5py.File(args.file, 'r')[args.group]
ps = gm.PoseStamped(pose = gm.Pose(
position = gm.Point(*traj["object_pose"]),
orientation = gm.Quaternion(*traj["object_orientation"])))
ps.header.frame_id = 'base_link'
rviz.draw_marker(
ps,
id=1,
type=Marker.CUBE,
rgba = (0,1,0,1),
scale = asarray(traj["object_dimension"]))
pose_array = conversions.array_to_pose_array(asarray(traj["gripper_positions"]), 'base_link')
rviz.draw_curve(
pose_array,
id=0)
n_waypoints = 20
xyzquat = np.c_[traj["gripper_positions"],traj["gripper_orientations"]]
xyzquat_rs = kinematics_utils.unif_resample(xyzquat, n_waypoints, weights = np.ones(7), tol=.001)
times = np.linspace(0,10,n_waypoints)
pr2.torso.go_up()
pr2.join_all()
pr2.update_rave()
joint_positions,_ = trajectories.make_joint_traj(xyzquat_rs[:,0:3], xyzquat_rs[:,3:7], pr2.rarm.manip, 'base_link', 'r_wrist_roll_link', filter_options = 18)
joint_velocities = kinematics_utils.get_velocities(joint_positions, times, tol=.001)
pr2.rarm.follow_timed_joint_trajectory(joint_positions, joint_velocities, times)
#for xyzq in xyzquat_rs:
#xyz = xyzq[:3]
#quat = xyzq[3:]
#hmat = conversions.trans_rot_to_hmat(xyz,quat)
#try:
#pr2.rarm.goto_pose_matrix(hmat, 'base_link', 'r_wrist_roll_link')
#pr2.join_all()
#except IKFail:
#pass
pr2.join_all()
| [
"joschu@rhino.(none)"
] | joschu@rhino.(none) |
ff1dfe32b51f9d54fd3ed2967b0026220eda081a | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/pfnet_chainer/chainer-master/chainer/datasets/ptb.py | aa3c2a45d81bd3350c1253d5f91b4e5aab9e325a | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 3,351 | py | import os
import numpy
from chainer.dataset import download
def get_ptb_words():
"""Gets the Penn Tree Bank dataset as long word sequences.
`Penn Tree Bank <https://www.cis.upenn.edu/~treebank/>`_ is originally a
corpus of English sentences with linguistic structure annotations. This
function uses a variant distributed at
`https://github.com/tomsercu/lstm <https://github.com/tomsercu/lstm>`_,
which omits the annotation and splits the dataset into three parts:
training, validation, and test.
This function returns the training, validation, and test sets, each of
which is represented as a long array of word IDs. All sentences in the
dataset are concatenated by End-of-Sentence mark '<eos>', which is treated
as one of the vocabulary.
Returns:
tuple of numpy.ndarray: Int32 vectors of word IDs.
.. Seealso::
Use :func:`get_ptb_words_vocabulary` to get the mapping between the
words and word IDs.
"""
train = _retrieve_ptb_words('train.npz', _train_url)
valid = _retrieve_ptb_words('valid.npz', _valid_url)
test = _retrieve_ptb_words('test.npz', _test_url)
return train, valid, test
def get_ptb_words_vocabulary():
"""Gets the Penn Tree Bank word vocabulary.
Returns:
dict: Dictionary that maps words to corresponding word IDs. The IDs are
used in the Penn Tree Bank long sequence datasets.
.. seealso::
See :func:`get_ptb_words` for the actual datasets.
"""
return _retrieve_word_vocabulary()
_train_url = 'https://raw.githubusercontent.com/tomsercu/lstm/master/data/ptb.train.txt' # NOQA
_valid_url = 'https://raw.githubusercontent.com/tomsercu/lstm/master/data/ptb.valid.txt' # NOQA
_test_url = 'https://raw.githubusercontent.com/tomsercu/lstm/master/data/ptb.test.txt' # NOQA
def _retrieve_ptb_words(name, url):
def creator(path):
vocab = _retrieve_word_vocabulary()
words = _load_words(url)
x = numpy.empty(len(words), dtype=numpy.int32)
for i, word in enumerate(words):
x[i] = vocab[word]
numpy.savez_compressed(path, x=x)
return {'x': x}
root = download.get_dataset_directory('pfnet/chainer/ptb')
path = os.path.join(root, name)
loaded = download.cache_or_load_file(path, creator, numpy.load)
return loaded['x']
def _retrieve_word_vocabulary():
def creator(path):
words = _load_words(_train_url)
vocab = {}
index = 0
with open(path, 'w') as f:
for word in words:
if word not in vocab:
vocab[word] = index
index += 1
f.write(word + '\n')
return vocab
def loader(path):
vocab = {}
with open(path) as f:
for i, word in enumerate(f):
vocab[word.strip()] = i
return vocab
root = download.get_dataset_directory('pfnet/chainer/ptb')
path = os.path.join(root, 'vocab.txt')
return download.cache_or_load_file(path, creator, loader)
def _load_words(url):
path = download.cached_download(url)
words = []
with open(path) as words_file:
for line in words_file:
if line:
words += line.strip().split()
words.append('<eos>')
return words
| [
"659338505@qq.com"
] | 659338505@qq.com |
c2f345d1905de3330b58862c1ceb5251138e4b91 | 58a726bf6bf6909ab5c996bfc07fc51e925c1910 | /_serviceprovider_panel/extra/migrations/0003_auto_20191217_1240.py | 915f85aa5f22c73f25dc0b4c4e6559552bff9c73 | [] | no_license | sharingsimplethoughts/vehiclemanagementsystem | 66a31c9a746c9d916e627ea81c1107563968e833 | f4afcdd722a93d4a98b5e347c50136dafe9ffebf | refs/heads/master | 2022-11-09T23:15:47.980160 | 2020-07-01T07:42:42 | 2020-07-01T07:42:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,836 | py | # Generated by Django 2.2.1 on 2019-12-17 08:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('extra', '0002_auto_20191216_1813'),
]
operations = [
migrations.AddField(
model_name='aboutus',
name='content_ar',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='aboutus',
name='title_ar',
field=models.CharField(default='', max_length=200),
),
migrations.AddField(
model_name='faq',
name='content_ar',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='faq',
name='title_ar',
field=models.CharField(default='', max_length=500),
),
migrations.AddField(
model_name='help',
name='content_ar',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='help',
name='title_ar',
field=models.CharField(default='', max_length=100),
),
migrations.AddField(
model_name='legal',
name='content_ar',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='legal',
name='title_ar',
field=models.CharField(default='', max_length=100),
),
migrations.AddField(
model_name='newoptions',
name='content_ar',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='newoptions',
name='title_ar',
field=models.CharField(blank=True, default='', max_length=50),
),
migrations.AddField(
model_name='notification',
name='description_ar',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='notification',
name='title_ar',
field=models.CharField(default='', max_length=300),
),
migrations.AddField(
model_name='privacypolicy',
name='content_ar',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='privacypolicy',
name='title_ar',
field=models.CharField(default='', max_length=100),
),
migrations.AddField(
model_name='termsandcondition',
name='content_ar',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='termsandcondition',
name='title_ar',
field=models.CharField(default='', max_length=100),
),
]
| [
"sukamal.sinha@fluper.in"
] | sukamal.sinha@fluper.in |
be262b811bc370c603cbd4aae2e7d5e6b42156ae | de27692fcf2ff86b024ebee7784a96a8b1329e4d | /ex029.py | 7438a4b695ff71b8897a5f5204acf32d70b42373 | [] | no_license | YaraDeOliveira/CursoemVideo | d2675c5e22840330f1cd581368e6855103b6c7ea | f065c768231deaa996498d21af6b97fd1201abc4 | refs/heads/main | 2023-04-23T07:56:11.700722 | 2021-05-12T14:16:36 | 2021-05-12T14:16:36 | 348,168,481 | 0 | 0 | null | 2021-03-26T18:35:31 | 2021-03-16T00:57:48 | Python | UTF-8 | Python | false | false | 213 | py | vel = float(input('Qual eh a velocidade do carro? '))
if vel > 80:
multa = float(vel-80)*7.00
print('Voce foi multado em R${:.2f}'.format(multa))
else:
print('Voce nao foi multado. Tenha um bom dia!')
| [
"79177224+YaraDeOliveira@users.noreply.github.com"
] | 79177224+YaraDeOliveira@users.noreply.github.com |
d1196457ce70deef1bd442b48b8e84f2110a66c0 | bb433d44f557d5d22e5c961178cfc281ebc4f713 | /microservices/app/src/controllers/all_notes.py | 6435a05e0d9b5c0b879d8faf360093a83a4ea607 | [] | no_license | djmgit/scribble_api | 1c265aa3f005225546a8f9b5288c5d2dcadce1d5 | ddc0e517c033a931ffecedcfe13683a4e4e3f5f4 | refs/heads/master | 2020-03-18T09:05:25.180055 | 2018-06-02T21:14:58 | 2018-06-02T21:14:58 | 134,544,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | import datetime
from flask import Blueprint, jsonify, request
from flask import current_app as app
import json
import requests
from helpers.check_login import is_loggedin
from helpers.query_all_notes import query_all
router = Blueprint('all_notes', __name__)
@router.route('/all_notes', methods=['POST'])
def add_note():
response = ""
print (request.headers)
auth_token = request.headers.get('X-Hasura-Session-Id')
if not auth_token or auth_token == "":
response = {"status": "auth token not specified"}
return jsonify(response)
hasura_id = is_loggedin(auth_token)
if not hasura_id:
response = {"status": "User is not logged in. Please login first"}
return jsonify(response)
response = query_all(hasura_id)
return jsonify(response)
| [
"djmdeveloper060796@gmail.com"
] | djmdeveloper060796@gmail.com |
1d231c8d75fd7c29c4331c835ef19ae7640eba1d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/94/usersdata/230/58378/submittedfiles/mediaLista.py | dc27c8191318cceb5c2791a5ffaf5a4277f17246 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | # -*- coding: utf-8 -*-
def media(lista):
soma=0
for i in range (0,len(a),1):
soma=soma+lista[i]
media=soma/len(lista)
return(media)
n=int(input('Digite quantidade de valores: '))
a=[]
for i in range (1,n+1,1):
numero=int(input('Digite valor: '))
a.append(numero)
print('%.2f' %a[0])
print('%.2f' %a[i])
print('%.2f' %media(a))
print(a)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
c942f3698c181e80fd7178e37b323870ce295241 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_207/428.py | ec2ab1e47c4bcd5b677e3896edb651faabd6b123 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | R, O, Y, G, B, V = range(6)
LETTERS = list("ROYGBV")
def compatible(c1, c2):
if c1 == R:
return c2 not in (R, O, V)
elif c1 == O:
return c2 not in (R, O, Y)
elif c1 == Y:
return c2 not in (O, Y, G)
elif c1 == G:
return c2 not in (G, Y, B)
elif c1 == B:
return c2 not in (B, G, V)
elif c1 == V:
return c2 not in (V, B, R)
def best_neigh(ccounts, c):
if c == V:
neighs = [Y]
elif c == G:
neighs = [R]
elif c == O:
neighs = [B]
elif c == R:
neighs = [Y, B, G]
elif c == Y:
neighs = [R, B, V]
else:
neighs = [R, Y, O]
return sorted([(ccounts[n], n) for n in neighs])[-1]
def sol(n, counts):
condition = (
counts[V] > 2 * counts[Y] or
counts[G] > 2 * counts[R] or
counts[O] > 2 * counts[B]
)
if condition:
return "IMPOSSIBLE"
for c in (R, O, Y, G, B, V):
if counts[c] == 0:
continue
circular = [None for _ in xrange(n)]
ccounts = counts[::]
circular[0] = c
ccounts[c] -= 1
for i in xrange(1, n):
ncount, ncolor = best_neigh(ccounts, circular[i - 1])
if ncount == 0:
break
circular[i] = ncolor
ccounts[ncolor] -= 1
if circular[-1] is not None and compatible(circular[-1], circular[0]):
return "".join(map(lambda x: LETTERS[x], circular))
return "IMPOSSIBLE"
def show(i, val):
print "Case #%s: %s" % (i, val)
if __name__ == "__main__":
T = int(raw_input().strip())
for i in xrange(1, T + 1):
tmp = map(int, raw_input().strip().split())
n = tmp[0]
counts = tmp[1:]
show(i, sol(n, counts))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
3a023e0deb8f61097b453be3ec191419d688ea19 | a08fc91ecafa7f2b6c8aed7e1ceb33822d4caa49 | /python/_sqlalchemy/_transactions.py | 2126bf46f4cc4bfb6b50aa8338f7d7ed5139bdf1 | [] | no_license | bryand1/snippets | 1fcdd4b67809aa27b58e1239d5cca22cfb962f3d | f779bf147c420996613b0778e243154cd750c3dd | refs/heads/master | 2023-01-23T18:47:07.389246 | 2020-12-31T20:10:13 | 2020-12-31T20:10:13 | 138,767,383 | 0 | 0 | null | 2023-01-19T13:02:49 | 2018-06-26T16:56:15 | Python | UTF-8 | Python | false | false | 2,107 | py | """
Transactions
A transaction is a way to execute a set of SQL statements such that either all
of the statements are executed successfully or nothing at all. If any of the
statement involved in the transaction fails then the database is returned to
the state it was in before the transaction was initiated.
We currently have two orders in the database. To fulfill an order we need to perform
the following two actions:
1. Subtract the quantity of ordered items from the items table
2. Update the date_shipped column to contain the datetime value.
Both of these actions must be performed as a unit to ensure data integrity.
The Connection object provides a begin() method, which starts the transaction and
returns an object of type Transaction. The Transaction object in turn provides
rollback() and commit() method, to rollback and commit the transaction, respectively.
In the following listing we define dispatch_order() method which accepts order_id
as an argument, and performs the above mentioned actions using transaction.
"""
from sqlalchemy.exc import IntegrityError
def dispatch_order(order_id):
# check whether order_id is valid or not
r = conn.execute(select([func.count('*')]).where(orders.c.id == order_id))
if not r.scalar():
raise ValueError('Invalid order id: {}'.format(order_id))
# fetch items in the order
s = select([order_lines.c.item_id, order_lines.c.quantity]).where(
order_lines.c.order_id == order_id
)
rs = conn.execute(s)
ordered_items_list = rs.fetchall()
# start transaction
t = conn.begin()
try:
for i in ordered_items_list:
u = update(items).where(
items.c.id == i.item_id
).values(quantity = items.c.quantity - i.quantity)
rs = conn.execute(u)
u = update(orders).where(orders.c.id == order_id).values(date_shipped=datetime.now())
rs = conn.execute(u)
t.commit()
print('Transaction completed')
except IntegrityError as e:
print(e)
t.rollback()
print('Transaction failed')
| [
"bryand1@gmail.com"
] | bryand1@gmail.com |
b74da4de81ce2a7e16de49fe9dd6562b61087325 | 2710f0994868658059bb112ee44fc680d34c01cd | /Aula4/chamar_cachorro.py | d90098fb36d4a07a06b2f7a78608563bc111ce2b | [] | no_license | danubio1982/520 | 966d17504957b37dd068d4a79705756bb426a0cf | ed8884f8319a226bee014566c258c6b111a7d552 | refs/heads/master | 2020-09-17T07:09:07.935106 | 2019-11-29T14:58:47 | 2019-11-29T14:58:47 | 224,030,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | #!/usr/bin/python3
# from Cachorro import Cachorro
import Cachorro
rex = Cachorro.Cachorro('Rex','15','preto','Doberman','Grande')
| [
"iaferro@gmail.com"
] | iaferro@gmail.com |
174627cc536f1abd68cb657d532c9d34be5e10cd | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/bgp_service_community_py3.py | 2b825c54440a7c3bf2010ca87c19c693999d9665 | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 2,075 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class BgpServiceCommunity(Resource):
    """Service Community Properties.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param service_name: The name of the bgp community. e.g. Skype.
    :type service_name: str
    :param bgp_communities: Get a list of bgp communities.
    :type bgp_communities:
     list[~azure.mgmt.network.v2017_03_01.models.BGPCommunity]
    """
    # Fields marked readonly are populated by the service and must not be
    # sent on requests; the msrest serializer consults this table.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }
    # Maps Python attribute names to REST payload keys ('properties.*' keys
    # live under the nested 'properties' object in the wire format).
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'service_name': {'key': 'properties.serviceName', 'type': 'str'},
        'bgp_communities': {'key': 'properties.bgpCommunities', 'type': '[BGPCommunity]'},
    }
    # NOTE: AutoRest-generated code -- manual edits are lost on regeneration.
    def __init__(self, *, id: str=None, location: str=None, tags=None, service_name: str=None, bgp_communities=None, **kwargs) -> None:
        super(BgpServiceCommunity, self).__init__(id=id, location=location, tags=tags, **kwargs)
        self.service_name = service_name
        self.bgp_communities = bgp_communities
| [
"noreply@github.com"
] | lmazuel.noreply@github.com |
610746c18083b608c8e079d0cc65af413efac8fa | 910be469257538bcbbd15e894679856a1d311252 | /server/service/signature/migrations/0041_auto_20170106_0549.py | 9cadd3043937ec451442a39f5a2a74631ea8e43f | [] | no_license | bopo/bankeys2 | ece7e7faa93aab48bf5a336721bfa69b33a870d8 | 5a81f5f4cd6442aade444444ba768b9ffa9dcbd4 | refs/heads/master | 2023-08-19T04:16:12.063961 | 2023-08-04T09:09:00 | 2023-08-04T09:09:00 | 119,646,417 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-06 05:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds two certificate-metadata columns to the Identity model.
    dependencies = [
        ('signature', '0040_auto_20170106_0530'),
    ]
    operations = [
        migrations.AddField(
            model_name='identity',
            name='enddate',
            # verbose_name is the Chinese label "certificate expiry time".
            field=models.DateField(blank=True, null=True, verbose_name='\u8bc1\u4e66\u8fc7\u671f\u65f6\u95f4'),
        ),
        migrations.AddField(
            model_name='identity',
            name='serial',
            # verbose_name is the Chinese label "certificate number".
            field=models.CharField(blank=True, default='', max_length=100, null=True, verbose_name='\u8bc1\u4e66\u7f16\u53f7'),
        ),
    ]
| [
"ibopo@126.com"
] | ibopo@126.com |
dc6383bf6bde6c72f23d8e76ea78b70a7540572f | cf57924a595d3c4f09c74835c52c29309bc95afb | /bigtop-packages/src/charm/pig/layer-pig/tests/01-deploy.py | b7e8620faf210f99085b64bcd32495b75a28d5d2 | [
"Apache-2.0",
"FreeBSD-DOC",
"MIT",
"DOC"
] | permissive | arenadata/bigtop | 94eaf06602062aef6d9a27c2d826d789a6a51a64 | 05a8ee20d4a0d0db846ba1969bd6ae469042a337 | refs/heads/branch-adh-2.0 | 2021-07-11T21:40:28.056863 | 2020-08-18T14:31:35 | 2020-08-18T14:31:35 | 88,484,637 | 3 | 15 | Apache-2.0 | 2020-09-02T10:40:31 | 2017-04-17T07:46:10 | Java | UTF-8 | Python | false | false | 1,689 | py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import re
import unittest
class TestDeploy(unittest.TestCase):
    """
    Deployment and smoke test for the Apache Bigtop Pig service.
    """
    @classmethod
    def setUpClass(cls):
        """Deploy a single 'pig' unit and block until it reports ready."""
        cls.d = amulet.Deployment(series='xenial')
        cls.d.add('pig')
        # Both the deployment and the readiness wait allow up to 30 minutes.
        cls.d.setup(timeout=1800)
        cls.d.sentry.wait_for_messages({'pig': re.compile('ready')}, timeout=1800)
        cls.pig = cls.d.sentry['pig'][0]
    def test_pig(self):
        """
        Validate Pig by running the smoke-test action.
        """
        uuid = self.pig.action_do('smoke-test')
        result = self.d.action_fetch(uuid)
        # pig smoke-test sets outcome=success on success
        if (result['outcome'] != "success"):
            error = "Pig smoke-test failed"
            amulet.raise_status(amulet.FAIL, msg=error)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| [
"kevin.monroe@canonical.com"
] | kevin.monroe@canonical.com |
7e8416acc55663eef63a269fe0e4b6768ff66367 | 0b4fdc7390068ec31ce1dc5816327bffc53c5f96 | /chatapp/chat/consumers.py | 5198f1bff9708d570ca2b33f32d04c8bf01fff11 | [] | no_license | sanix-sandel/ChatApp | 4b487512874313152f7888ea719aeb2a01f4b31f | 8630de6502a42156a71f22e9303c38d0c5109ea3 | refs/heads/master | 2022-11-11T01:46:43.820728 | 2020-06-28T06:42:15 | 2020-06-28T06:42:15 | 273,632,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,435 | py | from django.contrib.auth import get_user_model
import json
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer
from .models import Message
from django.contrib.auth.models import User
class ChatConsumer(WebsocketConsumer):
    """Synchronous Channels consumer for a chat room.

    Incoming frames are JSON objects with a 'command' key; the class-level
    ``commands`` table dispatches them to handlers.  Outbound messages are
    fanned out to every consumer in the same room group.
    """
    def fetch_messages(self, data):
        # Load recent history from the database and send it back to the
        # requesting socket only (not the whole group).
        messages = Message.last_10_messages()
        content = {
            'command': 'messages',
            'messages': self.messages_to_json(messages)
        }
        self.send_message(content)
    def new_message(self, data):
        # Persist the message, then broadcast it to the room group.
        # NOTE(review): filter(...)[0] raises IndexError if the username is
        # unknown -- presumably callers guarantee an existing user; confirm.
        author = data['from']
        author_user = User.objects.filter(username=author)[0]
        message = Message.objects.create(
            author=author_user,
            content=data['message'])
        content = {
            'command': 'new_message',
            'message': self.message_to_json(message)
        }
        return self.send_chat_message(content)
    def messages_to_json(self, messages):
        # Serialize a queryset/list of Message rows for the wire.
        result = []
        for message in messages:
            result.append(self.message_to_json(message))
        return result
    def message_to_json(self, message):
        # Wire representation of one Message row.
        return {
            'author': message.author.username,
            'content': message.content,
            'timestamp': str(message.timestamp)
        }
    # Dispatch table of *unbound* functions (defined above in the class
    # body), hence the explicit ``(self, data)`` call in receive().
    commands = {
        'fetch_messages': fetch_messages,
        'new_message': new_message
    }
    def connect(self):
        # Room name comes from the URL route kwargs; the derived group name
        # must stay within Channels' allowed characters (ASCII alnum, '-', '.').
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = 'chat_%s' % self.room_name
        async_to_sync(self.channel_layer.group_add)(
            self.room_group_name,
            self.channel_name
        )
        self.accept()
    def disconnect(self, close_code):
        # Leave the room group; Channels closes the socket itself.
        async_to_sync(self.channel_layer.group_discard)(
            self.room_group_name,
            self.channel_name
        )
    # Receive message from WebSocket
    def receive(self, text_data):
        data = json.loads(text_data)
        # Unbound-function dispatch: self is passed explicitly.
        self.commands[data['command']](self, data)
    def send_chat_message(self, message):
        # Broadcast to the room group; each member's chat_message() handler
        # (type key below) forwards it down its own socket.
        async_to_sync(self.channel_layer.group_send)(
            self.room_group_name,
            {
                'type': 'chat_message',
                'message': message
            }
        )
    def send_message(self, message):
        # Send JSON down this consumer's own socket only.
        self.send(text_data=json.dumps(message))
    # Handler invoked by the channel layer for group events of
    # type 'chat_message'; forwards the payload to the browser.
    def chat_message(self, event):
        message = event['message']
        self.send(text_data=json.dumps(message))
#This new code is for ChatConsumer is very similar to the original code,
#with the following differences:
#ChatConsumer now inherits from AsyncWebsocketConsumer rather than WebsocketConsumer.
#All methods are async def rather than just def.
#await is used to call asynchronous functions that perform I/O.
#async_to_sync is no longer needed when calling methods on the channel layer.
#When a user posts a message, a JavaScript function will
#transmit the message over WebSocket to a ChatConsumer.
#The ChatConsumer will receive that message and forward it to
#the group corresponding to the room name.
#Every ChatConsumer in the same group (and thus in the same room)
#will then receive the message from the group and forward it
#over WebSocket back to JavaScript,
# where it will be appended to the chat log.
#****self.scope['url_route']['kwargs']['room_name']
#Obtains the 'room_name' parameter from the URL route in chat/routing.py that opened
#the WebSocket connection to the consumer.
#Every consumer has a scope that contains information about its connection,
#including in particular any positional or keyword arguments
#from the URL route and the currently authenticated user if any.
#****self.room_group_name = 'chat_%s' % self.room_name
#Constructs a Channels group name directly from the user-specified room name,
#without any quoting or escaping.
#Group names may only contain letters, digits, hyphens, and periods.
#Therefore this example code will fail on room names that have other characters.
#****async_to_sync(self.channel_layer.group_add)(...)
#Joins a group. The async_to_sync(…) wrapper is required
#because ChatConsumer is a synchronous WebsocketConsumer
#but it is calling an asynchronous channel layer method.
#(All channel layer methods are asynchronous.)
#Group names are restricted to ASCII alphanumerics, hyphens, and periods only.
#Since this code constructs a group name directly
#from the room name, it will fail if the room name
#contains any characters that aren’t valid in a group name.
#****self.accept()
#Accepts the WebSocket connection.
#If you do not call accept() within the connect() method then
#the connection will be rejected and closed.
#You might want to reject a connection for example
#because the requesting user is not authorized to perform the requested action.
#It is recommended that accept() be called as
#the last action in connect() if you choose to accept the connection.
#async_to_sync(self.channel_layer.group_discard)(...)
#Leaves a group.
#async_to_sync(self.channel_layer.group_send)
#Sends an event to a group.
#An event has a special 'type' key corresponding to the name of
#the method that should be invoked on consumers that receive the event.
| [
"sanicksikani@gmail.com"
] | sanicksikani@gmail.com |
54b13d666fe1f8329b551ebc406b54ad991282ac | 3d2b12fea681115711b875118eca7613532ff107 | /alignment/fastqc.py | 551892f8820b4902160c1afa98b31eb128856e08 | [] | no_license | siyunw/RNAseq | 0a9c1acd4559a74d771a7bbd663acb86b47da46e | 03e4c6d2f6cb667d1fd9c79c7f7ff2263c5860f4 | refs/heads/master | 2020-12-11T08:14:16.747303 | 2015-07-01T21:26:51 | 2015-07-01T21:26:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,917 | py | #!/usr/bin/python
import sys
sys.path.append('/home/ubuntu/TOOLS/Scripts/utility')
import os
from date_time import date_time
from subprocess import Popen
from subprocess import call
from log import log
def fastqc(fastqc_tool, sample, end1, end2, t):
    """Launch FastQC on a pair of fastq files and fail fast on early errors.

    Args:
        fastqc_tool: path to the fastqc executable.
        sample: sample name prefix used for the log file name.
        end1, end2: paths to the paired-end fastq files.
        t: number of threads (string, passed straight to ``fastqc -t``).

    Returns:
        0 when fastqc is still running (or exited cleanly) after the grace
        period; exits the whole process with status 1 on early failure.
    """
    import time  # local import keeps the module's top-level imports untouched
    # casual logging - look for a LOGS directory, otherwise assume current dir
    log_dir = 'LOGS/' if os.path.isdir('LOGS') else './'
    loc = log_dir + sample + '.fastqc.log'
    fastqc_cmd = fastqc_tool + ' -t ' + t + ' -o QC/ ' + end1 + ' ' + end2
    log(loc, date_time() + fastqc_cmd + "\n")
    f = Popen(fastqc_cmd, shell=True, stdin=None, stdout=None, stderr=None, close_fds=True)
    # Give fastqc a moment to fail fast (e.g. a phred-encoding mismatch);
    # a healthy run keeps going in the background.
    time.sleep(20)
    rc = f.poll()
    # BUG FIX: the original tested str(f.poll()) == '1', which missed every
    # non-zero exit code other than exactly 1.  poll() returns None while
    # the process is still running (fine) or its exit code once finished.
    if rc is not None and rc != 0:
        log(loc, date_time() + 'fastqc returned an error. Check your inputs and try again!\n')
        exit(1)
    return 0
# Command-line entry point: parse the fastqc invocation parameters and run.
if __name__ == "__main__":
    import argparse
    parser=argparse.ArgumentParser(description='fastqc module. Provides quality stats for fastq file and is independent of alignment.')
    parser.add_argument('-f','--fastqc',action='store',dest='fastqc_tool', help='Location of fastqc tool.')
    parser.add_argument('-sa','--sample',action='store',dest='sample',help='Sample/location name prefix')
    parser.add_argument('-f1','--file1',action='store',dest='end1',help='First of paired-end fastq file')
    parser.add_argument('-f2','--file2',action='store',dest='end2',help='Second of paired-end fastq file')
    parser.add_argument('-t','--threads',action='store',dest='t',help='Number of threads')
    # With no arguments at all, print usage and bail out non-zero.
    if len(sys.argv)==1:
        parser.print_help()
        sys.exit(1)
    inputs=parser.parse_args()
    (fastqc_tool,sample,end1,end2,t)=(inputs.fastqc_tool,inputs.sample,inputs.end1,inputs.end2,inputs.t)
    fastqc(fastqc_tool,sample,end1,end2,t)
| [
"miguel.a.brown@gmail.com"
] | miguel.a.brown@gmail.com |
2540a4d651baff051071787da67a54768a6abc65 | ffb2933845f6c1080d465aa45ed2dce0da4c9068 | /examples/projects/golem/tests/project/add_directory_to_pages_section.py | b00282ab6dc4f550074145bb2a1d8b4083d01500 | [
"MIT"
] | permissive | vault-the/golem | d84afb40d1222fddc69aa0c2b907910740a66fbc | 3bd132685b148c0d9c12deeebfc00569d07063e4 | refs/heads/master | 2022-11-11T02:34:47.043655 | 2017-09-26T17:08:33 | 2017-09-26T17:08:33 | 105,012,053 | 0 | 0 | MIT | 2020-06-29T14:47:46 | 2017-09-27T11:53:05 | JavaScript | UTF-8 | Python | false | false | 510 | py |
description = 'Verify that the user can add a directory in the pages section by appending \'\\\' at the end'
pages = ['login',
'index',
'project']
def setup(data):
go_to('http://localhost:8000/')
login.do_login('admin', 'admin')
def test(data):
index.access_project('test')
store('directory_name', random('ccccc/'))
project.add_new_page(data.directory_name)
project.verify_page_exists(data.directory_name)
def teardown(data):
close()
| [
"feo.luciano@gmail.com"
] | feo.luciano@gmail.com |
60260c2e6350adf3295c6d99c693e6cec5c5911d | e8c6ef46c2e62f7952b07e6e3c6e3bec4629a54f | /generales/views.py | f530cfc8cb60560e67dafb71a605f40f97425059 | [] | no_license | DevManuelBarros/octasys | 37af5acc53860743e2d71ac1a2a5583d10fd71fb | 791cc73e9ab2a8b2aee03ff0b5e6af805fe2b709 | refs/heads/master | 2022-11-06T14:36:39.590221 | 2020-06-22T10:29:28 | 2020-06-22T10:29:28 | 270,491,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,008 | py | from django.shortcuts import render
from django.urls import reverse_lazy
from django.http import HttpResponse
from django.template import RequestContext
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import (CreateView,
ListView,
DetailView,
UpdateView)
from django.contrib.auth.decorators import login_required
from .precios import CalcularPrecios
from .models import Materiales, Producto, Categoria
from .forms import (MaterialesCreateForm,
CategoriaCreateForm,
ProductoCreateForm)
@login_required
def index(request):
    """Render the price table built from the three computed price lists."""
    precios_fn, precios_ml, precios_mp = CalcularPrecios()
    context = {
        'fn': precios_fn,
        'ml': precios_ml,
        'mp': precios_mp,
        'titulos': ['Producto', '1-99', '100-249', '250-499', '500-999', '1000+'],
    }
    return render(request, 'precios.html', context)
# ======== MATERIALES ============
class MaterialesCreate(LoginRequiredMixin, CreateView):
    # Creation form for Materiales; redirects to the dashboard on success.
    form_class = MaterialesCreateForm
    template_name = 'generales/create_form_small.html'
    success_url = reverse_lazy('generales:index')
    def get_context_data(self, **kwargs):
        # Inject the header/form titles that the shared template renders.
        # NOTE(review): the key 'nombre_fomulario' looks misspelled
        # ('formulario'), but templates depend on it -- confirm before renaming.
        instance = super(MaterialesCreate, self).get_context_data(**kwargs)
        instance['nombre_cabecera'] = 'Materiales'
        instance['nombre_fomulario'] = 'Formulario para Crear Materiales'
        return instance
class MaterialesList(LoginRequiredMixin, ListView):
    # Read-only listing of every Materiales record.
    model = Materiales
    template_name = 'generales/materiales_list.html'
class MaterialDetail(LoginRequiredMixin, DetailView):
    # Detail page for a single Materiales record.
    model = Materiales
    template_name = 'generales/materiales_detail.html'
class MaterialesUpdate(LoginRequiredMixin, UpdateView):
    # Edit form for Materiales, reusing the creation form and template.
    model = Materiales
    form_class = MaterialesCreateForm
    template_name = 'generales/create_form_small.html'
    success_url = reverse_lazy('generales:MaterialesList')
# =========== CATEGORIA =============
class CategoriaCreate(LoginRequiredMixin, CreateView):
    # Creation form for Categoria; redirects to the dashboard on success.
    form_class = CategoriaCreateForm
    template_name = 'generales/create_form_small.html'
    success_url = reverse_lazy('generales:index')
    def get_context_data(self, **kwargs):
        # Inject the header/form titles that the shared template renders
        # (same 'nombre_fomulario' spelling as the sibling views -- the
        # templates rely on this exact key).
        instance = super(CategoriaCreate, self).get_context_data(**kwargs)
        instance['nombre_cabecera'] = 'Categoria'
        instance['nombre_fomulario'] = 'Formulario para Crear Categorias'
        return instance
class CategoriaList(LoginRequiredMixin, ListView):
    # Read-only listing of every Categoria record.
    model = Categoria
    template_name = 'generales/categoria_list.html'
class CategoriaDetail(LoginRequiredMixin, DetailView):
    # Detail page for a single Categoria record.
    model = Categoria
    template_name = 'generales/categoria_detail.html'
class CategoriaUpdate(LoginRequiredMixin, UpdateView):
    # Edit form for Categoria, reusing the creation form and template.
    # NOTE(review): assumes a URL named 'CategoriasList' (plural) exists in
    # the 'generales' namespace -- verify against urls.py.
    model = Categoria
    form_class = CategoriaCreateForm
    template_name = 'generales/create_form_small.html'
    success_url = reverse_lazy('generales:CategoriasList')
# ============= PRODUCTO =============
class ProductoCreate(LoginRequiredMixin,CreateView):
    # Creation form for Producto; redirects to the product list on success.
    form_class = ProductoCreateForm
    template_name = 'generales/create_form_small.html'
    success_url = reverse_lazy('generales:ProductoList')
    def get_context_data(self, **kwargs):
        # Inject the header/form titles that the shared template renders
        # (key spelling 'nombre_fomulario' matches the sibling views).
        instance = super(ProductoCreate, self).get_context_data(**kwargs)
        instance['nombre_cabecera'] = 'Producto'
        instance['nombre_fomulario'] = 'Formulario para Crear Producto'
        return instance
class ProductoList(LoginRequiredMixin, ListView):
    # Read-only listing of every Producto record.
    model = Producto
    template_name = 'generales/producto_list.html'
class ProductoUpdate(LoginRequiredMixin, UpdateView):
    # Edit form for Producto, reusing the creation form and template.
    model = Producto
    form_class = ProductoCreateForm
    template_name = 'generales/create_form_small.html'
    success_url = reverse_lazy('generales:ProductoList')
class ProductoDetail(LoginRequiredMixin, DetailView):
model = Producto
template_name = 'generales/producto_detail' | [
"dev.manuel.barros@gmail.com"
] | dev.manuel.barros@gmail.com |
03fb12558bad467036f3bbeb3ec4aa5eb610d426 | 72880d033c9948098291efebf934255635f8c6ea | /pythonexamples/24.py | c563d71b414d6e7f77e22a58b1cca7d1ff5adb1a | [] | no_license | manutdmohit/mypythonexamples | 729347aec300bda01f629224337c84d5838a71f2 | b189c201d07b1a345478699bbb3852c02eb96ce5 | refs/heads/master | 2023-04-18T01:55:22.026867 | 2021-05-13T05:59:09 | 2021-05-13T05:59:09 | 366,946,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | Python 3.8.5 (tags/v3.8.5:580fbb0, Jul 20 2020, 15:57:54) [MSC v.1924 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> s='ABABABA'
>>> s=s.replace('A','B')
>>> s='ABABABA'
>>> print(id(s))
2150320808304
>>> s=s.replace('A','B')
>>> print(id(s))
2150310923824
>>> print(s)
BBBBBBB
>>> l=[10,20,30]
>>> print(type(l))
<class 'list'>
>>> l[0]
10
>>> print(type(l[0]))
<class 'int'>
>>> l=[10,20,30]
>>> d='-'.join(l)
Traceback (most recent call last):
File "<pyshell#12>", line 1, in <module>
d='-'.join(l)
TypeError: sequence item 0: expected str instance, int found
>>> l=['10','20','30']
>>> d='-'.join(l)
>>> print(d)
10-20-30
>>> print('abc123'.isalpha())
False
>>> print('abc123'.isdigit())
False
>>> print('abc123'.isalnum())
True
>>>
>>> s=mohitsaud
Traceback (most recent call last):
File "<pyshell#20>", line 1, in <module>
s=mohitsaud
NameError: name 'mohitsaud' is not defined
>>> s='mohitsaud'
>>> s.append('@gmail.com')
Traceback (most recent call last):
File "<pyshell#22>", line 1, in <module>
s.append('@gmail.com')
AttributeError: 'str' object has no attribute 'append'
>>> s.append(@gmail.com)
SyntaxError: invalid syntax
>>> s.append(1)
Traceback (most recent call last):
File "<pyshell#24>", line 1, in <module>
s.append(1)
AttributeError: 'str' object has no attribute 'append'
>>> s.append('1')
Traceback (most recent call last):
File "<pyshell#25>", line 1, in <module>
s.append('1')
AttributeError: 'str' object has no attribute 'append'
>>> s=[12,13,15,16]
>>> s.append('1')
>>> print(s)
[12, 13, 15, 16, '1']
>>> s.append(1)
>>> print(s)
[12, 13, 15, 16, '1', 1]
>>> s=[12,13,15,16]
>>> s.append(1)
>>> print(s)
[12, 13, 15, 16, 1]
>>> email='saudmohit'+'@'+'gmail.com'
>>> print(email)
saudmohit@gmail.com
>>> | [
"saudmohit@gmail.com"
] | saudmohit@gmail.com |
6757670486078c66a4396fd4d9821c31a12a5d5b | bdfe75b2ff098b94af1d3d4a653c5184f3819bf0 | /0x05-python-exceptions/2-safe_print_list_integers.py | 19d54ffbe59a16290d72270d45200568132c9325 | [] | no_license | ahmedzitouni586/holbertonschool-higher_level_programming | 0b20c02473d327372d4f62aa88fb76e210db3f44 | 7d127082a1981ea9b84b01e4237c2ea266f2df58 | refs/heads/main | 2023-08-16T13:09:56.477317 | 2021-09-29T12:47:45 | 2021-09-29T12:47:45 | 361,734,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | #!/usr/bin/python3
def safe_print_list_integers(my_list=[], x=0):
y = 0
for i in range(0, x):
try:
print("{:d}".format(my_list[i]), end="")
y += 1
except (TypeError, ValueError):
pass
print("")
return y
| [
"zitouniahmed595@gmail.com"
] | zitouniahmed595@gmail.com |
098689df849b3420052b74330371f6d4a9f19f18 | 92754bb891a128687f3fbc48a312aded752b6bcd | /Algorithms/Python3.x/929-Unique_Email_Addresses.py | 1435e7373d2c3c24eb6719646e9908472c2ef993 | [] | no_license | daidai21/Leetcode | ddecaf0ffbc66604a464c3c9751f35f3abe5e7e5 | eb726b3411ed11e2bd00fee02dc41b77f35f2632 | refs/heads/master | 2023-03-24T21:13:31.128127 | 2023-03-08T16:11:43 | 2023-03-08T16:11:43 | 167,968,602 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | # Runtime: 92 ms, faster than 8.90% of Python3 online submissions for Unique Email Addresses.
# Memory Usage: 13.9 MB, less than 6.25% of Python3 online submissions for Unique Email Addresses.
class Solution:
    def numUniqueEmails(self, emails: List[str]) -> int:
        """Count distinct delivery addresses after normalizing local names.

        Normalization rules: every '.' in the local part is insignificant,
        and everything from the first '+' onward is ignored; the domain is
        kept verbatim.

        Improvement over the original: the old code called list.remove once
        per '.' (quadratic) and deduplicated on str(list-of-chars) keys;
        this version uses linear string operations and stores the actual
        normalized address.  The returned count is unchanged.
        """
        unique = set()
        for email in emails:
            local, domain = email.split('@')
            # Drop the '+suffix' (no-op when there is no '+').
            local = local.split('+', 1)[0]
            # Dots in the local name do not affect delivery.
            local = local.replace('.', '')
            unique.add(local + '@' + domain)
        return len(unique)
# Runtime: 60 ms, faster than 66.02% of Python3 online submissions for Unique Email Addresses.
# Memory Usage: 13.9 MB, less than 6.25% of Python3 online submissions for Unique Email Addresses.
class Solution:
    def numUniqueEmails(self, emails: List[str]) -> int:
        """Return how many distinct addresses actually receive mail."""
        def canonical(address):
            # Normalize one address: cut at the first '+', drop '.' in the
            # local part, keep the domain verbatim.
            local, domain = address.split('@')
            local = local.split('+', 1)[0]
            return local.replace('.', '') + '@' + domain
        return len({canonical(address) for address in emails})
| [
"daidai4269@aliyun.com"
] | daidai4269@aliyun.com |
108c35e3e97d04f06695139964647fc10c1a9c9d | 748fc54d0351880bff2b817ca060a70ff9752561 | /logic-2/make_chocolate.py | 13824445cc635f59ec88eabb9eb4492c23084686 | [] | no_license | NicoKNL/codingbat-python-solutions | 7e7cd52d1557f200f01d3843a882e55a0818c6a6 | 0fafa65effeefb78fdbe95a8a5e8a461e5778e0b | refs/heads/master | 2021-09-18T19:39:20.033887 | 2018-07-18T18:51:48 | 2018-07-18T18:51:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | def make_chocolate(small, big, goal):
n_big = int(math.floor(goal / 5))
goal -= min([big, n_big]) * 5
if small >= goal:
return goal
else:
return -1
| [
"klaassen.nico@gmail.com"
] | klaassen.nico@gmail.com |
eef3747cf754902076f13a7656ee386a2e599e9a | bbbdfb1976a0710c8adebb53ee459c53d1cc7510 | /Tour/migrations/0010_auto_20180524_1140.py | 647a5e814497b20657c268ce0ec303d8e2ce4c9e | [] | no_license | XBoBaX/tour_z | 8208f3c8211c1cf7c6cc2de5ca349ad0eacedfc2 | e0f835058c299c2badaaedba0e93f2f545a85ebe | refs/heads/master | 2020-03-18T08:54:22.991151 | 2018-05-24T19:18:35 | 2018-05-24T19:18:35 | 134,533,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # Generated by Django 2.0.4 on 2018-05-24 08:40
from django.db import migrations
class Migration(migrations.Migration):
    # Display-options-only change: no database schema is touched.
    dependencies = [
        ('Tour', '0009_auto_20180520_1340'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='infotour',
            # verbose_name is the Russian label "Tickets".
            options={'verbose_name': 'Билеты'},
        ),
    ]
| [
"memesHipe@gmail.com"
] | memesHipe@gmail.com |
cd993be8949602eb7e7a1d6b719d16af43df8747 | 227539d0906cdfbb7cd19f16599c35d5bd09abfd | /CodingBat/List-1/has23.py | 3b61447eb49f2248bb0723ff0dc457cd7982d747 | [] | no_license | solomonli/PycharmProjects | cceb92a11ec1f9e7fef25bca552d8264c75228a0 | 31673627487db1370424f5b0aeee3e20bb23b47a | refs/heads/master | 2021-06-24T11:59:36.365496 | 2019-07-08T09:53:18 | 2019-07-08T09:53:18 | 148,558,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | def has23(nums):
"""
Given an int array length 2, return True if it contains a 2 or a 3.
has23([2, 5]) → True
has23([4, 3]) → True
has23([4, 5]) → False
:param nums: list of ints length 2
:return: boolean
"""
return 2 in nums or 3 in nums
# Demo calls mirroring the docstring examples (prints True / True / False).
print(has23([2, 5]))
print(has23([4, 3]))
print(has23([4, 5]))
| [
"richdad.solomon@gmail.com"
] | richdad.solomon@gmail.com |
6773a55996ed17462c747d9c0806f1a799fedfb9 | 947e71b34d21f3c9f5c0a197d91a880f346afa6c | /ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_server.py | 6732573f6ab9c7cc59fbc9dab4541f252063f7cc | [
"Apache-2.0",
"MIT",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"OFL-1.1",
"MS-PL",
"AFL-2.1",
"GPL-2.0-only",
"Python-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | liuwenru/Apache-Ambari-ZH | 4bc432d4ea7087bb353a6dd97ffda0a85cb0fef0 | 7879810067f1981209b658ceb675ac76e951b07b | refs/heads/master | 2023-01-14T14:43:06.639598 | 2020-07-28T12:06:25 | 2020-07-28T12:06:25 | 223,551,095 | 38 | 44 | Apache-2.0 | 2023-01-02T21:55:10 | 2019-11-23T07:43:49 | Java | UTF-8 | Python | false | false | 5,069 | py | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from postgresql_service import postgresql_service
class PostgreSQLServer(Script):
  """Ambari lifecycle handler for the PostgreSQL server backing the Hive
  metastore.

  NOTE: Python 2 code (octal literal 0755 below); the config edits are done
  with sed/echo shell pipelines, so quoting is deliberate and fragile.
  """
  def install(self, env):
    self.install_packages(env)
    self.configure(env)
  def configure(self, env):
    """Initialize the cluster, rewrite its config files, and create the
    metastore database and user."""
    import params
    env.set_params(params)
    # init the database, the ':' makes the command always return 0 in case the database has
    # already been initialized when the postgresql server colocates with ambari server
    Execute(format("service {postgresql_daemon_name} initdb || :"))
    # update the configuration files
    self.update_pghda_conf(env)
    self.update_postgresql_conf(env)
    # Reload the settings and start the postgresql server for the changes to take effect
    # Note: Don't restart the postgresql server because when Ambari server and the hive metastore on the same machine,
    # they will share the same postgresql server instance. Restarting the postgresql database may cause the ambari server database connection lost
    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'reload')
    # ensure the postgresql server is started because the add hive metastore user requires the server is running.
    self.start(env)
    # create the database and hive_metastore_user
    File(params.postgresql_adduser_path,
         mode=0755,
         content=StaticFile(format("{postgresql_adduser_file}"))
    )
    cmd = format("bash -x {postgresql_adduser_path} {postgresql_daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {db_name}")
    # Retry up to 3 times with a short pause: the server may still be
    # coming up when the user-creation script first runs.
    Execute(cmd,
            tries=3,
            try_sleep=5,
            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
    )
  def start(self, env):
    import params
    env.set_params(params)
    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'start')
  def stop(self, env):
    import params
    env.set_params(params)
    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'stop')
  def status(self, env):
    # status_params (not params) carries the values available during
    # Ambari status checks.
    import status_params
    postgresql_service(postgresql_daemon_name=status_params.postgresql_daemon_name, action = 'status')
  def update_postgresql_conf(self, env):
    """Force listen_addresses='*' and standard_conforming_strings=off by
    deleting any existing setting (sed) and appending the desired one."""
    import params
    env.set_params(params)
    # change the listen_address to *
    Execute(format("sed -i '/^[[:space:]]*listen_addresses[[:space:]]*=.*/d' {postgresql_conf_path}"))
    Execute(format("echo \"listen_addresses = '*'\" | tee -a {postgresql_conf_path}"))
    # change the standard_conforming_string to off
    Execute(format("sed -i '/^[[:space:]]*standard_conforming_strings[[:space:]]*=.*/d' {postgresql_conf_path}"))
    Execute(format("echo \"standard_conforming_strings = off\" | tee -a {postgresql_conf_path}"))
  def update_pghda_conf(self, env):
    """Rewrite pg_hba.conf so postgres and the hive metastore user are
    trusted both via local sockets and from any host (0.0.0.0/0).

    Pattern: comment out the catch-all rules, delete any previous rules for
    these two users, then append fresh 'trust' rules.
    """
    import params
    env.set_params(params)
    # trust hive_metastore_user and postgres locally
    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*all.*$/s/^/#/' {postgresql_pghba_conf_path}"))
    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*postgres.*$/d' {postgresql_pghba_conf_path}"))
    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*\"{hive_metastore_user_name}\".*$/d' {postgresql_pghba_conf_path}"))
    Execute(format("echo \"local all postgres trust\" | tee -a {postgresql_pghba_conf_path}"))
    Execute(format("echo \"local all \\\"{hive_metastore_user_name}\\\" trust\" | tee -a {postgresql_pghba_conf_path}"))
    # trust hive_metastore_user and postgres via local interface
    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*all.*$/s/^/#/' {postgresql_pghba_conf_path}"))
    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*postgres.*$/d' {postgresql_pghba_conf_path}"))
    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*\"{hive_metastore_user_name}\".*$/d' {postgresql_pghba_conf_path}"))
    Execute(format("echo \"host all postgres 0.0.0.0/0 trust\" | tee -a {postgresql_pghba_conf_path}"))
    Execute(format("echo \"host all \\\"{hive_metastore_user_name}\\\" 0.0.0.0/0 trust\" | tee -a {postgresql_pghba_conf_path}"))
# Ambari invokes this script directly; execute() dispatches to the
# lifecycle method named on the command line.
if __name__ == "__main__":
  PostgreSQLServer().execute()
| [
"ijarvis@sina.com"
] | ijarvis@sina.com |
3516e2291650f72670d10733f1f01cdbe80494f8 | 02d0714edfef5a2d3630d7659c553c157e291e52 | /tempest/api_schema/response/compute/v2_1/interfaces.py | 130775b46a72c9d9a839d2e6962911ca2f65e236 | [
"Apache-2.0"
] | permissive | atulbangar09/tempest | f07dced592481a7ec71a9c7469b7d50d30cdc171 | 9f5644ce2784cd882e86ac89236f8f8f828d7c43 | refs/heads/master | 2023-02-06T10:26:40.112917 | 2020-01-21T16:17:16 | 2020-01-21T16:17:16 | 234,520,093 | 0 | 0 | Apache-2.0 | 2020-01-17T09:52:53 | 2020-01-17T09:52:52 | null | UTF-8 | Python | false | false | 2,272 | py | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api_schema.response.compute.v2_1 import parameter_types
# JSON schema for one interface attachment as returned by the Nova v2.1 API;
# shared by the single-attachment and list responses below.
interface_common_info = {
    'type': 'object',
    'properties': {
        'port_state': {'type': 'string'},
        'fixed_ips': {
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'subnet_id': {
                        'type': 'string',
                        'format': 'uuid'
                    },
                    'ip_address': parameter_types.ip_address
                },
                'additionalProperties': False,
                'required': ['subnet_id', 'ip_address']
            }
        },
        'port_id': {'type': 'string', 'format': 'uuid'},
        'net_id': {'type': 'string', 'format': 'uuid'},
        'mac_addr': parameter_types.mac_address
    },
    'additionalProperties': False,
    'required': ['port_state', 'fixed_ips', 'port_id', 'net_id', 'mac_addr']
}
# Schema shared by "show interface" and "create interface" (both 200).
get_create_interfaces = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'interfaceAttachment': interface_common_info
        },
        'additionalProperties': False,
        'required': ['interfaceAttachment']
    }
}
# Schema for "list interfaces": an array of the common attachment object.
list_interfaces = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'interfaceAttachments': {
                'type': 'array',
                'items': interface_common_info
            }
        },
        'additionalProperties': False,
        'required': ['interfaceAttachments']
    }
}
# Delete returns 202 Accepted with no body to validate.
delete_interface = {
    'status_code': [202]
}
| [
"jignasha.vithalani@triliodata.com"
] | jignasha.vithalani@triliodata.com |
b87a5862baddc52281f8bf5643f0a5d27f332020 | cb9741ba6fb4e9c6d27cd4bc1790ab4123bc467b | /autoplan/vis.py | a8e253b7e993a94c9cfb78856ed418c89ff8e8e1 | [] | no_license | willcrichton/autoplan | fa5070035c18b87981b0c0ccd16c9272728ecc2d | d7b97bca716559729deac08ccdf951f1ff10f268 | refs/heads/master | 2022-06-18T14:32:33.201629 | 2021-04-30T01:22:08 | 2021-04-30T01:22:08 | 192,789,759 | 2 | 0 | null | 2022-06-06T21:37:38 | 2019-06-19T19:10:20 | Jupyter Notebook | UTF-8 | Python | false | false | 1,325 | py | import seaborn as sns
import matplotlib.pyplot as plt
import math
import numpy as np
import pandas as pd
def plot_loss(loss, **kwargs):
ax = pd.Series(loss).plot(**kwargs)
ax.set_xlabel('Epoch')
ax.set_ylabel('Loss')
return ax
def plot_accuracy(evals, **kwargs):
ax = pd.Series([e.accuracy for e in evals]).plot(**kwargs)
ax.set_xlabel('Epoch')
ax.set_ylabel('Accuracy')
ax.set_ylim(0, 1)
return ax
def plot_cm(ax, name, cm, classes, normalize=True):
if normalize:
cm = cm / cm.sum(axis=1)[:, np.newaxis]
sns.heatmap(cm, annot=True, ax=ax)
ax.set_xlabel('Pred class')
ax.set_ylabel('True class')
ax.set_xticklabels(classes, rotation=45)
ax.set_yticklabels(classes, rotation=45)
ax.set_title(name)
def plot_all_cm(eval):
fig, axes = plt.subplots(len(eval), 1)
fig.set_size_inches(8, len(eval) * 5)
for ax, k in zip(axes, eval):
eval[k].plot_cm(ax=ax, title=k)
plt.tight_layout()
def plot_all_accuracy(evals):
N = len(evals[0])
fig, axes = plt.subplots(math.ceil(N/2), 2)
fig.set_size_inches(8, N/2 * 3)
axes = [ax for l in axes for ax in l]
for ax, k in zip(axes, evals[0]):
plot_accuracy([e[k] for e in evals], ax=ax)
ax.set_title(k)
ax.set_ylim(0, 1)
plt.tight_layout()
| [
"wcrichto@cs.stanford.edu"
] | wcrichto@cs.stanford.edu |
6ddc86edaf26074f4217c134f0f054cfad8fc848 | 63675264d51fad9cac2ed96576156d25ba6a5e21 | /backend/productionSlide/api/v1/urls.py | 3d70cceb103b3d7d9b0e23ac27fd98553ce7c9cb | [] | no_license | crowdbotics-apps/hunterryan-22013 | 42a3451c621042d65abd53f0f6292ffdf88a759b | 8c2cc0f935f1f405eb23e90461529c402b7985c0 | refs/heads/master | 2023-01-29T14:28:33.329793 | 2020-12-07T23:18:19 | 2020-12-07T23:18:19 | 307,755,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import ProductionViewSet
router = DefaultRouter()
router.register("production", ProductionViewSet)
urlpatterns = [
path("", include(router.urls)),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
bc9e8f8e78181b4b170530df62bef23d37e878e4 | 1ed4e96c20da03fbd3aa4f18d4b004a59d8f89e5 | /Repo/venv/Lib/site-packages/caffe2/python/mkl/mkl_LRN_speed_test.py | 6b0c5aba7e2cab7d74e0aac1c1662d4e817165f8 | [] | no_license | donhatkha/CS2225.CH1501 | eebc854864dc6fe72a3650f640787de11d4e82b7 | 19d4dd3b11f8c9560d0d0a93882298637cacdc80 | refs/heads/master | 2023-07-19T13:27:17.862158 | 2021-02-08T07:19:05 | 2021-02-08T07:19:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,266 | py |
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import cnn, core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testLRNSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 2, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run relu.
net.LRN("X", ["Y", "Y_Scale"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW")
net.LRN("X_mkl", ["Y_mkl", "Y_Scale_mkl"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW", device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("LRN CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testConvReluLRNSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 3, 224, 224).astype(np.float32) - 0.5
W = np.random.rand(64, 3, 11, 11).astype(np.float32) - 0.5
b = np.random.rand(64).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("W_mkl", W, device_option=mkl_do)
workspace.FeedBlob("b_mkl", b, device_option=mkl_do)
net = core.Net("test")
net.Conv(["X", "W", "b"], "C", pad=1, stride=1, kernel=11)
net.Conv(["X_mkl", "W_mkl", "b_mkl"], "C_mkl",
pad=1, stride=1, kernel=11, device_option=mkl_do)
net.Relu("C", "R")
net.Relu("C_mkl", "R_mkl", device_option=mkl_do)
net.LRN("R", ["Y", "Y_Scale"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW")
net.LRN("R_mkl", ["Y_mkl", "Y_Scale_mkl"],size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW", device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
if __name__ == '__main__':
unittest.main()
| [
"59596379+khado2359@users.noreply.github.com"
] | 59596379+khado2359@users.noreply.github.com |
2615d0e57751c7147ed0b0aca621d8ce9a41aa31 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/venues/bar_venue/__init__.py | 0cd92a4f71cc399dedf4e0830ffd1d28dc481848 | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\venues\bar_venue\__init__.py
# Compiled at: 2015-06-01 19:48:40
# Size of source mod 2**32: 79 bytes
pass | [
"cristina.caballero2406@gmail.com"
] | cristina.caballero2406@gmail.com |
483465d8b1bd2615e83a40ca9935baa90a3e55d4 | e96deed00dd14a1f6d1ed7825991f12ea8c6a384 | /count_possibilities_convert_int_to_string.py | 90a0516abc752e4e53cfb526430963129e87e578 | [] | no_license | borisachen/leetcode | 70b5c320abea8ddfa299b2e81f886cfeb39345c1 | 15e36b472a5067d17482dbd0d357336d31b35ff4 | refs/heads/master | 2021-01-19T17:07:46.726320 | 2020-11-16T04:30:52 | 2020-11-16T04:30:52 | 88,306,634 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | Count Possibilities
We have a dictionary with
a:1
b:2
...
z:26
Given a string = '11135213',
we want to find the number of possibilities that the string forms.
11 -> 1,1 or 11
Whenever we find a 1 or a 2, we need to look ahead see if we need to create a second branch.
def dfs(query, keys):
if query == '':
return 0
if len(query) >= 2 and int(query[0:2]) <= 26:
return 1 + dfs(query[1:], keys) + dfs(query[2:], keys)
else:
return dfs(query[1:], keys)
def count_possibilities(query):
keys = {}
for i,j in enumerate('abcdefghijklmnopqrstuvwxyz'):
keys[str(i)] = j
return dfs(query, keys) + 1
count_possibilities(query = '1') # 1
count_possibilities(query = '11') # 2
count_possibilities(query = '111') # 3
count_possibilities(query = '1111') # 5
count_possibilities(query = '2') # 1
count_possibilities(query = '26') # 2
count_possibilities(query = '27') # 1
| [
"boris.chen@gmail.com"
] | boris.chen@gmail.com |
ba78ba99194fc7c79e612691e84b61085f9415e0 | 722af8e6fa81960a6119c2e45ba6795771bad595 | /accounts/models.py | 16dd824055c2fbbac6bb0267915d8da9bc4e22e9 | [] | no_license | witty-technologies-empowerment/pmc | 85d21fa3c360d40adeec7ca93792b5bc68c258e5 | 201bee60197240eec911637e136cf14bc5814eec | refs/heads/master | 2023-05-27T12:37:48.894933 | 2021-06-13T04:34:57 | 2021-06-13T04:34:57 | 376,439,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from django.db import models
from django.contrib.auth.models import User
class userContact(models.Model):
user = models.CharField(max_length=20)
telephone = models.CharField(max_length=12)
def __str__(self):
return self.user + " telephone is " + self.telephone + "." | [
"www.spbiology@gmail.com"
] | www.spbiology@gmail.com |
fee215edec6ed11ef121fdb37857078845e85c5d | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /5ZDz5nDDPdfg5BH8K_10.py | e537a9a9cefc2c883be9d8774afd7565570187c6 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,201 | py | """
Starting with either `3` or `5` and given these operations:
* add `5`
* multiply by `3`
You should say if it is possible to reach the target number `n`.
### Examples
only_5_and_3(14) ➞ True
# 14 = 3*3 + 5
only_5_and_3(25) ➞ True
# 25 = 5+5+5+5+5
only_5_and_3(7) ➞ False
# There exists no path to the target number 7
### Notes
You can solve this problem by recursion or algebra.
"""
def only_5(x):
if x % 5 ==0:
return True
elif x % 5!=0:
sisa=x%5
for i in range (1,x):
ni=pow(3,i)
if ni==sisa:
return True
elif ni > sisa:
return False
def only_3(x):
lis=[]
for i in range(1,x):
ni=pow(3,i)
if ni==x:
return True
sisa= x-ni
if sisa%5==0:
lis.append(i)
break
if len(lis)==0:
return False
sis=x-pow(3,lis[0])
if sis%5==0 and sis>0:
return True
return False
def only_5_and_3(y):
only3=only_3(y)
only5=only_5(y)
if only3==True and only5==False:
return True
elif only3==False and only5==True:
return True
elif only3==False and only3==False:
return False
elif only3==True and only5==True:
return True
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
fc6dd74e3c081306a2ecf826409d03aaffd48160 | f6b4372bd887a1cde58962ce2b22dc2528ab4b2c | /meus_programas/contador_teste.py | 9d8647ca104d189c4022209c5c9c776f3f6e0d75 | [] | no_license | Titowisk/estudo_python | 4eddec1ee9d190f5efd1ed1d773157cb767d0746 | 1873649a5eef6cf60e37e36bbb6195df419fa3ab | refs/heads/master | 2021-05-05T09:13:08.754379 | 2018-06-18T23:38:55 | 2018-06-18T23:38:55 | 119,263,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | #coding=utf-8
import random
def gerar_mega2():
sorteio = []
while len(sorteio) <=5:
d = random.randint(1, 30)
if d in sorteio:
continue
else:
sorteio.append(d)
sorteio.sort()
return sorteio
sorteio = gerar_mega2()
n_apostas = 0
ganhador = False
while ganhador == False:
numero_sorteado = gerar_mega2()
if numero_sorteado != sorteio:
n_apostas +=1
if n_apostas == 100:
print('Você já fez {} apostas'.format(n_apostas))
continue
else:
ganhador = True
print(n_apostas)
| [
"rabelo51@gmail.com"
] | rabelo51@gmail.com |
eb469b3339967a945f5031ae23c8825a832f2c4a | 45b4ff6a4e4804ff84847d56400e10cdb0d96186 | /python/test/test_prediction_resource_relationships_vehicle.py | 4aaafea194772b420459fc15adc18a5ba01d9520 | [] | no_license | pranav/mbta-libraries | fabbc9305569a344e25fa1b281cba290f0fa3f13 | e793696addd94750f722f3132aadc8dfe00adef5 | refs/heads/master | 2021-08-22T11:15:26.014862 | 2017-11-30T03:12:52 | 2017-11-30T03:12:52 | 112,558,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | # coding: utf-8
"""
MBTA
MBTA service API. https://www.mbta.com
OpenAPI spec version: 3.0
Contact: developer@mbta.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.prediction_resource_relationships_vehicle import PredictionResourceRelationshipsVehicle
class TestPredictionResourceRelationshipsVehicle(unittest.TestCase):
""" PredictionResourceRelationshipsVehicle unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testPredictionResourceRelationshipsVehicle(self):
"""
Test PredictionResourceRelationshipsVehicle
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.prediction_resource_relationships_vehicle.PredictionResourceRelationshipsVehicle()
pass
if __name__ == '__main__':
unittest.main()
| [
"pgandhi@hubspot.com"
] | pgandhi@hubspot.com |
a04419a657b0bda846d36d62dae92b9c6fb8a54d | 5c0c0176db0ccf2c24b6b5ed459a8dc144518b13 | /nni/nas/tensorflow/mutator.py | b0d2aed684e289b556ccc1388b95477d55c5c2da | [
"MIT"
] | permissive | petuum/nni | ac4f4a1c4d6df71684eeffa127b7c4858fd29e97 | 8134be6269902939232482d63649c06f9864be6d | refs/heads/master | 2023-02-18T11:21:41.078889 | 2021-01-20T03:21:50 | 2021-01-20T03:21:50 | 302,736,456 | 4 | 3 | MIT | 2020-11-20T20:21:15 | 2020-10-09T19:34:11 | Python | UTF-8 | Python | false | false | 3,045 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import tensorflow as tf
from .base_mutator import BaseMutator
_logger = logging.getLogger(__name__)
class Mutator(BaseMutator):
def __init__(self, model):
super().__init__(model)
self._cache = {}
def sample_search(self):
raise NotImplementedError('Method `sample_search` must be overridden')
def sample_final(self):
raise NotImplementedError('Method `sample_final` must be overriden for exporting')
def reset(self):
self._cache = self.sample_search()
def export(self):
return self.sample_final()
# TODO: status
# TODO: graph
def on_forward_layer_choice(self, mutable, *inputs):
mask = self._get_decision(mutable)
assert len(mask) == len(mutable), \
'Invalid mask, expected {} to be of length {}.'.format(mask, len(mutable))
out = self._select_with_mask(lambda choice: choice(*inputs), mutable.choices, mask)
return self._tensor_reduction(mutable.reduction, out), mask
def on_forward_input_choice(self, mutable, tensor_list):
mask = self._get_decision(mutable)
assert len(mask) == mutable.n_candidates, \
'Invalid mask, expected {} to be of length {}.'.format(mask, mutable.n_candidates)
out = self._select_with_mask(lambda tensor: tensor, tensor_list, mask)
return self._tensor_reduction(mutable.reduction, out), mask
def _select_with_mask(self, map_fn, candidates, mask):
if mask.dtype.is_bool:
out = [map_fn(cand) for cand, m in zip(candidates, mask) if m]
elif mask.dtype.is_floating:
out = [map_fn(cand) * m for cand, m in zip(candidates, mask) if m]
else:
raise ValueError('Unrecognized mask, dtype is {}'.format(mask.dtype.name))
return out
def _tensor_reduction(self, reduction_type, tensor_list):
if reduction_type == 'none':
return tensor_list
if not tensor_list:
return None
if len(tensor_list) == 1:
return tensor_list[0]
if reduction_type == 'sum':
return sum(tensor_list)
if reduction_type == 'mean':
return sum(tensor_list) / len(tensor_list)
if reduction_type == 'concat':
image_data_format = tf.keras.backend.image_data_format()
if image_data_format == "channels_first":
axis = 0
else:
axis = -1
return tf.concat(tensor_list, axis=axis) # pylint: disable=E1120,E1123
# pylint issue #3613
raise ValueError('Unrecognized reduction policy: "{}'.format(reduction_type))
def _get_decision(self, mutable):
if mutable.key not in self._cache:
raise ValueError('"{}" not found in decision cache.'.format(mutable.key))
result = self._cache[mutable.key]
_logger.debug('Decision %s: %s', mutable.key, result)
return result
| [
"noreply@github.com"
] | petuum.noreply@github.com |
0cf491a1ddc59c3ebadf24ae52de20ab9af2111e | 0b802a3b3572ae4e9be55cb1c116ebcf06cceb4d | /tests/pipupgrade/util/test_string.py | e77577f017a74f664002ce2f4f93dcb3a4e21112 | [
"MIT"
] | permissive | todun/pipupgrade | fc8b1315a9b432a75dd78c1783f85cd0147e631b | 2f2e04d77c7e276e4b6172d42b5bdeaae11075fb | refs/heads/master | 2020-06-25T00:43:26.995923 | 2019-06-10T18:46:22 | 2019-06-10T18:46:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | # imports - module imports
from pipupgrade.util.string import strip, strip_ansi, pluralize, kebab_case
from pipupgrade import cli
def test_strip():
string = "foobar"
assert strip(string) == string
string = "\n foobar\nfoobar \n "
assert strip(string) == "foobar\nfoobar"
string = "\n\n\n"
assert strip(string) == ""
def test_strip_ansi():
assert strip_ansi(cli.format("foobar", cli.GREEN)) == "foobar"
assert strip_ansi(cli.format("barfoo", cli.BOLD)) == "barfoo"
def test_pluralize():
assert pluralize("package", 1) == "package"
assert pluralize("package", 2) == "packages"
assert pluralize("packages", 2) == "packages"
def test_kebab_case():
assert kebab_case("foo bar") == "foo-bar"
assert kebab_case("Foo Bar") == "foo-bar"
assert kebab_case("FOO BAR") == "foo-bar"
assert kebab_case("_FOO_BAR_", delimiter = "_") == "foo-bar"
assert kebab_case("foo_bar", delimiter = "_") == "foo-bar" | [
"achillesrasquinha@gmail.com"
] | achillesrasquinha@gmail.com |
bb99904c85e56977d83bbca5bb30be8856678735 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp-obt/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=89/sched.py | 80cbfd969d340eb1e372914d9a2939a2093bbf4b | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | -S 0 -X RUN -Q 0 -L 3 122 400
-S 1 -X RUN -Q 0 -L 3 109 400
-S 0 -X RUN -Q 0 -L 3 81 300
-S 0 -X RUN -Q 0 -L 3 81 250
-S 1 -X RUN -Q 0 -L 3 76 400
-S 4 -X RUN -Q 1 -L 1 65 400
-S 4 -X RUN -Q 1 -L 1 62 200
-S 4 -X RUN -Q 1 -L 1 47 300
-S 4 -X RUN -Q 1 -L 1 45 300
-S 3 -X RUN -Q 2 -L 1 45 200
-S 3 -X RUN -Q 2 -L 1 42 250
-S 3 -X RUN -Q 2 -L 1 33 100
-S 3 -X RUN -Q 2 -L 1 33 150
-S 2 -X RUN -Q 3 -L 1 21 150
-S 2 -X RUN -Q 3 -L 1 17 175
-S 2 -X RUN -Q 3 -L 1 16 150
-S 2 -X RUN -Q 3 -L 1 9 125
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
f69b4f7ddd7afe944af70c3875f6d79ef7203228 | eb525db30bd7fc207a50b232b47890771cc5e069 | /girder_worker/plugins/docker/stream_adapter.py | db9cc7c9f184757a5c2d955dd6d749dc44a656c3 | [
"Apache-2.0"
] | permissive | dorukozturk/girder_worker | 916220ca0ec91a9764e201ae5ac9a11b101ed367 | e38989d18e126096b402d9c8821021d01d4696bf | refs/heads/master | 2020-12-03T02:12:19.839207 | 2017-07-28T12:27:41 | 2017-07-28T12:27:41 | 95,914,617 | 0 | 0 | null | 2018-01-23T13:18:58 | 2017-06-30T18:39:00 | Python | UTF-8 | Python | false | false | 2,542 | py | from girder_worker.core.utils import StreamPushAdapter
import struct
class DockerStreamPushAdapter(StreamPushAdapter):
"""
An adapter that reads a docker stream. The format is a Header and a Payload (frame).
Where header has the following format:
header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
We want to read the header to get the size of the payload, read the payload
and forward it on to another adapter.
"""
def __init__(self, adapter):
self._adapter = adapter
self._reset()
def _reset(self):
self._header = ''
self._header_bytes_read = 0
self._payload_bytes_read = 0
self._payload_size = None
def _read_header(self):
"""
Read the header or part of the header. When the head has been read, the
payload size is decodeded and returned, otherwise return None.
"""
bytes_to_read = min(8 - self._header_bytes_read, self._data_length-self._data_offset)
self._header += self._data[self._data_offset:self._data_offset+bytes_to_read]
self._data_offset += bytes_to_read
self._header_bytes_read += bytes_to_read
if self._header_bytes_read == 8:
_, payload_size = struct.unpack('>BxxxL', self._header)
return payload_size
def _read_payload(self):
"""
Read the payload or part of the payload. The data is written directly to
the wrapped adapter.
"""
bytes_to_read = min(self._payload_size - self._payload_bytes_read,
self._data_length-self._data_offset)
self._adapter.write(self._data[self._data_offset:self._data_offset+bytes_to_read])
self._data_offset += bytes_to_read
self._payload_bytes_read += bytes_to_read
def write(self, data):
self._data = data
self._data_length = len(data)
self._data_offset = 0
# While we still have data iterate over it
while self._data_length > self._data_offset:
# We are reading the header
if self._header_bytes_read < 8:
self._payload_size = self._read_header()
# We are reading the payload
if self._payload_size and self._payload_bytes_read < self._payload_size:
self._read_payload()
# We are done with this payload
if self._payload_size == self._payload_bytes_read:
self._reset()
def close(self):
self._adapter.close()
| [
"chris.harris@kitware.com"
] | chris.harris@kitware.com |
905ee1e8555922be6ee7645b9393f7a84645ee41 | bc197cc45c503389aa4cec57862204f6ece7f94f | /game/content/ghplots/utility.py | 604f69090f7b1dff4e368a2011bf186d5fcb9c2f | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | asdlei99/gearhead-caramel | 84f28dbd0f1db255abf5186ff19cc6fb54f6b0a1 | f4f5803858acada2b252bb0860df8f828d65af15 | refs/heads/master | 2020-12-02T01:49:55.547899 | 2019-12-27T04:41:42 | 2019-12-27T04:41:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,782 | py | from pbge.plots import Plot, PlotState
import game.content.ghwaypoints
import game.content.ghterrain
import gears
import pbge
from game import teams,ghdialogue
from game.ghdialogue import context
import random
from pbge.dialogue import ContextTag,Offer
from game.content.ghplots import dd_main
import game.content.plotutility
import game.content.gharchitecture
# ***************************************
# *** PLACE_LOCAL_REPRESENTATIVES ***
# ***************************************
#
# FACTION: The faction to which the new NPCs will belong.
class PlaceACommander( Plot ):
LABEL = "PLACE_LOCAL_REPRESENTATIVES"
def custom_init( self, nart ):
myscene = self.elements["LOCALE"]
myfac = self.elements["FACTION"]
destscene = self.seek_element(nart, "_DEST", self._is_best_scene, scope=myscene, must_find=False)
if not destscene:
destscene = self.seek_element(nart, "_DEST", self._is_good_scene, scope=myscene)
myjob = myfac.choose_job(gears.tags.Commander)
mynpc = self.register_element("NPC",gears.selector.random_character(rank=random.randint(50,80),job=myjob,local_tags=myscene.attributes,combatant=True,faction=myfac),dident="_DEST")
destscene.local_teams[mynpc] = destscene.civilian_team
return True
def _is_best_scene(self,nart,candidate):
return (isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes and
gears.tags.SCENE_BASE in candidate.attributes and
candidate.faction and nart.camp.are_ally_factions(candidate.faction,self.elements["FACTION"]))
def _is_good_scene(self,nart,candidate):
return isinstance(candidate,pbge.scenes.Scene) and gears.tags.SCENE_PUBLIC in candidate.attributes
| [
"pyrrho12@yahoo.ca"
] | pyrrho12@yahoo.ca |
8896119fe00b5a5eb73ea77bb0d1d89d08da0cdd | 4cc4d9d488939dde56fda368faf58d8564047673 | /tools/test/connectivity/acts/framework/acts/test_utils/wifi/wifi_aware_const.py | 7e495f5e728eb747a7ff2f108202c2ea618ac2a7 | [] | no_license | Tosotada/android-8.0.0_r4 | 24b3e4590c9c0b6c19f06127a61320061e527685 | 7b2a348b53815c068a960fe7243b9dc9ba144fa6 | refs/heads/master | 2020-04-01T11:39:03.926512 | 2017-08-28T16:26:25 | 2017-08-28T16:26:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,879 | py | #!/usr/bin/env python3.4
#
# Copyright 2016 - Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######################################################
# Broadcast events
######################################################
BROADCAST_WIFI_AWARE_AVAILABLE = "WifiAwareAvailable"
BROADCAST_WIFI_AWARE_NOT_AVAILABLE = "WifiAwareNotAvailable"
######################################################
# ConfigRequest keys
######################################################
CONFIG_KEY_5G_BAND = "Support5gBand"
CONFIG_KEY_MASTER_PREF = "MasterPreference"
CONFIG_KEY_CLUSTER_LOW = "ClusterLow"
CONFIG_KEY_CLUSTER_HIGH = "ClusterHigh"
CONFIG_KEY_ENABLE_IDEN_CB = "EnableIdentityChangeCallback"
######################################################
# PublishConfig keys
######################################################
PUBLISH_KEY_SERVICE_NAME = "ServiceName"
PUBLISH_KEY_SSI = "ServiceSpecificInfo"
PUBLISH_KEY_MATCH_FILTER = "MatchFilter"
PUBLISH_KEY_TYPE = "PublishType"
PUBLISH_KEY_COUNT = "PublishCount"
PUBLISH_KEY_TTL = "TtlSec"
PUBLISH_KEY_TERM_CB_ENABLED = "TerminateNotificationEnabled"
######################################################
# SubscribeConfig keys
######################################################
SUBSCRIBE_KEY_SERVICE_NAME = "ServiceName"
SUBSCRIBE_KEY_SSI = "ServiceSpecificInfo"
SUBSCRIBE_KEY_MATCH_FILTER = "MatchFilter"
SUBSCRIBE_KEY_TYPE = "SubscribeType"
SUBSCRIBE_KEY_COUNT = "SubscribeCount"
SUBSCRIBE_KEY_TTL = "TtlSec"
SUBSCRIBE_KEY_STYLE = "MatchStyle"
SUBSCRIBE_KEY_ENABLE_TERM_CB = "EnableTerminateNotification"
######################################################
# WifiAwareAttachCallback events
######################################################
EVENT_CB_ON_ATTACHED = "WifiAwareOnAttached"
EVENT_CB_ON_ATTACH_FAILED = "WifiAwareOnAttachFailed"
######################################################
# WifiAwareIdentityChangedListener events
######################################################
EVENT_CB_ON_IDENTITY_CHANGED = "WifiAwareOnIdentityChanged"
# WifiAwareAttachCallback & WifiAwareIdentityChangedListener events keys
EVENT_CB_KEY_REASON = "reason"
EVENT_CB_KEY_MAC = "mac"
EVENT_CB_KEY_LATENCY_MS = "latencyMs"
EVENT_CB_KEY_TIMESTAMP_MS = "timestampMs"
######################################################
# WifiAwareDiscoverySessionCallback events
######################################################
SESSION_CB_ON_PUBLISH_STARTED = "WifiAwareSessionOnPublishStarted"
SESSION_CB_ON_SUBSCRIBE_STARTED = "WifiAwareSessionOnSubscribeStarted"
SESSION_CB_ON_SESSION_CONFIG_UPDATED = "WifiAwareSessionOnSessionConfigUpdated"
SESSION_CB_ON_SESSION_CONFIG_FAILED = "WifiAwareSessionOnSessionConfigFailed"
SESSION_CB_ON_SESSION_TERMINATED = "WifiAwareSessionOnSessionTerminated"
SESSION_CB_ON_SERVICE_DISCOVERED = "WifiAwareSessionOnServiceDiscovered"
SESSION_CB_ON_MESSAGE_SENT = "WifiAwareSessionOnMessageSent"
SESSION_CB_ON_MESSAGE_SEND_FAILED = "WifiAwareSessionOnMessageSendFailed"
SESSION_CB_ON_MESSAGE_RECEIVED = "WifiAwareSessionOnMessageReceived"
# WifiAwareDiscoverySessionCallback events keys
SESSION_CB_KEY_CB_ID = "callbackId"
SESSION_CB_KEY_SESSION_ID = "sessionId"
SESSION_CB_KEY_REASON = "reason"
SESSION_CB_KEY_PEER_ID = "peerId"
SESSION_CB_KEY_SERVICE_SPECIFIC_INFO = "serviceSpecificInfo"
SESSION_CB_KEY_MATCH_FILTER = "matchFilter"
SESSION_CB_KEY_MESSAGE = "message"
SESSION_CB_KEY_MESSAGE_ID = "messageId"
SESSION_CB_KEY_MESSAGE_AS_STRING = "messageAsString"
SESSION_CB_KEY_LATENCY_MS = "latencyMs"
SESSION_CB_KEY_TIMESTAMP_MS = "timestampMs"
######################################################
# WifiAwareRangingListener events (RttManager.RttListener)
######################################################
RTT_LISTENER_CB_ON_SUCCESS = "WifiAwareRangingListenerOnSuccess"
RTT_LISTENER_CB_ON_FAILURE = "WifiAwareRangingListenerOnFailure"
RTT_LISTENER_CB_ON_ABORT = "WifiAwareRangingListenerOnAborted"
# WifiAwareRangingListener events (RttManager.RttListener) keys
RTT_LISTENER_CB_KEY_CB_ID = "callbackId"
RTT_LISTENER_CB_KEY_SESSION_ID = "sessionId"
RTT_LISTENER_CB_KEY_RESULTS = "Results"
RTT_LISTENER_CB_KEY_REASON = "reason"
RTT_LISTENER_CB_KEY_DESCRIPTION = "description"
######################################################
# Aware Data-Path Constants
DATA_PATH_INITIATOR = 0
DATA_PATH_RESPONDER = 1
# Maximum send retry
MAX_TX_RETRIES = 5
| [
"xdtianyu@gmail.com"
] | xdtianyu@gmail.com |
7403b067ddf760a7f9353236027c975fdf5d4969 | acf7457d3a799cb9bff12686d2d616688bcd4b5b | /packages/python/plotly/plotly/validators/scattersmith/marker/colorbar/_tickformatstops.py | 3c83565dd95aa07a025b6ec918b72ea323b0e756 | [
"MIT"
] | permissive | plotly/plotly.py | f4f61639f08160f16195efc95b5901dc5a937346 | 975a704074f01c078e0fdfa32bdf17130bf89e69 | refs/heads/master | 2023-09-06T06:15:08.340035 | 2023-08-24T12:28:14 | 2023-08-24T12:28:14 | 14,579,099 | 14,751 | 2,989 | MIT | 2023-09-08T19:55:32 | 2013-11-21T05:53:08 | Python | UTF-8 | Python | false | false | 2,328 | py | import _plotly_utils.basevalidators
class TickformatstopsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self,
plotly_name="tickformatstops",
parent_name="scattersmith.marker.colorbar",
**kwargs,
):
super(TickformatstopsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
data_docs=kwargs.pop(
"data_docs",
"""
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
""",
),
**kwargs,
)
| [
"nicolas@plot.ly"
] | nicolas@plot.ly |
ee8e7f1b1e741068445846bdbc8b554b7ccd40b0 | bc3bd7601fa427d638f872b4ddfdebe4ce23a25c | /test/test_pipeline_commit_target.py | a3f61fca40255510e6c3d6c6f52192f34e4e8ec4 | [] | no_license | magmax/bitbucket-openapi | 59ef55ab3aa42940c8211d3ecd16ef7d6fc74c21 | 836ae762735ae5b1ececcee5287fa271d7d8de5b | refs/heads/master | 2020-07-28T16:10:32.736169 | 2019-09-19T04:17:09 | 2019-09-19T04:17:09 | 209,460,884 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | # coding: utf-8
"""
Bitbucket API
Code against the Bitbucket API to automate simple tasks, embed Bitbucket data into your own site, build mobile or desktop apps, or even add custom UI add-ons into Bitbucket itself using the Connect framework. # noqa: E501
The version of the OpenAPI document: 2.0
Contact: support@bitbucket.org
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import bitbucketopenapi
from bitbucketopenapi.models.pipeline_commit_target import PipelineCommitTarget # noqa: E501
from bitbucketopenapi.rest import ApiException
class TestPipelineCommitTarget(unittest.TestCase):
    """Unit-test stubs for the PipelineCommitTarget model."""

    def setUp(self):
        # No fixtures are needed yet for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testPipelineCommitTarget(self):
        """Test PipelineCommitTarget"""
        # FIXME: construct object with mandatory attributes with example values
        # model = bitbucketopenapi.models.pipeline_commit_target.PipelineCommitTarget()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"miguelangel.garcia@gmail.com"
] | miguelangel.garcia@gmail.com |
6c415c8dffe9dce42b4fff8d874738aef90b4765 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_None/trend_MovingMedian/cycle_30/ar_12/test_artificial_32_None_MovingMedian_30_12_20.py | 6aabf6a695b31d651117eeef2bd4686cf209b664 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 268 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
# Generate and exercise one artificial time series: 32 daily samples,
# MovingMedian trend, 30-step cycle, no transform, zero noise (sigma=0.0),
# 20 exogenous variables and an AR(12) component.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 30, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 12);
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
7a83add2ea43ac53f4b26817052a7cfb1eaf3e16 | b6689876963fe86369a43f59ccb50b3761024c93 | /example/example/spiders/movie.py | 7773a3829a1fcadde2d2b299193b7b28b5d6f5f2 | [] | no_license | Gerapy/GerapyRabbitMQ | e459e16bbc9bd4cee7eb5a250f64e4a2406b7641 | 5b74fee6990b2b9fcb590b3cf12e0c668be65130 | refs/heads/master | 2022-11-22T13:54:11.167461 | 2020-07-25T19:53:31 | 2020-07-25T19:53:31 | 282,440,431 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,617 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy import Request, signals
from example.items import MovieItem
import logging
logger = logging.getLogger(__name__)
class MovieSpider(scrapy.Spider):
    """Crawl the static movie demo site and scrape every movie's details."""
    name = 'movie'
    allowed_domains = ['static1.scrape.center']
    base_url = 'https://static1.scrape.center'
    max_page = 10

    def start_requests(self):
        """Seed one index request per listing page (1..max_page)."""
        for page in range(1, self.max_page + 1):
            url = f'{self.base_url}/page/{page}'
            logger.debug('start url %s', url)
            yield Request(url, callback=self.parse_index, priority=10)

    def parse_index(self, response):
        """Follow the detail link of every movie listed on an index page."""
        for entry in response.css('.item'):
            href = entry.css('.name::attr(href)').extract_first()
            detail_url = response.urljoin(href)
            logger.info('detail url %s', detail_url)
            yield Request(detail_url, callback=self.parse_detail)

    def parse_detail(self, response):
        """Extract name, categories and score from a movie detail page."""
        raw_categories = response.css('.categories button span::text').extract()
        raw_score = response.css('.score::text').extract_first()
        yield MovieItem(
            name=response.css('h2::text').extract_first(),
            categories=[c.strip() for c in raw_categories] if raw_categories else [],
            score=raw_score.strip() if raw_score else None,
        )
| [
"cqc@cuiqingcai.com"
] | cqc@cuiqingcai.com |
2cf0a7f3558a4c2f4dcdf6eb7a8e24f1ff75211e | 4c601eaa346e660c296e270cc2d79aea9a3721fe | /homeassistant/components/smappee/const.py | 4bc370e9c093317433cc230c942a6a2731c7efdc | [
"Apache-2.0"
] | permissive | basnijholt/home-assistant | f55110af9ff602274c0a929c7298ef97a0ef282f | ba55b4b8338a2dc0ba3f1d750efea49d86571291 | refs/heads/dev | 2023-01-21T11:53:52.621353 | 2020-08-08T15:03:06 | 2020-08-08T15:03:06 | 220,313,680 | 5 | 1 | Apache-2.0 | 2023-01-13T06:04:49 | 2019-11-07T19:29:54 | Python | UTF-8 | Python | false | false | 718 | py | """Constants for the Smappee integration."""
from datetime import timedelta

# Integration identifiers.
DOMAIN = "smappee"
DATA_CLIENT = "smappee_data"
BASE = "BASE"
# Home Assistant platforms set up by this integration.
SMAPPEE_PLATFORMS = ["binary_sensor", "sensor", "switch"]
# Minimum interval between data updates.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5)
# OAuth2 authorize endpoints per Smappee environment.
AUTHORIZE_URL = {
    "PRODUCTION": "https://app1pub.smappee.net/dev/v1/oauth2/authorize",
    "ACCEPTANCE": "https://farm2pub.smappee.net/dev/v1/oauth2/authorize",
    "DEVELOPMENT": "https://farm3pub.smappee.net/dev/v1/oauth2/authorize",
}
# OAuth2 token endpoints per Smappee environment.
TOKEN_URL = {
    "PRODUCTION": "https://app1pub.smappee.net/dev/v3/oauth2/token",
    "ACCEPTANCE": "https://farm2pub.smappee.net/dev/v3/oauth2/token",
    "DEVELOPMENT": "https://farm3pub.smappee.net/dev/v3/oauth2/token",
}
| [
"noreply@github.com"
] | basnijholt.noreply@github.com |
1ef0123300e1c65a8bab6f04340e2c2dd0963305 | 602a4e86499841fbae43d84fc92908c533106aea | /userprofile/urls.py | 0954c63e61d4faeb63d48f74ca5c63156a7bd407 | [] | no_license | vden/TsoguNG | b187ccf1bef387417ec73467c51458d6f1443239 | f8d5e7ab9d85559aa163c232c9f28a24a2b7c2a4 | refs/heads/master | 2021-01-02T08:52:03.914218 | 2011-04-26T07:01:57 | 2011-04-26T07:01:57 | 1,663,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # -*- coding: utf-8 -*-
from django.conf.urls.defaults import *

# URL routes for the user-profile app; view names resolve against
# tsogung.userprofile.views.
urlpatterns = patterns('tsogung.userprofile.views',
    (r'^$', 'index'),
    (r'^update/$', 'update'),
    (r'^edit/$', 'edit'),
    (r'^send_email_confirm/$', 'send_email_confirm'),
    (r'^student_status_confirm/$', 'student_status_confirm'),
    # Confirmation link carrying the numeric user id and a hex code.
    (r'^email_confirm/(?P<user_id>\d+)/(?P<code>[0-9a-fA-F\-]+)/$','email_confirm'),
# (r'token/(?P<token>[0-9a-fA-F\-]+)/$','token'),
)
| [
"denis.voskvitsov@gmail.com"
] | denis.voskvitsov@gmail.com |
9d0b45d1670209903c9fd17a33cb6369a5df7a3d | f645ea40794ec13a1ee6eb6453b87260d0e835d6 | /adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_boardtest/boardtest_led.py | 6820db80a0dfb40e832317329ca4b3ef7e346955 | [
"MIT"
] | permissive | bergius/pico_rgb_keypad_hid | e0e1751859267136242411176af0d57da9e0e762 | a685c5aacd1e31c7a1bc9f50c80cce5724291bc2 | refs/heads/main | 2023-04-29T10:07:50.560920 | 2021-05-19T08:27:42 | 2021-05-19T08:27:42 | 368,795,603 | 0 | 0 | null | 2021-05-19T08:22:57 | 2021-05-19T08:22:56 | null | UTF-8 | Python | false | false | 3,574 | py | # SPDX-FileCopyrightText: 2018 Shawn Hymel for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_boardtest.boardtest_led`
====================================================
Toggles all available onboard LEDs. You will need to manually verify their
operation by watching them.
Run this script as its own main.py to individually run the test, or compile
with mpy-cross and call from separate test script.
* Author(s): Shawn Hymel for Adafruit Industries
Implementation Notes
--------------------
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import time
import board
import digitalio
import supervisor
__version__ = "1.2.4"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BoardTest.git"
# Constants
# Blink timing for the LED test.
LED_ON_DELAY_TIME = 0.2  # Seconds
LED_OFF_DELAY_TIME = 0.2  # Seconds
# Pin names assumed to be wired to onboard LEDs.
LED_PIN_NAMES = ["L", "LED", "RED_LED", "YELLOW_LED", "GREEN_LED", "BLUE_LED"]
# Test result strings
PASS = "PASS"
FAIL = "FAIL"
NA = "N/A"
# Toggle IO pins while waiting for answer
def _toggle_wait(led_pins):
    """Blink each pin in *led_pins* in turn until serial input arrives.

    Returns True if the user types "y" on the serial console, False for any
    other input. Blocks until some input is received.
    """
    timestamp = time.monotonic()
    led_state = False
    print("Are the pins listed above toggling? [y/n]")
    while True:
        # Cycle through each pin in the list
        for pin in led_pins:
            # Claim the pin as a digital output for the duration of one blink.
            led = digitalio.DigitalInOut(getattr(board, pin))
            led.direction = digitalio.Direction.OUTPUT
            blinking = True
            # Blink each LED once while looking for input; the pin is
            # released with deinit() once its on-phase completes.
            while blinking:
                if led_state:
                    if time.monotonic() > timestamp + LED_ON_DELAY_TIME:
                        led_state = False
                        led.value = led_state
                        led.deinit()
                        blinking = False
                        timestamp = time.monotonic()
                else:
                    if time.monotonic() > timestamp + LED_OFF_DELAY_TIME:
                        led_state = True
                        led.value = led_state
                        timestamp = time.monotonic()
                # Look for user input; any answer other than "y" fails.
                if supervisor.runtime.serial_bytes_available:
                    answer = input()
                    if answer == "y":
                        return True
                    return False
def run_test(pins):
    """
    Toggle the onboard LED(s) on and off.

    :param list[str] pins: list of pins to run the test on
    :return: tuple(str, list[str]): test result followed by list of pins tested
    """
    # Keep only the pins whose names look like onboard LEDs.
    led_pins = list(set(pins) & set(LED_PIN_NAMES))
    if not led_pins:
        # No candidate LED pins on this board.
        print("No LED pins found")
        return NA, []
    # Report which LED pins were found.
    print("LEDs found:", end=" ")
    print(" ".join(led_pins), end=" ")
    print("\n")
    # Blink the LEDs until the user confirms or denies the test.
    verified = _toggle_wait(led_pins)
    return (PASS if verified else FAIL), led_pins
def _main():
    """Discover all board pins, run the LED test and print the outcome."""
    pins = list(dir(board))
    print()
    print("All pins found:", end=" ")
    print(" ".join(pins), end=" ")
    print("\n")
    status, tested_pins = run_test(pins)
    print()
    print(status)
    print("Pins tested: " + str(tested_pins))


# Execute only when run directly as main.py or code.py
if __name__ == "__main__":
    _main()
| [
"quintin.balsdon@zuhlke.com"
] | quintin.balsdon@zuhlke.com |
291a53b1bba81abf57b9cf2781c245b222b7f1cf | 17c280ade4159d4d8d5a48d16ba3989470eb3f46 | /17/mc/ExoDiBosonResonances/EDBRTreeMaker/test/QCDHT500to700.py | 6613e3ae8a3039e0fcfe67fef5e08bb8ce565a7b | [] | no_license | chengchen1993/run2_ntuple | 798ff18489ff5185dadf3d1456a4462e1dbff429 | c16c2b203c05a3eb77c769f63a0bcdf8b583708d | refs/heads/master | 2021-06-25T18:27:08.534795 | 2021-03-15T06:08:01 | 2021-03-15T06:08:01 | 212,079,804 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,367 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration for the QCD HT500-700 MC sample (2017, MiniAODv2).
config = Configuration()

config.section_("General")
config.General.requestName = 'QCDHT500to700'
config.General.transferLogs = True

config.section_("JobType")
config.JobType.pluginName='Analysis'
config.JobType.sendExternalFolder=True# = 'Analysis'
# Jet-energy-correction text files and the prefiring map shipped to the jobs.
config.JobType.inputFiles = ['L1PrefiringMaps_new.root','Fall17_17Nov2017_V32_MC_L1FastJet_AK4PFchs.txt','Fall17_17Nov2017_V32_MC_L2Relative_AK4PFchs.txt','Fall17_17Nov2017_V32_MC_L3Absolute_AK4PFchs.txt','Fall17_17Nov2017_V32_MC_L1FastJet_AK8PFchs.txt','Fall17_17Nov2017_V32_MC_L2Relative_AK8PFchs.txt','Fall17_17Nov2017_V32_MC_L3Absolute_AK8PFchs.txt','Fall17_17Nov2017_V32_MC_L1FastJet_AK8PFPuppi.txt','Fall17_17Nov2017_V32_MC_L2Relative_AK8PFPuppi.txt','Fall17_17Nov2017_V32_MC_L3Absolute_AK8PFPuppi.txt','Fall17_17Nov2017_V32_MC_L1FastJet_AK4PFPuppi.txt','Fall17_17Nov2017_V32_MC_L2Relative_AK4PFPuppi.txt','Fall17_17Nov2017_V32_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['L1PrefiringMaps_new.root','PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True

config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/QCD_HT500to700_TuneCP5_13TeV-madgraph-pythia8/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v2/MINIAODSIM'
config.Data.inputDBS = 'global'
#config.Data.inputDBS = 'phys03'
# Split by files, 2 files per job, processing the whole dataset (-1).
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =2
config.Data.totalUnits = -1
# This string is used to construct the output dataset name
name='WWW'
steam_dir='chench'
config.Data.outLFNDirBase='/store/user/chench/'#='/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/'+steam_dir+'/'+name+'/'
config.Data.publication = False
config.Data.outputDatasetTag = 'QCDHT500to700'

config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"c.chen@cern.ch"
] | c.chen@cern.ch |
0129bc9ef7424e63dff4a587bec620a5178598de | 2a06c435c4eaac1f10a3a60628852b2938903eba | /explorer/reduce.py | 30d3d452e47d630dc5e8a3fbde4aa229d941db5a | [] | no_license | brianhouse/animas | 9b727fe3c23e35dad1d243680226bb066ce5cff9 | 6611aaa69c85be66618cb115b89c2508928ee8a4 | refs/heads/master | 2020-05-29T14:40:14.897399 | 2017-10-14T14:41:54 | 2017-10-14T14:41:54 | 64,967,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | #!/usr/bin/env python3
# Dimensionality-reduction explorer: load sampled points, embed them in 2-D,
# cluster the embedding with k-means, and plot points plus centroids.
import json, time
import numpy as np
import signal_processing as sp
from sklearn import manifold, decomposition, cluster
from housepy import config, log, util, chart

# Earlier input snapshots kept for reference:
# points, rates = util.load("data/1470432105.pkl") # last week
# points, rates = util.load("data/1470593687.pkl") # last two weeks
# points, rates = util.load("data/1470681705.pkl") # smoothed!
# points, rates = util.load("data/1470681860.pkl") # not smoothed!
points, rates = util.load("data/last_snap.pkl")
log.info("INPUT: %s POINTS, %s DIMENSIONS" % points.shape)
# Alternative embeddings tried during exploration (kept for reference):
# points = manifold.Isomap().fit_transform(points)
# points = manifold.LocallyLinearEmbedding(method="modified").fit_transform(points)
# points = manifold.SpectralEmbedding().fit_transform(points)
# points = manifold.MDS().fit_transform(points)
# points = manifold.TSNE(n_iter=2000).fit_transform(points)
# points = decomposition.PCA(n_components=2).fit_transform(points)
# points = manifold.TSNE().fit_transform(points)
# Active choice: plain PCA down to 2 components.
points = decomposition.PCA(n_components=2).fit_transform(points)
log.info("OUTPUT: %s POINTS, %s DIMENSIONS" % points.shape)
# labels = cluster.DBSCAN(eps=0.1, min_samples=5).fit_predict(points)
clusterer = cluster.KMeans(n_clusters=8)
labels = clusterer.fit_predict(points)
centroids = clusterer.cluster_centers_
# Shift labels so they start at 0 even if the clusterer returned negatives.
labels += abs(min(labels))
max_label = max(labels)
log.info("CENTROIDS\n%s" % centroids)
# Normalize centroids into the same [0, 1] range as the projected points.
centroids = np.column_stack((sp.normalize(centroids[:,0], np.min(points[:,0]), np.max(points[:,0])), sp.normalize(centroids[:,1], np.min(points[:,1]), np.max(points[:,1]))))
points = np.column_stack((sp.normalize(points[:,0], np.min(points[:,0]), np.max(points[:,0])), sp.normalize(points[:,1], np.min(points[:,1]), np.max(points[:,1]))))
chart.plot(points, sample_axis=True, scatter=False, c=(0., 0., 1., 1.), linewidth=2)
chart.plot(centroids, sample_axis=True, scatter=True, c=(1., 0., 0., 1.), linewidth=0, s=100)
chart.show("charts/")
| [
"brian.house@gmail.com"
] | brian.house@gmail.com |
d086c1cdb2de19c5b6f35ea3f2a0024a6a9e8b48 | a79341b8b91f0f847a2125d5ae13a7cdffc44da3 | /model/imdb.py | f56f427e2abb6bc026d8b7b3d2bdf1a6d8794ef7 | [] | no_license | edmondchuc/tele-tracker | bccf1925cc692485de57d34e81fc0d87e1fd7b18 | 721056e3cb132f6f8d9ced35f30ba51ffb03341b | refs/heads/master | 2020-04-22T04:41:48.106304 | 2019-02-11T13:20:30 | 2019-02-11T13:20:30 | 170,132,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,369 | py | import requests
from requests.sessions import merge_setting, session
from config import Config
class InvalidParamsException(Exception):
    """
    Raised when the query string arguments (parameters) sent to the OMDb API
    are invalid, or when the API itself reports an error response.
    """
    pass
class IMDb():
    """Static helpers for fetching OMDb API resources with a MongoDB cache.

    This class is a pure namespace and must not be instantiated. Responses
    from the OMDb web service are cached in the ``imdb`` MongoDB database
    (collections ``instances`` and ``registers``) so that repeated queries
    are served locally instead of hitting the API again.
    """

    def __init__(self):
        # Guard against direct instantiation; the class only groups statics.
        if type(self) is IMDb:
            message = 'This class "{}" should not be instantiated.'.format(IMDb.__str__(self))
            raise Exception(message)

    @staticmethod
    def remove_mongo_id(d):
        """
        Remove the MongoDB dict key '_id' storing the MongoDB ObjectId.

        :param d: A dict of some IMDb resource (modified in place).
        :return: The same dict without the key '_id'.
        :rtype: dict
        """
        d.pop('_id', None)
        return d

    @staticmethod
    def query(url, params=None):
        """Dispatch on the query arguments: 'i' fetches a single instance by
        imdbID, 's' fetches a search-result register."""
        if 'i' in params:
            return IMDb._get_imdb_instance(url, params)
        if 's' in params:
            return IMDb._get_imdb_register(url, params)
        raise InvalidParamsException('The request does not contain arguments for a title (s) or IMDb ID (i).')

    @staticmethod
    def _build_url_query_id(url, params):
        """
        Combine *url* and *params* into one URL string used as the cache key
        for search registers.

        :param url: The OMDb API URL.
        :param params: The query string arguments.
        :return: A URL with the encoded query string arguments appended.
        :rtype: str
        """
        identifier = url
        if identifier[-1] == '/':
            # Drop a single trailing slash for id consistency.
            identifier = identifier[:-1]
        for key, value in params.items():
            joiner = '&' if '?' in identifier else '?'
            identifier = identifier + joiner + key + '=' + value
        return identifier

    @staticmethod
    def _get_imdb_register(url, params):
        """
        Retrieve an IMDb register of instances, served from MongoDB when
        cached, otherwise fetched from the OMDb API and cached.

        :param url: The OMDb API's URL.
        :param params: A dict of the query string arguments.
        :return: An IMDb register of instances.
        :rtype: dict
        :raises InvalidParamsException: when the API reports an error.
        """
        registers = Config.mongo_client.imdb.registers
        # The cache key is the full URL + query string.
        url_id = IMDb._build_url_query_id(url, params)
        cached = registers.find_one({'url_id': url_id})
        if cached is not None:
            return IMDb.remove_mongo_id(cached)
        response = requests.get(url, params=params).json()
        if response['Response'] != 'True':
            # TODO: store the failed id in MongoDB
            raise InvalidParamsException(response['Error'])
        response['url_id'] = url_id
        registers.insert_one(response)
        # insert_one() adds an '_id' key to the dict; strip it before returning.
        return IMDb.remove_mongo_id(response)

    @staticmethod
    def _get_imdb_instance(url, params):
        """
        Retrieve a single IMDb instance by its unique imdbID, served from
        MongoDB when cached, otherwise fetched from the OMDb API and cached.

        :param url: The OMDb API's URL.
        :param params: A dict of the query string arguments.
        :return: The IMDb instance's metadata.
        :rtype: dict
        :raises InvalidParamsException: when the API reports an error.
        """
        instances = Config.mongo_client.imdb.instances
        cached = instances.find_one({'imdbID': params['i']})
        if cached is not None:
            return IMDb.remove_mongo_id(cached)
        response = requests.get(url, params=params).json()
        if response['Response'] != 'True':
            # TODO: store the failed id in MongoDB
            raise InvalidParamsException(response['Error'])
        instances.insert_one(response)
        # insert_one() adds an '_id' key to the dict; strip it before returning.
        return IMDb.remove_mongo_id(response)
"edmond.chuc@outlook.com"
] | edmond.chuc@outlook.com |
4430a39cd5cbf3a5c60284c781fb3c40b00afa39 | 7b102f9c8f2e3f9240090d1d67af50333a2ba98d | /gbd_2019/cod_code/cancer/c_models/b_cod_mortality/project_incidence.py | 488a499ddd865a37aee8129f3e1b698f06f0a65e | [] | no_license | Nermin-Ghith/ihme-modeling | 9c8ec56b249cb0c417361102724fef1e6e0bcebd | 746ea5fb76a9c049c37a8c15aa089c041a90a6d5 | refs/heads/main | 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,651 | py |
'''
Description: Applies the IHME population to the incidence rate to create a
projected estimate matching the IHME coverage population
How To Use:
'''
from cancer_estimation.py_utils import (
common_utils as utils,
data_format_tools as dft,
modeled_locations
)
from cancer_estimation.a_inputs.a_mi_registry import (
populations as pop,
mi_dataset
)
from cancer_estimation.b_staging import staging_functions
from cancer_estimation.c_models import modeling_functions
from cancer_estimation._database import cdb_utils as cdb
import pandas as pd
def get_uid_vars():
    ''' Return the list of column names treated as unique identifiers
        in the combine_incidence process.
    '''
    return ['location_id', 'year_id', 'sex_id', 'age_group_id', 'acause']
def project_to_special_locations(df):
    ''' Use CoD team model covariates to project data onto different
        location_ids (e.g. split India data into urban and rural).
        Currently a stub: returns the input unchanged.
    '''
    print("project_to_special_locations is INCOMPLETE")
    return df
def supplement_national_estimates(df):
    ''' Combine subnational estimates into national estimates, then remove
        redundancy. These national aggregates are used for validation only.
    Args:
        df (DataFrame): incidence data containing 'national_registry',
            'is_subnational', 'location_id', 'country_id', 'cases' and 'pop'.
    Returns:
        DataFrame: input data plus aggregated national rows, deduplicated so
        pre-existing entries are preferred over the new aggregates.
    '''
    uid_cols = get_uid_vars()
    # Subset to data used to create national projections: subnational rows
    # that are not already a national registry; India (location_id 354) is
    # excluded here.
    for_natnl_projections = (~df['national_registry'].eq(1) &
                             df['is_subnational'].eq(1) & ~df['location_id'].eq(354))
    natnl_est = df.loc[for_natnl_projections, :].copy()
    # Re-label the subnational rows with their country id so they aggregate
    # to a national total. (Fixed: .at only accepts scalar labels, so the
    # original .at[:, 'location_id'] slice assignment raised; use .loc.)
    natnl_est.loc[:, 'location_id'] = natnl_est['country_id']
    natnl_est = staging_functions.combine_uid_entries(
        natnl_est,
        uid_cols=uid_cols + ['is_subnational', 'national_registry'],
        metric_cols=['cases', 'pop'])
    # Preferentially keep existing data for the same uid: flag the original
    # rows, sort them first, then drop the aggregated duplicates.
    df.loc[:, 'existing'] = 1
    df = df.append(natnl_est)
    # Fixed: sort_values is not in-place; the sorted result was previously
    # discarded, making the drop_duplicates keep an arbitrary row.
    df = df.sort_values(uid_cols + ['existing'], ascending=False).reset_index(drop=True)
    df.drop_duplicates(subset=uid_cols, keep="first", inplace=True)
    return df
def project_ihme_location_estimates(df):
    ''' For each IHME location_id, project estimates based on the input
        cancer rates (including generation of national estimates from
        subnational estimates if subnational estimates are not present).
    Args:
        df (DataFrame): incidence data with 'cases' and 'pop' columns.
    Returns:
        DataFrame: data with per-location rates merged with the SDI map.
    '''
    df = modeled_locations.add_country_id(df)
    df = modeled_locations.add_subnational_status(df)
    df = supplement_national_estimates(df)
    # Convert counts to rates before swapping in the IHME population.
    # (Fixed: .at only accepts scalar labels, so the original
    # .at[:, 'rate'] slice assignment raised; use .loc.)
    df.loc[:, 'rate'] = df['cases'] / df['pop']
    del df['pop']
    df = df.merge(modeling_functions.load_sdi_map())
    ihme_pop = pop.load_raw_ihme_pop(df.location_id.unique())
    ihme_pop.rename(columns={'population': 'pop'}, inplace=True)
    final_df = df.merge(ihme_pop)
    # Re-project case counts onto the IHME population for high-SDI (quintile
    # 5) or subnational locations.
    can_project = (final_df['sdi_quintile'].eq(5) | final_df['is_subnational'].eq(1))
    final_df.loc[can_project, 'cases'] = final_df['rate'] * final_df['pop']
    assert len(final_df) == len(df), "Error during estimate projection"
    # NOTE(review): final_df carries the projected 'cases' but df is what is
    # returned, matching the original behavior — confirm whether final_df
    # should be returned instead.
    return df
def project_data():
    ''' Run the pipeline that projects previously-selected incidence data.
        Requires incidence data that are unique by
        location_id-year-sex-age-acause.
    '''
    source_path = utils.get_path("combined_incidence", process="cod_mortality")
    dest_path = utils.get_path("projected_incidence", process="cod_mortality")
    incidence = pd.read_csv(source_path)
    incidence = project_to_special_locations(incidence)
    incidence = project_ihme_location_estimates(incidence)
    incidence.to_csv(dest_path, index=False)
    print("incidence data projected")


if __name__ == "__main__":
    project_data()
"cheth@uw.edu"
] | cheth@uw.edu |
09b7d08abb9002d9c6298b38ac70142412ca294d | 787344a140b1f1ca05277b44dbf72fda3fa25cda | /bin/clean_assignment_db.py | 53e9c5c2e96b8ee06bdf1ab257e7212f9250e7db | [
"MIT"
] | permissive | poldrack/r-autograder | 6785b40dbd3011dfe5fb47c134c66a8d18985d21 | 58ab96bed7456aef2cec9e01ff6eff23daec0425 | refs/heads/master | 2020-12-29T22:57:53.816305 | 2020-03-05T22:33:24 | 2020-03-05T22:33:24 | 238,765,808 | 3 | 0 | null | 2020-02-06T19:25:53 | 2020-02-06T19:20:10 | Python | UTF-8 | Python | false | false | 1,072 | py | #!/usr/bin/env python3
"""
clear out assignment db for particular week
using config.json
"""
from rautograder.Database import Database
import json
import os
import argparse
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Clear assignment database')
    parser.add_argument('-t', '--test_mode',
                        help='test mode',
                        action='store_true')
    parser.add_argument('-w', '--week',
                        type=int,
                        help='week number')
    parser.add_argument('-c', '--config_file',
                        help='json config',
                        default='config.json')
    args = parser.parse_args()

    # Values from the JSON config file (if present) override/extend the
    # command-line arguments, so 'week' may also be supplied via the config.
    if os.path.exists(args.config_file):
        print(f"loading config from {args.config_file}")
        with open(args.config_file, 'r') as f:
            config = json.load(f)
        for v in config:
            setattr(args, v, config[v])

    # Fail with a clear usage error instead of a TypeError in the %d format
    # below when no week was provided on the command line or in the config.
    if args.week is None:
        parser.error('a week number is required (use --week or set "week" in the config file)')

    print('Clearing database for week %d' % args.week)
    db = Database()
    db.clean_assignment_db(args.week)
"poldrack@gmail.com"
] | poldrack@gmail.com |
a027fe5984f77b3f36cc37ec7591e76ff46187d6 | c404dce8809d1a1d9828a5c92d3eede96bad5487 | /igdiscover/dbdiff.py | aada219ffcee30656be28832e6312a2d27c81644 | [
"MIT"
] | permissive | mateuszatki/IgDiscover | 8b560297d64e5b9e2c65408c43214cc78f235558 | b6f1bdcdf75ddae2af154d41bb21fedff60115a1 | refs/heads/master | 2020-03-29T08:59:11.699494 | 2018-11-19T21:51:50 | 2018-11-19T21:51:51 | 149,736,075 | 0 | 0 | null | 2018-09-21T08:46:21 | 2018-09-21T08:46:21 | null | UTF-8 | Python | false | false | 6,045 | py | """
Compare two FASTA files based on sequences
The order of records in the two files does not matter.
Exit code:
2 if duplicate sequences or duplicate record names were found
1 if there are any lost or gained records or sequence differences
0 if the records are identical, but allowing for different record names
"""
import sys
import logging
import numpy as np
from scipy.optimize import linear_sum_assignment
from sqt import FastaReader
from sqt.align import hamming_distance
logger = logging.getLogger(__name__)
# Flag presumably read by the surrounding CLI framework to suppress CPU
# statistics output for this subcommand — TODO confirm against the driver.
do_not_show_cpustats = 1
def add_arguments(parser):
    """Register this subcommand's command-line options on *parser*."""
    parser.add_argument('--color', default='auto', choices=('auto', 'never', 'always'),
        help='Whether to colorize output')
    parser.add_argument('a', help='FASTA file with expected sequences')
    parser.add_argument('b', help='FASTA file with actual sequences')
# ANSI escape sequences used for colored terminal output.
RED = "\x1b[0;31m"
GREEN = "\x1b[0;32m"
RESET = "\x1b[0m"


def red(s):
    """Wrap *s* in ANSI codes so it renders red on the terminal."""
    return "".join((RED, s, RESET))


def green(s):
    """Wrap *s* in ANSI codes so it renders green on the terminal."""
    return "".join((GREEN, s, RESET))
def check_duplicate_names(records):
    """Yield the name of every record whose name occurred earlier in *records*."""
    seen = set()
    for rec in records:
        if rec.name in seen:
            yield rec.name
        seen.add(rec.name)
def check_exact_duplicate_sequences(records):
    """Yield (name, earlier_name) for every record whose sequence already
    occurred earlier in *records*."""
    first_seen = {}
    for rec in records:
        if rec.sequence in first_seen:
            yield rec.name, first_seen[rec.sequence]
        else:
            first_seen[rec.sequence] = rec.name
def compare(a, b):
    """Return the cost of pairing record *a* with record *b*.

    The cost is five times the smaller Hamming distance of the two possible
    end-alignments (prefixes or suffixes of the shorter length) plus the
    length difference of the two sequences.
    """
    len_a = len(a.sequence)
    len_b = len(b.sequence)
    overlap = min(len_a, len_b)
    length_diff = max(len_a, len_b) - overlap
    dist_prefixes = hamming_distance(a.sequence[:overlap], b.sequence[:overlap])
    dist_suffixes = hamming_distance(a.sequence[-overlap:], b.sequence[-overlap:])
    return 5 * min(dist_prefixes, dist_suffixes) + length_diff
def pair_up_identical(a_records, b_records):
    """Pair up records with byte-identical sequences.

    Returns (identical_pairs, unmatched_a, unmatched_b).
    """
    remaining_b = {rec.sequence: rec for rec in b_records}
    identical = []
    unmatched_a = []
    for rec in a_records:
        if rec.sequence in remaining_b:
            # Exact sequence match: pair it and consume the B record.
            identical.append((rec, remaining_b.pop(rec.sequence)))
        else:
            unmatched_a.append(rec)
    return identical, unmatched_a, list(remaining_b.values())
def pair_up(a_records, b_records, max_cost=20):
    """Pair records of *a_records* with records of *b_records*.

    Identical sequences are paired first; the remainder is matched by
    minimum-weight bipartite matching on the compare() cost, keeping only
    pairs whose cost does not exceed *max_cost*.

    Returns (a_only, b_only, identical, similar).
    """
    # Pair up identical sequences first (work on copies of the lists).
    identical, rest_a, rest_b = pair_up_identical(list(a_records), list(b_records))

    # All-vs-all cost matrix for the records without an exact partner.
    cost = np.zeros((len(rest_a), len(rest_b)), dtype=int)
    for row, rec_a in enumerate(rest_a):
        for col, rec_b in enumerate(rest_b):
            cost[row, col] = compare(rec_a, rec_b)

    # Solve the minimum-weight bipartite matching problem.
    row_ind, col_ind = linear_sum_assignment(cost)
    similar = []
    matched_a = set()
    matched_b = set()
    for row, col in zip(row_ind, col_ind):
        if cost[row, col] <= max_cost:
            similar.append((rest_a[row], rest_b[col]))
            matched_a.add(row)
            matched_b.add(col)

    a_only = [rec for idx, rec in enumerate(rest_a) if idx not in matched_a]
    b_only = [rec for idx, rec in enumerate(rest_b) if idx not in matched_b]
    return a_only, b_only, identical, similar
def format_indel(a, b, colored: bool):
    """Format the insertion/deletion between overhangs *a* and *b*.

    Exactly one of the two strings must be non-empty for a marker to be
    produced; equal lengths yield an empty string.
    """
    if len(a) > len(b):
        assert len(b) == 0
        marker = '{-' + a + '}'
        return red(marker) if colored else marker
    if len(b) > len(a):
        assert len(a) == 0
        marker = '{+' + b + '}'
        return green(marker) if colored else marker
    return ''
def print_similar(a, b, colored: bool):
    """Print a report of how record *a* differs from record *b*.

    The shorter sequence is aligned against the longer one at the start or
    at the end, whichever gives the smaller Hamming distance; the overhang
    is shown as an indel and mismatching positions as substitutions.
    """
    l = min(len(a.sequence), len(b.sequence))
    dist_prefixes = hamming_distance(a.sequence[:l], b.sequence[:l])
    dist_suffixes = hamming_distance(a.sequence[-l:], b.sequence[-l:])
    if dist_prefixes <= dist_suffixes:
        # Align at the start; extra characters hang off the end.
        a_prefix = ''
        b_prefix = ''
        a_common = a.sequence[:l]
        b_common = b.sequence[:l]
        a_suffix = a.sequence[l:]
        b_suffix = b.sequence[l:]
    else:
        # Align at the end; extra characters hang off the start.
        a_prefix = a.sequence[:-l]
        b_prefix = b.sequence[:-l]
        a_common = a.sequence[-l:]
        b_common = b.sequence[-l:]
        a_suffix = ''
        b_suffix = ''
    s = format_indel(a_prefix, b_prefix, colored)
    edits = []
    for ac, bc in zip(a_common, b_common):
        if ac != bc:
            # Fixed: the substitution marker previously re-used and clobbered
            # 's', discarding the prefix indel built above whenever at least
            # one substitution occurred.
            if colored:
                marker = '{' + red(ac) + ' → ' + green(bc) + '}'
            else:
                marker = '{' + ac + ' → ' + bc + '}'
            edits.append(marker)
        else:
            edits.append(ac)
    s += ''.join(edits)
    s += format_indel(a_suffix, b_suffix, colored)
    print('~', a.name, '--', b.name)
    print(s)
    print()
def main(args):
    """Compare the FASTA files *args.a* and *args.b* and report differences.

    Exits with status 2 when duplicate names or sequences are found, 1 when
    there are lost/gained/similar records, and 0 when the records are
    identical (record names are allowed to differ).
    """
    # Decide whether to emit ANSI color codes.
    if args.color == 'auto':
        colored = sys.stdout.isatty()
    elif args.color == 'never':
        colored = False
    else:
        assert args.color == 'always'
        colored = True
    with FastaReader(args.a) as f:
        a_records = list(f)
    with FastaReader(args.b) as f:
        b_records = list(f)

    # Report duplicated record names per file.
    has_duplicate_names = False
    for records, path in ((a_records, args.a), (b_records, args.b)):
        dups = list(check_duplicate_names(records))
        if dups:
            has_duplicate_names = True
            print('Duplicate record names found in', path)
            for name in dups:
                print('-', name)

    # Report duplicated sequences per file. Fixed: the loop variable was
    # previously named 'record' while the body used 'records', so both
    # iterations re-checked file B's records and file A was never checked.
    has_duplicate_sequences = False
    for records, path in ((a_records, args.a), (b_records, args.b)):
        dups = list(check_exact_duplicate_sequences(records))
        if dups:
            has_duplicate_sequences = True
            print('Duplicate sequences found in', path)
            for name, name_orig in dups:
                print('-', name, 'is identical to earlier record', name_orig)

    only_a, only_b, identical, similar = pair_up(a_records, b_records)
    different_name = [(a, b) for a, b in identical if a.name != b.name]

    # Summary
    print('{} vs {} records. {} lost, {} gained, {} identical, {} different name, {} similar'.format(
        len(a_records), len(b_records), len(only_a), len(only_b),
        len(identical) - len(different_name), len(different_name),
        len(similar)))

    # Report what has changed
    if only_a:
        print()
        print('## Only in A')
        for record in only_a:
            print('-', record.name)
    if only_b:
        print()
        print('## Only in B')
        for record in only_b:
            print('+', record.name)
    if different_name:
        print()
        print('## Different name (sequence identical)')
        for a, b in different_name:
            print('=', a.name, '--', b.name)
    if similar:
        print()
        print('## Similar')
        for a, b in similar:
            print_similar(a, b, colored)

    if has_duplicate_names or has_duplicate_sequences:
        sys.exit(2)
    if only_a or only_b or similar:
        sys.exit(1)
    # different name is fine for success
    sys.exit(0)
| [
"marcel.martin@scilifelab.se"
] | marcel.martin@scilifelab.se |
e9c676a2bda26f05c1c2b1ce8d8d787df65ee28d | 3a39ddc4a8600ffc5110453867370c1d8e2da121 | /media-libs/libdvdcss/libdvdcss-1.2.10.py | 313858ee1df9761c7e7ffbe596db5cde8de288bf | [] | no_license | seqizz/hadron64 | f2276133786c62f490bdc0cbb6801491c788520f | ca6ef5df3972b925f38e3666ccdc20f2d0bfe87e | refs/heads/master | 2021-01-18T04:53:09.597388 | 2013-02-25T21:25:32 | 2013-02-25T21:25:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | metadata = """
summary @ A portable abstraction library for DVD decryption
homepage @ http://www.videolan.org/libdvdcss
license @ GPL
src_url @ http://download.videolan.org/pub/libdvdcss/$version/$fullname.tar.bz2
arch @ ~x86
options @ doc
"""
# Runtime dependencies of the installed package.
depends = """
runtime @ sys-libs/glibc
"""
# Build-time dependencies required only when the matching option is enabled.
opt_build = """
doc @ app-doc/doxygen
"""
def configure():
    # Standard autotools configure; documentation is built only when the
    # 'doc' option is enabled. (conf/config_enable are presumably provided
    # by the surrounding package-build framework — TODO confirm.)
    conf(
        "--enable-static",
        "--enable-shared",
        config_enable("doc"),
        "--disable-dependency-tracking")
def install():
    # Staged install into the package image, then install the doc files.
    raw_install("DESTDIR=%s" % install_dir)
    insdoc("AUTHORS", "ChangeLog", "NEWS", "README")
    if opt("doc"):
        insdoc("doc/latex/refman.ps")
    # TODO: the 'doc' option needs doxygen, which is in the seq repo and
    # still requires substantial work because of Qt + LaTeX.
| [
"seqizz@gmail.com"
] | seqizz@gmail.com |
ae87acfb91d524438626533b4763b16e81b65d72 | 009628e385aca8552dad5c1c5cba018ca6e5954d | /mtk/geometry/rms.py | a5a9d290eba7d7247a8eb7319f745899093bbca7 | [] | no_license | csrocha/python-mtk | 565ebcfeb668a6409d48135bf081321d8121b263 | c3ba520f55c2e204feb6b98251abcb046e51c6cd | refs/heads/main | 2023-01-12T02:46:44.457520 | 2020-11-17T20:20:59 | 2020-11-17T20:20:59 | 313,939,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,441 | py | # -*- coding: ISO-8859-1 -*-
# $Id: rms.py 62 2009-05-19 09:15:03Z cristian_docking $
#
# Copyright (C) 2009 Cristian S. Rocha (crocha@dc.uba.ar)
# Licensed to PSF under a Contributor Agreement.
"""Calcule rms related functions"""
from numpy import array, shape, dot, transpose, identity
from numpy import sqrt, sum, max
from numpy.linalg import svd, det
def array_rmsd(arr1, arr2):
    """Return the root-mean-square deviation between two equally sized arrays."""
    squared_error = (arr1 - arr2)**2
    return sqrt(sum(squared_error)/len(arr1))
def rmsd(crds1, crds2):
    """Return the RMSD between two sets of [nx{3,4}] numpy arrays.

    Rows may be plain 3-D coordinates or affine (homogeneous) 4-column
    coordinates; the affine column is ignored.
    url: http://boscoh.com/protein/rmsd-root-mean-square-deviation

    >>> from numpy import array
    >>> a = array([ [ 0, 0, 0, 1 ] ])
    >>> b = array([ [ 1, 0, 0, 1 ] ])
    >>> float(rmsd(a, b))
    1.0

    >>> a = array([ [ 0, 0, 0 ] ])
    >>> b = array([ [ 1, 0, 0 ] ])
    >>> float(rmsd(a, b))
    1.0

    >>> a = array([ [ 0, 0, 0, 1 ], [ 1, 0, 0, 1 ] ])
    >>> b = array([ [ 1, 0, 0, 1 ], [ 2, 0, 0, 1 ] ])
    >>> float(rmsd(a, b))
    1.0

    >>> a = array([ [ 0, 0, 0 ], [ 2, 0, 0 ] ])
    >>> b = array([ [ 1, 0, 0 ], [ 2, 0, 0 ] ])
    >>> "%08.4f" % rmsd(a, b)
    '000.7071'

    >>> a = array([ [ i, j, k ] for i in range(10) for j in range(10) for k in range(10) ])
    >>> b = array([ [ k, j, i ] for i in range(10) for j in range(10) for k in range(10) ])
    >>> "%08.4f" % rmsd(a, b)
    '005.7446'
    """
    assert crds1.shape[1] > 2
    assert crds1.shape == crds2.shape
    # Drop the homogeneous column of affine coordinates, if present.
    if crds1.shape[1] > 3:
        crds1 = crds1[:, :3]
        crds2 = crds2[:, :3]
    # sum((crds1 - crds2)**2) expanded as E0 - 2*S to avoid a temporary.
    e0 = sum(crds1 * crds1) + \
         sum(crds2 * crds2)
    s = sum(crds1 * crds2)
    rms2 = (e0 - 2 * s) / float(crds1.shape[0])
    if rms2 < 0.0:
        # Clamp tiny negative values caused by floating-point round-off;
        # the original hard assert could abort here on valid input.
        rms2 = 0.0
    return sqrt(rms2)
def fit_rotation(crds1, crds2, affine=False):
    """Return the best-fit rotation matrix as a [3x3] or [4x4] numpy array.

    url: http://boscoh.com/protein/rmsd-root-mean-square-deviation

    >>> from numpy import allclose
    >>> a = array([ [ 1, 0, 0, 1 ], [ 0, 1, 0, 1 ] ])
    >>> b = array([ [ 1, 0, 0, 1 ], [ 0, 1, 0, 1 ] ])
    >>> fit_rotation(a, b)
    array([[ 1.,  0.,  0.,  0.],
           [ 0.,  1.,  0.,  0.],
           [ 0.,  0.,  1.,  0.],
           [ 0.,  0.,  0.,  1.]])
    >>> a = array([ [ 0, 0, 0, 1 ], [ 1, 0, 0, 1 ] ])
    >>> b = array([ [ 0, 0, 0, 1 ], [ 0, 1, 0, 1 ] ])
    >>> R = fit_rotation(a, b)
    >>> rmsd( dot(a, R), b )
    0.0
    >>> a = array([ [ 0, 0, 0, 1 ], [ 0, 1, 0, 1 ], \
                    [ 1, 1, 0, 1 ], [ 1, 0, 0, 1 ] ])
    >>> b = array([ [ 0, 0, 0, 1 ], [ 0,-1, 0, 1 ], \
                    [-1,-1, 0, 1 ], [-1, 0, 0, 1 ] ])
    >>> R = fit_rotation(a, b)
    >>> allclose(rmsd( dot(a, R), b ), .0, atol=1e-7)
    True
    >>> a = array([ [ 0, 0, 0, 1 ], [ 0, 1, 0, 1 ], \
                    [ 1, 1, 0, 1 ], [ 1, 0, 0, 1 ] ])
    >>> b = array([ [ 0, 1, 0, 1 ], [ 1, 1, 0, 1 ], \
                    [ 1, 0, 0, 1 ], [ 0, 0, 0, 1 ] ])
    >>> ca, cb = centre(a), centre(b)
    >>> R = fit_rotation(a - ca, b -cb)
    >>> rmsd( dot(a-ca, R) + ca, b )
    0.0
    """
    assert crds1.shape[1] > 2
    assert crds1.shape == crds2.shape
    # Affine input: fit the 3-D part and answer a 4x4 matrix.
    if crds1.shape[1] > 3:
        crds1 = crds1[:, :3]
        crds2 = crds2[:, :3]
        affine = True
    # Kabsch: SVD of the correlation matrix gives the optimal rotation.
    u, _, wt = svd(dot(transpose(crds1), crds2))
    # A reflection (improper rotation) is repaired by flipping one axis.
    if det(u) * det(wt) < 0.0:
        u[-1, :] = -u[-1, :]
    rotation = dot(u, wt)
    if not affine:
        return rotation
    result = identity(4)
    result[:3, :3] = rotation
    return result
def fit_transform(crds1, crds2):
    """Return the best-fit affine transform mapping crds1 onto crds2.

    The transform translates crds1's centroid to the origin, applies the
    best-fit rotation, then translates onto crds2's centroid.  (The
    original translated back onto crds1's own centroid, which is wrong
    whenever the two sets are shifted relative to each other.)

    >>> a = array([ [ 0, 0, 0, 1 ], [ 0, 1, 0, 1 ], \
                    [ 1, 1, 0, 1 ], [ 1, 0, 0, 1 ] ])
    >>> b = array([ [ 0, 1, 0, 1 ], [ 1, 1, 0, 1 ], \
                    [ 1, 0, 0, 1 ], [ 0, 0, 0, 1 ] ])
    >>> R = fit_transform(a, b)
    >>> rmsd( dot(a, R), b )
    0.0
    >>> c = array([ [ 5, 1, 0, 1 ], [ 6, 1, 0, 1 ], \
                    [ 6, 0, 0, 1 ], [ 5, 0, 0, 1 ] ])
    >>> "%08.4f" % rmsd(dot(a, fit_transform(a, c)), c)
    '000.0000'
    """
    ca, cb = centre(crds1), centre(crds2)
    # Translate crds1's centroid to the origin.
    to_origin = identity(4)
    to_origin[3, :3] = -ca[:3]
    # Best-fit rotation between the two centred sets.
    rotation = fit_rotation(crds1 - ca, crds2 - cb, affine=True)
    # Translate onto crds2's centroid to complete the mapping.
    onto_target = identity(4)
    onto_target[3, :3] = cb[:3]
    return dot(dot(to_origin, rotation), onto_target)
def fit(crds1, crds2):
    """Return the best-fit RMSD of crds1 onto crds2.

    >>> a = array([ [ 0, 0, 0, 1 ], [ 0, 1, 0, 1 ], \
                    [ 1, 1, 0, 1 ], [ 1, 0, 0, 1 ] ])
    >>> b = array([ [ 0, 1, 0, 1 ], [ 1, 1, 0, 1 ], \
                    [ 1, 0, 0, 1 ], [ 0, 0, 0, 1 ] ])
    >>> fit(a, b)
    0.0
    >>> a = array([ [ i, j, k, 1 ] for i in range(10) for j in range(10) for k in range(10) ])
    >>> b = array([ [ k, j, i, 1 ] for i in range(10) for j in range(10) for k in range(10) ])
    >>> "%08.4f" % fit(a, b)
    '005.7446'
    """
    aligned = dot(crds1, fit_transform(crds1, crds2))
    return rmsd(aligned, crds2)
def centre(crds):
    """Return the geometric centre of crds as an affine point.

    >>> a = array([ [ 0, 0, 0, 1 ], [ 0, 1, 0, 1 ], \
                    [ 1, 1, 0, 1 ], [ 1, 0, 0, 1 ] ])
    >>> centre(a)
    array([ 0.5,  0.5,  0. ,  1. ])
    """
    count = float(crds.shape[0])
    means = [sum(crds[:, axis]) / count for axis in range(3)]
    # Append the homogeneous coordinate, then trim to the input's width.
    return array(means + [1.0])[:crds.shape[1]]
def test_suite():
    """Return a unittest suite that runs this module's doctests."""
    import doctest
    return doctest.DocTestSuite()
if __name__ == "__main__":
import unittest
runner = unittest.TextTestRunner()
runner.run(test_suite())
| [
"cristian.rocha@moldeo.coop"
] | cristian.rocha@moldeo.coop |
0d6714ece4a8c98e9bf0db905e7aaea398bb4170 | 4a3d5904441535e77d39e1f6d9552433d297ce0a | /profit_loss.py | 6464d0204128f7c61f296c825ea4a7dad060d853 | [] | no_license | Rinkikumari19/codechef_questions | b2a5d3b483f3aa8ef70d4af963f7315dca308e6e | 2b1f991d3e2faad4ca5d7ce9af4fad0c35e7b087 | refs/heads/master | 2022-11-23T19:00:21.589006 | 2020-08-02T18:22:38 | 2020-08-02T18:22:38 | 280,680,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | sp = int(input("enter your number"))
cp = int(input("enter your number"))
if sp>cp:
print("profit")
elif sp<cp:
print("loss")
else:
print("no profit, no loss")
# It will check profit or loss | [
"ravina18@navgurukul.org"
] | ravina18@navgurukul.org |
176d735cd834be1894e7ea014a170604d7fcfb2f | 5ae3bc1920fafc33693cdfa3928a48158aa6f725 | /687/687.py | 5f58fd789819bd64da74f0b7787136b4efe95a9d | [] | no_license | sjzyjc/leetcode | 2d0764aec6681d567bffd8ff9a8cc482c44336c2 | 5e09a5d36ac55d782628a888ad57d48e234b61ac | refs/heads/master | 2021-04-03T08:26:38.232218 | 2019-08-15T21:54:59 | 2019-08-15T21:54:59 | 124,685,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def longestUnivaluePath(self, root: TreeNode) -> int:
        """Length (in edges) of the longest path whose nodes share one value.

        Fixes two defects in the original: it returned the whole
        ``(arm, best)`` tuple from ``dfs`` instead of an int, and it
        counted nodes rather than edges (LeetCode 687 measures edges).
        """
        if not root:
            return 0
        _, best = self.dfs(root, 0)
        return best

    # Returns (longest same-value arm ending at node, best path so far),
    # both measured in edges.
    def dfs(self, node, ans):
        if not node:
            return 0, ans
        left_long, ans = self.dfs(node.left, ans)
        right_long, ans = self.dfs(node.right, ans)
        # An arm extends through a child only when the value matches.
        left_arm = left_long + 1 if node.left and node.left.val == node.val else 0
        right_arm = right_long + 1 if node.right and node.right.val == node.val else 0
        # The best path through this node joins both arms.
        return max(left_arm, right_arm), max(ans, left_arm + right_arm)
| [
"jcyang@MacBook-Air.local"
] | jcyang@MacBook-Air.local |
cb5c552d5609ca9db8b4646ca79559abe3a4271c | 7d5d0727c70bef05717199fb125c50102a832a12 | /src/learn/basic/dataloading.py | a3238f6909dd2db7c22abd3eb228ecff655f72eb | [] | no_license | yyHaker/pytorch_study | 3408f8b34e8bf7dee1e5f42e4bde5e3c777d35b5 | 664238f73114ccbdbe41a34ecbb7dab74d467285 | refs/heads/master | 2022-11-06T17:04:45.255802 | 2019-10-14T12:06:33 | 2019-10-14T12:06:33 | 113,334,932 | 2 | 1 | null | 2022-10-23T12:21:59 | 2017-12-06T15:41:46 | Python | UTF-8 | Python | false | false | 2,632 | py | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
plt.ion() # interactive mode
# Load the landmark annotations and inspect one sample row.
landmarks_frame = pd.read_csv('faces/face_landmarks.csv')

n = 65
img_name = landmarks_frame.iloc[n, 0]
# .values replaces DataFrame.as_matrix(), which was removed in pandas 1.0.
landmarks = landmarks_frame.iloc[n, 1:].values
landmarks = landmarks.astype('float').reshape(-1, 2)

print('Image name: {}'.format(img_name))
print('Landmarks shape: {}'.format(landmarks.shape))
print('First 4 Landmarks: {}'.format(landmarks[:4]))
def show_landmarks(image, landmarks):
    """Display *image* with its (n, 2) landmark points overlaid as red dots."""
    xs = landmarks[:, 0]
    ys = landmarks[:, 1]
    plt.imshow(image)
    plt.scatter(xs, ys, s=10, marker='.', c='r')
    # Brief pause so interactive-mode figures get a chance to refresh.
    plt.pause(0.001)
# Preview the selected sample: load its image and overlay the landmarks.
plt.figure()
show_landmarks(io.imread(os.path.join('faces/', img_name)),
               landmarks)
plt.show()
class FaceLandmarksDataset(Dataset):
    """Face Landmarks dataset: one image plus its landmark points per row."""

    def __init__(self, csv_file, root_dir, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with annotations.
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.landmarks_frame = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        # One sample per annotation row.
        return len(self.landmarks_frame)

    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir,
                                self.landmarks_frame.iloc[idx, 0])
        image = io.imread(img_name)
        # .values replaces DataFrame.as_matrix(), removed in pandas 1.0.
        landmarks = self.landmarks_frame.iloc[idx, 1:].values
        landmarks = landmarks.astype('float').reshape(-1, 2)
        sample = {'image': image, 'landmarks': landmarks}
        if self.transform:
            sample = self.transform(sample)
        return sample
# Iterate the dataset and display the first four samples in one figure.
face_dataset = FaceLandmarksDataset(csv_file='faces/face_landmarks.csv',
                                    root_dir='faces/')
fig = plt.figure()
for i in range(len(face_dataset)):
    sample = face_dataset[i]
    print(i, sample['image'].shape, sample['landmarks'].shape)
    # One subplot per sample, title on, axes hidden.
    ax = plt.subplot(1, 4, i + 1)
    plt.tight_layout()
    ax.set_title('Sample #{}'.format(i))
    ax.axis('off')
    show_landmarks(**sample)
    # Stop after four samples and show the assembled figure.
    if i == 3:
        plt.show()
        break
| [
"572176750@qq.com"
] | 572176750@qq.com |
3629df5aeea5748fc3564e8fef8155b2e47a1eb6 | 33b2df796669a76b3d82f465b5d0ce95241a8082 | /tests/gene_tests.py | 03c8f8a4c2159d0d90b337803f5b41f869f1d70f | [
"MIT"
] | permissive | dohlee/python-dohlee | 7c9a2d83d41ad4d50bf3e4304e1e694676cc4820 | f99bb4892b6de8cde0515e09fac50e36313b06ed | refs/heads/master | 2021-06-07T11:12:10.825104 | 2019-08-09T04:37:24 | 2019-08-09T04:37:24 | 130,536,062 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | from nose.tools import assert_equal
import dohlee.gene as gene
def given_single_gene_ensg2symbol_test():
ensg = 'ENSG00000141510'
symbol = gene.ensg2symbol(ensg)
assert_equal(symbol, 'TP53')
def given_multiple_gene_ensg2symbol_test():
ensgs = ['ENSG00000121879', 'ENSG00000141510']
symbols = gene.ensg2symbol(ensgs)
assert_equal(symbols, ['PIK3CA', 'TP53'])
def given_single_gene_symbol2ensg_test():
symbol = 'TP53'
ensg = gene.symbol2ensg(symbol)
assert_equal(ensg, 'ENSG00000141510')
def given_multiple_gene_symbol2ensg_test():
symbols = ['PIK3CA', 'TP53']
ensgs = gene.symbol2ensg(symbols)
assert_equal(ensgs, ['ENSG00000121879', 'ENSG00000141510'])
| [
"apap950419@gmail.com"
] | apap950419@gmail.com |
d3b7a467a2cd51c08f899aef6235287f9a7302f2 | 76c14138689216634ca12b5e1bc0947a9866b1b0 | /PYTHON/oops/nn/encap.py | 0015ce9871be0212b6a8a49ab62bebdbf4202e9e | [] | no_license | KSrinuvas/ALL | 4b0c339bfeb8780232f3853e5fc53c40b84c9cb7 | 9251464a27a98f98e1055ebf129d6934a02e8ffc | refs/heads/master | 2020-12-06T21:59:33.449669 | 2020-03-15T17:05:42 | 2020-03-15T17:05:42 | 232,560,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | #!/usr/bin/python3
class AA:
def __init__(self):
self.__val = 500
def Print(self):
print (self.__val)
def Update(self,price):
self.__val = price
print (self.__val)
dd = AA()
dd.__val = 700
print (dd.__val)
dd.Print()
dd.Update(1000)
| [
"Srinuvas935@gmail.com"
] | Srinuvas935@gmail.com |
54abdd9a8a5612396e4dd71365da21a01e80f094 | 44845df9198ae8c80fabecb6ed3ae6a44e43f38c | /iva/migrations/0002_auto_20170328_1235.py | 005c20c45e08cbc383afa7e4bd650a8fc3de6163 | [] | no_license | CarlosSanz81/cima | 570da404bddd0a813a025163a9e94676b9d0b4a9 | 3ad9b37af4a2d8a5789915208afffec7b6af3c0e | refs/heads/master | 2021-01-23T08:00:04.964713 | 2017-03-28T14:33:09 | 2017-03-28T14:33:09 | 72,184,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-28 10:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('iva', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='iva',
options={'ordering': ('tipo',)},
),
]
| [
"carlossanzgarcia81@gmail.com"
] | carlossanzgarcia81@gmail.com |
65509d419fbc0dc77d5715279909582362317857 | e7efae2b83216d9621bd93390959d652de779c3d | /gitlab/tests/test_unit.py | 8acbb2295edbf835bb2f3c50f98aaaaf96c2ff47 | [
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] | permissive | DataDog/integrations-core | ee1886cc7655972b2791e6ab8a1c62ab35afdb47 | 406072e4294edff5b46b513f0cdf7c2c00fac9d2 | refs/heads/master | 2023-08-31T04:08:06.243593 | 2023-08-30T18:22:10 | 2023-08-30T18:22:10 | 47,203,045 | 852 | 1,548 | BSD-3-Clause | 2023-09-14T16:39:54 | 2015-12-01T16:41:45 | Python | UTF-8 | Python | false | false | 5,916 | py | # (C) Datadog, Inc. 2023-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from mock.mock import MagicMock
from datadog_checks.base import AgentCheck, ConfigurationError
from datadog_checks.dev.testing import requires_py2, requires_py3
from datadog_checks.dev.utils import get_metadata_metrics
from datadog_checks.gitlab.common import get_gitlab_version
from .common import (
CUSTOM_TAGS,
GITALY_METRICS,
GITLAB_GITALY_PROMETHEUS_ENDPOINT,
GITLAB_TAGS,
V1_METRICS,
V2_METRICS,
assert_check,
)
pytestmark = [pytest.mark.unit]
@pytest.mark.parametrize('use_openmetrics', [True, False], indirect=True)
def test_check(dd_run_check, aggregator, mock_data, gitlab_check, get_config, use_openmetrics):
check = gitlab_check(get_config(use_openmetrics))
dd_run_check(check)
dd_run_check(check)
assert_check(aggregator, V2_METRICS if use_openmetrics else V1_METRICS, use_openmetrics)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
@requires_py3
def test_check_gitaly(dd_run_check, aggregator, mock_data, gitlab_check, get_config):
from datadog_checks.gitlab.gitlab_v2 import GitlabCheckV2
config = get_config(True)
instance = config['instances'][0]
instance["openmetrics_endpoint"] = instance["prometheus_url"]
instance["gitaly_server_endpoint"] = GITLAB_GITALY_PROMETHEUS_ENDPOINT
check = gitlab_check(config)
dd_run_check(check)
dd_run_check(check)
assert_check(aggregator, V2_METRICS + GITALY_METRICS, True)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
aggregator.assert_service_check(
'gitlab.gitaly.openmetrics.health',
status=GitlabCheckV2.OK,
tags=GITLAB_TAGS + CUSTOM_TAGS + ['endpoint:{}'.format(GITLAB_GITALY_PROMETHEUS_ENDPOINT)],
)
@requires_py2
def test_openmetrics_with_python2(gitlab_check, get_config):
with pytest.raises(
ConfigurationError, match="This version of the integration is only available when using Python 3."
):
gitlab_check(get_config(True))
@pytest.mark.parametrize(
"raw_version",
[
"1.2.3",
"5.6.7",
],
)
def test_get_gitlab_version(raw_version):
http = MagicMock()
http.get.return_value.json.return_value = {"version": raw_version}
version = get_gitlab_version(http, MagicMock(), "http://localhost", "my-token")
http.get.assert_called_with("http://localhost/api/v4/version", params={'access_token': "my-token"})
assert version == raw_version
def test_get_gitlab_version_without_token():
http = MagicMock()
version = get_gitlab_version(http, MagicMock(), "http://localhost", None)
http.get.assert_not_called()
assert version is None
@requires_py3
def test_no_gitlab_url(dd_run_check, aggregator, mock_data, gitlab_check, get_config):
config = get_config(True)
del config['instances'][0]['gitlab_url']
check = gitlab_check(config)
dd_run_check(check)
aggregator.assert_service_check('gitlab.openmetrics.health', status=AgentCheck.OK)
@requires_py3
def test_parse_readiness_service_checks_with_invalid_payload(
dd_run_check, aggregator, mock_data, gitlab_check, get_config
):
check = gitlab_check(get_config(True))
# Manually init the check
check.parse_config()
mocked_response = MagicMock()
mocked_response.json.raiseError.side_effect = Exception()
check.parse_readiness_service_checks(mocked_response)
mocked_response.json.assert_called_once()
for service_check in check.READINESS_SERVICE_CHECKS.values():
aggregator.assert_service_check(
'gitlab.readiness.{}'.format(service_check), status=AgentCheck.UNKNOWN, tags=GITLAB_TAGS + CUSTOM_TAGS
)
assert len(aggregator.service_check_names) == 13
@pytest.mark.parametrize(
'service_check, expected_redis_status',
[
pytest.param(
{"redis_check": [{"status": "ok"}]},
AgentCheck.OK,
id="OK",
),
pytest.param(
{"redis_check": [{"status": "failed"}]},
AgentCheck.CRITICAL,
id="CRITICAL",
),
pytest.param(
{"redis_check": [{}]},
AgentCheck.UNKNOWN,
id="UNKNOWN",
),
pytest.param(
{},
AgentCheck.UNKNOWN,
id="missing service check",
),
pytest.param(
{"unknown_check": [{"status": "ok"}]},
AgentCheck.UNKNOWN,
id="unknown service check",
),
pytest.param(
{"redis": [{"status": "ok"}]},
AgentCheck.UNKNOWN,
id="service check not finishing with _check",
),
pytest.param(
{"redis_check": {"status": "ok"}},
AgentCheck.UNKNOWN,
id="malformed check",
),
],
)
@requires_py3
def test_parse_readiness_service_checks(
dd_run_check, aggregator, mock_data, gitlab_check, get_config, service_check, expected_redis_status
):
check = gitlab_check(get_config(True))
# Manually init the check
check.parse_config()
mocked_response = MagicMock()
mocked_response.json.return_value = service_check
check.parse_readiness_service_checks(mocked_response)
aggregator.assert_service_check(
'gitlab.readiness.redis',
status=expected_redis_status,
tags=GITLAB_TAGS + CUSTOM_TAGS,
)
for not_received_service_check in set(check.READINESS_SERVICE_CHECKS.values()) - {"redis"}:
aggregator.assert_service_check(
'gitlab.readiness.{}'.format(not_received_service_check),
status=AgentCheck.UNKNOWN,
tags=GITLAB_TAGS + CUSTOM_TAGS,
)
assert len(aggregator.service_check_names) == 13
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
0de6366059644781c5ad60ba84b583894cd2615a | f21ce1669b00d80e8d064363342bafe6cc2bca71 | /personal_website/landing/views.py | 5b88cbed3920bdd12db0ea8314bcb0500a4b33a2 | [] | no_license | sandipan898/personal-website | 760a87b42373c0098d67dd3bedb96bac16147e38 | 62ae9dc2be63f9b7d4297596dcffa329e2d9b961 | refs/heads/main | 2023-06-30T03:03:42.374597 | 2021-07-31T21:31:41 | 2021-07-31T21:31:41 | 328,332,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | from django.shortcuts import render
from django.views import View
# Create your views here.
class HomeView(View):
""" Defining Home Page View """
template_name = "landing/index.html"
def get(self, request):
""" Defining the GET method oon Home Page View """
return render(request, template_name=self.template_name)
| [
"sandipan.das898@gmail.com"
] | sandipan.das898@gmail.com |
c18d4509b54afcc01aad8058748e79cde649f324 | 8de6b19856ad0951489bcaaa23ea6e10996c7f8d | /fizzbuzz.py | 2b69869321e6ce91eea4fe4aa111ad0cbcb21d65 | [
"MIT"
] | permissive | incolumepy-cursos/poop | 2a59fec0fce283e7c62ccae6dcba7261b7f7ed87 | e4ac26b8d2a8c263a93fd9642fab52aafda53d80 | refs/heads/main | 2023-03-27T21:33:06.719286 | 2021-04-03T16:56:02 | 2021-04-03T16:56:02 | 351,632,043 | 0 | 0 | MIT | 2021-03-26T01:59:11 | 2021-03-26T01:59:10 | null | UTF-8 | Python | false | false | 2,074 | py | # smalltalk infected fizzbuzz version
from collections import deque
from forbiddenfruit import curse
def if_true(self, block):
    """Run *block* when the receiver is truthy; always answer the receiver."""
    if self:
        # Blocks are simulated with zero-argument callables.
        block()
    return self
def if_false(self, block):
    """Run *block* when the receiver is falsy; always answer the receiver."""
    if not self:
        # Blocks are simulated with zero-argument callables.
        block()
    return self
def println(self):
    """Prints the values to a stream, or to sys.stdout by default.

    Bound below (as ``print``) onto ``str`` and ``int`` so objects can be
    asked to print themselves, Smalltalk style.

    >>> "Fizz".print()
    Fizz
    >>> "FizzBuzz".print()
    FizzBuzz
    """
    print(self)
def do(self, block):
    """Evaluate *block* once per element of the receiver, answering self.

    >>> range(1, 11).do(lambda number: number.print())
    """
    for element in self:
        block(element)
    return self
curse(bool, "if_true", if_true)
curse(bool, "if_false", if_false)
curse(str, "print", println)
curse(int, "print", println)
curse(range, "do", do)
# lambdas are used to simulate blocks
"""Summary
We add a do methd on range objects that evaluates a block
for each element on interval.
This block will receive a number, that evaluated
in the expression "number % 15 == 0", This will result in a boolean object,
to which we will send two messages,
one with a block to be evaluated if the expression is true and
another for if it is false.
If true, we will send a print message to a "FizzBuzz" object.
If it is false, we will use the same numeric object
to evaluate the expression number% 5 == 0.
And so we repeat the cycle, until at last a message
is sent to the number printed.
"""
# Smalltalk-flavoured FizzBuzz: each number 1..100 evaluates a chain of
# divisibility tests, and each boolean answers if_true:/if_false: messages
# that either print the word or fall through to the next test.
range(1, 101).do(
    lambda number: (number % 15 == 0)
    .if_true("FizzBuzz".print)
    .if_false(
        lambda: (number % 5 == 0)
        .if_true("Buzz".print)
        .if_false(
            lambda: (number % 3 == 0)
            .if_true("Fizz".print)
            .if_false(number.print)
        )
    )
)
"""
Notes:
- A message is sent to an object for printing
- Lambdas are used to simulate a block
- Add method do for a range, evaluating a block on each number on interval
- Objects and messages
"""
| [
"cassiobotaro@gmail.com"
] | cassiobotaro@gmail.com |
d00756139719b251cbd67d826b9f5a9d26cfe9aa | cdfd0e69a5f81fbc9480fcdae85f195797eced49 | /store/templatetags/cart.py | edbde375cf7de9bda9d1a4690454ee7317580d5a | [] | no_license | anupjungkarki/Eshop-Project-In-Django | 00c0e77ac79e3ad13a951719bb5d7d007a4ee0ec | e1445e15d376def379f20e50a10205647d79026c | refs/heads/master | 2023-01-14T12:10:28.443359 | 2020-11-24T11:17:49 | 2020-11-24T11:17:49 | 315,603,204 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | from django import template
register = template.Library()
# Use of Filter
@register.filter(name='is_in_cart')
def is_in_cart(product, cart):
keys = cart.keys()
for id in keys:
if int(id) == product.id:
return True
return False
@register.filter(name='cart_quantity')
def cart_quantity(product, cart):
keys = cart.keys()
for id in keys:
if int(id) == product.id:
return cart.get(id)
return 0
@register.filter(name='price_total')
def price_total(product, cart):
return product.price * cart_quantity(product, cart)
@register.filter(name='total_cart_price')
def total_cart_price(products, cart):
sum = 0
for p in products:
sum += price_total(p, cart)
return sum
| [
"anupkarki2012@gmail.com"
] | anupkarki2012@gmail.com |
b052c9c007ed0f4575616c243987ec5565525afa | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /458/458.poor-pigs.233532116.Accepted.leetcode.py | c82be47e46aa5c305e7d39a7a5c28fcac50aad85 | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | from math import log, ceil
class Solution(object):
    def poorPigs(self, buckets, minutesToDie, minutesToTest):
        """Return the minimum number of pigs needed to find the poison bucket.

        Each pig survives long enough for ``minutesToTest // minutesToDie``
        test rounds, so one pig encodes ``rounds + 1`` distinct outcomes and
        ``p`` pigs distinguish ``(rounds + 1) ** p`` buckets.  The answer is
        the smallest ``p`` with ``(rounds + 1) ** p >= buckets``.

        Exact integer arithmetic replaces the original
        ``ceil(log(buckets) // log(rounds + 1))``: the ``//`` floored the
        ratio before rounding up (e.g. 1000 buckets / 4 rounds gave 4
        instead of 5), and float logs can misround at exact powers.
        Assumes minutesToDie <= minutesToTest, per the problem constraints.
        """
        if buckets <= 1:
            return 0
        states = minutesToTest // minutesToDie + 1
        pigs = 0
        while states ** pigs < buckets:
            pigs += 1
        return pigs
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
4fa4c58637f6b155faf1a21f2bbf7c3df8421796 | 86fc644c327a8d6ea66fd045d94c7733c22df48c | /scripts/managed_cpe_services/customer/single_cpe_site/single_cpe_site_services/lan/switches/vlan/service_customization.py | 0aefed9f8e5c101bcf571006953b92b7b2c8013a | [] | no_license | lucabrasi83/anutacpedeployment | bfe703657fbcf0375c92bcbe7560051817f1a526 | 96de3a4fd4adbbc0d443620f0c53f397823a1cad | refs/heads/master | 2021-09-24T16:44:05.305313 | 2018-10-12T02:41:18 | 2018-10-12T02:41:18 | 95,190,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,979 | py | #
# This computer program is the confidential information and proprietary trade
# secret of Anuta Networks, Inc. Possessions and use of this program must
# conform strictly to the license agreement between the user and
# Anuta Networks, Inc., and receipt or possession does not convey any rights
# to divulge, reproduce, or allow others to use this program without specific
# written authorization of Anuta Networks, Inc.
#
# Copyright (c) 2016-2017 Anuta Networks, Inc. All Rights Reserved.
#
#
#ALL THE CUSTOMIZATIONS REGARDING DATAPROCESSING SHOULD BE WRITTEN INTO THIS FILE
#
"""
Tree Structure of Handled XPATH:
services
|
managed-cpe-services
|
customer
|
single-cpe-site
|
single-cpe-site-services
|
lan
|
switches
|
vlan
Schema Representation:
/services/managed-cpe-services/customer/single-cpe-site/single-cpe-site-services/lan/switches/vlan
"""
"""
Names of Leafs for this Yang Entity
vlan-id
name
"""
from servicemodel import util
from servicemodel import yang
from servicemodel import devicemgr
from cpedeployment.cpedeployment_lib import getLocalObject
from cpedeployment.cpedeployment_lib import getDeviceObject
from cpedeployment.cpedeployment_lib import getCurrentObjectConfig
from cpedeployment.cpedeployment_lib import getPreviousObjectConfig
from cpedeployment.cpedeployment_lib import ServiceModelContext
from cpedeployment.cpedeployment_lib import getParentObject
from cpedeployment.cpedeployment_lib import log
from cpedeployment.switch_lib import vlandef
class ServiceDataCustomization:
@staticmethod
def process_service_create_data(smodelctx, sdata, dev, **kwargs):
""" Custom API to modify the inputs"""
modify = True
if modify and kwargs is not None:
for key, value in kwargs.items():
log("%s == %s" %(key,value))
if modify:
config = kwargs['config']
inputdict = kwargs['inputdict']
inputkeydict = kwargs['inputkeydict']
@staticmethod
def process_service_device_bindings(smodelctx, sdata, dev, **kwargs):
""" Custom API to modify the device bindings or Call the Business Login Handlers"""
modify = True
if modify and kwargs is not None:
for key, value in kwargs.items():
log("%s == %s" %(key,value))
if modify:
config = kwargs['config']
inputdict = kwargs['inputdict']
inputkeydict = kwargs['inputkeydict']
devbindobjs = kwargs['devbindobjs']
id = kwargs['id']
opaque_args = kwargs['hopaque']
for device in util.convert_to_list(dev):
vlandef(smodelctx, sdata, device, **kwargs)
@staticmethod
def process_service_update_data(smodelctx, sdata, **kwargs):
"""callback called for update operation"""
modify = True
if modify and kwargs is not None:
for key, value in kwargs.items():
log("%s == %s" %(key,value))
if modify:
config = kwargs['config']
inputdict = kwargs['inputdict']
id = kwargs['id']
opaque_args = kwargs['hopaque']
#Previous config and previous inputdict
pconfig = kwargs['pconfig']
pinputdict = kwargs['pinputdict']
dev = kwargs['dev']
@staticmethod
def process_service_delete_data(smodelctx, sdata, **kwargs):
"""callback called for delete operation"""
modify = True
if modify and kwargs is not None:
for key, value in kwargs.items():
log("%s == %s" %(key,value))
if modify:
config = kwargs['config']
inputdict = kwargs['inputdict']
dev = kwargs['dev']
id = kwargs['id']
opaque_args = kwargs['hopaque']
class DeletePreProcessor(yang.SessionPreProcessor):
def processBeforeReserve(self, session):
operations = session.getOperations()
"""Add any move operations for Deletion"""
log('operations: %s' % (operations))
class CreatePreProcessor(yang.SessionPreProcessor):
def processBeforeReserve(self, session):
operations = session.getOperations()
"""Add any move operations for creation"""
log('operations: %s' % (operations))
| [
"sebastien.pouplin@tatacommunications.com"
] | sebastien.pouplin@tatacommunications.com |
b6eb339f28b7a108352ff5513b214bf00503b35a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_thwacks.py | 33dacda03f2cfd3b1bc002c12760d507191c50c8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _THWACKS():
def __init__(self,):
self.name = "THWACKS"
self.definitions = thwack
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['thwack']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4c3040efe710b9369d66dc92db6caf602906b56b | 4efad5834a063b959f2d8e36ee35f64cd8fd67a2 | /Calculator/calc.py | 338804970abfb5954fc6c6456b7a5508258acb3e | [] | no_license | DX9807/General-Practice-Code | e99133f5773754bad0aaf7820a5e1fdd2b4d5794 | efd79bdaf8756e35258b5b94eb4f9fe0d73ac361 | refs/heads/master | 2020-05-23T11:46:15.418345 | 2019-05-15T03:38:24 | 2019-05-15T03:38:24 | 186,743,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | # -*- coding: utf-8 -*-
from tkinter import *
from math import *
def evaluate(event):
res.configure(text = "Result: " + str(eval(entry.get())))
w = Tk()
w.title('Calculator')
Label(w, text="Your Expression:").pack()
entry = Entry(w)
entry.bind("<Return>", evaluate)
entry.pack()
res = Label(w)
res.pack()
w.mainloop()
| [
"sub.yadav19@gmail.com"
] | sub.yadav19@gmail.com |
5c8485a5bc3a860a119d61a8cbd8642212ad959c | 03f9b8bdea312636afb4df3737b55cb0cc4b21ff | /MyCalendarIII.py | 6e43a4ad8b174489a88cdb656b06e7c0bf6d1da3 | [] | no_license | ellinx/LC-python | f29dd17bbe15407ba0d06ad68386efdc9a343b56 | 9190d3d178f1733aa226973757ee7e045b7bab00 | refs/heads/master | 2021-06-01T15:21:24.379811 | 2020-10-29T04:37:07 | 2020-10-29T04:37:07 | 132,704,788 | 1 | 1 | null | 2019-05-15T03:26:11 | 2018-05-09T05:13:26 | Python | UTF-8 | Python | false | false | 2,459 | py | """
Implement a MyCalendarThree class to store your events.
A new event can always be added.
Your class will have one method, book(int start, int end).
Formally, this represents a booking on the half open interval [start, end),
the range of real numbers x such that start <= x < end.
A K-booking happens when K events have some non-empty intersection
(ie., there is some time that is common to all K events.)
For each call to the method MyCalendar.book,
return an integer K representing the largest integer such that there exists a K-booking in the calendar.
Your class will be called like this:
MyCalendarThree cal = new MyCalendarThree();
MyCalendarThree.book(start, end)
Example 1:
MyCalendarThree();
MyCalendarThree.book(10, 20); // returns 1
MyCalendarThree.book(50, 60); // returns 1
MyCalendarThree.book(10, 40); // returns 2
MyCalendarThree.book(5, 15); // returns 3
MyCalendarThree.book(5, 10); // returns 3
MyCalendarThree.book(25, 55); // returns 3
Explanation:
The first two events can be booked and are disjoint, so the maximum K-booking is a 1-booking.
The third event [10, 40) intersects the first event, and the maximum K-booking is a 2-booking.
The remaining events cause the maximum K-booking to be only a 3-booking.
Note that the last event locally causes a 2-booking, but the answer is still 3 because
eg. [10, 20), [10, 40), and [5, 15) are still triple booked.
Note:
1. The number of calls to MyCalendarThree.book per test case will be at most 400.
2. In calls to MyCalendarThree.book(start, end), start and end are integers in the range [0, 10^9].
"""
class MyCalendarThree:

    def __init__(self):
        # Sorted multisets of interval boundaries, one entry per booking.
        self.starts = []
        self.ends = []

    def book(self, start: 'int', end: 'int') -> 'int':
        """Record [start, end) and return the maximum overlap K seen so far."""
        bisect.insort_left(self.starts, start)
        bisect.insort_left(self.ends, end)
        # Sweep both sorted boundary lists, tracking how many bookings are
        # active; an end matching a start cancels it, so half-open
        # intervals [a, b) and [b, c) never count as overlapping.
        best = active = 0
        si = ei = 0
        while si < len(self.starts):
            if self.starts[si] == self.ends[ei]:
                si += 1
                ei += 1
            elif self.starts[si] < self.ends[ei]:
                si += 1
                active += 1
                if active > best:
                    best = active
            else:
                ei += 1
                active -= 1
        return best
# Your MyCalendarThree object will be instantiated and called as such:
# obj = MyCalendarThree()
# param_1 = obj.book(start,end)
| [
"ellin.xll@gmail.com"
] | ellin.xll@gmail.com |
0269176044b3fe706b1e0ca8440f7c035674c75d | 2a4c0528a078e6dc9306c0e5b1afc32c1c633bdf | /pabi_track_change/models/asset.py | e429a06164aafbd79b543a5a58b8e7071895f0f7 | [] | no_license | newtratip/pb2_addons | 329a1c7a9e1bc2fe689a370c8e0a1c744c3944ed | 8328bb9d3803c04a58593da3fe0e643ee4988732 | refs/heads/master | 2020-12-02T17:47:42.079259 | 2017-07-31T09:33:41 | 2017-07-31T09:33:41 | 96,419,366 | 0 | 0 | null | 2017-07-06T10:37:03 | 2017-07-06T10:37:03 | null | UTF-8 | Python | false | false | 2,134 | py | # -*- coding: utf-8 -*-
from openerp import models, fields
class AccountAsset(models.Model):
    """Enable chatter change-tracking on ``account.asset`` fields.

    Re-declares existing fields with ``track_visibility='onchange'`` so
    that every value change is logged in the record's message thread.
    No new columns or behavior are introduced.
    """
    _inherit = 'account.asset'

    # Identification
    name = fields.Char(track_visibility='onchange')
    code = fields.Char(track_visibility='onchange')
    code2 = fields.Char(track_visibility='onchange')
    parent_id = fields.Many2one(track_visibility='onchange')
    status = fields.Many2one(track_visibility='onchange')

    # Delivery / ownership dimensions
    deliver_to = fields.Char(track_visibility='onchange')
    deliver_date = fields.Date(track_visibility='onchange')
    section_id = fields.Many2one(track_visibility='onchange')
    project_id = fields.Many2one(track_visibility='onchange')
    invest_asset_id = fields.Many2one(track_visibility='onchange')
    invest_construction_id = fields.Many2one(track_visibility='onchange')
    # profile_type = fields.Selection(track_visibility='onchange')

    # Depreciation parameters
    method_time = fields.Selection(track_visibility='onchange')
    method_number = fields.Integer(track_visibility='onchange')
    method_period = fields.Selection(track_visibility='onchange')
    days_calc = fields.Boolean(track_visibility='onchange')
    method = fields.Selection(track_visibility='onchange')
    prorata = fields.Boolean(track_visibility='onchange')

    # Ownership / procurement references
    owner_section_id = fields.Many2one(track_visibility='onchange')
    owner_project_id = fields.Many2one(track_visibility='onchange')
    purchase_request_id = fields.Many2one(track_visibility='onchange')
    asset_purchase_method_id = fields.Many2one(track_visibility='onchange')
    pr_requester_id = fields.Many2one(track_visibility='onchange')
    date_request = fields.Date(track_visibility='onchange')
    doc_request_id = fields.Many2one(track_visibility='onchange')
    responsible_user_id = fields.Many2one(track_visibility='onchange')

    # Physical location and warranty details
    location_id = fields.Many2one(track_visibility='onchange')
    room = fields.Char(track_visibility='onchange')
    serial_number = fields.Char(track_visibility='onchange')
    warranty_start_date = fields.Date(track_visibility='onchange')
    warranty_expire_date = fields.Date(track_visibility='onchange')
    note = fields.Text(track_visibility='onchange')
| [
"kittiu@gmail.com"
] | kittiu@gmail.com |
8a2686c511fb30c9a613a925374a9a4ced4278ab | cc21a6071fde01a99f252097209a27854cbf149d | /api/views/account.py | ebb5230b8ea3e9d95fded470decaa5e53159da55 | [] | no_license | faker-DemoKing/ptdevops | 6939c9106f307602defad79db1d143b7aae927b9 | 54e00ca01925ae9ef6f3235cb0e481e679bdd79f | refs/heads/master | 2023-02-23T02:48:51.301269 | 2020-07-12T08:00:31 | 2020-07-12T08:00:31 | 231,424,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,068 | py | # -*- coding:utf-8 -*-
import datetime
import jwt
import uuid
import time
from flask import abort
from flask import current_app
from flask import request
from flask import session
from flask_login import login_user, logout_user
from api.models import User, Role
from api.resource import APIView
from api.libs import human_datetime, args_required
from api.libs.cache import UserCache
from api.libs.perm import auth_abandoned
from api.libs.perm.crud import UserCRUD
class LoginView(APIView):
    """Authenticate a user and hand out an API access token."""
    url_prefix = "/login"

    # Decorators: reject the request early unless both keys are present
    # in request.values.
    @args_required("username")
    @args_required("password")
    @auth_abandoned
    def post(self):
        """Validate credentials and return token/profile data.

        Aborts with 403 for a disabled account, an unknown user, or a
        bad password; three consecutive failures deactivate the account.
        """
        # Accept either "username" or "email" as the login identifier.
        username = request.values.get("username") or request.values.get("email")
        password = request.values.get("password")
        user, authenticated = User.query.authenticate(username, password)
        log_type = request.values.get('type')
        x_real_ip = request.headers.get('x-real-ip', '')
        if user and not user.is_active:
            return abort(403,"账户已被系统禁用")
        if not user:
            return abort(403, "User <{0}> does not exist".format(username))
        if not authenticated:
            # Count consecutive failed attempts; lock the account after 3.
            value = UserCache.get_count_error(username)
            if value >= 3:
                if user and user.is_active:
                    user.is_active = False
                    user.save()
                return abort(403,"账户已被禁用")
            return abort(403, "invalid username or password")
        role = Role.get_by(id=user.id, first=True, to_dict=True)
        if log_type == 'ldap':
            pass
            # LDAP login is not implemented yet.
        else:
            # Only users that have not been soft-deleted may log in.
            if user and user.deleted_by is None:
                return self.handle_user_info(user, x_real_ip, role)

    def handle_user_info(self, user, x_real_ip, role):
        """Store session data, refresh the access token and build the reply."""
        session["user"] = dict(id=user.id,
                               username=user.username,
                               nickname=user.nickname,
                               role=role)
        # Successful login: reset the failed-attempt counter.
        UserCache.del_count_error(user.username)
        # Reuse the existing token while it is well-formed and unexpired;
        # otherwise mint a fresh 32-char hex token valid for 8 hours.
        token_isvalid = user.access_token and len(user.access_token) == 32 and user.token_expired >= time.time()
        access_token = user.access_token if token_isvalid else uuid.uuid4().hex
        token_expired = time.time() + 8 * 60 * 60
        last_login = human_datetime()
        last_ip = x_real_ip
        UserCRUD.update(user.id,access_token=access_token,token_expired=token_expired,
                        last_login=last_login,last_ip=last_ip)
        login_user(user)
        # NOTE(review): the reply uses user.access_token, which may still
        # be the pre-update value unless UserCRUD.update mutates this
        # instance -- verify against UserCRUD's implementation.
        return self.jsonify({
            "access_token" : user.access_token,
            "nickname" : user.nickname,
            "is_supper" : user.is_supper,
            "has_real_ip" : True if x_real_ip else False,
            "permissions" : [] if user.is_supper else user.page_perms})
class LogoutView(APIView):
    """Terminate the current user's session."""
    url_prefix = "/logout"

    @auth_abandoned
    def get(self):
        # Clear the Flask-Login session; always reports success.
        logout_user()
        return self.jsonify(error='')
"you@example.com"
] | you@example.com |
7f133d76c321fa4f16e06bd2e0dc664c92d5e192 | 8140a910c709030d9c5394bc2c9ba5c30eda6063 | /34-day/8堵塞进程池创建多进程.py | af22b37587acfe1b43f28d38e88de33fd756d6b3 | [] | no_license | liuruitao/2mouth | 417139ecfd58d0220a95b78967247c4795ebc022 | 854e9322cea73462bb2fe08b3bdf6c33b73a40cd | refs/heads/master | 2020-03-13T10:49:54.016344 | 2018-05-25T04:25:37 | 2018-05-25T04:25:37 | 131,090,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | from multiprocessing import Pool
import os,time
# Child-process task.
def work(msg):
    """Print this worker's pid three times, sleeping one second between prints.

    ``msg`` is accepted (the pool passes the task number) but unused.
    """
    repetitions = 3
    while repetitions > 0:
        time.sleep(1)
        print('略略略略略pid=%d'%os.getpid())
        repetitions -= 1
# Pool with a single worker process.
p = Pool(1)
# Parent process: apply() is the *blocking* variant, so each of the
# five tasks below runs to completion before the next is submitted.
for i in range(1,6):
    print(i)
    p.apply(work,(i,))
p.close()  # Close the pool: no further tasks may be submitted.
p.join()  # Wait for every child in p to finish; must come after close().
| [
"2590011467@qq.com"
] | 2590011467@qq.com |
7f9e577c677e2cc873547a311a56bb700869ac08 | 29fc11636bf5053feb113ad837ec42ffe3f09bfd | /Terra_2/Test_17/Test_17.py | 76fe6850638e9694cfd48d93bb37057699a34407 | [] | no_license | deyh2020/Bugs4Q | 8446db4a8efcf3541ba740b7d658b6812d56fe3e | b6e306e12bd1c5fdec126655ad008c386340ab57 | refs/heads/master | 2023-05-31T03:41:28.576877 | 2021-06-17T15:08:03 | 2021-06-17T15:08:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,397 | py | # Importing needed libraries
# T1 (energy relaxation time) measurement on a real IBMQ backend.
#
# For each delay (a growing chain of identity gates) the qubit is
# excited with an X gate, left idle, then measured; the excited-state
# probability is fitted to a decaying exponential to extract T1.
#
# NOTE(review): IBMQ.load_accounts()/qiskit.mapper belong to a pre-0.1x
# Qiskit API -- confirm the installed Qiskit version before running.
from qiskit import *
from qiskit.mapper import Layout
import numpy as np
import matplotlib as mp
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

# Enable use of real device
IBMQ.load_accounts()
backend_exp = IBMQ.get_backend('ibmq_16_melbourne')

for u in range(0,1): # It isn't important, it is because I measured all qubit's T1
    u_st = str(u)
    file1 = 'T1__raw_qubit_' + u_st + '.txt'
    out1 = open( file1, 'w' )
    out1.write('# This is the qubit\'s ' + u_st +' T1 raw data \n' )
    circuit = []
    q = QuantumRegister(2, 'q') #Only changing this and the layout makes it works
    c1 = ClassicalRegister(1, 'c')
    qc = QuantumCircuit(q)
    mz = QuantumCircuit(q,c1)
    lay = Layout({ (q,0) : 0, (q,1):1 })
    # Exciting the qubit
    qc.x(q[0])
    qc.barrier(q)
    # Measurement on Z-axis
    mz.measure(q[0],c1[0])
    # Waiting time ( 30*0.12 us each iteration)
    for i in range(50):
        identity = QuantumCircuit(q,c1)
        identity.barrier(q)
        for k in range(i*30):
            identity.iden(q)
        identity.barrier(q)
        circuit.append(qc+identity+mz)
    # Running the experiment
    jobZ = execute(circuit, backend_exp, initial_layout=lay, shots=1024)
    out1.write('# N° id_gates Z-Measure Error \n')
    Result = jobZ.result() # Taking the results
    counts = []
    for i in range(50):
        counts.append(Result.get_counts(circuit[i]) )
    # Preparing the lists to make fits
    y = []
    x = []
    for i in range(50):
        # Probability of reading |1> after the i-th delay.
        py = counts[i]['1']/1024
        x.append(i*30*0.12)
        y.append( py )
        out1.write(str(i*30) + ' '+ str(py) + '\n')
    out1.write( '\n')

    def expo(x, amp, slope, high):
        """Decaying exponential model: amp * exp(-slope * x) + high."""
        y = amp*np.exp(-slope*x)+high
        return y

    x = np.array(x)
    y = np.array(y)
    # Fix: the original script had `err_y = np.array(err_y)` here, but
    # err_y was never defined anywhere and was never used afterwards --
    # it raised a NameError at runtime, so the line is removed.
    params , paramscov = curve_fit(expo, x, y,p0=[1,0.02,0] )
    a =np.sqrt(np.diag(paramscov))
    # T1 is the inverse of the fitted decay slope.
    out1.write('The raw T1 is ' + str(1/params[1])+ ' +- ' + str(a[1]/params[1]) + '\n')
    out1.close()
    plt.figure()
    plt.plot(x, expo(x, *params),
             label='Raw fitted function')
    plt.plot(x , y, 'ro', label= 'data')
    plt.xlabel( ' Time [us] ')
    plt.ylabel(' Probability of being in the excited state ')
    plt.legend()
    plt.savefig('plot_q_'+ u_st+ '_raw.png')
| [
"zpz2393247079@gmail.com"
] | zpz2393247079@gmail.com |
dfdd6e32ff248a403f2c0a76add35000a470baec | daeb851843a55ca3f34008765ebf8ff18c0d1ecd | /testcase/count.py | 4bcb93623d73e3b91c4309cfd4a5a4fe8a39aba1 | [] | no_license | Cola1995/Py | f5066df0ef5b60183f5e5e2ec6b77b2be536e7f8 | bab82d8f5aae18e612dbe9bcd38f0d4260463b16 | refs/heads/master | 2020-04-08T06:30:36.212074 | 2018-12-27T06:14:53 | 2018-12-27T06:14:53 | 159,099,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | class Count():
def __init__(self,a,b):
self.a=int(a)
self.b=int(b)
def add(self):
return self.a+self.b
def sub(self):
return self.a-self.b
| [
"991571566@qq.com"
] | 991571566@qq.com |
def threeSum(nums):
    """Return all unique triplets [a, b, c] from nums with a + b + c == 0.

    Each triplet is reported in non-decreasing order and no triplet is
    repeated. (The previous hash-set scan emitted duplicate triplets,
    e.g. [0, 0, 0, 0] produced [0, 0, 0] twice, and its element order
    disagreed with the documented expected output.)
    """
    nums = sorted(nums)  # work on a sorted copy; input is left untouched
    result = []
    n = len(nums)
    for i in range(n - 2):
        # Skip duplicate anchors so each triplet is emitted once.
        if i > 0 and nums[i] == nums[i - 1]:
            continue
        lo, hi = i + 1, n - 1
        while lo < hi:
            total = nums[i] + nums[lo] + nums[hi]
            if total < 0:
                lo += 1
            elif total > 0:
                hi -= 1
            else:
                result.append([nums[i], nums[lo], nums[hi]])
                # Skip duplicates of both inner elements.
                while lo < hi and nums[lo] == nums[lo + 1]:
                    lo += 1
                while lo < hi and nums[hi] == nums[hi - 1]:
                    hi -= 1
                lo += 1
                hi -= 1
    return result
nums = [1, -2, 1, 0, 5]
# Demo run; the only zero-sum triplet in this list is (-2, 1, 1).
print(threeSum(nums))
# [[-2, 1, 1]] | [
"przemyslowiec@gmail.com"
] | przemyslowiec@gmail.com |
e9802599baf0a78324272736724668e48bd8117c | ad5b72656f0da99443003984c1e646cb6b3e67ea | /tests/layer_tests/tensorflow_tests/test_tf_Select.py | 209464d5fa2f986a1cb3b36417b48c2efb3d5d19 | [
"Apache-2.0"
] | permissive | novakale/openvino | 9dfc89f2bc7ee0c9b4d899b4086d262f9205c4ae | 544c1acd2be086c35e9f84a7b4359439515a0892 | refs/heads/master | 2022-12-31T08:04:48.124183 | 2022-12-16T09:05:34 | 2022-12-16T09:05:34 | 569,671,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,334 | py | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import tensorflow as tf
from common.tf_layer_test_class import CommonTFLayerTest
class TestSelect(CommonTFLayerTest):
    """Layer test for the TensorFlow ``Select`` (element-wise where) op."""

    def _prepare_input(self, inputs_info):
        # Generate a random boolean mask plus two random float operands
        # matching the shapes declared by the test graph.
        assert 'cond' in inputs_info, "Test error: inputs_info must contain `cond`"
        assert 'x' in inputs_info, "Test error: inputs_info must contain `x`"
        assert 'y' in inputs_info, "Test error: inputs_info must contain `y`"
        cond_shape = inputs_info['cond']
        x_shape = inputs_info['x']
        y_shape = inputs_info['y']
        inputs_data = {}
        inputs_data['cond'] = np.random.randint(0, 2, cond_shape).astype(bool)
        inputs_data['x'] = np.random.randint(-100, 100, x_shape).astype(np.float32)
        inputs_data['y'] = np.random.randint(-100, 100, y_shape).astype(np.float32)
        return inputs_data

    def create_select_net(self, cond_shape, x_shape, y_shape):
        """Build a TF1 graph with a single Select node; return its GraphDef."""
        tf.compat.v1.reset_default_graph()
        # Create the graph and model
        with tf.compat.v1.Session() as sess:
            cond = tf.compat.v1.placeholder(tf.bool, cond_shape, 'cond')
            x = tf.compat.v1.placeholder(tf.float32, x_shape, 'x')
            y = tf.compat.v1.placeholder(tf.float32, y_shape, 'y')
            tf.raw_ops.Select(condition=cond, x=x, y=y, name='select')
            tf.compat.v1.global_variables_initializer()
            tf_net = sess.graph_def
        # No reference/frozen model is provided (second tuple element).
        return tf_net, None

    # Condition shapes cover scalar, per-batch and full-shape broadcasting.
    test_data_basic = [
        dict(cond_shape=[], x_shape=[3, 2, 4], y_shape=[3, 2, 4]),
        dict(cond_shape=[2], x_shape=[2, 4, 5], y_shape=[2, 4, 5]),
        dict(cond_shape=[2, 3, 4], x_shape=[2, 3, 4], y_shape=[2, 3, 4]),
    ]

    @pytest.mark.parametrize("params", test_data_basic)
    @pytest.mark.precommit_tf_fe
    @pytest.mark.nightly
    def test_select_basic(self, params, ie_device, precision, ir_version, temp_dir,
                          use_new_frontend, use_old_api):
        if not use_new_frontend:
            pytest.skip("Select tests are not passing for the legacy frontend.")
        self._test(*self.create_select_net(**params),
                   ie_device, precision, ir_version, temp_dir=temp_dir,
                   use_new_frontend=use_new_frontend, use_old_api=use_old_api)
| [
"noreply@github.com"
] | novakale.noreply@github.com |
f8abca3621fabcd2c3e8ae480e216266dc0013ac | c7f82dd826556a1e6e1dbbfd6f422f4b71a582e5 | /coordinator/migrations/0010_task_answer.py | 8bf30e19d903e81b7d26f3bf2c37a5fbd9435036 | [] | no_license | jabykuniyil/SPS | 0c88eec3ae7ba4aadac39ae741abbcd224be0f78 | 78bd2151ebb6f4e0ecae5858985d6142dfa34339 | refs/heads/main | 2023-04-25T05:14:55.936405 | 2021-05-11T02:51:18 | 2021-05-11T02:51:18 | 352,079,044 | 1 | 0 | null | 2021-05-11T02:51:19 | 2021-03-27T13:25:10 | HTML | UTF-8 | Python | false | false | 394 | py | # Generated by Django 3.1.7 on 2021-04-02 13:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional free-text ``answer`` field to the ``task`` model."""

    dependencies = [
        ('coordinator', '0009_auto_20210331_0939'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='answer',
            # blank/null: tasks may exist without an answer yet.
            field=models.TextField(blank=True, null=True),
        ),
    ]
| [
"mohdjabiran112@gmail.com"
] | mohdjabiran112@gmail.com |
064d4b82a9e356abd10e43abe3af041a7343ca51 | 9f9f4280a02f451776ea08365a3f119448025c25 | /plans/hsppw/lcut_hsp-l_030_pwde_mlpr_hs.py | 719e108eeea27e4ed2fefbaf64a6d946c18576cf | [
"BSD-2-Clause"
] | permissive | dbis-uibk/hit-prediction-code | 6b7effb2313d2499f49b2b14dd95ae7545299291 | c95be2cdedfcd5d5c27d0186f4c801d9be475389 | refs/heads/master | 2023-02-04T16:07:24.118915 | 2022-09-22T12:49:50 | 2022-09-22T12:49:50 | 226,829,436 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,157 | py | """Plan using all features."""
import os.path

from dbispipeline.evaluators import CvEpochEvaluator
from sklearn.neural_network import MLPRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler

import hit_prediction_code.common as common
from hit_prediction_code.dataloaders import ClassLoaderWrapper
from hit_prediction_code.dataloaders import CutLoaderWrapper
from hit_prediction_code.dataloaders import EssentiaLoader
import hit_prediction_code.evaluations as evaluations
from hit_prediction_code.models.pairwise import PairwiseOrdinalModel
from hit_prediction_code.result_handlers import print_results_as_json
from hit_prediction_code.transformers.label import compute_hit_score_on_df

# Location of the preprocessed dataset files.
PATH_PREFIX = 'data/hit_song_prediction_msd_bb_lfm_ab/processed'

# The continuous hit score is discretized into this many ordinal classes.
number_of_classes = 30

# Data loader: Essentia features labelled with a Yang hit score that is
# recomputed from last.fm counts on load, then cut into
# `number_of_classes` bins and exposed as class labels.
dataloader = ClassLoaderWrapper(
    wrapped_loader=CutLoaderWrapper(
        wrapped_loader=EssentiaLoader(
            dataset_path=os.path.join(
                PATH_PREFIX,
                'hsp-l_acousticbrainz.parquet',
            ),
            features=[
                *common.all_no_year_list(),
            ],
            label='yang_hit_score',
            nan_value=0,
            # Recompute the hit score from play/listener counts on load.
            data_modifier=lambda df: compute_hit_score_on_df(
                df,
                pc_column='lastfm_playcount',
                lc_column='lastfm_listener_count',
                hit_score_column='yang_hit_score',
            ),
        ),
        number_of_bins=number_of_classes,
    ),
    labels=list(range(number_of_classes)),
)

# Model: min-max feature scaling followed by a pairwise ordinal wrapper
# around a five-hidden-layer MLP regressor.
pipeline = Pipeline([
    ('scale', MinMaxScaler()),
    ('model',
     PairwiseOrdinalModel(
         wrapped_model=MLPRegressor(
             hidden_layer_sizes=(256, 128, 128, 128, 64),
             verbose=True,
         ),
         pairs_factor=3.,
         threshold_type='average',
         pair_strategy='random',
         pair_encoding='delta',
         threshold_sample_training=False,
     )),
])

# Cross-validated evaluation, scored after every training epoch.
evaluator = CvEpochEvaluator(
    cv=evaluations.cv(),
    scoring=evaluations.metrics.ordinal_classifier_scoring(),
    scoring_step_size=1,
)

result_handlers = [
    print_results_as_json,
]
| [
"mikevo-uibk@famv.net"
] | mikevo-uibk@famv.net |
7feea9a9aa23b577292caba64cbf92772ea3efd9 | dbe7e1d9fe2457c26f83095d941e4392e7d30f8c | /django_dashboard/api/file_uploadmixin/mixins.py | 1f81e9322b2ad581d2e774323c84718e28ede401 | [
"MIT"
] | permissive | keepexploring/smartbiogas | 51e124735ec04bc6b87a8ac75c66c83de6865001 | ca663435b05666113e3c0cb55e6f087c61497208 | refs/heads/master | 2022-12-12T10:42:37.412038 | 2018-07-18T15:29:04 | 2018-07-18T15:29:04 | 111,402,799 | 0 | 0 | MIT | 2022-12-08T00:56:54 | 2017-11-20T11:39:05 | JavaScript | UTF-8 | Python | false | false | 504 | py | class MultipartResource(object):
def deserialize(self, request, data, format=None):
if not format:
format = request.META.get('CONTENT_TYPE', 'application/json')
if format == 'application/x-www-form-urlencoded':
return request.POST
if format.startswith('multipart'):
data = request.POST.copy()
data.update(request.FILES)
return data
return super(MultipartResource, self).deserialize(request, data, format) | [
"joel.c@scene.community"
] | joel.c@scene.community |
580802df8cf0d97cbc8ed7db052e85c3bb7a9ebc | 88ed6ed99589f7fb8e49aeb6c15bf0d51fe14a01 | /048_rotate-image.py | c59926d8aca23517a63ae5b816afe19165f99d58 | [] | no_license | ryeLearnMore/LeetCode | 3e97becb06ca2cf4ec15c43f77447b6ac2a061c6 | 04ec1eb720474a87a2995938743f05e7ad5e66e3 | refs/heads/master | 2020-04-07T19:02:43.171691 | 2019-06-23T15:09:19 | 2019-06-23T15:09:19 | 158,634,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | #!/usr/bin/env python
#coding:utf-8
#@author: rye
#@time: 2019/3/1 20:11
# 此题没想出来
'''
tips:
1. 这种题就属于想不出来就真不会做,看一眼答案就知道方法了。
2.
temp = x
x = y == x, y = y, x 这种交换变量不需要temp,可以直接交换,算是python特有吧
y = temp
'''
class Solution(object):
    def rotate(self, matrix):
        """Rotate the n x n *matrix* 90 degrees clockwise, in place.

        Strategy: flip the rows top-to-bottom, then mirror across the
        main diagonal; the composition is a clockwise quarter turn.
        The mutated matrix is also returned for convenience.
        """
        size = len(matrix)
        # Vertical flip: swap row pairs from the outside in.
        for top in range(size // 2):
            bottom = size - 1 - top
            matrix[top], matrix[bottom] = matrix[bottom], matrix[top]
        # Transpose across the main diagonal.
        for row in range(size):
            for col in range(row + 1, size):
                matrix[row][col], matrix[col][row] = matrix[col][row], matrix[row][col]
        return matrix
if __name__ == '__main__':
    matrix = [
        [1, 2, 3],
        [4, 5, 6],
        [7, 8, 9]
    ]
    # Expected output: [[7, 4, 1], [8, 5, 2], [9, 6, 3]]
    print(Solution().rotate(matrix))
"noreply@github.com"
] | ryeLearnMore.noreply@github.com |
560b61e15d96f3a36f5534cc85943cfa6a941de4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03137/s736209100.py | 96b79c1420985c72762d81a816292f29b0639806 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | n, m = map(int,input().split())
# Coordinates to visit (n and m are read from the preceding input line).
X = list(map(int,input().split()))
X.sort()
# Gaps between adjacent sorted coordinates.
dist = []
for i in range(m-1):
    dist.append(X[i+1]-X[i])
dist.sort()
# With n pieces the n-1 largest gaps never need to be walked, so the
# answer is the sum of the m-n smallest adjacent gaps (0 when n >= m).
ans = 0
for i in range(max(m-n,0)):
    ans += dist[i]
print(ans)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8808fc36608a3b4e5f17bafa2899d90877aea194 | d309503377a3e76c42e8155f8c8597fb07abc914 | /store/migrations/0005_alter_customer_user.py | ef6551e894c961627ecb05330d2e81820eb99cd5 | [] | no_license | Ibrahimkhalill/Online-Shopping-System | 9fc6152e8406d83793e2e093926496952518addb | 08e7f17e59f0e8b69c885248ccd35e69f9b37b5f | refs/heads/main | 2023-06-01T21:55:57.774047 | 2021-06-14T14:06:01 | 2021-06-14T14:06:01 | 376,842,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | # Generated by Django 3.2 on 2021-04-24 06:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make ``Customer.user`` an optional one-to-one link to the auth user."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('store', '0004_auto_20210422_0004'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customer',
            name='user',
            # blank/null: guest customers exist without a login account.
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customer', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
0ee2a045669d8e0a892f4476760d4beb8b7e41ae | 73b5d880fa06943c20ff0a9aee9d0c1d1eeebe10 | /tinyos-2.x-contrib/wsu/tools/simx/simx/test/act/MViz/.svn/text-base/harness.py.svn-base | a109e67b6b1ec78f803d8639d4927deb334fab0f | [] | no_license | x3ro/tinyos-legacy | 101d19f9e639f5a9d59d3edd4ed04b1f53221e63 | cdc0e7ba1cac505fcace33b974b2e0aca1ccc56a | refs/heads/master | 2021-01-16T19:20:21.744228 | 2015-06-30T20:23:05 | 2015-06-30T20:23:05 | 38,358,728 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,353 | #
# Sample file for TReact harness. (Python 2 script.)
#
# This assumes that the TOSSIM module has been built with sim-sf,
# TossimSync and PseudoSensor support following the convention where
# the directory (or link) "simx" refers to the top-level of the SimX
# structure. If this is not true, then this file must be appropriately
# customized.
#
# (Required) Add SimX to module search path.
import sys
sys.path.append("simx")
# (Required for setup) Load additional objects -- requires compiled support!
#from PseudoSensor import PseudoSensor
from pseudo_sensor import PseudoSensorSupport
# (Highly recommended) Variables scheme must be loaded to enable
# monitoring.
from tinyos.tossim.TossimApp import NescApp
print "loading variables from 'app.xml'..."
app = NescApp()
vars = app.variables.variables()
# (Required for this setup) TossimTopo provides topology control
# extension to Tossim. The normal Tossim object may be used but it
# will require disabling some "core" modules below.
print "starting tossim-topo"
import TOSSIM
from tossim_topo import Topo, TopoGen
tossim = Topo.TossimTopo(TOSSIM, vars if vars is not None else [])
# Convenience aliases: tossim_topo for service registration, T for
# interactive use.
tossim_topo = tossim
T = tossim
# (Required for this setup) Schedule python events "in mote
# time". This provides the periodic time reporting. It is also
# extremely useful to setup events that occur when a node is booted.
from tossim_evt.TossimEvent import *
tossim_event = TossimEvent(tossim)
# (Optional) Enable to forward packets from mote serial to SF. This
# is not strictly required for TReact but allows another application
# to simultaneously monitor normal mote output.
from TOSSIM import SerialForwarder
sf = SerialForwarder(9002)
# (Required for this setup) Manage simulation time. This can be
# disabled but it requires disabling a "core" module.
from TossimSync import TossimSync
from act import TimeControl
time_control = TimeControl.Remote(tossim, TossimSync(), tossim_event)
# (Required) Start packet injector.
print "starting injector"
from simsf_inject.SFInject import SFInject
injector = SFInject(9091).start()
# (Required) Create the main reactor. For consistency with modules,
# the global variable 'R' should be used.
print "creating reactor"
from act.ReactR import ReactR
R = ReactR(injector=injector, globals=globals())
# (Required unless explicitly passed in to loadMod()) Allows modules to
# automatically "discover" services.
R.service.register(time_control, "TimeControl")
R.service.register(tossim_topo, "Tossim", "TossimTopo")
# (Highly recommended) Load standard modules. Disabling these modules
# will result in reduced TReactViz capabilities.
print "loading standard modules"
R.loadMod('Core')
R.loadMod('Topo')
R.loadMod('Watch')
R.loadMod('Time')
# (Recommended) Periodically display the time on the local console.
# ping reschedules itself one simulated second in the future each call.
def ping(t):
    print "at simtime %s" % t.timeStr()
    tossim_event.runAt(t.time() + t.ticksPerSecond(), ping)
tossim_event.runAt(0, ping)
# (Optional) Pause the simulation until started manually.
print "starting simulation 'paused'."
time_control.stop()
# (Required) Run the primary event loop. The order presented below
# (run simulation cycle, process TossimEvent, process TReact, process
# SF) is how TReact has been tested. Other configurations should work.
while 1:
    time_control.runSim()
    tossim_event.processEvents()
    R.process(locals=locals())
    sf.process()
| [
"lucas@x3ro.de"
] | lucas@x3ro.de | |
f3a3923b7c9d13fdf70dab59131571c25daa4fee | 8fd28b248511f42ad8732ca1e574aada33908376 | /tools/data/activitynet/tsn_feature_extraction.py | 377c6cd08883fe7ad8c654a1882828eba750e622 | [
"Apache-2.0"
] | permissive | vt-vl-lab/video-data-aug | 28bd175535cab1444055502389c8f5d7d75e4bd2 | 01667cdbd1b952f2510af3422beeeb76e0d9e15a | refs/heads/main | 2023-09-01T02:36:40.034893 | 2021-07-21T01:31:42 | 2021-07-21T01:31:42 | 352,920,339 | 29 | 6 | Apache-2.0 | 2021-07-21T01:29:36 | 2021-03-30T08:06:54 | Jupyter Notebook | UTF-8 | Python | false | false | 4,997 | py | import argparse
import os.path as osp
import pickle
import mmcv
import numpy as np
import torch
from mmaction.datasets.pipelines import Compose
from mmaction.models import build_model
def parse_args():
parser = argparse.ArgumentParser(description='Extract TSN Feature')
parser.add_argument('--data-prefix', default='', help='dataset prefix')
parser.add_argument('--output-prefix', default='', help='output prefix')
parser.add_argument(
'--data-list',
help='video list of the dataset, the format should be '
'`frame_dir num_frames output_file`')
parser.add_argument(
'--frame-interval',
type=int,
default=16,
help='the sampling frequency of frame in the untrimed video')
parser.add_argument('--clip-len', type=int, default=1, help='clip length')
parser.add_argument('--modality', default='RGB')
parser.add_argument('--ckpt', help='checkpoint for feature extraction')
parser.add_argument(
'--part',
type=int,
default=0,
help='which part of dataset to forward(alldata[part::total])')
parser.add_argument(
'--total', type=int, default=1, help='how many parts exist')
args = parser.parse_args()
return args
def main():
args = parse_args()
args.is_rgb = args.modality == 'RGB'
args.input_format = 'NCHW' if args.is_rgb else 'NCHW_Flow'
rgb_norm_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_bgr=False)
flow_norm_cfg = dict(mean=[128, 128], std=[128, 128])
args.img_norm_cfg = rgb_norm_cfg if args.is_rgb else flow_norm_cfg
args.f_tmpl = 'image_{:05d}.jpg' if args.is_rgb else 'flow_{}_{:05d}.jpg'
args.in_channels = args.clip_len * (3 if args.is_rgb else 2)
# max batch_size for one forward
args.batch_size = 200
# define the data pipeline for Untrimmed Videos
data_pipeline = [
dict(
type='UntrimmedSampleFrames',
clip_len=args.clip_len,
frame_interval=args.frame_interval),
dict(type='FrameSelector'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=256),
dict(type='Normalize', **args.img_norm_cfg),
dict(type='FormatShape', input_format=args.input_format),
dict(type='Collect', keys=['imgs'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data_pipeline = Compose(data_pipeline)
# define TSN R50 model, the model is used as the feature extractor
model = dict(
type='Recognizer2D',
backbone=dict(
type='ResNet',
depth=50,
in_channels=args.in_channels,
norm_eval=False),
cls_head=dict(
type='TSNHead',
num_classes=200,
in_channels=2048,
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1)))
model = build_model(model, test_cfg=dict(average_clips=None))
# load pretrained weight into the feature extractor
state_dict = torch.load(args.ckpt)['state_dict']
model.load_state_dict(state_dict)
model = model.cuda()
model.eval()
data = open(args.data_list).readlines()
data = [x.strip() for x in data]
data = data[args.part::args.total]
# enumerate Untrimmed videos, extract feature from each of them
prog_bar = mmcv.ProgressBar(len(data))
for item in data:
frame_dir, length, output_file = item.split()
frame_dir = osp.join(args.data_prefix, frame_dir)
output_file = osp.join(args.output_prefix, output_file)
assert output_file.endswith('.pkl')
length = int(length)
# prepare a psuedo sample
tmpl = dict(
frame_dir=frame_dir,
total_frames=length,
filename_tmpl=args.f_tmpl,
modality=args.modality)
sample = data_pipeline(tmpl)
imgs = sample['imgs']
shape = imgs.shape
# the original shape should be N_seg * C * H * W, resize it to N_seg *
# 1 * C * H * W so that the network return feature of each frame (No
# score average among segments)
imgs = imgs.reshape((shape[0], 1) + shape[1:])
imgs = imgs.cuda()
def forward_data(model, data):
# chop large data into pieces and extract feature from them
results = []
start_idx = 0
num_clip = data.shape[0]
while start_idx < num_clip:
with torch.no_grad():
part = data[start_idx:start_idx + args.batch_size]
feat = model.forward(part, return_loss=False)
results.append(feat)
start_idx += args.batch_size
return np.concatenate(results)
feat = forward_data(model, imgs)
with open(output_file, 'wb') as fout:
pickle.dump(feat, fout)
prog_bar.update()
if __name__ == '__main__':
main()
| [
"zouyuliang123@gmail.com"
] | zouyuliang123@gmail.com |
c6ceb8e9e1eb4840e1499580e913a35506efceea | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5634697451274240_1/Python/DUPI/pancake.py | aee02c393f84367804ba225bff7d3e8a46e7950f | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | import os
os.chdir('/Users/mac/OneDrive/competitions/codejam 2016/pancake')
##extra_need
def flip(s):
new_s = ''
for c in s[::-1]:
if c == '-':
new_s += '+'
else:
new_s += '-'
return new_s
def getT(s):
if len(s) == 0:
return 0
if (s[-1] == '+'):
return getT(s[:-1])
if (s[0] == '-'):
return 1+getT(flip(s))
i = 1
while (i <= len(s)):
if s[-i] == '+':
break
i += 1
return 1+getT(flip(s[:-(i-1)]) + s[-(i-1):])
##read test.in
test_f = open('./tests/B-large.in.txt')
out_f = open('./tests/B-large.out.txt', 'w+')
test_num = None
test_case_num = 1
for line in test_f:
if test_num == None:
test_num = int(line)
else:
s = line.strip()
T = getT(s)
#print '{}, {}, {}'.format(max_s, audiences, extra_need)
out_f.write('Case #{}: {}\n'.format(test_case_num, T))
test_case_num += 1
test_f.close()
out_f.close() | [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
dc1821661c226c9f7e7709ebe1ad750feedb1743 | 589b6d5553cf51f95053fb3247eae16ec037fdb3 | /bsidesns/api/email.py | 2f32177806e43eec6962f1d03a9b6c5bc59ef12d | [
"BSD-2-Clause"
] | permissive | bsidesns/backend | 97b6cd95236574fce3453a6c500b39ca93d55470 | ba8173f2b81210a561b203973eb48d5c124870b1 | refs/heads/master | 2020-12-08T07:00:33.338692 | 2020-09-13T10:23:39 | 2020-09-13T10:23:39 | 232,920,190 | 0 | 3 | BSD-2-Clause | 2020-09-13T10:23:40 | 2020-01-09T22:42:26 | Python | UTF-8 | Python | false | false | 2,121 | py | from email.mime.text import MIMEText
from flask import current_app
from flask_jwt_extended import get_jwt_identity
from flask_smorest import Blueprint, abort
from freenit.api.methodviews import ProtectedMethodView
from ..models.email import Email
from ..models.event import Event
from ..models.talk import Talk
from ..models.user import User
from ..schemas.email import EmailSchema
blueprint = Blueprint('email', 'email')
@blueprint.route('', endpoint='email')
class EmailAPI(ProtectedMethodView):
@blueprint.arguments(EmailSchema)
@blueprint.response(EmailSchema)
def post(self, args):
"""Send email"""
user_id = get_jwt_identity()
try:
adminUser = User.get(id=user_id)
except User.DoesNotExist:
abort(404, message='No such user')
if not adminUser.admin:
abort(403, message='Only admins can send messages')
baseQuery = User.select()
email = Email(**args)
msg = MIMEText(email.message, 'plain', 'utf-8')
if email.to == 'all':
query = baseQuery
elif email.to == 'admins':
query = baseQuery.where(User.admin)
elif email.to == 'presenters':
events = Event.select().order_by(Event.year.desc())
if events.count() == 0:
abort(409, message='At least one event should exist')
query = [
talk.user for talk in events[0].talks.where(Talk.published)
]
elif email.to == 'volunteers':
query = baseQuery.where(User.volunteer)
else:
abort(409, message='No such user group')
if email.fromAddress == 'office':
msg['From'] = current_app.config.get('FROM_EMAIL', None)
elif email.fromAddress == 'me':
msg['From'] = adminUser.email
else:
abort(409, message='Invalid "fromAddress" parameter')
msg['Subject'] = email.subject
msg['To'] = [user.email for user in query]
try:
current_app.sendmail(msg)
except Exception:
pass
return email
| [
"meka@tilda.center"
] | meka@tilda.center |
0c90b6aeeb0695aad1dee415b816e6578ee10256 | ac10761e842fbde677db3c78a74400845e08904a | /lib/python/classytags/utils.py | 5f1fca2d0efcf1693b662826350962234dd0ec77 | [] | no_license | mozilla/moztrap-vendor-lib | 6d7704394ef1db72ee0514eefc25d9fcb191c4ca | d0007ae11fad91157b99feb985d19b16170fcb09 | refs/heads/master | 2023-07-03T17:19:42.477593 | 2019-03-29T15:55:04 | 2019-03-29T15:55:04 | 2,573,756 | 1 | 5 | null | 2020-06-08T14:44:16 | 2011-10-14T01:59:31 | Python | UTF-8 | Python | false | false | 1,914 | py | from copy import copy
import re
class NULL:
    """
    Internal sentinel type used to distinguish "no value supplied" from an
    explicit ``None`` argument (``None`` may itself be a legitimate input).
    """
class TemplateConstant(object):
    """
    Wraps a literal value so it can stand in for a template variable:
    ``resolve`` hands back the stored value, ignoring the context.
    """
    def __init__(self, value):
        # Quoted string literals are stored without their quote marks.
        if isinstance(value, basestring):
            value = value.strip('"\'')
        self.value = value

    def __repr__(self):  # pragma: no cover
        return '<TemplateConstant: %s>' % repr(self.value)

    def resolve(self, context):
        return self.value
class StructuredOptions(object):
    """
    Options split into breakpoint sections, consumed one section at a time.

    ``current_breakpoint`` starts at ``None`` and advances through the
    breakpoint list via ``shift_breakpoint``.
    """
    def __init__(self, options, breakpoints, blocks):
        self.options = options
        # Copy the mutable inputs so popping breakpoints does not mutate
        # the caller's lists.
        self.breakpoints = copy(breakpoints)
        self.blocks = copy(blocks)
        self.current_breakpoint = None
        self.next_breakpoint = (
            self.breakpoints.pop(0) if self.breakpoints else None
        )

    def shift_breakpoint(self):
        """
        Advance one section: the pending breakpoint becomes current and
        the following one (if any) is queued up.
        """
        self.current_breakpoint = self.next_breakpoint
        self.next_breakpoint = (
            self.breakpoints.pop(0) if self.breakpoints else None
        )

    def get_arguments(self):
        """
        Return a shallow copy of the arguments for the current breakpoint.
        """
        return copy(self.options[self.current_breakpoint])
_re1 = re.compile('(.)([A-Z][a-z]+)')
_re2 = re.compile('([a-z0-9])([A-Z])')


def get_default_name(name):
    """
    Convert a "CamelCase" name into its "camel_case" equivalent.
    """
    # Two passes: first split before capitalized words, then before any
    # remaining uppercase letter that follows a lowercase letter or digit.
    underscored = _re1.sub(r'\1_\2', name)
    underscored = _re2.sub(r'\1_\2', underscored)
    return underscored.lower()
def mixin(parent, child, attrs=None):
    """
    Dynamically build a class named "<Parent>x<Child>" whose bases are
    ``(child, parent)`` — the child takes precedence in the MRO.

    ``attrs`` supplies extra class attributes.  The default is now a
    ``None`` sentinel instead of the shared mutable ``{}`` default, which
    could leak state between calls if ever mutated.
    """
    if attrs is None:
        attrs = {}
    return type(
        '%sx%s' % (parent.__name__, child.__name__),
        (child, parent),
        attrs
    )
| [
"carl@oddbird.net"
] | carl@oddbird.net |
cfafc275f648155b4e71aba1353a25676b1219f2 | ef11cb7a2ee550e4fb95be46cd4d67d6cc230787 | /python/Monthly/Jan2021/test/test_generatedarraymax.py | 5ba47b789da01d6c4140d4aa834fa9d8f883585f | [] | no_license | Hilldrupca/LeetCode | 44b32161743ba982ea5e3fe593ff8a27c96e9350 | c6d600bc74afd14e00d4f0ffed40696192b229c3 | refs/heads/master | 2023-03-31T22:21:17.967663 | 2021-04-07T16:18:17 | 2021-04-07T16:18:17 | 288,544,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | import unittest, sys
sys.path.append('..')
from generatedarraymax import Solution
class TestGeneratedArrayMax(unittest.TestCase):
    """Unit tests for Solution.getMaximumGenerated."""

    def setUp(self):
        self.s = Solution()

    def test_get_maximum_generated(self):
        # Pairs of (n, expected maximum of the generated array).
        for n, expected in [(0, 0), (2, 1), (3, 2), (7, 3), (100, 21)]:
            self.assertEqual(self.s.getMaximumGenerated(n), expected)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"hilldrupca@gmail.com"
] | hilldrupca@gmail.com |
57ca23a00332ec85ac7d2f7703791c659beb2268 | 8a914e1deebfd4dc72d339a22ab1dfba90ce8c9d | /tagging/migrations/0001_initial.py | 374101e3d198ca3d30ec535dce4b4cfb59ab03bb | [
"BSD-3-Clause",
"MIT"
] | permissive | Fantomas42/django-tagging | 451a14e0be792536087d22106f0d03f574438cd4 | f3622e62112c3ecc89eabb2512b1b3dd2e5e6ca0 | refs/heads/develop | 2023-01-22T22:47:40.984435 | 2020-03-06T18:00:00 | 2020-03-06T18:00:00 | 15,029,579 | 78 | 74 | NOASSERTION | 2023-01-15T15:40:38 | 2013-12-08T19:16:36 | Python | UTF-8 | Python | false | false | 1,844 | py | from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    """Initial schema for the tagging app: ``Tag`` and ``TaggedItem``."""
    # Requires the contenttypes framework for the generic relation.
    dependencies = [
        ('contenttypes', '0001_initial'),
    ]
    operations = [
        # Tag: a unique, indexed 50-character name.
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(
                    verbose_name='ID', serialize=False,
                    auto_created=True, primary_key=True)),
                ('name', models.CharField(
                    unique=True, max_length=50,
                    verbose_name='name', db_index=True)),
            ],
            options={
                'ordering': ('name',),
                'verbose_name': 'tag',
                'verbose_name_plural': 'tags',
            },
        ),
        # TaggedItem: generic link (content_type + object_id) tying a Tag
        # to any model instance.
        migrations.CreateModel(
            name='TaggedItem',
            fields=[
                ('id', models.AutoField(
                    verbose_name='ID', serialize=False,
                    auto_created=True, primary_key=True)),
                ('object_id', models.PositiveIntegerField(
                    verbose_name='object id', db_index=True)),
                # NOTE(review): on_delete=SET_NULL normally requires
                # null=True on the field, which is not set here — deleting
                # a referenced ContentType/Tag would raise an integrity
                # error.  Confirm whether CASCADE was intended.
                ('content_type', models.ForeignKey(
                    verbose_name='content type',
                    on_delete=models.SET_NULL,
                    to='contenttypes.ContentType')),
                ('tag', models.ForeignKey(
                    related_name='items', verbose_name='tag',
                    on_delete=models.SET_NULL,
                    to='tagging.Tag')),
            ],
            options={
                'verbose_name': 'tagged item',
                'verbose_name_plural': 'tagged items',
            },
        ),
        # A tag may be applied to a given object at most once.
        migrations.AlterUniqueTogether(
            name='taggeditem',
            unique_together=set([('tag', 'content_type', 'object_id')]),
        ),
    ]
| [
"fantomas42@gmail.com"
] | fantomas42@gmail.com |
e193ef894df3ec750366febd0ae92963d6695551 | 0ab2922da69fd3a92f56caba43df72b564a4aa64 | /wharfci/views.py | 146904690125cb9f09565e3bc50220d8d5a9904b | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | honza/wharf-ci | 056af31de41903475fb074a55c6ac51a7ba3600c | 9d49c4684509c48948a6f692975e93000651d181 | refs/heads/master | 2021-01-22T11:10:41.377096 | 2013-08-05T14:17:40 | 2013-08-05T14:17:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | from django.template import RequestContext
from django.shortcuts import render_to_response, redirect
from django.core.urlresolvers import reverse
def index(request):
    """Landing page: authenticated users are sent to the dashboard,
    everyone else gets the public index template."""
    if request.user.is_authenticated():
        return redirect(reverse('dashboard'))
    return render_to_response(
        'index.html', {},
        context_instance=RequestContext(request))
| [
"ejhazlett@gmail.com"
] | ejhazlett@gmail.com |
a74de07e1bc6a998e4ead1f98a61c65156f4edd2 | aa27e08efbb3044de42a24f744fde8fdf37044fa | /replace-all-digits-with-characters-2.py | 08cb83ab6516f780241e767bb3b918f2a0092588 | [] | no_license | rpivo/leetcode-answers | 9a4bddec259139c9459c00d888effdbb35eacada | c7951344b9ce40ef092f4fe3ec34463430ba899c | refs/heads/main | 2021-12-18T06:53:47.288559 | 2021-12-10T22:36:54 | 2021-12-10T22:36:54 | 194,540,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | # define class Solution
class Solution:
    def replaceDigits(self, s: str) -> str:
        """Replace every digit in *s* with a shifted copy of the letter
        that precedes it.

        The input alternates letter, digit, letter, digit, ...; each digit
        ``d`` at an odd index is replaced by ``chr(ord(s[i-1]) + d)``.

        Improvements over the original:
        - the per-digit linear scan of the alphabet (O(26) each) is
          replaced by O(1) ``ord``/``chr`` arithmetic;
        - the quadratic ``res += ...`` string build is replaced by a list
          plus a single ``join``.
        """
        out = []
        for i, ch in enumerate(s):
            if i % 2:
                # Odd positions hold digits: shift the preceding letter.
                out.append(chr(ord(s[i - 1]) + int(ch)))
            else:
                # Even positions hold letters: copied through unchanged.
                out.append(ch)
        return ''.join(out)
| [
"ryanpivovar@gmail.com"
] | ryanpivovar@gmail.com |
55535f770b9b1f8648454121ab50b2e5e107e8f7 | a394b1053f018ff8be63221c61682df03af4937b | /osf/migrations/0081_merge_20180212_0949.py | ddcf9023e719c00781ea8d46f71fbbd719d701be | [
"Apache-2.0",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"MIT",
"AGPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] | permissive | RCOSDP/RDM-osf.io | 81b11d9511f6248ec9bccb6c586b54a58429e1e7 | 5d632eb6d4566d7d31cd8d6b40d1bc93c60ddf5e | refs/heads/develop | 2023-09-01T09:10:17.297444 | 2023-08-28T04:59:04 | 2023-08-28T04:59:04 | 123,298,542 | 12 | 24 | Apache-2.0 | 2023-09-12T08:58:28 | 2018-02-28T14:46:05 | Python | UTF-8 | Python | false | false | 327 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-12 15:49
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration: unifies two diverged migration branches.

    It performs no schema operations — it only records that both parent
    migrations (0080 and 0079) precede anything built on top of it.
    """
    dependencies = [
        ('osf', '0080_ensure_schemas'),
        ('osf', '0079_merge_20180202_1206'),
    ]
    operations = [
    ]
| [
"sloria1@gmail.com"
] | sloria1@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.