blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
410cb4081e89dc3eb59cc3fbda59b68ae7844275 | 2a67dc681af4c4b9ef7a8e18c2ff75377dc5b44f | /aws.autoscaling.Group.mixed-instances-policy-python/__main__.py | 2c3d5382d28b5d51dfca25f592d0fd7afc8785c2 | [] | no_license | ehubbard/templates-aws | e323b693a18234defe6bd56ffcc64095dc58e3a1 | 2ae2e7a5d05490078017fed6d132dcdde1f21c63 | refs/heads/master | 2022-11-17T13:53:14.531872 | 2020-07-10T21:56:27 | 2020-07-10T21:56:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | import pulumi
import pulumi_aws as aws
# Launch template used as the base of the mixed-instances policy.
# NOTE(review): `data` is not defined in this file — presumably injected by the
# Pulumi example harness (converted Terraform `data "aws_ami"` lookup); confirm.
example_launch_template = aws.ec2.LaunchTemplate("exampleLaunchTemplate",
    image_id=data["aws_ami"]["example"]["id"],
    instance_type="c5.large",
    name_prefix="example")
# Single-instance autoscaling group that overrides the template's instance
# type with weighted c4.large / c3.large alternatives.
example_group = aws.autoscaling.Group("exampleGroup",
    availability_zones=["us-east-1a"],
    desired_capacity=1,
    max_size=1,
    min_size=1,
    mixed_instances_policy={
        "launch_template": {
            "launchTemplateSpecification": {
                "launchTemplateId": example_launch_template.id,
            },
            "override": [
                {
                    "instance_type": "c4.large",
                    "weightedCapacity": "3",
                },
                {
                    "instance_type": "c3.large",
                    "weightedCapacity": "2",
                },
            ],
        },
    })
| [
"jvp@justinvp.com"
] | jvp@justinvp.com |
ab019ff8f44d3ba691aebdc11c86e282acb76fe4 | 1fc45a47f0e540941c87b04616f3b4019da9f9a0 | /src/sentry/api/endpoints/project_release_commits.py | ee4a0f82aea772d83f6c1a4e66e53739b4b45f9a | [
"BSD-2-Clause"
] | permissive | seukjung/sentry-8.15.0 | febc11864a74a68ddb97b146cc1d2438ef019241 | fd3cab65c64fcbc32817885fa44df65534844793 | refs/heads/master | 2022-10-28T06:39:17.063333 | 2018-01-17T12:31:55 | 2018-01-17T12:31:55 | 117,833,103 | 0 | 0 | BSD-3-Clause | 2022-10-05T18:09:54 | 2018-01-17T12:28:13 | Python | UTF-8 | Python | false | false | 1,641 | py | from __future__ import absolute_import
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint, ProjectReleasePermission
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import Release, ReleaseCommit
class ProjectReleaseCommitsEndpoint(ProjectEndpoint):
    # API endpoint listing the commits associated with one project release.
    doc_section = DocSection.RELEASES
    # Release-scoped permission check (stricter than plain project access).
    permission_classes = (ProjectReleasePermission,)
    def get(self, request, project, version):
        """
        List a Project Release's Commits
        ````````````````````````````````
        Retrieve a list of commits for a given release.
        :pparam string organization_slug: the slug of the organization the
                                          release belongs to.
        :pparam string project_slug: the slug of the project to list the
                                     release files of.
        :pparam string version: the version identifier of the release.
        :auth: required
        """
        try:
            release = Release.objects.get(
                organization_id=project.organization_id,
                projects=project,
                version=version,
            )
        except Release.DoesNotExist:
            # Unknown version for this project -> 404 via API exception.
            raise ResourceDoesNotExist
        # select_related avoids N+1 queries when serializing commit authors.
        queryset = ReleaseCommit.objects.filter(
            release=release,
        ).select_related('commit', 'commit__author')
        return self.paginate(
            request=request,
            queryset=queryset,
            order_by='order',
            on_results=lambda x: serialize([rc.commit for rc in x], request.user),
        )
| [
"jeyce@github.com"
] | jeyce@github.com |
8b110ee3b6013db04f1448091c8518136433b53e | 7d800b5d51e47bf59ef5788bd1592d9c306d14c3 | /orders/migrations/0023_order.py | acac20f5ce2afce9ee8126e02bb1517ca7612693 | [] | no_license | paulitstep/cafe-website | 2e28e5a218f58c491cd65c6dc2deee22158765e2 | 29d077b1924871941ef95d5715412ae64ff4e892 | refs/heads/master | 2020-04-19T04:06:02.902468 | 2019-02-11T11:08:54 | 2019-02-11T11:08:54 | 167,953,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | # Generated by Django 2.1.1 on 2018-11-13 20:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header): introduces the Order model
    # linking a cart to an OrderInfo record with status and total price.
    dependencies = [
        ('cart', '0015_cart_cartitem'),
        ('orders', '0022_auto_20181113_2242'),
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_id', models.CharField(default='ABC', max_length=120, unique=True, verbose_name='ID заказа')),
                ('status', models.CharField(choices=[('Started', 'Started'), ('Finished', 'Finished')], default='Started', max_length=120, verbose_name='Статус заказа')),
                ('price_total', models.DecimalField(decimal_places=2, default=0.0, max_digits=6, verbose_name='Итоговая сумма')),
                ('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cart.Cart', verbose_name='Корзина')),
                ('order_info', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.OrderInfo')),
            ],
        ),
    ]
| [
"pasha-mo1@rambler.ru"
] | pasha-mo1@rambler.ru |
4590727dcc68468074628fc79b7672369506aef3 | 4f5675d7d1cdb9dfb42cabcb9b154f08ed4f92e6 | /sensor/manage4.py | 19f8fd405b6dd84beea27a75322d39a9f510f808 | [] | no_license | block1b/twisted_rpc | 674934a85313761fabc48d8529f326a7f6958a29 | 5fa8f0f8ce07f99a280c4f8d81362532a443440a | refs/heads/master | 2020-03-19T18:07:02.738900 | 2018-06-10T06:54:16 | 2018-06-10T06:54:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | # coding=utf-8
from init_pro import ConnectionFactory, ConnectionProtocol, Connector
import sys
from twisted.internet import reactor
from twisted.python import log
import random
import json
import datetime
log.startLogging(sys.stdout)
class CreateConnection(object):
    """
    Sensor-side client: maintains an outbound long-lived TCP connection
    to the platform and pushes one simulated reading per second.
    """
    def __init__(self, host, port):
        # Factory/protocol wiring for the 'ConnectionPlatform' peer.
        self.long_connection = ConnectionFactory('ConnectionPlatform', ConnectionProtocol)
        self.long_connection.onlineProtocol = Connector
        self.host = host
        self.port = port
    def create_long_connection(self):
        """Establish the long connection; reconnect while offline, and once
        online send one JSON-encoded sample on each tick."""
        if not len(Connector.get_online_protocol('ConnectionPlatform')):
            print u"未连接........................"
            reactor.connectTCP(self.host, self.port, self.long_connection)
            print u"正在重连........................"
        else:
            Connector.get_online_protocol('ConnectionPlatform')[0].transport.write(json.dumps(self.pack_data()))
            print u"已发送采集的到的数据....................."
        reactor.callLater(1, self.create_long_connection) # reschedule itself every second (retry/send loop)
    @staticmethod
    def pack_data():
        # Build one fake temperature sample for sensor id '4'.
        info = dict()
        info["id"] = '4'
        info["entry_time"] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        info["entry_data"] = random.uniform(-10, 50)  # simulated reading in [-10, 50)
        info["info"] = "null"
        info['type'] = 'temp'
        info['name'] = 'jian'
        return info
# Script entry point: connect to the local platform and run the reactor loop.
create_connection = CreateConnection('127.0.0.1', 5002)
create_connection.create_long_connection()
reactor.run()
| [
"1115064450@qq.com"
] | 1115064450@qq.com |
971f11bd2b61abcdffd848b6073412f022007d3a | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/detection/SSD_for_PyTorch/configs/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco.py | 21352a07ca8f6642d491dc9a5b683b2b4483d2d5 | [
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 853 | py | # Copyright 2022 Huawei Technologies Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py'
# Swap the base config's backbone for a Caffe-style ResNet-101.
model = dict(
    pretrained='open-mmlab://detectron2/resnet101_caffe',
    backbone=dict(depth=101))
# learning policy
# 3x schedule: LR decays at epochs 28 and 34; train for 36 epochs total.
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
| [
"chenyong84@huawei.com"
] | chenyong84@huawei.com |
40af72dc5ee87cc11f60cf3e10c57cc8617d2fbf | 8d2e5b5ea408579faa699c09bdbea39e864cdee1 | /ufora/util/TypeAwareComparison.py | ec50d2969a9792de4b1a5196ed0ac3933d706cc9 | [
"dtoa",
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | iantuioti/ufora | 2218ef4c7e33c171268ce11458e9335be7421943 | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | refs/heads/master | 2021-01-17T17:08:39.228987 | 2017-01-30T16:00:45 | 2017-01-30T16:00:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | # Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def typecmp(self, other, ownTypeComp):
    '''
    Compares objects of varying types. If they are different types it returns the lexical
    comparison of their type string. Otherwise it uses the provided type comparison callable

    Fix: the original called the Python-2-only builtin `cmp` on the class
    objects themselves, which raises NameError on Python 3 and did not
    actually compare the type *strings* as documented. We now compare the
    class names lexically and return the conventional -1/0/1.
    '''
    if self.__class__ != other.__class__:
        own_name = self.__class__.__name__
        other_name = other.__class__.__name__
        # cmp()-style three-way result without the removed builtin.
        return (own_name > other_name) - (own_name < other_name)
    return ownTypeComp(self, other)
| [
"braxton.mckee@gmail.com"
] | braxton.mckee@gmail.com |
11723a0259f19eee03f62469fb9728c3ae122d34 | ef02d3f3c5dbb2f1bf1b5a8b419d44efc9eb9cf1 | /src/scraping/migrations/0006_auto_20210227_1647.py | 99f1ec19ef18215e8c6e4f0d0c9e3b87564d93d2 | [] | no_license | Kirill67tyar/scraping_service | d0e60c1a07e1455b007b80908a0145ac26c38ba4 | e1c8ed8275d20e104e912e48bbc3d2b3a4e889ff | refs/heads/master | 2023-03-23T00:49:11.641175 | 2021-03-05T14:49:57 | 2021-03-05T14:49:57 | 335,295,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # Generated by Django 3.1.6 on 2021-02-27 13:47
from django.db import migrations
import jsonfield.fields
import scraping.models
class Migration(migrations.Migration):
    # Auto-generated: re-declares Error.data as a JSONField whose default is
    # produced by the callable scraping.models.get_default_data_errors.
    dependencies = [
        ('scraping', '0005_auto_20210224_1842'),
    ]
    operations = [
        migrations.AlterField(
            model_name='error',
            name='data',
            field=jsonfield.fields.JSONField(default=scraping.models.get_default_data_errors),
        ),
    ]
| [
"50547951+Kirill67tyar@users.noreply.github.com"
] | 50547951+Kirill67tyar@users.noreply.github.com |
bbd30238c51b35c915d81fddbe1772cad0af452e | 97bd006a2a9885f1733bead1fcb6cb59b7779c43 | /experiments/naive_bayes/rbf_if/parameterization.py | 04084965224f5451e61ed1fff97b8b3fa8453c45 | [] | no_license | foxriver76/master-thesis-rslvq | d3524176d05e553b7cca5a37f580ef2cf9efc805 | 00a6c0b4bc3289fe30ead7d7c4e1ae41984dcf8b | refs/heads/master | 2022-03-07T03:55:19.244161 | 2018-10-15T08:28:41 | 2018-10-15T08:28:41 | 139,425,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 14 08:34:18 2018
@author: moritz
"""
from sklearn.model_selection import GridSearchCV
from rslvq_stream import RSLVQ
import json
from skmultiflow.data.random_rbf_generator_drift import RandomRBFGeneratorDrift
"""Subset of 30k"""
stream = RandomRBFGeneratorDrift(change_speed=0.001)
stream.prepare_for_use()
X, y = stream.next_sample(batch_size=30000)
clf = RSLVQ()
"""Specify possible params"""
ppt_range = [1, 2, 4, 8, 10, 12, 20]
sigma_range = [1.0, 2.0, 3.0, 5.0]
param_grid = [{'sigma': sigma_range,
'gradient_descent': ['SGD'],
'prototypes_per_class': ppt_range}]
gs = GridSearchCV(estimator=clf,
param_grid=param_grid,
scoring='accuracy',
cv=10,
n_jobs=-1)
gs = gs.fit(X, y)
"""Print best params"""
print(gs.best_score_)
print(gs.best_params_)
"""Test classifier"""
clf = gs.best_estimator_
clf.fit(X, y)
print('Korrektklassifizierungsraten: \
%.3f' % clf.score(X, y))
accuracy = clf.score(X, y)
"""Write results to File"""
file = open('../../param_search_results.txt', 'a+')
file.write(50 * '-')
file.write('\nAGRAWAL - RSLVQ SGD\n')
file.write('\nBest score: %.5f ' % (gs.best_score_))
file.write('\nBest param: %s' % (json.dumps(gs.best_params_)))
file.write('\nTest Accuracy: %.5f \n\n' % (accuracy))
file.close() | [
"moritz.heusinger@gmail.com"
] | moritz.heusinger@gmail.com |
ca20ef76e3f12077d99b30818c4300e8d0ed1e2b | e6f02bafae8842cae7b45efc2d4719c1a931f68d | /python/scripts/serve_dir_single_file.py | 92b87c034eb909ca4638e839c4e3aca61ae2d15f | [] | no_license | minhhoangcn4/nuxt-dashboard-template | 71d7f54462edb96ddb09667fee9c5a4ea76583ca | 7f15e1c79122ad45e398cc319d716d2a439b8365 | refs/heads/master | 2022-04-18T05:43:56.254971 | 2020-04-19T16:52:32 | 2020-04-19T16:52:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py |
import os
import build_frontend as bf
# Serve the repository root (two directories above this script) on port 8080.
here = os.path.dirname(os.path.abspath(__file__))
folder_root = os.path.join(here, '..', '..')
c = bf.ServeSingleFile(folder_root=folder_root,
                       port=8080)
c.run()
| [
"olivier.borderies@gmail.com"
] | olivier.borderies@gmail.com |
283c78dd3f670c65171d58c2b10825d443747d41 | 71fa0d6b0cf81dcd68fb4b5f43bb9fb7026df170 | /code/lamost/mass_age/paper_plots/write_paper_table.py | d7d649c0c8413ca2e487c363c9867f887718c740 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | aceilers/TheCannon | 31354db3df2495cea1e938090079f74f316a5bbf | c140a0c9555bb98956b013d1a9d29eb94ed4c514 | refs/heads/master | 2020-12-25T22:29:57.147937 | 2017-07-18T08:22:46 | 2017-07-18T08:22:46 | 64,823,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,707 | py | import pyfits
import numpy as np
from sigfig import round_sig
def ndec(num):
    """Return the number of digits after the decimal point in str(num).

    Used downstream as the precision argument to np.round(). Fix: values
    whose string form has no '.' (e.g. ints) have zero decimal digits; the
    original returned the length of the *whole* string in that case.
    """
    text = str(num)
    if '.' not in text:
        return 0
    return len(text.split('.')[-1])
def fmt_id(id_val):
    r"""LaTeX-escape underscores in a LAMOST ID (e.g. 'a_b' -> 'a\_b').

    Fix: the original concatenated only the first two '_'-separated pieces,
    silently truncating IDs with more than one underscore and raising
    IndexError on IDs with none. Escaping every underscore is equivalent
    for the two-piece case and safe for the rest.
    """
    return r"\_".join(id_val.split('_'))
# Load the catalog and keep only stars flagged as inside the Martig range.
# NOTE(review): absolute path is machine-specific; parameterize before reuse.
inputf = pyfits.open("/Users/annaho/Data/LAMOST/Mass_And_Age/Ho2016b_Catalog.fits")
dat = inputf[1].data
inputf.close()
choose = dat['in_martig_range']
lamost_id = dat['LAMOST_ID'][choose]
lamost_id = np.array([fmt_id(val) for val in lamost_id])
ra = dat['RA'][choose]
dec = dat['Dec'][choose]
teff = dat['Teff'][choose]
logg = dat['logg'][choose]
mh = dat['MH'][choose]
cm = dat['CM'][choose]
nm = dat['NM'][choose]
am = dat['AM'][choose]
ak = dat['Ak'][choose]
mass = dat['Mass'][choose]
logAge = dat['logAge'][choose]
# Round the label columns to publication precision (significant figures).
teff = np.array([int(val) for val in teff])
logg = np.array([round_sig(val,3) for val in logg])
mh = np.array([round_sig(val, 3) for val in mh])
cm = np.array([round_sig(val, 3) for val in cm])
nm = np.array([round_sig(val, 3) for val in nm])
am = np.array([round_sig(val, 3) for val in am])
ak = np.array([round_sig(val, 3) for val in ak])
mass = np.array([round_sig(val, 2) for val in mass])
logAge = np.array([round_sig(val, 2) for val in logAge])
teff_err = dat['Teff_err'][choose]
logg_err = dat['logg_err'][choose]
mh_err = dat['MH_err'][choose]
cm_err = dat['CM_err'][choose]
nm_err = dat['NM_err'][choose]
am_err = dat['AM_err'][choose]
ak_err = dat['Ak_err'][choose]
teff_scat = dat['Teff_scatter'][choose]
logg_scat = dat['logg_scatter'][choose]
mh_scat = dat['MH_scatter'][choose]
cm_scat = dat['CM_scatter'][choose]
nm_scat = dat['NM_scatter'][choose]
am_scat = dat['AM_scatter'][choose]
mass_err = dat['Mass_err'][choose]
logAge_err = dat['logAge_err'][choose]
snr = dat['SNR'][choose]
chisq =dat['Red_Chisq'][choose]
# Table 1: labels. Only the first 4 rows are written as a sample table.
content = '''\\begin{tabular}{cccccccccc}
\\tableline\\tableline
LAMOST ID & RA & Dec & \\teff\ & \logg\ & \mh\ & \cm\ & \\nm\ & \\alpham\ & \\ak\ \\\\
 & (deg) & (deg) & (K) & (dex) & (dex) & (dex) & (dex) & (dex) & mag \\\\
\\tableline
'''
outputf = open("paper_table.txt", "w")
outputf.write(content)
for i in range(0,4):
    outputf.write(
            # NOTE(review): row terminator here is '\\\ ' while the later
            # tables use '\\\\ ' — both evaluate to the same two backslashes,
            # but the inconsistency is worth normalizing.
            '%s & %s & %s & %s & %s & %s & %s & %s & %s & %s \\\ '
            %(lamost_id[i], np.round(ra[i], 2), np.round(dec[i], 2),
            teff[i], logg[i], mh[i], cm[i], nm[i], am[i], ak[i]))
            #int(teff[i]), round_sig(logg[i], 3), round_sig(mh[i], 3),
            #round_sig(cm[i], 3), round_sig(nm[i], 3), round_sig(am[i], 3),
            #round_sig(ak[i], 3)))
# Table 2: formal errors, rounded to the same decimal places as the labels.
content = '''\\tableline
\end{tabular}}
\end{table}
\\begin{table}[H]
\caption{
Continued from Table 1: Formal Errors}
{\scriptsize
\\begin{tabular}{cccccccc}
\\tableline\\tableline
LAMOST ID & $\sigma$(\\teff) & $\sigma$(\logg) & $\sigma$(\mh) & $\sigma$(\cm) & $\sigma$(\\nm) & $\sigma$(\\alpham) & $\sigma$(\\ak) \\\\
 & (K) & (dex) & (dex) & (dex) & (dex) & (dex) & (mag) \\\\
\\tableline
'''
outputf.write(content)
for i in range(0,4):
    outputf.write(
            '%s & %s & %s & %s & %s & %s & %s & %s \\\\ '
            %(lamost_id[i], int(teff_err[i]),
            np.round(logg_err[i], ndec(logg[i])),
            np.round(mh_err[i], ndec(mh[i])),
            np.round(cm_err[i], ndec(cm[i])),
            np.round(nm_err[i], ndec(nm[i])),
            np.round(am_err[i], ndec(am[i])),
            np.round(ak_err[i], ndec(ak[i]))))
# Table 3: estimated error (scatter) columns.
content = '''\\tableline
\end{tabular}}
\end{table}
\\begin{table}[H]
\caption{
Continued from Table 2: Estimated Error (Scatter)}
{\scriptsize
\\begin{tabular}{cccccccccc}
\\tableline\\tableline
LAMOST ID & $s$(\\teff) & $s$(\logg) & $s$(\mh) & $s$(\cm) & $s$(\\nm) & $s$(\\alpham) \\\\
 & (K) & (dex) & (dex) & (dex) & (dex) & (dex) \\\\
\\tableline
'''
outputf.write(content)
for i in range(0,4):
    outputf.write(
            '%s & %s & %s & %s & %s & %s & %s \\\\ '
            %(lamost_id[i], int(teff_scat[i]),
            np.round(logg_scat[i], ndec(logg[i])),
            np.round(mh_scat[i], ndec(mh[i])),
            np.round(cm_scat[i], ndec(cm[i])),
            np.round(nm_scat[i], ndec(nm[i])),
            np.round(am_scat[i], ndec(am[i]))))
# Table 4: mass/age with uncertainties, SNR and reduced chi-squared.
content = '''\\tableline
\end{tabular}}
\end{table}
\\begin{table}[H]
\caption{
Continued from Table 3}
{\scriptsize
\\begin{tabular}{ccccccc}
\\tableline\\tableline
LAMOST ID & Mass & log(Age) & $\sigma$(Mass) & $\sigma$(log(Age)) & SNR & Red. \\\\
 & ($M_\odot$) & dex & ($M_\odot$) & (dex) & & $\chi^2$ \\\\
\\tableline
'''
outputf.write(content)
for i in range(0,4):
    outputf.write(
            '%s & %s & %s & %s & %s & %s & %s \\\\ '
            %(lamost_id[i], mass[i], logAge[i],
            np.round(mass_err[i], ndec(mass_err[i])),
            np.round(logAge_err[i], ndec(logAge[i])),
            round_sig(snr[i], 3), round_sig(chisq[i], 2)))
content = '''\\tableline
\end{tabular}}
\end{table}
'''
outputf.write(content)
outputf.close()
| [
"annayqho@gmail.com"
] | annayqho@gmail.com |
f5ddb3679e63a10e89433aa31c76875a957f6882 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /9sN5tvXZjYCsKb4Mx_10.py | 87730daea0cefa4298f4002d9ce77ce7bf826233 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | """
Create a function that takes the volume of a cube and returns the length of
the cube's main diagonal, rounded to two decimal places.
### Examples
cube_diagonal(8) ➞ 3.46
cube_diagonal(343) ➞ 12.12
cube_diagonal(1157.625) ➞ 18.19
#### Notes
Use the `sqrt` function in the math module.
"""
import math
def cube_diagonal(volume):
    """Return the length of a cube's main (space) diagonal, rounded to two
    decimal places, given the cube's volume: diagonal = edge * sqrt(3)."""
    edge = pow(volume, 1 / 3)
    return round(edge * math.sqrt(3), 2)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
87e59e18201aa6e50de6d0578c21eb8a359dbb7c | 2ea49bfaa6bc1b9301b025c5b2ca6fde7e5bb9df | /contributions/Acksl/python/Data Structures/2016-09-15.py | ad51408136aa1bb6e996fb00b61d28170f669b81 | [] | no_license | 0x8801/commit | 18f25a9449f162ee92945b42b93700e12fd4fd77 | e7692808585bc7e9726f61f7f6baf43dc83e28ac | refs/heads/master | 2021-10-13T08:04:48.200662 | 2016-12-20T01:59:47 | 2016-12-20T01:59:47 | 76,935,980 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | Built-in `list` methods
`del` statement for lists
`bytes` type
`Dictionary` view objects
Get the most out of `int`s
"axel.dahlberg12@gmail.com"
] | axel.dahlberg12@gmail.com |
8c5030a179cc631854c7d96c7e2d1c6386015dbf | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2209/60752/238089.py | 1fbb44fa4a0aa3c0bafbae2da87777e5890a840f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,552 | py | #include <bits/stdc++.h>
#define ls q << 1      // left child of segment-tree node q
#define rs q << 1 | 1  // right child of segment-tree node q
using namespace std;
const int N = 3e5 + 11;
const int inf = 0x3f3f3f3f;
// s: text, c: current pattern; trie/ed/dep/fail/v: Aho-Corasick automaton;
// tr: segment tree of dp values f[]; g[i]: longest pattern ending at i.
char s[N], c[N];
int n, m, cnt, trie[N][26], ed[N], tr[N << 2];
int dep[N], f[N], g[N], v[N], fail[N];
queue<int> q;
// Insert the pattern stored in c[1..len] into the trie and mark its end node.
void insert() {
    int now = 0, len = strlen(c + 1);
    for (int i = 1; i <= len; i++) {
        int num = c[i] - 'a';
        if (!trie[now][num])
            trie[now][num] = ++cnt;
        dep[trie[now][num]] = dep[now] + 1;  // depth == prefix length
        now = trie[now][num];
    }
    ed[now] = 1;
}
// BFS construction of Aho-Corasick fail links; v[x] becomes the length of the
// longest dictionary word ending at node x (0 if none), inherited via fail[].
void makefail() {
    for (int i = 0; i < 26; i++)
        if (trie[0][i])
            q.push(trie[0][i]);
    while (!q.empty()) {
        int x = q.front();
        q.pop();
        if (ed[x])
            v[x] = dep[x];
        else
            v[x] = v[fail[x]];
        for (int i = 0; i < 26; i++) {
            if (trie[x][i])
                fail[trie[x][i]] = trie[fail[x]][i], q.push(trie[x][i]);
            else
                trie[x][i] = trie[fail[x]][i];  // goto-link compression
        }
    }
}
/* Segment tree: recompute node q's min from its two children. */ void update(int q) { tr[q] = min(tr[ls], tr[rs]); }
// Minimum of tr[] over positions [L, R]; node q covers [l, r]. inf if empty.
int query(int q, int l, int r, int L, int R) {
    if (l >= L && r <= R)
        return tr[q];
    int mid = l + r >> 1, re = inf;
    if (mid >= L)
        re = min(re, query(ls, l, mid, L, R));
    if (mid < R)
        re = min(re, query(rs, mid + 1, r, L, R));
    return re;
}
// Point assignment tr[x] = v, pulling minima up the tree on the way back.
void modify(int q, int l, int r, int x, int v) {
    if (l == r)
        return tr[q] = v, void();
    int mid = l + r >> 1;
    if (mid >= x)
        modify(ls, l, mid, x, v);
    else
        modify(rs, mid + 1, r, x, v);
    update(q);
}
// Fast signed-integer input via getchar (competitive-programming idiom).
int read() {
    int x = 0, f = 1;
    char ch = getchar();
    while (!isdigit(ch)) {
        if (ch == '-')
            f = -f;
        ch = getchar();
    }
    while (isdigit(ch)) {
        x = x * 10 + ch - 48;
        ch = getchar();
    }
    return x * f;
}
// DP over the text: f[i] = min number of dictionary words covering s[1..i].
// g[i] (via Aho-Corasick) is the longest word ending at i; the segment tree
// provides min f over the window [i - g[i], i - 1]. Prints -1 if impossible.
signed main() {
    m = read();
    scanf("%s", s + 1);
    n = strlen(s + 1);
    for (int i = 1; i <= m; i++) {
        scanf("%s", c + 1);
        insert();
    }
    makefail();
    int now = 0;
    // Run the text through the automaton, recording longest match per position.
    for (int i = 1; i <= n; i++) {
        int x = trie[now][s[i] - 'a'];
        g[i] = v[x];
        now = x;
    }
    memset(tr, 0x3f, sizeof(tr));
    for (int i = 1; i <= n; i++) {
        if (!g[i])
            f[i] = inf;
        else {
            if (g[i] == i)
                f[i] = 1;  // one word covers the whole prefix
            else
                f[i] = query(1, 1, n, i - g[i], i - 1) + 1;
        }
        modify(1, 1, n, i, f[i]);
    }
    printf("%d\n", f[n] >= inf ? -1 : f[n]);
    return 0;
} | [
"1069583789@qq.com"
] | 1069583789@qq.com |
f646f114ff492f1ee43d8edfa4022c398984be63 | 5c16b25d78823499d3a8b33a59636ce3b0923da1 | /articleapp/migrations/0001_initial.py | 7006451c2c3a7fda0d4f3cdbd5861c97b1908a4c | [] | no_license | noeul1114/gis_3ban_1 | 351b3dd5dd306a333df657bf9d1cdad0827f4161 | e25fac8922984c7a3b42d8f97ac8ce230d0224fb | refs/heads/master | 2023-08-26T14:41:27.970660 | 2021-10-06T06:59:00 | 2021-10-06T06:59:00 | 382,189,975 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | # Generated by Django 3.2.4 on 2021-08-03 01:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Article model with an
    # optional writer FK to the project's user model (SET_NULL on delete).
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, null=True)),
                ('image', models.ImageField(null=True, upload_to='article/')),
                ('content', models.TextField(null=True)),
                ('created_at', models.DateField(auto_now_add=True, null=True)),
                ('writer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='article', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"parkhyongsok@naver.com"
] | parkhyongsok@naver.com |
e97bec724b57e515c3368cbf9177b005f77ae1ea | af7466d6abfcce9e02efe91abe1875fbcf8d04aa | /lib/parsers/parse856Holding.py | 7b351add50a67d0c83f41d770abb32c03de734fb | [] | no_license | NYPL/sfr-oclc-catalog-lookup | eb1472d1a6cab85734b4c0ac6648de846e5b00fb | 4bf3bde518211870d6c20cde840c57bd83c1816c | refs/heads/development | 2020-04-15T07:45:59.184860 | 2020-02-05T21:22:32 | 2020-02-05T21:22:32 | 164,501,003 | 1 | 1 | null | 2020-02-05T21:22:34 | 2019-01-07T21:43:14 | Python | UTF-8 | Python | false | false | 7,355 | py | import math
from multiprocessing import Process, Pipe
from multiprocessing.connection import wait
import re
import requests
from helpers.errorHelpers import HoldingError
from lib.dataModel import Link, Identifier
class HoldingParser:
    """Parse a MARC 856 (electronic location) field into ebook formats,
    links and identifiers attached to an instance record.

    Typical flow: parseField() then extractBookLinks(). HathiTrust catalog
    URIs are expanded into per-item read/download links through the
    HathiTrust volumes API, fanned out over multiple worker processes.
    """

    # URI patterns identifying a direct ebook provider.
    EBOOK_REGEX = {
        'gutenberg': r'gutenberg.org\/ebooks\/[0-9]+\.epub\.(?:no|)images$',
        'internetarchive': r'archive.org\/details\/[a-z0-9]+$',
        'hathitrust': r'catalog.hathitrust.org\/api\/volumes\/[a-z]{3,6}\/[a-zA-Z0-9]+\.html' # noqa: E501
    }

    # URI patterns from which a typed identifier can be extracted.
    ID_REGEX = {
        'oclc': r'oclc\/([0-9]+)',
        'gutenberg': r'gutenberg.org\/ebooks\/([0-9]+)$'
    }

    URI_ID_REGEX = r'\/((?:(?!\/)[^.])+(?=$|\.[a-z]{3,4}$))'

    HATHI_OCLC_REGEX = r'([a-z]+\/[a-z0-9]+)\.html$'
    HATHI_ID_REGEX = r'id=([a-z\.\/\$0-9]+)'
    HATHI_DOWNLOAD_URL = 'babel.hathitrust.org/cgi/imgsrv/download/pdf?id={}'
    HATHI_METADATA_URL = 'http://catalog.hathitrust.org/api/volumes/full/{}.json'

    def __init__(self, field, instance):
        self.field = field          # MARC 856 field object
        self.instance = instance    # record that parsed links/formats attach to
        self.source = 'unknown'     # set once a known ebook provider matches

    def parseField(self):
        """Validate the 856 field and load its URI and URI-derived identifier.

        Raises:
            HoldingError: when ind1 != '4' (not an HTTP resource) or the
                field has no 'u' (URI) subfield.
        """
        if self.field.ind1 != '4':
            raise HoldingError('856 does not contain an HTTP reference')

        try:
            self.uri = self.field.subfield('u')[0].value
        except IndexError:
            raise HoldingError('856 Field is missing u subfield for URI')

        # BUGFIX: loadURIid() assigns self.identifier itself and returns None.
        # The original wrote `self.identifier = self.loadURIid()`, which
        # immediately clobbered the freshly parsed identifier with None.
        self.loadURIid()

    def loadURIid(self):
        """Regex to extract identifier from an URI. \/((?:(?!\/)[^.])+ matches
        the path of the URI, excluding anything before the final slash (e.g.
        will match "1234" from http://test.com/1234) (?=$|\.[a-z]{3,4}$)) is a
        positive lookahead that excludes the file format from the identifier
        (so the above will still return "1234" if the URI ends in "1234.epub")
        """
        uriGroup = re.search(self.URI_ID_REGEX, self.uri)
        if uriGroup is not None:
            self.identifier = uriGroup.group(1)
        else:
            # No extractable path segment; fall back to the whole URI.
            self.identifier = self.uri

    def extractBookLinks(self):
        """Attach whatever the URI yields to the instance: an ebook format,
        a typed identifier, or (failing both) a plain text/html link."""
        if self.matchEbook() is True:
            return
        elif self.matchIdentifier() is True:
            return
        else:
            self.instance.links.append(
                HoldingParser.createLink(self.uri, 'text/html')
            )

    def matchEbook(self):
        """Try to match the URI against a known ebook provider.

        Returns True when a provider matched and a format was recorded;
        None otherwise (including the restricted-IA and HathiTrust-catalog
        cases, which are handled specially).
        """
        for source, regex in self.EBOOK_REGEX.items():
            if re.search(regex, self.uri):
                # BUGFIX: record the provider only once it actually matches;
                # the original set self.source on every iteration, leaving a
                # stale value when nothing matched at all.
                self.source = source
                if source == 'internetarchive':
                    if self.checkIAStatus() is True:
                        return None
                elif source == 'hathitrust':
                    self.parseHathiLink()
                    return None
                self.instance.addFormat(**{
                    'source': source,
                    'content_type': 'ebook',
                    'links': [
                        self.createLink(
                            self.uri, 'text/html',
                            local=False, download=False, images=False, ebook=True
                        )
                    ],
                    # BUGFIX: identifier source now matches the detected
                    # provider; it was hard-coded to 'hathi' even for
                    # Gutenberg / Internet Archive records.
                    'identifiers': [Identifier(identifier=self.identifier, source=source)]
                })
                return True

    def matchIdentifier(self):
        """Record a typed identifier (oclc / gutenberg) parsed from the URI.

        Returns True on the first match, None when nothing matches.
        """
        for idType, regex in self.ID_REGEX.items():
            idGroup = re.search(regex, self.uri)
            if idGroup is not None:
                self.instance.addIdentifier(**{
                    'type': idType,
                    'identifier': idGroup.group(1),
                    'weight': 0.8
                })
                return True

    def checkIAStatus(self):
        """Return True when the Internet Archive item is access-restricted.

        Non-200 responses are conservatively treated as restricted.
        """
        metadataURI = self.uri.replace('details', 'metadata')
        metadataResp = requests.get(metadataURI)
        if metadataResp.status_code == 200:
            iaData = metadataResp.json()
            iaMeta = iaData['metadata']
            if iaMeta.get('access-restricted-item', False) is False:
                return False
        return True

    def parseHathiLink(self):
        """Expand a HathiTrust *catalog* URI into item-level links; direct
        (non-catalog) Hathi links are ignored here."""
        if 'catalog' not in self.uri:
            return None
        self.loadCatalogLinks()

    def loadCatalogLinks(self):
        """Look up the catalog record's items via the volumes API and process
        them in parallel."""
        hathiIDGroup = re.search(self.HATHI_OCLC_REGEX, self.uri)
        if hathiIDGroup:
            hathiID = hathiIDGroup.group(1)
            hathiItems = self.fetchHathiItems(hathiID)
            if hathiItems:
                self.startHathiMultiprocess(hathiItems)

    def startHathiMultiprocess(self, hathiItems):
        """Split the item list across 4 worker processes and collect the
        generated formats back over pipes, appending each to the instance."""
        processes = []
        outPipes = []
        cores = 4

        chunkSize = math.ceil(len(hathiItems) / cores)

        for i in range(cores):
            start = i * chunkSize
            end = start + chunkSize

            pConn, cConn = Pipe(duplex=False)
            proc = Process(
                target=self.processHathiChunk,
                args=(hathiItems[start:end], cConn)
            )
            processes.append(proc)
            outPipes.append(pConn)
            proc.start()
            # Parent closes its copy of the write end so EOF propagates.
            cConn.close()

        while outPipes:
            for p in wait(outPipes):
                try:
                    newItem = p.recv()
                    if newItem == 'DONE':
                        outPipes.remove(p)
                    else:
                        self.instance.addFormat(**newItem)
                except EOFError:
                    outPipes.remove(p)

        for proc in processes:
            proc.join()

    def fetchHathiItems(self, hathiID):
        """Fetch the item list for a catalog record from the HathiTrust
        volumes API; returns None on a non-200 response."""
        apiURL = self.HATHI_METADATA_URL.format(
            hathiID
        )
        apiResp = requests.get(apiURL)
        if apiResp.status_code == 200:
            catalogData = apiResp.json()
            return catalogData.get('items', [])

    def processHathiChunk(self, hathiItems, cConn):
        """Worker body: resolve each item's links and stream them to the
        parent, finishing with a 'DONE' sentinel."""
        for recItem in hathiItems:
            newItem = self.getNewItemLinks(recItem)
            if newItem is not None:
                cConn.send(newItem)
        cConn.send('DONE')
        cConn.close()

    def getNewItemLinks(self, recItem):
        """Build a format dict (read + PDF download links) for one Hathi item.

        Items whose rights code marks them in-copyright are skipped
        (returns None). Missing rights codes are treated as 'ic'.
        """
        if recItem.get('rightsCode', 'ic') in ['ic', 'icus', 'ic-world', 'und']:
            return
        # Follow the itemURL redirect to recover the canonical babel URL.
        redirectURL = requests.head(recItem['itemURL'])
        realURL = redirectURL.headers['Location'].replace('https://', '')
        hathiID = re.search(self.HATHI_ID_REGEX, realURL).group(1)
        downloadURL = self.HATHI_DOWNLOAD_URL.format(hathiID)
        return {
            'source': self.source,
            'content_type': 'ebook',
            'links': [
                HoldingParser.createLink(
                    realURL, 'text/html',
                    local=False, download=False, images=True, ebook=False
                ),
                HoldingParser.createLink(
                    downloadURL, 'application/pdf',
                    local=False, download=True, images=True, ebook=False
                )
            ],
            'identifiers': [Identifier(identifier=hathiID, source='hathi')]
        }

    @staticmethod
    def createLink(uri, mediaType, local=False, download=False, images=False, ebook=False):
        """Construct a Link with the standard flag dictionary."""
        return Link(
            url=uri,
            mediaType=mediaType,
            flags={'local': local, 'download': download, 'images': images, 'ebook': ebook}
        )
| [
"mwbenowitz@gmail.com"
] | mwbenowitz@gmail.com |
d1896c910c15d67fea04a633a1af5eeb4bcb8691 | a04363ff165d9de42ceb7f277fe3e9896a443627 | /bdai_master-master/lambda_functions/twitter-pipeline/handler.py | 3cd04f129c77bbc4e7b8d37c4fe2a0bbc0eaa1af | [] | no_license | webclinic017/bdai_master | 45327d3ca29262c31986ad1fd05ea96c45949135 | c6953cce37764965b5caa0ea6b3add6df47f8ce4 | refs/heads/master | 2023-07-26T06:43:42.545663 | 2020-04-13T23:13:05 | 2020-04-13T23:13:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,364 | py | import re
import tweepy
import boto3
from tweepy import OAuthHandler
from textblob import TextBlob
# In[2]:
# SECURITY(review): live-looking Twitter API credentials are hard-coded and
# committed to source control. They should be revoked and loaded from
# environment variables or a secrets store instead.
consumer_key = 'GIw0j8Nm3Qx9YYQCo5SLnqljh'
consumer_secret = 'HMLyZMmeGbhV9hnQkYeAFKJp0ynPsVWri3RT4FHTxNwQ2gad3g'
access_token = '2748454529-gTBtq6YTLRTRdMhUMiVISbFp3BPlP5pmfB9wRST'
access_token_secret = '1Fofwl74IXKOxFLkLHgKK42nLKg65OA3PMaEyKIlkkFDF'
# In[3]:
# Authenticate with Twitter and build the module-level API client.
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# In[4]:
# want to find a way to ignore tweets that are affiliate links ie. robinhood links
def clean_tweet(self, tweet):
'''
Utility function to clean tweet text by removing links, special characters
using simple regex statements.
'''
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
# In[10]:
def get_tweets(self, query, count):
tweets = []
try:
# call twitter api to fetch tweets
fetched_tweets = self.search(q = query, count = count)
# parsing tweets one by one
for tweet in fetched_tweets:
# empty dictionary to store required params of a tweet
parsed_tweet = {}
# saving ticker query of tweet
parsed_tweet['query'] = query
# saving text of tweet
parsed_tweet['text'] = tweet.text
# saving sentiment of tweet
parsed_tweet['sentiment'] = get_tweet_sentiment(api, tweet.text)
# saving time of tweet
parsed_tweet['created_at'] = str(tweet.created_at)
# appending parsed tweet to tweets list
if tweet.retweet_count > 0:
# if tweet has retweets, ensure that it is appended only once
if parsed_tweet not in tweets:
tweets.append(parsed_tweet)
else:
tweets.append(parsed_tweet)
# return parsed tweets
return tweets
except tweepy.TweepError as e:
# print error (if any)
print("Error : " + str(e))
# In[12]:
def get_tweet_sentiment(self, tweet):
# create TextBlob object of passed tweet text
analysis = TextBlob(clean_tweet(api, tweet))
# set sentiment
return analysis.sentiment.polarity
def change_tweet_to_utf8(tweets):
for tweet in tweets:
tweet["text"] = tweet["text"].encode("utf-8")
print(tweet)
print()
# In[13]:
def twitter_handler(event, context):
tweets = get_tweets(api, query = '$aapl', count = 100)
change_tweet_to_utf8(tweets)
tweets2 = get_tweets(api, query = '$googl', count = 100)
change_tweet_to_utf8(tweets2)
tweets3 = get_tweets(api, query = '$mmm', count = 100)
change_tweet_to_utf8(tweets3)
tweets4 = get_tweets(api, query = '$xom', count = 100)
change_tweet_to_utf8(tweets4)
tweets5 = get_tweets(api, query = '$csco', count = 100)
change_tweet_to_utf8(tweets5)
tweets6 = get_tweets(api, query = '$ge', count = 100)
change_tweet_to_utf8(tweets6)
tweets7 = get_tweets(api, query = '$hd', count = 100)
change_tweet_to_utf8(tweets7)
tweets8 = get_tweets(api, query = '$psx', count = 100)
change_tweet_to_utf8(tweets8)
tweets9 = get_tweets(api, query = '$mlpx', count = 100)
change_tweet_to_utf8(tweets9)
tweets10 = get_tweets(api, query = '$oxy', count = 100)
change_tweet_to_utf8(tweets10)
tweets11 = get_tweets(api, query = '$regi', count = 100)
change_tweet_to_utf8(tweets11)
tweets12 = get_tweets(api, query = '$mro', count = 100)
change_tweet_to_utf8(tweets12)
tweets13 = get_tweets(api, query = '$nrg', count = 100)
change_tweet_to_utf8(tweets13)
tweets14 = get_tweets(api, query = '$enbl', count = 100)
change_tweet_to_utf8(tweets14)
tweets15 = get_tweets(api, query = '$intc', count = 100)
change_tweet_to_utf8(tweets15)
bucket_name = "twitter-pipeline-bucket"
lambda_path = "/tmp/" + file_name
s3_path = "/tweets/" + file_name
s3 = boto3.resource("s3")
s3.Bucket(bucket_name).put_object(Key=s3_path, Body=encoded_string)
return {
'statusCode': 200,
'body': "Successful"
}
| [
"noreply@github.com"
] | webclinic017.noreply@github.com |
bfa6c57b1cf19aaec50e98c4cf9775c806514880 | 0c752e60f2eeac20673db6298984f5300ee00591 | /setup.py | 266648b0a808735ec3e3ac432018f3b940929078 | [] | no_license | Tamosauskas/collective.newsticker | d3473e5dc68c0874a0e812d9d9c2bb43746ed4f4 | 91ef5ba610b68895e2f7dba532c7b1a296eaba00 | refs/heads/master | 2021-01-16T00:47:38.423885 | 2011-10-06T13:23:52 | 2011-10-06T13:23:52 | 2,525,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
version = '1.0dev'
setup(name='collective.newsticker',
version=version,
description="An implementation of the jQuery News Ticker Plugin for Plone.",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"Programming Language :: Python",
],
keywords='plone jquery',
author='Héctor Velarde',
author_email='hector.velarde@gmail.com',
url='https://github.com/collective/collective.newsticker',
license='GPL',
packages=find_packages('src'),
package_dir={'': 'src'},
namespace_packages=['collective'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'five.grok',
'zope.schema>=3.8.0', # required to use IContextAwareDefaultFactory
],
extras_require={
'test': ['plone.app.testing'],
},
entry_points="""
[z3c.autoinclude.plugin]
target = plone
""",
)
| [
"hector.velarde@gmail.com"
] | hector.velarde@gmail.com |
c57cf7c79f465eda9b6a2c3a446cf3a641826adc | 3bae1ed6460064f997264091aca0f37ac31c1a77 | /apps/cloud_api_generator/generatedServer/tasklets/machine/rollback/machine_rollback.py | 7ba45c98b3698e570b614117f8241526791bd529 | [] | no_license | racktivity/ext-pylabs-core | 04d96b80ac1942754257d59e91460c3a141f0a32 | 53d349fa6bee0ccead29afd6676979b44c109a61 | refs/heads/master | 2021-01-22T10:33:18.523799 | 2017-06-08T09:09:28 | 2017-06-08T09:09:28 | 54,314,984 | 0 | 0 | null | 2017-06-08T09:09:29 | 2016-03-20T11:55:01 | Python | UTF-8 | Python | false | false | 179 | py | __author__ = 'aserver'
__tags__ = 'machine', 'rollback'
__priority__= 3
def main(q, i, params, tags):
params['result'] = ''
def match(q, i, params, tags):
return True
| [
"devnull@localhost"
] | devnull@localhost |
1018f09e7291d2d37f9db87d4d60882d3bba1b3f | 388ee4f6147c28a54125c6c3e90e47da207fab65 | /lib/python3.5/site-packages/boto-2.48.0-py3.5.egg/EGG-INFO/scripts/fetch_file | d4c43cc3fd280687105334bb2a32d6a23a3e3c79 | [] | no_license | alikhundmiri/save_PIL_to_S3 | 08ea5f622906528504d6742a39d5f50d83b57340 | 2229ed410cd3acd4e1be3ed4287709747dccc617 | refs/heads/master | 2020-03-10T21:13:14.333990 | 2018-04-15T08:34:53 | 2018-04-15T08:34:53 | 129,588,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,907 | #!/Users/alikhundmiri/Desktop/pythons/Image/bin/python
# Copyright (c) 2009 Chris Moyer http://coredumped.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import sys
if __name__ == "__main__":
from optparse import OptionParser
usage = """%prog [options] URI
Fetch a URI using the boto library and (by default) pipe contents to STDOUT
The URI can be either an HTTP URL, or "s3://bucket_name/key_name"
"""
parser = OptionParser(version="0.1", usage=usage)
parser.add_option("-o", "--out-file",
help="File to receive output instead of STDOUT",
dest="outfile")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
sys.exit(1)
from boto.utils import fetch_file
f = fetch_file(args[0])
if options.outfile:
open(options.outfile, "w").write(f.read())
else:
print(f.read())
| [
"salikhundmiri@gmail.com"
] | salikhundmiri@gmail.com | |
8c5207ebadc1ac132c9e5ae23a332403c97b4c57 | 22c5fc7dd52149ebd4338a487ae9ab0db0e43f01 | /tests/test_ahnet.py | 509cfbc59c4f97b70213d092f404c5db9b7cb2e6 | [
"Apache-2.0"
] | permissive | precision-medicine-um/MONAI-Deep_Learning | 3d3f547dd9815152561a6853f8d4727b0e5ca4c4 | d94c4d3a2c465717ba3fae01b7acea7fada9885b | refs/heads/master | 2022-12-28T07:04:07.768415 | 2020-10-17T13:11:56 | 2020-10-17T13:11:56 | 305,346,962 | 3 | 0 | Apache-2.0 | 2022-12-27T15:44:13 | 2020-10-19T10:30:07 | Python | UTF-8 | Python | false | false | 5,322 | py | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.networks.blocks import FCN, MCFCN
from monai.networks.nets import AHNet
from tests.utils import skip_if_quick
TEST_CASE_FCN_1 = [
{"out_channels": 3, "upsample_mode": "transpose"},
torch.randn(5, 3, 64, 64),
(5, 3, 64, 64),
]
TEST_CASE_FCN_2 = [
{"out_channels": 2, "upsample_mode": "transpose", "pretrained": True, "progress": False},
torch.randn(5, 3, 64, 64),
(5, 2, 64, 64),
]
TEST_CASE_FCN_3 = [
{"out_channels": 1, "upsample_mode": "bilinear", "pretrained": False},
torch.randn(5, 3, 64, 64),
(5, 1, 64, 64),
]
TEST_CASE_MCFCN_1 = [
{"out_channels": 3, "in_channels": 8, "upsample_mode": "transpose", "progress": False},
torch.randn(5, 8, 64, 64),
(5, 3, 64, 64),
]
TEST_CASE_MCFCN_2 = [
{"out_channels": 2, "in_channels": 1, "upsample_mode": "transpose", "progress": True},
torch.randn(5, 1, 64, 64),
(5, 2, 64, 64),
]
TEST_CASE_MCFCN_3 = [
{"out_channels": 1, "in_channels": 2, "upsample_mode": "bilinear", "pretrained": False},
torch.randn(5, 2, 64, 64),
(5, 1, 64, 64),
]
TEST_CASE_AHNET_2D_1 = [
{"spatial_dims": 2, "upsample_mode": "bilinear"},
torch.randn(3, 1, 128, 128),
(3, 1, 128, 128),
]
TEST_CASE_AHNET_2D_2 = [
{"spatial_dims": 2, "upsample_mode": "transpose", "out_channels": 2},
torch.randn(2, 1, 128, 128),
(2, 2, 128, 128),
]
TEST_CASE_AHNET_2D_3 = [
{"spatial_dims": 2, "upsample_mode": "bilinear", "out_channels": 2},
torch.randn(2, 1, 160, 128),
(2, 2, 160, 128),
]
TEST_CASE_AHNET_3D_1 = [
{"spatial_dims": 3, "upsample_mode": "trilinear"},
torch.randn(3, 1, 128, 128, 64),
(3, 1, 128, 128, 64),
]
TEST_CASE_AHNET_3D_2 = [
{"spatial_dims": 3, "upsample_mode": "transpose", "out_channels": 2},
torch.randn(2, 1, 128, 128, 64),
(2, 2, 128, 128, 64),
]
TEST_CASE_AHNET_3D_3 = [
{"spatial_dims": 3, "upsample_mode": "trilinear", "out_channels": 2},
torch.randn(2, 1, 160, 128, 64),
(2, 2, 160, 128, 64),
]
TEST_CASE_AHNET_3D_WITH_PRETRAIN_1 = [
{"spatial_dims": 3, "upsample_mode": "trilinear"},
torch.randn(3, 1, 128, 128, 64),
(3, 1, 128, 128, 64),
{"out_channels": 1, "upsample_mode": "transpose"},
]
TEST_CASE_AHNET_3D_WITH_PRETRAIN_2 = [
{"spatial_dims": 3, "upsample_mode": "transpose", "out_channels": 2},
torch.randn(2, 1, 128, 128, 64),
(2, 2, 128, 128, 64),
{"out_channels": 1, "upsample_mode": "bilinear"},
]
TEST_CASE_AHNET_3D_WITH_PRETRAIN_3 = [
{"spatial_dims": 3, "upsample_mode": "transpose", "in_channels": 2, "out_channels": 3},
torch.randn(2, 2, 128, 128, 64),
(2, 3, 128, 128, 64),
{"out_channels": 1, "upsample_mode": "bilinear"},
]
class TestFCN(unittest.TestCase):
@parameterized.expand([TEST_CASE_FCN_1, TEST_CASE_FCN_2, TEST_CASE_FCN_3])
def test_fcn_shape(self, input_param, input_data, expected_shape):
net = FCN(**input_param)
net.eval()
with torch.no_grad():
result = net.forward(input_data)
self.assertEqual(result.shape, expected_shape)
class TestMCFCN(unittest.TestCase):
@parameterized.expand([TEST_CASE_MCFCN_1, TEST_CASE_MCFCN_2, TEST_CASE_MCFCN_3])
def test_mcfcn_shape(self, input_param, input_data, expected_shape):
net = MCFCN(**input_param)
net.eval()
with torch.no_grad():
result = net.forward(input_data)
self.assertEqual(result.shape, expected_shape)
class TestAHNET(unittest.TestCase):
@parameterized.expand(
[
TEST_CASE_AHNET_2D_1,
TEST_CASE_AHNET_2D_2,
TEST_CASE_AHNET_2D_3,
TEST_CASE_AHNET_3D_1,
TEST_CASE_AHNET_3D_2,
TEST_CASE_AHNET_3D_3,
]
)
@skip_if_quick
def test_ahnet_shape(self, input_param, input_data, expected_shape):
net = AHNet(**input_param)
net.eval()
with torch.no_grad():
result = net.forward(input_data)
self.assertEqual(result.shape, expected_shape)
class TestAHNETWithPretrain(unittest.TestCase):
@parameterized.expand(
[
TEST_CASE_AHNET_3D_WITH_PRETRAIN_1,
TEST_CASE_AHNET_3D_WITH_PRETRAIN_2,
TEST_CASE_AHNET_3D_WITH_PRETRAIN_3,
]
)
@skip_if_quick
def test_ahnet_shape(self, input_param, input_data, expected_shape, fcn_input_param):
net = AHNet(**input_param)
net2d = FCN(**fcn_input_param)
net.copy_from(net2d)
net.eval()
with torch.no_grad():
result = net.forward(input_data)
self.assertEqual(result.shape, expected_shape)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | precision-medicine-um.noreply@github.com |
927250df7665ad2bafdbed1ebce7ed9e8f9b37d5 | 37061d249207275daad5f465522f0f5b258daac4 | /mall/mall/apps/verifications/urls.py | b3b491292b8d83b6715561bce4ff44a4c0f5bb70 | [] | no_license | chanwanxiang/mallinfo | e4d9322436cf1711f7864acb8e288fb8ccf44c0c | f730773509dcc052374720ae5eae2c012bf96bb8 | refs/heads/main | 2023-04-19T11:41:23.124078 | 2021-05-12T08:30:56 | 2021-05-12T08:30:56 | 354,263,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^image_code/(?P<uuid>[\w-]+)/$', views.ImageCodeView.as_view()),
url(r'^sms_code/(?P<mobile>1[3-9]\d{9})$/', views.SmsCodeView.as_view()),
]
| [
"595366700@qq.com"
] | 595366700@qq.com |
8deb8da80cad74ca6d0ca6fa50cd7220d4b62737 | 947a46aee6191a640938cf1b5892aa577ca8708b | /independentbanker/spiders/spider.py | 690b4738b3eaf75543710e2d6532cc5596beaac6 | [] | no_license | hristo-grudev/independentbanker | 6f8a39555b093fdf52218d76ee0c656c37166cee | d2d1f0892227e5f14b98a38e67413bf78ec7a443 | refs/heads/main | 2023-03-30T15:14:37.784357 | 2021-04-02T07:36:43 | 2021-04-02T07:36:43 | 353,943,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | import datetime
import scrapy
from scrapy.loader import ItemLoader
from ..items import IndependentbankerItem
from itemloaders.processors import TakeFirst
base = 'https://independentbanker.org/{}/'
class IndependentbankerSpider(scrapy.Spider):
name = 'independentbanker'
year = 2011
start_urls = [base.format(year)]
def parse(self, response):
post_links = response.xpath('//div[@class="post-inner"]//h2[@class="entry-title"]/a/@href').getall()
yield from response.follow_all(post_links, self.parse_post)
next_page = response.xpath('//a[@class="next page-numbers"]/@href').getall()
yield from response.follow_all(next_page, self.parse)
if self.year < datetime.datetime.now().year:
self.year += 1
yield response.follow(base.format(self.year), self.parse)
def parse_post(self, response):
title = response.xpath('//header[@class="entry-header"]//h1/text()').get()
description = response.xpath('//div[@class="entry-content"]//text()[normalize-space()]').getall()
description = [p.strip() for p in description if '{' not in p]
description = ' '.join(description).strip()
date = response.xpath('//div[@class="entry-meta"]/text()').get()
item = ItemLoader(item=IndependentbankerItem(), response=response)
item.default_output_processor = TakeFirst()
item.add_value('title', title)
item.add_value('description', description)
item.add_value('date', date)
return item.load_item()
| [
"hr.grudev@gmail.com"
] | hr.grudev@gmail.com |
544128c3d90a839809f1c93fffa23c0ff34618d2 | 97249b5f3b0054ccb7e61b211c525af7e9842f48 | /clickpost/router.py | 1d5cd19329b1b97edd20a5943801742409e865ad | [] | no_license | itssonamsinha/testing2 | ebaf88b7c30c8d9bd995e0eac687c8650c3ebc83 | 8800baf8cf3dd5bbfc97959bab0a2c1a674c7587 | refs/heads/master | 2021-03-15T06:12:24.921359 | 2020-03-18T07:42:16 | 2020-03-18T07:42:16 | 246,830,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | from django.urls import path
from .views import NotificationView
urlpatterns = [
path('sendSms', NotificationView.as_view({'get': 'retrieve_sms'}), name='send-sms'),
path('sendWhatsApp', NotificationView.as_view({'get': 'retrieve_whatsapp'}), name='send-sms'),
path('sendProductNotification', NotificationView.as_view({'get': 'send_notification'}), name='send-notification'),
] | [
"sonamsinha@policybazaar.com"
] | sonamsinha@policybazaar.com |
6971479fb0d05f17efa38c065ea41d78c4494504 | fdbb74a95924e2677466614f6ab6e2bb13b2a95a | /third_party/python/Lib/distutils/tests/test_extension.py | e35f2738b6a21966e862cb0bbecbe92f6b0d60ef | [
"Python-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"ISC"
] | permissive | jart/cosmopolitan | fb11b5658939023977060a7c6c71a74093d9cb44 | 0d748ad58e1063dd1f8560f18a0c75293b9415b7 | refs/heads/master | 2023-09-06T09:17:29.303607 | 2023-09-02T03:49:13 | 2023-09-02T03:50:18 | 272,457,606 | 11,887 | 435 | ISC | 2023-09-14T17:47:58 | 2020-06-15T14:16:13 | C | UTF-8 | Python | false | false | 2,768 | py | """Tests for distutils.extension."""
import unittest
import os
import warnings
from test.support import check_warnings, run_unittest
from distutils.extension import read_setup_file, Extension
class ExtensionTestCase(unittest.TestCase):
def test_read_setup_file(self):
# trying to read a Setup file
# (sample extracted from the PyGame project)
setup = os.path.join(os.path.dirname(__file__), 'Setup.sample')
exts = read_setup_file(setup)
names = [ext.name for ext in exts]
names.sort()
# here are the extensions read_setup_file should have created
# out of the file
wanted = ['_arraysurfarray', '_camera', '_numericsndarray',
'_numericsurfarray', 'base', 'bufferproxy', 'cdrom',
'color', 'constants', 'display', 'draw', 'event',
'fastevent', 'font', 'gfxdraw', 'image', 'imageext',
'joystick', 'key', 'mask', 'mixer', 'mixer_music',
'mouse', 'movie', 'overlay', 'pixelarray', 'pypm',
'rect', 'rwobject', 'scrap', 'surface', 'surflock',
'time', 'transform']
self.assertEqual(names, wanted)
def test_extension_init(self):
# the first argument, which is the name, must be a string
self.assertRaises(AssertionError, Extension, 1, [])
ext = Extension('name', [])
self.assertEqual(ext.name, 'name')
# the second argument, which is the list of files, must
# be a list of strings
self.assertRaises(AssertionError, Extension, 'name', 'file')
self.assertRaises(AssertionError, Extension, 'name', ['file', 1])
ext = Extension('name', ['file1', 'file2'])
self.assertEqual(ext.sources, ['file1', 'file2'])
# others arguments have defaults
for attr in ('include_dirs', 'define_macros', 'undef_macros',
'library_dirs', 'libraries', 'runtime_library_dirs',
'extra_objects', 'extra_compile_args', 'extra_link_args',
'export_symbols', 'swig_opts', 'depends'):
self.assertEqual(getattr(ext, attr), [])
self.assertEqual(ext.language, None)
self.assertEqual(ext.optional, None)
# if there are unknown keyword options, warn about them
with check_warnings() as w:
warnings.simplefilter('always')
ext = Extension('name', ['file1', 'file2'], chic=True)
self.assertEqual(len(w.warnings), 1)
self.assertEqual(str(w.warnings[0].message),
"Unknown Extension options: 'chic'")
def test_suite():
return unittest.makeSuite(ExtensionTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| [
"jtunney@gmail.com"
] | jtunney@gmail.com |
df7aeee9a54209ea3c3d4e27855b4fcf5d6ef691 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/sUe.py | 946ef0e25b70dd125c49ea77a1b22196b31a1016 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'sUE':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
83f72edc799573491790f4191be54267fb97346d | 44ff608608e8042f86edf08aa8fa4469a9786837 | /nacos/errors.py | 79ede9ddef722579a47ca10738f250a44bb38d89 | [
"MIT"
] | permissive | neggplant/pynacos-sdk | 928a2297eaaaebdf3f595328f29d378d028a9fed | c98521ca706437262b680f9beeb1a2e4a6dad2d8 | refs/heads/master | 2023-02-26T17:57:45.967901 | 2021-02-03T13:24:16 | 2021-02-03T13:24:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | class RequestError(Exception):
def __init__(self, message="failed to request data"):
Exception.__init__(self, message)
class ParamError(Exception):
def __init__(self, message="invalid param"):
Exception.__init__(self, message)
| [
"olivetree123@163.com"
] | olivetree123@163.com |
9da6a8533f52d115e139ed88b0df5fb07b8ce617 | c08d8126a90f773f0cf04237157a578f82b0a2ac | /libs/dataset.py | b01b5ec07253fddd3c00bcc8654451e96caf6269 | [] | no_license | GOSSAN0602/baidu_car_pose_estimation | cf2d4a8b69f629b01bc70f657c4f0981999034a4 | 2a2e77bae99011890ca7861a24298ebffa604b3d | refs/heads/master | 2020-12-05T07:20:42.358114 | 2020-01-08T15:27:49 | 2020-01-08T15:27:49 | 232,045,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | import numpy as np
import pandas as pd
import cv2
from torch.utils.data import Dataset, DataLoader
import torch
import sys
sys.path.append('./')
from libs.img_preprocess import *
def imread(path, fast_mode=False):
img = cv2.imread(path)
if not fast_mode and img is not None and len(img.shape) == 3:
img = np.array(img[:, :, ::-1])
return img
class CarDataset(Dataset):
"""Car dataset."""
def __init__(self, dataframe, root_dir, training=True, transform=None):
self.df = dataframe
self.root_dir = root_dir
self.transform = transform
self.training = training
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# Get image name
idx, labels = self.df.values[idx]
img_name = self.root_dir.format(idx)
# Read image
img0 = imread(img_name, True)
if self.transform is not None:
img0 = self.transform(img0)
img = preprocess_image(img0)
img = np.rollaxis(img, 2, 0)
# Get mask and regression maps
if self.training:
mask, regr = get_mask_and_regr(img0, labels)
regr = np.rollaxis(regr, 2, 0)
else:
mask, regr = 0, 0
return [img, mask, regr]
| [
"google-dl-platform@googlegroups.com"
] | google-dl-platform@googlegroups.com |
b5380f194e2ebf4483ba73e4bbda03f43357c625 | efc9b70544c0bc108aaec0ed6a2aefdf208fd266 | /7_Reverse Integer.py | 89ccbc472a02918df9bb0a259e44de8755b77148 | [] | no_license | fxy1018/Leetcode | 75fad14701703d6a6a36dd52c338ca56c5fa9eff | 604efd2c53c369fb262f42f7f7f31997ea4d029b | refs/heads/master | 2022-12-22T23:42:17.412776 | 2022-12-15T21:27:37 | 2022-12-15T21:27:37 | 78,082,899 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | '''
Given a 32-bit signed integer, reverse digits of an integer.
Example 1:
Input: 123
Output: 321
Example 2:
Input: -123
Output: -321
Example 3:
Input: 120
Output: 21
Note:
Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [−231, 231 − 1]. For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows.
'''
class Solution:
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
res = 0
negative = False
if x < 0:
negative = True
x *= -1
while x > 0:
mod = x%10
res = 10*res + mod
x = (x-mod)//10
if res >2**31-1 or res < -2**31:
return(0)
if negative:
return(res*-1)
return(res)
| [
"noreply@github.com"
] | fxy1018.noreply@github.com |
1ed1be18e715d55229010ba193864c1a21ca05e4 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/sieve/01b5423521084f56a7ead15fa4504e4f.py | 99cb7e8e8668e1ae7fee586e05b9e99094a5c1b4 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 348 | py | def sieve(number):
nonprimes = []
primes = []
for x in range(2,number/2):
y = 2
result = 0
while (result < number +1):
result = x*y
nonprimes.append(result)
y = y + 1
for a in range(2,number):
if a not in nonprimes:
primes.append(a)
return primes
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
1cce6bb01e3e8ebd4f9aba7c62c8d07cedbb2b9f | 4c187f0f9d244e89facdddc1581bcef33e092a93 | /benchmarks/QLib/QASM_src/benstein_vazirani_23b_secret_64.py | 18e103f3200545383140d5d7ea73dd4ab19dc862 | [] | no_license | Gonaco/Super-Qool-Benchmarks | 419dea5306bcec7e502034527acffe371a4e8004 | a630f3dd6f22bebd4ce7601a772fd3a8cd3dd08c | refs/heads/master | 2021-01-25T13:40:57.523633 | 2018-04-03T09:31:56 | 2018-04-03T09:31:56 | 123,600,859 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | from openql import openql as ql
import os
import numpy as np
curdir = os.path.dirname(__file__)
output_dir = os.path.join(curdir, 'test_output')
ql.set_output_dir(output_dir)
config_fn = os.path.join(curdir, '/home/daniel/Master/Quantum_Computing_and_Quantum_Information/OpenQL/tests/hardware_config_cc_light.json')
platform = ql.Platform('platform_none', config_fn)
sweep_points = [1,2]
num_circuits = 1
num_qubits = 25
p = ql.Program('benstein_vazirani_23b_secret_64', num_qubits, platform)
p.set_sweep_points(sweep_points, num_circuits)
k = ql.Kernel('benstein_vazirani_23b_secret_64', platform)
k.gate('prepx',23)
k.gate('x',23)
k.gate('h',0)
k.gate('h',1)
k.gate('h',2)
k.gate('h',3)
k.gate('h',4)
k.gate('h',5)
k.gate('h',6)
k.gate('h',7)
k.gate('h',8)
k.gate('h',9)
k.gate('h',10)
k.gate('h',11)
k.gate('h',12)
k.gate('h',13)
k.gate('h',14)
k.gate('h',15)
k.gate('h',16)
k.gate('h',17)
k.gate('h',18)
k.gate('h',19)
k.gate('h',20)
k.gate('h',21)
k.gate('h',22)
k.gate('h',23)
k.gate('cnot',6y)
k.gate('h',0)
k.gate('h',1)
k.gate('h',2)
k.gate('h',3)
k.gate('h',4)
k.gate('h',5)
k.gate('h',6)
k.gate('h',7)
k.gate('h',8)
k.gate('h',9)
k.gate('h',10)
k.gate('h',11)
k.gate('h',12)
k.gate('h',13)
k.gate('h',14)
k.gate('h',15)
k.gate('h',16)
k.gate('h',17)
k.gate('h',18)
k.gate('h',19)
k.gate('h',20)
k.gate('h',21)
k.gate('h',22)
k.gate('h',23)
p.add_kernel(k)
p.compile(optimize=False)
| [
"danielmoremanza@gmail.com"
] | danielmoremanza@gmail.com |
4a709776d4093627ea30121af39de661a51c6b0b | ce1f8877fa9ff084b75bceec4cc7ddf5b3153b07 | /clif/testing/python/t3_test.py | 1ac9b2d39c371c6fecfc64077ee971e8438811cf | [
"Apache-2.0"
] | permissive | HenriChataing/clif | 034aba392294ac30e40801815cf4d3172d3d44bd | 307ac5b7957424706c598876d883936c245e2078 | refs/heads/master | 2021-01-23T16:25:19.543400 | 2017-09-01T22:18:03 | 2017-09-01T22:18:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for clif.testing.t3."""
import unittest
from clif.testing.python import t3
class T3Test(unittest.TestCase):
def testEnum(self):
self.assertEqual(t3._Old.TOP1, 1)
self.assertEqual(t3._Old.TOPn, -1)
self.assertNotEqual(t3._New.TOP, 1)
self.assertEqual(t3._New.TOP.name, 'TOP')
self.assertTrue(t3._New.BOTTOM)
self.assertEqual(t3._New.BOTTOM, t3._New(1))
self.assertEqual(t3._New.TOP, t3._New(1000))
self.assertEqual(t3.K.OldE.ONE, 1)
self.assertRaises(TypeError, t3.K().M, (5))
if __name__ == '__main__':
unittest.main()
| [
"mrovner@google.com"
] | mrovner@google.com |
56b409a0f90af1f8c33c763c5b51333b9d2319d9 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/carbon/common/lib/jinja2/nodes.py | a9ce7128c60779add4374f810a327ee3df669a85 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 16,999 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\lib\jinja2\nodes.py
import operator
from itertools import chain, izip
from collections import deque
from jinja2.utils import Markup, MethodType, FunctionType
_context_function_types = (FunctionType, MethodType)
_binop_to_func = {'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub}
_uaop_to_func = {'not': operator.not_,
'+': operator.pos,
'-': operator.neg}
_cmpop_to_func = {'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b}
class Impossible(Exception):
pass
class NodeType(type):
def __new__(cls, name, bases, d):
for attr in ('fields', 'attributes'):
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
d[attr] = tuple(storage)
d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
class EvalContext(object):
def __init__(self, environment, template_name = None):
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError('if no eval context is passed, the node must have an attached environment.')
return EvalContext(node.environment)
return ctx
class Node(object):
    """Base class for all AST nodes.

    Subclasses describe their shape declaratively: ``fields`` names the
    positional child slots and ``attributes`` names extra metadata
    (line number, environment).  The NodeType metaclass accumulates both
    along the inheritance chain.
    """
    __metaclass__ = NodeType
    fields = ()
    attributes = ('lineno', 'environment')
    abstract = True
    def __init__(self, *fields, **attributes):
        if self.abstract:
            raise TypeError('abstract nodes are not instanciable')
        if fields:
            if len(fields) != len(self.fields):
                if not self.fields:
                    raise TypeError('%r takes 0 arguments' % self.__class__.__name__)
                raise TypeError('%r takes 0 or %d argument%s' % (self.__class__.__name__, len(self.fields), len(self.fields) != 1 and 's' or ''))
            # Positional arguments map one-to-one onto the declared fields.
            for name, arg in izip(self.fields, fields):
                setattr(self, name, arg)
        # Declared attributes default to None; anything left over is an error.
        for attr in self.attributes:
            setattr(self, attr, attributes.pop(attr, None))
        if attributes:
            raise TypeError('unknown attribute %r' % iter(attributes).next())
    def iter_fields(self, exclude = None, only = None):
        """Yield ``(name, value)`` pairs for this node's fields, filtered
        by the mutually-exclusive ``exclude``/``only`` sets; fields that
        were never assigned are silently skipped."""
        for name in self.fields:
            if exclude is only is None or exclude is not None and name not in exclude or only is not None and name in only:
                try:
                    yield (name, getattr(self, name))
                except AttributeError:
                    pass
    def iter_child_nodes(self, exclude = None, only = None):
        """Yield all direct child nodes, flattening list-valued fields."""
        for field, item in self.iter_fields(exclude, only):
            if isinstance(item, list):
                for n in item:
                    if isinstance(n, Node):
                        yield n
            elif isinstance(item, Node):
                yield item
    def find(self, node_type):
        """Return the first descendant that is an instance of
        ``node_type``, or None when there is none."""
        for result in self.find_all(node_type):
            return result
    def find_all(self, node_type):
        """Yield every descendant of ``node_type``, depth-first."""
        for child in self.iter_child_nodes():
            if isinstance(child, node_type):
                yield child
            for result in child.find_all(node_type):
                yield result
    def set_ctx(self, ctx):
        """Set the load/store ``ctx`` on this node and every descendant
        that carries a ``ctx`` field; returns self for chaining."""
        todo = deque([self])
        while todo:
            node = todo.popleft()
            if 'ctx' in node.fields:
                node.ctx = ctx
            todo.extend(node.iter_child_nodes())
        return self
    def set_lineno(self, lineno, override = False):
        """Propagate ``lineno`` to every descendant that has no line number
        yet (or to all of them when ``override``); returns self."""
        todo = deque([self])
        while todo:
            node = todo.popleft()
            if 'lineno' in node.attributes:
                if node.lineno is None or override:
                    node.lineno = lineno
            todo.extend(node.iter_child_nodes())
        return self
    def set_environment(self, environment):
        """Attach ``environment`` to this node and all descendants;
        returns self for chaining."""
        todo = deque([self])
        while todo:
            node = todo.popleft()
            node.environment = environment
            todo.extend(node.iter_child_nodes())
        return self
    def __eq__(self, other):
        # Nodes compare by exact type and field values; attributes
        # (lineno, environment) are deliberately ignored.
        return type(self) is type(other) and tuple(self.iter_fields()) == tuple(other.iter_fields())
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, ', '.join(('%s=%r' % (arg, getattr(self, arg, None)) for arg in self.fields)))
# Statement / helper node hierarchy: each class only declares the child
# slots it stores; all behavior lives in Node.
class Stmt(Node):
    """Base class for all statement nodes."""
    abstract = True
class Helper(Node):
    """Base class for helper nodes that are not statements themselves."""
    abstract = True
class Template(Node):
    """The root node of a parsed template."""
    fields = ('body',)
class Output(Stmt):
    """One or more expressions whose values are written to the output."""
    fields = ('nodes',)
class Extends(Stmt):
    """A ``{% extends %}`` tag."""
    fields = ('template',)
class For(Stmt):
    """A for-loop, with optional else block, loop filter and recursion."""
    fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
    """An if/else conditional."""
    fields = ('test', 'body', 'else_')
class Macro(Stmt):
    """A macro definition."""
    fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
    """A ``{% call %}`` block wrapping a macro invocation."""
    fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
    """A ``{% filter %}`` block."""
    fields = ('body', 'filter')
class Block(Stmt):
    """A named template block; ``scoped`` controls variable visibility."""
    fields = ('name', 'body', 'scoped')
class Include(Stmt):
    """A ``{% include %}`` tag."""
    fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
    """A ``{% import %}`` tag binding a template to a name."""
    fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
    """A ``{% from ... import ... %}`` tag."""
    fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
    """An expression evaluated only for its side effects."""
    fields = ('node',)
class Assign(Stmt):
    """A ``{% set %}`` assignment."""
    fields = ('target', 'node')
class Expr(Node):
    """Base class for all expression nodes."""
    abstract = True
    def as_const(self, eval_ctx = None):
        """Fold the expression to a constant, or raise Impossible when its
        value cannot be determined at compile time."""
        raise Impossible()
    def can_assign(self):
        # Only a few expression types are valid assignment targets.
        return False
class BinExpr(Expr):
    """Base class for binary operators; subclasses set ``operator``."""
    fields = ('left', 'right')
    operator = None
    abstract = True
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # A sandboxed environment may intercept this operator at runtime;
        # constant folding would bypass the sandbox, so refuse it.
        if self.environment.sandboxed and self.operator in self.environment.intercepted_binops:
            raise Impossible()
        f = _binop_to_func[self.operator]
        try:
            return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
        except Exception:
            raise Impossible()
class UnaryExpr(Expr):
    """Base class for unary operators; subclasses set ``operator``."""
    fields = ('node',)
    operator = None
    abstract = True
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # Same sandbox consideration as for binary operators above.
        if self.environment.sandboxed and self.operator in self.environment.intercepted_unops:
            raise Impossible()
        f = _uaop_to_func[self.operator]
        try:
            return f(self.node.as_const(eval_ctx))
        except Exception:
            raise Impossible()
class Name(Expr):
    """A name load/store; ``ctx`` distinguishes the usage."""
    fields = ('name', 'ctx')
    def can_assign(self):
        # The boolean/none literals are reserved and cannot be targets.
        return self.name not in ('true', 'false', 'none', 'True', 'False', 'None')
class Literal(Expr):
    """Base class for literal expressions."""
    abstract = True
class Const(Literal):
    """A constant value known at compile time."""
    fields = ('value',)
    def as_const(self, eval_ctx = None):
        return self.value
    @classmethod
    def from_untrusted(cls, value, lineno = None, environment = None):
        """Wrap ``value`` as a Const, refusing objects whose repr cannot be
        safely embedded in generated code."""
        from compiler import has_safe_repr
        if not has_safe_repr(value):
            raise Impossible()
        return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
    """A chunk of literal template text."""
    fields = ('data',)
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile:
            raise Impossible()
        # Under autoescaping, literal template data counts as safe markup.
        if eval_ctx.autoescape:
            return Markup(self.data)
        return self.data
class Tuple(Literal):
    """A tuple literal; may also appear as an assignment target."""
    fields = ('items', 'ctx')
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return tuple((x.as_const(eval_ctx) for x in self.items))
    def can_assign(self):
        # Assignable only when every element is itself assignable.
        for item in self.items:
            if not item.can_assign():
                return False
        return True
class List(Literal):
    """A list literal."""
    fields = ('items',)
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return [ x.as_const(eval_ctx) for x in self.items ]
class Dict(Literal):
    """A dict literal made of Pair helper nodes."""
    fields = ('items',)
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return dict((x.as_const(eval_ctx) for x in self.items))
class Pair(Helper):
    """A single key/value pair inside a Dict literal."""
    fields = ('key', 'value')
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return (self.key.as_const(eval_ctx), self.value.as_const(eval_ctx))
class Keyword(Helper):
    """A ``key=value`` keyword argument in a call; the key is a plain string."""
    fields = ('key', 'value')
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return (self.key, self.value.as_const(eval_ctx))
class CondExpr(Expr):
    """A conditional expression ``expr1 if test else expr2``."""
    fields = ('test', 'expr1', 'expr2')
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if self.test.as_const(eval_ctx):
            return self.expr1.as_const(eval_ctx)
        # The else part is optional in templates; without it the value of
        # a false condition is undefined, so folding must fail.
        if self.expr2 is None:
            raise Impossible()
        return self.expr2.as_const(eval_ctx)
class Filter(Expr):
    """A filter application ``node|name(args)``."""
    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile or self.node is None:
            raise Impossible()
        # Context filters need the runtime template context, which does
        # not exist at compile time.
        filter_ = self.environment.filters.get(self.name)
        if filter_ is None or getattr(filter_, 'contextfilter', False):
            raise Impossible()
        obj = self.node.as_const(eval_ctx)
        args = [ x.as_const(eval_ctx) for x in self.args ]
        # Eval-context / environment filters receive an implicit first arg.
        if getattr(filter_, 'evalcontextfilter', False):
            args.insert(0, eval_ctx)
        elif getattr(filter_, 'environmentfilter', False):
            args.insert(0, self.environment)
        kwargs = dict((x.as_const(eval_ctx) for x in self.kwargs))
        if self.dyn_args is not None:
            try:
                args.extend(self.dyn_args.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        if self.dyn_kwargs is not None:
            try:
                kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        try:
            return filter_(obj, *args, **kwargs)
        except Exception:
            raise Impossible()
class Test(Expr):
    """A test application ``node is name(args)``; never constant-folded."""
    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
    """A call expression ``node(args, kwargs, *dyn_args, **dyn_kwargs)``."""
    fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile:
            raise Impossible()
        obj = self.node.as_const(eval_ctx)
        args = [ x.as_const(eval_ctx) for x in self.args ]
        if isinstance(obj, _context_function_types):
            # Context functions need the runtime context -- cannot fold.
            if getattr(obj, 'contextfunction', False):
                raise Impossible()
            elif getattr(obj, 'evalcontextfunction', False):
                args.insert(0, eval_ctx)
            elif getattr(obj, 'environmentfunction', False):
                args.insert(0, self.environment)
        kwargs = dict((x.as_const(eval_ctx) for x in self.kwargs))
        if self.dyn_args is not None:
            try:
                args.extend(self.dyn_args.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        if self.dyn_kwargs is not None:
            try:
                kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
            except Exception:
                raise Impossible()
        try:
            return obj(*args, **kwargs)
        except Exception:
            raise Impossible()
class Getitem(Expr):
    """Subscript access ``node[arg]``."""
    fields = ('node', 'arg', 'ctx')
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        # Only plain loads can be folded; stores/deletes cannot.
        if self.ctx != 'load':
            raise Impossible()
        try:
            return self.environment.getitem(self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx))
        except Exception:
            raise Impossible()
    def can_assign(self):
        return False
class Getattr(Expr):
    """Attribute access ``node.attr``."""
    fields = ('node', 'attr', 'ctx')
    def as_const(self, eval_ctx = None):
        if self.ctx != 'load':
            raise Impossible()
        try:
            eval_ctx = get_eval_context(self, eval_ctx)
            return self.environment.getattr(self.node.as_const(eval_ctx), self.attr)
        except Exception:
            raise Impossible()
    def can_assign(self):
        return False
class Slice(Expr):
    """A slice expression ``start:stop:step`` (each part may be None)."""
    fields = ('start', 'stop', 'step')
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        def const(obj):
            # Missing slice parts stay None in the resulting slice object.
            if obj is None:
                return
            return obj.as_const(eval_ctx)
        return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
    """String concatenation via the ``~`` operator; coerces to unicode."""
    fields = ('nodes',)
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return ''.join((unicode(x.as_const(eval_ctx)) for x in self.nodes))
class Compare(Expr):
    """A (possibly chained) comparison such as ``a < b <= c``.

    ``ops`` is a list of Operand helpers, each carrying an operator name
    and the right-hand expression of that link in the chain.
    """
    fields = ('expr', 'ops')
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        result = value = self.expr.as_const(eval_ctx)
        try:
            for op in self.ops:
                new_value = op.expr.as_const(eval_ctx)
                result = _cmpop_to_func[op.op](value, new_value)
                # Python chains comparisons with an implicit ``and``: the
                # first falsy link decides the whole expression.  Without
                # this short-circuit, ``2 < 1 < 3`` would fold to True
                # because only the last link's result was kept.
                if not result:
                    return False
                value = new_value
        except Exception:
            raise Impossible()
        return result
class Operand(Helper):
    """One link of a comparison chain: an operator name and its operand."""
    fields = ('op', 'expr')
# Concrete binary operators; all folding logic lives in BinExpr.
class Mul(BinExpr):
    operator = '*'
class Div(BinExpr):
    operator = '/'
class FloorDiv(BinExpr):
    operator = '//'
class Add(BinExpr):
    operator = '+'
class Sub(BinExpr):
    operator = '-'
class Mod(BinExpr):
    operator = '%'
class Pow(BinExpr):
    operator = '**'
class And(BinExpr):
    operator = 'and'
    # Overridden to preserve Python's short-circuit semantics.
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
    operator = 'or'
    # Overridden to preserve Python's short-circuit semantics.
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
# Concrete unary operators; folding logic lives in UnaryExpr.
class Not(UnaryExpr):
    operator = 'not'
class Neg(UnaryExpr):
    operator = '-'
class Pos(UnaryExpr):
    operator = '+'
# Internal nodes used by the compiler and extensions.
class EnvironmentAttribute(Expr):
    """Loads an attribute from the environment object."""
    fields = ('name',)
class ExtensionAttribute(Expr):
    """Loads an attribute from an extension, looked up by identifier."""
    fields = ('identifier', 'name')
class ImportedName(Expr):
    """A name imported by dotted import path at compile time."""
    fields = ('importname',)
class InternalName(Expr):
    """A compiler-generated identifier; not user-constructible."""
    fields = ('name',)
    def __init__(self):
        raise TypeError("Can't create internal names. Use the `free_identifier` method on a parser.")
class MarkSafe(Expr):
    """Marks the wrapped expression's value as safe markup."""
    fields = ('expr',)
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
    """Marks the value as safe only when autoescaping is active."""
    fields = ('expr',)
    def as_const(self, eval_ctx = None):
        eval_ctx = get_eval_context(self, eval_ctx)
        if eval_ctx.volatile:
            raise Impossible()
        expr = self.expr.as_const(eval_ctx)
        if eval_ctx.autoescape:
            return Markup(expr)
        return expr
class ContextReference(Expr):
    """A reference to the runtime template context."""
    pass
class Continue(Stmt):
    """A ``{% continue %}`` inside a loop (loopcontrols extension)."""
    pass
class Break(Stmt):
    """A ``{% break %}`` inside a loop (loopcontrols extension)."""
    pass
class Scope(Stmt):
    """An artificial scope wrapper around a body."""
    fields = ('body',)
class EvalContextModifier(Stmt):
    """Modifies eval-context options (e.g. ``{% autoescape %}``)."""
    fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
    """Like EvalContextModifier but reverted after its body."""
    fields = ('body',)
def _failing_new(*args, **kwargs):
    raise TypeError("can't create custom node types")
# Seal the node hierarchy: from here on no new node classes may be defined.
NodeType.__new__ = staticmethod(_failing_new)
del _failing_new
| [
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
0199368418346d8e9f8d077120096ec470eb55f7 | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /src/datadog_api_client/v1/model/service_level_objective_query.py | ee11656739b4377458798b17a374a2d58d95f8a3 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 1,316 | py | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
)
class ServiceLevelObjectiveQuery(ModelNormal):
    @cached_property
    def openapi_types(_):
        # Both fields are plain Datadog metric-query strings.
        return dict(denominator=(str,), numerator=(str,))

    # Wire (JSON) attribute names match the python names one-to-one.
    attribute_map = dict(denominator="denominator", numerator="numerator")

    def __init__(self_, denominator: str, numerator: str, **kwargs):
        """
        A metric-based SLO. **Required if type is metric**. Datadog only
        allows the sum-by aggregator here: summing request counts (rather
        than averaging, or taking the max/min) is what makes the good/total
        ratio meaningful.

        :param denominator: A Datadog metric query for total (valid) events.
        :type denominator: str

        :param numerator: A Datadog metric query for good events.
        :type numerator: str
        """
        super().__init__(kwargs)
        self_.denominator = denominator
        self_.numerator = numerator
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
a7873b9123b4adc26592ecf2c7886ad5a18ad42f | 7726380df6ca42be4398136eb133496b55ef23ff | /bin/demosaick | c643b8233e909503cd6523ff49b2bc5b0aa6a291 | [
"MIT"
] | permissive | isVoid/demosaicnet | 7f9bc98a2a6ba7afaddd8b7400b17e996a0d531f | d03c9b263d91f07ea198df53c62d5b6da00b4702 | refs/heads/master | 2021-09-06T18:35:35.946714 | 2018-02-09T20:03:41 | 2018-02-09T20:03:41 | 111,714,754 | 0 | 1 | null | 2017-11-22T17:41:45 | 2017-11-22T17:41:44 | null | UTF-8 | Python | false | false | 11,524 | #!/usr/bin/env python
# MIT License
#
# Deep Joint Demosaicking and Denoising
# Siggraph Asia 2016
# Michael Gharbi, Gaurav Chaurasia, Sylvain Paris, Fredo Durand
#
# Copyright (c) 2016 Michael Gharbi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Run the demosaicking network on an image or a directory containing multiple images."""
import argparse
import cv2
import numpy as np
import os
import re
import time
import tempfile
from tqdm import tqdm
os.environ['GLOG_minloglevel'] = '2'
import caffe
NOISE_LEVELS = [0.0000, 0.0784] # Min/Max noise levels we trained on
def _psnr(a, b, crop=0, maxval=1.0):
"""Computes PSNR on a cropped version of a,b"""
if crop > 0:
aa = a[crop:-crop, crop:-crop, :]
bb = b[crop:-crop, crop:-crop, :]
else:
aa = a
bb = b
d = np.mean(np.square(aa-bb))
d = -10*np.log10(d/(maxval*maxval))
return d
def _uint2float(I):
if I.dtype == np.uint8:
I = I.astype(np.float32)
I = I*0.00390625
elif I.dtype == np.uint16:
I = I.astype(np.float32)
I = I/65535.0
else:
raise ValueError("not a uint type {}".format(I.dtype))
return I
def _float2uint(I, dtype):
if dtype == np.uint8:
I /= 0.00390625
I += 0.5
I = np.clip(I,0,255)
I = I.astype(np.uint8)
elif dtype == np.uint16:
I *= 65535.0
I += 0.5
I = np.clip(I,0,65535)
I = I.astype(np.uint16)
else:
raise ValueError("not a uint type {}".format(dtype))
return I
def _blob_to_image(blob):
# input shape h,w,c
shape = blob.data.shape
sz = shape[1:]
out = np.copy(blob.data)
out = np.reshape(out, sz)
out = out.transpose((1,2,0))
return out
def _make_mosaic(im, mosaic_type):
    """Apply a mosaicking layer to image ``im`` (H, W, C) by building a
    throwaway single-layer caffe net on disk and running it once.

    mosaic_type is 'bayer' or 'xtrans'; anything else raises.
    """
    if mosaic_type == 'bayer':
        layer = 'BayerMosaickLayer'
    elif mosaic_type == 'xtrans':
        layer = 'XTransMosaickLayer'
    else:
        raise Exception('Unknown mosaick type {}.'.format(mosaic_type))
    h, w, c = im.shape
    # delete=False so caffe can reopen the prototxt by name; removed below.
    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
        f.write("""name: 'pythonnet' force_backward: true
        input: 'data' input_shape { dim:1 dim: %d dim: %d dim: %d }
        layer { type: 'Python' name: 'output' bottom: 'data' top: 'output'
        python_param { module: 'demosaicnet.layers' layer: '%s' } }""" % (c, h, w, layer))
        fname = f.name
    net = caffe.Net(fname, caffe.TEST)
    os.remove(fname)
    # HWC -> CHW for caffe's data blob.
    net.blobs['data'].data[...] = im.transpose([2, 0, 1])[...]
    # NOTE(review): the output blob is read *before* net.forward() is
    # called here -- presumably the Python layer fills it at setup time;
    # confirm against demosaicnet.layers.
    out = np.squeeze(net.blobs['output'].data).transpose([1, 2, 0])
    net.forward()
    return out
def demosaick(net, M, noise, psize, crop):
    """Run the demosaicking net over mosaic ``M`` tile by tile.

    The image is processed in patches of ``psize`` pixels that overlap by
    2*crop (the net's receptive-field border); results are stitched into
    a full-size float32 array clamped to [0, 1].  Returns (result,
    runtime_ms).  NOTE: relies on Python 2 integer division when snapping
    tile coordinates to shift_factor.
    """
    start_time = time.time()
    h,w = M.shape[:2]
    # Patch size cannot exceed the image and must be even.
    psize = min(min(psize,h),w)
    psize -= psize % 2
    patch_step = psize
    patch_step -= 2*crop
    shift_factor = 2
    # Result array
    R = np.zeros(M.shape, dtype = np.float32)
    rangex = range(0,w-2*crop,patch_step)
    rangey = range(0,h-2*crop,patch_step)
    ntiles = len(rangex)*len(rangey)
    with tqdm(total=ntiles, unit='tiles', unit_scale=True) as pbar:
        for start_x in rangex:
            for start_y in rangey:
                end_x = start_x+psize
                end_y = start_y+psize
                # Clamp the last tile to the image edge, keeping its start
                # aligned to the mosaic period (shift_factor).
                if end_x > w:
                    end_x = w
                    end_x = shift_factor*((end_x)/shift_factor)
                    start_x = end_x-psize
                if end_y > h:
                    end_y = h
                    end_y = shift_factor*((end_y)/shift_factor)
                    start_y = end_y-psize
                tileM = M[start_y:end_y, start_x:end_x, :]
                # HWC tile -> NCHW blob.
                tileM = tileM[np.newaxis,:,:,:]
                tileM = tileM.transpose((0,3,1,2))
                net.blobs['mosaick'].reshape(*tileM.shape)
                net.blobs['mosaick'].data[...] = tileM
                # Noise-aware models take the noise level as a second input.
                if 'noise_level' in net.blobs.keys():
                    noise_shape = [1,]
                    net.blobs['noise_level'].reshape(*noise_shape)
                    net.blobs['noise_level'].data[...] = noise
                net.forward()
                out = net.blobs['output']
                out = _blob_to_image(out)
                s = out.shape[0]
                # The net's output is smaller than its input by 2*crop;
                # place it at the tile's interior position.
                R[start_y+crop:start_y+crop+s,
                  start_x+crop:start_x+crop+s,:] = out
                pbar.update(1)
    R[R<0] = 0.0
    R[R>1] = 1.0
    runtime = (time.time()-start_time)*1000 # in ms
    return R, runtime
def main(args):
    """Load the trained caffe model and demosaick every input image.

    Grayscale inputs are treated as raw mosaics (no ground truth);
    color inputs are re-mosaicked so PSNR against the original can be
    reported.  Results are written to args.output.
    """
    arch_path = os.path.join(args.model, 'deploy.prototxt')
    weights_path = os.path.join(args.model, 'weights.caffemodel')
    if args.gpu:
        print ' - using GPU'
        caffe.set_mode_gpu()
    else:
        print ' - using CPU'
        caffe.set_mode_cpu()
    net = caffe.Net(arch_path, weights_path, caffe.TEST)
    # Border the net consumes: half the input/output spatial difference.
    crop = (net.blobs['mosaick'].data.shape[-1]
            - net.blobs['output'].data.shape[-1])/2
    print "Crop", crop
    regexp = re.compile(r".*\.(png|tif)")
    if os.path.isdir(args.input):
        print 'dir'
        inputs = [f for f in os.listdir(args.input) if regexp.match(f)]
        inputs = [os.path.join(args.input, f) for f in inputs]
    else:
        inputs = [args.input]
    avg_psnr = 0
    n = 0
    for fname in inputs:
        print '+ Processing {}'.format(fname)
        Iref = cv2.imread(fname, -1)
        if len(Iref.shape) == 4: # removes alpha
            Iref = Iref[:, :, :3]
        if len(Iref.shape) == 3: # CV color storage..
            Iref = cv2.cvtColor(Iref,cv2.COLOR_BGR2RGB)
        dtype = Iref.dtype
        if dtype not in [np.uint8, np.uint16]:
            raise ValueError('Input type not handled: {}'.format(dtype))
        Iref = _uint2float(Iref)
        if len(Iref.shape) == 2:
            # Offset the image to match the our mosaic pattern
            if args.offset_x > 0:
                print ' - offset x'
                # Iref = Iref[:, 1:]
                Iref = np.pad(Iref, [(0, 0), (args.offset_x, 0)], 'reflect')
            if args.offset_y > 0:
                print ' - offset y'
                # Iref = Iref[1:, :]
                Iref = np.pad(Iref, [(args.offset_y, 0), (0,0)], 'reflect')
            has_groundtruth = False
            # Replicate the raw mosaic into 3 channels for the network.
            Iref = np.dstack((Iref, Iref, Iref))
        else:
            # No need for offsets if we have the ground-truth
            has_groundtruth = True
        if has_groundtruth and args.noise > 0:
            print ' - adding noise sigma={:.3f}'.format(args.noise)
            I = Iref + np.random.normal(
                    loc=0.0, scale = args.noise , size = Iref.shape )
        else:
            I = Iref
        # Pad by the net's crop, rounded up to the mosaic period.
        if crop > 0:
            if args.mosaic_type == 'bayer':
                c = crop + (crop %2) # Make sure we don't change the pattern's period
                I = np.pad(I, [(c, c), (c, c), (0, 0)], 'reflect')
            else:
                c = crop + (crop % 6) # Make sure we don't change the pattern's period
                I = np.pad(I, [(c, c), (c, c), (0, 0)], 'reflect')
        if has_groundtruth:
            print ' - making mosaick'
        else:
            print ' - formatting mosaick'
        M = _make_mosaic(I, args.mosaic_type)
        R, runtime = demosaick(net, M, args.noise, args.tile_size, crop)
        if crop > 0:
            # Remove the padding added above.
            R = R[c:-c, c:-c, :]
            I = I[c:-c, c:-c, :]
            M = M[c:-c, c:-c, :]
        if not has_groundtruth:
            if args.offset_x > 0:
                print ' - remove offset x'
                R = R[:, args.offset_x:]
                I = I[:, args.offset_x:]
                M = M[:, args.offset_x:]
            if args.offset_y > 0:
                print ' - remove offset y'
                R = R[args.offset_y:, :]
                I = I[args.offset_y:, :]
                M = M[args.offset_y:, :]
        if len(Iref.shape) == 2:
            # Offset the image to match the our mosaic pattern
            if args.offset_x == 1:
                print ' - offset x'
                Iref = Iref[:, 1:]
            if args.offset_y == 1:
                print ' - offset y'
                Iref = Iref[1:, :]
            has_groundtruth = False
        if has_groundtruth:
            p = _psnr(R, Iref, crop=crop)
            avg_psnr += p
            n += 1
            # Side-by-side strip: reference | noisy | mosaic | result | diff.
            diff = np.abs((R-Iref))
            diff /= np.amax(diff)
            out = np.hstack((Iref, I, M, R, diff))
            out = _float2uint(out, dtype)
            print '  PSNR = {:.1f} dB, time = {} ms'.format(p, int(runtime))
        else:
            print ' - raw image without groundtruth, bypassing metric'
            out = _float2uint(R, dtype)
        outputname = os.path.join(args.output, os.path.split(fname)[-1])
        # CV color storage..
        out = cv2.cvtColor(out, cv2.COLOR_RGB2BGR)
        cv2.imwrite(outputname, out)
    if has_groundtruth and n > 0:
        avg_psnr /= n
        print '+ Average PSNR = {:.1f} dB'.format(avg_psnr)
if __name__ == "__main__":
    # Command-line entry point: parse options, validate the noise level
    # against the training range, then run the pipeline.
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='data/images/test/hdrvdp/000/000003.png', help='path to input image or folder.')
    parser.add_argument('--output', type=str, default='output', help='path to output folder.')
    parser.add_argument('--model', type=str, default='testnet', help='path to trained model (folder containing deploy.prototxt and weights.caffemodel).')
    parser.add_argument('--noise', type=float, default=0.0, help='standard deviation of additive Gaussian noise, w.r.t to a [0,1] intensity scale.')
    parser.add_argument('--offset_x', type=int, default=0, help='number of pixels to offset the mosaick in the x-axis.')
    parser.add_argument('--offset_y', type=int, default=0, help='number of pixels to offset the mosaick in the y-axis.')
    parser.add_argument('--tile_size', type=int, default=512, help='split the input into tiles of this size.')
    parser.add_argument('--gpu', dest='gpu', action='store_true', help='use the GPU for processing.')
    parser.add_argument('--mosaic_type', type=str, default='bayer', choices=['bayer', 'xtrans'], help='type of mosaick (xtrans or bayer)')
    parser.set_defaults(gpu=False)
    args = parser.parse_args()
    # Refuse noise levels outside the range the model was trained on.
    if args.noise > NOISE_LEVELS[1] or args.noise < NOISE_LEVELS[0]:
        msg = 'The model was trained on noise levels in [{}, {}]'.format(
            NOISE_LEVELS[0], NOISE_LEVELS[1])
        raise ValueError(msg)
    main(args)
| [
"gharbi@mit.edu"
] | gharbi@mit.edu | |
f002b030cd2969fee5c668a5cae035d7f1da8581 | dbd603f5246059ce5fe3d426b4e7f5f98b944a78 | /mlmath/vector.py | 589999452c6de70857b5b5b0536d879990f1dc7c | [] | no_license | chyld/mlmath | f81522644b30e1a186032a6edbb908891730d9a7 | 7ce5606f02111d49f893f0f35e57bc72fb40cdcf | refs/heads/master | 2020-04-18T21:41:56.910701 | 2019-01-27T05:12:48 | 2019-01-27T05:12:48 | 167,772,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | class Vector:
def __init__(self, *elements):
self.elements = elements
def scale(self, amount):
return Vector(*(e * amount for e in self.elements))
def norm(self):
return sum((e ** 2 for e in self.elements)) ** 0.5
def __add__(self, other):
return Vector(*(a + b for a, b in zip(self.elements, other.elements)))
| [
"chyld.medford@gmail.com"
] | chyld.medford@gmail.com |
d8b482999535ede7ae3f2ba3547c471e1c6f9eb1 | 1b8d87b37cc6de4b0ffaedf0d5dc3877888865c3 | /fhirclient/r4models/messagedefinition_tests.py | 68e8854a920536410beae208bafde5623997ea6e | [] | no_license | Healthedata1/Flask-Alerts-Sender | d222e689de01daaa59d51aea2054d538db231cf9 | 0637cb1bb2c8af18243fce3aecc09723c2fdd155 | refs/heads/master | 2022-12-12T14:14:04.708052 | 2021-05-05T20:52:49 | 2021-05-05T20:52:49 | 231,147,534 | 1 | 0 | null | 2022-12-08T03:22:29 | 2019-12-31T21:20:13 | Python | UTF-8 | Python | false | false | 2,376 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import messagedefinition
from .fhirdate import FHIRDate
class MessageDefinitionTests(unittest.TestCase):
    """Round-trip tests for the generated MessageDefinition FHIR model."""

    def instantiate_from(self, filename):
        """Load a JSON fixture (relative to $FHIR_UNITTEST_DATADIR) and
        build a MessageDefinition instance from it."""
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("MessageDefinition", js["resourceType"])
        return messagedefinition.MessageDefinition(js)

    def testMessageDefinition1(self):
        """Parse the example fixture, then re-serialize and re-parse it to
        verify as_json() round-trips losslessly."""
        inst = self.instantiate_from("messagedefinition-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a MessageDefinition instance")
        self.implMessageDefinition1(inst)

        js = inst.as_json()
        self.assertEqual("MessageDefinition", js["resourceType"])
        inst2 = messagedefinition.MessageDefinition(js)
        self.implMessageDefinition1(inst2)

    def implMessageDefinition1(self, inst):
        """Assertions shared by the parsed and re-parsed instances."""
        self.assertEqual(inst.category, "notification")
        self.assertEqual(inst.contact[0].telecom[0].system, "url")
        self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org")
        self.assertEqual(inst.date.date, FHIRDate("2016-11-09").date)
        self.assertEqual(inst.date.as_json(), "2016-11-09")
        self.assertEqual(inst.eventCoding.code, "admin-notify")
        self.assertEqual(inst.eventCoding.system, "http://example.org/fhir/message-events")
        self.assertTrue(inst.experimental)
        self.assertEqual(inst.id, "example")
        self.assertEqual(inst.name, "EXAMPLE")
        self.assertEqual(inst.publisher, "Health Level Seven, Int'l")
        self.assertEqual(inst.purpose, "Defines a base example for other MessageDefinition instances.")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Message definition base example</div>")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.title, "Message definition base example")
        self.assertEqual(inst.url, "http://hl7.org/fhir/MessageDefinition/example")
| [
"ehaas@healthedatainc.com"
] | ehaas@healthedatainc.com |
d754a968e1f0df26c599e84104d0e8b552cb60ae | 0d15c599ec1fed05d7acdb31cebe37a40d38c2e3 | /setup.py | 482189a2a087726c6040c10024b6ef6344ccf911 | [] | no_license | VoteIT/voteit.notes | 34753f6ed45aa6c015e9aa015d679f4ee132e4ff | ecdb9b945d7cdab0cd566b22c879b71ba3c23b75 | refs/heads/master | 2021-07-13T07:41:39.347100 | 2020-05-16T12:26:57 | 2020-05-16T12:26:57 | 133,809,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))


def _read(name):
    """Read a text file next to this setup.py, closing it promptly.

    The original module left README/CONTRIBUTORS/CHANGES file handles
    open; using a context manager fixes that resource leak.
    """
    with open(os.path.join(here, name)) as f:
        return f.read()


# Long description shown on PyPI: README + contributors + changelog.
README = _read('README.rst')
CONTRIBUTORS = _read('CONTRIBUTORS.rst')
CHANGES = _read('CHANGES.rst')

# Runtime dependencies; also reused for the test requirements below.
requires = ('voteit.core',
            'betahaus.viewcomponent',
            'pyramid',
            'colander',
            'deform',
            'fanstatic',)

setup(name='voteit.notes',
      version='0.1dev',
      description='Personal notes on proposals for VoteIT',
      long_description=README + '\n\n' + CONTRIBUTORS + '\n\n' + CHANGES,
      classifiers=[
        "Programming Language :: Python",
        "Framework :: Pylons",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        ],
      author='VoteIT development team and contributors',
      author_email='info@voteit.se',
      url='http://www.voteit.se',
      keywords='web pyramid pylons voteit',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      install_requires=requires,
      tests_require=requires,
      test_suite="voteit.notes",
      entry_points = {
        'fanstatic.libraries': [
            'voteit_notes_lib = voteit.notes.fanstaticlib:voteit_notes_lib'
        ],
        },
      )
| [
"robin@betahaus.net"
] | robin@betahaus.net |
0b3a3a34c6f90fed16abed7da992f7be4a7df450 | c89543dd926c1787c40616ed174a3d1371c54449 | /superset/databases/commands/export.py | 4d3bb7f99f251d041f0dff7b5e8faa99d90b2d68 | [
"Apache-2.0",
"OFL-1.1"
] | permissive | j420247/incubator-superset | 7c7bff330393f0e91f5e67782f35efe8c735250a | c9b9b7404a2440a4c9d3173f0c494ed40f7fa2bd | refs/heads/master | 2023-03-11T21:53:16.827919 | 2023-02-03T19:04:17 | 2023-02-03T19:04:17 | 157,780,350 | 1 | 1 | Apache-2.0 | 2023-03-07T00:14:51 | 2018-11-15T22:24:29 | TypeScript | UTF-8 | Python | false | false | 4,254 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import json
import logging
from typing import Any, Dict, Iterator, Tuple
import yaml
from superset.databases.commands.exceptions import DatabaseNotFoundError
from superset.databases.dao import DatabaseDAO
from superset.commands.export.models import ExportModelsCommand
from superset.models.core import Database
from superset.utils.dict_import_export import EXPORT_VERSION
from superset.utils.file import get_filename
logger = logging.getLogger(__name__)
def parse_extra(extra_payload: str) -> Dict[str, Any]:
    """Deserialize a database's ``extra`` JSON blob.

    Returns an empty dict (and logs) when the payload is not valid JSON.
    Also repairs databases saved with ``schemas_allowed_for_csv_upload``
    stored as a JSON-encoded string instead of a list.
    """
    try:
        payload = json.loads(extra_payload)
    except json.decoder.JSONDecodeError:
        logger.info("Unable to decode `extra` field: %s", extra_payload)
        return {}

    schemas = payload.get("schemas_allowed_for_csv_upload")
    if isinstance(schemas, str):
        # Legacy rows double-encoded this field; decode the inner list.
        payload["schemas_allowed_for_csv_upload"] = json.loads(schemas)
    return payload
class ExportDatabasesCommand(ExportModelsCommand):
    """Exports databases (and optionally their datasets) as V1 YAML bundles."""
    dao = DatabaseDAO
    not_found = DatabaseNotFoundError

    @staticmethod
    def _export(
        model: Database, export_related: bool = True
    ) -> Iterator[Tuple[str, str]]:
        """Yield ``(file_path, yaml_content)`` pairs: first the database
        itself, then (when export_related) one file per dataset."""
        db_file_name = get_filename(model.database_name, model.id, skip_id=True)
        file_path = f"databases/{db_file_name}.yaml"

        payload = model.export_to_dict(
            recursive=False,
            include_parent_ref=False,
            include_defaults=True,
            export_uuids=True,
        )
        # https://github.com/apache/superset/pull/16756 renamed ``allow_csv_upload``
        # to ``allow_file_upload`, but we can't change the V1 schema
        replacements = {"allow_file_upload": "allow_csv_upload"}
        # this preserves key order, which is important
        payload = {replacements.get(key, key): value for key, value in payload.items()}

        # TODO (betodealmeida): move this logic to export_to_dict once this
        # becomes the default export endpoint
        if payload.get("extra"):
            extra = payload["extra"] = parse_extra(payload["extra"])

            # ``schemas_allowed_for_csv_upload`` was also renamed to
            # ``schemas_allowed_for_file_upload``, we need to change to preserve the
            # V1 schema
            if "schemas_allowed_for_file_upload" in extra:
                extra["schemas_allowed_for_csv_upload"] = extra.pop(
                    "schemas_allowed_for_file_upload"
                )

        payload["version"] = EXPORT_VERSION

        file_content = yaml.safe_dump(payload, sort_keys=False)
        yield file_path, file_content

        if export_related:
            for dataset in model.tables:
                ds_file_name = get_filename(
                    dataset.table_name, dataset.id, skip_id=True
                )
                file_path = f"datasets/{db_file_name}/{ds_file_name}.yaml"

                payload = dataset.export_to_dict(
                    recursive=True,
                    include_parent_ref=False,
                    include_defaults=True,
                    export_uuids=True,
                )
                payload["version"] = EXPORT_VERSION
                # Link the dataset back to its parent database by UUID.
                payload["database_uuid"] = str(model.uuid)

                file_content = yaml.safe_dump(payload, sort_keys=False)
                yield file_path, file_content
| [
"noreply@github.com"
] | j420247.noreply@github.com |
8d2ad9a8b6c480e830f3ac604361cacec07ae068 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf.0/gsn-edf_ut=3.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=12/sched.py | 5807deb25f4484e9338877651fe134cfb937907d | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | -X FMLP -Q 0 -L 5 100 300
-X FMLP -Q 0 -L 5 96 300
-X FMLP -Q 1 -L 5 88 300
-X FMLP -Q 1 -L 5 84 400
-X FMLP -Q 2 -L 3 64 300
-X FMLP -Q 2 -L 3 55 250
-X FMLP -Q 3 -L 2 49 300
-X FMLP -Q 3 -L 2 48 175
47 175
35 200
33 100
31 150
26 175
24 150
18 200
18 200
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
207423db38088cf820dfc1c434376eec194dd38d | 7520e14426f46525605d87f6104b6100c0724d84 | /examples/finite_differences.py | 871c43dee817896f653bba1adcaf93428618e26f | [] | no_license | shanhaiying/pysketcher | 1b40b1b230af429f93173cb6765d0b96d9806535 | 1007b71c64b3c812d301caa3b422b5308dcf87db | refs/heads/master | 2021-01-17T07:55:17.362970 | 2015-01-28T18:09:09 | 2015-01-28T18:09:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,224 | py | """
Illustrate forward, backward and centered finite differences
in four figures.
"""
from pysketcher import *
#test_test()
xaxis = 2
drawing_tool.set_coordinate_system(0, 7, 1, 6, axis=False)
f = SketchyFunc1('$u(t)$')
x = 3 # center point where we want the derivative
xb = 2 # x point used for backward difference
xf = 4 # x point used for forward difference
p = (x, f(x)) # center point
pf = (xf, f(xf)) # forward point
pb = (xb, f(xb)) # backward point
r = 0.1 # radius of circles placed at key points
c = Circle(p, r).set_linecolor('blue')
cf = Circle(pf, r).set_linecolor('red')
cb = Circle(pb, r).set_linecolor('green')
# Points in the mesh
p0 = point(x, xaxis) # center point
pf0 = point(xf, xaxis) # forward point
pb0 = point(xb, xaxis) # backward point
tick = 0.05
# 1D mesh with three points
mesh = Composition({
'tnm1': Text('$t_{n-1}$', pb0 - point(0, 0.3)),
'tn': Text('$t_{n}$', p0 - point(0, 0.3)),
'tnp1': Text('$t_{n+1}$', pf0 - point(0, 0.3)),
'axis': Composition({
'hline': Line(pf0-point(3,0), pb0+point(3,0)).\
set_linecolor('black').set_linewidth(1),
'tick_m1': Line(pf0+point(0,tick), pf0-point(0,tick)).\
set_linecolor('black').set_linewidth(1),
'tick_n': Line(p0+point(0,tick), p0-point(0,tick)).\
set_linecolor('black').set_linewidth(1),
'tick_p1': Line(pb0+point(0,tick), pb0-point(0,tick)).\
set_linecolor('black').set_linewidth(1)}),
})
# 1D mesh with three points for Crank-Nicolson
mesh_cn = Composition({
'tnm1': Text('$t_{n}$', pb0 - point(0, 0.3)),
'tn': Text(r'$t_{n+\frac{1}{2}}$', p0 - point(0, 0.3)),
'tnp1': Text('$t_{n+1}$', pf0 - point(0, 0.3)),
'axis': Composition({
'hline': Line(pf0-point(3,0), pb0+point(3,0)).\
set_linecolor('black').set_linewidth(1),
'tick_m1': Line(pf0+point(0,tick), pf0-point(0,tick)).\
set_linecolor('black').set_linewidth(1),
'tick_n': Line(p0+point(0,tick), p0-point(0,tick)).\
set_linecolor('black').set_linewidth(1),
'tick_p1': Line(pb0+point(0,tick), pb0-point(0,tick)).\
set_linecolor('black').set_linewidth(1)}),
})
# Vertical dotted lines at each mesh point
vlinec = Line(p, p0).set_linestyle('dotted').\
set_linecolor('blue').set_linewidth(1)
vlinef = Line(pf, pf0).set_linestyle('dotted').\
set_linecolor('red').set_linewidth(1)
vlineb = Line(pb, pb0).set_linestyle('dotted').\
set_linecolor('green').set_linewidth(1)
# Compose vertical lines for each type of difference
forward_lines = Composition({'center': vlinec, 'right': vlinef})
backward_lines = Composition({'center': vlinec, 'left': vlineb})
centered_lines = Composition({'left': vlineb, 'right': vlinef})
centered_lines2 = Composition({'left': vlineb, 'right': vlinef,
'center': vlinec})
# Tangents illustrating the derivative
domain = [1, 5]
domain2 = [2, 5]
forward_tangent = Line(p, pf).new_interval(x=domain2).\
set_linestyle('dashed').set_linecolor('red')
backward_tangent = Line(pb, p).new_interval(x=domain).\
set_linestyle('dashed').set_linecolor('green')
centered_tangent = Line(pb, pf).new_interval(x=domain).\
set_linestyle('dashed').set_linecolor('blue')
h = 1E-3 # h in finite difference approx used to compute the exact tangent
exact_tangent = Line((x+h, f(x+h)), (x-h, f(x-h))).\
new_interval(x=domain).\
set_linestyle('dotted').set_linecolor('black')
forward = Composition(
dict(tangent=forward_tangent,
point1=c, point2=cf, coor=forward_lines,
name=Text('forward',
forward_tangent.geometric_features()['end'] + \
point(0.1,0), alignment='left')))
backward = Composition(
dict(tangent=backward_tangent,
point1=c, point2=cb, coor=backward_lines,
name=Text('backward',
backward_tangent.geometric_features()['end'] + \
point(0.1,0), alignment='left')))
centered = Composition(
dict(tangent=centered_tangent,
point1=cb, point2=cf, point=c, coor=centered_lines2,
name=Text('centered',
centered_tangent.geometric_features()['end'] + \
point(0.1,0), alignment='left')))
exact = Composition(dict(graph=f, tangent=exact_tangent))
forward = Composition(dict(difference=forward, exact=exact)).\
set_name('forward')
backward = Composition(dict(difference=backward, exact=exact)).\
set_name('backward')
centered = Composition(dict(difference=centered, exact=exact)).\
set_name('centered')
all = Composition(
dict(exact=exact, forward=forward, backward=backward,
centered=centered)).set_name('all')
for fig in forward, backward, centered, all:
drawing_tool.erase()
fig.draw()
mesh.draw()
drawing_tool.display()
drawing_tool.savefig('fd_'+fig.get_name())
# Crank-Nicolson around t_n+1/2
drawing_tool.erase()
centered.draw()
mesh_cn.draw()
drawing_tool.display()
drawing_tool.savefig('fd_centered_CN')
raw_input()
| [
"hpl@simula.no"
] | hpl@simula.no |
f6531f7cfb43c1e68e3acf5fc0ab5dfd6c670dad | 71b11008ab0455dd9fd2c47107f8a27e08febb27 | /09、UI自动化测试及黑马头条项目实战/day16/03代码/scripts/mp/test_publish_article.py | bb0c9fc390b7a3689f3c4dbcddb3c679b9e20b14 | [] | no_license | zmh19941223/heimatest2021 | 49ce328f8ce763df0dd67ed1d26eb553fd9e7da4 | 3d2e9e3551a199bda9945df2b957a9bc70d78f64 | refs/heads/main | 2023-08-25T17:03:31.519976 | 2021-10-18T05:07:03 | 2021-10-18T05:07:03 | 418,348,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | # 定义测试类
import logging
import allure
import pytest
from config import BaseDir
from page.mp.home_page import HomeProxy
from page.mp.login_page import LoginProxy
from page.mp.publish_page import PublishProxy
from utils import UtilsDriver, is_exist, get_case_data
case_data = get_case_data(BaseDir + "/data/mp/test_login_data.json")
@pytest.mark.run(order=1)
class TestPublishArticle:
    """Self-media (MP) end-to-end suite: log in, then publish an article.

    ``test_login`` is parametrized from ``data/mp/test_login_data.json``;
    ``test_publish_article`` relies on the login performed by the previous
    test still being active in the shared webdriver session.
    """
    # Class-level fixture setup: build one page-proxy object per page under test.
    def setup_class(self):
        self.login_proxy = LoginProxy()
        self.home_proxy = HomeProxy()
        self.publish_proxy = PublishProxy()
    # Class-level fixture teardown: quit the shared MP webdriver.
    def teardown_class(self):
        UtilsDriver.quit_mp_driver()
    # Login test case, parametrized with (username, sms code, expected user name).
    @pytest.mark.parametrize("username, code, expect", case_data)
    @allure.severity(allure.severity_level.CRITICAL)
    def test_login(self, username, code, expect):
        logging.info("用例的数据如下:用户名:{}, 验证码:{}, 预期结果:{}".format(username,
                                                               code, expect))
        print(username, code)
        self.login_proxy.login(username, code)  # perform the login
        allure.attach(UtilsDriver.get_mp_driver().get_screenshot_as_png(), "登录截图", allure.attachment_type.PNG)
        username = self.home_proxy.get_username_msg()  # user name displayed after login
        assert expect == username  # assert on the displayed user name
    # Publish-article test case (depends on the login above).
    @allure.severity(allure.severity_level.CRITICAL)
    def test_publish_article(self):
        self.home_proxy.go_publish_page()  # navigate to the article-publish page
        self.publish_proxy.publish_article("发布文章_0828_15", "发布文章_0710_14发布文章_0710_14", "数据库")
        assert is_exist(UtilsDriver.get_mp_driver(), "新增文章成功")
| [
"1780858508@qq.com"
] | 1780858508@qq.com |
806afa96acf010a531f3f9ea7f1949f08b8aed32 | 0bd00c67608b1ce6c5e76d77b3ced4cce64ee5a0 | /python/problem57.py | 1052c9808248f12fd2498667f322d48b963d14b6 | [] | no_license | patrickdean/euler | 420ef8b268764a7e47120c5879063a7d9164fa09 | d42e94db3713bbbb65cb27e87ce1775211f53790 | refs/heads/master | 2020-05-18T17:44:18.738482 | 2014-05-20T00:48:51 | 2014-05-20T00:54:17 | 4,303,268 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
# Patrick Dean
# Project Euler: #057
# In the first one-thousand expansions of the continued fraction of sqrt(2), find the number fractions contain a numerator with more digits than denominator
def two_conv_gen(n):
    """Return the continued-fraction terms of sqrt(2): [1] followed by n 2s."""
    terms = [1]
    terms.extend(2 for _ in range(n))
    return terms
def convergent(lst):
    """Evaluate the continued fraction [a0; a1, ..., an] given as a list.

    Returns the (numerator, denominator) pair, computed back-to-front:
    start with the last term and repeatedly fold in the previous one via
    num/den -> a + den/num, i.e. (num, den) -> (a*num + den, num).

    Bug fix: the original read the multiplier from its slice *before*
    updating the slice, so the first fold reused the last term instead of
    the second-to-last -- e.g. convergent([1, 2]) gave (5, 2) instead of
    the correct (3, 2). (Masked for sqrt(2) lists of length >= 3 because
    every term after the first is 2.)
    """
    num, den = lst[-1], 1
    for a in reversed(lst[:-1]):
        num, den = a * num + den, num
    return (num, den)
l = two_conv_gen(1000)
x = [convergent(l[:i]) for i in range(1, len(l))]
print sum(1 for a, b in x if len(str(a)) > len(str(b))) | [
"="
] | = |
9962f987b25843ee987ebf75bc28b9e9c9c1dc90 | c2fd315faa3d4ad91474197e0c55526f0db83e3f | /nonrecursive_tree_search.py | a1eaef60ee1e98a2b33bf5a79d30dae926a947c2 | [] | no_license | zackmdavis/Standard_Algorithms | 8db9f912bddcb5bf14756d21ce67745ddbcc69c9 | 655309a4440c7baea26de291f9881494b4695827 | refs/heads/master | 2021-01-17T08:58:52.214180 | 2016-04-03T03:01:03 | 2016-04-03T03:01:03 | 4,513,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,946 | py | #!/usr/bin/env python3
# I usually think of depth-first tree search in terms of recursive function
# invocations, but we should be able to manage the node-stack ourselves instead
# of leaving it implicit in the call stack!
# Let's consider a tree where the leaf nodes have values, and, given a tree, we
# want to find the greatest value contained amongst its leaves.
import inspect
import logging
import sys
import unittest
class Node:
    """Tree node: internal nodes carry children and start with value None;
    leaves carry a value and have no children."""
    def __init__(self, value, children=None):
        # assert that leaves have initial values and internal nodes do not
        if children is None:
            assert value is not None
        else:
            assert value is None
        self.value = value
        # default may be changed by parent Node's __init__ializer!
        self.parent = None
        if children is None:
            self.children = []
        else:
            self.children = children
            # back-pointer so searches can propagate values toward the root
            for child in children:
                child.parent = self
    def __repr__(self):
        return "<Node: id={} value={} ({} children)>".format(
            id(self), self.value, len(self.children))
def recursive_search(node,
                     # Yes, this default is a shared mutable list -- and
                     # deliberately so: the module's test compares it with
                     # stack_search's default to check identical visit order.
                     visit_order=[]):
    """Depth-first search returning the greatest leaf value under *node*."""
    visit_order.append(node)
    if not node.children:
        return node.value
    # Internal node: the answer is the best answer among the subtrees.
    subtree_maxima = (recursive_search(child) for child in node.children)
    return max(subtree_maxima)
def stack_search(root,
                 # Wait for it ...
                 # (Mutable default on purpose: the module's test compares
                 # this shared list against recursive_search's default.)
                 visit_order=[]):
    """Find the greatest leaf value under *root* with an explicit stack.

    Same depth-first visit order as recursive_search. Leaf values are
    propagated upward by overwriting ancestors' ``value`` fields, so this
    search mutates the tree's internal nodes as a side effect.
    """
    stack = [root]
    while stack:
        node = stack.pop()
        visit_order.append(node)
        if not node.children:
            # propagate what we've learned up the tree
            messenger = node
            # Climb while the parent's running maximum is unset (None)
            # or beaten by the value we are carrying.
            while (messenger.parent is not None and
                   (messenger.parent.value is None or
                    messenger.parent.value < messenger.value)):
                logging.debug(
                    "setting value of {} to {} because of child {}".format(
                        messenger.parent, messenger.value, messenger))
                messenger.parent.value = messenger.value
                messenger = messenger.parent
        else:
            # Push children reversed so the leftmost child is popped first,
            # matching the recursive left-to-right visit order.
            for child in reversed(node.children):
                stack.append(child)
    return root.value
our_tree = Node(None,
[Node(None,
[Node(None,
[Node(1),
Node(2),
Node(None,
[Node(3),
Node(None,
[Node(4),
Node(None,
[Node(3)]),
Node(2)])])])]),
Node(None,
[Node(None,
[Node(None,
[Node(1),
Node(2),
Node(None,
[Node(3)])])])])])
class RecursiveSearchTestCase(unittest.TestCase):
    def test_equivalence(self):
        """Both searches find 4 in our_tree and visit nodes in the same order."""
        search_methods = [recursive_search, stack_search]
        for search_method in search_methods:
            self.assertEqual(4, search_method(our_tree))
        # Compare the (deliberately mutable) default visit_order lists of the
        # two functions: equal lists mean identical depth-first visit order.
        self.assertEqual(
            # We have fun around here.
            *[inspect.signature(
                search_method).parameters['visit_order'].default
              for search_method in search_methods]
        )
if __name__ == "__main__":
if sys.argv[1:]:
arg, *_rest = sys.argv[1:]
else:
arg = None
if arg == "debug":
logging_kwargs = {'level': logging.DEBUG}
else:
logging_kwargs = {}
sys.argv[1:] = []
logging.basicConfig(**logging_kwargs)
unittest.main()
| [
"code@zackmdavis.net"
] | code@zackmdavis.net |
76547e9354f9b0bb944103b940eec27806a919ec | 1faf574fc2592e8c65043021762ed6eab441feb5 | /프로그래머스/카카오_겨울_인턴_호텔방배정.py | f0d126466c2a7c547b29d692c9b9132de753635c | [] | no_license | YooGunWook/coding_test | 4e9c8851a025271f79408fd7a0f097f07351b1e7 | 2806ecf5db3714d1a4b5dbfaa2b584bb8b6166a0 | refs/heads/master | 2022-10-15T03:37:46.129384 | 2022-09-26T13:45:53 | 2022-09-26T13:45:53 | 240,265,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | py | import bisect
import collections
import copy
# Only 정확성
def solution(k, room_number):
    """Assign each guest the requested room, or the nearest free room above it.

    Correctness-only version: occupancy is tracked per room and the next
    candidate above a taken room is found by binary search over the sorted
    room ids, so long occupied runs are walked one room at a time.
    """
    occupied = dict.fromkeys(range(1, k + 1), 0)  # room id -> 0 free / 1 taken
    sorted_rooms = sorted(occupied)               # [1..k], for bisecting
    assigned = []
    for want in room_number:
        while occupied[want]:
            # Requested room is taken: hop to the smallest room id > want.
            want = sorted_rooms[bisect.bisect_right(sorted_rooms, want)]
        occupied[want] = 1
        assigned.append(want)
    return assigned
# 정확성 + 효율성
def solution2(k, room_number):
    """Assign rooms using path-compressed "next candidate" links.

    next_room[r] is the next room to try once r has been taken; following
    the chain from the requested room lands on a free one, and every room
    touched along the way is re-pointed just past it (path compression),
    which keeps repeated requests near-constant time.
    """
    next_room = {}
    assigned = []
    for want in room_number:
        chain = [want]
        # Follow the links until we reach a room with no link, i.e. a free one.
        while chain[-1] in next_room:
            chain.append(next_room[chain[-1]])
        got = chain[-1]
        assigned.append(got)
        # Compress: every visited room now points directly past the one taken.
        for seen in chain:
            next_room[seen] = got + 1
    return assigned
room_number = [1, 3, 4, 1, 3, 1]
k = 10
print(solution2(k, room_number)) | [
"gunwook0307@yonsei.ac.kr"
] | gunwook0307@yonsei.ac.kr |
a1ac33d8d408048b216a7e510c1fb0621d89afe5 | 890d11bd06222125b4b4f4af7cea814544755403 | /graff/db.py | 6d91204e1996641bee36be6458a2f7e6cc6b3cea | [] | no_license | eklitzke/graff | 8f80f4a5ea015fa320c9dbd46dedbb6067d7ce2e | 8146c4a4397d2ee481549c407fb976c0c763fcc8 | refs/heads/master | 2021-01-01T05:49:33.717421 | 2011-12-26T21:28:04 | 2011-12-26T21:28:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,070 | py | import datetime
import hashlib
import os
from sqlalchemy import create_engine, func, Column, ForeignKey
from sqlalchemy.types import Integer, String, Float, DateTime, Boolean
from sqlalchemy.orm import sessionmaker, relationship, backref
from sqlalchemy.ext.declarative import declarative_base
import warnings
from graff import config
from graff import crypto
from graff import geo
if config.get('memory', True):
engine = create_engine('sqlite:///:memory:')
now = func.datetime()
else:
engine = create_engine('mysql+mysqldb://' +
config.get('db_user', 'graff') + ':' +
config.get('db_pass', 'gr4ff') + '@' +
config.get('db_host', '127.0.0.1') + '/' +
config.get('db_schema', 'graff'), pool_recycle=3600)
now = func.now()
Session = sessionmaker(bind=engine)
class _Base(object):
    """Shared declarative mixin: encrypted-id helpers and query shortcuts."""
    @property
    def encid(self):
        """Row id encrypted with the model's own key if set, else the default."""
        if hasattr(self, 'secret_key'):
            return crypto.encid(self.id, self.secret_key)
        else:
            return crypto.encid(self.id)
    @classmethod
    def create(cls, session, **kw):
        """Instantiate a row, add it to *session*, and return it (no commit)."""
        obj = cls(**kw)
        session.add(obj)
        return obj
    @classmethod
    def by_id(cls, session, row_id):
        """Fetch a row by primary key, or None."""
        return session.query(cls).filter(cls.id == row_id).first()
    @classmethod
    def from_encid(cls, session, encid):
        """Decrypt *encid* (using the model's key if set) and fetch the row."""
        if hasattr(cls, 'secret_key'):
            row_id = crypto.decid(encid, cls.secret_key)
        else:
            row_id = crypto.decid(encid)
        return cls.by_id(session, row_id)
    @classmethod
    def most_recent(cls, session, limit):
        """Return the newest *limit* rows, newest first (descending id)."""
        return session.query(cls).order_by(cls.id.desc()).limit(limit)
Base = declarative_base(cls=_Base)
GEOHASH_PRECISION = 12
class Photo(Base):
    """An uploaded photo plus optional EXIF-derived geo/camera metadata."""
    __tablename__ = 'photo'
    id = Column(Integer, primary_key=True)
    body_hash = Column(String(40), nullable=False)
    content_type = Column(String(64), nullable=False)
    fsid = Column(String(32), nullable=False)
    latitude = Column(Float)
    longitude = Column(Float)
    geohash = Column(String(GEOHASH_PRECISION))
    make = Column(String(128))
    model = Column(String(128))
    photo_time = Column(DateTime, nullable=False, default=now)
    photo_height = Column(Integer, nullable=False)
    photo_width = Column(Integer, nullable=False)
    remote_ip = Column(Integer, nullable=False)
    sensor = Column(Boolean)
    time_created = Column(DateTime, nullable=False, default=now)
    user_id = Column(Integer, ForeignKey('user.id'))
    user = relationship('User', backref=backref('photos', order_by=id))
    @property
    def time_ago(self):
        """Human-readable age of the photo, e.g. '5 minutes ago', '2 days ago'.

        NOTE(review): compares naive datetime.now() against time_created;
        assumes both are in the same (server-local) timezone -- confirm.
        """
        delta = datetime.datetime.now() - self.time_created
        if delta < datetime.timedelta(seconds=30):
            return 'a moment ago'
        elif delta < datetime.timedelta(seconds=120):
            return '1 minute ago'
        elif delta < datetime.timedelta(seconds=59 * 60):
            return '%d minutes ago' % (int(delta.total_seconds() / 60.0),)
        elif delta < datetime.timedelta(seconds=120 * 60):
            return '1 hour ago'
        elif delta < datetime.timedelta(seconds=24 * 60 * 60):
            return '%d hours ago' % (int(delta.total_seconds() / 3600.0),)
        elif delta < datetime.timedelta(seconds=2 * 86400):
            return '1 day ago'
        else:
            # Bug fix: was dividing by 84600 (a transposition of 86400
            # seconds/day), overstating the day count for old photos.
            return '%d days ago' % (int(delta.total_seconds() / 86400.0),)
    @classmethod
    def get_nearby(cls, session, limit=None, user=None, bounds=None):
        """Get all of the photos in an area (possibly unbounded). Results are
        returned in descending order of age (i.e. newest photos first).
        """
        assert limit is not None
        q = session.query(cls)
        if bounds:
            hashcode = geo.get_bounding_geohash(bounds['n'], bounds['w'], bounds['s'], bounds['e'])
            # Bug fix: Query.filter() returns a *new* query; the original
            # discarded these return values, so the bounds (and user) filters
            # were silently ignored. Reassign q each time.
            q = q.filter(cls.geohash.like(hashcode + '%')).filter(cls.latitude <= bounds['n']).filter(cls.latitude >= bounds['s']).filter(cls.longitude >= bounds['w']).filter(cls.longitude <= bounds['e'])
        if user:
            u = User.by_name(session, user)
            q = q.filter(cls.user_id == u.id)
        return q.order_by(cls.time_created.desc()).limit(limit)
    def to_json(self):
        """JSON-safe dict for API responses (times as unix seconds)."""
        return {
            'id': self.encid,
            'latitude': self.latitude,
            'longitude': self.longitude,
            'time_ago': self.time_ago,
            'time_created': int(self.time_created.strftime('%s')),
            'user': self.user.name if self.user_id else None
        }
class User(Base):
    """Registered user; password stored as hex of (8-byte salt || SHA-1 digest)."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    pw_hash = Column(String(56), nullable=False)
    email = Column(String)
    location = Column(String)
    signup_ip = Column(Integer, nullable=False)
    login_ip = Column(Integer, nullable=False)
    time_created = Column(DateTime, nullable=False, default=now)
    @classmethod
    def create(cls, session, **kwargs):
        """Register a user from kwargs ('name', 'password', 'email', 'remote_ip').

        Returns an error string on a name/email conflict, else the new row.
        """
        if session.query(cls).filter(cls.name == kwargs['name']).first() is not None:
            return 'That username has already been taken'
        if kwargs['email'] and session.query(cls).filter(cls.email == kwargs['email']).first() is not None:
            return 'That email has already been registered'
        # Fix: use os.urandom() for the salt instead of opening and reading
        # /dev/random, which can block until the kernel entropy pool refills.
        # The stored format (salt || digest, hex-encoded) is unchanged.
        salt = os.urandom(8)
        # NOTE(review): salted SHA-1 is a weak password hash by modern
        # standards; consider bcrypt/scrypt if the schema can change.
        hashval = hashlib.sha1(salt + kwargs.pop('password').encode('ascii')).digest()
        kwargs['pw_hash'] = (salt + hashval).encode('hex')
        kwargs['signup_ip'] = kwargs['login_ip'] = kwargs.pop('remote_ip')
        return super(User, cls).create(session, **kwargs)
    @classmethod
    def authenticate(cls, session, name, password, remote_ip):
        """Check credentials; on success record login_ip and return the row."""
        row = session.query(cls).filter(cls.name == name).first()
        if row is None:
            return None
        row_hash = row.pw_hash.decode('hex')
        # First 8 bytes are the salt, the rest is the stored digest.
        if hashlib.sha1(str(row_hash[:8]) + password.encode('ascii')).digest() == row_hash[8:]:
            row.login_ip = remote_ip
            return row
        return None
    @classmethod
    def by_name(cls, session, name):
        """Fetch a user by exact name, or None."""
        return session.query(cls).filter(cls.name == name).first()
# set up encryption keys
g = globals()
crypto_keys = set()
for k, v in g.items():
find_key = False
try:
if issubclass(v, Base) and v is not Base:
find_key = True
except TypeError:
continue
if find_key:
if 'secret_key' in v.__dict__:
warnings.warn('static key set for %s' % (v,))
elif config.get('key_' + v.__name__) is not None:
v.secret_key = config.get('key_' + v.__name__)
elif config.get('memory'):
v.secret_key = os.urandom(16)
else:
v.secret_key = '?' * 16
if v.secret_key in crypto_keys:
warnings.warn('re-using crypto key for %s' % (v,))
crypto_keys.add(v.secret_key)
del crypto_keys
del g
if config.get('memory', True):
Base.metadata.create_all(engine)
| [
"evan@eklitzke.org"
] | evan@eklitzke.org |
8c49e8ae73abdd7a66789562176f376394c27e17 | 64f9f39485900853d64d1f727a80e097a6836053 | /dabi/dabi/items.py | 71177c9d7eedfa5c63ee957c1921f527c8b53231 | [] | no_license | timbortnik/dabi-scraper | 9f09e6f2e639b388106725f2a883b15fb7d7eb21 | 4262714b9ac62359b938b417abafae62cb76cfe3 | refs/heads/master | 2021-04-03T07:41:28.409473 | 2018-03-08T20:28:39 | 2018-03-08T20:28:39 | 124,365,911 | 0 | 0 | null | 2018-03-08T16:28:37 | 2018-03-08T09:07:56 | Python | UTF-8 | Python | false | false | 251 | py | from scrapy.contrib.loader import ItemLoader
from scrapy.contrib.loader.processor import TakeFirst, MapCompose
class TakeFirstItemLoader(ItemLoader):
    """ItemLoader that whitespace-trims every scraped value and keeps only
    the first non-empty value collected for each field."""

    # Trim each incoming value (Python 2 unicode strings).
    default_input_processor = MapCompose(unicode.strip)
    # Collapse the per-field value list down to its first entry.
    default_output_processor = TakeFirst()
| [
"dchaplinsky@conversionscience.co.uk"
] | dchaplinsky@conversionscience.co.uk |
e9d28e0e53301a24a36450014edba54f0c72b8ff | e53aa5ebfff14c484942cb6ae57db98c15000ee7 | /test/integration/ggrc/services/test_collection_post.py | 6b2579c1484b4853f4b43db50c36186275fe8969 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dondublon/ggrc-core | bde266be2b8918afb85e7f659a561e63f49bd748 | ea8258f0eb58a4b04f8c7b85c9ab9ae1e87cd228 | refs/heads/release/0.10-Raspberry | 2021-01-21T18:21:53.202351 | 2017-05-21T21:55:22 | 2017-05-21T21:55:22 | 92,039,199 | 0 | 0 | null | 2017-05-22T10:03:43 | 2017-05-22T10:03:43 | null | UTF-8 | Python | false | false | 7,377 | py | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for collection post service."""
import json
from ggrc import db
from ggrc import models
from integration.ggrc.services import TestCase
class TestCollectionPost(TestCase):
"""Test for collection post api calls."""
@staticmethod
def get_location(response):
"""Ignore the `http://localhost` prefix of the Location"""
return response.headers['Location'][16:]
@staticmethod
def headers(*args, **kwargs):
"""Get request headers."""
ret = list(args)
ret.append(('X-Requested-By', 'Unit Tests'))
ret.extend(kwargs.items())
return ret
def test_collection_post_successful(self):
"""Test normal successful collection post call."""
data = json.dumps(
{'services_test_mock_model': {'foo': 'bar', 'context': None}})
self.client.get("/login")
response = self.client.post(
self.mock_url(),
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assertStatus(response, 201)
self.assertIn('Location', response.headers)
response = self.client.get(
self.get_location(response), headers=self.headers())
self.assert200(response)
self.assertIn('Content-Type', response.headers)
self.assertEqual('application/json', response.headers['Content-Type'])
self.assertIn('services_test_mock_model', response.json)
self.assertIn('foo', response.json['services_test_mock_model'])
self.assertEqual('bar', response.json['services_test_mock_model']['foo'])
# check the collection, too
response = self.client.get(self.mock_url(), headers=self.headers())
self.assert200(response)
self.assertEqual(
1, len(response.json['test_model_collection']['test_model']))
self.assertEqual(
'bar', response.json['test_model_collection']['test_model'][0]['foo'])
def test_successful_single_array(self):
"""Test collection post successful single array."""
data = json.dumps(
[{'services_test_mock_model': {'foo': 'bar', 'context': None}}])
self.client.get("/login")
response = self.client.post(
self.mock_url(),
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assert200(response)
self.assertEqual(type(response.json), list)
self.assertEqual(len(response.json), 1)
response = self.client.get(self.mock_url(), headers=self.headers())
self.assert200(response)
self.assertEqual(
1, len(response.json['test_model_collection']['test_model']))
self.assertEqual(
'bar', response.json['test_model_collection']['test_model'][0]['foo'])
def test_successful_multiple(self):
"""Test collection post successful multiple."""
data = json.dumps([
{'services_test_mock_model': {'foo': 'bar1', 'context': None}},
{'services_test_mock_model': {'foo': 'bar2', 'context': None}},
])
self.client.get("/login")
response = self.client.post(
self.mock_url(),
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assert200(response)
self.assertEqual(type(response.json), list)
self.assertEqual(len(response.json), 2)
self.assertEqual(
'bar1', response.json[0][1]['services_test_mock_model']['foo'])
self.assertEqual(
'bar2', response.json[1][1]['services_test_mock_model']['foo'])
response = self.client.get(self.mock_url(), headers=self.headers())
self.assert200(response)
self.assertEqual(
2, len(response.json['test_model_collection']['test_model']))
def test_multiple_with_errors(self):
"""Test collection post successful multiple with errors."""
data = json.dumps([
{'services_test_mock_model':
{'foo': 'bar1', 'code': 'f1', 'context': None}},
{'services_test_mock_model':
{'foo': 'bar1', 'code': 'f1', 'context': None}},
{'services_test_mock_model':
{'foo': 'bar2', 'code': 'f2', 'context': None}},
{'services_test_mock_model':
{'foo': 'bar2', 'code': 'f2', 'context': None}},
])
self.client.get("/login")
response = self.client.post(
self.mock_url(),
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assertEqual(400, response.status_code)
self.assertEqual([400], [i[0] for i in response.json])
response = self.client.get(self.mock_url(), headers=self.headers())
self.assert200(response)
self.assertEqual(
0, len(response.json['test_model_collection']['test_model']))
def test_post_bad_request(self):
"""Test collection post with invalid content."""
response = self.client.post(
self.mock_url(),
content_type='application/json',
data='This is most definitely not valid content.',
headers=self.headers(),
)
self.assert400(response)
def test_bad_content_type(self):
"""Test post with bad content type."""
response = self.client.post(
self.mock_url(),
content_type='text/plain',
data="Doesn't matter, now does it?",
headers=self.headers(),
)
self.assertStatus(response, 415)
def test_post_relationship(self):
"""Test integrity error on relationship collection post.
Posting duplicate relationships should have a mechanism for removing
duplicates from the post request and fixing unique integrity errors.
"""
db.session.add(models.Policy(id=144, title="hello"))
db.session.add(models.Policy(id=233, title="world"))
db.session.add(models.Policy(id=377, title="bye"))
db.session.commit()
self.client.get("/login")
data = json.dumps([{
"relationship": {
"source": {"id": 144, "type": "Policy"},
"destination": {"id": 233, "type": "Policy"},
"context": None,
},
}])
response = self.client.post(
"/api/relationships",
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assert200(response)
relationships = models.Relationship.eager_query().all()
self.assertEqual(len(relationships), 1)
rel1 = relationships[0]
self.assertEqual({144, 233}, {rel1.source.id, rel1.destination.id})
data = json.dumps([{
"relationship": { # this should be ignored
"source": {"id": 144, "type": "Policy"},
"destination": {"id": 233, "type": "Policy"},
"context": None,
},
}, {
"relationship": {
"source": {"id": 377, "type": "Policy"},
"destination": {"id": 144, "type": "Policy"},
"context": None,
},
}, {
"relationship": { # Refactored api will ignore this
"source": {"id": 144, "type": "Policy"},
"destination": {"id": 377, "type": "Policy"},
"context": None,
},
}])
response = self.client.post(
"/api/relationships",
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assert200(response)
relationships = models.Relationship.eager_query().all()
self.assertEqual(len(relationships), 3) # This should be 2
rel1 = relationships[0]
| [
"zidarsk8@gmail.com"
] | zidarsk8@gmail.com |
bec70bf0707e4fecdfb9fa574828afe8dfd98e98 | 42685605f569e9d0afadc358ace6ce212e86bf1c | /1_Zadania/Dzien_3/nowy/nowy/bin/python-config | 975871648d91ff51ce0d1b9a103b700bf84f288c | [] | no_license | Danutelka/Coderslab-Podstawy-Python | e6bdbbd9dc2031cf9ec5d9d3eeba717d22e9ecd7 | eed6a957f081e488ae3c94298718f7b93e17a93c | refs/heads/master | 2020-08-04T15:21:28.058433 | 2019-04-07T19:24:54 | 2019-04-07T19:24:54 | 212,181,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,387 | #!/home/danuta/KRA_PYT_W_03_Podstawy_Python/1_Zadania/Dzien_3/nowy/nowy/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"kawecka.d@gmail.com"
] | kawecka.d@gmail.com | |
2dadad188a8092ba4e9d3982cd96b444bb4ea81e | 3e5fc180f90d63bcff54eab1cea310f3a2bbea10 | /manage.py | d4baaecbe6653ea8773bc39d8cb8bc5c57dddba2 | [] | no_license | eht16/dpaste.de | fbb9aa24c0fdf8373350d1d1f162f7bd596e60a6 | 2b9797ae125808567cfdcac9292a3d5f58d42aab | refs/heads/master | 2021-01-17T03:42:55.691283 | 2013-03-31T09:20:01 | 2013-03-31T09:20:01 | 2,736,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | #!/usr/bin/env python
import os
import sys
from django.core.management import execute_from_command_line
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pastebin.conf.local.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"enrico.troeger@uvena.de"
] | enrico.troeger@uvena.de |
b26774f0589d15eb68e5fa29c1c34e7a62e3d687 | 43a07c7e2b7f46e95e4693afa11ddbcce195c262 | /yatra/urls.py | 1320df4d2f2937954e47acdf2295fde68c91fb2a | [] | no_license | ashokkuikel/yatra | 217ae9432852e164cec12c7a3aca55440cebfb91 | 3a1a5d7df6f754a1d5de1b07172f28496cd4beec | refs/heads/master | 2020-03-07T00:53:40.816789 | 2016-07-03T03:16:33 | 2016-07-03T03:16:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | """yatra URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from tour.views import *
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', HomeView.as_view(), name="home"),
url(r'^logout/$', LogoutView.as_view(), name="logout"),
url(r'^dashboard/$', DashboardView.as_view(), name="dashboard"),
url(r'^search/$', SearchView.as_view(), name="search"),
url(r'^plan/(?P<pk>\d+)/$', PlanView.as_view(), name="plan"),
url(r'^visualize/$', VisualizeView.as_view(), name="visualize"),
]
| [
"bibekdahal.bd16@gmail.com"
] | bibekdahal.bd16@gmail.com |
ad7d72c705124fc2255009e05d46d590e1a297f0 | b12bcee65e72940f436c7b1b44f18e0312dd333d | /python/pyspark/sql/tests/connect/test_parity_functions.py | fa3388d43e115612939a71cc6c83084ab47c32c2 | [
"CC0-1.0",
"MIT",
"Python-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"EPL-1.0",
"Classpath-exception-2.0",
"GCC-exception... | permissive | jalpan-randeri/spark | 30804d28e35ff959b49588c74e24e9acab66269b | 5a762687f416a11a3a6211bc6d90225503295485 | refs/heads/master | 2023-01-08T05:03:10.919327 | 2022-12-23T23:55:40 | 2022-12-23T23:55:40 | 221,611,476 | 0 | 0 | Apache-2.0 | 2019-11-14T04:28:12 | 2019-11-14T04:28:09 | null | UTF-8 | Python | false | false | 8,209 | py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import os
from pyspark.sql import SparkSession
from pyspark.sql.tests.test_functions import FunctionsTestsMixin
from pyspark.testing.connectutils import should_test_connect, connect_requirement_message
from pyspark.testing.sqlutils import ReusedSQLTestCase
@unittest.skipIf(not should_test_connect, connect_requirement_message)
class FunctionsParityTests(ReusedSQLTestCase, FunctionsTestsMixin):
@classmethod
def setUpClass(cls):
from pyspark.sql.connect.session import SparkSession as RemoteSparkSession
super(FunctionsParityTests, cls).setUpClass()
cls._spark = cls.spark # Assign existing Spark session to run the server
# Sets the remote address. Now, we create a remote Spark Session.
# Note that this is only allowed in testing.
os.environ["SPARK_REMOTE"] = "sc://localhost"
cls.spark = SparkSession.builder.remote("sc://localhost").getOrCreate()
assert isinstance(cls.spark, RemoteSparkSession)
@classmethod
def tearDownClass(cls):
# TODO(SPARK-41529): Implement stop in RemoteSparkSession.
# Stop the regular Spark session (server) too.
cls.spark = cls._spark
super(FunctionsParityTests, cls).tearDownClass()
del os.environ["SPARK_REMOTE"]
@unittest.skip("Fails in Spark Connect, should enable.")
def test_add_months_function(self):
super().test_add_months_function()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_array_repeat(self):
super().test_array_repeat()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_assert_true(self):
super().test_assert_true()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_basic_functions(self):
super().test_basic_functions()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_between_function(self):
super().test_between_function()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_date_add_function(self):
super().test_date_add_function()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_date_sub_function(self):
super().test_date_sub_function()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_datetime_functions(self):
super().test_datetime_functions()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_dayofweek(self):
super().test_dayofweek()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_explode(self):
super().test_explode()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_expr(self):
super().test_expr()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_first_last_ignorenulls(self):
super().test_first_last_ignorenulls()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_function_parity(self):
super().test_function_parity()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_functions_broadcast(self):
super().test_functions_broadcast()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_inline(self):
super().test_inline()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_input_file_name_reset_for_rdd(self):
super().test_input_file_name_reset_for_rdd()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_input_file_name_udf(self):
super().test_input_file_name_udf()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_inverse_trig_functions(self):
super().test_inverse_trig_functions()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_lit_list(self):
super().test_lit_list()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_lit_np_scalar(self):
super().test_lit_np_scalar()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_map_functions(self):
super().test_map_functions()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_math_functions(self):
super().test_math_functions()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_ndarray_input(self):
super().test_ndarray_input()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_nested_higher_order_function(self):
super().test_nested_higher_order_function()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_np_scalar_input(self):
super().test_np_scalar_input()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_nth_value(self):
super().test_nth_value()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_overlay(self):
super().test_overlay()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_percentile_approx(self):
super().test_percentile_approx()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_raise_error(self):
super().test_raise_error()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_slice(self):
super().test_slice()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_sorting_functions_with_column(self):
super().test_sorting_functions_with_column()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_window_functions(self):
super().test_window_functions()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_window_functions_cumulative_sum(self):
super().test_window_functions_cumulative_sum()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_window_functions_without_partitionBy(self):
super().test_window_functions_without_partitionBy()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_window_time(self):
super().test_window_time()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_corr(self):
super().test_corr()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_cov(self):
super().test_cov()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_crosstab(self):
super().test_crosstab()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_rand_functions(self):
super().test_rand_functions()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_reciprocal_trig_functions(self):
super().test_reciprocal_trig_functions()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_sampleby(self):
super().test_sampleby()
@unittest.skip("Fails in Spark Connect, should enable.")
def test_approxQuantile(self):
super().test_approxQuantile()
if __name__ == "__main__":
import unittest
from pyspark.sql.tests.connect.test_parity_functions import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| [
"gurwls223@apache.org"
] | gurwls223@apache.org |
d810b2f8cd0199f8b3c9e947cef421163bf2a574 | 5a142fb7312fedd4a0386247337b0188112b426e | /project2/face-reco/reconnaissance avec sqlite/trainner.py | 4ceedbed3e17fbc06e80d1f7ae6c1d751ae9b94a | [] | no_license | rolandus10/Projet_miroir | aece30aabe543d0165554476938c728a4e341af1 | 71cbca1fe2a0863b108f045f2f6f84e5eda45705 | refs/heads/master | 2020-04-25T16:54:23.361658 | 2019-05-10T14:56:19 | 2019-05-10T14:56:19 | 172,928,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 941 | py | import os
import cv2
import numpy as np
from PIL import Image
recognizer= cv2.face.createLBPHFaceRecognizer()
path='dataSet'
def getImages_And_ID (path):
''' crée une liste avec les chemin relatif des différentes images '''
imagePaths=[os.path.join(path,f) for f in os.listdir(path)]
faces=[]
IDs=[]
for imPath in imagePaths:
#faceImg est une image au format PIL ==> on doit la convertir en numpy car cv2 ne travail qu'avec des format numpy
faceImg=Image.open(imPath).convert('L')
faceNp=np.array(faceImg,'uint8')
id=int(os.path.split(imPath)[-1].split('.')[1])
# on rempli les listes
faces.append(faceNp)
IDs.append(id)
cv2.imshow("training",faceNp)
cv2.waitKey(10)
return np.array(IDs),faces
IDs,faces=getImages_And_ID (path)
recognizer.train(faces,IDs)
# le fichier recognizer doit exister!
recognizer.save('recognizer/trainingData.yml')
cv2.destroyAllWindows()
os.system("pause")
| [
"pi@raspberrypi"
] | pi@raspberrypi |
cd1abe48746f3e5e7806bdad54ba17455c93c632 | 9a206d604ea4bb976c35e8ea2a20abc20e2086aa | /scripts/RNN_217.py | 10290816a41409706b0eb01e4986b0b5a5f2d296 | [
"MIT"
] | permissive | ShepherdCode/BuildingEnergy | 24c9e35bc26a9fba2dc5aa697fd8d0f5a6e46051 | a2b5a260fed1a0adb57ffe373d3971099b1db66b | refs/heads/main | 2023-06-04T02:50:57.513211 | 2021-05-06T11:11:00 | 2021-05-06T11:11:00 | 349,497,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,487 | py | #!/usr/bin/env python
# coding: utf-8
# # RNN
# Compare to RNN_218. No smoothing. Predictors = hour-of-day, day-of-year. Given 12 hrs, predict 12 hr starting 24 hr ahead. Train on year 1, test on year 2.
#
# In[1]:
from os import listdir
import csv
from zipfile import ZipFile
import numpy as np
import pandas as pd
from scipy import stats # mode
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import SimpleRNN
from keras.layers import LSTM
from keras.layers import GRU
from keras.layers import TimeDistributed
from keras.layers import Dense
from keras.losses import MeanSquaredError
import matplotlib.pyplot as plt
from matplotlib import colors
mycmap = colors.ListedColormap(['red','blue']) # list color for label 0 then 1
np.set_printoptions(precision=2)
# In[2]:
# Constants
EPOCHS=50 # use 5 for software testing, 50 for model testing
SITE = 'Eagle'
PREDICTORS = ['hour','month','doy','meter','cloudCoverage', 'airTemperature', 'dewTemperature', 'precipDepth1HR', 'precipDepth6HR', 'seaLvlPressure', 'windDirection', 'windSpeed']
PREDICTORS = ['hour','doy'] # short list for testing
NUM_PREDICTORS=len(PREDICTORS)
print("PREDICTORS=",NUM_PREDICTORS,PREDICTORS)
PREDICTED_VARIABLE = 'meter'
STEPS_HISTORY = 24
STEPS_FORWARD = 12
STEPS_FUTURE = 12
METER_FILE='steam.csv'
WEATHER_FILE='weather.csv'
EXAMPLE='Eagle_lodging_Edgardo'
SITE_BUILDINGS = None
# In[3]:
DATAPATH=''
try:
# On Google Drive, set path to my drive / data directory.
from google.colab import drive
IN_COLAB = True
PATH='/content/drive/'
drive.mount(PATH)
DATAPATH=PATH+'My Drive/data/' # must end in "/"
except:
# On home computer, set path to local data directory.
IN_COLAB = False
DATAPATH='data/' # must end in "/"
ZIP_FILE='BuildingData.zip'
ZIP_PATH = DATAPATH+ZIP_FILE
MODEL_FILE='Model' # will be used later to save models
# In[4]:
def read_zip_to_panda(zip_filename,csv_filename):
zip_handle = ZipFile(zip_filename)
csv_handle = zip_handle.open(csv_filename)
panda = pd.read_csv(csv_handle)
return panda
def fix_date_type(panda):
# Convert the given timestamp column to the pandas datetime data type.
panda['timestamp'] = pd.to_datetime(panda['timestamp'], infer_datetime_format = True)
indexed = panda.set_index(['timestamp'])
return indexed
# In[5]:
def load_weather_for_site(site):
wet_df = read_zip_to_panda(ZIP_PATH,WEATHER_FILE)
wet_df = fix_date_type(wet_df)
site_df = wet_df.loc[wet_df['site_id'] == site]
# Drop the site, which is constant (we selected for one site).
site_df = site_df.drop(['site_id'],axis=1)
site_df.insert(0,'hour',0)
site_df.insert(1,'month',0)
site_df.insert(2,'doy',0)
L=len(site_df)
for i in range(0,L):
dt=site_df.index[i]
hour=dt.hour
month=dt.month
doy=dt.dayofyear
site_df.iat[i,0] = hour
site_df.iat[i,1] = month
site_df.iat[i,2] = doy
return site_df
one_site_weather = load_weather_for_site(SITE)
one_site_weather.tail()
# In[6]:
def load_meter_for_building(bldg):
all_df = read_zip_to_panda(ZIP_PATH,METER_FILE)
all_df = fix_date_type(all_df)
global SITE_BUILDINGS
SITE_BUILDINGS = [x for x in all_df.columns if x.startswith(SITE)]
site_series = all_df[bldg]
site_df = site_series.to_frame()
#site_df = all_df.loc[all_df['site_id'] == site]
# Change column name from building name to meter.
site_df = site_df.rename(columns={bldg : PREDICTED_VARIABLE})
return site_df
one_bldg_meter = load_meter_for_building(EXAMPLE)
print(type(one_bldg_meter))
one_bldg_meter.tail()
# In[7]:
def prepare_for_learning(wdf,mdf):
# Concatenate weather and meter.
df = pd.concat([wdf,mdf],axis=1)
num_samples = len(df) - STEPS_FUTURE - STEPS_HISTORY
X_shape = (num_samples,STEPS_FUTURE,NUM_PREDICTORS)
Y_shape = (num_samples,STEPS_FUTURE)
X=np.zeros(X_shape)
y=np.zeros(Y_shape)
predictor_series = df[PREDICTORS].values # selected features
predicted_series = df[PREDICTED_VARIABLE].values # meter
# TO DO: can we take predicted from mdf instead?
for sam in range (0,num_samples):
prev_val = 0
one_sample = predictor_series[sam:sam+STEPS_FORWARD]
for time in range (0,STEPS_FORWARD):
one_period = one_sample[time]
for feat in range (0,NUM_PREDICTORS):
val = one_period[feat]
if np.isnan(val):
val = prev_val
else:
prev_val = val
X[sam,time,feat] = val
for time in range (0,STEPS_FUTURE):
y[sam,time]=predicted_series[sam+STEPS_HISTORY+time]
return X,y
X,y = prepare_for_learning(one_site_weather,one_bldg_meter)
print("X shape:",X.shape)
print("y shape:",y.shape)
# In[8]:
print("X columns:",PREDICTORS)
print("X example:\n",X[100].astype(int))
print("y example:\n",y[100].astype(int))
# In[9]:
def make_RNN():
# The GRU in Keras is optimized for speed on CoLab GPU.
rnn = Sequential([
GRU(16,return_sequences=True,
input_shape=(STEPS_FORWARD,NUM_PREDICTORS)),
GRU(16,return_sequences=True),
GRU(16,return_sequences=False),
Dense(STEPS_FUTURE)
])
rnn.compile(optimizer='adam',loss=MeanSquaredError())
return rnn
# In[12]:
cors = []
one_site_weather = load_weather_for_site(SITE)
for BLDG in SITE_BUILDINGS:
print("Building",BLDG)
one_bldg_meter = load_meter_for_building(BLDG)
count_bad = one_bldg_meter[PREDICTED_VARIABLE].isna().sum()
MAX_BAD = 500
if count_bad<=MAX_BAD:
# Must get rid of Nan labels, else loss hits NaN during training.
print(" Count bad values before:",count_bad)
pseudovalue = one_bldg_meter[PREDICTED_VARIABLE].mean()
one_bldg_meter = one_bldg_meter.fillna(pseudovalue)
count_bad = one_bldg_meter[PREDICTED_VARIABLE].isna().sum()
print(" Count bad values after:",count_bad)
#
X,y = prepare_for_learning(one_site_weather,one_bldg_meter)
split = len(X)//2 # year 1 vs year 2
X_train = np.asarray(X[0:split])
y_train = np.asarray(y[0:split])
X_test = np.asarray(X[split:])
y_test = np.asarray(y[split:])
model = make_RNN()
print(model.summary())
#print("Example X train:\n",X_train[example].astype(int))
example=411
print("Example y train:\n",y_train[example].astype(int))
model.fit(X_train,y_train,epochs=EPOCHS)
# Keep a table for reporting later.
y_pred = model.predict(X_test)
rmse = mean_squared_error(y_test,y_pred,squared=False)
mean = one_bldg_meter[PREDICTED_VARIABLE].mean()
cors.append([mean,rmse,rmse/mean,BLDG])
print("mean,rmse,rmse/mean,bldg:",mean,rmse,rmse/mean,BLDG)
for hr in range(0,24,2):
print("Example prediction:\n",hr,y_pred[example+hr].astype(int))
print()
print("History",STEPS_HISTORY,"Future",STEPS_FUTURE)
print("Column 1: Mean usage.")
print("Column 2: RMSE of LinearRegression(X=Weather, y=Usage).")
print("Column 3: RMSE/mean normalized to help understand RMSE.")
print("Column 4: Building.")
for cor in sorted(cors):
print("%10.2f %10.2f %5.2f %s"%(cor[0],cor[1],cor[2],cor[3]))
# In[12]:
# In[12]:
| [
"jmill02@shepherd.edu"
] | jmill02@shepherd.edu |
ebac8572ad03da254a425a4a75b0e61974e3b761 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03385/s603024800.py | 7a526ca41c02972a2a71f7357059eef123871f20 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | s = input()
ans = True
if "a" not in s:
ans = False
if "b" not in s:
ans = False
if "c" not in s:
ans = False
if ans:
print("Yes")
else:
print("No") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
973c0bedba67067c54281cdc095a2919b8cf0883 | bd696223aaf5404987df11832b4c17c916b9690f | /nlp_sample/deep_zero_nlp/ch07/train_better_seq2seq.py | 50920b13d185794e98201df4a04b802eb0f7c886 | [] | no_license | wararaki718/scrapbox3 | 000a285477f25c1e8a4b6017b6ad06c76f173342 | 9be5dc879a33a1988d9f6611307c499eec125dc2 | refs/heads/master | 2023-06-16T08:46:32.879231 | 2021-07-17T14:12:54 | 2021-07-17T14:12:54 | 280,590,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | import numpy as np
from dataset import sequence
from optimizer import Adam
from trainer import Trainer
from util import eval_seq2seq
from peeky_seq2seq import PeekySeq2seq
def main() -> None:
(x_train, t_train), (x_test, t_test) = sequence.load_data('addition.txt')
x_train, x_test = x_train[:, ::-1], x_test[:, ::-1]
char_to_id, id_to_char = sequence.get_vocab()
vocab_size = len(char_to_id)
wordvec_size = 16
hidden_size = 128
batch_size = 128
max_epoch = 25
max_grad = 5.0
model = PeekySeq2seq(vocab_size, wordvec_size, hidden_size)
optimizer = Adam()
trainer = Trainer(model, optimizer)
acc_list = []
for epoch in range(1, max_epoch+1):
trainer.fit(x_train, t_train, max_epoch=1, batch_size=batch_size, max_grad=max_grad)
correct_num = 0
for i in range(len(x_test)):
question, correct = x_test[[i]], t_test[[i]]
verbose = i < 10
correct_num += eval_seq2seq(model, question, correct, id_to_char, verbose)
acc = float(correct_num) / len(x_test)
acc_list.append(acc)
print(f'val acc {acc*100}%')
print('DONE')
if __name__ == '__main__':
main()
| [
"ky7.ott.w@gmail.com"
] | ky7.ott.w@gmail.com |
fa413f4d8bb5301fad59fb5a048130fde9b3e8b9 | bfa44aa7f6a54a9b60c5ff545897787b15e13c61 | /webcart/users/signals.py | 65e2687c2a7bb1d6aabb4b8ac6bc39735bdb8bb6 | [] | no_license | BabGee/foodcart | e26ef7dbb19df9019c45c623d39e6612f56c8e27 | 30c213007313349af20e6efd650e1c71696ee14a | refs/heads/master | 2022-11-27T12:24:11.279614 | 2020-02-22T10:41:41 | 2020-02-22T10:41:41 | 223,042,865 | 1 | 0 | null | 2022-11-22T04:51:01 | 2019-11-20T22:56:50 | Tcl | UTF-8 | Python | false | false | 401 | py | from django.contrib.auth.models import User
from django.db.models import post_save
from django.dispatch import receiver
from .models import Profile
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
instance.profile.save()
| [
"you@example.com"
] | you@example.com |
dafcc875b720940b57d1893518cdca38e1fc1bc2 | 13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab | /home--tommy--mypy/mypy/lib/python2.7/site-packages/pandas/tests/io/parser/common.py | e85d3ad294655c7424a5049b82708f215de3a092 | [
"Unlicense"
] | permissive | tommybutler/mlearnpy2 | 8ec52bcd03208c9771d8d02ede8eaa91a95bda30 | 9e5d377d0242ac5eb1e82a357e6701095a8ca1ff | refs/heads/master | 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 | Unlicense | 2022-10-15T23:32:18 | 2018-01-22T23:27:10 | Python | UTF-8 | Python | false | false | 60,098 | py | # -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
    """Passing explicit names= on header-less data reproduces the
    header-based parse, for both regular and MultiIndex index columns."""
    lines = self.data1.split('\n')
    no_header = '\n'.join(lines[1:])

    # regular index
    names = ['index', 'A', 'B', 'C', 'D']
    df = self.read_csv(StringIO(no_header), index_col=0, names=names)
    expected = self.read_csv(StringIO(self.data1), index_col=0)
    tm.assert_frame_equal(df, expected)

    # multi index
    data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
    lines = data.split('\n')
    no_header = '\n'.join(lines[1:])
    names = ['index1', 'index2', 'A', 'B', 'C', 'D']
    df = self.read_csv(StringIO(no_header), index_col=[0, 1],
                       names=names)
    expected = self.read_csv(StringIO(data), index_col=[0, 1])
    tm.assert_frame_equal(df, expected)

    # index_col may also be given by column name
    df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
    tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
    """A MultiIndex built from unnamed (header=None) index columns
    parses correctly, in both index_col orders."""
    data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
    data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
    lines = data.split('\n')
    no_header = '\n'.join(lines[1:])
    names = ['A', 'B', 'C', 'D']

    df = self.read_csv(StringIO(no_header), index_col=[0, 1],
                       header=None, names=names)
    expected = self.read_csv(StringIO(data), index_col=[0, 1])
    tm.assert_frame_equal(df, expected, check_names=False)

    # 2 implicit first cols
    df2 = self.read_csv(StringIO(data2))
    tm.assert_frame_equal(df2, df)

    # reverse order of index
    df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
                       header=None)
    expected = self.read_csv(StringIO(data), index_col=[1, 0])
    tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
    """Header-only input with header=[...] yields an empty frame, and
    the empty frame round-trips through to_csv/read_csv (GH 14545)."""
    # GH 14545
    data = """a,b
"""
    df = self.read_csv(StringIO(data), header=[0])
    expected = DataFrame(columns=['a', 'b'])
    tm.assert_frame_equal(df, expected)
    round_trip = self.read_csv(StringIO(
        expected.to_csv(index=False)), header=[0])
    tm.assert_frame_equal(round_trip, expected)

    # same, but with a two-level column header
    data_multiline = """a,b
c,d
"""
    df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
    cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
    expected2 = DataFrame(columns=cols)
    tm.assert_frame_equal(df2, expected2)
    round_trip = self.read_csv(StringIO(
        expected2.to_csv(index=False)), header=[0, 1])
    tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
    """Space-separated data with a blank leading header cell leaves the
    resulting index unnamed."""
    data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
    df = self.read_table(StringIO(data), sep=' ')
    assert df.index.name is None

def test_read_csv_parse_simple_list(self):
    """Single-column input whose values contain spaces parses as one
    string column."""
    text = """foo
bar baz
qux foo
foo
bar"""
    df = self.read_csv(StringIO(text), header=None)
    expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
                              'foo', 'bar']})
    tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
    """Reading a CSV over HTTP(S) matches the local copy of the same
    fixture file."""
    # HTTP(S)
    url = ('https://raw.github.com/pandas-dev/pandas/master/'
           'pandas/tests/io/parser/data/salaries.csv')
    url_table = self.read_table(url)
    dirpath = tm.get_data_path()
    localtable = os.path.join(dirpath, 'salaries.csv')
    local_table = self.read_table(localtable)
    tm.assert_frame_equal(url_table, local_table)
    # TODO: ftp testing

@pytest.mark.slow
def test_file(self):
    """Reading through a file:// URL matches reading the path directly;
    skipped where the platform's URL handling fails."""
    dirpath = tm.get_data_path()
    localtable = os.path.join(dirpath, 'salaries.csv')
    local_table = self.read_table(localtable)

    try:
        url_table = self.read_table('file://localhost/' + localtable)
    except URLError:
        # fails on some systems
        pytest.skip("failing on %s" %
                    ' '.join(platform.uname()).strip())

    tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
    """A pathlib.Path round-trips through to_csv/read_csv unchanged."""
    df = tm.makeDataFrame()
    result = tm.round_trip_pathlib(df.to_csv,
                                   lambda p: self.read_csv(p, index_col=0))
    tm.assert_frame_equal(df, result)

def test_path_localpath(self):
    """A py.path.local round-trips through to_csv/read_csv unchanged."""
    df = tm.makeDataFrame()
    result = tm.round_trip_localpath(
        df.to_csv,
        lambda p: self.read_csv(p, index_col=0))
    tm.assert_frame_equal(df, result)

def test_nonexistent_path(self):
    """Reading a missing file raises FileNotFoundError rather than
    crashing (gh-2428, gh-14086)."""
    # gh-2428: pls no segfault
    # gh-14086: raise more helpful FileNotFoundError
    path = '%s.csv' % tm.rands(10)
    pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
    """Short rows are padded with NaN in the trailing columns."""
    data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
    result = self.read_csv(StringIO(data))
    assert result['D'].isna()[1:].all()

def test_skipinitialspace(self):
    """skipinitialspace=True strips spaces after the delimiter so
    na_values still match."""
    s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
         '1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
         '314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
         '70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
         '0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
         '-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
    sfile = StringIO(s)
    # it's 33 columns
    result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
                           header=None, skipinitialspace=True)
    assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
    """UTF-16 files (all three BOM/endianness variants) honor skiprows
    the same way a UTF-8 stream does (gh-2298)."""
    # #2298
    data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")

    data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")

    path = '__%s__.csv' % tm.rands(10)

    with tm.ensure_clean(path) as path:
        for sep, dat in [('\t', data), (',', data2)]:
            for enc in ['utf-16', 'utf-16le', 'utf-16be']:
                bytes = dat.encode(enc)
                with open(path, 'wb') as f:
                    f.write(bytes)

                # UTF-8 reference stream for the expected frame
                s = BytesIO(dat.encode('utf-8'))
                if compat.PY3:
                    # somewhat False since the code never sees bytes
                    from io import TextIOWrapper
                    s = TextIOWrapper(s, encoding='utf-8')

                result = self.read_csv(path, encoding=enc, skiprows=2,
                                       sep=sep)
                expected = self.read_csv(s, encoding='utf-8', skiprows=2,
                                         sep=sep)
                s.close()

                tm.assert_frame_equal(result, expected)

def test_utf16_example(self):
    """A real UTF-16 fixture parses with the expected row count, from a
    path and (on Python 2) from an in-memory buffer."""
    path = tm.get_data_path('utf16_ex.txt')

    # it works! and is the right length
    result = self.read_table(path, encoding='utf-16')
    assert len(result) == 50

    if not compat.PY3:
        buf = BytesIO(open(path, 'rb').read())
        result = self.read_table(buf, encoding='utf-16')
        assert len(result) == 50
def test_unicode_encoding(self):
    """A latin-1 encoded fixture decodes to the expected unicode value."""
    pth = tm.get_data_path('unicode_series.csv')
    result = self.read_csv(pth, header=None, encoding='latin-1')
    result = result.set_index(0)
    got = result[1][1632]
    expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
    assert got == expected

def test_trailing_delimiters(self):
    """With index_col=False, trailing delimiters do not create a phantom
    extra column (#2442)."""
    # #2442. grumble grumble
    data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
    result = self.read_csv(StringIO(data), index_col=False)
    expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
                          'C': [3, 6, 9]})
    tm.assert_frame_equal(result, expected)
def test_escapechar(self):
    """escapechar allows backslash-escaped quotes inside quoted fields."""
    # http://stackoverflow.com/questions/13824840/feature-request-for-
    # pandas-read-csv
    data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''  # noqa
    result = self.read_csv(StringIO(data), escapechar='\\',
                           quotechar='"', encoding='utf-8')
    assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
                                        'IKEA:s 1700-tals serie')
    tm.assert_index_equal(result.columns,
                          Index(['SEARCH_TERM', 'ACTUAL_URL']))

def test_int64_min_issues(self):
    """An empty trailing field becomes NaN (not INT64_MIN) (#2599)."""
    # #2599
    data = 'A,B\n0,0\n0,'
    result = self.read_csv(StringIO(data))
    expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
    tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
    """Integers larger than float64 precision must round-trip exactly."""
    values = ([17007000002000191] * 4 +
              [17007000002000192] * 5 +
              [17007000002000194])
    payload = "Numbers\n" + "\n".join(str(v) for v in values)

    result = self.read_csv(StringIO(payload))
    expected = DataFrame({'Numbers': values})

    assert np.array_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
    """Mixed int/float chunks coerce the column to float64 without
    emitting any warning."""
    integers = [str(i) for i in range(499999)]
    data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)

    # Coercion across chunk boundaries must be silent.
    with tm.assert_produces_warning(False):
        df = self.read_csv(StringIO(data))

    # Assert that types were coerced.
    assert type(df.a[0]) is np.float64
    # np.float was a deprecated alias for the builtin float (removed in
    # numpy 1.24); compare against np.float64 directly — equivalent here
    # since the column dtype is float64.
    assert df.a.dtype == np.float64
def test_warn_if_chunks_have_mismatched_type(self):
    """Chunks that cannot be numerically coerced fall back to object
    dtype, warning only for the low-memory C engine."""
    warning_type = False
    integers = [str(i) for i in range(499999)]
    data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)

    # see gh-3866: if chunks are different types and can't
    # be coerced using numerical types, then issue warning.
    if self.engine == 'c' and self.low_memory:
        warning_type = DtypeWarning

    with tm.assert_produces_warning(warning_type):
        df = self.read_csv(StringIO(data))
    # np.object was a deprecated alias for the builtin object (removed
    # in numpy 1.24); compare against the builtin directly — equivalent.
    assert df.a.dtype == object
def test_integer_overflow_bug(self):
    """Exponents too large for int parsing fall back to float64 (gh-2601)."""
    # see gh-2601
    data = "65248E10 11\n55555E55 22\n"

    result = self.read_csv(StringIO(data), header=None, sep=' ')
    assert result[0].dtype == np.float64

    result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
    assert result[0].dtype == np.float64

def test_catch_too_many_names(self):
    """Providing more names than columns raises ValueError (gh-5156)."""
    # see gh-5156
    data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
    pytest.raises(ValueError, self.read_csv, StringIO(data),
                  header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
    """A regex whitespace separator ignores leading spaces on each line
    (gh-3374, gh-6607)."""
    # see gh-3374, gh-6607
    data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
    result = self.read_table(StringIO(data), sep=r'\s+')
    expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
    tm.assert_frame_equal(result, expected)

def test_chunk_begins_with_newline_whitespace(self):
    """Chunks beginning with newline/whitespace parse cleanly (gh-10022)."""
    # see gh-10022
    data = '\n hello\nworld\n'
    result = self.read_csv(StringIO(data), header=None)
    assert len(result) == 2

    # see gh-9735: this issue is C parser-specific (bug when
    # parsing whitespace and characters at chunk boundary)
    if self.engine == 'c':
        chunk1 = 'a' * (1024 * 256 - 2) + '\na'
        chunk2 = '\n a'
        result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
        expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
        tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
    """Header-only input with index_col yields an empty indexed frame
    (gh-10184)."""
    # see gh-10184
    data = 'x,y'
    result = self.read_csv(StringIO(data), index_col=0)
    expected = DataFrame([], columns=['y'], index=Index([], name='x'))
    tm.assert_frame_equal(result, expected)

def test_empty_with_multiindex(self):
    """Header-only input supports a named empty MultiIndex (gh-10467)."""
    # see gh-10467
    data = 'x,y,z'
    result = self.read_csv(StringIO(data), index_col=['x', 'y'])
    expected = DataFrame([], columns=['z'],
                         index=MultiIndex.from_arrays(
                             [[]] * 2, names=['x', 'y']))
    tm.assert_frame_equal(result, expected, check_index_type=False)

def test_empty_with_reversed_multiindex(self):
    """index_col order is respected when building an empty MultiIndex."""
    data = 'x,y,z'
    result = self.read_csv(StringIO(data), index_col=[1, 0])
    expected = DataFrame([], columns=['z'],
                         index=MultiIndex.from_arrays(
                             [[]] * 2, names=['y', 'x']))
    tm.assert_frame_equal(result, expected, check_index_type=False)

def test_float_parser(self):
    """Assorted float spellings parse identically to Python's float()
    (gh-9565)."""
    # see gh-9565
    data = '45e-1,4.5,45.,inf,-inf'
    result = self.read_csv(StringIO(data), header=None)
    expected = DataFrame([[float(s) for s in data.split(',')]])
    tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
    """Tokens like '2e' (no exponent digits) stay strings under every
    float_precision choice (gh-12215)."""
    # see gh-12215
    # NOTE(review): DataFrame.from_items is deprecated in later pandas;
    # fine for the pandas version this suite targets.
    df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
                               ('y', ['42e']), ('z', ['632E'])])
    data = df.to_csv(index=False)
    for prec in self.float_precision_choices:
        df_roundtrip = self.read_csv(
            StringIO(data), float_precision=prec)
        tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
    """Values outside the int64/uint64 range parse as strings; values at
    the boundaries parse numerically; converters overflow loudly."""
    data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
    # 13007854817840016671868 > UINT64_MAX, so this
    # will overflow and return object as the dtype.
    result = self.read_csv(StringIO(data))
    assert result['ID'].dtype == object

    # 13007854817840016671868 > UINT64_MAX, so attempts
    # to cast to either int64 or uint64 will result in
    # an OverflowError being raised.
    for conv in (np.int64, np.uint64):
        pytest.raises(OverflowError, self.read_csv,
                      StringIO(data), converters={'ID': conv})

    # These numbers fall right inside the int64-uint64 range,
    # so they should be parsed numerically, without overflow.
    ui_max = np.iinfo(np.uint64).max
    i_max = np.iinfo(np.int64).max
    i_min = np.iinfo(np.int64).min

    for x in [i_max, i_min, ui_max]:
        result = self.read_csv(StringIO(str(x)), header=None)
        expected = DataFrame([x])
        tm.assert_frame_equal(result, expected)

    # These numbers fall just outside the int64-uint64 range,
    # so they should be parsed as string.
    too_big = ui_max + 1
    too_small = i_min - 1

    for x in [too_big, too_small]:
        result = self.read_csv(StringIO(str(x)), header=None)
        expected = DataFrame([str(x)])
        tm.assert_frame_equal(result, expected)

    # No numerical dtype can hold both negative and uint64 values,
    # so they should be cast as string.
    data = '-1\n' + str(2**63)
    expected = DataFrame([str(-1), str(2**63)])
    result = self.read_csv(StringIO(data), header=None)
    tm.assert_frame_equal(result, expected)

    data = str(2**63) + '\n-1'
    expected = DataFrame([str(2**63), str(-1)])
    result = self.read_csv(StringIO(data), header=None)
    tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
    """nrows/chunksize on header-only input yield empty frames, with and
    without the deprecated as_recarray path (gh-9535)."""
    # see gh-9535
    expected = DataFrame([], columns=['foo', 'bar'])
    result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
    tm.assert_frame_equal(result, expected)

    result = next(iter(self.read_csv(
        StringIO('foo,bar\n'), chunksize=10)))
    tm.assert_frame_equal(result, expected)

    # as_recarray is deprecated, hence the FutureWarning
    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        result = self.read_csv(StringIO('foo,bar\n'),
                               nrows=10, as_recarray=True)
        result = DataFrame(result[2], columns=result[1],
                           index=result[0])
        tm.assert_frame_equal(DataFrame.from_records(
            result), expected, check_index_type=False)

    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        result = next(iter(self.read_csv(StringIO('foo,bar\n'),
                                         chunksize=10, as_recarray=True)))
        result = DataFrame(result[2], columns=result[1], index=result[0])
        tm.assert_frame_equal(DataFrame.from_records(result), expected,
                              check_index_type=False)
def test_eof_states(self):
    """Exercise the tokenizer's end-of-file states — file ending inside
    whitespace, comments, CRNL, quoted/escaped fields (gh-10728, gh-10548)."""
    # see gh-10728, gh-10548

    # With skip_blank_lines = True
    expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])

    # gh-10728: WHITESPACE_LINE
    data = 'a,b,c\n4,5,6\n '
    result = self.read_csv(StringIO(data))
    tm.assert_frame_equal(result, expected)

    # gh-10548: EAT_LINE_COMMENT
    data = 'a,b,c\n4,5,6\n#comment'
    result = self.read_csv(StringIO(data), comment='#')
    tm.assert_frame_equal(result, expected)

    # EAT_CRNL_NOP
    data = 'a,b,c\n4,5,6\n\r'
    result = self.read_csv(StringIO(data))
    tm.assert_frame_equal(result, expected)

    # EAT_COMMENT
    data = 'a,b,c\n4,5,6#comment'
    result = self.read_csv(StringIO(data), comment='#')
    tm.assert_frame_equal(result, expected)

    # SKIP_LINE
    data = 'a,b,c\n4,5,6\nskipme'
    result = self.read_csv(StringIO(data), skiprows=[2])
    tm.assert_frame_equal(result, expected)

    # With skip_blank_lines = False

    # EAT_LINE_COMMENT
    data = 'a,b,c\n4,5,6\n#comment'
    result = self.read_csv(
        StringIO(data), comment='#', skip_blank_lines=False)
    expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
    tm.assert_frame_equal(result, expected)

    # IN_FIELD
    data = 'a,b,c\n4,5,6\n '
    result = self.read_csv(StringIO(data), skip_blank_lines=False)
    expected = DataFrame(
        [['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
    tm.assert_frame_equal(result, expected)

    # EAT_CRNL
    data = 'a,b,c\n4,5,6\n\r'
    result = self.read_csv(StringIO(data), skip_blank_lines=False)
    expected = DataFrame(
        [[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
    tm.assert_frame_equal(result, expected)

    # Should produce exceptions

    # ESCAPED_CHAR
    data = "a,b,c\n4,5,6\n\\"
    pytest.raises(Exception, self.read_csv,
                  StringIO(data), escapechar='\\')

    # ESCAPE_IN_QUOTED_FIELD
    data = 'a,b,c\n4,5,6\n"\\'
    pytest.raises(Exception, self.read_csv,
                  StringIO(data), escapechar='\\')

    # IN_QUOTED_FIELD
    data = 'a,b,c\n4,5,6\n"'
    pytest.raises(Exception, self.read_csv,
                  StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
    """usecols salvages ragged rows that would otherwise raise (gh-12203)."""
    # See gh-12203
    # Renamed from `csv` so the local does not shadow the csv module
    # used elsewhere in this class (e.g. test_iterator).
    data = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
    # make sure that an error is still thrown
    # when the 'usecols' parameter is not provided
    msg = r"Expected \d+ fields in line \d+, saw \d+"
    with tm.assert_raises_regex(ValueError, msg):
        # no need to bind the result — the call must raise
        self.read_csv(StringIO(data))

    expected = DataFrame({
        'a': [0, 3, 8],
        'b': [1, 4, 9]
    })

    usecols = [0, 1]
    df = self.read_csv(StringIO(data), usecols=usecols)
    tm.assert_frame_equal(df, expected)

    usecols = ['a', 'b']
    df = self.read_csv(StringIO(data), usecols=usecols)
    tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
    """usecols on empty or all-delimiter input (gh-12493)."""
    # See gh-12493
    names = ['Dummy', 'X', 'Dummy_2']
    usecols = names[1:2]  # ['X']

    # first, check to see that the response of
    # parser when faced with no provided columns
    # throws the correct error, with or without usecols
    errmsg = "No columns to parse from file"

    with tm.assert_raises_regex(EmptyDataError, errmsg):
        self.read_csv(StringIO(''))

    with tm.assert_raises_regex(EmptyDataError, errmsg):
        self.read_csv(StringIO(''), usecols=usecols)

    # ',,' is one all-NaN data row
    expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
    df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
    tm.assert_frame_equal(df, expected)

    # empty input plus names yields an empty frame
    expected = DataFrame(columns=usecols)
    df = self.read_csv(StringIO(''), names=names, usecols=usecols)
    tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
    """Trailing whitespace, junk lines and blank lines are skipped
    correctly under delim_whitespace (gh-8661, gh-8679, gh-8983)."""
    data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"  # noqa
    expected = DataFrame([[1., 2., 4.],
                          [5.1, np.nan, 10.]])

    # gh-8661, gh-8679: this should ignore six lines including
    # lines with trailing whitespace and blank lines
    df = self.read_csv(StringIO(data.replace(',', ' ')),
                       header=None, delim_whitespace=True,
                       skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
    tm.assert_frame_equal(df, expected)

    df = self.read_table(StringIO(data.replace(',', ' ')),
                         header=None, delim_whitespace=True,
                         skiprows=[0, 1, 2, 3, 5, 6],
                         skip_blank_lines=True)
    tm.assert_frame_equal(df, expected)

    # gh-8983: test skipping set of rows after a row with trailing spaces
    expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
                          "C": [4., 10]})
    df = self.read_table(StringIO(data.replace(',', ' ')),
                         delim_whitespace=True,
                         skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
    tm.assert_frame_equal(df, expected)
def test_raise_on_sep_with_delim_whitespace(self):
    """Passing both sep and delim_whitespace must raise (gh-6607)."""
    # see gh-6607
    raw = 'a b c\n1 2 3'
    expected_msg = 'you can only specify one'
    with tm.assert_raises_regex(ValueError, expected_msg):
        self.read_table(StringIO(raw), sep=r'\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
    """Leading whitespace in front of single-character fields is
    ignored (gh-9710)."""
    # see gh-9710
    # NOTE(review): the data rows are indented on purpose — the point of
    # gh-9710 is leading whitespace before single-character fields. The
    # indentation was lost in a reformat; restored here.
    data = """\
MyColumn
   a
   b
   a
   b\n"""

    expected = DataFrame({'MyColumn': list('abab')})

    result = self.read_csv(StringIO(data), delim_whitespace=True,
                           skipinitialspace=True)
    tm.assert_frame_equal(result, expected)

    result = self.read_csv(StringIO(data), skipinitialspace=True)
    tm.assert_frame_equal(result, expected)
def test_empty_lines(self):
    """Blank lines are dropped by default and kept as all-NaN rows when
    skip_blank_lines=False."""
    # NOTE(review): the blank lines inside ``data`` are essential — the
    # skip_blank_lines=False expectation below contains three all-NaN
    # rows, so the blank lines (lost in a reformat) are restored here.
    data = """\
A,B,C
1,2.,4.


5.,NaN,10.0

-70,.4,1
"""
    expected = np.array([[1., 2., 4.],
                         [5., np.nan, 10.],
                         [-70., .4, 1.]])
    df = self.read_csv(StringIO(data))
    tm.assert_numpy_array_equal(df.values, expected)

    df = self.read_csv(StringIO(data.replace(',', ' ')), sep=r'\s+')
    tm.assert_numpy_array_equal(df.values, expected)

    expected = np.array([[1., 2., 4.],
                         [np.nan, np.nan, np.nan],
                         [np.nan, np.nan, np.nan],
                         [5., np.nan, 10.],
                         [np.nan, np.nan, np.nan],
                         [-70., .4, 1.]])
    df = self.read_csv(StringIO(data), skip_blank_lines=False)
    tm.assert_numpy_array_equal(df.values, expected)
def test_whitespace_lines(self):
    """Lines consisting only of whitespace (tabs/spaces) are skipped."""
    data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
    expected = np.array([[1, 2., 4.],
                         [5., np.nan, 10.]])
    df = self.read_csv(StringIO(data))
    tm.assert_numpy_array_equal(df.values, expected)
def test_regex_separator(self):
    """A regex whitespace separator works and does not promote the first
    column header into an index name (gh-6607)."""
    # see gh-6607
    data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
    df = self.read_table(StringIO(data), sep=r'\s+')
    # build the reference frame by turning runs of spaces into commas
    expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
                             index_col=0)
    assert expected.index.name is None
    tm.assert_frame_equal(df, expected)

    data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
    result = self.read_table(StringIO(data), sep=r'\s+')
    expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                         columns=['a', 'b', 'c'])
    tm.assert_frame_equal(result, expected)
@tm.capture_stdout
def test_verbose_import(self):
    """verbose=True prints engine-specific parsing diagnostics (timings
    for the C engine, NA-fill counts for the Python engine)."""
    text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""

    # Engines are verbose in different ways.
    self.read_csv(StringIO(text), verbose=True)
    output = sys.stdout.getvalue()

    if self.engine == 'c':
        assert 'Tokenization took:' in output
        assert 'Parser memory cleanup took:' in output
    else:  # Python engine
        assert output == 'Filled 3 NA values in column a\n'

    # Reset the stdout buffer.
    sys.stdout = StringIO()

    text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""

    self.read_csv(StringIO(text), verbose=True, index_col=0)
    output = sys.stdout.getvalue()

    # Engines are verbose in different ways.
    if self.engine == 'c':
        assert 'Tokenization took:' in output
        assert 'Parser memory cleanup took:' in output
    else:  # Python engine
        assert output == 'Filled 1 NA values in column a\n'
def test_iteration_open_handle(self):
    """Reading from a partially-consumed file handle (Python 2 only):
    the C engine raises, the Python engine reads the remaining lines."""
    if PY3:
        pytest.skip(
            "won't work in Python 3 {0}".format(sys.version_info))

    with tm.ensure_clean() as path:
        with open(path, 'wb') as f:
            f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')

        with open(path, 'rb') as f:
            # consume lines up to and including 'CCC'
            for line in f:
                if 'CCC' in line:
                    break

            if self.engine == 'c':
                pytest.raises(Exception, self.read_table,
                              f, squeeze=True, header=None)
            else:
                result = self.read_table(f, squeeze=True, header=None)
                expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
                tm.assert_series_equal(result, expected)
def test_1000_sep_with_decimal(self):
    """thousands= and decimal= interact correctly, including the
    European convention ('.' thousands, ',' decimal)."""
    data = """A|B|C
1|2,334.01|5
10|13|10.
"""
    expected = DataFrame({
        'A': [1, 10],
        'B': [2334.01, 13],
        'C': [5, 10.]
    })

    # sanity-check the expected dtypes
    assert expected.A.dtype == 'int64'
    assert expected.B.dtype == 'float'
    assert expected.C.dtype == 'float'

    df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
    tm.assert_frame_equal(df, expected)

    df = self.read_table(StringIO(data), sep='|',
                         thousands=',', decimal='.')
    tm.assert_frame_equal(df, expected)

    data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
    df = self.read_csv(StringIO(data_with_odd_sep),
                       sep='|', thousands='.', decimal=',')
    tm.assert_frame_equal(df, expected)

    df = self.read_table(StringIO(data_with_odd_sep),
                         sep='|', thousands='.', decimal=',')
    tm.assert_frame_equal(df, expected)
def test_euro_decimal_format(self):
    """Comma decimal separator produces float columns."""
    data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""

    parsed = self.read_csv(StringIO(data), sep=';', decimal=',')
    for column in ('Number1', 'Number2', 'Number3'):
        assert parsed[column].dtype == float
def test_inf_parsing(self):
    """Every capitalization/sign spelling of infinity parses to +/-inf,
    with and without the NA filter."""
    data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
    inf = float('inf')
    expected = Series([inf, -inf] * 5)

    df = self.read_csv(StringIO(data), index_col=0)
    tm.assert_almost_equal(df['A'].values, expected.values)

    df = self.read_csv(StringIO(data), index_col=0, na_filter=False)
    tm.assert_almost_equal(df['A'].values, expected.values)

def test_raise_on_no_columns(self):
    """Newline-only input raises EmptyDataError."""
    # single newline
    data = "\n"
    pytest.raises(EmptyDataError, self.read_csv, StringIO(data))

    # test with more than a single newline
    data = "\n\n\n"
    pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
def test_compact_ints_use_unsigned(self):
    """Deprecated compact_ints/use_unsigned downcast integer columns to
    the smallest signed/unsigned dtype that fits (gh-13323)."""
    # see gh-13323
    data = 'a,b,c\n1,9,258'

    # sanity check
    expected = DataFrame({
        'a': np.array([1], dtype=np.int64),
        'b': np.array([9], dtype=np.int64),
        'c': np.array([258], dtype=np.int64),
    })
    out = self.read_csv(StringIO(data))
    tm.assert_frame_equal(out, expected)

    expected = DataFrame({
        'a': np.array([1], dtype=np.int8),
        'b': np.array([9], dtype=np.int8),
        'c': np.array([258], dtype=np.int16),
    })

    # default behaviour for 'use_unsigned'
    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        out = self.read_csv(StringIO(data), compact_ints=True)
    tm.assert_frame_equal(out, expected)

    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        out = self.read_csv(StringIO(data), compact_ints=True,
                            use_unsigned=False)
    tm.assert_frame_equal(out, expected)

    expected = DataFrame({
        'a': np.array([1], dtype=np.uint8),
        'b': np.array([9], dtype=np.uint8),
        'c': np.array([258], dtype=np.uint16),
    })
    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        out = self.read_csv(StringIO(data), compact_ints=True,
                            use_unsigned=True)
    tm.assert_frame_equal(out, expected)

def test_compact_ints_as_recarray(self):
    """compact_ints with as_recarray downcasts record-array fields."""
    data = ('0,1,0,0\n'
            '1,1,0,0\n'
            '0,1,0,1')

    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        result = self.read_csv(StringIO(data), delimiter=',', header=None,
                               compact_ints=True, as_recarray=True)
        ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
        assert result.dtype == ex_dtype

    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        result = self.read_csv(StringIO(data), delimiter=',', header=None,
                               as_recarray=True, compact_ints=True,
                               use_unsigned=True)
        ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
        assert result.dtype == ex_dtype
def test_as_recarray(self):
    """Deprecated as_recarray=True returns numpy record arrays; it
    ignores index_col/squeeze and composes with names/converters/usecols."""
    # basic test
    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        data = 'a,b\n1,a\n2,b'
        expected = np.array([(1, 'a'), (2, 'b')],
                            dtype=[('a', '=i8'), ('b', 'O')])
        out = self.read_csv(StringIO(data), as_recarray=True)
        tm.assert_numpy_array_equal(out, expected)

    # index_col ignored
    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        data = 'a,b\n1,a\n2,b'
        expected = np.array([(1, 'a'), (2, 'b')],
                            dtype=[('a', '=i8'), ('b', 'O')])
        out = self.read_csv(StringIO(data), as_recarray=True, index_col=0)
        tm.assert_numpy_array_equal(out, expected)

    # respects names
    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        data = '1,a\n2,b'
        expected = np.array([(1, 'a'), (2, 'b')],
                            dtype=[('a', '=i8'), ('b', 'O')])
        out = self.read_csv(StringIO(data), names=['a', 'b'],
                            header=None, as_recarray=True)
        tm.assert_numpy_array_equal(out, expected)

    # header order is respected even though it conflicts
    # with the natural ordering of the column names
    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        data = 'b,a\n1,a\n2,b'
        expected = np.array([(1, 'a'), (2, 'b')],
                            dtype=[('b', '=i8'), ('a', 'O')])
        out = self.read_csv(StringIO(data), as_recarray=True)
        tm.assert_numpy_array_equal(out, expected)

    # overrides the squeeze parameter
    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        data = 'a\n1'
        expected = np.array([(1,)], dtype=[('a', '=i8')])
        out = self.read_csv(StringIO(data), as_recarray=True, squeeze=True)
        tm.assert_numpy_array_equal(out, expected)

    # does data conversions before doing recarray conversion
    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        data = 'a,b\n1,a\n2,b'
        conv = lambda x: int(x) + 1
        expected = np.array([(2, 'a'), (3, 'b')],
                            dtype=[('a', '=i8'), ('b', 'O')])
        out = self.read_csv(StringIO(data), as_recarray=True,
                            converters={'a': conv})
        tm.assert_numpy_array_equal(out, expected)

    # filters by usecols before doing recarray conversion
    with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False):
        data = 'a,b\n1,a\n2,b'
        expected = np.array([(1,), (2,)], dtype=[('a', '=i8')])
        out = self.read_csv(StringIO(data), as_recarray=True,
                            usecols=['a'])
        tm.assert_numpy_array_equal(out, expected)
def test_memory_map(self):
    """memory_map=True reads the mmap fixture correctly."""
    mmap_file = os.path.join(self.dirpath, 'test_mmap.csv')
    expected = DataFrame({
        'a': [1, 2, 3],
        'b': ['one', 'two', 'three'],
        'c': ['I', 'II', 'III']
    })

    out = self.read_csv(mmap_file, memory_map=True)
    tm.assert_frame_equal(out, expected)

def test_null_byte_char(self):
    """A NULL byte parses as NaN under the C engine and raises under the
    Python engine (gh-2741)."""
    # see gh-2741
    data = '\x00,foo'
    cols = ['a', 'b']

    expected = DataFrame([[np.nan, 'foo']],
                         columns=cols)

    if self.engine == 'c':
        out = self.read_csv(StringIO(data), names=cols)
        tm.assert_frame_equal(out, expected)
    else:
        msg = "NULL byte detected"
        with tm.assert_raises_regex(ParserError, msg):
            self.read_csv(StringIO(data), names=cols)
def test_utf8_bom(self):
    """A UTF-8 BOM is stripped in every mode: plain header, quoted
    header, explicit names, and blank-first-row handling (gh-4793)."""
    # see gh-4793
    bom = u('\ufeff')
    utf8 = 'utf-8'

    def _encode_data_with_bom(_data):
        # prepend a BOM and return an encoded byte stream
        bom_data = (bom + _data).encode(utf8)
        return BytesIO(bom_data)

    # basic test
    data = 'a\n1'
    expected = DataFrame({'a': [1]})

    out = self.read_csv(_encode_data_with_bom(data),
                        encoding=utf8)
    tm.assert_frame_equal(out, expected)

    # test with "regular" quoting
    data = '"a"\n1'
    expected = DataFrame({'a': [1]})

    out = self.read_csv(_encode_data_with_bom(data),
                        encoding=utf8, quotechar='"')
    tm.assert_frame_equal(out, expected)

    # test in a data row instead of header
    data = 'b\n1'
    expected = DataFrame({'a': ['b', '1']})

    out = self.read_csv(_encode_data_with_bom(data),
                        encoding=utf8, names=['a'])
    tm.assert_frame_equal(out, expected)

    # test in empty data row with skipping
    data = '\n1'
    expected = DataFrame({'a': [1]})

    out = self.read_csv(_encode_data_with_bom(data),
                        encoding=utf8, names=['a'],
                        skip_blank_lines=True)
    tm.assert_frame_equal(out, expected)

    # test in empty data row without skipping
    data = '\n1'
    expected = DataFrame({'a': [np.nan, 1.0]})

    out = self.read_csv(_encode_data_with_bom(data),
                        encoding=utf8, names=['a'],
                        skip_blank_lines=False)
    tm.assert_frame_equal(out, expected)
def test_temporary_file(self):
    """read_csv works on a TemporaryFile handle (gh-13398)."""
    # see gh-13398
    data1 = "0 0"

    from tempfile import TemporaryFile
    # Use the context manager so the handle is closed (and the file
    # reclaimed) even if read_csv raises; the original leaked it on error.
    with TemporaryFile("w+") as new_file:
        new_file.write(data1)
        new_file.flush()
        new_file.seek(0)

        result = self.read_csv(new_file, sep=r'\s+', header=None)

    expected = DataFrame([[0, 0]])
    tm.assert_frame_equal(result, expected)
def test_read_csv_utf_aliases(self):
    """All spellings of utf-8/utf-16 encoding names are accepted (gh-13549)."""
    # see gh issue 13549
    expected = pd.DataFrame({'mb_num': [4.8], 'multibyte': ['test']})
    for byte in [8, 16]:
        for fmt in ['utf-{0}', 'utf_{0}', 'UTF-{0}', 'UTF_{0}']:
            encoding = fmt.format(byte)
            data = 'mb_num,multibyte\n4.8,test'.encode(encoding)
            result = self.read_csv(BytesIO(data), encoding=encoding)
            tm.assert_frame_equal(result, expected)

def test_internal_eof_byte(self):
    """An embedded EOF byte (0x1a) inside a field is preserved (gh-5500)."""
    # see gh-5500
    data = "a,b\n1\x1a,2"

    expected = pd.DataFrame([["1\x1a", 2]], columns=['a', 'b'])
    result = self.read_csv(StringIO(data))
    tm.assert_frame_equal(result, expected)

def test_internal_eof_byte_to_file(self):
    """An EOF byte read back from an actual file is preserved (gh-16559)."""
    # see gh-16559
    data = b'c1,c2\r\n"test \x1a test", test\r\n'
    expected = pd.DataFrame([["test \x1a test", " test"]],
                            columns=["c1", "c2"])
    path = '__%s__.csv' % tm.rands(10)

    with tm.ensure_clean(path) as path:
        with open(path, "wb") as f:
            f.write(data)

        result = self.read_csv(path)
        tm.assert_frame_equal(result, expected)

def test_sub_character(self):
    """A SUB (0x1a) character in the header stays part of the column
    name (gh-16893)."""
    # see gh-16893
    dirpath = tm.get_data_path()
    filename = os.path.join(dirpath, "sub_char.csv")

    expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
    result = self.read_csv(filename)
    tm.assert_frame_equal(result, expected)
def test_file_handles(self):
    """User-provided file handles must not be closed by the parser
    (GH 14418), including mmap objects on the C engine."""
    # GH 14418 - don't close user provided file handles
    fh = StringIO('a,b\n1,2')
    self.read_csv(fh)
    assert not fh.closed

    with open(self.csv1, 'r') as f:
        self.read_csv(f)
        assert not f.closed

    # mmap not working with python engine
    if self.engine != 'python':

        import mmap
        with open(self.csv1, 'r') as f:
            m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            self.read_csv(m)
            # closed attribute new in python 3.2
            if PY3:
                assert not m.closed
            m.close()
def test_invalid_file_buffer(self):
    """Non-file objects raise ValueError, but seek/tell are not actually
    required of a valid file object (gh-15337, gh-16135)."""
    # see gh-15337
    class InvalidBuffer(object):
        pass

    msg = "Invalid file path or buffer object type"

    with tm.assert_raises_regex(ValueError, msg):
        self.read_csv(InvalidBuffer())

    # gh-16135: we want to ensure that "tell" and "seek"
    # aren't actually being used when we call `read_csv`
    #
    # Thus, while the object may look "invalid" (these
    # methods are attributes of the `StringIO` class),
    # it is still a valid file-object for our purposes.
    class NoSeekTellBuffer(StringIO):
        def tell(self):
            raise AttributeError("No tell method")

        def seek(self, pos, whence=0):
            raise AttributeError("No seek method")

    data = "a\n1"
    expected = pd.DataFrame({"a": [1]})

    result = self.read_csv(NoSeekTellBuffer(data))
    tm.assert_frame_equal(result, expected)

    if PY3:
        from unittest import mock

        with tm.assert_raises_regex(ValueError, msg):
            self.read_csv(mock.Mock())
@tm.capture_stderr
def test_skip_bad_lines(self):
    """error_bad_lines/warn_bad_lines behaviour on ragged rows (gh-15925)."""
    # see gh-15925
    data = 'a\n1\n1,2,3\n4\n5,6,7'

    # Bad lines raise by default ...
    with pytest.raises(ParserError):
        self.read_csv(StringIO(data))

    # ... and when explicitly requested.
    with pytest.raises(ParserError):
        self.read_csv(StringIO(data), error_bad_lines=True)

    expected = DataFrame({'a': [1, 4]})

    # Silently skipping bad lines must leave stderr empty.
    out = self.read_csv(StringIO(data),
                        error_bad_lines=False,
                        warn_bad_lines=False)
    tm.assert_frame_equal(out, expected)

    val = sys.stderr.getvalue()
    assert val == ''

    # Reset the stderr buffer.
    sys.stderr = StringIO()

    # With warnings on, each skipped line is reported to stderr.
    out = self.read_csv(StringIO(data),
                        error_bad_lines=False,
                        warn_bad_lines=True)
    tm.assert_frame_equal(out, expected)

    val = sys.stderr.getvalue()
    assert 'Skipping line 3' in val
    assert 'Skipping line 5' in val
| [
"tbutler.github@internetalias.net"
] | tbutler.github@internetalias.net |
768c574e336ec16dd3c35ce7b0d914c8cd847468 | e46e8ae7c95d16ce69bd21159335ab86bc2415cf | /Chapter2 recurrence/all_range.py | 30f51a965f3127acd33384f06420cb5e435672ea | [] | no_license | HuichuanLI/play-with-data-structure-python | 7f15ca1e679c1f2287a147e6472ebcebd96b235f | b36436efc6af4b1865dacc79db5fa7160eab9d6e | refs/heads/master | 2023-05-25T19:08:01.745918 | 2023-05-16T17:30:40 | 2023-05-16T17:30:40 | 210,399,837 | 32 | 8 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | data_list = [1, 2, 3, 4]
# Partial permutation being built during the depth-first search.
arranges = []
# Count of complete permutations found so far.
total = 0
def search(depth, datas):
    """Depth-first enumeration of every permutation of ``data_list``.

    ``depth`` is the 1-based slot currently being filled and ``datas``
    holds the elements not yet used.  Each completed permutation is
    printed, and the module-level counter ``total`` is incremented.
    """
    global total
    if depth > len(data_list):
        # All slots filled: report one finished arrangement.
        print(arranges)
        total += 1
        return
    for candidate in datas:
        # Choose: place the candidate in the current slot.
        arranges.append(candidate)
        # Recurse on a copy with the chosen element removed.
        remaining = list(datas)
        remaining.remove(candidate)
        search(depth + 1, remaining)
        # Un-choose: restore state before trying the next candidate.
        arranges.pop()
if __name__ == "__main__":
    # Enumerate (and print) every permutation of data_list, then report the count.
    search(1, data_list)
    print("有{}排列方式".format(total))
| [
"lhc14124908@163.com"
] | lhc14124908@163.com |
16991783d0b774c9275a5c191e4b92ce33c85c6d | e79b7882427836346ed6b5fe07506ab6210228c8 | /seq2class/data_input.py | 35006e6f6688c814c51c8c88fa841fffd8d76517 | [] | no_license | feizhihui/standard-logistic-regression | d6ecc03a717dfcac2dd93a78bb5b61c24a755cfa | f54f758f9122e1a0668525a1f395e5aac2d5e0ba | refs/heads/master | 2021-09-04T20:08:54.907030 | 2018-01-22T02:33:02 | 2018-01-22T02:33:02 | 108,926,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,113 | py | # encoding=utf-8
import numpy as np
class DataMaster(object):
    """Load base-modification GFF records and serve balanced training data.

    In train mode the E. coli modification file is read; otherwise the
    lambda-phage file is used.  Each usable record contributes a
    41-character sequence window (integer-encoded via ``seq2matrix``)
    plus a binary label: 0 for plain ``modified_base`` records, 1 for
    any more specific modification type.
    """

    def __init__(self, train_mode=True):
        """Parse the GFF file and build the in-memory dataset arrays.

        train_mode: when True, read the training corpus and pre-compute
        the positive/negative index masks that ``shuffle`` uses for 1:1
        class balancing.
        """
        if train_mode:
            filename = '../Data/ecoli_modifications.gff'
            # filename = '../Data/lambda_modifications.gff'
            cat_idx, seq_idx = 4, 10
            # cat_idx, seq_idx = 2, 8
        else:
            filename = '../Data/lambda_modifications.gff'
            # filename = '../Data/ecoli_modifications.gff'
            cat_idx, seq_idx = 2, 8
            # cat_idx, seq_idx = 4, 10
        with open(filename, 'r') as file:
            train_x, train_y, train_c = [], [], []
            # The first four lines are header material — skip them.
            for row in file.readlines()[4:]:
                cols = row.split()
                # Last 41 characters of the second ';'-separated attribute.
                cat, seq = cols[cat_idx], cols[seq_idx].split(";")[1][-41:]
                if seq.endswith("N"):
                    # Ambiguous trailing base: drop the record.
                    continue
                # assert seq[20] == "A" or seq[20] == "C", "Error:" + seq[20]
                train_x.append(self.seq2matrix(seq))
                # "modified_base" is the generic (negative) category.
                if cat == "modified_base":
                    train_y.append(0)
                else:
                    train_y.append(1)
                train_c.append(cat)
        print('Data input completed filename=', filename)
        self.datasets = np.array(train_x, dtype=np.float32)
        self.datalabels = np.array(train_y, dtype=np.int32)
        # Bug fix: the category-string array must come from train_c (the
        # collected category names); it previously copied train_y (the
        # numeric labels).  Built-in ``str`` replaces the ``np.str`` alias
        # removed in NumPy 1.24.
        self.datacat = np.array(train_c, dtype=str)
        print("availabel data numbers", str(len(self.datalabels)))
        if train_mode:
            # Boolean masks selecting positive / negative examples.
            self.pos_idx = (self.datalabels == 1).reshape(-1)
            self.neg_idx = (self.datalabels == 0).reshape(-1)
            # Balanced size: every positive plus an equal number of negatives.
            self.datasize = len(self.datalabels[self.pos_idx]) * 2
            print("positive data numbers", str(self.datasize // 2))
        else:
            self.datasize = len(self.datalabels)

    # AGCT => 0123
    def seq2matrix(self, line):
        """Encode a nucleotide string as a float vector of length 41.

        Positions beyond ``len(line)`` remain 0.0 (the same code as 'A').
        Raises ValueError on any character outside AGCT.
        """
        seq_arr = np.zeros([41])
        encoding = {'A': 0, 'G': 1, 'C': 2, 'T': 3}
        for j, c in enumerate(line):
            if c not in encoding:
                # ValueError is more precise than the original BaseException
                # and is still caught by any existing broad handlers.
                raise ValueError("Character Exception:" + c)
            seq_arr[j] = encoding[c]
        return seq_arr

    def shuffle(self):
        """Rebuild train_x/train_y/train_c as a shuffled, 1:1 balanced set.

        NOTE(review): negatives are drawn by permuting indices
        0..(n_pos - 1) only, i.e. always the *first* n_pos negatives in a
        random order — presumably a random sample over all negatives was
        intended; confirm before relying on this for training.
        """
        mark = list(range(self.datasize // 2))
        np.random.shuffle(mark)
        self.train_x = np.concatenate([self.datasets[self.pos_idx], self.datasets[self.neg_idx][mark]])
        self.train_y = np.concatenate([self.datalabels[self.pos_idx], self.datalabels[self.neg_idx][mark]])
        self.train_c = np.concatenate([self.datacat[self.pos_idx], self.datacat[self.neg_idx][mark]])
        # Second pass: shuffle positives and negatives together.
        mark = list(range(self.datasize))
        np.random.shuffle(mark)
        self.train_x = self.train_x[mark]
        self.train_y = self.train_y[mark]
        self.train_c = self.train_c[mark]
if __name__ == '__main__':
    # Smoke test: load the training corpus and print its summary counts.
    DataMaster()
| [
"helloworld@csu.edu.cn"
] | helloworld@csu.edu.cn |
22d2c3c3f83ddb99fb4cac35a591c5d7abff3324 | 60cf5de97160c0c104b447879edd0ea1ca9724e8 | /q9.py | 1c3aa54c1c05b472f9496d5158c1dc41c9d979cc | [] | no_license | VinayHaryan/String | 6f6b7924ab87ac8ea5509edefaa3aeda795b0de0 | 089dcf02a8d26afcae0ac2b23c640be5a6079095 | refs/heads/main | 2023-05-27T22:15:31.792837 | 2021-06-17T08:39:42 | 2021-06-17T08:39:42 | 377,736,749 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | '''
CONVERT A LIST OF CHARACTERS INTO A STRING
Given a list of characters, merge all of them into a string.
Examples:
Input : ['g', 'e', 'e', 'k', 's', 'f', 'o',
'r', 'g', 'e', 'e', 'k', 's']
Output : geeksforgeeks
Input : ['p', 'r', 'o', 'g', 'r', 'a', 'm',
'm', 'i', 'n', 'g']
Output : programming
'''
# The characters of the target word, kept as a list to demonstrate merging.
S = list("geeksforgeeks")
# str.join concatenates every element of the list in a single pass.
merged = ''.join(S)
print(merged)
"noreply@github.com"
] | VinayHaryan.noreply@github.com |
d65e3c85419dce845b7906e831452a24b0c0a059 | 75d8667735782cd1d0eb4877e52c89da5cd92dde | /nova/conf/libvirt.py | a5a1598c2af9d257272381e95696a7a45b4b2704 | [
"Apache-2.0"
] | permissive | bopopescu/nova-token | ffecfd3ec561936b7d9d7e691bc57383cde05436 | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | refs/heads/master | 2022-11-22T09:53:31.073483 | 2016-05-14T02:47:01 | 2016-05-15T22:02:55 | 282,105,621 | 0 | 0 | Apache-2.0 | 2020-07-24T02:42:19 | 2020-07-24T02:42:18 | null | UTF-8 | Python | false | false | 32,335 | py | begin_unit
comment|'# Copyright 2016 OpenStack Foundation'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'itertools'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'conf'
name|'import'
name|'paths'
newline|'\n'
nl|'\n'
name|'from'
name|'oslo_config'
name|'import'
name|'cfg'
newline|'\n'
nl|'\n'
comment|'# Downtime period in milliseconds'
nl|'\n'
DECL|variable|LIVE_MIGRATION_DOWNTIME_MIN
name|'LIVE_MIGRATION_DOWNTIME_MIN'
op|'='
number|'100'
newline|'\n'
comment|'# Step count'
nl|'\n'
DECL|variable|LIVE_MIGRATION_DOWNTIME_STEPS_MIN
name|'LIVE_MIGRATION_DOWNTIME_STEPS_MIN'
op|'='
number|'3'
newline|'\n'
comment|'# Delay in seconds'
nl|'\n'
DECL|variable|LIVE_MIGRATION_DOWNTIME_DELAY_MIN
name|'LIVE_MIGRATION_DOWNTIME_DELAY_MIN'
op|'='
number|'10'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_group
name|'libvirt_group'
op|'='
name|'cfg'
op|'.'
name|'OptGroup'
op|'('
string|'"libvirt"'
op|','
nl|'\n'
DECL|variable|title
name|'title'
op|'='
string|'"Libvirt Options"'
op|','
nl|'\n'
name|'help'
op|'='
string|'"""\nLibvirt options allows cloud administrator to configure related\nlibvirt hypervisor driver to be used within an OpenStack deployment.\n"""'
op|')'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_general_opts
name|'libvirt_general_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'rescue_image_id'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Rescue ami image. This will not be used if an image id '"
nl|'\n'
string|"'is provided by the user.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'rescue_kernel_id'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Rescue aki image'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'rescue_ramdisk_id'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Rescue ari image'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'virt_type'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'kvm'"
op|','
nl|'\n'
DECL|variable|choices
name|'choices'
op|'='
op|'('
string|"'kvm'"
op|','
string|"'lxc'"
op|','
string|"'qemu'"
op|','
string|"'uml'"
op|','
string|"'xen'"
op|','
string|"'parallels'"
op|')'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Libvirt domain type'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'connection_uri'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"''"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Override the default libvirt URI '"
nl|'\n'
string|"'(which is dependent on virt_type)'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'inject_password'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'False'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Inject the admin password at boot time, '"
nl|'\n'
string|"'without an agent.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'inject_key'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'False'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Inject the ssh public key at boot time'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'inject_partition'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
op|'-'
number|'2'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'The partition to inject to : '"
nl|'\n'
string|"'-2 => disable, -1 => inspect (libguestfs only), '"
nl|'\n'
string|"'0 => not partitioned, >0 => partition number'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'use_usb_tablet'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'True'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Sync virtual and real mouse cursors in Windows VMs'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'live_migration_inbound_addr'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Live migration target ip or hostname '"
nl|'\n'
string|"'(if this option is set to None, which is the default, '"
nl|'\n'
string|"'the hostname of the migration target '"
nl|'\n'
string|"'compute node will be used)'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'live_migration_uri'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Override the default libvirt live migration target URI '"
nl|'\n'
string|"'(which is dependent on virt_type) '"
nl|'\n'
string|'\'(any included "%s" is replaced with \''
nl|'\n'
string|"'the migration target hostname)'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'live_migration_flag'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '"
nl|'\n'
string|"'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Migration flags to be set for live migration'"
op|','
nl|'\n'
DECL|variable|deprecated_for_removal
name|'deprecated_for_removal'
op|'='
name|'True'
op|','
nl|'\n'
DECL|variable|deprecated_reason
name|'deprecated_reason'
op|'='
string|"'The correct live migration flags can be '"
nl|'\n'
string|"'inferred from the new '"
nl|'\n'
string|"'live_migration_tunnelled config option. '"
nl|'\n'
string|"'live_migration_flag will be removed to '"
nl|'\n'
string|"'avoid potential misconfiguration.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'block_migration_flag'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '"
nl|'\n'
string|"'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '"
nl|'\n'
string|"'VIR_MIGRATE_NON_SHARED_INC'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Migration flags to be set for block migration'"
op|','
nl|'\n'
DECL|variable|deprecated_for_removal
name|'deprecated_for_removal'
op|'='
name|'True'
op|','
nl|'\n'
DECL|variable|deprecated_reason
name|'deprecated_reason'
op|'='
string|"'The correct block migration flags can be '"
nl|'\n'
string|"'inferred from the new '"
nl|'\n'
string|"'live_migration_tunnelled config option. '"
nl|'\n'
string|"'block_migration_flag will be removed to '"
nl|'\n'
string|"'avoid potential misconfiguration.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'live_migration_tunnelled'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Whether to use tunnelled migration, where migration '"
nl|'\n'
string|"'data is transported over the libvirtd connection. If '"
nl|'\n'
string|"'True, we use the VIR_MIGRATE_TUNNELLED migration flag, '"
nl|'\n'
string|"'avoiding the need to configure the network to allow '"
nl|'\n'
string|"'direct hypervisor to hypervisor communication. If '"
nl|'\n'
string|"'False, use the native transport. If not set, Nova '"
nl|'\n'
string|"'will choose a sensible default based on, for example '"
nl|'\n'
string|"'the availability of native encryption support in the '"
nl|'\n'
string|"'hypervisor.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'live_migration_bandwidth'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'0'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Maximum bandwidth(in MiB/s) to be used during migration. '"
nl|'\n'
string|"'If set to 0, will choose a suitable default. Some '"
nl|'\n'
string|"'hypervisors do not support this feature and will return '"
nl|'\n'
string|"'an error if bandwidth is not 0. Please refer to the '"
nl|'\n'
string|"'libvirt documentation for further details'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'live_migration_downtime'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'500'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Maximum permitted downtime, in milliseconds, for live '"
nl|'\n'
string|"'migration switchover. Will be rounded up to a minimum '"
nl|'\n'
string|"'of %dms. Use a large value if guest liveness is '"
nl|'\n'
string|"'unimportant.'"
op|'%'
name|'LIVE_MIGRATION_DOWNTIME_MIN'
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'live_migration_downtime_steps'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'10'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Number of incremental steps to reach max downtime value. '"
nl|'\n'
string|"'Will be rounded up to a minimum of %d steps'"
op|'%'
nl|'\n'
name|'LIVE_MIGRATION_DOWNTIME_STEPS_MIN'
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'live_migration_downtime_delay'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'75'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Time to wait, in seconds, between each step increase '"
nl|'\n'
string|"'of the migration downtime. Minimum delay is %d seconds. '"
nl|'\n'
string|"'Value is per GiB of guest RAM + disk to be transferred, '"
nl|'\n'
string|"'with lower bound of a minimum of 2 GiB per device'"
op|'%'
nl|'\n'
name|'LIVE_MIGRATION_DOWNTIME_DELAY_MIN'
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'live_migration_completion_timeout'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'800'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Time to wait, in seconds, for migration to successfully '"
nl|'\n'
string|"'complete transferring data before aborting the '"
nl|'\n'
string|"'operation. Value is per GiB of guest RAM + disk to be '"
nl|'\n'
string|"'transferred, with lower bound of a minimum of 2 GiB. '"
nl|'\n'
string|"'Should usually be larger than downtime delay * downtime '"
nl|'\n'
string|"'steps. Set to 0 to disable timeouts.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'live_migration_progress_timeout'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'150'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Time to wait, in seconds, for migration to make forward '"
nl|'\n'
string|"'progress in transferring data before aborting the '"
nl|'\n'
string|"'operation. Set to 0 to disable timeouts.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'snapshot_image_format'"
op|','
nl|'\n'
DECL|variable|choices
name|'choices'
op|'='
op|'('
string|"'raw'"
op|','
string|"'qcow2'"
op|','
string|"'vmdk'"
op|','
string|"'vdi'"
op|')'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Snapshot image format. Defaults to same as source image'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'disk_prefix'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Override the default disk prefix for the devices attached'"
nl|'\n'
string|"' to a server, which is dependent on virt_type. '"
nl|'\n'
string|"'(valid options are: sd, xvd, uvd, vd)'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'wait_soft_reboot_seconds'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'120'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Number of seconds to wait for instance to shut down after'"
nl|'\n'
string|"' soft reboot request is made. We fall back to hard reboot'"
nl|'\n'
string|"' if instance does not shutdown within this window.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'cpu_mode'"
op|','
nl|'\n'
DECL|variable|choices
name|'choices'
op|'='
op|'('
string|"'host-model'"
op|','
string|"'host-passthrough'"
op|','
string|"'custom'"
op|','
string|"'none'"
op|')'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|'\'Set to "host-model" to clone the host CPU feature flags; \''
nl|'\n'
string|'\'to "host-passthrough" to use the host CPU model exactly; \''
nl|'\n'
string|'\'to "custom" to use a named CPU model; \''
nl|'\n'
string|'\'to "none" to not set any CPU model. \''
nl|'\n'
string|'\'If virt_type="kvm|qemu", it will default to \''
nl|'\n'
string|'\'"host-model", otherwise it will default to "none"\''
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'cpu_model'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Set to a named libvirt CPU model (see names listed '"
nl|'\n'
string|"'in /usr/share/libvirt/cpu_map.xml). Only has effect if '"
nl|'\n'
string|'\'cpu_mode="custom" and virt_type="kvm|qemu"\''
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'snapshots_directory'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'$instances_path/snapshots'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Location where libvirt driver will store snapshots '"
nl|'\n'
string|"'before uploading them to image service'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'xen_hvmloader_path'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'/usr/lib/xen/boot/hvmloader'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Location where the Xen hvmloader is kept'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'ListOpt'
op|'('
string|"'disk_cachemodes'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
op|'['
op|']'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Specific cachemodes to use for different disk types '"
nl|'\n'
string|"'e.g: file=directsync,block=none'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'rng_dev_path'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'A path to a device that will be used as source of '"
nl|'\n'
string|"'entropy on the host. Permitted options are: '"
nl|'\n'
string|"'/dev/random or /dev/hwrng'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'ListOpt'
op|'('
string|"'hw_machine_type'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'For qemu or KVM guests, set this option to specify '"
nl|'\n'
string|"'a default machine type per host architecture. '"
nl|'\n'
string|"'You can find a list of supported machine types '"
nl|'\n'
string|"'in your environment by checking the output of '"
nl|'\n'
string|'\'the "virsh capabilities"command. The format of the \''
nl|'\n'
string|"'value for this config option is host-arch=machine-type. '"
nl|'\n'
string|"'For example: x86_64=machinetype1,armv7l=machinetype2'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'sysinfo_serial'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'auto'"
op|','
nl|'\n'
DECL|variable|choices
name|'choices'
op|'='
op|'('
string|"'none'"
op|','
string|"'os'"
op|','
string|"'hardware'"
op|','
string|"'auto'"
op|')'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|'\'The data source used to the populate the host "serial" \''
nl|'\n'
string|"'UUID exposed to guest in the virtual BIOS.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'mem_stats_period_seconds'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'10'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'A number of seconds to memory usage statistics period. '"
nl|'\n'
string|"'Zero or negative value mean to disable memory usage '"
nl|'\n'
string|"'statistics.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'ListOpt'
op|'('
string|"'uid_maps'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
op|'['
op|']'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'List of uid targets and ranges.'"
nl|'\n'
string|"'Syntax is guest-uid:host-uid:count'"
nl|'\n'
string|"'Maximum of 5 allowed.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'ListOpt'
op|'('
string|"'gid_maps'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
op|'['
op|']'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'List of guid targets and ranges.'"
nl|'\n'
string|"'Syntax is guest-gid:host-gid:count'"
nl|'\n'
string|"'Maximum of 5 allowed.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'realtime_scheduler_priority'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'1'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'In a realtime host context vCPUs for guest will run in '"
nl|'\n'
string|"'that scheduling priority. Priority depends on the host '"
nl|'\n'
string|"'kernel (usually 1-99)'"
op|')'
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_imagebackend_opts
name|'libvirt_imagebackend_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'images_type'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'default'"
op|','
nl|'\n'
DECL|variable|choices
name|'choices'
op|'='
op|'('
string|"'raw'"
op|','
string|"'qcow2'"
op|','
string|"'lvm'"
op|','
string|"'rbd'"
op|','
string|"'ploop'"
op|','
string|"'default'"
op|')'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'VM Images format. If default is specified, then'"
nl|'\n'
string|"' use_cow_images flag is used instead of this one.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'images_volume_group'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'LVM Volume Group that is used for VM images, when you'"
nl|'\n'
string|"' specify images_type=lvm.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'sparse_logical_volumes'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'False'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Create sparse logical volumes (with virtualsize)'"
nl|'\n'
string|"' if this flag is set to True.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'images_rbd_pool'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'rbd'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'The RADOS pool in which rbd volumes are stored'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'images_rbd_ceph_conf'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"''"
op|','
comment|'# default determined by librados'
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Path to the ceph configuration file to use'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'hw_disk_discard'"
op|','
nl|'\n'
DECL|variable|choices
name|'choices'
op|'='
op|'('
string|"'ignore'"
op|','
string|"'unmap'"
op|')'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Discard option for nova managed disks. Need'"
nl|'\n'
string|"' Libvirt(1.0.6) Qemu1.5 (raw format) Qemu1.6(qcow2'"
nl|'\n'
string|"' format)'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_imagecache_opts
name|'libvirt_imagecache_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'image_info_filename_pattern'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'$instances_path/$image_cache_subdirectory_name/'"
nl|'\n'
string|"'%(image)s.info'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Allows image information files to be stored in '"
nl|'\n'
string|"'non-standard locations'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'remove_unused_kernels'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'True'
op|','
nl|'\n'
DECL|variable|deprecated_for_removal
name|'deprecated_for_removal'
op|'='
name|'True'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'DEPRECATED: Should unused kernel images be removed? '"
nl|'\n'
string|"'This is only safe to enable if all compute nodes have '"
nl|'\n'
string|"'been updated to support this option (running Grizzly or '"
nl|'\n'
string|"'newer level compute). This will be the default behavior '"
nl|'\n'
string|"'in the 13.0.0 release.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'remove_unused_resized_minimum_age_seconds'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'3600'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Unused resized base images younger than this will not be '"
nl|'\n'
string|"'removed'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'checksum_base_images'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'False'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Write a checksum for files in _base to disk'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'checksum_interval_seconds'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'3600'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'How frequently to checksum base images'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_lvm_opts
name|'libvirt_lvm_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'volume_clear'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'zero'"
op|','
nl|'\n'
DECL|variable|choices
name|'choices'
op|'='
op|'('
string|"'none'"
op|','
string|"'zero'"
op|','
string|"'shred'"
op|')'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Method used to wipe old volumes.'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'volume_clear_size'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'0'
op|','
nl|'\n'
name|'help'
op|'='
string|"'Size in MiB to wipe at start of old volumes. 0 => all'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_utils_opts
name|'libvirt_utils_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'snapshot_compression'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'False'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Compress snapshot images when possible. This '"
nl|'\n'
string|"'currently applies exclusively to qcow2 images'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_vif_opts
name|'libvirt_vif_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'use_virtio_for_bridges'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'True'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Use virtio for bridge interfaces with KVM/QEMU'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_volume_opts
name|'libvirt_volume_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'ListOpt'
op|'('
string|"'qemu_allowed_storage_drivers'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
op|'['
op|']'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Protocols listed here will be accessed directly '"
nl|'\n'
string|"'from QEMU. Currently supported protocols: [gluster]'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_volume_aoe_opts
name|'libvirt_volume_aoe_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'num_aoe_discover_tries'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'3'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Number of times to rediscover AoE target to find volume'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_volume_glusterfs_opts
name|'libvirt_volume_glusterfs_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'glusterfs_mount_point_base'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'paths'
op|'.'
name|'state_path_def'
op|'('
string|"'mnt'"
op|')'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Directory where the glusterfs volume is mounted on the '"
nl|'\n'
string|"'compute node'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_volume_iscsi_opts
name|'libvirt_volume_iscsi_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'num_iscsi_scan_tries'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'5'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Number of times to rescan iSCSI target to find volume'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'iscsi_use_multipath'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'False'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Use multipath connection of the iSCSI or FC volume'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'iscsi_iface'"
op|','
nl|'\n'
DECL|variable|deprecated_name
name|'deprecated_name'
op|'='
string|"'iscsi_transport'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'The iSCSI transport iface to use to connect to target in '"
nl|'\n'
string|"'case offload support is desired. Default format is of '"
nl|'\n'
string|"'the form <transport_name>.<hwaddress> where '"
nl|'\n'
string|"'<transport_name> is one of (be2iscsi, bnx2i, cxgb3i, '"
nl|'\n'
string|"'cxgb4i, qla4xxx, ocs) and <hwaddress> is the MAC address '"
nl|'\n'
string|"'of the interface and can be generated via the '"
nl|'\n'
string|"'iscsiadm -m iface command. Do not confuse the '"
nl|'\n'
string|"'iscsi_iface parameter to be provided here with the '"
nl|'\n'
string|"'actual transport name.'"
op|')'
op|','
nl|'\n'
comment|'# iser is also supported, but use LibvirtISERVolumeDriver'
nl|'\n'
comment|'# instead'
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_volume_iser_opts
name|'libvirt_volume_iser_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'IntOpt'
op|'('
string|"'num_iser_scan_tries'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
number|'5'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Number of times to rescan iSER target to find volume'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'BoolOpt'
op|'('
string|"'iser_use_multipath'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'False'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Use multipath connection of the iSER volume'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_volume_net_opts
name|'libvirt_volume_net_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'rbd_user'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'The RADOS client name for accessing rbd volumes'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'rbd_secret_uuid'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'The libvirt UUID of the secret for the rbd_user'"
nl|'\n'
string|"'volumes'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_volume_nfs_opts
name|'libvirt_volume_nfs_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'nfs_mount_point_base'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'paths'
op|'.'
name|'state_path_def'
op|'('
string|"'mnt'"
op|')'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Directory where the NFS volume is mounted on the'"
nl|'\n'
string|"' compute node'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'nfs_mount_options'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Mount options passed to the NFS client. See section '"
nl|'\n'
string|"'of the nfs man page for details'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_volume_quobyte_opts
name|'libvirt_volume_quobyte_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'quobyte_mount_point_base'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'paths'
op|'.'
name|'state_path_def'
op|'('
string|"'mnt'"
op|')'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Directory where the Quobyte volume is mounted on the '"
nl|'\n'
string|"'compute node'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'quobyte_client_cfg'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Path to a Quobyte Client configuration file.'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_volume_scality_opts
name|'libvirt_volume_scality_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'scality_sofs_config'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Path or URL to Scality SOFS configuration file'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'scality_sofs_mount_point'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'$state_path/scality'"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Base dir where Scality SOFS shall be mounted'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_volume_smbfs_opts
name|'libvirt_volume_smbfs_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'smbfs_mount_point_base'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
name|'paths'
op|'.'
name|'state_path_def'
op|'('
string|"'mnt'"
op|')'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Directory where the SMBFS shares are mounted on the '"
nl|'\n'
string|"'compute node'"
op|')'
op|','
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'smbfs_mount_options'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"''"
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Mount options passed to the SMBFS client. See '"
nl|'\n'
string|"'mount.cifs man page for details. Note that the '"
nl|'\n'
string|"'libvirt-qemu uid and gid must be specified.'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|libvirt_remotefs_opts
name|'libvirt_remotefs_opts'
op|'='
op|'['
nl|'\n'
name|'cfg'
op|'.'
name|'StrOpt'
op|'('
string|"'remote_filesystem_transport'"
op|','
nl|'\n'
DECL|variable|default
name|'default'
op|'='
string|"'ssh'"
op|','
nl|'\n'
DECL|variable|choices
name|'choices'
op|'='
op|'('
string|"'ssh'"
op|','
string|"'rsync'"
op|')'
op|','
nl|'\n'
DECL|variable|help
name|'help'
op|'='
string|"'Use ssh or rsync transport for creating, copying, '"
nl|'\n'
string|"'removing files on the remote host.'"
op|')'
op|','
nl|'\n'
op|']'
newline|'\n'
nl|'\n'
DECL|variable|ALL_OPTS
name|'ALL_OPTS'
op|'='
name|'list'
op|'('
name|'itertools'
op|'.'
name|'chain'
op|'('
nl|'\n'
name|'libvirt_general_opts'
op|','
nl|'\n'
name|'libvirt_imagebackend_opts'
op|','
nl|'\n'
name|'libvirt_imagecache_opts'
op|','
nl|'\n'
name|'libvirt_lvm_opts'
op|','
nl|'\n'
name|'libvirt_utils_opts'
op|','
nl|'\n'
name|'libvirt_vif_opts'
op|','
nl|'\n'
name|'libvirt_volume_opts'
op|','
nl|'\n'
name|'libvirt_volume_aoe_opts'
op|','
nl|'\n'
name|'libvirt_volume_glusterfs_opts'
op|','
nl|'\n'
name|'libvirt_volume_iscsi_opts'
op|','
nl|'\n'
name|'libvirt_volume_iser_opts'
op|','
nl|'\n'
name|'libvirt_volume_net_opts'
op|','
nl|'\n'
name|'libvirt_volume_nfs_opts'
op|','
nl|'\n'
name|'libvirt_volume_quobyte_opts'
op|','
nl|'\n'
name|'libvirt_volume_scality_opts'
op|','
nl|'\n'
name|'libvirt_volume_smbfs_opts'
op|','
nl|'\n'
name|'libvirt_remotefs_opts'
nl|'\n'
op|')'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|register_opts
name|'def'
name|'register_opts'
op|'('
name|'conf'
op|')'
op|':'
newline|'\n'
indent|' '
name|'conf'
op|'.'
name|'register_group'
op|'('
name|'libvirt_group'
op|')'
newline|'\n'
name|'conf'
op|'.'
name|'register_opts'
op|'('
name|'ALL_OPTS'
op|','
name|'group'
op|'='
name|'libvirt_group'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|list_opts
dedent|''
name|'def'
name|'list_opts'
op|'('
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'{'
name|'libvirt_group'
op|':'
name|'ALL_OPTS'
op|'}'
newline|'\n'
dedent|''
endmarker|''
end_unit
| [
"dmg@uvic.ca"
] | dmg@uvic.ca |
892b7d8bf3495d401e44eb57611f36916d1b43a7 | fbfd4efc9f879f90c194aaefe6217a314737483e | /lib/python3.6/site-packages/pytablereader/spreadsheet/excelloader.py | 4c7a6c47870aa80cc986c122a450bdcbc578913c | [] | no_license | EricSchles/pshtt_command | f3519da6def9b42afd7be274f6910be5086a0c6d | a1a01c95aad54fbf124483e3b34cce02ce8eb1d7 | refs/heads/master | 2021-01-20T11:38:32.529084 | 2017-08-28T18:56:34 | 2017-08-28T18:56:34 | 101,676,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,665 | py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import xlrd
from six.moves import range
from .._logger import FileSourceLogger
from .._validator import FileValidator
from ..error import InvalidDataError
from ..error import OpenError
from ..tabledata import TableData
from .core import SpreadSheetLoader
class ExcelTableFileLoader(SpreadSheetLoader):
"""
A file loader class to extract tabular data from Microsoft Excel |TM|
files.
:param str file_path: Path to the loading Excel workbook file.
.. py:attribute:: table_name
Table name string. Defaults to ``%(sheet)s``.
.. py:attribute:: start_row
The first row to search header row.
"""
@property
def format_name(self):
return "excel"
@property
def _sheet_name(self):
return self._worksheet.name
@property
def _row_count(self):
return self._worksheet.nrows
@property
def _col_count(self):
return self._worksheet.ncols
def __init__(self, file_path=None):
super(ExcelTableFileLoader, self).__init__(file_path)
self._validator = FileValidator(file_path)
self._logger = FileSourceLogger(self)
def load(self):
"""
Extract tabular data as |TableData| instances from an Excel file.
|spreadsheet_load_desc|
:return:
Loaded |TableData| iterator.
|TableData| created for each sheet in the workbook.
|load_table_name_desc|
=================== ====================================
Format specifier Value after the replacement
=================== ====================================
``%(filename)s`` Filename of the workbook
``%(sheet)s`` Name of the sheet
``%(format_name)s`` ``"spreadsheet"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ====================================
:rtype: |TableData| iterator
:raises pytablereader.error.InvalidDataError:
If the header row is not found.
:raises pytablereader.error.OpenError:
If failed to open the source file.
"""
self._validate()
self._logger.logging_load()
try:
workbook = xlrd.open_workbook(self.source)
except xlrd.biffh.XLRDError as e:
raise OpenError(e)
for worksheet in workbook.sheets():
self._worksheet = worksheet
if self._is_empty_sheet():
continue
self.__extract_not_empty_col_idx()
try:
start_row_idx = self._get_start_row_idx()
except InvalidDataError:
continue
header_list = self.__get_row_values(start_row_idx)
record_list = [
self.__get_row_values(row_idx)
for row_idx in range(start_row_idx + 1, self._row_count)
]
self.inc_table_count()
yield TableData(
self._make_table_name(), header_list, record_list,
is_strip_quote=True)
def _is_empty_sheet(self):
return any([
self._col_count == 0,
self._row_count <= 1,
# nrows == 1 means exists header row only
])
def _get_start_row_idx(self):
for row_idx in range(self.start_row, self._row_count):
if self.__is_header_row(row_idx):
break
else:
raise InvalidDataError("header row not found")
return row_idx
def __is_header_row(self, row_idx):
cell_type_list = self._worksheet.row_types(
row_idx, self._start_col_idx, self._end_col_idx + 1)
return xlrd.XL_CELL_EMPTY not in cell_type_list
@staticmethod
def __is_empty_cell_type_list(cell_type_list):
return all([
cell_type == xlrd.XL_CELL_EMPTY
for cell_type in cell_type_list
])
def __extract_not_empty_col_idx(self):
col_idx_list = [
col_idx
for col_idx in range(self._col_count)
if not self.__is_empty_cell_type_list(
self._worksheet.col_types(col_idx))
]
self._start_col_idx = min(col_idx_list)
self._end_col_idx = max(col_idx_list)
def __get_row_values(self, row_idx):
return self._worksheet.row_values(
row_idx, self._start_col_idx, self._end_col_idx + 1)
| [
"ericschles@gmail.com"
] | ericschles@gmail.com |
44cc6301ab5207e6ca94e5f9b3f11c3fa2c5d5df | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02661/s571444851.py | 19bb23426d999efd3a0308fcee53bb7a6337cc35 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | N = int(input())
A = []
B = []
for i in range(N):
a, b = list(map(int, input().split()))
A.append(a)
B.append(b)
A = sorted(A)
B = sorted(B)
if N % 2:
ans = B[N//2] - A[N//2] + 1
else:
l = A[N//2-1] + A[N//2]
r = B[N//2-1] + B[N//2]
ans = r-l+1
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0885f9deabdbc6da3ce5bc00b3bdbd05693f715c | 76a8ea60480331f0f61aeb61de55be9a6270e733 | /downloadable-site-packages/statsmodels/tsa/arima/estimators/burg.py | 13bff501b0145d03d78b3770b04644aef1090db9 | [
"MIT"
] | permissive | bhagyas/Pyto | cd2ec3f35bec703db4ac29b56d17abc4bf03e375 | 907024a9b3e04a2a9de54976778c0e1a56b7b83c | refs/heads/master | 2022-11-19T13:05:07.392454 | 2020-07-21T17:33:39 | 2020-07-21T17:33:39 | 281,886,535 | 2 | 0 | MIT | 2020-07-23T07:48:03 | 2020-07-23T07:48:02 | null | UTF-8 | Python | false | false | 2,286 | py | """
Burg's method for estimating AR(p) model parameters.
Author: Chad Fulton
License: BSD-3
"""
import numpy as np
from statsmodels.tools.tools import Bunch
from statsmodels.regression import linear_model
from statsmodels.tsa.arima.specification import SARIMAXSpecification
from statsmodels.tsa.arima.params import SARIMAXParams
def burg(endog, ar_order=0, demean=True):
"""
Estimate AR parameters using Burg technique.
Parameters
----------
endog : array_like or SARIMAXSpecification
Input time series array, assumed to be stationary.
ar_order : int, optional
Autoregressive order. Default is 0.
demean : bool, optional
Whether to estimate and remove the mean from the process prior to
fitting the autoregressive coefficients.
Returns
-------
parameters : SARIMAXParams object
Contains the parameter estimates from the final iteration.
other_results : Bunch
Includes one component, `spec`, which is the `SARIMAXSpecification`
instance corresponding to the input arguments.
Notes
-----
The primary reference is [1]_, section 5.1.2.
This procedure assumes that the series is stationary.
This function is a light wrapper around `statsmodels.linear_model.burg`.
References
----------
.. [1] Brockwell, Peter J., and Richard A. Davis. 2016.
Introduction to Time Series and Forecasting. Springer.
"""
spec = SARIMAXSpecification(endog, ar_order=ar_order)
endog = spec.endog
# Workaround for statsmodels.tsa.stattools.pacf_burg which doesn't work
# on integer input
# TODO: remove when possible
if np.issubdtype(endog.dtype, np.dtype(int)):
endog = endog * 1.0
if not spec.is_ar_consecutive:
raise ValueError('Burg estimation unavailable for models with'
' seasonal or otherwise non-consecutive AR orders.')
p = SARIMAXParams(spec=spec)
if ar_order == 0:
p.sigma2 = np.var(endog)
else:
p.ar_params, p.sigma2 = linear_model.burg(endog, order=ar_order,
demean=demean)
# Construct other results
other_results = Bunch({
'spec': spec,
})
return p, other_results
| [
"adrilabbelol@gmail.com"
] | adrilabbelol@gmail.com |
96580f873da4af76eeaa0a7a2437894bdc7df269 | 67ed96b8b4762c78c8f96d16bae2076e30dc184d | /CMSSW_5_3_20/src/flowAnalysis/SkimTrack/test/crab/tracktc/anaM185150/crab_pPbrereco.py | 8cd44ec51eb369b9e8a3f23ae7c6680aa1c168ef | [] | no_license | XuQiao/HI | 4dae1dcf600d0ea64ea26403197135790ba3c4a2 | e963cd9a5a393480e83e697b37327f4f7c4de8d4 | refs/heads/master | 2020-12-25T16:53:40.474495 | 2017-02-16T06:00:17 | 2017-02-16T06:00:17 | 64,085,142 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | from WMCore.Configuration import Configuration
config = Configuration()
config.section_('General')
config.General.transferOutputs = True
config.General.requestName = 'FlowLYZHMpPbReReco'
config.section_('JobType')
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'skimtrack_185150_cfi.py'
config.JobType.outputFiles = ['skimTreeTrack.root']
config.section_('Data')
config.Data.inputDBS = 'phys03'
config.Data.inputDataset = '/PAHighPt/davidlw-PA2013_FlowCorr_PromptReco_TrkHM_Gplus_Rereco_ReTracking_v18-28b2b9cce04ec3f20baeb96fbd2295a8/USER'
#config.Data.lumiMask = ''
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 50
#config.Data.runRange = '193093-193999'
#config.Data.publishDBS = 'https://cmsweb.cern.ch/dbs/prod/phys03/DBSWriter/'
#config.Data.inputDBS = 'https://cmsweb.cern.ch/dbs/prod/global/DBSReader/'
config.Data.publication = False
#config.Data.publishDataName = ''
config.Data.outLFNDirBase = '/store/user/qixu/flow/PACorrHM/skim/tracktc/multiM185150/FlowLYZHMpPbReReco'
config.section_('Site')
config.Site.storageSite = 'T2_US_Vanderbilt'
| [
"qixu@cern.ch"
] | qixu@cern.ch |
4ec1cd9429ade9f0f19dfd8668f57e95449097d2 | 214c4a79fd77008bf688aa2fc8bafdff8a80780b | /src/spring/azext_spring/vendored_sdks/appplatform/v2022_04_01/_configuration.py | c776f7eae7d56275c15a1602afefcb70b5d0b14a | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | openapi-env-test/azure-cli-extensions | abb8bcf005a5b75ee47a12745a958bcb64996a44 | d4ffc4100d82af66c4f88e9401397c66b85bd501 | refs/heads/master | 2022-09-15T18:08:46.848581 | 2022-08-09T06:09:14 | 2022-08-09T06:09:14 | 243,698,123 | 0 | 1 | MIT | 2020-11-10T06:59:47 | 2020-02-28T06:53:57 | Python | UTF-8 | Python | false | false | 3,668 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class AppPlatformManagementClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for AppPlatformManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Gets subscription ID which uniquely identify the Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2022-04-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(AppPlatformManagementClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "2022-04-01") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-appplatform/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
6d4b092383c9317c609ea382bc8fb36cc34e6ff7 | d9e7bd5f582dd3d1a63fb10197896d462ce49027 | /numpy/arrays1.py | cd7171f3ef68e24bfb1c1eb73f84987129ff02d1 | [] | no_license | abhinai96/Python_conceptual_based_programs | 137aa8d4c1354ba7586f7ec2dea6683109cf9393 | 795883b28389ae2b0c46ddacea493530f40774a6 | refs/heads/master | 2022-12-15T11:57:28.862114 | 2020-09-15T03:10:35 | 2020-09-15T03:10:35 | 295,593,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | """import numpy as np
a=np.array([[1,2,3],[5,2,1],[3,4,5]])
print(a)"""
"""import numpy as np
a=np.array([[1,2,3],[4,5,6]])
b=a.diagonal()
print(b)"""
"""import numpy as np
a=np.array([[1,2,3],[4,5,6]])
b=np.delete(a,1,axis=1)
print(b)"""
"""# A basic code for matrix input from user
R = int(input("Enter the number of rows:"))
C = int(input("Enter the number of columns:"))
# Initialize matrix
matrix = []
print("Enter the entries rowwise:")
# For user input
for i in range(R): # A for loop for row entries
a =[]
for j in range(C): # A for loop for column entries
a.append(int(input()))
matrix.append(a)
# For printing the matrix
for i in range(R):
for j in range(C):
print(matrix[i][j], end = " ")
print()"""
| [
"noreply@github.com"
] | abhinai96.noreply@github.com |
cdfd8a0323170f29eafbe3b58d1bfae11f02cfc2 | 048df2b4dc5ad153a36afad33831017800b9b9c7 | /atcoder/arc062/arc062_b.py | b829cb10fa916c10935843f0ebfb04777b776ee5 | [] | no_license | fluffyowl/past-submissions | a73e8f5157c647634668c200cd977f4428c6ac7d | 24706da1f79e5595b2f9f2583c736135ea055eb7 | refs/heads/master | 2022-02-21T06:32:43.156817 | 2019-09-16T00:17:50 | 2019-09-16T00:17:50 | 71,639,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | S = raw_input()
print len(S)/2-S.count('p')
| [
"nebukuro09@gmail.com"
] | nebukuro09@gmail.com |
55fa7227b63e17cf878b0681f01929bab06e011c | ec00584ab288267a7cf46c5cd4f76bbec1c70a6b | /Python/__Data structure/__List/__sorting/is_palindrome.py | 4bf8f9ced8a4ee03328a9eb29a0597957fff6ee3 | [] | no_license | rahuldbhadange/Python | b4cc806ff23953389c9507f43d817b3815260e19 | 7e162117f1acc12537c7eeb36d6983d804122ff3 | refs/heads/master | 2021-06-23T05:04:20.053777 | 2020-01-28T10:34:28 | 2020-01-28T10:34:28 | 217,307,612 | 0 | 0 | null | 2021-06-10T22:44:11 | 2019-10-24T13:35:42 | Python | UTF-8 | Python | false | false | 513 | py | def is_palindrome(word):
q = len(word) - 1
# print(word)
for p in word:
# print("p:", p)
# print("q:", word[q])
if p == word[q]:
# print(p, word[q])
q = q - 1
# print("Yes !!!")
else:
# print("No !!!", p, word[q])
return "No !!!", p, word[q] # ***important
break
# return "Yes !!!"
ans, p, q = is_palindrome('delevelid')
if ans == None:
print("Yes !!!")
else:
print(ans, p, q)
| [
"46024570+rahuldbhadange@users.noreply.github.com"
] | 46024570+rahuldbhadange@users.noreply.github.com |
ef9b3d8b1e318bffc12724be8704c0874cabf335 | 2fac796fa58c67fb5a4a95a6e7f28cbef169318b | /python/drop-eggs.py | 8f4ad841c4590fea0575ac1b7cbb9419816ca206 | [] | no_license | jwyx3/practices | f3fe087432e79c8e34f3af3a78dd10278b66dd38 | 6fec95b9b4d735727160905e754a698513bfb7d8 | refs/heads/master | 2021-03-12T20:41:59.816448 | 2019-04-14T06:47:30 | 2019-04-14T06:47:30 | 18,814,777 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | class Solution:
# @param {int} n an integer
# @return {int} an integer
# NOTE: remember solution!!
def dropEggs(self, n):
# Write your code here
ans = 0
while ans * (ans + 1) / 2 < n:
ans += 1
return ans
def dropEggs(self, n):
import math
# the min integer meet: ans * (ans + 1) / 2 >= n
return int(math.ceil(math.sqrt(2 * n + 0.25) - 0.5))
# another solution is DP which will also apply to dropEggs II
| [
"jwyx88003@gmail.com"
] | jwyx88003@gmail.com |
efee169b60626091e6edbb58979c264141b84775 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_6_mask_unet/mask_5_7a_tv_sobel/tv_s004_sobel_k5_s080/step12 .py | 3efac9d72a8c90fca1e3e55139426debea4e8184 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,248 | py | '''
目前只有 step12 一定需要切換資料夾到 該komg_model所在的資料夾 才能執行喔!
'''
if(__name__ == "__main__"):
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step12_result_analyzer import Row_col_exps_analyzer
from step11 import *
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### 中間 -1 是為了長度轉index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] 是為了去掉 step1x_, 後來覺得好像改有意義的名字不去掉也行所以 改 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] 是為了去掉 mask_ ,前面的 mask_ 是為了python 的 module 不能 數字開頭, 隨便加的這樣子, 後來覺得 自動排的順序也可以接受, 所以 改0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir)
##########################################################################################################################################################################################################################################################################################
ana_dir = template_dir
##########################################################################################################################################################################################################################################################################################
"""
以下留下一些example這樣子
core_amount == 7 是因為 目前 see_amount == 7 ,想 一個core 一個see
task_amount == 7 是因為 目前 see_amount == 7
single_see_multiprocess == True 代表 see內 還要 切 multiprocess,
single_see_core_amount == 2 代表切2分
所以總共會有 7*2 = 14 份 process 要同時處理,
但建議不要用,已經測過,爆記憶體了
"""
### 直接看 dtd_hdr_mix 的狀況
#################################################################################################################################################################################################################
ana_name = "1_ch"
ch_analyze = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what="mask",
row_col_results=[mask_tv_s004_sobel_k5_s080_ch[:4],
mask_tv_s004_sobel_k5_s080_ch[4:]], show_in_img=False, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=512, img_w=512)\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=True, single_see_core_amount=6)
############################################
ana_name = "2_ep"
ep_analyze = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what="mask",
row_col_results=[mask_tv_s004_sobel_k5_s080_ep[:8],
mask_tv_s004_sobel_k5_s080_ep[8:]], show_in_img=False, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=512, img_w=512)\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=True, single_see_core_amount=6)
############################################
ana_name = "3_noC"
noC_and_add_analyze = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what="mask",
row_col_results=[mask_tv_s004_sobel_k5_s080_noC_and_add[:3] + [mask_tv_s004_sobel_k5_s080_ch[2]],
mask_tv_s004_sobel_k5_s080_noC_and_add[3:] + [mask_tv_s004_sobel_k5_s080_ch[2]]], show_in_img=False, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=512, img_w=512)\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=True, single_see_core_amount=6)
################################################################################################################################################################################################################# | [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
ad27de88b1a43ef76f350d6be322d8b7a45a4a3f | f5d77defeaa244ed8df517d13f21cd6f073adebc | /programas/Laboratorios/8_Archivos/uneArchivos.py | 2b1e581415d9988aa9a5f139b75f649a06d7766b | [] | no_license | lizetheP/PensamientoC | 02a02cf6d537e1637a933a4f3957995f6690d7d6 | 5d5740e0afa4fc487fdc5f2c466df63e9b4a664f | refs/heads/master | 2023-08-10T08:07:09.604983 | 2023-08-08T16:53:10 | 2023-08-08T16:53:10 | 200,893,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | def uneArchivos(origen, destino):
file1 = open(origen, "r")
file2 = open(destino, "a")
while True:
letra = file1.read(1)
file2.write(letra)
if not letra:
break
file1.close()
file2.close()
nombre = str(input("Introduce el nombre del archivo origen: "))
nombre2 = str(input("Introduce el nombre del archivo destino: "))
uneArchivos(nombre, nombre2) | [
"lizetheperez@gmail.com"
] | lizetheperez@gmail.com |
bd2e36959015fb718b9c87a58c8b3635a9e88f72 | 15e85b4d9527e7a87aded5b3c99ad9c785bca915 | /datcore-sdk/python/datcore_sdk/api/organizations_api.py | 0bcba63a4e47d34d39f06c9129cf1d9cb29b28d9 | [
"MIT"
] | permissive | mguidon/aiohttp-dsm | 4161f9977d3dffbb727aa26cce4e9fb347aa4e21 | 612e4c7f6f73df7d6752269965c428fda0276191 | refs/heads/master | 2020-03-30T09:03:49.791406 | 2018-10-02T07:05:35 | 2018-10-02T07:05:35 | 151,058,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77,038 | py | # coding: utf-8
"""
Blackfynn Swagger
Swagger documentation for the Blackfynn api # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from datcore_sdk.api_client import ApiClient
class OrganizationsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def add_to_organization(self, id, add_to_organization_request, **kwargs): # noqa: E501
"""adds members to an organization, notifies them over email # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_to_organization(id, add_to_organization_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: organization id (required)
:param AddToOrganizationRequest add_to_organization_request: (required)
:return: dict(str, AddUserResponse)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_to_organization_with_http_info(id, add_to_organization_request, **kwargs) # noqa: E501
else:
(data) = self.add_to_organization_with_http_info(id, add_to_organization_request, **kwargs) # noqa: E501
return data
    def add_to_organization_with_http_info(self, id, add_to_organization_request, **kwargs):  # noqa: E501
        """adds members to an organization, notifies them over email  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.add_to_organization_with_http_info(id, add_to_organization_request, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str id: organization id (required)
        :param AddToOrganizationRequest add_to_organization_request: (required)
        :return: dict(str, AddUserResponse)
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot every argument (named params plus the kwargs dict) before
        # any other local is created; the generated code below reads and
        # mutates this mapping in place.
        local_var_params = locals()

        # Keywords accepted by this endpoint, plus the client-level options.
        all_params = ['id', 'add_to_organization_request']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge recognized keyword options into the snapshot; anything the
        # generator did not declare for this endpoint is a caller error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_to_organization" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in local_var_params or
                local_var_params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `add_to_organization`")  # noqa: E501
        # verify the required parameter 'add_to_organization_request' is set
        if ('add_to_organization_request' not in local_var_params or
                local_var_params['add_to_organization_request'] is None):
            raise ValueError("Missing the required parameter `add_to_organization_request` when calling `add_to_organization`")  # noqa: E501

        collection_formats = {}

        # Substitute the {id} template variable in the request path.
        path_params = {}
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The request body is the serialized AddToOrganizationRequest.
        body_params = None
        if 'add_to_organization_request' in local_var_params:
            body_params = local_var_params['add_to_organization_request']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        # Hand off to the shared ApiClient, which performs serialization,
        # authentication and the HTTP round trip (or spawns the thread).
        return self.api_client.call_api(
            '/organizations/{id}/members', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='dict(str, AddUserResponse)',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def add_to_team(self, organization_id, id, add_to_team_request, **kwargs): # noqa: E501
"""adds a member to a team, notifies them over email # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_to_team(organization_id, id, add_to_team_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_id: organization id (required)
:param str id: team id (required)
:param AddToTeamRequest add_to_team_request: (required)
:return: list[UserDTO]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_to_team_with_http_info(organization_id, id, add_to_team_request, **kwargs) # noqa: E501
else:
(data) = self.add_to_team_with_http_info(organization_id, id, add_to_team_request, **kwargs) # noqa: E501
return data
    def add_to_team_with_http_info(self, organization_id, id, add_to_team_request, **kwargs):  # noqa: E501
        """adds a member to a team, notifies them over email  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.add_to_team_with_http_info(organization_id, id, add_to_team_request, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str organization_id: organization id (required)
        :param str id: team id (required)
        :param AddToTeamRequest add_to_team_request: (required)
        :return: list[UserDTO]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot every argument before any other local is created; the
        # generated code below reads and mutates this mapping in place.
        local_var_params = locals()

        # Keywords accepted by this endpoint, plus the client-level options.
        all_params = ['organization_id', 'id', 'add_to_team_request']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge recognized keyword options; unknown keywords are a caller error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_to_team" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'organization_id' is set
        if ('organization_id' not in local_var_params or
                local_var_params['organization_id'] is None):
            raise ValueError("Missing the required parameter `organization_id` when calling `add_to_team`")  # noqa: E501
        # verify the required parameter 'id' is set
        if ('id' not in local_var_params or
                local_var_params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `add_to_team`")  # noqa: E501
        # verify the required parameter 'add_to_team_request' is set
        if ('add_to_team_request' not in local_var_params or
                local_var_params['add_to_team_request'] is None):
            raise ValueError("Missing the required parameter `add_to_team_request` when calling `add_to_team`")  # noqa: E501

        collection_formats = {}

        # Substitute the {organizationId} and {id} path template variables.
        path_params = {}
        if 'organization_id' in local_var_params:
            path_params['organizationId'] = local_var_params['organization_id']  # noqa: E501
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The request body is the serialized AddToTeamRequest.
        body_params = None
        if 'add_to_team_request' in local_var_params:
            body_params = local_var_params['add_to_team_request']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        # Hand off to the shared ApiClient for serialization, auth and the
        # HTTP round trip (or thread spawn when async_req is set).
        return self.api_client.call_api(
            '/organizations/{organizationId}/teams/{id}/members', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[UserDTO]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def create_team(self, organization_id, create_group_request, **kwargs): # noqa: E501
"""creates a new team in an organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_team(organization_id, create_group_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_id: organization id (required)
:param CreateGroupRequest create_group_request: team to create (required)
:return: ExpandedTeamResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_team_with_http_info(organization_id, create_group_request, **kwargs) # noqa: E501
else:
(data) = self.create_team_with_http_info(organization_id, create_group_request, **kwargs) # noqa: E501
return data
    def create_team_with_http_info(self, organization_id, create_group_request, **kwargs):  # noqa: E501
        """creates a new team in an organization  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_team_with_http_info(organization_id, create_group_request, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str organization_id: organization id (required)
        :param CreateGroupRequest create_group_request: team to create (required)
        :return: ExpandedTeamResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot every argument before any other local is created; the
        # generated code below reads and mutates this mapping in place.
        local_var_params = locals()

        # Keywords accepted by this endpoint, plus the client-level options.
        all_params = ['organization_id', 'create_group_request']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge recognized keyword options; unknown keywords are a caller error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_team" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'organization_id' is set
        if ('organization_id' not in local_var_params or
                local_var_params['organization_id'] is None):
            raise ValueError("Missing the required parameter `organization_id` when calling `create_team`")  # noqa: E501
        # verify the required parameter 'create_group_request' is set
        if ('create_group_request' not in local_var_params or
                local_var_params['create_group_request'] is None):
            raise ValueError("Missing the required parameter `create_group_request` when calling `create_team`")  # noqa: E501

        collection_formats = {}

        # Substitute the {organizationId} path template variable.
        path_params = {}
        if 'organization_id' in local_var_params:
            path_params['organizationId'] = local_var_params['organization_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The request body is the serialized CreateGroupRequest.
        body_params = None
        if 'create_group_request' in local_var_params:
            body_params = local_var_params['create_group_request']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        # Hand off to the shared ApiClient for serialization, auth and the
        # HTTP round trip (or thread spawn when async_req is set).
        return self.api_client.call_api(
            '/organizations/{organizationId}/teams', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ExpandedTeamResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_organization_invite(self, organization_id, id, **kwargs): # noqa: E501
"""delete an invite to a particular organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_organization_invite(organization_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_id: (required)
:param str id: id of the invite to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_organization_invite_with_http_info(organization_id, id, **kwargs) # noqa: E501
else:
(data) = self.delete_organization_invite_with_http_info(organization_id, id, **kwargs) # noqa: E501
return data
    def delete_organization_invite_with_http_info(self, organization_id, id, **kwargs):  # noqa: E501
        """delete an invite to a particular organization  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_organization_invite_with_http_info(organization_id, id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str organization_id: (required)
        :param str id: id of the invite to delete (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot every argument before any other local is created; the
        # generated code below reads and mutates this mapping in place.
        local_var_params = locals()

        # Keywords accepted by this endpoint, plus the client-level options.
        all_params = ['organization_id', 'id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge recognized keyword options; unknown keywords are a caller error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_organization_invite" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'organization_id' is set
        if ('organization_id' not in local_var_params or
                local_var_params['organization_id'] is None):
            raise ValueError("Missing the required parameter `organization_id` when calling `delete_organization_invite`")  # noqa: E501
        # verify the required parameter 'id' is set
        if ('id' not in local_var_params or
                local_var_params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `delete_organization_invite`")  # noqa: E501

        collection_formats = {}

        # Substitute the {organizationId} and {id} path template variables.
        path_params = {}
        if 'organization_id' in local_var_params:
            path_params['organizationId'] = local_var_params['organization_id']  # noqa: E501
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # No request body and — unlike the sibling endpoints — no explicit
        # Accept header: this DELETE returns no content (response_type=None).
        body_params = None
        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        # Hand off to the shared ApiClient for serialization, auth and the
        # HTTP round trip (or thread spawn when async_req is set).
        return self.api_client.call_api(
            '/organizations/{organizationId}/invites/{id}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_team(self, organization_id, id, **kwargs): # noqa: E501
"""deletes a team # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_team(organization_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_id: organization id (required)
:param str id: team id (required)
:return: TeamNode
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_team_with_http_info(organization_id, id, **kwargs) # noqa: E501
else:
(data) = self.delete_team_with_http_info(organization_id, id, **kwargs) # noqa: E501
return data
    def delete_team_with_http_info(self, organization_id, id, **kwargs):  # noqa: E501
        """deletes a team  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_team_with_http_info(organization_id, id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str organization_id: organization id (required)
        :param str id: team id (required)
        :return: TeamNode
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot every argument before any other local is created; the
        # generated code below reads and mutates this mapping in place.
        local_var_params = locals()

        # Keywords accepted by this endpoint, plus the client-level options.
        all_params = ['organization_id', 'id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge recognized keyword options; unknown keywords are a caller error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_team" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'organization_id' is set
        if ('organization_id' not in local_var_params or
                local_var_params['organization_id'] is None):
            raise ValueError("Missing the required parameter `organization_id` when calling `delete_team`")  # noqa: E501
        # verify the required parameter 'id' is set
        if ('id' not in local_var_params or
                local_var_params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `delete_team`")  # noqa: E501

        collection_formats = {}

        # Substitute the {organizationId} and {id} path template variables.
        path_params = {}
        if 'organization_id' in local_var_params:
            path_params['organizationId'] = local_var_params['organization_id']  # noqa: E501
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        # Hand off to the shared ApiClient for serialization, auth and the
        # HTTP round trip (or thread spawn when async_req is set).
        return self.api_client.call_api(
            '/organizations/{organizationId}/teams/{id}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='TeamNode',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_members(self, id, **kwargs): # noqa: E501
"""get the members that belong to an organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_members(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: organization id (required)
:return: list[UserDTO]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_members_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_members_with_http_info(id, **kwargs) # noqa: E501
return data
    def get_members_with_http_info(self, id, **kwargs):  # noqa: E501
        """get the members that belong to an organization  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_members_with_http_info(id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str id: organization id (required)
        :return: list[UserDTO]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot every argument before any other local is created; the
        # generated code below reads and mutates this mapping in place.
        local_var_params = locals()

        # Keywords accepted by this endpoint, plus the client-level options.
        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge recognized keyword options; unknown keywords are a caller error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_members" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in local_var_params or
                local_var_params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `get_members`")  # noqa: E501

        collection_formats = {}

        # Substitute the {id} path template variable.
        path_params = {}
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        # Hand off to the shared ApiClient for serialization, auth and the
        # HTTP round trip (or thread spawn when async_req is set).
        return self.api_client.call_api(
            '/organizations/{id}/members', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[UserDTO]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_organization(self, id, **kwargs): # noqa: E501
"""get an organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_organization(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: organization id (required)
:return: ExpandedOrganizationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_organization_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_organization_with_http_info(id, **kwargs) # noqa: E501
return data
    def get_organization_with_http_info(self, id, **kwargs):  # noqa: E501
        """get an organization  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_organization_with_http_info(id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str id: organization id (required)
        :return: ExpandedOrganizationResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot every argument before any other local is created; the
        # generated code below reads and mutates this mapping in place.
        local_var_params = locals()

        # Keywords accepted by this endpoint, plus the client-level options.
        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge recognized keyword options; unknown keywords are a caller error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_organization" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in local_var_params or
                local_var_params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `get_organization`")  # noqa: E501

        collection_formats = {}

        # Substitute the {id} path template variable.
        path_params = {}
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        # Hand off to the shared ApiClient for serialization, auth and the
        # HTTP round trip (or thread spawn when async_req is set).
        return self.api_client.call_api(
            '/organizations/{id}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ExpandedOrganizationResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_organization_invites(self, organization_id, **kwargs): # noqa: E501
"""get all invites that belong to this organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_organization_invites(organization_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_id: (required)
:return: list[UserInviteDTO]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_organization_invites_with_http_info(organization_id, **kwargs) # noqa: E501
else:
(data) = self.get_organization_invites_with_http_info(organization_id, **kwargs) # noqa: E501
return data
    def get_organization_invites_with_http_info(self, organization_id, **kwargs):  # noqa: E501
        """get all invites that belong to this organization  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_organization_invites_with_http_info(organization_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str organization_id: (required)
        :return: list[UserInviteDTO]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot every argument before any other local is created; the
        # generated code below reads and mutates this mapping in place.
        local_var_params = locals()

        # Keywords accepted by this endpoint, plus the client-level options.
        all_params = ['organization_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge recognized keyword options; unknown keywords are a caller error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_organization_invites" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'organization_id' is set
        if ('organization_id' not in local_var_params or
                local_var_params['organization_id'] is None):
            raise ValueError("Missing the required parameter `organization_id` when calling `get_organization_invites`")  # noqa: E501

        collection_formats = {}

        # Substitute the {organizationId} path template variable.
        path_params = {}
        if 'organization_id' in local_var_params:
            path_params['organizationId'] = local_var_params['organization_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        # Hand off to the shared ApiClient for serialization, auth and the
        # HTTP round trip (or thread spawn when async_req is set).
        return self.api_client.call_api(
            '/organizations/{organizationId}/invites', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[UserInviteDTO]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_organizations(self, **kwargs): # noqa: E501
"""get a logged in user's organizations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_organizations(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool include_admins: whether or not to include owners & admins
:return: GetOrganizationsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_organizations_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_organizations_with_http_info(**kwargs) # noqa: E501
return data
    def get_organizations_with_http_info(self, **kwargs):  # noqa: E501
        """get a logged in user's organizations  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_organizations_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param bool include_admins: whether or not to include owners & admins
        :return: GetOrganizationsResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot every argument before any other local is created; the
        # generated code below reads and mutates this mapping in place.
        local_var_params = locals()

        # Keywords accepted by this endpoint, plus the client-level options.
        # This endpoint has no required parameters.
        all_params = ['include_admins']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge recognized keyword options; unknown keywords are a caller error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_organizations" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        # Optional include_admins flag is sent as the includeAdmins query
        # parameter; omitted entirely when the caller did not pass it.
        query_params = []
        if 'include_admins' in local_var_params:
            query_params.append(('includeAdmins', local_var_params['include_admins']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        # Hand off to the shared ApiClient for serialization, auth and the
        # HTTP round trip (or thread spawn when async_req is set).
        return self.api_client.call_api(
            '/organizations/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='GetOrganizationsResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_team(self, organization_id, id, **kwargs): # noqa: E501
"""gets the team for the organization # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_team(organization_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str organization_id: organization id (required)
:param str id: team id (required)
:return: ExpandedTeamResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_team_with_http_info(organization_id, id, **kwargs) # noqa: E501
else:
(data) = self.get_team_with_http_info(organization_id, id, **kwargs) # noqa: E501
return data
    def get_team_with_http_info(self, organization_id, id, **kwargs):  # noqa: E501
        """gets the team for the organization  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_team_with_http_info(organization_id, id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str organization_id: organization id (required)
        :param str id: team id (required)
        :return: ExpandedTeamResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot every argument before any other local is created; the
        # generated code below reads and mutates this mapping in place.
        local_var_params = locals()

        # Keywords accepted by this endpoint, plus the client-level options.
        all_params = ['organization_id', 'id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge recognized keyword options; unknown keywords are a caller error.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_team" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'organization_id' is set
        if ('organization_id' not in local_var_params or
                local_var_params['organization_id'] is None):
            raise ValueError("Missing the required parameter `organization_id` when calling `get_team`")  # noqa: E501
        # verify the required parameter 'id' is set
        if ('id' not in local_var_params or
                local_var_params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `get_team`")  # noqa: E501

        collection_formats = {}

        # Substitute the {organizationId} and {id} path template variables.
        path_params = {}
        if 'organization_id' in local_var_params:
            path_params['organizationId'] = local_var_params['organization_id']  # noqa: E501
        if 'id' in local_var_params:
            path_params['id'] = local_var_params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        # Hand off to the shared ApiClient for serialization, auth and the
        # HTTP round trip (or thread spawn when async_req is set).
        return self.api_client.call_api(
            '/organizations/{organizationId}/teams/{id}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ExpandedTeamResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_team_members(self, organization_id, id, **kwargs):  # noqa: E501
    """get the members that belong to a team  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_team_members(organization_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_id: organization id (required)
    :param str id: team id (required)
    :return: list[UserDTO]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request the payload only; both the sync and async paths make
    # the exact same delegate call, so a single return suffices.
    kwargs['_return_http_data_only'] = True
    return self.get_team_members_with_http_info(organization_id, id, **kwargs)  # noqa: E501
def get_team_members_with_http_info(self, organization_id, id, **kwargs):  # noqa: E501
    """get the members that belong to a team  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_team_members_with_http_info(organization_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_id: organization id (required)
    :param str id: team id (required)
    :return: list[UserDTO]
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when a required parameter is missing
    """
    # Reject unknown keyword arguments up front.  (The generated template
    # iterated via six.iteritems over locals(); plain dict iteration and
    # direct parameter checks are equivalent and idiomatic.)
    allowed = {'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_team_members" % key
            )
    # verify the required parameters are set
    if organization_id is None:
        raise ValueError("Missing the required parameter `organization_id` when calling `get_team_members`")  # noqa: E501
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `get_team_members`")  # noqa: E501

    path_params = {'organizationId': organization_id, 'id': id}
    # HTTP header `Accept`
    header_params = {'Accept': self.api_client.select_header_accept(['*/*'])}

    return self.api_client.call_api(
        '/organizations/{organizationId}/teams/{id}/members', 'GET',
        path_params,
        [],  # query_params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='list[UserDTO]',  # noqa: E501
        auth_settings=['api_key'],  # authentication setting
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def get_teams(self, id, **kwargs):  # noqa: E501
    """get the teams that belong to an organization  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_teams(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: organization id (required)
    :return: list[ExpandedTeamResponse]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Sync and async branches both forward to the same delegate, so a
    # single return covers both cases.
    kwargs['_return_http_data_only'] = True
    return self.get_teams_with_http_info(id, **kwargs)  # noqa: E501
def get_teams_with_http_info(self, id, **kwargs):  # noqa: E501
    """get the teams that belong to an organization  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_teams_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: organization id (required)
    :return: list[ExpandedTeamResponse]
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when the required parameter is missing
    """
    # Validate keyword arguments directly instead of the generated
    # six.iteritems/locals() scaffolding; behavior is identical.
    allowed = {'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_teams" % key
            )
    # verify the required parameter 'id' is set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `get_teams`")  # noqa: E501

    path_params = {'id': id}
    # HTTP header `Accept`
    header_params = {'Accept': self.api_client.select_header_accept(['*/*'])}

    return self.api_client.call_api(
        '/organizations/{id}/teams', 'GET',
        path_params,
        [],  # query_params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='list[ExpandedTeamResponse]',  # noqa: E501
        auth_settings=['api_key'],  # authentication setting
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def refresh_organization_invite(self, organization_id, id, **kwargs):  # noqa: E501
    """refresh an invite to a particular organization  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.refresh_organization_invite(organization_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_id: (required)
    :param str id: id of the invite to refresh (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both execution modes forward to the same delegate.
    kwargs['_return_http_data_only'] = True
    return self.refresh_organization_invite_with_http_info(organization_id, id, **kwargs)  # noqa: E501
def refresh_organization_invite_with_http_info(self, organization_id, id, **kwargs):  # noqa: E501
    """refresh an invite to a particular organization  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.refresh_organization_invite_with_http_info(organization_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_id: (required)
    :param str id: id of the invite to refresh (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when a required parameter is missing
    """
    # Direct kwargs validation replaces the generated six/locals() template.
    allowed = {'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method refresh_organization_invite" % key
            )
    # verify the required parameters are set
    if organization_id is None:
        raise ValueError("Missing the required parameter `organization_id` when calling `refresh_organization_invite`")  # noqa: E501
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `refresh_organization_invite`")  # noqa: E501

    path_params = {'organizationId': organization_id, 'id': id}

    return self.api_client.call_api(
        '/organizations/{organizationId}/invites/{id}', 'PUT',
        path_params,
        [],  # query_params
        {},  # header_params (no Accept header for this endpoint)
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['api_key'],  # authentication setting
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def remove_from_organization(self, organization_id, id, **kwargs):  # noqa: E501
    """removes a member from an organization  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_from_organization(organization_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_id: organization id (required)
    :param str id: member id (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both execution modes forward to the same delegate.
    kwargs['_return_http_data_only'] = True
    return self.remove_from_organization_with_http_info(organization_id, id, **kwargs)  # noqa: E501
def remove_from_organization_with_http_info(self, organization_id, id, **kwargs):  # noqa: E501
    """removes a member from an organization  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_from_organization_with_http_info(organization_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_id: organization id (required)
    :param str id: member id (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when a required parameter is missing
    """
    # Direct kwargs validation replaces the generated six/locals() template.
    allowed = {'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method remove_from_organization" % key
            )
    # verify the required parameters are set
    if organization_id is None:
        raise ValueError("Missing the required parameter `organization_id` when calling `remove_from_organization`")  # noqa: E501
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `remove_from_organization`")  # noqa: E501

    path_params = {'organizationId': organization_id, 'id': id}

    return self.api_client.call_api(
        '/organizations/{organizationId}/members/{id}', 'DELETE',
        path_params,
        [],  # query_params
        {},  # header_params (no Accept header for this endpoint)
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['api_key'],  # authentication setting
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def remove_from_team(self, organization_id, team_id, id, **kwargs):  # noqa: E501
    """removes a member from a team  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_from_team(organization_id, team_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_id: organization id (required)
    :param str team_id: team id (required)
    :param str id: member id (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both execution modes forward to the same delegate.
    kwargs['_return_http_data_only'] = True
    return self.remove_from_team_with_http_info(organization_id, team_id, id, **kwargs)  # noqa: E501
def remove_from_team_with_http_info(self, organization_id, team_id, id, **kwargs):  # noqa: E501
    """removes a member from a team  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.remove_from_team_with_http_info(organization_id, team_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_id: organization id (required)
    :param str team_id: team id (required)
    :param str id: member id (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when a required parameter is missing
    """
    # Direct kwargs validation replaces the generated six/locals() template.
    allowed = {'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method remove_from_team" % key
            )
    # verify the required parameters are set
    if organization_id is None:
        raise ValueError("Missing the required parameter `organization_id` when calling `remove_from_team`")  # noqa: E501
    if team_id is None:
        raise ValueError("Missing the required parameter `team_id` when calling `remove_from_team`")  # noqa: E501
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `remove_from_team`")  # noqa: E501

    path_params = {
        'organizationId': organization_id,
        'teamId': team_id,
        'id': id,
    }

    return self.api_client.call_api(
        '/organizations/{organizationId}/teams/{teamId}/members/{id}', 'DELETE',
        path_params,
        [],  # query_params
        {},  # header_params (no Accept header for this endpoint)
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['api_key'],  # authentication setting
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def update_member(self, organization_id, id, update_member_request, **kwargs):  # noqa: E501
    """update a member for an organization  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_member(organization_id, id, update_member_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_id: organization id (required)
    :param str id: member id (required)
    :param UpdateMemberRequest update_member_request: (required)
    :return: UserDTO
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both execution modes forward to the same delegate.
    kwargs['_return_http_data_only'] = True
    return self.update_member_with_http_info(organization_id, id, update_member_request, **kwargs)  # noqa: E501
def update_member_with_http_info(self, organization_id, id, update_member_request, **kwargs):  # noqa: E501
    """update a member for an organization  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_member_with_http_info(organization_id, id, update_member_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_id: organization id (required)
    :param str id: member id (required)
    :param UpdateMemberRequest update_member_request: (required)
    :return: UserDTO
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when a required parameter is missing
    """
    # Direct kwargs validation replaces the generated six/locals() template.
    allowed = {'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_member" % key
            )
    # verify the required parameters are set
    if organization_id is None:
        raise ValueError("Missing the required parameter `organization_id` when calling `update_member`")  # noqa: E501
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `update_member`")  # noqa: E501
    if update_member_request is None:
        raise ValueError("Missing the required parameter `update_member_request` when calling `update_member`")  # noqa: E501

    path_params = {'organizationId': organization_id, 'id': id}
    # HTTP header `Accept`
    header_params = {'Accept': self.api_client.select_header_accept(['*/*'])}

    return self.api_client.call_api(
        '/organizations/{organizationId}/members/{id}', 'PUT',
        path_params,
        [],  # query_params
        header_params,
        body=update_member_request,
        post_params=[],
        files={},
        response_type='UserDTO',  # noqa: E501
        auth_settings=['api_key'],  # authentication setting
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def update_organization(self, id, update_organization, **kwargs):  # noqa: E501
    """updates an organization  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_organization(id, update_organization, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: organization id (required)
    :param UpdateOrganization update_organization: organization to update (required)
    :return: ExpandedOrganizationResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both execution modes forward to the same delegate.
    kwargs['_return_http_data_only'] = True
    return self.update_organization_with_http_info(id, update_organization, **kwargs)  # noqa: E501
def update_organization_with_http_info(self, id, update_organization, **kwargs):  # noqa: E501
    """updates an organization  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_organization_with_http_info(id, update_organization, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: organization id (required)
    :param UpdateOrganization update_organization: organization to update (required)
    :return: ExpandedOrganizationResponse
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when a required parameter is missing
    """
    # Direct kwargs validation replaces the generated six/locals() template.
    allowed = {'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_organization" % key
            )
    # verify the required parameters are set
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `update_organization`")  # noqa: E501
    if update_organization is None:
        raise ValueError("Missing the required parameter `update_organization` when calling `update_organization`")  # noqa: E501

    path_params = {'id': id}
    # HTTP header `Accept`
    header_params = {'Accept': self.api_client.select_header_accept(['*/*'])}

    return self.api_client.call_api(
        '/organizations/{id}', 'PUT',
        path_params,
        [],  # query_params
        header_params,
        body=update_organization,
        post_params=[],
        files={},
        response_type='ExpandedOrganizationResponse',  # noqa: E501
        auth_settings=['api_key'],  # authentication setting
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def update_team(self, organization_id, id, **kwargs):  # noqa: E501
    """updates a team  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_team(organization_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_id: organization id (required)
    :param str id: team id (required)
    :return: TeamDTO
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both execution modes forward to the same delegate.
    kwargs['_return_http_data_only'] = True
    return self.update_team_with_http_info(organization_id, id, **kwargs)  # noqa: E501
def update_team_with_http_info(self, organization_id, id, **kwargs):  # noqa: E501
    """updates a team  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_team_with_http_info(organization_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str organization_id: organization id (required)
    :param str id: team id (required)
    :return: TeamDTO
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when a required parameter is missing
    """
    # Direct kwargs validation replaces the generated six/locals() template.
    allowed = {'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout'}
    for key in kwargs:
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_team" % key
            )
    # verify the required parameters are set
    if organization_id is None:
        raise ValueError("Missing the required parameter `organization_id` when calling `update_team`")  # noqa: E501
    if id is None:
        raise ValueError("Missing the required parameter `id` when calling `update_team`")  # noqa: E501

    path_params = {'organizationId': organization_id, 'id': id}
    # HTTP header `Accept`
    header_params = {'Accept': self.api_client.select_header_accept(['*/*'])}

    return self.api_client.call_api(
        '/organizations/{organizationId}/teams/{id}', 'PUT',
        path_params,
        [],  # query_params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='TeamDTO',  # noqa: E501
        auth_settings=['api_key'],  # authentication setting
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
| [
"guidon@itis.ethz.ch"
] | guidon@itis.ethz.ch |
784bb7180ef6d6cee3341ea995174a71a8b217e1 | a4e502e9487cf17c53f9f931ec0dbc12168fea52 | /packages/pyre/tracking/File.py | 4c9369af2e3cfd664a739d1c24e5d9541b5c50d4 | [
"BSD-3-Clause"
] | permissive | bryanvriel/pyre | bdc5dd59c46d53ff81f2ece532b9073ac3b65be1 | 179359634a7091979cced427b6133dd0ec4726ea | refs/heads/master | 2021-09-28T00:10:26.454282 | 2018-11-11T16:42:07 | 2018-11-11T16:42:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
# declaration
class File:
    """
    A locator that records a position (line/column) within a source file.
    """

    # keep instances small and attribute access fast
    __slots__ = "source", "line", "column"

    # meta methods
    def __init__(self, source, line=None, column=None):
        # record the location information
        self.source = source
        self.line = line
        self.column = column
        # all done
        return

    def __str__(self):
        # the source is always present; line and column are optional
        parts = ["file={!r}".format(str(self.source))]
        if self.line is not None:
            parts.append("line={.line!r}".format(self))
        if self.column is not None:
            parts.append("column={.column!r}".format(self))
        return ", ".join(parts)
# end of file
| [
"michael.aivazis@orthologue.com"
] | michael.aivazis@orthologue.com |
f2e6792f3d5959656ede35df98bb81357ceeaa40 | 5d1fe71ab6ca5810680039552e2b7c884c212738 | /jackdaw/utils/bhimport.py | 8a34b8a08e3b86a8089b879efc2fe97e7686d6ac | [] | no_license | zimshk/jackdaw | 76977c516a1426302840ec63659bdf0224898cbd | 86d927a0a1a56d0d8685513df7c41afb21e7c521 | refs/heads/master | 2022-12-26T04:32:06.674672 | 2020-10-05T09:54:00 | 2020-10-05T09:54:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,196 | py | import zipfile
import json
import codecs
import pprint
from jackdaw.dbmodel import *
from jackdaw.dbmodel.adinfo import ADInfo
from jackdaw.dbmodel.adcomp import Machine
from jackdaw.dbmodel.aduser import ADUser
from jackdaw.dbmodel.adgroup import Group
from jackdaw.dbmodel.adou import ADOU
class BHImport:
def __init__(self, db_conn = None, db_session = None):
    # connection / session settings; database setup itself is deferred
    # until setup_db() is invoked explicitly
    self.db_conn = db_conn
    self.db_session = db_session
    # input archive state
    self.zipfile = None
    self.files = None
    self.is_zip = False
    self.fd = {}   # filetype -> member name inside the archive
    # lookup tables populated by import_domains()
    self.ads = {}  # domain sid prefix -> ad_id
    self.adn = {}  # domain name -> ad_id
def setup_db(self):
    # create a session lazily; an externally supplied session wins
    if self.db_session is not None:
        return
    self.db_session = get_session(self.db_conn)
def import_machines(self):
    """Import BloodHound computer entries into the Machine table."""
    print('Importing machines!')
    for entry in self.get_file('computers')['computers']:
        props = entry['Properties']
        machine = Machine()
        # the domain portion of the sid keys into the domains imported earlier
        machine.ad_id = self.ads[props['objectsid'].rsplit('-', 1)[0]]
        # hostname part of the FQDN, with the machine-account '$' suffix
        machine.sAMAccountName = entry['Name'].split('.', 1)[0] + '$'
        machine.objectSid = props['objectsid']
        machine.description = props['description']
        machine.operatingSystemVersion = props['operatingsystem']
        self.db_session.add(machine)
    self.db_session.commit()
def import_users(self):
    """Import BloodHound user entries into the ADUser table."""
    print('Importing users!')
    for entry in self.get_file('users')['users']:
        props = entry['Properties']
        user = ADUser()
        # the domain portion of the sid keys into the domains imported earlier
        user.ad_id = self.ads[props['objectsid'].rsplit('-', 1)[0]]
        # sam account name part of NAME@DOMAIN
        user.name = entry['Name'].split('@', 1)[0]
        user.objectSid = props['objectsid']
        user.description = props['description']
        user.displayName = props['displayname']
        user.email = props['email']
        self.db_session.add(user)
    self.db_session.commit()
def import_sessions(self):
    """Import BloodHound session entries into the NetSession table.

    Entries that cannot be resolved to a known machine (or fail to
    parse) are skipped silently, matching the best-effort behavior of
    the original importer.
    """
    print('Importing sessions!')
    for session in self.get_file('sessions')['sessions']:
        try:
            host = session['ComputerName']
            # bracketed names (e.g. raw IPv6 literals) are not resolvable
            if host.startswith('['):
                continue
            ad_name = session['UserName'].rsplit('@', 1)[1]
            # hostname part only, with the machine-account '$' suffix
            cname = (host.split('.', 1)[0] if host.find('.') != -1 else host) + '$'
            machine_id = self.db_session.query(
                Machine.id
            ).filter_by(ad_id = self.adn[ad_name]
            ).filter(Machine.sAMAccountName == cname
            ).first()
            if machine_id is None:
                raise Exception('Could not find machine!')
            net_session = NetSession()
            net_session.machine_id = machine_id[0]
            net_session.username = session['UserName'].split('@', 1)[0]
            self.db_session.add(net_session)
        except Exception:
            # best-effort import: skip anything that does not resolve
            continue
    self.db_session.commit()
def import_ous(self):
    """Import BloodHound OU entries into the ADOU table.

    Entries that fail to parse are reported and skipped; everything else
    is committed in one transaction at the end.
    """
    print('Importing ous!')
    for ou in self.get_file('ous')['ous']:
        try:
            # OU names come in as NAME@DOMAIN.FQDN
            ad_name = ou['Name'].rsplit('@', 1)[1]
            m = ADOU()
            m.ad_id = self.adn[ad_name]
            m.name = ou['Name'].split('@', 1)[0]
            m.objectSid = ou['Properties']['objectsid']
            m.description = ou['Properties'].get('description', None)
            self.db_session.add(m)
        except Exception as e:
            # was: print/pprint/input() — the interactive input() pause
            # blocked unattended imports; just report and skip the entry
            print('Failed to import OU entry: %s' % e)
            continue
    self.db_session.commit()
def import_domains(self):
print('Importing domains!')
for domain in self.get_file('domains')['domains']:#['computers']:
#pprint.pprint(domain)
#input()
di = ADInfo()
di.name = domain['Name']
di.objectSid = domain['Properties']['objectsid']
self.db_session.add(di)
self.db_session.commit()
self.db_session.refresh(di)
self.ad_id = di.id
self.ads[di.objectSid] = di.id
self.adn[di.name] = di.id
def import_gpos(self):
print('Importing gpos!')
for gpo in self.get_file('gpos')['gpos']:
pprint.pprint(gpo)
input()
try:
ad_name = ou['Name'].rsplit('@', 1)[1]
m = ADOU()
m.ad_id = self.adn[ad_name]
m.name = ou['Name'].split('@', 1)[0]
m.objectSid = ou['Properties']['objectsid']
m.description = ou['Properties'].get('description', None)
self.db_session.add(m)
except Exception as e:
print(e)
pprint.pprint(ou)
input()
continue
self.db_session.commit()
def import_groups(self):
print('Importing groups!')
for groups in self.get_file('groups')['groups']:
#pprint.pprint(groups)
#input()
try:
ad_name = groups['Name'].rsplit('@', 1)[1]
m = Group()
m.ad_id = self.adn[ad_name]
m.name = groups['Name'].split('@', 1)[0]
m.objectSid = groups['Properties']['objectsid']
m.description = groups['Properties'].get('description', None)
self.db_session.add(m)
except Exception as e:
print(e)
pprint.pprint(groups)
input()
continue
self.db_session.commit()
def get_file(self, filetype):
if self.is_zip is True:
with zipfile.ZipFile(filepath, 'r') as zf:
with zf.open(self.fd[filetype]) as data:
return json.load(data)
@staticmethod
def from_zipfile(filepath):
bh = BHImport()
if not zipfile.is_zipfile(filepath):
raise Exception('The file on this path doesnt look like a valid zip file! %s' % filepath)
bh.is_zip = True
zip = zipfile.ZipFile(filepath, 'r')
for filename in zip.namelist():
if filename.find('_computers.json') != -1:
bh.fd['computers'] = filename
elif filename.find('_domains.json') != -1:
bh.fd['domains'] = filename
elif filename.find('_gpos.json') != -1:
bh.fd['gpos'] = filename
elif filename.find('_groups.json') != -1:
bh.fd['groups'] = filename
elif filename.find('_ous.json') != -1:
bh.fd['ous'] = filename
elif filename.find('_sessions.json') != -1:
bh.fd['sessions'] = filename
elif filename.find('_users.json') != -1:
bh.fd['users'] = filename
return bh
def from_folder(self, folderpath):
pass
def run(self):
#DO NOT CHANGE THIS ORDER!!!!
self.setup_db()
self.import_domains()
#self.import_groups()
#self.import_machines()
#self.import_users()
#self.import_sessions()
self.import_gpos()
#self.import_ous() #not working!
if __name__ == '__main__':
import sys
db_conn = 'sqlite:///bhtest.db'
filepath = sys.argv[1]
create_db(db_conn)
bh = BHImport.from_zipfile(filepath)
bh.db_conn = db_conn
bh.run()
| [
"info@skelsec.com"
] | info@skelsec.com |
c3ae3203d8281f0e944b529adfd94d0da0039498 | d08cf46d3e16ab8e6a958731168469ba38daf069 | /tests/test_la.py | b60f2699fb65d8f09970ee2b497d1629a323508c | [
"BSD-2-Clause"
] | permissive | spectralDNS/shenfun | ce808edc5258c896f2cccfbd88e67153e3f621c9 | bcda39d8d8e4741df1cafe719d81733cc1024def | refs/heads/master | 2023-07-27T20:29:57.075970 | 2023-07-11T12:33:04 | 2023-07-11T12:33:04 | 79,914,066 | 190 | 46 | BSD-2-Clause | 2022-05-11T19:10:33 | 2017-01-24T13:29:02 | Python | UTF-8 | Python | false | false | 1,261 | py | import numpy as np
import pytest
from shenfun import SparseMatrix, la
import warnings
warnings.filterwarnings('ignore')
N = 10
d = [
{0: np.arange(N)+1},
{0: -2, 2: 1},
{-1: 1, 0: -2, 1: 1},
{-2: 1, 0: -2, 2: 1},
{-2: 1, 0: -2, 2: 1, 4: 0.1},
{-4: 0.1, -2: 1, 0: -2, 2: 1, 4: 0.1}
]
@pytest.mark.parametrize('di', d)
def test_XDMA(di):
"""Testing
- DiagMA
- TwoDMA
- TDMA
- TDMA_O
- FDMA
- PDMA
"""
M = SparseMatrix(di, (N, N))
sol = la.Solver(M)
sol2 = la.Solve(M)
b = np.ones(N)
u_hat = np.zeros_like(b)
u_hat = sol(b, u_hat)
u_hat2 = np.zeros_like(b)
u_hat2 = sol2(b, u_hat2)
assert np.allclose(u_hat2, u_hat)
bh = np.ones((N, N))
uh = np.zeros_like(bh)
uh2 = np.zeros_like(bh)
uh = sol(bh, uh, axis=1)
uh2 = sol(bh, uh2, axis=1)
assert np.allclose(uh2, uh)
assert np.allclose(uh[0], u_hat)
uh = sol(bh, uh, axis=0)
uh2 = sol(bh, uh2, axis=0)
assert np.allclose(uh2, uh)
assert np.allclose(uh[:, 0], u_hat)
if __name__ == "__main__":
#test_solve('GC')
#test_TDMA()
#test_TDMA_O()
#test_DiagMA()
#test_PDMA('GC')
#test_FDMA()
#test_TwoDMA()
test_XDMA(d[1])
| [
"mikaem@math.uio.no"
] | mikaem@math.uio.no |
9e85a07333331e1c9399606d62b5558722bd154b | d2dcc2033847c7a5284b5c4d89a3660b0c21de12 | /applications/sptEditor/src/model/vd/axleCounter.py | 268141ed06aaf5f61f13e37976404529d7829852 | [] | no_license | shaxbee/eu07-spt | c0d27848041ed0511f9f3c32eddc7a6b28877cf9 | 78ae6e9cf4273aa1ca7e05db1326a1587f5eb3f1 | refs/heads/master | 2020-05-19T16:43:34.723930 | 2012-09-26T00:18:31 | 2012-09-26T00:18:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | '''
Created on 2009-09-29
@author: gfirlejczyk
'''
class AxleCounter(object):
'''
Class handling axle counter like point that defines borders of VD Group
'''
def __init__(self, id = None):
self.__id = id
self.__geometryPoint = (0,0,0)
self.__railTrackingID = 0
def __repr__(self):
return 'AxleCounter(id=%s, RailTrackingId=%s, GeometryPoint=%s)' %(
self.__id,
self.__geometryPoint,
self.__railTrackingID)
def setRailTracking(self,railTrackingId):
'''Setting up railTrackingId which is connected to axle counter'''
self.__railTrackingID = railTrackingId
def getRailTracking(self):
'''Returns RailTracking Id which is connected to axle counter'''
return self.__railTrackingID
def setGeometryPoint(self,geometryPoint):
'''Set geometry point in 3d where axle counter is putting down'''
self.__geometryPoint = geometryPoint
def getGeometryPoint(self):
'''Get geometry point in 3d where axle counter is putting down'''
return self.__geometryPoint
def getAxleCounterId(self):
'''Returns axle counter identification number'''
return self.__id | [
"devnull@localhost"
] | devnull@localhost |
f5231fa60f936daac57c69923c3542d899206ca6 | d1e88701f65692df9b9e9dcdd9ceb4620e8e0ea5 | /docs/conf.py | abba2785a2c692df1567185e035e2248457a9ea6 | [] | permissive | kiwnix/django-superform | 48e81fe5f93829969eb2e7cdcb472c5008a74e79 | 835897f1a5fd4dd2769793dda90aa5b1805c289d | refs/heads/master | 2023-03-09T08:05:45.232864 | 2021-02-03T05:28:12 | 2021-02-03T05:28:12 | 340,774,151 | 0 | 1 | BSD-3-Clause | 2021-02-20T23:16:28 | 2021-02-20T23:16:28 | null | UTF-8 | Python | false | false | 9,064 | py | # -*- coding: utf-8 -*-
#
# django-superform documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 7 10:47:24 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import codecs
from datetime import date
import re
import sys
from os import path
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
doc_path = path.dirname(path.abspath(__file__))
project_base_path = path.dirname(doc_path)
sys.path.insert(0, project_base_path)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-superform'
copyright = str(date.today().year) + u', Gregor Müllegger'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
def read(*parts):
return codecs.open(path.join(path.dirname(__file__), *parts),
encoding='utf-8').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
version_tuple = find_version(
path.join(project_base_path, 'django_superform', '__init__.py')).split('.')
#
# The short X.Y version.
version = '.'.join(version_tuple[:2])
# The full version, including alpha/beta/rc tags.
release = '.'.join(version_tuple)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-superformdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'django-superform.tex', u'django-superform Documentation',
u'Gregor Müllegger', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-superform', u'django-superform Documentation',
[u'Gregor Müllegger'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-superform', u'django-superform Documentation',
u'Gregor Müllegger', 'django-superform', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"gregor@muellegger.de"
] | gregor@muellegger.de |
3be998d3bcdd62746b22000503603354a0fb28a3 | a6ed990fa4326c625a2a02f0c02eedf758ad8c7b | /meraki/sdk/python/updateNetworkApplianceTrafficShapingUplinkSelection.py | cbd1010acb7d9d178e903f4416c79445dcbee416 | [] | no_license | StevenKitavi/Meraki-Dashboard-API-v1-Documentation | cf2352976c6b6c00c17a5f6442cedf0aeed46c22 | 5ed02a7def29a2ce455a3f2cfa185f76f44789f5 | refs/heads/main | 2023-03-02T08:49:34.846055 | 2021-02-05T10:31:25 | 2021-02-05T10:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,996 | py | import meraki
# Defining your API key as a variable in source code is not recommended
API_KEY = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0'
# Instead, use an environment variable as shown under the Usage section
# @ https://github.com/meraki/dashboard-api-python/
dashboard = meraki.DashboardAPI(API_KEY)
network_id = 'L_646829496481105433'
response = dashboard.appliance.updateNetworkApplianceTrafficShapingUplinkSelection(
network_id,
activeActiveAutoVpnEnabled=True,
defaultUplink='wan1',
loadBalancingEnabled=True,
wanTrafficUplinkPreferences=[{'trafficFilters': [{'type': 'custom', 'value': {'protocol': 'tcp', 'source': {'port': 'any', 'cidr': '192.168.1.0/24'}, 'destination': {'port': 'any', 'cidr': 'any'}}}], 'preferredUplink': 'wan2'}, {'trafficFilters': [{'type': 'custom', 'value': {'protocol': 'tcp', 'source': {'port': '1-1024', 'vlan': 10, 'host': 254}, 'destination': {'port': 'any', 'cidr': 'any'}}}], 'preferredUplink': 'wan1'}],
vpnTrafficUplinkPreferences=[{'trafficFilters': [{'type': 'applicationCategory', 'value': {'id': 'meraki:layer7/category/1'}}, {'type': 'application', 'value': {'id': 'meraki:layer7/application/33'}}, {'type': 'custom', 'value': {'protocol': 'tcp', 'source': {'port': 'any', 'cidr': '192.168.1.0/24'}, 'destination': {'port': 'any', 'cidr': 'any'}}}, {'type': 'custom', 'value': {'protocol': 'tcp', 'source': {'port': 'any', 'cidr': 'any'}, 'destination': {'port': 'any', 'network': 'L_23456789', 'vlan': 20, 'host': 200}}}, {'type': 'custom', 'value': {'protocol': 'tcp', 'source': {'port': 'any', 'cidr': 'any'}, 'destination': {'port': '1-1024', 'fqdn': 'www.google.com'}}}], 'preferredUplink': 'wan2', 'failOverCriterion': 'poorPerformance', 'performanceClass': {'type': 'custom', 'customPerformanceClassId': '123456'}}, {'trafficFilters': [{'type': 'application', 'value': {'id': 'meraki:layer7/application/9'}}], 'preferredUplink': 'defaultUplink'}, {'trafficFilters': [{'type': 'application', 'value': {'id': 'meraki:layer7/application/106'}}], 'preferredUplink': 'bestForVoIP'}, {'trafficFilters': [{'type': 'application', 'value': {'id': 'meraki:layer7/application/107'}}], 'preferredUplink': 'loadBalancing', 'performanceClass': {'type': 'builtin', 'builtinPerformanceClassName': 'VoIP'}}, {'trafficFilters': [{'type': 'application', 'value': {'id': 'meraki:layer7/application/162'}}], 'preferredUplink': 'loadBalancing', 'performanceClass': {'type': 'custom', 'customPerformanceClassId': '123456'}}, {'trafficFilters': [{'type': 'application', 'value': {'id': 'meraki:layer7/application/168'}}], 'preferredUplink': 'wan2', 'failOverCriterion': 'poorPerformance', 'performanceClass': {'type': 'builtin', 'builtinPerformanceClassName': 'VoIP'}}, {'trafficFilters': [{'type': 'application', 'value': {'id': 'meraki:layer7/application/171'}}], 'preferredUplink': 'wan2', 'failOverCriterion': 'poorPerformance', 'performanceClass': {'type': 'custom', 'customPerformanceClassId': '123456'}}]
)
print(response) | [
"shiychen@cisco.com"
] | shiychen@cisco.com |
bccb08932de8e91329d9be799b262b36c9254568 | 673517e68db4b2540ac3a908a6374aaaa72e0f27 | /synergine/synergy/event/Event.py | 82a31511b1891015a5bb6d189010374c641d9ab2 | [
"Apache-2.0"
] | permissive | buxx/synergine | 3a977b69bc35c1a5af1056f98028f8b7412795d2 | da05d762cdbc993362807d4851e1ca74784438ae | refs/heads/master | 2021-07-03T19:57:24.486164 | 2017-09-04T09:19:45 | 2017-09-04T09:19:45 | 23,734,878 | 6 | 2 | Apache-2.0 | 2021-06-10T14:15:26 | 2014-09-06T13:15:07 | Python | UTF-8 | Python | false | false | 2,423 | py | from synergine.core.exceptions import NotConcernedEvent
from synergine.core.simulation.mechanism.Mechanism import Mechanism
from synergine.cst import COL_ALL
class Event():
"""
Event are called by mechanisms and trig associated actions if conditions matches.
"""
_mechanism = Mechanism
"""Mechanism class who run this event with prepared parameters"""
_concern = COL_ALL
"""The COL id of concerned synergies objects"""
_each_cycle = 1
"""Event ca be executed each x cycle if needed"""
_first_cycle_force = False
"""Event will be executed at first cycle regardless of _each_cycle"""
def __init__(self, actions):
self._actions = actions
@classmethod
def get_mechanism(cls):
"""
:return: Mechanism class who will run this event
:rtype: Mechanism
"""
return cls._mechanism
@classmethod
def get_concern(cls):
"""
:return: COL name if concerned synergies objects
"""
return cls._concern
@classmethod
def get_each_cycle(cls):
"""
:return: The number of each cycle where execute this event
"""
return cls._each_cycle
@classmethod
def is_first_cycle_force(cls):
return cls._first_cycle_force
def observe(self, object_id, context, parameters={}):
"""
Return actions who have to be executed.
:param object_id: The id of observed synergy object
:param context: The Context
:param parameters: Mechanism prepared dict of parameters
:return: list of actions
:rtype: list (of Action)
"""
active_actions = []
try:
parameters = self._prepare(object_id, context, parameters)
for action in self._actions:
action_object = action(object_id, parameters)
active_actions.append(action_object)
except NotConcernedEvent:
pass # Object not concerned by this event
return active_actions
def _prepare(self, object_id, context, parameters={}):
"""
Return dict with parameters for actions
:param object_id: The id of observed synergy object
:param context: The Context
:param parameters: Mechanism prepared dict of parameters
:raise: NotConcernedEvent
:return:
"""
raise NotImplementedError() | [
"sevajol.bastien@gmail.com"
] | sevajol.bastien@gmail.com |
a877700a9eee5373e8e8075a715386a8c0cbcb9f | 7dd30cae84f19aca8125d5cb35b099cb32cbcb64 | /4-Object_Detection/YOLOV3/core/common.py | 75b29aafe3ed6ee1fab993824ad9f7a08b566e12 | [
"MIT"
] | permissive | Ray507/TensorFlow2.0-Examples | a5d7e38c10132a26203a4783cf539741953040a2 | 90037a846411aab5eb0fd6e74e699e8e58c78cc5 | refs/heads/master | 2020-06-21T18:49:46.249163 | 2019-07-18T06:00:06 | 2019-07-18T06:00:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,881 | py | #! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : common.py
# Author : YunYang1994
# Created date: 2019-07-11 23:12:53
# Description :
#
#================================================================
import tensorflow as tf
class BatchNormalization(tf.keras.layers.BatchNormalization):
"""
define BatchNormalization layers in 'tf' style !!!
"""
def call(self, x, training=False):
if not training:
training = tf.constant(False)
training = tf.logical_and(training, self.trainable)
return super().call(x, training)
def convolutional(input_layer, filters_shape, downsample=False, activate=True, bn=True):
if downsample:
input_layer = tf.keras.layers.ZeroPadding2D(((1, 0), (1, 0)))(input_layer)
padding = 'valid'
strides = 2
else:
strides = 1
padding = 'same'
conv = tf.keras.layers.Conv2D(filters=filters_shape[-1], kernel_size = filters_shape[0], strides=strides, padding=padding,
use_bias=not bn, kernel_regularizer=tf.keras.regularizers.l2(0.0005))(input_layer)
if bn: conv = BatchNormalization()(conv)
if activate == True: conv = tf.nn.leaky_relu(conv, alpha=0.1)
return conv
def residual_block(input_layer, input_channel, filter_num1, filter_num2):
short_cut = input_layer
conv = convolutional(input_layer, filters_shape=(1, 1, input_channel, filter_num1))
conv = convolutional(conv , filters_shape=(3, 3, filter_num1, filter_num2))
residual_output = short_cut + conv
return residual_output
def upsample(input_layer):
return tf.image.resize(input_layer, (input_layer.shape[1] * 2, input_layer.shape[2] * 2), method='nearest')
| [
"YunYang1994@github.com"
] | YunYang1994@github.com |
547f748a31131edfdd4d47aab9f0b1066b0077be | 4b87a0de0f43de2bde41f2590faac970c18fe482 | /api/android/v1/daily_salary/views.py | 63b2ac976436c9b036a2c2f97896f3a312229ea6 | [] | no_license | krishSona/testbackend | d0bc325776537d9814b9022b3538b5e8a840e6a4 | d87e050d02542c58876d4f81c2ea99815ab4160e | refs/heads/master | 2023-04-08T01:26:42.070058 | 2021-04-03T06:08:54 | 2021-04-03T06:08:54 | 354,214,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,594 | py | import json
from django.http import JsonResponse
from rest_framework import viewsets, permissions
from rest_framework.response import Response
from api.android.v1.daily_salary.serializers import WorkerSerializer
from workers.models import Worker, Advance
# def session(request):
# phone_number = request.GET.get('phone_number')
# if phone_number is not None:
# _worker = Worker.objects.filter(phone=phone_number).first()
# data = {"status": 200, "id": _worker.id}
# else:
# data = {"status": 400, "message": "आपका खाता पंजीकृत नहीं है। कृपया अपने ठेकेदार से संपर्क करें।"}
# return JsonResponse(data)
# def worker(request):
# worker_id = request.GET.get('worker_id')
# if worker_id is not None:
# _worker = Worker.objects.filter(id=worker_id).first()
# data = {"balance": "₹" + str(_worker.balance)}
# else:
# data = {"status": 400, "message": "आपका खाता पंजीकृत नहीं है। कृपया अपने ठेकेदार से संपर्क करें।"}
# return JsonResponse(data)
def transfer_to_bank(request):
worker_id = request.GET.get('worker_id')
if worker_id is not None:
_worker = Worker.objects.filter(id=worker_id).first()
_balance = _worker.balance
_worker.balance = 0
_worker.save()
_advance = Advance.objects.create(worker_id=_worker.id, account_id=_worker.account_id, amount=_balance,
utr="AXPS7170EHAG23G")
data = {"Bank Name:": _advance.account.ifscode.bank.name, "Bank A/C:": _advance.account.number,
"IFSC:": _advance.account.ifscode.code, "Deposited:": _advance.amount, "Txn. No.:": _advance.utr}
else:
data = {"status": 400, "message": "आपका खाता पंजीकृत नहीं है। कृपया अपने ठेकेदार से संपर्क करें।"}
return JsonResponse(data)
class WorkerViewSet(viewsets.ModelViewSet):
swagger_schema = None
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = Worker.objects.all().order_by('-id')
serializer_class = WorkerSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
queryset = Worker.objects.all().order_by('-id')
phone = self.request.query_params.get('phoneNumber', None)
if phone is not None:
queryset = queryset.filter(phone=phone)
return queryset
class AdvanceViewSet(viewsets.ModelViewSet):
swagger_schema = None
queryset = Advance.objects.all().order_by('-id')
permission_classes = [permissions.IsAuthenticated]
def create(self, request):
worker_id = request.data.get('workerId')
_advance = Advance()
# if worker_id is not None:
_worker = Worker.objects.filter(id=worker_id).first()
if _worker is not None:
_balance = _worker.balance
_worker.balance = 0
_worker.save()
_advance = Advance.objects.create(worker_id=_worker.id, account_id=_worker.account_id, amount=_balance,
utr="AXPS7170EHAG23G")
data = {'Bank Name: ': _advance.bank_name, 'Bank A/C: ': _advance.account_number, 'Deposited: ': _advance.amount, 'IFSC: ': _advance.ifsc, 'Txn. No.: ': _advance.utr}
return Response(data)
| [
"kali@dailysalary.in"
] | kali@dailysalary.in |
ed79d1c85a699c3fb5afa4325d3328de963e4a26 | 7762f4f7ae2c5703f89780ff416bd1d300388c2c | /stompy/restriction/body.py | 47b9ee83e38204c125ac2aa5a756de00805a29a6 | [] | no_license | braingram/stompy | 36463d6e90bd7d475060c7866d678cad9e5a5693 | 7a92e670c88b500a5d3300863075ef4102c7ec02 | refs/heads/master | 2020-04-12T05:37:44.920930 | 2020-03-15T19:18:22 | 2020-03-15T19:18:22 | 61,507,690 | 3 | 4 | null | 2019-02-23T19:17:25 | 2016-06-20T01:00:47 | Python | UTF-8 | Python | false | false | 13,656 | py | #!/usr/bin/env python
"""
One issue is not knowing the non-halt target during halt (because set_target
is used for both). So legs should have
target: path to walk when walking
halted: if halted (don't want)
I want this to be modular so I can write tests that run in
a perfect simulated environment (that doesn't require bullet)
so I can validate changes.
It should also be compatible with bullet to allow for other tests
Things to remove (from body):
- odometer
Body needs the following:
- halt/unhalt state
- current 'walk' target (curve or translate)
- current 'halt' target (always stop?)
- enable state
- feet (for sending plans, attaching callbacks, etc)
- foot centers (that might be offset)
- arbitrate leg res state changes (lift -> swing etc)
Leg needs the following
- halt/unhalt state
- current 'walk' target [in leg coords]
- current 'halt' target [in leg coords]
- enable?
- foot (for sending plans, attaching callbacks, etc)
- foot center (possibly offset)
- joint info (limits, xyz, etc)
- joint configuration (angles, foot xyz)
- time since last lift
- restriction modifier
- loaded/unloaded height
Supply this with a stance plan in body coordinates;
it will produce plans in body coordinates.
Restriction will be updated with foot coordinates;
it will produce 'requests' for plans that will be 'accepted'.
"""
import math
from .. import consts
from .. import kinematics
from . import leg
from .. import log
from . import odometer
from .. import signaler
# Tunable parameters for the restriction controller. These are registered
# under the 'res' namespace (see Body.__init__ via set_param_from_dictionary),
# so e.g. 'r_thresh' is addressed as 'res.r_thresh'.
parameters = {
    # slow down plan proportional to most restricted leg
    'speed_by_restriction': False,
    # threshold at which a leg is considered 'restricted' and could be lifted
    'r_thresh': 0.4,
    # if restricted by more than this, halt lateral movement
    'r_max': 0.8,
    # allow this many feet up at a time
    'max_feet_up': 1,
    # allow this much slop (in inches) between actual and target body height
    'height_slop': 3.,

    # joint limit restriction shape parameters
    'fields.joint_angle.eps': 0.3,
    'fields.joint_angle.range': 0.9,  # limit movement to ratio of total range
    'fields.joint_angle.inflection': 0.4,

    # calf angle restriction shape parameters
    'fields.calf_angle.eps': 0.3,
    'fields.calf_angle.inflection': 0.4,
    'fields.calf_angle.max': 30,

    # min distance from foot to hip restriction shape parameter
    'fields.min_hip.eps': 0.15,
    # max restriction (and avoid) this many inches from the min_hip_distance
    'fields.min_hip.buffer': 10.0,

    # distance from foot to 'center' restriction shape parameters
    'fields.center.eps': 0.1,
    'fields.center.inflection': 5.,
    'fields.center.radius': 30.,

    # angle (degrees from vertical = 0) of calf when foot at center position
    'target_calf_angle': 10.0,

    # lift foot this many inches off the ground
    'lift_height': 12.0,
    # keep body [hip to thigh pins] this many inches off ground
    'lower_height': -40.0,
    # min/max lower height setting available on slider
    'min_lower_height': -70,
    'max_lower_height': -40,

    # if leg sees < this many lbs, consider unloaded (during lifted)
    'unloaded_weight': 600.,
    # if leg sees > this many lbs, consider loaded (during lowering)
    'loaded_weight': 400.,

    # finish swing when within this many inches of target
    'swing_slop': 5.0,

    # ratio of actual step size to maximum step size (1.0)
    'step_ratio': 0.6,

    # if re-locating a leg moves less than many this inches, don't lift
    'min_step_size': 6.0,
}

# UI/validation metadata (e.g. slider ranges) for a subset of the parameters
# above; registered per-key as 'res.<name>' via param.set_meta in
# Body.__init__.
parameter_metas = {
    'max_feet_up': {'min': 0, 'max': 3},
}
class BodyTarget(object):
    """Desired body motion: rotate about a point at a speed with a z rate.

    Attributes:
        rotation_center: (x, y) point [body coordinates] to rotate around
        speed: motion speed scalar
        dz: vertical (z) rate component
    """
    def __init__(self, rotation_center, speed, dz):
        self.rotation_center = rotation_center
        self.speed = speed
        self.dz = dz

    def __eq__(self, other):
        # The original attribute-based comparison raised AttributeError for
        # any non-BodyTarget operand other than None. Returning
        # NotImplemented lets Python fall back to identity comparison
        # (so `target == None` and `target == 5` are simply False).
        if not isinstance(other, BodyTarget):
            return NotImplemented
        return (
            (self.rotation_center == other.rotation_center) and
            (self.speed == other.speed) and
            (self.dz == other.dz))

    def __ne__(self, other):
        # Explicit inverse so `!=` agrees with `==` even on Python 2
        # (Python 3 would derive this automatically).
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __repr__(self):
        return (
            "BodyTarget(%r, %r, %r)" %
            (self.rotation_center, self.speed, self.dz))
class Body(signaler.Signaler):
    """Whole-body gait coordinator built on per-leg 'restriction' control.

    Owns one Foot controller per leg, wires up their 'restriction' and
    'state' callbacks, and decides when legs may lift and when the whole
    body must halt (some planted foot too restricted to keep moving).
    """
    def __init__(self, legs, param):
        """Takes leg controllers"""
        super(Body, self).__init__()
        self.odo = odometer.Odometer()
        self.logger = log.make_logger('Res-Body')
        self.param = param
        # register this module's tunables (and their UI metadata) under the
        # 'res.' parameter namespace
        self.param.set_param_from_dictionary('res', parameters)
        [
            self.param.set_meta('res.%s' % (k, ), parameter_metas[k])
            for k in parameter_metas]
        self.legs = legs
        self.feet = {}
        self.halted = False
        self.enabled = False
        self.target = None
        inds = sorted(self.legs)
        # neighbors maps leg number -> [previous, next] leg numbers in a
        # ring ordered by sorted leg index (wraps around at both ends)
        self.neighbors = {}
        if len(inds) > 1:
            for (i, n) in enumerate(inds):
                if i == 0:
                    self.neighbors[n] = [
                        inds[len(inds) - 1], inds[i+1]]
                elif i == len(inds) - 1:
                    self.neighbors[n] = [inds[i - 1], inds[0]]
                else:
                    self.neighbors[n] = [inds[i - 1], inds[i + 1]]
        for i in self.legs:
            self.feet[i] = leg.Foot(self.legs[i], self.param)
            # the ln=i default argument binds the current leg number into
            # each callback (avoids the late-binding closure pitfall)
            self.feet[i].on(
                'restriction', lambda s, ln=i: self.on_restriction(s, ln))
            self.feet[i].on(
                'state', lambda s, ln=i: self.on_foot_state(s, ln))
        #print("Feet:", self.feet)
        self.disable()
    def set_halt(self, value):
        """Set/clear the body-wide halt flag and mirror it to every foot."""
        self.halted = value
        for i in self.feet:
            self.feet[i].set_halt(value)
        # odometry must not accumulate while halted
        self.odo.enabled = not value
        self.trigger('halt', value)
    def enable(self, foot_states):
        """Enable the body: clear any halt, reset odometer and all feet.

        NOTE(review): foot_states is currently unused -- see TODO below.
        """
        self.logger.debug("enable")
        self.enabled = True
        self.set_halt(False)
        # TODO always reset odometer on enable?
        self.odo.reset()
        # TODO set foot states, target?
        for i in self.feet:
            self.feet[i].reset()
    def offset_foot_centers(self, dx, dy):
        """Shift every foot's 'center' target by (dx, dy) given in the body
        frame; the offset is rotated into each leg's frame first."""
        for i in self.feet:
            ldx, ldy, _ = kinematics.body.body_to_leg_rotation(i, dx, dy, 0.)
            # TODO limit to inside limits
            # don't allow -X offset?
            if self.param['limit_center_x_shifts'] and ldx < 0:
                ldx = 0
            self.feet[i].center_offset = (ldx, ldy)
    def calc_stance_speed(self, bxy, mag):
        """Return the angular stance speed about rotation point bxy.

        Scales the configured foot speed by mag, converts it to an angular
        speed using the furthest foot's radius, clamps to the maximum
        angular speed, and optionally scales down by current restriction.
        """
        # scale to pid future time ms
        speed = (
            mag * self.param['speed.foot'] *
            self.param['speed.scalar'] * consts.PLAN_TICK)
        # find furthest foot
        x, y = bxy
        z = 0.
        mr = None
        for i in self.feet:
            tx, ty, tz = kinematics.body.body_to_leg(i, x, y, z)
            r = tx * tx + ty * ty + tz * tz
            if mr is None or r > mr:
                mr = r
        # mr held the squared distance until here
        mr = math.sqrt(mr)
        # account for radius sign
        rspeed = speed / mr
        max_rspeed = (
            self.param['speed.foot'] / self.param['arc_speed_radius'] *
            self.param['speed.scalar'])
        if abs(rspeed) > max_rspeed:
            print("Limiting because of angular speed")
            rspeed = math.copysign(max_rspeed, rspeed)
        # TODO this should adjust speed on times OTHER than set_target
        if self.param['res.speed_by_restriction']:
            rs = self.get_speed_by_restriction()
        else:
            rs = 1.
        return rspeed * rs
    def set_target(self, target=None, update_swing=True):
        """Set the body motion target (a BodyTarget) and push it to feet.

        With target=None the current target is re-applied.
        NOTE(review): update_swing is only logged here, not forwarded --
        confirm whether the argument is still needed.
        """
        if target is None:
            target = self.target
        if not isinstance(target, BodyTarget):
            raise ValueError("Body.set_target requires BodyTarget")
        self.logger.debug({"set_target": (target, update_swing)})
        self.target = target
        if target.dz != 0.0:
            # TODO update stand height
            self.odo.set_target(self.target)  # TODO fix odometer
            pass
        for i in self.feet:
            self.feet[i].set_target(target)
        return
    def disable(self):
        """Disable the body: clear the enabled flag and idle every foot."""
        self.logger.debug("disable")
        self.enabled = False
        for i in self.feet:
            self.feet[i].set_state(None)
    def get_speed_by_restriction(self):
        """Return a 0..1 speed scale: 1 - max restriction over planted feet
        (feet in 'swing' or 'lower' are excluded)."""
        rmax = max([
            self.feet[i].restriction['r'] for i in self.feet
            if self.feet[i].state not in ('swing', 'lower')])
        return max(0., min(1., 1. - rmax))
    def on_foot_state(self, state, leg_number):
        """Callback for per-foot state changes (currently a no-op)."""
        # TODO update 'support' legs
        pass
    def on_restriction(self, restriction, leg_number):
        """Callback for per-foot restriction updates; the gait 'brain'.

        From the reporting foot's restriction dict (keys 'r' = current
        restriction, 'nr' = restriction at the next planned position) this
        decides whether to un-halt, halt, or lift the reporting foot.
        """
        if not self.enabled:
            return
        # only update odometer when not estopped
        self.odo.update()
        # NOTE(review): 'self.feet[leg_number] in (...)' below compares a
        # Foot object against state strings, so that clause is always
        # False; it probably should be 'self.feet[leg_number].state' --
        # confirm before relying on this unhalt condition.
        if (
                self.halted and
                (
                    restriction['r'] < self.param['res.r_max'] or
                    self.feet[leg_number] in ('wait', 'swing', 'lower') or
                    restriction['nr'] < restriction['r'])):
            # unhalt?
            maxed = False
            for i in self.feet:
                # make sure foot is not in swing (or lower?)
                #if self.feet[i].state in ('swing', 'lower', 'wait'):
                if self.feet[i].state in ('swing', 'lower', 'wait'):
                    continue
                r = self.feet[i].restriction
                if r['nr'] < r['r']:  # moving to a less restricted spot
                    continue
                if r['r'] > self.param['res.r_max']:
                    maxed = True
            if not maxed:
                self.logger.debug({
                    "unhalt": {
                        'restriction': {
                            i: self.feet[i].restriction for i in self.feet},
                        'states': {
                            i: self.feet[i].state for i in self.feet},
                        #'_pre_halt_target': self._pre_halt_target,
                    }})
                self.set_halt(False)
            return
        # halt when a planted foot is past r_max and headed somewhere at
        # least as restricted
        if (
                restriction['r'] > self.param['res.r_max'] and
                (not self.halted) and
                (self.feet[leg_number].state not in ('wait', 'swing', 'lower')) and
                restriction['nr'] >= restriction['r']):
            self.set_halt(True)
            return
        # TODO scale stance speed by restriction?
        if (
                (restriction['r'] > self.param['res.r_thresh']) and
                self.feet[leg_number].state == 'stance'):
            #if self.halted:
            #    print(
            #        leg_number, self.feet[leg_number].state,
            #        restriction)
            # lift?
            # check n_feet up
            states = {i: self.feet[i].state for i in self.feet}
            n_up = len([
                s for s in states.values() if s not in ('stance', 'wait')])
            # check if neighbors are up
            if len(self.neighbors.get(leg_number, [])) == 0:
                #if self.halted:
                #    print("halted but no neighbors")
                return
            ns = self.neighbors[leg_number]
            n_states = [states[n] for n in ns]
            ns_up = len([s for s in n_states if s not in ('stance', 'wait')])
            # check if any other feet are restricted:
            last_lift_times = {}
            for ln in self.feet:
                if ln == leg_number:
                    last_lift_times[ln] = self.feet[ln].last_lift_time
                    continue
                if states[ln] not in ('stance', 'wait'):
                    continue
                if (
                        self.feet[ln].restriction is not None and
                        self.feet[ln].restriction['r'] >
                        self.param['res.r_thresh']):
                    # found another restricted foot
                    #other_restricted.append(ln)
                    last_lift_times[ln] = self.feet[ln].last_lift_time
            #if self.halted:
            #    print("last_lift_times: %s" % last_lift_times)
            #    print("ns_up: %s, n_up: %s" % (ns_up, n_up))
            # yes? pick least recently lifted
            if ns_up == 0 and n_up < self.param['res.max_feet_up']:
                n_can_lift = self.param['res.max_feet_up'] - n_up
                #if self.halted:
                #    print("n_can_lift: %s" % n_can_lift)
                #if self.halted:
                #    self.feet[leg_number].set_state('lift')
                if len(last_lift_times) > n_can_lift:
                    # TODO prefer lifting of feet with
                    # restriction_modifier != 0
                    # only allow this foot if it was moved later than
                    # the other restricted feet
                    ln_by_lt = sorted(
                        last_lift_times, key=lambda ln: last_lift_times[ln])
                    #if self.halted:
                    #    print(
                    #        "ln_by_lt: %s[%s]" %
                    #        (ln_by_lt, ln_by_lt[:n_can_lift+1]))
                    if leg_number in ln_by_lt[:n_can_lift+1]:
                        if self.feet[leg_number].should_lift():
                            self.feet[leg_number].set_state('lift')
                else:
                    #if self.halted:
                    #    print("lift %s" % leg_number)
                    # check if should lift based on swing target being
                    # > N in from current position
                    if self.feet[leg_number].should_lift():
                        self.feet[leg_number].set_state('lift')
| [
"brettgraham@gmail.com"
] | brettgraham@gmail.com |
b6a65345f6f8b4e0cf98221415f4497b86696cbe | db1e48d5f7b1b51c5a535b2a9477e350ad7d35c7 | /angr/engines/pcode/arch/ArchPcode_PIC_12_LE_16_PIC_12C5xx.py | 2674614f8e7b1d4e0fc0450e838d0ee8201dd2af | [
"BSD-2-Clause"
] | permissive | helloexp/angr | f4540c737c9e828e1bdd95bae0758558dd742143 | 724f2172bec21f51b2f798ab5613cf86bef62dae | refs/heads/master | 2022-01-31T03:15:09.922425 | 2022-01-15T06:34:54 | 2022-01-15T06:34:54 | 216,943,426 | 0 | 0 | BSD-2-Clause | 2019-10-23T01:39:41 | 2019-10-23T01:39:40 | null | UTF-8 | Python | false | false | 1,210 | py | ###
### This file was automatically generated
###
from archinfo.arch import register_arch, Endness, Register
from .common import ArchPcode
class ArchPcode_PIC_12_LE_16_PIC_12C5xx(ArchPcode):
    """Auto-generated archinfo description of the PIC-12C5xx core
    (little-endian, 16-bit) as modeled by the P-code language spec."""
    name = 'PIC-12:LE:16:PIC-12C5xx'
    pcode_arch = 'PIC-12:LE:16:PIC-12C5xx'
    description = 'PIC-12C5xx'
    bits = 16
    ip_offset = 0x0
    sp_offset = 0x2
    # this core has no separate base pointer; alias it to the stack pointer
    bp_offset = sp_offset
    instruction_endness = Endness.LE
    # Register(name, size_in_bytes, offset) entries mirror the generated
    # register file layout; 'pc' is additionally aliased as 'ip'.
    register_list = [
        Register('indf', 1, 0x0),
        Register('tmr0', 1, 0x1),
        Register('pcl.0', 1, 0x2),
        Register('status.0', 1, 0x3),
        Register('fsr.0', 1, 0x4),
        Register('osccal', 1, 0x5),
        Register('gpio', 1, 0x6),
        Register('pc', 2, 0x0, alias_names=('ip',)),
        Register('stkptr', 1, 0x2),
        Register('w', 1, 0x3),
        Register('pcl', 1, 0x4),
        Register('fsr', 1, 0x5),
        Register('status', 1, 0x6),
        Register('pa', 1, 0x7),
        Register('z', 1, 0x8),
        Register('dc', 1, 0x9),
        Register('c', 1, 0xa),
        Register('option', 1, 0xb),
        Register('tris', 1, 0x20)
    ]
# Make the generated arch discoverable by name through archinfo's registry.
register_arch(['pic-12:le:16:pic-12c5xx'], 16, Endness.LE, ArchPcode_PIC_12_LE_16_PIC_12C5xx)
| [
"noreply@github.com"
] | helloexp.noreply@github.com |
a2274f3e0f46f3c92f5974b51f2bb1bb09735353 | 914453d01d6e403fcdb2d93073ae7d3aa6552ca6 | /python_challenges/challenge2.py | 3b49e01617329ee5fc6b390989bf96bd92c1c01a | [] | no_license | AgustinParmisano/hackerank | 684bc5c1d43ae0288328caac6ae7a5c8ca043aa1 | 648563f77c21985602ccab85936b434e6bd1188e | refs/heads/master | 2020-04-28T03:09:04.582595 | 2019-03-11T20:44:04 | 2019-03-11T20:44:04 | 174,925,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | def swap_case(s):
r = ''
for i in s:
i = i.upper() if i.islower() else i.lower()
r+=i
return(r)
| [
"agustinparmisano@gmail.com"
] | agustinparmisano@gmail.com |
7e56d4685227decfee31b1786f9de6321591bb55 | 3637fe729395dac153f7abc3024dcc69e17f4e81 | /personal/pythonic/rules/rule2.py | aefdd616b730723104181fffd2d3a6dc46a081e5 | [] | no_license | madmonkyang/cda-record | daced6846c2456f20dddce7f9720602d1583a02a | c431e809e8d0f82e1bca7e3429dd0245560b5680 | refs/heads/master | 2023-06-15T08:16:46.230569 | 2021-07-15T16:27:36 | 2021-07-15T16:27:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | # ---------------- 避免劣化代码 ----------------
"""
1)避免只用大小写来区分不同的对象
2)避免使用容易引起混淆的名称,如 element, list, dict
3)不要害怕过长的变量名(不追求过分缩写)
"""
# Bad:
# 不推荐使用 list, element 这种变量名
def funA(list, num):
for element in list:
if num == element:
return True
else:
pass
# Good:
def find_num(search_list, num):
for listValue in search_list:
if num == listValue:
return True
else:
pass
# ---------------- 深入认识Python ----------------
"""
不好的风格:
if foo == 'blah': do_blah_thing()
do_one(); do_two(); do_three()
Pythonic风格:
if foo == 'blah':
do_blah_thing()
do_one()
do_two()
do_three()
"""
| [
"silentbalanceyh@126.com"
] | silentbalanceyh@126.com |
b63277ee93f4fd83c0e024ffaf151d39ce83f2bf | 8c39ba92cc71ff78242477d3256f6ee3daa872c7 | /conan/tools/build/cross_building.py | b446d562e003d06e75c9beea85008625a8b25035 | [
"MIT"
] | permissive | conan-io/conan | eb4427e534a0edbb1fb06c753d5d9587faaef93c | bac455d1329b6744cdc41747354a727c9233179f | refs/heads/release/2.0 | 2023-09-03T18:51:54.345761 | 2023-09-03T17:30:43 | 2023-09-03T17:30:43 | 47,190,624 | 7,754 | 1,182 | MIT | 2023-09-14T15:16:09 | 2015-12-01T13:17:02 | Python | UTF-8 | Python | false | false | 2,043 | py |
def cross_building(conanfile=None, skip_x64_x86=False):
    """
    Decide whether the *build* and *host* settings describe a cross build.

    :param conanfile: The current recipe object. Always use ``self``.
    :param skip_x64_x86: When True, 64-to-32-bit builds on the same OS
        (x86_64 -> x86, sparcv9 -> sparc, ppc64 -> ppc32) are NOT treated
        as cross building.
    :return: ``True`` if we are cross building, ``False`` otherwise.
    """
    build_os = conanfile.settings_build.get_safe('os')
    build_arch = conanfile.settings_build.get_safe('arch')
    host_os = conanfile.settings.get_safe("os")
    host_arch = conanfile.settings.get_safe("arch")

    # Optional carve-out: same OS, 64-bit building for its 32-bit sibling.
    if skip_x64_x86 and host_os is not None and build_os == host_os \
            and host_arch is not None:
        narrowing_pairs = (("x86_64", "x86"),
                           ("sparcv9", "sparc"),
                           ("ppc64", "ppc32"))
        if (build_arch, host_arch) in narrowing_pairs:
            return False
    if host_os is not None and build_os != host_os:
        return True
    return host_arch is not None and build_arch != host_arch
def can_run(conanfile):
    """
    Whether binaries built for the host settings can execute here.

    Honors the ``tools.build.cross_building:can_run`` conf when it is set
    (useful when one architecture can execute more than one target, e.g.
    Mac M1 machines running both ``armv8`` and ``x86_64``); otherwise
    falls back to "runnable iff not cross building".

    :param conanfile: The current recipe object. Always use ``self``.
    :return: the conf's boolean when defined, else ``not cross_building(...)``.
    """
    # Issue related: https://github.com/conan-io/conan/issues/11035
    configured = conanfile.conf.get("tools.build.cross_building:can_run",
                                    check_type=bool)
    if configured is not None:
        return configured
    return not cross_building(conanfile)
| [
"noreply@github.com"
] | conan-io.noreply@github.com |
acb23822e7825dd59688e84c89494509cdefc861 | ce005d2e7c72cf74491e099c047873bf56c2f0cd | /pymedphys/_vendor/pylinac/core/utilities.py | dd09826632bee13b79df9871f984e67cce4815b1 | [
"Apache-2.0",
"MIT"
] | permissive | ikevin2810/pymedphys | c645c6baccefd7a26fff37775dc72ddf2a14e9f5 | ed408fc6d20e8640dfbd434b681b3b0828dd360d | refs/heads/master | 2022-11-24T06:10:56.193835 | 2020-07-09T07:20:23 | 2020-07-09T07:20:23 | 278,422,520 | 1 | 0 | Apache-2.0 | 2020-07-09T16:59:23 | 2020-07-09T16:59:22 | null | UTF-8 | Python | false | false | 7,175 | py | # Copyright (c) 2014-2019 James Kerns
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Adapted from https://github.com/jrkerns/pylinac/tree/698254258ff4cb87812840c42b34c93ae32a4693
# pylint: disable = redefined-argument-from-local, unidiomatic-typecheck
"""Utility functions for pylinac."""
import collections
import decimal
import os
import os.path as osp
import struct
import subprocess
from datetime import datetime
from pymedphys._imports import numpy as np
from pymedphys._imports import pydicom
def clear_data_files():
    """Delete all demo files, image classifiers, etc from the demo folder"""
    demo_dir = osp.join(osp.dirname(osp.dirname(__file__)), "demo_files")
    if osp.isdir(demo_dir):
        for entry in os.listdir(demo_dir):
            entry_path = osp.join(demo_dir, entry)
            # only plain files are removed; subdirectories are left alone
            if osp.isfile(entry_path):
                os.remove(entry_path)
    print("Pylinac data files cleared.")
def assign2machine(source_file: str, machine_file: str):
    """Assign a DICOM RT Plan file to a specific machine. The source file is
    overwritten in place so that every beam carries the machine of the
    machine file.

    Parameters
    ----------
    source_file : str
        Path to the DICOM RTPlan file that contains the fields/plan desired
        (e.g. a Winston Lutz set of fields or Varian's default PF files).
    machine_file : str
        Path to a DICOM RTPlan file that has the desired machine. This is
        easily obtained by pushing a plan from the TPS for that specific
        machine. The file must contain at least one valid field.
    """
    plan = pydicom.dcmread(source_file)
    machine_plan = pydicom.dcmread(machine_file)
    # the machine name is taken from the first beam of the machine file
    machine_name = machine_plan.BeamSequence[0].TreatmentMachineName
    for beam in plan.BeamSequence:
        beam.TreatmentMachineName = machine_name
    plan.save_as(source_file)
def is_close(val, target, delta=1):
    """Whether *val* lies within *delta* of *target* (or of any element of
    *target* when it is an iterable).

    Parameters
    ----------
    val : number
        The value being compared against.
    target : number, iterable
        A single number, or an iterable of numbers; if any one of them is
        within *delta* of *val*, the result is True.

    Returns
    -------
    bool
    """
    try:
        candidates = iter(target)
    except TypeError:
        # scalar target: wrap it so a single loop handles both cases
        candidates = iter((target,))
    for candidate in candidates:
        if candidate - delta < val < candidate + delta:
            return True
    return False
def typed_property(name, expected_type_or_tuple_of_types):
    """Type-enforced property. Python Cookbook 9.21 (3rd ed).

    Returns a property that stores its value on ``self._<name>`` and
    raises TypeError when assigned anything that is not an instance of
    the expected type(s). Reading an unset property yields None.
    """
    storage_name = "_" + name

    def _getter(self):
        return getattr(self, storage_name, None)

    def _setter(self, value):
        if not isinstance(value, expected_type_or_tuple_of_types):
            raise TypeError(
                f"{name} must be a {expected_type_or_tuple_of_types}. Got: {type(value)}"
            )
        setattr(self, storage_name, value)

    return property(_getter, _setter)
def simple_round(number, decimals: int = 0):
    """Round a number to the given number of decimals. Fixes small floating number errors."""
    factor = 10 ** decimals
    # snap to the nearest integer multiple, then scale back down
    return int(round(number * factor)) / factor
def isnumeric(obj):
    """Check whether the passed object is numeric in any sense."""
    numeric_types = (int, float, decimal.Decimal, np.number)
    return isinstance(obj, numeric_types)
def is_float_like(number):
    """Return True for Python floats and any numpy floating scalar.

    Fix: ``np.float`` (a deprecated alias of the builtin ``float``) was
    removed in numpy 1.24, so the old explicit tuple raised AttributeError
    at call time; ``np.floating`` matches every numpy float width.
    """
    return isinstance(number, (float, np.floating))
def is_int_like(number):
    """Return True for Python ints and any numpy integer scalar.

    Fix: ``np.int`` was removed in numpy 1.24 (AttributeError at call
    time); ``np.integer`` covers every numpy integer width, including the
    unsigned types the old tuple omitted.
    """
    return isinstance(number, (int, np.integer))
def is_iterable(obj):
    """Determine if an object is iterable."""
    # Fix: the collections.Iterable alias was removed in Python 3.10; the
    # ABC lives in collections.abc, imported locally to keep this module's
    # top-level imports untouched.
    from collections.abc import Iterable
    return isinstance(obj, Iterable)
class Structure:
    """A simple structure that assigns the arguments to the object."""

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

    def update(self, **kwargs):
        self.__dict__.update(**kwargs)
def decode_binary(file, dtype, num_values=1, cursor_shift=0):
    """Read in a raw binary file and convert it to given data types.
    Parameters
    ----------
    file : file object
        The open file object.
    dtype : int, float, str
        The expected data type to return. If int or float, will return numpy array.
    num_values : int
        The expected number of dtype to return
        .. note:: This is not the same as the number of bytes.
    cursor_shift : int
        The number of bytes to move the cursor forward after decoding. This is used if there is a
        reserved section after the read-in segment.
    """
    f = file
    if dtype == str:  # if string
        output = f.read(num_values)
        # NOTE(review): f is the open file object here, never a str, so
        # this branch always decodes; presumably it guarded a legacy
        # str-content code path -- confirm.
        if type(f) is not str:  # in py3 fc will be bytes
            output = output.decode()
        # strip the padding ("\x00")
        output = output.strip("\x00")
    elif dtype == int:
        # "i" is a 4-byte little/native int per struct's format codes
        ssize = struct.calcsize("i") * num_values
        output = np.asarray(struct.unpack("i" * num_values, f.read(ssize)))
        # a single value is unwrapped to a plain Python scalar
        if len(output) == 1:
            output = int(output)
    elif dtype == float:
        ssize = struct.calcsize("f") * num_values
        output = np.asarray(struct.unpack("f" * num_values, f.read(ssize)))
        if len(output) == 1:
            output = float(output)
    else:
        raise TypeError(f"datatype '{dtype}' was not valid")
    # shift cursor if need be (e.g. if a reserved section follows);
    # whence=1 seeks relative to the current position
    if cursor_shift:
        f.seek(cursor_shift, 1)
    return output
def open_path(path: str):
    """Open the specified path in the system default viewer.

    Fix: macOS was detected with ``os.name == "darwin"``, but ``os.name``
    is ``"posix"`` on macOS ("darwin" is a ``sys.platform`` value), so
    Macs fell through to ``xdg-open``. Detect via ``sys.platform`` instead;
    every non-Mac, non-Windows platform now also gets a defined launcher.
    """
    import sys
    if sys.platform == "darwin":
        launcher = "open"
    elif os.name == "nt":
        launcher = "explorer"
    else:  # other POSIX systems
        launcher = "xdg-open"
    subprocess.call([launcher, path])
def file_exists(filename: str):
    """Return *filename*, made unique with a timestamp when it already
    exists on disk (the stamp is inserted before the extension)."""
    if not osp.exists(filename):
        return filename
    base, ext = osp.splitext(filename)
    stamp = datetime.now().strftime("%Y%m%d%H%M%S")
    return base + stamp + ext
| [
"me@simonbiggs.net"
] | me@simonbiggs.net |
da17e6c2943d816d5cfa487be905b868ba633af5 | bfd749303a728c3ec6abd92c0bd0515c8cf75192 | /ceilometer/openstack/common/log.py | 0683f98de85d095b265bb3b619a86e143977f224 | [] | no_license | plomakin/ceilometer | 5d823986c3a7515e615544b578b7629a6ccbeb58 | 16c892bbfa605aef6c80226c3198e997930cc973 | refs/heads/master | 2021-01-20T19:49:49.547093 | 2012-10-29T21:34:46 | 2012-10-29T21:34:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,970 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import cStringIO
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import stat
import sys
import traceback
from ceilometer.openstack.common import cfg
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import jsonutils
from ceilometer.openstack.common import local
from ceilometer.openstack.common import notifier
# Config options controlling log formatting and per-module levels; these
# are registered on the global CONF object below.
log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s %(levelname)s %(name)s [%(request_id)s '
                       '%(user_id)s %(project_id)s] %(instance)s'
                       '%(message)s',
               help='format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s %(process)d %(levelname)s %(name)s [-]'
                       ' %(instance)s%(message)s',
               help='format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s %(process)d TRACE %(name)s %(instance)s',
               help='prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqplib=WARN',
                    'sqlalchemy=WARN',
                    'boto=WARN',
                    'suds=INFO',
                    'keystone=INFO',
                    'eventlet.wsgi.server=WARN'
                ],
                help='list of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='publish error events'),
    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]
# Options selecting the log destination (file, directory, stderr).
generic_log_opts = [
    cfg.StrOpt('logdir',
               default=None,
               help='Log output to a per-service log file in named directory'),
    cfg.StrOpt('logfile',
               default=None,
               help='Log output to a named file'),
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error'),
    cfg.StrOpt('logfile_mode',
               default='0644',
               help='Default file mode used when creating log files'),
]
# Register all options on the process-wide configuration object.
CONF = cfg.CONF
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
# logging.NullHandler only exists on Python >= 2.7; provide a minimal
# stand-in on older interpreters so importers can always use the name.
try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass
        def emit(self, record):
            pass
        def createLock(self):
            # nothing is ever emitted, so no lock is needed
            self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Resolve the log file path from CONF, or None when file logging is off.

    Precedence: an explicit log file wins (joined onto the log dir when one
    is also set); with only a directory configured, "<logdir>/<binary>.log"
    is derived from the given or detected binary name.
    """
    logfile = CONF.log_file or CONF.logfile
    logdir = CONF.log_dir or CONF.logdir
    if logfile:
        return os.path.join(logdir, logfile) if logdir else logfile
    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)
class ContextAdapter(logging.LoggerAdapter):
    """LoggerAdapter that injects request-context and instance information.

    process() merges the explicit ``context`` kwarg (or one found in
    ``local.store``), the project/version strings and any instance id into
    the record's ``extra`` dict for the format strings to use.
    """
    # alias so callers may use .warn as well as .warning
    warn = logging.LoggerAdapter.warning
    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string
    def audit(self, msg, *args, **kwargs):
        """Log at the synthesized AUDIT level (INFO + 1)."""
        self.log(logging.AUDIT, msg, *args, **kwargs)
    def process(self, msg, kwargs):
        """Enrich kwargs['extra'] before the log record is created."""
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']
        context = kwargs.pop('context', None)
        if not context:
            # fall back to a context stashed in local storage (local.store)
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))
        instance = kwargs.pop('instance', None)
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        else:
            instance_uuid = kwargs.pop('instance_uuid', None)
            if instance_uuid:
                instance_extra = (CONF.instance_uuid_format
                                  % {'uuid': instance_uuid})
        extra.update({'instance': instance_extra})
        extra.update({"project": self.project})
        extra.update({"version": self.version})
        # keep a self-copy under 'extra' so JSONFormatter can serialize it
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    """Formatter that serializes each log record as one JSON document."""
    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        # since logging.config.fileConfig passes it.
        self.datefmt = datefmt
    def formatException(self, ei, strip_newlines=True):
        """Return the traceback as a list of lines.

        With strip_newlines (the default) each multi-line traceback entry
        is split and empty lines are dropped, yielding a flat list.
        NOTE(review): itertools.ifilter is Python 2 only; this module
        predates Python 3 support.
        """
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            lines = [itertools.ifilter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines
    def format(self, record):
        """Serialize the record's attributes to a JSON string."""
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}
        # 'extra' is attached by ContextAdapter.process when present
        if hasattr(record, 'extra'):
            message['extra'] = record.extra
        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)
        return jsonutils.dumps(message)
class PublishErrorsHandler(logging.Handler):
    """Handler that forwards error records onto the notifier bus."""
    def emit(self, record):
        # skip when the log_notifier driver is active -- presumably to
        # avoid a notify -> log -> notify feedback loop; confirm.
        if ('ceilometer.openstack.common.notifier.log_notifier' in
                CONF.notification_driver):
            return
        notifier.api.notify(None, 'error.publisher',
                            'error_notification',
                            notifier.api.ERROR,
                            dict(error=record.msg))
def _create_logging_excepthook(product_name):
def logging_excepthook(type, value, tb):
extra = {}
if CONF.verbose:
extra['exc_info'] = (type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
def setup(product_name):
    """Setup logging.

    Installs the product-specific excepthook, then either defers entirely
    to a user-supplied logging config file (CONF.log_config) or builds
    handlers/levels from the individual CONF options.
    """
    sys.excepthook = _create_logging_excepthook(product_name)
    if CONF.log_config:
        try:
            logging.config.fileConfig(CONF.log_config)
        except Exception:
            # surface the config error before re-raising
            traceback.print_exc()
            raise
    else:
        _setup_logging_from_conf(product_name)
def _find_facility_from_conf():
    """Resolve CONF.syslog_log_facility to a SysLogHandler facility value.

    Accepts either a SysLogHandler attribute name (e.g. 'LOG_USER') or a
    key of SysLogHandler.facility_names (e.g. 'user'); raises TypeError
    listing the valid options otherwise.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)
    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)
    if facility is None:
        # NOTE(review): .keys().extend() only works on Python 2, where
        # keys() returns a list; this module predates Python 3 support.
        valid_facilities = facility_names.keys()
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))
    return facility
def _setup_logging_from_conf(product_name):
    """(Re)build the product's root logger from CONF.

    Installs, as configured: a syslog handler, a watched-file handler
    (forcing logfile_mode permissions), a stderr/stdout stream handler and
    a PublishErrorsHandler; then applies formatters, the root level and
    the per-module levels from CONF.default_log_levels.

    Fix: the formatter loop previously set a Formatter built from
    CONF.log_format and then unconditionally overwrote it with
    LegacyFormatter, so log_format never took effect; the override is now
    an ``else`` branch.
    """
    log_root = getLogger(product_name).logger
    # start from a clean slate so repeated setup() calls don't stack handlers
    for handler in log_root.handlers:
        log_root.removeHandler(handler)
    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                facility=facility)
        log_root.addHandler(syslog)
    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)
        # force the configured (octal string) permissions onto the file
        mode = int(CONF.logfile_mode, 8)
        st = os.stat(logpath)
        if st.st_mode != (stat.S_IFREG | mode):
            os.chmod(logpath, mode)
    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)
    elif not CONF.log_file:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)
    if CONF.publish_errors:
        log_root.addHandler(PublishErrorsHandler(logging.ERROR))
    for handler in log_root.handlers:
        datefmt = CONF.log_date_format
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
        else:
            handler.setFormatter(LegacyFormatter(datefmt=datefmt))
    if CONF.verbose or CONF.debug:
        log_root.setLevel(logging.DEBUG)
    else:
        log_root.setLevel(logging.INFO)
    level = logging.NOTSET
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
        for handler in log_root.handlers:
            logger.addHandler(handler)
# cache of ContextAdapter instances, keyed by logger name
_loggers = {}
def getLogger(name='unknown', version='unknown'):
    """Return (and cache) a ContextAdapter-wrapped logger for *name*.

    NOTE(review): the cache keys on name only, so a later call with a
    different *version* returns the adapter created first.
    """
    if name not in _loggers:
        _loggers[name] = ContextAdapter(logging.getLogger(name),
                                        name,
                                        version)
    return _loggers[name]
class WritableLogger(object):
    """File-like adapter: every write() call becomes one log record at the
    configured level, so a logger can stand in for a writable stream."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        self.logger.log(self.level, msg)
class LegacyFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.
    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.
    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter
    """
    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formating params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color'):
            if key not in record.__dict__:
                record.__dict__[key] = ''
        # pick the format string per-record; _fmt is what the base class
        # format() consumes
        if record.__dict__.get('request_id', None):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string
        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix
        # Cache this on the record, Logger will respect our formated copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)
    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)
        stringbuffer = cStringIO.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()
        # only compute asctime when the prefix template actually uses it
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)
        formatted_lines = []
        for line in lines:
            # prefix every traceback line (TRACE-style output)
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    """Stream handler that tags each record with an ANSI color escape.

    ``format`` stores the escape sequence for the record's level in
    ``record.color`` so a format string containing ``%(color)s`` renders
    colorized output.  Levels missing from LEVEL_COLORS raise KeyError.
    """
    # NOTE(review): logging.AUDIT is not a stdlib level — presumably this
    # project registers it elsewhere before this module is imported; verify.
    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m', # GREEN
        logging.INFO: '\033[00;36m', # CYAN
        logging.AUDIT: '\033[01;36m', # BOLD CYAN
        logging.WARN: '\033[01;33m', # BOLD YELLOW
        logging.ERROR: '\033[01;31m', # BOLD RED
        logging.CRITICAL: '\033[01;31m', # BOLD RED
    }
    def format(self, record):
        # Expose the color for this record's level to the format string.
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
| [
"julien@danjou.info"
] | julien@danjou.info |
f0bbc69af03e781a03605562638f43f37e5446bc | cad999eacee16dc0e001a57f50b5d8b0f4d4ebf6 | /103.py | 83aec0546a4d75a1bbf8170d83702f78938dcc69 | [] | no_license | divyanarra0/pythonprogram | 8694a41ba3b39eb44a94a693eac3f7f5f18b588b | 10d8f59a472ccd4548771bad29be84a1a44854d8 | refs/heads/master | 2020-03-27T10:32:21.664657 | 2019-05-14T07:31:00 | 2019-05-14T07:31:00 | 146,427,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | string1= raw_input()
print string1.title()
| [
"noreply@github.com"
] | divyanarra0.noreply@github.com |
d953e11c4a8d357d4c6c96235dddf34c3f007316 | b0ea541c0aef0fa8946aef3130490dc4fa068e9b | /ABC_PS1_Final/catkin_ws/build/learning_ros_noetic/Part_5/object_grabber/catkin_generated/pkg.develspace.context.pc.py | 09a9f2fa704bbcae1cffdeb519029b82389c0271 | [] | no_license | ABCaps35/ECSE473_ABC | b66c8288412a34c72c858e16fd2f93540291b8ff | f03b9ec90317dd730aa723cb7fa7254ea03e412f | refs/heads/master | 2023-03-09T09:46:47.963268 | 2021-02-11T03:44:19 | 2021-02-11T03:44:19 | 337,913,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/abcaps35/catkin_ws/devel/include;/home/abcaps35/catkin_ws/src/learning_ros_noetic/Part_5/object_grabber/include".split(';') if "/home/abcaps35/catkin_ws/devel/include;/home/abcaps35/catkin_ws/src/learning_ros_noetic/Part_5/object_grabber/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;roscpp;std_msgs;geometry_msgs;cartesian_planner;tf;xform_utils;object_manipulation_properties;generic_gripper_services".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lobject_grabber_lib;-lobject_grabber_lib2".split(';') if "-lobject_grabber_lib;-lobject_grabber_lib2" != "" else []
PROJECT_NAME = "object_grabber"
PROJECT_SPACE_DIR = "/home/abcaps35/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"acapelli345@gmail.com"
] | acapelli345@gmail.com |
0037dab49f23b6ce338fa052ef1a4f5907b18201 | 4b44a299bafbd4ca408ce1c89c9fe4a449632783 | /python3/13_OOP/47_Double_Linked_list.py | ee1958aedd7227c94122e736960e288a86341345 | [] | no_license | umunusb1/PythonMaterial | ecd33d32b2de664eaaae5192be7c3f6d6bef1d67 | 1e0785c55ccb8f5b9df1978e1773365a29479ce0 | refs/heads/master | 2023-01-23T23:39:35.797800 | 2020-12-02T19:29:00 | 2020-12-02T19:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | #!/usr/bin/python
"""
Purpose: Double Linked List
h e l l o
3 4 6 8 9
n1 n2 n3
pre_n_add None 3 4
value h e l l o
next_n_add 4 6 8
"""
class DoubleLinkedList:
    """Node of a doubly linked list.

    Each node stores a value plus the addresses (``id()``s) of its
    previous and next neighbors; ``None`` marks a missing neighbor.
    """

    def __init__(self, data, prev_nd_addr=None, next_nd_addr=None):
        """Create a node holding *data* with optional neighbor addresses."""
        self.data = data
        self.prev_nd_addr = prev_nd_addr
        self.next_nd_addr = next_nd_addr

    def set_prev_node_address(self, prev_n_add):
        """Record the address of the node that precedes this one."""
        self.prev_nd_addr = prev_n_add

    def set_next_node_address(self, next_n_add):
        """Record the address of the node that follows this one."""
        self.next_nd_addr = next_n_add

    def __repr__(self):
        # Render as "<prev>|<data>|<next>", e.g. "None|10|None".
        fields = (self.prev_nd_addr, self.data, self.next_nd_addr)
        return '|'.join(str(field) for field in fields)
# Demo: build a 3-node doubly linked list (10 <-> 20 <-> 30) and show the
# reprs before and after wiring the neighbor addresses.  The printed
# addresses come from id() and therefore differ between runs.
d1 = DoubleLinkedList(10)
print(d1)
d2 = DoubleLinkedList(20)
print(d2)
d3 = DoubleLinkedList(30)
print(d3)
# Link the nodes in both directions using their object ids as addresses.
d1.set_next_node_address(id(d2))
d2.set_prev_node_address(id(d1))
d2.set_next_node_address(id(d3))
d3.set_prev_node_address(id(d2))
print()
print(d1)
print(d2)
print(d3)
# Assignment: create a double linked list for the word 'hello'
'''
id()|h|id() e l l o
'''
| [
"uday3prakash@gmail.com"
] | uday3prakash@gmail.com |
3e51bac5b5e1539a03b9eac81156e4809de878df | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/cloud/dataproc/v1beta2/dataproc-v1beta2-py/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py | d0d97988371953e2d76178a1a77ce1f65d4657ce | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,721 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.dataproc_v1beta2.types import autoscaling_policies
from google.protobuf import empty_pb2 # type: ignore
from .base import AutoscalingPolicyServiceTransport, DEFAULT_CLIENT_INFO
class AutoscalingPolicyServiceGrpcTransport(AutoscalingPolicyServiceTransport):
    """gRPC backend transport for AutoscalingPolicyService.
    The API interface for managing autoscaling policies in the
    Cloud Dataproc API.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Cache of RPC stub callables keyed by method name; populated lazily by
    # the properties below so each stub is created at most once.
    _stubs: Dict[str, Callable]
    def __init__(self, *,
            host: str = 'dataproc.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: str = None,
            scopes: Sequence[str] = None,
            channel: grpc.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @classmethod
    def create_channel(cls,
                       host: str = 'dataproc.googleapis.com',
                       credentials: ga_credentials.Credentials = None,
                       credentials_file: str = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes)
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            **self_signed_jwt_kwargs,
            **kwargs
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def create_autoscaling_policy(self) -> Callable[
            [autoscaling_policies.CreateAutoscalingPolicyRequest],
            autoscaling_policies.AutoscalingPolicy]:
        r"""Return a callable for the create autoscaling policy method over gRPC.
        Creates new autoscaling policy.
        Returns:
            Callable[[~.CreateAutoscalingPolicyRequest],
                    ~.AutoscalingPolicy]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'create_autoscaling_policy' not in self._stubs:
            self._stubs['create_autoscaling_policy'] = self.grpc_channel.unary_unary(
                '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/CreateAutoscalingPolicy',
                request_serializer=autoscaling_policies.CreateAutoscalingPolicyRequest.serialize,
                response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize,
            )
        return self._stubs['create_autoscaling_policy']
    @property
    def update_autoscaling_policy(self) -> Callable[
            [autoscaling_policies.UpdateAutoscalingPolicyRequest],
            autoscaling_policies.AutoscalingPolicy]:
        r"""Return a callable for the update autoscaling policy method over gRPC.
        Updates (replaces) autoscaling policy.
        Disabled check for update_mask, because all updates will be full
        replacements.
        Returns:
            Callable[[~.UpdateAutoscalingPolicyRequest],
                    ~.AutoscalingPolicy]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'update_autoscaling_policy' not in self._stubs:
            self._stubs['update_autoscaling_policy'] = self.grpc_channel.unary_unary(
                '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/UpdateAutoscalingPolicy',
                request_serializer=autoscaling_policies.UpdateAutoscalingPolicyRequest.serialize,
                response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize,
            )
        return self._stubs['update_autoscaling_policy']
    @property
    def get_autoscaling_policy(self) -> Callable[
            [autoscaling_policies.GetAutoscalingPolicyRequest],
            autoscaling_policies.AutoscalingPolicy]:
        r"""Return a callable for the get autoscaling policy method over gRPC.
        Retrieves autoscaling policy.
        Returns:
            Callable[[~.GetAutoscalingPolicyRequest],
                    ~.AutoscalingPolicy]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_autoscaling_policy' not in self._stubs:
            self._stubs['get_autoscaling_policy'] = self.grpc_channel.unary_unary(
                '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/GetAutoscalingPolicy',
                request_serializer=autoscaling_policies.GetAutoscalingPolicyRequest.serialize,
                response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize,
            )
        return self._stubs['get_autoscaling_policy']
    @property
    def list_autoscaling_policies(self) -> Callable[
            [autoscaling_policies.ListAutoscalingPoliciesRequest],
            autoscaling_policies.ListAutoscalingPoliciesResponse]:
        r"""Return a callable for the list autoscaling policies method over gRPC.
        Lists autoscaling policies in the project.
        Returns:
            Callable[[~.ListAutoscalingPoliciesRequest],
                    ~.ListAutoscalingPoliciesResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'list_autoscaling_policies' not in self._stubs:
            self._stubs['list_autoscaling_policies'] = self.grpc_channel.unary_unary(
                '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/ListAutoscalingPolicies',
                request_serializer=autoscaling_policies.ListAutoscalingPoliciesRequest.serialize,
                response_deserializer=autoscaling_policies.ListAutoscalingPoliciesResponse.deserialize,
            )
        return self._stubs['list_autoscaling_policies']
    @property
    def delete_autoscaling_policy(self) -> Callable[
            [autoscaling_policies.DeleteAutoscalingPolicyRequest],
            empty_pb2.Empty]:
        r"""Return a callable for the delete autoscaling policy method over gRPC.
        Deletes an autoscaling policy. It is an error to
        delete an autoscaling policy that is in use by one or
        more clusters.
        Returns:
            Callable[[~.DeleteAutoscalingPolicyRequest],
                    ~.Empty]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'delete_autoscaling_policy' not in self._stubs:
            self._stubs['delete_autoscaling_policy'] = self.grpc_channel.unary_unary(
                '/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/DeleteAutoscalingPolicy',
                request_serializer=autoscaling_policies.DeleteAutoscalingPolicyRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs['delete_autoscaling_policy']
__all__ = (
'AutoscalingPolicyServiceGrpcTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
8d9f5d7fe3cdbefae918b1115493c758f326a538 | 1c91439673c898c2219ee63750ea05ff847faee1 | /configs/_base_/models/swin_transformer_v2/base_256.py | f711a9c8dcebf644d0479a887e4383a630c67384 | [
"Apache-2.0"
] | permissive | ChenhongyiYang/GPViT | d7ba7f00d5139a989a999664ab0874c5c9d53d4d | 2b8882b2da41d4e175fe49a33fcefad1423216f4 | refs/heads/main | 2023-06-08T00:10:07.319078 | 2023-05-26T15:52:54 | 2023-05-26T15:52:54 | 577,075,781 | 78 | 2 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | # model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='SwinTransformerV2',
arch='base',
img_size=256,
drop_path_rate=0.5),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=1024,
init_cfg=None, # suppress the default init_cfg of LinearClsHead.
loss=dict(
type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
cal_acc=False),
init_cfg=[
dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
],
train_cfg=dict(augments=[
dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
]))
| [
"chenhongyiyang@Chenhongyis-MacBook-Pro.local"
] | chenhongyiyang@Chenhongyis-MacBook-Pro.local |
ca4c6b2353645358b198d8ca20aaa41fea654678 | 2f0cb310e2ec8fb176ee240aa964a7eef5ed23b4 | /giico/quality_control_and_material_testing/doctype/organic_impurities/test_organic_impurities.py | 899ff309e9d723fb0ae2427dada4a1eea3848932 | [
"MIT"
] | permissive | thispl/giico | b96cf6b707f361275f8723d15f8ea1f95f908c9c | 14c5631639ab56a586a7962be9871d722c20e205 | refs/heads/master | 2021-06-18T03:56:02.928303 | 2021-04-27T06:42:59 | 2021-04-27T06:42:59 | 200,183,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2021, VHRS and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestOrganicImpurities(unittest.TestCase):
    """Placeholder test case for the Organic Impurities doctype."""
| [
"hereabdulla@gmail.com"
] | hereabdulla@gmail.com |
f057dda260993448165227e609ab66b028018f15 | 1285703d35b5a37734e40121cd660e9c1a73b076 | /at_coder/abc/old/126/d.py | 0892272a0581a2cc5c7ac9868ed404a890a8fa87 | [] | no_license | takin6/algorithm-practice | 21826c711f57131108168775f08e4e13d07a3b38 | f4098bea2085a77d11c29e1593b3cc3f579c24aa | refs/heads/master | 2022-11-30T09:40:58.083766 | 2020-08-07T22:07:46 | 2020-08-07T22:07:46 | 283,609,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | N = int(input())
adj = [ [] for _ in range(N) ]
for _ in range(N):
u,v,w = map(int,input().split())
u -= 1
v -= 1
adj[u].append((v,w))
adj[v].append((u,w))
def dfs(node, w, color=0):
    """Color `node` and propagate parity-based colors to its neighbors.

    `w` is the weight of the edge used to reach `node`; `color` defaults
    to 0 so a traversal can be started with just ``dfs(root, w)`` (the
    original required three arguments, yet is called with two below).
    Returns False as soon as an already-colored neighbor is inconsistent
    with the parity rule, True otherwise.
    """
    colors[node] = color
    for n, nw in adj[node]:
        if colors[n] != -1:
            # Neighbor already colored: accept only the parity-consistent
            # combinations, otherwise report a contradiction.
            # BUG FIX: the original wrote `w+nw%2`, which parses as
            # `w + (nw % 2)`; the intended test is `(w + nw) % 2`.
            if (w + nw) % 2 == 1 and colors[n] == 0:
                continue
            if (w + nw) % 2 == 0 and colors[n] == 1:
                continue
            return False
        else:
            nc = 1 if (w + nw) % 2 == 0 else 0
            # BUG FIX: recurse into the *neighbor* `n`; the original
            # recursed on `node` again and never terminated.
            if not dfs(n, nw, nc):
                return False
    return True
# -1 marks "not yet colored"; start a DFS from every uncolored vertex so
# disconnected components (if any) are all handled.
colors = [-1] * N
for i in range(N):
    if colors[i] == -1:
        # BUG FIX: dfs takes (node, w, color); the original call passed
        # only two arguments and raised TypeError.  Each new component
        # starts with color 0.
        dfs(i, 1, 0)
for c in colors:
    print(c)
"takayukiinoue116@gmail.com"
] | takayukiinoue116@gmail.com |
a462ab5e1744a75fa107be73da72ae72b7366260 | 5980a1a0ae2bed966dc9d06a1e3f3b4929e17f04 | /director/data_format/dot_dict.py | e8df212277ea5fadfd3e6ef8e8a398eb9ee4b6b9 | [] | no_license | coblan/helpers | 4aa4c6846cacf5f4a176e2de2fade0e98bd8126f | b2d3cb2583ce7469c88165968a8e74cda6e8cf11 | refs/heads/master | 2023-08-24T22:37:13.239226 | 2023-08-23T07:38:22 | 2023-08-23T07:38:22 | 83,221,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | class DotObj(object):
def __init__(self,dc):
for k,v in dc.items():
setattr(self,k,v)
def __getattr__(self,name):
try:
return object.__getattr__(self,name)
except AttributeError:
return '' | [
"he_yulin@163.com"
] | he_yulin@163.com |
f00fc45c49e835d6c5baeef7d26a870c3c2cd541 | cedab14839cfc276f028436ba79d103a8aff0d5b | /Philippines/Subject3_Math/E3_Math_StreetView/1_PassFailSplit.py | 4bc194e252404379e22294a537ebb7586a9e8911 | [] | no_license | wmgeolab/schoolCNN | aa686a4103695c1e10f5afa68ec2919761d33c15 | 1c73ec90732ec565ce552b27e4b2108a8ee916da | refs/heads/master | 2021-01-09T03:25:44.895023 | 2020-02-21T22:52:41 | 2020-02-21T22:52:41 | 242,230,029 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | import pandas as pd
import shutil
import os
# Read in the data
df = pd.read_csv("./Philippines/Subject3_Math/E3_Math_StreetView/data/y1314_Math.csv")
df.head()
# Set up the base directory
directory = "./Philippines/Subject3_Math/E3_Math_StreetView/data/imagery/"
for filename in os.listdir(directory):
# The firt 6 characters in the file's path name are the school's unique ID number
schoolID = filename[0:6]
# Use the school's ID to subset the dataframe to that school
subset = df[df['school_id'] == int(schoolID)]
# Construct the name of the file that will be copied into the pass or fail folder
fname = directory + filename
# If the school's intervention value is 1, move it into the fail folder (the school scored below average on the NAT)
if subset['intervention'].tolist()[0] == 1:
shutil.copy(fname, "./Philippines/Subject3_Math/E3_Math_StreetView/data/fail/")
# If the school's intervention value is 0, move it into the pass folder (the school scored above average on the NAT)
if subset['intervention'].tolist()[0] == 0:
shutil.copy(fname, "./Philippines/Subject3_Math/E3_Math_StreetView/data/pass/")
| [
"hmbaier@email.wm.edu"
] | hmbaier@email.wm.edu |
5cfdfcac2d6b71e28041fc9cbcbab1ca89063cc2 | 346efbc9dbbb1d656fd579400530c0269dfce56d | /codeforces/1409/d.py | ec0869bd1541bf903931095714622d10d2f4ed60 | [] | no_license | lmun/competitiveProgramingSolutions | 1c362e6433fc985e371afe88f08277268c46afde | 06d62240e2b3c58dd9ee72e41a78f7246d966652 | refs/heads/master | 2023-08-24T04:52:04.218922 | 2021-10-29T15:06:28 | 2021-10-29T15:06:28 | 167,073,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | def a(n,s):
if sum(map(int,str(n)))<=s:
return 0
return 10-n%10+10*a(1+n//10,s) if n%10 else 10*a(n//10,s)
# One test case per line: read "n s" and print the answer for each.
for t in range(int(input())):
    print(a(*map(int,input().split())))
"lester@ug.uchile.cl"
] | lester@ug.uchile.cl |
1372f4114d3691052c65bfba4ddb42ea9662728d | 2e6c95871bd255873fb563347c0f070e6fcdde74 | /rf-grid-search.py | df196f6c517feac250666fcca3ebd1f5ff59ccc2 | [] | no_license | MSBradshaw/BioHackathon2020 | 3203c5232bebd70d2c2a88b7f49063a09da023c4 | 31826b698a408541200b6f75bfe9c03217bf2d1a | refs/heads/master | 2022-08-05T11:57:32.221444 | 2020-05-29T17:30:29 | 2020-05-29T17:30:29 | 258,961,184 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
import pandas as pd
from bs4 import BeautifulSoup
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
print('RANDOM FOREST')
# Load the labeled abstracts and strip any HTML markup before vectorizing.
train = pd.read_csv('train.csv')
abstracts = [BeautifulSoup(x).get_text() for x in train['abstract']]
# TF-IDF features over the cleaned abstracts; `type` is the target label.
tfidf = TfidfVectorizer()
X = tfidf.fit_transform(abstracts)
y = train['type'].to_numpy()
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
# NOTE(review): max_features='auto' is deprecated/removed in newer
# scikit-learn releases — confirm the pinned sklearn version.
tuned_parameters = [
    {'n_estimators': [100, 200, 300, 400, 500, 600, 700, 800, 900], 'max_features': ['auto', 'sqrt', 'log2', None],'criterion':['gini','entropy']}]
scores = ['precision', 'recall']
# Run one full grid search per optimization metric and report the best
# hyper-parameters, the per-candidate CV scores, and a held-out report.
for score in scores:
    print("# Tuning hyper-parameters for %s" % score)
    print()
    clf = GridSearchCV(
        RandomForestClassifier(), tuned_parameters, scoring='%s_macro' % score
    )
    clf.fit(X_train, y_train)
    print("Best parameters set found on development set:")
    print()
    print(clf.best_params_)
    print()
    print("Grid scores on development set:")
    print()
    means = clf.cv_results_['mean_test_score']
    stds = clf.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, clf.cv_results_['params']):
        print("%0.3f (+/-%0.03f) for %r"
              % (mean, std * 2, params))
    print()
    print("Detailed classification report:")
    print()
    print("The model is trained on the full development set.")
    print("The scores are computed on the full evaluation set.")
    print()
    y_true, y_pred = y_test, clf.predict(X_test)
    print(classification_report(y_true, y_pred))
    print()
| [
"michaelscottbradshaw@gmail.com"
] | michaelscottbradshaw@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.