blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
089a72d393f80d9edfc8e61a7dfbfce118eb3090 | 21590487701d2dcbe1a1c1dd81c6e983f7523cb6 | /opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/view.py | 28f7b4fe08779f08d905507fbc87a5b0d4907dab | [
"Apache-2.0"
] | permissive | open-telemetry/opentelemetry-python | 837199e541c03cff311cad075401791ee2a23583 | d8490c5f557dd7005badeb800095cb51b553c98c | refs/heads/main | 2023-08-26T06:47:23.837997 | 2023-08-17T22:35:13 | 2023-08-17T22:35:13 | 185,478,926 | 1,361 | 668 | Apache-2.0 | 2023-09-14T20:48:40 | 2019-05-07T21:13:30 | Python | UTF-8 | Python | false | false | 6,371 | py | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fnmatch import fnmatch
from logging import getLogger
from typing import Optional, Set, Type
# FIXME import from typing when support for 3.7 is removed
from typing_extensions import final
from opentelemetry.metrics import Instrument
from opentelemetry.sdk.metrics._internal.aggregation import (
Aggregation,
DefaultAggregation,
)
_logger = getLogger(__name__)
class View:
    """
    A `View` configuration parameters can be used for the following
    purposes:
    1. Match instruments: When an instrument matches a view, measurements
    received by that instrument will be processed.
    2. Customize metric streams: A metric stream is identified by a match
    between a view and an instrument and a set of attributes. The metric
    stream can be customized by certain attributes of the corresponding view.
    The attributes documented next serve one of the previous two purposes.
    Args:
        instrument_type: This is an instrument matching attribute: the class the
        instrument must be to match the view.
        instrument_name: This is an instrument matching attribute: the name the
        instrument must have to match the view. Wild card characters are supported. Wild
        card characters should not be used with this attribute if the view has also a
        ``name`` defined.
        meter_name: This is an instrument matching attribute: the name the
        instrument meter must have to match the view.
        meter_version: This is an instrument matching attribute: the version
        the instrument meter must have to match the view.
        meter_schema_url: This is an instrument matching attribute: the schema
        URL the instrument meter must have to match the view.
        name: This is a metric stream customizing attribute: the name of the
        metric stream. If `None`, the name of the instrument will be used.
        description: This is a metric stream customizing attribute: the
        description of the metric stream. If `None`, the description of the instrument will
        be used.
        attribute_keys: This is a metric stream customizing attribute: this is
        a set of attribute keys. If not `None` then only the measurement attributes that
        are in ``attribute_keys`` will be used to identify the metric stream.
        aggregation: This is a metric stream customizing attribute: the
        aggregation instance to use when data is aggregated for the
        corresponding metrics stream. If `None` an instance of
        `DefaultAggregation` will be used.
        instrument_unit: This is an instrument matching attribute: the unit the
        instrument must have to match the view.
    This class is not intended to be subclassed by the user.
    """
    # Shared fallback aggregation, used whenever a View is created without
    # an explicit `aggregation` argument (see the end of __init__).
    _default_aggregation = DefaultAggregation()
    def __init__(
        self,
        instrument_type: Optional[Type[Instrument]] = None,
        instrument_name: Optional[str] = None,
        meter_name: Optional[str] = None,
        meter_version: Optional[str] = None,
        meter_schema_url: Optional[str] = None,
        name: Optional[str] = None,
        description: Optional[str] = None,
        attribute_keys: Optional[Set[str]] = None,
        aggregation: Optional[Aggregation] = None,
        instrument_unit: Optional[str] = None,
    ):
        # Chained `is` comparison: this is True only when *every* matching
        # criterion is identical to None, i.e. the view would never be able
        # to select any instrument at all.
        if (
            instrument_type
            is instrument_name
            is instrument_unit
            is meter_name
            is meter_version
            is meter_schema_url
            is None
        ):
            raise Exception(
                "Some instrument selection "
                f"criteria must be provided for View {name}"
            )
        # A wildcard instrument_name combined with an explicit stream `name`
        # would rename every matching stream to the same name, silently
        # merging unrelated instruments -- reject the combination up front.
        if (
            name is not None
            and instrument_name is not None
            and ("*" in instrument_name or "?" in instrument_name)
        ):
            raise Exception(
                f"View {name} declared with wildcard "
                "characters in instrument_name"
            )
        # _name, _description, _aggregation and _attribute_keys will be
        # accessed when instantiating a _ViewInstrumentMatch.
        self._name = name
        self._instrument_type = instrument_type
        self._instrument_name = instrument_name
        self._instrument_unit = instrument_unit
        self._meter_name = meter_name
        self._meter_version = meter_version
        self._meter_schema_url = meter_schema_url
        self._description = description
        self._attribute_keys = attribute_keys
        self._aggregation = aggregation or self._default_aggregation
    # pylint: disable=too-many-return-statements
    # pylint: disable=too-many-branches
    @final
    def _match(self, instrument: Instrument) -> bool:
        # True only when the instrument satisfies every criterion that was
        # configured on this view; criteria left as None match anything.
        if self._instrument_type is not None:
            if not isinstance(instrument, self._instrument_type):
                return False
        if self._instrument_name is not None:
            # fnmatch provides shell-style wildcard matching ('*', '?').
            if not fnmatch(instrument.name, self._instrument_name):
                return False
        if self._instrument_unit is not None:
            if not fnmatch(instrument.unit, self._instrument_unit):
                return False
        if self._meter_name is not None:
            if instrument.instrumentation_scope.name != self._meter_name:
                return False
        if self._meter_version is not None:
            if instrument.instrumentation_scope.version != self._meter_version:
                return False
        if self._meter_schema_url is not None:
            if (
                instrument.instrumentation_scope.schema_url
                != self._meter_schema_url
            ):
                return False
        return True
| [
"noreply@github.com"
] | open-telemetry.noreply@github.com |
ac08f6261540d61606e9d0662c5697f306f0e938 | 26cb67f6111ac7e372c085da2413018b1eb8ac03 | /ch01/mnist_simple_nn.py | 494e0fa19d5e07b19679331895afb72e547939f7 | [] | no_license | prakharchoudhary/Deep-Learning-With-Keras | 19697288e2f7c73f04298708d5a6d2ea828a80df | 06be2eb555b61bc6e884cc3723bcab26bcf56551 | refs/heads/master | 2021-09-04T01:24:21.497713 | 2018-01-14T01:44:07 | 2018-01-14T01:44:07 | 110,025,589 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,632 | py | from __future__ import print_function
import numpy as np
import os
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
# Reproducibility: fix the NumPy RNG seed before any weight initialization.
np.random.seed(1671)
# Silence TensorFlow's INFO/WARNING log spam.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# network and training
NB_EPOCH = 200
BATCH_SIZE = 128
VERBOSE = 1
NB_CLASSES = 10 # number of outputs = number of digits
OPTIMIZER = SGD()
N_HIDDEN = 128 # NOTE(review): defined but never used by this single-layer model
VALIDATION_SPLIT = 0.2 # fraction of training data held out for validation
# data: shuffled and split between train and test splits
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Flatten each 28x28 grayscale image into a 784-dimensional float vector.
RESHAPED = 784
X_train = X_train.reshape(60000, RESHAPED)
X_test = X_test.reshape(10000, RESHAPED)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# normalize
# Scale raw pixel intensities from [0, 255] to [0, 1].
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, NB_CLASSES)
y_test = np_utils.to_categorical(y_test, NB_CLASSES)
# model
# A single Dense layer followed by softmax: effectively multinomial
# logistic regression over the raw pixels.
model = Sequential()
model.add(Dense(NB_CLASSES, input_shape=(RESHAPED,)))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=\
OPTIMIZER, metrics=['accuracy'])
# train
history = model.fit(X_train, y_train,
batch_size = BATCH_SIZE,
epochs = NB_EPOCH,
verbose=VERBOSE,
validation_split=VALIDATION_SPLIT)
# Evaluate on the held-out test set: score == [loss, accuracy].
score = model.evaluate(X_test, y_test, verbose=VERBOSE)
print("Test score:", score[0])
print("Test accuracy:", score[1]) | [
"prakhar2397@gmail.com"
] | prakhar2397@gmail.com |
c2a3a5f82d48fe741854aa936d64f6e12b1e3ec9 | f19891e0ca7f038eadd13023a9bece2d356cd3ae | /mycomments/views.py | 95450ba5b25c9b739629669e2c1d76d0025611ca | [] | no_license | yangjufo/Personal-Website | 472f3b1d7a659b1412efa5b9f8b5f919abe32701 | 371acc3499d95e6a8977136ee99344f1a64cc272 | refs/heads/master | 2023-07-25T09:23:47.233295 | 2023-05-10T02:57:26 | 2023-05-10T02:57:26 | 161,396,911 | 3 | 0 | null | 2023-07-06T00:09:50 | 2018-12-11T21:37:17 | CSS | UTF-8 | Python | false | false | 1,487 | py | from django.shortcuts import render, get_object_or_404, redirect
from myblog.models import Post
from myprojects.models import Project
from .forms import PostCommentForm, ProjectCommentForm
def post_comment(request, post_pk):
    """Handle a comment submission for a blog post.

    A POST with a valid form saves the comment and redirects back to the
    post; a POST with an invalid form re-renders the detail page so the
    form errors are visible; any other method just redirects to the post.
    """
    post = get_object_or_404(Post, pk=post_pk)
    # Non-POST requests never create anything -- bounce straight back.
    if request.method != 'POST':
        return redirect(post)
    form = PostCommentForm(request.POST)
    if form.is_valid():
        # Attach the comment to its parent post before the real save.
        new_comment = form.save(commit=False)
        new_comment.post = post
        new_comment.save()
        return redirect(post)
    # Invalid form: redisplay the detail page with errors and all comments.
    context = {
        'post': post,
        'form': form,
        'comment_list': post.postcomment_set.all(),
    }
    return render(request, 'myblog/detail.html', context=context)
def project_comment(request, project_pk):
    """Handle a comment submission for a project.

    A POST with a valid form saves the comment and redirects back to the
    project; a POST with an invalid form re-renders the detail page so the
    form errors are visible; any other method just redirects to the project.
    """
    project = get_object_or_404(Project, pk=project_pk)
    # Non-POST requests never create anything -- bounce straight back.
    if request.method != 'POST':
        return redirect(project)
    form = ProjectCommentForm(request.POST)
    if form.is_valid():
        # Attach the comment to its parent project before the real save.
        new_comment = form.save(commit=False)
        new_comment.project = project
        new_comment.save()
        return redirect(project)
    # Invalid form: redisplay the detail page with errors and all comments.
    context = {
        'project': project,
        'form': form,
        'comment_list': project.projectcomment_set.all(),
    }
    return render(request, 'myprojects/detail.html', context=context)
| [
"yangjufo@gmail.com"
] | yangjufo@gmail.com |
ee47fcfdf7a59f221d9969062fb255d6a6681ada | 006341ca12525aa0979d6101600e78c4bd9532ab | /CMS/Zope-3.2.1/Dependencies/zope.index-Zope-3.2.1/zope.index/topic/interfaces.py | cbdcc8c606b9467acce77a49a2fdef85fec318c8 | [
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
] | permissive | germanfriday/code-examples-sandbox | d0f29e20a3eed1f8430d06441ac2d33bac5e4253 | 4c538584703754c956ca66392fdcecf0a0ca2314 | refs/heads/main | 2023-05-30T22:21:57.918503 | 2021-06-15T15:06:47 | 2021-06-15T15:06:47 | 377,200,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,798 | py | ##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Basic interfaces shared between different types of index.
$Id: interfaces.py 28610 2004-12-09 20:56:05Z jim $
"""
from zope.interface import Interface
class ITopicQuerying(Interface):
    """Query over topics, separated by white space."""
    def search(query, operator='and'):
        """Execute a search given by 'query' as a list/tuple of filter ids.
        'operator' can be 'and' or 'or' to search for matches in all
        or any filter.
        Return an IISet of docids
        """
class ITopicFilteredSet(Interface):
    """Interface for filtered sets used by topic indexes.

    A filtered set holds the docids of all indexed objects whose context
    satisfies the set's filter expression.
    """
    def clear():
        """Remove all entries from the index."""
    def index_doc(docid, context):
        """Add an object's info to the index."""
    def unindex_doc(docid):
        """Remove an object with id 'docid' from the index."""
    def getId():
        """Return the id of the filter itself."""
    def setExpression(expr):
        """Set the filter expression, e.g. "context.meta_type == '...'"."""
    def getExpression():
        """Return the filter expression."""
    def getIds():
        """Return an IISet of docids."""
| [
"chris@thegermanfriday.com"
] | chris@thegermanfriday.com |
fa9d903d5396f85df5627e200ca209ae558bc194 | 0d0cf0165ca108e8d94056c2bae5ad07fe9f9377 | /27_Machine_Learning_with_PySpark/4_Ensembles_And_Pipelines/delayedFlightsWithGradientBoostedTrees.py | 9de7abcfeab1b9f58f5255be9554eff99d04cc37 | [] | no_license | MACHEIKH/Datacamp_Machine_Learning_For_Everyone | 550ec4038ebdb69993e16fe22d5136f00101b692 | 9fe8947f490da221430e6dccce6e2165a42470f3 | refs/heads/main | 2023-01-22T06:26:15.996504 | 2020-11-24T11:21:53 | 2020-11-24T11:21:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | # Delayed flights with Gradient-Boosted Trees
# You've previously built a classifier for flights likely to be delayed using a Decision Tree. In this exercise you'll compare a Decision Tree model to a Gradient-Boosted Trees model.
# The flights data have been randomly split into flights_train and flights_test.
# Instructions
# 100 XP
# Import the classes required to create Decision Tree and Gradient-Boosted Tree classifiers.
# Create Decision Tree and Gradient-Boosted Tree classifiers. Train on the training data.
# Create an evaluator and calculate AUC on testing data for both classifiers. Which model performs better?
# Find the number of trees and the relative importance of features in the Gradient-Boosted Tree classifier.
# Import the classes required
from pyspark.ml.classification import DecisionTreeClassifier, GBTClassifier
from pyspark.ml.evaluation import BinaryClassificationEvaluator
# Create model objects and train on training data
# (`flights_train` / `flights_test` are supplied by the surrounding exercise
# environment -- presumably a pre-split Spark DataFrame pair; not defined here.)
tree = DecisionTreeClassifier().fit(flights_train)
gbt = GBTClassifier().fit(flights_train)
# Compare AUC on testing data
# (default metric of BinaryClassificationEvaluator is areaUnderROC)
evaluator = BinaryClassificationEvaluator()
evaluator.evaluate(tree.transform(flights_test))
evaluator.evaluate(gbt.transform(flights_test))
# NOTE(review): the two AUC values above are computed but never printed or
# stored, so the "comparison" is invisible when this script runs.
# Find the number of trees and the relative importance of features
print(gbt.trees)
print(gbt.featureImportances)
| [
"noreply@github.com"
] | MACHEIKH.noreply@github.com |
4d12ffae44bbe6022dbfa96ed98457df680a2200 | b656a1e124e5ece94a163c3cca1de8d8e70efd20 | /twip/plot.py | 6a9cafd62b2623b57dd81c39028f8ce075b43529 | [
"MIT"
] | permissive | zuxfoucault/twip | 3a456bdfdb4cb92f57399a3a02429e2eab94b75e | 8d276f30362f5f78d1761bc7249f621e6f86c994 | refs/heads/master | 2020-12-25T13:33:21.144259 | 2016-06-07T04:05:32 | 2016-06-07T04:05:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,979 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Plot functions like scatter_matrix and double-histograms"""
from __future__ import print_function, division
# from past.builtins import basestring
import json
import os
import logging
from collections import Mapping
from itertools import product
import datetime
import pandas as pd
from pandas.tools.plotting import scatter_matrix
from matplotlib import pyplot as plt
from twip.constant import DATA_PATH, IMAGES_PATH
log = logging.getLogger(__name__)
np = pd.np
def is_quantized(x, N=1000, distinct=0.1):
    """Heuristically decide whether values look quantized (few distinct levels).

    Examines the first ``N`` values (or all of them, if fewer) and calls the
    data quantized when the number of distinct values does not exceed
    ``distinct`` -- interpreted as a fraction of the sample when
    ``distinct <= 1``, otherwise as an absolute count.

    A DataFrame yields a list of per-column verdicts and a 2-D array a list
    of per-row verdicts.
    """
    if isinstance(x, pd.DataFrame):
        # One verdict per column; propagate the sampling parameters.
        return [is_quantized(x[c], N=N, distinct=distinct) for c in x.columns]
    elif isinstance(x, np.ndarray):  # was `np.ndarrayclass_or_type_or_tuple` (AttributeError)
        if len(x.shape) == 1:
            # Convert to a plain list before recursing; recursing with
            # np.array(x) (as before) looped forever on 1-D arrays.
            return is_quantized(x.tolist(), N=N, distinct=distinct)
        else:
            return [is_quantized(row, N=N, distinct=distinct) for row in x]
    else:
        N = min(N, len(x)) or len(x)
        if distinct <= 1:
            distinct = distinct * N
        M = len(set(x[:N]))
        return M <= distinct
def num_digits(x):
    """Number of digits required to display an integer value

    >>> num_digits(1000)
    4
    >>> num_digits(999)
    3
    >>> num_digits(0)
    1
    >>> num_digits(-1)
    1
    """
    # Exact string-length count.  The previous float/log implementation
    # relied on `np.math` (an accidental re-export removed from modern
    # NumPy) and could be off by one for very large integers that round
    # to a power of ten when converted to float.
    return len(str(abs(int(x))))
def compose_suffix(num_docs=0, num_topics=0, suffix=None):
    """Create a short, informative, but not-so-unique identifying string for a trained model

    If a str suffix is provided then just pass it through.

    >>> compose_suffix(num_docs=100, num_topics=20)
    '_100X20'
    >>> compose_suffix(suffix='_sfx')
    '_sfx'
    >>> compose_suffix(suffix='')
    ''
    >>> compose_suffix(suffix=None)
    '_0X0'
    """
    # `basestring` only exists in Python 2 (its past.builtins import is
    # commented out at the top of this module), so test against `str`.
    if not isinstance(suffix, str):
        suffix = '_{}X{}'.format(num_docs, num_topics)
    return suffix
def scatmat(df, category=None, colors='rgob',
            num_plots=4, num_topics=100, num_columns=4,
            show=False, block=False, data_path=DATA_PATH, save=False, verbose=1):
    """FIXME: empty plots that dont go away, Plot and/save scatter matrix in groups of num_columns topics

    Points are colored by `category`: a column name in `df`, an explicit
    sequence of labels, or (by default) the last column of `df`.  When
    `save` is truthy the figures are written to `data_path`; a str `save`
    is additionally used as the filename suffix (see compose_suffix).
    """
    if category is None:
        category = list(df.columns)[-1]
    if category in df.columns:
        category = df[category]
    else:
        category = pd.Series(category)
    suffix = compose_suffix(len(df), num_topics, save)
    save = bool(save)
    # Integer division: plain `/` produced a float under Python 3 and made
    # range() raise a TypeError.
    for i in range(min(num_plots * num_columns, num_topics) // num_plots):
        scatter_matrix(df[df.columns[i * num_columns:(i + 1) * num_columns]],
                       marker='+', c=[colors[int(x) % len(colors)] for x in category.values],
                       figsize=(18, 12))
        if save:
            # NOTE(review): `name` already ends in '.jpg' + suffix and another
            # '.jpg' is appended below -- looks unintended, but preserved.
            name = 'scatmat_topics_{}-{}.jpg'.format(i * num_columns, (i + 1) * num_columns) + suffix
            plt.savefig(os.path.join(data_path, name + '.jpg'))
        if show:
            if block:
                plt.show()
            else:
                plt.show(block=False)
def summarize_topics(f='lsi_topics.json', num_topics=1000, num_tokens=10, column_width=10, do_print=True, justify=True, data_path=DATA_PATH):
    """Load json file containing topic key/value pairs and print the top m words for the top n features

    `f` may be a path/filename (resolved against `data_path` when it contains
    no separator), an already-open file of JSON, a DataFrame whose last column
    holds the topics, a mapping of {key: topic}, or a plain sequence of
    topics.  Each topic is a {token: weight} mapping; tokens are listed in
    order of decreasing |weight|, prefixed with '-'/'+' for the weight sign.

    Returns the summary as a list of lines (also printed when `do_print`).
    """
    if isinstance(f, str):  # `basestring` was Python-2 only
        if os.path.sep not in f:
            f = os.path.expanduser(os.path.join(data_path, f))
        f = open(f, 'rb')  # 'rUb' is an invalid mode in Python 3
    if isinstance(f, pd.DataFrame):
        f = list(np.array(f[f.columns[-1]]))
    elif hasattr(f, 'read'):  # the `file` builtin no longer exists
        f = json.load(f)
    if isinstance(f, Mapping):
        f = [v for k, v in sorted(f.items())]
    topics = list(f)
    s = ''
    # Width needed to right-align the largest topic index.
    digits = num_digits(min(len(topics), num_topics) - 1)
    for i, t in enumerate(topics):
        if i > num_topics:
            break
        t_sorted = sorted(t.items(), key=lambda x: -abs(x[1]))[:num_tokens]
        line = '{:{}d}: {}'.format(i, digits, ' '.join(('-+'[int(v > 0)] + '{:{}s}'.format(k[:column_width], column_width) for (k, v) in t_sorted)))
        if not justify:
            line = ' '.join([col for col in line.split(' \t') if col])
        s += line + '\n'
    if do_print:
        print(s)
    return s.split('\n')[:-1]  # get rid of last empty string for last newline
def df_from_groups(groups, columns=None):
    """Create DataFrame of GroupBy object with columns for each product(grouped_value, column_label)

    Each output column is named '<column>_<group label>'.  Groups of
    different sizes are aligned on a fresh RangeIndex, so shorter groups
    are padded with NaN.
    """
    if columns is None:
        # dict views are not indexable under Python 3; `keys()[0]` raised
        # a TypeError here -- take the first key via an iterator instead.
        first_group = next(iter(groups.indices))
        columns = list(groups.get_group(first_group).columns)
    df = pd.DataFrame()
    for col, group_label in product(columns, groups.indices.keys()):
        label = '{}_{}'.format(col, group_label)
        # Re-wrap the raw values as a fresh Series so the original group
        # indices are discarded before alignment.
        df[label] = pd.Series(groups.get_group(group_label)[col].values)
    return df
def groups_from_scores(df, groupby='dustin', threshold=0.7):
    """Binarize a score column in-place and return the resulting GroupBy.

    Scores below ``threshold`` become 0 and the rest become 1; pass
    ``threshold=None`` to skip the binarization.  When ``groupby`` is None
    the right-most column that looks quantized is used.

    NOTE: mutates ``df`` in place.
    """
    if groupby is None:
        # Pick the last (right-most) quantized-looking column.
        for col in reversed(df.columns):
            if is_quantized(df[col]):
                break
        groupby = col
    if threshold is not None:
        # `.ix` was removed in pandas 1.0; `.loc` does the same
        # label/boolean-based assignment.
        df.loc[df[groupby] < threshold, groupby] = 0
        df.loc[df[groupby] >= threshold, groupby] = 1
    return df.groupby(groupby)
def score_hist(df, columns=None, groupby='dustin', threshold=0.7, stacked=True,
               bins=20, percent=True, alpha=0.33, show=True, block=False, save=False):
    """Plot multiple histograms on one plot, typically of "score" values between 0 and 1
    Typically the groupby or columns of the dataframe are the classification categories (0, .5, 1)
    And the values are scores between 0 and 1.
    """
    # Restrict to the requested columns (keeping the groupby column so the
    # grouping below still works); copy to avoid mutating the caller's frame.
    df = df if columns is None else df[([] if groupby is None else [groupby]) + list(columns)].copy()
    if groupby is not None or threshold is not None:
        # Binarize + group, then pivot each (column, group) pair into its
        # own column via df_from_groups.
        df = groups_from_scores(df, groupby=groupby, threshold=threshold)
    percent = 100. if percent else 1.
    # NOTE(review): pd.core.groupby.DataFrameGroupBy is a private pandas
    # path; confirm it still resolves on the pandas version in use.
    if isinstance(df, pd.core.groupby.DataFrameGroupBy):
        df = df_from_groups(df, columns=columns) * percent
    columns = df.columns if columns is None else columns
    if bins is None:
        bins = 20
    # An int `bins` is expanded to evenly spaced bin edges over the data range.
    if isinstance(bins, int):
        bins = np.linspace(np.min(df.min()), np.max(df.max()), bins)
    log.debug('bins: {}'.format(bins))
    figs = []
    df.plot(kind='hist', alpha=alpha, stacked=stacked, bins=bins)
    # for col in df.columns:
    # series = df[col] * percent
    # log.debug('{}'.format(series))
    # figs.append(plt.hist(series, bins=bins, alpha=alpha,
    # weights=percent * np.ones_like(series) / len(series.dropna()),
    # label=stringify(col)))
    plt.legend()
    plt.xlabel('Score (%)')
    plt.ylabel('Percent')
    plt.title('{} Scores for {}'.format(np.sum(df.count()), columns))
    plt.draw()
    if save or not show:
        # Timestamped filename; format() simply ignores the extra
        # timetuple() fields beyond the five placeholders.
        fig = plt.gcf()
        today = datetime.datetime.today()
        fig.savefig(os.path.join(IMAGES_PATH, 'score_hist_{:04d}-{:02d}-{:02d}_{:02d}{:02d}.jpg'.format(*today.timetuple())))
    if show:
        plt.show(block=block)
    # NOTE(review): `figs` is never populated (the per-column plt.hist loop
    # is commented out above), so this always returns an empty list.
    return figs
| [
"github@totalgood.com"
] | github@totalgood.com |
36fc8913d91d327993f77840c214a9f843d9fcb4 | 8fef8af953e8dafde78c671e8ee9813d08ab2d60 | /strings/Easy/find_duplicates.py | 12bc45e728ec2cfae818691c431cca5b61333813 | [
"MIT"
] | permissive | htrahddis-hub/DSA-Together-HacktoberFest | 037b009c744863070e0f1b61167c18f9101335f2 | a5c6165c449c5b5b91e56815f2a38d5fd23bf354 | refs/heads/main | 2023-08-23T18:52:55.654386 | 2021-10-17T15:45:14 | 2021-10-17T15:45:14 | 418,180,825 | 1 | 0 | MIT | 2021-10-17T15:56:21 | 2021-10-17T15:56:21 | null | UTF-8 | Python | false | false | 351 | py | from collections import Counter
def calc_char_freq(string):
    """Print every character of *string* that occurs more than once, one
    per line, formatted as "(char, count)"."""
    for char, count in Counter(string).items():
        if count > 1:
            print("({}, {})".format(char, count))
myStr = 'Hello World. Let’s learn DSA '
calc_char_freq(myStr) | [
"dsrao0712@gmail.com"
] | dsrao0712@gmail.com |
43b84b14b189c4954bb13d5371c06e1c0f50eb22 | 8a6cf531ed72310d7114237407302ef075171937 | /ores/scoring_systems/__init__.py | ba61b56ea203c54f40bd7b8909fd15c742c58266 | [
"MIT"
] | permissive | ureesoriano/ores | 64a7f3c8a8917fe33449302c55cff23952a5719c | dda9db6c8737d12acbae5b0d43938d93c9e7ea8e | refs/heads/master | 2020-03-17T21:54:12.610518 | 2018-05-20T08:36:13 | 2018-05-20T08:36:13 | 133,980,352 | 0 | 0 | MIT | 2018-05-18T16:43:18 | 2018-05-18T16:43:18 | null | UTF-8 | Python | false | false | 225 | py | from .scoring_system import ScoringSystem
from .single_thread import SingleThread
from .process_pool import ProcessPool
from .celery_queue import CeleryQueue
# __all__ must list export *names* (strings); listing the class objects
# themselves breaks `from ores.scoring_systems import *` and static tooling.
__all__ = ["ScoringSystem", "SingleThread", "ProcessPool", "CeleryQueue"]
| [
"aaron.halfaker@gmail.com"
] | aaron.halfaker@gmail.com |
fe983a1b1b3fa99006ce8cd9827b2a2735e71c87 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/192/usersdata/267/70602/submittedfiles/al6.py | 35e4de59840cc412a740ba5edebdfc41a479e7d8 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | # -*- coding: utf-8 -*-
n=int(input("Forneça um número: "))
# Trial division (repetição): count how many candidates in 2..n-1 divide n.
i=2
div=0
print()
print()
while i<n:
    if (n%i)==0:
        div=div+1
    i=i+1
# Number of proper divisors greater than 1 (0 for a prime).
print(div)
# NOTE(review): any n <= 2 skips the loop and is reported as prime,
# including n = 1, which is not prime -- confirm intended behaviour.
if div>0:
    print("NÃO É PRIMO")
else:
    print("É PRIMO")
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
def fractions(numerator, denominator):
    """Render numerator/denominator as a decimal string.

    A zero denominator falls back to str(numerator), exact divisions use
    integer division, and any quotient with more than one decimal digit is
    abbreviated as "<int>.(<first decimal digit>)" -- a rough repeating-
    decimal notation.  Prints the raw float string as a side effect before
    abbreviating.
    """
    if denominator == 0:
        return str(numerator)
    quotient = numerator / denominator
    if not numerator % denominator:
        return str(numerator // denominator)
    text = str(quotient)
    print(text)
    whole, _, decimals = text.partition(".")
    if len(decimals) > 1:
        return "{}.({})".format(whole, decimals[0])
    return text
def frac(numerator, denominator):
    """Return numerator/denominator as an exact decimal string.

    A repeating decimal wraps its repetend in parentheses, e.g.
    frac(4, 333) == '0.(012)'.  Returns '0' for a zero numerator and
    'undefined' for a zero denominator (checked in that order, matching
    the original).

    The previous draft recorded remainders but never used the repeat
    position, printed debug noise, fell off the end (returning None) for
    repeating decimals, and returned float strings like '2.0' for exact
    divisions; this version uses the classic remainder-position
    bookkeeping and returns '2' in that case.
    """
    if numerator == 0:
        return "0"
    if denominator == 0:
        return "undefined"
    res = ""
    if (numerator < 0) != (denominator < 0):
        res += "-"
    numerator, denominator = abs(numerator), abs(denominator)
    res += str(numerator // denominator)
    rem = numerator % denominator
    if rem == 0:
        return res  # exact division: no fractional part
    res += "."
    # Map each remainder to the index in `res` where its digit starts;
    # seeing the same remainder twice means the digits repeat from there.
    seen = {}
    while rem and rem not in seen:
        seen[rem] = len(res)
        rem *= 10
        res += str(rem // denominator)
        rem %= denominator
    if rem:
        start = seen[rem]
        res = res[:start] + "(" + res[start:] + ")"
    return res
print(frac(4,333)) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
aa7983cc344204bb68f7c905587ff50d61e3320d | f0e0c1637f3b49fd914410361c3f1f3948462659 | /Python/Regex and Parsing/validating_roman_numerals.py | 6e6e70c00a2a5dadfd455ab5441aff6701e5c06a | [] | no_license | georggoetz/hackerrank-py | 399bcd0599f3c96d456725471708068f6c0fc4b1 | a8478670fcc65ca034df8017083269cb37ebf8b0 | refs/heads/master | 2021-09-18T07:47:32.224981 | 2018-07-11T09:24:49 | 2018-07-11T09:24:49 | 111,611,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | # http://www.hackerrank.com/contests/python-tutorial/challenges/validate-a-roman-number
import re
str = input()
pattern = r'^M{0,3}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$'
print(bool(re.search(pattern, str)))
| [
"GeorgG@haufe.com"
] | GeorgG@haufe.com |
511a20420067239ae966f105b7dc8469af80faf0 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/InnerDetector/InDetDigitization/FastTRT_Digitization/share/TRT_Digitization_jobOptions.py | dabd823dd8cfdaf5cf08b314808d3c024438f673 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | ###############################################################
#
# TRT Digitization
#
#==============================================================
# Athena jobOptions fragment: schedule the fast TRT digitization algorithm.
from Digitization.DigitizationFlags import jobproperties  # NOTE(review): imported but unused here
from AthenaCommon.AlgSequence import AlgSequence
job = AlgSequence()  # the job's top-level algorithm sequence
from AthenaCommon.CfgGetter import getAlgorithm
# Append the configured TRTFastDigitization algorithm to the sequence,
# falling back to the default configurable when no specific one is registered.
job += getAlgorithm("TRTFastDigitization/TRTFastDigitization", tryDefaultConfigurable=True)
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
b0bfb4b1de7723ff6f2da7d92bd4c40bb9712c1f | b5fabc6c6de064690f8d4ee423001cf9365a3d9f | /flash/text/seq2seq/summarization/cli.py | ced60edc545de164c5b61c473d99c11b39d5cbc8 | [
"Apache-2.0"
] | permissive | dmarx/lightning-flash | 021dfd76bde6e30309f14feb5853020b0babe90d | 4cda031c1f9c8d8754fd36b5720d2a5a7d866765 | refs/heads/master | 2023-09-06T06:24:29.856354 | 2021-11-24T23:38:14 | 2021-11-24T23:38:14 | 422,352,910 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,815 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flash.core.data.utils import download_data
from flash.core.utilities.flash_cli import FlashCLI
from flash.text import SummarizationData, SummarizationTask
__all__ = ["summarization"]
def from_xsum(
    backbone: str = "sshleifer/distilbart-xsum-1-1",
    batch_size: int = 4,
    num_workers: int = 0,
    **input_transform_kwargs,
) -> SummarizationData:
    """Downloads and loads the XSum data set."""
    download_data("https://pl-flash-data.s3.amazonaws.com/xsum.zip", "./data/")
    # Collect the fixed loader settings, then forward any extra
    # input-transform kwargs alongside them.
    loader_kwargs = dict(
        train_file="data/xsum/train.csv",
        val_file="data/xsum/valid.csv",
        backbone=backbone,
        batch_size=batch_size,
        num_workers=num_workers,
    )
    return SummarizationData.from_csv(
        "input", "target", **loader_kwargs, **input_transform_kwargs
    )
def summarization():
    """Summarize text."""
    # Defaults baked into the CLI when the user supplies no overrides.
    default_arguments = {
        "trainer.max_epochs": 3,
        "model.backbone": "sshleifer/distilbart-xsum-1-1",
    }
    cli = FlashCLI(
        SummarizationTask,
        SummarizationData,
        default_datamodule_builder=from_xsum,
        default_arguments=default_arguments,
        legacy=True,
    )
    # Persist the trained model once the CLI run completes.
    cli.trainer.save_checkpoint("summarization_model_xsum.pt")


if __name__ == "__main__":
    summarization()
| [
"noreply@github.com"
] | dmarx.noreply@github.com |
6500ddcef45ebb60f4859b4e18e90d126ff762a4 | cf14275eb2ad7a50da0f482ead52e12168e7de6f | /gs27_SerachFilter/gs27_SerachFilter/settings.py | 2779df72d0bd677fe4f7d0b7da2ff1a51fbee6bd | [] | no_license | balamurali1/Environment | 319c4087de011949f405d78a43a15b45b04efb05 | f5312d56f102423cfb11900cfa99775ffa4f67c5 | refs/heads/master | 2023-09-04T06:56:20.449830 | 2021-10-30T09:13:00 | 2021-10-30T09:13:00 | 420,183,269 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,417 | py | """
Django settings for gs27_SerachFilter project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-@$nr*!aa%c72tk1e#l_-k(#wqg_dz_4b7c%!c%%njri!jew$^$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_filters',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gs27_SerachFilter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gs27_SerachFilter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Rename DRF's search query parameter from the default "search" to "q"
# (original note, transliterated Telugu: "changing the Search (basename)").
REST_FRAMEWORK={
    'SEARCH_PARAM':'q'
}
| [
"balamurali1@gmail.com"
] | balamurali1@gmail.com |
7308f3706181e418776a05680b7eeb0949a38651 | 43a1e9c15132398433ef1bd941e49eb0372136e6 | /day30/re_ex/findall_ex.py | 3adeee7d9bbb535c13bf69d27566c50c934f5cd0 | [] | no_license | dlatnrud/pyworks | 3eaf253f7e9cf74e6504770885e4a63fd1c4e293 | 745ae5c6a85015800d049176b7d5aeb0df0f000a | refs/heads/master | 2023-08-12T16:14:50.936403 | 2021-10-15T00:48:04 | 2021-10-15T00:48:04 | 402,286,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | # findall()함수 - 내용을 리스트로 반환
import re
str = "Two is too"
m1 = re.findall("T[ow]o", str)
print(m1)
m2 = re.findall("T[ow]o", str, re.IGNORECASE) # 대,소문자 허용
print(m2)
pat = re.compile("T[^o]o")
m3 = re.findall(pat, str)
print(m3) | [
"dlatnrud2268@naver.com"
] | dlatnrud2268@naver.com |
76e1b899a9413afa79402e6df099cb153c5158f4 | 4f8ac283115a41e057f86b99147e94776b9ee08a | /Arrays/chocolate_distribution_problem.py | 66cee3f886eb35f3a65910c19e87fdee63aa6b1a | [] | no_license | Saswati08/Data-Structures-and-Algorithms | 107d6582c749c7f4fa7fb951ad2930f37bbbe3b7 | 1338aafa79b5f908ed4e0a5c842fcd12f0bd4d9c | refs/heads/master | 2022-12-17T02:30:39.409611 | 2020-09-09T13:43:06 | 2020-09-09T13:43:06 | 275,190,376 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | #Given an array A of positive integers of size N, where each value represents number of chocolates in a packet. Each packet can have variable number of chocolates. There are M students, the task is to distribute chocolate packets such that :
#1. Each student gets one packet.
#2. The difference between the number of chocolates given to the students having packet with maximum chocolates and student having packet with minimum chocolates is minimum.
t = int(input())
for i in range(t):
n = int(input())
a = [int(x) for x in input().split()]
k = int(input())
a.sort()
# print(t, n, a, k)
min_d = a[n - 1]
for j in range(n - k + 1):
if a[j + k - 1] - a[j] < min_d:
min_d = a[j + k - 1] - a[j]
print(min_d)
| [
"saswati18015@iiitd.ac.in"
] | saswati18015@iiitd.ac.in |
972ed5c80889b58b8ccdd0a5c149b1bb5b46135c | 6b9840743e961a9ce2f264848247d450f0c9f399 | /subarray/maximum_product_subarray.py | 55f4ee956ddcd1417e2ca6d2ae5154d297e08afa | [] | no_license | jingjinghaha/LeetCode | 699a91a511b3886efd217ab36c21414bf059f767 | 4b3944ae13ccf20e9df252f3c434f6600878293c | refs/heads/master | 2021-08-26T06:39:02.481065 | 2017-11-21T21:52:33 | 2017-11-21T21:52:33 | 105,195,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | '''
Find the contiguous subarray within an array (containing at least one number) which has the largest product.
For example, given the array [2,3,-2,4],
the contiguous subarray [2,3] has the largest product = 6.
'''
class Solution(object):
    def maxProduct(self, nums):
        """Return the largest product of any contiguous subarray of nums.

        Tracks both the maximum and minimum product of a subarray ending at
        the current index, because multiplying by a negative number swaps
        their roles.  Runs in O(n) time, O(1) extra space.

        :type nums: List[int]
        :rtype: int (None for an empty input, matching the original contract)
        """
        # Fixes: removed the unused `import sys`; replaced the 3-line manual
        # swap with a tuple swap.
        if not nums:
            return None
        largest = local_max = local_min = nums[0]
        for value in nums[1:]:
            if value < 0:
                # A negative factor turns the smallest product into the
                # largest (and vice versa).
                local_max, local_min = local_min, local_max
            # Either extend the best/worst running product or restart at value.
            local_max = max(value, value * local_max)
            local_min = min(value, value * local_min)
            largest = max(largest, local_max)
        return largest
| [
"wufangjing1018@gmail.com"
] | wufangjing1018@gmail.com |
04d6b37a3d4041db6e899f10a2a460f6146896c3 | d3308f888e9bb43647f03e8bdecbe37b90bcfcc8 | /main.py | 7b21b62fdec1886cb8299a0ce7b1d6b55a9ab8a6 | [
"MIT"
] | permissive | AndreMiras/p4a-service-sticky | 2b83b9fcecf8b4bc7297119969c87eede457e192 | 68e2d4d6a3dd712368610e7c1e2618c14ff39bb8 | refs/heads/master | 2020-03-28T12:44:23.134229 | 2018-09-14T18:16:39 | 2018-09-14T18:16:39 | 148,329,241 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | from kivy.app import App
from kivy.uix.label import Label
from kivy.utils import platform
class MyApp(App):
    """Minimal Kivy app that starts a bundled Android service on launch."""
    def build(self):
        # Kick off the background service, then show a single label as the UI.
        self.start_service()
        return Label(text='Hello world')
    def start_service(self):
        """Start the python-for-android service; a no-op off Android."""
        if platform == 'android':
            from jnius import autoclass
            # p4a generates the Java service class as
            # <package_domain>.<package_name>.Service<ServiceName>.
            package_name = 'myapp'
            package_domain = 'org.test'
            service_name = 'service'
            service_class = '{}.{}.Service{}'.format(
                package_domain, package_name, service_name.title())
            service = autoclass(service_class)
            mActivity = autoclass('org.kivy.android.PythonActivity').mActivity
            argument = ''  # optional string handed to the service entry point
            service.start(mActivity, argument)
if __name__ == '__main__':
    MyApp().run()
| [
"andre.miras@gmail.com"
] | andre.miras@gmail.com |
0fcb8df1f46d278fd02afacb9968518050f8b6b6 | 95cdf7753fc4022be239666a902df217a93f7125 | /po_selenium_1/page_object/search_page.py | 198d9d6dedd1e8cb677692edd19e8665eec3ad7f | [] | no_license | he9mei/python_appium | ffe1b872d3732b9e8510da0dd24f7a791c534be0 | 9fc5dcb67769d2d103756b9fca82d2cfeae40e72 | refs/heads/master | 2022-06-01T03:24:36.821931 | 2022-05-22T07:55:58 | 2022-05-22T07:55:58 | 223,714,095 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | from selenium.webdriver.common.by import By
from po_selenium_1.base.base_page import Base
from time import sleep
class Search(Base):
    """Page object for a search page (ids 'kw'/'su' suggest Baidu -- confirm)."""
    # Element locators: "kw" is the search input box, "su" the submit button.
    search_input_box=(By.ID,"kw")
    search_button=(By.ID,"su")
    def input_text(self,text):
        # Type the query text into the search box.
        self.locator_element(*self.search_input_box).send_keys(text)
    def click_button(self):
        # Click the search (submit) button.
        self.locator_element(*self.search_button).click()
    def search_text(self,url,text):
        # Composite "keyword": open the page, enter the query, submit, wait.
        self.open_browser(url)
        self.input_text(text)
        self.click_button()
        sleep(1)
    # Translation of the note below: the search flow can also be wrapped into
    # a single function and called directly from test cases, like a keyword;
    # it could of course live in the test case itself, but when several test
    # cases need searching, sharing it here avoids duplicated code.
    '''
    #也可以进一步把搜索功能写成一个函数,测试用例中直接调用,类似于一个关键字。
    #当然也可以测试用例中再写。
    #如果多个测试用例都需要搜索功能的化,为了避免代码冗余就可以这样写。
    '''
| [
"396167189@qq.com"
] | 396167189@qq.com |
ed4048b89ae310e1f702b436107098dee49631f7 | cd6fc5c18bbda744c87ca684e2cc2201bae8f01d | /salesforce/models.py | 1436a6b1d9c6ddcb3f447233eab0a9c97dba4a1c | [
"Apache-2.0"
] | permissive | apankit2490/django-heroku-connect-sample | ee8c88101ec1cbdb01d5fb7c286b28728931afdd | a5b2d8e5abf5dff23880e38c03dad42db26ebd56 | refs/heads/master | 2021-03-15T00:19:37.662820 | 2020-03-12T11:08:34 | 2020-03-12T11:08:34 | 246,806,943 | 0 | 0 | Apache-2.0 | 2020-03-12T10:32:10 | 2020-03-12T10:32:10 | null | UTF-8 | Python | false | false | 456 | py | from heroku_connect.db import models as hc_models
class User(hc_models.HerokuConnectModel):
    """Mapping of the Salesforce ``User`` object synced via Heroku Connect."""
    sf_object_name = 'User'
    # Each field maps a local column to the Salesforce field named in
    # sf_field_name.
    username = hc_models.Text(
        sf_field_name='Username', max_length=80)
    email = hc_models.Email(sf_field_name='Email')
    department = hc_models.Text(
        sf_field_name='Department', max_length=80)
    title = hc_models.Text(sf_field_name='Title', max_length=80)
    def __str__(self):
        # Human-readable identity (Django admin, shells, logging).
        return self.username
| [
"info@johanneshoppe.com"
] | info@johanneshoppe.com |
c039a38c35baaa8d09e5bf7663c377cabbdc0e2e | b4bc264e22469db4af12fb33fc7df33a1c4dde73 | /soln.py | 4967413c2554a9c1453d497a87f200e1fb0d9bf7 | [] | no_license | GoodnessEzeokafor/computation_with_python | 6d3e420e7de7275acf4efa3b3896473e3933f9c9 | e6f8e7215513c9e00fb94618aee34293388facca | refs/heads/master | 2020-04-03T23:07:18.412780 | 2018-10-31T20:30:35 | 2018-10-31T20:30:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | if __name__ == '__main__':
x = int(input("Enter a value for x: "))
pwr = 1
root = 0
while pwr <= 6 and pwr > 0:
pwr += 1
while root ** pwr < abs(x):
root += 1
if root ** pwr == abs(x):
print("Root of", str(x), 'is', root, "Raised to the power", pwr)
| [
"gootech442@gmail.com"
] | gootech442@gmail.com |
baaab178a23fdd519ba959e71d0c6bdb8bdb716c | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/scitbx/examples/bevington/SConscript | 2e74a3ae0bf59255a434640ed2839adb375c2ae7 | [
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 1,512 | import libtbx.load_env
import os
Import("env_base", "env_etc")
try:
env_etc.eigen_dist = os.path.abspath(os.path.join(libtbx.env.dist_path("boost"),"../eigen"))
if os.path.isdir(env_etc.eigen_dist):
env_etc.eigen_include = env_etc.eigen_dist
env_etc.scitbx_ex_bev_common_includes = [
env_etc.eigen_include,
env_etc.libtbx_include,
env_etc.scitbx_include,
env_etc.boost_include,
]
env = env_base.Clone(SHLINKFLAGS=env_etc.shlinkflags)
env.Append(LIBS=["cctbx"] + env_etc.libm)
env_etc.include_registry.append(
env=env,
paths=env_etc.scitbx_ex_bev_common_includes)
if (env_etc.static_libraries): builder = env.StaticLibrary
else: builder = env.SharedLibrary
# future expansion, create static library
#builder(
# target="#lib/scitbx_ex_bev",
# source=["scitbx_ex_bev_core.cpp"]
# )
if (not env_etc.no_boost_python):
Import("env_boost_python_ext")
env_scitbx_ex_bev_boost_python_ext = env_boost_python_ext.Clone()
# env_scitbx_ex_bev_boost_python_ext.Prepend(
# LIBS=["scitbx_ex_bev",])
env_scitbx_ex_bev_boost_python_ext.SharedLibrary(
target="#lib/scitbx_examples_bevington_ext", source="bevington_ext.cpp")
env_etc.include_registry.append(
env=env_scitbx_ex_bev_boost_python_ext,
paths=env_etc.scitbx_ex_bev_common_includes)
Export("env_scitbx_ex_bev_boost_python_ext")
except Exception:
pass
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com | |
7797fced9b8e71e1679b05d62c84f622537d3d51 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adverbs/_together.py | f85f759ad26eafe0be70aa27693660958a3c3bb0 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py |
#class header
class _TOGETHER():
def __init__(self,):
self.name = "TOGETHER"
self.definitions = [u'with each other: ', u'If two people are described as together, they have a close romantic and often sexual relationship with each other: ', u'If two people get together or get it together, they start a sexual relationship with each other: ', u'at the same time: ', u'combined: ', u'in one place: ', u'in addition to; and also: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adverbs'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
09931c88d478bb7afea7997aed64f06e7c71c573 | f95d2646f8428cceed98681f8ed2407d4f044941 | /Demo-spider/So/So/items.py | 5ce9e500728b3f3b9344e8ebba47bfec3ee185db | [] | no_license | q2806060/python-note | 014e1458dcfa896f2749c7ebce68b2bbe31a3bf8 | fbe107d668b44b78ae0094dbcc7e8ff8a4f8c983 | refs/heads/master | 2020-08-18T01:12:31.227654 | 2019-10-17T07:40:40 | 2019-10-17T07:40:40 | 215,731,114 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SoItem(scrapy.Item):
    """Scrapy item holding one scraped image record."""
    # define the fields for your item here like:
    # URL of the image (translated from: 图片的链接).
    imgLink = scrapy.Field()
| [
"C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn"
] | C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn |
76efe8b92384a5af9bdfbc43be166f2f3c7e3924 | 90b2b0d37414be6396751f81fbebcdad1be6b8d5 | /EthiopiaDrought/CMIP_CHIRPS_AnomalyMapGen原始值_Big.py | 0a6f1203fd49a7d8f75a093fe47e27ee3b5a82d0 | [] | no_license | Simonhong111/ETDROUGHT | 19b0196deb0c5c8e1796efee4f33002b21b1d885 | edf8b7d88df44a07805d1c4c1240325a7394f059 | refs/heads/master | 2022-12-21T04:59:08.653864 | 2020-09-27T20:38:47 | 2020-09-27T20:38:47 | 299,108,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,306 | py | from osgeo import gdal,osr,ogr
import os
import glob
import numpy as np
import pandas as pd
import h5py
from netCDF4 import Dataset
from dateutil import rrule
from datetime import *
from matplotlib import cm
from matplotlib import pyplot as plt
from scipy import signal
def clipbyshp(input_raster,output_raster,input_shape, dstNodata=-9999):
    """
    Clip a raster to the outline of a vector file using gdal.Warp.

    :param input_raster: the raster (path or dataset) being clipped
    :param output_raster: path where the clipped GTiff is written
    :param input_shape: vector file (e.g. shapefile) used as the cutline
    :param dstNodata: nodata value written outside the cutline (default -9999)
    :return: none (the result is written to output_raster)
    """
    ds = gdal.Warp(output_raster,
                   input_raster,
                   format='GTiff',
                   cutlineDSName=input_shape,  # or any other file format
                   # cutlineDSName=None,
                   # cutlineWhere="FIELD = 'whatever'",
                   # optionally you can filter your cutline (shapefile) based on attribute values
                   cropToCutline=True,
                   dstNodata=dstNodata)  # select the no data value you like
    # Drop the reference so GDAL flushes and closes the output dataset.
    ds = None
def write_Img(data, path, proj, geotrans,im_width, im_heigth,im_bands=1, dtype=gdal.GDT_Float32):
    """
    Write an array to a GeoTIFF at `path` with the given projection and
    geotransform.  `data` is (rows, cols) for a single band, or
    (rows, cols, bands) when im_bands > 1.

    NOTE(review): the misspelt parameter name `im_heigth` is kept because
    callers may pass it by keyword.
    """
    driver = gdal.GetDriverByName("GTiff")
    dataset = driver.Create(path, im_width, im_heigth, im_bands, dtype)
    dataset.SetGeoTransform(geotrans)
    dataset.SetProjection(str(proj))
    if im_bands ==1:
        dataset.GetRasterBand(1).WriteArray(data)
    else:
        for id in range(im_bands):
            # print("**********")
            # GDAL bands are 1-indexed; write slice `id` of the cube.
            dataset.GetRasterBand(id+1).WriteArray(data[:,:,id])
    # Deleting the dataset flushes the cache and closes the file.
    del dataset
def chirpsAnomMap(chirpsdirectory,yy,mm):
    """Load the CHIRPS rainfall raster for year `yy`, season key `mm` as an array."""
    chirps_file = os.path.join(chirpsdirectory,"chirps-v2.0.{}_{}.tif".format(mm,str(yy)))
    chirps_raster = gdal.Open(chirps_file).ReadAsArray()
    return chirps_raster
# anomalyMap = chirpsAnomMap(r"D:\Cornell\EthiopianDrought\ChirpsDailyMonth",2009,'short')
# anomalyMap[anomalyMap==-9999] = np.nan
# plt.imshow(anomalyMap)
# plt.colorbar()
# plt.show()
def chirpsPVIAnomMap(chirpsdirectory,yy,mm):
    """Load the CHIRPS-derived PVI raster for year `yy`, season key `mm` as an array."""
    chirps_file = os.path.join(chirpsdirectory,"{}_pvi_{}.tif".format(mm,str(yy)))
    chirps_raster = gdal.Open(chirps_file).ReadAsArray()
    return chirps_raster
def cmip5AnomMap(chirpsdirectory,yy,mm):
    """Load the CMIP5 rainfall raster for year `yy`, season key `mm` as an array."""
    chirps_file = os.path.join(chirpsdirectory,"cmip5_{}_{}.tif".format(mm,str(yy)))
    chirps_raster = gdal.Open(chirps_file).ReadAsArray()
    return chirps_raster
# anomalyMap = chirpsAnomMap(r"D:\Cornell\EthiopianDrought\ChirpsDailyMonth",2009,'short')
# anomalyMap[anomalyMap==-9999] = np.nan
# plt.imshow(anomalyMap)
# plt.colorbar()
# plt.show()
def cmip5PVIAnomMap(chirpsdirectory,yy,mm):
    """Load the CMIP5-derived PVI raster for year `yy`, season key `mm` as an array."""
    chirps_file = os.path.join(chirpsdirectory,"{}_pvi_{}.tif".format(mm,str(yy)))
    chirps_raster = gdal.Open(chirps_file).ReadAsArray()
    return chirps_raster
ref_path = r"D:\Cornell\EthiopianDrought\AData\CMIP5PVI\Big\long_pvi_2006.tif"
ref_raster = gdal.Open(ref_path)
geo_t = ref_raster.GetGeoTransform()
# 计算矢量边界
daShapefile = r"D:\Cornell\EthiopianDrought\ETH_outline_SHP\ETH_outline.shp"
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(daShapefile, 0)
layer = dataSource.GetLayer()
feature = layer.GetFeature(0)
geo = feature.GetGeometryRef()
geo = str(geo).split("((")[1].split("))")[0].split(",")
x = []
y = []
for term in geo:
x.append(float(term.split(" ")[0]))
y.append(float(term.split(" ")[1]))
x = np.array(x)
y = np.array(y)
x = (x - geo_t[0]) / geo_t[1]
y = (y - geo_t[3]) / geo_t[5]
# plt.imshow(ref_raster.ReadAsArray())
# # plt.colorbar()
# plt.plot(x,y)
# plt.show()
yy = str(2006)
mm = 'short'
cm_rf_path = r"D:\Cornell\EthiopianDrought\CMIPMonth\Big"
cm_pvi_path = r"D:\Cornell\EthiopianDrought\AData\CMIP5PVI\Big"
ch_rf_path = r"D:\Cornell\EthiopianDrought\ChirpsDailyMonth\Big"
ch_pvi_path = r"D:\Cornell\EthiopianDrought\AData\PVIDaily\Big"
chrf = chirpsAnomMap(ch_rf_path,yy,mm)
chpvi = chirpsPVIAnomMap(ch_pvi_path,yy,mm)
cmrf = cmip5AnomMap(cm_rf_path,yy,mm)
cmpvi = cmip5PVIAnomMap(cm_pvi_path,yy,mm)
print(cmrf.max(),cmrf.min())
print(chrf.max(),chrf[chrf >-9999].min())
print(cmpvi.max(),cmpvi.min())
print(chpvi.max(),chpvi[chpvi>-9999].min())
chrfmax,chrfmin = chrf.max(),chrf[chrf >-9999].min()
chpvimax,chpvimin = chpvi.max(),chpvi[chpvi>-9999].min()
cmrfmax,cmrfmin = cmrf.max(),cmrf.min()
cmpvimax,cmpvimin = cmpvi.max(),cmpvi.min()
fig = plt.figure(figsize=(10, 10))
plt.title("{} {} rains Original Value Map ".format(yy,mm) + '\n', fontsize=16)
plt.xticks([])
plt.yticks([])
ax1 = fig.add_subplot(2, 2, 1)
ax1.set_title("{} Rains CMIP5 Rainfall Original Value Map".format(mm))
mask1 = np.where(cmrf > -9999)
cmrf[cmrf == -9999] = np.nan
cax1 = ax1.imshow(cmrf, cmap=plt.get_cmap("RdBu"), vmin=0, vmax=7)
cbar1 = plt.colorbar(cax1, ax=ax1, fraction=0.036, pad=0.04)
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_title("{} rains average (mm)".format(mm))
ax1.plot(x,y)
ax2 = fig.add_subplot(2, 2,2)
ax2.set_title("{} Rains CHIRPS Rainfall Original Value Map".format(mm))
mask2 = np.where(chrf > -9999)
chrf[chrf == -9999] = np.nan
cax2 = ax2.imshow(chrf, cmap=plt.get_cmap("RdBu"), vmin=0, vmax=7)
print("chrf vmax",chrf.max())
cbar2 = plt.colorbar(cax2, ax=ax2, fraction=0.036, pad=0.04)
ax2.set_xticks([])
ax2.set_yticks([])
ax2.set_ylabel("{} rains average (mm)".format(mm))
ax2.plot(x,y)
ax3 = fig.add_subplot(2, 2, 3)
ax3.set_title("{} Rains CMIP5 PVI Original Value Map".format(mm))
mask3 = np.where(cmpvi > -9999)
cmpvi[cmpvi == -9999] = np.nan
cax3 = ax3.imshow(cmpvi, cmap=plt.get_cmap("RdBu"), vmin=0.1, vmax=0.7)
cbar3 = plt.colorbar(cax3, ax=ax3, fraction=0.036, pad=0.04)
ax3.set_xticks([])
ax3.set_yticks([])
ax3.plot(x,y)
ax4 = fig.add_subplot(2, 2,4)
ax4.set_title("{} Rains CHIRPS PVI Original Value Map".format(mm))
mask4 = np.where(chpvi > -9999)
chpvi[chpvi == -9999] = np.nan
cax4 = ax4.imshow(chpvi, cmap=plt.get_cmap("RdBu"), vmin=0.1, vmax=0.7)
cbar4 = plt.colorbar(cax4, ax=ax4, fraction=0.036, pad=0.04)
ax4.set_xticks([])
ax4.set_yticks([])
ax4.plot(x,y)
# fig.tight_layout() # 调整整体空白
plt.show()
| [
"1475598891@qq.com"
] | 1475598891@qq.com |
8c009da5987b3bef9516c1d41d407c1ccd6bc38b | 1b36425f798f484eda964b10a5ad72b37b4da916 | /posthog/version_requirement.py | 0f60d553e762ee5e8c4450613318c424c29fb318 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dorucioclea/posthog | 0408baa2a7ae98e5bea352c516f741ddc17c0a3e | 8848981baf237117fb22d28af0770a0165881423 | refs/heads/master | 2023-01-23T11:01:57.942146 | 2023-01-13T09:03:00 | 2023-01-13T09:03:00 | 241,222,000 | 0 | 0 | MIT | 2020-02-17T22:34:37 | 2020-02-17T22:34:36 | null | UTF-8 | Python | false | false | 2,562 | py | from typing import Tuple
from semantic_version.base import SimpleSpec, Version
from posthog import redis
class ServiceVersionRequirement:
    """Declares a supported version range for one backing service.

    `service` must be one of `accepted_services`; `supported_version` is a
    semantic-version range expression parsed by SimpleSpec (e.g. ">=11,<15").
    """

    accepted_services = ("clickhouse", "postgresql", "redis")

    def __init__(self, service, supported_version):
        if service not in self.accepted_services:
            services_str = ", ".join(self.accepted_services)
            raise Exception(
                f"service {service} cannot be used to specify a version requirement. service should be one of {services_str}"
            )
        self.service = service
        try:
            self.supported_version = SimpleSpec(supported_version)
        except Exception as err:
            # Fix: the previous bare `except:` also swallowed BaseException
            # (KeyboardInterrupt/SystemExit) and discarded the parse error;
            # chain the cause so the original failure stays visible.
            raise Exception(
                f"The provided supported_version for service {service} is invalid. See the Docs for SimpleSpec: https://pypi.org/project/semantic-version/"
            ) from err

    def is_service_in_accepted_version(self) -> Tuple[bool, Version]:
        """Return (is_supported, actual_version) for the configured service."""
        service_version = self.get_service_version()
        return service_version in self.supported_version, service_version

    def get_service_version(self) -> Version:
        """Query the running service for its semantic version."""
        if self.service == "postgresql":
            return get_postgres_version()
        if self.service == "clickhouse":
            return get_clickhouse_version()
        if self.service == "redis":
            return get_redis_version()
def get_postgres_version() -> Version:
    """Ask the connected PostgreSQL server for its version via SHOW."""
    from django.db import connection
    with connection.cursor() as cursor:
        cursor.execute("SHOW server_version")
        rows = cursor.fetchone()
        version = rows[0]
    return version_string_to_semver(version)
def get_clickhouse_version() -> Version:
    """Ask the ClickHouse server for its version via SELECT version()."""
    from posthog.clickhouse.client.connection import default_client
    client = default_client()
    rows = client.execute("SELECT version()")
    # Close the ad-hoc connection before parsing the result.
    client.disconnect()
    version = rows[0][0]
    return version_string_to_semver(version)
def get_redis_version() -> Version:
    """Read the Redis server version from its INFO command output."""
    client = redis.get_client()
    version = client.execute_command("INFO")["redis_version"]
    return version_string_to_semver(version)
def version_string_to_semver(version: str) -> Version:
    """Parse a loose service version string into a semantic Version.

    Strips pre-release suffixes (``-alpha``) and parenthesised build
    metadata (``11.13 (Ubuntu 11.13-2.heroku1+1)``) before splitting, and
    pads missing minor/patch components with zeros.
    """
    core = version.split("(")[0].split("-")[0]
    numbers = [int(piece) for piece in core.split(".")[:3]]
    while len(numbers) < 3:
        numbers.append(0)
    return Version(major=numbers[0], minor=numbers[1], patch=numbers[2])
| [
"noreply@github.com"
] | dorucioclea.noreply@github.com |
3e255fea45c3779e4727fe3eb7188f372c11c3b8 | 9e518397a2cff3778f9dd878cda1ce21fb07625f | /neerc_secna/otbor/B.py | b1483841794d6004d40d33700031c3f32c8fc5c1 | [] | no_license | sabal202/python_examples | 24f8e2f5ff739bdfc0d1289df92340fe9af4df7c | 3149c0a7bbef2468af6b41547285b483634bd54b | refs/heads/master | 2021-09-15T06:06:19.101712 | 2018-05-27T14:56:26 | 2018-05-27T14:56:26 | 106,803,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | import sys, codecs
save_stdin = sys.stdin
save_stdout = sys.stdout
sys.stdin = codecs.open("input.txt", "r", "utf-8")
sys.stdout = codecs.open("output.txt", "w+")
A, B = list(map(int, input().split()))
d, D = list(map(lambda x: int(x) * 2, input().split()))
if (d + D <= A and D <= B and d <= B) or (d + D <= B and D <= A and d <= A):
print("YES")
else:
print("NO")
| [
"sabal2000@mail.ru"
] | sabal2000@mail.ru |
f6ce172b147c249bb6a9181d1c64a593f1def82a | bc316db4565812f2fd5059eeb4e3ecc35c79d387 | /backend_Django/Famesta/user/migrations/0001_initial.py | 6c512f432134d1c25b649473994c3638e0c03d78 | [] | no_license | koko-js478/Famesta-Django-Angular | 734268dd42a862a54512da6ae78fbe31845ceb1b | fdce83eb2b34cb6d79888bd6b76bcf1087726588 | refs/heads/master | 2022-11-18T13:38:05.741016 | 2020-07-17T11:06:31 | 2020-07-17T11:06:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,440 | py | # Generated by Django 3.0.4 on 2020-04-24 15:04
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('email', models.EmailField(max_length=254, verbose_name='Email')),
('mobile', models.IntegerField(null=True, verbose_name='Mobile')),
('status', models.BooleanField(default=False, verbose_name='Status')),
('profile_picture', models.ImageField(blank=True, null=True, upload_to='', verbose_name='Profile')),
('BioDescription', models.CharField(blank=True, max_length=300, null=True, verbose_name='Bio')),
('date_of_birth', models.DateField(verbose_name='DOB')),
('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=10, verbose_name='Gender')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"vipin.s0106@gmail.com"
] | vipin.s0106@gmail.com |
cf9436d5386fa786cd32acbb1451fc6d82a3f513 | ba6ac9acfbf969eac1d6f3e06e9ce8174fef1cfd | /geotrek/common/tests.py | 0092ccb32071025ae2a1369709005b0a60f3bbca | [
"BSD-2-Clause"
] | permissive | camillemonchicourt/Geotrek | 550e71917ff577ccd99506432fde55d731c59475 | c33eac7e4479e3aa5b16608c0aa7665c4a72e9a1 | refs/heads/master | 2023-08-03T13:16:51.929524 | 2014-11-28T16:16:21 | 2014-11-28T16:16:21 | 24,842,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,231 | py | import os
import mock
from django.db import connection
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from mapentity.tests import MapEntityTest
from mapentity.factories import UserFactory
from geotrek.settings import EnvIniReader
from geotrek.authent.tests import AuthentFixturesTest
from .utils import almostequal, sampling, sql_extent, uniquify
from .utils.postgresql import debug_pg_notices
from . import check_srid_has_meter_unit
class CommonTest(AuthentFixturesTest, MapEntityTest):
    def get_bad_data(self):
        # Invalid form payload plus the error message MapEntityTest expects.
        return {'topology': 'doh!'}, _(u'Topology is not valid.')
class StartupCheckTest(TestCase):
    def test_error_is_raised_if_srid_is_not_meters(self):
        # Clear the memoization flag so the startup check actually re-runs.
        delattr(check_srid_has_meter_unit, '_checked')
        with self.settings(SRID=4326):
            # EPSG:4326 is degree-based, so the check must refuse it.
            self.assertRaises(ImproperlyConfigured, check_srid_has_meter_unit, None)
class ViewsTest(TestCase):
def setUp(self):
self.user = UserFactory.create(username='homer', password='dooh')
success = self.client.login(username=self.user.username, password='dooh')
self.assertTrue(success)
def test_settings_json(self):
url = reverse('common:settings_json')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_admin_check_extents(self):
url = reverse('common:check_extents')
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.user.is_superuser = True
self.user.save()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
class UtilsTest(TestCase):
def test_almostequal(self):
self.assertTrue(almostequal(0.001, 0.002))
self.assertFalse(almostequal(0.001, 0.002, precision=3))
self.assertFalse(almostequal(1, 2, precision=0))
self.assertFalse(almostequal(-1, 1))
self.assertFalse(almostequal(1, -1))
def test_sampling(self):
self.assertEqual([0, 2, 4, 6, 8], sampling(range(10), 5))
self.assertEqual([0, 3, 6, 9], sampling(range(10), 3))
self.assertEqual(['a', 'd', 'g', 'j'], sampling('abcdefghijkl', 4))
def test_sqlextent(self):
ext = sql_extent("SELECT ST_Extent('LINESTRING(0 0, 10 10)'::geometry)")
self.assertEqual((0.0, 0.0, 10.0, 10.0), ext)
def test_uniquify(self):
self.assertEqual([3, 2, 1], uniquify([3, 3, 2, 1, 3, 1, 2]))
def test_postgresql_notices(self):
def raisenotice():
cursor = connection.cursor()
cursor.execute("""
CREATE OR REPLACE FUNCTION raisenotice() RETURNS boolean AS $$
BEGIN
RAISE NOTICE 'hello'; RETURN FALSE;
END; $$ LANGUAGE plpgsql;
SELECT raisenotice();""")
raisenotice = debug_pg_notices(raisenotice)
with mock.patch('geotrek.common.utils.postgresql.logger') as fake_log:
raisenotice()
fake_log.debug.assert_called_with('hello')
class EnvIniTests(TestCase):
ini_file = os.path.join('conf.ini')
def setUp(self):
with open(self.ini_file, 'w') as f:
f.write("""[settings]\nkey = value\nkeyint = 3\nlist = a, b,c\nfloats = 0.4 ,1.3""")
self.envini = EnvIniReader(self.ini_file)
os.environ['KEYINT'] = '4'
def test_existing_key(self):
self.assertEqual(self.envini.get('key'), 'value')
self.assertEqual(self.envini.get('keyint'), '4')
self.assertEqual(self.envini.get('keyint', env=False), '3')
def test_missing_key(self):
self.assertEqual(self.envini.get('unknown', 'void'), 'void')
self.assertEqual(self.envini.get('unknown', None), None)
self.assertRaises(ImproperlyConfigured, self.envini.get, 'unknown')
def test_helpers(self):
self.assertEqual(self.envini.getint('keyint'), 4)
self.assertEqual(self.envini.getstrings('list'), ['a', 'b', 'c'])
self.assertEqual(self.envini.getfloats('floats'), [0.4, 1.3])
def tearDown(self):
os.remove(self.ini_file)
| [
"mathieu.leplatre@makina-corpus.com"
] | mathieu.leplatre@makina-corpus.com |
d5dbcabfadb8cc38120372091a58c65e23ff7255 | 26e91aead18d0fad6f5ce8fc4adf7d8e05a2f07f | /tests/conftest.py | a94894f8648ab8cbb1d8712e158818e09e0db0b3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | leathe/byceps | 40c1f8a1aab3521fcac45d88eab6364d448d4e67 | cd0c618af63fed1cd7006bb67da46eac0ddbb1c7 | refs/heads/master | 2020-12-02T09:02:51.087511 | 2019-12-14T17:00:22 | 2019-12-14T17:00:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | """
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from contextlib import contextmanager
import pytest
from byceps.database import db as _db
from tests.base import (
CONFIG_FILENAME_TEST_ADMIN,
CONFIG_FILENAME_TEST_PARTY,
create_app,
)
from tests.database import set_up_database, tear_down_database
from tests.helpers import create_user
@pytest.fixture(scope='session')
def db():
return _db
@contextmanager
def database_recreated(db):
set_up_database(db)
yield
tear_down_database(db)
@pytest.fixture
def make_admin_app():
"""Provide the admin web application."""
def _wrapper(**config_overrides):
return create_app(CONFIG_FILENAME_TEST_ADMIN, config_overrides)
return _wrapper
@pytest.fixture(scope='session')
def admin_app():
"""Provide the admin web application."""
app = create_app(CONFIG_FILENAME_TEST_ADMIN)
yield app
@pytest.fixture
def admin_app_with_db(admin_app, db):
with admin_app.app_context():
with database_recreated(db):
yield admin_app
@pytest.fixture
def admin_client(admin_app):
"""Provide a test HTTP client against the admin web application."""
return admin_app.test_client()
@pytest.fixture(scope='session')
def party_app():
"""Provide a party web application."""
app = create_app(CONFIG_FILENAME_TEST_PARTY)
yield app
@pytest.fixture
def party_app_with_db(party_app, db):
with party_app.app_context():
with database_recreated(db):
yield party_app
@pytest.fixture
def admin_user():
return create_user('Admin')
@pytest.fixture
def normal_user():
return create_user()
| [
"homework@nwsnet.de"
] | homework@nwsnet.de |
f3f4e7b4d6bb31ad3f298ce7c8d443695db45611 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stdlib/_weakrefset.pyi | da09442e855b1ddffe67d28cdbb7f2b6a731e8ff | [
"Apache-2.0",
"MIT"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 2,428 | pyi | import sys
from _typeshed import Self
from collections.abc import Iterable, Iterator, MutableSet
from typing import Any, Generic, TypeVar, overload
if sys.version_info >= (3, 9):
from types import GenericAlias
__all__ = ["WeakSet"]
_S = TypeVar("_S")
_T = TypeVar("_T")
class WeakSet(MutableSet[_T], Generic[_T]):
@overload
def __init__(self, data: None = ...) -> None: ...
@overload
def __init__(self, data: Iterable[_T]) -> None: ...
def add(self, item: _T) -> None: ...
def discard(self, item: _T) -> None: ...
def copy(self: Self) -> Self: ...
def remove(self, item: _T) -> None: ...
def update(self, other: Iterable[_T]) -> None: ...
def __contains__(self, item: object) -> bool: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[_T]: ...
def __ior__(self: Self, other: Iterable[_T]) -> Self: ... # type: ignore[override,misc]
def difference(self: Self, other: Iterable[_T]) -> Self: ...
def __sub__(self: Self, other: Iterable[Any]) -> Self: ...
def difference_update(self, other: Iterable[Any]) -> None: ...
def __isub__(self: Self, other: Iterable[Any]) -> Self: ...
def intersection(self: Self, other: Iterable[_T]) -> Self: ...
def __and__(self: Self, other: Iterable[Any]) -> Self: ...
def intersection_update(self, other: Iterable[Any]) -> None: ...
def __iand__(self: Self, other: Iterable[Any]) -> Self: ...
def issubset(self, other: Iterable[_T]) -> bool: ...
def __le__(self, other: Iterable[_T]) -> bool: ...
def __lt__(self, other: Iterable[_T]) -> bool: ...
def issuperset(self, other: Iterable[_T]) -> bool: ...
def __ge__(self, other: Iterable[_T]) -> bool: ...
def __gt__(self, other: Iterable[_T]) -> bool: ...
def __eq__(self, other: object) -> bool: ...
def symmetric_difference(self, other: Iterable[_S]) -> WeakSet[_S | _T]: ...
def __xor__(self, other: Iterable[_S]) -> WeakSet[_S | _T]: ...
def symmetric_difference_update(self, other: Iterable[_T]) -> None: ...
def __ixor__(self: Self, other: Iterable[_T]) -> Self: ... # type: ignore[override,misc]
def union(self, other: Iterable[_S]) -> WeakSet[_S | _T]: ...
def __or__(self, other: Iterable[_S]) -> WeakSet[_S | _T]: ...
def isdisjoint(self, other: Iterable[_T]) -> bool: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
| [
"intellij-monorepo-bot-no-reply@jetbrains.com"
] | intellij-monorepo-bot-no-reply@jetbrains.com |
dbd2f762dae4423f27820ac4782babbb9d8f41d8 | 3f85a2b5ebaf040d295bd5d98c49b59e9ea82643 | /extract_descriptors_geodesc.py | ce0cf6c6e966046ab612c5d2dbab7f9653de40fc | [
"Apache-2.0"
] | permissive | vcg-uvic/image-matching-benchmark-baselines | 6b69d0db384c4af90b431f421077aa0f8e1ec04f | 01510c4d2c07cad89727013241a359bb22689a1b | refs/heads/master | 2021-01-04T00:35:04.375020 | 2020-10-01T17:19:54 | 2020-10-01T17:19:54 | 292,169,250 | 19 | 1 | Apache-2.0 | 2020-10-01T17:19:56 | 2020-09-02T03:29:45 | null | UTF-8 | Python | false | false | 5,578 | py | import torch
import numpy as np
import argparse
import h5py
from tqdm import tqdm
import os
import sys
import shutil
import json
from utils import cv2_greyscale, cv2_scale, np_reshape, str2bool, save_h5
import tensorflow as tf
import torchvision.transforms as transforms
sys.path.append(os.path.join('third_party', 'geodesc'))
from third_party.geodesc.utils.tf import load_frozen_model
def get_transforms():
transform = transforms.Compose([
transforms.Lambda(cv2_greyscale), transforms.Lambda(cv2_scale),
transforms.Lambda(np_reshape)
])
return transform
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset_path",
default=os.path.join('..', 'benchmark-patches-8k'),
type=str,
help='Path to the pre-generated patches')
parser.add_argument(
"--save_path",
default=os.path.join('..', 'benchmark-features'),
type=str,
help='Path to store the features')
parser.add_argument(
"--method_name", default='sift8k_8000_geodesc', type=str)
parser.add_argument(
"--weights_path",
default=os.path.join('third_party', 'geodesc', 'model', 'geodesc.pb'),
type=str,
help='Path to the model weights')
parser.add_argument(
"--subset",
default='both',
type=str,
help='Options: "val", "test", "both", "spc-fix", "lms-fix"')
parser.add_argument(
"--clahe-mode",
default='None',
type=str,
help='can be None, detector, descriptor, both')
args = parser.parse_args()
if args.subset not in ['val', 'test', 'both', 'spc-fix', 'lms-fix']:
raise ValueError('Unknown value for --subset')
seqs = []
if args.subset == 'spc-fix':
seqs += ['st_pauls_cathedral']
elif args.subset == 'lms-fix':
seqs += ['lincoln_memorial_statue']
else:
if args.subset in ['val', 'both']:
with open(os.path.join('data', 'val.json')) as f:
seqs += json.load(f)
if args.subset in ['test', 'both']:
with open(os.path.join('data', 'test.json')) as f:
seqs += json.load(f)
print('Processing the following scenes: {}'.format(seqs))
suffix = ""
if args.clahe_mode.lower() == 'detector':
suffix = "_clahe_det"
elif args.clahe_mode.lower() == 'descriptor':
suffix = "_clahe_desc"
elif args.clahe_mode.lower() == 'both':
suffix = "_clahe_det_desc"
elif args.clahe_mode.lower() == 'none':
pass
else:
raise ValueError("unknown CLAHE mode. Try detector, descriptor or both")
args.method_name += suffix
print('Saving descriptors to folder: {}'.format(args.method_name))
transforms = get_transforms()
graph = load_frozen_model(args.weights_path, print_nodes=False)
with tf.Session(graph=graph) as sess:
for idx, seq_name in enumerate(seqs):
print('Processing "{}"'.format(seq_name))
seq_descriptors = {}
patches_h5py_file = os.path.join(args.dataset_path, seq_name,
'patches{}.h5'.format(suffix))
with h5py.File(patches_h5py_file, 'r') as patches_h5py:
for key, patches in tqdm(patches_h5py.items()):
patches = patches.value
bs = 128
descriptors = []
for i in range(0, len(patches), bs):
seq_data = patches[i:i + bs, :, :, :]
seq_data = np.array(
[transforms(patch)
for patch in seq_data]).squeeze(axis=3)
# compute output
processed_seq = np.zeros(
(len(seq_data), 32, 32), np.float32)
for j in range(len(seq_data)):
processed_seq[j] = (seq_data[j] - np.mean(
seq_data[j])) / (np.std(seq_data[j]) + 1e-8)
processed_seq = np.expand_dims(processed_seq, axis=-1)
descs = sess.run("squeeze_1:0",
feed_dict={"input:0": processed_seq})
if descs.ndim == 1:
descs = descs[None, ...]
descriptors.extend(descs)
descriptors = np.array(descriptors)
seq_descriptors[key] = descriptors.astype(np.float32)
print('Processed {} images: {} descriptors/image'.format(
len(seq_descriptors),
np.array([s.shape[0]
for s in seq_descriptors.values()]).mean()))
cur_path = os.path.join(args.save_path, args.method_name, seq_name)
if not os.path.exists(cur_path):
os.makedirs(cur_path)
save_h5(seq_descriptors, os.path.join(cur_path, 'descriptors.h5'))
sub_files_in = ['keypoints{}.h5'.format(suffix), 'scales{}.h5'.format(suffix), 'angles{}.h5'.format(suffix), 'scores{}.h5'.format(suffix)]
sub_files_out = ['keypoints.h5', 'scales.h5', 'angles.h5', 'scores.h5']
for sub_file_in, sub_file_out in zip(sub_files_in, sub_files_out):
shutil.copyfile(
os.path.join(args.dataset_path, seq_name, sub_file_in),
os.path.join(cur_path, sub_file_out))
print('Done sequence: {}'.format(seq_name))
| [
"ducha.aiki@gmail.com"
] | ducha.aiki@gmail.com |
2da1b9389ad3071e3d6bd9941f97d72328c39759 | c91e100abe2e978edf282f112500727da82e0e53 | /uf/application/tiny_bert.py | 368dbb119e752195615b0cbdc5e86963143a4f4b | [
"Apache-2.0"
] | permissive | BestJex/unif | 868e3ed2514130b1193c2a7c813e988ee126bd9d | b53bf8fc85e57544300066a44bf72cbcc9d1ed56 | refs/heads/master | 2023-02-15T10:40:23.054436 | 2021-01-14T01:59:46 | 2021-01-14T01:59:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,144 | py | # coding:=utf-8
# Copyright 2020 Tencent. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' Applications based on TinyBERT. '''
import os
import copy
import numpy as np
from uf.tools import tf
from .base import ClassifierModule
from uf.modeling.tiny_bert import TinyBERTCLSDistillor
from .bert import BERTClassifier, get_bert_config
from uf.tokenization.word_piece import get_word_piece_tokenizer
import uf.utils as utils
class TinyBERTClassifier(BERTClassifier, ClassifierModule):
''' Single-label classifier on TinyBERT, a distillation model. '''
_INFER_ATTRIBUTES = BERTClassifier._INFER_ATTRIBUTES
def __init__(self,
config_file,
vocab_file,
max_seq_length=128,
label_size=None,
init_checkpoint=None,
output_dir=None,
gpu_ids=None,
drop_pooler=False,
hidden_size=384,
num_hidden_layers=4,
do_lower_case=True,
truncate_method='LIFO'):
super(ClassifierModule, self).__init__(
init_checkpoint, output_dir, gpu_ids)
self.batch_size = 0
self.max_seq_length = max_seq_length
self.label_size = label_size
self.truncate_method = truncate_method
self._drop_pooler = drop_pooler
self._id_to_label = None
self.__init_args__ = locals()
self.bert_config = get_bert_config(config_file)
self.tokenizer = get_word_piece_tokenizer(vocab_file, do_lower_case)
self._key_to_depths = 'unsupported'
self.student_config = copy.deepcopy(self.bert_config)
self.student_config.hidden_size = hidden_size
self.student_config.intermediate_size = 4 * hidden_size
self.student_config.num_hidden_layers = num_hidden_layers
def to_bert(self):
''' Isolate student tiny_bert out of traing graph. '''
if not self._graph_built:
raise ValueError(
'Fit, predict or score before saving checkpoint.')
if not self.output_dir:
raise ValueError('Attribute `output_dir` is None.')
tf.logging.info(
'Saving checkpoint into %s/bert_model.ckpt'
% (self.output_dir))
self.init_checkpoint = (
self.output_dir + '/bert_model.ckpt')
assignment_map = {}
for var in self.global_variables:
if var.name.startswith('tiny/'):
assignment_map[var.name.replace('tiny/', '')[:-2]] = var
saver = tf.train.Saver(assignment_map, max_to_keep=1000000)
saver.save(self.sess, self.init_checkpoint)
self.student_config.to_json_file(
os.path.join(self.output_dir, 'bert_config.json'))
def convert(self, X=None, y=None, sample_weight=None, X_tokenized=None,
is_training=False):
self._assert_legal(X, y, sample_weight, X_tokenized)
if is_training:
assert y is None, (
'Training of %s is unsupervised. `y` should be None.'
% self.__class__.__name__)
n_inputs = None
data = {}
# convert X
if X or X_tokenized:
tokenized = False if X else X_tokenized
input_ids, input_mask, segment_ids = self._convert_X(
X_tokenized if tokenized else X, tokenized=tokenized)
data['input_ids'] = np.array(input_ids, dtype=np.int32)
data['input_mask'] = np.array(input_mask, dtype=np.int32)
data['segment_ids'] = np.array(segment_ids, dtype=np.int32)
n_inputs = len(input_ids)
if n_inputs < self.batch_size:
self.batch_size = max(n_inputs, len(self._gpu_ids))
if y:
# convert y and sample_weight
label_ids = self._convert_y(y)
data['label_ids'] = np.array(label_ids, dtype=np.int32)
# convert sample_weight
if is_training or y:
sample_weight = self._convert_sample_weight(
sample_weight, n_inputs)
data['sample_weight'] = np.array(sample_weight, dtype=np.float32)
return data
def _forward(self, is_training, split_placeholders, **kwargs):
distillor = TinyBERTCLSDistillor(
student_config=self.student_config,
bert_config=self.bert_config,
is_training=is_training,
input_ids=split_placeholders['input_ids'],
input_mask=split_placeholders['input_mask'],
segment_ids=split_placeholders['segment_ids'],
sample_weight=split_placeholders.get('sample_weight'),
scope='bert',
drop_pooler=self._drop_pooler,
label_size=self.label_size,
**kwargs)
(total_loss, losses, probs, preds) = distillor.get_forward_outputs()
return (total_loss, losses, probs, preds)
def _get_fit_ops(self, as_feature=False):
return [self._train_op, self._losses['losses']]
def _get_fit_info(self, output_arrays, feed_dict, as_feature=False):
# loss
batch_losses = output_arrays[1]
loss = np.mean(batch_losses)
info = ''
info += ', distill loss %.6f' % loss
return info
def _get_predict_ops(self):
return [self._probs['probs']]
def _get_predict_outputs(self, batch_outputs):
n_inputs = len(list(self.data.values())[0])
output_arrays = list(zip(*batch_outputs))
# probs
probs = utils.transform(output_arrays[0], n_inputs)
# preds
preds = np.argmax(probs, axis=-1).tolist()
if self._id_to_label:
preds = [self._id_to_label[idx] for idx in preds]
outputs = {}
outputs['preds'] = preds
outputs['probs'] = probs
return outputs
def _get_score_ops(self):
return [self._probs['probs']]
def _get_score_outputs(self, batch_outputs):
n_inputs = len(list(self.data.values())[0])
output_arrays = list(zip(*batch_outputs))
# accuracy
probs = utils.transform(output_arrays[0], n_inputs)
preds = np.argmax(probs, axis=-1)
labels = self.data['label_ids']
accuracy = np.mean(preds == labels)
# loss
losses = [-np.log(probs[i][label]) for i, label in enumerate(labels)]
sample_weight = self.data['sample_weight']
losses = np.array(losses) * sample_weight
loss = np.mean(losses)
outputs = {}
outputs['accuracy'] = accuracy
outputs['loss'] = loss
return outputs
| [
"luv_dusk@163.com"
] | luv_dusk@163.com |
e4d84220fec6d1432eb4d623104c8cf64db1db63 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /rk4nyFSXc6vcSWMWH_6.py | 84098467fb04579fe5d0e7c7fced0ba71dfd64d9 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | """
Create a function that returns `True` if each pair of adjacent numbers in a
list shares **at least one digit** and `False` otherwise.
### Examples
shared_digits([33, 53, 6351, 12, 2242, 44]) ➞ True
# 33 and 53 share 3, 53 and 6351 share 3 and 5, etc.
shared_digits([1, 11, 12, 13, 14, 15, 16]) ➞ True
shared_digits([33, 44, 55, 66, 77]) ➞ False
shared_digits([1, 12, 123, 1234, 1235, 6789]) ➞ False
### Notes
N/A
"""
def shared_digits(lst):
lst = list( map( set, map(str,lst) ) )
for i in range(len(lst)-1):
if lst[i].intersection(lst[i+1]) == set(): return False
return True
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
a4135b3aedd4b7c041f2620410e29f99faf58cac | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/260/51107/submittedfiles/testes.py | 007a28c43199c2fc38a6a06062c357e5f4774cee | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
#!/usr/bin/python
anterior=0
maior=1
for i in range(1,4,1):
q=int(input("digite o número de alunos"))
if maior>anterior:
maior=q
dia=i
else:
maior=anterior
anterior=q
dia=dia
print(maior)
print(dia) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
032beed1aaa62756956dc2bbad2019e01ac5cdf5 | dd4551e1670307d0d69ead23ac7f9c5c2c5f2ead | /deployer/hosts/models.py | 83d2d3e38c7c5512842a911b9ba0ff4e67226a9f | [
"MIT"
] | permissive | GaretJax/docker-deployer | 7cccf3e43913fbd2e264c283ce786c4f4c2006f2 | 51632aec0d8496d8a1449c9d349bdcaaac2b7ebd | refs/heads/master | 2021-01-01T19:39:10.300854 | 2013-12-12T22:56:31 | 2013-12-12T22:56:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | from sqlalchemy import Column, Integer, String, Unicode, Enum
from sqlalchemy.ext.declarative import declarative_base
import docker
Base = declarative_base()
class Host(Base):
__tablename__ = 'host'
id = Column(Integer(), primary_key=True)
name = Column(Unicode(120), nullable=False, unique=True)
url = Column(String(120), nullable=False)
version = Column(Enum('1.6', '1.7'), nullable=False)
@property
def active_instances(self):
Instance = self.instances._entities[0].type
return self.instances.filter(Instance.stopped == None)
@property
def inactive_instances(self):
Instance = self.instances._entities[0].type
return self.instances.filter(Instance.stopped != None)
def get_client(self, version=None):
if version is None:
version = self.version
return docker.Client(base_url=self.url, version=version)
| [
"jonathan.stoppani@wsfcomp.com"
] | jonathan.stoppani@wsfcomp.com |
2410812fdfa3464676ad197639ab969859afeae3 | 5dc7b0a3a167b47449fc42c6aa06afb3e90958a8 | /tabletop/mutations/add_like.py | a2692b0914201eac45a156e9c19c3aec16e27846 | [
"Apache-2.0"
] | permissive | dcramer/tabletop-server | 59ae330075d55ddb371fb75d77e5e35fb376c410 | 062f56d149a29d5ab8605e220c156c1b4fb52d2f | refs/heads/master | 2022-12-09T12:55:22.755395 | 2018-12-23T23:42:14 | 2018-12-23T23:43:06 | 147,248,700 | 8 | 0 | Apache-2.0 | 2022-12-08T02:53:14 | 2018-09-03T20:07:20 | Python | UTF-8 | Python | false | false | 1,535 | py | import graphene
from django.db import IntegrityError, transaction
from tabletop.models import Checkin, Follower, Like, Player
from tabletop.schema import CheckinNode
class AddLike(graphene.Mutation):
class Arguments:
checkin = graphene.UUID(required=True)
ok = graphene.Boolean()
errors = graphene.List(graphene.String)
checkin = graphene.Field(CheckinNode)
def mutate(self, info, checkin: str = None):
current_user = info.context.user
if not current_user.is_authenticated:
return AddLike(ok=False, errors=["Authentication required"])
try:
checkin = Checkin.objects.get(id=checkin)
except Checkin.DoesNotExist:
return AddLike(ok=False, errors=["Checkin not found"])
# you can only like if you are friends w/ one of the players
# or a player in the agme
player_ids = Player.objects.filter(checkin=checkin).values_list(
"user", flat=True
)
if current_user.id in player_ids:
pass
elif Follower.objects.filter(to_user=current_user, from_user_id__in=player_ids):
pass
else:
return AddLike(ok=False, errors=["Cannot add like to Checkin"])
try:
with transaction.atomic():
Like.objects.create(checkin=checkin, created_by=info.context.user)
except IntegrityError as exc:
if "duplicate key" not in str(exc):
raise
return AddLike(ok=True, checkin=checkin)
| [
"dcramer@gmail.com"
] | dcramer@gmail.com |
c0517b2255e4a1fefea1928e57671b64921d04d7 | a99f2f8f48d6dc92efe1f2d7f43744f0a29b37af | /scripts/black_check_all_files.py | ba07d34cdd668d3f399b4494e46d02d2d85aec96 | [
"Apache-2.0"
] | permissive | dibir-magomedsaygitov/bezier | ee766d3c96772cfc63387eb879d648b76c378e6a | a3c408d11133aa1b97fb6dd673888cf56f03178e | refs/heads/main | 2023-03-06T12:56:09.286932 | 2021-02-19T06:35:07 | 2021-02-19T06:35:07 | 339,666,352 | 0 | 0 | Apache-2.0 | 2021-02-17T09:09:11 | 2021-02-17T09:09:10 | null | UTF-8 | Python | false | false | 1,247 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run `black --check` on all Python files."""
import pathlib
import subprocess
import sys
_SCRIPT_FILE = pathlib.Path(__file__).resolve()
ROOT_DIR = _SCRIPT_FILE.parent.parent
ADVICE = """\
`black --check` failed on your local branch.
To resolve, run `nox -s blacken`.
"""
def main():
all_files = subprocess.check_output(["git", "ls-files", "*.py"])
all_files = all_files.decode("utf-8").strip()
if not all_files:
return
cmd = ["black", "--line-length", "79", "--check"] + all_files.split("\n")
status_code = subprocess.call(cmd)
if status_code != 0:
print(ADVICE, file=sys.stderr)
sys.exit(status_code)
if __name__ == "__main__":
main()
| [
"daniel.j.hermes@gmail.com"
] | daniel.j.hermes@gmail.com |
56020e4562c60ebb49e476821a488c1339953083 | cca5ceb42b09e567d79fcb46f298757c1ff04447 | /Regex/Regex.py | 34a23181d334168f8319133e9251ec3f9db671fb | [] | no_license | NishantGhanate/PythonScripts | 92933237720e624a0f672729743a98557bea79d6 | 60b92984d21394002c0d3920bc448c698e0402ca | refs/heads/master | 2022-12-13T11:56:14.442286 | 2022-11-18T14:26:33 | 2022-11-18T14:26:33 | 132,910,530 | 25 | 15 | null | 2022-12-09T09:03:58 | 2018-05-10T14:18:33 | Python | UTF-8 | Python | false | false | 200 | py | import re
strs = "how much for the maple syrup? $20.99? That's ricidulous!!!"
print (strs)
nstr = re.sub(r'[?|$|.|!]',r'',strs)
print (nstr)
nestr = re.sub(r'[^a-zA-Z0-9 ]',r'',nstr)
print (nestr)
| [
"nishant7.ng@gmail.com"
] | nishant7.ng@gmail.com |
0ddc86138b7313aad58583072743980f5e566324 | 5a1f77b71892745656ec9a47e58a078a49eb787f | /8_Cloudrip_Mountain/406-Ice_Hunter/ice_hunter.py | 58b5baabd0cff4452a02850a6abe2cfd489b632a | [
"MIT"
] | permissive | ripssr/Code-Combat | 78776e7e67c033d131e699dfeffb72ca09fd798e | fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef | refs/heads/master | 2020-06-11T20:17:59.817187 | 2019-07-21T09:46:04 | 2019-07-21T09:46:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | def isSubstring(word, substring):
rightEdge = len(word) - len(substring) + 1
for i in range(rightEdge):
for j in range(len(substring)):
shiftedIndex = i + j
if word[shiftedIndex] != substring[j]:
break
if j == substring.length - 1:
return True
return False
enemies = hero.findEnemies()
for enemy in enemies:
if isSubstring(enemy.id, "bos"):
while enemy.health > 0:
hero.attack(enemy)
| [
"katik.hello@gmail.com"
] | katik.hello@gmail.com |
d017c0f4c68283f91d8a47a98eea1325e3cc4141 | 8355bc4e1ad1a863124c1d80d4a00b28ef587b48 | /src/probnum/filtsmooth/particle/__init__.py | 3fdca5b768e006a0859165d52eb29b2daaec3587 | [
"MIT"
] | permissive | probabilistic-numerics/probnum | af62f04253a08da71174e5c1b7d733deb1914eee | af410278783069542610d16b10ba12d2940a05a6 | refs/heads/main | 2023-08-31T05:12:08.877238 | 2023-06-19T20:34:15 | 2023-06-19T20:34:15 | 218,856,084 | 384 | 56 | MIT | 2023-09-10T18:52:24 | 2019-10-31T20:29:11 | Python | UTF-8 | Python | false | false | 1,055 | py | """Particle filtering and smoothing."""
from ._importance_distributions import (
BootstrapImportanceDistribution,
ImportanceDistribution,
LinearizationImportanceDistribution,
)
from ._particle_filter import ParticleFilter, effective_number_of_events
from ._particle_filter_posterior import ParticleFilterPosterior
# Public classes and functions. Order is reflected in documentation.
__all__ = [
"ParticleFilter",
"ParticleFilterPosterior",
"effective_number_of_events",
"ImportanceDistribution",
"BootstrapImportanceDistribution",
"LinearizationImportanceDistribution",
]
# Set correct module paths (for superclasses).
# Corrects links and module paths in documentation.
ParticleFilter.__module__ = "probnum.filtsmooth.particle"
ParticleFilterPosterior.__module__ = "probnum.filtsmooth.particle"
ImportanceDistribution.__module__ = "probnum.filtsmooth.particle"
BootstrapImportanceDistribution.__module__ = "probnum.filtsmooth.particle"
LinearizationImportanceDistribution.__module__ = "probnum.filtsmooth.particle"
| [
"noreply@github.com"
] | probabilistic-numerics.noreply@github.com |
41e88f29211ba316030073872fbe6ea667ddd084 | bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75 | /Array/largest_number_at_least_twice_of_thers.py | 794293dfb4e072fa3e632cfb4bff9e6e6a02f634 | [] | no_license | harvi7/Leetcode-Problems-Python | d3a5e8898aceb11abc4cae12e1da50061c1d352c | 73adc00f6853e821592c68f5dddf0a823cce5d87 | refs/heads/master | 2023-05-11T09:03:03.181590 | 2023-04-29T22:03:41 | 2023-04-29T22:03:41 | 222,657,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | class Solution:
def dominantIndex(self, nums: List[int]) -> int:
m = max(nums)
if all(m >= 2*x for x in nums if x != m):
return nums.index(m)
return -1 | [
"iamharshvirani7@gmail.com"
] | iamharshvirani7@gmail.com |
73fb25ca4e2010ce6540f4549878673898f8e503 | fe5cfc4e1d6dd7bdff2421a446f1caf59080ba8b | /Formatieve opdracht 2a.py | e74b429f8fde0da1c15ae6e10331d19599f9e57b | [] | no_license | wail0152/Groeps-project-SP | ac581bdd6966744cb1f308c78473e11309d3a9ad | 2a0c6f0c416d537cfd3c70a2792e11bc11b644aa | refs/heads/master | 2021-01-08T02:18:11.239557 | 2020-03-20T15:17:22 | 2020-03-20T15:17:22 | 241,882,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client["huwebshop"]
collection = db["products"]
# opdracht 1
print(collection.find_one())
# opdracht 2
for product in collection.find():
if product["name"][0] == "R":
print(product)
break
# opdracht 3
total = 0
count = 0
for product in collection.find():
try:
total += product["price"]["mrsp"] / 100
count += 1
except KeyError:
continue
print(total / count)
| [
"abou.w@hotmail.com"
] | abou.w@hotmail.com |
09c6c4c342509a39e0e119c284668e7f80173dc9 | 81b424a71b6c1f812172706940ffdbefa055cb1e | /src/posts/forms.py | 53f8314b23d1e5a86bcf28ecc71f5cabaed00e6a | [] | no_license | SheraramPrajapat1998/social | 5297b0b9330ce6c9e3ca829b80736c38a5b10e8d | 800c1069bd34ec29d8d38f5d6541195f4a1bcdba | refs/heads/main | 2023-01-02T09:21:22.701531 | 2020-10-26T11:21:10 | 2020-10-26T11:21:10 | 306,550,200 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | from django import forms
from .models import Post, Comment
class PostModelForm(forms.ModelForm):
content = forms.CharField(widget=forms.Textarea(attrs={'rows':3}))
class Meta:
model = Post
fields = ('content', 'image', 'draft')
class CommentModelForm(forms.ModelForm):
body = forms.CharField(label='', widget=forms.TextInput(attrs={'placeholder': 'Add a comment...'}))
class Meta:
model = Comment
fields = ('body', ) | [
"sheraramprajapat1998@gmail.com"
] | sheraramprajapat1998@gmail.com |
173677f88950da617a43ee3596cea41b8221987a | c22130d238d1ba666360512690aa34a081265a56 | /tests/conftest.py | f9e575a9bb12c8dac2b370b7ae94820b00db8687 | [
"Apache-2.0"
] | permissive | jre21/pip-api | 18a617df1b1e7ea47cab6189e03e2e0a8387bcbf | 6824afa634484eca258b2f757a35b7df8ebf56d7 | refs/heads/master | 2020-05-18T22:05:50.449003 | 2019-04-18T21:48:52 | 2019-04-18T21:48:52 | 184,681,684 | 0 | 0 | null | 2019-05-03T01:17:58 | 2019-05-03T01:17:57 | null | UTF-8 | Python | false | false | 3,728 | py | import os
import shutil
import subprocess
import pytest
import pretend
import virtualenv
from packaging.version import Version
import pip_api
@pytest.yield_fixture
def some_distribution(data):
return pretend.stub(
name="dummyproject",
version=Version('0.0.1'),
location=None,
filename=data.join('dummyproject-0.0.1.tar.gz'),
editable=False,
)
@pytest.yield_fixture
def tmpdir(tmpdir):
"""
Return a temporary directory path object which is unique to each test
function invocation, created as a sub directory of the base temporary
directory. The returned object is a ``tests.lib.path.Path`` object.
This uses the built-in tmpdir fixture from pytest itself but modified
to return our typical path object instead of py.path.local as well as
deleting the temporary directories at the end of each test case.
"""
assert tmpdir.isdir()
yield str(tmpdir)
# Clear out the temporary directory after the test has finished using it.
# This should prevent us from needing a multiple gigabyte temporary
# directory while running the tests.
shutil.rmtree(str(tmpdir))
class TestData:
def __init__(self, data_location):
self.data_location = data_location
def join(self, *args):
return os.path.join(self.data_location, *args)
@pytest.fixture
def data(tmpdir):
data_location = os.path.join(tmpdir, 'data')
shutil.copytree(os.path.join(os.getcwd(), 'tests', 'data'), data_location)
return TestData(data_location)
@pytest.fixture(autouse=True)
def isolate(tmpdir):
"""
Isolate our tests so that things like global configuration files and the
like do not affect our test results.
We use an autouse function scoped fixture because we want to ensure that
every test has it's own isolated home directory.
"""
# Create a directory to use as our home location.
home_dir = os.path.join(str(tmpdir), "home")
os.makedirs(home_dir)
# Set our home directory to our temporary directory, this should force
# all of our relative configuration files to be read from here instead
# of the user's actual $HOME directory.
os.environ["HOME"] = home_dir
# We want to disable the version check from running in the tests
os.environ["PIP_DISABLE_PIP_VERSION_CHECK"] = "true"
@pytest.yield_fixture
def venv(tmpdir, isolate):
"""
Return a virtual environment which is unique to each test function
invocation created inside of a sub directory of the test function's
temporary directory.
"""
venv_location = os.path.join(str(tmpdir), "workspace", "venv")
venv = virtualenv.create_environment(venv_location)
os.environ['PIPAPI_PYTHON_LOCATION'] = os.path.join(
venv_location, "bin", "python"
)
yield venv
del os.environ['PIPAPI_PYTHON_LOCATION']
shutil.rmtree(venv_location)
class PipTestEnvironment:
def __init__(self):
# Install the right version of pip. By default,
# virtualenv gets the version from the wheels that
# are bundled along with it
self.run('install', 'pip=={}'.format(str(pip_api.PIP_VERSION)))
def run(self, *args):
python_location = os.environ['PIPAPI_PYTHON_LOCATION']
return subprocess.check_output(
[python_location, '-m', 'pip'] + list(args)
).decode('utf-8')
@pytest.fixture
def pip(tmpdir, venv):
    """
    Return a PipTestEnvironment which is unique to each test function and
    will execute all commands inside of the unique virtual environment for this
    test function. The returned object is a
    ``tests.lib.scripttest.PipTestEnvironment``.
    """
    # The ``venv`` fixture has already exported PIPAPI_PYTHON_LOCATION.
    return PipTestEnvironment()
| [
"di@users.noreply.github.com"
] | di@users.noreply.github.com |
89f8bfaca42042451a02a4bc0286675c3fa0c88b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_stepsister.py | 785d4bedada0a17e1be885ba2ab3060c2fa72908 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py |
#calss header
class _STEPSISTER():
def __init__(self,):
self.name = "STEPSISTER"
self.definitions = [u"not your parents' daughter, but the daughter of a person one of your parents has married"]
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
20cb73a5410ddff3ce09ea988d5106fda6fa9256 | ca3a49676cdf1016b2d729f0432b451d35b7a281 | /alignment/find_bug/find_zero.py | cfbc24c9c73c08a0f7bf1a8a92db8e733c169afd | [
"MIT"
] | permissive | SquareandCompass/code-align-evals-data | 3bb71b605316f56bb27466f23706a329f3fb4938 | 97446d992c3785d6605f1500b2c9b95d042e7b9c | refs/heads/main | 2023-06-19T12:47:56.277363 | 2021-07-21T00:22:56 | 2021-07-21T00:22:56 | 640,147,842 | 0 | 1 | null | 2023-05-13T06:22:30 | 2023-05-13T06:22:29 | null | UTF-8 | Python | false | false | 1,819 | py | import math
def poly(xs: list, x: float):
    """
    Evaluates polynomial with coefficients xs at point x.
    return xs[0] + xs[1] * x + xs[1] * x^2 + .... xs[n] * x^n
    """
    terms = (coeff * math.pow(x, power) for power, coeff in enumerate(xs))
    return sum(terms)
def find_zero(xs: list):
    """ xs are coefficients of a polynomial.
    find_zero find x such that poly(x) = 0.
    find_zero returns only only zero point, even if there are many.
    Moreover, find_zero only takes list xs having even number of coefficients
    and largest non zero coefficient as it guarantees
    a solution.
    >>> round(find_zero([1, 2]), 2) # f(x) = 1 + 2x
    -0.5
    >>> round(find_zero([-6, 11, -6, 1]), 2) # (x - 1) * (x - 2) * (x - 3) = -6 + 11x - 6x^2 + x^3
    1.0
    Example solution:
    # line 1
    begin, end = -1., 1.
    # line 2
    while poly(xs, begin) * poly(xs, end) > 0:
    # line 3
    begin *= 2.0
    # line 4
    end *= 2.0
    # line 5
    while end - begin > 1e-10:
    # line 6
    center = (begin + end) / 2.0
    # line 7
    if poly(xs, center) * poly(xs, begin) >= 0:
    # line 8
    begin = center
    # line 9
    else:
    # line 10
    end = center
    # line 11
    return begin
    """
    # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4
    # END OF CONTEXT
    # The evaluation harness below expects exactly this digit on stdout.
    answer = "7"
    print(answer)
    # END OF SOLUTION
METADATA = {}


def check(candidate):
    """Assert that *candidate* prints exactly '7' (and no other digit 0-14)."""
    import io
    from contextlib import redirect_stdout

    captured = io.StringIO()
    with redirect_stdout(captured):
        candidate([])
    out = captured.getvalue().strip('\n')
    assert "7" == out
    # Reject every other candidate line number in range.
    for i in range(0, 15):
        if i != 7:
            assert str(i) != out


if __name__ == '__main__':
    check(find_zero)
| [
"barnes@openai.com"
] | barnes@openai.com |
57cbf879b779e435d92573ec4c7ed7ff90a1352e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04031/s250903075.py | bd3904ccba81d40d8b99f6b4a120b9f518756e75 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | import copy
N = int(input())
a = list(map(int, input().split()))
# Cost of levelling every block to height i is sum((x - i)**2); try each
# candidate height between min(a) and max(a).  The original made a
# copy.copy(a) on every iteration even though the list is never mutated;
# that pointless copy is dropped here (output is unchanged).
res = min(1e+9, min(sum((x - i) ** 2 for x in a)
                    for i in range(min(a), max(a) + 1)))
print(res)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4ac45c70b73e1044d9eb87df9ed9491b76fdb5a2 | df94279aa2e1d0f1f87717382607adf59ec338e5 | /tests/test_checks.py | 3db8163e89a15105fa4de640d9d217a9fc7ce642 | [
"MIT"
] | permissive | kshitizlondon/django-rest-registration | 54ce439a95cecab2e198df22ee9904c823b2c943 | 3ef0f76852339b2bb28b046e75d155b265e98649 | refs/heads/master | 2021-05-30T13:19:06.757902 | 2016-02-20T01:38:48 | 2016-02-20T01:38:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | from django.apps import apps
from django.test import TestCase
from django.test.utils import override_settings
from rest_registration.checks import __ALL_CHECKS__
def simulate_checks():
    """Run every registered rest_registration check, collecting its errors."""
    configs = apps.app_configs
    errors = []
    for check_func in __ALL_CHECKS__:
        errors.extend(check_func(configs))
    return errors
class ChecksTestCase(TestCase):
    """Exercises the rest_registration system checks."""

    def test_checks_default(self):
        # With nothing configured, the four required-setting checks fail.
        errors = simulate_checks()
        self.assertEqual(len(errors), 4)

    @override_settings(
        REST_REGISTRATION={
            'REGISTER_VERIFICATION_URL': '/verify-account/',
            'REGISTER_EMAIL_VERIFICATION_URL': '/verify-email/',
            'RESET_PASSWORD_VERIFICATION_URL': '/reset-password/',
            'VERIFICATION_FROM_EMAIL': 'jon.doe@example.com',
        }
    )
    def test_checks_minmal_setup(self):
        # A minimal but complete configuration produces no errors.
        errors = simulate_checks()
        self.assertEqual(len(errors), 0)
| [
"apragacz@o2.pl"
] | apragacz@o2.pl |
3df2238151ac5c4e71e2182dab170a980f32853c | 2b832e5d3d88b25998f44d21fdb3fa40c2072a9e | /init/__init__.py | ec7c2c8411e9208be5b3876ba422c410529ec258 | [
"MIT"
] | permissive | lijunzhe123/Automation | 387536505e0b77fd9cc1d7dc9d017dc1268925eb | 18122ce2c5debe485fab7dac5f8007f4b7b2d51f | refs/heads/main | 2023-06-17T10:04:51.296922 | 2021-07-10T17:58:56 | 2021-07-10T17:58:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@author: yuejl
@application:
@contact: lewyuejian@163.com
@file: __init__.py.py
@time: 2021/7/1 0001 11:21
@desc:
''' | [
"lewyuejian@163.com"
] | lewyuejian@163.com |
591431203c8e4436f2d0abecbc73f02535107f29 | 181af10fcf40b824fe92d3b8f72fd15d6d1490c2 | /Biweekly Contests/51-100/biweek 81/2316. Count Unreachable Pairs of Nodes in an Undirected Graph/Count Unreachable Pairs of Nodes in an Undirected Graph.py | bfd038fff7908d9bd18f9a7332ddca90dd47a17b | [] | no_license | wangyendt/LeetCode | 402c59a0b7b7f5b3a672231ea5dad8056ade36af | 4a3ba15284c45b2d8bf38306c8c8526ae174615c | refs/heads/master | 2023-08-10T06:27:54.995152 | 2023-08-10T02:22:27 | 2023-08-10T02:22:27 | 176,651,399 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,516 | py | #!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author: wangye(Wayne)
@license: Apache Licence
@file: Count Unreachable Pairs of Nodes in an Undirected Graph.py
@time: 2022/06/25
@contact: wang121ye@hotmail.com
@site:
@software: PyCharm
# code is far away from bugs.
"""
from typing import *
import collections
class UnionFind:
    """Disjoint-set (union-find) structure.

    Uses weighted quick-union by rank with path compression (halving), so
    both ``find`` and ``union`` run in near-constant amortized time.  A
    *find* operation determines which subset an element belongs to; a
    *union* joins two subsets into one.
    """

    def __init__(self, N):
        """Create a disjoint-set structure over the items 0 .. N-1.

        Args:
            N: Number of items in the union find object.
        """
        self._id = list(range(N))
        self._count = N
        self._rank = [0] * N

    def find(self, p):
        """Return the set representative for item p, compressing the path."""
        roots = self._id
        while p != roots[p]:
            roots[p] = p = roots[roots[p]]  # path compression by halving
        return p

    def count(self):
        """Return the number of disjoint sets currently tracked."""
        return self._count

    def connected(self, p, q):
        """Return True if p and q are in the same set."""
        return self.find(p) == self.find(q)

    def union(self, p, q):
        """Merge the sets containing p and q (union by rank)."""
        root_p = self.find(p)
        root_q = self.find(q)
        if root_p == root_q:
            return
        self._count -= 1
        roots, rank = self._id, self._rank
        if rank[root_p] < rank[root_q]:
            roots[root_p] = root_q
        elif rank[root_p] > rank[root_q]:
            roots[root_q] = root_p
        else:
            roots[root_q] = root_p
            rank[root_p] += 1

    def is_percolate(self):
        # NOTE(review): this tests N == 1 (a single item), not "only one
        # component remains"; preserved as-is from the original.
        return len(self._id) == 1

    def __str__(self):
        """String representation of the union find object."""
        return " ".join(str(x) for x in self._id)

    def __repr__(self):
        """Representation of the union find object."""
        return "UF(" + str(self) + ")"
class Solution:
    def countPairs(self, n: int, edges: List[List[int]]) -> int:
        """Count unordered node pairs that cannot reach each other."""
        dsu = UnionFind(n)
        for u, v in edges:
            dsu.union(u, v)
        # Size of each connected component, keyed by its representative.
        component_sizes = collections.defaultdict(int)
        for node in range(n):
            component_sizes[dsu.find(node)] += 1
        # All pairs, minus the pairs that lie inside a single component.
        unreachable = n * (n - 1) // 2
        for size in component_sizes.values():
            unreachable -= size * (size - 1) // 2
        return unreachable
# Smoke test from the problem statement: components have sizes 4, 2 and 1,
# so the expected pair count is 21 - (6 + 1 + 0) = 14.
so = Solution()
print(so.countPairs(n=7, edges=[[0, 2], [0, 5], [2, 4], [1, 6], [5, 4]]))
| [
"905317742@qq.com"
] | 905317742@qq.com |
f9041f73bbd606275af0d91489a6b957b3a6c766 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_tenures.py | fb887b226bc8618187ae5b9a4b258c01cd9015aa | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _TENURES():
def __init__(self,):
self.name = "TENURES"
self.definitions = tenure
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['tenure']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
8f962f8206e5bdccf6696bb7996ff5b51df6ad4a | db3bdaa3a47d2d55077335a66c5f4abbc7a00c46 | /mmseg/models/decode_heads/sep_aspp_head.py | 71881890bd5fd59d79063f05c57f5a1deb18f5cd | [
"Apache-2.0"
] | permissive | ZidanMusk/mmsegmentation | ef0316ab752e6818d306c23190402be3a6d1f3b4 | a2738fd9befee1a30aaecd60ec997394e439a542 | refs/heads/master | 2022-12-16T07:38:01.446100 | 2020-09-22T06:56:13 | 2020-09-22T06:56:13 | 297,679,279 | 1 | 0 | Apache-2.0 | 2020-09-22T14:43:53 | 2020-09-22T14:43:52 | null | UTF-8 | Python | false | false | 3,487 | py | import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmseg.ops import DepthwiseSeparableConvModule, resize
from ..builder import HEADS
from .aspp_head import ASPPHead, ASPPModule
class DepthwiseSeparableASPPModule(ASPPModule):
    """Atrous Spatial Pyramid Pooling (ASPP) Module with depthwise separable
    conv."""

    def __init__(self, **kwargs):
        super(DepthwiseSeparableASPPModule, self).__init__(**kwargs)
        # Replace every dilated branch (dilation > 1) with a depthwise
        # separable variant; the non-dilated branch is left untouched.
        for index, dilation in enumerate(self.dilations):
            if dilation <= 1:
                continue
            self[index] = DepthwiseSeparableConvModule(
                self.in_channels,
                self.channels,
                3,
                dilation=dilation,
                padding=dilation,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
@HEADS.register_module()
class DepthwiseSeparableASPPHead(ASPPHead):
    """Encoder-Decoder with Atrous Separable Convolution for Semantic Image
    Segmentation.

    This head is the implementation of `DeepLabV3+
    <https://arxiv.org/abs/1802.02611>`_.

    Args:
        c1_in_channels (int): The input channels of c1 decoder. If is 0,
            the no decoder will be used.
        c1_channels (int): The intermediate channels of c1 decoder.
    """

    def __init__(self, c1_in_channels, c1_channels, **kwargs):
        super(DepthwiseSeparableASPPHead, self).__init__(**kwargs)
        assert c1_in_channels >= 0
        # Swap the parent's ASPP for the depthwise-separable variant.
        self.aspp_modules = DepthwiseSeparableASPPModule(
            dilations=self.dilations,
            in_channels=self.in_channels,
            channels=self.channels,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        if c1_in_channels > 0:
            # 1x1 projection of the low-level (c1) feature before fusion.
            self.c1_bottleneck = ConvModule(
                c1_in_channels,
                c1_channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        else:
            self.c1_bottleneck = None
        self.sep_bottleneck = nn.Sequential(
            DepthwiseSeparableConvModule(
                self.channels + c1_channels,
                self.channels,
                3,
                padding=1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg),
            DepthwiseSeparableConvModule(
                self.channels,
                self.channels,
                3,
                padding=1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg))

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
        # Image-level context branch, upsampled back to x's spatial size.
        aspp_outs = [
            resize(
                self.image_pool(x),
                size=x.size()[2:],
                mode='bilinear',
                align_corners=self.align_corners)
        ]
        aspp_outs.extend(self.aspp_modules(x))
        aspp_outs = torch.cat(aspp_outs, dim=1)
        output = self.bottleneck(aspp_outs)
        if self.c1_bottleneck is not None:
            # Fuse with the projected low-level feature at its resolution.
            c1_output = self.c1_bottleneck(inputs[0])
            output = resize(
                input=output,
                size=c1_output.shape[2:],
                mode='bilinear',
                align_corners=self.align_corners)
            output = torch.cat([output, c1_output], dim=1)
        output = self.sep_bottleneck(output)
        output = self.cls_seg(output)
        return output
| [
"xvjiarui0826@gmail.com"
] | xvjiarui0826@gmail.com |
0b897a87c4a4e0d22925c747937978849ff1003a | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /Sourcem8/pirates/npc/DistributedBossSkeleton.py | d44c27baae259995fbc08bd4e9e32a7e6dc1e7d2 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | from pandac.PandaModules import Vec4
from direct.directnotify import DirectNotifyGlobal
from pirates.npc.DistributedNPCSkeleton import DistributedNPCSkeleton
from pirates.pirate import AvatarTypes
from pirates.pirate.AvatarType import AvatarType
from pirates.npc.Boss import Boss
class DistributedBossSkeleton(DistributedNPCSkeleton, Boss):
    """Skeleton NPC that also carries the Boss mixin behaviour."""

    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBossSkeleton')

    def __init__(self, cr):
        DistributedNPCSkeleton.__init__(self, cr)
        Boss.__init__(self, cr)

    def generate(self):
        DistributedNPCSkeleton.generate(self)

    def announceGenerate(self):
        DistributedNPCSkeleton.announceGenerate(self)
        # Invasion skeletons do not get the undead boss effect here.
        if not self.isInInvasion():
            self.addBossEffect(AvatarTypes.Undead)

    def disable(self):
        self.removeBossEffect()
        DistributedNPCSkeleton.disable(self)

    def setAvatarType(self, avatarType):
        avatarType = AvatarType.fromTuple(avatarType)
        DistributedNPCSkeleton.setAvatarType(self, avatarType)
        self.loadBossData(self.getUniqueId(), avatarType)

    # The remaining accessors defer straight to the Boss mixin.
    def getEnemyScale(self):
        return Boss.getEnemyScale(self)

    def getBossEffect(self):
        return Boss.getBossEffect(self)

    def getBossHighlightColor(self):
        return Boss.getBossHighlightColor(self)

    def getShortName(self):
        return Boss.getShortName(self)

    def skipBossEffect(self):
        return self.isGhost
| [
"brandoncarden12345@gmail.com"
] | brandoncarden12345@gmail.com |
350ef7b96542adba818035827a6af008b844667b | 8bbeb7b5721a9dbf40caa47a96e6961ceabb0128 | /python/552.Student Attendance Record II(学生出勤记录 II).py | 68d48fe97cf00da57671f8a114e3c6ccd4b0c288 | [
"MIT"
] | permissive | lishulongVI/leetcode | bb5b75642f69dfaec0c2ee3e06369c715125b1ba | 6731e128be0fd3c0bdfe885c1a409ac54b929597 | refs/heads/master | 2020-03-23T22:17:40.335970 | 2018-07-23T14:46:06 | 2018-07-23T14:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,290 | py | """
<p>Given a positive integer <b>n</b>, return the number of all possible attendance records with length n, which will be regarded as rewardable. The answer may be very large, return it after mod 10<sup>9</sup> + 7.</p>
<p>A student attendance record is a string that only contains the following three characters:</p>
<p>
<ol>
<li><b>'A'</b> : Absent. </li>
<li><b>'L'</b> : Late.</li>
<li> <b>'P'</b> : Present. </li>
</ol>
</p>
<p>
A record is regarded as rewardable if it doesn't contain <b>more than one 'A' (absent)</b> or <b>more than two continuous 'L' (late)</b>.</p>
<p><b>Example 1:</b><br />
<pre>
<b>Input:</b> n = 2
<b>Output:</b> 8
<b>Explanation:</b>
There are 8 records with length 2 will be regarded as rewardable:
"PP" , "AP", "PA", "LP", "PL", "AL", "LA", "LL"
Only "AA" won't be regarded as rewardable owing to more than one absent times.
</pre>
</p>
<p><b>Note:</b>
The value of <b>n</b> won't exceed 100,000.
</p>
<p>给定一个正整数 <strong>n</strong>,返回长度为 n 的所有可被视为可奖励的出勤记录的数量。 答案可能非常大,你只需返回结果mod 10<sup>9</sup> + 7的值。</p>
<p>学生出勤记录是只包含以下三个字符的字符串:</p>
<ol>
<li><strong>'A'</strong> : Absent,缺勤</li>
<li><strong>'L'</strong> : Late,迟到</li>
<li><strong>'P'</strong> : Present,到场</li>
</ol>
<p>如果记录不包含<strong>多于一个'A'(缺勤)</strong>或<strong>超过两个连续的'L'(迟到)</strong>,则该记录被视为可奖励的。</p>
<p><strong>示例 1:</strong></p>
<pre>
<strong>输入:</strong> n = 2
<strong>输出:</strong> 8 <strong>
解释:</strong>
有8个长度为2的记录将被视为可奖励:
"PP" , "AP", "PA", "LP", "PL", "AL", "LA", "LL"
只有"AA"不会被视为可奖励,因为缺勤次数超过一次。</pre>
<p><strong>注意:n </strong>的值不会超过100000。</p>
<p>给定一个正整数 <strong>n</strong>,返回长度为 n 的所有可被视为可奖励的出勤记录的数量。 答案可能非常大,你只需返回结果mod 10<sup>9</sup> + 7的值。</p>
<p>学生出勤记录是只包含以下三个字符的字符串:</p>
<ol>
<li><strong>'A'</strong> : Absent,缺勤</li>
<li><strong>'L'</strong> : Late,迟到</li>
<li><strong>'P'</strong> : Present,到场</li>
</ol>
<p>如果记录不包含<strong>多于一个'A'(缺勤)</strong>或<strong>超过两个连续的'L'(迟到)</strong>,则该记录被视为可奖励的。</p>
<p><strong>示例 1:</strong></p>
<pre>
<strong>输入:</strong> n = 2
<strong>输出:</strong> 8 <strong>
解释:</strong>
有8个长度为2的记录将被视为可奖励:
"PP" , "AP", "PA", "LP", "PL", "AL", "LA", "LL"
只有"AA"不会被视为可奖励,因为缺勤次数超过一次。</pre>
<p><strong>注意:n </strong>的值不会超过100000。</p>
"""
class Solution(object):
    def checkRecord(self, n):
        """
        Count the length-``n`` attendance records that contain fewer than
        two 'A' (absent) in total and no run of three or more consecutive
        'L' (late), modulo 10**9 + 7.

        The original body was empty; this implements the standard O(n)
        dynamic program over six states.

        :type n: int
        :rtype: int
        """
        MOD = 10 ** 9 + 7
        # dp indexes the six reachable states as
        # (absences_so_far * 3 + trailing_consecutive_L):
        #   [a0l0, a0l1, a0l2, a1l0, a1l1, a1l2]
        dp = [1, 0, 0, 0, 0, 0]
        for _ in range(n):
            dp = [
                (dp[0] + dp[1] + dp[2]) % MOD,  # append 'P' to any a=0 state
                dp[0],                          # append first trailing 'L'
                dp[1],                          # append second trailing 'L'
                sum(dp) % MOD,                  # 'P' after a=1, or 'A' after a=0
                dp[3],
                dp[4],
            ]
        return sum(dp) % MOD
| [
"lishulong@wecash.net"
] | lishulong@wecash.net |
186415bd46ca8420d00a0aacc730035e12e8ce8d | 8839bd1f2e35726b6c8066985690fa2fa86b09a6 | /3.pyWiFi-ESP32/1.基础实验/3.外部中断/main.py | 9c1d42d2b004cdca0e1196e1b3548f1c7a342666 | [
"MIT"
] | permissive | elektrik-elektronik-muhendisligi/MicroPython-Examples-1 | a9532b06aba470f7f26f841929f4fb145549f70b | f7b08e95ff73e3417af21918c9c6bcf2f83281c6 | refs/heads/master | 2021-05-25T22:58:36.207098 | 2020-04-01T09:50:53 | 2020-04-01T09:50:53 | 253,956,073 | 1 | 0 | null | 2020-04-08T01:39:46 | 2020-04-08T01:39:45 | null | UTF-8 | Python | false | false | 564 | py | '''
实验名称:外部中断
版本:v1.0
日期:2019.8
作者:01Studio
说明:通过按键改变LED的亮灭状态(外部中断方式)
'''
from machine import Pin
import time
LED = Pin(2, Pin.OUT)              # LED object; starts switched off
KEY = Pin(0, Pin.IN, Pin.PULL_UP)  # key object, pulled high when idle

state = 0  # current LED pin state


def fun(KEY):
    """Toggle the LED when the key is pressed (external-interrupt handler)."""
    global state
    time.sleep_ms(10)        # debounce delay
    if KEY.value() == 0:     # confirm the key really is pressed
        state = not state
        LED.value(state)


KEY.irq(fun, Pin.IRQ_FALLING)  # register the interrupt on the falling edge
| [
"237827161@qq.com"
] | 237827161@qq.com |
3012477f1ca7fff4bcdccebbb3b2f1d13a1c2a2f | b72c37e3ccda507b231649cddd5c7845c6c34ba1 | /PythonBasic/Day10/function_as_args2.py | 44001eefb340bda50ed40e27c1db4166a3779e32 | [] | no_license | ljrdemail/AID1810 | 51c61c255b5c5efc1dc642b46691a614daedd85e | b417bd831bc1550ab953ce7ca23f54e34b8b2692 | refs/heads/master | 2020-04-24T09:45:14.781612 | 2019-02-21T11:26:49 | 2019-02-21T11:26:49 | 171,866,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | def goodbye(L):
# Body of goodbye(L) (its ``def`` line is fused into the metadata row
# above): prints one farewell ("再见") line per item in L.
for x in L:
print("再见:", x)
def hello(L):
    """Print a greeting ("你好") line for every item in L."""
    for name in L:
        print("你好:", name)
def fx(fn, L):
    """Invoke *fn* with the whole list *L*.

    Demonstrates passing a function (its address) as an argument.
    """
    fn(L)
fx(hello, ["Tom", "Jerry", "Spike"])
fx(goodbye, ["上海", "北京", "深圳"])
| [
"root"
] | root |
04bfded95f84d96c0feaca84c444f9bc97c4ed3a | d86add94a5848a1901c62235f969e8d17d9e1ad6 | /app/lib/base/decorators.py | 1a7076aac5058fcc2bbc6b7dc9c99ee181511f40 | [
"MIT"
] | permissive | Excloudx6/SnitchDNS | 3a948623892d1ffbd2118ac40df8acaad86e6578 | 8f76a76618a3609ec35c5be678b87e90254c8ed2 | refs/heads/master | 2023-07-26T04:48:38.837761 | 2021-09-15T08:52:39 | 2021-09-15T08:52:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | from functools import wraps
from flask_login import current_user
from flask import redirect, url_for, flash
from app.lib.base.provider import Provider
def admin_required(f):
    """View decorator: only admins may proceed; others are sent home."""
    @wraps(f)
    def wrapped_view(**kwargs):
        if current_user.admin:
            return f(**kwargs)
        flash('Access Denied', 'error')
        return redirect(url_for('home.index'))
    return wrapped_view
def must_have_base_domain(f):
    """View decorator: non-admins are redirected until a base domain exists."""
    @wraps(f)
    def wrapped_view(**kwargs):
        # Admins proceed regardless; everyone else needs a configured
        # base domain (short-circuit keeps Provider() admin-free).
        if (not current_user.admin
                and len(Provider().dns_zones().base_domain) == 0):
            flash('The base domain has not been configured by your administrator.', 'error')
            return redirect(url_for('home.index'))
        return f(**kwargs)
    return wrapped_view
def api_auth(f):
    """View decorator for API endpoints: enforce API authentication."""
    @wraps(f)
    def wrapped_view(**kwargs):
        # Imported inside the wrapper — presumably to dodge a circular
        # import at module load time (TODO: confirm).
        from app.lib.api.auth import ApiAuth
        from app.lib.api.base import ApiBase
        if not ApiAuth().auth(True):
            return ApiBase().send_access_denied_response()
        return f(**kwargs)
    return wrapped_view
| [
"p@vel.gr"
] | p@vel.gr |
b4d1c962f462d9cc40b3d0cc9ec0404af28563a9 | dac498d66ec02ad9b52c4c3b074b3bd68d4aee00 | /joke/loginmanage.py | 03a1eb4f16b6ff60ac65b5f0db38735ca588dd4e | [] | no_license | wangjian2254/haha | 3f95c22166af0495098783a40cd1a5d5326cc6e6 | d856cf43bbb12d49334078432a74cbe1ef47cf98 | refs/heads/master | 2016-09-05T20:41:38.188010 | 2014-03-22T11:22:24 | 2014-03-22T11:22:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,468 | py | #coding=utf-8
#author:u'王健'
#Date: 13-3-5
#Time: 下午10:15
import uuid
from google.appengine.api import memcache
from models.model import User, UserJoke
__author__ = u'王健'
def setLogin(web, username):
    """Create a login token, cache it for 10 hours, and set the cookie."""
    token = str(uuid.uuid4())
    memcache.set('webusername' + token, username, 36000)
    cookie = 'webusername=' + token + ';'
    web.response.headers.add_header('Set-Cookie', cookie + 'Max-Age = 3600000;path=/;')
def setLogout(web):
    """Log the user out by overwriting the login cookie with an empty value."""
    cookie = 'webusername=;'
    web.response.headers.add_header('Set-Cookie', cookie + 'Max-Age = 3600000;path=/;')
def getUser(user):
    """Fetch a UserJoke by id, with a memcache-backed (200 h) cache."""
    user_joke = memcache.get('userbyid' + str(user))
    if not user_joke:
        user_joke = UserJoke.get_by_id(int(user))
        memcache.set('userbyid' + str(user), user_joke, 720000)
    return user_joke
def get_current_user(web):
    """Resolve the logged-in user from the request cookie; False if none."""
    guist = {}
    cookie_list = [{'key': cookie_key, 'value': cookie_value}
                   for cookie_key, cookie_value in web.request.cookies.iteritems()]
    for cookie in cookie_list:
        if cookie['key'] == 'webusername':
            guist["userid"] = memcache.get('webusername' + cookie['value'])
    if guist and 'userid' in guist and guist['userid']:
        # Try the short-lived login cache first, then fall back to the
        # id-keyed lookup and re-populate the cache.
        user = memcache.get('userlogin' + str(guist['userid']))
        if not user:
            user = getUser(guist['userid'])
            memcache.set('userlogin' + str(guist['userid']), user, 36000)
        if user:
            return user
    return False
| [
"wangjian2254@gmail.com"
] | wangjian2254@gmail.com |
6bb1f5033d2d517256fda85186b3a4fc5bf68207 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5708284669460480_0/Python/mediocrates/gcj_2015-1c-2.py | 1597de67f4d6a70b0464fc4c0f21cd8646e2efde | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | with open('B-small-attempt0 (1).in', 'r+b') as f:
T = int(f.readline().strip())
for i in range(1, T+1):
K, L, S = map(int, f.readline().strip().split())
keyboard = f.readline().strip()
target = f.readline().strip()
isPossible = all([c in keyboard for c in target])
if not isPossible:
print 'Case #%d: 0.0' % i
continue
possibleOverlapStarts = [j+1 for (j, c) in enumerate(target[1:]) if c == target[0]]
maxOverlapLength = 0
maxOverlapStr = ''
for p in possibleOverlapStarts:
possibleOverlapLength = L - p
if possibleOverlapLength > maxOverlapLength:
if target[p:] == target[0:possibleOverlapLength]:
maxOverlapLength = possibleOverlapLength
maxOverlapStr = target[p:]
probs = {}
for c in keyboard:
probs[c] = 1.0*keyboard.count(c)/K
bringBananas = 1.0 + (S-L)/(L-maxOverlapLength)
expectedBananasOneTime = 1.0
for c in target:
expectedBananasOneTime *= probs[c]
expectedBananas = (S-L+1)* expectedBananasOneTime
print "Case #%d: %f" % (i, bringBananas - expectedBananas)
# print keyboard, target, S
# print bringBananas
# print expectedBananasOneTime
# print expectedBananas
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
e7e66df3e2d78baff5e392317914cea6f33e6618 | 30a4b8b4c5212f8915f67900e46784645753bf4e | /tests/test_lists.py | 72127d4dcfe993099a529a218a5a8823f9b95244 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | isabella232/omb-pdf | dff06168295f5527d95f0090084645e2e15a41e7 | 5a35757b848097525a920d53789c7ee5c10bc87e | refs/heads/master | 2021-08-30T22:02:32.610174 | 2017-12-19T15:37:43 | 2017-12-19T15:37:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,203 | py | import pytest
from ombpdf.document import OMBListItem, OMBListItemMarker
from ombpdf.lists import annotate_lists
from . import bbox
def test_annotate_lists_works(m_16_19_doc):
    lists = annotate_lists(m_16_19_doc)

    expectations = [
        # (list_id, item_no, line prefix, is_ordered, indentation)
        (1, 1, '1. Transitioning to', True, 1),
        (1, 2, '2. Migrating to inter-agency', True, 1),
        (2, 1, '• Coordinating with OMB', False, 1),
        (5, 1, 'a. A description of any', True, 2),
    ]
    for list_id, number, prefix, is_ordered, indentation in expectations:
        line = lists[list_id][number][0]
        assert str(line).startswith(prefix)
        assert line.annotation == OMBListItem(
            list_id=list_id,
            number=number,
            is_ordered=is_ordered,
            indentation=indentation,
        )

    # The list markers themselves are annotated as well.
    assert lists[1][1][0][0].annotation == OMBListItemMarker(is_ordered=True)
    assert lists[2][1][0][0].annotation == OMBListItemMarker(is_ordered=False)
def test_lists_are_annotated_on_m_15_17(m_15_17_doc):
    lists = annotate_lists(m_15_17_doc)
    titles = [
        'Improve Educational Outcomes and Life Outcomes for Native Youth',
        'Increase Access to Quality Teacher Housing',
        'Improve Access to the Internet',
        'Support the Implementation ofthe Indian Child Welfare Act',  # (sic)
        'Reduce Teen Suicide',
    ]
    for number, title in enumerate(titles, start=1):
        assert lists[1][number][0].annotation == OMBListItem(
            list_id=1,
            number=number,
            is_ordered=False,
            indentation=1,
        )
        assert title in ' '.join(str(line) for line in lists[1][number])
@pytest.mark.xfail(raises=AssertionError)
def test_unordered_2():
    url = ('http://localhost:5000/rawlayout/2011/m11-29.pdf'
           '?bbox=2,67,554.390625,560,737.390625#2')
    doc, _, lines = bbox.find_lines(url)
    doc.annotators.require('lists')
    # Every line in the region should carry a list-item annotation.
    for line in lines:
        assert isinstance(line.annotation, OMBListItem)
| [
"varmaa@gmail.com"
] | varmaa@gmail.com |
7f10811d561a8b3d8b400aa566c595d11bfa2ea3 | dc49f1f89faee6ffe601f4736e2b4cf329533413 | /ihome/ihome/apps/homes/migrations/0003_auto_20191018_1622.py | fda2094bfb999259c62f661e58cd348ce0325dee | [
"MIT"
] | permissive | woobrain/ihome | 416d8e50feff48f4a60a7a3d473f14aea4cb0b0d | fbc7230fe6d445b2a258c49c48297258ae702bb3 | refs/heads/master | 2022-12-10T07:39:55.743400 | 2019-11-16T00:46:23 | 2019-11-16T00:46:23 | 222,025,889 | 0 | 0 | MIT | 2022-12-08T05:33:31 | 2019-11-16T00:42:25 | JavaScript | UTF-8 | Python | false | false | 679 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-10-18 08:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adjust the Area model's timestamp fields."""

    dependencies = [
        ('homes', '0002_auto_20191018_0818'),
    ]

    operations = [
        # create_time: set automatically on insert.
        migrations.AlterField(
            model_name='area',
            name='create_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='创建时间'),
        ),
        # update_time: refreshed on every save.
        migrations.AlterField(
            model_name='area',
            name='update_time',
            field=models.DateTimeField(auto_now=True, verbose_name='更新时间'),
        ),
    ]
| [
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
5cadbfa4c5708cf50e83d0555f318b11263bee7c | 77e64a1f810bb860db97e961f168cc0ddb223f62 | /pyEX/premium/valuengine/__init__.py | 22c09f54002b45789877f03c8e866888c3d88457 | [
"Apache-2.0"
] | permissive | cjwang/pyEX | 4ae4b86ddac0dda904925c553fa0b9ff81b68032 | 1b5f40f80110afaa4809ea48fac067033c7bdf89 | refs/heads/main | 2022-12-25T07:11:48.252227 | 2020-10-08T23:34:46 | 2020-10-08T23:34:46 | 303,057,426 | 1 | 0 | Apache-2.0 | 2020-10-11T06:30:53 | 2020-10-11T06:30:53 | null | UTF-8 | Python | false | false | 986 | py | # -*- coding: utf-8 -*-
from ...common import _expire, _getJson, _strOrDate, PyEXception, _EST
@_expire(hour=10, tz=_EST)
def valuEngineStockResearchReport(symbol='', date=None, token='', version=''):
    '''ValuEngine provides research on over 5,000 stocks with stock
    valuations, Buy/Hold/Sell recommendations, and forecasted target
    prices. Reports are available since March 19th, 2020.

    https://iexcloud.io/docs/api/#valuengine-stock-research-report

    Args:
        symbol (str): symbol to use (required)
        date (str or datetime): report date (required)
    '''
    if not symbol or not date:
        raise PyEXception("symbol and date required")
    url = 'files/download/VALUENGINE_REPORT?symbol={}&date={}'.format(
        symbol, _strOrDate(date))
    return _getJson(url, token=token, version=version)
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
e45d99f58316dd2351f525153acd26a7e81eb9fe | f99f97415a8724725c06ac59b4f666da2c63ec8a | /xtobjdis/xtcmjoin.py | 44df2222bb932c72dbe2cff964161a9629caf857 | [] | no_license | 0x90/esp-arsenal | 46687deb10013ed3e23ceae8cc3d034d8961fb0f | 532981ed3db9c6bc4ebf31e8f7648fcc19161007 | refs/heads/master | 2021-01-19T05:33:28.508899 | 2018-03-25T03:05:57 | 2018-03-25T03:05:57 | 64,477,601 | 38 | 19 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!/usr/bin/python3
# This is just a very simple script to join multiple callmaps (as produced by
# xtobjdis' --callmap option) into a single callmap file.
#
# Usage: xtcmjoin FILE [FILE ...] > OUTFILE
import sys
import json
result = []
for filename in sys.argv[1:]:
    # Each input file is itself a JSON list produced by --callmap.
    with open(filename, 'r') as f:
        result.extend(json.load(f))

json.dump(result, sys.stdout, sort_keys=True, indent=4)
| [
"oleg.kupreev@gmail.com"
] | oleg.kupreev@gmail.com |
53f3e7f478137c6f5cb03fb22558b4516505538f | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D03B/PAYMULD03BUN.py | 91e305f8ec40f20fedc33f39c8f07c2935f1d7e3 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 4,550 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD03BUN import recorddefs
# Message-tree definition for the UN/EDIFACT D.03B PAYMUL (multiple payment
# order) message, consumed by the bots EDI translator.  Each node is a dict:
# ID = segment tag, MIN/MAX = allowed repetitions, LEVEL = nested segment
# group.  Machine-generated from the UN docs -- edit with care.
structure = [
    {ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'BGM', MIN: 1, MAX: 1},
        {ID: 'DTM', MIN: 1, MAX: 1},
        {ID: 'BUS', MIN: 0, MAX: 1},
        {ID: 'RFF', MIN: 0, MAX: 2, LEVEL: [
            {ID: 'DTM', MIN: 0, MAX: 1},
        ]},
        {ID: 'FII', MIN: 0, MAX: 5, LEVEL: [
            {ID: 'CTA', MIN: 0, MAX: 1},
            {ID: 'COM', MIN: 0, MAX: 5},
        ]},
        {ID: 'NAD', MIN: 0, MAX: 3, LEVEL: [
            {ID: 'CTA', MIN: 0, MAX: 1},
            {ID: 'COM', MIN: 0, MAX: 5},
        ]},
        # B-level group: presumably one LIN per ordering/debit side -- confirm
        # against the PAYMUL message spec.
        {ID: 'LIN', MIN: 1, MAX: 9999, LEVEL: [
            {ID: 'DTM', MIN: 0, MAX: 2},
            {ID: 'RFF', MIN: 0, MAX: 2},
            {ID: 'BUS', MIN: 0, MAX: 1},
            {ID: 'FCA', MIN: 0, MAX: 1},
            {ID: 'MOA', MIN: 0, MAX: 1, LEVEL: [
                {ID: 'CUX', MIN: 0, MAX: 1},
                {ID: 'DTM', MIN: 0, MAX: 2},
                {ID: 'RFF', MIN: 0, MAX: 1},
            ]},
            {ID: 'FII', MIN: 1, MAX: 2, LEVEL: [
                {ID: 'CTA', MIN: 0, MAX: 1},
                {ID: 'COM', MIN: 0, MAX: 5},
            ]},
            {ID: 'NAD', MIN: 0, MAX: 3, LEVEL: [
                {ID: 'CTA', MIN: 0, MAX: 1},
                {ID: 'COM', MIN: 0, MAX: 5},
            ]},
            {ID: 'INP', MIN: 0, MAX: 1, LEVEL: [
                {ID: 'FTX', MIN: 0, MAX: 1},
                {ID: 'DTM', MIN: 0, MAX: 2},
            ]},
            {ID: 'GEI', MIN: 0, MAX: 10, LEVEL: [
                {ID: 'MOA', MIN: 0, MAX: 1},
                {ID: 'LOC', MIN: 0, MAX: 2},
                {ID: 'NAD', MIN: 0, MAX: 1},
                {ID: 'RCS', MIN: 0, MAX: 1},
                {ID: 'FTX', MIN: 0, MAX: 10},
            ]},
            {ID: 'PRC', MIN: 0, MAX: 1, LEVEL: [
                {ID: 'FTX', MIN: 1, MAX: 1},
            ]},
            # C-level group: one SEQ per individual credit transfer.
            {ID: 'SEQ', MIN: 1, MAX: 999999, LEVEL: [
                {ID: 'MOA', MIN: 1, MAX: 1},
                {ID: 'DTM', MIN: 0, MAX: 1},
                {ID: 'BUS', MIN: 0, MAX: 1},
                {ID: 'RFF', MIN: 0, MAX: 3},
                {ID: 'PAI', MIN: 0, MAX: 1},
                {ID: 'FCA', MIN: 0, MAX: 1},
                {ID: 'FII', MIN: 0, MAX: 3, LEVEL: [
                    {ID: 'CTA', MIN: 0, MAX: 1},
                    {ID: 'COM', MIN: 0, MAX: 5},
                ]},
                {ID: 'NAD', MIN: 0, MAX: 3, LEVEL: [
                    {ID: 'CTA', MIN: 0, MAX: 1},
                    {ID: 'COM', MIN: 0, MAX: 5},
                ]},
                {ID: 'INP', MIN: 0, MAX: 3, LEVEL: [
                    {ID: 'FTX', MIN: 0, MAX: 1},
                    {ID: 'DTM', MIN: 0, MAX: 2},
                ]},
                {ID: 'GEI', MIN: 0, MAX: 10, LEVEL: [
                    {ID: 'MOA', MIN: 0, MAX: 1},
                    {ID: 'LOC', MIN: 0, MAX: 2},
                    {ID: 'NAD', MIN: 0, MAX: 1},
                    {ID: 'RCS', MIN: 0, MAX: 1},
                    {ID: 'FTX', MIN: 0, MAX: 10},
                ]},
                # Remittance-information group (referenced documents and
                # per-document adjustments/line items).
                {ID: 'PRC', MIN: 0, MAX: 1, LEVEL: [
                    {ID: 'FTX', MIN: 0, MAX: 5},
                    {ID: 'DOC', MIN: 0, MAX: 9999, LEVEL: [
                        {ID: 'MOA', MIN: 0, MAX: 5},
                        {ID: 'DTM', MIN: 0, MAX: 5},
                        {ID: 'RFF', MIN: 0, MAX: 5},
                        {ID: 'NAD', MIN: 0, MAX: 2},
                        {ID: 'CUX', MIN: 0, MAX: 5, LEVEL: [
                            {ID: 'DTM', MIN: 0, MAX: 1},
                        ]},
                        {ID: 'AJT', MIN: 0, MAX: 100, LEVEL: [
                            {ID: 'MOA', MIN: 1, MAX: 1},
                            {ID: 'RFF', MIN: 0, MAX: 1},
                            {ID: 'FTX', MIN: 0, MAX: 5},
                        ]},
                        {ID: 'DLI', MIN: 0, MAX: 1000, LEVEL: [
                            {ID: 'MOA', MIN: 1, MAX: 5},
                            {ID: 'PIA', MIN: 0, MAX: 5},
                            {ID: 'DTM', MIN: 0, MAX: 5},
                            {ID: 'CUX', MIN: 0, MAX: 5, LEVEL: [
                                {ID: 'DTM', MIN: 0, MAX: 1},
                            ]},
                            {ID: 'AJT', MIN: 0, MAX: 10, LEVEL: [
                                {ID: 'MOA', MIN: 1, MAX: 1},
                                {ID: 'RFF', MIN: 0, MAX: 1},
                                {ID: 'FTX', MIN: 0, MAX: 5},
                            ]},
                        ]},
                    ]},
                    {ID: 'GEI', MIN: 0, MAX: 1, LEVEL: [
                        {ID: 'MOA', MIN: 0, MAX: 5},
                    ]},
                ]},
            ]},
        ]},
        {ID: 'CNT', MIN: 0, MAX: 5},
        {ID: 'AUT', MIN: 0, MAX: 5, LEVEL: [
            {ID: 'DTM', MIN: 0, MAX: 1},
        ]},
        {ID: 'UNT', MIN: 1, MAX: 1},
    ]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
fffe146a9e2ae5ae2283c75d729f19fd877f14c6 | 9bc228372e586a1f90bb0685c43e744be9638ecd | /17_채희찬/session6/urls.py | ce559f08de38289a2e3964961e0b272ef33101aa | [
"MIT"
] | permissive | LikeLionSCH/9th_ASSIGNMENT | 3e58862a76e3232aed7e19e8939da23330ff2e22 | c211995ad12f404833ffec7fd80e1229b82a3bfa | refs/heads/master | 2023-07-03T10:27:11.843177 | 2021-08-02T14:52:02 | 2021-08-02T14:52:02 | 379,633,279 | 7 | 18 | MIT | 2021-08-02T14:52:03 | 2021-06-23T14:36:59 | Python | UTF-8 | Python | false | false | 1,055 | py | """Wordcount URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from wordcount_app import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.count, name="'count"),
path('count', views.count, name="count"),
path('result', views.result, name="result"),
path('test', views.test, name="test"),
path('test1', views.test1, name="test1"),
path('test2', views.test2, name="test2"),
]
| [
"eqholic1125@gmail.com"
] | eqholic1125@gmail.com |
ee4c7018e5253c22f4e97f666ec9a0b775ef5fd2 | 2ffd079c34cb07c738f7e5f703764fed68f2c8c0 | /Solutions/Two_Sum.py | 025439a768f1e31efc4d638706fcb4b6a55591ac | [] | no_license | WuIFan/LeetCode | bc96355022c875bdffb39c89a2088457b97d30ab | 689a100ada757bc20334d5f0084587af3039ca7b | refs/heads/master | 2022-05-24T07:13:01.023733 | 2022-04-03T15:26:23 | 2022-04-03T15:26:23 | 202,471,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
dist = {}
for i in range(len(nums)):
if target-nums[i] in dist:
return [dist[target-nums[i]],i]
else:
dist[nums[i]] = i
nums = [3,2,4]
target = 6
nums = [2,7,11,15]
target = 9
print (Solution().twoSum(nums,target)) | [
"denny91002@gmail.com"
] | denny91002@gmail.com |
a3b11172d56239193079f99126e5cc6c17beb1e5 | ffef4697f09fb321a04f2b3aad98b688f4669fb5 | /tests/ut/python/parallel/test_auto_parallel_parameter_cast.py | 67b8f98fafae2e9e31cdf5acd09a7555abb621ec | [
"Apache-2.0",
"AGPL-3.0-only",
"BSD-3-Clause-Open-MPI",
"MPL-1.1",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"MPL-2.0",
"LGPL-2.1-only",
"GPL-2.0-only",
"Libpng",
"BSL-1.0",
"MIT",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Z... | permissive | Ewenwan/mindspore | 02a0f1fd660fa5fec819024f6feffe300af38c9c | 4575fc3ae8e967252d679542719b66e49eaee42b | refs/heads/master | 2021-05-19T03:38:27.923178 | 2020-03-31T05:49:10 | 2020-03-31T05:49:10 | 251,512,047 | 1 | 0 | Apache-2.0 | 2020-03-31T05:48:21 | 2020-03-31T05:48:20 | null | UTF-8 | Python | false | false | 3,339 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from mindspore import context
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore import Tensor
from tests.ut.python.ops.test_math_ops import VirtualLoss
import mindspore as ms
from mindspore.common import dtype as mstype
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore import Tensor, Parameter
from mindspore.parallel._utils import _reset_op_id as reset_op_id
from mindspore.parallel import set_algo_parameters
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x, y, z, w):
predict = self.network(x, y, z, w)
return self.loss(predict)
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x, y, z, w):
return C.grad_all(self.network)(x, y, z, w)
# model_parallel test
def test_common_parameter():
class Net(nn.Cell):
def __init__(self):
super().__init__()
self.matmul1 = P.MatMul()
self.matmul2 = P.MatMul()
self.matmul3 = P.MatMul()
self.weight1 = Parameter(Tensor(np.ones([64, 64]).astype(np.float16) * 0.01), "w", requires_grad=True)
self.cast1 = P.Cast()
self.cast2 = P.Cast()
def construct(self, x, y, z, w):
m1_result = self.matmul1(x, self.cast1(self.weight1, mstype.float32))
m2_result = self.matmul2(z, self.cast2(self.weight1, mstype.float32))
m3_result = self.matmul3(m2_result, m1_result)
return m3_result
size = 8
context.set_auto_parallel_context(device_num=size, global_rank=0)
set_algo_parameters(elementwise_op_strategy_follow=True)
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
y = Tensor(np.ones([64, 64]), dtype=ms.float32)
z = Tensor(np.ones([64, 64]), dtype=ms.float32)
w = Tensor(np.ones([64, 64]), dtype=ms.float32)
net = NetWithLoss(Net())
context.set_auto_parallel_context(parallel_mode="auto_parallel")
reset_op_id()
_executor.compile(net, x, y, z, w, phase='train')
strategies = _executor._get_strategy(net)
expected_strategies = {'Default/network-Net/MatMul-op8': [[1, 1], [1, 8]],
'Default/network-Net/MatMul-op9': [[1, 1], [1, 8]],
'Default/network-Net/Cast-op10': [[1, 8]],
'Default/network-Net/MatMul-op0': [[1, 1], [1, 8]],
'Default/network-Net/Cast-op11': [[1, 8]]}
assert strategies == expected_strategies | [
"leon.wanghui@huawei.com"
] | leon.wanghui@huawei.com |
27db6175564c85ec104ba403cd5ea540c3cab21a | b77eda8a67f78293f3f0f978609adf45575ceb5f | /array/Ratndeep/one diff in adj of number.py | f4a2112387b1631979700d1e7501fac298609643 | [
"MIT"
] | permissive | Nagendracse1/Competitive-Programming | 8b02c7688968f4930c913dc410087a1fa59bcaa8 | 325e151b9259dbc31d331c8932def42e3ab09913 | refs/heads/master | 2023-02-02T23:08:55.036635 | 2020-12-18T18:22:24 | 2020-12-18T18:22:24 | 313,898,641 | 0 | 0 | MIT | 2020-12-18T18:22:25 | 2020-11-18T10:25:51 | Python | UTF-8 | Python | false | false | 437 | py | n = int(input())
# Print every number below n whose decimal digits form a "stepping"
# sequence (each adjacent digit pair differs by exactly 1).  Single-digit
# numbers qualify trivially, so everything below min(n, 11) is printed as-is.
if n <= 10:
    for value in range(1, n):
        print(value, end=" ")
    print()
else:
    for value in range(1, 11):
        print(value, end=" ")
    for value in range(11, n):
        digits = str(value)
        stepping = all(
            abs(int(left) - int(right)) == 1
            for left, right in zip(digits, digits[1:])
        )
        if stepping:
            print(value, end=" ")
    print()
| [
"ratndeepk07@gmail.com"
] | ratndeepk07@gmail.com |
da7638e2212c3a2b53231e334473866942779e5c | 4b81a7e86fe4e05b2486898bd55d52639b698626 | /backend/owldock/asgi.py | d3b8e258718ac7b10a44700bafde268d579a87ff | [] | no_license | dandavison/_owldock | 94c910b172fa495bb769268c196c5131c7e02127 | 196e849cb70de44523132e67659f8344f8d5cc0a | refs/heads/main | 2023-08-18T19:54:42.782049 | 2021-08-01T21:42:56 | 2021-08-01T22:02:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
ASGI config for owldock project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings module unless the environment already
# chose one (setdefault never overrides a deployment-specific override).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "owldock.settings")
# The ASGI callable that servers (uvicorn/daphne/hypercorn) import and serve.
application = get_asgi_application()
| [
"dandavison7@gmail.com"
] | dandavison7@gmail.com |
e5c4ea9d17294066863dc8214c33a66ee2e8b975 | 2bcf18252fa9144ece3e824834ac0e117ad0bdf3 | /zpt/trunk/site-packages/zpt/_pytz/zoneinfo/Africa/Niamey.py | 2178ea142d80e6415e700bac8425e15173ef0498 | [
"MIT",
"ZPL-2.1"
] | permissive | chadwhitacre/public | 32f65ba8e35d38c69ed4d0edd333283a239c5e1d | 0c67fd7ec8bce1d8c56c7ff3506f31a99362b502 | refs/heads/master | 2021-05-10T14:32:03.016683 | 2010-05-13T18:24:20 | 2010-05-13T18:24:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | '''tzinfo timezone information for Africa/Niamey.'''
from zpt._pytz.tzinfo import DstTzInfo
from zpt._pytz.tzinfo import memorized_datetime as d
from zpt._pytz.tzinfo import memorized_ttinfo as i
class Niamey(DstTzInfo):
    '''Africa/Niamey timezone definition. See datetime.tzinfo for details'''
    zone = 'Africa/Niamey'
    # Generated data: UTC instants (via memorized_datetime) at which this
    # zone's rules change; entry i pairs with _transition_info[i] below.
    _utc_transition_times = [
d(1,1,1,0,0,0),
d(1911,12,31,23,51,32),
d(1934,2,26,1,0,0),
d(1960,1,1,0,0,0),
    ]
    # Each memorized_ttinfo tuple: (utcoffset seconds, DST offset seconds,
    # tzname abbreviation) in effect from the matching transition time.
    _transition_info = [
i(480,0,'LMT'),
i(-3600,0,'WAT'),
i(0,0,'GMT'),
i(3600,0,'WAT'),
    ]
# Module-level singleton instance, following pytz's generated-file convention.
Niamey = Niamey()
| [
"chad@zetaweb.com"
] | chad@zetaweb.com |
09d041280896c1ee198a80238f9ad95a2df9f978 | aa7ae21225d64a4e37eb5a012aa6ee97d87cba83 | /util/dos_cmd.py | e2bbd9cbbfb484102bee10d18b7fd03f90d3b89b | [] | no_license | turkey66/appium_test | 68c912cb75ab0939c958b742da7de71057bd2702 | 7bd8750ebbda32b7b8a9d71baa5f58d1c8731000 | refs/heads/master | 2020-03-31T04:45:22.662529 | 2018-10-07T08:20:42 | 2018-10-07T08:20:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | # -*- coding: utf-8 -*-
import os
#print(os.system('adb devices'))
#print(os.popen('adb devices').readlines())
class DosCmd:
    """Thin wrapper around shell command execution (used for adb/netstat)."""

    def excute_cmd_result(self, command):
        """Run *command* in a shell and return its stdout as a list of lines.

        Each line is stripped of surrounding whitespace; lines that were a
        bare newline are dropped entirely.
        """
        # os.popen's return value is a context manager; closing it explicitly
        # avoids leaking the underlying pipe (the original never closed it).
        with os.popen(command) as stream:
            raw_lines = stream.readlines()
        return [line.strip() for line in raw_lines if line != '\n']

    def excute_cmd(self, command):
        """Run *command* in a shell, discarding its output; returns None."""
        os.system(command)
if __name__ == '__main__':
    # Quick manual smoke test: list attached adb devices and any process
    # bound to port 8080 (the netstat/findstr syntax targets Windows).
    runner = DosCmd()
    print(runner.excute_cmd_result('adb devices'))
    print(runner.excute_cmd_result('netstat -ano | findstr 8080'))
"eric@example.com"
] | eric@example.com |
7bc2f1e7d3cec2211fa40741253ce2bb2a920932 | 8076124f4087781e0513dbe09c0f43dc6a861ab0 | /src/sentry/api/endpoints/organization_event_details.py | 2cc0b8ef5760f3904c9ab431b2383f87eb5624be | [
"BSD-2-Clause"
] | permissive | sharmapacific/sentry | 75e3356f87cb5a1e812e0974b081fd47852dfe33 | fceabe7cb84de587fe05b2c36edc013058e7e55a | refs/heads/master | 2020-08-19T00:13:48.748983 | 2019-10-17T17:09:06 | 2019-10-17T17:09:06 | 215,851,537 | 1 | 0 | BSD-3-Clause | 2019-10-17T17:43:49 | 2019-10-17T17:43:49 | null | UTF-8 | Python | false | false | 4,005 | py | from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api.bases import OrganizationEventsEndpointBase, OrganizationEventsError, NoProjects
from sentry.api.event_search import get_reference_event_conditions
from sentry import eventstore, features
from sentry.models.project import Project
from sentry.api.serializers import serialize
class OrganizationEventDetailsEndpoint(OrganizationEventsEndpointBase):
    """Serve one event's details plus prev/next/oldest/latest event IDs that
    match the same discover query, so the UI can paginate between events."""
    def get(self, request, organization, project_slug, event_id):
        """Return the serialized event, or 404 (feature off / missing
        project / missing event / no projects) or 400 (bad search query)."""
        if not features.has("organizations:events-v2", organization, actor=request.user):
            return Response(status=404)
        try:
            params = self.get_filter_params(request, organization)
            snuba_args = self.get_snuba_query_args(request, organization, params)
        except OrganizationEventsError as exc:
            return Response({"detail": exc.message}, status=400)
        except NoProjects:
            return Response(status=404)
        try:
            project = Project.objects.get(slug=project_slug, organization_id=organization.id)
        except Project.DoesNotExist:
            return Response(status=404)
        # We return the requested event if we find a match regardless of whether
        # it occurred within the range specified
        event = eventstore.get_event_by_id(project.id, event_id)
        if event is None:
            return Response({"detail": "Event not found"}, status=404)
        # Scope the pagination related event ids to the current event
        # This ensure that if a field list/groupby conditions were provided
        # that we constrain related events to the query + current event values
        event_slug = u"{}:{}".format(project.slug, event_id)
        snuba_args["conditions"].extend(get_reference_event_conditions(snuba_args, event_slug))
        data = serialize(event)
        # Attach the pagination anchors (each may be None when absent).
        data["nextEventID"] = self.next_event_id(snuba_args, event)
        data["previousEventID"] = self.prev_event_id(snuba_args, event)
        data["oldestEventID"] = self.oldest_event_id(snuba_args, event)
        data["latestEventID"] = self.latest_event_id(snuba_args, event)
        data["projectSlug"] = project_slug
        return Response(data)
    def next_event_id(self, snuba_args, event):
        """
        Returns the next event ID if there is a subsequent event matching the
        conditions provided. Ignores the project_id.
        """
        next_event = eventstore.get_next_event_id(event, filter=self._get_filter(snuba_args))
        # Implicitly returns None when no matching event exists.
        if next_event:
            return next_event[1]
    def prev_event_id(self, snuba_args, event):
        """
        Returns the previous event ID if there is a previous event matching the
        conditions provided. Ignores the project_id.
        """
        prev_event = eventstore.get_prev_event_id(event, filter=self._get_filter(snuba_args))
        # Implicitly returns None when no matching event exists.
        if prev_event:
            return prev_event[1]
    def latest_event_id(self, snuba_args, event):
        """
        Returns the latest event ID if there is a newer event matching the
        conditions provided
        """
        latest_event = eventstore.get_latest_event_id(event, filter=self._get_filter(snuba_args))
        # Implicitly returns None when no matching event exists.
        if latest_event:
            return latest_event[1]
    def oldest_event_id(self, snuba_args, event):
        """
        Returns the oldest event ID if there is a subsequent event matching the
        conditions provided
        """
        oldest_event = eventstore.get_earliest_event_id(event, filter=self._get_filter(snuba_args))
        # Implicitly returns None when no matching event exists.
        if oldest_event:
            return oldest_event[1]
    def _get_filter(self, snuba_args):
        # Translate snuba query args into an eventstore.Filter; project and
        # issue constraints live under filter_keys rather than conditions.
        return eventstore.Filter(
            conditions=snuba_args["conditions"],
            start=snuba_args.get("start", None),
            end=snuba_args.get("end", None),
            project_ids=snuba_args["filter_keys"].get("project_id", None),
            group_ids=snuba_args["filter_keys"].get("issue", None),
        )
| [
"noreply@github.com"
] | sharmapacific.noreply@github.com |
0e2e77f0904ddd2648285841826ec0d31e236762 | 6b51a2e7fab5021364023bad6ef4a26116dcc260 | /get_allelic_number_based_on_SNP2.0.py | 09ee88347b0db45aaa5f1aea9674e249c85de93d | [
"MIT"
] | permissive | dikingchen/bioscript | 4f43f377bc2e9eb7d3bb515a8dec9a1df23fb98a | 86d35c571b5805aa89a2c6aa689b8eac39e2cfb0 | refs/heads/master | 2020-03-27T14:59:41.675964 | 2018-09-03T02:39:42 | 2018-09-03T02:39:42 | 146,691,965 | 0 | 0 | MIT | 2018-09-03T01:31:44 | 2018-08-30T03:41:18 | null | UTF-8 | Python | false | false | 7,527 | py | #!/usr/bin/env python3.4
'''
#-------------------------------------------------------------------------------
#Author:WangYu (wang_yu@nwsuaf.edu.cn)
#Time: 2016/3/10
#Version: 2.0
#useage: get allelic numbers based on SNP from sam file
#-------------------------------------------------------------------------------
'''
import os
import sys
import re
import getopt
def usage():
    # Assemble the help text once, then emit it; the literal is kept
    # byte-identical so the printed output does not change.
    help_text = '''Useage: python script.py [option] [parameter]
    -s/--snp_file input the snp file
    -b/--bam_file input the bam/sam file
    -o/--output the output results file
    -h/--help show possible options'''
    print(help_text)
#######################default
# Parse the command line: -s SNP table, -b BAM/SAM file, -o output path.
opts, args = getopt.getopt(sys.argv[1:], "hs:b:o:",["help","snp_file=","bam_file=","output="])
for op, value in opts:
    if op == "-s" or op == "--snp_file":
        snp_file = value
    elif op == "-b" or op == "--bam_file":
        bam_file = value
    elif op == "-o" or op == "--output":
        output = value
    elif op == "-h" or op == "--help":
        usage()
        sys.exit(1)
# All three of -s/-b/-o are required (3 flags + 3 values + program name = 7).
if len(sys.argv) < 7:
    usage()
    sys.exit(1)
# f1: SNP table; f2: alignments streamed from `samtools view`; f3: output.
f1=open(snp_file)
f2=os.popen('samtools view '+bam_file)
f3=open(output,'w')
#load snp dictionary########################
'''
1 612 T C
1 638 A C
1 681 G C
1 1596 T C
'''
# Keyed by "chrom-pos": reference base, alternate base, "ref\talt" string,
# and per-allele observation counters filled in while scanning the BAM.
ref_dict={}
alt_dict={}
genome_dict={}
allele_ref={}
allele_alt={}
allele_other={}
snpinfo={}
for snp in f1:
    snp=snp.split()
    index=snp[0]+'-'+snp[1]
    ref_dict[index]=snp[2]
    alt_dict[index]=snp[3]
    snpinfo[index]=snp[2]+'\t'+snp[3]
    # genome_dict counts SNPs per chromosome; used below to skip reads
    # mapped to chromosomes with no SNPs at all.
    genome_dict[snp[0]]=genome_dict.get(snp[0],0)+1
f1.close()
########################################
def decide_ref_or_alt_sub(i, sub_reads_info):
    # Tally one aligned base: `i` is the 0-based offset from the read's
    # mapped start (reads[3]); `sub_reads_info` is the base observed there.
    # Positions absent from the SNP table are ignored.
    snp_index = '{0}-{1}'.format(reads[2], int(reads[3]) + i)
    if snp_index not in ref_dict:
        return
    if sub_reads_info == ref_dict[snp_index]:
        allele_ref[snp_index] = allele_ref.get(snp_index, 0) + 1
    elif sub_reads_info == alt_dict[snp_index]:
        allele_alt[snp_index] = allele_alt.get(snp_index, 0) + 1
    else:
        allele_other[snp_index] = allele_other.get(snp_index, 0) + 1
def decide_ref_or_alt():
    """Tally every aligned base of the current read against the SNP table.

    Reads the module global ``reads`` (the split SAM fields of the current
    alignment) and dispatches each reference-aligned base of ``reads[9]`` to
    ``decide_ref_or_alt_sub`` with its 0-based reference offset.

    The original implementation special-cased 7 hard-coded CIGAR layouts
    (e.g. ``125M``, ``10M5N115M``, ``107M2D18M``) and silently skipped any
    other alignment.  This version walks the CIGAR string generically per
    the SAM specification, which reproduces the old behavior for those 7
    layouts and additionally handles arbitrary combinations (including
    soft-clipped reads, which were previously dropped):

      * M / = / X consume both reference and read,
      * I / S     consume the read only,
      * D / N     consume the reference only,
      * H / P     consume neither.
    """
    cigar = reads[5]
    ops = re.findall(r'(\d+)([MIDNSHP=X])', cigar)
    # Malformed or unavailable CIGAR (e.g. "*"): skip this read, exactly
    # like the original's trailing `else: pass`.
    if not ops or ''.join(num + op for num, op in ops) != cigar:
        return
    ref_off = 0   # offset from the mapped start position on the reference
    read_off = 0  # offset into the read sequence reads[9]
    for num, op in ops:
        length = int(num)
        if op in ('M', '=', 'X'):
            for k in range(length):
                decide_ref_or_alt_sub(ref_off + k, reads[9][read_off + k])
            ref_off += length
            read_off += length
        elif op in ('D', 'N'):
            ref_off += length
        elif op in ('I', 'S'):
            read_off += length
        # 'H' and 'P' consume neither reference nor read bases.
################## define dictionary #############
# NOTE(review): these four dicts are declared but never written to below;
# they look like leftovers from an earlier version of the script.
reads_name={}
reads_region_ref={}
reads_region_start={}
reads_region_end={}
##################################################
# Stream alignments from `samtools view`, keeping only reads with mapping
# quality >= 50 on chromosomes that carry at least one SNP.
for reads in f2:
    reads=reads.split()
    try:
        if int(reads[4]) >= 50:
            if reads[2] in genome_dict:
                reads_length=len(reads[9])
                decide_ref_or_alt()
            else:
                pass
        else:
            pass
    # NOTE(review): bare except silently drops malformed lines (e.g. SAM
    # headers); consider narrowing to (ValueError, IndexError).
    except:
        pass
#f3.write('reads_name\tchr\tstart\tend\tref\talt\n')
############################## output what we want ################
# Re-read the SNP table and emit, per SNP:
# chrom, pos, ref, alt, ref-allele count, alt-allele count, other count.
f4=open(snp_file)
for snp in f4:
    snp=snp.split()
    try:
        f3.write(snp[0]+'\t'+snp[1]+'\t'\
        +snpinfo[snp[0]+'-'+snp[1]]+'\t'\
        +str(allele_ref.get(snp[0]+'-'+snp[1],0))+'\t'\
        +str(allele_alt.get(snp[0]+'-'+snp[1],0))+'\t'\
        +str(allele_other.get(snp[0]+'-'+snp[1],0))+'\n')
    except KeyError:
        pass
###################### close file ###############################
f4.close()
f2.close()
f3.close()
| [
"you@example.com"
] | you@example.com |
da85b5a7defc853de8ed8df3a737b37d94c89891 | 60b48df762a515a734cfbedd7ca101df43f04824 | /rllib/models/utils.py | c1d7a8c440fdf58315d4fe72d00694f962f57cab | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | LuBingtan/ray | a02b13c4dceab2b0d54870fd3abae5c11bae916e | 298742d7241681ee1f307ec0dd3cd7e9713a3c7d | refs/heads/master | 2023-03-05T16:32:35.596725 | 2022-06-05T23:21:53 | 2022-06-05T23:21:53 | 223,334,544 | 0 | 1 | Apache-2.0 | 2023-03-04T08:56:53 | 2019-11-22T06:01:51 | Python | UTF-8 | Python | false | false | 6,220 | py | from typing import Optional
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.framework import try_import_jax, try_import_tf, try_import_torch
@DeveloperAPI
def get_activation_fn(name: Optional[str] = None, framework: str = "tf"):
    """Translate an activation-function name into a framework callable.

    Args:
        name (Optional[str]): One of "relu" (default), "tanh", "elu",
            "swish", or "linear" (same as None). A callable is passed
            through unchanged.
        framework: One of "jax", "tf|tfe|tf2" or "torch".

    Returns:
        The framework-specific activation callable (e.g. ``torch.nn.ReLU``
        or ``tf.nn.tanh``), or None for "linear"/None.

    Raises:
        ValueError: If `name` does not map to a known activation.
    """
    # Callables are taken as-is.
    if callable(name):
        return name

    if framework == "torch":
        if name in ("linear", None):
            return None
        if name == "swish":
            from ray.rllib.utils.torch_utils import Swish

            return Swish
        _, nn = try_import_torch()
        torch_fns = {"relu": nn.ReLU, "tanh": nn.Tanh, "elu": nn.ELU}
        if name in torch_fns:
            return torch_fns[name]
    elif framework == "jax":
        if name in ("linear", None):
            return None
        jax, _ = try_import_jax()
        jax_fns = {
            "swish": jax.nn.swish,
            "relu": jax.nn.relu,
            "tanh": jax.nn.hard_tanh,
            "elu": jax.nn.elu,
        }
        if name in jax_fns:
            return jax_fns[name]
    else:
        assert framework in ["tf", "tfe", "tf2"], "Unsupported framework `{}`!".format(
            framework
        )
        if name in ("linear", None):
            return None
        tf1, tf, tfv = try_import_tf()
        # tf exposes activations directly under tf.nn (e.g. tf.nn.tanh).
        fn = getattr(tf.nn, name, None)
        if fn is not None:
            return fn

    raise ValueError(
        "Unknown activation ({}) for framework={}!".format(name, framework)
    )
@DeveloperAPI
def get_filter_config(shape):
    """Return a default Conv2D filter spec for a given image observation shape.

    Args:
        shape (Tuple[int]): The input (image) shape, e.g. (84,84,3).

    Returns:
        List[list]: A list of [num_filters, kernel, stride] entries usable
        as the `conv_filters` model-config option.

    Raises:
        ValueError: If no default configuration exists for the given shape.
    """
    # Known (height, width) resolutions mapped to hand-tuned filter stacks,
    # checked in this order.
    defaults = [
        # VizdoomGym (large 480x640).
        ([480, 640], [
            [16, [24, 32], [14, 18]],
            [32, [6, 6], 4],
            [256, [9, 9], 1],
        ]),
        # VizdoomGym (small 240x320).
        ([240, 320], [
            [16, [12, 16], [7, 9]],
            [32, [6, 6], 4],
            [256, [9, 9], 1],
        ]),
        # 96x96x3 (e.g. CarRacing-v0).
        ([96, 96], [
            [16, [8, 8], 4],
            [32, [4, 4], 2],
            [256, [11, 11], 2],
        ]),
        # Atari.
        ([84, 84], [
            [16, [8, 8], 4],
            [32, [4, 4], 2],
            [256, [11, 11], 1],
        ]),
        # Small (1/2) Atari.
        ([42, 42], [
            [16, [4, 4], 2],
            [32, [4, 4], 2],
            [256, [11, 11], 1],
        ]),
        # Test image (10x10).
        ([10, 10], [
            [16, [5, 5], 2],
            [32, [5, 5], 2],
        ]),
    ]
    shape = list(shape)
    for dims, filters in defaults:
        # Accept [H, W], [H, W, C] (channels-last) or [C, H, W]
        # (channels-first) layouts.
        if len(shape) in (2, 3) and (shape[:2] == dims or shape[1:] == dims):
            return filters
    raise ValueError(
        "No default configuration for obs shape {}".format(shape)
        + ", you must specify `conv_filters` manually as a model option. "
        "Default configurations are only available for inputs of shape "
        "[42, 42, K] and [84, 84, K]. You may alternatively want "
        "to use a custom model or preprocessor."
    )
@DeveloperAPI
def get_initializer(name, framework="tf"):
    """Returns a framework specific initializer, given a name string.

    Args:
        name: One of "xavier_uniform" (default), "xavier_normal".
            A callable is passed through unchanged.
        framework: One of "jax", "tf|tfe|tf2" or "torch".

    Returns:
        A framework-specific initializer function, e.g.
        tf.keras.initializers.GlorotUniform or
        torch.nn.init.xavier_uniform_.

    Raises:
        ValueError: If name is an unknown initializer.
    """
    # Already a callable, return as-is.
    if callable(name):
        return name

    # Restructured as a single if/elif/else chain (mirroring
    # get_activation_fn): previously, an unknown name with framework="jax"
    # fell through into the tf branch and died on the
    # `assert framework in ["tf", ...]` with a misleading "Unsupported
    # framework" error instead of reaching the ValueError below.
    if framework == "jax":
        _, flax = try_import_jax()
        assert flax is not None, "`flax` not installed. Try `pip install jax flax`."
        import flax.linen as nn

        if name in [None, "default", "xavier_uniform"]:
            return nn.initializers.xavier_uniform()
        elif name == "xavier_normal":
            return nn.initializers.xavier_normal()
    elif framework == "torch":
        _, nn = try_import_torch()
        assert nn is not None, "`torch` not installed. Try `pip install torch`."
        if name in [None, "default", "xavier_uniform"]:
            return nn.init.xavier_uniform_
        elif name == "xavier_normal":
            return nn.init.xavier_normal_
    else:
        assert framework in ["tf", "tfe", "tf2"], "Unsupported framework `{}`!".format(
            framework
        )
        tf1, tf, tfv = try_import_tf()
        assert (
            tf is not None
        ), "`tensorflow` not installed. Try `pip install tensorflow`."
        if name in [None, "default", "xavier_uniform"]:
            return tf.keras.initializers.GlorotUniform
        elif name == "xavier_normal":
            return tf.keras.initializers.GlorotNormal

    # Was a copy-pasted "Unknown activation" message from get_activation_fn.
    raise ValueError(
        "Unknown initializer ({}) for framework={}!".format(name, framework)
    )
| [
"noreply@github.com"
] | LuBingtan.noreply@github.com |
ba28c5109dcaa0ec2044193eb17e562af519cbb8 | 48c6aa99481961c607b415d6f6bd5318e91e9ab6 | /backend/migrations/0001_initial.py | cad6cdb78903e47d836135c73a029f71eea274f5 | [] | no_license | shedolkar12/viandManagement | 56ba107c9971a42960825392d55f11ae03b72efa | 3b20ba416e91610b421b16233aa7131d00e40c38 | refs/heads/master | 2022-12-15T20:07:11.427862 | 2020-09-24T11:27:40 | 2020-09-24T11:27:40 | 298,006,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,741 | py | # Generated by Django 2.2 on 2020-09-23 21:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial schema for the viand-management backend app."""

    initial = True

    dependencies = [
        # Several models below reference settings.AUTH_USER_MODEL, which is
        # swappable; per the Django migrations documentation such migrations
        # must declare a swappable dependency, otherwise they can fail on
        # projects configured with a custom user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='Admin',
            fields=[
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Branch',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.CharField(max_length=100)),
                ('city', models.CharField(max_length=30)),
                ('pincode', models.CharField(max_length=10)),
                # NOTE(review): blank=True without null=True on FloatFields
                # allows empty form input while the DB column stays NOT NULL;
                # confirm this is intended before altering the schema.
                ('longitude', models.FloatField(blank=True)),
                ('latitude', models.FloatField(blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Categories',
            fields=[
                ('id', models.CharField(max_length=40, primary_key=True, serialize=False)),
                ('description', models.CharField(max_length=60)),
                ('is_active', models.BooleanField(default=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='ComboProduct',
            fields=[
                ('id', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('description', models.CharField(max_length=100)),
                ('is_active', models.BooleanField(default=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='CustomerAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.CharField(max_length=100)),
                ('pincode', models.CharField(max_length=10)),
                ('latitude', models.FloatField(blank=True)),
                ('longitude', models.FloatField(blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='backend.Customer')),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.CharField(max_length=40, primary_key=True, serialize=False)),
                ('description', models.CharField(max_length=40)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='ProductCategoryRelation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='backend.Categories')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='backend.Product')),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('order_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('amount', models.FloatField()),
                ('order_breakup', models.CharField(blank=True, default='', max_length=300)),
                ('status', models.CharField(max_length=10)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('branch', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='backend.Branch')),
                ('customer_address', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='backend.CustomerAddress')),
            ],
        ),
        migrations.CreateModel(
            name='ComboProductRelation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('combo_product', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='backend.ComboProduct')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='backend.Product')),
            ],
        ),
        migrations.CreateModel(
            name='Storage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField()),
                ('unit', models.FloatField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
                ('branch', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='backend.Branch')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='backend.Product')),
            ],
            options={
                'unique_together': {('branch', 'product', 'unit')},
            },
        ),
        migrations.CreateModel(
            name='Pricing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.FloatField()),
                ('unit', models.FloatField()),
                ('version', models.CharField(max_length=4)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('branch', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='backend.Branch')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='backend.Product')),
            ],
            options={
                'unique_together': {('product', 'unit', 'version')},
            },
        ),
    ]
| [
"harshal95iitk@gmail.com"
] | harshal95iitk@gmail.com |
6553745f4461a17292f48f1043ea00178287eb2a | 1ed27591e9eb95c356d53307160b515b60a824d1 | /baidu2.py | d1d4d4cf9651f130fa515b2d31992ee6a9bdb3d1 | [] | no_license | tianjingang/python | a6003729e7c8676ddcc90014474a369cd0e28cea | 62aa54c441be55864b91eb3896ec14ed616c9052 | refs/heads/master | 2020-07-05T11:01:58.558083 | 2017-01-17T06:00:06 | 2017-01-17T06:00:06 | 74,121,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | #!C:\Python27\python.exe
# -*- coding: UTF-8 -*-
print
#引入模块
import urllib
import urllib2
import re
import MySQLdb
#定义类
class News:
    """Scraper for the Baidu news front page (Python 2 / urllib2)."""
    def __init__(self):
        # Landing page that every request targets.
        self.url = "http://news.baidu.com/"
    def getpage(self):
        """Fetch the front page and return the raw response body."""
        req = urllib2.Request(self.url)
        return urllib2.urlopen(req).read()
    def gettitle(self):
        """Return the headline section between the two HTML markers."""
        html = self.getpage()
        section = re.compile('<div id="headline-tabs" class="mod-headline-tab">(.*?)<ul id="goTop" class="mod-sidebar">', re.S)
        return re.search(section, html).group(1)
    def geta(self):
        """Return (url, anchor-html) pairs for every headline link."""
        block = self.gettitle()
        link = re.compile('<a href="(http://.*?").*?">(.*?)</a>', re.S)
        return re.findall(link, block)
    def toimg(self, data):
        """Strip <img ...> tags from an anchor's inner HTML."""
        tag = re.compile('<img .*?>')
        return re.sub(tag, '', data)
db= MySQLdb.connect("localhost","root","root","python",charset="gbk")
cursor = db.cursor()
new=News()
arr= new.geta()
for i in range(len(arr)):
for item in arr:
print item[0][:-1],new.toimg(item[1])
sql = "INSERT INTO two(title,url) VALUES (%s, %s)" %("'"+new.toimg(item[1])+"'","'"+item[0][:-1]+"'")
#print sql
try:
cursor.execute(sql)
db.commit()
except:
# Rollback in case there is any error
db.rollback()
| [
"email@example.com"
] | email@example.com |
f4bf5cb1cf2c7f122cb8c01816a9def27baf8a74 | a39d0d1f0e257d0fff5de58e3959906dafb45347 | /Lutts/Processes/fork_count.py | 25d96ae9bddd34a2a8aad5e8e733978b46f610cd | [] | no_license | Twishar/Python | 998d7b304070b621ca7cdec548156ca7750ef38e | 1d1afa79df1aae7b48ac690d9b930708767b6d41 | refs/heads/master | 2021-09-23T14:18:36.195494 | 2018-09-24T12:33:36 | 2018-09-24T12:33:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | import os, time
def counter(count): #вызывается в новом процессе
for i in range(count):
time.sleep(1) #имитировать работу
print('[%s]=> %s' % (os.getpid(), i))
for i in range(5):
pid = os.fork()
if pid !=0: #в родительском процессе:
print('Process %d spawned' %pid) #продолжить цикл
else:
counter(5) #в дочернем процессе
os._exit(0) #вызвать функцию и завершить
print('Main process exiting.') #родитель не должен ждать | [
"stognienkovv@gmail.com"
] | stognienkovv@gmail.com |
bbb33c0727b60e87c3658ebc6ce0886d4a58b524 | 7911da973079f325a515cd2ee66f7590a9f32e48 | /guvi/python/play135.py | 99c2b4ac3c9f564c1529d71aadbd44115722da6d | [] | no_license | Ponkiruthika112/Guvi | 5d2ff3dcf55d6c52c0f09a1e577d8b11632c7a92 | 319e5b4dab5654fabc25ef15c1d528f76d833c15 | refs/heads/master | 2020-04-21T06:05:03.581658 | 2018-08-02T05:53:48 | 2018-08-02T05:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | n=int(input())
st=input().strip().split(" ")
a=[]
for x in range(n):
a.append(int(st[x]))
k=(n//2)
for x in range(k):
for y in range(x+1,k):
if a[x]>a[y]:
t=a[x]
a[x]=a[y]
a[y]=t
for x in range(k,n):
for y in range(x+1,n):
if a[x]<a[y]:
t=a[x]
a[x]=a[y]
a[y]=t
ans=""
for x in a:
ans+=str(x)+" "
print(ans.strip())
| [
"noreply@github.com"
] | Ponkiruthika112.noreply@github.com |
da3d984bfab997a7b36288abf5ba5f1900bfe93e | 377ec156e459f70ad32e625de2dde2672736dd06 | /Exercises/Cookbook/Class/repr.py | e266988a291aa5a71f7afef458477051e37ff740 | [] | no_license | tsonglew/learn-python | b657cc34d3e27993ec0dcce152796bea43224d4f | edbf0b5d24bf0e2d9ad7aa5811c7d3aa0a66b57c | refs/heads/master | 2021-06-13T13:46:35.199562 | 2017-04-14T16:57:38 | 2017-04-14T16:57:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | #-*- coding: utf-8 -*-
class Pair:
    """An (x, y) value pair with distinct repr and str forms."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __repr__(self):
        # !r -> repr() of each coordinate, mirroring the constructor call.
        return 'Pair({!r}, {!r})'.format(self.x, self.y)
    def __str__(self):
        # !s -> str() of each coordinate, a plain tuple-like rendering.
        return '({!s}, {!s})'.format(self.x, self.y)
# The !r conversion makes the format use __repr__() instead of the default __str__().
# Define both __repr__() and __str__() for a detailed description; if __str__()
# is not defined, __repr__() is used as the fallback for output.
| [
"417879751@qq.com"
] | 417879751@qq.com |
559998185cce7b9fd7c63d970b8c66a55d1ce247 | 1ffe032eb68b63a50cdde6000c210f457d62b96d | /h2o-py/tests/testdir_algos/gbm/pyunit_cv_carsGBM.py | 0c097ec5dbf776045d69928870408bb9f7c81548 | [
"Apache-2.0"
] | permissive | moidin/h2o-3 | 979e287ef9ea9a7dc4d7167e03687aede81d91d5 | 8b5e36d326042b8c42bfe7503bd4d7eaf25c3ee3 | refs/heads/master | 2021-01-21T03:20:43.617128 | 2015-07-28T21:03:10 | 2015-07-28T21:03:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,100 | py | import sys
sys.path.insert(1, "../../../")
import h2o
import random
def cv_carsGBM(ip,port):
    """Exercise GBM cross-validation options on the cars_20mpg dataset.

    Randomly picks one of three problem types (regression, binomial or
    multinomial), then checks: CV reproducibility for "Modulo" vs "Random"
    fold assignment, fold columns, kept CV predictions, boundary cases
    (leave-one-out, nfolds=0, CV combined with a validation frame) and the
    error cases the backend is expected to reject.

    ip/port are unused here; the h2o test harness passes them by convention.
    """
    # read in the dataset and construct training set (and validation set)
    cars = h2o.import_frame(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
    # choose the type model-building exercise (multinomial classification or regression). 0:regression, 1:binomial,
    # 2:multinomial
    problem = random.sample(range(3),1)[0]
    # pick the predictors and response column, along with the correct distribution
    predictors = ["displacement","power","weight","acceleration","year"]
    if problem == 1 :
        response_col = "economy_20mpg"
        distribution = "bernoulli"
        cars[response_col] = cars[response_col].asfactor()
    elif problem == 2 :
        response_col = "cylinders"
        distribution = "multinomial"
        cars[response_col] = cars[response_col].asfactor()
    else :
        response_col = "economy"
        distribution = "gaussian"
    print "Distribution: {0}".format(distribution)
    print "Response column: {0}".format(response_col)
    ## cross-validation
    # 1. check that cv metrics are the same over repeated "Modulo" runs
    nfolds = random.randint(3,10)
    gbm1 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution,
                   fold_assignment="Modulo")
    gbm2 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution,
                   fold_assignment="Modulo")
    h2o.check_models(gbm1, gbm2, True)
    # 2. check that cv metrics are different over repeated "Random" runs
    nfolds = random.randint(3,10)
    gbm1 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution,
                   fold_assignment="Random")
    gbm2 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, distribution=distribution,
                   fold_assignment="Random")
    try:
        h2o.check_models(gbm1, gbm2, True)
        assert False, "Expected models to be different over repeated Random runs"
    except AssertionError:
        assert True
    # 3. folds_column
    num_folds = random.randint(2,5)
    # one random fold index per row, supplied as an explicit fold column
    fold_assignments = h2o.H2OFrame(python_obj=[[random.randint(0,num_folds-1)] for f in range(cars.nrow())])
    fold_assignments.setNames(["fold_assignments"])
    cars = cars.cbind(fold_assignments)
    gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], training_frame=cars, distribution=distribution,
                  fold_column="fold_assignments", keep_cross_validation_predictions=True)
    num_cv_models = len(gbm._model_json['output']['cross_validation_models'])
    assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \
                                     "{1}".format(num_folds, num_cv_models)
    cv_model1 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][0]['name'])
    cv_model2 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][1]['name'])
    assert isinstance(cv_model1, type(gbm)), "Expected cross-validation model to be the same model type as the " \
                                             "constructed model, but got {0} and {1}".format(type(cv_model1),type(gbm))
    assert isinstance(cv_model2, type(gbm)), "Expected cross-validation model to be the same model type as the " \
                                             "constructed model, but got {0} and {1}".format(type(cv_model2),type(gbm))
    # 4. keep_cross_validation_predictions
    cv_predictions = gbm1._model_json['output']['cross_validation_predictions']
    assert cv_predictions is None, "Expected cross-validation predictions to be None, but got {0}".format(cv_predictions)
    cv_predictions = gbm._model_json['output']['cross_validation_predictions']
    assert len(cv_predictions)==num_folds, "Expected the same number of cross-validation predictions " \
                                           "as folds, but got {0}".format(len(cv_predictions))
    # # 5. manually construct models
    # fold1 = cars[cars["fold_assignments"]==0]
    # fold2 = cars[cars["fold_assignments"]==1]
    # manual_model1 = h2o.gbm(y=fold2[response_col],
    #                         x=fold2[predictors],
    #                         validation_y=fold1[response_col],
    #                         validation_x=fold1[predictors],
    #                         distribution=distribution)
    # manual_model2 = h2o.gbm(y=fold1[response_col],
    #                         x=fold1[predictors],
    #                         validation_y=fold2[response_col],
    #                         validation_x=fold2[predictors],
    #                         distribution=distribution)
    ## boundary cases
    # 1. nfolds = number of observations (leave-one-out cross-validation)
    gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow(), distribution=distribution,
                  fold_assignment="Modulo")
    # 2. nfolds = 0
    gbm1 = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=0, distribution=distribution)
    # check that this is equivalent to no nfolds
    gbm2 = h2o.gbm(y=cars[response_col], x=cars[predictors], distribution=distribution)
    h2o.check_models(gbm1, gbm2)
    # 3. cross-validation and regular validation attempted
    gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=random.randint(3,10), validation_y=cars[response_col],
                  validation_x=cars[predictors], distribution=distribution)
    ## error cases
    # 1. nfolds == 1 or < 0
    try:
        gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=random.sample([-1,1], 1)[0],
                      distribution=distribution)
        assert False, "Expected model-build to fail when nfolds is 1 or < 0"
    except EnvironmentError:
        assert True
    # 2. more folds than observations
    try:
        gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow()+1, distribution=distribution,
                      fold_assignment="Modulo")
        assert False, "Expected model-build to fail when nfolds > nobs"
    except EnvironmentError:
        assert True
    # 3. fold_column and nfolds both specified
    try:
        gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], nfolds=3, fold_column="fold_assignments",
                      distribution=distribution, training_frame=cars)
        assert False, "Expected model-build to fail when fold_column and nfolds both specified"
    except EnvironmentError:
        assert True
    # # 4. fold_column and fold_assignment both specified
    # try:
    #     gbm = h2o.gbm(y=cars[response_col], x=cars[predictors], fold_assignment="Random", fold_column="fold_assignments",
    #                   distribution=distribution, training_frame=cars)
    #     assert False, "Expected model-build to fail when fold_column and fold_assignment both specified"
    # except EnvironmentError:
    #     assert True
if __name__ == "__main__":
    # Delegate to the h2o test harness, which parses cluster args from argv.
    h2o.run_test(sys.argv, cv_carsGBM)
"eric.eckstrand@gmail.com"
] | eric.eckstrand@gmail.com |
603da445ac12eeebe5c185c910c9d46f51f201af | f9248ec00b661ee4790a780b7adaec79c0d68ec8 | /grades.py | 09573f0951c24c2e883ecb6e5ae49df72fa72cc2 | [] | no_license | michaelzh17/6001_Python | 0ec463f02840bf3162cd3247d76494d1592e82e3 | 53833604db4d769f71e63044813e3500f3e0fb6f | refs/heads/master | 2021-01-11T22:31:17.832117 | 2018-12-21T10:34:18 | 2018-12-21T10:34:18 | 78,979,863 | 0 | 0 | null | 2017-04-06T02:54:57 | 2017-01-15T00:07:38 | Python | UTF-8 | Python | false | false | 1,876 | py | #!/usr/bin/env python3
class Grades(object):
    """A grade book mapping students to lists of grades."""
    def __init__(self):
        """Create empty grade book"""
        self.students = []    # list of Student objects
        self.grades = {}      # maps student idNum -> list of grades
        self.isSorted = True  # True while self.students is known sorted
    def addStudent(self, student):
        """Assumes: student is of type Student
        Add student to the grade book
        Raises ValueError if student is already present"""
        if student in self.students:
            raise ValueError('Duplicate student')
        self.students.append(student)
        self.grades[student.getIdNum()] = []
        self.isSorted = False
    def addGrade(self, student, grade):
        """Assumes: grade is a float
        Add grade to the list of grades for student
        Raises ValueError if student is not in the grade book"""
        try:
            self.grades[student.getIdNum()].append(grade)
        # Narrowed from a bare "except:", which would also have masked
        # unrelated errors (e.g. a missing getIdNum attribute).
        except KeyError:
            raise ValueError('Student not in mapping')
    def getGrades(self, student):
        """Return a list of grades for student
        Raises ValueError if student is not in the grade book"""
        try:  # return copy of list of student's grades
            return self.grades[student.getIdNum()][:]
        except KeyError:
            raise ValueError('Student not in mapping')
    def getStudents(self):
        """Return a sorted list of the students in the grade book"""
        if not self.isSorted:
            self.students.sort()
            self.isSorted = True
        return self.students[:]  # return copy of list of students
def gradeReport(course):
    """Assume course is of type Grades"""
    # Collect one line per student and join once at the end.
    lines = []
    for student in course.getStudents():
        grades = course.getGrades(student)
        if grades:
            mean = sum(grades) / len(grades)
            lines.append('\n' + str(student) + '\'s mean grade is ' + str(mean))
        else:
            lines.append('\n' + str(student) + ' has no grades')
    return ''.join(lines)
| [
"macalzhang@gmail.com"
] | macalzhang@gmail.com |
c6cac383f77c6c1693c64b0e517cf0ac0cc0fb30 | e7659b2e0d731c6bfd0d9d5bdcbf718dc727c6f2 | /francisco-topo/time_results/unused/rep_atk_def_socket/plot_ruio.py | 40a880a6b644cf5a848913ba76915a55f4089efa | [
"Apache-2.0"
] | permissive | ComitUniandes/ICS-SDN | aa3de9fbd84795db8a97d45e2ec2325c1dd1091a | fa1df31849dbe26264eabc1aa58ebda2f79ca698 | refs/heads/master | 2020-03-29T15:01:32.736612 | 2018-09-23T23:09:41 | 2018-09-23T23:09:41 | 150,041,157 | 1 | 0 | Apache-2.0 | 2020-06-16T04:45:40 | 2018-09-24T01:27:20 | Matlab | UTF-8 | Python | false | false | 558 | py | """Plot the solution that was generated by differential_equation.py."""
from numpy import loadtxt
from pylab import figure, plot, xlabel, grid, hold, legend, title, savefig
from matplotlib.font_manager import FontProperties
import sys
t, x1, x2 = loadtxt(sys.argv[1] , unpack=True)
figure(1, figsize=(6, 4.5))
xlabel('t')
grid(True)
hold(True)
lw = 1
plot(t, x1, 'b', linewidth=lw)
plot(t, x2, 'r', linewidth=lw)
#legend((r'$L101$', r'$L102$', r'$L103$'), prop=FontProperties(size=16))
title('Tank Levels with Control')
savefig(sys.argv[2], dpi=100)
| [
"None"
] | None |
70ba7e6bc909228908287f95ee119ba9f529c6bc | 60618d48e09a140926d97b01cb9b6f76fcc65703 | /python scrapy/exe/leihanduanzhi.py | c0cdd7db61b734ecf26474362ae24ff43d36fe9f | [] | no_license | Incipe-win/Python | ca8f36cc8785eb13512f71a3cf10149d4e1b855e | 5bab36b90591c74dedb6ead3484a279b90a1bcbd | refs/heads/master | 2021-01-07T08:11:42.293541 | 2020-12-06T09:17:02 | 2020-12-06T09:17:02 | 241,629,236 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | import requests
import re
import time
import json
class LeiHan:
    """Scraper that collects jokes from wengpa.com, page by page."""
    def __init__(self):
        self.url = "https://wengpa.com/neihan/page/{}/"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36"}
        self.content = []
    def parse_url(self, url):
        """GET *url*, echo the HTTP status and return the decoded body."""
        response = requests.get(url, headers=self.headers)
        print(response.status_code)
        return response.content.decode()
    def parse_text(self, str_html, page):
        """Extract joke paragraphs from *str_html* into self.content.
        The first five pages use a <strong>-prefixed markup variant."""
        if page <= 5:
            regular = r"<p><strong>.*?</strong>(.*?)</p>"
        else:
            regular = r"<p>(.*?)</p>"
        self.content.append(re.findall(regular, str_html, re.S))
    def save_text(self):
        """Append every collected joke to leihan.txt, one per line."""
        # print(self.content)
        with open("leihan.txt", "a", encoding="utf-8") as f:
            for page_items in self.content:
                for joke in page_items:
                    f.write(joke)
                    f.write("\n")
    def run(self):
        """Scrape pages 1..2, then persist everything to disk."""
        page = 1
        while page != 3:
            page_url = self.url.format(page)
            print(page_url, end=" ")
            html = self.parse_url(page_url)
            self.parse_text(html, page)
            # print(self.content)
            page += 1
        self.save_text()
if __name__ == "__main__":
leihan = LeiHan()
leihan.run()
| [
"whc_9_13@163.com"
] | whc_9_13@163.com |
f1fc92350ae73a85dfb83e7c728d24488d718271 | 58afefdde86346760bea40690b1675c6639c8b84 | /leetcode/number-of-ways-to-wear-different-hats-to-each-other/383145521.py | f06c2d20c1f1da056ad81377ed81c6257a6d85c6 | [] | no_license | ausaki/data_structures_and_algorithms | aaa563f713cbab3c34a9465039d52b853f95548e | 4f5f5124534bd4423356a5f5572b8a39b7828d80 | refs/heads/master | 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | # title: number-of-ways-to-wear-different-hats-to-each-other
# detail: https://leetcode.com/submissions/detail/383145521/
# datetime: Wed Aug 19 17:22:41 2020
# runtime: 512 ms
# memory: 33.8 MB
from functools import lru_cache
class Solution:
    def numberWays(self, hats: List[List[int]]) -> int:
        """Count assignments of distinct hats (labeled 1..40) to the n
        people, where person p may only wear a hat in hats[p].

        DP over hat labels from high to low: dp(i, chosen, k) counts ways
        to finish using only hats 1..i, given that the people in the
        `chosen` bitmask already wear a hat and k of them are done.
        Result is taken modulo 10**9 + 7.
        """
        MOD = 10 ** 9 + 7
        n = len(hats)
        # One bitmask per person: bit h set iff that person may wear hat h.
        # (Built with an explicit loop; the old code relied on `reduce`,
        # which was never imported in this file, and carried unused
        # `bitcount`/`people_mask` dead code.)
        masks = []
        for prefs in hats:
            m = 0
            for h in prefs:
                m |= 1 << h
            masks.append(m)
        @lru_cache(None)
        def dp(i, chosen, k):
            if k == n:
                return 1        # everyone already has a hat
            if i + k < n:
                return 0        # fewer hats left (1..i) than bare people
            # Option 1: nobody wears hat i.
            total = dp(i - 1, chosen, k)
            # Option 2: give hat i to one eligible, still-bare person.
            for p in range(n):
                if chosen & (1 << p):
                    continue
                if masks[p] & (1 << i):
                    total += dp(i - 1, chosen | (1 << p), k + 1) % MOD
            return total
        return dp(40, 0, 0) % MOD
"ljm51689@gmail.com"
] | ljm51689@gmail.com |
978ad01a5dc43f071400139d7bc9530b9f303226 | 5e9ed7cb1e68c4169dd20f5f90b2aaa49d7dfd9b | /php/generate_php_zip.py | c744e1de7514d53b3cdf623e752656ca146c4a7a | [] | no_license | suchja/generators | 1c7fb780dc2646963920454ebf24622f01c56748 | f27a81f0f92b1aac236a83caaa28d02d717e77fd | refs/heads/master | 2020-12-11T03:34:45.014886 | 2015-10-08T10:27:53 | 2015-10-08T10:27:53 | 43,313,739 | 0 | 0 | null | 2015-09-28T16:48:51 | 2015-09-28T16:48:51 | null | UTF-8 | Python | false | false | 4,647 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
PHP ZIP Generator
Copyright (C) 2012-2015 Matthias Bolte <matthias@tinkerforge.com>
Copyright (C) 2011 Olaf Lüke <olaf@tinkerforge.com>
generate_php_zip.py: Generator for PHP ZIP
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
import datetime
import sys
import os
import shutil
import subprocess
sys.path.append(os.path.split(os.getcwd())[0])
import common
import php_common
from php_released_files import released_files
class PHPZipGenerator(common.Generator):
    """Assembles the released PHP bindings, examples and a PEAR package
    into the distributable ZIP file."""
    # Scratch directories used while assembling the ZIP.
    tmp_dir = '/tmp/generator/php'
    tmp_source_dir = os.path.join(tmp_dir, 'source')
    tmp_source_tinkerforge_dir = os.path.join(tmp_source_dir, 'Tinkerforge')
    tmp_examples_dir = os.path.join(tmp_dir, 'examples')
    def get_bindings_name(self):
        """Return the bindings identifier used by the common driver."""
        return 'php'
    def prepare(self):
        """Recreate the scratch directory tree from a clean slate."""
        common.recreate_directory(self.tmp_dir)
        os.makedirs(self.tmp_source_dir)
        os.makedirs(self.tmp_source_tinkerforge_dir)
        os.makedirs(self.tmp_examples_dir)
    def generate(self, device):
        """Copy the example files of one released device into the tree."""
        if not device.is_released():
            return
        # Copy device examples
        tmp_examples_device_dir = os.path.join(self.tmp_examples_dir,
                                               device.get_camel_case_category(),
                                               device.get_camel_case_name())
        if not os.path.exists(tmp_examples_device_dir):
            os.makedirs(tmp_examples_device_dir)
        for example in common.find_device_examples(device, '^Example.*\.php$'):
            shutil.copy(example[1], tmp_examples_device_dir)
    def finish(self):
        """Collect bindings/docs, build the PEAR package and zip it all."""
        root_dir = self.get_bindings_root_directory()
        # Copy IP Connection examples
        for example in common.find_examples(root_dir, '^Example.*\.php$'):
            shutil.copy(example[1], self.tmp_examples_dir)
        # Copy bindings and readme
        package_files = ['<file name="Tinkerforge/IPConnection.php" role="php" />']
        for filename in released_files:
            shutil.copy(os.path.join(root_dir, 'bindings', filename), self.tmp_source_tinkerforge_dir)
            package_files.append('<file name="Tinkerforge/{0}" role="php" />'.format(os.path.basename(filename)))
        shutil.copy(os.path.join(root_dir, 'IPConnection.php'), self.tmp_source_tinkerforge_dir)
        shutil.copy(os.path.join(root_dir, 'changelog.txt'), self.tmp_dir)
        shutil.copy(os.path.join(root_dir, 'readme.txt'), self.tmp_dir)
        shutil.copy(os.path.join(root_dir, '..', 'configs', 'license.txt'), self.tmp_dir)
        # Make package.xml
        version = common.get_changelog_version(root_dir)
        date = datetime.datetime.now().strftime("%Y-%m-%d")
        common.specialize_template(os.path.join(root_dir, 'package.xml.template'),
                                   os.path.join(self.tmp_source_dir, 'package.xml'),
                                   {'{{VERSION}}': '.'.join(version),
                                    '{{DATE}}': date,
                                    '{{FILES}}': '\n '.join(package_files)})
        # Make PEAR package
        with common.ChangedDirectory(self.tmp_source_dir):
            args = ['/usr/bin/pear',
                    'package',
                    'package.xml']
            if subprocess.call(args) != 0:
                raise Exception("Command '{0}' failed".format(' '.join(args)))
        # Remove build stuff
        shutil.move(os.path.join(self.tmp_source_dir, 'Tinkerforge-{0}.{1}.{2}.tgz'.format(*version)),
                    os.path.join(self.tmp_dir, 'Tinkerforge.tgz'))
        os.remove(os.path.join(self.tmp_source_dir, 'package.xml'))
        # Make zip
        common.make_zip(self.get_bindings_name(), self.tmp_dir, root_dir, version)
def generate(bindings_root_directory):
    """Run the PHP ZIP generator (English docs) via the common driver."""
    common.generate(bindings_root_directory, 'en', PHPZipGenerator)

if __name__ == "__main__":
    # Generate relative to the current working directory when run directly.
    generate(os.getcwd())
| [
"matthias@tinkerforge.com"
] | matthias@tinkerforge.com |
03964fb3216ad6f1c86627096d8738333dec22d1 | 0e25538b2f24f1bc002b19a61391017c17667d3d | /cwmipermanentevents/win_cwmilogfileconsumer.py | c69452cebe0f859b26463dfa731e7627a8d22ca4 | [] | no_license | trondhindenes/Ansible-Auto-Generated-Modules | 725fae6ba9b0eef00c9fdc21179e2500dfd6725f | efa6ac8cd2b545116f24c1929936eb8cc5c8d337 | refs/heads/master | 2020-04-06T09:21:00.756651 | 2016-10-07T07:08:29 | 2016-10-07T07:08:29 | 36,883,816 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,484 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# <COPYRIGHT>
# <CODEGENMETA>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_cwmilogfileconsumer
version_added:
short_description: Generated from DSC module cwmipermanentevents version 1.1 at 07.10.2016 01.01.34
description:
- DSC Resources for managing WMI permanent events
options:
Filename:
description:
-
required: True
default:
aliases: []
Name:
description:
-
required: True
default:
aliases: []
Text:
description:
-
required: True
default:
aliases: []
Ensure:
description:
-
required: False
default:
aliases: []
choices:
- Absent
- Present
IsUnicode:
description:
-
required: False
default:
aliases: []
MaximumFileSize:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_username:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_password:
description:
-
required: False
default:
aliases: []
AutoInstallModule:
description:
- If true, the required dsc resource/module will be auto-installed using the Powershell package manager
required: False
default: false
aliases: []
choices:
- true
- false
AutoConfigureLcm:
description:
- If true, LCM will be auto-configured for directly invoking DSC resources (which is a one-time requirement for Ansible DSC modules)
required: False
default: false
aliases: []
choices:
- true
- false
| [
"trond@hindenes.com"
] | trond@hindenes.com |
90c2fddb780d56fa8b68031a4c4a2df33e49ba7e | 40aa8243e28eaecc895e0187e1e12e3b66195003 | /Practice/Boson/test_boson.py | 933bd1a84eda6f2354fa6a1c2b83de558c9c33e6 | [] | no_license | Toruitas/Python | 94c5dc82736914203e8b712055b824796816c145 | 2d1ea9cdad9501ae9d934c90d516a5a846d2a631 | refs/heads/master | 2016-09-05T20:10:41.584677 | 2015-06-01T03:14:51 | 2015-06-01T03:14:51 | 17,966,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | __author__ = 'Stuart'
# Problem 2: Unit test for P1
import unittest
from Boson import problem_one
class BooleanTestCase(unittest.TestCase):
    """Unit tests for Boson.problem_one (case order preserved)."""
    def test_problem_one(self):
        cases = [
            ("banana", "banana", True),
            ("banana", "ananab", True),
            ("a", "bbbbbbbb", False),
            ("ichwh", "which", True),
            ("ichic", "which", False),
            ("", "", True),
            ("", "a", False),
        ]
        for first, second, expected in cases:
            if expected:
                self.assertTrue(problem_one(first, second))
            else:
                self.assertFalse(problem_one(first, second))
self.assertFalse(problem_one("","a"))
if __name__=="__main__":
unittest.main() | [
"Toruitas@gmail.com"
] | Toruitas@gmail.com |
3a62ad2d16767e6c24472f55cc2057293d9560f7 | be8dc68a5be1a8095a278be74090dce963745f8b | /models/vae.py | 82e9df59da3b00ecf12fb9eff3b7e89c31ad2c67 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | drclab/piVAE | 63fd571cbd3de0b8b6aa72f113682d4281864fd7 | 5bfa1e27db948419b750f5f123f5504ffdceeaec | refs/heads/master | 2023-03-16T17:23:08.644786 | 2020-05-09T15:00:39 | 2020-05-09T15:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,266 | py | # ---------------------------------------------------------
# TensorFlow piVAE Encoder & Decoder Models
# Licensed under The MIT License [see LICENSE for details]
# Written by Lukas Adam
# Email: gm.lukas.adam@gmail.com
# ---------------------------------------------------------
# Imports
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras.layers import Lambda, Input, Dense
from tensorflow.keras import Model
from tensorflow.keras import backend as K
tfd = tfp.distributions
# reparameterization trick
# instead of sampling from Q(z|X), sample epsilon = N(0,I)
# z = z_mean + sqrt(var) * epsilon
def _sampling(args):
    """Reparameterization trick by sampling from an isotropic unit Gaussian.

    # Arguments
        args (tensor): mean and log of variance of Q(z|X)

    # Returns
        z (tensor): sampled latent vector
    """
    mean, log_var = args
    batch_size = K.shape(mean)[0]
    latent_dim = K.int_shape(mean)[1]
    # by default, random_normal has mean = 0 and std = 1.0
    noise = K.random_normal(shape=(batch_size, latent_dim))
    return mean + K.exp(0.5 * log_var) * noise
##################################################
def vae_encoder(input_shape, intermediate_dim, latent_dim, sampling=_sampling):
inputs = Input(shape=input_shape, name='encoder_input')
x = Dense(intermediate_dim, activation='relu')(inputs)
z_mean = Dense(latent_dim, name='z_mean')(x)
z_log_var = Dense(latent_dim, name='z_log_var')(x)
# use reparameterization trick to push the sampling out as input
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
# instantiate encoder model
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
return encoder
##################################################
def vae_decoder(latent_dim, intermediate_dim, original_dim):
# build decoder model
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(intermediate_dim, activation='relu')(latent_inputs)
outputs = Dense(original_dim)(x)
# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
return decoder
| [
"="
] | = |
a43cbe95848f28880ff7f48739f8500f9ad2aa6c | 5a563285d26ab10f4e6015c85095eba30ba12af2 | /project/urls.py | 2a33b9388ab33a5ea2975335dd51e0602e6e7361 | [] | no_license | pavelm2007/kitchen | e41e2433edad1e6156accfab5d5994612c80551d | 6edd44da568d7b50e48ce02e147e3cfd2fc9de4d | refs/heads/master | 2016-09-11T03:19:08.852036 | 2014-05-15T08:18:04 | 2014-05-15T08:18:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,344 | py | from django.conf.urls import patterns, include, url, handler403, handler404, handler500
from django.views.generic import TemplateView
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from common.views import Main_Page
admin.autodiscover()
urlpatterns = patterns('',
url(r'^cked/', include('cked.urls')),
url(r'^stock_discounts/', include('stock_discounts.urls')),
url(r'^tips/', include('tips.urls.entries')),
url(r'^news/', include('coltrane.urls.entries')),
url(r'^feedback/', include('feedback.urls')),
url(r'^page/', include('flatpages.urls')),
url(r'^catalog/', include('catalog.urls', namespace='catalog')),
url(r'^$', Main_Page.as_view(), name='index'),
# Examples:
# url(r'^$', 'project.views.home', name='home'),
# url(r'^project/', include('project.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT}),
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.STATICFILES_DIRS}),
)
urlpatterns += patterns('flatpages.views',
url(r'^contact/$', 'flatpage', {'url': '/contact/'}, name='contact'),
url(r'^questions/$', 'flatpage', {'url': '/questions/'}, name='questions'),
# url(r'^contacts/', 'flatpage', {
# 'url': '/contacts/'}, name='contacts'),
(r'^page/(?P<url>.*)$', 'flatpage'),
)
handler403 = 'common.views.Error403'
handler404 = 'common.views.Error404'
handler500 = 'common.views.Error500'
| [
"pavelm2007@yandex.ru"
] | pavelm2007@yandex.ru |
ab54e3c9de2e501e90955a96567f79fe65e8fcbf | 74f1bb2c16870e140c33dc94f828f58ce042ff57 | /octopus/modules/es/autocomplete.py | f7e7085a68574d5623949193cd344ec54f65778c | [
"Apache-2.0"
] | permissive | richard-jones/magnificent-octopus | 1c4fee308732920b5521be7c998890fd23d48aa2 | 4a65f5a3e919af61887302d2911849233347f18f | refs/heads/develop | 2022-07-25T20:53:02.234255 | 2019-02-19T12:22:56 | 2019-02-19T12:22:56 | 22,357,685 | 2 | 3 | NOASSERTION | 2022-07-06T19:15:17 | 2014-07-28T20:58:11 | Python | UTF-8 | Python | false | false | 5,180 | py | from octopus.core import app
import esprit
import json
from flask import Blueprint, request, abort, make_response
from octopus.lib import webapp, plugin
blueprint = Blueprint('autocomplete', __name__)
@blueprint.route("/term/<config_name>")
@webapp.jsonp
def term(config_name):
# get the configuration
acc = app.config.get("AUTOCOMPLETE_TERM")
cfg = acc.get(config_name)
if cfg is None:
abort(404)
# get the query value
q = request.values.get("q")
if q is None or q == "":
abort(400)
q = q.strip()
# apply any input filters to the query value
ifs = cfg.get("input_filter")
if ifs is not None:
q = ifs(q)
# get the filters that will be used to match documents
filter = cfg.get("filter")
if filter is None:
abort(500)
# now build the query object
field = filter.keys()[0]
params = filter.get(field, {})
wq = _do_wildcard(q, params.get("start_wildcard", True), params.get("end_wildcard", True))
query = {"query" : {"bool" : {"must" : [{"wildcard" : {field : {"value" : wq}}}]}}}
# the size of this query is 0, as we're only interested in the facet
query["size"] = 0
# get the size of the facet
size = request.values.get("size")
if size is None or size == "":
size = cfg.get("default_size")
else:
try:
size = int(size)
except:
abort(400)
if size > cfg.get("max_size", 25):
size = cfg.get("max_size", 25)
# build the facet
facet = cfg.get("facet")
if facet is None:
abort(500)
query["facets"] = {facet : {"terms" : {"field" : facet, "size" : size}}}
# get the name of the model that will handle this query, and then look up
# the class that will handle it
dao_name = cfg.get("dao")
dao_klass = plugin.load_class(dao_name)
if dao_klass is None:
abort(500)
# issue the query
res = dao_klass.query(q=query)
terms = esprit.raw.get_facet_terms(res, facet)
records = [t.get("term") for t in terms]
# make the response
resp = make_response(json.dumps(records))
resp.mimetype = "application/json"
return resp
@blueprint.route('/compound/<config_name>')
@webapp.jsonp
def compound(config_name):
# get the configuration
acc = app.config.get("AUTOCOMPLETE_COMPOUND")
cfg = acc.get(config_name)
if cfg is None:
abort(404)
# get the query value
q = request.values.get("q")
if q is None or q == "":
abort(400)
q = q.strip()
# apply any input filters to the query value
ifs = cfg.get("input_filter")
if ifs is not None:
q = ifs(q)
# get the filters that will be used to match documents
filters = cfg.get("filters")
if filters is None or len(filters.keys()) == 0:
abort(500)
# now build the query object
query = {"query" : {"bool" : {"should" : []}}}
for field, params in filters.iteritems():
wq = _do_wildcard(q, params.get("start_wildcard", True), params.get("end_wildcard", True))
boost = params.get("boost", 1.0)
wcq = {"wildcard" : {field : {"value" : wq, "boost" : boost}}}
query["query"]["bool"]["should"].append(wcq)
# set the size of the result set
size = request.values.get("size")
if size is None or size == "":
size = cfg.get("default_size")
else:
try:
size = int(size)
except:
abort(400)
if size > cfg.get("max_size", 25):
size = cfg.get("max_size", 25)
query["size"] = size
# add the fields constraint
esv = app.config.get("ELASTIC_SEARCH_VERSION", "0.90.13")
fields_key = "fields"
if esv.startswith("1"):
fields_key = "_source"
fields = cfg.get("fields")
if fields is None or len(fields) == 0:
abort(500)
query[fields_key] = fields
# get the name of the model that will handle this query, and then look up
# the class that will handle it
dao_name = cfg.get("dao")
dao_klass = plugin.load_class(dao_name)
if dao_klass is None:
abort(500)
# issue the query
res = dao_klass.query(q=query)
records = esprit.raw.unpack_json_result(res)
# rewrite the field names if necessary
field_name_map = cfg.get("field_name_map")
mapped_records = []
if field_name_map is not None and len(field_name_map.keys()) > 0:
for r in records:
newobj = {}
for k, v in r.iteritems():
newk = field_name_map.get(k)
if newk is None:
newobj[k] = v
else:
newobj[newk] = v
mapped_records.append(newobj)
records = mapped_records
# make the response
resp = make_response(json.dumps(records))
resp.mimetype = "application/json"
return resp
def _do_wildcard(q, start, end):
# add/remove wildcard characters from the string
if end:
if not q.endswith("*"):
q += "*"
else:
q = q.rstrip("*")
if start:
if not q.startswith("*"):
q = "*" + q
else:
q = q.lstrip("*")
return q
| [
"richard@cottagelabs.com"
] | richard@cottagelabs.com |
ea94102712e8d3cd81b0d8b4856498f34aca7df8 | 575b01bd3e73bb9bc27b364eb5bd83a64cb33ab4 | /common/holdout_validator.py | a66c297ee611c707ab1abc0b46688f49c6a21a63 | [] | no_license | backonhighway/kaggle_avito | 18c4f6b2a618b6f2e3b5279e10e2c31a312f5e66 | a281de87e0ad143be4a6154b0bbe5485ffe5a321 | refs/heads/master | 2020-03-22T01:29:48.895056 | 2018-07-01T07:30:41 | 2018-07-01T07:30:41 | 139,308,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | import os, sys
ROOT = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../'))
sys.path.append(ROOT)
APP_ROOT = os.path.join(ROOT, "avito")
import pandas as pd
import numpy as np
from dask import dataframe as dd
from sklearn import metrics
from avito.common import pocket_logger
class HoldoutValidator:
def __init__(self, model, valid_x, valid_y, max_series):
self.logger = pocket_logger.get_my_logger()
self.model = model
self.valid_x = valid_x
self.valid_y = valid_y
self.max_series = max_series
print("Initialized validator.")
def validate(self):
y_pred = self.model.predict(self.valid_x)
y_true = self.valid_y
score = metrics.mean_squared_error(y_true, y_pred) ** 0.5
self.output_score(score, "valid score=")
max_value = self.max_series
c_pred = np.where(y_pred > max_value, max_value, y_pred)
score = metrics.mean_squared_error(y_true, c_pred) ** 0.5
self.output_score(score, "clipped score=")
def output_prediction(self, filename):
self.holdout_df["pred"].to_csv(filename, index=False)
def output_score(self, score, msg):
score_msg = msg + "{:.15f}".format(score)
print(score_msg)
self.logger.info(score_msg) | [
"pocketsuteado@gmail.com"
] | pocketsuteado@gmail.com |
49569796f88c9e934e3ab46f264c4f58235edcb0 | 0fc1ba5ecb2d8eac5533890c0c1df2034e42eeff | /s10/prim.py | 81a073206dd364a9c691d7990cf25108bb9fa633 | [] | no_license | oscarburga/tutorias-complejidad-algoritmica-2021-1 | 00c8d017ed63d10378b7ba543096564082ef3d3c | c2ad9c27e082de442e739f61264f19160ade9c07 | refs/heads/master | 2023-06-12T07:16:26.528929 | 2021-07-04T21:00:03 | 2021-07-04T21:00:03 | 354,336,135 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | #!/usr/bin/python3.7
import heapq as pq
n, m = map(int, input().split())
adj = [[] for _ in range(n)]
for _ in range(m):
x, y, w = map(int, input().split())
x -= 1
y -= 1
adj[x].append((y, w))
adj[y].append((x, w))
inf = 10**18
# c[v]: peso minimo de arista para llegar al vertice v
# vis[v]: True si v ya ha sido visitado
c = [inf] * n
vis = [False] * n
# Vamos a empezar el Prim desde el vértice '0'
c[0] = 0
q = []
pq.heappush(q, (c[0], 0))
MST = 0
cnt = 0
while len(q):
d, v = pq.heappop(q)
if vis[v] == True:
continue
vis[v] = True
MST += d
cnt += 1
for e, w in adj[v]: # e: vertice vecino, w: peso de la arista
if w < c[e]:
c[e] = w
pq.heappush(q, (w, e))
if cnt < n:
print("IMPOSSIBLE")
else:
print(MST)
| [
"oscarburga2001@gmail.com"
] | oscarburga2001@gmail.com |
d29dc13396fdfe0a132ebcbde835e7013aaba222 | eee1a4000e8736bcef9853d4e2710ca7be8e3d7b | /venv/Scripts/pip3-script.py | 25d063346151324143e8f4d535499a95944c9023 | [] | no_license | Arthur31Viana/PythonExercicios | d6f7abb095998e07979a9c9bbf20b8ff66fc6979 | 98131674917e4817a93f087f175b4409ca5dfce6 | refs/heads/master | 2020-05-31T19:18:58.415486 | 2019-06-05T19:12:32 | 2019-06-05T19:12:32 | 190,454,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | #!"C:\Users\Arthur Viana\PycharmProjects\PythonExercicios\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"arthur.ae@hotmail.com"
] | arthur.ae@hotmail.com |
245742df1422fa3bec36e727a9a5d80955f8149d | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/common/Lib/plat-os2emx/pwd.py | 825ea8cf5665e6059450fe6c0db523dfb50ba7f4 | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 5,260 | py | # 2017.08.29 21:58:59 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/plat-os2emx/pwd.py
"""Replacement for pwd standard extension module, intended for use on
OS/2 and similar systems which don't normally have an /etc/passwd file.
The standard Unix password database is an ASCII text file with 7 fields
per record (line), separated by a colon:
- user name (string)
- password (encrypted string, or "*" or "")
- user id (integer)
- group id (integer)
- description (usually user's name)
- home directory (path to user's home directory)
- shell (path to the user's login shell)
(see the section 8.1 of the Python Library Reference)
This implementation differs from the standard Unix implementation by
allowing use of the platform's native path separator character - ';' on OS/2,
DOS and MS-Windows - as the field separator in addition to the Unix
standard ":". Additionally, when ":" is the separator path conversions
are applied to deal with any munging of the drive letter reference.
The module looks for the password database at the following locations
(in order first to last):
- ${ETC_PASSWD} (or %ETC_PASSWD%)
- ${ETC}/passwd (or %ETC%/passwd)
- ${PYTHONHOME}/Etc/passwd (or %PYTHONHOME%/Etc/passwd)
Classes
-------
None
Functions
---------
getpwuid(uid) - return the record for user-id uid as a 7-tuple
getpwnam(name) - return the record for user 'name' as a 7-tuple
getpwall() - return a list of 7-tuples, each tuple being one record
(NOTE: the order is arbitrary)
Attributes
----------
passwd_file - the path of the password database file
"""
import os
__passwd_path = []
if os.environ.has_key('ETC_PASSWD'):
__passwd_path.append(os.environ['ETC_PASSWD'])
if os.environ.has_key('ETC'):
__passwd_path.append('%s/passwd' % os.environ['ETC'])
if os.environ.has_key('PYTHONHOME'):
__passwd_path.append('%s/Etc/passwd' % os.environ['PYTHONHOME'])
passwd_file = None
for __i in __passwd_path:
try:
__f = open(__i, 'r')
__f.close()
passwd_file = __i
break
except:
pass
def __nullpathconv(path):
return path.replace(os.altsep, os.sep)
def __unixpathconv(path):
if path[0] == '$':
conv = path[1] + ':' + path[2:]
elif path[1] == ';':
conv = path[0] + ':' + path[2:]
else:
conv = path
return conv.replace(os.altsep, os.sep)
__field_sep = {':': __unixpathconv}
if os.pathsep:
if os.pathsep != ':':
__field_sep[os.pathsep] = __nullpathconv
def __get_field_sep(record):
fs = None
for c in __field_sep.keys():
if record.count(c) == 6:
fs = c
break
if fs:
return fs
else:
raise KeyError, '>> passwd database fields not delimited <<'
return
class Passwd:
def __init__(self, name, passwd, uid, gid, gecos, dir, shell):
self.__dict__['pw_name'] = name
self.__dict__['pw_passwd'] = passwd
self.__dict__['pw_uid'] = uid
self.__dict__['pw_gid'] = gid
self.__dict__['pw_gecos'] = gecos
self.__dict__['pw_dir'] = dir
self.__dict__['pw_shell'] = shell
self.__dict__['_record'] = (self.pw_name,
self.pw_passwd,
self.pw_uid,
self.pw_gid,
self.pw_gecos,
self.pw_dir,
self.pw_shell)
def __len__(self):
return 7
def __getitem__(self, key):
return self._record[key]
def __setattr__(self, name, value):
raise AttributeError('attribute read-only: %s' % name)
def __repr__(self):
return str(self._record)
def __cmp__(self, other):
this = str(self._record)
if this == other:
return 0
elif this < other:
return -1
else:
return 1
def __read_passwd_file():
if passwd_file:
passwd = open(passwd_file, 'r')
else:
raise KeyError, '>> no password database <<'
uidx = {}
namx = {}
sep = None
while 1:
entry = passwd.readline().strip()
if len(entry) > 6:
if sep is None:
sep = __get_field_sep(entry)
fields = entry.split(sep)
for i in (2, 3):
fields[i] = int(fields[i])
for i in (5, 6):
fields[i] = __field_sep[sep](fields[i])
record = Passwd(*fields)
if not uidx.has_key(fields[2]):
uidx[fields[2]] = record
if not namx.has_key(fields[0]):
namx[fields[0]] = record
elif len(entry) > 0:
pass
else:
break
passwd.close()
if len(uidx) == 0:
raise KeyError
return (uidx, namx)
def getpwuid(uid):
u, n = __read_passwd_file()
return u[uid]
def getpwnam(name):
u, n = __read_passwd_file()
return n[name]
def getpwall():
u, n = __read_passwd_file()
return n.values()
if __name__ == '__main__':
getpwall()
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\Lib\plat-os2emx\pwd.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:58:59 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
950fad18c2a225efd004c79b4d6d495227942058 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/devtestlabs/azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/aio/operations/__init__.py | 7ccc7d9fe9732634f0ef0d8f8f90cade815703ed | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 2,982 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._provider_operations_operations import ProviderOperationsOperations
from ._labs_operations import LabsOperations
from ._operations import Operations
from ._global_schedules_operations import GlobalSchedulesOperations
from ._artifact_sources_operations import ArtifactSourcesOperations
from ._arm_templates_operations import ArmTemplatesOperations
from ._artifacts_operations import ArtifactsOperations
from ._costs_operations import CostsOperations
from ._custom_images_operations import CustomImagesOperations
from ._formulas_operations import FormulasOperations
from ._gallery_images_operations import GalleryImagesOperations
from ._notification_channels_operations import NotificationChannelsOperations
from ._policy_sets_operations import PolicySetsOperations
from ._policies_operations import PoliciesOperations
from ._schedules_operations import SchedulesOperations
from ._service_runners_operations import ServiceRunnersOperations
from ._users_operations import UsersOperations
from ._disks_operations import DisksOperations
from ._environments_operations import EnvironmentsOperations
from ._secrets_operations import SecretsOperations
from ._service_fabrics_operations import ServiceFabricsOperations
from ._service_fabric_schedules_operations import ServiceFabricSchedulesOperations
from ._virtual_machines_operations import VirtualMachinesOperations
from ._virtual_machine_schedules_operations import VirtualMachineSchedulesOperations
from ._virtual_networks_operations import VirtualNetworksOperations
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"ProviderOperationsOperations",
"LabsOperations",
"Operations",
"GlobalSchedulesOperations",
"ArtifactSourcesOperations",
"ArmTemplatesOperations",
"ArtifactsOperations",
"CostsOperations",
"CustomImagesOperations",
"FormulasOperations",
"GalleryImagesOperations",
"NotificationChannelsOperations",
"PolicySetsOperations",
"PoliciesOperations",
"SchedulesOperations",
"ServiceRunnersOperations",
"UsersOperations",
"DisksOperations",
"EnvironmentsOperations",
"SecretsOperations",
"ServiceFabricsOperations",
"ServiceFabricSchedulesOperations",
"VirtualMachinesOperations",
"VirtualMachineSchedulesOperations",
"VirtualNetworksOperations",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
f94c13e58621ea442f0dfae7b2e78c0974899738 | 4127a99269737c4640e53bad9b32c2c2f7f172d3 | /iptw/plot_hr_paxlovid.py | 74b25db7c3dc5970f0384670c6c4fb4eabe49265 | [] | no_license | calvin-zcx/pasc_phenotype | 0401d920b3cc441405abe9e689672415d57fd984 | 40efce36581721cd91e599ea6e61429fe7ac1f67 | refs/heads/master | 2023-08-31T04:41:34.823658 | 2023-08-30T16:07:52 | 2023-08-30T16:07:52 | 446,634,747 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,692 | py | import os
import shutil
import zipfile
import pickle
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import re
import numpy as np
import csv
from collections import Counter, defaultdict
import pandas as pd
from misc.utils import check_and_mkdir, stringlist_2_str, stringlist_2_list
from scipy import stats
import re
import itertools
import functools
import random
import seaborn as sns
print = functools.partial(print, flush=True)
import zepid
from zepid.graphics import EffectMeasurePlot
import shlex
np.random.seed(0)
random.seed(0)
from misc import utils
def plot_forest_for_dx_organ_pax(database='recover', star=True, text_right=False):
# df = pd.read_excel(
# r'../data/recover/output/results/Paxlovid-allnarrow-V5/causal_effects_specific.xlsx',
# sheet_name='dx')
# df = pd.read_excel(
# r'../data/recover/output/results/Paxlovid-malenarrow/causal_effects_specific_male.xlsx',
# sheet_name='dx')
# df = pd.read_excel(
# r'../data/recover/output/results/Paxlovid-above65narrow/causal_effects_specific_above65.xlsx',
# sheet_name='dx')
# df = pd.read_excel(
# r'../data/recover/output/results/Paxlovid-outpatientnarrow/causal_effects_specific_outpatient.xlsx',
# sheet_name='dx')
# df = pd.read_excel(
# r'../data/recover/output/results/Paxlovid-femalenarrow/causal_effects_specific_female.xlsx',
# sheet_name='dx')
df = pd.read_excel(
r'../data/recover/output/results/Paxlovid-inpatienticunarrow/causal_effects_specific_inpatienticu.xlsx',
sheet_name='dx')
df_select = df.sort_values(by='hr-w', ascending=True)
# df_select = df_select.loc[df_select['selected'] == 1, :] #
print('df_select.shape:', df_select.shape)
organ_list = df_select['Organ Domain'].unique()
print(organ_list)
organ_list = [
'Overall',
'Diseases of the Nervous System',
'Diseases of the Skin and Subcutaneous Tissue',
'Diseases of the Respiratory System',
'Diseases of the Circulatory System',
'Diseases of the Blood and Blood Forming Organs and Certain Disorders Involving the Immune Mechanism',
'Endocrine, Nutritional and Metabolic Diseases',
'Diseases of the Digestive System',
'Diseases of the Genitourinary System',
'Diseases of the Musculoskeletal System and Connective Tissue',
# 'Certain Infectious and Parasitic Diseases',
'General']
# 'Injury, Poisoning and Certain Other Consequences of External Causes']
organ_n = np.zeros(len(organ_list))
labs = []
measure = []
lower = []
upper = []
pval = []
pasc_row = []
nabsv = []
ncumv = []
for i, organ in enumerate(organ_list):
print(i + 1, 'organ', organ)
for key, row in df_select.iterrows():
name = row['PASC Name Simple'].strip('*')
pasc = row['pasc']
hr = row['hr-w']
ci = stringlist_2_list(row['hr-w-CI'])
p = row['hr-w-p']
domain = row['Organ Domain']
# nabs = row['no. pasc in +']
ncum = stringlist_2_list(row['cif_1_w'])[-1] * 1000
ncum_ci = [stringlist_2_list(row['cif_1_w_CILower'])[-1] * 1000,
stringlist_2_list(row['cif_1_w_CIUpper'])[-1] * 1000]
# use nabs for ncum_ci_negative
nabs = stringlist_2_list(row['cif_0_w'])[-1] * 1000
if star:
if p <= 0.001:
name += '***'
elif p <= 0.01:
name += '**'
elif p <= 0.05:
name += '*'
# if (database == 'V15_COVID19') and (row['selected'] == 1) and (row['selected oneflorida'] == 1):
# name += r'$^{‡}$'
#
# if (database == 'oneflorida') and (row['selected'] == 1) and (row['selected insight'] == 1):
# name += r'$^{‡}$'
# if pasc == 'PASC-General':
# pasc_row = [name, hr, ci, p, domain, nabs, ncum]
# continue
if domain == organ:
organ_n[i] += 1
if len(name.split()) >= 5:
name = ' '.join(name.split()[:4]) + '\n' + ' '.join(name.split()[4:])
labs.append(name)
measure.append(hr)
lower.append(ci[0])
upper.append(ci[1])
pval.append(p)
nabsv.append(nabs)
ncumv.append(ncum)
p = EffectMeasurePlot(label=labs, effect_measure=measure, lcl=lower, ucl=upper,
nabs=nabsv, ncumIncidence=ncumv)
p.labels(scale='log')
# organ = 'ALL'
# p.labels(effectmeasure='aHR', add_label1='CIF per\n1000', add_label2='No. of\nCases') # aHR
p.labels(effectmeasure='aHR', add_label1='CIF per\n1000\nin Pos', add_label2='CIF per\n1000\nin Neg')
# p.colors(pointcolor='r')
# '#F65453', '#82A2D3'
# c = ['#870001', '#F65453', '#fcb2ab', '#003396', '#5494DA','#86CEFA']
c = '#F65453'
p.colors(pointshape="s", errorbarcolor=c, pointcolor=c) # , linecolor='black'), # , linecolor='#fcb2ab')
ax = p.plot_with_incidence(figsize=(8.6, .42 * len(labs)), t_adjuster=0.0108, max_value=2.5, min_value=0.15,
size=5, decimal=2,
text_right=text_right)
# plt.title(drug_name, loc="right", x=.7, y=1.045) #"Random Effect Model(Risk Ratio)"
# plt.title('pasc', loc="center", x=0, y=0)
# plt.suptitle("Missing Data Imputation Method", x=-0.1, y=0.98)
# ax.set_xlabel("Favours Control Favours Haloperidol ", fontsize=10)
organ_n_cumsum = np.cumsum(organ_n)
for i in range(len(organ_n) - 1):
ax.axhline(y=organ_n_cumsum[i] - .5, xmin=0.09, color=p.linec, zorder=1, linestyle='--') # linewidth=1,
# ax.set_yticklabels(labs, fontsize=11.5)
ax.set_yticklabels(labs, fontsize=14)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(False)
plt.tight_layout()
output_dir = r'../data/recover/output/results/Paxlovid-inpatienticunarrow/figure/'
check_and_mkdir(output_dir)
plt.savefig(output_dir + 'hr_2CIF-V5.png', bbox_inches='tight', dpi=600)
plt.savefig(output_dir + 'hr_2CIF-V5.pdf', bbox_inches='tight', transparent=True)
plt.show()
print()
# plt.clf()
plt.close()
if __name__ == '__main__':
plot_forest_for_dx_organ_pax(database='recover')
print('Done!')
| [
"calvin-zcx@users.noreply.github.com"
] | calvin-zcx@users.noreply.github.com |
67cb893902b540be6e12e1bc3bd8c73472f214f4 | d93159d0784fc489a5066d3ee592e6c9563b228b | /DQMOffline/Alignment/python/LaserAlignmentT0ProducerDQM_cfi.py | 1675986f0f636cb6341974c581dbac1f366e7727 | [] | permissive | simonecid/cmssw | 86396e31d41a003a179690f8c322e82e250e33b2 | 2559fdc9545b2c7e337f5113b231025106dd22ab | refs/heads/CAallInOne_81X | 2021-08-15T23:25:02.901905 | 2016-09-13T08:10:20 | 2016-09-13T08:53:42 | 176,462,898 | 0 | 1 | Apache-2.0 | 2019-03-19T08:30:28 | 2019-03-19T08:30:24 | null | UTF-8 | Python | false | false | 1,532 | py | import FWCore.ParameterSet.Config as cms
LaserAlignmentT0ProducerDQM = cms.EDAnalyzer( "LaserAlignmentT0ProducerDQM",
# specify the input digi collections to run on
DigiProducerList = cms.VPSet(
cms.PSet(
DigiLabel = cms.string( 'ZeroSuppressed' ),
DigiType = cms.string( 'Processed' ),
DigiProducer = cms.string( 'ALCARECOTkAlLASsiStripDigis' )
),
cms.PSet(
DigiLabel = cms.string( 'VirginRaw' ),
DigiType = cms.string( 'Raw' ),
DigiProducer = cms.string( 'ALCARECOTkAlLASsiStripDigis' )
),
cms.PSet(
DigiLabel = cms.string( 'ProcessedRaw' ),
DigiType = cms.string( 'Raw' ),
DigiProducer = cms.string( 'ALCARECOTkAlLASsiStripDigis' )
),
cms.PSet(
DigiLabel = cms.string( 'ScopeMode' ),
DigiType = cms.string( 'Raw' ),
DigiProducer = cms.string( 'ALCARECOTkAlLASsiStripDigis' )
)
),
# the lower threshold for the strip amplitude;
# profiles with digis above will be considered containing signal
LowerAdcThreshold = cms.uint32( 15 ),
# the upper threshold for the strip amplitude;
# profiles with digis below will be considered containing a signal
UpperAdcThreshold = cms.uint32( 220 ),
# the dqm folder name to write to
FolderName = cms.string( "TkAlLAS" ),
# additionally dump in plain ROOT file?
OutputInPlainROOT = cms.bool( False ),
# if plain ROOT output, then write to this file
PlainOutputFileName = cms.string( "TkAlLAS.dqm.root" )
)
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
a03f9af926025114147e26a598beb8f111dfd873 | 8a42be3f930d8a215394a96ad2e91c95c3b7ff86 | /Build/Instalation/GeneralDb/Marathon/MarathonTests_1.1/linux_HSQLDB_Edit/TestCases/Y1_NamedFldTests/Create_GroupLayoutMenu2/AA3_CreateFlds3b.py | 2fd7a65fc5b90510224a03237363295796ce21fd | [] | no_license | java-tools/jrec | 742e741418c987baa4350390d126d74c0d7c4689 | 9ece143cdd52832804eca6f3fb4a1490e2a6f891 | refs/heads/master | 2021-09-27T19:24:11.979955 | 2017-11-18T06:35:31 | 2017-11-18T06:35:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,097 | py | useFixture(default)
def test():
    """Marathon-recorded GUI test for the Record Editor.

    Creates a "Group of Records" layout (zx1xzFLDg1) holding three child
    records, saves it under a second name (zxzxzFLDg2), removes one child
    from the copy, verifies both layouts independently, deletes the copy,
    and finally re-checks that the original layout is intact.

    Fixture functions (window, select_menu, click, select, assert_p,
    close) are injected by the Marathon runtime via useFixture(default).
    """
    java_recorded_version = '1.6.0_22'
    if window('Record Editor'):
        select_menu('Record Layouts>>Edit Layout')
        # Build a new group layout "zx1xzFLDg1" with three child records.
        click('New2')
        select('RecordDef.Record Name_Txt', 'zx1xzFLDg1')
        select('RecordDef.Description_Txt', 'Group Test 1')
        click('Insert')
        select('RecordFieldsJTbl', 'cell:FieldName,0()')
        click('Delete2')
        select('RecordDef.Record Type_Txt', 'Group of Records')
        select('TabbedPane', 'Child Records')
        click('Insert')
        select('ChildRecordsJTbl', 'cell:Child Record,0()')
        select('ChildRecordsJTbl', 'zxzxzFLD1', 'Child Record,0')
        select('ChildRecordsJTbl', 'cell:Child Record,0(zxzxzFLD1)')
        click('Insert')
        select('ChildRecordsJTbl', 'cell:Child Record,1()')
        select('ChildRecordsJTbl', 'zxzxzFLD2', 'Child Record,1')
        select('ChildRecordsJTbl', 'cell:Child Record,1(zxzxzFLD2)')
        click('Insert')
        select('ChildRecordsJTbl', 'cell:Child Record,2()')
        select('ChildRecordsJTbl', 'zxzxzFLD3', 'Child Record,2')
        select('ChildRecordsJTbl', 'cell:Child Record,2(zxzxzFLD3)')
        assert_p('ChildRecordsJTbl', 'Content', '[[, zxzxzFLD1, , , , , ], [, zxzxzFLD2, , , , , ], [, zxzxzFLD3, , , , , ]]')
        select('ChildRecordsJTbl', 'cell:Child Record,2(zxzxzFLD3)')
        # Save a copy of the layout under the name "zxzxzFLDg2".
        click('Save As')
        if window('Input'):
            select('OptionPane.textField', 'zxzxzFLDg2')
            click('OK')
        close()
        select('TabbedPane', 'Child Records')
        assert_p('ChildRecordsJTbl', 'Content', '[[, zxzxzFLD1, , , , , ], [, zxzxzFLD2, , , , , ], [, zxzxzFLD3, , , , , ]]')
        # Modify the copy: new description, drop the middle child record.
        select('RecordDef.Description_Txt', 'Group Test 2')
        select('ChildRecordsJTbl', 'cell:Child Name,1()')
        click('Delete2')
        assert_p('ChildRecordsJTbl', 'Content', '[[, zxzxzFLD1, , , , , ], [, zxzxzFLD3, , , , , ]]')
        # The original layout must still have all three children.
        select('RecordList.Record Name_Txt', 'zx1xzFLDg1')
        select('TabbedPane', 'Child Records')
        select('RecordList.Description_Txt', '%')
        select('TabbedPane', 'Child Records')
        assert_p('ChildRecordsJTbl', 'Content', '[[, zxzxzFLD1, , , , , ], [, zxzxzFLD2, , , , , ], [, zxzxzFLD3, , , , , ]]')
        assert_p('RecordDef.Description_Txt', 'Text', 'Group Test 1')
        assert_p('RecordDef.Record Name_Txt', 'Text', 'zx1xzFLDg1')
        # The copy must reflect the modification (two children left).
        select('RecordList.Record Name_Txt', 'zxzxzFLDg2')
        select('TabbedPane', 'Child Records')
        select('RecordList.Description_Txt', '%%')
        select('TabbedPane', 'Child Records')
        assert_p('ChildRecordsJTbl', 'Content', '[[, zxzxzFLD1, , , , , ], [, zxzxzFLD3, , , , , ]]')
        assert_p('RecordDef.Description_Txt', 'Text', 'Group Test 2')
        assert_p('RecordDef.Record Name_Txt', 'Text', 'zxzxzFLDg2')
        # Delete the copy and confirm the original survived unchanged.
        click('Delete3')
        if window('Delete: zxzxzFLDg2'):
            click('Yes')
        close()
        select('RecordList.Record Name_Txt', 'zx1xzFLDg1')
        select('RecordList.Description_Txt', '%')
        select('TabbedPane', 'Child Records')
        assert_p('ChildRecordsJTbl', 'Content', '[[, zxzxzFLD1, , , , , ], [, zxzxzFLD2, , , , , ], [, zxzxzFLD3, , , , , ]]')
        assert_p('RecordDef.Record Name_Txt', 'Text', 'zx1xzFLDg1')
        click('BasicInternalFrameTitlePane$NoFocusButton2')
    close()
| [
"bruce_a_martin@b856f413-25aa-4700-8b60-b3441822b2ec"
] | bruce_a_martin@b856f413-25aa-4700-8b60-b3441822b2ec |
c2b3af64857c45375bda0713aa8fd0f641facdd7 | 0cfb5831a748ebd46e438e3ad7e7a09c1d196499 | /com/chapter_06/section_02/task_6.2.4_dictionaryMod.py | 6bebe76bd8bd605135584f6b46ed551709e71f0d | [] | no_license | StevenGeGe/pythonFromIntroductionToPractice01 | 7cfe8cdb4bc5c0ddbe25b44976231d72d9e10108 | 9d2ba499056b30ded14180e6c4719ee48edd9772 | refs/heads/master | 2023-02-15T04:08:59.878711 | 2020-12-28T13:27:55 | 2020-12-28T13:27:55 | 310,980,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/11/17 20:24
# @Author : Yong
# @Email : Yong_GJ@163.com
# @File : task_6.2.4_dictionaryMod.py
# @Software: PyCharm
# Modifying values in a dictionary.
alien_0 = {'color': 'green'}
print("The alien is " + alien_0['color'] + ".")
# Output: The alien is green.
alien_0['color'] = 'yellow'
print("The alien is " + alien_0['color'] + ".")
# Output: The alien is yellow.

# A more involved modification: move an alien to the right, where the
# step size depends on the alien's current speed.
alien_1 = {'x_position': 0, 'y_position': 25, 'speed': 'medium'}
print("Original x-position: " + str(alien_1['x_position']))
# Determine how far to move the alien based on its current speed.
if alien_1['speed'] == 'slow':
    x_increment = 1
elif alien_1['speed'] == 'medium':
    x_increment = 2
else:
    # This must be a fast alien.
    x_increment = 3
# The new position is the old position plus the increment.
alien_1['x_position'] = alien_1['x_position'] + x_increment
print("New x-position: " + str(alien_1['x_position']))
| [
"Yong_GJ@163.com"
] | Yong_GJ@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.