blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bdf608e30942d1e434a5454fe9cbc342f5efd52f | d10e19f07b209b2fa661fb616947d0382fd0bbb0 | /util/reload/del_e.py | ddff6d5f5e907b62b99314dae8f2bfa3b02afe35 | [] | no_license | Do3956/test | 6bda9633aa2762b8f0f4b05b154810107c40d9ee | 15bbc285dc3acbbabaefb188cb264e56fb24c84d | refs/heads/master | 2021-07-02T23:52:29.519329 | 2018-02-07T06:19:28 | 2018-02-07T06:19:28 | 95,771,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | # -*- coding: utf-8 -*-
"""
__author__ = 'do'
__mtime__ = '2018/1/11'
__content__ = '析构函数重载'
"""
class Life:
    # Demonstrates "destructor overloading" (see module docstring above).
    # Python 2 syntax: `print` is a statement here.
    def __init__(self, name='name'):
        # Constructor: greet and remember the instance name.
        print 'Hello', name
        self.name = name
    def __del__(self):
        # Destructor: runs when the instance becomes unreachable, e.g. when
        # the `brain` variable below is rebound to a plain string.
        print 'Goodby', self.name
brain = Life('Brain') # call __init__
brain = 'loretta' # call __del__ | [
"395614269@163.com"
] | 395614269@163.com |
ce2ea39a792d9d09f4cc66658a211858bd44ab01 | 5667cc877342204b7d54b6c3cc5a9f4854f08829 | /.history/apppersona/forms_20201101194225.py | fafb266b000ff5ea951dc20c6446324342a53378 | [] | no_license | Nyckhos/TestCommit | d62e3f6fefb04ab5647475cc7ead0d72cbd89efa | 9aa8e2e35280b7862960cc8a864e9c02ac7f4796 | refs/heads/main | 2023-01-05T05:57:59.223641 | 2020-11-02T02:08:18 | 2020-11-02T02:08:18 | 309,237,224 | 2 | 0 | null | 2020-11-02T02:30:43 | 2020-11-02T02:30:43 | null | UTF-8 | Python | false | false | 3,647 | py | from django import forms
from django.db import models
from django.db.models.fields import CharField, EmailField
from django.forms import ModelForm, Textarea
from django.forms.widgets import EmailInput, HiddenInput, PasswordInput, Select, TextInput
from .models import Persona
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.forms import UserCreationForm
class FormularioPersona(forms.ModelForm):
    """ModelForm for creating/updating a Persona.

    Exposes the basic contact fields and decorates every widget with a
    Bootstrap `form-control` class plus focus/blur JavaScript hooks
    (setVisibility toggles a help hint; sync() mirrors values into the
    hidden user-form fields -- see ExtendedUserCreationForm below).
    """
    class Meta:
        model = Persona
        fields = ('nombre', 'apellido', 'email', 'celular',
                  'region')
        # Widget overrides: single-row textareas styled as inputs; the ids
        # in setVisibility('10x', ...) refer to per-field hint elements.
        widgets = {
            'nombre': Textarea(attrs={'rows': 1, 'cols': 30, 'style': 'resize:none;', 'id': 'nombre', 'class': 'form-control', 'required onfocus': "setVisibility('100','inline')", 'onBlur': "setVisibility('100','none')", 'onkeyup':"sync()"}),
            'apellido': Textarea(attrs={'rows': 1, 'cols': 30, 'style': 'resize:none;', 'id': 'apellido', 'class': "form-control", 'required onfocus': "setVisibility('101','inline')", 'onBlur': "setVisibility('101','none')", 'onkeyup':"sync()"}),
            'email': EmailInput(attrs={'rows': 1, 'cols': 30, 'style': 'resize:none;', 'id': 'email', 'class': "form-control", 'required onfocus': "setVisibility('102','inline')", 'onBlur': "setVisibility('102','none')", 'type': 'email', 'onkeyup':"sync()"}),
            'celular': Textarea(attrs={'rows': 1, 'cols': 30, 'style': 'resize:none;', 'id': 'celular', 'class': "form-control", 'required onfocus': "setVisibility('103','inline')", 'onBlur': "setVisibility('103','none')"}),
            'region': Select(attrs={'class': "form-control", 'onfocus': "setVisibility('105','inline')", 'onBlur': "setVisibility('105','none')", 'default': "rm"}),
        }
        # Spanish user-facing labels (marked for translation via ugettext_lazy).
        labels = {
            'nombre': _('Nombre'),
            'apellido': _('Apellido'),
            'email': _('Correo Electronico'),
            'celular': _('Numero Telefonico'),
            'region': _('Region'),
        }
        #help_texts = {
        #    'nombre': _('Ingrese su nombre.'),
        #}
        #error_messages = {
        #    'nombre': {
        #        'max_length': _("El nombre excede el limite de caracteres."),
        #    },
        #}
class ExtendedUserCreationForm(UserCreationForm):
    """UserCreationForm extended with email and first/last name.

    first_name, last_name and email are rendered as *hidden* inputs: their
    values are filled in client-side (via the sync() hooks attached to
    FormularioPersona's widgets) rather than typed directly.
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): explicit two-argument super() here vs. bare super()
        # in save() below -- the bare form requires Python 3; confirm the
        # project targets Python 3 only.
        super(ExtendedUserCreationForm, self).__init__(*args, **kwargs)
        # Restyle the auto-generated widgets with Bootstrap classes and ids.
        self.fields['first_name'].widget = TextInput(attrs = {'class': 'form-control', 'id':'first_name', 'type':'hidden'})
        self.fields['last_name'].widget = TextInput(attrs = {'class': 'form-control', 'id':'last_name', 'type':'hidden'})
        self.fields['username'].widget = TextInput(attrs = {'class': 'form-control', 'id':'username', 'required onfocus': "setVisibility('104','inline')", 'onBlur': "setVisibility('104','none')"})
        self.fields['password1'].widget = PasswordInput(attrs = {'class': 'form-control', 'id':'password1'})
        self.fields['password2'].widget = PasswordInput(attrs = {'class': 'form-control', 'id':'password2'})
        self.fields['email'].widget = EmailInput(attrs = {'class': 'form-control', 'id':'useremail', 'type':'hidden'})
    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'username', 'email', 'password1', 'password2')
    def save(self, commit=True):
        # Copy the extra cleaned fields onto the User instance before saving;
        # UserCreationForm.save() alone only handles username/password.
        user = super().save(commit=False)
        user.email = self.cleaned_data['email']
        user.first_name = self.cleaned_data['first_name']
        user.last_name = self.cleaned_data['last_name']
        if commit:
            user.save()
        return user
| [
"fernandox_240997@live.com"
] | fernandox_240997@live.com |
7b3468eb08adcb7c9eab99a524080c7e23f65b33 | 255e7b37e9ce28bbafba5a3bcb046de97589f21c | /suqing/fuckal/python/linkedlist/reverse-nodes-in-k-group.py | 99781eaab54715ab585558e6417e2f13b6e426ff | [] | no_license | dog2humen/ForTheCoffee | 697d2dc8366921aa18da2fa3311390061bab4b6f | 2f940aa9dd6ce35588de18db08bf35a2d04a54f4 | refs/heads/master | 2023-04-15T09:53:54.711659 | 2021-04-28T13:49:13 | 2021-04-28T13:49:13 | 276,009,709 | 2 | 2 | null | 2020-07-01T08:29:33 | 2020-06-30T05:50:01 | Python | UTF-8 | Python | false | false | 1,684 | py | # coding:utf8
"""
25. K 个一组翻转链表
给你一个链表,每 k 个节点一组进行翻转,请你返回翻转后的链表。
k 是一个正整数,它的值小于或等于链表的长度。
如果节点总数不是 k 的整数倍,那么请将最后剩余的节点保持原有顺序。
输入:head = [1,2,3,4,5], k = 2
输出:[2,1,4,3,5]
链接:https://leetcode-cn.com/problems/reverse-nodes-in-k-group
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    # Annotations are string-quoted throughout: ListNode is provided by the
    # judge environment (it appears only in a comment above), so unquoted
    # annotations would raise NameError when this module is imported.

    def reverseKGroup(self, head: "ListNode", k: int) -> "ListNode":
        """Reverse the list k nodes at a time (LeetCode 25).

        A trailing group with fewer than k nodes is left in its original
        order. Delegates to the recursive implementation.
        """
        return self.reverseKGroup_v1(head, k)

    def reverseKGroup_v1(self, head: "ListNode", k: int) -> "ListNode":
        """Recursive solution: reverse the first k nodes, recurse on the rest."""
        if not head:
            return None
        a, b = head, head  # [a, b) will bracket the next group of k nodes
        for _ in range(k):
            if not b:  # fewer than k nodes remain -> keep them untouched
                return head
            b = b.next
        new_head = self.reverseHT(a, b)
        # `a` is now the tail of the reversed group; splice in the processed rest.
        a.next = self.reverseKGroup_v1(b, k)
        return new_head

    def reverseHT(self, head: "ListNode", tail: "ListNode") -> "ListNode":
        """Reverse the half-open node range [head, tail); return its new head.

        The original `head` ends up pointing at None; callers re-link it to
        the remainder of the list.
        """
        pre, cur = None, head
        while cur != tail:
            nxt = cur.next
            cur.next = pre
            pre = cur
            cur = nxt
        return pre

    def reverseKGroup_v2(self, head: "ListNode", k: int) -> "ListNode":
        """Iterative solution (the original left only a placeholder docstring).

        Walks the list once, reversing each complete group of k nodes in
        place and re-linking the previous group's tail to the new group's
        head. Allocates no helper/dummy nodes.
        """
        new_head = None    # head of the fully processed list
        prev_tail = None   # tail of the previously reversed group
        cur = head
        while True:
            # Probe k nodes ahead; if the list runs out, attach the
            # remainder unreversed and finish.
            probe = cur
            for _ in range(k):
                if not probe:
                    if prev_tail:
                        prev_tail.next = cur
                    return new_head if new_head is not None else head
                probe = probe.next
            group_tail = cur  # the group's head becomes its tail after reversal
            reversed_head = self.reverseHT(cur, probe)
            if new_head is None:
                new_head = reversed_head
            if prev_tail:
                prev_tail.next = reversed_head
            prev_tail = group_tail
            cur = probe
| [
"116676671@qq.com"
] | 116676671@qq.com |
61d453f7043d551d259a951e4642f47d5429b7cd | f6302d4915f1186106270ac78273534920bdc553 | /tests/test_scripts/test_gen_jsonld.py | 385e3a242135665eba7991fc0854bfad5e25d86a | [
"CC0-1.0"
] | permissive | biolink/biolinkml | 7daeab5dfbefb8c9a6ce15176725be4b0faf86c7 | 34531bd9cb8805029035c7b7726398ebee972b97 | refs/heads/master | 2021-06-10T20:10:56.718829 | 2021-04-04T20:05:59 | 2021-04-04T20:05:59 | 167,634,946 | 25 | 19 | CC0-1.0 | 2021-10-02T01:45:34 | 2019-01-26T01:07:26 | Python | UTF-8 | Python | false | false | 4,595 | py | import os
import re
import unittest
# This has to occur post ClickTestCase
from functools import reduce
from typing import List, Tuple
import click
from rdflib import Graph, URIRef
from biolinkml import METAMODEL_NAMESPACE
from biolinkml.generators.jsonldcontextgen import ContextGenerator
from biolinkml.generators import jsonldgen
from tests.test_scripts.environment import env
from tests.utils.clicktestcase import ClickTestCase
cwd = os.path.dirname(__file__)
meta_context = 'file:./output/gencontext/meta.jsonld'
repl: List[Tuple[str, str]] = [
(r'"source_file_size": [0-9]+', ''),
(r'"source_file_date": "[^"]+"', ''),
(r'"generation_date": "[^"]+"', ''),
(r'"source_file": "[^"]+"', '')
]
def filtr(txt: str) -> str:
    """Apply every (pattern, replacement) pair in ``repl`` to *txt*, in order.

    Used to blank out volatile generator metadata (file sizes/dates) before
    snapshot comparison.
    """
    cleaned = txt
    for pattern, replacement in repl:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned
class GenJSONLDTestCase(ClickTestCase):
    """End-to-end tests for the ``gen-jsonld`` CLI generator."""
    # ClickTestCase configuration: snapshot directory, click entry point,
    # program name used in help output, and the shared test environment.
    testdir = "genjsonld"
    click_ep = jsonldgen.cli
    prog_name = "gen-jsonld"
    env = env
    def test_help(self):
        # --help output is compared against a recorded snapshot named 'help'.
        self.do_test("--help", 'help')
    def test_meta(self):
        # Default format and explicit -f jsonld must produce the same output;
        # an unsupported format must raise click's BadParameter.
        self.temp_file_path('meta.jsonld')
        self.do_test(f"--context {meta_context}", 'meta.jsonld', filtr=filtr)
        self.do_test(f'-f jsonld --context {meta_context}', 'meta.jsonld', filtr=filtr)
        self.do_test(f'-f xsv --context {meta_context}', 'meta_error',
                     expected_error=click.exceptions.BadParameter)
    def check_size(self, g: Graph, g2: Graph, root: URIRef, expected_classes: int, expected_slots: int,
                   expected_types: int, expected_subsets: int, expected_enums: int, model: str) -> None:
        """
        Assert that both graphs carry the expected number of metamodel
        classes, slots, types, subsets and enums attached to `root`.

        :param g: graph loaded directly from the generated JSON-LD
        :param g2: graph re-parsed from the serialized turtle output
        :param root: subject node whose meta:classes/slots/... values are counted
        :param expected_classes: expected number of meta:classes values
        :param expected_slots: expected number of meta:slots values
        :param expected_types: expected number of meta:types values
        :param expected_subsets: expected number of meta:subsets values
        :param expected_enums: expected number of meta:enums values
        :param model: model name used in assertion failure messages
        :return: None
        """
        for graph in [g, g2]:
            n_classes = len(list(graph.objects(root, METAMODEL_NAMESPACE.classes)))
            n_slots = len(list(graph.objects(root, METAMODEL_NAMESPACE.slots)))
            n_types = len(list(graph.objects(root, METAMODEL_NAMESPACE.types)))
            n_subsets = len(list(graph.objects(root, METAMODEL_NAMESPACE.subsets)))
            n_enums = len(list(graph.objects(root, METAMODEL_NAMESPACE.enums)))
            self.assertEqual(expected_classes, n_classes, f"Expected {expected_classes} classes in {model}")
            self.assertEqual(expected_slots, n_slots, f"Expected {expected_slots} slots in {model}")
            self.assertEqual(expected_types, n_types, f"Expected {expected_types} types in {model}")
            self.assertEqual(expected_subsets, n_subsets, f"Expected {expected_subsets} subsets in {model}")
            self.assertEqual(expected_enums, n_enums, f"Expected {expected_enums} enums in {model}")
    def test_meta_output(self):
        """ Generate a context AND a jsonld for the metamodel and make sure it parses as RDF """
        tmp_jsonld_path = self.temp_file_path('metajson.jsonld')
        tmp_rdf_path = self.temp_file_path('metardf.ttl')
        tmp_meta_context_path = self.temp_file_path('metacontext.jsonld')
        # Generate an image of the metamodel
        gen = ContextGenerator(env.meta_yaml, importmap=env.import_map)
        base = gen.schema.id
        if base[-1] not in '/#':
            base += '/'
        base += gen.schema.name
        # Generate context
        with open(tmp_meta_context_path, 'w') as tfile:
            tfile.write(gen.serialize())
        # Generate JSON
        with open(tmp_jsonld_path, 'w') as tfile:
            tfile.write(jsonldgen.JSONLDGenerator(env.meta_yaml, fmt=jsonldgen.JSONLDGenerator.valid_formats[0],
                                                  importmap=env.import_map).serialize(context=tmp_meta_context_path))
        # Convert JSON to TTL
        g = Graph()
        g.load(tmp_jsonld_path, format="json-ld")
        g.serialize(tmp_rdf_path, format="ttl")
        g.bind('meta', METAMODEL_NAMESPACE)
        new_ttl = g.serialize(format="turtle").decode()
        # Make sure that the generated TTL matches the JSON-LD (probably not really needed, as this is more of a test
        # of rdflib than our tooling but it doesn't hurt
        new_g = Graph()
        new_g.parse(data=new_ttl, format="turtle")
        # Make sure that both match the expected size (classes, slots, types, and model name for error reporting)
        self.check_size(g, new_g, URIRef(base), 17, 122, 14, 1, 1, "meta")
if __name__ == '__main__':
unittest.main()
| [
"solbrig@jhu.edu"
] | solbrig@jhu.edu |
a23323ac53143abd4e91ae0d35c314a55e9716cb | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /r6ywkSJHWqA7EK5fG_14.py | e1800ffbfe2d70f4996996c5a58be1d4e86742ab | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | """
Write a method that accepts two integer parameters rows and cols. The output
is a 2d array of numbers displayed in column-major order, meaning the numbers
shown increase sequentially down each column and wrap to the top of the next
column to the right once the bottom of the current column is reached.
### Examples
printGrid(3, 6) ➞ [
[1, 4, 7, 10, 13, 16],
[2, 5, 8, 11, 14, 17],
[3, 6, 9, 12, 15, 18]
]
printGrid(5, 3) ➞ [
[1, 6, 11],
[2, 7, 12],
[3, 8, 13],
[4, 9, 14],
[5, 10, 15]
]
printGrid(4, 1) ➞ [
[1],
[2],
[3],
[4]
]
### Notes
N/A
"""
def printgrid(rows, cols):
    """Return a ``rows`` x ``cols`` grid of 1..rows*cols in column-major order.

    Numbers increase down each column and wrap to the top of the next
    column, so row r (1-based) holds r, r+rows, r+2*rows, ...
    """
    grid = []
    for row in range(1, rows + 1):
        grid.append([row + col * rows for col in range(cols)])
    return grid
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
e8c56a08cc5141e47f33f2dd24e7959853718791 | d82ac08e029a340da546e6cfaf795519aca37177 | /chapter_06_model_evaluation_hyperparameter_tuning/09_precision_recall.py | 8ed40b8d08c5c3316ccb51e1e9aa19e22539551a | [] | no_license | CSwithJC/PythonMachineLearning | 4409303c3f4d4177dc509c83e240d7a589b144a0 | 0c4508861e182a8eeacd4645fb93b51b698ece0f | refs/heads/master | 2021-09-04T04:28:14.608662 | 2018-01-15T20:25:36 | 2018-01-15T20:25:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,384 | py | import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, make_scorer
"""Precision, Recall, F1-Score:
Precision (also called positive predictive value) is the fraction of
relevant instances among the retrieved instances, while recall
(also known as sensitivity) is the fraction of relevant instances
that have been retrieved over the total amount of relevant instances.
F1-Score is calculated using both precision and recall.
"""
df = pd.read_csv('../data/wdbc.csv')
# Convert M (malign) and B (benign) into numbers
X = df.iloc[:, 2:].values
y = df.iloc[:, 1].values
le = LabelEncoder()
y = le.fit_transform(y)
# Result discarded -- presumably a sanity check of the label encoding.
le.transform(['M', 'B'])
# Divide Dataset into separate training (80%) and testing (20%)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=1)
# Standardize the features before the (scale-sensitive) SVM classifier.
pipe_svc = Pipeline([('scl', StandardScaler()),
                     ('clf', SVC(random_state=1))])
pipe_svc.fit(X_train, y_train)
y_pred = pipe_svc.predict(X_test)
# Create the Confusion Matrix:
confmat = confusion_matrix(y_true=y_test,
                           y_pred=y_pred)
print(confmat)
# Precision, Recall, F1-Score:
print('Precision: %.3f' % precision_score(y_true=y_test, y_pred=y_pred))
print('Recall: %.3f' % recall_score(y_true=y_test, y_pred=y_pred))
print('F1: %.3f' % f1_score(y_true=y_test, y_pred=y_pred))
# We can do grid search based on any of these scores, not just accuracy:
# All the different values we will try for C.
param_range = [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
# Combinations of different parameters to be ran here for Grid Search:
param_grid = [
    {
        'clf__C': param_range,
        'clf__kernel': ['linear']
    },
    { # NOTE: gamma parameter is specific to kernel SVMs.
        'clf__C': param_range,
        'clf__gamma': param_range,
        'clf__kernel': ['rbf']
    }
]
# Grid-search objective: F1 of class 0 rather than plain accuracy.
# NOTE(review): pos_label=0 targets whichever label LabelEncoder mapped to
# 0 -- confirm that is the intended class of interest.
scorer = make_scorer(f1_score, pos_label=0)
gs = GridSearchCV(estimator=pipe_svc,
                  param_grid=param_grid,
                  scoring=scorer,
                  cv=10,
                  n_jobs=-1)
| [
"jean.mendez2@upr.edu"
] | jean.mendez2@upr.edu |
0f376f00c936710d9751db5444a986c0a3a66788 | 17dcc3e6a5e418a7c4c2e79f6e34ae7df39fdbcd | /polyaxon_lib/datasets/imdb.py | b9f921cf262d4dc58eef86b59c760646f5f749fc | [
"MIT"
] | permissive | polyaxon/polyaxon-lib | 55358fa8f56a1cd12a443672f4d6cb990c51ae8f | d357b7fee03b2f47cfad8bd7e028d3e265a10575 | refs/heads/master | 2021-09-11T19:43:59.391273 | 2018-04-11T15:35:03 | 2018-04-11T15:35:03 | 94,631,683 | 7 | 4 | MIT | 2018-01-25T14:08:54 | 2017-06-17T15:18:04 | Python | UTF-8 | Python | false | false | 4,981 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import json
import os
import six.moves.cPickle as pickle # pylint: disable=import-error
import numpy as np
import tensorflow as tf
from polyaxon_lib import Modes
from polyaxon_lib.datasets.converters.sequence_converters import SequenceToTFExampleConverter
from polyaxon_lib.datasets.utils import (
download_datasets,
delete_datasets,
make_dataset_dir,
create_sequence_dataset_input_fn,
create_sequence_dataset_predict_input_fn
)
_DATA_URL = 'http://www.iro.umontreal.ca/~lisa/deep/data/'
_FILENAME = 'imdb.pkl'
_MAX_TOKEN = 10000
_UNK = 1
META_DATA_FILENAME_FORMAT = '{}/meta_data.json'
RECORD_FILE_NAME_FORMAT = '{}/imdb_{}.tfrecord'
def _clean_tokens(seq):
    """Clamp out-of-vocabulary token ids to the unknown-token id.

    Any id >= _MAX_TOKEN is replaced by _UNK; in-vocabulary ids pass
    through unchanged.  (The original used the bare literal ``1`` here,
    duplicating the module-level _UNK constant.)
    """
    return [_UNK if t >= _MAX_TOKEN else t for t in seq]
def prepare_dataset(converter, dataset_dir, dataset, data_name, num_items, num_eval=0):
    """Serialize one split of the IMDB data to TFRecord file(s).

    Args:
        converter: SequenceToTFExampleConverter used to write the examples.
        dataset_dir: directory that holds the generated record files.
        dataset: (token_sequences, labels) pair as loaded from imdb.pkl.
        data_name: mode name spliced into the record file name.
        num_items: number of items in `dataset`.
        num_eval: when > 0, shuffle and carve off this many items into a
            separate eval record file.
    """
    filename = RECORD_FILE_NAME_FORMAT.format(dataset_dir, data_name)
    if num_eval:
        eval_filename = RECORD_FILE_NAME_FORMAT.format(dataset_dir, Modes.EVAL)
    if tf.gfile.Exists(filename):
        # Idempotent: an existing record file is never overwritten.
        print('`{}` Dataset files already exist. '
              'Exiting without re-creating them.'.format(filename))
        return
    tokens = dataset[0]
    labels = dataset[1]
    if num_eval:
        # split data
        idx = np.random.permutation(num_items)
        eval_tokens = [{'source_token': _clean_tokens(tokens[i])} for i in idx[:num_eval]]
        eval_labels = [{'label': labels[i]} for i in idx[:num_eval]]
        tokens = [{'source_token': _clean_tokens(tokens[i])} for i in idx[num_eval:]]
        labels = [{'label': labels[i]} for i in idx[num_eval:]]
    else:
        # NOTE(review): only the first 100 token sequences are kept here
        # while *all* labels are, and total_num_items below still uses the
        # full count -- looks like leftover debug truncation; confirm.
        tokens = [{'source_token': _clean_tokens(t)} for t in tokens[:100]]
        labels = [{'label': l} for l in labels]
    with tf.python_io.TFRecordWriter(filename) as tfrecord_writer:
        with tf.Session('') as session:
            converter.convert(session=session, writer=tfrecord_writer,
                              sequence_features_list=tokens,
                              context_features_list=labels,
                              total_num_items=num_items - num_eval)
    if num_eval:
        with tf.python_io.TFRecordWriter(eval_filename) as tfrecord_writer:
            with tf.Session('') as session:
                converter.convert(session=session, writer=tfrecord_writer,
                                  sequence_features_list=eval_tokens,
                                  context_features_list=eval_labels,
                                  total_num_items=num_eval)
def prepare(dataset_dir):
    """Runs download and conversion operation.

    Downloads imdb.pkl, writes the train/eval/predict TFRecord files and a
    meta_data.json manifest, then deletes the raw pickle. A no-op when all
    three record files already exist.

    Args:
        dataset_dir: The dataset directory where the dataset is stored.
    """
    make_dataset_dir(dataset_dir)
    if all([
        tf.gfile.Exists(RECORD_FILE_NAME_FORMAT.format(dataset_dir, Modes.TRAIN)),
        tf.gfile.Exists(RECORD_FILE_NAME_FORMAT.format(dataset_dir, Modes.EVAL)),
        tf.gfile.Exists(RECORD_FILE_NAME_FORMAT.format(dataset_dir, Modes.PREDICT)),
    ]):
        print('`{}` Dataset files already exist.')
        return
    download_datasets(dataset_dir, _DATA_URL, [_FILENAME])
    # The pickle holds two objects back to back: train set, then test set.
    with open(os.path.join(dataset_dir, _FILENAME), 'rb') as f:
        train_set = pickle.load(f)
        test_set = pickle.load(f)
    converter = SequenceToTFExampleConverter(sequence_features_types={'source_token': 'int'},
                                             context_features_types={'label': 'int'})
    num_items = len(train_set[0])
    # 10% of the training data is held out for evaluation.
    len_eval_data = int(num_items * 0.1)
    len_test_data = len(test_set[0])
    prepare_dataset(converter, dataset_dir, train_set, Modes.TRAIN, num_items, len_eval_data)
    prepare_dataset(converter, dataset_dir, test_set, Modes.PREDICT, len_test_data)
    # Finally, write the meta data:
    with open(META_DATA_FILENAME_FORMAT.format(dataset_dir), 'w') as meta_data_file:
        meta_data = converter.get_meta_data()
        meta_data['num_samples'] = {Modes.TRAIN: num_items - len_eval_data,
                                    Modes.EVAL: len_eval_data,
                                    Modes.PREDICT: len_test_data}
        meta_data['items_to_descriptions'] = {
            'source_token': 'A sequence of word ids.',
            'label': 'A single integer 0 or 1',
        }
        meta_data['num_classes'] = 2
        json.dump(meta_data, meta_data_file)
    delete_datasets(dataset_dir, [_FILENAME])
    print('\nFinished converting the IMDB dataset!')
def create_input_fn(dataset_dir):
    """Build the train/eval input_fn over the bucketed IMDB TFRecord files."""
    bucket_boundaries = [140, 200, 300, 400, 500]
    return create_sequence_dataset_input_fn(
        dataset_dir, prepare, RECORD_FILE_NAME_FORMAT, META_DATA_FILENAME_FORMAT,
        bucket_boundaries=bucket_boundaries)
def create_predict_input_fn(dataset_dir):
    """Build the prediction input_fn over the bucketed IMDB TFRecord files."""
    bucket_boundaries = [140, 200, 300, 400, 500]
    return create_sequence_dataset_predict_input_fn(
        dataset_dir, prepare, RECORD_FILE_NAME_FORMAT, META_DATA_FILENAME_FORMAT,
        bucket_boundaries=bucket_boundaries)
| [
"mouradmourafiq@gmail.com"
] | mouradmourafiq@gmail.com |
d345272523d8337ac757fdf062c2baac998b6531 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/2946.py | 87758d9e393a1e5e783c705de41a6ed22abb34c8 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | numbers = []
t = int(raw_input())  # number of test cases (Python 2 input)
for i in xrange(1, t + 1):
    # Work on the digits of the input number, with a 0 sentinel prepended so
    # the first real digit always has a left neighbour to compare against.
    numbers = [int(s) for s in list(raw_input())]
    numbers = [0] + numbers
    while True :
        change = False
        # Find the first "descent" (a digit smaller than its predecessor):
        # decrement the predecessor and set every later digit to 9, then
        # rescan, until the digits are non-decreasing.
        for j in xrange(1, len(numbers)):
            if (numbers[j] < numbers[j-1]):
                numbers[j-1] = numbers[j-1] -1
                change = True
                for y in xrange(j,len(numbers)) :
                    numbers[y] = 9
                break ;
        if change == False:
            break
    # Drop leading zeros (the sentinel, plus any produced by decrementing).
    while(numbers[0] == 0) : del numbers[0]
    s ="".join(map(str, numbers))
    print "Case #{}: {}".format( i, s)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
bdd20ca186269d2759e9ecf5ae8ac0a444d8f194 | cd9c2c6df0567bd7be18909f4150a26a45c8a4f7 | /utils/rpi3/setup-system.py | cce2a6faf5e0796d16d7210a85c82987f73b6111 | [
"Apache-2.0"
] | permissive | zeta1999/mjmech | d98fd506e6d7e799953a328ebe5db06e379591cb | 9e44f82849afc35180d4bda22282dba1cde42be0 | refs/heads/master | 2022-04-19T19:24:44.638122 | 2020-04-16T16:41:54 | 2020-04-16T16:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,530 | py | #!/usr/bin/python3 -B
# Copyright 2018 Josh Pieper. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sets up a raspberry pi 3b+ for use as an mjmech computer.
It is intended to be run as root, like:
sudo ./setup-system.py
"""
import os
import shlex
import shutil
import subprocess
import time
ORIG_SUFFIX = time.strftime(".orig-%Y%m%d-%H%M%S")
def run(*args, **kwargs):
    """Echo a shell command, then execute it via the shell.

    Raises subprocess.CalledProcessError when the command exits non-zero.
    """
    command = args[0]
    print('run: ' + command)
    subprocess.check_call(*args, shell=True, **kwargs)
def ensure_present(filename, line):
    '''Ensure the given line is present in the named file.

    Comparison ignores leading/trailing whitespace.  When the line is
    missing, a timestamped backup (ORIG_SUFFIX) of the original file is
    kept and the line is appended.
    '''
    # Read through a context manager so the handle is closed promptly
    # (the original left the open file object to the garbage collector).
    with open(filename, encoding='utf-8') as f:
        current_content = [x.strip() for x in f.readlines()]
    if line.strip() in current_content:
        # Yes, the line is already present there
        return
    shutil.copy(filename, filename + ORIG_SUFFIX)
    print('ensure_present({}): Adding: {}'.format(filename, line))
    # Nope, we need to add it.
    with open(filename, 'a', encoding='utf-8') as f:
        f.write(line + '\n')
def ensure_contents(filename, contents):
    '''Ensure the given file has exactly the given contents.

    No-op when the file already matches.  When an existing file is being
    replaced, a timestamped backup (ORIG_SUFFIX) is kept first.
    '''
    if os.path.exists(filename):
        with open(filename, encoding='utf-8') as f:
            existing = f.read()
        if existing == contents:
            return
        # Back up the old version before overwriting it.  (The original
        # called shutil.copy unconditionally, which raised
        # FileNotFoundError when the target file did not exist yet.)
        shutil.copy(filename, filename + ORIG_SUFFIX)
    print('ensure_contents({}): Updating'.format(filename))
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(contents)
def set_config_var(name, value, config_path='/boot/config.txt'):
    '''Set the given variable in a config file (default /boot/config.txt).

    If the file already contains exactly ``name=value`` as its only
    ``name=`` entry, it is left untouched.  Otherwise every existing
    ``name=...`` line is removed, ``name=value`` is appended, and a
    timestamped backup (ORIG_SUFFIX) of the original file is kept.

    The ``config_path`` parameter is new but defaults to the previously
    hard-coded path, so existing callers are unaffected.
    '''
    with open(config_path, encoding='utf-8') as f:
        contents = f.readlines()
    new_value = '{}={}'.format(name, value)
    prefix = '{}='.format(name)
    maybe_value = [x for x in contents if x.startswith(prefix)]
    # Exactly one entry and it already holds the requested value -> done.
    if len(maybe_value) == 1 and maybe_value[0].strip() == new_value:
        return
    new_contents = ([x for x in contents if not x.startswith(prefix)] +
                    [new_value + '\n'])
    shutil.copy(config_path, config_path + ORIG_SUFFIX)
    print('set_config_var({})={}'.format(name, value))
    with open(config_path, 'w', encoding='utf-8') as f:
        f.write(''.join(new_contents))
def main():
    """Configure a stock Raspberry Pi 3B+ as an mjmech robot computer.

    Installs serial/AP tooling, enables camera/SSH/UART via raspi-config,
    pins static addresses on eth0/wlan0 and turns wlan0 into a WPA2 access
    point via hostapd + dnsmasq.  Must run as root; the ensure_*/set_*
    helpers keep timestamped backups of any file they modify.
    """
    if os.getuid() != 0:
        raise RuntimeError('must be run as root')
    # Some useful utilities
    run('apt install --yes socat setserial')
    # Things necessary to be an AP
    run('apt install --yes hostapd dnsmasq')
    # P1 Camera - Yes
    run('raspi-config nonint do_camera 0')
    # P2 SSH - Yes
    run('raspi-config nonint do_ssh 0')
    # P6 Serial
    # Login shell - No
    # Serial enabled - Yes
    #
    # NOTE: The version of raspi-config we have now doesn't support
    # enabling the UART from noninteractive mode.
    run('raspi-config nonint do_serial 1')
    # This we have to manually enable the UART once it is done.
    set_config_var('enable_uart', '1')
    # Switch to use the PL011 UART
    # https://www.raspberrypi.org/documentation/configuration/uart.md
    ensure_present('/boot/config.txt', 'dtoverlay=pi3-disable-bt')
    # Static network layout: wired eth0 on 192.168.17.0/24, wlan0 (the AP
    # side) on 192.168.16.0/24, each with a multicast route for telemetry.
    ensure_contents('/etc/network/interfaces',
                    '''
# interfaces(5) file used by ifup(8) and ifdown(8)
# Please note that this file is written to be used with dhcpcd
# For static IP, consult /etc/dhcpcd.conf and 'man dhcpcd.conf'
# Include files from /etc/network/interfaces.d:
source-directory /etc/network/interfaces.d
auto eth0
iface eth0 inet static
address 192.168.17.47
netmask 255.255.255.0
network 192.168.17.0
broadcast 192.168.17.255
# gateway 192.168.17.1
dns-nameservers 8.8.8.8 8.8.4.4
post-up ip route add 239.89.108.0/24 dev eth0
allow-hotplug wlan0
iface wlan0 inet static
address 192.168.16.47
netmask 255.255.255.0
network 192.168.16.0
broadcast 192.168.16.255
post-up iw dev wlan0 set power_save off || true
post-up iw dev wlan0 set retry long 1 || true
post-up ip route add 239.89.108.0/24 dev wlan0
''')
    # hostapd: 5 GHz 802.11ac access point "MjMech" with WPA2-PSK.
    ensure_contents('/etc/hostapd/hostapd.conf',
                    '''
country_code=US
interface=wlan0
driver=nl80211
ssid=MjMech
hw_mode=a
ieee80211n=1
require_ht=1
ieee80211ac=1
require_vht=1
ieee80211d=1
ieee80211h=0
ht_capab=[HT40+][SHORT-GI-20][DSSS_CK-40][MAX-AMSDU-3839]
vht_capab=[MAX-MDPU-3895][SHORT-GI-80][SU-BEAMFORMEE]
vht_oper_chwidth=1
channel=36
vht_oper_centr_freq_seg0_idx=42
wmm_enabled=0
macaddr_acl=0
auth_algs=1
ignore_broadcast_ssid=0
wpa=2
wpa_passphrase=WalkingRobots
wpa_key_mgmt=WPA-PSK
wpa_pairwise=TKIP
rsn_pairwise=CCMP
''')
    ensure_present('/etc/default/hostapd',
                   'DAEMON_CONF="/etc/hostapd/hostapd.conf"')
    # dnsmasq hands out DHCP leases to clients joining the wlan0 AP subnet.
    ensure_contents('/etc/dnsmasq.conf',
                    '''
interface=wlan0
dhcp-range=192.168.16.100,192.168.16.150,255.255.255.0,24h
''')
    # Keep dhcpcd off wlan0 so it does not fight the static AP address.
    ensure_present('/etc/dhcpcd.conf', 'denyinterfaces wlan0')
    run('systemctl unmask hostapd')
    run('systemctl enable hostapd')
    run('systemctl start hostapd')
if __name__ == '__main__':
main()
| [
"jjp@pobox.com"
] | jjp@pobox.com |
66e70df8459191ede0a0d58d871bb1cceccfeaa2 | e29734c2b3543a05a28b6bc460c3248ea37aaf5c | /apps/course/migrations/0012_auto_20190417_1723.py | 3f626f2c421f9e72f9bb96824deeb14fd09983c7 | [] | no_license | simida0755/PopularBlogs | fda6dbe06751dde013ba57f73c708fd7106a49ee | 3a86989232206d0727223306c0e2d2c62d35fa9b | refs/heads/master | 2020-05-21T15:54:09.853341 | 2019-05-13T02:15:28 | 2019-05-13T02:15:28 | 186,101,555 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # Generated by Django 2.0.2 on 2019-04-17 17:23
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames the BlogPosts field
    # `Like_nums` to the conventionally lowercase `like_nums`.
    dependencies = [
        ('course', '0011_auto_20190416_1459'),
    ]
    operations = [
        migrations.RenameField(
            model_name='blogposts',
            old_name='Like_nums',
            new_name='like_nums',
        ),
    ]
| [
"simida027@163.com"
] | simida027@163.com |
7aefc106ad42e43fe1b6ff523e1dac4fc64c01d2 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_fume.py | ce796bac3558af33ff267419b8d051aff846cfaa | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py |
# class header
class _FUME():
    """Auto-generated lexicon entry for the verb "FUME"."""

    def __init__(self,):
        self.name = "FUME"
        self.definitions = [u'to be very angry, sometimes without expressing it: ']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'verbs'

    def run(self, obj1=None, obj2=None):
        """Return this entry's JSON payload.

        obj1/obj2 are accepted for interface compatibility but unused.
        The original used mutable default arguments ([]); replaced with
        None to avoid the shared-default-list pitfall -- behaviour is
        unchanged because the arguments are never read.
        """
        return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
df5c7b8d6d7517e53f42e5a9d2b47dc530a7a8e4 | aedf65a662083d82fd2ef021a883dd842961d445 | /webapp/linkstop/apps/accounts/admin.py | bb93b6b45bbaaaf78846834daee1847aa3489c47 | [] | no_license | hercules261188/Locidesktop | b95f9f4dd709d33f21b7b9f43d52e3b76c99912b | cab3a3bda807780244e4e5ce9c3745b6d04ddbc9 | refs/heads/master | 2021-12-02T15:12:47.876242 | 2011-01-10T09:21:27 | 2011-01-10T09:21:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from django.contrib import admin
from models import *
class ProfileAdmin(admin.ModelAdmin):
    # Admin change-list for profiles: show only the linked user.
    list_display = ('user',)
admin.site.register(Profile, ProfileAdmin)
class InviteAdmin(admin.ModelAdmin):
    # Admin change-list for invites: show the code and remaining uses.
    list_display = ('code', 'uses_remaining')
admin.site.register(Invite, InviteAdmin)
| [
"willmcgugan@gmail.com"
] | willmcgugan@gmail.com |
9b6387e95c7865363144a250492269e8afc55560 | af4d559792c4255d5f26bc078cd176b70c0e643f | /hpsklearn/components/compose/_target.py | 47122eee5792a436fd2494fc78ae7673a5121e0f | [
"BSD-3-Clause"
] | permissive | hyperopt/hyperopt-sklearn | ec7d5f97ba8fd5a2c283dfec2fa9e0170b61c6ce | 4b3f6fde3a1ded2e71e8373d52c1b51a0239ef91 | refs/heads/master | 2023-08-02T07:19:20.259964 | 2022-12-15T17:53:07 | 2022-12-15T17:53:07 | 8,293,893 | 1,480 | 292 | NOASSERTION | 2022-12-15T17:53:08 | 2013-02-19T16:09:53 | Python | UTF-8 | Python | false | false | 1,423 | py | from hyperopt.pyll import scope
from sklearn import compose
@scope.define
def sklearn_TransformedTargetRegressor(*args, **kwargs):
    # Registered with hyperopt's pyll `scope` so it can appear as a node in
    # a search-space graph; forwards all arguments to the sklearn estimator.
    return compose.TransformedTargetRegressor(*args, **kwargs)
def transformed_target_regressor(name: str,
                                 regressor: object = None,
                                 transformer: object = None,
                                 func: callable = None,
                                 inverse_func: callable = None,
                                 check_inverse: bool = True):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.compose.TransformedTargetRegressor model.

    Args:
        name: name | str
        regressor: regressor object | object
        transformer: estimator object | object
        func: function to apply to `y` before fit | callable
        inverse_func: function to apply to prediction | callable
        check_inverse: check whether inverse leads to original targets | bool
    """
    def _name(msg):
        # Namespacing helper for hyperparameter labels; kept for parity with
        # the other component factories (unused while no hp.* choices exist).
        return f"{name}.transformed_target_regressor_{msg}"

    # TODO: Try implementing np.exp and np.log | np.sqrt and np.square combinations
    return scope.sklearn_TransformedTargetRegressor(
        regressor=regressor,
        transformer=transformer,
        func=func,
        inverse_func=inverse_func,
        check_inverse=check_inverse,
    )
| [
"38689620+mandjevant@users.noreply.github.com"
] | 38689620+mandjevant@users.noreply.github.com |
28b398292b9bf6064d42889b0c2063af30c40fc2 | 58c3dd075b87ec882ccbce27d81928ea5dd46223 | /presentation/views/user/stories_views.py | ee43e98b33a2556704a7c743fbe22155926874aa | [] | no_license | panuta/storypresso | 706328652f583f7d3574eccca2bc885c49a9d0b9 | ef0fecacbd67b09c8ab64d842709344fa6e2773d | refs/heads/master | 2021-01-02T06:32:05.443474 | 2013-04-11T12:47:55 | 2013-04-11T12:47:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,890 | py | # -*- encoding: utf-8 -*-
import urllib2
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from common.shortcuts import response_json_success
from common.utilities import clean_content
from domain.models import Story, StoryEditingContent, EditingStory, StoryContent
from presentation.exceptions import PublishingException
from presentation.forms import WriteStoryForm, PublishStoryForm
@login_required
def view_my_stories(request, showing_stories):
    """List the current user's stories, optionally filtered by draft state."""
    extra_filters = {
        'all': {},
        'draft': {'is_draft': True},
        'published': {'is_draft': False},
    }
    if showing_stories not in extra_filters:
        raise Http404
    stories = Story.objects.filter(created_by=request.user,
                                   **extra_filters[showing_stories])
    return render(request, 'user/stories.html', {'stories': stories, 'showing_stories': showing_stories})
@login_required
def write_my_story(request, story_uid):
if not story_uid:
story = None
story_content = None
else:
story = get_object_or_404(Story, uid=story_uid)
if story.created_by.id != request.user.id:
raise Http404
story_content, created = StoryContent.objects.get_or_create(story=story)
if request.method == 'POST':
form = WriteStoryForm(request.POST)
if form.is_valid():
uid = form.cleaned_data['uid']
if story and story.uid != uid:
raise Http404
if not story:
try:
story = Story.objects.get(uid=uid) # Maybe this story is already saved via autosave
story_content = story.content
except Story.DoesNotExist:
story = Story.objects.create(uid=uid, created_by=request.user)
story_content = StoryContent.objects.create(story=story)
story.title = form.cleaned_data['title'].strip()
story.save()
story_content.body = clean_content(form.cleaned_data['body'])
story_content.save()
try:
editing_story = EditingStory.objects.get(story=story).delete()
StoryEditingContent.objects.filter(editing_story=editing_story).delete()
except EditingStory.DoesNotExist:
pass
submit_method = request.POST.get('submit')
if submit_method == 'publish':
try:
story.is_ready_to_publish()
except PublishingException, e:
messages.error(request, e.message)
return redirect('write_my_story', story.uid)
return redirect('publishing_my_story_excerpt', story.uid)
else:
messages.success(request, u'บันทึกข้อมูลเรียบร้อย')
return redirect('write_my_story', story.uid)
if not story:
story = Story(uid=request.POST.get('uid'), created_by=request.user)
else:
if not story:
# Pre-generate uuid for Story using when autosave
story = Story(uid=Story.objects.generate_uuid(), created_by=request.user)
story_content = StoryContent(story=story)
form = WriteStoryForm(initial={
'uid': story.uid,
'title': story.title,
'body': story_content.body,
})
return render(request, 'user/story_write.html', {'story': story, 'form': form})
@login_required
def publishing_my_story_excerpt(request, story_uid):
    """First step of the publishing flow: excerpt/category/pricing form."""
    story = get_object_or_404(Story, uid=story_uid)
    # Only the author may publish his/her own story.
    if story.created_by.id != request.user.id:
        raise Http404

    if request.method == 'POST':
        form = PublishStoryForm(request.POST)
        if form.is_valid():
            # NOTE(review): a valid POST currently does nothing -- the save /
            # next-step logic appears to be unimplemented. TODO confirm.
            pass
    else:
        form = PublishStoryForm(initial={
            'excerpt': story.excerpt,
            'category': story.primary_category,
            'title': story.title,
            'summary': story.summary,
            'price': story.price,
        })

    return render(request, 'user/story_publish/story_publish_excerpt.html', {'story': story, 'form': form})
@login_required
def publishing_my_story_details(request, story_uid):
    """Second step of the publishing flow: the details page."""
    story = get_object_or_404(Story, uid=story_uid)
    if request.user.id != story.created_by.id:
        raise Http404
    context = {'story': story}
    return render(request, 'user/story_publish/story_publish_details.html', context)
@login_required
def publishing_my_story_confirm(request, story_uid):
    """Final step of the publishing flow: the confirmation page."""
    story = get_object_or_404(Story, uid=story_uid)
    # Only the author may publish his/her own story.
    if story.created_by.id != request.user.id:
        raise Http404
    return render(request, 'user/story_publish/story_publish_confirm.html', {'story': story})
@login_required
def edit_my_story_general(request, story_uid):
    """Edit general (non-content) attributes of a story.

    NOTE(review): both the POST and GET branches are unimplemented stubs;
    the view only renders the template.
    """
    story = get_object_or_404(Story, uid=story_uid)
    if story.created_by.id != request.user.id:
        raise Http404

    if request.method == 'POST':
        pass
    else:
        pass

    return render(request, 'user/story_edit_general.html', {'story': story})
@login_required
def edit_my_story_content(request, story_uid):
    """Edit the title and body of an existing story (draft or published)."""
    story = get_object_or_404(Story, uid=story_uid)
    # Only the author may edit his/her own story.
    if story.created_by.id != request.user.id:
        raise Http404

    if request.method == 'POST':
        form = WriteStoryForm(request.POST)
        if form.is_valid():
            story.title = form.cleaned_data['title']
            story.save()

            # Sanitize user-submitted HTML before persisting it.
            story.content.body = clean_content(form.cleaned_data['body'])
            story.content.save()

            submit_method = request.POST.get('submit')
            if submit_method == 'publish':
                if not story.is_draft:
                    messages.warning(request, u'ผลงานนี้ถูกเผยแพร่ไปก่อนหน้านี้แล้ว')
                    return redirect('publish_my_story', story.uid)
                try:
                    story.is_ready_to_publish()
                except PublishingException, e:
                    messages.error(request, e.message)
                    return redirect('edit_my_story_content', story.uid)
                # NOTE(review): redirects to 'publish_my_story' here, while
                # write_my_story redirects to 'publishing_my_story_excerpt' --
                # confirm which URL name is current.
                return redirect('publish_my_story', story.uid)
            else:  # DRAFT, SAVE
                messages.success(request, u'บันทึกข้อมูลเรียบร้อย')
                return redirect('edit_my_story_content', story.uid)
    else:
        form = WriteStoryForm(initial={
            'uid': story.uid,
            'title': story.title,
            'body': story.content.body,
        })

    return render(request, 'user/story_edit_content.html', {'story': story, 'form': form})
@csrf_exempt
@require_POST
@login_required
def ajax_autosave_editing_story(request, story_uid):
    """Autosave endpoint: stores the in-progress body in an editing copy.

    Creates the Story on first autosave so the uid pre-generated by the
    editor page can be referenced before the user explicitly saves.
    """
    try:
        story = Story.objects.get(uid=story_uid)
        if story.created_by.id != request.user.id:
            raise Http404
    except Story.DoesNotExist:
        story = Story.objects.create(uid=story_uid, is_draft=True, created_by=request.user)

    editing_story, created = EditingStory.objects.get_or_create(story=story)
    story_editing_content, created = StoryEditingContent.objects.get_or_create(editing_story=editing_story)

    content = request.POST.get('id_body')
    if content:
        # Body arrives percent-encoded from the client-side autosave call.
        content = urllib2.unquote(content).decode("utf8")

    # NOTE(review): when 'id_body' is absent this assigns None to body --
    # confirm that is the intended "cleared draft" behavior.
    story_editing_content.body = content
    story_editing_content.save()

    return response_json_success()
def ajax_upload_image_editing_story(request):
    # NOTE(review): unimplemented stub -- returns None, not an HttpResponse.
    pass
def ajax_recent_image_editing_story(request):
    # NOTE(review): unimplemented stub -- returns None, not an HttpResponse.
    pass
"panuta@gmail.com"
] | panuta@gmail.com |
344de7f08b6061681f6b255140a70d46b64199c1 | 2416a6bde05651717f99dd45c5116cd13a7a38f7 | /docs/source/clear_docs.py | 3514db0143d1ddae01c133c04f99d3a02fbd37f2 | [
"BSD-3-Clause"
] | permissive | modulexcite/nbgrader | b1080a61d0ba5252c16c3d7471e13a40ce659624 | 1748c40f47b69c81e19068d4ba3c3205af72317d | refs/heads/master | 2021-01-20T21:40:48.115091 | 2015-08-13T01:57:08 | 2015-08-13T01:57:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,105 | py | import os
import sys
import subprocess as sp
from copy import deepcopy
try:
from IPython.nbformat import read, write
from IPython.nbconvert.preprocessors import ClearOutputPreprocessor
except ImportError:
print("Warning: IPython could not be imported, some tasks may not work")
def run(cmd):
    """Echo *cmd*, run it, and abort the program if it fails.

    stderr is merged into stdout; the combined output is only printed when
    the command exits non-zero, after which the process exits with status 1.
    """
    print(" ".join(cmd))
    process = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.STDOUT)
    captured, _ = process.communicate()
    returncode = process.poll()
    if returncode != 0:
        print(captured.decode())
        print("Command exited with code: {}".format(returncode))
        sys.exit(1)
def _check_if_directory_in_path(pth, target):
while pth not in ('', '/'):
pth, dirname = os.path.split(pth)
if dirname == target:
return True
return False
def clear_notebooks(root):
    """Clear the outputs of documentation notebooks."""
    # cleanup ignored files
    run(['git', 'clean', '-fdX', root])

    print("Clearing outputs of notebooks in '{}'...".format(os.path.abspath(root)))
    preprocessor = ClearOutputPreprocessor()

    for dirpath, dirnames, filenames in os.walk(root):
        # Notebooks under a 'submitted' directory keep their outputs.
        is_submitted = _check_if_directory_in_path(dirpath, 'submitted')

        for filename in sorted(filenames):
            if os.path.splitext(filename)[1] == '.ipynb':
                # read in the notebook (nbformat version 4)
                pth = os.path.join(dirpath, filename)
                with open(pth, 'r') as fh:
                    orig_nb = read(fh, 4)

                # copy the original notebook
                new_nb = deepcopy(orig_nb)

                # check outputs of all the cells
                if not is_submitted:
                    new_nb = preprocessor.preprocess(new_nb, {})[0]

                # clear metadata
                new_nb.metadata = {}

                # write the notebook back to disk (rewritten even when
                # unchanged; the message below is only shown on a change)
                with open(pth, 'w') as fh:
                    write(new_nb, fh, 4)

                if orig_nb != new_nb:
                    print("Cleared '{}'".format(pth))
if __name__ == "__main__":
    # Clear every notebook under the directory containing this script.
    root = os.path.abspath(os.path.dirname(__file__))
    clear_notebooks(root)
| [
"jhamrick@berkeley.edu"
] | jhamrick@berkeley.edu |
1ec1ad8fe95f163e80301f541560976b55f7b599 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_viewed.py | f9f11fcee4bddc5beedae71b386206b85ee7bebc | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
#calss header
class _VIEWED():
def __init__(self,):
self.name = "VIEWED"
self.definitions = view
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['view']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
f6fdf258262e61f9b025e4447b78e2845eb335c1 | 0d86bb399a13152cd05e3ba5684e4cb22daeb247 | /python-basics/unit12-modules/py119_from_import.py | d62586d3be78b54a6b3a96b8e4acc2e3c31b4925 | [] | no_license | tazbingor/learning-python2.7 | abf73f59165e09fb19b5dc270b77324ea00b047e | f08c3bce60799df4f573169fcdb1a908dcb8810f | refs/heads/master | 2021-09-06T05:03:59.206563 | 2018-02-02T15:22:45 | 2018-02-02T15:22:45 | 108,609,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18/1/4 下午10:06
# @Author : Aries
# @Site :
# @File : py119_from_import.py
# @Software: PyCharm
'''
from .. import 语句
多行导入
'''
from Tkinter import Tk, Frame, Button, Entry, \
Canvas, Text, LEFT
'''
PEP 8风格
加入括号更易读
'''
from Tkinter import (Tk, Frame, Button, Entry, Canvas, Text, LEFT)
| [
"852353298@qq.com"
] | 852353298@qq.com |
f591f2f5000f55dde12dac4747b15d2cd0031f76 | 0809ea2739d901b095d896e01baa9672f3138825 | /ORMproject1_ver_2/ORMproject1_ver_2/settings.py | db3c8140ebb48988e3492dcf972169f97a75ed03 | [] | no_license | Gagangithub1988/djangoprojects | dd001f2184e78be2fb269dbfdc8e3be1dd71ce43 | ea236f0e4172fbf0f71a99aed05ed7c7b38018e2 | refs/heads/master | 2022-11-15T23:46:46.134247 | 2020-07-15T06:37:51 | 2020-07-15T06:37:51 | 273,479,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,275 | py | """
Django settings for ORMproject1_ver_2 project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and
# load it from an environment variable before any production deployment.
SECRET_KEY = '+m!bbg$#saf!qi#4ct@*n8(x0zc7eh^j_j^2)^gyhni9xyv&0!'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testApp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ORMproject1_ver_2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ORMproject1_ver_2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'orm2',
        # NOTE(review): database credentials are hard-coded; prefer reading
        # them from environment variables or a secrets store.
        'USER': 'postgres',
        'PASSWORD': 'sql2020',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"djangopython1988@gmail.com"
] | djangopython1988@gmail.com |
35659b938f37e4feeafc513cf7fc00b3e2457ce9 | 10d98fecb882d4c84595364f715f4e8b8309a66f | /tunas/schema_io_test.py | ab7f6ca4f294916dca7c1d63ac50e89c1bc14eeb | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 7,843 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for schema_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
from absl.testing import absltest
from absl.testing import parameterized
import six
from tunas import schema
from tunas import schema_io
# First style of decorator use: decorating a namedtuple subclass.
@schema_io.register_namedtuple(
    'schema_io_test.NamedTuple1',
    deprecated_names=['schema_io_test.DeprecatedNamedTuple1'])
class NamedTuple1(collections.namedtuple('NamedTuple1', ['foo'])):
  """Registered under a primary name plus one deprecated alias."""
  pass
# Second style of decorator use: registering a raw namedtuple.
NamedTuple2 = collections.namedtuple('NamedTuple2', ['bar'])
# Register the plain namedtuple by invoking the decorator directly.
schema_io.register_namedtuple('schema_io_test.NamedTuple2')(NamedTuple2)
# Decorator with default argument values.
@schema_io.register_namedtuple(
    'schema_io_test.NamedTuple3',
    deprecated_names=['schema_io_test.DeprecatedNamedTuple3'],
    defaults={'foo': 3, 'bar': 'hi'})
class NamedTuple3(collections.namedtuple('NamedTuple3', ['foo', 'bar'])):
  """Registered with per-field default values used during deserialization."""
  pass
class SchemaIoTest(parameterized.TestCase):
  """Tests for schema_io namedtuple registration and (de)serialization."""

  def test_register_namedtuple_exceptions(self):
    with self.assertRaisesRegex(ValueError, 'Duplicate name'):
      cls = collections.namedtuple('namedtuple3', ['baz'])
      schema_io.register_namedtuple('schema_io_test.NamedTuple1')(cls)
    with self.assertRaisesRegex(ValueError, 'Duplicate class'):
      schema_io.register_namedtuple('NewNameForTheSameClass')(NamedTuple1)
    with self.assertRaisesRegex(ValueError, 'not a namedtuple'):
      schema_io.register_namedtuple('NotANamedTuple')(dict)

  def test_namedtuple_class_to_name(self):
    self.assertEqual(
        schema_io.namedtuple_class_to_name(NamedTuple1),
        'schema_io_test.NamedTuple1')
    self.assertEqual(
        schema_io.namedtuple_class_to_name(NamedTuple2),
        'schema_io_test.NamedTuple2')

  def test_namedtuple_class_to_name_not_registered(self):
    cls = collections.namedtuple('cls', ['x'])
    with self.assertRaisesRegex(
        KeyError, 'Namedtuple class .* is not registered'):
      schema_io.namedtuple_class_to_name(cls)

  def test_namedtuple_name_to_class_not_registered(self):
    with self.assertRaisesRegex(
        KeyError, 'Namedtuple name \'blahblah\' is not registered'):
      schema_io.namedtuple_name_to_class('blahblah')

  def test_namedtuple_name_to_class(self):
    self.assertEqual(
        schema_io.namedtuple_name_to_class('schema_io_test.NamedTuple1'),
        NamedTuple1)
    self.assertEqual(
        schema_io.namedtuple_name_to_class('schema_io_test.NamedTuple2'),
        NamedTuple2)

  def test_namedtuple_deprecated_name_to_class(self):
    self.assertEqual(
        schema_io.namedtuple_name_to_class(
            'schema_io_test.DeprecatedNamedTuple1'),
        NamedTuple1)

  def _run_serialization_test(self,
                              structure,
                              expected_type=None):
    """Convert the structure to serialized JSON, then back to a string."""
    expected_value = copy.deepcopy(structure)
    serialized = schema_io.serialize(structure)
    self.assertIsInstance(serialized, six.string_types)
    restored = schema_io.deserialize(serialized)
    self.assertEqual(restored, expected_value)
    if expected_type is not None:
      self.assertIsInstance(restored, expected_type)

  def test_serialization_with_simple_structures(self):
    # Primitives.
    self._run_serialization_test(None)
    self._run_serialization_test(1)
    self._run_serialization_test(0.5)
    self._run_serialization_test(1.0)
    self._run_serialization_test('foo')
    # Lists and tuples.
    self._run_serialization_test([1, 2, 3])
    self._run_serialization_test((1, 2, 3))
    # Dictionaries.
    self._run_serialization_test({'a': 3, 'b': 4})
    self._run_serialization_test({10: 'x', 20: 'y'})
    self._run_serialization_test({(1, 2): 'x', (3, 4): 'y'})
    # Namedtuples
    self._run_serialization_test(NamedTuple1(42), expected_type=NamedTuple1)
    self._run_serialization_test(NamedTuple2(12345), expected_type=NamedTuple2)
    # OneOf nodes.
    self._run_serialization_test(schema.OneOf((1, 2, 3), 'tag'))

  def test_namedtuple_deserialization_with_deprecated_names(self):
    restored = schema_io.deserialize(
        '["namedtuple:schema_io_test.DeprecatedNamedTuple1",["foo",51]]')
    self.assertEqual(restored, NamedTuple1(51))
    self.assertIsInstance(restored, NamedTuple1)

  def test_serialization_with_nested_structures(self):
    """Verify that to_json and from_json are recursively called on children."""
    # Lists and tuples
    self._run_serialization_test((((1,),),))
    self._run_serialization_test([[[1]]])
    # Dictionaries.
    self._run_serialization_test({'a': {'b': {'c': {'d': 'e'}}}})
    # Namedtuples
    self._run_serialization_test(NamedTuple1(NamedTuple2(NamedTuple1(42))))
    # OneOf nodes
    self._run_serialization_test(
        schema.OneOf((
            schema.OneOf((
                schema.OneOf((1, 2, 3), 'innermost'),
            ), 'inner'),
        ), 'outer'))
    # Composite data structure containing many different types.
    self._run_serialization_test(
        {'a': NamedTuple1([(schema.OneOf([{'b': 3}], 't'),)])})

  def test_serialization_with_bad_type(self):
    with self.assertRaisesRegex(ValueError, 'Unrecognized type'):
      schema_io.serialize(object())

  def test_deserialization_defaults(self):
    # NamedTuple1 accepts one argument: foo. It has no default value.
    # NamedTuple3 accepts two arguments: foo and bar. Both have default values.

    # Use default arguments for both foo and bar.
    value = schema_io.deserialize(
        """["namedtuple:schema_io_test.NamedTuple3"]""")
    self.assertEqual(value, NamedTuple3(foo=3, bar='hi'))

    # Use default argument for bar only.
    value = schema_io.deserialize(
        """["namedtuple:schema_io_test.NamedTuple3", ["foo", 42]]""")
    self.assertEqual(value, NamedTuple3(foo=42, bar='hi'))

    # Use default argument for foo only.
    value = schema_io.deserialize(
        """["namedtuple:schema_io_test.NamedTuple3", ["bar", "bye"]]""")
    self.assertEqual(value, NamedTuple3(foo=3, bar='bye'))

    # Don't use any default arguments.
    value = schema_io.deserialize(
        """["namedtuple:schema_io_test.NamedTuple3",
            ["foo", 9], ["bar", "x"]]""")
    self.assertEqual(value, NamedTuple3(foo=9, bar='x'))

    # Default values should also work when we refer to a namedtuple by a
    # deprecated name.
    value = schema_io.deserialize(
        """["namedtuple:schema_io_test.DeprecatedNamedTuple3"]""")
    self.assertEqual(value, NamedTuple3(foo=3, bar='hi'))

    # Serialized value references a field that doesn't exist in the namedtuple.
    with self.assertRaisesRegex(ValueError, 'Invalid field: baz'):
      schema_io.deserialize(
          """["namedtuple:schema_io_test.NamedTuple3", ["baz", 10]]""")

    # Serialized value is missing a field that should exist in the namedtuple.
    with self.assertRaisesRegex(ValueError, 'Missing field: foo'):
      schema_io.deserialize("""["namedtuple:schema_io_test.NamedTuple1"]""")
if __name__ == '__main__':
  # Run the tests under absl's test runner.
  absltest.main()
"copybara-worker@google.com"
] | copybara-worker@google.com |
3cbe84dc02307edb834d241e25626fe9c1b861d0 | e9a5033ac69ef690602eb1206217dac6d09c1e63 | /netharn/util/util_io.py | 74d56ad483aade482c02c454104a746a9cfed118 | [
"Apache-2.0"
] | permissive | Erotemic/netharn | ba030df2c9d79fe0b392f8823bc2819383d8756f | bc4a6d75445c949e709e5ab903ba72813ec68b79 | refs/heads/master | 2021-05-26T05:05:24.931026 | 2020-08-27T00:13:34 | 2020-08-27T00:13:34 | 127,506,937 | 43 | 8 | Apache-2.0 | 2020-02-19T17:51:56 | 2018-03-31T06:49:03 | Python | UTF-8 | Python | false | false | 1,392 | py | # -*- coding: utf-8 -*-
"""
DEPRECATED
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
def read_h5arr(fpath):
    """Load the single array stored under key 'arr_0' in an HDF5 file."""
    import h5py
    with h5py.File(fpath, 'r') as hf:
        return hf['arr_0'][...]
def write_h5arr(fpath, arr):
    """Write *arr* to an HDF5 file under the key 'arr_0'."""
    import h5py
    with h5py.File(fpath, 'w') as hf:
        hf.create_dataset('arr_0', data=arr)
def read_arr(fpath):
    """Load an array from ``fpath``, dispatching on the file extension.

    Supports ``.npy`` (numpy) and ``.h5`` (h5py, key ``'arr_0'``); any
    other extension raises ``KeyError``.
    """
    if fpath.endswith('.h5'):
        return read_h5arr(fpath)
    elif fpath.endswith('.npy'):
        return np.load(fpath)
    else:
        raise KeyError(fpath)
def write_arr(fpath, arr):
    """Save ``arr`` to ``fpath``; the format is chosen by extension.

    Supports ``.npy`` (numpy) and ``.h5`` (h5py); any other extension
    raises ``KeyError``.
    """
    if fpath.endswith('.h5'):
        return write_h5arr(fpath, arr)
    elif fpath.endswith('.npy'):
        return np.save(fpath, arr)
    else:
        raise KeyError(fpath)
| [
"jon.crall@kitware.com"
] | jon.crall@kitware.com |
9e21f133bfc9f87dea5673c020cb62a394593f31 | ef243d91a1826b490e935fa3f3e6c29c3cc547d0 | /PyQt5/QtNetwork/QSslKey.py | c5246b49969c93f51c37ee8902d23b3a1e0668d5 | [] | no_license | VentiFang/Python_local_module | 6b3d0b22399e817057dfd15d647a14bb1e41980e | c44f55379eca2818b29732c2815480ee755ae3fb | refs/heads/master | 2020-11-29T11:24:54.932967 | 2019-12-25T12:57:14 | 2019-12-25T12:57:14 | 230,101,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,212 | py | # encoding: utf-8
# module PyQt5.QtNetwork
# from F:\Python\Python36\lib\site-packages\PyQt5\QtNetwork.pyd
# by generator 1.147
# no doc
# imports
import enum as __enum
import PyQt5.QtCore as __PyQt5_QtCore
import sip as __sip
# NOTE(review): auto-generated IDE stub for PyQt5.QtNetwork.QSslKey -- method
# bodies are placeholders and docstrings were recovered from __doc__.
class QSslKey(__sip.simplewrapper):
    """
    QSslKey()
    QSslKey(Union[QByteArray, bytes, bytearray], QSsl.KeyAlgorithm, encoding: QSsl.EncodingFormat = QSsl.Pem, type: QSsl.KeyType = QSsl.PrivateKey, passPhrase: Union[QByteArray, bytes, bytearray] = QByteArray())
    QSslKey(QIODevice, QSsl.KeyAlgorithm, encoding: QSsl.EncodingFormat = QSsl.Pem, type: QSsl.KeyType = QSsl.PrivateKey, passPhrase: Union[QByteArray, bytes, bytearray] = QByteArray())
    QSslKey(sip.voidptr, type: QSsl.KeyType = QSsl.PrivateKey)
    QSslKey(QSslKey)
    """
    def algorithm(self): # real signature unknown; restored from __doc__
        """ algorithm(self) -> QSsl.KeyAlgorithm """
        pass

    def clear(self): # real signature unknown; restored from __doc__
        """ clear(self) """
        pass

    def handle(self): # real signature unknown; restored from __doc__
        """ handle(self) -> sip.voidptr """
        pass

    def isNull(self): # real signature unknown; restored from __doc__
        """ isNull(self) -> bool """
        return False

    def length(self): # real signature unknown; restored from __doc__
        """ length(self) -> int """
        return 0

    def swap(self, QSslKey): # real signature unknown; restored from __doc__
        """ swap(self, QSslKey) """
        pass

    def toDer(self, passPhrase, QByteArray=None, bytes=None, bytearray=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """ toDer(self, passPhrase: Union[QByteArray, bytes, bytearray] = QByteArray()) -> QByteArray """
        pass

    def toPem(self, passPhrase, QByteArray=None, bytes=None, bytearray=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """ toPem(self, passPhrase: Union[QByteArray, bytes, bytearray] = QByteArray()) -> QByteArray """
        pass

    def type(self): # real signature unknown; restored from __doc__
        """ type(self) -> QSsl.KeyType """
        pass

    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass

    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass

    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass

    def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
        pass

    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass

    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass

    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass

    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """list of weak references to the object (if defined)"""

    # Defining __eq__ without __hash__ makes the class unhashable.
    __hash__ = None
"5149528+ventifang@user.noreply.gitee.com"
] | 5149528+ventifang@user.noreply.gitee.com |
e5b0008119735c1b9e21aa25ea5e1a98dd42cef7 | 2a4a17a67b9069c19396c0f8eabc8b7c4b6ff703 | /BGP3D/Chapter05/AdditionalCode/MarkerClass.py | 168ea6c1929f070d29038c045e9feff5ed2cc174 | [] | no_license | kaz101/panda-book | 0fa273cc2df5849507ecc949b4dde626241ffa5e | 859a759c769d9c2db0d11140b0d04506611c2b7b | refs/heads/master | 2022-12-19T09:36:05.794731 | 2020-09-16T19:04:10 | 2020-09-16T19:04:10 | 295,784,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,292 | py | '''
This class is used to define the markers placed along the track. These markers are
referenced by multiple parts of the game in order to understand the shape and position
of parts of the track.
'''
from pandac.PandaModules import Vec3
class Marker:
    """A track marker: a point along a lane that other game systems query
    for position, facing and adjacency information."""

    def __init__(self, pos):
        self.lane = 0   # numeric ID of the lane this marker belongs to
        self.index = 0  # numeric ID of the marker within its lane
        # Proxy NodePath representing the marker's position in space.
        self.np = render.attachNewNode("MarkerNP")
        self.np.setPos(pos.getX(), pos.getY(), pos.getZ())
        # Neighbouring markers within the same lane.
        self.nextMarker = None
        self.prevMarker = None
        # Markers adjacent to this one in other lanes.
        self.adjMarkers = []
        # Pre-allocated vectors reused by setFacing/checkInFront.
        self.facingVec = Vec3(0, 1, 0)
        self.cycleVec = Vec3(0, 0, 0)

    def getPos(self, ref = None):
        """Return the NodePath's position, optionally relative to *ref*."""
        return self.np.getPos() if ref == None else self.np.getPos(ref)

    def getHpr(self, ref = None):
        """Return the NodePath's heading/pitch/roll, optionally relative to *ref*."""
        return self.np.getHpr() if ref == None else self.np.getHpr(ref)

    def setFacing(self):
        """Rotate this marker to look directly at the next marker in the lane
        (yaw only: the look-at target keeps this marker's own Z)."""
        target = self.nextMarker.getPos()
        self.np.lookAt(target.getX(), target.getY(), self.np.getPos().getZ())

    def checkInFront(self, cycle):
        """Return True if *cycle* is in front of this marker, False if behind."""
        cyclePos = cycle.root.getPos(self.np)
        self.cycleVec.set(cyclePos.getX(), cyclePos.getY(), self.np.getZ())
        self.cycleVec.normalize()
        # Angle between the marker's facing and the direction to the cycle;
        # 90 degrees or less means the cycle is ahead of the marker.
        angle = self.facingVec.angleDeg(self.cycleVec)
        return angle <= 90

    def killMe(self):
        """Remove the marker's NodePath from the scene graph."""
        self.np.removeNode()
| [
"kaz101130@gmail.com"
] | kaz101130@gmail.com |
3c5bd952d9054dcd4e8a2ac805219cc5d33ea51d | 009f8f1f77ef4bdfd67999058d33bccd8e743654 | /leetcode/1470_shuffle_the_array.py | 378d8363f5c46ac30fe2cf886b70ecbc5fe218d2 | [
"MIT"
] | permissive | coocos/leetcode | 4bd5faa46e916af666db025c8182f1c30cd49ee3 | 007bbeb46fa4b32e1c92fc894edeb2100eb6ba21 | refs/heads/master | 2021-06-13T13:11:10.699941 | 2021-05-01T05:55:12 | 2021-05-01T07:17:52 | 192,551,170 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | import unittest
from typing import List
class Solution:
    """Interleave the two halves of a 2n-element array.

    Given nums = [x1..xn, y1..yn], return [x1, y1, x2, y2, ..., xn, yn].
    """

    def shuffle(self, nums: List[int], n: int) -> List[int]:
        # zip pairs each xi with its yi; the nested comprehension then
        # flattens the pairs in order.
        return [value
                for pair in zip(nums[:n], nums[n:])
                for value in pair]
class TestSolution(unittest.TestCase):
    """Unit test pinning the example from the problem statement."""

    def test_first_example(self):
        self.assertListEqual(
            Solution().shuffle([2, 5, 1, 3, 4, 7], 3), [2, 3, 5, 4, 1, 7]
        )
"1397804+coocos@users.noreply.github.com"
] | 1397804+coocos@users.noreply.github.com |
ade92b08072be4fbfd43b37ed4a6fcf0b292740d | 68f836bf5d9f849722c322e9207d842c766cac6f | /backend/project_notes.py | 90146bb70cf48ade623b4dd2778474b8661a07d0 | [
"MIT"
] | permissive | valmsmith39a/u-p2-trivia-api | 061f9fd216bb93f58a83b20413dcaacbf0f36239 | 0c4a66a97af13b76a55bac6e210566f1258ea9ba | refs/heads/main | 2023-07-16T02:40:42.623996 | 2021-08-28T22:41:46 | 2021-08-28T22:41:46 | 380,091,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,495 | py | """
@TODO: DONE: Set up CORS. Allow '*' for origins. Delete the sample route after completing the TODOs
"""
"""
@TODO: DONE: Use the after_request decorator to set Access-Control-Allow
"""
"""
@TODO: DONE
Create an endpoint to handle GET requests
for all available categories.
"""
"""
@TODO: DONE
Create an endpoint to handle GET requests for questions,
including pagination (every 10 questions).
This endpoint should return a list of questions,
number of total questions, current category, categories.
TEST: At this point, when you start the application
you should see questions and categories generated,
ten questions per page and pagination at the bottom of the screen for three pages.
Clicking on the page numbers should update the questions.
"""
"""
@TODO: DONE
Create an endpoint to DELETE question using a question ID.
TEST: When you click the trash icon next to a question, the question will be removed.
This removal will persist in the database and when you refresh the page.
"""
"""
@TODO: DONE
Create an endpoint to POST a new question,
which will require the question and answer text,
category, and difficulty score.
TEST: When you submit a question on the "Add" tab,
the form will clear and the question will appear at the end of the last page
of the questions list in the "List" tab.
"""
"""
@TODO: DONE
Create a POST endpoint to get questions based on a search term.
It should return any questions for whom the search term
is a substring of the question.
TEST: Search by any phrase. The questions list will update to include
only question that include that string within their question.
Try using the word "title" to start.
"""
"""
@TODO: DONE
Create a GET endpoint to get questions based on category.
TEST: In the "List" tab / main screen, clicking on one of the
categories in the left column will cause only questions of that
category to be shown.
"""
"""
@TODO: DONE
Create a POST endpoint to get questions to play the quiz.
This endpoint should take category and previous question parameters
and return a random questions within the given category,
if provided, and that is not one of the previous questions.
TEST: In the "Play" tab, after a user selects "All" or a category,
one question at a time is displayed, the user is allowed to answer
and shown whether they were correct or not.
"""
"""
@TODO:
Create error handlers for all expected errors
including 404 and 422.
"""
| [
"valmsmith39a@gmail.com"
] | valmsmith39a@gmail.com |
555460ca29e841d4a013d93d658324dae5e47e2a | 4d2238210813c1581bf44f64d8a63196f75d2df4 | /getspecialpath.py | 3c73e511700aad0decdddd01f448faddafea15e9 | [] | no_license | wwtang/code02 | b1600d34907404c81fa523cfdaa74db0021b8bb3 | 9f03dda7b339d8c310c8a735fc4f6d795b153801 | refs/heads/master | 2020-12-24T14:10:33.738734 | 2012-12-14T04:24:47 | 2012-12-14T04:24:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | # LAB(begin solution)
def get_special_paths(dirname):
"""Given a dirname, returns a list of all its special files."""
result = []
paths = os.listdir(dirname) # list of paths in that dir
for fname in paths:
match = re.search(r'__(\w+)__', fname)
if match:
result.append(os.path.abspath(os.path.join(dirname, fname)))
return result
| [
"andytang1994@gmail.com"
] | andytang1994@gmail.com |
9a2eecac8235d57fb18fba8bf51a48013a539f4d | f68732bc40a7a90c3a1082e4b3a4154518acafbb | /script/dbus/sessionBus/daemonLauncher/010_requestRemoveFromDesktop_01.py | 970c505fbf5f8c7eb2441038c3f4030b30cfba08 | [] | no_license | lizhouquan1017/dbus_demo | 94238a2307e44dabde9f4a4dd0cf8ec217260867 | af8442845e722b258a095e9a1afec9dddfb175bf | refs/heads/master | 2023-02-11T19:46:27.884936 | 2021-01-08T05:27:18 | 2021-01-08T05:27:18 | 327,162,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | # -*- coding: utf-8 -*-
# ***************************************************
# @Test Case ID: 010_requestRemoveFromDesktop_01
# @Test Description: 请求将指定id的程序桌面图标删除
# @Test Condition: 桌面图标不存在桌面
# @Test Step: 1.清除应用程序桌面图标
# 2.调用RequestRemoveFromDesktop接口,请求将指定id的程序桌面图标删除
# @Test Result: 2.报错
# @Test Remark:
# @Author: ut001627
# ***************************************************
import time
import pytest
from frame.base import OSBase
from aw.dbus.sessionBus.daemonLauncher import requestRemoveFromDesktop
class TestCase(OSBase):
def setUp(self):
self.app_id = 'dde-file-manager'
self.Step(f'步骤1:删除{self.app_id}桌面图标')
requestRemoveFromDesktop(app_id=self.app_id, ignore=True)
time.sleep(2)
@pytest.mark.public
def test_step(self):
self.Step(f'步骤2:调用调用RequestRemoveFromDesktop接口删除{self.app_id}桌面图标')
assert requestRemoveFromDesktop(app_id=self.app_id, is_exists=False)
time.sleep(2)
def tearDown(self):
pass
| [
"lizhouquan@uniontech.com"
] | lizhouquan@uniontech.com |
e176c56fefcc0010af610eee90e483989a3c41ca | da1721d2783ea4d67ff4e73cee6eee71292f2ef7 | /toontown/cogdominium/CogdoLevelMgr.py | 92a534dd21bec6683b43174708b853632a575be3 | [
"BSD-3-Clause"
] | permissive | open-toontown/open-toontown | bbdeb1b7bf0fb2861eba2df5483738c0112090ca | 464c2d45f60551c31397bd03561582804e760b4a | refs/heads/develop | 2023-07-07T01:34:31.959657 | 2023-05-30T23:49:10 | 2023-05-30T23:49:10 | 219,221,570 | 143 | 104 | BSD-3-Clause | 2023-09-11T09:52:34 | 2019-11-02T22:24:38 | Python | UTF-8 | Python | false | false | 174 | py | from otp.level import LevelMgr
from direct.showbase.PythonUtil import Functor
from toontown.toonbase import ToontownGlobals
class CogdoLevelMgr(LevelMgr.LevelMgr):
pass
| [
"jwcotejr@gmail.com"
] | jwcotejr@gmail.com |
eb86b016fc192ce35a1af4257388a6a1699682b9 | d58f26ef4bfacc50d32306f27ea9628214aa53aa | /panoptes_aggregation/tests/extractor_tests/test_survey_extractor.py | 0f35fc2c2c8c2e1ac6fe86d1d4946c98f49bcb26 | [] | no_license | miclaraia/python-reducers-for-caesar | 893073ce1551d953d82a59bd87dea3deffe5e6ae | f1e28992ae73e131fb400d4b400fdf8d4d597828 | refs/heads/master | 2021-01-01T18:51:04.763162 | 2017-07-25T14:04:06 | 2017-07-25T14:04:06 | 98,448,810 | 0 | 0 | null | 2017-07-26T18:01:03 | 2017-07-26T17:30:17 | Python | UTF-8 | Python | false | false | 1,955 | py | import unittest
import json
import flask
from panoptes_aggregation import extractors
class TestSurveyExtractor(unittest.TestCase):
def setUp(self):
self.classification = {
'annotations': [{
'task': 'T0',
'value': [
{
'choice': 'AGOUTI',
'answers': {'HOWMANY': '1'},
'filters': {}
}, {
'choice': 'PECCARYCOLLARED',
'answers': {'HOWMANY': '3'},
'filters': {}
}, {
'choice': 'NOTHINGHERE',
'answers': {},
'filters': {}
}
]
}]
}
self.expected = [
{
'choice': 'agouti',
'answers.howmany': {'1': 1}
},
{
'choice': 'peccarycollared',
'answers.howmany': {'3': 1}
},
{
'choice': 'nothinghere',
}
]
def test_extract(self):
result = extractors.survey_extractor.classification_to_extract(self.classification)
for i in range(len(result)):
with self.subTest(i=i):
self.assertDictEqual(result[i], self.expected[i])
def test_request(self):
request_kwargs = {
'data': json.dumps(self.classification),
'content_type': 'application/json'
}
app = flask.Flask(__name__)
with app.test_request_context(**request_kwargs):
result = extractors.survey_extractor.survey_extractor_request(flask.request)
for i in range(len(result)):
with self.subTest(i=i):
self.assertDictEqual(result[i], self.expected[i])
if __name__ == '__main__':
unittest.main()
| [
"coleman.krawczyk@gmail.com"
] | coleman.krawczyk@gmail.com |
50394fff8cb70405e3d6a9d2739f994d49fa25ba | 37edb81c3fb3dfc09d14bb228e5f8df610d21c50 | /source/_static/lecture_specific/mccall/mccall_resw_c.py | 1cf5709382e34082f3a31af6fd0fc53a6fe3ee7f | [
"BSD-3-Clause"
] | permissive | bktaha/lecture-python | 68cb003640323d8600b1396898f2fb383b417769 | b13db12120b977d1c661c9a68c5b8ddfc1b6dc89 | refs/heads/master | 2022-11-02T23:05:32.647316 | 2020-06-09T09:15:18 | 2020-06-09T09:15:18 | 272,172,927 | 0 | 0 | BSD-3-Clause | 2020-06-14T09:44:53 | 2020-06-14T09:44:52 | null | UTF-8 | Python | false | false | 535 | py | grid_size = 25
c_vals = np.linspace(2, 12, grid_size) # values of unemployment compensation
w_bar_vals = np.empty_like(c_vals)
mcm = McCallModel()
fig, ax = plt.subplots(figsize=(10, 6))
for i, c in enumerate(c_vals):
mcm.c = c
w_bar = compute_reservation_wage(mcm)
w_bar_vals[i] = w_bar
ax.set_xlabel('unemployment compensation')
ax.set_ylabel('reservation wage')
txt = r'$\bar w$ as a function of $c$'
ax.plot(c_vals, w_bar_vals, 'b-', lw=2, alpha=0.7, label=txt)
ax.legend(loc='upper left')
ax.grid()
plt.show()
| [
"mamckay@gmail.com"
] | mamckay@gmail.com |
30eee56cb38f4efbea3d3964f8dd219c0e33b62f | 4c38cf22642d01720b41b0700b17c481da47ac1c | /实战项目/家用电器用户行为分析与事件识别/阈值寻优模型.py | 99f676399cef2aa5ccd7d503f9f034cfcf8c4d3a | [] | no_license | fwmmmrm/DataMining-2 | ff2e2d75e32e54e629accc6209c23604637c6afb | c20431d90264d84fcee9ddc1dd2d71db16b36632 | refs/heads/master | 2023-02-03T12:53:01.006916 | 2020-12-19T06:26:51 | 2020-12-19T06:26:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | # -*- coding: utf-8 -*-
"""
在1-9分钟进行阈值寻优
"""
import numpy as np
import pandas as pd
def event_num(ts):
'''
得到事件数目
:param ts:
:return:
'''
d = data[u'发生时间'].diff() > ts
return d.sum() + 1
if __name__ == '__main__':
inputfile = 'data/water_heater.xls'
# 使用以后四个点的平均斜率
n = 4
threshold = pd.Timedelta(minutes=5)
data = pd.read_excel(inputfile)
data[u'发生时间'] = pd.to_datetime(data[u'发生时间'], format='%Y%m%d%H%M%S')
data = data[data[u'水流量'] > 0]
dt = [pd.Timedelta(minutes=i) for i in np.arange(1, 9, 0.25)]
# 定义阈值列
h = pd.DataFrame(dt, columns=[u'阈值'])
# 计算每个阈值对应的事件数
h[u'事件数'] = h[u'阈值'].apply(event_num)
# 计算每两个相邻点对应的斜率
h[u'斜率'] = h[u'事件数'].diff()/0.25
# 采用后n个的斜率绝对值平均作为斜率指标
h[u'斜率指标'] = pd.DataFrame(h[u'斜率'].abs()[len(h)-n:]).rolling(2).mean()
ts = h[u'阈值'][h[u'斜率指标'].idxmin() - n]
if ts > threshold:
ts = pd.Timedelta(minutes=4)
print(ts)
| [
"1695735420@qq.com"
] | 1695735420@qq.com |
3632c60cac2a105337f609999d00fbcb845ae306 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2015_03_25_alpha_Ti_composite_7th/main008.py | 15496dbf4af81557e35e3ca2f7ead381096d4b64 | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,110 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 17 16:46:35 2014
@author: nhpnp3
"""
import time
import fegrab
import microstructure_function as msf
import validation_viz
import results
import calibration
# import matplotlib.pyplot as plt
Hi = 2 # number of distinct local states in microstructure
order = 1 # choose 1, 2 or 7 local neighbors in MKS procedure
el_cal = 21 # number of elements per side of cube for calibration dataset
ns_cal = 399 # total number of samples in calibration dataset
set_id_cal = 'cal008' # set ID for calibration dataset
dir_cal = 'cal008' # directory name for .dat files
el_val = 21 # number of elements per side of cube for validation dataset
ns_val = 400 # total number of samples in validation dataset
set_id_val = 'val008' # set ID for validation dataset
dir_val = 'val008' # directory name for .dat files
doplt = 0 # if plotting of results desired set doplt = 1
# if doplt == 1:
# plt.close('all')
wrt_file = 'log_order%s_%s%s_%s%s_%s.txt' % \
(order, ns_cal, set_id_cal, ns_val, set_id_val,
time.strftime("%Y-%m-%d_h%Hm%M"))
# TOTAL CALIBRATION PROCEDURE
# Read the calibration microstructures and build the microstructure function
H = msf.msf(el_cal, ns_cal, Hi, order, set_id_cal, wrt_file)
# Read the responses from the FE .dat files and perform the fftn for the
# calibration
fegrab.fegrab(el_cal, ns_cal, set_id_cal, dir_cal, wrt_file)
# Perform the calibration
calibration.calibration_main(el_cal, ns_cal, H, set_id_cal, wrt_file)
# # TOTAL VALIDATION PROCEDURE
# Read the validation microstructures and build the microstructure function
H = msf.msf(el_val, ns_val, Hi, order, set_id_val, wrt_file)
# Read the responses from the FE .dat files and perform the fftn for the
# validation
fegrab.fegrab(el_val, ns_val, set_id_val, dir_val, wrt_file)
# Perform the validation
validation_viz.validation_zero_pad(el_cal, el_val, ns_cal, ns_val, H,
set_id_cal, set_id_val, wrt_file)
# Calculate the results of the validation
results.results(el_val, ns_val, set_id_val, 'epsilon', doplt, wrt_file)
| [
"noahhpaulson@gmail.com"
] | noahhpaulson@gmail.com |
edfc5ecf4e08ebc51d70cccfa64033266fcd94d5 | f00ae2cb4709539e8a78247678d9bb51913e0373 | /oacids/exported/ifaces.py | 6291040a82a66812250500bc05295488b82dd8c9 | [
"MIT"
] | permissive | openaps/oacids | 576351d34d51c62492fc0ed8be5e786273f27aee | ed8d6414171f45ac0c33636b5b00013e462e89fb | refs/heads/master | 2021-01-10T06:03:53.395357 | 2016-03-21T04:02:47 | 2016-03-21T04:02:47 | 51,559,470 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 390 | py |
# BUS='org.openaps.oacids'
BUS='org.openaps'
IFACE='org.openaps.Service'
ObjectManager = 'org.freedesktop.DBus.ObjectManager'
PATH_BASE='/org/openaps'
PATH='/org/openaps/Services'
ManagerPath = PATH
INTROSPECTABLE_IFACE='org.freedesktop.DBus.Introspectable'
# class WithProperties (dbus.service.Object):
TRIGGER_IFACE = IFACE + '.Trigger'
OPENAPS_IFACE='org.openaps.Service.Instance'
| [
"bewest@gmail.com"
] | bewest@gmail.com |
76974675ed4f3f5348441293a1ee00f825fc5953 | 7173978a10fc738ef6ee1d7c63e608da9a715d19 | /keyboards/inline/category_industrial_buttons.py | 2b046d54afb57b03c1337c4e5267bdcfa52dec6b | [] | no_license | stillnurs/telegram_bot | 80498de7489a3d99f0288f4c96ffe0b5a6ece937 | e7c3cd84f78d24484703c95e9d596ac682ef10a5 | refs/heads/master | 2023-02-09T19:54:08.919475 | 2020-12-28T19:43:44 | 2020-12-28T19:43:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton
category_industrial_choice = InlineKeyboardMarkup(inline_keyboard=[
[
InlineKeyboardButton(text="Общие вопросы",
callback_data="about_industrial")
]
],
)
| [
"noorsultan.mamataliev@gmail.com"
] | noorsultan.mamataliev@gmail.com |
9d983f524780487331f1a5a8f745b286f2a278df | 1531345172f997e42b861ddd2063d7e357a3b4f2 | /python/tf_idf_demo.py | 58cd7d7e88c95d87ddc076d432d50f3384af59dd | [] | no_license | claralinanda/MIS3545-Spring2017 | 5c83e671b19cc75857fcbfb941adb88272953c60 | 8b097453558f9a5c2c5c11ada7d98423dfa24254 | refs/heads/master | 2021-01-19T11:54:06.392757 | 2017-04-03T14:05:23 | 2017-04-03T14:05:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,724 | py | from math import log
def tf(term, doc, normalize=True):
doc = doc.lower().split()
if normalize:
return doc.count(term.lower()) / float(len(doc))
else:
return doc.count(term.lower()) / 1.0
def idf(term, corpus):
num_texts_with_term = len([True for text in corpus if term.lower()
in text.lower().split()])
# tf-idf calc involves multiplying against a tf value less than 0, so it's important
# to return a value greater than 1 for consistent scoring. (Multiplying two values
# less than 1 returns a value less than each of them)
try:
return 1.0 + log(float(len(corpus)) / num_texts_with_term)
except ZeroDivisionError:
return 1.0
def tf_idf(term, doc, corpus):
return tf(term, doc) * idf(term, corpus)
# Score queries by calculating cumulative tf_idf score for each term in query
query_scores = {'1': 0, '2': 0, '3': 0}
QUERY_TERMS = ['class', 'bi', 'babson', 'love']
corpus = \
{'1': 'This class is so cool love it Babson rock',
'2': 'I learn a lot in BI class I want to be an BI engineer',
'3': "One of the best class at Babson I love Babson"}
for term in [t.lower() for t in QUERY_TERMS]:
for doc in sorted(corpus):
print('TF(%s): %s' % (doc, term), tf(term, corpus[doc]))
print('IDF: %s' % (term,), idf(term, corpus.values()))
print()
for doc in sorted(corpus):
score = tf_idf(term, corpus[doc], corpus.values())
print('TF-IDF(%s): %s' % (doc, term), score)
query_scores[doc] += score
print()
print("Overall TF-IDF scores for query '%s'" % (' '.join(QUERY_TERMS),))
for (doc, score) in sorted(query_scores.items()):
print(doc, score)
| [
"zli@babson.edu"
] | zli@babson.edu |
f48b108217790987b181bc014327e857ca7cc522 | 3c6b0521eb788dc5e54e46370373e37eab4a164b | /FlowScore/eval_single.py | cda6c141996523b2197a00407f922861a8825b8d | [] | no_license | y12uc231/DialEvalMetrics | 7402f883390b94854f5d5ae142f700a697d7a21c | f27d717cfb02b08ffd774e60faa6b319a766ae77 | refs/heads/main | 2023-09-02T21:56:07.232363 | 2021-11-08T21:25:24 | 2021-11-08T21:25:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | from flow_score import *
import numpy as n
import argparse
import json
def parse_args():
parser = argparse.ArgumentParser(description='')
parser.add_argument('--eval_data', type=str, default=None)
parser.add_argument('--output', type=str)
args = parser.parse_args()
return args
def main(args):
FLOW_SCORE = FlowScore(MODEL_PATH)
with open(args.eval_data) as f:
data = json.load(f)
flow_scores = []
for context, response in zip(data['contexts'], data['responses']):
flow_input = context + [response]
flow_score = FLOW_SCORE.score(flow_input) * -1
flow_scores.append(flow_score)
data['flow_scores'] = flow_scores
with open(args.output, 'w') as f:
json.dump(data, f)
if __name__ == '__main__':
args = parse_args()
main(args) | [
"yitingye@cs.cmu.edu"
] | yitingye@cs.cmu.edu |
f2f4abba432d2c0b97599ad83d7485f8c0eb3203 | 9088d49a7716bdfc9b5770e8e54ebf7be6958fcf | /09 - Repetition structure while/Des_059.py | a745d5bba64cfbc51c17879a26b37f1a9b2d9a9e | [
"MIT"
] | permissive | o-Ian/Practice-Python | 579e8ff5a63a2e7efa7388bf2d866bb1b11bdfe2 | 1e4b2d0788e70006096a53a7cf038db3148ba4b7 | refs/heads/main | 2023-05-02T02:21:48.459725 | 2021-05-18T18:46:06 | 2021-05-18T18:46:06 | 360,925,568 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | from time import sleep
n = 0
while n != 5:
n1 = int(input('Digite um número inteiro: '))
n2 = int(input('Digite outro número inteiro: '))
print('=-' * 25)
print('Digite [1] para somar \nDigite [2] para multiplicar\nDigite [3] para saber qual número é o maior\n'
'Digite [4] para digitar novos números\nDigite [5] para sair do programa')
print('=-' * 25)
n = int(input('Digite o número da operação que você deseja > '))
if n == 1:
print('A soma entre {} e {} é {}.' .format(n1, n2, n1+n2))
elif n == 2:
print('A multiplicação entre {} e {} é {}.' .format(n1, n2, n1*n2))
elif n == 3:
print('O maior número é o {}.' .format(n1 if n1 > n2 else n2))
elif n == 4:
''
else:
print('-' * 23)
print('\033[1;41mDigite um número válido!\033[m')
print('-' * 23)
sleep(1)
print('FIM')
| [
"ianstigli@hotmail.com"
] | ianstigli@hotmail.com |
05f23b2d032c3d8618084d240d489fbcc40895b0 | b69b6a68a7bd7cfa274cbbe27c185be32eba7b54 | /rpc_start/go_json_rpc_client.py | e17222899d551985f37d07640b271ca208d3f91f | [] | no_license | ZhiyuSun/code-kingdom | 3f4d2c023ffdc2a18523c3144bb7271b9835f3ba | 318c292dec5a43de1131a171678a6874613bc087 | refs/heads/master | 2021-06-23T20:29:03.682412 | 2021-06-14T08:52:41 | 2021-06-14T08:52:41 | 201,889,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | import json
import socket
# request = {
# "id":0,
# "params":["sunzhiyu"],
# "method": "HelloService.Hello"
# }
#
# client = socket.create_connection(("localhost", 2222))
# client.sendall(json.dumps(request).encode())
#
# #获取服务器返回的数据
# rsp = client.recv(1024)
# rsp = json.loads(rsp.decode())
#
# print(rsp["result"])
request = {
"id":0,
"params":["sunzhiyu"],
"method": "HelloService.Hello"
}
import requests
rsp = requests.post("http://localhost:1234/jsonrpc", json=request)
print(rsp.text)
| [
"zcgyxlgy@sina.com"
] | zcgyxlgy@sina.com |
95dc9559d334108df8bfa5d29c11957bbcfcf14a | 7b13e6acb2a1f26936462ed795ee4508b4088042 | /算法题目/算法题目/剑指offer/题15二进制中1的个数.py | 8b9f4af0a2c3eeefc3b9e876e3e82af26d04cedf | [] | no_license | guojia60180/algorithm | ed2b0fd63108f30cd596390e64ae659666d1c2c6 | ea81ff2722c7c350be5e1f0cd6d4290d366f2988 | refs/heads/master | 2020-04-19T08:25:55.110548 | 2019-05-13T13:29:39 | 2019-05-13T13:29:39 | 168,076,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | #Author guo
# -*- coding:utf-8 -*-
class Solution:
def NumberOf1(self, n):
# write code here
count = 0
if n<0:
n=n&0xffffffff
while n:
count += 1
n = (n - 1) & n
return count
def Numberof2(self,n):
if n<0:
s=bin(n&0xffffffff)
else:
s=bin(n)
return s.count('1')
#判断是否是2的整次数幂
def powerof2(self,n):
if n&(n-1)==0:
return True
else:
return False
#判断两个数二进制表示有多少位不一样
def andOr(self,m,n):
diff=m^n
count=0
while diff:
count+=1
diff=diff&(diff-1)#逐级的置0
return count
| [
"44565715+guojia60180@users.noreply.github.com"
] | 44565715+guojia60180@users.noreply.github.com |
b010ac476488c1ddac5a903d34b68d757fd309a7 | 8cb8bfd2dae516612251039e0632173ea1ea4c8a | /modules/contact/triggers.py | baf79680e6675358cf535d3c8d1db77215c8a039 | [] | no_license | nyzsirt/lift-prod | 563cc70700d26a5812a1bce0bd9795998dce6e99 | 9a5f28e49ad5e80e422a5d5efee77a2d0247aa2b | refs/heads/master | 2020-04-22T01:05:42.262876 | 2019-02-09T13:31:15 | 2019-02-09T13:31:15 | 170,003,361 | 1 | 0 | null | 2019-02-10T17:11:50 | 2019-02-10T17:11:50 | null | UTF-8 | Python | false | false | 361 | py |
class ContactTrigger:
@classmethod
def pre_save(cls, sender, document, **kwargs):
names = []
if document.name or document.surname:
if document.name:
names.append(document.name)
if document.surname:
names.append(document.surname)
document.full_name = " ".join(names)
| [
"mutlu.erdem@soft-nec.com"
] | mutlu.erdem@soft-nec.com |
270cf00b72424c4ab5ef2e7ed3870a0b805ced8e | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/System/Diagnostics/__init___parts/DebuggerBrowsableState.py | 7599756d739f37be9fff917e09f66f45fd9f7b82 | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | class DebuggerBrowsableState(Enum,IComparable,IFormattable,IConvertible):
"""
Provides display instructions for the debugger.
enum DebuggerBrowsableState,values: Collapsed (2),Never (0),RootHidden (3)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Collapsed=None
Never=None
RootHidden=None
value__=None
| [
"magnetscoil@gmail.com"
] | magnetscoil@gmail.com |
799b4aaf7edaef6728d0dd5d5676703185f9f2f5 | 07131e91dcf2529e9c7058f8a8f239d419c8f7e0 | /1450.number-of-students-doing-homework-at-a-given-time.py | c9ff2ab40bfb09aa40ca1baf90d6766f217d4c97 | [] | no_license | Code-Wen/LeetCode_Notes | 5194c5c5306cb9f4a0fac85e06fefe6c02d65d44 | 791fc1b43beef89d668788de6d12f5c643431b8f | refs/heads/master | 2021-07-04T14:41:00.830723 | 2020-09-27T16:31:22 | 2020-09-27T16:31:22 | 178,456,323 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,163 | py | #
# @lc app=leetcode id=1450 lang=python3
#
# [1450] Number of Students Doing Homework at a Given Time
#
# https://leetcode.com/problems/number-of-students-doing-homework-at-a-given-time/description/
#
# algorithms
# Easy (82.11%)
# Likes: 55
# Dislikes: 14
# Total Accepted: 13.4K
# Total Submissions: 16.3K
# Testcase Example: '[1,2,3]\n[3,2,7]\n4'
#
# Given two integer arrays startTime and endTime and given an integer
# queryTime.
#
# The ith student started doing their homework at the time startTime[i] and
# finished it at time endTime[i].
#
# Return the number of students doing their homework at time queryTime. More
# formally, return the number of students where queryTime lays in the interval
# [startTime[i], endTime[i]] inclusive.
#
#
# Example 1:
#
#
# Input: startTime = [1,2,3], endTime = [3,2,7], queryTime = 4
# Output: 1
# Explanation: We have 3 students where:
# The first student started doing homework at time 1 and finished at time 3 and
# wasn't doing anything at time 4.
# The second student started doing homework at time 2 and finished at time 2
# and also wasn't doing anything at time 4.
# The third student started doing homework at time 3 and finished at time 7 and
# was the only student doing homework at time 4.
#
#
# Example 2:
#
#
# Input: startTime = [4], endTime = [4], queryTime = 4
# Output: 1
# Explanation: The only student was doing their homework at the queryTime.
#
#
# Example 3:
#
#
# Input: startTime = [4], endTime = [4], queryTime = 5
# Output: 0
#
#
# Example 4:
#
#
# Input: startTime = [1,1,1,1], endTime = [1,3,2,4], queryTime = 7
# Output: 0
#
#
# Example 5:
#
#
# Input: startTime = [9,8,7,6,5,4,3,2,1], endTime =
# [10,10,10,10,10,10,10,10,10], queryTime = 5
# Output: 5
#
#
#
# Constraints:
#
#
# startTime.length == endTime.length
# 1 <= startTime.length <= 100
# 1 <= startTime[i] <= endTime[i] <= 1000
# 1 <= queryTime <= 1000
#
#
#
# @lc code=start
class Solution:
def busyStudent(self, startTime: List[int], endTime: List[int], queryTime: int) -> int:
return sum([startTime[i]<=queryTime<=endTime[i] for i in range(len(endTime))])
# @lc code=end
| [
"chenxu.wen.math@gmail.com"
] | chenxu.wen.math@gmail.com |
19cd2e3f1b9fb6e63d4dc44321ba9996e824be86 | 4236d2613e2a4f9aaf87cb9f400a63d6254aebb6 | /app/teacher/homework/test_cases/test003_home_item_list.py | 8f069373c235c2cda1b44e392f337aed518b65bb | [] | no_license | MerdLead/test_andriod_app | 3a79efb01a5185bf4e35c259ca0236afa8ec4214 | 7eac7a68eaf1bd77a4c78b5f1e1c311ea888f529 | refs/heads/master | 2020-03-30T07:25:14.285758 | 2018-09-30T06:42:35 | 2018-09-30T06:42:35 | 150,938,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | #!/usr/bin/env python
import unittest
from app.teacher.homework.object_page.release_hw_page import ReleasePage
from app.teacher.login.object_page.home_page import ThomePage
from app.teacher.login.object_page.login_page import TloginPage
from app.teacher.homework.object_page.homework_detail_page import HwDetailPage
from app.teacher.login.test_data.login_failed_toast import VALID_LOGIN_TOAST
from conf.decorator import setup, teardown, testcase
from utils.toast_find import Toast
class HomeItem(unittest.TestCase):
"""首页列表"""
@classmethod
@setup
def setUp(cls):
"""启动应用"""
cls.login = TloginPage()
cls.home = ThomePage()
cls.homework = ReleasePage()
cls.detail = HwDetailPage()
@classmethod
@teardown
def tearDown(cls):
pass
@testcase
def test_home_item_list(self):
self.login.app_status() # 判断APP当前状态
if self.home.wait_check_page(): # 页面检查点
if self.home.wait_check_no_page():
print('无最新动态 -- (用户指南) 欢迎使用在线助教,打开看看吧!')
else:
var = self.home.hw_list_operate([]) # 作业列表
while True:
self.detail.screen_swipe_up(0.5, 0.85, 0.5, 1000)
var = self.home.hw_list_operate(var[0]) # 作业列表
if int(var[1]) == 1:
break
while True:
if self.home.wait_check_image_page():
break
else:
self.homework.screen_swipe_down(0.5, 0.1, 0.85, 1000)
else:
Toast().find_toast(VALID_LOGIN_TOAST.login_failed())
print("未进入主界面")
| [
"merdlead@163.com"
] | merdlead@163.com |
4793099a4c76af4e4083bf262be99aa6c059769e | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /get_important_work.py | 15db59ad1383646a2662973fea34080ba0a76d6d | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py |
#! /usr/bin/env python
def man(str_arg):
try_thing(str_arg)
print('ask_week_by_great_week')
def try_thing(str_arg):
print(str_arg)
if __name__ == '__main__':
man('week')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
7b02309bae0bf6468085e6e3fbfe3cb4bdb6e5e9 | d6791a60af5570dbf925b26f3b9ec7608a47fbeb | /setup.py | 611dca753801f3fb6854624ad7a27c02aec9e99c | [] | no_license | ammubhave/sqlalchemy-simql | e9c2e45d5629087b1be96be58118b362b42725c6 | 4e9947a49502355e65d30eec64b1e86af0428287 | refs/heads/master | 2020-05-17T22:28:22.030723 | 2014-07-15T14:41:44 | 2014-07-15T14:41:44 | 21,827,961 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | #!/usr/bin/env python
from setuptools import setup
setup(
author='Amol Bhave',
author_email='ambhave' '@' 'mit.edu',
description='SQLAlchemy extension for SimQL',
entry_points="""
[sqlalchemy.dialects]
simql = sqlalchemy_simql.dialect:SimqlDialect
""",
install_requires=[
'SQLAlchemy>=0.9.4',
],
long_description=open('README.md', 'rt').read(),
name='SQLAlchemy-SimQL',
packages=[
'sqlalchemy_simql',
'sqlalchemy_simql.dbapi2',
'sqlalchemy_simql.dialect',
],
url='https://github.com/ammubhave/sqlalchemy-simql',
version='0.1.0',
)
| [
"ammubhave@gmail.com"
] | ammubhave@gmail.com |
dc2b1eb5e5daa541ad0ab6f159afdcfccb90587c | 11692a0e0b784252dea9ce5c2d97297d890ab520 | /arquivos-py/CursoEmVideo_Python3_DESAFIOS/desafio06.py | 7abbef956bcec92837a5f7e19dd35bfce23beca3 | [
"MIT"
] | permissive | oliveiralecca/cursoemvideo-python3 | bcd6dde54e26702f964e002221dda36c6eb8fd9f | e0a3e27d73a49ce0e72ae4faa9ac0c6da9811d2e | refs/heads/master | 2023-03-31T07:21:34.739162 | 2021-03-29T00:38:42 | 2021-03-29T00:38:42 | 271,679,170 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | print('======= DESAFIO 06 =======')
n = int(input('Digite um número: '))
d = n * 2
t = n * 3
rq = n ** (1/2) # ou: pow(n,(1/2))
print('Dobro: {}\nTriplo: {}\nRaiz Quadrada: {:.2f}'.format(d, t, rq))
| [
"oliveiraslc@yahoo.com.br"
] | oliveiraslc@yahoo.com.br |
2cecbfd5cadee6e7d888a5e28743114b68e47a97 | e1e5ffef1eeadd886651c7eaa814f7da1d2ade0a | /Systest/tests/acl/ACL_FUN_002.py | 460f887024066ecc12d883efdf8e0848b3fe6b7c | [] | no_license | muttu2244/MyPython | 1ddf1958e5a3514f9605d1f83c0930b24b856391 | 984ca763feae49a44c271342dbc15fde935174cf | refs/heads/master | 2021-06-09T02:21:09.801103 | 2017-10-10T07:30:04 | 2017-10-10T07:30:04 | 13,803,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,009 | py | #!/usr/bin/env python2.5
"""
#######################################################################
#
# Copyright (c) Stoke, Inc.
# All Rights Reserved.
#
# This code is confidential and proprietary to Stoke, Inc. and may only
# be used under a license from Stoke.
#
#######################################################################
DESCRIPTION: To Verify that SSX permits/drops packets based on Precedence option. Repeat the case for inbound and outbound filter
TEST PLAN :ACL Test plans
TEST CASES:ACL_FUN_002
TOPOLOGY DIAGRAM:
---------------------------------------------
| SSX
| TransIP = 2.2.2.45/24 |
|
| Port 2/1 |
--------------------------------------------
HOW TO RUN:python2.5 ACL_FUN_004.py
AUTHOR: rajshekar@primesoftsolutionsinc.com
REVIEWER:suresh@primesoftsolutionsinc.com
"""
import sys, os
mydir = os.path.dirname(__file__)
qa_lib_dir = os.path.join(mydir, "../../lib/py")
if qa_lib_dir not in sys.path:
sys.path.insert(1,qa_lib_dir)
#Import Frame-work libraries
from SSX import *
from Linux import *
from log import *
from StokeTest import test_case, test_suite, test_runner
from log import buildLogger
from logging import getLogger
from acl import *
from helpers import is_healthy
from misc import *
#import config and topo file
from config import *
from topo import *
class test_ACL_FUN_002(test_case):
    """ACL functional test: verify ICMP packets are passed or filtered by ToS bits.

    Drives an SSX device and a Linux traffic host over telnet. Python 2 style
    (`print` statement) — matches the framework imports at the top of the file.
    """
    myLog = getLogger()
    def setUp(self):
        """Open telnet sessions to the SSX and the Linux host, reset SSX state."""
        #Establish a telnet session to the SSX box.
        self.ssx = SSX(ssx["ip_addr"])
        self.linux=Linux(linux['ip_addr'],linux['user_name'],linux['password'])
        self.ssx.telnet()
        self.linux.telnet()
        # Clear the SSX config
        self.ssx.clear_config()
        # wait for card to come up
        self.ssx.wait4cards()
        self.ssx.clear_health_stats()
    def tearDown(self):
        """Close both telnet sessions opened in setUp."""
        # Close the telnet session of SSX
        self.ssx.close()
        self.linux.close()
    def test_ACL_FUN_002(self):
        """Ping with two different ToS values and verify the ACL's pass/filter counters."""
        # Push SSX config
        self.ssx.config_from_string(script_var['ACL_FUN_002'])
        # Switch into the test context and clear the port counters.
        self.ssx.cmd("context %s" %(script_var['context_name']))
        self.ssx.cmd("clear ip counters")
        time.sleep(5)
        # Send ICMP packets with ToS 0x10; these should pass through the ACL,
        # so we expect 5 echo replies in the counters below.
        self.linux.cmd("ping %s -c 5 -Q 0x10 "%(script_var['ssx_phy_iface1_ip']))
        time.sleep(5)
        #self.ssx.cmd("show ip counters icmp")
        output=ip_verify_ip_counters_icmp(self.ssx,total_tx='5',total='0',echo_request='0', echo_reply='5', unreachable='0', \
        mask_request='0', mask_reply='0', source_quench= '0' , param_problem='0', timestamp='0',\
        redirects='0', info_reply='0', ttl_expired='0', other='0')
        print output
        self.failIfEqual(output,0,"Packet Filtering Unsuccessful")
        self.ssx.cmd("clear ip counters")
        time.sleep(5)
        # Send ICMP packets with ToS 0x30; these should be FILTERED by the ACL,
        # so all counters below are expected to be zero.
        self.linux.cmd("ping %s -c 5 -Q 0x30 "%(script_var['ssx_phy_iface1_ip']),timeout=40)
        time.sleep(5)
        output=ip_verify_ip_counters_icmp(self.ssx,total_tx='0',total='0',echo_request='0', echo_reply='0', unreachable='0', \
        mask_request='0', mask_reply='0', source_quench= '0' , param_problem='0', timestamp='0',\
        redirects='0', info_reply='0', ttl_expired='0', other='0')
        print output
        self.failIfEqual(output,0,"Packet Filtering Unsuccessful")
        # Checking SSX Health
        hs = self.ssx.get_health_stats()
        self.failUnless(is_healthy( hs), "Platform is not healthy")
if __name__ == '__main__':
    # Log file is named after this script, e.g. ACL_FUN_004.py -> ACL_FUN_004.log.
    filename = os.path.split(__file__)[1].replace('.py','.log')
    log = buildLogger(filename, debug=True, console=True)
    # Run the single test case through the framework's suite/runner wrappers.
    suite = test_suite()
    suite.addTest(test_ACL_FUN_002)
    test_runner(stream = sys.stdout).run(suite)
| [
"muttu2244@yahoo.com"
] | muttu2244@yahoo.com |
df67d820d16d861532c29149ce9e341f4afbc2f8 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/192c0dc9d16458255102ce13d1707d8285defe12-<_construct_url_3>-bug.py | bc88419c2462fff4e03343f512cece7788534609 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,779 | py | def _construct_url_3(self, root, parent, obj, child_includes):
'\n This method is used by get_url when the object is the third-level class.\n '
root_rn = root['aci_rn']
root_obj = root['module_object']
parent_class = parent['aci_class']
parent_rn = parent['aci_rn']
parent_filter = parent['filter_target']
parent_obj = parent['module_object']
obj_class = obj['aci_class']
obj_rn = obj['aci_rn']
obj_filter = obj['filter_target']
mo = obj['module_object']
if (not child_includes):
self_child_includes = ('&rsp-subtree=full&rsp-subtree-class=' + obj_class)
else:
self_child_includes = '{0},{1}'.format(child_includes, obj_class)
if (not child_includes):
parent_self_child_includes = '&rsp-subtree=full&rsp-subtree-class={0},{1}'.format(parent_class, obj_class)
else:
parent_self_child_includes = '{0},{1},{2}'.format(child_includes, parent_class, obj_class)
if (self.module.params['state'] != 'query'):
path = 'api/mo/uni/{0}/{1}/{2}.json'.format(root_rn, parent_rn, obj_rn)
filter_string = ('?rsp-prop-include=config-only' + child_includes)
elif ((mo is None) and (parent_obj is None) and (root_obj is None)):
path = 'api/class/{0}.json'.format(obj_class)
filter_string = ''
elif (root_obj is not None):
if (parent_obj is not None):
if (mo is not None):
path = 'api/mo/uni/{0}/{1}/{2}.json'.format(root_rn, parent_rn, obj_rn)
filter_string = ''
else:
path = 'api/mo/uni/{0}/{1}.json'.format(root_rn, parent_rn)
filter_string = self_child_includes.replace('&', '?', 1)
elif (mo is not None):
path = 'api/mo/uni/{0}.json'.format(root_rn)
filter_string = '?rsp-subtree-filter={0}{1}'.format(obj_filter, self_child_includes)
else:
path = 'api/mo/uni/{0}.json'.format(root_rn)
filter_string = ('?' + parent_self_child_includes)
elif (parent_obj is not None):
if (mo is not None):
path = 'api/class/{0}.json'.format(parent_class)
filter_string = '?query-target-filter={0}{1}&rsp-subtree-filter={2}'.format(parent_filter, self_child_includes, obj_filter)
else:
path = 'api/class/{0}.json'.format(parent_class)
filter_string = '?query-target-filter={1}{2}'.format(parent_filter, self_child_includes)
else:
path = 'api/class/{0}.json'.format(obj_class)
filter_string = ('?query-target-filter={0}'.format(obj_filter) + child_includes)
if ((child_includes is not None) and (filter_string == '')):
filter_string = child_includes.replace('&', '?', 1)
return (path, filter_string) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
5bd7c7213bc5d8d57013a4f313bf573a5ed1efca | 3d31d1ebeac4586a455c08d551e81e5596f4a8c4 | /dev/06_21_2018/UPS_Error.py | 34ccb3e8a9a17add7d24bebf44b7e78b6d4fa47d | [
"Python-2.0"
] | permissive | npwebste/UPS_Controller | 0ff9670abd6f4ff5d4f3a5ec3003e4f4ddfcf148 | a90ce2229108197fd48f956310ae2929e0fa5d9a | refs/heads/master | 2021-05-10T10:00:03.186490 | 2018-08-04T23:37:16 | 2018-08-04T23:37:16 | 118,942,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,599 | py | # Universal Power Supply Controller
# USAID Middle East Water Security Initiative
#
# Developed by: Nathan Webster
# Primary Investigator: Nathan Johnson
#
# Version History (mm_dd_yyyy)
# 1.00 03_24_2018_NW
#
######################################################
def UPS_Error(ErrorCode):
    """Print a human-readable diagnostic message for a UPS controller error code.

    ErrorCode -- one of the 'Error_*' string codes below; unknown codes are
                 silently ignored (same as the original elif chain).

    Fixes over the original:
    - removed an unreachable duplicate 'Error_VFD_Power' branch (the first
      branch always matched, so its message is the one kept);
    - corrected the 'votlage' typo in the VFD voltage message;
    - replaced the long elif chain with a dict lookup.
    """
    messages = {
        'Error_VFD_Freq': 'VFD frequency set above maximum, shutting down motor',
        'Error_VFD_Volt': 'VFD voltage set above maximum, shutting down motor',
        'Error_VFD_Amps': 'VFD current set above maximum, shutting down motor',
        'Error_VFD_Power': 'VFD power set above maximum, shutting down motor',
        'Error_VFD_BusVolt': 'VFD bus voltage set above maximum, shutting down motor',
        'Error_VFD_Temp': 'VFD temperature set above maximum, shutting down motor',
        'Error_Solar_Voltage': 'Solar voltage set above maximum, shutting down converter and motor',
        'Error_DC_Link_Voltage': 'DC link voltage set above maximum, shutting down converter and motor',
        'Error_Voltage_Measurement': 'Incorrect voltage measurement input',
        'Error_Transfer_Switch': 'Incorrect transfer switch input',
        # NOTE(review): this message looks like a copy-paste of the power-
        # calculation branch; confirm the intended duty-cycle wording.
        'Error_Duty_Cycle': 'Incorrect power calculation',
    }
    message = messages.get(ErrorCode)
    if message is not None:
        print(message)
return | [
"30417327+npwebste@users.noreply.github.com"
] | 30417327+npwebste@users.noreply.github.com |
d320b73bc06a0e30403bc1b844e6b62680e3ca62 | d8b1a010bec02de76f179e0d3df113903d91db71 | /TRP_api/TRP_api/urls.py | 055e259fb94ae21368deb72c31aa206e3420ccdd | [] | no_license | vineetkashyap/api12may | 2e933815972bf24cd7ff5efe6800e5fe70122cb1 | 29c1b6fa9c55c900a2bdd1175e0e74a5113fdbdd | refs/heads/master | 2023-05-01T16:54:09.949319 | 2021-05-16T09:37:15 | 2021-05-16T09:37:15 | 366,686,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | from django.contrib import admin
from django.urls import path,include
from api import views
from django.conf.urls.static import static
from django.conf import settings
from rest_framework.authtoken.views import obtain_auth_token
from knox import views as knox_views
from rest_framework.routers import DefaultRouter
# DRF router: auto-generates list/detail routes for each registered viewset.
router= DefaultRouter()
router.register('truckowner',views.TruckOwnerModel_View,basename='truckowner')
router.register('transporter',views.TransporterModel_View,basename='transporter')
router.register('agent',views.Tranage_AgentSerializer_View,basename='agent')
router.register('vehicle',views.VehicleRegistraionModelSerializer_View,basename='vehicle')
router.register('driver',views.DriverRegistrationSerializer_View,basename='driver')
urlpatterns = [
    path('admin/', admin.site.urls),
    # Router-generated viewset routes are mounted at the site root.
    path('',include(router.urls)),
    path('auth/',include('rest_framework.urls')),
    # DRF token auth endpoint (POST username/password, returns a token).
    path('gettoken/',obtain_auth_token),
    # Plain function views returning collection data.
    path('gettruckowner/',views.gettruckowner,name='gettruckowner'),
    path('gettransporter/',views.gettransporter,name='gettransporter'),
    path('getagent/',views.getagent,name='getagent'),
    path('getdriver/',views.getdriver,name='getdriver'),
    path('getvehicle/',views.getvehicle,name='getvehicle'),
    # Knox-backed session endpoints.
    path('api/register/', views.RegisterAPI.as_view(), name='register'),
    path('api/login/', views.LoginAPI.as_view(), name='login'),
    path('api/logout/', knox_views.LogoutView.as_view(), name='logout'),
    path('api/logoutall/', knox_views.LogoutAllView.as_view(), name='logoutall'),
]+static(settings.MEDIA_URL,document_root = settings.MEDIA_ROOT)
"vkvineet66@gmail.com"
] | vkvineet66@gmail.com |
41306b320939acb879a6f70f56bcdaf86f2f1648 | 47eb0bcfee356f35a607e4a31b150662d6b0f0bb | /app/shop/admin.py | d2afdb3bb984e827e90b5209645446251a790204 | [] | no_license | HyungtaeMoon/IAMPORT-ShoppingMall | 955183f2ea2a573737bc236940457733b1ead659 | 2c19290380e1c739df31583a9c55bb9d719c6af8 | refs/heads/master | 2022-12-09T14:15:48.563197 | 2019-03-22T13:02:35 | 2019-03-22T13:02:35 | 154,930,700 | 0 | 0 | null | 2022-07-29T22:51:19 | 2018-10-27T05:47:04 | Python | UTF-8 | Python | false | false | 1,637 | py | from django.contrib import admin
from django.utils.safestring import mark_safe
from .models import Item, Order
@admin.register(Item)
class ItemAdmin(admin.ModelAdmin):
    """Admin list for shop items, with an inline photo thumbnail column."""
    list_display = ['photo_tag', 'name', 'amount']
    def photo_tag(self, item):
        # Render a 75px-wide thumbnail for the changelist; mark_safe is needed
        # so Django does not escape the <img> markup.
        if item.photo:
            return mark_safe('<img src={} style="width: 75px;" />'.format(item.photo.url))
        return None
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
    """Admin for payment orders with bulk refresh/cancel actions against Iamport."""
    list_display = ['imp_uid', 'user', 'name', 'amount_html', 'status_html', 'paid_at', 'receipt_link']
    actions = ['do_update', 'do_cancel']
    def do_update(self, request, queryset):
        'Refresh payment info for the selected orders.'
        total = queryset.count()
        if total > 0:
            for order in queryset:
                order.update()
            self.message_user(request, '주문 {}건의 정보를 갱신했습니다.'.format(total))
        else:
            self.message_user(request, '갱신할 주문이 없습니다')
    # Label shown in the admin actions dropdown (Korean UI text).
    do_update.short_description = '선택된 주문들의 아임포트 갱신하기'
    def do_cancel(self, request, queryset):
        'Request payment cancellation for the selected paid orders.'
        # Only orders that were actually paid can be cancelled.
        queryset = queryset.filter(status='paid')
        total = queryset.count()
        if total > 0:
            for order in queryset:
                order.cancel()
            self.message_user(request, '주문 {}건을 취소했습니다'.format(total))
        else:
            self.message_user(request, '취소할 주문이 없습니다')
    # Label shown in the admin actions dropdown (Korean UI text).
    do_cancel.short_description = '선택된 주문에 대해 결제 취소 요청하기'
"blessmht@gmail.com"
] | blessmht@gmail.com |
2649375b6ae17e806f70ee2bf3a16d2a3073137b | bc91d344ed2ee3f4f93547ec16350f2713e5f704 | /.history/CRUD/views_20190108015459.py | 2867d966ebaada4cf94ef22ab56fdacbeb9fdf8a | [] | no_license | SabitDeepto/Chitra | 10ecf0c4a7588234f0a50adf038783c9ce8706d0 | 160e5d64c8e4ee56a95bb639386785590160ff07 | refs/heads/master | 2020-04-27T21:55:09.685341 | 2019-03-09T16:14:35 | 2019-03-09T16:14:35 | 174,716,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | from django.shortcuts import render
from urllib3 import request
from CRUD.models import Executive
# Create your views here.
def home(request):
    """Render the landing page, listing every Executive record."""
    # The template iterates over the full executive queryset under the key 'ex'.
    all_executives = Executive.objects.all()
    template_name = 'index.html'
    page_context = {
        'name': 'deepto',
        'ex': all_executives,
    }
    return render(request, template_name, page_context)
def create_executive(request):
return render(request, 'reg') | [
"deepto69@gmail.com"
] | deepto69@gmail.com |
901c1ff1b89ae7d1354864bb1bf830a4d212d44a | e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7 | /flask_api/venv/lib/python3.7/site-packages/vsts/gallery/v4_1/models/answers.py | edb392c5bc4c0196922c3ebb9c193159982a3ed4 | [] | no_license | u-blavins/secret_sasquatch_society | c36993c738ab29a6a4879bfbeb78a5803f4f2a57 | 0214eadcdfa9b40254e331a6617c50b422212f4c | refs/heads/master | 2020-08-14T00:39:52.948272 | 2020-01-22T13:54:58 | 2020-01-22T13:54:58 | 215,058,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class Answers(Model):
    """Answers.

    :param vSMarketplace_extension_name: Gets or sets the vs marketplace extension name
    :type vSMarketplace_extension_name: str
    :param vSMarketplace_publisher_name: Gets or sets the vs marketplace publisher name
    :type vSMarketplace_publisher_name: str
    """
    # Maps Python attribute names to wire-format keys; consumed by the
    # msrest serialization base class (see the `Model` import above).
    _attribute_map = {
        'vSMarketplace_extension_name': {'key': 'vSMarketplaceExtensionName', 'type': 'str'},
        'vSMarketplace_publisher_name': {'key': 'vSMarketplacePublisherName', 'type': 'str'}
    }
    def __init__(self, vSMarketplace_extension_name=None, vSMarketplace_publisher_name=None):
        super(Answers, self).__init__()
        self.vSMarketplace_extension_name = vSMarketplace_extension_name
        self.vSMarketplace_publisher_name = vSMarketplace_publisher_name
| [
"usama.blavins1@gmail.com"
] | usama.blavins1@gmail.com |
823d055c9fbcc5df091127861acf22ab33ef3444 | 884249dc53b3e1a4461f44cc07f3c11b798a8bee | /tests/null/test_dropna.py | 9963ab1313220b26c0d86461aafd76144fe03e77 | [
"MIT"
] | permissive | MacHu-GWU/learn_pandas-project | b82ca64061c0afd2e470e0a7d17a8997981fd219 | 86d51d11d6f0a50ffcffbf743da197f4a7b12d61 | refs/heads/master | 2021-01-15T11:29:59.682659 | 2018-02-02T16:55:41 | 2018-02-02T16:55:41 | 99,621,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
from learn_pandas import assert_value_equal
def test_dropna():
    """Exercise DataFrame.dropna along both axes."""
    frame = pd.DataFrame(
        [[1, 2, 3],
         [None, 5, 6],
         [7, 8, 9]],
        index=[1, 2, 3],
        columns=list("ABC"),
    )

    # axis=0 drops every row that contains a NaN (row index 2 here).
    by_row = frame.dropna(axis=0)
    assert_value_equal(by_row, [[1, 2, 3], [7, 8, 9]])

    # axis=1 drops every column that contains a NaN (column 'A' here).
    by_column = frame.dropna(axis=1)
    assert_value_equal(by_column, [[2, 3], [5, 6], [8, 9]])
if __name__ == "__main__":
    import os
    # Run only this file through pytest, unbuffered, with native tracebacks.
    basename = os.path.basename(__file__)
    pytest.main([basename, "-s", "--tb=native"])
| [
"MacHu-GWU@users.noreply.github.com"
] | MacHu-GWU@users.noreply.github.com |
4366e63b4ea98f08dfbfb6348ad3e918f1f35b63 | 080c13cd91a073457bd9eddc2a3d13fc2e0e56ae | /MY_REPOS/awesome-4-new-developers/tensorflow-master/tensorflow/python/keras/distribute/distributed_training_utils.py | d77ef72ddd8e104d640d119c9d5f06aca8845479 | [
"Apache-2.0"
] | permissive | Portfolio-Projects42/UsefulResourceRepo2.0 | 1dccc8961a09347f124d3ed7c27c6d73b9806189 | 75b1e23c757845b5f1894ebe53551a1cf759c6a3 | refs/heads/master | 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,556 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to distributed training."""
# pylint:disable=protected-access
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import values as values_lib
from tensorflow.python.keras import backend
from tensorflow.python.ops import variables
# TODO(b/118776054): Currently we support global batch size for TPUStrategy and
# core MirroredStrategy only. Remove this check when contrib MirroredStrategy is
# no longer needed.
def global_batch_size_supported(distribution_strategy):
    """Return whether `distribution_strategy` uses a global batch size."""
    extended = distribution_strategy.extended
    return extended._global_batch_size  # pylint: disable=protected-access
def call_replica_local_fn(fn, *args, **kwargs):
    """Call a function that uses replica-local variables.

    This function correctly handles calling `fn` in a cross-replica
    context.

    Args:
      fn: The function to call.
      *args: Positional arguments to `fn`.
      **kwargs: Keyword arguments to `fn`. A `strategy` keyword, if present,
        is popped and used instead of the ambient distribution strategy.

    Returns:
      The result of calling `fn`.
    """
    # TODO(b/132666209): Remove this function when we support assign_*
    # for replica-local variables.
    # An explicit `strategy` kwarg wins; otherwise fall back to the ambient
    # strategy, if any.
    strategy = None
    if "strategy" in kwargs:
        strategy = kwargs.pop("strategy")
    else:
        if ds_context.has_strategy():
            strategy = ds_context.get_strategy()

    # TODO(b/120571621): TPUStrategy does not implement replica-local variables.
    is_tpu = backend.is_tpu_strategy(strategy)
    # Only re-dispatch into per-replica execution for non-TPU strategies when
    # we are currently in a cross-replica context.
    if (not is_tpu) and strategy and ds_context.in_cross_replica_context():
        with strategy.scope():
            return strategy.extended.call_for_each_replica(fn, args, kwargs)
    return fn(*args, **kwargs)
def is_distributed_variable(v):
    """Return whether `v` is a distributed variable (DistributedValues + Variable)."""
    required_types = (values_lib.DistributedValues, variables.Variable)
    return all(isinstance(v, t) for t in required_types)
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
6bd5a404f883d52e21e57ccb4185601a09c9c35c | ac4b9385b7ad2063ea51237fbd8d1b74baffd016 | /.history/utils/ocr/handle_image_20210209180144.py | 622b08919b45fce1289c8a8afa9b1bf7344a199b | [] | no_license | preethanpa/ssoemprep | 76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f | ce37127845253c768d01aeae85e5d0d1ade64516 | refs/heads/main | 2023-03-09T00:15:55.130818 | 2021-02-20T06:54:58 | 2021-02-20T06:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,887 | py | import os
import cv2
import re
import numpy as np
from PIL import Image
import pytesseract
from pytesseract import Output
from fpdf import FPDF
import matplotlib.pyplot as plt
'''
IMAGE HANDLING METHODS
'''
# get grayscale image
def get_grayscale(image):
    """Convert a BGR image (OpenCV's default channel order) to grayscale."""
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# blur removal
def remove_blur(image):
    """Apply a 5x5 median filter. Despite the name, this smooths the image
    (good for salt-and-pepper noise) rather than sharpening it."""
    return cv2.medianBlur(image,5)
# noise removal
def remove_noise(image):
    """Denoise a color image using non-local means (strength 10 for both
    luminance and color components)."""
    return cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 15)
#thresholding
def thresholding(image):
    """Binarize with Otsu's automatic threshold.
    NOTE(review): cv2.THRESH_OTSU expects a single-channel image — callers
    should pass the output of get_grayscale()."""
    return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
#dilation
def dilate(image):
    """Morphologically dilate with a 5x5 all-ones kernel (one iteration)."""
    kernel = np.ones((5,5),np.uint8)
    return cv2.dilate(image, kernel, iterations = 1)
#erosion
def erode(image):
    """Morphologically erode with a 5x5 all-ones kernel (one iteration)."""
    kernel = np.ones((5,5),np.uint8)
    return cv2.erode(image, kernel, iterations = 1)
def extract_pdf_from_image(fileName='', pdf_path='', action='', psm=3):
    '''
    OCR an image with Tesseract and save the result as a searchable PDF.

    fileName -- path of the input image
    pdf_path -- directory to write the PDF into (created if missing)
    action   -- optional preprocessing: 1 = denoise, 2 = grayscale,
                3 = median blur; any other value leaves the image untouched
    psm      -- Tesseract page-segmentation mode (default 3)

    Returns the path of the written PDF.
    '''
    print(f'FileName is {fileName}')
    # Tesseract config built from the requested page-segmentation mode.
    custom_config = r'-l eng --psm ' + str(psm)
    pdfdir = pdf_path
    if not os.path.exists(pdfdir):
        os.makedirs(pdfdir)
    pdfFileName = os.path.basename(fileName).split('.')[0] + '.pdf'
    pdfFilePath = pdfdir + '/' + pdfFileName
    print(f'PDF File Path {pdfFilePath}')
    img = cv2.imread(fileName)
    # Fix: default to the raw image so an unrecognized `action` no longer
    # passes None into Tesseract; the actions are mutually exclusive (elif).
    img1 = img
    if action == 1:
        img1 = remove_noise(img)
    elif action == 2:
        img1 = get_grayscale(img)
    elif action == 3:
        img1 = remove_blur(img)
    # Fix: pass the assembled config — it was previously built but never used.
    text = pytesseract.image_to_pdf_or_hocr(img1, extension='pdf', config=custom_config)
    with open(pdfFilePath, mode='w+b') as f:
        f.write(text)
    return pdfFilePath
def convert_text_to_pdf(text='', pdf_path='', filename=''):
    '''
    Convert raw text (bytes) into a simple single-column PDF.

    text     -- text content as bytes; written to a temp .txt, then re-read
                line by line
    pdf_path -- directory to write the PDF into (created if missing)
    filename -- base name (no extension) for both the temp file and the PDF
    '''
    tempdir = "/tmp"
    pdfdir = pdf_path
    textFileName = tempdir + '/' + filename + ".txt"
    pdfFileName = pdfdir + '/' + filename + ".pdf"

    if not os.path.exists(tempdir):
        os.makedirs(tempdir)
    if not os.path.exists(pdfdir):
        os.makedirs(pdfdir)

    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", size = 15)

    # Persist the raw bytes so they can be re-read as decoded text lines.
    with open(textFileName, mode = 'w+b') as f:
        f.write(text)

    line = 1
    # Fix: use a context manager — the read handle was previously never closed.
    with open(textFileName, "r") as f:
        for x in f:
            # Strip typographic dashes/quotes that FPDF's latin-1 core font rejects.
            x1 = re.sub(u"(\u2014|\u2018|\u2019|\u201c|\u201d)", "", x)
            pdf.cell(100, 10, txt=x1, ln=line, align='L')
            line = line + 1

    # 'F' = write the document to a local file.
    pdf.output(pdfFileName,'F')
def mark_region(image_path):
    """Locate text-like regions in a scanned document image.

    Thresholds and dilates the image, finds external contours, draws a
    magenta rectangle over each region of interest, and collects the
    rectangle coordinates.

    Returns (annotated_image_or_None, [[(x0, y0), (x1, y1)], ...]).

    NOTE(review): the positional filters (y >= 600, x <= 1000, area > 1000,
    y >= 2400) assume a specific scan layout/resolution — confirm against
    the actual input documents.
    """
    print(f'image_path {image_path}')
    image = None
    line_items_coordinates = []
    try:
        im = cv2.imread(image_path, 1)
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (9, 9), 0)
        thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY_INV, 11, 30)
        # Dilate to merge adjacent character contours into whole-line regions.
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
        dilated = cv2.dilate(thresh, kernel, iterations=4)
        # findContours returns (contours, hierarchy) on OpenCV 4 and
        # (image, contours, hierarchy) on OpenCV 3; handle both.
        cnts = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if len(cnts) == 2 else cnts[1]
        for c in cnts:
            area = cv2.contourArea(c)
            x, y, w, h = cv2.boundingRect(c)
            if y >= 600 and x <= 1000 and area > 1000:
                image = cv2.rectangle(im, (x, y), (2200, y + h), color=(255, 0, 255), thickness=3)
                line_items_coordinates.append([(x, y), (2200, y + h)])
            if y >= 2400 and x <= 2000:
                image = cv2.rectangle(im, (x, y), (2200, y + h), color=(255, 0, 255), thickness=3)
                line_items_coordinates.append([(x, y), (2200, y + h)])
    except Exception as exc:
        # Fix: the original snapshot had an empty `try:` body and an
        # unterminated f-string here, making the function a SyntaxError.
        print(f'Exception in mark_region {exc}')
    # NOTE(review): unreachable crop/OCR code referencing an undefined
    # `pdf_doc` was removed from the original, broken snapshot.
    return (image, line_items_coordinates)
return (image, line_items_coordinates) | [
"{abhi@third-ray.com}"
] | {abhi@third-ray.com} |
3ae0a854793798036d9b002c8839bb944baf9a81 | 466912406272829982f75854cf0104c6ce8c9814 | /data/spider2/migrate/clean_beian_bywhois.py | 3638dc4f8cbceed4d78ca34670fd75b8d298adef | [] | no_license | logonmy/Codes | 9631fa103fc499663361fa7eeccd7cedb9bb08e4 | 92723efdeccfc193f9ee5d0ab77203c254f34bc2 | refs/heads/master | 2021-09-21T18:07:22.985184 | 2018-08-30T05:53:26 | 2018-08-30T05:53:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,125 | py | # -*- coding: utf-8 -*-
import os, sys
from pymongo import MongoClient
import pymongo
import time
import gevent
from gevent.event import Event
from gevent import monkey; monkey.patch_all()
reload(sys)
sys.setdefaultencoding("utf-8")
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../util'))
import loghelper, config
import db
import util
#logger
loghelper.init_logger("domain_2_beian", stream=True)
logger = loghelper.get_logger("domain_2_beian")
#mongo
mongo = db.connect_mongo()
collection = mongo.info.beian
BEIANS =[]
def whoisCheck():
    """Worker: drain the shared BEIANS queue and validate each beian record
    against its domain's WHOIS creation date, flagging the result in Mongo.

    whoisExpire is set to 'N' (beian date after creation: valid),
    'Y' (beian predates the domain: expired), or 'NA' (no WHOIS data).
    Runs until the shared list is empty. Safe under gevent's cooperative
    scheduling because list.pop does not yield — TODO confirm no other
    scheduler is used.
    """
    while True:
        if len(BEIANS) == 0:
            return
        beian = BEIANS.pop(0)
        #logger.info(beian["domain"])
        domain = str(beian["domain"])
        creation_date = util.whois_creation_date(domain)
        if creation_date is not None:
            logger.info("%s : %s -> %s", beian["domain"], beian["beianDate"], creation_date)
            if beian["beianDate"] > creation_date:
                collection.update({"domain": beian["domain"]}, {"$set": {"whoisChecked": True, "whoisExpire":"N"}},multi=True)
            else:
                logger.info("Expire %s", domain)
                collection.update({"domain": beian["domain"]}, {"$set": {"whoisChecked": True, "whoisExpire":"Y"}},multi=True)
        else:
            logger.info("%s has no whois data",domain)
            collection.update({"domain": beian["domain"]}, {"$set": {"whoisChecked": True, "whoisExpire":"NA"}},multi=True)
if __name__ == "__main__":
    # Number of concurrent gevent workers draining the shared BEIANS queue.
    concurrent_num = 30
    while True:
        logger.info("beian check by whois start...")
        # run(appmkt, WandoujiaCrawler(), "com.ctd.m3gd")
        # Fetch up to 10000 unchecked records per batch.
        beians = list(collection.find({"whoisChecked": {"$ne": True}}, limit=10000))
        for beian in beians:
            BEIANS.append(beian)
        #logger.info(BEIANS)
        if len(beians) > 0:
            # Fan out the batch across the worker greenlets and wait.
            threads = [gevent.spawn(whoisCheck) for i in xrange(concurrent_num)]
            gevent.joinall(threads)
        else:
            #break
            logger.info("beian check by whois end.")
time.sleep(30 * 60) | [
"hush_guo@163.com"
] | hush_guo@163.com |
0d39a0a246d72d162c9cd1e2203c2e8a6e26f448 | cfeb96d00a07cf3a5743a44fcef546aceeae3d3a | /309-BestTimetoBuyandSellStockwithCooldown.py | bc02e67e339e2e08644f56958e4aba5d681b2afe | [] | no_license | minseoch/algorithm | d5218b6187129e68aa355ce6cc89a3496e0f654c | 4c0cfe857f5d78a44c1a3bfb2571d72da4911d97 | refs/heads/master | 2023-03-04T02:10:02.344708 | 2021-01-30T04:44:28 | 2021-01-30T04:44:28 | 296,403,602 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,338 | py | # 309. Best Time to Buy and Sell Stock with Cooldown
# buy-sell-cooldown
# You must sell the stock before you buy again
# After you sell your stock, you cannot buy stock on next day
class Solution:
    """LeetCode 309 — Best Time to Buy and Sell Stock with Cooldown.

    You must sell before buying again, and after a sale there is a
    one-day cooldown before the next buy. Two solutions are provided:
    an O(n^2) backward DP and an O(n) three-state forward DP.
    """

    def maxProfit(self, prices):
        """O(n^2) time, O(n) space backward DP.

        maxP[i] is the best profit achievable from day i onward; two
        sentinel zeros past the end absorb the +2 cooldown offset.
        """
        length = len(prices)
        maxP = [0] * (length + 2)
        for i in range(length - 1, -1, -1):  # decide on day i: buy or rest
            # If we buy on day i, pick the sell day that maximizes
            # (sale profit) + (best profit after the one-day cooldown).
            best_if_buy = 0
            for sell_day in range(i + 1, length):
                profit = prices[sell_day] - prices[i] + maxP[sell_day + 2]
                best_if_buy = max(best_if_buy, profit)
            # Otherwise rest on day i and keep the profit from day i+1.
            maxP[i] = max(maxP[i + 1], best_if_buy)
        return maxP[0]

    def maxProfit2(self, prices):
        """O(n) time DP with three states per day.

        dp[i][0] -- max profit if we rest on day i
        dp[i][1] -- max profit if we buy on day i (must have rested on i-1,
                    because of the cooldown after a sale)
        dp[i][2] -- max profit if we sell on day i (bought on some j < i)
        `bought` tracks max(dp[j][1] for j < i) so state 2 is O(1).
        (Fix: leftover debug print statements were removed.)
        """
        if not prices:
            return 0
        dp = [[0 for _ in range(3)] for _ in range(len(prices))]
        dp[0][0] = 0
        dp[0][1] = -prices[0]
        dp[0][2] = float('-inf')  # selling on day 0 is impossible
        bought = dp[0][1]
        n = len(prices)
        for i in range(1, n):
            dp[i][0] = max([dp[i - 1][0], dp[i - 1][2], dp[i - 1][1]])
            dp[i][1] = dp[i - 1][0] - prices[i]
            dp[i][2] = bought + prices[i]
            bought = max(bought, dp[i][1])
        return max(dp[n - 1])
# Ad-hoc smoke run.
obj = Solution()
# NOTE(review): this first assignment is immediately overwritten below.
prices = [10,20,30,0,20]
prices = [30,40,50,10,20,70]
print(obj.maxProfit(prices))
# good to read
# https://medium.com/algorithms-and-leetcode/best-time-to-buy-sell-stocks-on-leetcode-the-ultimate-guide-ce420259b323 | [
"minseo@Minseos-iMac.local"
] | minseo@Minseos-iMac.local |
9be27768994baa40fb29747a6e6ef408b6c00d8c | b69eefcef9398cff6b00f4531736d6f391743481 | /project/settings.py | 0a9d67dad4fbcd510882739617577bbbf7afdec7 | [] | no_license | cjdd3b/j4462-project-template | c47981500c11573acdd5bf240f25aca95184d5de | 35720f9c5ea2d5dc147811af80c113f058c03496 | refs/heads/master | 2016-09-05T17:42:44.186628 | 2013-04-06T18:26:25 | 2013-04-06T18:26:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,384 | py | import os
import django
# CHASE NOTE: These are a few settings that I put into EVERY SINGLE DJANGO PROJECT that I build. You don't
# need to know how they work; the only thing you need to know is that they will make your life INFINITELY
# EASIER. In fact, I would recommend copying them into your new projects when you start work on them after
# the break (don't forget the two imports above).
PROJECT_NAME = 'project'
# Directory where the installed django package itself lives.
DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__))
# Repository root: two directory levels above this settings file.
SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# These two settings (normally found further down in the document) can cause very obnoxious problems depending
# on how your computer is set up. Setting them up like this (using the helpful settings tools above) avoids those
# problems and allows things to just magically work. If you copy these into your new project settings files, be
# sure to DELETE THE ONES DJANGO PLACES IN THE SETTINGS FILE BY DEFAULT. THERE SHOULD ONLY BE ONE STATIC_ROOT AND
# TEMPLATE_DIRS SETTING IN YOUR SETTINGS FILE -- AND IT SHOULD BE THESE (rant over).
STATIC_ROOT = os.path.join(SITE_ROOT, '%s/static' % PROJECT_NAME)
# BUG FIX: without a trailing comma the parentheses below were mere grouping,
# so TEMPLATE_DIRS was a plain *string* instead of the tuple Django expects
# (Django then iterates it character by character when locating templates).
TEMPLATE_DIRS = (
    os.path.join(SITE_ROOT, '%s/templates' % PROJECT_NAME),
)
########## NORMAL DJANGO SETTINGS BEGIN HERE ###########
# Django settings for project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# CHASE NOTE: Remember, we care about the DATABASES setting. For development purposes, just set the engine
# to sqlite3 as below and give your database a name. That's all you need to do!
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        # NOTE(review): a bare filename is resolved against the process's
        # current working directory; os.path.join(SITE_ROOT, 'project.sqlite')
        # would be more robust -- confirm before changing.
        'NAME': 'project.sqlite', # Or path to database file if using sqlite3.
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to source control; production
# deployments should load it from an environment variable instead.
SECRET_KEY = 'w$$apzca&q!2&*ewhd6&n)74c#4=^=ccc2jq0h1ybs==phy*qe'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = '%s.urls' % PROJECT_NAME
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = '%s.wsgi.application' % PROJECT_NAME
# CHASE NOTE: Remember, we also care about the INSTALLED_APPS setting. Two important things to
# keep in mind here: One, uncomment the lines as noted to enable the admin (if you want it). And two:
# ANY APP YOU CREATE AND WANT TO USE MUST BE ADDED TO THIS LIST.
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
# Project-specific apps go here
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        # Only lets records through when DEBUG is False (i.e. in production).
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        # Emails ADMINS on ERROR-level records; production only (see filter).
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        # django.request covers 4xx/5xx request handling errors.
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| [
"chase.davis@gmail.com"
] | chase.davis@gmail.com |
75a5a25eea6bd1dcd30b17c4004f07a9d932559a | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2218/60751/271705.py | b7a6a56ab100877eae272baec1913325326980fa | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | num=input().split(",")
# Maximum product of any three values: after sorting it is either the three
# largest, or the two smallest (possibly both negative) times the largest.
num_ = [int(token) for token in num]
num_.sort()
print(max(num_[0]*num_[1]*num_[-1], num_[-1]*num_[-2]*num_[-3]))
"1069583789@qq.com"
] | 1069583789@qq.com |
6e73aba4d4c121fa20b2d8c1cb4e084034e71658 | 047f45abbdb6e38e36c2c9c920d1e9a7a5702040 | /src/simulations/id_mapper_client.py | 8fda3b72f8359de4989b74db633f47081cacd9d0 | [
"Apache-2.0"
] | permissive | DD-DeCaF/simulations | 4076da1bfd887869ca8c950f4352d632bbfc4fc3 | dab77166f301c0a12e6fed973147fb4add8a62c4 | refs/heads/devel | 2023-01-23T11:32:11.031122 | 2020-05-29T16:46:08 | 2020-05-29T16:46:08 | 73,704,410 | 0 | 2 | Apache-2.0 | 2020-12-08T14:29:46 | 2016-11-14T12:53:53 | Python | UTF-8 | Python | false | false | 1,723 | py | # Copyright 2018 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import requests
from simulations.app import app
from simulations.metrics import API_REQUESTS
from simulations.utils import log_time
logger = logging.getLogger(__name__)
def query_identifiers(object_ids, db_from, db_to):
    """Translate metabolite identifiers via the external id-mapper service.

    :param object_ids: list of identifiers to query
    :param db_from: source namespace of the identifiers, e.g. 'kegg'
    :param db_to: target namespace, e.g. 'bigg'
    :return: the service's id mapping (empty dict when nothing was asked for)
    """
    if len(object_ids) == 0:
        return {}
    payload = json.dumps(
        {"ids": object_ids, "dbFrom": db_from, "dbTo": db_to, "type": "Metabolite"}
    )
    logger.info(
        "query id mapper at %s with %s", app.config["ID_MAPPER_API"], str(payload)
    )
    # Time the request both for structured logging and Prometheus metrics.
    with log_time(operation=f"ID map request for ids: {object_ids}"):
        with API_REQUESTS.labels(
            "model", os.environ["ENVIRONMENT"], "id-mapper", app.config["ID_MAPPER_API"]
        ).time():
            response = requests.post(
                f"{app.config['ID_MAPPER_API']}/query", data=payload
            )
            return response.json()["ids"]
| [
"ali@kvikshaug.no"
] | ali@kvikshaug.no |
90487f0a3d2431a0d2349c1cf70844a83efb30db | 683a90831bb591526c6786e5f8c4a2b34852cf99 | /HackerRank/Python/Strings/2_String_split_and_join.py | 6cfd65612058b1056b5f246af8f8a8e6cf35edc7 | [] | no_license | dbetm/cp-history | 32a3ee0b19236a759ce0a6b9ba1b72ceb56b194d | 0ceeba631525c4776c21d547e5ab101f10c4fe70 | refs/heads/main | 2023-04-29T19:36:31.180763 | 2023-04-15T18:03:19 | 2023-04-15T18:03:19 | 164,786,056 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | # !/usr/bin/env python3
# https://www.hackerrank.com/challenges/python-string-split-and-join/problem
# tags: cadenas
def split_and_join(str):
    # NOTE: the parameter shadows the builtin `str`; kept because the name is
    # part of the HackerRank-supplied signature.
    """Split *str* on single spaces and rejoin the pieces with hyphens."""
    return "-".join(str.split(" "))

if __name__ == '__main__':
    line = input()
    print(split_and_join(line))
| [
"davbetm@gmail.com"
] | davbetm@gmail.com |
4840c3f91c339b220ffb6aad5c14d810d3ee6ff7 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/galex_j22163-2157/sdB_galex_j22163-2157_coadd.py | e9b5611ae5b1a826b06a0b20d0720c3ed879a03f | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from gPhoton.gMap import gMap
def main():
    # Single hard-coded gMap run: builds a 30s-cadence NUV count movie plus a
    # coadded count image for sdB star GALEX J22163-2157.
    # NOTE(review): cntfile lives under ".../sdBs/..." while cntcoaddfile uses
    # ".../sdB/..." (no trailing "s") -- presumably one of the two directory
    # names is a typo; confirm against the sibling generated scripts.
    gMap(band="NUV", skypos=[334.098125,-21.957308], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_galex_j22163-2157/sdB_galex_j22163-2157_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_galex_j22163-2157/sdB_galex_j22163-2157_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
a79f048e280ae3dc1c915f26311e81ccf38b9449 | d2dda11e125068512c5c0db0f24b80bc53c94ce3 | /LeetCode/Ex100/Ex110.py | 8d9d965bb2811d6d4ec769cdb289e6704ef5dcb4 | [] | no_license | JasonVann/CrackingCodingInterview | f90163bcd37e08f6a41525f9f95663d5f42dd8e6 | 8f9327a1879949f61b462cc6c82e00e7c27b8b07 | refs/heads/master | 2021-09-02T09:28:34.553704 | 2018-01-01T12:05:12 | 2018-01-01T12:05:12 | 110,519,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py |
class Ex110(object):
    """LeetCode 110: decide whether a binary tree is height-balanced."""

    def isBalanced(self, root):
        """Return True when every node's subtree heights differ by at most 1.

        :type root: TreeNode
        :rtype: bool
        """
        if root is None:
            return True
        _, balanced = self.helper(root, 1)
        return balanced

    def helper(self, root, level):
        """Return (deepest level under *root*, balanced flag) for the subtree."""
        if root is None:
            return level - 1, True
        left_depth, left_ok = self.helper(root.left, level + 1)
        right_depth, right_ok = self.helper(root.right, level + 1)
        depth = max(left_depth, right_depth)
        balanced = left_ok and right_ok and abs(left_depth - right_depth) <= 1
        return depth, balanced
'''
def isBalanced(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
if not root:
return True
def depth(node):
if not node: #leaves
return 0
left = depth(node.left) #left child's depth
right = depth(node.right) #right child's depth
if abs(left-right)>1:
raise Exception #stop recursion and report unbalance
return max(left, right)+1
try:
return abs(depth(root.left)-depth(root.right))<=1
except:
return False
'''
'''
public boolean isBalanced(TreeNode root) {
if(root==null){
return true;
}
return height(root)!=-1;
}
public int height(TreeNode node){
if(node==null){
return 0;
}
int lH=height(node.left);
if(lH==-1){
return -1;
}
int rH=height(node.right);
if(rH==-1){
return -1;
}
if(lH-rH<-1 || lH-rH>1){
return -1;
}
return Math.max(lH,rH)+1;
}
'''
| [
"jasonvanet@gmail.com"
] | jasonvanet@gmail.com |
e235cd6eadb51407845cd19a59fb52175c2ed1f0 | 070dc1e2c5643ef9ae80b24f56cf7b6624f65818 | /video_prediction/datasets/__init__.py | e65f64ab71a1ec5a63652990d7761430f7083e2e | [
"MIT"
] | permissive | anestisdotpy/video_prediction | 9091661e9d56460e6d1dab9e1e774a8ff81241bd | e9aecb8171123c1fe673a1f16864e3554c386cc5 | refs/heads/master | 2020-06-23T02:47:24.487425 | 2019-07-23T18:02:32 | 2019-07-23T18:02:32 | 198,482,843 | 0 | 0 | MIT | 2019-07-23T17:59:23 | 2019-07-23T17:59:22 | null | UTF-8 | Python | false | false | 1,099 | py | from .base_dataset import BaseVideoDataset
from .base_dataset import VideoDataset, SequenceExampleVideoDataset, VarLenFeatureVideoDataset
from .google_robot_dataset import GoogleRobotVideoDataset
from .sv2p_dataset import SV2PVideoDataset
from .softmotion_dataset import SoftmotionVideoDataset
from .kth_dataset import KTHVideoDataset
from .ucf101_dataset import UCF101VideoDataset
from .cartgripper_dataset import CartgripperVideoDataset
def get_dataset_class(dataset):
    """Resolve a dataset name (or alias) to its VideoDataset class.

    Raises ValueError when the name maps to nothing in this module or to
    something that is not a BaseVideoDataset subclass.
    """
    aliases = {
        'google_robot': 'GoogleRobotVideoDataset',
        'sv2p': 'SV2PVideoDataset',
        'softmotion': 'SoftmotionVideoDataset',
        'bair': 'SoftmotionVideoDataset',  # alias of softmotion
        'kth': 'KTHVideoDataset',
        'ucf101': 'UCF101VideoDataset',
        'cartgripper': 'CartgripperVideoDataset',
    }
    class_name = aliases.get(dataset, dataset)
    dataset_class = globals().get(class_name)
    if dataset_class is None or not issubclass(dataset_class, BaseVideoDataset):
        raise ValueError('Invalid dataset %s' % dataset)
    return dataset_class
| [
"alexleegk@gmail.com"
] | alexleegk@gmail.com |
78221bbf47e616235915cfb1dc10c45637dc777a | bd741fe3909ae9260232b724cbb395823cf68834 | /python/setup.py | ba010859a7003260e868f6d6f749200202ce1755 | [
"MIT"
] | permissive | simondlevy/NengoCPP | a1b235dd37889c0cf8a2a2b3e6f693a27d379174 | ae09d4e96d866dd4a3748867cf8eb0df5d0acc8f | refs/heads/master | 2020-03-21T19:38:06.204688 | 2019-06-15T18:17:00 | 2019-06-15T18:17:00 | 138,961,016 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | #!/usr/bin/env python
'''
Python distutils setup for the NengoCPP example
Copyright 2018 Simon D. Levy
MIT License
'''
from distutils.core import setup
# NOTE(review): install_requires is a setuptools keyword; plain distutils
# ignores it, so the 'PIL' dependency is presumably not enforced on install.
# Confirm whether `from setuptools import setup` was intended.
setup (name = 'nengo_pidcontrol',
       version = '0.1',
       install_requires = ['PIL'],
       description = '',
       py_modules = ['nengo_pidcontrol',],
       author='Simon D. Levy',
       author_email='simon.d.levy@gmail.com',
       license='MIT',
       platforms='Linux; Windows; OS X'
       )
| [
"simon.d.levy@gmail.com"
] | simon.d.levy@gmail.com |
51f2f9f4db7a0a374df9c157a737b8e90be9f0fe | 1f68b6f9f55afaa7cb32df262f4fe0864472da05 | /人工智能/人工智能入门/数据可视化/die_visual.py | 6c8f873b98271fc5d514ee8841c2c8b03fcd0baa | [] | no_license | faker-hong/testOne | 7c4496362cb5495c25c640076102fe0704f8552f | 768edc4a5526c8972fec66c6a71a38c0b24a1451 | refs/heads/master | 2022-12-04T14:47:53.614685 | 2020-10-30T03:17:50 | 2020-10-30T03:17:50 | 196,514,862 | 1 | 0 | null | 2022-11-22T02:43:32 | 2019-07-12T05:35:09 | Python | UTF-8 | Python | false | false | 498 | py | from 数据可视化.die import Die
import pygal
die = Die()
# Roll the die 100 times and record every outcome.
result = [die.roll() for _ in range(100)]
# Tally how often each face (1 .. num_size) came up.
frequencies = [result.count(face) for face in range(1, die.num_size + 1)]
print(frequencies)
# Visualize the results as a bar chart.
hist = pygal.Bar()
hist.title = '97'
hist.x_labels = [1, 2, 3, 4, 5, 6]
hist.x_title = "result"
hist.y_title = "frequency"
hist.add('D6', frequencies)
hist.render_to_file('die_visual.svg') | [
"42666723+hongcheng97@users.noreply.github.com"
] | 42666723+hongcheng97@users.noreply.github.com |
91d6b6ddd88603208daece87feef4b07ccad2846 | 1408ac8b2ed54ec7a7a8e3660bbb7389fbade037 | /Auditions/AuditionLevel/AuditionLevel.py | baf76f63f292391175f556c99955d745aa9117b9 | [] | no_license | Decennium/CodeCombat | 72a2df7288c5d5c3cb88e08425cc0e6507f7a1d5 | 068b937ae7cfc58565f2c5e1f50281c50808ccf1 | refs/heads/master | 2021-08-23T19:37:53.160014 | 2017-12-06T07:52:19 | 2017-12-06T07:52:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,618 | py | # http://codecombat.com/play/level/audition-level
summonTypes = ['paladin']
def summonTroops():
    """Summon the next troop type in rotation when we can afford it."""
    # Rotate through summonTypes based on how many units have been built.
    type = summonTypes[len(hero.built) % len(summonTypes)]
    if hero.gold > hero.costOf(type):
        hero.summon(type)
def commandTroops():
    """Give an order to every friendly paladin."""
    # NOTE: `index` is unused; enumerate is kept only for its original shape.
    for index, friend in enumerate(hero.findFriends()):
        if friend.type == 'paladin':
            CommandPaladin(friend)
def CommandPaladin(paladin):
    """Issue one order to *paladin*: heal the hero, shield, or attack."""
    if paladin.canCast("heal"):
        # Only spend the heal once the hero drops below 60% health.
        if hero.health < hero.maxHealth * 0.6:
            # BUG FIX: was `target = self`; `self` is undefined at module
            # level (it was only a deprecated CodeCombat alias for the hero).
            target = hero
            if target:
                hero.command(paladin, "cast", "heal", target)
    elif paladin.health < 100:
        # Wounded paladin with no heal ready: raise the shield.
        hero.command(paladin, "shield")
    else:
        target = hero.findNearestEnemy()
        hero.command(paladin, "attack", target)
def moveTo(position, fast=True):
    """Move toward *position*, jumping when allowed, ready, and far enough."""
    if (hero.isReady("jump") and hero.distanceTo(position) > 10 and fast):
        hero.jumpTo(position)
    else:
        hero.move(position)
def attack(target):
    """Close the distance, then use the best ready ability on *target*.

    Priority when in range: bash, then chain-lightning, then plain attack.
    """
    if target:
        if (hero.distanceTo(target) > 10):
            moveTo(target.pos)
        elif (hero.isReady("bash")):
            hero.bash(target)
        elif (hero.canCast('chain-lightning', target)):
            hero.cast('chain-lightning', target)
        elif (hero.isReady("attack")):
            hero.attack(target)
        else:
            # Every ability is on cooldown this tick -- do nothing.
            pass
# Main game loop: summon/command troops every tick, then either follow a
# player-placed flag or fight the nearest enemy.
while True:
    flag = hero.findFlag()
    summonTroops()
    commandTroops()
    if flag:
        # Player-placed flags take priority over fighting.
        hero.pickUpFlag(flag)
    else:
        enemy = hero.findNearestEnemy()
        if enemy:
            attack(enemy)
    # With no flag down, attack() picks the ability (bash / chain-lightning /
    # plain attack) once the hero is in range.
| [
"vadim-job-hg@yandex.ru"
] | vadim-job-hg@yandex.ru |
eb9beae6dd40e83e2260ddf0a7862e298b36fe3a | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/storagesync/v20170605preview/get_sync_group.py | f3f917cbf35db452cf5c6afd92530a503ff78d3f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,972 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetSyncGroupResult',
'AwaitableGetSyncGroupResult',
'get_sync_group',
]
@pulumi.output_type
class GetSyncGroupResult:
    """
    Sync Group object.

    NOTE: generated code (see the file header) -- the @pulumi.output_type
    decorator stores the constructor values via pulumi.set and serves them
    back through the @pulumi.getter properties below.
    """
    def __init__(__self__, id=None, name=None, sync_group_status=None, type=None, unique_id=None):
        # Validate each field is a str when provided, then stash it in the
        # pulumi-managed backing store read by the properties below.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if sync_group_status and not isinstance(sync_group_status, str):
            raise TypeError("Expected argument 'sync_group_status' to be a str")
        pulumi.set(__self__, "sync_group_status", sync_group_status)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if unique_id and not isinstance(unique_id, str):
            raise TypeError("Expected argument 'unique_id' to be a str")
        pulumi.set(__self__, "unique_id", unique_id)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The id of the resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="syncGroupStatus")
    def sync_group_status(self) -> str:
        """
        Sync group status
        """
        return pulumi.get(self, "sync_group_status")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="uniqueId")
    def unique_id(self) -> Optional[str]:
        """
        Unique Id
        """
        return pulumi.get(self, "unique_id")
class AwaitableGetSyncGroupResult(GetSyncGroupResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `yield` turns __await__ into a generator function, which is
        # what the await protocol requires; it never actually suspends.
        if False:
            yield self
        return GetSyncGroupResult(
            id=self.id,
            name=self.name,
            sync_group_status=self.sync_group_status,
            type=self.type,
            unique_id=self.unique_id)
def get_sync_group(resource_group_name: Optional[str] = None,
                   storage_sync_service_name: Optional[str] = None,
                   sync_group_name: Optional[str] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSyncGroupResult:
    """
    Sync Group object.

    NOTE: generated code (see the file header); performs the
    azure-native getSyncGroup invoke and wraps the result.

    :param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
    :param str storage_sync_service_name: Name of Storage Sync Service resource.
    :param str sync_group_name: Name of Sync Group resource.
    """
    # Invoke arguments use the camelCase names expected by the provider.
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['storageSyncServiceName'] = storage_sync_service_name
    __args__['syncGroupName'] = sync_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's version when the caller didn't pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:storagesync/v20170605preview:getSyncGroup', __args__, opts=opts, typ=GetSyncGroupResult).value
    return AwaitableGetSyncGroupResult(
        id=__ret__.id,
        name=__ret__.name,
        sync_group_status=__ret__.sync_group_status,
        type=__ret__.type,
        unique_id=__ret__.unique_id)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
a28abb90239b0ec78dbdbbcb02137ed32a1ca25a | 6371acdb640e62e4e6addac2ba1aa70002a8c1b1 | /Algorithms/pySINDy/env/lib/python3.6/site-packages/ipyparallel/tests/test_mongodb.py | 81b2b171b88ba68c65515ad28880228e24bdef97 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | M-Vause/SEED | 263307152ebac1e4f49cd81dcd5207ecbdf51139 | cda94a02a5ef47a1e9a885d330eef2821301ebed | refs/heads/master | 2022-12-13T20:11:58.893994 | 2020-04-27T16:10:09 | 2020-04-27T16:10:09 | 252,790,026 | 3 | 3 | MIT | 2022-12-08T01:52:05 | 2020-04-03T16:55:10 | Jupyter Notebook | UTF-8 | Python | false | false | 1,395 | py | """Tests for mongodb backend"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from unittest import TestCase
import pytest
from . import test_db
c = None  # module-global MongoClient shared with TestMongoBackend (None when unavailable)
@pytest.fixture(scope='module')
def mongo_conn(request):
    """Module-scoped fixture: connect to MongoDB, dropping the test DB afterwards.

    Sets the module global ``c`` to a MongoClient, or to None when no server
    can be reached (create_db then skips the tests).
    """
    global c
    try:
        from pymongo import MongoClient
    except ImportError:
        pytest.skip("Requires mongodb")
    conn_kwargs = {}
    if 'DB_IP' in os.environ:
        conn_kwargs['host'] = os.environ['DB_IP']
    if 'DBA_MONGODB_ADMIN_URI' in os.environ:
        # On ShiningPanda, we need a username and password to connect. They are
        # passed in a mongodb:// URI. (NOTE: this overrides DB_IP's host.)
        conn_kwargs['host'] = os.environ['DBA_MONGODB_ADMIN_URI']
    if 'DB_PORT' in os.environ:
        conn_kwargs['port'] = int(os.environ['DB_PORT'])
    try:
        c = MongoClient(**conn_kwargs)
    except Exception:
        c = None
    if c is not None:
        # Clean up: drop the scratch database once the module's tests finish.
        request.addfinalizer(lambda : c.drop_database('iptestdb'))
    return c
# BUG FIX: the marker was misspelled `usefixture`; pytest only recognizes
# `usefixtures`, so the unknown mark was silently ignored and the mongo_conn
# fixture never ran for this class.
@pytest.mark.usefixtures('mongo_conn')
class TestMongoBackend(test_db.TaskDBTest, TestCase):
    """MongoDB backend tests"""
    def create_db(self):
        """Build a MongoDB-backed task DB, skipping when the server/driver is absent."""
        try:
            from ipyparallel.controller.mongodb import MongoDB
            return MongoDB(database='iptestdb', _connection=c)
        except Exception:
            pytest.skip("Couldn't connect to mongodb")
| [
"58262117+M-Vause@users.noreply.github.com"
] | 58262117+M-Vause@users.noreply.github.com |
a4609ee7d40e60103c1c6d4a1f9cb1b4d861a532 | 45ba55b4fbdaf1657fde92beaeba4f173265afcd | /strawberry/fastapi/context.py | 6a23a0e648a9cb7d607eb89ef9083ac1c2b8e39a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | strawberry-graphql/strawberry | af96afd4edd1788c59e150597a12501fbc7bf444 | 6d86d1c08c1244e00535840d9d87925431bc6a1c | refs/heads/main | 2023-08-30T03:34:12.929874 | 2023-08-24T12:01:09 | 2023-08-24T12:01:09 | 162,690,887 | 3,408 | 529 | MIT | 2023-09-14T21:49:44 | 2018-12-21T08:56:55 | Python | UTF-8 | Python | false | false | 662 | py | from typing import Any, Dict, Optional, Union
from starlette.background import BackgroundTasks
from starlette.requests import Request
from starlette.responses import Response
from starlette.websockets import WebSocket
# Context supplied by user code: a BaseContext subclass instance or a plain dict.
CustomContext = Union["BaseContext", Dict[str, Any]]
# Context shape after request/response/websocket objects are merged in
# (presumably by the FastAPI router -- confirm against router.py).
MergedContext = Union[
    "BaseContext", Dict[str, Union[Any, BackgroundTasks, Request, Response, WebSocket]]
]


class BaseContext:
    """Base class for custom GraphQL contexts in the FastAPI integration.

    ``request``/``background_tasks``/``response`` start as ``None`` and are
    presumably filled in by the integration after construction -- confirm.
    """
    connection_params: Optional[Any] = None
    def __init__(self) -> None:
        self.request: Optional[Union[Request, WebSocket]] = None
        self.background_tasks: Optional[BackgroundTasks] = None
        self.response: Optional[Response] = None
| [
"noreply@github.com"
] | strawberry-graphql.noreply@github.com |
d40d835030a9e00608bc1155ed3b3db4ca2779e6 | 033da72a51c76e5510a06be93229a547a538cf28 | /Data Engineer with Python Track/22. Transactions and Error Handling in SQL Server/Chapter/03. Transactions in SQL Server/10-Doomed transactions.py | 29523ae21c339f0d22cbec5dee2878d9afa29992 | [] | no_license | ikhwan1366/Datacamp | d5dcd40c1bfeb04248977014260936b1fb1d3065 | 7738614eaebec446842d89177ae2bc30ab0f2551 | refs/heads/master | 2023-03-06T13:41:06.522721 | 2021-02-17T22:41:54 | 2021-02-17T22:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | '''
Doomed transactions
You want to insert the data of two new customers into the customer table. You prepare a script so that, if an error occurs, the transaction rolls back and you get the error message. You want to control this using XACT_ABORT in combination with XACT_STATE.
Instructions
100 XP
- Use the appropriate setting of XACT_ABORT.
- Check if there is an open transaction.
- Rollback the transaction.
- Select the error message.
'''
-- Use the appropriate setting
-- NOTE(review): the exercise is titled "Doomed transactions"; a transaction is
-- only *doomed* (XACT_STATE() = -1) when XACT_ABORT is ON -- confirm whether
-- OFF is really the intended setting here.
SET XACT_ABORT OFF;
BEGIN TRY
	BEGIN TRAN;
		INSERT INTO customers VALUES('Mark', 'Davis', 'markdavis@mail.com', '555909090');
		INSERT INTO customers VALUES('Dylan', 'Smith', 'dylansmith@mail.com', '555888999');
	COMMIT TRAN;
END TRY
BEGIN CATCH
	-- Check if there is an open transaction
	IF XACT_STATE() <> 0
		-- Rollback the transaction
		ROLLBACK TRAN;
	-- Select the message of the error
	SELECT ERROR_MESSAGE() AS Error_message;
END CATCH
| [
"surel.chandrapratama@gmail.com"
] | surel.chandrapratama@gmail.com |
cb33f242a73fc2923fcc835d8c1946a430e07cbc | 2136dd727f15133b2ee000dadcfa44b7e29d3ff4 | /Day5/dengluTest.py | 0572c98a88c15ab45b23d6226e2407d6d0ab4da9 | [] | no_license | zhaolanxiang113/Weekend112 | b6f56627a17d279db19ed6b409333d8c14c6bb67 | 74f3b54b246ff43a773c8b832fd9ab3af38fd509 | refs/heads/master | 2021-08-23T04:13:52.933570 | 2017-12-03T07:11:49 | 2017-12-03T07:11:49 | 112,907,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | import unittest
from selenium import webdriver
import time
class DengLuTest(unittest.TestCase):
    # Triple double quotes denote a docstring -- a kind of comment that also
    # shows up in generated documentation and test reports.
    """Test cases for the login module"""
    def setUp(self):
        # Open the browser.
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(30)
        # After a browser upgrade, maximize_window may need to be disabled;
        # to use it, the browser version and the driver version must match.
        self.driver.maximize_window()
    def tearDown(self):
        time.sleep(5)
        self.driver.quit()
    def test_denglu(self):
        """Login test: normal (happy-path) case"""
        driver = self.driver
        driver.get("http://localhost/index.php?m=user&c=public&a=login")
        driver.find_element_by_id("username").send_keys("testing")
        driver.find_element_by_id("password").send_keys("testing123")
        driver.find_element_by_class_name("login_btn").click()
        print("当前用户名:testing")
| [
"51Testing"
] | 51Testing |
e5be76fbbcdcd5660d468cf6ecba45afdcd662ab | 750d8ade6abc2b3bd6a24e660a4992114db6ac0c | /lib/music/gui/window.py | de2e80e9ea32e5895ffd45bdfb2ec12e9216241b | [] | no_license | dskrypa/music_manager | 8a00a4bd7b32a87dab2441614c94346fa87c4f13 | ad7265fbd203962a4bf9cf6444c8e10d561a307c | refs/heads/main | 2023-08-09T06:26:46.592118 | 2023-08-08T11:38:08 | 2023-08-08T11:38:08 | 234,730,172 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,065 | py | """
Extended Window class from PySimpleGUI
:author: Doug Skrypa
"""
import logging
import signal
from typing import Callable
from weakref import WeakSet
from PySimpleGUI import Window as _Window
from .utils import FinishInitMixin
__all__ = ['Window']
log = logging.getLogger(__name__)
class Window(_Window):
    """PySimpleGUI Window subclass that makes Ctrl+C (SIGINT) usable.

    A process-wide SIGINT handler is installed when the first Window is
    constructed, and every live Window is tracked in a WeakSet so the handler
    can wake each window's event loop.
    """
    __registered_sigint_handler = None  # None = never registered; True/False afterwards
    __instances = WeakSet()  # live Window objects (weak refs, so closed windows drop out)
    def __init__(self, *args, finalize_callback: Callable = None, **kwargs):
        # Install the SIGINT handler once, on first Window construction.
        if self.__registered_sigint_handler is None:
            self.register_sigint_handler()
        # Optional callable invoked at the end of finalize().
        self._finalize_callback = finalize_callback
        super().__init__(*args, **kwargs)
        self.__instances.add(self)
    def _sigint_fix(self):
        """Continuously re-registers itself to be called every 250ms so that Ctrl+C is able to exit tk's mainloop"""
        self.TKroot.after(250, self._sigint_fix)
    def finalize(self):
        """Finalize the window, run deferred init, and start the SIGINT keep-alive timer."""
        super().finalize()
        FinishInitMixin.finish_init_all()
        self.TKroot.after(250, self._sigint_fix)  # kick off the 250ms keep-alive loop
        if (callback := self._finalize_callback) is not None:
            callback()
        return self
    Finalize = finalize  # keep PySimpleGUI's capitalized alias working
    @classmethod
    def unregister_sigint_handler(cls):
        """Restore the default SIGINT disposition if ours was registered."""
        if cls.__registered_sigint_handler:
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            cls.__registered_sigint_handler = False
    @classmethod
    def register_sigint_handler(cls):
        """Install Window._handle_sigint as the process-wide SIGINT handler."""
        log.debug('Registering Window._handle_sigint to handle SIGINT')
        signal.signal(signal.SIGINT, Window._handle_sigint)
        cls.__registered_sigint_handler = True
    @classmethod
    def _handle_sigint(cls, *args):
        """
        With just the _sigint_fix loop, the tkinter stdlib python code ignores SIGINT - this is required to actually
        handle it immediately.
        """
        # Wake every live window's read loop with a dummy event; the
        # AttributeError guard presumably covers windows without
        # write_event_value available yet -- confirm.
        for inst in cls.__instances:
            try:
                inst.write_event_value(None, None)
            except AttributeError:
                pass
    def is_maximized(self) -> bool:
        """Return True when the window is maximized ('zoomed' Tk state)."""
        return self.TKroot.state() == 'zoomed'
| [
"dskrypa@gmail.com"
] | dskrypa@gmail.com |
ab8fca3ba9b9481bf76c0f4f3e210cfeb9dd2c62 | e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d | /a10sdk/core/slb/slb_health_stat_oper.py | d25b96bf06dca0d98f8468c55a6dbd2e3769db38 | [
"Apache-2.0"
] | permissive | amwelch/a10sdk-python | 4179565afdc76cdec3601c2715a79479b3225aef | 3e6d88c65bd1a2bf63917d14be58d782e06814e6 | refs/heads/master | 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null | UTF-8 | Python | false | false | 3,562 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class HealthCheckList(A10BaseClass):
    """AXAPI "health-check-list" entry (no CRUD support; use the parent).

    Every field defaults to an empty string and is overwritten by whatever
    keyword arguments the caller supplies. Fields: status, retries,
    down_state, up_retries, down_cause, partition_id, up_cause, ip_address,
    total_retry, health_monitor, port.

    :param DeviceProxy: device proxy for REST operations and session
        handling; see `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "health-check-list"
        self.DeviceProxy = ""
        # Placeholder defaults for every AXAPI field of this object.
        for attr in (
            "status", "retries", "down_state", "up_retries", "down_cause",
            "partition_id", "up_cause", "ip_address", "total_retry",
            "health_monitor", "port",
        ):
            setattr(self, attr, "")
        # Caller-supplied values override the defaults above.
        for key, value in kwargs.items():
            setattr(self, key, value)
class Oper(A10BaseClass):
    """Operational-state container for health-stat (no CRUD; use the parent).

    Holds `health_check_list`, a list of HealthCheckList-shaped entries.

    :param DeviceProxy: device proxy for REST operations and session
        handling; see `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "oper"
        self.DeviceProxy = ""
        self.health_check_list = []
        # Caller-supplied values override the defaults above.
        for key, value in kwargs.items():
            setattr(self, key, value)
class HealthStat(A10BaseClass):
"""Class Description::
Operational Status for the object health-stat.
Class health-stat supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/slb/health-stat/oper`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "health-stat"
self.a10_url="/axapi/v3/slb/health-stat/oper"
self.DeviceProxy = ""
self.oper = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
| [
"doug@parksidesoftware.com"
] | doug@parksidesoftware.com |
36121fdf1958177b680c4adf4f0ba290b78f8c7d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/1102.py | f45a308ef04109b1a46b0769d4274aa0069c4829 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | import numpy as np
import sys
#dat=open("small.in").readlines()
dat=open("A-small-attempt1.in").readlines()
ntests=int(dat[0])
i=1
for j in xrange(ntests):
ans0=int(dat[i].strip('\n'))
i=i+1
grid0=np.array([[int(y) for y in x.strip('\n').split(' ')] for x in dat[i:i+4]])
i=i+4
ans1=int(dat[i].strip('\n'))
i=i+1
grid1=np.array([[int(y) for y in x.strip('\n').split(' ')] for x in dat[i:i+4]])
i=i+4
#Possibilites after first
first_row=grid0[ans0-1,]
#Possibilities after second
second_row=grid1[ans1-1,]
#Which leaves...
both=np.intersect1d(first_row,second_row)
#print first_row,second_row,both
if len(both)==1:
print "Case #%d: %d"%(j+1,both[0])
elif len(both)==0:
print "Case #%d: Volunteer cheated!"%(j+1)
else:
print "Case #%d: Bad magician!"%(j+1)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
b0cf7d3b3f1d9800be47e1905e36043ddd16474c | febcab8c3bbcccfcaa18d2168353a7897a35fe80 | /bliski_publikator/institutions/migrations/0006_institution_monitorings.py | 909ae2d96f0d4b90900aa9f003ddf6033efe5b69 | [
"MIT",
"BSD-3-Clause"
] | permissive | watchdogpolska/bliski_publikator | 256d6e4e24bb7ea9821c409b491dd24e70755082 | f67ec8edf16f803ceef1a1d1f5a2b4699117895c | refs/heads/master | 2020-12-25T17:00:27.555894 | 2018-08-13T20:02:51 | 2018-08-13T20:02:51 | 55,450,877 | 1 | 3 | MIT | 2018-08-13T20:02:52 | 2016-04-04T22:46:57 | Python | UTF-8 | Python | false | false | 632 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-09 00:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('monitorings', '0008_auto_20160509_0031'),
('institutions', '0005_remove_institution_monitorings'),
]
operations = [
migrations.AddField(
model_name='institution',
name='monitorings',
field=models.ManyToManyField(blank=True, through='monitorings.MonitoringInstitution', to='monitorings.Monitoring', verbose_name='Monitorings'),
),
]
| [
"naczelnik@jawnosc.tk"
] | naczelnik@jawnosc.tk |
0b9567943df58092230d0f1d716d86717c9d7c22 | aef9d6b8bb21957fa8b2235872bca51f64e7b5ff | /django101/todoapp/migrations/0001_initial.py | 0b50bc857d2c64737fe726df40e616c51bf93192 | [] | no_license | dreadlordow/Softuni-Python-Web | 3cf9cc234960bb47f1c3c2a91a1a80d0fc499fd6 | 784faccbe15023536917d610384222d839a63bae | refs/heads/master | 2023-08-28T19:39:57.149514 | 2021-02-23T16:28:55 | 2021-02-23T16:28:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | # Generated by Django 3.1.5 on 2021-01-16 22:49
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('description', models.TextField()),
('is_done', models.BooleanField()),
],
),
]
| [
"georgipavlov1913@gmail.com"
] | georgipavlov1913@gmail.com |
617d539ddc913989973f9e820c92c115d270af23 | a9e15e6bdaa45c9fdd667f5f58a537a44d5bd67c | /diy/4.py | 53349c084629084a5da41dc72bd7751b99e6f184 | [] | no_license | tawateer/leetcode | 782f4f8b4a0730ec990d3c5d0e3d80cc4792f0b3 | 0ec784dc1da2577b823977fd858f4d55a059f327 | refs/heads/master | 2020-04-22T03:31:02.668858 | 2019-10-25T09:20:31 | 2019-10-25T09:20:31 | 170,089,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | #!/bin/env python
# -*- coding:utf-8 -*-
"""
在 1 到 n 的数字中,有且只有唯一的一个数字 m 重复出现了,其它的数字都只出现一次。请把这个数字找出来、
"""
class Solution(object):
def findDuplicate(self, nums):
target = 0
for i in nums:
target ^= i
for i in range(1, len(nums)):
target ^= i
return target
s = Solution()
print s.findDuplicate([1, 2, 3, 4, 4])
print s.findDuplicate([1, 2, 2, 3, 4])
| [
"liningning@wandoujia.com"
] | liningning@wandoujia.com |
05449101e333cfaae259ede36d115e888e4a2ab9 | f20e3f75644ce8eb718c22ac9800e41aa4da90dc | /round C/wall.py | e3c88e8292dadbaeef7263246467efc199a96305 | [] | no_license | rocket3989/KickStart2020 | 7de504d88c00c86e1d394a797f01d2682826e4bf | 513c4e6305b31e16350715840e35903b3fcdd89a | refs/heads/master | 2021-04-13T02:38:44.703426 | 2020-09-28T21:02:25 | 2020-09-28T21:02:25 | 249,129,624 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | from collections import defaultdict
for tc in range(int(input())):
R, C = [int(x) for x in input().split()]
wall = []
for i in range(R):
wall.append(input().strip())
above = defaultdict(set)
below = defaultdict(set)
for r in range(R):
for c in range(C):
el = wall[r][c]
if r > 0:
if wall[r - 1][c] != el:
above[el].add(wall[r - 1][c])
if r < R - 1:
if wall[r + 1][c] != el:
below[el].add(wall[r + 1][c])
candidates = set(wall[-1])
order = []
placed = set()
while candidates:
for candidate in list(candidates):
for other in below[candidate]:
if other not in placed:
candidates.remove(candidate)
break
else:
order.append(candidate)
candidates.remove(candidate)
placed.add(candidate)
for val in above[candidate]:
candidates.add(val)
if len(order) == len(below):
print("Case #{}: {}".format(tc + 1, ''.join(order)))
else:
print("Case #{}: {}".format(tc + 1, -1))
| [
"rocket3989@gmail.com"
] | rocket3989@gmail.com |
91c9505ee1c1e349d16a2b7cac17bfa2367e0f21 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /mediastore-data_write_1/object_delete.py | dd3266bb35468f0cb8fd5427b86b4d182020811a | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/mediastore-data/delete-object.html
if __name__ == '__main__':
"""
describe-object : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/mediastore-data/describe-object.html
get-object : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/mediastore-data/get-object.html
put-object : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/mediastore-data/put-object.html
"""
parameter_display_string = """
# path : The path (including the file name) where the object is stored in the container. Format: <folder name>/<folder name>/<file name>
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("mediastore-data", "delete-object", "path", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
2832aa7a7a0496dee9f4feb0edf79292b112e86f | 69a43560780342b466360687099eb593de9fec93 | /test.py | 830e69dedc40bbdfdb7df194d4a0d9b276daa0ba | [] | no_license | JamCrumpet/email_generator | e35f7f99654a189f0a6aff832ca6207df13b6a4e | bdb96cd4b02069bce5e1d6d2e70e0233ec7cd71c | refs/heads/master | 2023-01-10T02:06:44.579407 | 2020-11-10T13:38:55 | 2020-11-10T13:38:55 | 287,095,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,849 | py | #filename = "female_first_names.txt"
#with open(filename) as file_object:
# lines = file_object.readlines()
#for line in lines:
# print(line.split())
#with open(filename,"r") as file_object:
# """ Removes numbers and percentage symbol from text file """
# contents = file_object.read()
# print(contents.replace(str(9),"").replace(str(8), "").replace(str(7),"").replace(str(6), "").replace(str(5), "")\
# .replace(str(4), "").replace(str(3), "").replace(str(2), "").replace(str(1), "").replace(str(0), "")\
# .replace("%", "").lstrip("\t").replace(" ", ""))
#class Female_email():
# """An email address with a female name"""
# def __init__(self,female_first_name,last_name,domain):
# self.female_first_name = rd_female_first_name
# self.last_name = rd_last_name
# self.domain = rd_domain
#self.rd1 = female_first_name + last_name + "@" + domain
#self.rd2 = female_first_name + "." + last_name + "@" + domain
# def random_femail_email(self):
# print(random.choice(self.rd1, self.rd2))
#print("Randomly generated email:")
#Female_email.random_femail_email()
#class Male_email():
# """An email address with a male name"""
# def __init___(self,male_first_name,last_name,domain):
# self.male_first_name = male_first_name
# self.last_name = last_name
# self.domain = domain
#print(rd_male_first_name + rd_last_name + "@" +rd_domain)
import pandas as pd
import random
# read CSV files and saves as dataframes
df_domains = pd.read_csv("domains.csv")
df_female_first_name = pd.read_csv("female_first_names.csv")
df_last_names = pd.read_csv("last_names.csv")
df_male_first_name = pd.read_csv("male_first_names.csv")
# extract necessary columns
column_domains = df_domains["domain"]
column_female_first_name = df_female_first_name["name"]
column_last_name = df_last_names["lastname"]
column_male_first_name = df_male_first_name["name"]
# pick random values from column
rd_domain = random.choice(column_domains)
rd_female_first_name = random.choice(column_female_first_name)
rd_last_name = random.choice(column_last_name)
rd_male_first_name = random.choice(column_male_first_name)
symbols = ["-", "_", "."]
# Random emails with female first name
rd_fe1 = rd_female_first_name + rd_last_name + "@" + rd_domain
rd_fe2 = rd_female_first_name + str(random.randrange(81,99)) + "@" + rd_domain
rd_fe3 = rd_female_first_name + random.choice(symbols) + rd_last_name + "@" + rd_domain
rd_fe = rd_fe1, rd_fe2, rd_fe3
# Random email with male first name
rd_me1 = rd_male_first_name + rd_last_name + "@" + rd_domain
rd_me2 = rd_male_first_name + str(random.randrange(81,99)) + "@" + rd_domain
rd_me3 = rd_male_first_name + random.choice(symbols) + rd_last_name + "@" + rd_domain
rd_me = rd_me1, rd_me2, rd_me3
print('''
Email Generator
==================
''')
length = input('email total?')
length = int(length)
print('\nhere are your passwords:')
for email in str(1):
email_address = ''
for c in range(length):
email_address += random.choice(rd_fe) + "\n"
print(email_address + "\n")
############
def femail_email_genrator():
prompt = "Type y to generate an email / type quit to cancel code"
message = ""
while message != "quit":
message = input(prompt)
if message != "quit":
for email in rd_fe:
print(random.choice(rd_fe))
femail_email_genrator()
#########
# Random email with male first name
rd_me1 = rd_male_first_name + rd_last_name + "@" + rd_domain
rd_me2 = rd_male_first_name + str(random.randrange(81,99)) + "@" + rd_domain
rd_me3 = rd_male_first_name + random.choice(symbols) + rd_last_name + "@" + rd_domain
rd_me = rd_me1, rd_me2, rd_me3 | [
"noreply@github.com"
] | JamCrumpet.noreply@github.com |
c14e4ab6c989ae7577fcc2e88f867fe373ad43d9 | 0528a8b2cbdcb3f64ce8183aa04fe8a515f5801a | /libcms/settings.py | 5afb31eeba5e5724f544de83f18866ed7edc06ea | [] | no_license | wd5/talk.arbicon | 52c17578eaeaaeab0ae620cf101b2b07eb63b9d9 | 12199997447c051612c84dcb2febcde2386fcbb6 | refs/heads/master | 2021-01-17T23:37:22.793853 | 2012-06-04T06:54:56 | 2012-06-04T06:54:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,786 | py | # -*- coding: utf-8 -*-
import os
import sys
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__)) + '/'
sys.path.insert(0, os.path.join(PROJECT_PATH, "apps"))
sys.path.insert(0, os.path.join(PROJECT_PATH, "vendors"))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Moscow'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'ru-RU'
gettext = lambda s: s
LANGUAGES = (
('ru', gettext('Russian')),
('en', gettext('English')),
# ('tt', _('Tatar')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
# ('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# )),
# 'django.template.loaders.eggs.Loader',
)
#TEMPLATE_LOADERS = (
# ('django.template.loaders.cached.Loader', (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# )),
# 'django.template.loaders.eggs.Loader',
# )
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
#'django.contrib.messages.context_processors.messages',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'libcms.urls'
AUTHENTICATION_BACKENDS = (
'arbicon.auth.ldap_auth_backend.LdapBackend',
'django.contrib.auth.backends.ModelBackend',
'guardian.backends.ObjectPermissionBackend',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# 'django.contrib.admindocs',
# vendor apps
'mptt',
'guardian',
'debug_toolbar',
# cms apps
'core',
'index',
'accounts',
'pages',
'forum',
'arbicon',
'arbicon_polls',
)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# guardian settings
ANONYMOUS_USER_ID = -1
LOGIN_REDIRECT_URL = "/"
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
#LOCALE_INDEPENDENT_PATHS = (
# r'^/$',
#)
from local_settings import * | [
"dostovalov@gmail.com"
] | dostovalov@gmail.com |
d3d91f91295649651652951a0785ed15a2a47eb0 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /kiX7WjSFeTmBYcEgK_1.py | 44b53df8479d2eca5cb4445ea4294227621ff84c | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | """
Create a function that takes an integer list and return the biggest between
**positive** sum, **negative** sum, or **0s** count. The major is understood
as the greatest absolute.
`l = [1,2,3,4,0,0,-3,-2]`, the function has to return `10`, because:
* Positive sum = 1+2+3+4 = 10
* Negative sum = (-3)+(-2) = -5
* 0s count = 2 (there are two zeros in list)
### Examples
major_sum([1, 2, 3, 4, 0, 0, -3, -2]) ➞ 10
major_sum([-4, -8, -12, -3, 4, 7, 1, 3, 0, 0, 0, 0]) ➞ -27
major_sum([0, 0, 0, 0, 0, 1, 2, -3]) ➞ 5
# Because -3 < 1+2 < 0sCount = 5
### Notes
* All numbers are integers.
* There aren't empty lists.
* All tests are made to return only one value.
"""
def major_sum(lst):
pos = sum([i for i in lst if i > 0])
neg = sum([i for i in lst if i < 0])
zero = lst.count(0)
if abs(neg) > pos and abs(neg) > zero :
return neg
else:
return max(pos, zero)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
1e6a6bbec7aebbcf5b92eda2fb644ada06f33d6e | 9ee3ef54384da3e08ad367dfc2a350d487caf0ee | /home/migrations/0003_auto_20210101_1212.py | 27a829564276bb52f40673aa392b6a1d46f6915d | [] | no_license | crowdbotics-apps/test-app-for-segmen-17778 | c9cc0d1d5baffeb456e96a76c6ffdcfe642efeb8 | 9b012d3e3c018871b1a86fc4935bace98b962c79 | refs/heads/master | 2023-02-08T13:06:52.480483 | 2021-01-04T13:32:52 | 2021-01-04T13:32:52 | 325,950,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | # Generated by Django 2.2.17 on 2021-01-01 12:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("home", "0002_load_initial_data"),
]
operations = [
migrations.AddField(
model_name="homepage",
name="model_field_with_long_text_to_display_username",
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name="homepage",
name="model_field_with_long_text_to_display_username_email",
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name="homepage",
name="short_name",
field=models.CharField(blank=True, max_length=256, null=True),
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
3309039c811223dff99454e678e10f02a554f17c | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/159/64269/submittedfiles/testes.py | 2ada4d068b2b66630f6bf6f34567c3d2085f78ca | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | # -*- coding: utf-8 -*-
import numpy as np
'''
'''
#Essa função tem como parametros os graus, minutos e segundos de um ângulo sexasimal e retorna o ângulo em decimais
def angulo(g,m,s):
angulom=m/60
angulos=s/3600
soma=g+angulom+angulos
return soma
'''
Essa função transforma ângulos decimais, contidos em uma lista, em ângulos sexasimais, agrupando-os em uma matriz de 3 colunas,
que são respectivamente os graus, minutos e segundos de cada ângulo. Com n ângulos distribuídos pelas linhas
'''
def graus (azi):
a=np.zeros((len(azi),3))
for i in range (0,len(azi),1):
graus=int(azi[i])
b=azi[i]-graus
c=b*60
minutos=int(c)
j=c-minutos
segundos=j*60
a[i,0]=graus
a[i,1]=minutos
a[i,2]=segundos
return a
#Essa função calcula o azimute de todos os pontos de acordo com a fórmula Azi=Azi-1+Dei
def azimute (angulo,dei):
azi=[]
for i in range (0,len(dei),1):
azimute=angulo+dei[i]
azi.append(azimute)
angulo=azimute
return (azi)
g=int(input('Graus do primeiro azimute:'))
m=int(input('Minutos do primeiro azimute:'))
s=int(input('Segundos do primeiro azimute:'))
n=int(input('Número de deflexões:'))
#Criou-se uma lista para armazenar as deflexões.
dei=[]
for i in range (0,n,1):
deflexao=float(input('Deflexão:'))
dei.append(deflexao)
#Depois de receber as entradas, transformamos os dados do primeiro azimute para decimal.
primeiroazi=(angulo(g,m,s))
#Aplicando na função o primeiro azimute e a lista de deflexões obtemos uma lista com os azimutes (em decimais) em todos os pontos
azimutes=azimute(primeiroazi,dei)
#Aplicar a lista com os valores do azimute (em decimais) e transformar em uma matiz com 3 colunas que são graus, minutos e segundos, respectivamente
print(graus(azimutes))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
2258e8bf2faf8c3ae5d63dac8669d6dd32476e7a | 9c19350ad4ab5e41d9bc6c627d623f0edd8325c5 | /aleph/tests/test_collections_api.py | f734c01585f2ae4014ad72f125f7748b1f3b18d9 | [
"MIT"
] | permissive | gavinrozzi/aleph | f515a70fa87d1f19fe7288a5ac0b398a71dabfd5 | a8e3d10ec34b0d0a05b4daf3fdd2d09b96928b35 | refs/heads/master | 2020-03-21T00:56:14.108536 | 2018-08-17T13:10:12 | 2018-08-17T13:10:12 | 137,916,214 | 0 | 0 | MIT | 2018-08-17T13:10:13 | 2018-06-19T16:15:29 | Python | UTF-8 | Python | false | false | 4,174 | py | import json
from aleph.core import db
from aleph.model import Entity
from aleph.tests.util import TestCase
class CollectionsApiTestCase(TestCase):
def setUp(self):
super(CollectionsApiTestCase, self).setUp()
self.rolex = self.create_user(foreign_id='user_3')
self.col = self.create_collection(
label='Test Collection',
foreign_id='test_coll_entities_api',
category='leak',
countries=[]
)
self.ent = Entity.create({
'schema': 'Person',
'name': 'Winnie the Pooh',
}, self.col)
db.session.add(self.ent)
db.session.commit()
def test_index(self):
res = self.client.get('/api/2/collections')
assert res.status_code == 200, res
assert res.json['total'] == 0, res.json
_, headers = self.login(is_admin=True)
res = self.client.get('/api/2/collections',
headers=headers)
assert res.status_code == 200, res
assert res.json['total'] == 1, res.json
def test_view(self):
res = self.client.get('/api/2/collections/%s' % self.col.id)
assert res.status_code == 403, res
_, headers = self.login(is_admin=True)
res = self.client.get('/api/2/collections/%s' % self.col.id,
headers=headers)
assert res.status_code == 200, res
assert 'test_coll' in res.json['foreign_id'], res.json
assert 'Winnie' not in res.json['label'], res.json
def test_sitemap(self):
self.update_index()
url = '/api/2/collections/%s/sitemap.xml' % self.col.id
res = self.client.get(url)
assert res.status_code == 403, res
self.grant_publish(self.col)
res = self.client.get(url)
assert res.status_code == 200, res
data = res.data.decode('utf-8')
assert self.ent.id in data, data
def test_rdf(self):
url = '/api/2/collections/%s/rdf' % self.col.id
res = self.client.get(url)
assert res.status_code == 403, res
self.grant_publish(self.col)
res = self.client.get(url)
assert res.status_code == 200, res
def test_update_valid(self):
_, headers = self.login(is_admin=True)
url = '/api/2/collections/%s' % self.col.id
res = self.client.get(url,
headers=headers)
assert res.status_code == 200, res
data = res.json
data['label'] = 'Collected Collection'
res = self.client.post(url,
data=json.dumps(data),
headers=headers,
content_type='application/json')
assert res.status_code == 200, res.json
assert 'Collected' in res.json['label'], res.json
def test_update_no_label(self):
_, headers = self.login(is_admin=True)
url = '/api/2/collections/%s' % self.col.id
res = self.client.get(url, headers=headers)
data = res.json
data['label'] = ''
res = self.client.post(url,
data=json.dumps(data),
headers=headers,
content_type='application/json')
assert res.status_code == 400, res.json
res = self.client.get(url, headers=headers)
data = res.json
data['category'] = 'banana'
res = self.client.post(url,
data=json.dumps(data),
headers=headers,
content_type='application/json')
assert res.status_code == 400, res.json
def test_delete(self):
_, headers = self.login(is_admin=True)
url = '/api/2/collections/%s' % self.col.id
res = self.client.get(url, headers=headers)
assert res.status_code == 200, res
res = self.client.delete(url,
headers=headers)
assert res.status_code == 204, res
res = self.client.get(url,
headers=headers)
assert res.status_code == 404, res
| [
"friedrich@pudo.org"
] | friedrich@pudo.org |
af5920639862893c22731a5bd2fc1174a0685376 | 41b77a1a17244a727aa6acd95e91e8c674986049 | /leagueOfDrivers_BE/wx_league/migrations/0025_auto_20180812_0856.py | 11875c3038b5b09e6392b1b9a149f0bbd957fca7 | [] | no_license | DataSecretbase/Renjiu | 24424ca1742a3987a395bc5da54afa1e7f34fc84 | aa90d58d92d0c1936b0ee23e4f9c970135b480d5 | refs/heads/master | 2022-12-08T05:28:09.192623 | 2019-03-10T09:44:19 | 2019-03-10T09:44:19 | 140,274,553 | 1 | 2 | null | 2022-12-08T02:21:56 | 2018-07-09T11:15:07 | Python | UTF-8 | Python | false | false | 464 | py | # Generated by Django 2.1 on 2018-08-12 08:56
from django.db import migrations, models
import wx_league.models
class Migration(migrations.Migration):
dependencies = [
('wx_league', '0024_auto_20180812_0845'),
]
operations = [
migrations.AlterField(
model_name='icon',
name='display_pic',
field=models.ImageField(upload_to=wx_league.models.filepath, verbose_name='icon 对应'),
),
]
| [
"2144799613@qq.com"
] | 2144799613@qq.com |
39b5f1e225edc865f4828bc8ca4734edd479d6ce | 92f21431bb65074757b76ec41b2f5fa4b445c566 | /estomagordo-python3/day_6a.py | 3f718401f3d9615e5aaea6aabe160a5921626ca3 | [
"Apache-2.0"
] | permissive | kodsnack/advent_of_code_2019 | f45c6235ef7ddf8ee177be3069eddfb18b64d05c | b5478e4ce4a7cb223bbb61a8f7322f6e0f68684e | refs/heads/master | 2023-03-08T18:02:11.962777 | 2023-03-01T11:20:14 | 2023-03-01T11:20:14 | 223,724,445 | 9 | 53 | Apache-2.0 | 2023-03-01T11:20:15 | 2019-11-24T10:08:42 | Python | UTF-8 | Python | false | false | 624 | py | import helpers
import re
from heapq import heappop, heappush
from collections import Counter, defaultdict
def solve(d):
graph = {}
for line in d:
a, b = line.split(')')
graph[b] = a
if a not in graph:
graph[a] = 'STOP'
count = 0
for node in graph.keys():
while node in graph and graph[node] in graph:
count += 1
node = graph[node]
return count
def read_and_solve():
with open('input_6.txt') as f:
data = [line.rstrip() for line in f]
return solve(data)
if __name__ == '__main__':
print(read_and_solve()) | [
"christofer.ohlsson@gmail.com"
] | christofer.ohlsson@gmail.com |
59a171100a3c7d5531c4504f41806319dd918ba9 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2370/60605/242607.py | ce8e14d4dde4ff225770cb30a7777d60cedc5d54 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | n = int(input())
t = n
res = ""
while t != 0:
remain = t % -2
t //= -2
if remain < 0:
t += 1
remain += 2
res = res + str(remain)
print(res) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
1c415503eb40a3d71e84667fef1513d4ecb304cb | 1ddd9929238af090fd15acb08c3a9a034039bf7c | /resolucao_python/unidade_1/script_3.py | e3a0349b9f4fa9f316310e6c25eff4d29c562754 | [] | no_license | frclasso/estatistica_geral_puc_minas_2020 | 7e2e12958411416c8f912e7d349b908d18dbe2a9 | 9d98d1ec8e645e882885833709b610065b2d4fc6 | refs/heads/main | 2023-02-23T23:06:44.785060 | 2021-01-31T21:36:25 | 2021-01-31T21:36:25 | 325,156,858 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | py | from typing import List
import statistics
import math
class CaculaQuartoQuartil:
"""
PROBLEMA 2
Um pesquisador está interessado em avaliar o tempos (em segundos) que os
consumidores demoram entre o início e a finalização de uma compra em um
determinado site na internet. Para isso, observou 12 consumidores escolhidos
aleatoriamente no sistema. Os dados encontram-se abaixo:
71, 73, 73, 74, 74, 75
76, 77, 77, 79, 81, 83
"""
def __init__(self, dados: List) -> None:
self.dados = dados
def calcula_percentil(self) -> int:
"""Quarto quartil corresponde ao quarto decil, 40/100
Valor decimal deve ser arredondado para cima."""
percentil = round((40/100) * len(self.dados), 1)
if isinstance(percentil, float):
return math.ceil(percentil) # arredondando pra cima (4.8)
else:
return percentil
def calcula_posicao(self, value):
"""Retorna posição a ser avaliada, caso o resultado seja um número inteiro
fazer a média entre o valor obtido e o valor da próxima posição."""
posicao = self.dados[value - 1] # Python inicia a contagem por 0 (zero)
return posicao
if __name__ == "__main__":
tempo_compra = [71, 73, 73, 74, 74, 75, 76, 77, 77, 79, 81, 83]
c = CaculaQuartoQuartil(tempo_compra)
perc = c.calcula_percentil()
print(f"Percentil [Posição] {perc}")
print(f"Valor correspondente a Posição: {c.calcula_posicao(perc)}")
print(f"Atualmente 40% dos demoram até {c.calcula_posicao(perc)} segundos"
f" entre o início e a finalização da compra")
| [
"frcalsso@yahoo.com.br"
] | frcalsso@yahoo.com.br |
c326d3f5a52588f010147b3d3cb5aa5fd79be81e | 0a33cc0ebb67c51cc38750f0f04c3e6c088e3b1a | /tests/components/rfxtrx/test_device_trigger.py | 8e5ee27504ba419c811be9e585c59811da12a6e7 | [
"Apache-2.0"
] | permissive | robert-alfaro/home-assistant | e9bb08ad22a167ed226fb3de8f5b36acfc393548 | 4a53121b58b77a318f08c64ad2c5372a16b800e0 | refs/heads/dev | 2023-02-28T06:46:23.217246 | 2022-04-26T17:30:08 | 2022-04-26T17:30:08 | 115,894,662 | 4 | 0 | Apache-2.0 | 2023-02-22T06:21:08 | 2018-01-01T02:00:35 | Python | UTF-8 | Python | false | false | 5,694 | py | """The tests for RFXCOM RFXtrx device triggers."""
from __future__ import annotations
from typing import Any, NamedTuple
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.rfxtrx import DOMAIN
from homeassistant.helpers.device_registry import DeviceRegistry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
)
from tests.components.rfxtrx.conftest import create_rfx_test_cfg
class EventTestData(NamedTuple):
    """Test data linked to a device.

    Bundles one raw RFXtrx packet with the device-registry identifiers of
    the device it belongs to and the trigger type/subtype it should fire.
    """
    # Hex-encoded RFXtrx packet replayed during the test.
    code: str
    # 4-tuple identifiers as stored in the device registry; the first
    # element is the "rfxtrx" domain (see DEVICE_* constants below).
    device_identifiers: set[tuple[str, str, str, str]]
    # Trigger type (e.g. "command" or "status") and its subtype (e.g. "On").
    type: str
    subtype: str
# Lighting1 device (packet type 0x10): "On" command event.
DEVICE_LIGHTING_1 = {("rfxtrx", "10", "0", "E5")}
EVENT_LIGHTING_1 = EventTestData("0710002a45050170", DEVICE_LIGHTING_1, "command", "On")

# RollerTrol cover (packet type 0x19): "Down" command event.
DEVICE_ROLLERTROL_1 = {("rfxtrx", "19", "0", "009ba8:1")}
EVENT_ROLLERTROL_1 = EventTestData(
    "09190000009ba8010100", DEVICE_ROLLERTROL_1, "command", "Down"
)

# Fire alarm sensor (packet type 0x20): "Panic" status event.
DEVICE_FIREALARM_1 = {("rfxtrx", "20", "3", "a10900:32")}
EVENT_FIREALARM_1 = EventTestData(
    "08200300a109000670", DEVICE_FIREALARM_1, "status", "Panic"
)
@pytest.fixture(name="device_reg")
def device_reg_fixture(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
async def setup_entry(hass, devices):
    """Create and fully set up a mock rfxtrx config entry for *devices*."""
    config_entry = MockConfigEntry(
        domain="rfxtrx",
        unique_id=DOMAIN,
        data=create_rfx_test_cfg(devices=devices),
    )
    config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(config_entry.entry_id)
    await hass.async_block_till_done()
    await hass.async_start()
@pytest.mark.parametrize(
    "event,expected",
    [
        [
            EVENT_LIGHTING_1,
            # Lighting1 devices expose one "command" trigger per subtype.
            [
                {"type": "command", "subtype": subtype}
                for subtype in [
                    "Off",
                    "On",
                    "Dim",
                    "Bright",
                    "All/group Off",
                    "All/group On",
                    "Chime",
                    "Illegal command",
                ]
            ],
        ]
    ],
)
async def test_get_triggers(hass, device_reg, event: EventTestData, expected):
    """Test we get the expected triggers from a rfxtrx."""
    # Register the device by setting up an entry that knows its packet code.
    await setup_entry(hass, {event.code: {}})
    device_entry = device_reg.async_get_device(event.device_identifiers, set())
    # Every expected trigger shares the same device/domain boilerplate.
    expected_triggers = [
        {
            "domain": DOMAIN,
            "device_id": device_entry.id,
            "platform": "device",
            "metadata": {},
            **expect,
        }
        for expect in expected
    ]
    triggers = await async_get_device_automations(
        hass, DeviceAutomationType.TRIGGER, device_entry.id
    )
    # Other integrations may contribute triggers; compare only rfxtrx ones.
    triggers = [value for value in triggers if value["domain"] == "rfxtrx"]
    assert_lists_same(triggers, expected_triggers)
@pytest.mark.parametrize(
    "event",
    [
        EVENT_LIGHTING_1,
        EVENT_ROLLERTROL_1,
        EVENT_FIREALARM_1,
    ],
)
async def test_firing_event(hass, device_reg: DeviceRegistry, rfxtrx, event):
    """Test for turn_on and turn_off triggers firing."""
    # fire_event=True makes the integration emit events for this device.
    await setup_entry(hass, {event.code: {"fire_event": True}})
    device_entry = device_reg.async_get_device(event.device_identifiers, set())
    assert device_entry
    # Capture calls to the dummy test.automation service.
    calls = async_mock_service(hass, "test", "automation")
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device_entry.id,
                        "type": event.type,
                        "subtype": event.subtype,
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": ("{{trigger.platform}}")},
                    },
                },
            ]
        },
    )
    await hass.async_block_till_done()
    # Simulate the raw packet arriving from the transceiver.
    await rfxtrx.signal(event.code)
    # The automation must have fired exactly once for this device event.
    assert len(calls) == 1
    assert calls[0].data["some"] == "device"
async def test_invalid_trigger(hass, device_reg: DeviceRegistry):
    """Test for invalid actions."""
    event = EVENT_LIGHTING_1
    await setup_entry(hass, {event.code: {"fire_event": True}})
    device_identifers: Any = event.device_identifiers
    device_entry = device_reg.async_get_device(device_identifers, set())
    assert device_entry
    # Setup succeeds overall even though the trigger subtype is bogus...
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device_entry.id,
                        "type": event.type,
                        # Deliberately not a valid subtype for this device.
                        "subtype": "invalid",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": ("{{trigger.platform}}")},
                    },
                },
            ]
        },
    )
    await hass.async_block_till_done()
    # ...but the failure surfaces as a persistent notification to the user.
    assert len(notifications := hass.states.async_all("persistent_notification")) == 1
    assert (
        "The following integrations and platforms could not be set up"
        in notifications[0].attributes["message"]
    )
| [
"noreply@github.com"
] | robert-alfaro.noreply@github.com |
4cb41dfca9cf17c318af5b90764e56ae92b007c4 | 7bf449f96e1c72cf097dd5264c5efa4337fe1ad7 | /final_proj/VisualAnalyses.py | c76803b3fed0340d336c668b1fd9b0ec7d009f45 | [] | no_license | rgc292/final_project | 9229573a258554655c464306a8d8be4ddade5aa2 | c0ec7690bd1520020e35864eeaec501fd45f3d32 | refs/heads/master | 2021-01-18T02:32:39.062081 | 2015-12-17T03:10:50 | 2015-12-17T03:10:50 | 48,068,674 | 0 | 1 | null | 2015-12-15T20:59:08 | 2015-12-15T20:59:08 | null | UTF-8 | Python | false | false | 1,057 | py | '''
Created on Dec 12, 2015
@author: Kristen kk3175 & Rafael rgc292
'''
import line_graph as lg
import bar_graph as bg
from Pie_Chart import Pie_Chart
'''
Module to perform visual analyses of a specific NYC housing complaint.
Takes a complaint dataset as an argument.
Makes pie charts, line graphs, and bar graphs so the user can understand the housing
complaint data from different viewpoints.
'''
def make_visual_analyses(complaint_dataset):
print 'Making visual analysis tools...'
try:
pie_chart = Pie_Chart(complaint_dataset)
pie_chart.plot_by_violation_type()
line_graph = lg.LineGraph()
line_graph.plot_line_graph(complaint_dataset)
bar_graph = bg.BarGraph()
bar_graph.plot_bar_graph(complaint_dataset)
print '\nFigures are now saved in the figures folder.'
except (ValueError, TypeError):
print "\nYour range of dates does not contain information for your choice of ID."
print "Please, choose a different combination."
| [
"Rafa@192.168.1.139"
] | Rafa@192.168.1.139 |
2944d77e65016a65342224fb147b9da82d155353 | 8cc30a27835e205a3476783106ca1605a6a85c48 | /amy/trainings/views.py | 60e04fb4bf1ebf7c0f1ee24522888df26eb7dc5e | [
"MIT"
] | permissive | gaybro8777/amy | d968edc78bbd3f63f3353450334721628dbbc0f4 | 3cf99aed58a0f0acf83d2645a30d8408208ccea9 | refs/heads/develop | 2023-03-07T22:08:28.692700 | 2021-02-23T18:06:06 | 2021-02-23T18:06:06 | 341,930,505 | 0 | 0 | MIT | 2021-02-24T17:22:08 | 2021-02-24T14:40:43 | null | UTF-8 | Python | false | false | 7,189 | py | from django.contrib import messages
from django.db.models import (
Case,
When,
IntegerField,
Count,
F,
Sum,
Prefetch,
)
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from trainings.filters import (
TraineeFilter,
)
from trainings.forms import (
TrainingProgressForm,
BulkAddTrainingProgressForm,
BulkDiscardProgressesForm,
)
from workshops.base_views import (
AMYCreateView,
AMYUpdateView,
AMYDeleteView,
AMYListView,
RedirectSupportMixin,
PrepopulationSupportMixin,
)
from workshops.models import (
Badge,
Event,
Person,
Task,
TrainingProgress,
TrainingRequirement,
)
from workshops.util import (
get_pagination_items,
admin_required,
OnlyForAdminsMixin,
)
class AllTrainings(OnlyForAdminsMixin, AMYListView):
    """List every TTT (train-the-trainer) event with trainee counts."""

    context_object_name = 'all_trainings'
    template_name = 'trainings/all_trainings.html'
    # Per TTT event, annotate:
    #  - trainees: distinct learners attached to the event,
    #  - finished: distinct learners who already hold an instructor badge.
    # Events with no trainees are hidden; newest trainings first.
    queryset = Event.objects.filter(tags__name='TTT').annotate(
        trainees=Count(Case(When(task__role__name='learner',
                                 then=F('task__person__id')),
                            output_field=IntegerField()),
                       distinct=True),
        finished=Count(Case(When(task__role__name='learner',
                                 task__person__badges__in=Badge.objects.instructor_badges(),
                                 then=F('task__person__id')),
                            output_field=IntegerField()),
                       distinct=True),
    ).exclude(trainees=0).order_by('-start')
    title = 'All Instructor Trainings'
# ------------------------------------------------------------
# Instructor Training related views
class TrainingProgressCreate(RedirectSupportMixin,
                             PrepopulationSupportMixin,
                             OnlyForAdminsMixin,
                             AMYCreateView):
    """Create a single TrainingProgress record for a trainee."""

    model = TrainingProgress
    form_class = TrainingProgressForm
    # A ?trainee=<id> query parameter pre-fills the trainee field.
    populate_fields = ['trainee']

    def get_initial(self):
        # Default the evaluator to the currently signed-in admin.
        initial = super().get_initial()
        initial['evaluated_by'] = self.request.user
        return initial

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Swap in the form's create-specific layout helper.
        context['form'].helper = context['form'].create_helper
        return context
class TrainingProgressUpdate(RedirectSupportMixin, OnlyForAdminsMixin,
                             AMYUpdateView):
    """Edit an existing TrainingProgress record."""

    model = TrainingProgress
    form_class = TrainingProgressForm
    template_name = 'trainings/trainingprogress_form.html'
class TrainingProgressDelete(RedirectSupportMixin, OnlyForAdminsMixin,
                             AMYDeleteView):
    """Delete a TrainingProgress record, then return to the trainee list."""

    model = TrainingProgress
    success_url = reverse_lazy('all_trainees')
def all_trainees_queryset():
    """Base queryset for the trainee listing.

    Prefetches training tasks/requests/progress and annotates badge
    flags so the template avoids per-row queries.
    """
    def has_badge(badge):
        # Sums 1 per matching badge row, so the annotation is non-zero
        # exactly when the person holds `badge`.
        return Sum(Case(When(badges__name=badge, then=1),
                        default=0,
                        output_field=IntegerField()))

    return (
        Person.objects
        .annotate_with_instructor_eligibility()
        .prefetch_related(
            Prefetch(
                'task_set',
                to_attr='training_tasks',
                # Only learner tasks on TTT events count as training tasks.
                queryset=Task.objects.filter(role__name='learner',
                                             event__tags__name='TTT')
            ),
            'training_tasks__event',
            'trainingrequest_set',
            'trainingprogress_set',
            'trainingprogress_set__requirement',
            'trainingprogress_set__evaluated_by',
        ).annotate(
            is_swc_instructor=has_badge('swc-instructor'),
            is_dc_instructor=has_badge('dc-instructor'),
            is_lc_instructor=has_badge('lc-instructor'),
            # Non-zero when the person holds any instructor badge at all.
            is_instructor=Sum(
                Case(
                    When(
                        badges__name__in=Badge.INSTRUCTOR_BADGES,
                        then=1
                    ),
                    default=0,
                    output_field=IntegerField()
                )
            ),
        ).order_by('family', 'personal')
    )
@admin_required
def all_trainees(request):
    """List trainees and support bulk progress changes.

    GET renders the filtered trainee list (optionally pre-filling the
    bulk-add form from a ?training=<id> parameter).  POST handles either
    the "discard" action (mark all progress of the selected trainees as
    discarded) or the "submit" action (record a new TrainingProgress for
    every selected trainee).
    """
    # Renamed from `filter` so we do not shadow the builtin.
    trainee_filter = TraineeFilter(
        request.GET,
        queryset=all_trainees_queryset(),
    )
    trainees = get_pagination_items(request, trainee_filter.qs)

    if request.method == 'POST' and 'discard' in request.POST:
        # Bulk discard progress of selected trainees.
        form = BulkAddTrainingProgressForm()
        discard_form = BulkDiscardProgressesForm(request.POST)

        if discard_form.is_valid():
            for trainee in discard_form.cleaned_data['trainees']:
                TrainingProgress.objects.filter(trainee=trainee)\
                                        .update(discarded=True)
            messages.success(request, 'Successfully discarded progress of '
                                      'all selected trainees.')
            # Raw uri contains GET parameters from django filters. We use it
            # to preserve filter settings.
            return redirect(request.get_raw_uri())

    elif request.method == 'POST' and 'submit' in request.POST:
        # Bulk add progress to selected trainees.
        instance = TrainingProgress(evaluated_by=request.user)
        form = BulkAddTrainingProgressForm(request.POST, instance=instance)
        discard_form = BulkDiscardProgressesForm()

        if form.is_valid():
            for trainee in form.cleaned_data['trainees']:
                TrainingProgress.objects.create(
                    trainee=trainee,
                    evaluated_by=request.user,
                    requirement=form.cleaned_data['requirement'],
                    state=form.cleaned_data['state'],
                    discarded=False,
                    event=form.cleaned_data['event'],
                    url=form.cleaned_data['url'],
                    notes=form.cleaned_data['notes'],
                )
            messages.success(request, 'Successfully changed progress of '
                                      'all selected trainees.')
            return redirect(request.get_raw_uri())

    else:  # GET request
        # If the user filters by training, we want to set initial values for
        # "requirement" and "training" fields.
        training_id = request.GET.get('training', None) or None
        try:
            initial = {
                'event': Event.objects.get(pk=training_id),
                'requirement': TrainingRequirement.objects.get(name='Training')
            }
        except Event.DoesNotExist:  # or there is no `training` GET parameter
            initial = None

        form = BulkAddTrainingProgressForm(initial=initial)
        discard_form = BulkDiscardProgressesForm()

    context = {'title': 'Trainees',
               'all_trainees': trainees,
               'swc': Badge.objects.get(name='swc-instructor'),
               'dc': Badge.objects.get(name='dc-instructor'),
               'lc': Badge.objects.get(name='lc-instructor'),
               'filter': trainee_filter,
               'form': form,
               'discard_form': discard_form}
    return render(request, 'trainings/all_trainees.html', context)
| [
"piotr@banaszkiewicz.org"
] | piotr@banaszkiewicz.org |
724a23a13c33701d678c2ee7d967eef75ab6289a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03354/s490945583.py | f03b65974dfe8d09656e62cab53ea47171b1c89a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | import sys
input = sys.stdin.readline


def root(x):
    """Find the representative of x, halving paths as we walk up."""
    while parent_list[x] != x:
        parent_list[x] = parent_list[parent_list[x]]
        x = parent_list[x]
    return x


# First line: number of elements and number of union operations.
n, m = map(int, input().split())
# Target partner (0-based) for each element.
num = [int(v) - 1 for v in input().split()]
parent_list = list(range(n))

for _ in range(m):
    a, b = (int(v) - 1 for v in input().split())
    ra, rb = root(a), root(b)
    if ra != rb:
        parent_list[ra] = rb

# Count elements that ended up in the same component as their target.
print(sum(1 for i, v in enumerate(num) if root(i) == root(v)))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
46d0eabafb34b253ecf80a7b0f1390f6b39739a0 | 9545652800884f0e54fe6595d8634c29ea4827a2 | /中级算法/leetCode_80_常数时间插入,删除和获取随机元素.py | 52f7adb9f1fa3ec80b550e936ff7cfa8d7b23901 | [] | no_license | challeger/leetCode | 662d9f600a40fd8970568679656f6911a6fdfb05 | d75c35b6f8ab33c158de7fa977ab0b16dac4fc25 | refs/heads/master | 2023-01-13T07:34:42.464959 | 2020-11-13T02:40:31 | 2020-11-13T02:40:31 | 286,426,790 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,819 | py | """
day: 2020-08-22
url: https://leetcode-cn.com/leetbook/read/top-interview-questions-medium/xw5rt1/
题目名: 常数时间插入,删除和获取随机元素
题目描述: 设计一个支持在平均时间复杂度O(1)下,执行以下操作的数据结构
1.insert(val):当元素val不存在时,向集合中插入此项
2.remove(val):当元素val存在时,从集合中删除此项
3.getRandom:随机返回现有集合中的一项,每个元素应该有相同的概率被返回
示例:
// 初始化一个空的集合。
RandomizedSet randomSet = new RandomizedSet();
// 向集合中插入 1 。返回 true 表示 1 被成功地插入。
randomSet.insert(1);
// 返回 false ,表示集合中不存在 2 。
randomSet.remove(2);
// 向集合中插入 2 。返回 true 。集合现在包含 [1,2] 。
randomSet.insert(2);
// getRandom 应随机返回 1 或 2 。
randomSet.getRandom();
// 从集合中移除 1 ,返回 true 。集合现在包含 [2] 。
randomSet.remove(1);
// 2 已在集合中,所以返回 false 。
randomSet.insert(2);
// 由于 2 是集合中唯一的数字,getRandom 总是返回 2 。
randomSet.getRandom();
思路:
insert直接利用列表的append
remove每次将要删除的元素与尾部元素对调,然后删除尾部元素
random利用random模块中的choice即可
所以需要一个list来记录数据,以及一个dict来记录数值对应的索引.
在insert时,self.dict[value] = len(self.list)
self.list.append(value)
在remove时,取出要删除的值的索引,以及最后一个数的值
然后将最后一个数的值放到要删除的值上,并设置dict中的索引
然后pop(), del self.dict[value]
"""
class RandomizedSet:

    def __init__(self):
        """Initialize an empty set with O(1) insert/remove/getRandom."""
        # Values in insertion order, paired with a value -> index map so
        # any value can be located (and swap-removed) in O(1).
        self.data = []
        self._hashmap = {}

    def insert(self, val: int) -> bool:
        """Insert val. Return True if it was not already present."""
        if val not in self._hashmap:
            self._hashmap[val] = len(self.data)
            self.data.append(val)
            return True
        return False

    def remove(self, val: int) -> bool:
        """Remove val. Return True if it was present.

        The last element is moved into the removed slot so the deletion
        stays O(1).  (Bug fix: the original overwrote the *last* slot
        with the removed value instead, which dropped the tail element
        and left `val` in the list.)
        """
        if val not in self._hashmap:
            return False
        idx, last_element = self._hashmap[val], self.data[-1]
        # Move the tail element into the vacated slot, then shrink.
        self.data[idx] = last_element
        self._hashmap[last_element] = idx
        self.data.pop()
        del self._hashmap[val]
        return True

    def getRandom(self) -> int:
        """Return a uniformly random element of the set."""
        from random import choice
        return choice(self.data)
| [
"799613500@qq.com"
] | 799613500@qq.com |
e1fc1deef26dac9c3fe7954d0144a110fed9b04b | 2e02cdfbd1db42158e2f81f09863cf39241f45f2 | /1-Follow up in Code Interview/Optional/32. Minimum Window Substring.py | d20ddda4e59145b003187b66fe03f6fbccc81a2a | [] | no_license | LingHsiLiu/Algorithm2 | e65927720dc046738816815cfb94caa49c060a81 | 680208e58c93d12e974e49d12e682ea5dcfab922 | refs/heads/master | 2020-04-15T01:08:20.177967 | 2019-10-15T01:27:06 | 2019-10-15T01:27:06 | 164,264,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,245 | py | # 32. Minimum Window Substring
# Given a string source and a string target, find the minimum window in source which will contain all the characters in target.
# Example
# For source = "ADOBECODEBANC", target = "ABC", the minimum window is "BANC"
# Challenge
# Can you do it in time complexity O(n) ?
# Clarification
# Should the characters in minimum window has the same order in target?
# Not necessary.
# Notice
# If there is no such window in source that covers all characters in target, return the emtpy string "".
# If there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in source.
# The target string may contain duplicate characters, the minimum window should cover all characters including the duplicate characters in target.
class Solution:
    """
    @param source : A string
    @param target: A string
    @return: A string denote the minimum window, return "" if there is no such a string
    """
    def minWindow(self, source, target):
        """Sliding-window search for the shortest substring of *source*
        containing every character of *target* with multiplicity.

        Runs in O(len(source) + len(target)) time.
        """
        if source is None:
            return ""
        target_counts = self.getTargetHash(target)
        target_unique_chars = len(target_counts)
        matched_unique_chars = 0
        # Counts of target characters currently inside the window.
        # (Renamed from `hash`, which shadowed the builtin.)
        window_counts = {}
        n = len(source)
        j = 0
        min_length = n + 1
        min_window_string = ""
        for i in range(n):
            # Grow the right edge until every target char is matched.
            while j < n and matched_unique_chars < target_unique_chars:
                if source[j] in target_counts:
                    window_counts[source[j]] = window_counts.get(source[j], 0) + 1
                    if window_counts[source[j]] == target_counts[source[j]]:
                        matched_unique_chars += 1
                j += 1
            if j - i < min_length and matched_unique_chars == target_unique_chars:
                min_length = j - i
                min_window_string = source[i:j]
            # Shrink from the left: dropping source[i] may break a match.
            # BUG FIX: compare against target_counts[source[i]] - the
            # original indexed with source[j], which is both the wrong
            # character and out of range once j == n.
            if source[i] in target_counts:
                if window_counts[source[i]] == target_counts[source[i]]:
                    matched_unique_chars -= 1
                window_counts[source[i]] -= 1
        return min_window_string

    def getTargetHash(self, target):
        """Return a char -> required-count mapping for *target*."""
        counts = {}
        for c in target:
            counts[c] = counts.get(c, 0) + 1
        return counts
| [
"noreply@github.com"
] | LingHsiLiu.noreply@github.com |
c4b4bcb7cef9bd41fb9e9d20acc31e9a8e294d9d | d9d1b72da9ff37d6c29c9c0063c2dc0f5b5a107e | /django_facebook/views.py | 28a18d17e49e10da4af01d68c3df6c9c09aa45ca | [
"BSD-3-Clause"
] | permissive | fogcitymarathoner/djfb | 83c02583544e740e7d368ecb37750628f71a31f4 | fd436b579a94585a2c085f2e630bd7050e178394 | refs/heads/master | 2016-08-06T00:23:42.933057 | 2013-04-04T18:27:22 | 2013-04-04T18:27:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,320 | py | from django.conf import settings
from django.contrib import messages
from django.http import Http404, HttpResponse
from django.shortcuts import redirect, render_to_response
from django.template.context import RequestContext
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_exempt
from django_facebook import exceptions as facebook_exceptions, \
settings as facebook_settings
from django_facebook.connect import CONNECT_ACTIONS, connect_user
from django_facebook.decorators import facebook_required_lazy
from django_facebook.utils import next_redirect, get_registration_backend, \
to_bool, error_next_redirect, get_instance_for
from open_facebook import exceptions as open_facebook_exceptions
from open_facebook.utils import send_warning
import logging
logger = logging.getLogger(__name__)
@csrf_exempt
@facebook_required_lazy(extra_params=dict(facebook_login='1'))
def connect(request, graph):
    '''
    Exception and validation functionality around the _connect view
    Separated this out from _connect to preserve readability
    Don't bother reading this code, skip to _connect for the bit you're interested in :)
    '''
    # NOTE: Python 2 code ("except X, e" syntax) - keep py2-compatible.
    facebook_login = to_bool(request.REQUEST.get('facebook_login'))
    context = RequestContext(request)

    # validation to ensure the context processor is enabled
    if not context.get('FACEBOOK_APP_ID'):
        message = 'Please specify a Facebook app id and ensure the context processor is enabled'
        raise ValueError(message)

    # hide the connect page, convenient for testing with new users in production though
    if not facebook_login and not settings.DEBUG and facebook_settings.FACEBOOK_HIDE_CONNECT_TEST:
        raise Http404('not showing the connect page')

    try:
        response = _connect(request, facebook_login, graph)
    except open_facebook_exceptions.FacebookUnreachable, e:
        # often triggered when Facebook is slow
        # Warn ops and bounce the user back with an error flag instead of a 500.
        warning_format = u'%s, often caused by Facebook slowdown, error %s'
        warn_message = warning_format % (type(e), e.message)
        send_warning(warn_message, e=e)
        response = error_next_redirect(request,
                                       additional_params=dict(
                                           fb_error_or_cancel=1)
                                       )
    return response
def _connect(request, facebook_login, graph):
    '''
    Handles the view logic around connect user
    - (if authenticated) connect the user
    - login
    - register

    We are already covered by the facebook_required_lazy decorator
    So we know we either have a graph and permissions, or the user denied
    the oAuth dialog
    '''
    backend = get_registration_backend()
    context = RequestContext(request)

    if facebook_login:
        logger.info('trying to connect using Facebook')
        if graph:
            logger.info('found a graph object')
            converter = get_instance_for('user_conversion', graph)
            authenticated = converter.is_authenticated()
            # Defensive programming :)
            if not authenticated:
                raise ValueError('didnt expect this flow')
            logger.info('Facebook is authenticated')
            facebook_data = converter.facebook_profile_data()
            # either, login register or connect the user
            try:
                action, user = connect_user(request)
                logger.info('Django facebook performed action: %s', action)
            except facebook_exceptions.IncompleteProfileError, e:
                # show them a registration form to add additional data
                warning_format = u'Incomplete profile data encountered with error %s'
                warn_message = warning_format % e.message
                send_warning(warn_message, e=e,
                             facebook_data=facebook_data)

                context['facebook_mode'] = True
                context['form'] = e.form
                return render_to_response(
                    facebook_settings.FACEBOOK_REGISTRATION_TEMPLATE,
                    context_instance=context,
                )
            except facebook_exceptions.AlreadyConnectedError, e:
                # Another account already owns this Facebook id; report which.
                user_ids = [u.user_id for u in e.users]
                ids_string = ','.join(map(str, user_ids))
                return error_next_redirect(
                    request,
                    additional_params=dict(already_connected=ids_string))

            if action is CONNECT_ACTIONS.CONNECT:
                # connect means an existing account was attached to facebook
                messages.info(request, _("You have connected your account "
                                         "to %s's facebook profile") % facebook_data['name'])
            elif action is CONNECT_ACTIONS.REGISTER:
                # hook for tying in specific post registration functionality
                response = backend.post_registration_redirect(
                    request, user)
                # compatibility for Django registration backends which return redirect tuples instead of a response
                if not isinstance(response, HttpResponse):
                    to, args, kwargs = response
                    response = redirect(to, *args, **kwargs)
                return response
        else:
            # the user denied the request
            return error_next_redirect(
                request,
                additional_params=dict(fb_error_or_cancel='1'))

        # for CONNECT and LOGIN we simple redirect to the next page
        return next_redirect(request, default=facebook_settings.FACEBOOK_LOGIN_DEFAULT_REDIRECT)

    return render_to_response('django_facebook/connect.html', context)
def disconnect(request):
    '''
    Removes Facebook from the users profile
    And redirects to the specified next page
    '''
    if request.method == 'POST':
        # Only a POST actually disconnects; a GET falls through to the
        # redirect below without touching the profile.
        messages.info(
            request, _("You have disconnected your Facebook profile."))
        profile = request.user.get_profile()
        profile.disconnect_facebook()
        profile.save()
    response = next_redirect(request)
    return response
| [
"marc@fogtest.com"
] | marc@fogtest.com |
8a09cc29573b890e671f24fa77c061797bc1c907 | 7198ba93f84d088fe744fa022620d42f0f69f19a | /Chapter_5_code/build/ros_robotics/catkin_generated/generate_cached_setup.py | fe80f10654efb86931e10e3fa1d44c10129ffd3b | [
"MIT"
] | permissive | crepuscularlight/ROSbyExample | 114ae726f48cfef1b8e6443593c1204654453096 | fa7b1a60cacca9b1034e318a2ac16ce4c8530d7c | refs/heads/main | 2023-04-04T16:26:36.933566 | 2021-04-23T11:16:50 | 2021-04-23T11:16:50 | 359,818,748 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,513 | py | # -*- coding: utf-8 -*-
from __future__ import print_function

import os
import stat
import sys

# NOTE: this file lives under catkin_generated/ - presumably emitted by the
# catkin build system; hand edits are likely overwritten on rebuild.
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in '/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_5_code/devel;/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_3_code/devel;/opt/ros/melodic'.split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

# Generate the cached environment script for this package's env.sh.
code = generate_environment_script('/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_5_code/devel/.private/ros_robotics/env.sh')

output_filename = '/home/liudiyang1998/Git/ROS-Robotics-By-Example/Chapter_5_code/build/ros_robotics/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    # print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))

# Mark the generated shell script as user-executable.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"18003316366@163.com"
] | 18003316366@163.com |
d34c109a84de5826b080a028d54820ca16d96185 | 60b6645ef4544ccda1146cd596b618e42b8715d8 | /product/migrations/0002_auto_20171118_1436.py | 6b186dacbf3f08b42dd146f7343988f363b5f9b7 | [] | no_license | mamun1980/innstal | 6534c879d9deab09f8b638b484db940f118d5d7d | 5dd8051cf955e9ec72fbfbcd1fdcb681ad6e95d7 | refs/heads/master | 2021-05-16T00:59:19.606698 | 2017-11-26T18:49:16 | 2017-11-26T18:49:16 | 106,926,549 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,293 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-18 14:36
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration adding foreign keys to product models."""

    # NOTE(review): `initial = True` on an "0002" migration is unusual;
    # presumably makemigrations split the app's initial state to break a
    # circular dependency with the user model - confirm before squashing.
    initial = True

    dependencies = [
        ('product', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    # FK fields only; the tables themselves were created in 0001_initial.
    operations = [
        migrations.AddField(
            model_name='productvisited',
            name='visitor',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='producttype',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='product.ProductCategory'),
        ),
        migrations.AddField(
            model_name='productreview',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.Product'),
        ),
        migrations.AddField(
            model_name='productreview',
            name='writer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='product',
            name='business',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='product',
            name='product_brand',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='product.ProductBrand'),
        ),
        migrations.AddField(
            model_name='product',
            name='product_category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='product.ProductCategory'),
        ),
        migrations.AddField(
            model_name='product',
            name='product_type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='product.ProductType'),
        ),
    ]
| [
"mamun1980@gmail.com"
] | mamun1980@gmail.com |
8202face95f1682cd8af34eaa2404537a2b43714 | 5a3547772b61f7d1b3a81f76dd1397eb92c68e7b | /slbo/envs/mujoco/ant_env.py | 3ae2cf2cfcd1fac068ba3239dece9a9f98e5dbef | [
"MIT"
] | permissive | suen049/AdMRL | 483440f0ded14e471d879b300da9afbab68fbe66 | 50a22d4d480e99125cc91cc65dfcc0df4a883ac6 | refs/heads/master | 2023-03-12T23:15:05.154003 | 2021-03-06T15:31:21 | 2021-03-06T15:31:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,013 | py | import numpy as np
from rllab.envs.mujoco import ant_env
from rllab.envs.base import Step
from slbo.envs import BaseModelBasedEnv
class AntEnv(ant_env.AntEnv, BaseModelBasedEnv):
    """rllab Ant environment extended with a model-based `mb_step`.

    `mb_step` recomputes the same reward/termination as `step`, but purely
    from (state, action, next_state) arrays so a learned dynamics model can
    be rolled out without calling the simulator.
    """

    def get_current_obs(self):
        # Observation layout; mb_step relies on the torso com velocity
        # being the last 3 entries and index 2 presumably being torso
        # height (qpos[2]) - confirm against the MuJoCo model.
        return np.concatenate([
            self.model.data.qpos.flat,  # 15
            self.model.data.qvel.flat,  # 14
            # np.clip(self.model.data.cfrc_ext, -1, 1).flat,  # 84
            self.get_body_xmat("torso").flat,  # 9
            self.get_body_com("torso"),  # 9
            self.get_body_comvel("torso"),  # 3
        ]).reshape(-1)

    def step(self, action):
        """Advance the simulator and return (obs, reward, done)."""
        self.forward_dynamics(action)
        comvel = self.get_body_comvel("torso")
        # Reward forward (x) velocity, penalize scaled control effort;
        # contact cost is disabled (kept at 0 to match mb_step).
        forward_reward = comvel[0]
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
        contact_cost = 0.
        # contact_cost = 0.5 * 1e-3 * np.sum(
        #     np.square(np.clip(self.model.data.cfrc_ext, -1, 1))),
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        state = self._state
        # Episode ends on non-finite state or when state[2] leaves [0.2, 1.0].
        notdone = np.isfinite(state).all() and state[2] >= 0.2 and state[2] <= 1.0
        done = not notdone
        ob = self.get_current_obs()
        return Step(ob, float(reward), done)

    def mb_step(self, states: np.ndarray, actions: np.ndarray, next_states: np.ndarray):
        """Batched reward/done from transition arrays (no simulator calls).

        Returns (rewards, dones) where dones is 1.0 minus the same
        "not done" condition used in `step` (finiteness is not checked).
        """
        # comvel is the last 3 observation dims (see get_current_obs).
        comvel = next_states[..., -3:]
        forward_reward = comvel[..., 0]
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(actions / scaling), axis=-1)
        contact_cost = 0.
        # contact_cost = 0.5 * 1e-3 * np.sum(
        #     np.square(np.clip(self.model.data.cfrc_ext, -1, 1))),
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        notdone = np.all([next_states[..., 2] >= 0.2, next_states[..., 2] <= 1.0], axis=0)
        return reward, 1. - notdone
| [
"linzichuan12@163.com"
] | linzichuan12@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.