blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0f9e9852bc81691ad0a672e8840592cc337887e5 | cba46e28e6f60d9bd8cc8c24a3ff8e065e5a8e49 | /scrap_trade_proj/doc_repo/models.py | ecbf73f47f21efe2c10d67aed2895289d3d97b2a | [] | no_license | Horac-Bouthon/scrap-trade-4 | fb7e9f8f9ec41446318ce03ad5ff7024ad795771 | 7686703ce5783dd4a48dc1d9600cda01aa554faa | refs/heads/master | 2022-12-12T21:52:38.209500 | 2020-03-17T07:50:30 | 2020-03-17T07:50:30 | 227,142,003 | 0 | 0 | null | 2022-11-22T04:39:35 | 2019-12-10T14:33:20 | Python | UTF-8 | Python | false | false | 7,097 | py | from django.db import models
from django.conf import settings
from django.urls import reverse
import uuid
from pdf2image import convert_from_path
from django.utils.translation import gettext_lazy as _
from django.utils import translation as tr
from translatable.models import TranslatableModel, get_translation_model
from project_main.models import Project
from integ.models import OpenId
from customers.models import (
ProjectCustomUser,
)
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage as storage
from scrap_trade_proj.settings import THUMB_SIZE
import os
from PIL import Image
from io import BytesIO
from django.core.files.base import ContentFile
# Create your models here.
class DocType(TranslatableModel):
    """Lookup table of document types.

    ``type_key`` is the machine-readable identifier used in code lookups
    (``Document`` queries for 'picture', 'pdf' and 'file'); the human-readable
    name lives on the translation model (``DocTypeTranslation.type_name``).
    """
    # Machine key, e.g. 'picture' / 'pdf' / 'file'; defaults to 'type'.
    type_key = models.CharField(
        max_length=20,
        default="type",
        verbose_name=tr.pgettext_lazy('DocType definition', 'Type key'),
        help_text=tr.pgettext_lazy('DocType definition','Type key'),
        null=True,
        blank=True,
    )
    class Meta:
        verbose_name = tr.pgettext_lazy('DocType definition', 'Document type')
        verbose_name_plural = tr.pgettext_lazy('DocType definition', 'Document type')
    def __str__(self):
        return self.get_type_name()
    def get_type_name(self):
        """Return the display name translated into the active language."""
        lang = tr.get_language()
        return self.translated('type_name', default=None, language=lang, fallback=True)
class DocTypeTranslation(get_translation_model(DocType, "doctype")):
    """Per-language translation of a DocType's display name."""
    # Human-readable name shown in the UI for one language.
    type_name = models.CharField(
        verbose_name=_('Type name'),
        help_text=_("Display name of document type."),
        max_length=50,
        null=True,
        blank=True,
        unique=False
    )
class Document(models.Model):
    """A file stored in the document repository.

    On every save a thumbnail is generated automatically for documents of
    type 'picture' (resized copy) and 'pdf' (JPEG of the first page);
    plain 'file' documents get no thumbnail.
    """

    doc_name = models.CharField(
        verbose_name=_('Document name'),
        help_text=_("Name of the document."),
        max_length=100,
        null=True,
        blank=True,
        unique=False,
    )
    doc_description = models.TextField(
        verbose_name=_('Document description'),
        help_text=_("Description of the document."),
        null=True,
        blank=True,
        unique=False,
    )
    type = models.ForeignKey(
        DocType,
        on_delete=models.CASCADE,
        verbose_name=tr.pgettext_lazy('Document definition', 'Type'),
        help_text=tr.pgettext_lazy('Document definition','Document type'),
        related_name="my_docs",
    )
    created_by = models.ForeignKey(
        ProjectCustomUser,
        on_delete=models.SET_NULL,
        verbose_name=tr.pgettext_lazy('Document definition', 'Created by'),
        help_text=tr.pgettext_lazy('Document definition','Link to creator'),
        null=True, blank=True,
        related_name="my_documents",
    )
    created_at = models.DateTimeField(auto_now_add=True,)
    file = models.FileField(
        upload_to='doc_repository/%Y/%m/%d/',
        verbose_name=tr.pgettext_lazy('Document file', 'File'),
        null=True,
        blank=True,
    )
    # Auto-generated preview image; never edited by users directly.
    thumbnail = models.ImageField(
        upload_to='doc_thumbs/%Y/%m/%d/',
        editable=False,
        null=True,
        blank=True,
    )
    # BUGFIX: ``open_id`` was declared twice in the original class body.
    # Only the last assignment survives in the class namespace, so the
    # dead first declaration was removed and this (surviving) one kept.
    open_id = models.ForeignKey(
        OpenId,
        on_delete=models.CASCADE,
        verbose_name=tr.pgettext_lazy('UserProfile definition', 'Open id'),
        help_text=tr.pgettext_lazy('UserProfile definition','Link to integration key'),
        related_name='my_docs',
        null=True, blank=True,
    )

    # Supported picture-thumbnail extensions mapped to PIL format names.
    _THUMB_FORMATS = {
        '.jpg': 'JPEG',
        '.jpeg': 'JPEG',
        '.gif': 'GIF',
        '.png': 'PNG',
    }

    class Meta:
        verbose_name = tr.pgettext_lazy('Document definition', 'Document')
        verbose_name_plural = tr.pgettext_lazy('Document definition', 'Documents')

    def __str__(self):
        return '{} {} {}'.format(self.pk,
                                 self.doc_name,
                                 self.created_at,
                                 )

    def _has_type_key(self, key):
        """Return True when this document's type matches the given type key."""
        return self.type == DocType.objects.get(type_key=key)

    def is_picture(self):
        return self._has_type_key('picture')

    def is_pdf(self):
        return self._has_type_key('pdf')

    def is_file(self):
        return self._has_type_key('file')

    def _thumb_basename(self):
        """Return (bare file name without directory/extension, lowercased extension)."""
        stem, extension = os.path.splitext(self.file.name)
        return stem.split('/')[-1], extension.lower()

    def save(self, *args, **kwargs):
        """
        Persist the document, then make sure a thumbnail exists.
        """
        super().save(*args, **kwargs)
        self.make_thumbnail()

    def make_thumbnail(self):
        """
        Create and save the thumbnail for picture and PDF documents.

        Returns True on success or when nothing needs to be done,
        False when thumbnail generation failed.
        """
        # BUGFIX: the original compared ``self.thumbnail == None``, which an
        # empty FieldFile does not satisfy; falsiness is the reliable test.
        if not self.thumbnail:
            if self.is_picture():
                return self.make_picture_thumb()
            if self.is_pdf():
                return self.make_pdf_thumb()
        return True

    def make_picture_thumb(self):
        """Resize the image file in place (PIL) and store it as the thumbnail."""
        try:
            image = Image.open(self.file.path)
        except Exception:  # missing file, unreadable/corrupt image, ...
            return False
        image.thumbnail(THUMB_SIZE, Image.ANTIALIAS)
        real_name, thumb_extension = self._thumb_basename()
        ftype = self._THUMB_FORMATS.get(thumb_extension)
        if ftype is None:
            return False  # Unrecognized file type
        thumb_filename = real_name + '_thumb' + thumb_extension
        # Render the thumbnail into an in-memory buffer.
        temp_thumb = BytesIO()
        image.save(temp_thumb, ftype)
        temp_thumb.seek(0)
        # Saving the ContentFile also persists the model (save=True).
        self.thumbnail.save(thumb_filename, ContentFile(temp_thumb.read()), save=True)
        temp_thumb.close()
        return True

    def make_pdf_thumb(self):
        """Render the first PDF page and store a resized JPEG thumbnail of it."""
        try:
            pages = convert_from_path(self.file.path, 500)
        except Exception:  # missing file, unreadable PDF, poppler errors, ...
            return False
        if not pages:
            return False
        real_name, _extension = self._thumb_basename()
        thumb_filename = real_name + '_thumb.jpg'
        temp_thumb = BytesIO()
        # BUGFIX: the original looped over *all* pages, repeatedly writing
        # into the same un-truncated buffer (corrupting the JPEG); only the
        # first page is wanted for the thumbnail.
        pages[0].save(temp_thumb, 'JPEG')
        temp_thumb.seek(0)
        # Saving the ContentFile also persists the model (save=True).
        self.thumbnail.save(thumb_filename, ContentFile(temp_thumb.read()), save=True)
        temp_thumb.close()
        # The rendered page is large (500 dpi): shrink the stored file.
        img = Image.open(self.thumbnail.path)
        if img.height > 100 or img.width > 100:
            img.thumbnail(THUMB_SIZE, Image.ANTIALIAS)
            img.save(self.thumbnail.path)
        return True
| [
"tbrown.wolf@ubk.cz"
] | tbrown.wolf@ubk.cz |
09d004b1906893a51b1d0ef124aa91e4433ed1cd | 8a63821681b29f196a0dcf19308a75679f89adaf | /Algorithm/布隆过滤算法.py | f3cc26a121bba26e19360434af297a665e9ad9b0 | [] | no_license | Breathleas/notes-4 | 2b4391b6205660dae256c4132ecb3f953061a2f7 | 6c11e4583e191da323d8ffdc83534e9582036ae1 | refs/heads/master | 2021-10-09T03:18:08.560660 | 2018-12-20T14:46:31 | 2018-12-20T14:46:31 | null | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 818 | py | ----------------------------
布隆过滤算法 |
----------------------------
# 在N多数据中,判断数据是否存在
# 添加逻辑
1,存入数据,使用多个hash函数对数据进行运算
			v1 = hash1(key)
			v2 = hash2(key)
			v3 = hash3(key)
2,多个hash值取模数组长度,把得到的结果角标设置为1
arr[v1 % arr.length] = 1;
arr[v2 % arr.length] = 1;
arr[v3 % arr.length] = 1;
# 判断逻辑
1,使用多个hash函数对数据进行运算
			v1 = hash1(key)
			v2 = hash2(key)
			v3 = hash3(key)
2,多个hash值取模数组长度,判断结果角标是否都为1,如果是则包含,任何非1则不包含
arr[v1 % arr.length] == 1 &&
arr[v2 % arr.length] == 1 &&
arr[v3 % arr.length] == 1
# 注意
* hash运算次数越多,误报的几率越小
| [
"747692844@qq.com"
] | 747692844@qq.com |
2ca649bd865704e92b59ad46113905a39e6e9ecf | 070e06d721d450260f70fed0811b5dd147d1ea10 | /zhihudaily/cache.py | c49f84aa4649b4cb8f6609ea4b6296688742e65a | [
"MIT"
] | permissive | lord63/zhihudaily | b1e411c6a93a4cc0ec629336259021baff81d6f7 | c6aa147d146223bb5842297e58a702b574f7dce5 | refs/heads/master | 2021-07-13T16:34:51.370694 | 2020-08-29T00:32:08 | 2020-08-29T00:32:08 | 30,679,182 | 2 | 2 | MIT | 2021-03-19T21:31:47 | 2015-02-12T01:27:15 | CSS | UTF-8 | Python | false | false | 137 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from flask_caching import Cache
# Shared Flask-Caching instance imported by the rest of the app;
# presumably bound to the Flask app elsewhere via cache.init_app(app) -- TODO confirm.
cache = Cache()
| [
"lord63.j@gmail.com"
] | lord63.j@gmail.com |
3474ecb3d40fcd0b061ae0df216446bedbe133df | 5f9e0c226c6f99f04446d60cd21282e7e6b05d2c | /shopaholic.py | 29a69172980d2cbfba97c78833ed1b6795f42e57 | [] | no_license | JONNY-ME/my-kattis-solution | 867ac267dbb5faa6f7c2af35b435498a22ae269d | 51c70e0fd25f1f369cdcd2ce49a54d5d0df2358e | refs/heads/main | 2023-06-17T20:04:04.701038 | 2021-07-16T09:35:35 | 2021-07-16T09:35:35 | 386,583,581 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | n=int(input());t=(n//3)*3;s=0
# Sort prices descending and keep only the largest multiple-of-three prefix.
l=sorted([int(i) for i in input().split()], reverse=True)[:t]
# Every third item (the cheapest of each group of three) is the discount.
for i in range(2, t, 3):s+=l[i]
print(s) | [
"yohannesmelese4@gmail.com"
] | yohannesmelese4@gmail.com |
91f3b83f2e2165edc7225395b0c2597f37f99802 | d0237e274f34b6a02f1d78b668a55cb150833435 | /src/yss/yss/root/__init__.py | 4a2d58135ce75c912bdf61978ef9fa5cccd8fa88 | [
"BSD-3-Clause-Modification"
] | permissive | notaliens/youshouldsing | f2a90cc7b7f5d9c64052c72b77827e2c77cd8b40 | 948f7fb30a12eccdc34bb23db4e139afab8ee782 | refs/heads/master | 2021-01-01T17:28:44.739929 | 2019-01-21T05:33:01 | 2019-01-21T05:33:01 | 13,230,677 | 2 | 0 | null | 2013-10-02T03:01:26 | 2013-09-30T23:56:46 | CSS | UTF-8 | Python | false | false | 2,375 | py | import pkg_resources
from substanced.schema import Schema
from substanced.property import PropertySheet
from substanced.interfaces import IRoot
import colander
from pyramid.security import (
Allow,
Everyone,
Authenticated,
)
from substanced.event import subscribe_root_added
from substanced.util import set_acl
from . import comments
class RootSchema(Schema):
    """ The schema representing site properties. """
    # Upper bound on recording frame rate; root_added initialises it to 30.
    max_framerate = colander.SchemaNode(
        colander.Int(),
        title="Max Frame Rate",
        missing=1,
    )
class RootPropertySheet(PropertySheet):
    """Property sheet exposing RootSchema's fields on the site root."""
    schema = RootSchema()
@subscribe_root_added()
def root_added(event):
    """Bootstrap the freshly created site root.

    Sets up ACLs, site properties, the 'yss' catalog, the songs/performers
    containers, a placeholder 'blameme' performer owned by the admin user,
    and a demo song ('Blackbird') loaded from package data.
    """
    registry = event.registry
    root = event.object
    # Extend the default root ACL with public view/index and member likes.
    acl = list(root.__acl__)
    acl.extend(
        [
            (Allow, Everyone, 'view'),
            (Allow, Everyone, 'yss.indexed'),
            (Allow, Authenticated, 'yss.like'),
        ]
    )
    set_acl(root, acl)
    root.title = root.sdi_title = 'You Should Sing'
    root.max_framerate = 30
    root['catalogs'].add_catalog('yss')
    root['songs'] = registry.content.create('Songs')
    # Any authenticated user may upload songs and record performances.
    set_acl(root['songs'], [
        (Allow, Authenticated, 'yss.upload'),
        (Allow, Authenticated, 'yss.record'),
    ])
    performers = root['performers'] = registry.content.create('Performers')
    # Placeholder performer attributed to uploads with no real owner.
    blameme = registry.content.create('Performer')
    performers['blameme'] = blameme
    blameme['recordings'] = registry.content.create('Recordings')
    blameme['photo'] = registry.content.create('File')
    blameme['photo_thumbnail'] = registry.content.create('File')
    blameme.user = root['principals']['users']['admin']
    # Demo song content shipped inside the 'yss' package.
    timings_json = pkg_resources.resource_string(
        'yss', 'blackbird.json').decode('utf-8')
    song = registry.content.create(
        'Song',
        'Blackbird',
        'The Beatles',
        timings=timings_json,
        lyrics=timings_json,
        audio_stream=pkg_resources.resource_stream('yss', 'blackbird.opus')
    )
    root['songs']['blackbird'] = song
    song.mimetype = 'audio/opus'
    song.uploader = blameme
def performer(request):
    """Request property: the Performer profile of the logged-in user.

    Registered via ``config.add_request_method`` in ``includeme``; yields
    None when the request has no authenticated user.
    """
    current_user = request.user
    return current_user.performer if current_user is not None else None
def includeme(config):
    """Pyramid include hook: wire up root properties and request helpers."""
    config.add_propertysheet('YSS', RootPropertySheet, IRoot)
    # Expose the logged-in user's Performer as the cached ``request.performer``.
    config.add_request_method(performer, reify=True)
    config.include(comments)
| [
"chrism@plope.com"
] | chrism@plope.com |
4aeb6283041e70a8c1de4a1ba7dc3d632e950b36 | a3d2f81c04bde252ef7554e65ecbf2c32ce3c2dc | /feincms/views/decorators.py | 62727ae79a4ef972fdad1051bad1b8e7a043a6ec | [
"BSD-2-Clause"
] | permissive | natea/feincms | 3dd6949195352ad96e13c2f3b4d8d7a1677a97b8 | 1d45e3aae5fba6e4a2eccf8ee7675b2ffff2b70c | refs/heads/master | 2020-12-25T10:10:14.358175 | 2011-03-28T14:56:55 | 2011-03-28T14:56:55 | 1,497,290 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | from django.http import HttpResponse
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps
from feincms.module.page.models import Page
def add_page_to_extra_context(view_func):
    """
    Adds the best-match page to the extra_context keyword argument. Mainly used
    to provide generic views which integrate into the page module.
    """
    def inner(request, *args, **kwargs):
        # Make sure an extra_context dict exists before injecting the page.
        kwargs.setdefault('extra_context', {})
        kwargs['extra_context']['feincms_page'] = Page.objects.best_match_for_request(request)
        return view_func(request, *args, **kwargs)
    return wraps(view_func)(inner)
def standalone(view_func):
    """
    Marks the view method as standalone view; this means that
    ``HttpResponse`` objects returned from ``ApplicationContent``
    are returned directly, without further processing.
    """
    def wrapped(request, *args, **kwargs):
        result = view_func(request, *args, **kwargs)
        # Tag real responses so they are passed through unmodified.
        if isinstance(result, HttpResponse):
            result.standalone = True
        return result
    return wraps(view_func)(wrapped)
"mk@spinlock.ch"
] | mk@spinlock.ch |
6e150deceef1c7a9b30fb1a8608864d3f6d44be6 | c0c4fe8f9aff2e7684fcaf10329f963873753b2a | /doc/examples/scripts/sequence/thca_synthase_polymorphism.py | b480f9e3c1e9a2e58d08430f51d6283c83744cbb | [
"BSD-3-Clause"
] | permissive | thomasnevolianis/biotite | 85e1b9d6a1fbb5d9f81501a8ebc617bc26388ab9 | 916371eb602cfcacb2d5356659298ef38fa01fcc | refs/heads/master | 2022-11-30T19:40:53.017368 | 2020-08-04T07:00:59 | 2020-08-04T07:00:59 | 285,375,415 | 0 | 0 | BSD-3-Clause | 2020-08-05T18:41:48 | 2020-08-05T18:41:47 | null | UTF-8 | Python | false | false | 4,842 | py | """
Polymorphisms in the THCA synthase gene
=======================================
The THCA synthase catalyzes the last step in the synthesis of
tetrahydrocannabinolic acid (THCA), the precursor molecule of
tetrahydrocannabinol (THC).
Two types of *cannabis sativa* are distinguished: While the *drug-type*
strains produce high levels of THCA, *fiber-type* strains produce a low
amount. One molecular difference between these two types are
polymorphisms in THCA synthase gene [1]_.
This script takes THCA synthase gene sequences from different
*cannabis sativa* strains, translates them into protein sequences and
creates a consensus sequence for each of the two strain types.
Eventually, an alignment is plotted depicting the polymorphic positions
between the two consensus sequences.
.. [1] M Kojoma, H Seki, S Yoshida and T Muranaka,
"DNA polymorphisms in the tetrahydrocannabinolic acid (THCA) synthase
gene in 'drug-type' and 'fiber-type' Cannabis sativa L."
Forensic Sci Int, 159, 132-140 (2006).
"""
# Code source: Patrick Kunzmann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import biotite.sequence as seq
import biotite.sequence.align as align
import biotite.sequence.io.genbank as gb
import biotite.sequence.align as align
import biotite.sequence.graphics as graphics
import biotite.database.entrez as entrez
import biotite.application.clustalo as clustalo
# Search for DNA sequences that belong to the cited article
query = entrez.SimpleQuery("Forensic Sci. Int.", "Journal") \
& entrez.SimpleQuery("159", "Volume") \
& entrez.SimpleQuery("132-140", "Page Number")
uids = entrez.search(query, db_name="nuccore")
# Download and read file containing the Genbank records for the THCA
# synthase genes
multi_file = gb.MultiFile.read(entrez.fetch_single_file(
uids, file_name=None, db_name="nuccore", ret_type="gb"
))
# This dictionary maps the strain ID to the protein sequence
sequences = {}
for gb_file in multi_file:
annotation = gb.get_annotation(gb_file)
# Find ID of strain in 'source' feature
strain = None
for feature in annotation:
if feature.key == "source":
strain = int(feature.qual["strain"])
assert strain is not None
# Find corresponding protein sequence in 'CDS' feature
sequence = None
for feature in annotation:
if feature.key == "CDS":
sequence = seq.ProteinSequence(
# Remove whitespace in sequence
# resulting from line breaks
feature.qual["translation"].replace(" ", "")
)
assert sequence is not None
sequences[strain] = sequence
# None of the THCA synthase variants have an insertion or deletion
# -> each one should have the same sequence length
seq_len = len(list(sequences.values())[0])
for sequence in sequences.values():
assert len(sequence) == seq_len
# Create consensus sequences for the drug-type and fiber-type cannabis
# strains
def create_consensus(sequences):
    """Build the consensus ProteinSequence.

    At every position the consensus residue is the most frequent symbol
    code among the given (equal-length) sequences.
    """
    # Stack the symbol codes into an (n_sequences, seq_len) matrix
    code_matrix = np.stack([sequence.code for sequence in sequences])
    # Column-wise majority vote: most frequent code per position
    consensus_code = np.array(
        [np.argmax(np.bincount(column)) for column in code_matrix.T],
        dtype=int,
    )
    # Wrap the consensus code in a ProteinSequence object
    consensus_sequence = seq.ProteinSequence()
    consensus_sequence.code = consensus_code
    return consensus_sequence
# Consensus over the six 'drug-type' strains sampled in the study
drug_type_consensus = create_consensus(
    [sequences[strain] for strain in (1, 10, 13, 20, 53, 54)]
)
# Consensus over the seven 'fiber-type' strains
fiber_type_consensus = create_consensus(
    [sequences[strain] for strain in (9, 5, 11, 45, 66, 68, 78)]
)
# Create an alignment for visualization purposes
# No insertion/deletions -> Align ungapped
matrix = align.SubstitutionMatrix.std_protein_matrix()
alignment = align.align_ungapped(
drug_type_consensus, fiber_type_consensus, matrix=matrix
)
# A colormap for hightlighting sequence dissimilarity:
# At low similarity the symbols are colored red,
# at high similarity the symbols are colored white
cmap = LinearSegmentedColormap.from_list(
"custom", colors=[(1.0, 0.3, 0.3), (1.0, 1.0, 1.0)]
# ^ reddish ^ white
)
fig = plt.figure(figsize=(8.0, 6.0))
ax = fig.add_subplot(111)
graphics.plot_alignment_similarity_based(
ax, alignment, matrix=matrix, symbols_per_line=50,
labels=["Drug-type", "Fiber-type"],
show_numbers=True, cmap=cmap, symbol_size=8
)
fig.tight_layout()
plt.show() | [
"patrick.kunzm@gmail.com"
] | patrick.kunzm@gmail.com |
b9744bf4821bf09b5f67a2fb97e23214e355a077 | b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a | /examples/pwr_run/checkpointing/non_slurm/max_pwr/job2.py | 82a946d08723b6eac162e05a65750b65e8a2c09f | [
"MIT"
] | permissive | boringlee24/keras_old | 3bf7e3ef455dd4262e41248f13c04c071039270e | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | refs/heads/master | 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,940 | py | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 128
args_lr = 0.001
args_model = 'vgg16'
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_pwr/' + job_name + '*'
total_epochs = 5
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: checkpoint the model, record it, and exit."""
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists
    for f in glob.glob(save_files):
        os.remove(f)
    # checkpoint file name encodes the epoch reached (see PrintEpoch)
    model.save('/scratch/li.baol/checkpoint_max_pwr/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    # flag this job as checkpointed in the shared checkpoint.json
    checkpoint_dict = {}
    with open('checkpoint.json', 'r') as fp:
        checkpoint_dict = json.load(fp)
    checkpoint_dict[job_name] = 1
    json_file3 = json.dumps(checkpoint_dict)
    with open('checkpoint.json', 'w') as fp:
        fp.write(json_file3)
    sys.exit()

signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback that tracks the current epoch for the SIGTERM handler."""
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch
        #remaining_epochs = epochs - epoch
        # remember the epoch so terminateProcess can name the checkpoint file
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
ckpt_qual_dict = {}
while True:
if os.path.exists('ckpt_qual.json'):
os.rename('ckpt_qual.json', 'ckpt_qual_lock.json')
break
else:
time.sleep(1)
with open('ckpt_qual_lock.json', 'r') as fp:
ckpt_qual_dict = json.load(fp)
ckpt_qual_dict[job_name] = 1
json_file2 = json.dumps(ckpt_qual_dict)
with open('ckpt_qual_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('ckpt_qual_lock.json', 'ckpt_qual.json')
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=total_epochs,
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
finish_dict = {}
while True:
if os.path.exists('finish.json'):
os.rename('finish.json', 'finish_lock.json')
break
else:
time.sleep(1)
with open('finish_lock.json', 'r') as fp:
finish_dict = json.load(fp)
finish_dict[job_name] = 1
json_file2 = json.dumps(finish_dict)
with open('finish_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('finish_lock.json', 'finish.json')
| [
"baolin.li1994@gmail.com"
] | baolin.li1994@gmail.com |
76bdb93913f9d300ebad0d08e7d3e540f3824537 | b87f66b13293782321e20c39aebc05defd8d4b48 | /maps/build/mayavi/enthought/mayavi/core/filter.py | 73f5839cd3d9347f5fc23d824b5081341fc89a4a | [] | no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,988 | py | """The base filter class from which all MayaVi filters derive.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from enthought.traits.api import List, Str
# Local imports
from enthought.mayavi.core.source import Source
from enthought.mayavi.core.pipeline_base import PipelineBase
from enthought.mayavi.core.pipeline_info import (PipelineInfo,
get_tvtk_dataset_name)
######################################################################
# `Filter` class.
######################################################################
class Filter(Source):
""" Base class for all the Mayavi filters.
"""
# The version of this class. Used for persistence.
__version__ = 0
# The inputs for this filter.
inputs = List(PipelineBase, record=False)
# The icon
icon = Str('filter.ico')
# The human-readable type for this object
type = Str(' filter')
# Information about what this object can consume.
input_info = PipelineInfo(datasets=['any'])
######################################################################
# `object` interface.
######################################################################
def __init__(self, **traits):
super(Filter, self).__init__(**traits)
# Let the filter setup its pipeline.
self.setup_pipeline()
def __get_pure_state__(self):
d = super(Filter, self).__get_pure_state__()
# Inputs are setup dynamically, don't pickle them.
d.pop('inputs', None)
return d
######################################################################
# `Filter` interface.
######################################################################
def setup_pipeline(self):
"""Override this method so that it *creates* its tvtk
pipeline.
This method is invoked when the object is initialized via
`__init__`. Note that at the time this method is called, the
tvtk data pipeline will *not* yet be setup. So upstream data
will not be available. The idea is that you simply create the
basic objects and setup those parts of the pipeline not
dependent on upstream sources and filters.
"""
pass
def update_pipeline(self):
"""Override this method so that it *updates* the tvtk pipeline
when data upstream is known to have changed.
This method is invoked (automatically) when the input fires a
`pipeline_changed` event.
"""
raise NotImplementedError
def update_data(self):
"""Override this method to do what is necessary when upstream
data changes.
This method is invoked (automatically) when any of the inputs
sends a `data_changed` event.
"""
# Invoke render to update any changes.
self.render()
# Propagate the data_changed event.
self.data_changed = True
######################################################################
# `Base` interface
######################################################################
def start(self):
"""This is invoked when this object is added to the mayavi
pipeline. Note that when start is invoked, all the other
information for the pipeline should be already set.
"""
# Do nothing if we are already running.
if self.running:
return
# Setup event handlers.
self._setup_event_handlers()
# Update the pipeline.
self.update_pipeline()
# Call parent method to start the children and set the state.
super(Filter, self).start()
def stop(self):
"""Invoked when this object is removed from the mayavi
pipeline. This is where you remove your actors from the
scene.
"""
if not self.running:
return
# Teardown event handlers.
self._teardown_event_handlers()
# Call parent method to stop the children and set the state.
super(Filter, self).stop()
######################################################################
# Non-public interface
######################################################################
def _set_outputs(self, new_outputs):
"""Set `self.outputs` to the given list of `new_outputs`. You
should always use this method to set `self.outputs`.
"""
old_outputs = self.outputs
self.outputs = new_outputs
if len(new_outputs) > 0:
self.output_info.datasets = \
[get_tvtk_dataset_name(new_outputs[0])]
if old_outputs == self.outputs:
# Even if the outputs don't change we want to propagate a
# data_changed event since the data could have changed.
self.data_changed = True
def _inputs_changed(self, old, new):
if self.running:
self.update_pipeline()
self._setup_input_events(old, new)
def _inputs_items_changed(self, list_event):
if self.running:
self.update_pipeline()
self._setup_input_events(list_event.removed, list_event.added)
def _setup_event_handlers(self):
self._setup_input_events([], self.inputs)
def _teardown_event_handlers(self):
self._setup_input_events(self.inputs, [])
def _setup_input_events(self, removed, added):
for input in removed:
input.on_trait_event(self.update_pipeline, 'pipeline_changed',
remove=True)
input.on_trait_event(self.update_data, 'data_changed',
remove=True)
for input in added:
input.on_trait_event(self.update_pipeline, 'pipeline_changed')
input.on_trait_event(self.update_data, 'data_changed')
| [
"fspaolo@gmail.com"
] | fspaolo@gmail.com |
57e5d03c8401a81a3f6c47bf41522f40cacefad2 | b8faf65ea23a2d8b119b9522a0aa182e9f51d8b1 | /vmraid/desk/form/assign_to.py | 0b937b3dc417bfc913934af983ff322a20a1063c | [
"MIT"
] | permissive | vmraid/vmraid | a52868c57b1999a8d648441eb9cd05815204345d | 3c2e2a952003ba7ea2cf13673b9e79e127f4166e | refs/heads/main | 2022-07-29T18:59:28.585133 | 2022-04-22T08:02:52 | 2022-04-22T08:02:52 | 372,473,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,475 | py | # Copyright (c) 2015, VMRaid and Contributors
# License: MIT. See LICENSE
"""assign/unassign to ToDo"""
import json
import vmraid
import vmraid.share
import vmraid.utils
from vmraid import _
from vmraid.desk.doctype.notification_log.notification_log import (
enqueue_create_notification,
get_title,
get_title_html,
)
from vmraid.desk.form.document_follow import follow_document
class DuplicateToDoError(vmraid.ValidationError):
pass
def get(args=None):
"""get assigned to"""
if not args:
args = vmraid.local.form_dict
return vmraid.get_all(
"ToDo",
fields=["allocated_to as owner", "name"],
filters={
"reference_type": args.get("doctype"),
"reference_name": args.get("name"),
"status": ("!=", "Cancelled"),
},
limit=5,
)
@vmraid.whitelist()
def add(args=None):
"""add in someone's to do list
args = {
"assign_to": [],
"doctype": ,
"name": ,
"description": ,
"assignment_rule":
}
"""
if not args:
args = vmraid.local.form_dict
users_with_duplicate_todo = []
shared_with_users = []
for assign_to in vmraid.parse_json(args.get("assign_to")):
filters = {
"reference_type": args["doctype"],
"reference_name": args["name"],
"status": "Open",
"allocated_to": assign_to,
}
if vmraid.get_all("ToDo", filters=filters):
users_with_duplicate_todo.append(assign_to)
else:
from vmraid.utils import nowdate
if not args.get("description"):
args["description"] = _("Assignment for {0} {1}").format(args["doctype"], args["name"])
d = vmraid.get_doc(
{
"doctype": "ToDo",
"allocated_to": assign_to,
"reference_type": args["doctype"],
"reference_name": args["name"],
"description": args.get("description"),
"priority": args.get("priority", "Medium"),
"status": "Open",
"date": args.get("date", nowdate()),
"assigned_by": args.get("assigned_by", vmraid.session.user),
"assignment_rule": args.get("assignment_rule"),
}
).insert(ignore_permissions=True)
# set assigned_to if field exists
if vmraid.get_meta(args["doctype"]).get_field("assigned_to"):
vmraid.db.set_value(args["doctype"], args["name"], "assigned_to", assign_to)
doc = vmraid.get_doc(args["doctype"], args["name"])
# if assignee does not have permissions, share
if not vmraid.has_permission(doc=doc, user=assign_to):
vmraid.share.add(doc.doctype, doc.name, assign_to)
shared_with_users.append(assign_to)
# make this document followed by assigned user
if vmraid.get_cached_value("User", assign_to, "follow_assigned_documents"):
follow_document(args["doctype"], args["name"], assign_to)
# notify
notify_assignment(
d.assigned_by,
d.allocated_to,
d.reference_type,
d.reference_name,
action="ASSIGN",
description=args.get("description"),
)
if shared_with_users:
user_list = format_message_for_assign_to(shared_with_users)
vmraid.msgprint(
_("Shared with the following Users with Read access:{0}").format(user_list, alert=True)
)
if users_with_duplicate_todo:
user_list = format_message_for_assign_to(users_with_duplicate_todo)
vmraid.msgprint(_("Already in the following Users ToDo list:{0}").format(user_list, alert=True))
return get(args)
@vmraid.whitelist()
def add_multiple(args=None):
if not args:
args = vmraid.local.form_dict
docname_list = json.loads(args["name"])
for docname in docname_list:
args.update({"name": docname})
add(args)
def close_all_assignments(doctype, name):
assignments = vmraid.db.get_all(
"ToDo",
fields=["allocated_to"],
filters=dict(reference_type=doctype, reference_name=name, status=("!=", "Cancelled")),
)
if not assignments:
return False
for assign_to in assignments:
set_status(doctype, name, assign_to.allocated_to, status="Closed")
return True
@vmraid.whitelist()
def remove(doctype, name, assign_to):
return set_status(doctype, name, assign_to, status="Cancelled")
def set_status(doctype, name, assign_to, status="Cancelled"):
"""remove from todo"""
try:
todo = vmraid.db.get_value(
"ToDo",
{
"reference_type": doctype,
"reference_name": name,
"allocated_to": assign_to,
"status": ("!=", status),
},
)
if todo:
todo = vmraid.get_doc("ToDo", todo)
todo.status = status
todo.save(ignore_permissions=True)
notify_assignment(todo.assigned_by, todo.allocated_to, todo.reference_type, todo.reference_name)
except vmraid.DoesNotExistError:
pass
# clear assigned_to if field exists
if vmraid.get_meta(doctype).get_field("assigned_to") and status == "Cancelled":
vmraid.db.set_value(doctype, name, "assigned_to", None)
return get({"doctype": doctype, "name": name})
def clear(doctype, name):
"""
Clears assignments, return False if not assigned.
"""
assignments = vmraid.db.get_all(
"ToDo", fields=["allocated_to"], filters=dict(reference_type=doctype, reference_name=name)
)
if not assignments:
return False
for assign_to in assignments:
set_status(doctype, name, assign_to.allocated_to, "Cancelled")
return True
def notify_assignment(
assigned_by, allocated_to, doc_type, doc_name, action="CLOSE", description=None
):
"""
Notify assignee that there is a change in assignment
"""
if not (assigned_by and allocated_to and doc_type and doc_name):
return
# return if self assigned or user disabled
if assigned_by == allocated_to or not vmraid.db.get_value("User", allocated_to, "enabled"):
return
# Search for email address in description -- i.e. assignee
user_name = vmraid.get_cached_value("User", vmraid.session.user, "full_name")
title = get_title(doc_type, doc_name)
description_html = "<div>{0}</div>".format(description) if description else None
if action == "CLOSE":
subject = _("Your assignment on {0} {1} has been removed by {2}").format(
vmraid.bold(doc_type), get_title_html(title), vmraid.bold(user_name)
)
else:
user_name = vmraid.bold(user_name)
document_type = vmraid.bold(doc_type)
title = get_title_html(title)
subject = _("{0} assigned a new task {1} {2} to you").format(user_name, document_type, title)
notification_doc = {
"type": "Assignment",
"document_type": doc_type,
"subject": subject,
"document_name": doc_name,
"from_user": vmraid.session.user,
"email_content": description_html,
}
enqueue_create_notification(allocated_to, notification_doc)
def format_message_for_assign_to(users):
return "<br><br>" + "<br>".join(users)
| [
"sowrisurya@outlook.com"
] | sowrisurya@outlook.com |
ad3b204e0353db2d321706bbc3b27e91899eaa08 | 8a0f8d4b05e26f04dd584ed51ab77c78f00740a8 | /monasca_notification/types/notifiers.py | a1040a14ac468bbb4580173f5a8bd071dbd5e838 | [] | no_license | TonyChengTW/monasca-tony | 1ce9d8fb9299864dc049c9caef0f6ce10b871670 | c2b3e70a3fd331410a642aec1a5e0ac58d4dc7e4 | refs/heads/master | 2021-06-27T02:14:06.889700 | 2017-09-15T02:46:25 | 2017-09-15T02:46:25 | 103,096,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,097 | py | # (C) Copyright 2015,2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from monasca_common.simport import simport
from monasca_notification.plugins import email_notifier
from monasca_notification.plugins import pagerduty_notifier
from monasca_notification.plugins import webhook_notifier
log = logging.getLogger(__name__)
possible_notifiers = None
configured_notifiers = None
statsd_counter = None
statsd = None
statsd_timer = None
def init(statsd_obj):
global statsd, statsd_timer, \
possible_notifiers, configured_notifiers,\
statsd_counter
statsd = statsd_obj
statsd_timer = statsd.get_timer()
statsd_counter = {}
configured_notifiers = {}
possible_notifiers = [
email_notifier.EmailNotifier(log),
webhook_notifier.WebhookNotifier(log),
pagerduty_notifier.PagerdutyNotifier(log)
]
def load_plugins(config):
global possible_notifiers
for plugin_class in config.get("plugins", []):
try:
possible_notifiers.append(simport.load(plugin_class)(log))
except Exception:
log.exception("unable to load the class {0} , ignoring it".format(plugin_class))
def enabled_notifications():
global configured_notifiers
results = []
for key in configured_notifiers:
results.append(key.upper())
return results
def config(cfg):
global possible_notifiers, configured_notifiers, statsd_counter
formatted_config = {t.lower(): v for t, v in cfg.items()}
for notifier in possible_notifiers:
ntype = notifier.type.lower()
if ntype in formatted_config:
try:
notifier.config(formatted_config[ntype])
configured_notifiers[ntype] = notifier
statsd_counter[ntype] = statsd.get_counter(notifier.statsd_name)
log.info("{} notification ready".format(ntype))
except Exception:
log.exception("config exception for {}".format(ntype))
else:
log.warn("No config data for type: {}".format(ntype))
config_with_no_notifiers = set(formatted_config.keys()) - set(configured_notifiers.keys())
# Plugins section contains only additional plugins and should not be
# considered as a separate plugin
if 'plugins' in config_with_no_notifiers:
config_with_no_notifiers.remove('plugins')
if config_with_no_notifiers:
log.warn("No notifiers found for {0}". format(", ".join(config_with_no_notifiers)))
def send_notifications(notifications):
sent = []
failed = []
invalid = []
for notification in notifications:
ntype = notification.type
if ntype not in configured_notifiers:
log.warn("attempting to send unconfigured notification: {}".format(ntype))
invalid.append(notification)
continue
notification.notification_timestamp = time.time()
with statsd_timer.time(ntype + '_time'):
result = send_single_notification(notification)
if result:
sent.append(notification)
statsd_counter[ntype].increment(1)
else:
failed.append(notification)
return sent, failed, invalid
def send_single_notification(notification):
global configured_notifiers
ntype = notification.type
try:
return configured_notifiers[ntype].send_notification(notification)
except Exception:
log.exception("send_notification exception for {}".format(ntype))
return False
| [
"tony.pig@gmail.com"
] | tony.pig@gmail.com |
a5373811bc314524609755af1eb7c1cf21b3e2e9 | b2644f42e5645f74d33e0c64ad1183172ce2be88 | /handpose/utils/image/__init__.py | 3e8695f9eb9756e8424bb94bdeb3284989271ed0 | [] | no_license | YanWanquan/handpose | e9282b2a8485f52d8b286c20a0f0d3f1e97b7b0b | a755ff80011007ba124ff5cd4c47f0c99ca28b8b | refs/heads/master | 2023-03-17T07:55:04.049356 | 2020-03-20T06:02:12 | 2020-03-20T06:02:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | from __future__ import absolute_import
from .image_utils import *
from .colors import *
from .draw_detection import draw_detection
| [
"tsai.tsunghua@gmail.com"
] | tsai.tsunghua@gmail.com |
78cf9c6623be1565196efc57f24c7d06f933e7ac | 91a41c5ce70819a69dfc6f6f2b94f862244267da | /commands/Translate.py | 30280c6d98fd5c567a8bc61273ba085bd72bd7c0 | [] | no_license | mions1/silvia | 9e489a5537d125acdffc3089c50ffa766b739738 | f2e5bf7d0c8cf1a90fcb8a663380aff91a6f63a3 | refs/heads/master | 2023-03-18T22:43:53.610163 | 2021-03-09T15:36:11 | 2021-03-09T15:36:11 | 346,031,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,207 | py | from . import Command
from google.cloud import translate_v2 as translate
from commands import builtin as bin
class Translate():
LANGUAGES = {
'af': 'africano',
'sq': 'albanese',
'ar': 'arabo',
'hy': 'armeno',
'bn': 'Bengali',
'ca': 'catalano',
'zh': 'cinese',
'zh-cn': 'Chinese (Mandarin/China)',
'zh-tw': 'Chinese (Mandarin/Taiwan)',
'zh-yue': 'Chinese (Cantonese)',
'hr': 'croato',
'cs': 'Czech',
'da': 'Danish',
'nl': 'tedesco',
'en': 'inglese',
'en-au': 'English (Australia)',
'en-uk': 'English (United Kingdom)',
'en-us': 'English (United States)',
'eo': 'Esperanto',
'fi': 'Finnish',
'fr': 'French',
'de': 'German',
'el': 'Greek',
'hi': 'Hindi',
'hu': 'Hungarian',
'is': 'Icelandic',
'id': 'Indonesian',
'it': 'italiano',
'ja': 'giapponese',
'ko': 'Korean',
'la': 'Latin',
'lv': 'Latvian',
'mk': 'Macedonian',
'no': 'Norwegian',
'pl': 'Polish',
'pt': 'portoghese',
'pt-br': 'Portuguese (Brazil)',
'ro': 'Romanian',
'ru': 'Russian',
'sr': 'Serbian',
'sk': 'Slovak',
'es': 'spagnolo',
'es-es': 'Spanish (Spain)',
'es-us': 'Spanish (United States)',
'sw': 'Swahili',
'sv': 'Swedish',
'ta': 'Tamil',
'th': 'Thai',
'tr': 'Turkish',
'vi': 'Vietnamese',
'cy': 'Welsh'
}
def __init__(self, text):
self.text = text
pass
def elaborazione(self):
split = self.text.split()
pre = "come si dice".split()
self.phrase = " ".join(split[len((self.text[:self.text.index(" ".join(pre))]).split()) + len(pre):-2])
self.language = split[-1]
self.lang_tag = self.get_tag(self.language)
pass
def esecuzione(self):
translate_client = translate.Client()
translation = translate_client.translate(
self.phrase, target_language=self.lang_tag)
self.from_lang_tag = translation["detectedSourceLanguage"]
from_language = self.get_lang(self.from_lang_tag)
print(u'Language from: {}'.format(from_language + "=" + self.from_lang_tag))
print(u'Language to: {}'.format(self.language + "=" + self.lang_tag))
print(u'Text: {}'.format(self.phrase))
print(u'Translation: {}'.format(translation['translatedText']))
# s = phrase+" " if len(phrase.split()) < 4 else ""
# s += "in "+language+" si dice "
# say(s)
self.translation = translation['translatedText']
pass
def risposta(self):
return self.phrase+" in "+self.language+" si dice "+self.translation
def run(self):
self.elaborazione()
self.esecuzione()
return self.risposta()
def get_tag(self, language="inglese"):
for key in Translate.LANGUAGES:
if Translate.LANGUAGES[key] == language:
return key
return None
def get_lang(self, tag="en"):
return Translate.LANGUAGES[tag] | [
"simone.mione1@gmail.com"
] | simone.mione1@gmail.com |
a74be285641ea899eccb1367db3ce61e01d8b919 | caa175a933aca08a475c6277e22cdde1654aca7b | /tests/models/product/product_relation_types/test_relations.py | 745370f316f860b8d7729382fd9163a74d6c8c2a | [
"MIT"
] | permissive | simonsobs/acondbs | 01d68ae40866461b85a6c9fcabdfbea46ef5f920 | d18c7b06474b0dacb1dcf1c6dbd1e743407645e2 | refs/heads/main | 2023-07-07T04:33:40.561273 | 2023-06-28T22:08:00 | 2023-06-28T22:08:00 | 239,022,783 | 0 | 1 | MIT | 2023-06-26T20:36:39 | 2020-02-07T21:07:46 | Python | UTF-8 | Python | false | false | 1,920 | py | import pytest
from flask import Flask
from acondbs.db.sa import sa
from acondbs.models import ProductRelationType
@pytest.fixture
def app(app_empty: Flask) -> Flask:
y = app_empty
#
# +--------+ +-------+
# | | --(reverse)-> | |
# | parent | | child |
# | | <-(reverse)-- | |
# +--------+ +-------+
#
#
# +------- -+
# | | --(reverse)-+
# | sibling | |
# | | <-----------+
# +------ --+
#
parent = ProductRelationType(name='parent')
child = ProductRelationType(name='child')
parent.reverse = child
assert child.reverse == parent
sibling = ProductRelationType(name='sibling')
sibling.reverse = sibling
# commit
with y.app_context():
sa.session.add(parent)
sa.session.add(sibling)
sa.session.commit()
return y
def test_reverse(app: Flask) -> None:
with app.app_context():
parent = ProductRelationType.query.filter_by(name='parent').one()
child = ProductRelationType.query.filter_by(name='child').one()
assert child is parent.reverse
assert parent is child.reverse
def test_self_reverse(app: Flask) -> None:
with app.app_context():
sibling = ProductRelationType.query.filter_by(name='sibling').one()
assert sibling is sibling.reverse
def test_cascade(app: Flask) -> None:
# delete parent
with app.app_context():
parent = ProductRelationType.query.filter_by(name='parent').one()
sa.session.delete(parent)
sa.session.commit()
# assert
with app.app_context():
parent = ProductRelationType.query.filter_by(name='parent').one_or_none()
child = ProductRelationType.query.filter_by(name='child').one_or_none()
assert parent is None
assert child is None
| [
"tai.sakuma@gmail.com"
] | tai.sakuma@gmail.com |
7f61e83b66c2db625d75c713ddfe56f43b63fe62 | 73d5c11866f739ea0f4cbdb86662e9c11d9c081a | /if_test.py | f8054c582bf14fbed0b1f4ba322f88c347869fd1 | [] | no_license | qorud02/changwonai | d9de4adbc3604be2706bfe0df8184cd60f89c0f4 | dcf9996ccc51a674681d3639c2b57a85444146ac | refs/heads/main | 2023-01-25T03:14:06.616372 | 2020-11-22T09:39:16 | 2020-11-22T09:39:16 | 314,939,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # print(type(3>5))
# print(6>5)
# if 6>5:
# print("Do 1")
# print("Do 2")
# else:
# print("Not doing")
# 나이 = 15
# if 나이 < 20:
# # print("청소년 할인")
# 걸음 = intinput("걸음을 입력해주세요")
# if 걸음 >= 1000:
# print("목표 달성")
# 시간 = 13
# if 시간 < 12:
# print("오전입니다")
# else:
# print("오후입니다")
import word
word.print_word() | [
"you@example.com"
] | you@example.com |
3eb1f20a7f22613daa82fc565db909c67c1988bc | 105212e4d2d2175d5105e05552e29b300375e039 | /DL/RL/deepmind/pysc2-examples/demo.py | 29cd7855d8c76d513357636f8fded269594a67e6 | [
"Apache-2.0"
] | permissive | Asher-1/AI | 84f0c42651c0b07e6b7e41ebb354258db64dd0d1 | a70f63ebab3163f299f7f9d860a98695c0a3f7d5 | refs/heads/master | 2022-11-26T07:24:37.910301 | 2019-05-30T13:04:31 | 2019-05-30T13:04:31 | 160,031,310 | 7 | 1 | null | 2022-11-21T22:02:53 | 2018-12-02T09:19:03 | Jupyter Notebook | UTF-8 | Python | false | false | 2,038 | py | import sys
import os
import pdb
from absl import flags
from pysc2.env import sc2_env
from pysc2.lib import actions as sc2_actions
import os
import datetime
from pysc2.env import environment
import numpy as np
from common.vec_env.subproc_vec_env import SubprocVecEnv
import random
import time
FLAGS = flags.FLAGS
def construct_action(marine_num, x, y):
move_action = []
# Base action is choosing a control group.
# 4 == select_control_group
move_action.append(
sc2_actions.FunctionCall(4, [[0], [marine_num]]))
# Right click.
# 331 == Move
move_action.append(
sc2_actions.FunctionCall(331, [[0], [int(x), int(y)]]))
return move_action
def get_position(env, marine_num):
"""Get position by selecting a unit.
This function has a side effect, so we return rewards and dones.
"""
select_action = construct_action(marine_num, -1, -1)
_, rs, dones, _, _, _, selected, _ = env.step([select_action])
xys = []
for s in selected:
pos = s.nonzero()
x = pos[1][0]
y = pos[2][0]
xys.append((x, y))
return xys, rs, dones
def main():
FLAGS(sys.argv)
env = SubprocVecEnv(1, 'CollectMineralShards')
env.reset()
total_reward = 0
for _ in range(1000):
marine = random.randrange(2)
x = random.randrange(32)
y = random.randrange(32)
print('Move %d to (%d, %d)' % (marine, x, y))
move_action = construct_action(marine, x, y)
# This controls the APM.
for _ in range(7):
obs, rs, dones, _, _, _, selected, screens = env.step([move_action])
total_reward += rs
# Querying the position
m_pos = {}
m_pos['0'], rs, dones = get_position(env, 0)
total_reward += rs
m_pos['1'], rs, dones = get_position(env, 1)
total_reward += rs
print(rs)
print(dones)
print('Total reward: ', total_reward)
print(m_pos)
env.close()
if __name__ == '__main__':
main()
| [
"ludahai19@163.com"
] | ludahai19@163.com |
e1b6bb2e250e6b34c211768511bd921692120eaf | 2837519560abb55b83ed9a0ff19fe468568057a8 | /flood_fill.py | 63c6e40f4a5d2e58d7c475dd9e75a0b37e3a94bb | [] | no_license | shahakshay11/DFS-1 | 47df064f2326805444647399c0f07bb1c56ffd53 | 9d06719ab8432b837dc6361b381a34c2663f8117 | refs/heads/master | 2022-04-18T12:21:07.249034 | 2020-04-17T05:09:41 | 2020-04-17T05:09:41 | 256,349,677 | 0 | 0 | null | 2020-04-16T23:06:32 | 2020-04-16T23:06:31 | null | UTF-8 | Python | false | false | 1,432 | py | """
// Time Complexity : O(m*n)
// Space Complexity : O(m*n)
// Did this code successfully run on Leetcode : No
// Any problem you faced while coding this : Ending recursion
// Your code here along with comments explaining your approach
Algorithm explanation
DFS
- Idea is to run DFS on sr,sc in the matrix and update the value of all
the values that have 1
"""
class Solution(object):
def floodFill(self, image, sr, sc, newColor):
"""
:type image: List[List[int]]
:type sr: int
:type sc: int
:type newColor: int
:rtype: List[List[int]]
"""
def dfs(i,j,matrix,m,n,origColor):
matrix[i][j] = newColor
directions = [(1,0),(0,1),(-1,0),(0,-1)]
for x,y in directions:
valx = x + i
valy = y + j
#We need to move only to elements having orig and new color different or else recursion will be endless
if valx >=0 and valx < m and valy >=0 and valy < n and matrix[valx][valy] == origColor:#and newColor != origColor:
dfs(valx,valy,matrix,m,n,origColor)
if not image or not image[0] or image[sr][sc] == newColor: #-> this was essentially to avoid back and forth and image won't change
return image
m = len(image)
n = len(image[0])
dfs(sr,sc,image,m,n,image[sr][sc])
return image | [
"akshay.vjti11@gmail.com"
] | akshay.vjti11@gmail.com |
2c6e6b8017bdb7a5aa4d63abd9ca3ec0a3126253 | fca120e66c06b1e3637c9b7463ee5769afc9af70 | /galaxy/tools/cwl/util.py | 4ece8d450fe43b7b05fc738f96c37f30da74cfc0 | [
"AFL-3.0",
"CC-BY-2.5",
"AFL-2.1",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jerowe/galaxy-lib | fb49cf08538b9702b1b1bb349b4a9757950bd18c | ea29d1d3ee4c28eb3e76daecca9465a1c67b571c | refs/heads/master | 2021-01-23T02:16:13.467881 | 2017-09-01T01:46:38 | 2017-09-01T01:46:38 | 102,438,112 | 0 | 0 | null | 2017-09-05T05:31:28 | 2017-09-05T05:31:28 | null | UTF-8 | Python | false | false | 6,924 | py | """Client-centric CWL-related utilities.
Used to share code between the Galaxy test framework
and other Galaxy CWL clients (e.g. Planemo)."""
import hashlib
import json
import os
from collections import namedtuple
from six import iteritems, StringIO
def output_properties(path=None, content=None):
checksum = hashlib.sha1()
properties = {
"class": "File",
}
if path is not None:
properties["path"] = path
f = open(path, "rb")
else:
f = StringIO(content)
try:
contents = f.read(1024 * 1024)
filesize = 0
while contents != "":
checksum.update(contents)
filesize += len(contents)
contents = f.read(1024 * 1024)
finally:
f.close()
properties["checksum"] = "sha1$%s" % checksum.hexdigest()
properties["size"] = filesize
return properties
def galactic_job_json(job, test_data_directory, upload_func, collection_create_func):
"""Adapt a CWL job object to the Galaxy API.
CWL derived tools in Galaxy can consume a job description sort of like
CWL job objects via the API but paths need to be replaced with datasets
and records and arrays with collection references. This function will
stage files and modify the job description to adapt to these changes
for Galaxy.
"""
datasets = []
dataset_collections = []
def upload_file(file_path):
if not os.path.isabs(file_path):
file_path = os.path.join(test_data_directory, file_path)
_ensure_file_exists(file_path)
upload_response = upload_func(FileUploadTarget(file_path))
dataset = upload_response["outputs"][0]
datasets.append((dataset, file_path))
dataset_id = dataset["id"]
return {"src": "hda", "id": dataset_id}
def upload_object(the_object):
upload_response = upload_func(ObjectUploadTarget(the_object))
dataset = upload_response["outputs"][0]
datasets.append((dataset, the_object))
dataset_id = dataset["id"]
return {"src": "hda", "id": dataset_id}
def replacement_item(value, force_to_file=False):
is_dict = isinstance(value, dict)
is_file = is_dict and value.get("class", None) == "File"
if force_to_file:
if is_file:
return replacement_file(value)
else:
return upload_object(value)
if isinstance(value, list):
return replacement_list(value)
elif not isinstance(value, dict):
return upload_object(value)
if is_file:
return replacement_file(value)
else:
return replacement_record(value)
def replacement_file(value):
file_path = value.get("location", None) or value.get("path", None)
if file_path is None:
return value
return upload_file(file_path)
def replacement_list(value):
collection_element_identifiers = []
for i, item in enumerate(value):
dataset = replacement_item(item, force_to_file=True)
collection_element = dataset.copy()
collection_element["name"] = str(i)
collection_element_identifiers.append(collection_element)
collection = collection_create_func(collection_element_identifiers, "list")
dataset_collections.append(collection)
hdca_id = collection["id"]
return {"src": "hdca", "id": hdca_id}
def replacement_record(value):
collection_element_identifiers = []
for record_key, record_value in value.items():
if record_value.get("class") != "File":
dataset = replacement_item(record_value, force_to_file=True)
collection_element = dataset.copy()
else:
dataset = upload_file(record_value["location"])
collection_element = dataset.copy()
collection_element["name"] = record_key
collection_element_identifiers.append(collection_element)
collection = collection_create_func(collection_element_identifiers, "record")
dataset_collections.append(collection)
hdca_id = collection["id"]
return {"src": "hdca", "id": hdca_id}
replace_keys = {}
for key, value in iteritems(job):
replace_keys[key] = replacement_item(value)
job.update(replace_keys)
return job, datasets
def _ensure_file_exists(file_path):
if not os.path.exists(file_path):
template = "File [%s] does not exist - parent directory [%s] does %sexist, cwd is [%s]"
parent_directory = os.path.dirname(file_path)
message = template % (
file_path,
parent_directory,
"" if os.path.exists(parent_directory) else "not ",
os.getcwd(),
)
raise Exception(message)
class FileUploadTarget(object):
def __init__(self, path):
self.path = path
class ObjectUploadTarget(object):
def __init__(self, the_object):
self.object = the_object
GalaxyOutput = namedtuple("GalaxyOutput", ["history_id", "history_content_type", "history_content_id"])
def output_to_cwl_json(galaxy_output, get_metadata, get_dataset):
"""Convert objects in a Galaxy history into a CWL object.
Useful in running conformance tests and implementing the cwl-runner
interface via Galaxy.
"""
def element_to_cwl_json(element):
element_output = GalaxyOutput(
galaxy_output.history_id,
element["object"]["history_content_type"],
element["object"]["id"],
)
return output_to_cwl_json(element_output, get_metadata, get_dataset)
output_metadata = get_metadata(galaxy_output.history_content_type, galaxy_output.history_content_id)
if output_metadata["history_content_type"] == "dataset":
ext = output_metadata["file_ext"]
assert output_metadata["state"] == "ok"
dataset_dict = get_dataset(output_metadata)
if ext == "expression.json":
if "content" in dataset_dict:
return json.loads(dataset_dict["content"])
else:
with open(dataset_dict["path"]) as f:
return json.load(f)
else:
return output_properties(**dataset_dict)
elif output_metadata["history_content_type"] == "dataset_collection":
if output_metadata["collection_type"] == "list":
rval = []
for element in output_metadata["elements"]:
rval.append(element_to_cwl_json(element))
elif output_metadata["collection_type"] == "record":
rval = {}
for element in output_metadata["elements"]:
rval[element["element_identifier"]] = element_to_cwl_json(element)
return rval
else:
raise NotImplementedError("Unknown history content type encountered")
| [
"jmchilton@gmail.com"
] | jmchilton@gmail.com |
017790c669c4f1bf70f58eb6cfd4c9089c70646d | 380372bbec9b77df14bb96fc32aca7061cca0635 | /astro/sat/tle2.py | 86c3031403c10cd04cc9e670bf7e423ce77da94d | [] | no_license | IchiroYoshida/python_public | d3c42dc31b3206db3a520a007ea4fb4ce6c1a6fd | 37ccadb1d3d42a38561c7708391f4c11836f5360 | refs/heads/master | 2023-08-16T17:19:07.278554 | 2023-08-13T21:29:51 | 2023-08-13T21:29:51 | 77,261,682 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,742 | py | #! /usr/local/bin/python3.6
"""
直近 TLE データ取得 (from NASA)
: 過去の直近の TLE データ1件を取得する
(過去データが存在しなければ、未来の直近データ)
date name version
2018.06.12 mk-mode.com 1.00 新規作成
Copyright(C) 2018 mk-mode.com All Rights Reserved.
---
引数 : [YYYYMMDD[HHMMSS]]
(JST を指定。無指定なら現在時刻とみなす)
"""
from datetime import datetime
from datetime import timedelta
import re
import requests
import sys
import traceback
FILE ='./iss_tle.py'
class TleIssNasa:
URL = (
"https://spaceflight.nasa.gov/realdata/sightings/"
"SSapplications/Post/JavaSSOP/orbit/ISS/SVPOST.html"
)
UA = (
"mk-mode Bot (by Python/{}.{}.{}, "
"Administrator: postmaster@mk-mode.com)"
).format(
sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro
)
MSG_ERR = (
"Invalid date!\n"
"[USAGE] ./tle_iss_nasa.rb [YYYYMMDD[HHMMSS]]"
)
def __init__(self):
if len(sys.argv) < 2:
self.jst = datetime.now()
else:
if re.search(r"^(\d{8}|\d{14})$", sys.argv[1]) is not(None):
dt = sys.argv[1].ljust(14, "0")
try:
self.jst = datetime.strptime(dt, "%Y%m%d%H%M%S")
except ValueError as e:
print(self.MSG_ERR)
sys.exit(1)
else:
print(self.MSG_ERR)
sys.exit(0)
self.utc = self.jst - timedelta(hours=9)
def exec(self):
""" Execution """
tle = ""
utc_tle = None
try:
print(self.jst.strftime("%Y-%m-%d %H:%M:%S.%f JST"))
print(self.utc.strftime("%Y-%m-%d %H:%M:%S.%f UTC"))
print("---")
tles = self.__get_tle()
for new in reversed(tles):
tle = new
item_utc = re.split(" +", tle[0])[3]
y = 2000 + int(item_utc[0:2])
d = float(item_utc[2:])
utc_tle = datetime(y, 1, 1) + timedelta(days=d)
if utc_tle <= self.utc:
break
print("\n".join(tle))
print(utc_tle.strftime("(%Y-%m-%d %H:%M:%S.%f UTC)"))
with open(FILE, 'w') as file:
file.write("import ephem\n\n")
file.write("line1 = "+'"'+'ISS(SARYA)"\n')
file.write("line2 = "+'"'+tle[0]+'"\n')
file.write("line3 = "+'"'+tle[1]+'"\n')
file.write("iss = ephem.readtle(line1, line2, line3)")
except Exception as e:
raise
def __get_tle(self):
""" 最新 TLE 一覧取得 """
res = []
try:
html, status, reason = self.__get_html()
if status != 200 or reason != "OK":
print((
"STATUS: {} ({})"
"[ERROR] Could not retreive html."
).format(status, reason))
sys.exit(1)
for tle in re.findall(r"ISS\n +(1.+?)\n +(2.+?)\n", html):
res.append([tle[0], tle[1]])
return res
except Exception as e:
raise
def __get_html(self):
""" HTML 取得 """
try:
headers = {'User-Agent': self.UA}
res = requests.get(self.URL, headers)
return [res.text, res.status_code, res.reason]
except Exception as e:
raise
if __name__ == '__main__':
try:
obj = TleIssNasa()
obj.exec()
except Exception as e:
traceback.print_exc()
sys.exit(1)
| [
"yoshida.ichi@gmail.com"
] | yoshida.ichi@gmail.com |
1c7c6152f130673e43ea5c7a7a14923df429ae30 | 044facb13bff7414439db8706ed322ea505698fa | /old/python/sandbox/old_stuff/multi_sampling/conditionals.py | 3f0db46bbddcf90a69ec3dfa653fa07f655d73bc | [] | no_license | paglenn/WLC | 2c42944bfe707018b5dbfb6ec471518aff2642c7 | e57544eba1380da6a260532b5b72b39c0d1fa433 | refs/heads/master | 2020-12-24T14:35:54.944179 | 2015-11-09T14:43:35 | 2015-11-09T14:43:35 | 23,404,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | import numpy as np
import os
from parameters import *
def calculate_TP():
TP = []
if not os.path.isfile(tp_file):
print("data file ",tp_file,"missing!")
exit()
tpFile = open(tp_file,'r')
for line in tpFile.readlines():
TP.append(float(line[:-1]))
tpFile.close()
return TP
def calculate_RP():
RP = [ ]
if not os.path.isfile(rp_file):
print("data file ",rp_file,"missing!")
exit()
rpFile = open(rp_file,'r')
for line in rpFile.readlines():
RP.append(float(line[:-1]))
rpFile.close()
return RP
def calculate_Z():
Z = []
if not os.path.isfile(z_file):
print("data file ",z_file,"missing!")
exit()
zFile = open(z_file,'r')
for line in zFile.readlines():
Z.append(float(line[:-1]))
zFile.close()
return Z
def calculate_RPTP(RP,TP):
RPTP = []
if not os.path.isfile(rptp_file):
print("data file ",rptp_file,"missing!")
exit()
rptpFile = open(rptp_file,'r')
for line in rptpFile.readlines():
RPTP.append(float(line[:-1]))
rptpFile.close()
return RPTP
TP = calculate_TP()
RP = calculate_RP()
Z = calculate_Z()
RPTP = calculate_RPTP(RP,TP)
z_X_joint = np.histogramdd(np.vstack((Z,TP,RP,RPTP)).T,normed=True)[0]
X_joint = np.histogramdd(np.vstack((TP,RP,RPTP)).T,normed=True)[0]
print(z_X_joint.shape,X_joint.shape)
#print(z_X_joint,X_joint)
#print(z_X_joint)
#z_cond = z_X_joint/X_joint
| [
"nls.pglenn@gmail.com"
] | nls.pglenn@gmail.com |
6d60af458900694275feaf31879721e43da59186 | cacb92c6dba32dfb7f2a4a2a02269f40ab0413dd | /mmdet/datasets/pipelines/test_time_aug.py | f3a04e7311af9a306703ced843e4cdfe6f1edb66 | [
"Apache-2.0"
] | permissive | dereyly/mmdet_sota | 697eab302faf28d5bce4092ecf6c4fd9ffd48b91 | fc14933ca0ec2eebb8e7b3ec0ed67cae0da3f236 | refs/heads/master | 2022-11-26T14:52:13.665272 | 2020-08-04T00:26:46 | 2020-08-04T00:26:46 | 272,046,903 | 15 | 5 | Apache-2.0 | 2020-07-16T06:22:39 | 2020-06-13T16:37:26 | Python | UTF-8 | Python | false | false | 2,741 | py | import warnings
import mmcv
from ..builder import PIPELINES
from .compose import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug(object):
"""Test-time augmentation with multiple scales and flipping
Args:
transforms (list[dict]): Transforms to apply in each augmentation.
img_scale (tuple | list[tuple]: Images scales for resizing.
flip (bool): Whether apply flip augmentation. Default: False.
flip_direction (str | list[str]): Flip augmentation directions,
options are "horizontal" and "vertical". If flip_direction is list,
multiple flip augmentations will be applied.
It has no effect when flip == False. Default: "horizontal".
"""
def __init__(self,
transforms,
img_scale,
flip=False,
flip_direction='horizontal'):
self.transforms = Compose(transforms)
self.img_scale = img_scale if isinstance(img_scale,
list) else [img_scale]
assert mmcv.is_list_of(self.img_scale, tuple)
self.flip = flip
self.flip_direction = flip_direction if isinstance(
flip_direction, list) else [flip_direction]
assert mmcv.is_list_of(self.flip_direction, str)
if not self.flip and self.flip_direction != ['horizontal']:
warnings.warn(
'flip_direction has no effect when flip is set to False')
if (self.flip
and not any([t['type'] == 'RandomFlip' for t in transforms])):
warnings.warn(
'flip has no effect when RandomFlip is not in transforms')
def __call__(self, results):
aug_data = []
flip_aug = [False, True] if self.flip else [False]
for scale in self.img_scale:
for flip in flip_aug:
for direction in self.flip_direction:
_results = results.copy()
_results['scale'] = scale
_results['flip'] = flip
_results['flip_direction'] = direction
data = self.transforms(_results)
aug_data.append(data)
# list of dict to dict of list
aug_data_dict = {key: [] for key in aug_data[0]}
for data in aug_data:
for key, val in data.items():
aug_data_dict[key].append(val)
return aug_data_dict
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'img_scale={self.img_scale}, flip={self.flip})'
repr_str += f'flip_direction={self.flip_direction}'
return repr_str
| [
"nikolay@xix.ai"
] | nikolay@xix.ai |
94cf91731423bf8731c68e6c7bd64038ada51c7e | 265d0477b43dd6391b939d08577bb82c57184cdf | /official/utils/registry_test.py | 47e3722993c79e96706d56757e6e3997f072ffde | [
"Apache-2.0"
] | permissive | EthanGeek/models | 02c9fca96f5b3be7503bb0e75172e2f683bf4b73 | a9fcda17153e4f36d431174934abef4151f1f687 | refs/heads/master | 2022-12-11T08:58:19.241487 | 2020-09-04T00:38:22 | 2020-09-04T00:38:22 | 292,791,920 | 1 | 0 | Apache-2.0 | 2020-09-04T08:21:02 | 2020-09-04T08:21:01 | null | UTF-8 | Python | false | false | 2,560 | py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from official.utils import registry
class RegistryTest(tf.test.TestCase):
def test_register(self):
collection = {}
@registry.register(collection, 'functions/func_0')
def func_test():
pass
self.assertEqual(registry.lookup(collection, 'functions/func_0'), func_test)
@registry.register(collection, 'classes/cls_0')
class ClassRegistryKey:
pass
self.assertEqual(
registry.lookup(collection, 'classes/cls_0'), ClassRegistryKey)
@registry.register(collection, ClassRegistryKey)
class ClassRegistryValue:
pass
self.assertEqual(
registry.lookup(collection, ClassRegistryKey), ClassRegistryValue)
def test_register_hierarchy(self):
collection = {}
@registry.register(collection, 'functions/func_0')
def func_test0():
pass
@registry.register(collection, 'func_1')
def func_test1():
pass
@registry.register(collection, func_test1)
def func_test2():
pass
expected_collection = {
'functions': {
'func_0': func_test0,
},
'func_1': func_test1,
func_test1: func_test2,
}
self.assertEqual(collection, expected_collection)
def test_register_error(self):
collection = {}
@registry.register(collection, 'functions/func_0')
def func_test0(): # pylint: disable=unused-variable
pass
with self.assertRaises(KeyError):
@registry.register(collection, 'functions/func_0/sub_func')
def func_test1(): # pylint: disable=unused-variable
pass
with self.assertRaises(LookupError):
registry.lookup(collection, 'non-exist')
if __name__ == '__main__':
tf.test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
c440828efe9f129ee90f410999d2d0d211b37e61 | 54a5f5ec2c5edf924b7dc7730ee7cb2a38ac4a39 | /MergingDataFrameswithpandas/E27_Using_merge_asof.py | e556c2a5d480cbe0125360e461af6d06c17a0cc1 | [] | no_license | dajofischer/Datacamp | fac413ec178375cedceababaf84f6b47a61fc821 | a03d16b8f342412f1ee077f2f196ee8404e2e21c | refs/heads/master | 2020-04-05T08:38:25.361746 | 2019-03-27T20:55:57 | 2019-03-27T20:55:57 | 156,722,561 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | # Merge auto and oil: merged
merged = pd.merge_asof(auto,oil,left_on='yr' , right_on='Date')
# Print the tail of merged
print(merged.tail())
# Resample merged: yearly
yearly = merged.resample('A',on='Date')[['mpg','Price']].mean()
# Print yearly
print(yearly)
# print yearly.corr()
print(yearly.corr())
| [
"dajofischer@gmail.com"
] | dajofischer@gmail.com |
40891161a6e4cade4b43b30586594b4651c6ee2a | d3efc82dfa61fb82e47c82d52c838b38b076084c | /crossmarketetf/crossmarket_creation_HA/YW_CETFSS_SHSG_048.py | b948171ffe226a7b5fdd317eb7b46970b408751f | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,698 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test")
from crossmarketetf.cetfservice.cetf_main_service import *
from crossmarketetf.cetfservice.cetf_get_components_asset import *
from crossmarketetf.cetfservice.cetf_utils import *
from mysql.QueryOrderErrorMsg import queryOrderErrorMsg
from service.mainService import *
from mysql.getUpOrDownPrice import getUpPrice
class YW_CETFSS_SHSG_048(xtp_test_case):
def test_YW_CETFSS_SHSG_048(self):
# -----------ETF申购-------------
title = '上海ETF申购--可深市股票退补现金替代:T-1日无成分股&资金不足&计算现金比例<最大现金比例→T日申购ETF'
# 定义当前测试用例的期待值
# 期望状态:初始、未成交、全成、废单、撤废、内部撤单
# xtp_ID和cancel_xtpID默认为0,不需要变动
case_goal = {
'期望状态': '废单',
'errorID': 11010120,
'errorMSG': queryOrderErrorMsg(11010120),
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
unit_info = {
'ticker': '550630', # etf代码
'etf_unit': 1.0, # etf申购单位数
'etf_unit_sell': 1.0, # etf卖出单位数
'component_unit_sell': 1.0 # 成分股卖出单位数
}
# -----------查询ETF申购前成分股持仓-------------
component_stk_info = cetf_get_all_component_stk(Api,unit_info['ticker'])
# 查询etf最小申赎数量
unit_number = query_creation_redem_unit(unit_info['ticker'])
# etf申购数量
quantity = int(unit_info['etf_unit'] * unit_number)
# 定义委托参数信息------------------------------------------
wt_reqs = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_ETF'],
'order_client_id':
2,
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker':
unit_info['ticker'],
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_PURCHASE'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'quantity':
quantity,
'position_effect':
Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
g_func.cetf_parm_init(case_goal['期望状态'])
rs1 = cetf_service_test(Api, case_goal, wt_reqs,component_stk_info)
etf_creation_log(case_goal, rs1)
self.assertEqual(rs1['用例测试结果'], True)
# --------二级市场,卖出etf-----------
case_goal['期望状态'] = '废单'
case_goal['errorID'] = 11010121
case_goal['errorMSG'] = queryOrderErrorMsg(11010121)
# 二级市场卖出的etf数量
quantity = int(unit_info['etf_unit_sell'] * unit_number)
quantity_list = split_etf_quantity(quantity)
# 查询涨停价
limitup_px = getUpPrice(unit_info['ticker'])
rs2 = {}
for etf_quantity in quantity_list:
wt_reqs_etf = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':
2,
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker':
unit_info['ticker'],
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
'price':
limitup_px,
'quantity':
etf_quantity,
'position_effect':
Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
rs2 = serviceTest(Api, case_goal, wt_reqs_etf)
if rs2['用例测试结果'] is False:
etf_sell_log(case_goal, rs2)
self.assertEqual(rs2['用例测试结果'], True)
return
etf_sell_log(case_goal, rs2)
# ------------二级市场卖出成份股-----------
case_goal['期望状态'] = '废单'
case_goal['errorID'] = 11010121
case_goal['errorMSG'] = queryOrderErrorMsg(11010121)
# 查询etf成分股代码和数量
etf_components = query_cetf_component_share(unit_info['ticker'])
# 如果卖出单位大于100,表示卖出数量;小于100,表示卖出份数
rs3 = {}
for stk_code in etf_components:
# 申购用例1-43会有上海和深圳的成分股各一支,深圳成分股为'008000',只卖上海的
if stk_code != '008000':
components_share = etf_components[stk_code]
quantity = (int(unit_info['component_unit_sell'])
if unit_info['component_unit_sell'] >= 100
else int(components_share * unit_info['component_unit_sell']))
limitup_px = getUpPrice(stk_code)
wt_reqs = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':
2,
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker':
stk_code,
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
'price':
limitup_px,
'quantity':
quantity,
'position_effect':
Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
rs3 = serviceTest(Api, case_goal, wt_reqs)
if rs3['用例测试结果'] is False:
etf_components_sell_log(case_goal, rs3)
self.assertEqual(rs3['用例测试结果'], True)
etf_components_sell_log(case_goal, rs3)
self.assertEqual(rs3['用例测试结果'], True)
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
5ce5c41d4a76c971071d193991947217158f28a7 | 5e1074afdb17eeb3a78eaf16093c8c491d9f3c0e | /pyramid_blogr/services/blog_record.py | 98266b411230078f2501547f4a5ab7f7e57ce5ac | [] | no_license | andyk1278/pyramid_blogr | 52255cf1a09c3ed3a43f6c2503a722db30df9f19 | 1e8b53e754b5315c08a2e982c9be08e76c331748 | refs/heads/master | 2021-01-01T05:54:28.668964 | 2017-07-15T23:12:53 | 2017-07-15T23:12:53 | 97,301,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | import sqlalchemy as sa
from paginate_sqlalchemy import SqlalchemyOrmPage
from ..models.blog_record import BlogRecord
class BlogRecordService(object):
@classmethod
def all(cls, request):
query = request.dbsession.query(BlogRecord)
return query.order_by(sa.desc(BlogRecord.created))
@classmethod
def by_id(cls, _id, request):
query = request.dbsession.query(BlogRecord)
return query.get(_id)
@classmethod
def get_paginator(cls, request, page=1):
query = request.dbsession.query(BlogRecord)
query = query.order_by(sa.desc(BlogRecord.created))
query_params = request.GET.mixed()
def url_maker(link_page):
# replace page param with values generated by paginator
query_params['page'] = link_page
return request.current_route_url(_query=query_params)
return SqlalchemyOrmPage(query, page, items_per_page=5, url_maker=url_maker) | [
"andyk1278@gmail.com"
] | andyk1278@gmail.com |
5c510e80b59c7982d33d9b43ae200d9f186a4b9d | 04c7295ce65a623dc62454aa46ae4ae4ce51ca36 | /Assignment/ass1/q2/ass1q2.py | 25306781782d97e627e5d64efb3a34b5b2ddc616 | [] | no_license | hty-unsw/COMP9021-Python | 38373378162a314a82bf14453d026e641963e1b9 | 97be6dfa730247b59e608ec6d464ac16b4cf1968 | refs/heads/master | 2020-07-03T00:51:23.540099 | 2018-10-30T14:23:15 | 2018-10-30T14:23:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | import sys
import os.path
from itertools import groupby
try:
N = input('Please enter the name of the file you want to get data from: ')
if not os.path.exists(N):
raise EOFError
else:
f = open(N)
s = f.read()
f.close()
origin_list = s.split()
L = [int(x) for x in origin_list]
if len(L) <= 1 or L[0] <= 0:
raise ValueError
for i in range(len(L)-1):
if L[i+1] - L[i] <=0:
raise ValueError
except ValueError:
print('Sorry, input file does not store valid data.')
sys.exit()
except EOFError:
print('Sorry, there is no such file.')
sys.exit()
#============================
f = open(N)
s = f.read()
f.close()
origin_list = s.split()
L = [int(x) for x in origin_list]
List2 = [L[n] - L[n - 1] for n in range(1, len(L))]
if len(set(List2)) == 1:
print('The ride is perfect!\n'
'The longest good ride has a length of: {}\n'
'The minimal number of pillars to remove to build a perfect ride from the rest is: 0'.format(len(List2)))
else:
List3 = []
for key,group in groupby(List2):
List3.append(len(list(group)))
length = max(List3)
#=============================================
Lset = set(L)
ride_len = 0
for i in range(0,len(L)-1):
for n in range(i+1,len(L)):
diff = L[n] - L[i]
L2 = range(L[i],L[-1]+1,diff)
current_len = 0
for m in range(0,len(L2)):
if L2[m] in Lset:
current_len += 1
else:
break
if current_len > ride_len:
ride_len = current_len
value = len(L) - ride_len
print('The ride could be better...\n'
'The longest good ride has a length of: {}\n'
'The minimal number of pillars to remove to build a perfect ride from the rest is: {}'.format(length, value))
| [
"grey1991ss@gmail.com"
] | grey1991ss@gmail.com |
f9b03003acb6e4f7f8f5bea29ff4743e4c9a8b23 | 7c5e9dd27939492a5f75650e02804f2a84e982ec | /apps/shares/models.py | e47e0b3b474e210421a039b24fbfcaffa1259e91 | [] | no_license | karol-gruszczyk/janusze-biznesu | da431f7b31a7c368c07444ef5e82a25d95fcfc90 | 4d5c18a1d407704a39c7c50a4fdde0391b87d116 | refs/heads/master | 2021-01-17T20:04:06.799391 | 2015-12-04T12:13:56 | 2015-12-04T12:13:56 | 37,723,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | from django.db import models
class ShareManager(models.Manager):
@classmethod
def get_records(cls, share):
if type(share) != Share:
raise TypeError("type 'Share' expected")
return ShareRecord.objects.filter(share__pk=share.pk)
@classmethod
def get_groups(cls, share):
if type(share) != Share:
raise TypeError("type 'Share' expected")
return share.sharegroup_set.all()
class Share(models.Model):
name = models.CharField(max_length=32, db_index=True, unique=True)
verbose_name = models.CharField(max_length=64, null=True, blank=True)
updated_daily = models.BooleanField(default=False)
last_updated = models.DateTimeField(null=True)
first_record = models.DateField(null=True)
last_record = models.DateField(null=True)
num_records = models.PositiveIntegerField(null=True)
objects = ShareManager()
class Meta:
get_latest_by = 'last_updated'
def __str__(self):
return self.verbose_name if self.verbose_name else self.name
class ShareSet(models.Model):
shares = models.ManyToManyField(Share)
class ShareRecord(models.Model):
share = models.ForeignKey(Share, null=False, db_index=True, related_name='records')
date = models.DateField(null=False, db_index=True)
open = models.FloatField(null=False)
close = models.FloatField(null=False)
high = models.FloatField(null=False)
low = models.FloatField(null=False)
volume = models.FloatField(null=False)
class Meta:
unique_together = ('share', 'date',)
index_together = ['share', 'date']
class ShareGroup(models.Model):
name = models.CharField(max_length=32, db_index=True, unique=True)
verbose_name = models.CharField(max_length=64, null=True)
shares = models.ManyToManyField(Share, blank=True)
def __str__(self):
return self.verbose_name if self.verbose_name else self.name
| [
"karol.gruszczyk@gmail.com"
] | karol.gruszczyk@gmail.com |
e535b1ca14677486264c1141216705096921d269 | ae3f23efcdc4b7fdd1c224043d0ece002955956e | /host/host/containers/lxc/utils.py | 8071ab5980742ef6789095528ee0e01719973559 | [] | no_license | alexeysofin/xplace | 4466682fe76c808288d69f2808ddbca38a583bc4 | 9f12f066a62fae4e789bee94e5e554cc6de26d90 | refs/heads/master | 2023-01-12T01:02:40.137609 | 2021-02-14T20:41:30 | 2021-02-14T20:41:30 | 208,021,139 | 0 | 0 | null | 2023-01-04T10:18:46 | 2019-09-12T10:07:17 | Python | UTF-8 | Python | false | false | 375 | py | import os
from host.utils.sub_process import run_command
from .const import LXC_BIN_PATH
from .exceptions import ContainerException
def run_container_command(command, *args, command_input=None):
command = os.path.join(LXC_BIN_PATH, command)
return run_command(command, *args, command_input=command_input,
exception_class=ContainerException)
| [
"sofin.moffin"
] | sofin.moffin |
5d96058fd7a39afd318a44140707be93e5029f7e | e7022b8eb4179e87007bc184a43cfb470c8637a5 | /code/dbengine.py | 6b4a0f13b3a9004009a5ea357a8e947203f254f8 | [] | no_license | yscoder-github/nl2sql-tianchi | a683c96ce5f545e38c7eb4afbb655ff537c73339 | 2d4463c5098d7533ebf879d874a8c14e61b8f269 | refs/heads/master | 2021-07-07T10:25:47.939612 | 2020-11-29T13:01:34 | 2020-11-29T13:01:34 | 207,695,804 | 74 | 18 | null | null | null | null | UTF-8 | Python | false | false | 2,398 | py | # -*- coding:utf-8 -*-
import json
import records
import re
from config import *
class DBEngine:
def __init__(self, fdb):
self.db = records.Database('sqlite:///{}'.format(fdb))
self.conn = self.db.get_connection()
def execute(self, table_id, select_index, aggregation_index, conditions, condition_relation):
"""
table_id: id of the queried table.
select_index: list of selected column index, like [0,1,2]
aggregation_index: list of aggregation function corresponding to selected column, like [0,0,0], length is equal to select_index
conditions: [[condition column, condition operator, condition value], ...]
condition_relation: 0 or 1 or 2
"""
table_id = 'Table_{}'.format(table_id)
# 条件数>1 而 条件关系为''
if condition_relation == 0 and len(conditions) > 1:
return 'Error1'
# 选择列或条件列为0
if len(select_index) == 0 or len(conditions) == 0 or len(aggregation_index) == 0:
return 'Error2'
condition_relation = rela_dict[condition_relation]
select_part = ""
for sel, agg in zip(select_index, aggregation_index):
select_str = 'col_{}'.format(sel+1)
agg_str = agg_dict[agg]
if agg:
select_part += '{}({}),'.format(agg_str, select_str)
else:
select_part += '({}),'.format(select_str)
select_part = select_part[:-1]
where_part = []
for col_index, op, val in conditions:
if PY3:
where_part.append('col_{} {} "{}"'.format(col_index+1, cond_op_dict[op], val))
else:
where_part.append('col_{} {} "{}"'.format(col_index+1, cond_op_dict[op], val.encode('utf-8')))
where_part = 'WHERE ' + condition_relation.join(where_part)
query = 'SELECT {} FROM {} {}'.format(select_part, table_id, where_part)
if PY2:
query = query.decode('utf-8')
try:
out = self.conn.query(query).as_dict()
except:
return 'Error3'
# result_set = [tuple(set(i.values())) for i in out]
if PY2:
result_set = [tuple(sorted(i.values())) for i in out]
else:
result_set = [tuple(sorted(i.values(), key=lambda x:str(x))) for i in out]
return result_set | [
"yscoder@foxmail.com"
] | yscoder@foxmail.com |
ca458ad14d97e536e9aa50c93af18dc6a1bae1b3 | 7a42d40a351824464a3c78dc0c3e78bbd8e0a92f | /bigdog_blog/blog/models.py | accbdf0df9a4805d5522a5c121acb6c6c2048712 | [] | no_license | AhMay/DerekBlogLearn | 6595063eafbc237b932e187b5cb3ad8ff32637fc | fdd5ea2fc5732cdc82ad006f7be0a2a1f30d0ba9 | refs/heads/master | 2020-07-09T05:20:33.283672 | 2019-09-29T10:10:23 | 2019-09-29T10:10:23 | 203,891,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,136 | py | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from unidecode import unidecode
from django.template.defaultfilters import slugify
from ckeditor_uploader.fields import RichTextUploadingField
from datetime import datetime
# Create your models here.
class Article(models.Model):
'''文章模型'''
STATUS_CHOICES =(
('d', '草稿'),
('p','发表'),
)
title = models.CharField('标题', max_length=200 )
slug = models.SlugField('slug', max_length=60, blank=True) #slug 最大的作用就是便于读者和搜索引擎之间从url中了解文章大概包含了什么内容
# body = models.TextField('正文')
body = RichTextUploadingField('正文')
pub_date = models.DateTimeField('发布时间', null=True, blank=True)
create_date = models.DateTimeField('创建时间', auto_now_add=True)
mod_date = models.DateTimeField('修改时间', auto_now=True)
status = models.CharField('文章状态', max_length=1, choices=STATUS_CHOICES, default='d')
views = models.PositiveIntegerField('浏览量', default=0)
author = models.ForeignKey(User, verbose_name='作者', on_delete=models.CASCADE)
users_like = models.ManyToManyField(User,related_name='articles_liked', blank=True)
category = models.ManyToManyField('Category',verbose_name='分类', blank=True, null=True) #多对多
tags = models.ManyToManyField('Tag', verbose_name='标签', blank=True, null=True) #多对多
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if not self.id or not self.slug: #还没有保存,slug 还没有生成
self.slug = slugify(unidecode(self.title)) #中文标题
super().save(*args,**kwargs)
#当模型的各个字段之间并不彼此独立时,可以添加自定义的clean方法
def clean(self):
if self.status == 'd' and self.pub_date is not None:
self.pub_date = None
if self.status == 'p' and self.pub_date is None:
self.pub_date = datetime.now()
def get_absolute_url(self):
return reverse('blog:article_detail', args=[self.pk, self.slug])
def viewed(self):
self.views +=1
self.save(update_fields=['views'])
def published(self):
self.status = 'p'
self.pub_date = datetime.now()
self.save(update_fields=['status','pub_date'])
class Meta:
ordering = ['-pub_date']
verbose_name ='文章'
verbose_name_plural = verbose_name
class Category(models.Model):
'''文章分类'''
name = models.CharField('分类名', max_length=30)
slug = models.SlugField('slug', max_length=40,blank=True)
parent_category = models.ForeignKey('self', verbose_name='父级分类', blank=True, null=True, on_delete=models.CASCADE)
def get_absolute_url(self):
return reverse('blog:category_detail', args=[self.pk, self.slug])
def save(self, *args, **kwargs):
if not self.id or not self.slug:
self.slug = slugify(unidecode(self.name))
super().save(args, kwargs)
def has_child(self):
if self.category_set.all().count() >0: # 外键
return True
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = '分类'
verbose_name_plural = verbose_name
class Tag(models.Model):
'''文章标签'''
name = models.CharField('标签名', max_length=30, unique=True)
slug = models.SlugField('slug', max_length=40,blank=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('blog:tag_detail', args=[self.name])
def get_article_count(self):
return Article.objects.filter(tags__slug=self.slug).count() #slug 应该不为空
def save(self,*args, **kwargs):
if not self.id or not self.slug:
self.slug = slugify(unidecode(self.name))
super().save(*args,**kwargs)
class Meta:
ordering =['name']
verbose_name = '标签'
verbose_name_plural = verbose_name | [
"meizi111082@hotmail.com"
] | meizi111082@hotmail.com |
791c91c8bad3f89f23330677eec8ef411ee77ec5 | 532ca0c5361b54970bc435232e2a6d079c49aecd | /02_Strings and Console Output/01__Strings and Console Output/02_Practice.py | 3d197e67ca3925a0945cf843aae122c22ef018ea | [] | no_license | haveano/codeacademy-python_v1 | dc5484e8df73b9a15ffce835dde625b6454c8302 | 10e6fb2974e1c47f380bb6a33c50b171ecfbf50f | refs/heads/master | 2021-01-11T16:45:57.337493 | 2017-05-30T10:04:08 | 2017-05-30T10:04:08 | 79,660,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
Practice
Excellent! Let's get a little practice in with strings.
Instructions
Set the following variables to their respective phrases:
Set caesar to "Graham"
Set praline to "John"
Set viking to "Teresa"
"""
# Assign your variables below, each on its own line!
caesar = "Graham"
praline = "John"
viking = "Teresa"
# Put your variables above this line
print caesar
print praline
print viking
| [
"noreply@github.com"
] | haveano.noreply@github.com |
844f5b18c6467cd856b107372a08f137d28c5bbf | 07564c75c1f37f2e0304720d1c01f23a27ef3469 | /543.DiameterofBinaryTree/solution.py | 9d5a9edc240760c0c6c070bffacb98e6e92bb25b | [] | no_license | ynXiang/LeetCode | 5e468db560be7f171d7cb24bcd489aa81471349c | 763372587b9ca3f8be4c843427e4760c3e472d6b | refs/heads/master | 2020-05-21T18:27:16.941981 | 2018-01-09T22:17:42 | 2018-01-09T22:17:42 | 84,642,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def diameterOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return 0 if not root else self.cal(root)[0]-1
def cal(self, root):
if root == None:
return 0, 0
else:
lp, lm = self.cal(root.left)
rp, rm = self.cal(root.right)
return max(lp, rp, lm+1+rm), max(lm+1, rm+1)
| [
"yinan_xiang@163.com"
] | yinan_xiang@163.com |
6e6933c8437c6d5ee41f9b5347467037f3a952c0 | 209a7a4023a9a79693ec1f6e8045646496d1ea71 | /COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/pandas/tests/indexing/multiindex/test_indexing_slow.py | a6d678af44ae14e714169ade419968e4d2a623e4 | [
"MIT"
] | permissive | anzhao920/MicrosoftProject15_Invictus | 5e2347015411bbffbdf0ceb059df854661fb240c | 15f44eebb09561acbbe7b6730dfadf141e4c166d | refs/heads/main | 2023-04-16T13:24:39.332492 | 2021-04-27T00:47:13 | 2021-04-27T00:47:13 | 361,913,170 | 0 | 0 | MIT | 2021-04-26T22:41:56 | 2021-04-26T22:41:55 | null | UTF-8 | Python | false | false | 3,087 | py | import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
m = 50
n = 1000
cols = ["jim", "joe", "jolie", "joline", "jolia"]
vals = [
np.random.randint(0, 10, n),
np.random.choice(list("abcdefghij"), n),
np.random.choice(pd.date_range("20141009", periods=10).tolist(), n),
np.random.choice(list("ZYXWVUTSRQ"), n),
np.random.randn(n),
]
vals = list(map(tuple, zip(*vals)))
# bunch of keys for testing
keys = [
np.random.randint(0, 11, m),
np.random.choice(list("abcdefghijk"), m),
np.random.choice(pd.date_range("20141009", periods=11).tolist(), m),
np.random.choice(list("ZYXWVUTSRQP"), m),
]
keys = list(map(tuple, zip(*keys)))
keys += list(map(lambda t: t[:-1], vals[:: n // m]))
# covers both unique index and non-unique index
df = DataFrame(vals, columns=cols)
a = pd.concat([df, df])
b = df.drop_duplicates(subset=cols[:-1])
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
@pytest.mark.parametrize("lexsort_depth", list(range(5)))
@pytest.mark.parametrize("key", keys)
@pytest.mark.parametrize("frame", [a, b])
def test_multiindex_get_loc(lexsort_depth, key, frame):
# GH7724, GH2646
with warnings.catch_warnings(record=True):
# test indexing into a multi-index before & past the lexsort depth
def validate(mi, df, key):
mask = np.ones(len(df)).astype("bool")
# test for all partials of this key
for i, k in enumerate(key):
mask &= df.iloc[:, i] == k
if not mask.any():
assert key[: i + 1] not in mi.index
continue
assert key[: i + 1] in mi.index
right = df[mask].copy()
if i + 1 != len(key): # partial key
return_value = right.drop(cols[: i + 1], axis=1, inplace=True)
assert return_value is None
return_value = right.set_index(cols[i + 1 : -1], inplace=True)
assert return_value is None
tm.assert_frame_equal(mi.loc[key[: i + 1]], right)
else: # full key
return_value = right.set_index(cols[:-1], inplace=True)
assert return_value is None
if len(right) == 1: # single hit
right = Series(
right["jolia"].values, name=right.index[0], index=["jolia"]
)
tm.assert_series_equal(mi.loc[key[: i + 1]], right)
else: # multi hit
tm.assert_frame_equal(mi.loc[key[: i + 1]], right)
if lexsort_depth == 0:
df = frame.copy()
else:
df = frame.sort_values(by=cols[:lexsort_depth])
mi = df.set_index(cols[:-1])
assert not mi.index.lexsort_depth < lexsort_depth
validate(mi, df, key)
| [
"ana.kapros@yahoo.ro"
] | ana.kapros@yahoo.ro |
2f66e6e8846a2c3a80d4c7e9ad2324bdf9096648 | f0257428fed2a5f10950ee52e88a0f6811120323 | /study_oldboy/Day11/01.11_rabbitmq_topic_subscriber.py | 6b8487dfa3db86e47d7023e531f079912e4cb67b | [] | no_license | tata-LY/python | 454d42cc8f6db9a1450966aba4af6894e1b59b78 | 55d13b7f61cbb87ff3f272f596cd5b8c53b807c5 | refs/heads/main | 2023-04-08T19:31:57.945506 | 2021-04-19T05:39:17 | 2021-04-19T05:39:17 | 328,880,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2021-3-9 8:47
# @Author : liuyang
# @File : 01.11_rabbitmq_topic_subscriber.py
# @Software: PyCharm
import pika
import sys
hostname = '192.168.113.11'
connection = pika.BlockingConnection(pika.ConnectionParameters(host=hostname))
channel = connection.channel()
channel.exchange_declare(exchange='topic_logs',
exchange_type='topic')
result = channel.queue_declare('', exclusive=True)
queue_name = result.method.queue
binding_keys = sys.argv[1:]
if not binding_keys:
sys.stderr.write("Usage: %s [binding_key]...\n" % sys.argv[0])
sys.exit(1)
for binding_key in binding_keys:
channel.queue_bind(exchange='topic_logs',
queue=queue_name,
routing_key=binding_key)
print(' [*] Waiting for logs. To exit press CTRL+C')
def callback(ch, method, properties, body):
print(" [X] %r:%r" % (method.routing_key, body))
channel.basic_consume(queue_name,
callback,
True)
channel.start_consuming() | [
"ainiyang20@qq.com"
] | ainiyang20@qq.com |
4c1b7e82cf084943af5406747c7b74ab3e20a6fe | ad6fe640e0074f08961a55d727bc204dcdcf8848 | /src/simplessl/ca.py | ee0d4faa639c23be7fb011b6de3c953378009ab5 | [] | no_license | andrewcooke/simple-ssl | 50a8764e340d5f78f0c17c273d0445a533db5f8c | 87d619bcd0f3c3326e9177c64252783f559ba9eb | refs/heads/master | 2021-01-01T16:13:21.228950 | 2013-12-10T01:43:17 | 2013-12-10T01:43:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py |
from script.argp import ArgP, ArgPRoot, ArgPRun
from script.attr import StrAttr
class CaCore(ArgPRoot):
dir = ArgP(StrAttr, value='.',
description='Where a CA stores the data it needs to do its work.')
def __call__(self):
print(self.dir.__get__(self, type(self)))
print("dir is " + str(self.dir))
if __name__ == '__main__':
ArgPRun(CaCore)
| [
"andrew@acooke.org"
] | andrew@acooke.org |
2725f826472f8bc43b3068109792eecdae0fc910 | 5c4515960dcbfd3861d06d90b8c9bde0bdf3ecf5 | /Iserlab/migrations/0130_mytempvm.py | f24ae4cf5f65d103faabb44569c96b7a2c006487 | [] | no_license | Mathilda1992/mcysite | 66bb2f51de622b7f7c450664c798eb11ce195cae | def82e43474ecc734c6cbb26842bd87f698b2b88 | refs/heads/master | 2021-01-11T19:58:23.611196 | 2017-06-26T08:58:11 | 2017-06-26T08:58:11 | 79,434,975 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-10 07:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration: creates the MyTempVM model.

    MyTempVM links a teacher (Iserlab.User) to an Iserlab.VM and records when
    the link was created; rows are ordered newest-first.
    """

    dependencies = [
        ('Iserlab', '0129_auto_20170507_0516'),
    ]

    operations = [
        migrations.CreateModel(
            name='MyTempVM',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Timestamp set once, on insert.
                ('createtime', models.DateTimeField(auto_now_add=True)),
                # Deleting the referenced User/VM cascades to these rows.
                ('teacher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Iserlab.User')),
                ('vm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Iserlab.VM')),
            ],
            options={
                'ordering': ['-createtime'],
            },
        ),
    ]
| [
"machenyi2011@163.com"
] | machenyi2011@163.com |
8da88a7f51735fe571ed0f617232307ac1baf9f6 | c361a25acecd016677bbd0c6d9fc56de79cf03ed | /TSM/TestCase.py | 23412a68d4d65560b1b1074bef9519e4955fd92f | [] | no_license | danielmellado/zephyr | f8931633045959e7e9a974de8b700a287a1ae94e | dc6f85b78b50e599504966154b927fe198d7402d | refs/heads/master | 2021-01-12T22:31:24.479814 | 2015-10-14T05:39:04 | 2015-10-14T06:24:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,459 | py | __author__ = 'micucci'
# Copyright 2015 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import importlib
import logging
import datetime
from common.Exceptions import *
from TestScenario import TestScenario
from VTM.VirtualTopologyManager import VirtualTopologyManager
from PTM.PhysicalTopologyManager import PhysicalTopologyManager
class TestCase(unittest.TestCase):
    """Base class for TSM test cases.

    Binds a unittest.TestCase to a TestScenario (plus its physical and
    virtual topology managers) and records wall-clock timing for each test.
    """

    # Shared run context, populated by _prepare_class() before tests execute.
    class_scenario = None
    """ :type: TestScenario"""
    vtm = None
    """ :type: VirtualTopologyManager"""
    ptm = None
    """ :type: PhysicalTopologyManager"""
    setup_logger = None
    """ :type: logging.Logger"""

    @staticmethod
    def supported_scenarios():
        """
        Subclasses should override to return a set of supported scenario classes
        :return: set[class]
        """
        return set()

    @staticmethod
    def get_class(fqn):
        """
        Return the class from the fully-qualified package/module/class name
        :type fqn: str
        :return:
        """
        class_name = fqn.split('.')[-1]
        module_name = '.'.join(fqn.split('.')[0:-1])
        # A bare name (no package part) is treated as a module of the same name.
        module = importlib.import_module(module_name if module_name != '' else class_name)
        impl_class = getattr(module, class_name)
        if not issubclass(impl_class, TestCase):
            raise ArgMismatchException('Class: ' + fqn + ' is not a subclass of TSM.TestCase')
        return impl_class

    @classmethod
    def _get_name(cls):
        # Human-readable identifier used in the log bracketing below.
        return cls.__name__

    @classmethod
    def _prepare_class(cls, current_scenario, tsm_logger=logging.getLogger()):
        # Install the scenario and its managers as class-level state so every
        # test method in this class shares them.
        cls.class_scenario = current_scenario
        cls.ptm = current_scenario.ptm
        cls.vtm = current_scenario.vtm
        cls.setup_logger = tsm_logger

    def __init__(self, methodName='runTest'):
        super(TestCase, self).__init__(methodName)
        # Loggers default to no-ops until set_logger() is called.
        self.LOG = logging.getLogger('test-case-null-logger')
        """ :type: logging.Logger"""
        self.CONSOLE = logging.getLogger('test-case-null-logger')
        """ :type: logging.Logger"""
        self.start_time = None
        """ :type: datetime.datetime"""
        self.stop_time = None
        """ :type: datetime.datetime"""
        # NOTE(review): run_time actually holds a datetime.timedelta (set in
        # run() as stop - start), despite the annotation below.
        self.run_time = None
        """ :type: datetime.datetime"""
        self.LOG.addHandler(logging.NullHandler())
        self.current_scenario = self.class_scenario
        """ :type: TestScenario"""

    def run(self, result=None):
        # Wrap the standard unittest run with timing and log bracketing.
        self.start_time = datetime.datetime.utcnow()
        self.LOG.info('Running test case: ' + self._get_name() + ' - ' + self._testMethodName)
        super(TestCase, self).run(result)
        self.LOG.info('Test case finished: ' + self._get_name() + ' - ' + self._testMethodName)
        self.stop_time = datetime.datetime.utcnow()
        self.run_time = (self.stop_time - self.start_time)

    def set_logger(self, log, console=None):
        # Replace the null loggers installed by __init__.
        self.LOG = log
        self.CONSOLE = console

    def runTest(self):
        # Default no-op test method (unittest requires one to exist).
        pass
| [
"micucci@midokura.com"
] | micucci@midokura.com |
340cced1c55c9499682cd5949e558809a69cf8be | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /GP8Tywnn2gucEfSMf_17.py | ec8e7521de8652ea402b3016afa29c871da03a89 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
def error(n):
    """Map an error code to its description.

    Returns the descriptive string for the known codes 1-5, or the sentinel
    value 101 for any unknown code.
    """
    messages = {
        1: 'Check the fan: e1',
        2: 'Emergency stop: e2',
        3: 'Pump Error: e3',
        4: 'c: e4',
        5: 'Temperature Sensor Error: e5',
    }
    # dict.get covers the unknown-code case in a single lookup (the original
    # tested `n not in adict.keys()` and then indexed again).
    return messages.get(n, 101)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
0c82b427b1e416b6454f6c7d36f356c26adc1fd2 | 2a45af8ec8a4c87d544f461d27795a283f8f5f67 | /python/input_complete.py | 45e2c6077dd6187d1fcb5bd81dc805136a70f19f | [] | no_license | fengidri/python-script | 2199a16a2d0cc76e6055aec31aaced4638a8c86d | 28fb8e6dbf9e6ba5a1f9c4c3d7b635212bfc5b66 | refs/heads/master | 2020-04-05T14:04:55.103302 | 2017-04-27T10:32:27 | 2017-04-27T10:32:27 | 8,678,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,186 | py | import os
import re
import readline
RE_SPACE = re.compile('.*\s+$', re.M)
class Completer(object):
    """Readline tab-completion helper for filesystem paths."""

    def _listdir(self, root):
        """List directory 'root' appending the path separator to subdirs."""
        res = []
        for name in os.listdir(root):
            path = os.path.join(root, name)
            if os.path.isdir(path):
                name += os.sep
            res.append(name)
        return res

    def _complete_path(self, path=None):
        """Perform completion of filesystem path."""
        if not path:
            return self._listdir('.')
        dirname, rest = os.path.split(path)
        tmp = dirname if dirname else '.'
        res = [os.path.join(dirname, p)
               for p in self._listdir(tmp) if p.startswith(rest)]
        # more than one match, or single match which does not exist (typo)
        if len(res) > 1 or not os.path.exists(path):
            return res
        # resolved to a single directory, so return list of files below it
        if os.path.isdir(path):
            return [os.path.join(path, p) for p in self._listdir(path)]
        # exact file match terminates this completion
        return [path + ' ']

    def complete_extra(self, args):
        """Completions for the 'extra' command."""
        if not args:
            return self._complete_path('.')
        # treat the last arg as a path and complete it
        return self._complete_path(args[-1])

    def path(self, text, state):
        """Generic readline completion entry point."""
        buffer = readline.get_line_buffer()
        line = readline.get_line_buffer().split()
        # account for last argument ending in a space
        if RE_SPACE.match(buffer):
            line.append('')
        # Fix: the original had an unreachable second return after this one
        # ('return [cmd + " "][state]') referencing an undefined name `cmd`;
        # that dead statement has been removed.
        return (self.complete_extra(line) + [None])[state]
return [cmd + ' '][state]
def input_path( ):
    """Install filesystem-path tab completion on the interactive prompt."""
    comp = Completer( )
    # we want to treat '/' as part of a word, so override the delimiters
    readline.set_completer_delims(' \t\n;')
    readline.parse_and_bind("tab: complete")
    readline.set_completer(comp.path)

if __name__ == "__main__":
    # NOTE(review): raw_input is Python 2 only (NameError on Python 3), and
    # input_path() is never called here, so completion is not actually armed.
    raw_input( )
| [
"fengidri@gmail.com"
] | fengidri@gmail.com |
8fab41a7003e035af2fca6201f82467c6caa256e | 7f3c0c7cb3987356171e91b2e888e2bfbe2f5077 | /group_discussion/migrations/0011_topicuser_group_centrality.py | c300d7e28d073aa8945c43f8deb7baa6ca68aa0b | [] | no_license | jscott1989/newscircle | 7d329673ed58dd2309ac6182fae3452bd50a8d54 | 373eba2f9aaa747272092521581d78524585df55 | refs/heads/master | 2020-12-24T11:53:12.865783 | 2016-11-07T17:50:42 | 2016-11-07T17:50:42 | 73,105,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('group_discussion', '0010_comment_created_at'),
]
operations = [
migrations.AddField(
model_name='topicuser',
name='group_centrality',
field=models.IntegerField(null=True),
preserve_default=True,
),
]
| [
"jonathan@jscott.me"
] | jonathan@jscott.me |
72663f4b331865732b2fab003dfd7cda950f5dea | 374dea7d7d1a424d91f369cc75b11b16e1a489cd | /XDG_CACHE_HOME/Microsoft/Python Language Server/stubs.v1/bPN_2o1RXRZaK7Vxgp3oTysbcxQmJr9XStOWBh0VWNo=/_multiprocessing.cpython-37m-x86_64-linux-gnu.pyi | ab4051ca67e5ced0fe3ca7d109b3f9b4d0cd4e04 | [] | no_license | tkoon107/text-generation-LSTM-neural-net | ed0e6a0fb906f4b4fd649eadfe36c254144be016 | 6b98ee355a30da128462bfac531509539d6533ae | refs/heads/master | 2020-05-27T16:46:44.128875 | 2019-06-10T18:26:54 | 2019-06-10T18:26:54 | 188,708,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,181 | pyi | import builtins as _mod_builtins
class SemLock(_mod_builtins.object):
'Semaphore/Mutex type'
SEM_VALUE_MAX = 2147483647
__class__ = SemLock
def __enter__(self):
'enter the semaphore/lock'
return self
def __exit__(self):
'exit the semaphore/lock'
pass
def __init__(self, *args, **kwargs):
'Semaphore/Mutex type'
pass
@classmethod
def __init_subclass__(cls):
'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
return None
@classmethod
def __subclasshook__(cls, subclass):
'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
return False
def _after_fork(self):
'rezero the net acquisition count after fork()'
pass
def _count(self):
'num of `acquire()`s minus num of `release()`s for this process'
pass
def _get_value(self):
'get the value of the semaphore'
pass
def _is_mine(self):
'whether the lock is owned by this thread'
pass
def _is_zero(self):
'returns whether semaphore has value zero'
pass
@classmethod
def _rebuild(cls):
pass
def acquire(self):
'acquire the semaphore/lock'
pass
@property
def handle(self):
pass
@property
def kind(self):
pass
@property
def maxvalue(self):
pass
@property
def name(self):
pass
def release(self):
'release the semaphore/lock'
pass
__doc__ = None
__file__ = '/home/trevor/anaconda3/lib/python3.7/lib-dynload/_multiprocessing.cpython-37m-x86_64-linux-gnu.so'
__name__ = '_multiprocessing'
__package__ = ''
flags = _mod_builtins.dict()
def sem_unlink():
pass
| [
"trevorlang@langdatascience.org"
] | trevorlang@langdatascience.org |
8beeca84710f4f56f3bbffee44da80f9a7637cbc | b0f41ef2af5309fc172b05232dbde501a01d1234 | /fyt/webauth/tests/get_pgt.py | 82e40defcf0403075e2e661dfa9b8d323024d423 | [] | no_license | rlmv/doc-trips | c4dfec9b80cf531b69b17ac2caaef509fa048cd3 | 59c1ffc0bff1adb4f86f1dcfaa66d8970ff55b72 | refs/heads/master | 2023-05-27T01:48:49.251830 | 2021-08-07T04:02:26 | 2021-08-07T04:02:26 | 21,745,373 | 10 | 3 | null | 2023-05-23T00:51:26 | 2014-07-11T17:36:35 | Python | UTF-8 | Python | false | false | 429 | py | # Run via bin/django shell --plain < get_pgt.py
# to pick up all the django environment
# Allows main test class to be independent of CAS implementation platform
# TODO: pass in iou - if cant take args write to file and read here
import atexit
from django_cas.models import PgtIOU
@atexit.register
def lookup_pgt():
pgt = PgtIOU.objects.latest('created')
if pgt:
print(pgt.tgt)
else:
print('FAIL')
| [
"bo.marchman@gmail.com"
] | bo.marchman@gmail.com |
7173d52df0b048d98533fa53729620a11ea3b6f5 | c4fa1ebcdd413c4ab3f0979ee3beead8a8809870 | /providers/gov/clinicaltrials/apps.py | 8ff0b4b5fc7d7930735d5f96d0039eb757546874 | [] | no_license | terroni/SHARE | e47f291db7cf100d29a7904fe820e75d29db1472 | a5631f441da1288722c68785b86128c854cbe7c1 | refs/heads/develop | 2020-12-03T02:29:47.381341 | 2016-07-11T19:40:27 | 2016-07-11T19:40:27 | 63,097,148 | 1 | 0 | null | 2016-07-11T19:45:51 | 2016-07-11T19:45:50 | null | UTF-8 | Python | false | false | 347 | py | from share.provider import ProviderAppConfig
from .harvester import ClinicalTrialsHarvester
class AppConfig(ProviderAppConfig):
    """SHARE provider registration for ClinicalTrials.gov.

    Purely declarative: identifies the provider app and binds it to its
    harvester implementation.
    """
    name = 'providers.gov.clinicaltrials'
    version = '0.0.1'
    title = 'clinicaltrials'
    long_title = 'ClinicalTrials.gov'
    home_page = 'https://clinicaltrials.gov/'
    harvester = ClinicalTrialsHarvester
| [
"icereval@gmail.com"
] | icereval@gmail.com |
b38eb2a7ce9fb308e375c9cdbe8bc50c31984eb7 | 694d57c3e512ce916269411b51adef23532420cd | /python/hardway/ex6.py | a24f3aa7aba6c3bbf44dd4427988a8f522bce255 | [] | no_license | clovery410/mycode | 5541c3a99962d7949832a0859f18819f118edfba | e12025e754547d18d5bb50a9dbe5e725fd03fd9c | refs/heads/master | 2021-05-16T02:46:47.996748 | 2017-05-10T23:43:50 | 2017-05-10T23:43:50 | 39,235,141 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | x = "There are %d types of people." % 10 # make definition of variable x
binary = "binary" # make definition of variable binary
do_not = "don't" # make definition of variable do_not
y = "Those who know %s and those who %s." % (binary, do_not) # make definition of variable y
print x # print x
print y # print y
print "I said: %r." % x # print a string with variable x
print "I also said: '%s'." % y # print a string with variable y
hilarious = False # assign variable hilarious with the value of False
joke_evaluation = "Isn't that joke so funny?! %r" # assign variable joke_evaluation a string
print joke_evaluation % hilarious # print string joke_evaluation
w = "This is the left side of..." # assign variable w with a string value
e = "a string with a right side." # assign variable e with a string value
print w + e # print string
| [
"admin@admins-MacBook-Air.local"
] | admin@admins-MacBook-Air.local |
106644e566ea536280b420280431985f0893666e | 0cef1ca8b0fd54095d263d41c22b5b72bdd297db | /ace-zero-rl/d2dsql_train.py | 2a0a577c36b7030811a1bb7d3ff74b4fc72a0ee8 | [] | no_license | budi-kurniawan/phd | 06c1b622f3ed5e518f3ce69ca0f113d411b3f01f | 3ce071462db1ee4a9b590c952a750dd9c99ca9d2 | refs/heads/main | 2023-08-22T02:59:24.228607 | 2021-10-07T12:02:34 | 2021-10-07T12:02:34 | 414,012,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,308 | py | #!/usr/bin/env python3
import os
import csv
import pickle
from datetime import datetime
import numpy as np
import ace_zero_core
import torch
import rl
from ace_zero_core import acezero
from rl import rl_utils
from rl.env.ace_zero_env import AceZeroEnvironment
from cartpole.util.dqn_util import BootstrappableDoubleDQNAgent
from dqn_train import main, get_env_dim, normalise
import dqn_train as base
INITIAL_NN_MIN_LOSS = 0.001
def bootstrap1(self, init_data, trial, out_path):
    """Pre-train the agent's network from D2D-SPL data (bound in as ``bootstrap``).

    Loads a previously saved bootstrap model for this trial when present;
    otherwise fits the network to the recorded action preferences until the
    loss drops below INITIAL_NN_MIN_LOSS (or 1M iterations), saves the model,
    and writes accuracy/loss stats to ``out_path``.
    """
    print('init min loss:', INITIAL_NN_MIN_LOSS)
    bootstrap_model_path = out_path + '/bootstrap-model-0' + str(trial) + '.p'
    print('bootstrap. trial ', trial, ', bootstrap_model_path:', bootstrap_model_path)
    if os.path.exists(bootstrap_model_path):
        #file = open(bootstrap_model_path, 'rb')
        #model = pickle.load(file)
        #file.close()
        # problem with load_state_dict for different Pytorch versions
        #self.dqn.load_state_dict(model.state_dict()) # copy weights from dqn1 to dqn2
        #self.dqn = model
        self.load_model(bootstrap_model_path)
        print('bootstrapped file found and model loaded')
        return
    # if this class is derived from DoubleDQNAgent, copy weights to dqn1 and dqn2
    #self.dqn1.load_state_dict(model.state_dict()) # copy weights from dqn1 to dqn2
    #self.dqn2.load_state_dict(model.state_dict()) # copy weights from dqn1 to dqn2
    #self.dqn = self.dqn1
    print("==== bootstraping agent with len(init_data):", len(init_data))
    init_data_len = len(init_data)
    # Seed the agent's replay memory with the (normalised) bootstrap transitions.
    for i in range(init_data_len):
        # s and s2 in init_data are NOT normalised
        s, a, r, s2, done, _ = init_data[i]
        s = base.normalise(s)
        s2 = base.normalise(s2)
        self.add_sample(s, a, r, s2, done)
    start_time = datetime.now()
    memory = self.memory.memory
    print('memory length:', len(memory))
    max_accuracy = 0
    min_loss = float('inf')
    stats_path = out_path + '/stats-0' + str(trial) + '.txt'
    stats_file = open(stats_path, 'w')
    # Supervised fitting loop: the whole memory is used as one batch, and the
    # targets are the recorded (non-normalised) action preferences.
    for i in range(1, 1_000_000 + 1):
        #minibatch = memory #memory[count : count + batch_size]
        minibatch = memory #memory[count : count + size]
        # next lines are copied from train() of the parent
        states = np.vstack([x.state for x in minibatch])
        actions = np.array([x.action for x in minibatch])
        Q_predict = self.get_Q(states)
        Q_target = Q_predict.clone().data.numpy() # Q_target is not a second network, most of its values are the same as the reward at the current timestep
        for j in range(init_data_len):
            s_not_normalised, a, r, s2_not_normalised, done, action_prefs = init_data[j]
            Q_target[j] = action_prefs # we use non-normalised action_prefs and see if it works
        Q_target = torch.Tensor(Q_target)
        self._train(Q_predict, Q_target)
        loss = self.loss.item()
        if loss < min_loss:
            min_loss = loss
        if i % 1000 == 0:
            # measure accuracy: fraction of samples whose greedy action
            # matches the recorded action
            Q_predict = self.get_Q(states)
            correct_prediction = init_data_len
            for j in range(init_data_len):
                argmax = np.argmax(Q_predict[j].data.numpy())
                if argmax != actions[j]:
                    correct_prediction -= 1
            accuracy = correct_prediction / init_data_len
            if accuracy > max_accuracy:
                max_accuracy = accuracy
            print('iteration ', i, "accuracy:", accuracy, "max:", max_accuracy, ", loss:", loss, ', min Loss:', min_loss)
        # if i == 50000 or i % 100_000 == 0:
        #     end_time = datetime.now()
        #     delta = end_time - start_time
        #     msg = 'iteration' + str(i) + ', min loss:' + str(min_loss) + ', loss:' + str(loss) + ', bootstrap time:' + str(delta.total_seconds()) + ' seconds'
        #     stats_file.write(msg + '\n')
        #     intermediate_bootstrap_model_path = out_path + '/bootstrap-model-0' + str(trial) + '-' + str(i).zfill(7) + '.pt'
        #     self.save_model(intermediate_bootstrap_model_path)
        #     print(msg)
        if min_loss < INITIAL_NN_MIN_LOSS:
            print('loss ' + str(min_loss) + '. Break at iteration' + str(i))
            break
    self.save_model(bootstrap_model_path)
    #file = open(bootstrap_model_path,'wb')
    #pickle.dump(self.dqn, file)
    #file.close()
    stats_file.write('min loss: ' + str(min_loss) + ', max score: ' + str(max_accuracy))
    stats_file.close()
def fixed_epsilon(epsiode: int, max_episode: int, min_eps: float) -> float:
    """Constant exploration schedule: always returns epsilon = 0.05.

    The arguments exist only to match the annealing-schedule signature
    expected by the dqn_train driver; they are ignored.
    """
    constant_eps = 0.05
    return constant_eps
def epsilon_annealing2(epsiode: int, max_episode: int, min_eps: float) -> float:
    """Linear epsilon decay starting at 0.2 with a floor of 0.01.

    Note: the ``min_eps`` argument is deliberately overridden with 0.01.
    """
    floor = 0.01
    per_episode_drop = (floor - 1.0) / max_episode
    annealed = per_episode_drop * epsiode + 0.2
    return annealed if annealed > floor else floor
def not_normalise(state):
    """Identity pass-through used to disable state normalisation (logs each call)."""
    message = 'not normalise'
    print(message)
    return state
def get_bootstrap_data(trial):
    """Load the D2D-SPL bootstrap training set for one trial.

    Each line of the file has the form
        episode,[state],[action preferences],[next state],reward
    e.g. ``1,[1,2,3,4],[1,2,3,4,5],[1,2,3,4],1``.  States are NOT normalised
    here (see create_classifier() in aircombat_classifier.py).

    Reads from the module-level ``bootstrap_training_set_path`` directory and
    returns a list of (state, action, reward, next_state, done, action_prefs)
    tuples, where ``action`` is the argmax of the preferences and ``done`` is
    always False.
    """
    bootstrap_file = bootstrap_training_set_path + '/trainingset0' + str(trial) + '.txt'
    print('get bootstrap_data for trial', trial, bootstrap_file)
    data = []
    # Fix: use a context manager so the file handle is closed (the original
    # opened the file and never closed it).
    with open(bootstrap_file, 'r') as file:
        lines = file.readlines()
    for line in lines:
        index1 = line.index(',')
        ep = int(line[0 : index1])  # episode number (parsed but unused)
        index1 = line.index('[', index1 + 1)
        index2 = line.index(']', index1 + 1)
        state = [float(s) for s in line[index1 + 1 : index2].split(',')]
        index1 = line.index('[', index1 + 1)
        index2 = line.index(']', index1 + 1)
        action_prefs = [float(s) for s in line[index1 + 1 : index2].split(',')]
        index1 = line.index('[', index1 + 1)
        index2 = line.index(']', index1 + 1)
        next_state = [float(s) for s in line[index1 + 1 : index2].split(',')]
        # index2 + 2 skips the closing ']' and the following ','.
        reward = float(line[index2 + 2 : ])
        action = np.argmax(action_prefs)
        data.append((np.array(state), action, reward, np.array(next_state), False, action_prefs))
    # we can sort data on reward and trim rows here
    return data
# Wire the d2dsql variants into the shared dqn_train driver by monkey-patching
# the hooks that dqn_train (imported as `base`) looks up at run time.
base.DQNAgent = BootstrappableDoubleDQNAgent
base.get_bootstrap_data = get_bootstrap_data
base.hidden_dim = 300
base.DQNAgent.bootstrap = bootstrap1
#base.epsilon_annealing = epsilon_annealing2
base.epsilon_annealing = fixed_epsilon  # constant 0.05 exploration
#base.normalise = not_normalise

if __name__ == '__main__':
    #os.environ['OMP_NUM_THREADS'] = '1' --> does not work
    #torch.set_num_threads(1) --> does not work
    scenario_name = 'standard-001.json'
    # NOTE(review): get_bootstrap_data() reads this module-level name, so it
    # is only defined when the script is run directly, not when imported.
    bootstrap_training_set_path = 'rl_results/ql-d2dspl-001.json'
    out_path = 'rl_results/bootstrapped-dqn-002j.json'
    if not os.path.exists(out_path):
        os.mkdir(out_path)
    print('start:', datetime.now().strftime('%d/%m/%y %H:%M:%S'))
    env = AceZeroEnvironment(scenario_name)
    INITIAL_NN_MIN_LOSS = 0.01 #0.001
    input_dim, output_dim = get_env_dim(env)
    base.NUM_EPISODES = 10_000
    base.START_TRIAL = 0
    base.NUM_TRIALS = 1 + base.START_TRIAL
    main(env, input_dim, output_dim, out_path)
| [
"budi2020@gmail.com"
] | budi2020@gmail.com |
3f210c9ae5529fd9ab9a3ad99bdf62987cd5fcbc | 44fb87ff6b94736610c7e84ecc00c4045f097328 | /mabozen/conf/datatype_mapping/datatype_mapping_xpath.py | 01a80772abe130002aad38b60ec1c2899faf6cca | [
"MIT"
] | permissive | mabotech/mabozen | c02899dad34310e3c5c68afe2af05d3f11946511 | 531b138fea1212e959ecfb9370b622b0c9f519a5 | refs/heads/master | 2016-09-06T21:31:51.731077 | 2014-07-20T13:44:01 | 2014-07-20T13:44:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,406 | py |
from lxml import etree
class Singleton(type):
    """Metaclass that caches a single instance per class.

    The first instantiation is memoised on the class object; every later call
    returns the cached instance (its arguments are ignored).
    """
    def __call__(cls, *args, **kwargs):
        # Fix: forward keyword arguments too -- the original accepted only
        # positional arguments and raised TypeError on keyword construction.
        if not hasattr(cls, 'instance'):
            cls.instance = super(Singleton, cls).__call__(*args, **kwargs)
        return cls.instance
class Counter(object):
    """Monotonically increasing counter used to allocate surrogate row ids.

    NOTE(review): ``__metaclass__`` is the Python 2 spelling; under Python 3
    it is ignored and Counter is NOT a singleton.  The rest of this script is
    Python 2 (print statements below), where the singleton behaviour holds.
    """
    __metaclass__ = Singleton

    def __init__(self):
        self.i = 0

    def inc(self):
        # Increment first, so the initial call returns 1.
        self.i = self.i + 1
        return self.i
def dbdt_insert(dbdt_dict):
    """Render one mt_t_db_datatype INSERT statement from a column-value mapping."""
    template = """insert into mt_t_db_datatype( id, fk_datatype,fk_db,name,alias,shortname,identity,Undefined,scale,width,showscale,showtype,showwidth,ordering,active,createdon,createdby,rowversionstamp)
    values (%(id)s, %(fk_datatype)s, %(fk_db)s, '%(name)s', '%(alias)s', '%(shortname)s', '%(identity)s', %(undefined)s, %(scale)s, %(width)s, %(showscale)s, %(showtype)s, %(showwidth)s, %(ordering)s,1, now(), 'MT', 1 );\n"""
    return template % dbdt_dict
def db_insert(vt):
    """Render one mt_t_db_platform INSERT from a (platform_id, name, ordering) tuple."""
    template = """ insert into mt_t_db_platform(id,name,ordering,active,createdon,createdby,rowversionstamp)
    values ( %s, '%s', %s, 1, now(), 'MT', 1);\n """
    return template % vt
def dt_insert(vd):
    """Render one mt_t_datatype INSERT statement from a column-value mapping.

    Python 2 only (print statement).  Note: the id column is filled from
    %(fk_datatype)s and the 'alias' column reuses %(name)s.
    """
    print "dt_insert", vd['name']
    sql =""" insert into mt_t_datatype(id,name, alias, shortname, ordering,active,createdon,createdby,rowversionstamp)
    values ( %(fk_datatype)s, '%(name)s', '%(name)s', '%(shortname)s', %(ordering)s, 1, now(), 'MT', 1 ) ;\n""" % vd
    return sql
def convert(val):
    """Translate the XML boolean strings 'false'/'true' to 0/1.

    Raises ValueError (a subclass of the bare Exception the original raised)
    for any other value, including the offending value in the message.
    """
    mapping = {'false': 0, 'true': 1}
    try:
        return mapping[val]
    except KeyError:
        raise ValueError('val not false or true: %r' % (val,))
def dt_extract(pid, node):
    """Emit mt_t_datatype INSERT statements for every DataType child of *node*.

    *pid* is stored under 'fk_db' for signature parity with extract(), though
    dt_insert() does not interpolate that key.
    """
    statements = []
    for ordering, dt in enumerate(node.xpath('DataType'), 1):
        attr = lambda name: dt.xpath('@' + name)[0]
        row = {
            'ordering': ordering,
            'fk_db': pid,
            'fk_datatype': attr('DatatypeId'),
            'name': attr('DatatypeName'),
            'alias': attr('DatatypeName'),
            'identity': attr('Identity'),
            'scale': attr('Scale'),
            'shortname': attr('ShortName'),
            'showscale': convert(attr('ShowScale')),
            'showtype': convert(attr('ShowType')),
            'showwidth': convert(attr('ShowWidth')),
            'undefined': convert(attr('Undefined')),
            'width': attr('Width'),
        }
        statements.append(dt_insert(row))
    return ''.join(statements)
def extract(pid, node):
    """Emit mt_t_db_datatype INSERT statements for every DataType child of *node*.

    Row ids are allocated from the process-wide Counter.
    """
    counter = Counter()
    statements = []
    for ordering, dt in enumerate(node.xpath('DataType'), 1):
        attr = lambda name: dt.xpath('@' + name)[0]
        row = {
            'ordering': ordering,
            'fk_db': pid,
            'id': counter.inc(),
            'fk_datatype': attr('DatatypeId'),
            'name': attr('DatatypeName'),
            'alias': attr('DatatypeName'),
            'identity': attr('Identity'),
            'scale': attr('Scale'),
            'shortname': attr('ShortName'),
            'showscale': convert(attr('ShowScale')),
            'showtype': convert(attr('ShowType')),
            'showwidth': convert(attr('ShowWidth')),
            'undefined': convert(attr('Undefined')),
            'width': attr('Width'),
        }
        statements.append(dbdt_insert(row))
    return ''.join(statements)
def main():
    """Convert DatatypeMappings_SystemDefault.xml into SQL INSERT scripts.

    Iterates every DBPlatform node; currently only the
    "PostgreSQL 8.0 (system)" mapping is written to the output file (the
    other branches are commented away).  Python 2 only (print statement).
    """
    sqlfile = "../../../output/mapping/dt03.sql"
    fh = open(sqlfile, 'w')
    fn = "DatatypeMappings_SystemDefault.xml"
    tree = etree.parse(fn)
    DBPlatform = tree.xpath('/DataTypeMapping/DBPlatform')
    ordering = 0
    for node in DBPlatform:
        ordering = ordering + 1
        #print node.xpath('@MappingName')
        pid = node.xpath('@PlatformId')
        pname = node.xpath('@MappingName')
        print(pname)
        vt = (pid[0], pname[0], ordering)
        # NOTE(review): the platform INSERT is generated but never written
        # (fh.write is commented out below) -- confirm that is intentional.
        sql = db_insert(vt)
        #fh.write(sql)
        if pid == ['0']:
            print "Logical (system)"
            #sql = dt_extract(pid[0], node)
            #fh.write(sql)
        if pname[0] == "PostgreSQL 8.0 (system)":
            sql = extract(pid[0], node)
            fh.write(sql)
    fh.close()

if __name__ == '__main__':
    main()
"aidear@163.com"
] | aidear@163.com |
4a122303145739c3efd1bba21c198a0a48e14a57 | c8c4721e2282aaeece7bb36e6b7c33fe2e4af207 | /torch/testing/_core.py | 8fab432009def6a7c3e7565a7173d08351d7c12f | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | suo/pytorch | ac1008c62906cf0b055c2c851dab611c0a1276b8 | fb0e27d38a8fdab4e1c14d6378c9e41cb30fd6a3 | refs/heads/master | 2023-04-18T18:57:04.622931 | 2022-01-28T04:57:17 | 2022-01-28T05:01:06 | 142,352,607 | 1 | 0 | NOASSERTION | 2019-11-08T19:38:03 | 2018-07-25T20:50:09 | C++ | UTF-8 | Python | false | false | 2,217 | py | """
The testing package contains testing-specific utilities.
"""
import torch
import random
import operator
FileCheck = torch._C.FileCheck  # re-export of the C++ FileCheck test utility

# Public API of this module.
__all__ = [
    "FileCheck",
    "make_non_contiguous",
]
# Helper function that returns True when the dtype is an integral dtype,
# False otherwise.
# TODO: implement numpy-like issubdtype
def is_integral(dtype: torch.dtype) -> bool:
    """Return True for bool and the signed/unsigned integer torch dtypes."""
    integral_dtypes = (
        torch.bool,
        torch.uint8,
        torch.int8,
        torch.int16,
        torch.int32,
        torch.int64,
    )
    return dtype in integral_dtypes
def is_quantized(dtype: torch.dtype) -> bool:
    """Return True for the quantized torch dtypes."""
    quantized_dtypes = (torch.quint8, torch.qint8, torch.qint32, torch.quint4x2)
    return dtype in quantized_dtypes
# Helper function that maps a flattened index back into the given shape
# TODO: consider adding torch.unravel_index
def _unravel_index(flat_index, shape):
flat_index = operator.index(flat_index)
res = []
# Short-circuits on zero dim tensors
if shape == torch.Size([]):
return 0
for size in shape[::-1]:
res.append(flat_index % size)
flat_index = flat_index // size
if len(res) == 1:
return res[0]
return tuple(res[::-1])
def make_non_contiguous(tensor: torch.Tensor) -> torch.Tensor:
    """Return a tensor with the same sizes and values as *tensor* whose
    underlying storage is laid out non-contiguously.

    Tensors with at most one element are simply cloned: they cannot be made
    non-contiguous.
    """
    if tensor.numel() <= 1:  # can't make non-contiguous
        return tensor.clone()

    # Inflate a couple of randomly-chosen dimensions so there is slack to
    # carve a non-contiguous view out of later.
    padded_size = list(tensor.size())
    for _ in range(2):
        which = random.randint(0, len(padded_size) - 1)
        padded_size[which] = padded_size[which] + random.randint(4, 15)

    # narrow doesn't make a non-contiguous tensor if we only narrow the 0-th
    # dimension (which always happens with a 1-D tensor), so append an extra
    # right-most dimension and immediately slice it away with select().
    scratch = tensor.new(torch.Size(padded_size + [random.randint(2, 3)]))
    scratch = scratch.select(len(scratch.size()) - 1, random.randint(0, 1))

    # Narrow every inflated dimension back down to the requested size.
    for dim in range(len(padded_size)):
        if scratch.size(dim) != tensor.size(dim):
            offset = random.randint(1, scratch.size(dim) - tensor.size(dim))
            scratch = scratch.narrow(dim, offset, tensor.size(dim))

    scratch.copy_(tensor)
    # Use .data here to hide the view relation between the result and the
    # temporary tensors above.
    return scratch.data
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
b7704990a6b56c8a4e5cc6c0ba186a1021d6aab8 | b7312dc013ba06e5b44b33c0411f4948c4794346 | /study10/process_pool.py | 1c084f6d5a90687202c69b86aeb29cb4307bbb0a | [] | no_license | GaoFuhong/python-code | 50fb298d0c1e7a2af55f1e13e48063ca3d1a189f | 7d17c98011e5a1e74d49332da9f87f5cb576822d | refs/heads/master | 2021-02-07T20:25:06.997173 | 2020-03-01T02:22:41 | 2020-03-01T02:26:04 | 244,072,971 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | # Author:Fuhong Gao
# Process pool demo (starting processes on Windows is really slow...)
from multiprocessing import Process,Pool
import time,os
def Foo(i):
    """Worker task: sleep one second, print the worker's pid, return i + 100."""
    time.sleep(1)
    print('in process-',os.getpid())
    return i+100
def Bar(arg):
    """apply_async completion callback: runs in the MAIN process with Foo's return value."""
    print("--> exec done:",arg,'child process id:',os.getpid())
if __name__ == '__main__':  # run only when executed directly, not when imported as a module
    pool = Pool(5)  # same as Pool(processes=5): at most 5 worker processes run at once
    print('main process id:',os.getpid())
    for j in range(10):
        pool.apply_async(func=Foo,args=(j,),callback=Bar)  # callback runs in the main process
        # pool.apply(func=Foo,args=(j,))        # serial execution
        # pool.apply_async(func=Foo,args=(j,))  # parallel execution (no callback)
    print('-----------------------')
    pool.close()
    pool.join()  # wait for pooled tasks to finish before exiting; without join() the program would exit immediately
"1350086369@qq.com"
] | 1350086369@qq.com |
89cd1fb4b5a3ed87edef90e19a0037aa6a37efde | 7a3fc3ea3dd71e4ec85ac73e0af57ae976777513 | /.history/flaskblog_20210526065440.py | 1b3ae27d0644bdbc0bcb4fa8d28387719b71771f | [] | no_license | khanhdk0000/first_proj | 72e9d2bbd788d6f52bff8dc5375ca7f75c0f9dd0 | bec0525353f98c65c3943b6d42727e3248ecfe22 | refs/heads/main | 2023-05-12T10:36:08.026143 | 2021-06-05T15:35:22 | 2021-06-05T15:35:22 | 374,148,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,561 | py | from flask import Flask, render_template, url_for, request, jsonify
from enum import Enum
app = Flask(__name__)
class PaletteType(Enum):
    """Overall palette categories offered by the colour API."""
    MONOTONE = 1
    DOUTONE = 2  # NOTE(review): likely a typo for DUOTONE; renaming would break API clients
    COLORFUL = 3
class HarmonyRule(Enum):
    """Colour-harmony rules selectable by the palette generator.

    NONE (8) means no harmony rule is applied.  Member names mirror the
    client-facing API and are kept as-is.
    """
    COMPLEMENTARY = 1
    ANALOGOUS = 2
    TRIAD = 3
    SQUARE = 4
    BRISE_FAN = 5
    SPLIT_COMPLEMETARY = 6
    DOUBLE_SPLIT_COMPLEMENTARY = 7
    NONE = 8
# Default palette response returned (and mutated) by the /test2 endpoint.
# NOTE(review): the 'DOUTONE' and 'COLORFUL' keys hold harmony-rule *names*
# (HarmonyRule(8) is NONE, HarmonyRule(2) is ANALOGOUS) -- confirm intent.
res = {
    'primaryColor': '#FFFFFF',
    'paletteType': PaletteType(1).name,
    'DOUTONE': HarmonyRule(8).name,
    'COLORFUL': HarmonyRule(2).name,
}

# Hard-coded demo posts rendered by the home and about templates.
posts = [
    {
        'author': 'Corey Schafer',
        'title': 'Blog Post 1',
        'content': 'First post content',
        'date_posted': 'April 20, 2018'
    },
    {
        'author': 'Jane Doe',
        'title': 'Blog Post 2',
        'content': 'Second post content',
        'date_posted': 'April 21, 2018'
    }
]
@app.route('/')
@app.route('/home')
def hello_world():
    """Render the home page with the in-memory demo posts."""
    return render_template('home.html', posts=posts, title='Hey')
@app.route('/about')
def about():
    """Render the about page; an optional ?my_var=... query arg is passed to the template."""
    my_var = request.args.get('my_var', None)
    return render_template('about.html', posts=posts, var=my_var)
@app.route('/test')
def test():
    """Simple GET demo/healthcheck endpoint."""
    return 'This is my first API call!'
@app.route('/test2', methods=["POST"])
def testpost():
    """Demo POST endpoint: adjusts the module-level `res` palette by domain.

    NOTE(review): work-in-progress snapshot --
      * mutates the module-level `res` dict, so state leaks across requests;
      * the 'Ecommerce' branch returns the bare dict while the fall-through
        returns jsonify(res);
      * `dictToReturn` is computed but never returned; its only effect is a
        KeyError (HTTP 500) when the payload lacks a 'text' key.
    """
    input_json = request.get_json(force=True)
    domain = input_json['domain']
    if domain == 'Ecommerce':
        res['primaryColor'] = '#E1D89F'
        return res
    dictToReturn = {'text':input_json['text']}
    return jsonify(res)

if __name__ == '__main__':
    # Development server only; debug=True enables the reloader and debugger.
    app.run(debug=True)
| [
"khanhtran28092000@gmail.com"
] | khanhtran28092000@gmail.com |
191371fc150002e925fb46d7fe0edfe9a4d109f0 | c732e1ab1135c4bc0598265ee8fea4db5dc12a2b | /mbme/cs285/envs/__init__.py | 4fdbfc79d65e87b09a17aadc8beb187936db0a46 | [] | no_license | Sohojoe/berkeleydeeprlcourse_fall2020 | 169782fb566aa338e617a301ec1ab7e5f62e49cd | 97eeafde4ff02c8a3429ec5096ed597418b01954 | refs/heads/main | 2023-02-11T13:53:01.867290 | 2021-01-03T05:51:36 | 2021-01-03T05:51:36 | 325,815,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | from gym.envs.registration import register
def register_envs():
register(
id='marathon-hopper-v0',
entry_point='cs285.envs.marathon_envs:HopperEnv',
max_episode_steps=1000,
)
register(
id='marathon-walker-v0',
entry_point='cs285.envs.marathon_envs:WalkerEnv',
max_episode_steps=1000,
)
register(
id='marathon-ant-v0',
entry_point='cs285.envs.marathon_envs:AntEnv',
max_episode_steps=1000,
)
| [
"joe@joebooth.com"
] | joe@joebooth.com |
f0a10672945a50f5744a4032fcec4abfcacbea79 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/sms/pymessages/venv/Lib/site-packages/pip/_internal/commands/list.py | a8e16bf2ddf8cd9df43f0fe7e41d1b984e58ffbc | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:da8deb630dfb102ae17b8f81bb9b3fd82d1bc21620821878f37dccc5c58012e8
size 11312
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
837354c48998a7897e7ccb9dd125e15314090e4f | ef1d38cfef63f22e149d6c9dd14e98955693c50d | /webhook/protos/pogoprotos/data/telemetry/rpc_response_telemetry_pb2.py | 3affea348e33cb38a14af4df63940874948ced59 | [] | no_license | Kneckter/WebhookListener | 4c186d9012fd6af69453d9d51ae33a38aa19b5fd | ea4ff29b66d6abf21cc1424ed976af76c3da5511 | refs/heads/master | 2022-10-09T04:26:33.466789 | 2019-11-24T17:30:59 | 2019-11-24T17:30:59 | 193,372,117 | 2 | 0 | null | 2022-09-23T22:26:10 | 2019-06-23T16:39:34 | Python | UTF-8 | Python | false | true | 3,158 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/data/telemetry/rpc_response_telemetry.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.data.telemetry import rpc_response_time_pb2 as pogoprotos_dot_data_dot_telemetry_dot_rpc__response__time__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/data/telemetry/rpc_response_telemetry.proto',
package='pogoprotos.data.telemetry',
syntax='proto3',
serialized_pb=_b('\n6pogoprotos/data/telemetry/rpc_response_telemetry.proto\x12\x19pogoprotos.data.telemetry\x1a\x31pogoprotos/data/telemetry/rpc_response_time.proto\"u\n\x14RpcResponseTelemetry\x12\x17\n\x0fwindow_duration\x18\x01 \x01(\x02\x12\x44\n\x10response_timings\x18\x02 \x03(\x0b\x32*.pogoprotos.data.telemetry.RpcResponseTimeb\x06proto3')
,
dependencies=[pogoprotos_dot_data_dot_telemetry_dot_rpc__response__time__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_RPCRESPONSETELEMETRY = _descriptor.Descriptor(
name='RpcResponseTelemetry',
full_name='pogoprotos.data.telemetry.RpcResponseTelemetry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='window_duration', full_name='pogoprotos.data.telemetry.RpcResponseTelemetry.window_duration', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_timings', full_name='pogoprotos.data.telemetry.RpcResponseTelemetry.response_timings', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=136,
serialized_end=253,
)
_RPCRESPONSETELEMETRY.fields_by_name['response_timings'].message_type = pogoprotos_dot_data_dot_telemetry_dot_rpc__response__time__pb2._RPCRESPONSETIME
DESCRIPTOR.message_types_by_name['RpcResponseTelemetry'] = _RPCRESPONSETELEMETRY
RpcResponseTelemetry = _reflection.GeneratedProtocolMessageType('RpcResponseTelemetry', (_message.Message,), dict(
DESCRIPTOR = _RPCRESPONSETELEMETRY,
__module__ = 'pogoprotos.data.telemetry.rpc_response_telemetry_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.telemetry.RpcResponseTelemetry)
))
_sym_db.RegisterMessage(RpcResponseTelemetry)
# @@protoc_insertion_point(module_scope)
| [
"kasmar@gitlab.com"
] | kasmar@gitlab.com |
a54658bd8a22e553868a14b5f60211cd32da3b52 | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/parent_database/month_uml_friend.py | aed12b5960becaee8d5dc22304612a6d6ecdb811 | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | const request = require('request');
const uuidv4 = require('uuid/v4');
/* Checks to see if the subscription key is available
as an environment variable. If you are setting your subscription key as a
string, then comment these lines out.
If you want to set your subscription key as a string, replace the value for
the Ocp-Apim-Subscription-Key header as a string. */
const subscriptionKey="2785789c0f1c3d8f85baa3853975e246";
if (!subscriptionKey) {
throw new Error('Environment variable for your subscription key is not set.')
};
/* If you encounter any issues with the base_url or path, make sure that you are
using the latest endpoint: https://docs.microsoft.com/azure/cognitive-services/translator/reference/v3-0-translate */
/* Sends one translation request to the Microsoft Translator v3 API and
   pretty-prints the JSON response body to stdout. */
function translateText(){
    let options = {
        method: 'POST',
        baseUrl: 'https://api.cognitive.microsofttranslator.com/',
        url: 'translate',
        qs: {
            'api-version': '3.0',
            // TODO: 'to' must contain at least one valid target language
            // code (e.g. 'de'); an empty string is rejected by the service.
            'to': ['']
        },
        headers: {
            // The key must travel in the Ocp-Apim-Subscription-Key header
            // (as the setup comment above says); any other header name
            // makes the service answer 401 Unauthorized.
            'Ocp-Apim-Subscription-Key': subscriptionKey,
            'Content-type': 'application/json',
            // Unique per-request trace id, useful for support tickets.
            'X-ClientTraceId': uuidv4().toString()
        },
        body: [{
            'text': 'Hello World!'
        }],
        json: true,
    };
    request(options, function(err, res, body){
        console.log(JSON.stringify(body, null, 4));
    });
};
// Call the function to translate text.
translateText();
| [
"soric.matko@gmail.com"
] | soric.matko@gmail.com |
c4f259189903ac5cf05092f0bda5b48ad1860a48 | 680bd46e8eae20e78a425f766432711a47235374 | /models/netflow_qo_s_report_table_row.py | ffb909260dced53f35dbb127152a5ba862a72a9d | [
"Apache-2.0"
] | permissive | ILMostro/lm-sdk-python | 9f45217d64c0fc49caf2f4b279a124c2efe3d24d | 40da5812ab4d50dd1c6c3c68f7ea13c4d8f4fb49 | refs/heads/master | 2022-02-01T16:51:12.810483 | 2019-07-16T17:54:11 | 2019-07-16T17:54:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,477 | py | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from logicmonitor_sdk.models.netflow_data_base import NetflowDataBase # noqa: F401,E501
class NetflowQoSReportTableRow(NetflowDataBase):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> swagger type string; consumed by to_dict() below.
    swagger_types = {
        'data_type': 'str',
        'received': 'float',
        'sent': 'float',
        'type': 'str'
    }
    # Attribute name -> JSON key as it appears on the wire (camelCase).
    attribute_map = {
        'data_type': 'dataType',
        'received': 'received',
        'sent': 'sent',
        'type': 'type'
    }
    def __init__(self, data_type=None, received=None, sent=None, type=None):  # noqa: E501
        """NetflowQoSReportTableRow - a model defined in Swagger"""  # noqa: E501
        # Private backing fields; public access goes through the properties.
        self._data_type = None
        self._received = None
        self._sent = None
        self._type = None
        self.discriminator = None
        # Assign only explicitly supplied values so unset attrs stay None.
        if data_type is not None:
            self.data_type = data_type
        if received is not None:
            self.received = received
        if sent is not None:
            self.sent = sent
        if type is not None:
            self.type = type
    @property
    def data_type(self):
        """Gets the data_type of this NetflowQoSReportTableRow.  # noqa: E501

        :return: The data_type of this NetflowQoSReportTableRow.  # noqa: E501
        :rtype: str
        """
        return self._data_type
    @data_type.setter
    def data_type(self, data_type):
        """Sets the data_type of this NetflowQoSReportTableRow.

        :param data_type: The data_type of this NetflowQoSReportTableRow.  # noqa: E501
        :type: str
        """
        self._data_type = data_type
    @property
    def received(self):
        """Gets the received of this NetflowQoSReportTableRow.  # noqa: E501

        :return: The received of this NetflowQoSReportTableRow.  # noqa: E501
        :rtype: float
        """
        return self._received
    @received.setter
    def received(self, received):
        """Sets the received of this NetflowQoSReportTableRow.

        :param received: The received of this NetflowQoSReportTableRow.  # noqa: E501
        :type: float
        """
        self._received = received
    @property
    def sent(self):
        """Gets the sent of this NetflowQoSReportTableRow.  # noqa: E501

        :return: The sent of this NetflowQoSReportTableRow.  # noqa: E501
        :rtype: float
        """
        return self._sent
    @sent.setter
    def sent(self, sent):
        """Sets the sent of this NetflowQoSReportTableRow.

        :param sent: The sent of this NetflowQoSReportTableRow.  # noqa: E501
        :type: float
        """
        self._sent = sent
    @property
    def type(self):
        """Gets the type of this NetflowQoSReportTableRow.  # noqa: E501

        :return: The type of this NetflowQoSReportTableRow.  # noqa: E501
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this NetflowQoSReportTableRow.

        :param type: The type of this NetflowQoSReportTableRow.  # noqa: E501
        :type: str
        """
        self._type = type
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared swagger attributes, recursing into nested models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch: merge mapping entries if this model type
        # also derived from dict (always False for this class).
        if issubclass(NetflowQoSReportTableRow, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality = same model type and identical attribute dict.
        if not isinstance(other, NetflowQoSReportTableRow):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"bamboo@build01.us-west-1.logicmonitor.net"
] | bamboo@build01.us-west-1.logicmonitor.net |
c30d66e61d205d652025d4aacc12cb9ed94b3e99 | 67f86bb3d09cbc86cac698b3f0abaf01457a966a | /master/bopytest-code/code/ch4/dt/2/unnecessary_math.py | dca5e5454a0dd1e77c0f748a62b36acbbfcd06ca | [
"MIT"
] | permissive | tied/DevArtifacts | efba1ccea5f0d832d4227c9fe1a040cb93b9ad4f | 931aabb8cbf27656151c54856eb2ea7d1153203a | refs/heads/master | 2020-06-06T01:48:32.149972 | 2018-12-08T15:26:16 | 2018-12-08T15:26:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | """
This module defines multiply(a, b) and divide(a, b).
>>> import unnecessary_math as um
Here's how you use multiply:
>>> um.multiply(4, 3)
12
>>> um.multiply('a', 3)
'aaa'
Here's how you use divide:
>>> um.divide(10, 5)
2.0
"""
def multiply(a, b):
    """
    Returns a multiplied by b.

    >>> import unnecessary_math as um
    >>> um.multiply(4, 3)
    12
    >>> um.multiply('a', 3)
    'aaa'
    """
    # Works for any operand types supporting ``*`` -- including sequence
    # repetition, as the doctest above demonstrates.
    return a * b
def divide(a, b):
    """
    Returns a divided by b.

    >>> import unnecessary_math as um
    >>> um.divide(10, 5)
    2.0
    """
    # True division: int operands yield a float (see doctest); b == 0
    # raises ZeroDivisionError.
    return a / b
| [
"alexander.rogalsky@yandex.ru"
] | alexander.rogalsky@yandex.ru |
7321457036e055ce425ff626c31f3526474e40da | 45c52da4d20f912e462359b051a3a8f1dced7210 | /module/SequencePlayback.py | bac331fac2553ba6b50ad8f1140d1ee368a3731b | [] | no_license | solpie/SeqTruan | 8b55c37d198898e40a2808d751b011d23022e552 | 4ed23592bf96a9d9261a7cc5fa82cd04d7e3da7d | refs/heads/master | 2021-01-10T03:38:30.230285 | 2015-06-15T14:21:43 | 2015-06-15T14:21:43 | 36,845,728 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,285 | py | __author__ = 'toramisu'
import os
from PyQt5.Qt import QTimer
from module.Events import *
from model import SImage
class SequencePlayback():
    """Drives looping frame-by-frame playback of an image sequence.

    Frames advance on QTimer ticks and on AudioPlaybackEvent.TICK events
    (both wired to ``onTick``); play/pause is controlled by
    PlaybackEvent.STATE messages handled in ``onState``.
    """
    def __init__(self):
        self.imageSequence = []        # loaded frames (see commented-out load())
        self.framerate = 0             # frames per second, set via setFramerate()
        self.currentFrame = -1         # NOTE(review): written here, never read again
        self.currentFrameIdx = -1      # index of the most recently rendered frame
        self.endFrameIdx = 1           # exclusive upper bound for frame indices
        self.state = ''                # last playback state string received
        self.timer = QTimer()
        # NOTE(review): rebinds timerEvent on the instance so Qt's timer
        # callback goes to onTick -- confirm PyQt actually dispatches to this
        # attribute; QTimer's usual mechanism is the ``timeout`` signal, and
        # onTick's ``time`` argument would then be a QTimerEvent here.
        self.timer.timerEvent = self.onTick
        Event.add(AudioPlaybackEvent.TICK, self.onTick)
        Event.add(PlaybackEvent.STATE, self.onState)
        self.setFramerate(24)
        pass
    def onState(self, state):
        """Handle a PlaybackEvent.STATE message: start or stop playback."""
        self.state = state
        if state == PlayStateType.PLAY:
            self.play()
        elif state == PlayStateType.PAUSE:
            self.pause()
            pass
        pass
    def onTick(self, time):
        """Tick handler shared by the timer and audio events: emit next frame."""
        # ``time`` (timestamp or QTimerEvent depending on the caller) is unused.
        self.render()
        pass
    # def load(self, imagesPath=None):
    #     if imagesPath:
    #         for root, dirs, files in os.walk(imagesPath):
    #             for filespath in files:
    #                 filename = os.path.join(root, filespath).replace('\\', '/')
    #                 # todo support image ext
    #                 if filename.find('.png') < 0:
    #                     continue
    #                 simage = SImage(filename)
    #                 self.imageSequence.append(simage)
    #                 simage.frameIdx = len(self.imageSequence)
    #                 self.endFrameIdx = simage.frameIdx
    #                 print('[load img]: ', filename)
    #         Event.dis(ActionEvent.LOAD_SEQ, self.imageSequence)
    #     pass
    def play(self):
        """Start the playback timer if it is not already running."""
        if not self.timer.isActive():
            self.timer.start()
            pass
        pass
    def pause(self):
        """Stop the playback timer if it is running."""
        if self.timer.isActive():
            self.timer.stop()
            pass
        pass
    def render(self):
        """Advance to the next frame (wrapping at endFrameIdx) and broadcast it."""
        # Modulo makes playback loop forever instead of stopping at the end.
        self.currentFrameIdx = (self.currentFrameIdx + 1) % self.endFrameIdx
        event = SequencePlaybackEvent()
        event.type = SequencePlaybackEvent.RENDER_FRAME
        event.frameIdx = self.currentFrameIdx
        Event.dis(SequencePlaybackEvent.RENDER_FRAME, event)
    def setFramerate(self, framerate):
        """Set playback speed; timer interval becomes 1000/framerate ms."""
        self.framerate = framerate
        self.timer.setInterval(1000 / self.framerate)
        pass
| [
"solpie.net@gmail.com"
] | solpie.net@gmail.com |
c2895596fd10a2fc4f221b84f49ad9db95988517 | bb7712c8fab2380ffd37e53136097d8d322a73e7 | /order/migrations/0014_remove_order_order_id.py | e67b11e4d630676c1cd8936e9fbc22e9ab0dfd43 | [] | no_license | nitin1011/Daily-Kart | 5dfaad06c4ab7ea236a8f1b0e29aaea4baba0b81 | 59859bd2dc66563ff1ab0649591e4b19b6b4a85b | refs/heads/master | 2020-08-15T09:52:22.826037 | 2019-10-15T14:56:28 | 2019-10-15T14:56:28 | 215,320,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | # Generated by Django 2.2.2 on 2019-10-02 10:48
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration dropping the ``order_id`` field from Order."""
    dependencies = [
        ('order', '0013_auto_20191002_1617'),
    ]
    operations = [
        # Removes the column; any data stored in it is lost on apply.
        migrations.RemoveField(
            model_name='order',
            name='order_id',
        ),
    ]
| [
"nitinjethwani10@gmail.com"
] | nitinjethwani10@gmail.com |
9ba33c43b52f06943abc89aadcf47123451c8752 | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-QTKit/PyObjCTest/test_qtmoviemodernizer.py | 924bd3a96364bbd4331610fd72fab57807c6deb9 | [
"MIT"
] | permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | from PyObjCTools.TestSupport import *
from QTKit import *
class TestQTError (TestCase):
    # NOTE(review): despite the class name, these tests exercise the
    # QTMovieModernizer API; the ``unicode`` checks below mean this file
    # targets Python 2.
    @min_os_level('10.9')
    def testConstants(self):
        """Pin the 10.9 modernizer status codes and output-format constants."""
        self.assertEqual(QTMovieModernizerStatusUnknown, 0)
        self.assertEqual(QTMovieModernizerStatusPreparing, 1)
        self.assertEqual(QTMovieModernizerStatusRunning, 2)
        self.assertEqual(QTMovieModernizerStatusCancelled, 3)
        self.assertEqual(QTMovieModernizerStatusFailed, 4)
        self.assertEqual(QTMovieModernizerStatusCompletedWithSuccess, 5)
        self.assertEqual(QTMovieModernizerStatusNotRequired, 6)
        self.assertIsInstance(QTMovieModernizerOutputFormat_H264, unicode)
        self.assertIsInstance(QTMovieModernizerOutputFormat_AppleProRes422, unicode)
        self.assertIsInstance(QTMovieModernizerOutputFormat_AppleProRes4444, unicode)
    @min_os_level('10.9')
    def testMethods(self):
        """Check bridged signatures: BOOL result, by-ref error arg, void block."""
        self.assertResultIsBOOL(QTMovieModernizer.requiresModernization_error_)
        self.assertArgIsOut(QTMovieModernizer.requiresModernization_error_, 1)
        self.assertArgIsBlock(QTMovieModernizer.modernizeWithCompletionHandler_, 0, b'v')
if __name__ == "__main__":
    main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
c4201c9f0a97d7dd5f456d9fa220b91b3ac2ab85 | 43ec4ea3b633244f5deef45315f19844a852a034 | /ExanteTaxCalculator/src/infrastructure/report_row.py | 80d336933e1578980605ea6bc60f1f9c0ec53095 | [] | no_license | mateuszmidor/PythonStudy | 4a34feab04fe1bcc62a67506a5e7be85fb209d8c | 579b79b76cb5ce27cb9af09a2bd3db3c5ad65595 | refs/heads/master | 2023-05-25T11:32:50.274138 | 2023-05-17T06:08:29 | 2023-05-17T06:08:29 | 21,539,459 | 0 | 0 | null | 2023-05-23T00:40:51 | 2014-07-06T12:20:36 | Python | UTF-8 | Python | false | false | 2,427 | py | from decimal import Decimal
from enum import Enum
from dataclasses import dataclass, fields
from datetime import datetime
from typing import Dict
from src.infrastructure.errors import InvalidReportRowError
@dataclass
class ReportRow:
    """ReportRow is a raw CSV report row parsed into a dataclass.

    Field names mirror the columns of an Exante transaction report; use
    ``from_dict`` to build a row from one parsed CSV record.
    """
    class OperationType(Enum):
        """Names reflect Exante Transaction Report 'Operation type' column"""
        UNKNOWN = "UNKNOWN"
        TRADE = "TRADE"
        COMMISSION = "COMMISSION"
        FUNDING_WITHDRAWAL = "FUNDING/WITHDRAWAL"
        AUTOCONVERSION = "AUTOCONVERSION"
        DIVIDEND = "DIVIDEND"
        TAX = "TAX"
        US_TAX = "US TAX"
        CORPORATE_ACTION = "CORPORATE ACTION"
        ISSUANCE_FEE = "ISSUANCE FEE"
        STOCK_SPLIT = "STOCK SPLIT"
    transaction_id: int
    account_id: str
    symbol_id: str
    operation_type: OperationType
    when: datetime
    sum: Decimal
    asset: str
    eur_equivalent: Decimal
    comment: str
    def __post_init__(self):
        """Validate basic field invariants right after construction."""
        if self.transaction_id < 0:
            raise InvalidReportRowError(f"transaction_id should be >= 0, got: {self.transaction_id}")
        if self.account_id == "":
            raise InvalidReportRowError("account_id should not be empty")
        if self.symbol_id == "":
            raise InvalidReportRowError("symbol_id should not be empty")
        if self.asset == "":
            raise InvalidReportRowError("asset should not be empty")
        # actually, sum can be 0 for COMMISSION
        # if self.sum == 0:
        #     raise InvalidReportRowError("sum should not be zero")
    @classmethod
    def from_dict(cls, d: Dict[str, str]):
        """Build a ReportRow from one raw CSV record.

        Raises InvalidReportRowError for missing keys or malformed values.
        """
        # Decimal() signals bad numeric text with decimal.InvalidOperation,
        # which derives from ArithmeticError -- NOT ValueError -- so it must
        # be caught explicitly or malformed Sum / EUR-equivalent columns
        # escape as a raw exception instead of InvalidReportRowError.
        from decimal import InvalidOperation
        try:
            return cls(
                transaction_id=int(d["Transaction ID"]),
                account_id=d["Account ID"],
                symbol_id=d["Symbol ID"],
                operation_type=ReportRow.OperationType(d["Operation type"]),
                when=datetime.strptime(d["When"], "%Y-%m-%d %H:%M:%S"),
                sum=Decimal(d["Sum"]),
                asset=d["Asset"],
                eur_equivalent=Decimal(d["EUR equivalent"]),
                comment=d["Comment"],
            )
        except (KeyError, ValueError, InvalidOperation) as e:
            raise InvalidReportRowError from e
    def __str__(self) -> str:
        """Multi-line dump: one ``name = value`` line per dataclass field."""
        lines = [f"{field.name} = {getattr(self, field.name)}" for field in fields(self)]
        return "\n".join(lines)
| [
"3demaniac@gmail.com"
] | 3demaniac@gmail.com |
7c80d929692aa65b1400c04ded4efe8f817eae4c | 2e60017779c5c286629ab5a3a7aeb27a6b19a60b | /python/problem_48.py | 704454c508774c17e70776fe8245c09cd9169b94 | [] | no_license | jamesjiang52/10000-Lines-of-Code | f8c7cb4b8d5e441693f3e0f6919731ce4680f60d | 3b6c20b288bad1de5390ad672c73272d98e93ae0 | refs/heads/master | 2020-03-15T03:50:38.104917 | 2018-05-07T04:41:52 | 2018-05-07T04:41:52 | 131,952,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | if __name__ == '__main__':
    import sys
    # Make the shared ``progress`` helper importable when run as a script.
    sys.path.insert(0, 'C:\\Users\\James Jiang\\Documents\\Project Euler')
    from progress import Progress
# NOTE(review): Progress is only imported under the __main__ guard above,
# yet it is used unconditionally below -- importing this module (rather
# than running it) would raise NameError here; confirm intended usage.
# answers_list[n] holds the known answer to problem n; index 0 is padding.
answers_list = ['dummy']
with open('C:\\Users\\James Jiang\\Documents\\Project Euler\\answers.txt') as answers:
    for line in answers:
        answers_list.append(int(line))
progress_ = Progress("Problem 048: Self powers", 0, 1000)
sum_power = 0
# Problem 48: sum of self powers 1^1 + 2^2 + ... + 1000^1000.
for i in range(1, 1001):
    progress_.count = i
    progress_.progress()
    sum_power += i**i
# The puzzle asks only for the last ten digits of the sum; compare them
# against the recorded answer for problem 48.
progress_.count = int(str(sum_power)[-10:])
progress_.total = answers_list[48]
progress_.progress()
if __name__ == '__main__':
    input()
| [
"jamesjiang52@gmail.com"
] | jamesjiang52@gmail.com |
dd23bdde62f4bfd0ad65c5a3b1ba35c4b11db1ee | 0a6c04ce9a83c983558bf2a9b0622c0076b6b6c4 | /collab_app/migrations/0004_auto_20200301_1307.py | 50bdf63652ca7d923d08b6cb80c6723d53e38497 | [] | no_license | madhu0309/collaboratory | f4384affa8a489a1dc5b2614ac83d8ed2547dae1 | 5217d713d2a174e868a26ac9eb00836d006a09ad | refs/heads/master | 2022-12-14T23:13:47.816593 | 2020-03-18T17:43:56 | 2020-03-18T17:43:56 | 235,501,050 | 1 | 0 | null | 2022-12-08T03:50:22 | 2020-01-22T04:52:31 | JavaScript | UTF-8 | Python | false | false | 764 | py | # Generated by Django 3.0.2 on 2020-03-01 13:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('collab_app', '0003_remove_answer_votes'),
]
operations = [
migrations.AddField(
model_name='answer',
name='num_vote_down',
field=models.PositiveIntegerField(db_index=True, default=0),
),
migrations.AddField(
model_name='answer',
name='num_vote_up',
field=models.PositiveIntegerField(db_index=True, default=0),
),
migrations.AddField(
model_name='answer',
name='vote_score',
field=models.IntegerField(db_index=True, default=0),
),
]
| [
"madhu@micropyramid.com"
] | madhu@micropyramid.com |
c82c18ec7ff65774ec97137cad3ce006dd7fbfb1 | 7b280b947f639959bdd034628b433c3b27ef7b91 | /first/contact.py | a7afb6069609cfb7a2478a56d0b8ead2389277c5 | [] | no_license | amaurirg/tutoriais_flask | c0d5e523a16c05e76e4573c02bbe50bef66e28f0 | e8aa6c8e89f1400f0be34204cb1cbb456bfb04b5 | refs/heads/master | 2020-03-31T07:56:22.888257 | 2018-10-08T07:56:42 | 2018-10-08T07:56:42 | 152,039,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | from flask import Blueprint, render_template, request, abort, current_app
bp = Blueprint('contact', __name__, url_prefix='/contact')
@bp.route("/", methods=['GET', 'POST'])
def contact():
if request.method == "GET":
return render_template('contact.html')
# processar dados
# print(request.form)
name = request.form.get('name')
message = request.form.get('message')
# validar dados
if not name or not message:
abort(400, 'Formulário inválido!')
# banco de dados
current_app.db.messages.insert_one({'name': name, 'message': message})
return "Sua mensagem foi enviada com sucesso!"
def configure(app):
app.register_blueprint(bp)
| [
"amaurirg@terra.com.br"
] | amaurirg@terra.com.br |
521fcf8431b2f9adc31ed607139f229a7a6b82d1 | 5873213f0615c13d26c389d8e6aff0291e639d51 | /manage.py | eb5d140281a83806bc9b6fd1521585ff4293510d | [
"MIT"
] | permissive | conferency/conf-panda | 15d9645d5834b78ea27560c58d15a0fe628749ab | d69094174e880b771cd1a5cad981f65374008359 | refs/heads/master | 2020-05-18T16:03:56.716017 | 2019-05-18T04:03:55 | 2019-05-18T04:03:55 | 184,514,509 | 0 | 2 | MIT | 2019-05-18T04:03:57 | 2019-05-02T03:14:46 | JavaScript | UTF-8 | Python | false | false | 7,434 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import glob
from app import create_app, db
from app.utils.fakedata import generate_test_confs, generate_fake_tickets, \
generate_test_users, generate_fake_papers, generate_fake_reviews, \
generate_fake_transactions, generate_fake_schedule, \
generate_default_addons, generate_admin, generate_fake_confs, \
generate_main_conf
from app.models import User, Follow, Role, Permission, Post, Comment, Paper, \
Review, PaperStatus, Invitation, Configuration, Conference, Ticket, \
EmailTemplate, JoinTrack, Author, TicketTransaction, Track, \
Registration, FormConfiguration, PromoCode, Product, ProductOption, \
Payout, Website, Page, UserDoc, Todo, EventLog, DelegateReview, \
ConferenceSchedule, Session, ReviewPreference, RequestLog, \
ConferencePayment, ConferenceTransaction, ConferenceAddon, ReviewComment, \
FavSession, TicketPrice, paper_reviewer, paper_author
from flask_script import Manager, Shell, Server
from flask_migrate import Migrate, MigrateCommand
from config import config
COV = None
if os.environ.get('FLASK_COVERAGE'):
import coverage
COV = coverage.coverage(branch=True, include='app/*')
COV.start()
print('Test Coverage Analysis Starting')
if os.path.exists('.env'):
print('Importing environment from .env...')
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
# get config
app = create_app(os.getenv('CONF_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
@migrate.configure
def configure_alembic(c):
    """Tweak the Alembic config object before Flask-Migrate uses it."""
    # modify config object
    # compare_type=True makes autogenerated migrations detect column-type
    # changes, not just added/removed columns.
    c.set_main_option('compare_type', 'True')
    return c
def make_shell_context():
    """Build the namespace for ``python manage.py shell``.

    Exposes the app, the db handle and every model/table so interactive
    sessions need no manual imports.
    """
    return dict(app=app, db=db, User=User, Follow=Follow, Role=Role,
                Permission=Permission, Post=Post, Todo=Todo, Comment=Comment,
                Paper=Paper, Review=Review, PaperStatus=PaperStatus,
                Invitation=Invitation, Configuration=Configuration,
                Conference=Conference, Track=Track,
                EmailTemplate=EmailTemplate, JoinTrack=JoinTrack,
                Author=Author, ConferenceSchedule=ConferenceSchedule,
                TicketTransaction=TicketTransaction, Ticket=Ticket,
                Registration=Registration, FormConfiguration=FormConfiguration,
                PromoCode=PromoCode, Product=Product, Session=Session,
                ProductOption=ProductOption, Payout=Payout, Website=Website,
                Page=Page, UserDoc=UserDoc, EventLog=EventLog,
                DelegateReview=DelegateReview,
                ReviewPreference=ReviewPreference, RequestLog=RequestLog,
                ConferencePayment=ConferencePayment,
                ConferenceTransaction=ConferenceTransaction,
                ConferenceAddon=ConferenceAddon, ReviewComment=ReviewComment,
                FavSession=FavSession, TicketPrice=TicketPrice,
                paper_reviewer=paper_reviewer, paper_author=paper_author)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('runserver', Server(threaded=True))
@manager.command
def test_logging():
    """Test logging."""
    # Emits one record at each of two levels so the configured handlers
    # and formatters can be verified in the log output.
    app.logger.error('This is a error log test')
    app.logger.info('This is a info log test')
@manager.command
def test(coverage=False):
    """Run the unit tests."""
    # enable test coverage
    # Re-exec the interpreter with FLASK_COVERAGE set so the module-level
    # coverage hook at the top of this file starts before the app import.
    if coverage and not os.environ.get('FLASK_COVERAGE'):
        import sys
        os.environ['FLASK_COVERAGE'] = '1'
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    print("**************Testing Started**********")
    # run the app in testing configuration
    app.config.from_object(config['testing'])
    config['testing'].init_app(app)
    # Remove the sqlite database files if exist
    for fl in glob.glob('data-test.sqlite'):
        os.remove(fl)
        print('old test sqlite database removed')
    deploy()  # redeploy the database
    fakedata()  # generate the fakedata
    import unittest
    tests = unittest.TestLoader().discover('tests')
    result = unittest.TextTestRunner(verbosity=2).run(tests).wasSuccessful()
    # generate test coverage report
    if COV:
        COV.stop()
        COV.save()
        print('Test Coverage Summary:')
        COV.report()
        basedir = os.path.abspath(os.path.dirname(__file__))
        covdir = os.path.join(basedir, 'tmp/coverage')
        COV.html_report(directory=covdir)
        print('HTML version: file://%s/index.html' % covdir)
        COV.erase()
    # the exit code is used for CircleCI
    import sys
    if result:  # tests passed
        sys.exit(0)
    else:  # tests failed
        sys.exit(1)
@manager.command
def profile(length=25, profile_dir=None):
    """Start the application under the code profiler."""
    # NOTE(review): werkzeug.contrib was removed in Werkzeug 1.0; on newer
    # versions this lives at werkzeug.middleware.profiler -- confirm the
    # pinned Werkzeug version before upgrading.
    from werkzeug.contrib.profiler import ProfilerMiddleware
    app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],
                                      profile_dir=profile_dir)
    app.run()
# run the db migration script
# this creates all tables when first run, after that
# if the database has no changes, nothing happens
@manager.command
def deploy():
    """Initialize the database and populate init data."""
    from flask_migrate import upgrade
    upgrade()  # upgrade to the latest db schema
    # setup necessary data to initialize database
    # The 'main' conference doubles as a sentinel: its presence means the
    # seed data below was already loaded on a previous deploy.
    if Conference.query.filter_by(short_name='main').first():
        print('database already initialized')
    else:
        # add registration form questions
        FormConfiguration.insert_formConfiguration()
        Role.insert_roles()  # create user roles
        generate_main_conf()  # generate default main conference
        generate_admin()  # generate the site admin
# Caution!!!: this reset db migration and related sqlite files
# The name is designed on purpose to highlight the potential danger
@manager.command
def reset_db_danger():
    """Reset db migration and delete all related files.

    Destroys the local migration history and every ``*.sqlite`` file, then
    re-creates the migration repository and an initial revision.
    """
    # The flask.ext.* namespace was removed in Flask 1.0; import the
    # extension package directly, matching the `from flask_migrate import`
    # style used elsewhere in this file (see deploy()).
    from flask_migrate import init, migrate
    # Remove the migration folder if exist
    if os.path.exists('migrations'):
        shutil.rmtree('migrations')
    # Remove the sqlite database files if exist
    for fl in glob.glob('*.sqlite'):
        os.remove(fl)
    # Reset Migration Database
    init()
    # migrate database to latest revision
    migrate(message='init')
@manager.command
def testconfs(email='harryjwang@gmail.com'):
    """Generate fake pending confs."""
    # ``email`` is forwarded to the generator along with the count of 10.
    generate_fake_confs(10, email)  # create 10 pending conferences
@manager.command
def fakedata():
    """Generate fake testing data."""
    # The chair account is a sentinel: its presence means seeding already ran.
    if User.query.filter_by(email='chair@conferency.com').first():
        print ('fake data already generated')
    else:
        # Order matters: users/confs must exist before papers and reviews.
        generate_test_confs()  # load testing confs and tracks
        generate_fake_tickets()  # create fake tickets
        generate_test_users()  # create named fake users
        # generate_fake_users(100) # create random users
        # add_self_follows() # create self-follows for all users
        generate_fake_papers(100)  # create random papers
        generate_fake_reviews()  # create random reviews
        generate_fake_transactions()  # create fake tickets
        generate_fake_schedule()
        generate_default_addons()
if __name__ == '__main__':
manager.run()
| [
"harryjwang@gmail.com"
] | harryjwang@gmail.com |
d12502e37805d16aa3555932c6c6d5b5764cea57 | 6c5daf5133656a33574dc2f5b62b9f1a1bdf1390 | /linear programming/gurobi/examples/workforce2.py | d79f3916924e23f35f65e3328dbe22c0e6552f91 | [] | no_license | RobinChen121/Python-Practices | 6c10b721dce3a8d2b76e190959d0940c52f0d1cc | 85bd9ad30c245dd62dc7ea837f964eaecbe24ed9 | refs/heads/master | 2023-08-31T10:08:01.613828 | 2023-08-27T14:51:46 | 2023-08-27T14:51:46 | 142,564,793 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,945 | py | #!/usr/bin/python
# Copyright 2018, Gurobi Optimization, LLC
# Assign workers to shifts; each worker may or may not be available on a
# particular day. If the problem cannot be solved, use IIS iteratively to
# find all conflicting constraints.
from gurobipy import *
# Number of workers required for each shift
shifts, shiftRequirements = multidict({
"Mon1": 3,
"Tue2": 2,
"Wed3": 4,
"Thu4": 4,
"Fri5": 5,
"Sat6": 6,
"Sun7": 5,
"Mon8": 2,
"Tue9": 2,
"Wed10": 3,
"Thu11": 4,
"Fri12": 6,
"Sat13": 7,
"Sun14": 5 })
# Amount each worker is paid to work one shift
workers, pay = multidict({
"Amy": 10,
"Bob": 12,
"Cathy": 10,
"Dan": 8,
"Ed": 8,
"Fred": 9,
"Gu": 11 })
# Worker availability
availability = tuplelist([
('Amy', 'Tue2'), ('Amy', 'Wed3'), ('Amy', 'Fri5'), ('Amy', 'Sun7'),
('Amy', 'Tue9'), ('Amy', 'Wed10'), ('Amy', 'Thu11'), ('Amy', 'Fri12'),
('Amy', 'Sat13'), ('Amy', 'Sun14'), ('Bob', 'Mon1'), ('Bob', 'Tue2'),
('Bob', 'Fri5'), ('Bob', 'Sat6'), ('Bob', 'Mon8'), ('Bob', 'Thu11'),
('Bob', 'Sat13'), ('Cathy', 'Wed3'), ('Cathy', 'Thu4'), ('Cathy', 'Fri5'),
('Cathy', 'Sun7'), ('Cathy', 'Mon8'), ('Cathy', 'Tue9'), ('Cathy', 'Wed10'),
('Cathy', 'Thu11'), ('Cathy', 'Fri12'), ('Cathy', 'Sat13'),
('Cathy', 'Sun14'), ('Dan', 'Tue2'), ('Dan', 'Wed3'), ('Dan', 'Fri5'),
('Dan', 'Sat6'), ('Dan', 'Mon8'), ('Dan', 'Tue9'), ('Dan', 'Wed10'),
('Dan', 'Thu11'), ('Dan', 'Fri12'), ('Dan', 'Sat13'), ('Dan', 'Sun14'),
('Ed', 'Mon1'), ('Ed', 'Tue2'), ('Ed', 'Wed3'), ('Ed', 'Thu4'),
('Ed', 'Fri5'), ('Ed', 'Sun7'), ('Ed', 'Mon8'), ('Ed', 'Tue9'),
('Ed', 'Thu11'), ('Ed', 'Sat13'), ('Ed', 'Sun14'), ('Fred', 'Mon1'),
('Fred', 'Tue2'), ('Fred', 'Wed3'), ('Fred', 'Sat6'), ('Fred', 'Mon8'),
('Fred', 'Tue9'), ('Fred', 'Fri12'), ('Fred', 'Sat13'), ('Fred', 'Sun14'),
('Gu', 'Mon1'), ('Gu', 'Tue2'), ('Gu', 'Wed3'), ('Gu', 'Fri5'),
('Gu', 'Sat6'), ('Gu', 'Sun7'), ('Gu', 'Mon8'), ('Gu', 'Tue9'),
('Gu', 'Wed10'), ('Gu', 'Thu11'), ('Gu', 'Fri12'), ('Gu', 'Sat13'),
('Gu', 'Sun14')
])
# Model
m = Model("assignment")
# Assignment variables: x[w,s] == 1 if worker w is assigned to shift s.
# Since an assignment model always produces integer solutions, we use
# continuous variables and solve as an LP.
x = m.addVars(availability, ub=1, name="x")
# The objective is to minimize the total pay costs
m.setObjective(quicksum(pay[w]*x[w,s] for w,s in availability), GRB.MINIMIZE)
# Constraint: assign exactly shiftRequirements[s] workers to each shift s
reqCts = m.addConstrs((x.sum('*', s) == shiftRequirements[s]
                      for s in shifts), "_")
# Optimize
m.optimize()
status = m.status
if status == GRB.Status.UNBOUNDED:
    print('The model cannot be solved because it is unbounded')
    exit(0)
if status == GRB.Status.OPTIMAL:
    print('The optimal objective is %g' % m.objVal)
    exit(0)
if status != GRB.Status.INF_OR_UNBD and status != GRB.Status.INFEASIBLE:
    print('Optimization was stopped with status %d' % status)
    exit(0)
# do IIS
# Infeasible case: repeatedly compute an Irreducible Inconsistent Subsystem
# (IIS), drop one of its constraints, and re-solve until the LP is feasible,
# recording every constraint removed along the way.
print('The model is infeasible; computing IIS')
removed = []
# Loop until we reduce to a model that can be solved
while True:
    m.computeIIS()
    print('\nThe following constraint cannot be satisfied:')
    for c in m.getConstrs():
        if c.IISConstr:
            print('%s' % c.constrName)
            # Remove a single constraint from the model
            removed.append(str(c.constrName))
            m.remove(c)
            break  # re-solve after each single removal
    print('')
    m.optimize()
    status = m.status
    if status == GRB.Status.UNBOUNDED:
        print('The model cannot be solved because it is unbounded')
        exit(0)
    if status == GRB.Status.OPTIMAL:
        break
    if status != GRB.Status.INF_OR_UNBD and status != GRB.Status.INFEASIBLE:
        print('Optimization was stopped with status %d' % status)
        exit(0)
print('\nThe following constraints were removed to get a feasible LP:')
print(removed)
| [
"40953071+RobinChen121@users.noreply.github.com"
] | 40953071+RobinChen121@users.noreply.github.com |
9fbb49ae597c0575ccb1b43a4264273f6ae1d2df | 4b7d5c8824df4462a338993efcdfa3b17199ff5b | /基础/day8/logging_mod.py | 9016888efa6bae9265a37e4dcdd2363dd0e69449 | [] | no_license | kobe24shou/python | 9c287babfb357e7f650fab453f3e60614b7a71fc | f78f147101f182207a69f0dc8e1595b54280164a | refs/heads/master | 2021-06-02T12:40:59.424542 | 2020-06-28T06:13:51 | 2020-06-28T06:13:51 | 101,620,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #!/usr/bin/env python
# -*-coding:utf-8-*-
# Author:ls
# aishou24@gmail.com
# date:2018/6/10
import logging
# basicConfig: configure the log level, the record format, and the output file.
logging.basicConfig(level=logging.DEBUG, # log level DEBUG: emit every record
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='test.log',
                    filemode='a') # 'w' truncates the file each run, 'a' appends
# Example of a record written to test.log by the format above:
# Sun, 10 Jun 2018 16:56:33 logging_mod.py[line:15] DEBUG debug message
# Emit one record per severity level, least to most severe:
logging.debug('debug message')
logging.info('info message')
logging.warning('warning message')
logging.error('error message')
logging.critical('critical message')
| [
"aishou24@gmail.com"
] | aishou24@gmail.com |
ba2aa6eceb577173190bad4307320afd84789292 | 716abd9e5ba4b72b72cc5f724a6cc0a6ad4390d1 | /11-Loops - for and while loops with break, continue and pass/49-Simple-practice-with-for-loop.py | 52de14b4a76bdbac60ab24a638e83c0cd0010073 | [] | no_license | devopstasks/PythonScripting | ac45edd72dc134ec3539b962f02dfc866f365ecf | 48bc37733ae6b3be4e2d64909ffe0962b6908518 | refs/heads/master | 2023-03-29T11:18:01.329452 | 2021-04-07T03:25:20 | 2021-04-07T03:25:20 | 350,388,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | '''
============
Simple practice with forloop
=> Read a string and print chars and their index values
============
'''
# str=input("Enter a string: ")   # uncomment to read the string from the user
# An earlier (removed) variant used str.index(each) to recover the position;
# str.index() returns the index of the FIRST occurrence only, so it reports
# wrong positions as soon as a character repeats. enumerate() yields
# (index, char) pairs directly, needs no manual counter, and is always correct.
text = "python"
for index, each in enumerate(text):
    # Same output format as before: "<char>--><index>"
    print(f'{each}-->{index}')
| [
"rpadhan2015@gmail.com"
] | rpadhan2015@gmail.com |
92ddc4f26addab8501de36d65fb48ec5d2eecd95 | 0b5383d3099d62cb5e5b2f197bf4d648b51e3a1d | /flask_projects/Discover_Flask/project/users/form.py | 5c226e9142c70f9a9267b23d3dd098079fb7633a | [] | no_license | Sysa/py_samples | 386134d160dad2be6797d415c5cc6a657ef9e375 | 957a77e6601106e1917d4931784a25277478eeb8 | refs/heads/master | 2021-01-13T09:15:38.395169 | 2016-10-31T13:29:41 | 2016-10-31T13:29:41 | 72,438,805 | 1 | 0 | null | 2016-10-31T13:27:58 | 2016-10-31T13:27:58 | null | UTF-8 | Python | false | false | 862 | py | from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Length, Email, EqualTo
class LoginForm(Form):
    """Credentials form for signing an existing user in."""
    # Both fields are mandatory; no length limits are enforced at login time.
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
class RegisterForm(Form):
    """Sign-up form: username, e-mail address, and a double-entered password."""
    # Required, 3-25 characters.
    username = StringField(
        'username',
        validators=[DataRequired(), Length(min=3, max=25)]
    )
    # Must parse as an e-mail; message=None keeps the validator's default text.
    email = StringField(
        'email',
        validators=[DataRequired(), Email(message=None), Length(min=6, max=40)]
    )
    # Required, 6-25 characters; must match `confirm` below.
    password = PasswordField(
        'password',
        validators=[DataRequired(), Length(min=6, max=25)]
    )
    # Repeat-password field; EqualTo ties its value to `password`.
    confirm = PasswordField(
        'Repeat password',
        validators=[
            DataRequired(), EqualTo('password', message='Passwords must match.')
        ]
) | [
"meth787@gmail.com"
] | meth787@gmail.com |
4c2698dc3814d58bc9b6639b43b0bb2be5e29d8a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/316/89621/submittedfiles/testes.py | b9143a54f8f0a7ae84e865f2e91e6678453e876f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
# BUG fixed: the original loop `while n%d>=0 and m%d>=0: d=d+1` never
# terminated - a remainder is never negative, so the condition was always
# true and d grew forever. Judging by the two-number input and the single
# printed result, the exercise presumably asks for the greatest common
# divisor (MDC) - confirm against the assignment statement.
n=int(input('Digite um numero:'))
m=int(input('Digite um numero:'))
d=1
# Keep the largest candidate that divides both n and m exactly.
for candidato in range(1, min(n, m) + 1):
    if n % candidato == 0 and m % candidato == 0:
        d = candidato
print(d)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
efbf8b4c9f0e58d826b8a8ef0a14b26e8a9afc91 | 736785ff9b31f83e1452117652dcdf5b22d9c39f | /main.py | 80029795276e8fe8f022c79e760d0d3a17e746c6 | [] | no_license | moskrc/mock_subprocess | 9fc99a43e7dedeabfd44c475c539c44f31c1c25b | 217023448f3c0caa3f69504ca499c3aca047905f | refs/heads/master | 2016-09-11T02:45:05.678718 | 2014-12-19T11:17:21 | 2014-12-19T11:17:21 | 28,226,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | import f1
import f2
def superfunction():
    """Invoke ``action()`` on each plug-in module and collect the results."""
    plugins = (f1, f2)
    return [plugin.action() for plugin in plugins]
if __name__ == '__main__':
print superfunction() | [
"moskrc@gmail.com"
] | moskrc@gmail.com |
2b83412570a153958decb635c6bfddbd7e7c41df | 6d3a1f9fa0f56c1081e15f1c8a26e1829067779c | /Class 16 (OOP coding)/inheritance_polymorphism_encapsulation .py | 60e455e8c6d2c0b596de53f0c6fd099ea3171355 | [] | no_license | siyam04/python-course_materials | e813e0072eba325ef52054f06516fdc1bb2cf0a4 | 9627c85a083b9c0e38604ea1fe021428ceb3fcae | refs/heads/master | 2022-04-07T17:54:23.550256 | 2020-03-03T07:34:41 | 2020-03-03T07:34:41 | 155,817,366 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,320 | py |
########################################## Inheritance ########################################
# Parent class
class Bird:
    """Base class: announces construction and offers whoisThis()/swim()."""
    def __init__(self):
        print("Bird is ready")
    def whoisThis(self):
        # Identifies the concrete animal; subclasses override this.
        print("Bird")
    def swim(self):
        print("Swim faster")
# child class
class Penguin(Bird):
    """Inherits swim() from Bird, overrides whoisThis(), adds run()."""
    def __init__(self):
        # call super() function
        # Runs Bird.__init__ first, so both "ready" lines are printed.
        super().__init__()
        print("Penguin is ready")
    def whoisThis(self):
        print("Penguin")
    def run(self):
        # New behavior introduced by the subclass (not present on Bird).
        print("Run faster")
# Body
peggy = Penguin()
peggy.whoisThis()
peggy.swim()
peggy.run()
"""
Output:
------------------
Bird is ready
Penguin is ready
Penguin
Swim faster
Run faster
------------------
In the above program, we created two classes i.e. Bird (parent class) and Penguin (child class).
The child class inherits the functions of parent class. We can see this from swim() method.
Again, the child class modified the behavior of parent class. We can see this from whoisThis() method.
Furthermore, we extend the functions of parent class, by creating a new run() method.
Additionally, we use super() function before __init__() method.
This is because we want to pull the content of __init__() method from the parent class into the
child class.
"""
########################################## Encapsulation ########################################
class Computer:
    """Demonstrates private state: __maxprice is name-mangled by the class."""
    def __init__(self):
        # Double underscore triggers name mangling: stored as _Computer__maxprice.
        self.__maxprice = 900
    def sell(self):
        print("Selling Price: {}".format(self.__maxprice))
    def setMaxPrice(self, price):
        # Setter: the sanctioned way to change the private price.
        self.__maxprice = price
# Body
c = Computer()
c.sell()
# change the price
# (no effect on sell(): outside the class this creates a NEW attribute
# literally named "__maxprice", while the methods keep reading the
# mangled _Computer__maxprice)
c.__maxprice = 1000
c.sell()
# using setter function
c.setMaxPrice(1000)
c.sell()
"""
Output:
--------------------
Selling Price: 900
Selling Price: 900
Selling Price: 1000
--------------------
In the above program, we defined a class Computer. We use __init__() method to store the maximum
selling price of computer. We tried to modify the price. However, we can’t change it because
Python treats the __maxprice as private attributes. To change the value, we used a setter
function i.e setMaxPrice() which takes price as parameter.
"""
########################################## Polymorphism ########################################
class Parrot:
    """One of two unrelated classes sharing the fly()/swim() interface."""
    def fly(self):
        print("Parrot can fly")
    def swim(self):
        print("Parrot can't swim")
class Penguin:
    """Same method names as Parrot but different behavior - duck typing."""
    def fly(self):
        print("Penguin can't fly")
    def swim(self):
        print("Penguin can swim")
# common interface
def flying_test(bird):
    # Works with ANY object exposing fly(); no inheritance relation required.
    bird.fly()
# instantiate objects
blu = Parrot()
peggy = Penguin()
# passing the object
flying_test(blu)
flying_test(peggy)
"""
Output:
------------------
Parrot can fly
Penguin can't fly
------------------
In the above program, we defined two classes Parrot and Penguin. Each of them have common method
fly() method. However, their functions are different. To allow polymorphism, we created common
interface i.e flying_test() function that can take any object. Then, we passed the objects
blu and peggy in the flying_test() function, it ran effectively.
"""
| [
"galib.abdullah04@gmail.com"
] | galib.abdullah04@gmail.com |
a20725d5d2ebeee77dd60da9f8c772ea992798ff | a1119965e2e3bdc40126fd92f4b4b8ee7016dfca | /trunk/repy/tests/ut_repytests_testfilehash.py | 1a5946da31b5ba95584d7d6e2132c62f32cb7260 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | SeattleTestbed/attic | 0e33211ddf39efdbcf5573d4fc7fa5201aa7310d | f618a962ce2fd3c4838564e8c62c10924f5df45f | refs/heads/master | 2021-06-10T23:10:47.792847 | 2017-05-15T12:05:43 | 2017-05-15T12:05:43 | 20,154,061 | 0 | 1 | null | 2014-10-16T17:21:06 | 2014-05-25T12:34:00 | Python | UTF-8 | Python | false | false | 377 | py | #pragma repy
"""
Files should not be hashable...
"""
if callfunc == "initialize":
myfileobj = file('junk_test.out','w')
try:
mydict = {}
try:
mydict[myfileobj] = 7
except AttributeError:
# I should get an exception here...
pass
else:
print 'files are hashable!'
finally:
myfileobj.close()
removefile('junk_test.out')
| [
"USER@DOMAIN"
] | USER@DOMAIN |
50e646d3d84227c5ed5c8135beca75e6d4bf19dd | a2960cf4ba59a3ccfcb8deb4b46e3b55e17843a1 | /app/api/v1/ports.py | 34fa38442f9a6c16e4ef5d690de1c34ac4c6d2b8 | [
"MIT"
] | permissive | cmz0228/backend | 4108869751d0ea03a6841c82cc123d116b79986a | 31a4fc7027a14147f971ca3d1097e957456daed3 | refs/heads/main | 2023-04-06T23:55:20.687821 | 2021-04-18T10:27:29 | 2021-04-18T10:27:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,119 | py | import typing as t
from fastapi import (
APIRouter,
HTTPException,
Request,
Depends,
Response,
encoders,
)
from fastapi.encoders import jsonable_encoder
from app.db.session import get_db
from app.db.schemas.port import (
PortOut,
PortOpsOut,
PortCreate,
PortEdit,
PortEditBase,
PortUserCreate,
PortUserEdit,
PortUserOut,
PortUserOpsOut,
)
from app.db.schemas.port_usage import (
PortUsageEdit,
PortUsageOut,
PortUsageCreate,
)
from app.db.crud.port import (
get_ports,
get_port,
create_port,
edit_port,
delete_port,
get_port_users,
add_port_user,
edit_port_user,
delete_port_user,
)
from app.db.crud.port_usage import create_port_usage, edit_port_usage
from app.db.crud.port_forward import delete_forward_rule
from app.db.crud.user import get_user
from app.core.auth import (
get_current_active_user,
get_current_active_superuser,
get_current_active_admin,
)
from app.utils.tasks import (
trigger_tc,
remove_tc,
trigger_iptables_reset,
trigger_port_clean,
)
ports_router = r = APIRouter()
@r.get(
    "/servers/{server_id}/ports",
    response_model=t.Union[t.List[PortOpsOut], t.List[PortOut]],
    response_model_exclude_none=False,
)
async def ports_list(
    response: Response,
    server_id: int,
    offset: int = 0,
    limit: int = 100,
    db=Depends(get_db),
    user=Depends(get_current_active_user),
):
    """
    Get the ports of a server, paginated by ``offset``/``limit``.

    Admins receive the richer ``PortOpsOut`` view; ordinary users get the
    restricted ``PortOut`` view. Fix: ``offset``/``limit`` used to be
    accepted but ignored, and the Content-Range header was hard-coded to
    "0-9/<total>"; both are now honored.
    """
    ports = get_ports(db, server_id, user)
    total = len(ports)
    page = ports[offset:offset + limit]
    # react-admin expects "start-end/total" here to render its pagination.
    last = offset + len(page) - 1 if page else offset
    response.headers["Content-Range"] = f"{offset}-{last}/{total}"
    if user.is_admin():
        return [PortOpsOut(**port.__dict__) for port in page]
    return [PortOut(**port.__dict__) for port in page]
@r.get(
    "/servers/{server_id}/ports/{port_id}",
    response_model=t.Union[PortOpsOut, PortOut],
    response_model_exclude_none=False,
    response_model_exclude_unset=False,
)
async def port_get(
    response: Response,
    server_id: int,
    port_id: int,
    db=Depends(get_db),
    user=Depends(get_current_active_user),
):
    """
    Get port by id.

    Admins get the full ``PortOpsOut`` view. Other users get ``PortOut``
    and only when they appear in the port's allowed_users; otherwise 404
    is raised (presumably so non-members cannot probe port existence).
    """
    port = get_port(db, server_id, port_id)
    if not port:
        raise HTTPException(status_code=404, detail="Port not found")
    if user.is_admin():
        return PortOpsOut(**port.__dict__)
    if not any(user.id == u.user_id for u in port.allowed_users):
        raise HTTPException(status_code=404, detail="Port not found")
    return PortOut(**port.__dict__)
@r.post(
    "/servers/{server_id}/ports",
    response_model=PortOpsOut,
    response_model_exclude_none=True,
)
async def port_create(
    request: Request,
    server_id: int,
    port: PortCreate,
    db=Depends(get_db),
    user=Depends(get_current_active_admin),
):
    """
    Create a new port on server (admin only), then schedule the tc task
    for the newly created port.
    """
    db_port = create_port(db, server_id, port)
    trigger_tc(db_port)
    return db_port
@r.put(
    "/servers/{server_id}/ports/{port_id}",
    response_model=PortOpsOut,
    response_model_exclude_none=True,
)
async def port_edit(
    request: Request,
    server_id: int,
    port_id: int,
    port: PortEdit,
    db=Depends(get_db),
    user=Depends(get_current_active_user),
):
    """
    Update an existing port.

    Admins may submit any ``PortEdit`` field. A non-admin must appear in
    the port's allowed_users (403 otherwise) and the payload is narrowed
    to the ``PortEditBase`` field subset before being applied. The tc
    task is re-run for the updated port.
    """
    db_port = get_port(db, server_id, port_id)
    if not db_port:
        raise HTTPException(status_code=404, detail="Port not found")
    if not user.is_admin():
        if not any(u.user_id == user.id for u in db_port.allowed_users):
            raise HTTPException(status_code=403, detail="Operation not allowed")
        # Restrict the non-admin's payload to the base field set.
        port = PortEditBase(**port.dict(exclude_unset=True))
    db_port = edit_port(db, db_port, port)
    trigger_tc(db_port)
    return db_port
@r.delete(
    "/servers/{server_id}/ports/{port_id}",
    response_model=PortOpsOut,
    response_model_exclude_none=True,
)
async def port_delete(
    request: Request,
    server_id: int,
    port_id: int,
    db=Depends(get_db),
    current_user=Depends(get_current_active_admin),
):
    """
    Delete an existing port on server (admin only).

    If the port still has a forward rule its server-side state is cleaned
    up first; the tc settings for the port number are removed at the end.
    """
    db_port = get_port(db, server_id, port_id)
    if not db_port:
        raise HTTPException(status_code=404, detail="Port not found")
    if db_port.forward_rule:
        trigger_port_clean(db_port.server, db_port)
    delete_port(db, server_id, port_id)
    # Remove the traffic-control settings keyed by this port's number.
    remove_tc(server_id, db_port.num)
    return db_port
@r.get(
    "/servers/{server_id}/ports/{port_id}/users",
    response_model=t.List[PortUserOpsOut],
)
async def port_users_get(
    request: Request,
    server_id: int,
    port_id: int,
    db=Depends(get_db),
    current_user=Depends(get_current_active_admin),
):
    """
    Get all users attached to a port (admin only).
    """
    port_users = get_port_users(db, server_id, port_id)
    # Encode ORM objects into JSON-compatible structures for the response.
    return jsonable_encoder(port_users)
@r.post(
    "/servers/{server_id}/ports/{port_id}/users",
    response_model=PortUserOpsOut,
)
async def port_user_add(
    request: Request,
    server_id: int,
    port_id: int,
    port_user: PortUserCreate,
    db=Depends(get_db),
    current_user=Depends(get_current_active_admin),
):
    """
    Attach a user to a port (admin only).

    Returns 400 when the referenced user id does not exist.
    """
    db_user = get_user(db, port_user.user_id)
    if not db_user:
        raise HTTPException(status_code=400, detail="User not found")
    port_user = add_port_user(db, server_id, port_id, port_user)
    return jsonable_encoder(port_user)
@r.put(
    "/servers/{server_id}/ports/{port_id}/users/{user_id}",
    response_model=PortUserOpsOut,
)
async def port_user_edit(
    request: Request,
    server_id: int,
    port_id: int,
    user_id: int,
    port_user: PortUserEdit,
    db=Depends(get_db),
    current_user=Depends(get_current_active_admin),
):
    """
    Edit an existing port user (admin only).

    Returns 400 when no such user is attached to the port.
    """
    port_user = edit_port_user(db, server_id, port_id, user_id, port_user)
    if not port_user:
        raise HTTPException(status_code=400, detail="Port user not found")
    return jsonable_encoder(port_user)
@r.delete(
    "/servers/{server_id}/ports/{port_id}/users/{user_id}",
    response_model=PortUserOut,
)
async def port_users_delete(
    request: Request,
    server_id: int,
    port_id: int,
    user_id: int,
    db=Depends(get_db),
    current_user=Depends(get_current_active_admin),
):
    """
    Delete a port user for port (admin only).
    """
    port_user = delete_port_user(db, server_id, port_id, user_id)
    return port_user
@r.post(
    "/servers/{server_id}/ports/{port_id}/usage",
    response_model=PortUsageOut,
)
async def port_usage_edit(
    server_id: int,
    port_id: int,
    port_usage: PortUsageEdit,
    db=Depends(get_db),
    user=Depends(get_current_active_admin),
):
    """
    Update a port's usage counters (admin only).

    When the update succeeds and all four submitted counters are zero,
    the caller is resetting the usage, so an iptables counter reset is
    scheduled for the affected port.
    """
    db_port_usage = edit_port_usage(db, port_id, port_usage)
    # Successful edit with download/upload and both accumulators at zero
    # means "reset"; mirror the reset on the firewall counters as well.
    if (
        db_port_usage
        and sum(
            [
                port_usage.download,
                port_usage.upload,
                port_usage.download_accumulate,
                port_usage.upload_accumulate,
            ]
        )
        == 0
    ):
        trigger_iptables_reset(db_port_usage.port)
    return db_port_usage
| [
"me@leishi.io"
] | me@leishi.io |
9906aeacea03508f3edd03e58a19465ae05d4766 | 1ec8734beba25739979cbd4a9414a95273cce6aa | /8.18/正则语法.py | 51c2934c93e1dc7af23cb1d4bd7816eaa0bb4200 | [] | no_license | MATATAxD/untitled1 | 4431e4bc504e74d9a96f54fd6065ce46d5d9de40 | 18463f88ce60036959aabedabf721e9d938bacfb | refs/heads/master | 2023-01-01T23:16:30.140947 | 2020-10-23T04:32:38 | 2020-10-23T04:32:38 | 306,529,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | import re
# --- Earlier exercises, kept commented out for reference --------------------
# Mobile number check. NOTE: the class [3,5,7,1,8] also matches a literal
# comma; it was presumably meant to be [13578].
# userInput=input('输入一个手机号')
# partern=r'^1[3,5,7,1,8]\d{9}$'
# result=re.search(partern,userInput)
# if result ==None:
#     print('不是手机号')
# else:
#     print('是一个正确的手机号')
#
# Landline check: optional "0" + 2-3 digit area code and dash, then 7 digits.
# userInput=input('输入一个座机号')
# partern=r'^(0\d{2,3}-)?\d{7}$'
# result=re.search(partern,userInput)
# if result == None:
#     print('不是座机号')
# else:
#     print('是一个正确的座机号')
# ----------------------------------------------------------------------------
# Active exercise: validate an 18-character mainland-China resident ID number.
#   [1-9]\d{5}                 6-digit region code (first digit non-zero)
#   [12]\d{3}                  4-digit birth year (1000-2999)
#   (0[1-9]|1[012])            birth month 01-12
#   (0[1-9]|[12][0-9]|3[01])   birth day 01-31
#   \d{3}[0-9xX]               3-digit sequence number + check digit (0-9/x/X)
userInput = input('输入一个身份证号')
pattern = r'^([1-9]\d{5}[12]\d{3}(0[1-9]|1[012])(0[1-9]|[12][0-9]|3[01])\d{3}[0-9xX])$'
result = re.search(pattern, userInput)
if result is None:  # idiomatic None test (original used the unidiomatic `== None`)
    print('不是身份证号码')
else:
    print('是一个正确的身份证号')
| [
"502513072@qq.com"
] | 502513072@qq.com |
434f9f64b285e82a6dac620f02b1f9b1d0e86234 | 1698fe3ff15a6737c70501741b32b24fe68052f4 | /two-scoops-of-django-1.8-master/code/chapter_09_example_5.py | a3e490dcea97b2ebae7e56c6624897a2d81b32a1 | [] | no_license | menhswu/djangoapps | 4f3718244c8678640af2d2a095d20a405e337884 | 039a42aa9d1537e7beb4071d86bea7a42253d8b3 | refs/heads/master | 2023-03-04T03:56:01.070921 | 2021-01-28T07:35:02 | 2021-01-28T07:35:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,925 | py | """
Using This Code Example
=========================
The code examples provided are provided by Daniel Greenfeld and Audrey Roy of
Two Scoops Press to help you reference Two Scoops of Django: Best Practices
for Django 1.8. Code samples follow PEP-0008, with exceptions made for the
purposes of improving book formatting. Example code is provided "as is", and
is not intended to be, and should not be considered or labeled as "tutorial code".
Permissions
============
In general, you may use the code we've provided with this book in your programs
and documentation. You do not need to contact us for permission unless you're
reproducing a significant portion of the code or using it in commercial
distributions. Examples:
* Writing a program that uses several chunks of code from this course does not require permission.
* Selling or distributing a digital package from material taken from this book does require permission.
* Answering a question by citing this book and quoting example code does not require permission.
* Incorporating a significant amount of example code from this book into your product's documentation does require permission.
Attributions usually include the title, author, publisher and an ISBN. For
example, "Two Scoops of Django: Best Practices for Django 1.8, by Daniel
Roy Greenfeld and Audrey Roy Greenfeld. Copyright 2015 Two Scoops Press (ISBN-WILL-GO-HERE)."
If you feel your use of code examples falls outside fair use of the permission
given here, please contact us at info@twoscoopspress.org."""
# simple decorator template
import functools
def decorator(view_func):
    """Template for a Django view decorator.

    Passes the request straight through to *view_func* and returns its
    response unchanged; the marked spots are where request/response
    processing would be inserted. ``functools.wraps`` copies the wrapped
    view's metadata (name, docstring) onto the wrapper so introspection
    keeps working.
    """
    @functools.wraps(view_func)
    def wrapper(request, *args, **kwargs):
        # Pre-processing of the request (HttpRequest) would go here.
        result = view_func(request, *args, **kwargs)
        # Post-processing of the response (HttpResponse) would go here.
        return result
    return wrapper
| [
"jinxufang@tencent.com"
] | jinxufang@tencent.com |
41c00b612cd1bdc6862d390f2e3a72bc198fff45 | a94c446a0d9ce77df965674f63be54d54b2be577 | /raspy/bcm_2835_pwm_clock_divider.py | b929606aabbb14e10f0005a5afeecee08175d1b5 | [
"MIT"
] | permissive | cyrusbuilt/RasPy | 3434e02c2bff09ef9f3ff4995bda14edc781c14b | 1e34840cc90ea7f19317e881162209d3d819eb09 | refs/heads/master | 2020-03-18T20:19:27.426002 | 2018-08-03T17:07:25 | 2018-08-03T17:07:25 | 135,207,376 | 0 | 0 | MIT | 2018-08-03T17:07:26 | 2018-05-28T20:42:17 | Python | UTF-8 | Python | false | false | 515 | py | """This module provides clock divider constants for the BCM2835 chipset."""
# Accepted PWM clock divider values for the BCM2835: power-of-two divisors
# from 1 (fastest resulting PWM clock) up to 32768 (slowest). The resulting
# PWM clock is the chip's source clock divided by the chosen constant -
# confirm the source clock rate against the BCM2835 peripherals datasheet.
CLOCK_DIVIDER_1 = 1
CLOCK_DIVIDER_2 = 2
CLOCK_DIVIDER_4 = 4
CLOCK_DIVIDER_8 = 8
CLOCK_DIVIDER_16 = 16
CLOCK_DIVIDER_32 = 32
CLOCK_DIVIDER_64 = 64
CLOCK_DIVIDER_128 = 128
CLOCK_DIVIDER_256 = 256
CLOCK_DIVIDER_512 = 512
CLOCK_DIVIDER_1024 = 1024
CLOCK_DIVIDER_2048 = 2048
CLOCK_DIVIDER_4096 = 4096
CLOCK_DIVIDER_8192 = 8192
CLOCK_DIVIDER_16384 = 16384
CLOCK_DIVIDER_32768 = 32768
| [
"cyrusbuilt@gmail.com"
] | cyrusbuilt@gmail.com |
d694bff671f5e22a281d723afa307c1dc5ab3397 | 67d8173a716da10a7350213d98938aae9f2115ce | /ProgrammingCourses/CS61A/project/scheme/tests/19.py | e80c835acb0cc9ba1a1b160467141c731af8b1cc | [] | no_license | jxie0755/Learning_Python | 94490d41bdf93acf8396f843328e38b6da310b0f | 143422321cbc3715ca08f6c3af8f960a55887ced | refs/heads/master | 2021-11-02T22:47:35.790239 | 2021-09-26T04:26:23 | 2021-09-26T04:26:23 | 101,445,132 | 0 | 2 | null | 2019-02-19T15:48:44 | 2017-08-25T22:00:16 | Python | UTF-8 | Python | false | false | 2,160 | py | test = {
"name": "Problem 19",
"points": 2,
"suites": [
{
"cases": [
{
"code": r"""
scm> (let-to-lambda 1)
1
scm> (let-to-lambda 'a)
a
scm> (let-to-lambda '(+ 1 2))
(+ 1 2)
scm> (let-to-lambda '(let ((a 1)
.... (b 2))
.... (+ a b)))
((lambda (a b) (+ a b)) 1 2)
""",
"hidden": False,
"locked": False
},
{
"code": r"""
scm> '(quoted expressions remain the same)
(quoted expressions remain the same)
scm> (let-to-lambda '(quote (let ((a 1) (b 2)) (+ a b))))
(quote (let ((a 1) (b 2)) (+ a b)))
""",
"hidden": False,
"locked": False
}
],
"scored": True,
"setup": r"""
scm> (load 'questions)
""",
"teardown": "",
"type": "scheme"
},
{
"cases": [
{
"code": r"""
scm> '(lambda parameters not affected but body affected)
(lambda parameters not affected but body affected)
scm> (let-to-lambda '(lambda (let a b) (+ let a b)))
(lambda (let a b) (+ let a b))
scm> (let-to-lambda '(lambda (x) a (let ((a x)) a)))
(lambda (x) a ((lambda (a) a) x))
""",
"hidden": False,
"locked": False
},
{
"code": r"""
scm> (let-to-lambda '(let ((a (let ((a 2)) a))
.... (b 2))
.... (+ a b)))
((lambda (a b) (+ a b)) ((lambda (a) a) 2) 2)
scm> (let-to-lambda '(let ((a 1))
.... (let ((b a))
.... b)))
((lambda (a) ((lambda (b) b) a)) 1)
scm> (let-to-lambda '(+ 1 (let ((a 1)) a)))
(+ 1 ((lambda (a) a) 1))
""",
"hidden": False,
"locked": False
}
],
"scored": True,
"setup": r"""
scm> (load 'questions)
""",
"teardown": "",
"type": "scheme"
}
]
}
| [
"30805062+jxie0755@users.noreply.github.com"
] | 30805062+jxie0755@users.noreply.github.com |
fbc6337296c2ae2561d7e93f7aebcf43171a0297 | bcabd9b183bc011e1ccf7e367fbed0dcaa03eee6 | /1 PYTHON/3 TELUSKO/65_Binary_search.py | 91df698e604998a020488263b59b6aa37105ea3a | [] | no_license | rajeshsvv/Lenovo_Back | 287fe4da2c696aa248ec57a4c45c4f234f6ca9ed | 7e49e38aaf934c65f9992a78404d2b81a4cd0204 | refs/heads/master | 2022-12-23T16:44:41.488128 | 2019-08-29T10:00:10 | 2019-08-29T10:00:10 | 204,859,914 | 0 | 1 | null | 2022-12-10T11:50:31 | 2019-08-28T06:05:35 | Python | UTF-8 | Python | false | false | 505 | py | # in binary search u r values should be in sorted order
# Binary search: the input sequence must already be sorted in ascending order.
pos = -1  # module-level result slot: index of the most recent successful hit

def search(list, n):
    """Return True if *n* occurs in the sorted sequence *list*.

    Side effect: on success the module-level ``pos`` is set to the index of
    the match (kept for backward compatibility with existing callers that
    read it after calling search()).
    """
    # `global` replaces the original, obscure write through globals()['pos'].
    global pos
    low = 0
    high = len(list) - 1
    while low <= high:
        mid = (low + high) // 2
        if list[mid] == n:
            pos = mid
            return True
        if list[mid] < n:
            low = mid + 1   # target is in the upper half
        else:
            high = mid - 1  # target is in the lower half
    return False

list = [4, 7, 8, 12, 46, 99]
n = 12
if search(list, n):
    print("Found at", pos)
else:
    print("Not Found")
| [
"rajeshsvv01@gmail.com"
] | rajeshsvv01@gmail.com |
633624a2e73da3f8a48a008d6b0e0b666b7e34d5 | 3fe1a72d444a60582fe1e45349c03584e26f7238 | /karel_env/state_generator.py | 2508bbbdf3ba9b0758d5e88a3b87ed3093fdf6d2 | [
"MIT"
] | permissive | tedrepo/demo2program | 16f0b332a08ff8936439b19084cdf71092c8995d | 23464a69bfbf6fac9752fd423d14b03d37d1d1c6 | refs/heads/master | 2023-07-09T17:41:15.128227 | 2018-12-02T00:41:54 | 2018-12-02T00:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,243 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class KarelStateGenerator(object):
    """Randomly generates initial Karel world states as boolean grids."""
    def __init__(self, seed=None):
        # Dedicated RNG so generation is reproducible for a given seed.
        self.rng = np.random.RandomState(seed)
    # generate an initial env
    def generate_single_state(self, h=8, w=8, wall_prob=0.1):
        """Sample one random h x w world state.

        The state `s` is a boolean (h, w, 16) tensor. Channel usage as
        evidenced below: 0-3 one-hot Karel facing direction, 4 walls,
        5 "zero markers"; the 0..10 weights over channels 5..15 suggest
        channels 6+ encode marker counts 1..10 - confirm against the env.

        Returns (state, karel_y, karel_x, wall_count, total_marker_count).
        """
        s = np.zeros([h, w, 16]) > 0
        # Wall
        # Random interior walls with probability wall_prob, plus a solid border.
        s[:, :, 4] = self.rng.rand(h, w) > 1 - wall_prob
        s[0, :, 4] = True
        s[h-1, :, 4] = True
        s[:, 0, 4] = True
        s[:, w-1, 4] = True
        # Karel initial location
        # Rejection-sample a non-wall cell, then pick a random facing (0-3).
        valid_loc = False
        while(not valid_loc):
            y = self.rng.randint(0, h)
            x = self.rng.randint(0, w)
            if not s[y, x, 4]:
                valid_loc = True
                s[y, x, self.rng.randint(0, 4)] = True
        # Marker: num of max marker == 1 for now
        # ~10% of non-wall cells receive exactly one marker (channel 6).
        s[:, :, 6] = (self.rng.rand(h, w) > 0.9) * (s[:, :, 4] == False) > 0
        # Channel 5 flags cells with no marker, so channels 5.. form a
        # one-hot marker-count encoding for every cell.
        s[:, :, 5] = 1 - (np.sum(s[:, :, 6:], axis=-1) > 0) > 0
        assert np.sum(s[:, :, 5:]) == h*w, np.sum(s[:, :, :5])
        marker_weight = np.reshape(np.array(range(11)), (1, 1, 11))
        return s, y, x, np.sum(s[:, :, 4]), np.sum(marker_weight*s[:, :, 5:])
| [
"waltersun81@gmail.com"
] | waltersun81@gmail.com |
fff0e6483247918bc0215838062d0a78d3f2aa30 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/venues/karaoke_venue/karaoke_duet_individualsim_situation.py | 3c1e72aaa8387818454955a79a9a54b702283a3c | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,245 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\venues\karaoke_venue\karaoke_duet_individualsim_situation.py
# Compiled at: 2016-07-16 01:45:12
# Size of source mod 2**32: 2320 bytes
from sims4.tuning.instances import lock_instance_tunables
from situations.bouncer.bouncer_types import BouncerExclusivityCategory
from situations.situation import Situation
from situations.situation_complex import SituationState, TunableSituationJobAndRoleState, SituationComplexCommon, SituationStateData
from situations.situation_types import SituationCreationUIOption
class KaraokeDuetState(SituationState):
    """Sole state of the karaoke-duet situation; adds no behavior of its own."""
    pass
class KaraokeDuetSimSituation(SituationComplexCommon):
    """Situation for one Sim holding the singer role of a karaoke duet.

    NOTE: decompiled game code - names and structure come from bytecode.
    """
    INSTANCE_TUNABLES = {'karaoke_singer_job': TunableSituationJobAndRoleState(description='\n                The default job and role for a Sim in this situation. They only\n                have one role, so this is what will be given for them to do.\n                ')}
    REMOVE_INSTANCE_TUNABLES = Situation.NON_USER_FACING_REMOVE_INSTANCE_TUNABLES
    def __init__(self, *arg, **kwargs):
        (super().__init__)(*arg, **kwargs)
        # The Sim assigned to the singer job; set in _on_set_sim_job().
        self._duet_sim = None
    @classmethod
    def _get_tuned_job_and_default_role_state_tuples(cls):
        # Single job/role pair: the tuned karaoke singer.
        return [(cls.karaoke_singer_job.job, cls.karaoke_singer_job.role_state)]
    def _on_set_sim_job(self, sim, job_type):
        super()._on_set_sim_job(sim, job_type)
        # Remember which Sim received the (only) job for sim_of_interest().
        self._duet_sim = sim
    @classmethod
    def default_job(cls):
        return cls.karaoke_singer_job.job
    def start_situation(self):
        super().start_situation()
        # Enter the one and only state immediately.
        self._change_state(KaraokeDuetState())
    def sim_of_interest(self, sim_info):
        # True only for the Sim currently holding the singer job.
        if self._duet_sim is not None:
            if self._duet_sim.sim_info is sim_info:
                return True
        return False
    @classmethod
    def _states(cls):
        # State id 1 -> the single duet state.
        return (SituationStateData(1, KaraokeDuetState),)
lock_instance_tunables(KaraokeDuetSimSituation, exclusivity=(BouncerExclusivityCategory.NORMAL),
creation_ui_option=(SituationCreationUIOption.NOT_AVAILABLE),
_implies_greeted_status=False) | [
"cristina.caballero2406@gmail.com"
] | cristina.caballero2406@gmail.com |
a9e79ced7f79f55849f13742310819e73a64dfb1 | 09f8a3825c5109a6cec94ae34ea17d9ace66f381 | /cohesity_management_sdk/models/cassandra_protection_source.py | c79152ddb527ab066e9f0f7c688e1e1fb21e2b99 | [
"Apache-2.0"
] | permissive | cohesity/management-sdk-python | 103ee07b2f047da69d7b1edfae39d218295d1747 | e4973dfeb836266904d0369ea845513c7acf261e | refs/heads/master | 2023-08-04T06:30:37.551358 | 2023-07-19T12:02:12 | 2023-07-19T12:02:12 | 134,367,879 | 24 | 20 | Apache-2.0 | 2023-08-31T04:37:28 | 2018-05-22T06:04:19 | Python | UTF-8 | Python | false | false | 3,340 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Cohesity Inc.
import cohesity_management_sdk.models.cassandra_cluster
import cohesity_management_sdk.models.cassandra_keyspace
class CassandraProtectionSource(object):
    """Implementation of the 'CassandraProtectionSource' model.
    Specifies an Object representing Cassandra.
    Attributes:
        cluster_info (CassandraCluster): Information of a Cassandra cluster,
            only valid for an entity of type kCluster.
        keyspace_info (CassandraKeyspace): Information of a cassandra
            keyspace, only valid for an entity of type kKeyspace.
        name (string): Specifies the instance name of the Cassandra entity.
        mtype (TypeCassandraProtectionSourceEnum): Specifies the type of the
            managed Object in Cassandra Protection Source. Replication strategy
            options for a keyspace. 'kCluster' indicates a Cassandra cluster
            distributed over several physical nodes. 'kKeyspace' indicates a
            Keyspace enclosing one or more tables. 'kTable' indicates a Table
            in the Cassandra environment.
        uuid (string): Specifies the UUID for the Cassandra entity. Note : For
            each entity an ID unique within top level entity should be assigned
            by imanis backend. Example, UUID for a table can be the string
            <keyspace_name>.<table_name>
    """
    # Create a mapping from Model property names to API property names
    # (note: the Python attribute 'mtype' maps to the wire key 'type').
    _names = {
        "cluster_info":'clusterInfo',
        "keyspace_info":'keyspaceInfo',
        "name":'name',
        "mtype":'type',
        "uuid":'uuid',
    }
    def __init__(self,
                 cluster_info=None,
                 keyspace_info=None,
                 name=None,
                 mtype=None,
                 uuid=None,
                 ):
        """Constructor for the CassandraProtectionSource class"""
        # Initialize members of the class
        self.cluster_info = cluster_info
        self.keyspace_info = keyspace_info
        self.name = name
        self.mtype = mtype
        self.uuid = uuid
    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary
        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.
        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None
        # Extract variables from the dictionary
        # Nested models are deserialized recursively; absent keys become None.
        cluster_info = cohesity_management_sdk.models.cassandra_cluster.CassandraCluster.from_dictionary(dictionary.get('clusterInfo')) if dictionary.get('clusterInfo') else None
        keyspace_info = cohesity_management_sdk.models.cassandra_keyspace.CassandraKeyspace.from_dictionary(dictionary.get('keyspaceInfo')) if dictionary.get('keyspaceInfo') else None
        name = dictionary.get('name')
        mtype = dictionary.get('type')
        uuid = dictionary.get('uuid')
        # Return an object of this model
        return cls(
            cluster_info,
            keyspace_info,
            name,
            mtype,
            uuid
) | [
"naveena.maplelabs@cohesity.com"
] | naveena.maplelabs@cohesity.com |
c57bd4cdfecc79a54141289af0ac284ba85f3d3b | bc42b7700ccc0014282f943a20f968dc2172c4c5 | /Day6 : Mask-RCNN on Videos/mask_rcnn_videos.py | 6b1dbe56385fa9916b3bc62811e774e2ed2e25bb | [] | no_license | vgaurav3011/100-days-of-ML-Code-2 | 1a4b6836ac1b378caeed63a253a3d2b71bada32f | 339992040d807f1c382c858b53e35ed2699518d9 | refs/heads/master | 2022-04-27T07:30:10.797553 | 2020-04-22T07:37:53 | 2020-04-22T07:37:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,340 | py | import numpy as np
# import argparse
import random
import time
import cv2
import imutils
import os
# Mask R-CNN instance segmentation over a video file using OpenCV's dnn
# module: every detection above the confidence threshold gets a
# translucent class-colored mask plus a labeled bounding box, and the
# annotated frames are written out as a new video.
mask_rcnn = ''
visualize = False
confi = 0.5  # minimum detection confidence to keep a detection
threshold = 0.3  # cutoff for turning the soft mask into a binary mask
#defining the labels path and
# loading the coco class labels
labelsPath = 'assets/object_detection_classes_coco.txt'
# f = open("demofile.txt", "r")
LABELS = open(labelsPath , ).read().strip().split("\n")
# initializing a list of colors to represent each class label
np.random.seed(42)  # fixed seed so per-class colors are stable across runs
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
    dtype="uint8")
# loading the model
weightsPath = 'assets/frozen_inference_graph.pb'
configPath = 'assets/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt'
print("loading the model........")
net = cv2.dnn.readNetFromTensorflow(weightsPath, configPath)
video_dir = 'videos/2.mp4'
vs = cv2.VideoCapture(video_dir)
writer = None  # created lazily once the first frame's size is known
# frame count
try:
    # the frame-count property name differs between OpenCV 2.x and 3.x+
    prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \
        else cv2.CAP_PROP_FRAME_COUNT
    total = int(vs.get(prop))
    print("{} total frames in video".format(total))
# NOTE(review): bare except hides real errors too, not only the
# missing-property case -- consider narrowing.
except:
    print("could not determine # of frames in video")
    total = -1
# loop over frames from the video file stream
while True:
    # read the next frame from the file
    (grabbed, frame) = vs.read()
    # if the frame was not grabbed, then we have reached the end
    # of the stream
    if not grabbed:
        break
    # construct a blob from the input frame and then perform a
    # forward pass of the Mask R-CNN, giving us (1) the bounding box
    # coordinates of the objects in the image along with (2) the
    # pixel-wise segmentation for each specific object
    blob = cv2.dnn.blobFromImage(frame, swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    (boxes, masks) = net.forward(["detection_out_final",
        "detection_masks"])
    end = time.time()
    # loop over the number of detected objects
    for i in range(0, boxes.shape[2]):
        # extract the class ID of the detection along with the
        # confidence (i.e., probability) associated with the
        # prediction
        classID = int(boxes[0, 0, i, 1])
        confidence = boxes[0, 0, i, 2]
        # filter out weak predictions by ensuring the detected
        # probability is greater than the minimum probability
        if confidence > confi:
            # scale the bounding box coordinates back relative to the
            # size of the frame and then compute the width and the
            # height of the bounding box
            (H, W) = frame.shape[:2]
            box = boxes[0, 0, i, 3:7] * np.array([W, H, W, H])
            (startX, startY, endX, endY) = box.astype("int")
            boxW = endX - startX
            boxH = endY - startY
            # extract the pixel-wise segmentation for the object,
            # resize the mask such that it's the same dimensions of
            # the bounding box, and then finally threshold to create
            # a *binary* mask
            mask = masks[i, classID]
            mask = cv2.resize(mask, (boxW, boxH),
                interpolation=cv2.INTER_NEAREST)
            mask = (mask > threshold)
            # extract the ROI of the image but *only* extracted the
            # masked region of the ROI
            roi = frame[startY:endY, startX:endX][mask]
            # grab the color used to visualize this particular class,
            # then create a transparent overlay by blending the color
            # with the ROI
            color = COLORS[classID]
            blended = ((0.4 * color) + (0.6 * roi)).astype("uint8")
            # store the blended ROI in the original frame
            frame[startY:endY, startX:endX][mask] = blended
            # draw the bounding box of the instance on the frame
            color = [int(c) for c in color]
            cv2.rectangle(frame, (startX, startY), (endX, endY),
                color, 2)
            # draw the predicted label and associated probability of
            # the instance segmentation on the frame
            text = "{}: {:.4f}".format(LABELS[classID], confidence)
            cv2.putText(frame, text, (startX, startY - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    # check if the video writer is None
    if writer is None:
        # initialize our video writer
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter('output/output', fourcc, 30,
            (frame.shape[1], frame.shape[0]), True)
        # some information on processing single frame
        if total > 0:
            elap = (end - start)
            print("[INFO] single frame took {:.4f} seconds".format(elap))
            print("[INFO] estimated total time to finish: {:.4f}".format(
                elap * total))
    # write the output frame to disk
    writer.write(frame)
# release the file pointers
print("[INFO] cleaning up...")
writer.release()
vs.release()
| [
"you@example.com"
] | you@example.com |
db837659d0416abcd64e830e6dd62418e2d5388a | 01fa2aca31eb73a559d192fd29e44350f26a13a9 | /HAX/18.CocoJoe/script.module.lambdascrapers/lib/lambdascrapers/sources_scrubs/ko/drama4u.py | 1cb35e3200c5d19d766ec5ba2e8f07258beb26e7 | [
"Beerware"
] | permissive | RandomIntermition/k4y108837s | b4beedeff375645bd4fa9ad348631a9a9f3640b6 | e9115aad49795dfe30a96c278cedaf089abcc11d | refs/heads/master | 2022-05-01T18:45:57.298903 | 2022-03-30T03:41:08 | 2022-03-30T03:41:08 | 109,356,425 | 1 | 0 | null | 2019-11-08T02:20:47 | 2017-11-03T05:36:48 | Python | UTF-8 | Python | false | false | 5,121 | py | # -*- coding: UTF-8 -*-
# -Cleaned and Checked on 11-23-2018 by JewBMX in Scrubs.
# Only browser checks for active domains.
import re,urllib,urlparse
from resources.lib.modules import cleantitle,client,directstream,source_utils,dom_parser
class source:
    """Kodi/Lambda scraper source for 4udrama.com (Korean drama streams).

    Python 2 module (uses ``urllib.quote_plus`` / ``urlparse``). The
    broad ``except: return`` blocks are the scraper framework's
    convention: any parsing/network failure degrades to "no result".
    """
    def __init__(self):
        self.priority = 1
        self.language = ['ko']
        self.domains = ['4udrama.com'] # old drama4u.us
        self.base_link = 'https://4udrama.com'
        self.search_link = '/search?s=%s'
    def movie(self, imdb, title, localtitle, aliases, year):
        # Resolve a movie URL; falls back from the localized title to the
        # original title, then jumps straight to its first episode link.
        try:
            url = self.__search([localtitle] + source_utils.aliases_to_array(aliases))
            if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases))
            return self.__get_episode_link(url)
        except:
            return
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # Resolve a show page URL (episode selection happens in episode()).
        try:
            url = self.__search([localtvshowtitle] + source_utils.aliases_to_array(aliases))
            if not url and tvshowtitle != localtvshowtitle: url = self.__search([tvshowtitle] + source_utils.aliases_to_array(aliases))
            return url
        except:
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        return self.__get_episode_link(url, episode)
    def sources(self, url, hostDict, hostprDict):
        # Scrape the episode page's iframes for playable stream links.
        # Same-site players ('drama4u'/'k-vid') are parsed for direct file
        # URLs; anything else is offered as a hosted (non-direct) source.
        sources = []
        try:
            if not url:
                return
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'tab-pane'})
            r = dom_parser.parse_dom(r, 'iframe', req='src')
            r = [i.attrs['src'] for i in r]
            for i in r:
                try:
                    if 'drama4u' in i or 'k-vid' in i:
                        r = client.request(i, referer=url)
                        # Pull the JS "var source = [{...}];" player config.
                        r = re.findall('''var\s*source\s*=\s*\[({.*?})\]\s*;''', r)[0]
                        # Two regexes cover both 'label before file' and
                        # 'file before label' orderings in the config.
                        i = [(match[1], match[0]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''', r, re.DOTALL)]
                        i += [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''', r, re.DOTALL)]
                        r = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i]
                        for u, q in list(set(r)):
                            try:
                                tag = directstream.googletag(u)
                                if tag:
                                    sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False})
                                else:
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False})
                            except:
                                pass
                    else:
                        valid, host = source_utils.is_host_valid(i, hostDict)
                        if not valid: continue
                        sources.append({'source': host, 'quality': 'SD', 'language': 'ko', 'url': i, 'direct': False, 'debridonly': False})
                except:
                    pass
            return sources
        except:
            return sources
    def resolve(self, url):
        # Links are already final; nothing to resolve.
        return url
    def __search(self, titles):
        # Query the site search and return the relative URL of the first
        # result whose cleaned title matches one of the given titles.
        try:
            query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
            query = urlparse.urljoin(self.base_link, query)
            t = [cleantitle.get(i) for i in set(titles) if i]
            r = client.request(query)
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'container-search'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie-cat'})
            r = dom_parser.parse_dom(r, 'h4', attrs={'class': 'title'})
            r = dom_parser.parse_dom(r, 'a', req=['title', 'href'])
            r = [(i.attrs['href'], i.attrs['title']) for i in r]
            r = [(i[0]) for i in r if cleantitle.get(i[1]) in t][0]
            return source_utils.strip_domain(r)
        except:
            return
    def __get_episode_link(self, url, episode='1'):
        # From a show page, return the link whose visible text ends in
        # "EP <n>" matching the requested episode number.
        try:
            if not url:
                return
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'list-espisode'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie-item-espisode'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content) for i in r]
            r = [(i[0], re.findall('EP\s*(\d+)$', i[1])) for i in r]
            r = [i[0] for i in r if i[1] and int(i[1][0]) == int(episode)][0]
            return source_utils.strip_domain(r)
        except:
            return
| [
"github+github@github.github"
] | github+github@github.github |
5cb5741e1ccf992fab5d6962ca406c0996b96972 | 5e8342e4f6e48688f4a0079310e8f0b5e5386044 | /POO/Alumnos/profesor.py | 7aa8c1fac4b48d3b71088e1d6df86f077606537f | [] | no_license | fernado1981/python_ | 27a154406b5fba7e18da418bc5f75c58f3ccc24f | 7d846cd332405464fa14707ea3f2286a918fc9de | refs/heads/master | 2023-02-15T19:30:02.257345 | 2021-01-21T10:35:46 | 2021-01-21T10:35:46 | 277,186,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | class profesor:
# Maximum number of warnings (amonestaciones) before automatic failure.
limit = 3

def __init__(self, alumnado, name):
    """Store the roster dict and the name of the student being managed.

    alumnado maps student name -> record dict (keys used elsewhere:
    'aprobar' (bool) and 'amonestaciones' (int)).
    """
    self.alumnos = alumnado
    self.name = name
def aprobar(self):
if self.name in self.alumnos:
for c, v in self.alumnos.items():
if c == self.name:
v['aprobar'] = True
def suspender(self):
for c, v in self.alumnos.items():
if c == self.name:
v['aprobar'] = False
def amonestaciones(self, num):
for c, v in self.alumnos.items():
if c == self.name:
v['amonestaciones'] = num
if v['amonestaciones'] >= self.limit:
self.suspender()
def desamonestar(self, num):
for c, v in self.alumnos.items():
if c == self.name:
v['amonestaciones'] -= num
if v['amonestaciones'] < 3:
v['aprobar'] = True
def verAlumnado(self):
for c,v in self.alumnos.items():
print(c, v)
def suspensos(self):
for c,v in self.alumnos.items():
if not v['aprobar']:
print(c,v)
def aprobados(self):
for c, v in self.alumnos.items():
if v['aprobar']:
print(c, v)
| [
"fernando.manrique.villanueva@gmail.com"
] | fernando.manrique.villanueva@gmail.com |
14559afff0cea58f13cdd39a5f7e9f4982efc821 | 246ec8733c63a28518160af8fc9e21ae04f76649 | /fairseq/tasks/__init__.py | 92f9d53190d0779fc407d6a8cdfd932a63079362 | [
"MIT"
] | permissive | fyabc/BT4MolGen | 80050dc24031753fa3052ef60a5bea170d9d9c56 | 05d161ae9a7dbbcc3c95c71417d5e7f92ed0572c | refs/heads/master | 2023-05-29T01:38:36.614479 | 2021-06-18T15:56:13 | 2021-06-18T15:56:13 | 370,941,322 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,494 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import importlib
import os
from .fairseq_task import FairseqTask
# Global registries populated by the @register_task decorator:
# task name -> task class, plus the set of class names already seen
# (guards against registering two tasks with the same class name).
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
def setup_task(args, **kwargs):
    """Instantiate the task named by ``args.task``.

    Looks the class up in TASK_REGISTRY (filled by ``register_task``)
    and delegates to its ``setup_task`` classmethod; raises KeyError
    for an unregistered task name.
    """
    task_cls = TASK_REGISTRY[args.task]
    return task_cls.setup_task(args, **kwargs)
def register_task(name):
    """
    New tasks can be added to fairseq with the
    :func:`~fairseq.tasks.register_task` function decorator.

    For example::

        @register_task('classification')
        class ClassificationTask(FairseqTask):
            (...)

    .. note::

        All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
        interface.

    Please see the existing tasks in this package for usage examples.

    Args:
        name (str): the name of the task
    """

    def register_task_cls(cls):
        # Fail fast at import time on duplicate names, duplicate class
        # names, or classes that don't implement the task interface.
        if name in TASK_REGISTRY:
            raise ValueError('Cannot register duplicate task ({})'.format(name))
        if not issubclass(cls, FairseqTask):
            raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
        if cls.__name__ in TASK_CLASS_NAMES:
            raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
        TASK_REGISTRY[name] = cls
        TASK_CLASS_NAMES.add(cls.__name__)
        return cls

    return register_task_cls
# automatically import any Python files in the tasks/ directory
# (each module registers itself via @register_task on import)
tasks_dir = os.path.dirname(__file__)
for file in os.listdir(tasks_dir):
    path = os.path.join(tasks_dir, file)
    if not file.startswith('_') and (file.endswith('.py') or os.path.isdir(path)):
        task_name = file[:file.find('.py')] if file.endswith('.py') else file
        importlib.import_module('fairseq.tasks.' + task_name)

        # expose `task_parser` for sphinx: build a module-level
        # "<task>_parser" carrying each task's command-line arguments
        if task_name in TASK_REGISTRY:
            parser = argparse.ArgumentParser(add_help=False)
            group_task = parser.add_argument_group('Task name')
            # fmt: off
            group_task.add_argument('--task', metavar=task_name,
                                    help='Enable this task with: ``--task=' + task_name + '``')
            # fmt: on
            group_args = parser.add_argument_group('Additional command-line arguments')
            TASK_REGISTRY[task_name].add_args(group_args)
            globals()[task_name + '_parser'] = parser
def get_task(name):
    """Return the task class registered under ``name`` (KeyError if absent)."""
    task_cls = TASK_REGISTRY[name]
    return task_cls
| [
"fyabc@mail.ustc.edu.cn"
] | fyabc@mail.ustc.edu.cn |
9ebae106d0ffd798ae05342b8bf2684406293bbf | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2016_01_28_gsh_database_codes/plot_slice_compare_uniaxial2cyclic.py | d45d2d052bb44de92754eebaa18e75b9cd20c0cf | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import h5py
# Load the uniaxial and cyclic slices (plus the slice parameters) from
# the HDF5 file produced by the slicing step.
f = h5py.File('slice.hdf5', 'r')
slc_uni = f.get('slice_uni')[...].real
slc_cyc = f.get('slice_cyc')[...].real
par = f.get('parameters')[...]
f.close()
# Parameters 0 and 1 are angles in radians -> degrees for the title.
th = np.round(par[0]*180./np.pi, 2)
phi2 = np.round(par[1]*180./np.pi, 0)
en = np.round(par[2], 4)
# 3D scatter of both slices: uniaxial in blue, cyclic in red; axes are
# (phi1, Phi, FIP) per the labels below (columns 1, 2 and 5).
fig = plt.figure(num=1, figsize=[14, 8])
ax = fig.add_subplot(111, projection='3d')
ax.scatter(slc_uni[:, 1], slc_uni[:, 2], slc_uni[:, 5], c='b')
ax.scatter(slc_cyc[:, 1], slc_cyc[:, 2], slc_cyc[:, 5], c='r')
title_text = "theta = %s, phi2 = %s, en = %s" % (th, phi2, en)
ax.set_title(title_text)
ax.set_xlabel('phi1')
ax.set_ylabel('Phi')
ax.set_zlabel('FIP')
plt.show()
| [
"noahhpaulson@gmail.com"
] | noahhpaulson@gmail.com |
6f03b5b578c7f4027dd73f0fbcbb4198d4c5e38f | 15b3b60252af3e5ebd8be4a9fbcccc96469acaad | /pre_clean.py | 30bfe2e18f4c68a8065eef0a6895f5ac0df7ba16 | [] | no_license | yingl/jkb | b6d50cd5d5ba64798a28f6948f1490f334f52b97 | fdf68dee5fbe5a9cfbf2a41af78d2f2ec16a459c | refs/heads/master | 2020-03-28T11:59:47.752625 | 2020-01-23T06:49:46 | 2020-01-23T06:49:46 | 148,262,569 | 10 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | import argparse
def parse_args():
    """Parse the -fi/--filein and -fo/--fileout command-line options."""
    cli = argparse.ArgumentParser()
    cli.add_argument('-fi', '--filein', type=str)
    cli.add_argument('-fo', '--fileout', type=str)
    return cli.parse_args()
if __name__ == '__main__':
    # Convert a handful of HTML tags in the input file to Markdown and
    # strip the remaining structural tags, writing to the output file.
    args = parse_args()
    fi = args.filein
    fo = args.fileout
    with open(fi, 'r', encoding='utf-8') as fi:
        text = fi.read()
    # Headings become Markdown headings; <strong> is normalized to <b>.
    conversions = [
        ('<h2>', '## '), ('</h2>', ''),
        ('<h3>', '### '), ('</h3>', ''),
        ('<strong>', '<b>'), ('</strong>', '</b>'),
    ]
    # Structural tags are removed outright.
    for tag in ('<p>', '</p>', '<ul>', '</ul>', '<ol>', '</ol>',
                '<li>', '</li>', '<center>', '</center>'):
        conversions.append((tag, ''))
    for old, new in conversions:
        text = text.replace(old, new)
    with open(fo, 'w+', encoding='utf-8') as fo:
        fo.write(text)
"linying_43151@163.com"
] | linying_43151@163.com |
517a5b458cf820892048f7df658197d34e5aadd7 | c1869b7106a4651ecc0f0f53b82d5f11021896e3 | /examples/DKVMN/DKVMN.py | 22aaa5b5608ca405710ad4a60415fbe0a2e2f8ae | [
"MIT"
] | permissive | bigdata-ustc/XKT | 6efd7ff5b09c22ed9099f5b9b614edceff1cada0 | b3ac07541b92001b62d7cff4e8fe7e5a69c5c93c | refs/heads/master | 2021-09-22T19:22:25.563651 | 2021-09-16T02:56:10 | 2021-09-16T02:56:10 | 194,855,614 | 18 | 9 | MIT | 2021-09-16T02:56:11 | 2019-07-02T12:06:12 | Python | UTF-8 | Python | false | false | 699 | py | # coding: utf-8
# 2021/5/26 @ tongshiwei
import mxnet as mx
from XKT.DKVMN import etl
from XKT import DKVMN
# Small end-to-end DKVMN demo: train briefly, save, reload, evaluate,
# then smoke-test a forward pass on a dummy batch.
batch_size = 32
train = etl("../../data/a0910c/train.json", batch_size=batch_size)
valid = etl("../../data/a0910c/valid.json", batch_size=batch_size)
test = etl("../../data/a0910c/test.json", batch_size=batch_size)

model = DKVMN(
    hyper_params=dict(
        ku_num=146,  # number of knowledge units in the a0910c dataset
        key_embedding_dim=10,
        value_embedding_dim=10,
        key_memory_size=20,
        hidden_num=100
    )
)
model.train(train, valid, end_epoch=2)
model.save("dkvmn")

# Reload the saved weights and evaluate on the held-out split.
model = DKVMN.from_pretrained("dkvmn")
print(model.eval(test))

# Forward pass on a dummy (2, 3) batch of ones.
inputs = mx.nd.ones((2, 3))
outputs, _ = model(inputs)
print(outputs)
"tongsw@mail.ustc.edu.cn"
] | tongsw@mail.ustc.edu.cn |
a5669df662c5cbc0c978c08431dfe48e19ea5151 | bf4178e73f0f83781be6784d7587cb34a38d6edd | /platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/ocelot/filters/phy_filters.py | e49ca3348ff7ba8253f8aecb5959917a02921b04 | [] | no_license | kolbertv/ZigbeeSiliconV3 | 80d70515e93be1413c24cdcb3485f50c65a1564b | ab0bd8d4bb6c1048adef81d0e66d96006c2fabd9 | refs/heads/master | 2023-01-02T07:18:01.393003 | 2020-10-25T15:33:08 | 2020-10-25T15:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,896 | py | """
Lynx specific filters
"""
from pyradioconfig.calculator_model_framework.interfaces.iphy_filter import IPhyFilter
class PhyFilters(IPhyFilter):
    """PHY group filter lists consumed by the radio configurator.

    NOTE(review): the module docstring says 'Lynx' while the path says
    ocelot -- confirm which part this file actually targets.
    """
    #Studio black list (files removed before Studio distribution)
    customer_phy_groups = [
        'Phys_Internal_Base_Customer_Aclara',
        'Phys_Internal_Base_Customer_Acuity',
        'Phys_Internal_Base_Customer_Chamberlain',
        'Phys_Internal_Base_Customer_Essence',
        'Phys_Internal_Base_Customer_HoneywellEnergyAxis',
        'Phys_Internal_Base_Customer_Lutron',
        'Phys_Internal_Base_Customer_Sigfox',
        'Phys_Internal_Base_Experimental',
        'Phys_Internal_Base_Utility',
        'Phys_Internal_Base_ValOnly',
        'Phys_Internal_Connect',
        'Phys_Internal_Longrange',
        'Phys_Internal_RAIL_Base_Standard_BLE',
        'Phys_Internal_RAIL_Base_Standard_IEEE802154',
        'Phys_RAIL_Base_Standard_BLE',
        'Phys_RAIL_Base_Standard_IEEE802154',
        'Phys_RAIL_Base_Standard_IEEE802154GB',
        'Phys_RAIL_Base_Standard_ZWave',
    ]
    #Studio white list (these PHYs show in Studio as proprietary starting points)
    simplicity_studio_phy_groups = ['Phys_Studio_Base', 'Phys_Studio_Base_Standard_SUNFSK', 'Phys_Studio_Connect',
                                    'Phys_Studio_LongRange', 'Phys_Studio_MBus','Phys_Studio_WiSUN']
    # Special designation for simulation PHYs
    sim_tests_phy_groups = []
    # Special designation for non-functional PHYs
    non_functional_phy_groups = []
"1048267279@qq.com"
] | 1048267279@qq.com |
69d80edf9f62e78f34fc9b40f7ed035eb1dba0cd | 1adc05008f0caa9a81cc4fc3a737fcbcebb68995 | /hardhat/recipes/notmuch.py | 2d64141806c5209a2c04e5546647640e733fe6b2 | [
"MIT",
"BSD-3-Clause"
] | permissive | stangelandcl/hardhat | 4aa995518697d19b179c64751108963fa656cfca | 1ad0c5dec16728c0243023acb9594f435ef18f9c | refs/heads/master | 2021-01-11T17:19:41.988477 | 2019-03-22T22:18:44 | 2019-03-22T22:18:52 | 79,742,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | from .base import GnuRecipe
class NotMuchRecipe(GnuRecipe):
    """Hardhat build recipe for the notmuch mail indexer."""

    def __init__(self, *args, **kwargs):
        super(NotMuchRecipe, self).__init__(*args, **kwargs)
        # Upstream tarball checksum; split only for line length.
        self.sha256 = 'b4bf09ec9b7b64180704faa26d66cad5' \
                      'f911a5a00ef812da34cb02c3f8872831'
        self.name = 'notmuch'
        self.version = '0.25.1'
        # Used to discover newer versions on the releases page.
        self.version_regex = r'(?P<version>\d+\.\d+\.\d+)'
        self.version_url = 'https://notmuchmail.org/releases'
        self.depends = ['autotools', 'gmime', 'python3-sphinx',
                        'talloc', 'xapian']
        # '$version' is substituted by the base recipe machinery.
        self.url = 'https://notmuchmail.org/releases/notmuch-$version.tar.gz'
        self.configure_args += ['--without-ruby']
        # needed to make sphinx test pass to install manpages
        self.environment['PYTHON'] = 'python3'
| [
"clayton.stangeland@gmail.com"
] | clayton.stangeland@gmail.com |
00c249df496d497a62d7988f77d4d095b9b8644e | 925f6b2376bcdcf175194b4f390beeffb57d67e0 | /sosmypc/sosmypc/core/forms.py | c69f02eae6de9e7a5494462446fa674876afd469 | [] | no_license | CoutinhoElias/sosmypc | a0a86f0c05f5f0d6e0beb3a7b22da73ed8951ac4 | ce77520f0e7fe33441de030f85c85c4fccce8afb | refs/heads/master | 2021-01-18T23:21:22.377626 | 2016-06-02T18:07:27 | 2016-06-02T18:07:27 | 53,994,124 | 1 | 1 | null | 2016-05-18T19:29:35 | 2016-03-16T02:23:09 | JavaScript | UTF-8 | Python | false | false | 6,118 | py | import datetime as datetime
from django import forms
from django.contrib.auth.forms import UserCreationForm
from material import Layout, Row, Fieldset, Span3, Span2, Span10, Span8, Span7, Span5
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout
from .models import ProfissoesPessoa, Qualificacao
from django_addanother.widgets import AddAnotherWidgetWrapper
from django.core.urlresolvers import reverse_lazy
class LoginForm(forms.Form):
    """Simple login form (labels in Portuguese)."""
    username = forms.CharField(max_length=30,label="Nome")
    email = forms.EmailField(label="E-mail")
    password = forms.CharField(widget=forms.PasswordInput,label="Senha")
class RegistrationForm(forms.Form, UserCreationForm):
    """Registration form combining account credentials with address data.

    NOTE(review): inherits both forms.Form and UserCreationForm (a
    ModelForm); the 'password1'/'password2' fields referenced in the
    layout come from UserCreationForm -- confirm the MRO behaves as
    intended here.
    """
    username = forms.CharField(max_length=30,required=True,label='Login')
    email = forms.EmailField(label="E-mail",required=True)
    #senha = forms.CharField(widget=forms.PasswordInput,label='Senha')
    #confirma_senha = forms.CharField(widget=forms.PasswordInput, label="Confirmar senha")
    nome = forms.CharField(required=True,label='Nome Completo')
    cep = forms.IntegerField(max_value=99999999,required=True,label='CEP')
    #tipo_logradouro = forms.CharField(required=True,label='Tipo')
    logradouro = forms.CharField(required=True,label='Logradouro')
    numero = forms.CharField(required=True,label='Número')
    bairro = forms.CharField(required=True,label='Bairro')
    cidade = forms.CharField(required=True,label='Cidade')
    estado = forms.CharField(required=True,label='UF')
    #last_name = forms.CharField(required=True, label='Último nome')
    #gender = forms.ChoiceField(choices=((None, ''), ('F', 'Feminino'), ('M', 'Masculino'), ('O', 'Outro')),label='Gênero',required=False)
    profissional = forms.BooleanField(required=False, label='Sou profissional.')
    agree_toc = forms.BooleanField(required=True, label='Eu aceito os termos e condições de uso.')

    # Material layout: two fieldsets (account data, personal/address data)
    # followed by the standalone checkboxes.
    layout = Layout(
        Fieldset('Cadastrar em SOS my PC',
                 'username','email',
                 Row('password1', 'password2')),
        Fieldset('Dados Pessoais','nome',
                 Row(Span2('cep'),# Span2('tipo_logradouro'),
                     Span8('logradouro'),Span2('numero')),
                 Row(Span5('bairro'),Span5('cidade'),Span2('estado')) ),
        'profissional', 'agree_toc')
class CommentForm(forms.Form):
    """Comment/contact form: name, e-mail and a free-text message."""
    nome = forms.CharField(required=True,label='Nome Completo')
    email=forms.EmailField(label="E-mail",required=True)
    mensagem=forms.CharField(required=True,label='Comentário',widget=forms.Textarea)
class UserForm(forms.Form):
    """Admin-style user editing form with Bootstrap widget classes."""
    username = forms.CharField(label="Nome usuário", max_length=32, widget=forms.TextInput(
        attrs={'class': 'form-control input-lg'}))
    email = forms.EmailField(max_length=32, widget=forms.EmailInput(
        attrs={'class': 'form-control input-lg'}))
    #password = forms.CharField(label="Senha", max_length=32, widget=forms.PasswordInput(
    #attrs={'class': 'form-control input-lg'}))
    first_name = forms.CharField(label="Primeiro nome", max_length=32, widget=forms.TextInput(
        attrs={'class': 'form-control input-lg'}))
    last_name = forms.CharField(label="Sobrenome", max_length=32, widget=forms.TextInput(
        attrs={'class': 'form-control input-lg'}))
    # Fix: BooleanField is required by default in Django, so an unchecked
    # box would always fail validation; required=False makes these real
    # on/off flags while keeping initial=False.
    is_staff = forms.BooleanField(label="É usuário do sistema?", initial=False, required=False)
    is_superuser = forms.BooleanField(label="É Administrador do sistema?", initial=False, required=False)
class ProfissaoForm(forms.Form):#Atualmente sem uso.
    """Single-field form for a profession name (currently unused)."""
    profissao = forms.CharField(max_length=30,label="Profissao")
class ProfissoesPessoaForm(forms.Form): #Atualmente sem uso.
    """Plain-form variant of the person/profession/rating association
    (currently unused; the ModelForm below is the active one)."""
    pessoa = forms.CharField(max_length=30,label="Pessoa")
    profissao = forms.CharField(max_length=30,label="Profissao")
    rating = forms.IntegerField(label="Rating")

    layout = Layout(
        'pessoa',
        Row('profissao', 'rating'))

    # @property #Trabalhando com modal | Primeiro declara esta função abaixo:
    # def helper(self):
    #     helper = FormHelper()
    #     helper.form_tag = False # don't render form DOM element
    #     helper.render_unmentioned_fields = True # render all fields
    #     helper.label_class = 'col-md-2'
    #     helper.field_class = 'col-md-10'
    #     return helper
class ProfissoesPessoaModelForm(forms.ModelForm):
    """ModelForm over ProfissoesPessoa exposing all model fields."""
    class Meta:
        model = ProfissoesPessoa
        fields = '__all__'

    layout = Layout(
        'pessoa',
        Row('profissao', 'rating'))
# class QualificacaoModelForm(forms.ModelForm):
# class Meta:
# model = Qualificacao
# fields = ['descricao']
# widgets = {
# 'groups': AddAnotherWidgetWrapper(
# forms.SelectMultiple,
# reverse_lazy('add_qualificacao'),
# )
# }
"""Passos para trabalhar com django rest
1 - pip install djangorestframework
2 - pip install httpie
3 - No Setting do projeto antes de suas apps insira 'rest_framework',
4 - No urls.py chame assim:
url(r'^pessoas/all/', all_pessoas)
5 - Na pasta do projeto (Neste caso a pasta core onde se encontram os arquivos views, forms, apps e models.py
vamos criar um arquivo chamado serializers.py
Neste arquivo vamos colocar o código abaixo:
from rest_framework import serializers
from core.models import *
class PessoaSerializer(serializers.ModelSerializer):
class Meta:
model = Pessoa
fields = ('pk', ...)
Repita isso para cada classe do models.py
6 - Na views.py vamos fazer os seguintes passos:
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.decorators import api_view
from Pessoa.serializers import *
@api_view(['GET'])
def all_Pessoas(request, **kwargs):
pessoas = Pessoa.objects.all()
serializers = PessoaSerializer(pessoas, many=True)
return Response(serializers.data)
"""
| [
"coutinho.elias@gmail.com"
] | coutinho.elias@gmail.com |
cd9eca8fdea4985097b9053005381853c3e81a01 | 49536aafb22a77a6caf249c7fadef46d63d24dfe | /tensorflow/tensorflow/contrib/nn/python/ops/alpha_dropout_test.py | a46269392668d58794c05b147c0b616940dd905c | [
"Apache-2.0"
] | permissive | wangzhi01/deeplearning-1 | 4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d | 46ab82253d956953b8aa98e97ceb6cd290e82288 | refs/heads/master | 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,629 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sampling_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.nn.python.ops.alpha_dropout import alpha_dropout
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test
class AlphaDropoutTest(test.TestCase):
  """Tests for contrib.nn alpha_dropout (TF1-style graph/session tests)."""

  def testAlphaDropout(self):
    # Alpha dropout should approximately preserve the input's mean and
    # standard deviation for several keep probabilities.
    x_dim, y_dim = 40, 30
    for keep_prob in [0.1, 0.5, 0.8]:
      with self.test_session():
        t = random_ops.random_normal([x_dim, y_dim])
        output = alpha_dropout(t, keep_prob)
        self.assertEqual([x_dim, y_dim], output.get_shape())
        t_mean, t_std = nn_impl.moments(t, axes=[0, 1])
        output_mean, output_std = nn_impl.moments(output, axes=[0, 1])
        self.assertLess(abs(t_mean.eval() - output_mean.eval()), 0.1)
        self.assertLess(abs(t_std.eval() - output_std.eval()), 0.1)

  def testShapedDropoutShapeError(self):
    # Runs shaped dropout and verifies an error is thrown on misshapen noise.
    x_dim = 40
    y_dim = 30
    keep_prob = 0.5
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      _ = alpha_dropout(t, keep_prob, noise_shape=[x_dim, y_dim + 10])
    with self.assertRaises(ValueError):
      _ = alpha_dropout(t, keep_prob, noise_shape=[x_dim, y_dim, 5])
    with self.assertRaises(ValueError):
      _ = alpha_dropout(t, keep_prob, noise_shape=[x_dim + 3])
    with self.assertRaises(ValueError):
      _ = alpha_dropout(t, keep_prob, noise_shape=[x_dim])
    # test that broadcasting proceeds
    _ = alpha_dropout(t, keep_prob, noise_shape=[y_dim])
    _ = alpha_dropout(t, keep_prob, noise_shape=[1, y_dim])
    _ = alpha_dropout(t, keep_prob, noise_shape=[x_dim, 1])
    _ = alpha_dropout(t, keep_prob, noise_shape=[1, 1])

  def testInvalidKeepProb(self):
    # keep_prob outside [0, 1], non-scalar, or wrong dtype must raise.
    x_dim, y_dim = 40, 30
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      alpha_dropout(t, -1.0)
    with self.assertRaises(ValueError):
      alpha_dropout(t, 1.1)
    with self.assertRaises(ValueError):
      alpha_dropout(t, [0.0, 1.0])
    with self.assertRaises(ValueError):
      alpha_dropout(t, array_ops.placeholder(dtypes.float64))
    with self.assertRaises(ValueError):
      alpha_dropout(t, array_ops.placeholder(dtypes.float32, shape=[2]))

  def testNoDropoutFast(self):
    # keep_prob == 1 is a no-op: the input tensor is returned unchanged.
    x = array_ops.zeros((5,))
    for p in 1, constant_op.constant(1.0):
      y = alpha_dropout(x, keep_prob=p)
      self.assertTrue(x is y)


if __name__ == '__main__':
  test.main()
| [
"hanshuobest@163.com"
] | hanshuobest@163.com |
b275fe795cf9ab1470d80878cc1dcd2f8bff4dfb | 2e7fa13a40dafa81c5852b7a9d70555c45814574 | /QT/pyqt/Qline_Edit.py | 84efff4077167ce8cc0584f2f562a6c1388a02b6 | [] | no_license | Ziaeemehr/miscellaneous | 5768c6f5a2fe76468faed4283a3572a44ccd0239 | 43a62aaa28c577b09f605a135818a2dacc75d67c | refs/heads/master | 2021-07-24T02:43:51.032849 | 2020-09-23T06:17:17 | 2020-09-23T06:17:17 | 217,556,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,161 | py | import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
def window():
    """Build and run a QLineEdit feature demo (Python 2 / PyQt4).

    Shows validators, input masks, change signals, password echo mode
    and read-only fields in a single form layout; blocks in the Qt
    event loop until the window is closed.
    """
    app = QApplication(sys.argv)
    win = QWidget()
    # e1: integers only, at most 4 digits, right-aligned large font.
    e1 = QLineEdit()
    e1.setValidator(QIntValidator())
    e1.setMaxLength(4)
    e1.setAlignment(Qt.AlignRight)
    e1.setFont(QFont("Arial",20))
    # e2: doubles restricted to 0.99-99.99 with 2 decimals.
    e2 = QLineEdit()
    e2.setValidator(QDoubleValidator(0.99,99.99,2))
    flo = QFormLayout()
    flo.addRow("integer validator", e1)
    flo.addRow("Double validator",e2)
    # e3: phone-number-style input mask.
    e3 = QLineEdit()
    e3.setInputMask('+99_9999_999999')
    flo.addRow("Input Mask",e3)
    # e4: fires textchanged() on every keystroke.
    e4 = QLineEdit()
    e4.textChanged.connect(textchanged)
    flo.addRow("Text changed",e4)
    # e5: password echo mode; enterPress() fires when editing finishes.
    e5 = QLineEdit()
    e5.setEchoMode(QLineEdit.Password)
    flo.addRow("Password",e5)
    # e6: read-only field.
    e6 = QLineEdit("Hello Python")
    e6.setReadOnly(True)
    flo.addRow("Read Only",e6)
    e5.editingFinished.connect(enterPress)
    win.setLayout(flo)
    win.setWindowTitle("PyQt")
    win.show()
    sys.exit(app.exec_())
def textchanged(text):
    # Slot for QLineEdit.textChanged (Python 2 print statement).
    print "contents of text box: "+text
def enterPress():
    # Slot for QLineEdit.editingFinished.
    print "edited"
# Launch the demo only when run as a script.
if __name__ == '__main__':
    window()
"a.ziaeemehr@gmail.com"
] | a.ziaeemehr@gmail.com |
7be4d69ad6872e1ec239fc9e76020f4128811aa0 | a0f7cd0dac6b24ca8f0eb26e13f55e7d3bfd6073 | /tutorgame/regexapp/migrations/0001_initial.py | 5fcf0d31f6456839a59a6d71d3b5d49dd99c32ce | [] | no_license | tomaccosheep/capstone-draft-7 | 217d0e279c7a3a25207f084ee5f148de5815fe0c | 0a66c24397d2c0d4878a057c6bdd21a1009b15b6 | refs/heads/master | 2021-01-22T17:49:05.881406 | 2017-08-14T19:00:19 | 2017-08-14T19:00:19 | 102,405,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-13 18:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Card_Manager table.

    Auto-generated by Django; do not hand-edit the operations beyond
    what makemigrations would produce.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Card_Manager',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # unique_id: 32-char identifier; uniqueness is not enforced
                # at the DB level by this migration.
                ('unique_id', models.CharField(max_length=32)),
            ],
        ),
    ]
| [
"al.burns.email@gmail.com"
] | al.burns.email@gmail.com |
a8ff5cf0bc6c164790c98a11d4fee5fbabbc3acc | 669be04e813baf7ac5a444ff9197237a8674126d | /product.py | 0fec1a80bb34ec37fb8020167d1acab1e59d7db0 | [] | no_license | vangali12/PythonOOP | d24d588eddfa6b03919dd6735b8bf3c898630425 | 579d365981b9d1520ec88dcbfe52147745be94ef | refs/heads/master | 2021-07-08T04:07:00.184547 | 2017-10-05T19:07:37 | 2017-10-05T19:07:37 | 105,929,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | class Product(object):
def __init__(self, price, name, weight, brand, cost):
    """Create a product; new products start with status "sale".

    price  -- current selling price (numeric; adjusted by addTax/returnItem)
    name   -- product name
    weight -- descriptive weight string, e.g. "1lb"
    brand  -- brand name
    cost   -- acquisition cost (only displayed, never recomputed)
    """
    self.price = price
    self.name = name
    self.weight = weight
    self.brand = brand
    self.cost = cost
    # Lifecycle states used by this class: "sale", "sold", "defective", "used".
    self.status = "sale"
def sell(self):
    # Mark the product as sold; returns self so calls can be chained.
    self.status = "sold"
    return self
def addTax(self, tax):
    """Raise the price by the given tax rate (e.g. 0.1 adds 10%).

    Returns self so calls can be chained.
    """
    self.price += self.price * tax
    return self
def returnItem(self, reason):
    """Process a product return.

    reason -- "defective": item is unsellable; status becomes "defective"
                           and the price is zeroed.
              "new":       unopened return; item goes back on sale unchanged.
              "opened":    item is restocked as "used" at 80% of its price.
    Any other reason leaves the product untouched.
    Returns self so calls can be chained (consistent with sell/addTax).

    Bug fix: the original compared strings with `is` (object identity),
    which only happens to work for interned literals; `==` compares values.
    The redundant float() around the 80% price was also dropped (multiplying
    by 0.8 already yields a float).
    """
    if reason == "defective":
        self.status = "defective"
        self.price = 0
    elif reason == "new":
        self.status = "sale"
    elif reason == "opened":
        self.status = "used"
        self.price = self.price * 0.8
    return self
def displayInfo(self):
    """Print each attribute of the product on its own line."""
    print("Price: {}".format(self.price))
    print("Name: {}".format(self.name))
    print("Weight: {}".format(self.weight))
    print("Brand {}".format(self.brand))  # sic: original label has no colon
    print("Cost: {}".format(self.cost))
    print("Status: {}".format(self.status))
#item1 = Product(21, "strawberries", "1lb", "Driscoll's", 5)
#item1.displayInfo()
#item1.sell().displayInfo()
#item1.addTax(0.1).displayInfo()
#item1.returnItem("defective").displayInfo()
#item1.returnItem("new").displayInfo()
#item1.returnItem("opened").displayInfo() | [
"30483940+vangali12@users.noreply.github.com"
] | 30483940+vangali12@users.noreply.github.com |
0a27983665eca4c578a5013cd7157737e2c6dec8 | c9500ad778b8521aaa85cb7fe3239989efaa4799 | /plugins/automox/icon_automox/actions/run_command/action.py | c10f2f3ba76d3af402ecb5308a617e712b5da48f | [
"MIT"
] | permissive | rapid7/insightconnect-plugins | 5a6465e720f114d71b1a82fe14e42e94db104a0b | 718d15ca36c57231bb89df0aebc53d0210db400c | refs/heads/master | 2023-09-01T09:21:27.143980 | 2023-08-31T10:25:36 | 2023-08-31T10:25:36 | 190,435,635 | 61 | 60 | MIT | 2023-09-14T08:47:37 | 2019-06-05T17:05:12 | Python | UTF-8 | Python | false | false | 1,408 | py | import insightconnect_plugin_runtime
from .schema import RunCommandInput, RunCommandOutput, Input, Output, Component
# Custom imports below
class RunCommand(insightconnect_plugin_runtime.Action):
    """Run an Automox command (patch install or policy test/remediation) on a device."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name="run_command", description=Component.DESCRIPTION, input=RunCommandInput(), output=RunCommandOutput()
        )

    def run(self, params={}):
        """Build the device-command payload from the action inputs and submit it.

        Returns {success: True} once the command has been handed to the API.
        """
        command_name = params.get(Input.COMMAND)
        target_policy = params.get(Input.POLICY_ID)

        # Policy commands are addressed as "policy_<id>_<verb>" on the API side.
        policy_verbs = {"PolicyTest": "test", "PolicyRemediate": "remediate"}

        payload = {"command_type_name": command_name}
        if command_name == "InstallUpdate":
            # Only InstallUpdate carries an argument list (the patches to apply).
            payload["args"] = params.get(Input.PATCHES)
        elif command_name in policy_verbs:
            payload["command_type_name"] = f"policy_{target_policy}_{policy_verbs[command_name]}"

        self.logger.info(
            f"Running {payload['command_type_name']} command with the following "
            f"arguments: {payload.get('args', 'No arguments defined')}"
        )
        self.connection.automox_api.run_device_command(
            params.get(Input.ORG_ID), params.get(Input.DEVICE_ID), payload
        )
        return {Output.SUCCESS: True}
| [
"noreply@github.com"
] | rapid7.noreply@github.com |
9a42e5f17ab99a99ebc15ec69c703a5a312f984f | 2c4648efe8c7e408b8c3a649b2eed8bb846446ec | /codewars/Python/8 kyu/ValidateCodeWithSimpleRegex/validate_code_test.py | 8aefdd9d13dd8eab5601b43efe0c1ecca12ebe4b | [] | no_license | Adasumizox/ProgrammingChallenges | 9d79bd1b0ce4794b576124f9874aabb86d5c0713 | 3630fcde088d7991e344eb1b84805e9e756aa1a2 | refs/heads/master | 2021-07-16T08:16:57.538577 | 2020-07-19T19:58:28 | 2020-07-19T19:58:28 | 190,159,085 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | from validate_code import validate_code
import unittest
class TestValidateCodeWithSimpleRegex(unittest.TestCase):
def test(self):
self.assertEqual(validate_code(123), True)
self.assertEqual(validate_code(248), True)
self.assertEqual(validate_code(8), False)
self.assertEqual(validate_code(321), True)
self.assertEqual(validate_code(9453), False)
def test_rand(self):
from random import randint
validate_sol=lambda code: str(code)[0] in "123"
for _ in range(40):
code=int(str(randint(1,6))+str(randint(1,10**randint(1,9))))
self.assertEqual(validate_code(code), validate_sol(code), "It should work for random inputs too")
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
"darkdan099@gmail.com"
] | darkdan099@gmail.com |
d5854818d5e3e6c8e2cdd670b2817f56b180997d | 2a67dc681af4c4b9ef7a8e18c2ff75377dc5b44f | /aws.elastictranscoder.Preset-python/__main__.py | 65842b4d7b1e0d1857cb2f1f1a92513b38482b13 | [] | no_license | ehubbard/templates-aws | e323b693a18234defe6bd56ffcc64095dc58e3a1 | 2ae2e7a5d05490078017fed6d132dcdde1f21c63 | refs/heads/master | 2022-11-17T13:53:14.531872 | 2020-07-10T21:56:27 | 2020-07-10T21:56:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | import pulumi
import pulumi_aws as aws
# Elastic Transcoder preset "bar": MP4 container with stereo AAC audio,
# H.264 16:9 video, PNG thumbnails, and one bottom-right content watermark.
bar = aws.elastictranscoder.Preset("bar",
    # Audio stream: 96 kbps stereo AAC at 44.1 kHz, single-track packing.
    audio={
        "audioPackingMode": "SingleTrack",
        "bitRate": 96,
        "channels": 2,
        "codec": "AAC",
        "sampleRate": 44100,
    },
    # Low-complexity AAC profile.
    audio_codec_options={
        "profile": "AAC-LC",
    },
    container="mp4",
    description="Sample Preset",
    # Thumbnails: auto-sized PNGs at interval 120 (seconds, per AWS docs),
    # padded to fit.
    thumbnails={
        "format": "png",
        "interval": 120,
        "maxHeight": "auto",
        "maxWidth": "auto",
        "paddingPolicy": "Pad",
        "sizingPolicy": "Fit",
    },
    # Video stream: 1600 kbps H.264, 16:9, keyframe at most every 240 frames.
    video={
        "bitRate": "1600",
        "codec": "H.264",
        "displayAspectRatio": "16:9",
        "fixedGop": "false",
        "frameRate": "auto",
        "keyframesMaxDist": 240,
        "maxFrameRate": "60",
        "maxHeight": "auto",
        "maxWidth": "auto",
        "paddingPolicy": "Pad",
        "sizingPolicy": "Fit",
    },
    # H.264 tuning: main profile, level 2.2, up to 3 reference frames.
    video_codec_options={
        "ColorSpaceConversionMode": "None",
        "InterlacedMode": "Progressive",
        "Level": "2.2",
        "MaxReferenceFrames": 3,
        "Profile": "main",
    },
    # Watermark: semi-transparent overlay anchored 10px from the bottom-right,
    # capped at 20% of the frame in each dimension.
    video_watermarks=[{
        "horizontalAlign": "Right",
        "horizontalOffset": "10px",
        "id": "Test",
        "maxHeight": "20%",
        "maxWidth": "20%",
        "opacity": "55.5",
        "sizingPolicy": "ShrinkToFit",
        "target": "Content",
        "verticalAlign": "Bottom",
        "verticalOffset": "10px",
    }])
| [
"jvp@justinvp.com"
] | jvp@justinvp.com |
2e7a3c78dc70b3d8601a2cc34252103d8834c6d2 | 4da9c19d9839c670fda30a45a7e223da624eee4a | /Codechef Problem solutions/chef and happiness new.py | c2f55bca41451258e8ad08c4309a6725aa068313 | [] | no_license | JineshKamdar98/Codchef-Problem-Solutions | 3e1737669cc0657ccc224e06f800b587130f5787 | 4447679aa3fb45a2d57f93bf3f724f6223049506 | refs/heads/master | 2020-05-05T06:38:10.306619 | 2019-04-06T06:16:10 | 2019-04-06T06:16:10 | 179,795,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | for t in range(int(input())):
# Per-test-case body of the outer loop: read one array and print the verdict.
n = int(input())
a = list(map(int, input().split()[:n]))
# Sets replace the original `index`/`value` lists, turning the O(n) list
# membership scans (`in` and .count) inside the loop into O(1) lookups,
# i.e. O(n^2) -> O(n log n) overall (dominated by the sort).
seen = set()      # values processed so far (largest first)
shifted = set()   # value - 1 for every value processed so far
f = 0
for x in sorted(a, reverse=True):
    if x - 1 in shifted:
        # Original behavior: force the flag to 0 and stop scanning.
        f = 0
        break
    shifted.add(x - 1)
    if x in seen:
        # Duplicate value encountered (original: value.count(a[i]) > 1).
        f = 1
    seen.add(x)
# NOTE(review): a repeated value x always trips the `x - 1 in shifted`
# check before the duplicate flag can be set, so this appears to always
# print "Poor Chef" -- verify against the intended problem statement.
if f == 1:
    print("Truly Happy")
else:
    print("Poor Chef")
| [
"noreply@github.com"
] | JineshKamdar98.noreply@github.com |
29904bf7638508da99c3a12ea8f0679def218f3a | 4148260054c2cf4605dacb8bdef3605c82eca470 | /temboo/Library/Google/Picasa/AddCommentToPhoto.py | 8f14bfc24bb5d18a65fd905bcc1b74a6b9fa4d9f | [] | no_license | wimsy/actuarize-web | 0f23d5f00afe3d36d430621cdb497d2e64998416 | 5f43af3019da6fb08cafeec9ff0a89df5196b864 | refs/heads/master | 2021-03-12T19:38:21.887681 | 2012-12-19T01:13:50 | 2012-12-19T01:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,767 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# AddCommentToPhoto
# Adds a comment to a specified photo in Google Picasa.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class AddCommentToPhoto(Choreography):
    """Choreography that adds a comment to a specified photo in Google Picasa.

    Instantiate with a TembooSession holding a valid set of Temboo credentials.
    """

    _CHOREO_PATH = '/Library/Google/Picasa/AddCommentToPhoto'

    def __init__(self, temboo_session):
        """Bind this choreography to *temboo_session*."""
        Choreography.__init__(self, temboo_session, self._CHOREO_PATH)

    def new_input_set(self):
        """Return a fresh, empty input set for this choreography."""
        return AddCommentToPhotoInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw execution *result* in the typed result set."""
        return AddCommentToPhotoResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Wrap an execution handle in the typed execution object."""
        return AddCommentToPhotoChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the AddCommentToPhoto
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class AddCommentToPhotoInputSet(InputSet):
    """Inputs for the AddCommentToPhoto choreography.

    Each setter records one named input that is sent when the choreo executes.
    """

    def set_AccessToken(self, value):
        """(optional, string) OAuth access token; expired tokens are refreshed and returned in the choreo output."""
        self._set_input('AccessToken', value)

    def set_AlbumID(self, value):
        """(required, integer) The id of the album containing the photo to comment on."""
        self._set_input('AlbumID', value)

    def set_ClientID(self, value):
        """(required, string) The client id provided by Google."""
        self._set_input('ClientID', value)

    def set_ClientSecret(self, value):
        """(required, string) The client secret provided by Google."""
        self._set_input('ClientSecret', value)

    def set_Comment(self, value):
        """(required, string) The comment text to add to the photo."""
        self._set_input('Comment', value)

    def set_PhotoID(self, value):
        """(required, integer) The id of the photo to add a comment to."""
        self._set_input('PhotoID', value)

    def set_RefreshToken(self, value):
        """(required, string) OAuth refresh token, used when the access token is expired or not provided."""
        self._set_input('RefreshToken', value)

    def set_UserID(self, value):
        """(optional, string) Picasa username; defaults to 'default', i.e. the owner of the access token."""
        self._set_input('UserID', value)
"""
A ResultSet with methods tailored to the values returned by the AddCommentToPhoto choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class AddCommentToPhotoResultSet(ResultSet):
    """Typed accessors for the outputs of an AddCommentToPhoto execution."""

    def get_AccessToken(self):
        """(optional, string) A fresh access token, present when the supplied one had expired and was refreshed."""
        return self._output.get('AccessToken', None)

    def get_Response(self):
        """(xml) The response returned by Google Picasa."""
        return self._output.get('Response', None)
class AddCommentToPhotoChoreographyExecution(ChoreographyExecution):
    """Execution handle for AddCommentToPhoto; builds the matching result set."""

    def _make_result_set(self, response, path):
        """Wrap the raw *response* in an AddCommentToPhotoResultSet."""
        return AddCommentToPhotoResultSet(response, path)
| [
"mike.wimsatt@gmail.com"
] | mike.wimsatt@gmail.com |
2c683ab4db0dca1536a5101026e78e0f0ce3d233 | 707287238a36b8e5f3e26c347cca580549b441e5 | /combgen/linexts/pruesse_ruskey/coroutine/gen_all_no_sign.py | 9c51d769c4821cbf0a96ea4b8610985ba7f55d44 | [] | no_license | sahands/coroutine-generation | 2a01e3c5a36fc6b82d8087a15591a452e4bca636 | f0b318016b8925b2ab16640a588210548f7989db | refs/heads/master | 2016-09-06T04:54:02.453166 | 2015-01-06T21:32:58 | 2015-01-06T21:32:58 | 17,954,406 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from .gen_all import gen_all
def gen_all_no_sign(n, poset, a_b_pairs):
    """Yield every even-indexed sequence from gen_all with its first element
    dropped (presumably the sign component -- verify against gen_all)."""
    keep = True
    for extension in gen_all(n, poset, a_b_pairs):
        if keep:
            yield extension[1:]
        keep = not keep
| [
"sahands@gmail.com"
] | sahands@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.