blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d44967b219c2a51424a6673dad657e88b2aadb93 | bcad774066bda2ed89b37e200e00312e49a38f2b | /powertrain/models/project.py | 09397241e9bf9eae8741286ea6dc2373ccaf64cc | [] | no_license | VUIIS/powertrain | d52774fd4c4f80b4940003e76a12c63feaee774b | 6260c70dbcc36b1fec5d25cabba34ee72bc309cc | refs/heads/master | 2020-05-24T15:13:16.280501 | 2014-05-21T20:17:03 | 2014-05-21T20:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" project.py
A collection of MR sessions & associated their processing
"""
__author__ = 'Scott Burns <scott.s.burns@vanderbilt.edu>'
__copyright__ = 'Copyright 2014 Vanderbilt University. All Rights Reserved'
from .. import db
class Project(db.Model):
"""TODO: Documentation for Project"""
__tablename__ = 'projects'
id = db.Column(db.Integer, primary_key=True)
# attrs
name = db.Column(db.String)
# relations
mrsessions = db.relationship('MRSession', backref='project')
users = db.relationship("User",
secondary='project_to_user',
backref="projects")
tasks = db.relationship("Task", backref='project')
def __repr__(self):
return "<Project(name={0.name}, nsessions={1:d})>".format(self,
len(self.mrsessions))
| [
"scott.s.burns@gmail.com"
] | scott.s.burns@gmail.com |
a8bf4a6ab220f84abfd45454ca066195a871ad21 | 5506e6244a67bc46903858cb8ed4f6bf83d577c1 | /examples/fcis/coco/eval_coco.py | 39708a8000cb18f799b12a3bb8b1bbcb679604ba | [
"MIT"
] | permissive | knorth55/chainer-psroi-align | c122d00cf3b911546978053adccb19fff1486d06 | 66b55e9ea24f8fd36215a604a65235ba53026cc1 | refs/heads/master | 2020-03-19T05:03:29.820238 | 2018-08-05T14:31:17 | 2018-08-05T14:32:13 | 135,895,474 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,710 | py | import argparse
import chainer
from chainer import iterators
from chainercv.datasets import coco_instance_segmentation_label_names
from chainercv.datasets import COCOInstanceSegmentationDataset
from chainercv.evaluations import eval_instance_segmentation_coco
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
from psroi_align.links.model import FCISPSROIAlignResNet101
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--pretrained-model')
parser.add_argument('--gpu', type=int, default=-1)
args = parser.parse_args()
proposal_creator_params = {
'nms_thresh': 0.7,
'n_train_pre_nms': 12000,
'n_train_post_nms': 2000,
'n_test_pre_nms': 6000,
'n_test_post_nms': 1000,
'force_cpu_nms': False,
'min_size': 0
}
model = FCISPSROIAlignResNet101(
n_fg_class=len(coco_instance_segmentation_label_names),
min_size=800, max_size=1333,
anchor_scales=(2, 4, 8, 16, 32),
pretrained_model=args.pretrained_model,
proposal_creator_params=proposal_creator_params)
model.use_preset('coco_evaluate')
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
dataset = COCOInstanceSegmentationDataset(
split='minival', use_crowded=True,
return_crowded=True, return_area=True)
iterator = iterators.SerialIterator(
dataset, 1, repeat=False, shuffle=False)
in_values, out_values, rest_values = apply_to_iterator(
model.predict, iterator, hook=ProgressHook(len(dataset)))
# delete unused iterators explicitly
del in_values
pred_masks, pred_labels, pred_scores = out_values
gt_masks, gt_labels, gt_areas, gt_crowdeds = rest_values
result = eval_instance_segmentation_coco(
pred_masks, pred_labels, pred_scores,
gt_masks, gt_labels, gt_areas, gt_crowdeds)
keys = [
'map/iou=0.50:0.95/area=all/max_dets=100',
'map/iou=0.50/area=all/max_dets=100',
'map/iou=0.75/area=all/max_dets=100',
'map/iou=0.50:0.95/area=small/max_dets=100',
'map/iou=0.50:0.95/area=medium/max_dets=100',
'map/iou=0.50:0.95/area=large/max_dets=100',
'mar/iou=0.50:0.95/area=all/max_dets=1',
'mar/iou=0.50:0.95/area=all/max_dets=10',
'mar/iou=0.50:0.95/area=all/max_dets=100',
'mar/iou=0.50:0.95/area=small/max_dets=100',
'mar/iou=0.50:0.95/area=medium/max_dets=100',
'mar/iou=0.50:0.95/area=large/max_dets=100',
]
print('')
for key in keys:
print('{:s}: {:f}'.format(key, result[key]))
if __name__ == '__main__':
main()
| [
"shingogo@hotmail.co.jp"
] | shingogo@hotmail.co.jp |
8821ece150449d0aafa9471faf4ac0a8cfb93e82 | 2d08f3dd8cb72fc0f64ac98d6f5409b2d8117f75 | /service/dpms/tasks.py | 78029a4f875dde4552ff5a27d3bed4073287c647 | [] | no_license | Minexora/homeauto | b13291054543669376f1ed22a395ec1001883fa3 | dc44eec86236a916fcf589047ff5ed9272088950 | refs/heads/master | 2023-08-23T23:09:13.992762 | 2021-09-28T05:56:25 | 2021-09-28T05:56:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | from invoke import task
JOB = 'dpms'
PORT = 9095
TAG_x86 = f'bang6:5000/{JOB.lower()}_x86:latest'
TAG_pi = f'bang6:5000/{JOB.lower()}_pi:latest'
ANSIBLE_TAG = 'homeauto_dpms'
@task
def build_image_x86(ctx):
ctx.run(f'docker build --network=host -t {TAG_x86} .')
@task
def build_image_pi(ctx):
ctx.run(f'docker build --file Dockerfile.pi --network=host -t {TAG_pi} .')
@task(pre=[build_image_x86])
def push_image_x86(ctx):
ctx.run(f'docker push {TAG_x86}')
@task(pre=[build_image_pi])
def push_image_pi(ctx):
ctx.run(f'docker push {TAG_pi}')
@task(pre=[build_image_x86])
def shell(ctx):
ctx.run(f'docker run --rm -it --cap-add SYS_PTRACE -v /tmp/.X11-unix/:/tmp/.X11-unix/ -v /home/drewp/.Xauthority:/root/.Xauthority --net=host {TAG_x86} /bin/bash', pty=True)
@task(pre=[build_image_x86])
def local_run(ctx):
ctx.run(f'docker run --rm -it -v /tmp/.X11-unix/:/tmp/.X11-unix/ -v /home/drewp/.Xauthority:/root/.Xauthority -p {PORT}:{PORT} -v /etc/resolv.conf:/etc/resolv.conf --net=host {TAG_x86} python3 dpms_service.py -v', pty=True)
@task(pre=[push_image_x86, push_image_pi])
def redeploy(ctx):
ctx.run(f'sudo /my/proj/ansible/playbook -t {ANSIBLE_TAG}')
#ctx.run(f'supervisorctl -s http://bang:9001/ restart {JOB}_{PORT}')
| [
"drewp@bigasterisk.com"
] | drewp@bigasterisk.com |
b0c48d7ca0b2687f98e0c4e2ceb112548ed8608b | 2d0f940ebbf3d6f9eb5962715e2880bdc70f40b9 | /week-04/day-3/16.py | cc2313515ccdd4f5c1b0ea8758f8337071ee35a5 | [] | no_license | greenfox-velox/agyenes | 9482cdfc33d0f68f24209ce2af3629e2ccd1a120 | d341d14d7cd0bc627d02e02acabf79bc45c8ccfa | refs/heads/master | 2021-01-21T14:12:08.707870 | 2016-07-14T15:09:28 | 2016-07-14T15:09:28 | 58,042,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | # create a 300x300 canvas.
# make it look like a nigth sky:
# - The background should be black
# - The stars can be small squares
# - The stars should have random positions on the canvas
# - The stars should have random color (some shade of grey)
from tkinter import *
import random
root = Tk()
canvas = Canvas(root, width='300', height='300', bg='black')
canvas.pack()
i = 0
while i < 30:
r = random.randint(1, 290)
s = random.randint(1, 290)
print(canvas.create_rectangle(r, s, r + 5, s + 5, fill='gray'))
i += 1
canvas.pack()
root.mainloop()
| [
"aron.gyenes@gmail.com"
] | aron.gyenes@gmail.com |
63f1c759388a1f6c898a492e8f947d5c7f9b7a99 | 12d05a7f65e6ca8ffa701670ed1bec209af77a51 | /capsule_biblosa/models/biblosa/model_context_fusion.py | 3a4558947f0e4fb55f264baf4a5bd036125fec6a | [] | no_license | Bobby-Han/text-classification | ec8015e6bb438fceb8d7b61117519e2d6469c57d | 2fa6d3ed4f3b9288ff7fb385c9cced44daf522ca | refs/heads/master | 2023-02-24T13:27:36.761439 | 2021-02-01T06:27:53 | 2021-02-01T06:27:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py |
from models.biblosa.configs import cfg
from models.biblosa.utils_biblosa.record_log import _logger
import tensorflow as tf
from models.biblosa.model_template import ModelTemplate
from models.biblosa.nn_utils.nn import linear
from models.biblosa.nn_utils.baselines.interface import sentence_encoding_models
class ModelContextFusion(ModelTemplate):
def __init__(self, W_embedding, scope):
super(ModelContextFusion, self).__init__(W_embedding, scope)
self.update_tensor_add_ema_and_opt()
def build_network(self):
with tf.variable_scope('emb'):
emb = tf.nn.embedding_lookup(self.W_embedding, self.token_seq) # bs,sl1,tel
with tf.variable_scope('sent_encoding'):
rep = sentence_encoding_models(
emb, self.token_mask, cfg.context_fusion_method, 'relu',
'ct_based_sent2vec', cfg.wd, self.is_train, cfg.dropout, block_len=cfg.block_len)
print('emb ', emb.shape) # (?, 200, 256)
print('rep ', rep.shape) # (?, 512)
# exit(0)
with tf.variable_scope('output'):
pre_logits = tf.nn.relu(linear([rep], cfg.hidden_units_num, True, scope='pre_logits_linear',
wd=cfg.wd, input_keep_prob=cfg.dropout,
is_train=self.is_train)) # bs, hn
logits = linear([pre_logits], cfg.n_class, False, scope='get_output',
wd=cfg.wd, input_keep_prob=cfg.dropout, is_train=self.is_train) # bs, 5
_logger.done()
return logits | [
"2501967525@qq.com"
] | 2501967525@qq.com |
895ac87d8ad6d02777cd3138ceae448566c4978f | fb28a622b21f5127c83c7fe6193b6312294b2dbe | /apps/videos/serializers.py | 2168d3dee4765b054deb4c9de8ce3792e35b1ecf | [] | no_license | laoyouqing/video | 0cd608b1f9d3a94da4a537867fafce6f7dcd1297 | 9aa7ecf17f0145437408a8c979f819bb61617294 | refs/heads/master | 2022-12-19T11:02:01.343892 | 2019-08-21T04:00:13 | 2019-08-21T04:00:13 | 203,500,521 | 0 | 0 | null | 2022-12-08T06:03:17 | 2019-08-21T03:40:13 | Python | UTF-8 | Python | false | false | 1,732 | py | from rest_framework import serializers
from videos.models import IndexGoodsBanner, Video
class BannerSerializer(serializers.ModelSerializer):
'''轮播图'''
class Meta:
model = IndexGoodsBanner
fields = ('video','image')
class IndexSerializer(serializers.ModelSerializer):
'''首页'''
class Meta:
model = Video
fields = ('id','image','name','desc','status','url','try_see')
class DetailSerializer(serializers.ModelSerializer):
'''详情'''
class Meta:
model = Video
fields = ('id','image','name','desc','price','url','detail','number','standard_para','try_see','status','indexgoodsbanner_set')
depth=1
class IndexGoodsBannerSerializer1(serializers.ModelSerializer):
'''视频图片'''
create_time = serializers.DateTimeField(read_only=True, format="%Y-%m-%d %H:%M:%S")
update_time = serializers.DateTimeField(read_only=True, format="%Y-%m-%d %H:%M:%S")
class Meta:
model = IndexGoodsBanner
fields = '__all__'
# 后台
class VideoSerializer1(serializers.ModelSerializer):
'''视频'''
create_time = serializers.DateTimeField(read_only=True, format="%Y-%m-%d %H:%M:%S")
update_time = serializers.DateTimeField(read_only=True, format="%Y-%m-%d %H:%M:%S")
number = serializers.IntegerField(read_only=True)
indexgoodsbanner_set = IndexGoodsBannerSerializer1(many=True,read_only=True)
class Meta:
model = Video
fields = '__all__'
class UploadImageSerializer(serializers.Serializer):
'''图片上传'''
image=serializers.ImageField()
class UploadVideoSerializer(serializers.Serializer):
'''视频上传'''
video=serializers.FileField() | [
"lingo.lin@foxmail.com"
] | lingo.lin@foxmail.com |
3113ca577299f9cc1031afc05fceafb52ffc31cc | 9479825e910f237557325fe3513f3c4d39c841de | /pmg/bills.py | 1bad8a91e470ef5762b2bc0ceec71f996ab9023d | [
"Apache-2.0"
] | permissive | lukehuang/pmg-cms-2 | d9d37d6b4fec024f5a72d0c4078c557fd730de7c | 8a259c34a9cf6a4e6dc5881855290ad1bca1bc20 | refs/heads/master | 2021-01-20T21:06:21.684800 | 2017-08-29T09:34:38 | 2017-08-29T09:34:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,714 | py | from itertools import groupby
import datetime
import os.path
import bisect
from flask import url_for
MIN_YEAR = 2006
ICONS = {
"member": "bill-introduced.png",
"committee": "committee-discussion.png",
"house": "house.png",
"president": "signed-by-president.png",
"unknown": "bill-introduced.png",
}
def get_location(event):
if event.get('type') in ['bill-signed', 'bill-act-commenced', 'bill-enacted']:
return {
'name': 'Office of the President',
'class': 'president',
}
if event.get('house'):
return {
'name': event['house']['name'],
'class': event['house']['short_name'],
}
if event.get('committee'):
if 'house' in event['committee']:
return {
'name': event['committee']['house']['name'],
'class': event['committee']['house']['short_name'],
}
return {
'name': event['committee']['name'],
'url': url_for('committee_detail', committee_id=event['committee']['id']),
'class': '',
}
return {'name': 'Unknown', 'class': ''}
def get_agent(event, bill):
info = None
if event.get('type') in ['bill-signed', 'bill-act-commenced', 'bill-enacted']:
info = {
'name': 'The President',
'type': 'president',
}
elif event.get('type') == 'bill-introduced':
info = {
'name': bill['introduced_by'] or (bill.get('place_of_introduction') or {}).get('name'),
'type': 'member',
}
elif event.get('member'):
info = {
'name': event['member']['name'],
'type': 'member',
'url': url_for('member', member_id=event['member']['id'])
}
elif event.get('committee'):
info = {
'name': event['committee']['name'],
'type': 'committee',
'url': url_for('committee_detail', committee_id=event['committee']['id'])
}
elif event.get('house'):
info = {
'name': event['house']['name'],
'type': 'house',
}
else:
info = {'name': 'Unknown', 'type': 'unknown'}
info['icon'] = ICONS[info['type']]
return info
def bill_history(bill):
""" Work out the history of a bill and return a description of it. """
history = []
events = bill.get('events', [])
events.sort(key=lambda e: [e['date'], get_location(e), get_agent(e, bill)])
for location, location_events in groupby(events, get_location):
location_history = []
for agent, agent_events in groupby(location_events, lambda e: get_agent(e, bill)):
info = {'events': list(agent_events)}
info.update(agent)
location_history.append(info)
info = {'events': location_history}
info.update(location)
history.append(info)
return history
def count_parliamentary_days(date_from, date_to):
""" Count the number of parliamentary days between two dates, inclusive.
"""
i = bisect.bisect(PARLIAMENTARY_DAYS, date_from)
j = bisect.bisect(PARLIAMENTARY_DAYS, date_to)
return j - i + 1
def load_parliamentary_days():
""" Load the dates when parliament sat from data/parliament-sitting-days.txt
This file can be updated from a spreadsheet using bin/load_parliamentary_days.py
"""
with open(os.path.join(os.path.dirname(__file__), "../data/parliament-sitting-days.txt"), "r") as f:
lines = f.readlines()
dates = [datetime.date(*(int(x) for x in d.split("-"))) for d in lines]
return sorted(dates)
PARLIAMENTARY_DAYS = load_parliamentary_days()
| [
"greg@kempe.net"
] | greg@kempe.net |
868f86a8b4a8d2b93140c412dfc8cb90f7d0cf46 | 6300fcf67d4fcb5387a9f0f7370a8ffe8f4097d9 | /AutoParts/AutoParts/vehicle/migrations/0001_initial.py | 25670b28e9b21a438a2e658bc4a5e4d008468c93 | [] | no_license | Borislav-source/Final-Project | e34ac1cbb71e3a32ed490361d3583c2e1e8bfbc9 | 501b258d103c2e1b8947451f4bdf750709d040fd | refs/heads/master | 2023-07-17T15:03:19.390774 | 2021-09-01T14:06:09 | 2021-09-01T14:06:09 | 393,977,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | # Generated by Django 3.2.6 on 2021-08-11 11:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='EngineModel',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('engine', models.CharField(max_length=10)),
('power', models.CharField(max_length=10)),
('engine_code', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Manufacturer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=15)),
('image', models.FileField(upload_to='Manufacturers')),
],
),
migrations.CreateModel(
name='VehicleModels',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=15)),
('image_url', models.URLField()),
('production_date', models.DateTimeField()),
('engine', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vehicle.enginemodel')),
],
),
migrations.CreateModel(
name='Vehicle',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vehicle_type', models.CharField(choices=[('Car', 'Car'), ('Truck', 'Truck'), ('Motorcycle', 'Motorcycle')], max_length=25)),
('manufacturer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vehicle.manufacturer')),
('model', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vehicle.vehiclemodels')),
],
),
]
| [
"tsv.borislav@gmail.com"
] | tsv.borislav@gmail.com |
aa5977428acf1b3da665912ae0e0285b48cc5ba3 | 79f541042e4b4d6bb443e7a758ca918817ea0f33 | /Pygame/09_PythonGameDevelopment.py | 93dd9e7f40723bcdebe22646355eeeccb814bb5a | [] | no_license | ashutoshm1771/Source-Code-from-Tutorials | d5f950db8f5f648e87303835e9558eeba404939a | f5552d4bd0f4bebcf5c674ff730fcb61f2d7a1ce | refs/heads/master | 2020-09-15T06:08:31.777622 | 2019-11-22T09:08:31 | 2019-11-22T09:08:31 | 223,364,275 | 4 | 0 | null | 2019-11-22T09:01:51 | 2019-11-22T09:01:48 | null | UTF-8 | Python | false | false | 789 | py | import pygame
pygame.init()
white = (255,255,255)
black = (0,0,0)
red = (255,0,0)
gameDisplay = pygame.display.set_mode((800,600))
pygame.display.set_caption('Slither')
gameExit = False
lead_x = 300
lead_y = 300
lead_x_change = 0
clock = pygame.time.Clock()
while not gameExit:
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameExit = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
lead_x_change = -10
if event.key == pygame.K_RIGHT:
lead_x_change = 10
lead_x += lead_x_change
gameDisplay.fill(white)
pygame.draw.rect(gameDisplay, black, [lead_x,lead_y,10,10])
pygame.display.update()
clock.tick(15)
pygame.quit()
quit()
| [
"buckyroberts@gmail.com"
] | buckyroberts@gmail.com |
1c383a35c5f3d2a314d4952ad778c6e5be01641a | 4edb067c8c748e503e154bb2b9190843f6f1684a | /tests/test_text/test_postag.py | 31e8bb2600ffba623f8fbf1bb2281dcae63b8a9b | [
"Apache-2.0"
] | permissive | DistrictDataLabs/yellowbrick-docs-zh | 5ecbdccfaff4a6822d60250719b37af9b8d37f61 | 3118e67f2bed561a00885e6edb2cabb3520ad66b | refs/heads/master | 2021-04-09T11:00:29.709555 | 2019-04-06T15:23:55 | 2019-04-06T15:23:55 | 125,447,764 | 22 | 5 | Apache-2.0 | 2019-04-06T14:52:40 | 2018-03-16T01:37:09 | Python | UTF-8 | Python | false | false | 2,190 | py | # -*- coding: utf8 -*-
# tests.test_text.test_postag
# Tests for the part-of-speech tagging visualization
#
# Author: Rebecca Bilbro <rbilbro@districtdatalabs.com>
# Created: 2017-03-22 15:46
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: test_postag.py [bd9cbb9] rebecca.bilbro@bytecubed.com $
"""
Tests for the part-of-speech tagging visualization
"""
##########################################################################
## Imports
##########################################################################
import pytest
from yellowbrick.text.postag import *
try:
import nltk
from nltk import pos_tag, word_tokenize
except ImportError:
nltk = None
##########################################################################
## Data
##########################################################################
pie = """
In a small saucepan, combine sugar and eggs
until well blended. Cook over low heat, stirring
constantly, until mixture reaches 160° and coats
the back of a metal spoon. Remove from the heat.
Stir in chocolate and vanilla until smooth. Cool
to lukewarm (90°), stirring occasionally. In a small
bowl, cream butter until light and fluffy. Add cooled
chocolate mixture; beat on high speed for 5 minutes
or until light and fluffy. In another large bowl,
beat cream until it begins to thicken. Add
confectioners' sugar; beat until stiff peaks form.
Fold into chocolate mixture. Pour into crust. Chill
for at least 6 hours before serving. Garnish with
whipped cream and chocolate curls if desired.
"""
##########################################################################
## PosTag Tests
##########################################################################
class TestPosTag(object):
"""
PosTag (Part of Speech Tagging Visualizer) Tests
"""
@pytest.mark.skipif(nltk is None, reason="test requires nltk")
def test_integrated_postag(self):
"""
Assert no errors occur during postag integration
"""
tokens = word_tokenize(pie)
tagged = pos_tag(tokens)
visualizer = PosTagVisualizer()
visualizer.transform(tagged)
| [
"benjamin@bengfort.com"
] | benjamin@bengfort.com |
230af37383f1ca4090d9c867135ee998803d7b1c | d7ca36f20465870e67e7d6832f8e1b8348af12fc | /calculate/txt/cal_existed_ratio.py | 7984c110ad59c21e6693e043ff37b090a8839091 | [] | no_license | hlcr/LanguageNetworkAnalysis | c109e670534367c782fb71697a92a3ca95aba098 | 65f6c8086f3e4282b15359cc99cf57a682e6b814 | refs/heads/master | 2020-04-24T07:40:04.100213 | 2020-04-17T09:02:05 | 2020-04-17T09:02:05 | 171,805,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,282 | py | import tool.util as util
import networkx as nx
import os
import copy
from collections import OrderedDict
from collections import Counter
def get_speical_dict(w_dict):
r_dict = OrderedDict()
index = 0
stop_value = 0
for k,v in w_dict.items():
if v == 1 or v < stop_value:
break
r_dict[k] = v
index += 1
if index == 50:
stop_value = v
return r_dict
def calculate_existed_ratio(g1, g2, d1):
gg = get_speical_dict(g1)
s1 = set(gg.keys())
# s1 = set(gg.keys()) & set(nx.k_core(d1).nodes())
s2 = set(g2.keys())
s3 = s1 & s2
# return str(len(s1))+"\t"+str(len(s3))
if len(s1) == 0:
return 0
return str(len(s3)/len(s1))
# 计算最大公共子图的比率
# pkl_dir: pkl 所在的目录
# mcs_dir: 结果生成的目录
# is_front: 是否跟前面的比较
# key_word:关键词
# lap: 步长
def loop_compare(com_function, keyword_list, pkl_dir1, result_dir, mode=1, lap=1, type="pkl"):
for key in keyword_list:
print(key)
if mode == 0:
util.create_directory(result_dir + key + "//")
pkl_dir = pkl_dir1.format(key)
f_list = util.get_file_list(pkl_dir, '.txt')
os.chdir(pkl_dir)
result_list = []
# 升序排序
nw_list = sorted(f_list)
ii = len(nw_list)-1
while ii - 2*lap >= 0:
g2 = util.txt2dict(util.get_list_from_file(nw_list[ii]))
# 迭代生成子图
k = 1
while k < lap:
g2 = nx.compose(g2, util.get_nw(nw_list[ii - k]))
k += 1
ii -= lap
g1 = util.txt2dict(util.get_list_from_file(nw_list[ii]))
d1 = util.get_nw("D:\semantic analysis\新结果\去虚词去单字共现网络//{0}//p//".format(key)+nw_list[ii].split(".")[0]+".pkl")
# 迭代生成子图
k = 1
while k < lap:
g1 = nx.compose(g1, util.get_nw(nw_list[ii - k]))
k += 1
# 生成连通子图
if mode == 1:
r1, r2 = com_function(copy.deepcopy(g1), copy.deepcopy(g2))
result_list.append(nw_list[ii + lap][0:-4] + "\t" + str(r1))
result_list.append((nw_list[ii][0:-4] + "\t" + str(r2)))
elif mode == 0:
result_list = com_function(copy.deepcopy(g1), copy.deepcopy(g2))
util.save_file(result_dir + key + "//" + nw_list[ii + lap][0:-4] + ".txt", result_list)
elif mode == 2:
r1 = com_function(copy.deepcopy(g1), copy.deepcopy(g2), d1)
# result_list.append(str(r1))
result_list.append(nw_list[ii + lap][0:-4] + "\t" + str(r1))
ii -= lap
if mode != 0:
result_list.reverse()
util.save_file(result_dir+key+".txt", result_list)
key_list = ["美好","无聊"]
pkl_dir = r"D:\semantic analysis\新结果\去重去虚词去单字词频数\{0}//"
result_dir = r"D:\semantic analysis\新结果\去虚词去单字共现网络最大频率全图节点保留比例//"
loop_compare(calculate_existed_ratio, key_list, pkl_dir, result_dir, 2, 1)
# loop_compare(same_node_degree, key_list, pkl_dir, result_dir, 0)
| [
"hongliryan@gmail.com"
] | hongliryan@gmail.com |
de0d500cff740ded2b8b65ed5b9fa0c2b6958890 | fa0c53ac2a91409eaf0fc7c082a40caae3ffa0d8 | /com/lc/demoLearnPython/python_coroutine.py | 440f38100a78c152f46f97d2175aa78035aae963 | [] | no_license | ahviplc/pythonLCDemo | aba6d8deb1e766841461bd772560d1d50450057b | 22f149600dcfd4d769e9f74f1f12e3c3564e88c2 | refs/heads/master | 2023-07-24T01:41:59.791913 | 2023-07-07T02:32:45 | 2023-07-07T02:32:45 | 135,969,516 | 7 | 2 | null | 2023-02-02T03:24:14 | 2018-06-04T04:12:49 | Python | UTF-8 | Python | false | false | 3,443 | py | # _*_ coding: utf-8 _*_
"""
python_coroutine.py
Version: 0.1
Author: LC
DateTime: 2018年11月28日11:10:11
一加壹博客最Top-一起共创1+1>2的力量!~LC
LC博客url: http://oneplusone.top/index.html
http://oneplusone.vip/index.html
"""
import asyncio
import aiohttp
import threading
# 生产者、消费者例子
def consumer(): # 定义消费者,由于有yeild关键词,此消费者为一个生成器
print("[Consumer] Init Consumer ......")
r = "init ok" # 初始化返回结果,并在启动消费者时,返回给生产者
while True:
n = yield r # 消费者通过yield关键词接收生产者产生的消息,同时返回结果给生产者
print("[Consumer] conusme n = %s, r = %s" % (n, r))
r = "consume %s OK" % n # 消费者消费结果,下个循环返回给生产者
def produce(c): # 定义生产者,此时的 c 为一个生成器
print("[Producer] Init Producer ......")
r = c.send(None) # 启动消费者生成器,同时第一次接收返回结果
print("[Producer] Start Consumer, return %s" % r)
n = 0
while n < 5:
n += 1
print("[Producer] While, Producing %s ......" % n)
r = c.send(n) # 向消费者发送消息,同时准备接收结果。此时会切换到消费者执行
print("[Producer] Consumer return: %s" % r)
c.close() # 关闭消费者生成器
print("[Producer] Close Producer ......")
# produce(consumer())
# 异步IO例子:适配Python3.4,使用asyncio库
@asyncio.coroutine
def hello(index): # 通过装饰器asyncio.coroutine定义协程
print('Hello world! index=%s, thread=%s' % (index, threading.currentThread()))
yield from asyncio.sleep(1) # 模拟IO任务
print('Hello again! index=%s, thread=%s' % (index, threading.currentThread()))@asyncio.coroutine
loop = asyncio.get_event_loop() # 得到一个事件循环模型
tasks = [hello(1), hello(2)] # 初始化任务列表
loop.run_until_complete(asyncio.wait(tasks)) # 执行任务
loop.close() # 关闭事件循环列表
# 异步IO例子:适配Python3.5,使用async和await关键字
async def hello1(index): # 通过关键字async定义协程
print('Hello world! index=%s, thread=%s' % (index, threading.currentThread()))
await asyncio.sleep(1) # 模拟IO任务
print('Hello again! index=%s, thread=%s' % (index, threading.currentThread()))
loop = asyncio.get_event_loop() # 得到一个事件循环模型
tasks = [hello1(1), hello1(2)] # 初始化任务列表
loop.run_until_complete(asyncio.wait(tasks)) # 执行任务
loop.close() # 关闭事件循环列表
# aiohttp 实例
async def get(url):
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
print(url, resp.status)
print(url, await resp.text())
loop = asyncio.get_event_loop() # 得到一个事件循环模型
tasks = [ # 初始化任务列表
get("http://zhushou.360.cn/detail/index/soft_id/3283370"),
get("http://zhushou.360.cn/detail/index/soft_id/3264775"),
get("http://zhushou.360.cn/detail/index/soft_id/705490")
]
loop.run_until_complete(asyncio.wait(tasks)) # 执行任务
loop.close() # 关闭事件循环列表
| [
"ahlc@sina.cn"
] | ahlc@sina.cn |
c4ba986be18b469faf2ba1f5a3a4d5d81c6b8e66 | 788e275792f21d8b62334cddd718e6dfa347a7e2 | /citizensnb/pipelines.py | 1612d1a3090ba3dbbdeb99e6266a69d83dccd121 | [] | no_license | daniel-kanchev/citizensnb | 7d99e4e6d976cc173c4ec2fa9c9ec31bdf89a750 | 53fc15778834a3d415fa0eaf41ceed4f9a85243e | refs/heads/main | 2023-04-01T22:33:09.936186 | 2021-04-08T11:56:58 | 2021-04-08T11:56:58 | 355,888,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | from itemadapter import ItemAdapter
import sqlite3
class DatabasePipeline:
# Database setup
conn = sqlite3.connect('citizensnb.db')
c = conn.cursor()
def open_spider(self, spider):
self.c.execute(""" DROP TABLE IF EXISTS articles """)
self.c.execute(""" CREATE TABLE articles (
title text,
date text,
link text,
content text
) """)
def process_item(self, item, spider):
# Insert values
self.c.execute("INSERT INTO articles ("
"title, "
"date, "
"link, "
"content)"
" VALUES (?,?,?,?)",
(item.get('title'),
item.get('date'),
item.get('link'),
item.get('content')
))
if 'link' in item.keys():
print(f"New Article: {item['link']}")
else:
print(f"New Article: {item['title']}")
self.conn.commit() # commit after every entry
return item
def close_spider(self, spider):
self.conn.commit()
self.conn.close() | [
"daniel.kanchev@adata.pro"
] | daniel.kanchev@adata.pro |
f7e7997bb9af7f47fab1ac86f96bfc6ad43b2dbd | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/model/list_sub_customer_coupons_response.py | cf9c5ebc5ddb0a9eee98ed289b96806ff56b8f60 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,441 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListSubCustomerCouponsResponse(SdkResponse):
    """Response model for listing a sub-customer's coupons (auto-generated SDK code).

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """
    # Attributes listed here are masked as "****" by to_dict(); none for this model.
    sensitive_list = []
    openapi_types = {
        'count': 'int',
        'user_coupons': 'list[IQueryUserCouponsResultV2]'
    }
    attribute_map = {
        'count': 'count',
        'user_coupons': 'user_coupons'
    }
    def __init__(self, count=None, user_coupons=None):
        """ListSubCustomerCouponsResponse - a model defined in huaweicloud sdk"""
        super(ListSubCustomerCouponsResponse, self).__init__()
        # Backing fields for the properties below; only overwritten when the
        # caller actually supplied a value (None means "absent", not "zero").
        self._count = None
        self._user_coupons = None
        self.discriminator = None
        if count is not None:
            self.count = count
        if user_coupons is not None:
            self.user_coupons = user_coupons
    @property
    def count(self):
        """Gets the count of this ListSubCustomerCouponsResponse.

        Number of coupon records returned.

        :return: The count of this ListSubCustomerCouponsResponse.
        :rtype: int
        """
        return self._count
    @count.setter
    def count(self, count):
        """Sets the count of this ListSubCustomerCouponsResponse.

        :param count: The count of this ListSubCustomerCouponsResponse.
        :type: int
        """
        self._count = count
    @property
    def user_coupons(self):
        """Gets the user_coupons of this ListSubCustomerCouponsResponse.

        Coupon records; see IQueryUserCouponsResult for the element schema.

        :return: The user_coupons of this ListSubCustomerCouponsResponse.
        :rtype: list[IQueryUserCouponsResultV2]
        """
        return self._user_coupons
    @user_coupons.setter
    def user_coupons(self, user_coupons):
        """Sets the user_coupons of this ListSubCustomerCouponsResponse.

        :param user_coupons: The user_coupons of this ListSubCustomerCouponsResponse.
        :type: list[IQueryUserCouponsResultV2]
        """
        self._user_coupons = user_coupons
    def to_dict(self):
        """Returns the model properties as a dict"""
        # NOTE(review): relies on `six`, which is not imported in this chunk —
        # confirm the file (or package __init__) actually imports it.
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model elements; pass primitives through.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask fields declared sensitive; otherwise copy as-is.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding for the dump below.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListSubCustomerCouponsResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
1dce5d2fab9f4c3846f11d3dcfe047c8042d3654 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R1/benchmark/startCirq145.py | 1a54dc465638320a4a50b1bdc04f6cd82d0003f3 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=12
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Assemble the fixed benchmark circuit and terminate it with a measurement.

    The gate list below reproduces the generated sequence (the ``number=`` tags
    are the generator's original operation ids). *n* is kept for interface
    compatibility but is not used by the fixed gate sequence.
    """
    q = input_qubit
    operations = [
        cirq.H.on(q[0]),                    # number=1
        cirq.H.on(q[1]),                    # number=2
        cirq.CNOT.on(q[3], q[0]),           # number=6
        cirq.Z.on(q[3]),                    # number=7
        cirq.H.on(q[0]),                    # number=9
        cirq.CZ.on(q[3], q[0]),             # number=10
        cirq.H.on(q[0]),                    # number=11
        cirq.H.on(q[2]),                    # number=3
        cirq.H.on(q[3]),                    # number=4
    ]
    c = cirq.Circuit()  # circuit begin
    for op in operations:
        c.append(op)    # appended one-by-one to keep the original moment layout
    # circuit end
    c.append(cirq.measure(*q, key='result'))
    return c
def bitstring(bits):
    """Collapse an iterable of 0/1 values (ints or bools) into a string like '0110'."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    # Build the 4-qubit benchmark circuit on a column of grid qubits.
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for the Sycamore gate set using sqrt-iSWAP two-qubit gates.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    # Sample the circuit and histogram the measurement bitstrings.
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Dump frequencies, circuit length, and the circuit diagram to the CSV.
    writefile = open("../data/startCirq145.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
348467fbb33b3dd2a17a50137f42632fbc2167e2 | 9a8edc31e2898b4abe3d5be57459d15237ae90b6 | /Groupe3_BD_Serveur-master/g8/g3_tf_idf_week_v2.1.py | 259b81b7b5b50c735c05d887cb81f14de1eba16b | [] | no_license | sid-ut3/watchnews | c6245c01d6d4ff840113bd39826b58da2efbef8f | 3a2e1b56acefe26f4e7d99910d002e27699da302 | refs/heads/master | 2020-04-01T17:21:47.596354 | 2018-10-17T10:03:37 | 2018-10-17T10:03:37 | 153,425,499 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,410 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 08:57:58 2018
@author: laura
"""
"""
g1
@Author : F.C
g3
@Author : L.B. M.I. A.H. C.G.
g8
@Author : F.R.
"""
import MySQLdb
from datetime import datetime, timedelta
import timestring
from flask import Flask, request, jsonify
import json
import requests
from flask_restful import Resource, Api
import mysql.connector
from flask_mysqldb import MySQL
app = Flask(__name__) # we are using this variable to use the flask
# microframework
api = Api(app)
# MySQL configurations
# NOTE(review): credentials are hard-coded (root, empty password) — fine for a
# local dev database, but should come from configuration in production.
servername = "localhost"
username = "root"
passwordDB = ""
databasename = "bdd_test"
# Single module-level connection shared by every request handler.
db = MySQLdb.connect(user = username, passwd = passwordDB,
                     host = servername, db = databasename)
def exec_query(query) :
    """Run *query* against the module-level MySQL connection.

    :param query: SQL text to execute.
    :return: all fetched rows as a tuple of tuples.
    """
    # BUG fix: this helper was previously decorated with @app.route — Flask
    # would call it with no argument, raising TypeError on every request.
    # The route now lives on the actual view, api_link_by_source, below.
    cursor = db.cursor()
    cursor.execute(query)
    result = cursor.fetchall()
    cursor.close()
    return result


@app.route("/link_by_source/", methods = ['GET', 'POST', 'PATCH', 'PUT', 'DELETE'])
def api_link_by_source():
    """Return the week's per-word TF and TF-IDF series as JSON.

    Shape: {"Period": "<min> - <max>", "<word_id>_tf_idf": [[...]x7],
    "<word_id>_tf": [[...]x7]} — one inner list per day of the week, one
    value per article containing the word that day.
    """
    week = {}
    list_word = []
    # Bounds of the materialized-view week.
    date_min = """SELECT MIN(date_publication) FROM mv_tf_idf_week"""
    date_max = """SELECT MAX(date_publication) FROM mv_tf_idf_week"""
    date_min_res = exec_query(date_min)
    date_max_res = exec_query(date_max)
    date_max_res = str(date_max_res[0][0])
    date_min_res = str(date_min_res[0][0])
    week["Period"] = date_min_res + " - " + date_max_res
    # NOTE: values interpolated below come from the database itself, not from
    # user input, so injection risk is limited — still, parameterized queries
    # would be preferable.
    id_words = """SELECT DISTINCT id_word FROM mv_tf_idf_week WHERE
                date_publication BETWEEN %s and %s ORDER BY id_word
                """ % ("'" + date_min_res + "'", "'" + date_max_res + "'")
    id_words_res = exec_query(id_words)
    for i in range(0, len(id_words_res)):
        list_word.append(id_words_res[i][0])
    for word in range(0, len(list_word)):
        week_words_tf_idf = []
        week_words_tf = []
        list_week = []
        for day in range(7):
            day_query = datetime.strptime(date_min_res, "%Y-%m-%d") \
                        + timedelta(days=day)
            list_article = []
            id_article = """SELECT id_article FROM mv_tf_idf_week WHERE
                    date_publication = %s AND id_word = %s ORDER BY id_article
                    """ % ("'" + str(day_query) + "'", list_word[word])
            id_article_res = exec_query(id_article)
            list_tf_idf = []
            list_tf = []
            for article in range(0, len(id_article_res)):
                list_article.append(id_article_res[article][0])
                q_tf_idf = """SELECT tf_idf FROM mv_tf_idf_week WHERE
                    id_word = %s AND id_article = %s AND date_publication = %s
                    """ % (list_word[word], list_article[article], "'"
                           + str(day_query) + "'")
                tf_idf_res = exec_query(q_tf_idf)
                tf = []
                tf_idf = []
                for j in range(0, len(tf_idf_res)):
                    tf_idf.append(tf_idf_res[j][0])
                list_tf_idf.append(tf_idf[0])
                q_tf = """SELECT tf FROM mv_tf_idf_week WHERE
                    id_word = %s AND id_article = %s AND date_publication = %s
                    """ % (list_word[word], list_article[article], "'"
                           + str(day_query) + "'")
                tf_res = exec_query(q_tf)
                for k in range(0, len(tf_res)):
                    tf.append(tf_res[k][0])
                list_tf.append(tf[0])
            week_words_tf_idf.append(list_tf_idf)
            list_week.append(list_article)
            week_words_tf.append(list_tf)
        week[str(list_word[word])+"_tf_idf"] = week_words_tf_idf
        week[str(list_word[word])+"_tf"] = week_words_tf
    # Renamed from `json` to avoid shadowing the imported json module.
    payload = jsonify(week)
    return payload
"kps@anthracite.local"
] | kps@anthracite.local |
8e8d4a5bcd47f7570f4054c3f0e64a1f6642bbbf | 80c9df63673ffa88ed1ef54cece43e14734d1e0f | /run-client.py | 21039e4c700e389c1260dcd2f846aa44f0bf5673 | [] | no_license | Chovin/Troop | 03407e4ebc1012253bf1e6b1415e9db6254fc9c4 | 37ade96be9d5cc0d34b7b106755fe9d8f5ea8c2a | refs/heads/master | 2021-08-06T20:18:32.344172 | 2017-10-31T15:20:36 | 2017-10-31T15:20:36 | 109,102,534 | 0 | 0 | null | 2017-11-01T07:48:16 | 2017-11-01T07:48:16 | null | UTF-8 | Python | false | false | 1,917 | py | #!/usr/bin/env python
"""
Troop-Client
------------
Real-time collaborative Live Coding.
- Troop is a real-time collaborative tool that enables group live
coding within the same document. To run the client application it
must be able to connect to a running Troop Server instance on
your network.
- Using other Live Coding Languages:
Troop is designed to be used with FoxDot (http://foxdot.org) but
is also configured to work with Tidal Cycles (http://tidalcycles.org).
You can run this file with the `--mode` flag followed by "tidalcycles"
to use the Tidal Cycles language. You can also use any other application
that can accept code commands as strings via the stdin by specifying
the path of the interpreter application, such as ghci in the case of
Tidal Cycles, in place of the "tidalcycles" string when using the
`--mode` flag.
"""
from src.client import Client
from src.config import *
import os.path
import sys
# Pick the live-coding interpreter: --mode <name> selects it, default FoxDot.
if "--mode" in sys.argv:
    name = sys.argv[ sys.argv.index("--mode") + 1 ]
    lang = getInterpreter(name)
else:
    lang = FOXDOT
# Resolve the server address: public server flag, local config file, or prompt.
if "-p" in sys.argv or "--public" in sys.argv:
    host, port = PUBLIC_SERVER_ADDRESS
elif os.path.isfile('client.cfg'):
    host, port = Client.read_configuration_file('client.cfg')
    """
    You can set a configuration file if you are connecting to the same
    server on repeated occasions. A password should not be stored. The
    file (client.cfg) should look like:

        host=<host_ip>
        port=<port_no>

    """
else:
    host = readin("Troop Server Address", default="localhost")
    port = readin("Port Number", default="57890")
# Enable session logging with -l/--log.
if "--log" in sys.argv or "-l" in sys.argv:
    logging = True
else:
    logging = False
# NOTE(review): `name` is reused here — above it held the interpreter name,
# now it holds the user's display name (spaces replaced for the protocol).
name = readin("Enter a name").replace(" ", "_")
myClient = Client(host, port, name, lang, logging)
| [
"ryankirkbride26@gmail.com"
] | ryankirkbride26@gmail.com |
f9d2b343bf2502d1ead8b7f8b4962e7f3498deb9 | 8fd07ea363ba4263bafe25d213c72cc9a93e2b3e | /nsd2018-master/nsd1802/python/day03/position_args.py | 24fd09342ec789d08e99e8d2d19e2c6ac10e7456 | [] | no_license | ml758392/python_tedu | 82e12ae014f0fc81230386fab07f901510fc8837 | 9f20798604db0ac8cd7b69d8c7a52ee361ebc7a7 | refs/heads/master | 2020-04-12T08:30:42.354663 | 2019-03-29T11:55:30 | 2019-03-29T11:55:30 | 162,386,878 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | import sys
# Demo: print the command-line arguments verbatim.
print(sys.argv)  # sys.argv is the argv list from the sys module; argv[0] is the script path
# Example invocations:
# python3 position_args.py
# python3 position_args.py 10
# python3 position_args.py 10 bob
| [
"yy.tedu.cn"
] | yy.tedu.cn |
868c8919f895151b196d2ca5d3243b356e5b1603 | 7a57aeafb5bcf30510649a7e9e32400ff7994815 | /virtual/bin/django-admin | 3478668038e80cdb831bf4811a9756da9fab1a02 | [] | no_license | EugeneZnm/FINPLANNER | 2b17e53a461742a4889362b509b29cf5a4484be0 | ef3dd88be02bed091dbadcc9fbc500bd1ff9740d | refs/heads/master | 2022-12-15T22:51:08.822562 | 2020-12-01T22:59:08 | 2020-12-01T22:59:08 | 155,441,523 | 1 | 0 | null | 2022-11-22T03:05:21 | 2018-10-30T19:06:14 | Python | UTF-8 | Python | false | false | 323 | #!/home/eugene/Documents/Moringa/CORE/PYTHON/FINPLANNER/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Strip the "-script.py(w)"/".exe" suffix that setuptools console wrappers
    # add on Windows, so usage/error output shows the bare command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
| [
"eugenenzioki@gmail.com"
] | eugenenzioki@gmail.com | |
5dacc908061af45c9cdd386cefd44c6f23e9bea9 | 1bc2a635a93b5bc84606edf9ac2226851cac9e6d | /rolling-gui.py | 94053a32749bb8af8a2ce44486ab7997863fc25d | [
"MIT"
] | permissive | coolkat64/rolling | 819149cbb1e11a455b93a030477f9da91e2f93e4 | 4c3ee2401128e993a52ac9b52cdbd32e17728129 | refs/heads/master | 2022-11-29T00:35:14.058665 | 2020-07-31T20:37:15 | 2020-07-31T20:37:15 | 285,312,272 | 0 | 0 | MIT | 2020-08-05T14:25:48 | 2020-08-05T14:25:47 | null | UTF-8 | Python | false | false | 204 | py | # coding: utf-8
from sqlalchemy.sql import default_comparator
def main():
from rolling.gui.run import main as photographer_gui_run
photographer_gui_run()
if __name__ == "__main__":
main()
| [
"sevajol.bastien@gmail.com"
] | sevajol.bastien@gmail.com |
8cc25453f6a74c516925cd6336d6cfc5fb51e5c1 | 515a97129ce1b2b8eecca4b2087fde8985b82d5b | /Code-Scraps/old_modules/SpiceFeeds/Advertisements.py | beda188c44933632d7b4f61293836275184facc7 | [] | no_license | SpiceBot/scraps | 3ad6e81ac75e2b6a684fea64eb7e75477b0f4f63 | 90125e1397b57ac87cae5f3e506363aa04ddffdc | refs/heads/master | 2020-05-02T21:51:01.297114 | 2019-03-28T15:38:28 | 2019-03-28T15:38:28 | 178,232,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,619 | py | #!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals, absolute_import, print_function, division
import sopel.module
import sys
import os
from random import randint
moduledir = os.path.dirname(__file__)
shareddir = os.path.dirname(os.path.dirname(__file__))
sys.path.append(shareddir)
from BotShared import *
# author jimender2
defaultoptions = [
"DoubleD recommends these new drapes https://goo.gl/BMTMde", "Spiceduck for spicerex mascot 2k18", "Deathbybandaid is looking for developers for spicebot and spicethings",
"Upgrade to premium to remove ads", "Selling panties cheap. Msg DoubleD for more info.", "On sale now: tears of an orphan child!", "One-way ticket to Hell just $199",
"Get a free xboner here", "Extra, Extra, read all about it! A giant Beaver is attacking Canadian people!", "Want to make fast money? Sell Drugs", "Syrup",
"I love Apple products .... In the trash", "Did you know that I am a female?", "Wanna be friends?", "New Features released every day", "I feel neglected. Use me more. Duel assault in me!"]
hardcoded_not_in_this_chan = ["#spiceworks", "##spicebottest", "#spicebottest"]
@sopel.module.commands('ads', 'advertisements', 'ad', 'advertisement', 'spam')
def mainfunction(bot, trigger):
    """Check to see if module is enabled."""
    enablestatus, triggerargsarray, botcom, instigator = spicebot_prerun(bot, trigger, 'ads')
    # NOTE(review): running the command only when `enablestatus` is falsy looks
    # inverted — confirm against spicebot_prerun's contract in BotShared.
    if not enablestatus:

        # IF "&&" is in the full input, it is treated as multiple commands, and is split
        commands_array = spicemanip(bot, triggerargsarray, "split_&&")
        if commands_array == []:
            commands_array = [[]]

        for command_split_partial in commands_array:
            triggerargsarray_part = spicemanip(bot, command_split_partial, 'create')
            execute_main(bot, trigger, triggerargsarray_part, botcom, instigator)
def execute_main(bot, trigger, triggerargsarray, botcom, instigator):
    """Resolve the requested sayings sub-command and speak the resulting ad."""
    databasekey = 'ads'
    # The first argument selects the sub-command; default to fetching an ad.
    command = spicemanip(bot, triggerargsarray, 1) or 'get'
    # Seed the database with the stock ads unless the user is adding one anyway.
    needs_seeding = not sayingscheck(bot, databasekey) and command != "add"
    if needs_seeding:
        sayingsmodule(bot, databasekey, defaultoptions, 'initialise')
    message = sayingsmodule(bot, databasekey, triggerargsarray, command)
    osd(bot, trigger.sender, 'say', ["[Advertisement]", message, "[Advertisement]"])
@sopel.module.interval(60)
def advertisement(bot):
    """Periodically broadcast a random advert to every channel with 'ads' enabled.

    Polled every 60 seconds, but only fires once a randomly chosen cooldown
    (20–120 minutes) has elapsed since the previous broadcast.
    """
    # BUG fix: `databasekey` was referenced below but never defined in this
    # function (it only existed locally inside execute_main) -> NameError.
    databasekey = 'ads'
    now = time.time()
    last_timesince = time_since(bot, bot.nick, "ads_last_time") or 0
    next_timeout = get_database_value(bot, bot.nick, "ads_next_timeout") or 0
    if last_timesince <= next_timeout:
        return
    # Record this broadcast and schedule the next one 20–120 minutes out.
    set_database_value(bot, bot.nick, "ads_last_time", now)
    next_timeout = randint(1200, 7200)
    set_database_value(bot, bot.nick, "ads_next_timeout", next_timeout)
    message = sayingsmodule(bot, databasekey, defaultoptions, 'get') or "Spiceduck for Spiceworks mascot 2k18"
    for channel in bot.channels:
        if channel not in hardcoded_not_in_this_chan:
            channelmodulesarray = get_database_value(bot, channel, 'modules_enabled') or []
            if 'ads' in channelmodulesarray:
                osd(bot, channel, 'say', ["[Advertisement]", message, "[Advertisement]"])
# compare timestamps
def time_since(bot, nick, databasekey):
    """Return the seconds elapsed since the timestamp stored under *databasekey*.

    Returns 0 when no timestamp has been stored yet — the original crashed
    with TypeError on int(None) in that case (callers already treat the
    result as "0 means nothing recorded" via `... or 0`).
    """
    last = get_database_value(bot, nick, databasekey)
    if last is None:
        return 0
    now = time.time()
    return abs(now - int(last))
| [
"sam@deathbybandaid.net"
] | sam@deathbybandaid.net |
4d58aeda11c26fdd95e76fa20027c44e07eed599 | b13ca274b4463c9900840ee6516094b7509b6041 | /empower/apps/e2eqosmanager/algorithms/exponentialquantumadaptation.py | 1a72f07dda32cb9d4c2b1b07847109214c2d15c8 | [
"Apache-2.0"
] | permissive | imec-idlab/sdn_wifi_manager | 09d206f2f649aa715752d3c44e011d3f54faf592 | eda52649f855722fdec1d02e25a28c61a8fbda06 | refs/heads/master | 2021-06-23T08:03:22.482931 | 2020-12-03T11:30:10 | 2020-12-03T11:30:10 | 162,106,793 | 0 | 0 | Apache-2.0 | 2019-03-27T16:23:31 | 2018-12-17T09:33:47 | Python | UTF-8 | Python | false | false | 2,940 | py | #!/usr/bin/env python3
#
# Copyright (c) 2019 Pedro Heleno Isolani
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Adaptation states armed by exploit()/explore() and consumed by get_new_quantum().
KEEP_CONFIGURATION = 0
EXPLOITATION = 1
EXPLORATION = 2

""" Simple Exponential Quantum Adaptation using different adaptation rates for Exploitation and Exploration """


class ExponentialQuantumAdaptation:
    """Adapt a Best-Effort (BE) quantum with exponential up/down steps.

    exploit() votes to shrink the quantum; after *exploitation_trigger*
    consecutive votes the next get_new_quantum() decreases it by
    *exploitation_rate* (never below *min_quantum*). explore() votes to grow
    it; after *exploration_trigger* votes the next get_new_quantum() increases
    it by *exploration_rate*. Each get_new_quantum() call consumes the armed
    state and resets to KEEP_CONFIGURATION.
    """

    def __init__(self,
                 exploration_rate=0.05, exploration_trigger=5,
                 exploitation_rate=0.20, exploitation_trigger=1,
                 min_quantum=200):
        self.exploration_rate = exploration_rate          # fraction to increase BE quantum
        self.exploitation_rate = exploitation_rate        # fraction to decrease BE quantum
        self.exploration_trigger = exploration_trigger    # explore() votes needed to arm EXPLORATION
        self.exploitation_trigger = exploitation_trigger  # exploit() votes needed to arm EXPLOITATION
        self.exploration_counter = 0                      # pending explore() votes
        self.exploitation_counter = 0                     # pending exploit() votes
        self.min_quantum = min_quantum                    # lower bound for the adapted quantum
        self.status = KEEP_CONFIGURATION

    def exploit(self):
        """Register one shrink vote; arm EXPLOITATION once the trigger is reached."""
        self.exploitation_counter += 1
        if self.exploitation_counter >= self.exploitation_trigger:
            self.status = EXPLOITATION
            self.exploitation_counter = 0

    def explore(self):
        """Register one grow vote; arm EXPLORATION once the trigger is reached."""
        self.exploration_counter += 1
        if self.exploration_counter >= self.exploration_trigger:
            self.status = EXPLORATION
            self.exploration_counter = 0

    def get_new_quantum(self, old_quantum):
        """Return the adapted quantum for the armed status, then reset the status.

        :param old_quantum: current quantum value.
        :return: int — grown, shrunk (clamped to min_quantum), or unchanged.
        """
        if self.status == EXPLORATION:
            new_quantum = int(old_quantum + (old_quantum * self.exploration_rate))
        elif self.status == EXPLOITATION:
            new_quantum = int(old_quantum - (old_quantum * self.exploitation_rate))
            if new_quantum < self.min_quantum:
                new_quantum = self.min_quantum
        else:
            new_quantum = int(old_quantum)
        self.status = KEEP_CONFIGURATION
        return new_quantum

    def __str__(self):
        # BUG fix: the original ran the exploitation and exploration halves
        # together with no separator ("counter: 0Exploration rate:...").
        return ("Exploitation rate: " + str(self.exploitation_rate) +
                " trigger: " + str(self.exploitation_trigger) +
                " counter: " + str(self.exploitation_counter) +
                " | Exploration rate: " + str(self.exploration_rate) +
                " trigger: " + str(self.exploration_trigger) +
                " counter: " + str(self.exploration_counter))
| [
"pedroisolani@gmail.com"
] | pedroisolani@gmail.com |
5c462392b871c1e216a91cdc40e909732cc9f8cd | e238db1ae3e641d84af17e9cf6a881eb43b20039 | /a.py | 6f737a52fd3c1d8d2e2e590d51737e14a909dc53 | [] | no_license | RevathiRathi/Revat | 1aa478f51b147e7b044d7519f54938eda0149619 | 792757fb56243846e3049889bf502014c62d658e | refs/heads/master | 2020-04-15T05:01:36.873956 | 2019-07-18T13:40:34 | 2019-07-18T13:40:34 | 164,406,245 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | #revs
s=input()
t=[]
for i in s:
t.append(s.count(i))
for i in range(0,len(t)):
a=max(t)
if t[i]==a:
print(s[i])
break
| [
"noreply@github.com"
] | RevathiRathi.noreply@github.com |
f0b562001a2fdb11fa8dc1bab18cd321327500b1 | 4546398a18590e4e182629fb55d185547dd6df0a | /2023/problems/guillaume/data/random_generator.py | 0b2cd7f91cca91dc4c3ec1c4b0aedb4eb8066aa8 | [] | no_license | ForritunarkeppniFramhaldsskolanna/Keppnir | 352341fa97c6349af65b513c03171f3e706f7db2 | 65c8eb5358d8a49f956edf76c2d47b9372accc3c | refs/heads/master | 2023-04-28T15:33:36.396225 | 2023-04-23T15:00:15 | 2023-04-23T15:00:15 | 78,303,702 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | #!/usr/bin/python3
import sys
import random
# Seed from the last argument so generated cases are reproducible.
random.seed(int(sys.argv[-1]))
# NOTE(review): eval() on argv allows expressions like "10**5"; acceptable for a
# trusted contest-data generator, but never expose this to untrusted input.
min_n = eval(sys.argv[1])
max_n = eval(sys.argv[2])
min_non_draws = eval(sys.argv[3])
max_non_draws = eval(sys.argv[4])
tp = sys.argv[5]
n = random.randint(min_n, max_n)
# Clamp the non-draw bounds so they never exceed the chosen length n.
min_non_draws = min((min_non_draws, n))
max_non_draws = min((max_non_draws, n))
def uniform_interleave(s, t):
    """Merge *s* and *t* into one string, uniformly at random, preserving
    the internal order of each input (a random riffle shuffle).

    Draws one random.randint per output character, weighted by how many
    characters remain in each input.
    """
    taken_s, taken_t = 0, 0
    merged = []
    total = len(s) + len(t)
    while taken_s + taken_t < total:
        left_s = len(s) - taken_s
        left_t = len(t) - taken_t
        # Pick from s with probability proportional to its remaining length.
        if random.randint(1, left_s + left_t) <= left_s:
            merged.append(s[taken_s])
            taken_s += 1
        else:
            merged.append(t[taken_t])
            taken_t += 1
    return ''.join(merged)
def get_uniform():
    """Build a result string with G/A/D games uniformly interleaved.

    Uses the module-level n / min_non_draws / max_non_draws parsed from argv:
    x games are non-draws, y of those are 'G' wins, the rest 'A' wins, and the
    remaining n-x games are draws 'D'.
    """
    x = random.randint(min_non_draws, max_non_draws)
    y = random.randint(0, x)
    guillaume = "G"*y
    arnar = "A"*(x-y)
    draws = "D"*(n-x)
    return uniform_interleave(draws, uniform_interleave(arnar, guillaume))
def get_streak():
    """Like get_uniform(), but every 'G' win precedes every 'A' win (one streak);
    only the draws are interleaved at random positions."""
    x = random.randint(min_non_draws, max_non_draws)
    y = random.randint(0, x)
    valid_games = ["G" if i < y else "A" for i in range(x)]
    draws = "D"*(n - x)
    return uniform_interleave(valid_games, draws)
# Build the test case: a random layout, a streak layout, or a tiled literal pattern.
d = []
if tp == "uniform":
    d = get_uniform()
elif tp == "streak":
    d = get_streak()
else:
    # Any other "type" argument is treated as a pattern to tile up to length n.
    d = ''.join([tp[i%len(tp)] for i in range(n)])
# Emit in the problem's input format: length, then the game string.
print(len(d))
print(d)
| [
"bjarki.agust@gmail.com"
] | bjarki.agust@gmail.com |
214b28f3d904980ec408c9cf9c6bc8e5727d741a | 8e07b5b7a8dd38e0ef2c7ffc97d0392d886f32e6 | /venv/Lib/site-packages/mypy/typeshed/stdlib/3/_importlib_modulespec.pyi | a3cf0197992e4fc82dffb75581e346cf586ef642 | [] | no_license | RodrigoNeto/cursopythonyt | fc064a2e6106324e22a23c54bdb9c31040ac9eb6 | 279dad531e21a9c7121b73d84fcbdd714f435e7e | refs/heads/master | 2023-07-03T00:54:09.795054 | 2021-08-13T12:42:24 | 2021-08-13T12:42:24 | 395,646,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | pyi | # ModuleSpec, ModuleType, Loader are part of a dependency cycle.
# They are officially defined/exported in other places:
#
# - ModuleType in types
# - Loader in importlib.abc
# - ModuleSpec in importlib.machinery (3.4 and later only)
#
# _Loader is the PEP-451-defined interface for a loader type/object.
from abc import ABCMeta
from typing import Any, Dict, List, Optional, Protocol
# Structural (duck-typed) loader interface per PEP 451.
class _Loader(Protocol):
    def load_module(self, fullname: str) -> ModuleType: ...

# Import-system metadata for a module (importlib.machinery.ModuleSpec).
class ModuleSpec:
    def __init__(
        self,
        name: str,
        loader: Optional[Loader],
        *,
        origin: Optional[str] = ...,
        loader_state: Any = ...,
        is_package: Optional[bool] = ...,
    ) -> None: ...
    name: str
    loader: Optional[_Loader]
    origin: Optional[str]
    submodule_search_locations: Optional[List[str]]
    loader_state: Any
    cached: Optional[str]
    parent: Optional[str]
    has_location: bool

# The type of module objects (re-exported as types.ModuleType).
class ModuleType:
    __name__: str
    __file__: str
    __dict__: Dict[str, Any]
    __loader__: Optional[_Loader]
    __package__: Optional[str]
    __spec__: Optional[ModuleSpec]
    def __init__(self, name: str, doc: Optional[str] = ...) -> None: ...

# Abstract loader base class (re-exported as importlib.abc.Loader).
class Loader(metaclass=ABCMeta):
    def load_module(self, fullname: str) -> ModuleType: ...
    def module_repr(self, module: ModuleType) -> str: ...
    def create_module(self, spec: ModuleSpec) -> Optional[ModuleType]: ...
    # Not defined on the actual class for backwards-compatibility reasons,
    # but expected in new code.
    def exec_module(self, module: ModuleType) -> None: ...
| [
"rodrigoneto.forseti@gmail.com"
] | rodrigoneto.forseti@gmail.com |
612589466e47db3874b810aa5c365e41273ef98f | 42efe06c233479b1882cc6e0e418c9ef6e0a1434 | /CodingTest_Study1/week06/ex1149.py | 8f2567cd9a2925229f49bd1b41e1bb474b224f59 | [] | no_license | KimTaesong/Algorithm | 146d53cb24b12355330b212edb87ec9d22123359 | 4c32275dd21fa692258e5348a02ce0c1c0b4ec91 | refs/heads/master | 2023-06-23T16:46:52.495443 | 2021-07-21T00:30:49 | 2021-07-21T00:30:49 | 194,765,200 | 8 | 2 | null | 2021-07-21T00:30:49 | 2019-07-02T01:20:10 | Python | UTF-8 | Python | false | false | 511 | py | n = int(input())
painting_cost = [[0, 0, 0]]
dp_cost = [[0] * 3 for _ in range(n+1)]
for i in range(n):
painting_cost.append(list(map(int, input().split())))
for i in range(3):
dp_cost[1][i] = painting_cost[1][i]
for i in range(2, n+1):
dp_cost[i][0] = painting_cost[i][0] + min(dp_cost[i-1][1], dp_cost[i-1][2])
dp_cost[i][1] = painting_cost[i][1] + min(dp_cost[i-1][0], dp_cost[i-1][2])
dp_cost[i][2] = painting_cost[i][2] + min(dp_cost[i-1][0], dp_cost[i-1][1])
print(min(dp_cost[n]))
| [
"taesongweb@gmail.com"
] | taesongweb@gmail.com |
85c0b27292ad066cf2bf4e1ce48054ec9373c3aa | 236d1d029767e4aa342df6d9dc425cb5a5247707 | /wheelwhere_server/search/views.py | 86d4b42815c8bff955f457ee768c888676655473 | [] | no_license | KilJaeeun/wheelwhereserver | 2f3ed05cbea4d34bde7af52acebbff01645be052 | 9428876a12ea2a8480f2811de793132555282db9 | refs/heads/master | 2022-12-08T02:51:53.357391 | 2020-08-27T05:59:41 | 2020-08-27T05:59:41 | 285,256,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | from django.shortcuts import render
from django.db.models import Q
# Create your views here.
from facility.models import post
from rest_framework.response import Response
from rest_framework import permissions
from rest_framework.views import APIView
class SearchView(APIView):
    """Keyword search over facility posts (name, address, or description)."""

    permission_classes = [permissions.AllowAny]

    def get(self, request):
        """Return all posts whose name/address/description contains ?word=.

        Response: {"msg": "success", "object_list": [<post dict>, ...]}.
        NOTE(review): request.GET['word'] raises MultiValueDictKeyError when
        the parameter is absent — confirm whether a 400 response is intended.
        """
        word = request.GET['word']
        post_list = post.objects.filter(
            Q(name__icontains=word) | Q(address__icontains=word) | Q(description__icontains=word)
        ).distinct()  # drop duplicates
        context = []
        for i in post_list:
            # FIX: the original assigned param['is_tuck'] twice (duplicated line).
            param = {
                'id': i.id,
                'name': i.name,
                'is_toilet': i.is_toilet,
                'is_elibator': i.is_elibator,  # field name kept as defined on the model
                'is_parking': i.is_parking,
                'is_tuck': i.is_tuck,
                'is_helper': i.is_helper,
                'description': i.description,
                'latitude': i.latitude,
                'longitude': i.longitude,
                'star': i.star,
            }
            context.append(param)
        return Response({'msg': 'success', 'object_list': context})
"rha3122@naver.com"
] | rha3122@naver.com |
1ca24d79f42173ecdb2eae95b6d1da39aedafcb7 | a72724b201b24f287555a695840d662b13a8dee5 | /tv/models.py | 1bb02afc9510c8b7f12d828a6732c17baa19cf0d | [] | no_license | webmalc/fh-django | f2bdfe63e2322abaf3523aff5d259495d3b1e1d9 | 0d11ba23c8def6dd1b03d8362730f8665915bd34 | refs/heads/master | 2020-05-14T02:38:37.757625 | 2016-01-29T14:45:28 | 2016-01-29T14:45:28 | 37,921,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | from django.db import models
from fh.models import CommonInfo
from django.core.urlresolvers import reverse
class Channel(CommonInfo):
    """A TV channel: title, category, embed code(s), and enable/favorite flags."""
    # Category name -> Font Awesome icon class used when rendering the category.
    CATEGORIES = {
        'Kids': 'fa fa-child',
        'Educational': 'fa fa-flask',
        'Public': 'fa fa-group',
        'Music': 'fa fa-music',
        'Sport': 'fa fa-futbol-o',
        'Regional': 'fa fa-globe',
        'Entertainment': 'fa fa-star',
        'Man': 'fa fa-male',
        'Woman': 'fa fa-female',
        'Films': 'fa fa-film',
        'News': 'fa fa-newspaper-o',
        'Religion': 'fa fa-bolt',
        'Other': 'fa fa-tv'
    }
    title = models.CharField(max_length=255)
    # Choices are the sorted category names (value and label identical).
    category = models.CharField(max_length=255, choices=[(i, i) for i in sorted(CATEGORIES.keys())])
    is_enabled = models.BooleanField(default=True, verbose_name='Is enabled?')
    # Player/embed markup; alternative_code is an optional fallback.
    code = models.TextField()
    alternative_code = models.TextField(null=True, blank=True)
    is_favorite = models.BooleanField(default=False, verbose_name='Is favorite?')
    def get_absolute_url(self):
        # Canonical detail URL for this channel.
        return reverse('tv:channel_show', kwargs={'pk': self.pk})
    def __str__(self):
        return '%s' % self.title
    class Meta:
        # Default listing: grouped by category, alphabetical within it.
        ordering = ['category', 'title']
| [
"webmalc@gmail.com"
] | webmalc@gmail.com |
fcd45042b397c3fd5d3c2009a394807a3bdf8735 | 1b5f653955779f45e78ca6dda925518779d09e8f | /submissions/1029.py | 509d2f71c2c5d9cfde862b3fbe5d8ef3da0849b1 | [] | no_license | LeonardoSaid/uri-py-solutions | ad285f552934ead54ad2410e23113e84b0724f72 | 43c10c0e99e99d22b4b5ae2871e5d897f8823b42 | refs/heads/master | 2020-08-11T00:28:48.661578 | 2020-04-23T20:21:39 | 2020-04-23T20:21:39 | 214,453,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | #fast fib using memorization (https://stackoverflow.com/questions/18172257/efficient-calculation-of-fibonacci-series)
def fib(n, computed = {0: 0, 1: 1}):
    """Memoized Fibonacci.

    The mutable default dict is intentional here: it acts as a cache shared
    across calls, so repeated queries in the same run are O(1).
    """
    if n in computed:
        return computed[n]
    computed[n] = fib(n-1, computed) + fib(n-2, computed)
    return computed[n]

def fib_numcalls(n):
    """Number of calls the naive (unmemoized) recursive fib(n) would make: 2*fib(n+1) - 2."""
    return 2 * fib(n + 1) - 2
# First line: number of test cases; then one n per line (loop index unused).
for i in range(int(input())):
    x = int(input())
    print('fib(%d) = %d calls = %d' % (x, fib_numcalls(x), fib(x)))
| [
"noreply@github.com"
] | LeonardoSaid.noreply@github.com |
da897abaf6951a947d5ed46d46c2df569d6e8f84 | b21e073975c0f7a4f94c9f3523b8f5dcbf98a521 | /pt/105/python/main.py | 392d9a0de4cfd4ca9168a1beb26056c80b83cde8 | [
"MIT"
] | permissive | franciscogomes2020/exercises | 3ed6877f945463ed01c7fcd55271689171b0ad9d | 8b33c4b9349a9331e4002a8225adc2a482c70024 | refs/heads/master | 2023-07-04T15:54:38.919185 | 2021-08-19T20:03:54 | 2021-08-19T20:03:54 | 396,992,428 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | # Faça um programa que tenha uma função notas() que pode receber várias notas de alunos e vai retornar um dicionário com as seguintes informações:
- Quantidade de notas
- A maior nota
- A menor nota
- A média da turma
- A situação (opcional)
Adicione também as docstrings dessa função para consulta pelo desenvolvedor.
| [
"71292537+franciscogomes2020@users.noreply.github.com"
] | 71292537+franciscogomes2020@users.noreply.github.com |
5e126dbaf3ae10daba0e35b740c00f217bfdbb10 | d19cbf8a0483c17a9d9779535b99bd340c4c5712 | /application/modules/transfert/api_trans.py | 5c8549d57aed5f0a1f80b5b53d9fbad165cc2e66 | [] | no_license | wilrona/CarryUp | 1273eb7f0432f1cc33410853f4ebf3940c705b1c | 3d4f65f45bf7859d10f2938559447559f3a5fa0a | refs/heads/master | 2022-12-10T15:16:22.096920 | 2020-08-31T13:30:29 | 2020-08-31T13:30:29 | 291,722,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,050 | py | __author__ = 'User'
from ...modules import *
from ..bcmd.models_bcmd import Documents, LigneDoc
from ..article.models_item import Articles, Variantes
from ..magasin.models_mag import Magasins
from ..compte.models_compte import Comptes
prefix = Blueprint('api_transfert', __name__)
def make_public(data):
    """Copy a transfer-document dict for API output.

    'id' is stringified and a 'uri_view' detail URL is added; 'etat' also gains
    a human-readable 'etat_name'. Requires a Flask request context (url_for).
    """
    new_task = {}
    for field in data:
        if field == 'id':
            new_task['id'] = str(data['id'])
            new_task['uri_view'] = url_for('transfert.view', data_id=data['id'], _external=True)
        else:
            new_task[field] = data[field]
        if field == 'etat':
            # Map the numeric state to its French label; 1/unknown -> "En attente".
            new_task['etat_name'] = 'En attente'
            if new_task['etat'] == 2:
                new_task['etat_name'] = 'Reception partielle'
            if new_task['etat'] == 3:
                new_task['etat_name'] = 'Terminee'
            if new_task['etat'] == 4:
                new_task['etat_name'] = 'Annulation'
    return new_task
def make_public_variante(data):
    """Copy a variante dict for API output.

    'id' is stringified; when the 'name' key is seen, a 'name_variante' label
    is added: the parent article's name, suffixed with " (<variante name>)"
    when the article has more than one variante. Expects data['article_id'] to
    be the article object.
    """
    public = {}
    for key, value in data.items():
        if key == 'id':
            public['id'] = str(value)
            continue
        public[key] = value
        if key == 'name':
            article = data['article_id']
            label = article.name
            if len(article.variantes) > 1:
                label += ' (' + public['name'] + ')'
            public['name_variante'] = label
    return public
@prefix.route('/<objectid:compte_id>', methods=['GET'])
def index(compte_id):
    """Paginated JSON list of transfer documents (type_transaction=1) for an account.

    Query params: q (reference filter), sort + order (asc/desc), page, per.
    """
    sort = request.args.get('sort')
    order = request.args.get('order')
    # BUG fix: str(request.args.get('q')) turned a missing parameter into the
    # literal string "None", so the reference filter was ALWAYS applied.
    q = request.args.get('q')
    try:
        page = int(request.args.get('page', 1))
    except ValueError:
        page = 1

    limit = 10
    if request.args.get('per'):
        limit = int(request.args.get('per'))
    offset = (page - 1) * limit if page > 1 else 0

    base = Q(compte=compte_id) & Q(type_transaction=1)
    if q:
        datas = Documents.objects(base & Q(reference__icontains=q))
    else:
        datas = Documents.objects(base)
    datas = datas.skip(offset).limit(limit)

    if sort is not None:
        order_by = sort
        if order == 'desc':
            order_by = '-' + order_by
        # BUG fix: querysets are immutable — order_by() returns a new queryset,
        # the original discarded it so the ordering was never applied.
        datas = datas.order_by(order_by)

    count = Documents.objects(base).count() / limit
    return jsonify({'data': [make_public(data) for data in datas], 'total_page': count, 'order': order, 'sort': sort})
@prefix.route('/check/achat/', methods=['POST'])
def check():
    """Return JSON details (name, stock, magasins) for one variante.

    Body: JSON with 'id' (variante id) and 'quantite'; query string carries
    magasin_origine for the stock lookup. Empty body id -> empty JSON object.
    NOTE(review): assumes request.json always contains both keys — confirm callers.
    """
    article_id = request.json['id']
    quantite = request.json['quantite']
    magasin_id = request.args.get('magasin_origine')
    info = {}
    if article_id :
        variante = Variantes.objects.get(id=article_id)
        info['name'] = variante.article_id.name
        # Suffix the variante name only when the article has several variantes.
        if len(variante.article_id.variantes) > 1 :
            info['name'] += ' ('+variante.name+')'
        info['id'] = article_id
        info['magasin'] = variante.MagVarianteID()
        info['stock'] = variante.stock_magasin(magasin_id)
        info['quantite'] = int(quantite)
    return jsonify(info)
@prefix.route('/all/achat/<objectid:compte_id>/', methods=['GET'])
def allArticle(compte_id):
datas = []
magasin_origine_id = request.args.get('magasin_origine')
magasin_destina_id = request.args.get('magasin_destina')
compte = Comptes.objects.get(id=compte_id)
magasin_origine = None
if magasin_origine_id:
magasin_origine = Magasins.objects.get(id=magasin_origine_id)
magasin_destina = None
if magasin_destina_id:
magasin_destina = Magasins.objects.get(id=magasin_destina_id)
if magasin_destina and magasin_origine:
articles = Articles.objects(Q(compte=compte) & Q(type_article=0))
for article in articles:
for variante in article.variantes:
if magasin_destina in variante.MagVariante() and magasin_origine in variante.MagVariante():
datas.append(variante)
return jsonify({'data': [make_public_variante(data) for data in datas]})
@prefix.route('/ligne/<objectid:compte_id>/<objectid:item_id>', methods=['GET'])
@prefix.route('/ligne/<objectid:compte_id>/', methods=['GET'])
def ligne(compte_id, item_id=None):
data = []
currentSelect = []
if item_id :
docs = Documents.objects.get(id=item_id)
for item in docs.ligne_data:
magasin_id = request.args.get('magasin_origine')
matiere = {}
matiere['id'] = str(item.variante_id.id)
matiere['name'] = item.article_id.name
if len(item.article_id.variantes) > 1 :
matiere['name'] += ' ('+item.variante_id.name+')'
matiere['quantite'] = item.quantite
matiere['magasin'] = item.variante_id.MagVarianteID()
matiere['stock'] = item.old_stock
data.append(matiere)
currentSelect.append(str(item.variante_id.id))
return jsonify({'data': data, 'currentSelect': currentSelect})
| [
"wilrona@gmail.com"
] | wilrona@gmail.com |
b5df12202c5be1e05f8601f51ec94e42fcf45652 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_194/ch29_2019_03_11_18_45_05_167488.py | 4861c2cff3d2bee950f8c15d4e416cd85e3bd815 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | def calcula_aumento(x):
if x > 1250:
y == 1.10*x - x
elif x <= 1250 and x > 0:
y == 1.15*x - x
return y | [
"you@example.com"
] | you@example.com |
902c76802cddf915b918bec8411079d44a6b97fe | 23b5337bf410415b7b150e3ad60cafc1578a0441 | /05-Databases/03-Databases-in-Views/forms.py | 46d0f70c26385f98a9d58e484797ab8fb4d5fd4c | [] | no_license | VerdantFox/flask_course | b8de13ad312c14229f0c3bc2af70e8609a3b00fb | 47b167b54bc580734fa69fc1a2d7e724adfb9610 | refs/heads/master | 2021-09-10T05:01:47.385859 | 2020-02-24T21:07:05 | 2020-02-24T21:07:05 | 241,973,705 | 0 | 0 | null | 2021-09-08T01:40:59 | 2020-02-20T19:40:42 | Python | UTF-8 | Python | false | false | 324 | py | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SubmitField
class AddForm(FlaskForm):
name = StringField("Name of Puppy:")
submit = SubmitField("Add Puppy")
class DelForm(FlaskForm):
id = IntegerField("Id Number of Puppy to Remove:")
submit = SubmitField("Remove Puppy")
| [
"verdantfoxx@gmail.com"
] | verdantfoxx@gmail.com |
6dc8f8821937127ecb968d5c8e6366ad6ad177f2 | cd23b0457bc02a60b89f1f52783e56cc36d85b5e | /mutl_process/thread_context.py | 0943d30e34be0cd4bf6b3790559039473f0fc2f9 | [] | no_license | cluo/learingPython | 65c7068613e1a2ae0178e23770503043d9278c45 | 54609288e489047d4dd1dead5ac142f490905f0e | refs/heads/master | 2020-04-01T13:04:15.981758 | 2015-02-23T13:21:31 | 2015-02-23T13:21:31 | 28,440,969 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | import threading
import logging
logging.basicConfig(
level=logging.DEBUG,
format = '(%(threadName)-10s) %(message)s'
)
def worker_with(lock):
with lock:
logging.debug('Lock acquired via with')
def worker_no_with(lock):
lock.acquire()
try:
logging.debug('Lock acquired directly')
finally:
lock.release()
lock = threading.Lock()
w = threading.Thread(target=worker_with, args=(lock,))
nw = threading.Thread(target=worker_no_with, args=(lock,))
w.start()
nw.start() | [
"luosheng@meizu.com"
] | luosheng@meizu.com |
4bf2511aa7bcff09b14328f19d375bbb949a767e | 3235145c84c48535bbf27dabfb3faa7359ed6fef | /google-cloud-sdk/lib/surface/ml_engine/models/create.py | 31cf5ef3f4d2f8ba6cae149bbb0830eb0e2c4ab8 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | paceuniversity/CS3892017team1 | b69fb10f5194f09748cd5bca48901e9bd87a55dc | f8e82537c84cac148f577794d2299ea671b26bc2 | refs/heads/master | 2021-01-17T04:34:04.158071 | 2017-05-09T04:10:22 | 2017-05-09T04:10:22 | 82,976,622 | 2 | 8 | null | 2020-07-25T09:45:47 | 2017-02-23T22:13:04 | Python | UTF-8 | Python | false | false | 2,203 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ml-engine models create command."""
from googlecloudsdk.api_lib.ml import models
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml import flags
from googlecloudsdk.command_lib.ml import models_util
def _AddCreateArgs(parser):
"""Get arguments for the `ml-engine models create` command."""
flags.GetModelName().AddToParser(parser)
parser.add_argument(
'--regions',
metavar='REGION',
type=arg_parsers.ArgList(min_length=1),
help="""\
The Google Cloud region where the model will be deployed (currently only a
single region is supported).
Will soon be required, but defaults to 'us-central1' for now.
""")
parser.add_argument(
'--enable-logging',
action='store_true',
help=('If set, enables StackDriver Logging for online prediction.'))
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class CreateBeta(base.CreateCommand):
"""Create a new Cloud ML Engine model."""
@staticmethod
def Args(parser):
_AddCreateArgs(parser)
def Run(self, args):
models_util.Create(models.ModelsClient('v1beta1'), args.model,
regions=args.regions, enable_logging=args.enable_logging)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class CreateGa(base.CreateCommand):
"""Create a new Cloud ML Engine model."""
@staticmethod
def Args(parser):
_AddCreateArgs(parser)
def Run(self, args):
models_util.Create(models.ModelsClient('v1'), args.model,
regions=args.regions, enable_logging=args.enable_logging)
| [
"hanastanojkovic@gmail.com"
] | hanastanojkovic@gmail.com |
4dbadf4946762250b2114648b1717804055f5d63 | 032021f2604815f8a71bbfb55af41d7fc8cfab9c | /crawlib/tests/dummy_site_crawler/sql_backend/db.py | e5c00da079bfb355ff6967330726daba635f7215 | [
"MIT"
] | permissive | MacHu-GWU/crawlib-project | ddf5ad8f9971c87b51c618860be967e11f8f9700 | b2963b7f6a36ee7f1ef95a6bf9d8cb746d9da991 | refs/heads/master | 2020-04-18T03:11:12.219272 | 2019-12-31T03:34:10 | 2019-12-31T03:34:10 | 66,882,484 | 1 | 1 | MIT | 2019-12-31T03:34:11 | 2016-08-29T21:35:35 | Python | UTF-8 | Python | false | false | 449 | py | # -*- coding: utf-8 -*-
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy_mate import EngineCreator
from .config_init import config
engine = EngineCreator(
host=config.DB_HOST.get_value(),
port=config.DB_PORT.get_value(),
database=config.DB_DATABASE.get_value(),
username=config.DB_USERNAME.get_value(),
password=config.DB_PASSWORD.get_value(),
).create_postgresql_psycopg2()
Session = sessionmaker(bind=engine)
| [
"husanhe@gmail.com"
] | husanhe@gmail.com |
bb954328311c88612b3a3eab48ae0a1f1f911e36 | e0fa466605d4031260fb2401244ad2de2a07c393 | /ch16/tsUclnt.py | 1dd0749fd68163e671e4ef314af51b6ecde08014 | [] | no_license | saturnisbig/corepynotes | aef2f9ed8d387d51ccb64bb264891be6c8f909d7 | c6d883b57e5ffc485f997e87178243a8b71441c2 | refs/heads/master | 2020-04-29T10:36:15.608223 | 2019-03-17T08:19:39 | 2019-03-17T08:19:39 | 176,066,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
from socket import *
REMOTE_HOST = '66.112.209.81'
PORT = 21566
BUFSIZE = 1024
ADDR = (REMOTE_HOST, PORT)
udpCliSock = socket(AF_INET, SOCK_DGRAM)
while True:
data = raw_input('> ')
if not data:
break
udpCliSock.sendto(data, ADDR)
data, addr = udpCliSock.recvfrom(BUFSIZE)
if not data:
break
print data
udpCliSock.close()
| [
"i.kenting@gmail.com"
] | i.kenting@gmail.com |
faa137f479e796189e99c7f83a8f08674f538903 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/51/usersdata/75/20050/submittedfiles/listas.py | 7cec7cbdbc35e7ae3247f73e35ff47a5122308f5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | # -*- coding: utf-8 -*-
from __future__ import division
def maior_grau (lista):
if i in range (0,len(lista)-1,1):
diferenca=lista[i]-lista[i+1]
if diferenca<0:
diferenca=diferenca*(-1)
if diferenca>diferenca:
return diferenca
n=int(input('Digite a quantidade de termos da lista:'))
while n<=2:
n=int(input('Digite a quantidade de termos da lista:'))
a=[]
for i in range (0,n,1):
a.append(input('Digite os elementos da lista:'))
print maior_grau(a)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
003c2447926534f7845b1498c135b13d428a1509 | 481a65ea079ca021e06f0fd4efa707ec91fa0131 | /setup.py | 02b0c9463f47bbe61561d30c4e464ba0ec6700cc | [] | no_license | maejie/scholarNetwork | 3c51576c7bfc2623b6ad4ad83f6add3dc9546c9c | 11ccd802f9825cfbcfad70b864ac28f2634df593 | refs/heads/master | 2020-02-26T13:32:28.348775 | 2015-05-14T03:43:11 | 2015-05-14T03:43:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='scholarNetwork',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0.2',
description='Coauthor-Network of your Google Scholar',
long_description=long_description,
# The project's main homepage.
url='https://github.com/chengjun/scholarNetwork',
# Author details
author='Cheng-Jun Wang & Lingfei Wu',
author_email='wangchj04@gmail.com; wwlf850927@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='Google Scholar',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['beautifulsoup4', 'networkx', 'matplotlib'],
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require = {
'dev': ['check-manifest'],
'test': ['coverage'],
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'sample=sample:main',
],
},
)
| [
"wangchj04@gmail.com"
] | wangchj04@gmail.com |
56f64bbca3a06b504a457f609ba0d51093b8b8fb | 8ed4bf9fbead471c9e5f88e4d18ac432ec3d628b | /hackerrank/algorithm/implementation/kangaroo.py | 1352c7d1ad39190658665eb1a53de08a7bdc6bfe | [] | no_license | hizbul25/programming_problem | 9bf26e49ed5bb8c9c829d00e765c9401222fb35c | 2acca363704b993ffe5f6c2b00f81a4f4eca7204 | refs/heads/master | 2021-01-10T22:28:26.105787 | 2018-01-21T16:45:45 | 2018-01-21T16:45:45 | 65,394,734 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | import sys
x1, v1, x2, v2 = input().strip().split(' ')
x1, v1, x2, v2 = [int(x1), int(v1), int(x2), int(v2)]
if v1 > v2 and ((x2 - x1) % (v2 - v1) == 0):
print("YES")
else:
print("NO")
| [
"hizbul.ku@gmail.com"
] | hizbul.ku@gmail.com |
232e12f3a83cc3ace60e4e2722e476b207515369 | 60e4baae4d6b323b3d3b656df3a7b0ea3ca40ef2 | /project/apps/community/forms.py | eaf8d51edeca2b91c5abcef583e9295120a73913 | [] | no_license | Burzhun/Big-django-project | a03a61a15ee75f49324ad7ea51372b6b013d1650 | 1a71f974b7b5399a45862711b5f858c0d4af50d2 | refs/heads/master | 2020-04-11T00:16:06.211039 | 2018-12-11T19:13:38 | 2018-12-11T19:13:38 | 161,381,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,514 | py | from django.contrib.auth import password_validation
from django.conf import settings
from django import forms
from .models import User
from .choices import COUNTRY_CHOICES
from django_resized.forms import ResizedImageField
from .fields import ReCaptchaField
from .widgets import ReCaptchaWidget
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'password_mismatch': "Неверное подтверждение пароля",
'uf_rules_is_not_accepted': "Вы должны принять лицензионное соглашение",
}
password1 = forms.CharField(
label="Пароль",
min_length=6,
strip=False,
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label="Подтвердите пароль",
min_length=6,
widget=forms.PasswordInput,
strip=False,
help_text="Пароль должен быть не менее 6 символов",
)
email = forms.EmailField(
label="Email",
required=True,
)
first_name = forms.CharField(
label="Имя",
strip=False,
required=True,
)
last_name = forms.CharField(
label="Фамилия",
strip=False,
required=True,
)
uf_rules = forms.BooleanField(
label="Условия",
required=False
)
captcha = ReCaptchaField(widget=ReCaptchaWidget)
class Meta:
model = User
fields = ("username", "email", "password1", "password2", "first_name", "last_name")
def __init__(self, *args, **kwargs):
super(UserCreationForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
self.instance.username = self.cleaned_data.get('username')
password_validation.validate_password(self.cleaned_data.get('password2'), self.instance)
return password2
def clean_uf_rules(self):
uf_rules = self.cleaned_data.get("uf_rules")
if not uf_rules:
raise forms.ValidationError(
self.error_messages['uf_rules_is_not_accepted'],
code='uf_rules_is_not_accepted',
)
return uf_rules
def is_valid(self, *args, **kwargs):
return super(UserCreationForm, self).is_valid(*args, **kwargs)
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class ProfileEditForm(forms.ModelForm):
error_messages = {
'password_mismatch': "Неверное подтверждение пароля",
}
password1 = forms.CharField(
label="Новый пароль",
min_length=6,
required=False,
strip=False,
widget=forms.PasswordInput(attrs={'class': 'form-control'}),
)
password2 = forms.CharField(
label="Повтор пароля",
min_length=6,
widget=forms.PasswordInput(attrs={'class': 'form-control'}),
required=False,
strip=False,
help_text="Пароль должен быть не менее 6 символов",
)
email = forms.EmailField(
label="Email",
required=True,
widget=forms.EmailInput(attrs={'class': 'form-control'})
)
first_name = forms.CharField(
label="Имя",
strip=False,
required=True,
widget=forms.TextInput(attrs={'class': 'form-control'})
)
last_name = forms.CharField(
label="Фамилия",
strip=False,
required=True,
widget=forms.TextInput(attrs={'class': 'form-control'})
)
is_public_email = forms.BooleanField(
required=False,
widget=forms.CheckboxInput(),
label="Показывать другим пользователям"
)
is_public_b_date = forms.BooleanField(
required=False,
widget=forms.CheckboxInput(),
label="Показывать другим пользователям"
)
is_public_has_children = forms.BooleanField(
required=False,
widget=forms.CheckboxInput(),
label="Показывать другим пользователям"
)
has_children = forms.ChoiceField(
choices=User.HAS_CHILDREN_CHOICES,
required=True,
label='Есть ли дети',
widget=forms.Select(attrs={'class': 'form-control chosen-select '})
)
b_date = forms.DateField(
required=False,
input_formats=['%d.%m.%Y'],
widget=forms.DateInput(attrs={
'class': 'form-control datepicker ',
'data-date-format': "dd.mm.yyyy",
'data-date-end-date': "30.01.2005",
}),
label='Дата рождения'
)
gender = forms.ChoiceField(
label='Пол',
choices=User.GENDER_CHOICES,
required=True,
widget=forms.Select(attrs={'class': 'form-control chosen-select '})
)
country = forms.ChoiceField(
required=False,
choices=COUNTRY_CHOICES,
label='Страна',
widget=forms.Select(attrs={
'class': 'form-control chosen-select ',
'data-placeholder': "Выбери страну..."
})
)
city = forms.CharField(
label='Город',
required=False,
widget=forms.TextInput(attrs={'class': 'form-control'})
)
notes = forms.CharField(
required=False,
label='О себе',
widget=forms.Textarea(attrs={'class': 'form-control'})
)
class Meta:
model = User
fields = (
"username", "email", "password1", "password2", "first_name",
"last_name", "is_public_email", "is_public_b_date", "is_public_has_children_choices",
"b_date", "gender", "country", "city", "notes"
)
def __init__(self, *args, **kwargs):
super(ProfileEditForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
password_validation.validate_password(self.cleaned_data.get('password2'), self.instance)
return password2
def is_valid(self, *args, **kwargs):
return super(ProfileEditForm, self).is_valid(*args, **kwargs)
def save(self, commit=True):
user = super(ProfileEditForm, self).save(commit=False)
if self.cleaned_data["password1"]:
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class ImageUploadForm(forms.ModelForm):
avatar = forms.ImageField(required=True)
class Meta:
model = User
fields = ('avatar', ) | [
"burjunov@yandex.ru"
] | burjunov@yandex.ru |
2bb31beed6b284eb1d71957618d1eb8c31c56355 | 035c7cfbd62a9c06f8dbbb4a92607cf2b2570689 | /src/systemidentification/coefficient_plot.py | 9c94331149a3134488ea65b674b5ea5794be7296 | [
"BSD-3-Clause",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | NikhilPappu/dissertation | 60001328c619caf5f966368ea4a87f3086a29129 | 0a93ada92fa1a964c9de89bcdb558c82a9ef252b | refs/heads/master | 2020-03-24T00:50:05.493962 | 2017-02-07T02:01:17 | 2017-02-07T02:01:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,846 | py | #!/usr/bin/env python
import sys
sys.path.append('..')
from load_paths import read
import os
import cPickle
from numpy import linspace, sqrt, zeros
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib import rcParams
from bicycleid import data, plot, model
import dtk.bicycle
params = {'axes.labelsize': 8,
'axes.titlesize': 10,
'text.fontsize': 10,
'legend.fontsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'text.usetex': True}
rcParams.update(params)
dat = data.ExperimentalData()
coefPlot = plot.CoefficientPlot()
subDef = {}
subDef['Rider'] = ['Charlie', 'Jason', 'Luke']
subDef['Environment'] = ['Treadmill', 'Pavilion']
subDef['Maneuver'] = ['Balance', 'Track Straight Line',
'Balance With Disturbance', 'Track Straight Line With Disturbance']
subDef['Speed'] = ['1.4', '2.0', '3.0', '4.0', '4.92', '5.8', '7.0', '9.0']
subDef['MeanFit'] = 0.0
subDef['Duration'] = 0.0
subDat = dat.subset(**subDef)
speedRange = linspace(0.0, 10.0, num=50)
models = {rider: model.Whipple(rider).matrices(speedRange) for rider in ['Charlie']}
coefPlot.update_graph(subDat, models)
# now add the arm model
m = loadmat('../../data/extensions/armsAB-Charlie.mat', squeeze_me=True) # this is charlie at 101 speeds
inputMats = zeros((101, 4, 1))
for i, B in enumerate(m['inputMatrices']):
inputMats[i] = B[:, 1].reshape(4, 1)
for lab, ax in coefPlot.axes.items():
row, col = int(lab[-2]), int(lab[-1])
if lab[0] == 'a':
ax.plot(m['speed'], m['stateMatrices'][:, row - 1, col - 1], 'r')
elif lab[0] == 'b':
ax.plot(m['speed'], inputMats[:, row - 1, col - 1], 'r')
# now add the model identified from the runs with Luke on the Pavilion floor
# with the canonical realization
with open(read('pathToIdMat')) as f:
idMat = cPickle.load(f)
M, C1, K0, K2, H = idMat['L-P']
speeds = linspace(0, 10, num=50)
As = zeros((len(speeds), 4, 4))
Bs = zeros((len(speeds), 4, 1))
for i, v in enumerate(speeds):
A, B = dtk.bicycle.benchmark_state_space(M, C1, K0, K2, v, 9.81)
As[i] = A
Bs[i] = B[:, 1].reshape(4, 1)
for lab, ax in coefPlot.axes.items():
row, col = int(lab[-2]), int(lab[-1])
if lab[0] == 'a':
ax.plot(speeds, As[:, row - 1, col - 1], 'orange')
elif lab[0] == 'b':
ax.plot(speeds, Bs[:, row - 1, col - 1], 'orange')
width = 6.0
coefPlot.title.set_fontsize(10.0)
coefPlot.figure.set_figwidth(width)
goldenRatio = (sqrt(5) - 1.0) / 2.0
coefPlot.figure.set_figheight(6.0 * goldenRatio)
coefPlot.figure.savefig('../../figures/systemidentification/coefficients.pdf')
# this gtk backend failed when I tried to savefig a png, so I do this
os.system('convert -density 200x200 ../../figures/systemidentification/coefficients.pdf ../../figures/systemidentification/coefficients.png')
| [
"moorepants@gmail.com"
] | moorepants@gmail.com |
2327963633982b1d2c2485fd724170e257c0d587 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/enums/types/ad_group_type.py | 13af772070562ce5a6957bdb7c6b267f66fab80e | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.enums',
marshal='google.ads.googleads.v6',
manifest={
'AdGroupTypeEnum',
},
)
class AdGroupTypeEnum(proto.Message):
r"""Defines types of an ad group, specific to a particular
campaign channel type. This type drives validations that
restrict which entities can be added to the ad group.
"""
class AdGroupType(proto.Enum):
r"""Enum listing the possible types of an ad group."""
UNSPECIFIED = 0
UNKNOWN = 1
SEARCH_STANDARD = 2
DISPLAY_STANDARD = 3
SHOPPING_PRODUCT_ADS = 4
HOTEL_ADS = 6
SHOPPING_SMART_ADS = 7
VIDEO_BUMPER = 8
VIDEO_TRUE_VIEW_IN_STREAM = 9
VIDEO_TRUE_VIEW_IN_DISPLAY = 10
VIDEO_NON_SKIPPABLE_IN_STREAM = 11
VIDEO_OUTSTREAM = 12
SEARCH_DYNAMIC_ADS = 13
SHOPPING_COMPARISON_LISTING_ADS = 14
PROMOTED_HOTEL_ADS = 15
VIDEO_RESPONSIVE = 16
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
ab3c6b1ef110de9dfe5e006d756c82f119ae6354 | 72b00923d4aa11891f4a3038324c8952572cc4b2 | /python/test/socket/socket_sockpair.py | d3c8cc0437efa47725e06fd162ddc41a9c0dc352 | [] | no_license | taowuwen/codec | 3698110a09a770407e8fb631e21d86ba5a885cd5 | d92933b07f21dae950160a91bb361fa187e26cd2 | refs/heads/master | 2022-03-17T07:43:55.574505 | 2022-03-10T05:20:44 | 2022-03-10T05:20:44 | 87,379,261 | 0 | 0 | null | 2019-03-25T15:40:27 | 2017-04-06T02:50:54 | C | UTF-8 | Python | false | false | 505 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import os
parent, child = socket.socketpair()
pid = os.fork()
if pid:
print('in parent, sending message')
child.close()
parent.sendall(b'ping')
response = parent.recv(1024)
print('response from child:', response)
parent.close()
else:
print('in child, waiting for message')
parent.close()
message = child.recv(1024)
print('message from parent:', message)
child.sendall(b'pong')
child.close()
| [
"taowuwen@gmail.com"
] | taowuwen@gmail.com |
a51792d4ad92eaaec45651912b1cd8c976ed9d40 | dafaa64cf49c76ff00ef86d77f162f98279c0bc6 | /chef/tests/test_data_bag.py | 5cc2ae3321f922e1cc7d6cbb924cfbe9e84a4896 | [] | no_license | SeanOC/pychef | 93c08992d4a85b3002348aa588cf5e460e69402a | bc1b39586f567a5539b92570c4d38ceb02b23b6e | refs/heads/master | 2021-01-21T01:16:02.079784 | 2011-01-30T16:57:37 | 2011-01-30T16:57:37 | 1,309,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | from chef import DataBag, DataBagItem, Search
from chef.exceptions import ChefError
from chef.tests import ChefTestCase
class DataBagTestCase(ChefTestCase):
def test_list(self):
bags = DataBag.list()
self.assertIn('test_1', bags)
self.assertIsInstance(bags['test_1'], DataBag)
def test_keys(self):
bag = DataBag('test_1')
self.assertItemsEqual(bag.keys(), ['item_1', 'item_2'])
self.assertItemsEqual(iter(bag), ['item_1', 'item_2'])
def test_item(self):
bag = DataBag('test_1')
item = bag['item_1']
self.assertEqual(item['test_attr'], 1)
self.assertEqual(item['other'], 'foo')
def test_search_item(self):
self.assertIn('test_1', Search.list())
q = Search('test_1')
self.assertIn('item_1', q)
self.assertIn('item_2', q)
self.assertEqual(q['item_1']['raw_data']['test_attr'], 1)
item = q['item_1'].object
self.assertIsInstance(item, DataBagItem)
self.assertEqual(item['test_attr'], 1)
| [
"noah@coderanger.net"
] | noah@coderanger.net |
08c01252c948b5f3627cadfc766dcc902d5bbeae | 85b102bc9c0dcc04dd469297b32bad9e38065e28 | /backend/auth_app/serializers.py | 44c75c927350cf2d7ac29e284add714b855212dc | [] | no_license | ahrisagree/AHRIS | 60fc58279bf594ba9830e21df25aa7c3c90e6bb9 | 73c480b3d44231acfcc43c0292e0b514654aeb27 | refs/heads/master | 2023-06-06T11:55:33.100575 | 2021-06-29T06:26:08 | 2021-06-29T06:26:08 | 354,016,384 | 0 | 0 | null | 2021-06-29T06:26:09 | 2021-04-02T12:43:21 | JavaScript | UTF-8 | Python | false | false | 2,782 | py | from rest_framework import serializers
from django.utils.translation import ugettext_lazy as _
from allauth.account.adapter import get_adapter
from allauth.account.utils import setup_user_email
from rest_auth.registration.serializers import RegisterSerializer as RestRegisterSerializer
from backend.utils import get_or_none
from .roles import roles
from .models import AppUser, Division
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = AppUser
fields = ('pk', 'username', 'email', 'role', 'divisi', 'gaji')
read_only_fields = ('email', 'role', 'divisi', 'gaji')
depth = 1
class DivisionSerializer(serializers.ModelSerializer):
id = serializers.IntegerField(required=True)
nama_divisi = serializers.CharField(max_length=100, required=False)
class Meta:
model = Division
fields = '__all__'
class UserListSerializer(serializers.ModelSerializer):
divisi = DivisionSerializer(many=True)
class Meta:
model = AppUser
fields = ('pk', 'username', 'email', 'role', 'divisi')
depth = 1
class UserEditSerializer(serializers.ModelSerializer):
divisi = DivisionSerializer(many=True)
class Meta:
model = AppUser
fields = ('pk', 'username', 'email', 'role', 'divisi', 'gaji')
read_only_fields = ('email',)
depth = 1
def validate_divisi(self, divisi):
divisi_list = []
for div in divisi:
divisi_obj = get_or_none(Division, **div)
if divisi_obj == None:
if div.get('nama_divisi', None) != None:
divisi_obj = Division.objects.create(nama_divisi=div['nama_divisi'])
else:
continue
divisi_list.append(divisi_obj)
return divisi_list
def update(self, instance, validated_data):
divisi = validated_data.pop('divisi')
updated_division = []
instance.divisi.set(divisi)
return super().update(instance, validated_data)
class RegisterSerializer(RestRegisterSerializer):
role = serializers.CharField(max_length=20, required=True)
divisi = DivisionSerializer(many=True)
gaji = serializers.IntegerField()
def validate_role(self, role):
if role not in roles:
raise serializers.ValidationError(_("Invalid Role"))
return role
def validate_divisi(self, divisi):
print(divisi)
divisi_list = []
for div in divisi:
divisi_obj = get_or_none(Division, **div)
if divisi_obj == None:
if div.get('nama_divisi', None) != None:
divisi_obj = Division.objects.get_or_create(nama_divisi=div['nama_divisi'])[0]
else:
continue
divisi_list.append(divisi_obj)
return divisi_list
def custom_signup(self, request, user):
print(self.validated_data)
role = self.validated_data.get('role', '')
setattr(user, 'role', role)
divisi = self.validated_data.get('divisi')
user.divisi.set(divisi)
gaji = self.validated_data.get('gaji')
setattr(user, 'gaji', gaji)
user.save() | [
"leonardoeinstein2000@gmail.com"
] | leonardoeinstein2000@gmail.com |
cc70794577262d87f2821ca1dedabcafa9149ed5 | b09bb602f921064a00835038ce593ed237ae9b16 | /home/forms.py | ba0c456654920c24a92c72f6ce6894c646294b79 | [] | no_license | gauravsaxena1997/Karyavahi | 03f04febe4d726b348c38bc3b8e0e3e7e0113b68 | 6f240234579153aced358d225b45c81b1bc87888 | refs/heads/master | 2020-03-21T06:10:18.854809 | 2018-06-28T18:16:36 | 2018-06-28T18:16:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | from django.contrib.auth.models import User
from django import forms
class userinput(forms.Form):
q=forms.CharField(required=True,max_length=25,label='Input #hashtag')
class UserForm (forms.ModelForm):
    # Rendered as a masked password input in the browser.
    password = forms.CharField (widget=forms.PasswordInput)
    # NOTE(review): a plain ModelForm.save() would store the raw password;
    # the view should call user.set_password() before saving — confirm.
    class Meta:
        model = User
        fields = [ 'username', 'email', 'password' ]
| [
"gauravsaxena.cs@gmail.com"
] | gauravsaxena.cs@gmail.com |
965b1741916b9fc0c678a0f919e49d5749144d7e | 72dc7d124cdac8f2dcab3f72e95e9a646154a6a0 | /byceps/services/ticketing/ticket_user_management_service.py | e9bf08d17b9df30a3c46ca20e8084f70f9bad11c | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | m-ober/byceps | e6569802ee76e8d81b892f1f547881010359e416 | 4d0d43446f3f86a7888ed55395bc2aba58eb52d5 | refs/heads/master | 2020-11-30T23:31:33.944870 | 2020-02-12T23:53:55 | 2020-02-12T23:56:04 | 40,315,983 | 0 | 0 | null | 2015-08-06T16:41:36 | 2015-08-06T16:41:36 | null | UTF-8 | Python | false | false | 3,084 | py | """
byceps.services.ticketing.ticket_user_management_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from ...database import db
from ...typing import UserID
from ..user import service as user_service
from . import event_service
from .exceptions import (
TicketIsRevoked,
UserAccountSuspended,
UserAlreadyCheckedIn,
UserIdUnknown,
)
from . import ticket_service
from .transfer.models import TicketID
def appoint_user_manager(
    ticket_id: TicketID, manager_id: UserID, initiator_id: UserID
) -> None:
    """Make the given user the ticket's user manager.

    Raises ``TicketIsRevoked`` if the ticket has been revoked.
    """
    ticket = ticket_service.find_ticket(ticket_id)
    if ticket.revoked:
        raise TicketIsRevoked(f'Ticket {ticket_id} has been revoked.')

    ticket.user_managed_by_id = manager_id

    event_data = {
        'appointed_user_manager_id': str(manager_id),
        'initiator_id': str(initiator_id),
    }
    event = event_service.build_event(
        'user-manager-appointed', ticket.id, event_data)
    db.session.add(event)
    db.session.commit()
def withdraw_user_manager(ticket_id: TicketID, initiator_id: UserID) -> None:
    """Clear the ticket's custom user manager.

    Raises ``TicketIsRevoked`` if the ticket has been revoked.
    """
    ticket = ticket_service.find_ticket(ticket_id)
    if ticket.revoked:
        raise TicketIsRevoked(f'Ticket {ticket_id} has been revoked.')

    ticket.user_managed_by_id = None

    event_data = {'initiator_id': str(initiator_id)}
    event = event_service.build_event(
        'user-manager-withdrawn', ticket.id, event_data)
    db.session.add(event)
    db.session.commit()
def appoint_user(
    ticket_id: TicketID, user_id: UserID, initiator_id: UserID
) -> None:
    """Assign the given user to the ticket.

    Raises ``TicketIsRevoked``, ``UserAlreadyCheckedIn``,
    ``UserIdUnknown`` or ``UserAccountSuspended`` when the respective
    precondition fails.
    """
    ticket = ticket_service.find_ticket(ticket_id)
    if ticket.revoked:
        raise TicketIsRevoked(f'Ticket {ticket_id} has been revoked.')
    if ticket.user_checked_in:
        raise UserAlreadyCheckedIn('Ticket user has already been checked in.')

    user = user_service.find_user(user_id)
    if user is None:
        raise UserIdUnknown(f"Unknown user ID '{user_id}'")
    if user.suspended:
        raise UserAccountSuspended(
            f'User account {user.screen_name} is suspended.'
        )

    ticket.used_by_id = user_id

    event_data = {
        'appointed_user_id': str(user_id),
        'initiator_id': str(initiator_id),
    }
    event = event_service.build_event('user-appointed', ticket.id, event_data)
    db.session.add(event)
    db.session.commit()
def withdraw_user(ticket_id: TicketID, initiator_id: UserID) -> None:
    """Clear the ticket's assigned user.

    Raises ``TicketIsRevoked`` or ``UserAlreadyCheckedIn`` when the
    ticket cannot be modified.
    """
    ticket = ticket_service.find_ticket(ticket_id)
    if ticket.revoked:
        raise TicketIsRevoked(f'Ticket {ticket_id} has been revoked.')
    if ticket.user_checked_in:
        raise UserAlreadyCheckedIn('Ticket user has already been checked in.')

    ticket.used_by_id = None

    event_data = {'initiator_id': str(initiator_id)}
    event = event_service.build_event('user-withdrawn', ticket.id, event_data)
    db.session.add(event)
    db.session.commit()
| [
"homework@nwsnet.de"
] | homework@nwsnet.de |
bb0772a6c4a86996f2c626ca4c8586ad3b56e2e6 | 4946ea045878d30d8c68ac9b867e1b624608a0c7 | /users/migrations/0002_create_profile_existing_users.py | 0869a8e3da799fd2beb72ff0db752677b963f69b | [
"MIT"
] | permissive | jeanettemariemurphy/directory-cms | 6f4a7f5cdd24b891745d4f2dd5f8aa94d3b9b298 | 25c98d13e409c28998724d19fe1c352f7dc19f1d | refs/heads/master | 2020-06-13T12:25:52.083225 | 2019-06-27T10:19:11 | 2019-06-27T10:19:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-07 10:51
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.db import migrations
from users.models import APPROVED
def set_existing_users_as_approved(apps, schema_editor):
    """Create an APPROVED UserProfile for every currently active user."""
    # NOTE(review): this uses the *live* user model (get_user_model) and the
    # live APPROVED constant instead of historical models from
    # ``apps.get_model`` — against the usual data-migration convention;
    # confirm this is intentional.
    User = get_user_model()
    UserProfile = apps.get_model('users', 'UserProfile')
    for user in User.objects.filter(is_active=True):
        profile = UserProfile(user_id=user.id,
                              assignment_status=APPROVED)
        profile.save()
class Migration(migrations.Migration):

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        # Data-only migration; reversing it is a deliberate no-op.
        migrations.RunPython(set_existing_users_as_approved,
                             reverse_code=migrations.RunPython.noop)
    ]
| [
"alessandro.denoia@digital.trade.gov.uk"
] | alessandro.denoia@digital.trade.gov.uk |
eba28d061f1ca38c49e2d42e932da20989443234 | 661721fa052febc3bb955a04888fdf770e1727b9 | /read_10_buf.py | 37a969d42aca7ea51ef7f5f4ee0fdeb8d32b4d36 | [] | no_license | sumitparw/leetcode_practise | 3f25366996702609e9644eff7b0e8f24de05a2e0 | c98b49c4a29b05ac21b6ae66b5d0e6b7f25a2c1a | refs/heads/master | 2020-08-12T12:24:25.092547 | 2020-02-01T19:36:53 | 2020-02-01T19:36:53 | 214,766,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | filename = "xyz.txt"
buffer = ''
with open(filename) as f:
while True:
c = f.read(10)
if not c:
break
buffer = buffer + c
print(buffer) | [
"46736751+sumitparw@users.noreply.github.com"
] | 46736751+sumitparw@users.noreply.github.com |
a730f5aec6b1037e07a94eedf86850ee5d5da30f | 01857ef455ea60eccaf03b5a9059ec83e9803c2e | /nicegui/tailwind_types/background_color.py | 501965091cc66d3eafcc5126673744ea5026a591 | [
"MIT"
] | permissive | zauberzeug/nicegui | f08312cc1f393deca79e0e84a2506d3a35efff16 | c61b1315f29d51e26cc1168207f5616b302f8df0 | refs/heads/main | 2023-08-18T18:09:30.937322 | 2023-08-18T15:04:00 | 2023-08-18T15:04:00 | 365,250,183 | 5,128 | 271 | MIT | 2023-09-14T01:50:56 | 2021-05-07T13:55:05 | Python | UTF-8 | Python | false | false | 4,213 | py | from typing import Literal
BackgroundColor = Literal[
'inherit',
'current',
'transparent',
'black',
'white',
'slate-50',
'slate-100',
'slate-200',
'slate-300',
'slate-400',
'slate-500',
'slate-600',
'slate-700',
'slate-800',
'slate-900',
'slate-950',
'gray-50',
'gray-100',
'gray-200',
'gray-300',
'gray-400',
'gray-500',
'gray-600',
'gray-700',
'gray-800',
'gray-900',
'gray-950',
'zinc-50',
'zinc-100',
'zinc-200',
'zinc-300',
'zinc-400',
'zinc-500',
'zinc-600',
'zinc-700',
'zinc-800',
'zinc-900',
'zinc-950',
'neutral-50',
'neutral-100',
'neutral-200',
'neutral-300',
'neutral-400',
'neutral-500',
'neutral-600',
'neutral-700',
'neutral-800',
'neutral-900',
'neutral-950',
'stone-50',
'stone-100',
'stone-200',
'stone-300',
'stone-400',
'stone-500',
'stone-600',
'stone-700',
'stone-800',
'stone-900',
'stone-950',
'red-50',
'red-100',
'red-200',
'red-300',
'red-400',
'red-500',
'red-600',
'red-700',
'red-800',
'red-900',
'red-950',
'orange-50',
'orange-100',
'orange-200',
'orange-300',
'orange-400',
'orange-500',
'orange-600',
'orange-700',
'orange-800',
'orange-900',
'orange-950',
'amber-50',
'amber-100',
'amber-200',
'amber-300',
'amber-400',
'amber-500',
'amber-600',
'amber-700',
'amber-800',
'amber-900',
'amber-950',
'yellow-50',
'yellow-100',
'yellow-200',
'yellow-300',
'yellow-400',
'yellow-500',
'yellow-600',
'yellow-700',
'yellow-800',
'yellow-900',
'yellow-950',
'lime-50',
'lime-100',
'lime-200',
'lime-300',
'lime-400',
'lime-500',
'lime-600',
'lime-700',
'lime-800',
'lime-900',
'lime-950',
'green-50',
'green-100',
'green-200',
'green-300',
'green-400',
'green-500',
'green-600',
'green-700',
'green-800',
'green-900',
'green-950',
'emerald-50',
'emerald-100',
'emerald-200',
'emerald-300',
'emerald-400',
'emerald-500',
'emerald-600',
'emerald-700',
'emerald-800',
'emerald-900',
'emerald-950',
'teal-50',
'teal-100',
'teal-200',
'teal-300',
'teal-400',
'teal-500',
'teal-600',
'teal-700',
'teal-800',
'teal-900',
'teal-950',
'cyan-50',
'cyan-100',
'cyan-200',
'cyan-300',
'cyan-400',
'cyan-500',
'cyan-600',
'cyan-700',
'cyan-800',
'cyan-900',
'cyan-950',
'sky-50',
'sky-100',
'sky-200',
'sky-300',
'sky-400',
'sky-500',
'sky-600',
'sky-700',
'sky-800',
'sky-900',
'sky-950',
'blue-50',
'blue-100',
'blue-200',
'blue-300',
'blue-400',
'blue-500',
'blue-600',
'blue-700',
'blue-800',
'blue-900',
'blue-950',
'indigo-50',
'indigo-100',
'indigo-200',
'indigo-300',
'indigo-400',
'indigo-500',
'indigo-600',
'indigo-700',
'indigo-800',
'indigo-900',
'indigo-950',
'violet-50',
'violet-100',
'violet-200',
'violet-300',
'violet-400',
'violet-500',
'violet-600',
'violet-700',
'violet-800',
'violet-900',
'violet-950',
'purple-50',
'purple-100',
'purple-200',
'purple-300',
'purple-400',
'purple-500',
'purple-600',
'purple-700',
'purple-800',
'purple-900',
'purple-950',
'fuchsia-50',
'fuchsia-100',
'fuchsia-200',
'fuchsia-300',
'fuchsia-400',
'fuchsia-500',
'fuchsia-600',
'fuchsia-700',
'fuchsia-800',
'fuchsia-900',
'fuchsia-950',
'pink-50',
'pink-100',
'pink-200',
'pink-300',
'pink-400',
'pink-500',
'pink-600',
'pink-700',
'pink-800',
'pink-900',
'pink-950',
'rose-50',
'rose-100',
'rose-200',
'rose-300',
'rose-400',
'rose-500',
'rose-600',
'rose-700',
'rose-800',
'rose-900',
'rose-950',
]
| [
"falko@zauberzeug.com"
] | falko@zauberzeug.com |
91a43d6b4ce8483705b6463ee0ce47c2bcb62dfd | ee6acbd5fcd0fcd16230e96a4a539de41a02c97e | /operators/kubefed/python/pulumi_pulumi_kubernetes_crds_operators_kubefed/scheduling/v1alpha1/_inputs.py | 151deeb1cd170868eccfa41ce88d7e5f05c7c50b | [
"Apache-2.0"
] | permissive | isabella232/pulumi-kubernetes-crds | 777e78137aaf6525a44b61a02dccf91bf0d87a14 | 372c4c0182f6b899af82d6edaad521aa14f22150 | refs/heads/master | 2023-03-15T04:29:16.039753 | 2020-12-30T19:35:54 | 2020-12-30T19:35:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,116 | py | # coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ReplicaSchedulingPreferenceSpecArgs',
]
@pulumi.input_type
class ReplicaSchedulingPreferenceSpecArgs:
def __init__(__self__, *,
target_kind: pulumi.Input[str],
total_replicas: pulumi.Input[int],
clusters: Optional[pulumi.Input[Mapping[str, Any]]] = None,
rebalance: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[str] target_kind: TODO (@irfanurrehman); upgrade this to label selector only if need be. The idea of this API is to have a a set of preferences which can be used for a target FederatedDeployment or FederatedReplicaset. Although the set of preferences in question can be applied to multiple target objects using label selectors, but there are no clear advantages of doing that as of now. To keep the implementation and usage simple, matching ns/name of RSP resource to the target resource is sufficient and only additional information needed in RSP resource is a target kind (FederatedDeployment or FederatedReplicaset).
:param pulumi.Input[int] total_replicas: Total number of pods desired across federated clusters. Replicas specified in the spec for target deployment template or replicaset template will be discarded/overridden when scheduling preferences are specified.
:param pulumi.Input[Mapping[str, Any]] clusters: A mapping between cluster names and preferences regarding a local workload object (dep, rs, .. ) in these clusters. "*" (if provided) applies to all clusters if an explicit mapping is not provided. If omitted, clusters without explicit preferences should not have any replicas scheduled.
:param pulumi.Input[bool] rebalance: If set to true then already scheduled and running replicas may be moved to other clusters in order to match current state to the specified preferences. Otherwise, if set to false, up and running replicas will not be moved.
"""
pulumi.set(__self__, "target_kind", target_kind)
pulumi.set(__self__, "total_replicas", total_replicas)
if clusters is not None:
pulumi.set(__self__, "clusters", clusters)
if rebalance is not None:
pulumi.set(__self__, "rebalance", rebalance)
@property
@pulumi.getter(name="targetKind")
def target_kind(self) -> pulumi.Input[str]:
"""
TODO (@irfanurrehman); upgrade this to label selector only if need be. The idea of this API is to have a a set of preferences which can be used for a target FederatedDeployment or FederatedReplicaset. Although the set of preferences in question can be applied to multiple target objects using label selectors, but there are no clear advantages of doing that as of now. To keep the implementation and usage simple, matching ns/name of RSP resource to the target resource is sufficient and only additional information needed in RSP resource is a target kind (FederatedDeployment or FederatedReplicaset).
"""
return pulumi.get(self, "target_kind")
@target_kind.setter
def target_kind(self, value: pulumi.Input[str]):
pulumi.set(self, "target_kind", value)
@property
@pulumi.getter(name="totalReplicas")
def total_replicas(self) -> pulumi.Input[int]:
"""
Total number of pods desired across federated clusters. Replicas specified in the spec for target deployment template or replicaset template will be discarded/overridden when scheduling preferences are specified.
"""
return pulumi.get(self, "total_replicas")
@total_replicas.setter
def total_replicas(self, value: pulumi.Input[int]):
pulumi.set(self, "total_replicas", value)
@property
@pulumi.getter
def clusters(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
A mapping between cluster names and preferences regarding a local workload object (dep, rs, .. ) in these clusters. "*" (if provided) applies to all clusters if an explicit mapping is not provided. If omitted, clusters without explicit preferences should not have any replicas scheduled.
"""
return pulumi.get(self, "clusters")
@clusters.setter
def clusters(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "clusters", value)
@property
@pulumi.getter
def rebalance(self) -> Optional[pulumi.Input[bool]]:
"""
If set to true then already scheduled and running replicas may be moved to other clusters in order to match current state to the specified preferences. Otherwise, if set to false, up and running replicas will not be moved.
"""
return pulumi.get(self, "rebalance")
@rebalance.setter
def rebalance(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "rebalance", value)
| [
"albertzhong0@gmail.com"
] | albertzhong0@gmail.com |
2e5b09aa02c5be1df2acfd4bd03e9c17cfc49f4a | 741ee09b8b73187fab06ecc1f07f46a6ba77e85c | /AutonomousSourceCode/data/raw/squareroot/9bc2ac9b-fa9f-4fac-a960-1f9639102269__Babylon_sqrt.py | 1a10bb951e4c04d378758c4a075585b5a4778d26 | [] | no_license | erickmiller/AutomatousSourceCode | fbe8c8fbf215430a87a8e80d0479eb9c8807accb | 44ee2fb9ac970acf7389e5da35b930d076f2c530 | refs/heads/master | 2021-05-24T01:12:53.154621 | 2020-11-20T23:50:11 | 2020-11-20T23:50:11 | 60,889,742 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | def square_root(a):
"""Return the square root of a.
>>> square_root(9)
3.0
"""
x = 1
while x * x != a:
x = square_root_update(x, a)
return x
def square_root_update(x, a):
return average(x, a/x)
def average(x, y):
return (x + y)/2
| [
"erickmiller@gmail.com"
] | erickmiller@gmail.com |
8f6cbe3b5a97cfbd4419a1be1795d359956892fa | 53eee7eb899cb518983008532257037fb89def13 | /51.n-queens.py | a39e73f83fcbef748541b1370f3597e7ac75d757 | [] | no_license | chenxu0602/LeetCode | 0deb3041a66cb15e12ed4585bbe0fefce5dc6b26 | 3dc5af2bc870fcc8f2142130fcd2b7cab8733151 | refs/heads/master | 2023-07-05T19:26:21.608123 | 2023-07-02T08:35:35 | 2023-07-02T08:35:35 | 233,351,978 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,532 | py | #
# @lc app=leetcode id=51 lang=python3
#
# [51] N-Queens
#
# https://leetcode.com/problems/n-queens/description/
#
# algorithms
# Hard (43.46%)
# Likes: 1437
# Dislikes: 63
# Total Accepted: 177.9K
# Total Submissions: 408.2K
# Testcase Example: '4'
#
# The n-queens puzzle is the problem of placing n queens on an n×n chessboard
# such that no two queens attack each other.
#
#
#
# Given an integer n, return all distinct solutions to the n-queens puzzle.
#
# Each solution contains a distinct board configuration of the n-queens'
# placement, where 'Q' and '.' both indicate a queen and an empty space
# respectively.
#
# Example:
#
#
# Input: 4
# Output: [
# [".Q..", // Solution 1
# "...Q",
# "Q...",
# "..Q."],
#
# ["..Q.", // Solution 2
# "Q...",
# "...Q",
# ".Q.."]
# ]
# Explanation: There exist two distinct solutions to the 4-queens puzzle as
# shown above.
#
#
#
# @lc code=start
class Solution:
def solveNQueens(self, n: int) -> List[List[str]]:
def backtrack(queens, xy_diff, xy_sum):
p = len(queens)
if p == n:
res.append(queens)
return
for q in range(n):
if q not in queens and p - q not in xy_diff and p + q not in xy_sum:
backtrack(queens + [q], xy_diff + [p - q], xy_sum + [p + q])
res = []
backtrack([], [], [])
return [['.' * i + 'Q' + '.' * (n - i - 1) for i in sol] for sol in res]
# @lc code=end
| [
"chenxu@Chens-iMac.local"
] | chenxu@Chens-iMac.local |
8ece111453a17615bb8a45167b123bf01ff41168 | 4bfc3c184e736bb68dccbb6d5657f11c950df002 | /tests/operators/vector/test_scatter_nd_ad_001.py | 895444892735fed5ec3577c5ba74bc149e31e5f8 | [
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | laekov/akg | 159aa64ef6135222b5af784c408731275dfa9bdb | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | refs/heads/master | 2022-12-01T04:09:03.548063 | 2020-08-19T08:38:57 | 2020-08-19T08:41:28 | 288,678,192 | 0 | 0 | Apache-2.0 | 2020-08-19T08:41:30 | 2020-08-19T08:36:53 | Python | UTF-8 | Python | false | false | 2,249 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import datetime
import os
from base import TestBase
import pytest
from test_run.scatter_nd_ad_run import scatter_nd_ad_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
    """akg test harness for the scatter_nd autodiff operator."""

    def setup(self):
        case_name = "test_akg_scatter_nd_ad_001"
        case_path = os.getcwd()

        # params init
        self.params_init(case_name, case_path)

        self.caseresult = True
        self._log.info("============= {0} Setup case============".format(self.casename))
        # Each entry: (test flag, op run function, run args, dim args).
        self.testarg = [
            ## testflag,opfuncname,testRunArgs, dimArgs
            ('scatter_nd_ad_001', scatter_nd_ad_run, ([4, 1], [4], [8], "int32", 'float16'), ((32, 1), (32, 1))),
            #('scatter_nd_ad_002', scatter_nd_ad_run, ([3, 1], [3, 8], [8, 8], "int32", 'float32'), ((64, 1), (64, 1))),
            #('scatter_nd_ad_003', scatter_nd_ad_run, ([2, 1], [2, 8, 8], [8, 8, 8], "int32", 'float32'), ((64, 1), (64, 1))),
        ]
        return

    @pytest.mark.level2
    @pytest.mark.env_onecard
    @pytest.mark.platform_x86_ascend_training
    def test_run(self):
        # Delegates execution of all configured cases to the shared runner.
        self.common_run(self.testarg)

    def teardown(self):
        """
        clean environment
        :return:
        """
        self._log.info("============= {0} Teardown============".format(self.casename))
        return
| [
"ckey.chengbin@huawei.com"
] | ckey.chengbin@huawei.com |
55513fc2b5ef956726d964dc9117f62ffa961065 | bfe394e1b7d8a2ff34e37ae65df8cc52070c69d8 | /Source/External/DataProject/Data_PIREP.py | be0e8ee121eef155004d8c07ca2cb9b6d629a481 | [] | no_license | Jack-GVDL/PredictModel | bb32d37a5c18a656d5ebed36098ba3fac435fb96 | 20495072fb776c31c4bb5f2ddeecda1b43fcc52e | refs/heads/main | 2023-04-30T05:47:34.364328 | 2021-05-11T09:25:13 | 2021-05-11T09:25:13 | 366,314,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,396 | py | from typing import *
from ..DataChain import *
from .Data_Raw import DataKeyLib_Raw
class DataKey_PIREP(DataKey):
    """Key schema for PIREP (pilot report) records.

    Registers the raw-data keys this record type uses and caches each
    key's integer index for positional access into a data row.
    """

    def __init__(self):
        super().__init__()

        # data
        self._name = "PIREP"

        # key-index
        # Indices default to -1 and are resolved after the keys are added.
        self.date: int = -1
        self.time: int = -1
        self.latitude: int = -1
        self.longitude: int = -1
        self.height: int = -1
        self.pirep_turbulence: int = -1
        self.pirep_icing: int = -1

        self.key_date: DataKey = DataKeyLib_Raw.key_date
        self.key_time: DataKey = DataKeyLib_Raw.key_time
        self.key_longitude: DataKey = DataKeyLib_Raw.key_longitude
        self.key_latitude: DataKey = DataKeyLib_Raw.key_latitude
        self.key_height: DataKey = DataKeyLib_Raw.key_height
        self.key_pirep_turbulence: DataKey = DataKeyLib_Raw.key_pirep_turbulence
        self.key_pirep_icing: DataKey = DataKeyLib_Raw.key_pirep_icing

        # operation
        # add key
        self.addDataKey(self.key_date)
        self.addDataKey(self.key_time)
        self.addDataKey(self.key_longitude)
        self.addDataKey(self.key_latitude)
        self.addDataKey(self.key_height)
        self.addDataKey(self.key_pirep_turbulence)
        self.addDataKey(self.key_pirep_icing)

        # get index
        # NOTE(review): addDataKey/getKeyIndex_Key come from the DataKey
        # base class; index resolution presumably depends on insertion
        # order above — confirm before reordering.
        self.date = self.getKeyIndex_Key(self.key_date)
        self.time = self.getKeyIndex_Key(self.key_time)
        self.longitude = self.getKeyIndex_Key(self.key_longitude)
        self.latitude = self.getKeyIndex_Key(self.key_latitude)
        self.height = self.getKeyIndex_Key(self.key_height)
        self.pirep_turbulence = self.getKeyIndex_Key(self.key_pirep_turbulence)
        self.pirep_icing = self.getKeyIndex_Key(self.key_pirep_icing)

    def __del__(self):
        return

    # Property
    # ...

    # Operation
    # ...

    # Protected
    # ...
class DataHandler_Text_PIREP(DataHandler):
    """Loads PIREP records from a tab/whitespace-delimited text file."""

    def __init__(self):
        super().__init__()

        # data
        self._data_key = DataKey_PIREP()
        self.file_text = File_Text()

        # operation
        # ...

    def __del__(self):
        return

    # Property
    @property
    def data_key(self) -> DataKey:
        return self._data_key

    @data_key.setter
    def data_key(self, key: DataKey) -> DataKey:
        # The key schema is fixed for this handler; reassignment is an error.
        raise RuntimeError

    # Operation
    # ...

    # Protected
    def _load_(self, data_list: DataList) -> bool:
        # check
        if self.file_text is None:
            return False

        # load from plain text
        if not self.file_text.load():
            return False

        # for each item (Data_PIREP)
        for item in self.file_text.data:
            self._loadSingleData_(item, data_list)

        return True

    def _dump_(self, data: DataList) -> bool:
        # Writing PIREP files is not supported.
        raise RuntimeError

    """
    Format (tab delimited):
    [0] Event date DDMMYYYY/HHMM (in UTC)
    [1] Turbulence intensity
    [2] Icing intensity (may be absent)
    [3] Flight level (in metres)
    [4] Latitude
    [5] Longitude
    """
    def _loadSingleData_(self, s: str, data_list: DataList) -> None:
        # setup - file data
        string_list: List[str] = s.split()
        # When icing is absent the record has 5 fields and everything after
        # turbulence shifts left by one; offset_icing compensates below.
        offset_icing: int = 0

        if len(string_list) < 5:
            return

        # setup - data_key
        data_key: DataKey_PIREP = self._data_key

        # create object
        # TODO: assume: creation must be success
        # NOTE(review): the data object is created before parsing, so a
        # ValueError below leaves a partially-filled record in data_list —
        # confirm this is acceptable.
        data = data_list.createData()

        # icing may be absent
        try:
            if len(string_list) == 5:
                offset_icing = -1
                data[data_key.pirep_icing] = 0
            else:
                data[data_key.pirep_icing] = int(string_list[2])
        except ValueError:
            return

        # direct conversion
        try:
            data[data_key.date] = self._convertDate_(string_list[0])
            data[data_key.time] = self._convertTime_(string_list[0])
            data[data_key.pirep_turbulence] = int(string_list[1])
            data[data_key.latitude] = float(string_list[4 + offset_icing])
            data[data_key.longitude] = float(string_list[5 + offset_icing])
        except ValueError:
            return

        # convert height
        try:
            # from string to int
            height_list: List[str] = string_list[3 + offset_icing].split("-")

            # if the height is a range, get the mean
            if len(height_list) == 1:
                height = int(height_list[0])
            else:
                height = (int(height_list[0]) + int(height_list[1])) / 2

            # convert the value from metric to feet
            # 1 metre = 3.2808398950131 feet
            height = height * 3.2808398950131

            # set to data
            data[data_key.height] = int(height)
        except ValueError:
            return

    def _convertDate_(self, s: str) -> List[int]:
        # "DDMMYYYY/HHMM" -> [year, month, day]; [0, 0, 0] on parse failure.
        try:
            temp = [int(s[4:8]), int(s[2:4]), int(s[0:2])]
        except ValueError:
            return [0, 0, 0]
        return temp

    def _convertTime_(self, s: str) -> List[int]:
        # "DDMMYYYY/HHMM" -> [hour, minute, 0]; [0, 0, 0] on parse failure.
        try:
            temp = [int(s[9:11]), int(s[11:13]), 0]
        except ValueError:
            return [0, 0, 0]
        return temp
| [
"33114105+Jack-GVDL@users.noreply.github.com"
] | 33114105+Jack-GVDL@users.noreply.github.com |
186471bfecb77345d925d923453b48eb33a11159 | 666a077ce8ba97d8d234bd18d5d02cdc8ccb11e0 | /src/opencmiss/neon/ui/delegates/spinboxdelegate.py | f6c2141213f50bd10b94a5d3a27028d88f73428e | [
"Apache-2.0"
] | permissive | rchristie/neon | 1c8a4c428991d4a6d872c00e3336b4fb1fa005f0 | 3d59f24f680cf981d7221375a34cdff8d73f82a2 | refs/heads/develop | 2021-01-21T07:45:01.603190 | 2016-08-01T03:46:23 | 2016-08-01T03:46:23 | 47,146,276 | 0 | 0 | null | 2015-11-30T21:09:46 | 2015-11-30T21:09:46 | null | UTF-8 | Python | false | false | 1,491 | py | '''
Copyright 2015 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from PySide import QtCore, QtGui
class SpinBoxDelegate(QtGui.QStyledItemDelegate):
def __init__(self, parent=None):
super(SpinBoxDelegate, self).__init__(parent)
def createEditor(self, parent, option, index):
editor = QtGui.QSpinBox(parent)
editor.setFrame(False)
editor.setMinimum(3)
editor.setMaximum(9999)
editor.setValue(9)
return editor
def setEditorData(self, editor, index):
data = index.model().data(index, QtCore.Qt.EditRole)
if data is not None:
value = int(index.model().data(index, QtCore.Qt.EditRole))
editor.setValue(value)
def setModelData(self, editor, model, index):
value = editor.value()
model.setData(index, value, QtCore.Qt.EditRole)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect)
| [
"h.sorby@auckland.ac.nz"
] | h.sorby@auckland.ac.nz |
16d624c7e137c1f1d6d1854d6e049c5ddbc7b9b7 | 2d293a15dd64916f1f1e33e5a894d8ee96757072 | /questions/q26_rmDuplicFromSortedArray.py | 5cd4e53679c35656dc0e76ddbb61b3e529a22162 | [] | no_license | liang12k/leetcodeoj | 66238b76cf64ae0aeef7454fe2d449a781f461a7 | d432d1ac17e96f8a6f6a2d327020b58c46c34ecb | refs/heads/master | 2021-01-10T08:39:01.181646 | 2016-02-26T04:08:50 | 2016-02-26T04:08:50 | 36,776,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | '''
::KEY:: need to edit 'nums' list input!
Given a sorted array, remove the duplicates in place such that each element
appear only once and return the new length.
Do not allocate extra space for another array, you must do this in place with constant memory.
For example,
Given input array nums = [1,1,2],
Your function should return length = 2, with the first two elements of nums being 1 and 2 respectively.
It doesn't matter what you leave beyond the new length.
'''
class Solution(object):
# @param {integer[]} nums
# @return {integer}
def removeDuplicates(self, nums):
if not nums: return nums
# the tracker, keep tabs on latest value
# 'counter' for unique values so far & used to slice the list (see for loop)
currn=None
counter=0
for n in nums:
# if unique value, this is the latest
if currn!=n:
currn=n
# set unique value at current counter (used as index)
nums[counter]=currn
# increment counter for next index & as latest unique values counted
counter+=1
# nums has been edited, slice list to get from [0:counter]
nums=nums[:counter]
# return int as directed
return counter
if __name__=="__main__":
inp=[1,1,2]
print Solution().removeDuplicates(inp)
| [
"kenneth.liang12@gmail.com"
] | kenneth.liang12@gmail.com |
37baf800877b4e64c1ed930a78180a30033ad4d9 | 7730655277bcb97ce0f36e2a9cb022e0706c9c19 | /mltoolkit/mlmo/utils/helpers/computation.py | a9752cd94c1e5754e3e7113531587750cac06b5a | [
"MIT"
] | permissive | developerdrone/Copycat-abstractive-opinion-summarizer | 9de3ade697936934a0a9804bf35ddadaf55ce3f0 | 04fe5393a7bb6883516766b762f6a0c530e95375 | refs/heads/master | 2023-07-29T07:54:14.955146 | 2021-09-02T06:08:24 | 2021-09-02T06:08:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | import numpy as np
def cosine_sim(a, b):
    """Cosine similarity between vectors ``a`` and ``b``."""
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / denom
def score_candidates(target_reprs, cand_reprs, score_func):
    """Score every target repr against every candidate repr.

    :param score_func: callable taking (target_repr, cand_repr) -> float.
    :return: float32 array of shape [target_count, candidate_count].
    """
    shape = (len(target_reprs), len(cand_reprs))
    pairwise = [
        score_func(t_repr, c_repr)
        for t_repr in target_reprs
        for c_repr in cand_reprs
    ]
    return np.array(pairwise, dtype='float32').reshape(shape)
def select_cands_by_th(slot_scores, th=0.4):
    """Indices of candidates whose column-average score exceeds ``th``.

    :param slot_scores: [source_slot_segms_count, cand_segms_count] array.
    :param th: threshold on the per-candidate average score.
    :return: tuple of index arrays (numpy nonzero format).
    """
    above_th = slot_scores.mean(axis=0) > th
    return np.nonzero(above_th)
def select_cands_by_rank(slot_scores, top_n=1, order='descending'):
    """Unique indices of the ``top_n`` best candidates per source segment.

    :param slot_scores: [source_slot_segs_count, cand_seqs_count] array.
    :param top_n: number of candidates kept per source segment.
    :param order: 'descending' ranks high scores first, 'ascending' low.
    :return: sorted array of unique selected candidate indices.
    """
    assert order in ['descending', 'ascending']
    keyed = -slot_scores if order == 'descending' else slot_scores
    per_row_top = np.argsort(keyed, axis=1)[:, :top_n]
    return np.unique(per_row_top.reshape((-1,)))
| [
"bulletdll@gmail.com"
] | bulletdll@gmail.com |
cb1e3e04477b93b669491773c0448f16baa2508d | 460b4ec7a8e9332567bae637797c3cf29619d651 | /tfbrain/loss.py | 467da923941aeb65a45b3c83f617940b8c432dde | [] | no_license | agajews/tfbrain-v2 | e1a79d805007913afb91dd60c73bdf64beba4122 | 86f595bf61d41f70af2bcc6ac3cd6abd6aa0615f | refs/heads/master | 2021-01-13T09:10:02.249595 | 2016-09-26T13:04:02 | 2016-09-26T13:04:02 | 69,250,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | import tensorflow as tf
class Loss(object):
def __init__(self, **kwargs):
pass
def get_loss(self):
return self.loss
def build(self, net, y, mask=None):
'''y_hat: Layer subclass
y: a TF placeholder representing the expected output'''
self.net = net
self.y_hat = net.get_output()
self.y = y
self.mask = mask
self.sess = net.get_sess()
self._build()
def eval(self, xs, y, mask=None):
feed_dict = self.net.get_feed_dict(xs)
feed_dict.update({self.y: y})
if mask is not None:
feed_dict.update({self.mask, mask})
return self.sess.run(self.loss, feed_dict=feed_dict)
def _build(self):
'''y_hat: a TF tensor representing a model's output
y: a TF placeholder representing the expected output'''
raise NotImplementedError()
class MSE(Loss):
def _build(self):
if self.mask is None:
errors = self.y - self.y_hat
else:
errors = self.y - tf.reduce_sum(self.y_hat * self.mask,
reduction_indices=1)
self.loss = tf.reduce_mean(tf.square(errors))
class MSEDQN(Loss):
def _build(self):
if self.mask is None:
errors = self.y - self.y_hat
else:
errors = self.y - tf.reduce_sum(self.y_hat * self.mask,
reduction_indices=1)
difference = tf.abs(errors)
quadratic_part = tf.clip_by_value(difference, 0.0, 1.0)
linear_part = tf.sub(difference, quadratic_part)
errors = (0.5 * tf.square(quadratic_part)) + linear_part
self.loss = tf.reduce_sum(errors)
class Crossentropy(Loss):
def __init__(self, log_clip=1e-10, **kwargs):
Loss.__init__(self, **kwargs)
self.log_clip = log_clip
def _build(self):
assert self.mask is None
log_out = tf.log(tf.clip_by_value(self.y_hat, self.log_clip, 1.0))
errors = -tf.reduce_sum(self.y * log_out, reduction_indices=1)
self.loss = tf.reduce_mean(errors)
| [
"agajews@gmail.com"
] | agajews@gmail.com |
7b351dc5896f7459d7a71a80439a10c009299a66 | b80b5026d926acd9c5128c04e14f725301c7aa87 | /tests/test_dynamodb.py | 8324b3a016f4dc7da9dda7e0f34f06c48d54daf3 | [
"MIT"
] | permissive | sxencon/slam | 3d209ae87f0f6fc21fef5b0498a49641679994ed | e76153211a128e2600d4fb887126dfe2a0b10ea2 | refs/heads/master | 2021-01-23T13:10:13.300442 | 2017-05-24T06:39:31 | 2017-05-24T06:39:31 | 93,233,179 | 2 | 0 | null | 2017-06-03T07:59:43 | 2017-06-03T07:59:42 | null | UTF-8 | Python | false | false | 7,356 | py | from copy import deepcopy
import mock
import unittest
from slam.plugins import dynamodb
from .test_deploy import config as deploy_config
config = deepcopy(deploy_config)
config.update({'dynamodb_tables': dynamodb.init.func(config, 't1,t2')})
class DynamoDBTests(unittest.TestCase):
def test_init(self):
plugin_config = dynamodb.init.func(config=deploy_config,
dynamodb_tables='a,b ,c, d')
for table in ['a', 'b', 'c', 'd']:
self.assertIn(table, plugin_config)
self.assertEqual(plugin_config[table], {
'attributes': {'id': 'S'},
'key': 'id',
'read_throughput': 1,
'write_throughput': 1
})
def test_policies(self):
self.assertEqual(dynamodb._get_dynamodb_policies({}), [])
policies = dynamodb._get_dynamodb_policies(config)
self.assertEqual(len(policies), 1)
statement = policies[0]['PolicyDocument']['Statement'][0]
self.assertEqual(
statement['Action'],
['dynamodb:DeleteItem',
'dynamodb:GetItem',
'dynamodb:PutItem',
'dynamodb:Query',
'dynamodb:Scan',
'dynamodb:UpdateItem',
'dynamodb:DescribeTable'])
self.assertEqual(len(statement['Resource']), 6) # 2 tables x 3 stages
tables = [r['Fn::Join'][1][5]['Ref'] for r in statement['Resource']]
self.assertEqual(set(tables), {'DevT1DynamoDBTable',
'DevT2DynamoDBTable',
'StagingT1DynamoDBTable',
'StagingT2DynamoDBTable',
'ProdT1DynamoDBTable',
'ProdT2DynamoDBTable'})
def test_key_schema(self):
self.assertEqual(dynamodb._get_dynamodb_key_schema('foo'),
[{'AttributeName': 'foo', 'KeyType': 'HASH'}])
self.assertEqual(dynamodb._get_dynamodb_key_schema(['foo', 'bar']),
[{'AttributeName': 'foo', 'KeyType': 'HASH'},
{'AttributeName': 'bar', 'KeyType': 'RANGE'}])
def test_index_projection(self):
self.assertEqual(dynamodb._get_dynamodb_projection(None),
{'ProjectionType': 'KEYS_ONLY'})
self.assertEqual(dynamodb._get_dynamodb_projection([]),
{'ProjectionType': 'KEYS_ONLY'})
self.assertEqual(dynamodb._get_dynamodb_projection('all'),
{'ProjectionType': 'ALL'})
self.assertEqual(dynamodb._get_dynamodb_projection(['foo', 'bar']),
{'ProjectionType': 'INCLUDE',
'NonKeyAttributes': ['foo', 'bar']})
@mock.patch('slam.plugins.dynamodb._get_dynamodb_key_schema',
return_value='key-schema')
def test_table_schema(self, *args):
cfg = deepcopy(config)
cfg['dynamodb_tables']['t1']['attributes'] = {'id': 'S', 'name': 'S',
'age': 'N'}
cfg['dynamodb_tables']['t1']['read_throughput'] = 2
cfg['dynamodb_tables']['t1']['write_throughput'] = 4
table = dynamodb._get_table_resource(cfg, 'dev', 't1')
self.assertEqual(table['Properties']['TableName'], 'dev.t1')
self.assertEqual(len(table['Properties']['AttributeDefinitions']), 3)
for attr in table['Properties']['AttributeDefinitions']:
self.assertIn(attr['AttributeName'],
cfg['dynamodb_tables']['t1']['attributes'])
self.assertEqual(attr['AttributeType'],
cfg['dynamodb_tables']['t1']['attributes']
[attr['AttributeName']])
self.assertEqual(table['Properties']['ProvisionedThroughput'],
{'ReadCapacityUnits': 2, 'WriteCapacityUnits': 4})
self.assertEqual(table['Properties']['KeySchema'], 'key-schema')
@mock.patch('slam.plugins.dynamodb._get_dynamodb_projection',
return_value='projection')
@mock.patch('slam.plugins.dynamodb._get_dynamodb_key_schema',
return_value='key-schema')
def test_local_indexes(self, _get_dynamodb_key_schema,
_get_dynamodb_projection):
cfg = deepcopy(config)
cfg['dynamodb_tables']['t1']['attributes'] = {'id': 'S', 'name': 'S'}
cfg['dynamodb_tables']['t1']['local_secondary_indexes'] = {
'index1': {'key': 'foo', 'project': 'bar'},
'index2': {'key': 'foo2', 'project': 'bar2'}
}
table = dynamodb._get_table_resource(cfg, 'dev', 't1')
self.assertIn({
'IndexName': 'index1',
'KeySchema': 'key-schema',
'Projection': 'projection'
}, table['Properties']['LocalSecondaryIndexes'])
self.assertIn({
'IndexName': 'index2',
'KeySchema': 'key-schema',
'Projection': 'projection'
}, table['Properties']['LocalSecondaryIndexes'])
_get_dynamodb_key_schema.assert_any_call('foo')
_get_dynamodb_projection.assert_any_call('bar')
_get_dynamodb_projection.assert_any_call('bar2')
@mock.patch('slam.plugins.dynamodb._get_dynamodb_projection',
return_value='projection')
@mock.patch('slam.plugins.dynamodb._get_dynamodb_key_schema',
return_value='key-schema')
def test_global_indexes(self, _get_dynamodb_key_schema,
_get_dynamodb_projection):
cfg = deepcopy(config)
cfg['dynamodb_tables']['t1']['attributes'] = {'id': 'S', 'name': 'S'}
cfg['dynamodb_tables']['t1']['global_secondary_indexes'] = {
'index2': {'key': 'foo', 'project': 'bar', 'read_throughput': 2,
'write_throughput': 4}
}
table = dynamodb._get_table_resource(cfg, 'dev', 't1')
self.assertEqual(table['Properties']['GlobalSecondaryIndexes'], [{
'IndexName': 'index2',
'KeySchema': 'key-schema',
'Projection': 'projection',
'ProvisionedThroughput': {'ReadCapacityUnits': 2,
'WriteCapacityUnits': 4}
}])
_get_dynamodb_key_schema.assert_any_call('foo')
_get_dynamodb_projection.assert_called_once_with('bar')
@mock.patch('slam.plugins.dynamodb._get_dynamodb_policies',
return_value=['policies'])
@mock.patch('slam.plugins.dynamodb._get_table_resource',
return_value='resource')
def test_cfn_template(self, _get_table_resource, _get_dynamodb_policies):
tpl = dynamodb.cfn_template(config, {'Resources': {
'FunctionExecutionRole': {'Properties': {'Policies': ['foo']}}}})
self.assertEqual(tpl, {'Resources': {
'DevT1DynamoDBTable': 'resource',
'DevT2DynamoDBTable': 'resource',
'StagingT1DynamoDBTable': 'resource',
'StagingT2DynamoDBTable': 'resource',
'ProdT1DynamoDBTable': 'resource',
'ProdT2DynamoDBTable': 'resource',
'FunctionExecutionRole': {
'Properties': {'Policies': ['foo', 'policies']}}
}})
| [
"miguel.grinberg@gmail.com"
] | miguel.grinberg@gmail.com |
019b11de4d0556da2167bfff1479169d920b15fd | bd62843278ffc297ef8f6d75a931f1f4ca4caaa7 | /exercises/staircase_20180720.py | 9c1efaa04e361e3b7f18cf159a72fc529678d5b8 | [] | no_license | raysmith619/Introduction-To-Programming | d3bae042b4fc17bd56e8631a4d660233d8cd165b | bedc16eb5f6db0ad3b313355df6d51b5161c3835 | refs/heads/master | 2023-07-19T08:43:41.229893 | 2023-07-15T19:22:28 | 2023-07-15T19:22:28 | 132,622,195 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | #staircase.py 18Jul2018
"""
Write a function staircase(first_n, last_n)
"""
def staircase(first_n, last_n):
sum = 0
n = first_n
while n <= last_n:
sum = sum + n
n = n + 1
return sum
print("1,10 ==> ", staircase(1,10))
print("1,100 ==> ", staircase(1,100))
print("5,5 ==> ", staircase(5,5))
print("10,1 ==> ", staircase(10,1))
| [
"noreply@github.com"
] | raysmith619.noreply@github.com |
8b80a972bf22566cb02c31aa33521f89c7fc9fcd | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/cloud/retail/v2beta/retail-v2beta-py/google/cloud/retail_v2beta/types/purge_config.py | c9e1d7f387900608f425ea6351450c0f5396b4cc | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,516 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.cloud.retail.v2beta',
manifest={
'PurgeMetadata',
'PurgeUserEventsRequest',
'PurgeUserEventsResponse',
},
)
class PurgeMetadata(proto.Message):
r"""Metadata related to the progress of the Purge operation.
This will be returned by the
google.longrunning.Operation.metadata field.
"""
class PurgeUserEventsRequest(proto.Message):
r"""Request message for PurgeUserEvents method.
Attributes:
parent (str):
Required. The resource name of the catalog
under which the events are created. The format
is
"projects/${projectId}/locations/global/catalogs/${catalogId}".
filter (str):
Required. The filter string to specify the events to be
deleted with a length limit of 5,000 characters. Empty
string filter is not allowed. The eligible fields for
filtering are:
- ``eventType``: Double quoted
[UserEvent.event_type][google.cloud.retail.v2beta.UserEvent.event_type]
string.
- ``eventTime``: in ISO 8601 "zulu" format.
- ``visitorId``: Double quoted string. Specifying this will
delete all events associated with a visitor.
- ``userId``: Double quoted string. Specifying this will
delete all events associated with a user.
Examples:
- Deleting all events in a time range:
``eventTime > "2012-04-23T18:25:43.511Z" eventTime < "2012-04-23T18:30:43.511Z"``
- Deleting specific eventType in time range:
``eventTime > "2012-04-23T18:25:43.511Z" eventType = "detail-page-view"``
- Deleting all events for a specific visitor:
``visitorId = "visitor1024"``
The filtering fields are assumed to have an implicit AND.
force (bool):
Actually perform the purge. If ``force`` is set to false,
the method will return the expected purge count without
deleting any user events.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
filter = proto.Field(
proto.STRING,
number=2,
)
force = proto.Field(
proto.BOOL,
number=3,
)
class PurgeUserEventsResponse(proto.Message):
r"""Response of the PurgeUserEventsRequest. If the long running
operation is successfully done, then this message is returned by
the google.longrunning.Operations.response field.
Attributes:
purged_events_count (int):
The total count of events purged as a result
of the operation.
"""
purged_events_count = proto.Field(
proto.INT64,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
aba27c928883760a8e56fefdbca4f15230131d0b | 443406462ab99481ed1ccfa1e1ed483a242fed78 | /test/test_val_match.py | 4ced5e55541ea7b820747d29197dc15902923a36 | [
"MIT"
] | permissive | RiftNemesis/Pyot | 4343334fd2e2cde29d78d9a0025eb0b4c177c323 | 7c34bbd4bcdad37ec512dcdaac43ae5f69f12975 | refs/heads/master | 2023-05-29T13:05:27.106176 | 2021-06-17T15:24:30 | 2021-06-17T15:24:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,371 | py | from datetime import datetime, timedelta
from pyot.utils import loop_run
from pyot.models import val, riot
async def async_match_history():
account = await riot.Account(name="Tuxedo", tag="AYAYA", region="AMERICAS").get(pipeline="val")
history = await val.MatchHistory(puuid=account.puuid, platform="NA").get()
for match in history:
assert isinstance(match, val.Match)
assert isinstance(match.id, str)
assert isinstance(match.creation, datetime)
assert isinstance(match.team_id, str)
async def async_match():
account = await riot.Account(name="Tuxedo", tag="AYAYA", region="AMERICAS").get(pipeline="val")
history = await val.MatchHistory(puuid=account.puuid, platform="NA").get()
match = await history[0].get()
info = match.info
players = match.players
teams = match.teams
results = match.round_results
assert isinstance(info.id, str)
assert isinstance(info.map_id, str)
assert isinstance(info.duration, timedelta)
assert isinstance(info.creation, datetime)
assert isinstance(info.provisioning_flow_id, str)
assert isinstance(info.is_completed, bool)
assert isinstance(info.custom_game_name, str)
assert isinstance(info.queue_id, str)
assert isinstance(info.game_mode, str)
assert isinstance(info.is_ranked, bool)
assert isinstance(info.season_id, str)
for i in players:
assert isinstance(i.puuid, str)
assert isinstance(i.team_id, str)
assert isinstance(i.party_id, str)
assert isinstance(i.character_id, str)
assert isinstance(i.competitive_tier, int)
assert isinstance(i.player_card, str)
assert isinstance(i.player_title, str)
stat = i.stats
assert isinstance(stat.score, int)
assert isinstance(stat.rounds_played, int)
assert isinstance(stat.kills, int)
assert isinstance(stat.deaths, int)
assert isinstance(stat.assists, int)
assert isinstance(stat.playtime, timedelta)
ability = stat.ability_casts
if ability is not None:
assert isinstance(ability.grenade_casts, int) if ability.grenade_casts is not None else True
assert isinstance(ability.ability1_casts, int) if ability.ability1_casts is not None else True
assert isinstance(ability.ability2_casts, int) if ability.ability2_casts is not None else True
assert isinstance(ability.ultimate_casts, int) if ability.ultimate_casts is not None else True
for i in teams:
assert isinstance(i.id, str)
assert isinstance(i.won, bool)
assert isinstance(i.rounds_played, int)
assert isinstance(i.rounds_won, int)
for i in results:
assert isinstance(i.round_num, int)
assert isinstance(i.round_result, str)
assert isinstance(i.round_ceremony, str)
assert isinstance(i.winning_team, str)
# assert isinstance(i.bomb_planter_puuid, str)
# assert isinstance(i.bomb_defuser_puuid, str)
assert isinstance(i.plant_round_time, timedelta)
assert isinstance(i.plant_site, str)
assert isinstance(i.defuse_round_time, timedelta)
assert isinstance(i.round_result_code, str)
l1 = i.defuse_location
l2 = i.plant_location
assert isinstance(l1.x, int)
assert isinstance(l1.y, int)
assert isinstance(l2.x, int)
assert isinstance(l2.y, int)
defuse = i.defuse_player_locations
plant = i.plant_player_locations
for pi in [defuse, plant]:
if pi is not None:
for p in pi:
assert isinstance(p.puuid, str)
assert isinstance(p.view_radians, float)
assert isinstance(p.location.x, int)
assert isinstance(p.location.y, int)
pss = i.player_stats
for ps in pss:
assert isinstance(ps.puuid, str)
assert isinstance(ps.score, int)
for k in ps.kills:
assert isinstance(k.game_time, timedelta)
assert isinstance(k.round_time, timedelta)
assert isinstance(k.killer_puuid, str)
assert isinstance(k.victim_puuid, str)
assert k.assistant_puuids is not None
assert isinstance(k.victim_location.x, int)
assert isinstance(k.victim_location.y, int)
for pl in k.player_locations:
assert isinstance(pl.puuid, str)
assert isinstance(pl.view_radians, float) or pl.view_radians == 0
assert isinstance(pl.location.x, int)
assert isinstance(pl.location.y, int)
fd = k.finishing_damage
assert isinstance(fd.damage_type, str)
assert isinstance(fd.damage_item, str)
assert isinstance(fd.is_secondary_fire_mode, bool)
for d in ps.damage:
assert isinstance(d.receiver, str)
assert isinstance(d.damage, int)
assert isinstance(d.legshots, int)
assert isinstance(d.bodyshots, int)
assert isinstance(d.headshots, int)
ec = ps.economy
assert isinstance(ec.loadout_value, int)
assert isinstance(ec.weapon, str)
assert isinstance(ec.armor, str)
assert isinstance(ec.remaining, int)
assert isinstance(ec.spent, int)
abi = ps.ability
assert isinstance(abi.grenade_effects, int) if abi.grenade_effects is not None else True
assert isinstance(abi.ability1_effects, int) if abi.ability1_effects is not None else True
assert isinstance(abi.ability2_effects, int) if abi.ability2_effects is not None else True
assert isinstance(abi.ultimate_effects, int) if abi.ultimate_effects is not None else True
async def async_recent():
recent = await val.RecentMatches(queue="competitive", platform="NA").get()
assert isinstance(recent.current_time, datetime)
for match in recent.matches:
assert isinstance(match, val.Match)
assert match.platform == recent.platform
def test_match_history():
loop_run(async_match_history())
def test_match():
loop_run(async_match())
def test_recent():
loop_run(async_recent())
| [
"paaksingtech@gmail.com"
] | paaksingtech@gmail.com |
f185a7c8f9a3cd84146c245ef42ae3dd162ea955 | 3f4f40fede22e93d64c5b7c687461d6fdcf11202 | /binder/jupyter_notebook_config.py | 2ee53857ae141ce9d98154c80e950799c26dc939 | [] | no_license | mamba-org/mamba-navigator | 2fb682354b9a318866de4c79ccd3e2dd3b859c14 | 6c7c8bda9897b08f63b41578c7cac30e60417255 | refs/heads/master | 2022-12-18T16:49:10.599377 | 2020-09-16T09:56:23 | 2020-09-16T09:56:23 | 263,335,713 | 4 | 1 | null | 2020-09-16T09:56:25 | 2020-05-12T12:53:10 | TypeScript | UTF-8 | Python | false | false | 595 | py | import sys
c.ServerProxy.servers = {
"mamba": {
"command": [
sys.executable,
"main.py",
"--no-browser",
'--port={port}',
"--ip=0.0.0.0",
"--NotebookApp.token=''",
"--NotebookApp.base_url={base_url}mamba",
"--NotebookApp.allow_remote_access=True",
],
"timeout": 120,
"absolute_url": True,
"launcher_entry": {
"enabled": True,
"icon_path": "/home/jovyan/style/mamba.svg",
"title": "Mamba Navigator",
},
},
}
| [
"jeremy.tuloup@gmail.com"
] | jeremy.tuloup@gmail.com |
9641ebd3537d79835e97f84a1d8756a5a9a55f1c | e6b1ad137a9bd3d39ae7c61cb5c7f7956ce095b9 | /bruteforce/sum_of_evens.py | 82a04c6dc86d97e9cdd498226df324d34a360b6e | [] | no_license | jadenpadua/Data-Structures-and-Algorithms | d9ba8ece779a2d564a7d66fcbacc9fb7fa1f7205 | 838c29112fec4beb9d9cc3f54db00492b4a480b0 | refs/heads/master | 2021-07-17T13:10:52.029327 | 2020-07-08T02:00:14 | 2020-07-08T02:00:14 | 186,896,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | Create a function that returns the sum of all even elements in a 2D matrix.
def sum_of_evens(lst):
sum = 0
for i in range (len(lst)):
for j in range (len(lst[i])):
if (lst[i][j]%2==0):
sum += lst[i][j]
return sum
| [
"noreply@github.com"
] | jadenpadua.noreply@github.com |
5b760d12cea3f25316b2a67a254ec6f4f3401c7c | bc233c24523f05708dd1e091dca817f9095e6bb5 | /bitmovin_api_sdk/encoding/configurations/audio/opus/customdata/customdata_api.py | a54550e92efdc897ff0f7a3687a09d7c143398eb | [
"MIT"
] | permissive | bitmovin/bitmovin-api-sdk-python | e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd | b0860c0b1be7747cf22ad060985504da625255eb | refs/heads/main | 2023-09-01T15:41:03.628720 | 2023-08-30T10:52:13 | 2023-08-30T10:52:13 | 175,209,828 | 13 | 14 | MIT | 2021-04-29T12:30:31 | 2019-03-12T12:47:18 | Python | UTF-8 | Python | false | false | 1,364 | py | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.custom_data import CustomData
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
class CustomdataApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(CustomdataApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def get(self, configuration_id, **kwargs):
# type: (string_types, dict) -> CustomData
"""Opus Codec Configuration Custom Data
:param configuration_id: Id of the codec configuration
:type configuration_id: string_types, required
:return: Opus codec configuration custom data
:rtype: CustomData
"""
return self.api_client.get(
'/encoding/configurations/audio/opus/{configuration_id}/customData',
path_params={'configuration_id': configuration_id},
type=CustomData,
**kwargs
)
| [
"openapi@bitmovin.com"
] | openapi@bitmovin.com |
2a8ac7c9d2d3078b808eb31de461bd7522a12385 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2374/58585/316440.py | cac23d042dd5f238056bfb6df4de059e5a5494be | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | a=int(input())
b=int(input())
c=input()
d=int(input())
e=input()
if a==2 and b==5 and c=='5 5 4 6 4':
print('4 4 5 5 6 ')
print('9 9 9 2 5 ')
elif a==2 and b==5 and c=='5 5 4 5 4' and d==5 and e=='9 5 2 2 5':
print('5 5 5 4 4 ')
print('2 2 5 5 9 ')
elif a==2 and b==5 and c=='5 5 4 5 4' and d==5 and e=='9 9 2 2 5':
print('5 5 5 4 4 ')
print('2 2 9 9 5 ')
else:
print('5 5 5 4 4 ')
print('9 9 9 2 5 ') | [
"1069583789@qq.com"
] | 1069583789@qq.com |
c4614e23b2b15bb0e0061871ae7895e8cfa3bc7e | 9d6817b67f7993b3a43319894ebd508b1fa92f9f | /python/WJ_MiNNLO_NNLOPSLike_withPhotos_cff.py | 096f9df590fd8bcf688719768b8893f11d1394a9 | [] | no_license | kdlong/WMassNanoGen | b7c5c12df52862d7dd9d9554d7654b9e5d701167 | d1e0c6db75f671eb593cf907307189cd95aa31f6 | refs/heads/master | 2023-06-27T07:21:53.971633 | 2023-06-19T13:32:41 | 2023-06-19T13:32:41 | 235,908,488 | 2 | 7 | null | 2023-06-12T09:10:10 | 2020-01-23T23:43:11 | Python | UTF-8 | Python | false | false | 3,194 | py | import FWCore.ParameterSet.Config as cms
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/afs/cern.ch/work/m/mseidel/public/MiNNLO-gridpacks/Wj_slc6_amd64_gcc700_CMSSW_10_2_16_WplusJToMuNu-nnlopslike-powheg-MiNNLO.tgz'),
nEvents = cms.untracked.uint32(5000),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
from Configuration.Generator.Pythia8PowhegEmissionVetoSettings_cfi import *
from Configuration.Generator.PSweightsPythia.PythiaPSweightsSettings_cfi import *
generator = cms.EDFilter("Pythia8HadronizerFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
pythia8PSweightsSettingsBlock,
pythia8PowhegEmissionVetoSettingsBlock,
processParameters = cms.vstring(
'POWHEG:nFinal = 2', ## Number of final state particles
## (BEFORE THE DECAYS) in the LHE
## other than emitted extra parton
'ParticleDecays:allowPhotonRadiation = on',
'TimeShower:QEDshowerByL = off',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'pythia8PowhegEmissionVetoSettings',
'pythia8PSweightsSettings',
'processParameters')
),
ExternalDecays = cms.PSet(
Photospp = cms.untracked.PSet(
parameterSets = cms.vstring("setExponentiation", "setInfraredCutOff", "setMeCorrectionWtForW", "setMeCorrectionWtForZ", "setMomentumConservationThreshold", "setPairEmission", "setPhotonEmission", "setStopAtCriticalError", "suppressAll", "forceBremForDecay"),
setExponentiation = cms.bool(True),
setMeCorrectionWtForW = cms.bool(True),
setMeCorrectionWtForZ = cms.bool(True),
setInfraredCutOff = cms.double(0.00011),
setMomentumConservationThreshold = cms.double(0.1),
setPairEmission = cms.bool(True),
setPhotonEmission = cms.bool(True),
setStopAtCriticalError = cms.bool(False),
# Use Photos only for W/Z decays
suppressAll = cms.bool(True),
forceBremForDecay = cms.PSet(
parameterSets = cms.vstring("Z", "Wp", "Wm"),
Z = cms.vint32(0, 23),
Wp = cms.vint32(0, 24),
Wm = cms.vint32(0, -24),
),
),
parameterSets = cms.vstring("Photospp")
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"kdlong@hep.wisc.edu"
] | kdlong@hep.wisc.edu |
f16c43bebdb7978fe5f3a18d22a34eb59fe7c6d6 | 1e4eefff1c19ffb81016ce99f2284fb657293f65 | /special/src/paths.py | 67debb8325148760c72f41298431e8cf52493132 | [] | no_license | Solero93/bcn-algorithm-club-py | 5e1edf15f087e0edf2cf7ba0859fb5e4523525ad | 1edf407498756e7ba36534387bb4241b8b455c4f | refs/heads/master | 2020-03-28T09:06:30.328130 | 2019-03-25T10:38:48 | 2019-03-25T10:38:48 | 148,014,386 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | """
/**
* 5 points
*
*
* Given a binary tree and a sum, determine if the tree has a root-to-leaf path such that adding up all the values along the path equals the given sum.
*
*
* Note: A leaf is a node with no children.
*
* Example:
*
* Given the below binary tree and sum = 22,
*
* 5
* / \
* 4 8
* / / \
* 11 13 4
* / \ \
* 7 2 1
*
* return true, as there exist a root-to-leaf path 5->4->11->2 which sum is 22.
*/
"""
def has_path_sum(list_representation: list, sum_path: int) -> bool:
return True
| [
"solero93@gmail.com"
] | solero93@gmail.com |
9b6f6f24e900ff1e3448bfbaa608c7b3bf20aa6b | c03b6ed252fb3120b5972f39f980b717901a9cea | /app.py | a47506455a61a455b9320186cae0105530842bce | [] | no_license | vvalotto/autenticacion_flask | 69f79fd191921184c7a04c8db26a849edd047341 | 3b2db225b2445817a50429692b01e50d2a97a2d5 | refs/heads/master | 2020-05-18T07:16:36.140013 | 2019-04-30T17:42:35 | 2019-04-30T17:42:35 | 184,260,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | from flask import Flask
from flask import Flask, flash, redirect, render_template, request, session, abort
import os
from sqlalchemy.orm import sessionmaker
from table import *
engine = create_engine('sqlite:///tutorial.db', echo=True)
app = Flask(__name__)
@app.route('/')
def home():
if not session.get('logged_in'):
return render_template('login.html')
else:
return "Hola! <a href=/logout>Logout</a>"
@app.route('/login', methods=['POST'])
def do_admin_login():
POST_USERNAME = str(request.form['username'])
POST_PASSWORD = str(request.form['password'])
Session = sessionmaker(bind=engine)
s = Session()
query = s.query(User).filter(User.username.in_([POST_USERNAME]), User.password.in_([POST_PASSWORD]))
result = query.first()
if result:
session['logged_in'] = True
else:
flash('wrong password!')
return home()
@app.route("/logout")
def logout():
session['logged_in'] = False
return home()
if __name__ == "__main__":
app.secret_key = os.urandom(12)
app.run(debug=True, host='127.0.0.1', port=4000)
| [
"vvalotto@gmail.com"
] | vvalotto@gmail.com |
3d3127cc236021f4c7634594ae93a4e47381a056 | cdcd00bc69f4e37958ae1c0619f198a156a02e2e | /data_integration/parallel_tasks/sql.py | fd439deb4be2441f7bcf517d9dd71759b0334150 | [
"MIT"
] | permissive | willyhakim/data-integration | 57321727ec850f973ce24ae32092be25347331aa | 1f87ae2cef824d2347885dc64b11ddbdc95082eb | refs/heads/master | 2020-03-14T21:24:03.693810 | 2018-04-30T11:45:18 | 2018-04-30T11:45:18 | 131,795,287 | 1 | 0 | null | 2018-05-02T03:50:57 | 2018-05-02T03:50:57 | null | UTF-8 | Python | false | false | 3,344 | py | import inspect
import re
import typing
from data_integration import config, pipelines
from data_integration.commands import sql
from mara_page import _, html
class ParallelExecuteSQL(pipelines.ParallelTask, sql._SQLCommand):
def __init__(self, id: str, description: str, parameter_function: typing.Callable, parameter_placeholders: [str],
max_number_of_parallel_tasks: int = None, sql_statement: str = None, file_name: str = None,
commands_before: [pipelines.Command] = None, commands_after: [pipelines.Command] = None,
db_alias: str = None, echo_queries: bool = True, timezone: str = None,
replace: {str: str} = None) -> None:
if (not (sql_statement or file_name)) or (sql_statement and file_name):
raise ValueError('Please provide either sql_statement or file_name (but not both)')
pipelines.ParallelTask.__init__(self, id=id, description=description,
max_number_of_parallel_tasks=max_number_of_parallel_tasks,
commands_before=commands_before, commands_after=commands_after)
sql._SQLCommand.__init__(self, sql_statement, file_name, replace)
self.parameter_function = parameter_function
self.parameter_placeholders = parameter_placeholders
self._db_alias = db_alias
self.timezone = timezone
self.echo_queries = echo_queries
@property
def db_alias(self):
return self._db_alias or config.default_db_alias()
def add_parallel_tasks(self, sub_pipeline: 'pipelines.Pipeline') -> None:
parameters = self.parameter_function()
if not isinstance(parameters, list) or not all(isinstance(item, tuple) for item in parameters):
raise ValueError(f'parameter function should return a list of tuples, got "{repr(parameters)}"')
for parameter_tuple in parameters:
id = '-'.join([re.sub('[^0-9a-z\-_]+', '', str(x).lower().replace('-', '_')) for x in parameter_tuple])
replace = self.replace.copy()
for placeholder, param in zip(self.parameter_placeholders, parameter_tuple):
replace[placeholder] = param
sub_pipeline.add(pipelines.Task(
id=id, description=f'Execute SQL for parameters {repr(parameter_tuple)}',
commands=[
sql.ExecuteSQL(sql_file_name=self.sql_file_name, db_alias=self.db_alias,
echo_queries=self.echo_queries, timezone=self.timezone, replace=replace)
if self.sql_file_name else
sql.ExecuteSQL(sql_statement=self.sql_statement, db_alias=self.db_alias,
echo_queries=self.echo_queries, timezone=self.timezone, replace=replace)]))
def html_doc_items(self) -> [(str, str)]:
return [('db', _.tt[self.db_alias])] \
+ sql._SQLCommand.html_doc_items(self, self.db_alias) \
+ [('parameter function', html.highlight_syntax(inspect.getsource(self.parameter_function), 'python')),
('parameter placeholders', _.tt[repr(self.parameter_placeholders)]),
('echo queries', _.tt[str(self.echo_queries)]),
('timezone', _.tt[self.timezone or ''])]
| [
"martin.loetzsch@gmail.com"
] | martin.loetzsch@gmail.com |
0df74f09405291abe27edf78b7b4e06bb4ee8b14 | e942cafaf64f6354e1f9ebd4a84bcf236ad93004 | /yawast/_static_version.py | 2df3a4a6275a3ae95963ccbac54a9ee4d0e3f762 | [
"MIT"
] | permissive | Prodject/yawast | 9a441a0576012dc5f0664cd23cfa0a803fd7a477 | 044309709cf3782de75a35f77297f2d2850d8e1c | refs/heads/master | 2020-03-23T02:32:12.357082 | 2020-01-21T18:13:19 | 2020-01-21T18:13:19 | 140,978,938 | 0 | 0 | BSD-3-Clause | 2020-01-21T18:13:20 | 2018-07-14T21:23:05 | Ruby | UTF-8 | Python | false | false | 627 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2013 - 2020 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
# This file is part of 'miniver': https://github.com/jbweston/miniver
#
# This file will be overwritten by setup.py when a source or binary
# distribution is made. The magic value "__use_git__" is interpreted by
# version.py.
version = "__use_git__"
# These values are only set if the distribution was created with 'git archive'
refnames = "$Format:%D$"
git_hash = "$Format:%h$"
| [
"adam@adamcaudill.com"
] | adam@adamcaudill.com |
87a309cb82702de33083d597c32f97b9f550b950 | 6c816f19d7f4a3d89abbb00eeaf43dd818ecc34f | /apps/account/migrations/0055_auto_20210414_2208.py | 0a9881008c4515c36c8979af88d753d8ec9147de | [] | no_license | reo-dev/bolt | 29ee6aa7cfc96bd50fa7a7dae07fbaafc2125e54 | d1a7859dd1ebe2f5b0e6e295047b620f5afdb92e | refs/heads/master | 2023-07-13T04:05:57.856278 | 2021-08-27T09:07:03 | 2021-08-27T09:07:03 | 382,195,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | # Generated by Django 3.0.8 on 2021-04-14 13:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('category', '0011_auto_20210219_1901'),
('account', '0054_auto_20210413_1634'),
]
operations = [
migrations.RemoveField(
model_name='clientclass',
name='client',
),
migrations.RemoveField(
model_name='machine',
name='partner',
),
migrations.RemoveField(
model_name='partner_estimate',
name='partner',
),
migrations.RemoveField(
model_name='partnercategory',
name='category',
),
migrations.RemoveField(
model_name='partnercategory',
name='partner',
),
migrations.RemoveField(
model_name='process',
name='partner',
),
migrations.RemoveField(
model_name='structure',
name='partner',
),
migrations.RemoveField(
model_name='partner',
name='career',
),
migrations.RemoveField(
model_name='partner',
name='coin',
),
migrations.RemoveField(
model_name='partner',
name='employee',
),
migrations.RemoveField(
model_name='partner',
name='info_biz',
),
migrations.RemoveField(
model_name='partner',
name='region',
),
migrations.RemoveField(
model_name='partner',
name='revenue',
),
migrations.DeleteModel(
name='Certification',
),
migrations.DeleteModel(
name='Clientclass',
),
migrations.DeleteModel(
name='Machine',
),
migrations.DeleteModel(
name='Partner_Estimate',
),
migrations.DeleteModel(
name='PartnerCategory',
),
migrations.DeleteModel(
name='Process',
),
migrations.DeleteModel(
name='Structure',
),
]
| [
"75593016+reo-dev@users.noreply.github.com"
] | 75593016+reo-dev@users.noreply.github.com |
365ba771af380e6b95fbad2e044704d6ec5cc9fa | 0987f31e64bcacb41ba3a1e20054d7b8ac0d7346 | /practice/algorithm_datastructure_for_programming_contest/309_ALDS1_12_C_AtCoder.py | 98b22cbaed6fa9c73066ce9865cb9924191fd201 | [] | no_license | masakiaota/kyoupuro | 81ae52ab3014fb2b1e10472994afa4caa9ea463b | 74915a40ac157f89fe400e3f98e9bf3c10012cd7 | refs/heads/master | 2021-06-27T04:13:52.152582 | 2020-09-20T03:21:17 | 2020-09-20T03:21:17 | 147,049,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | # https://onlinejudge.u-aizu.ac.jp/courses/lesson/1/ALDS1/12/ALDS1_12_C
# これに関してもAtCoderではすでにある実装が使える https://note.nkmk.me/python-scipy-shortest-path/
# じゅっぴーさんの記事 https://juppy.hatenablog.com/entry/2019/06/04/scipy%E3%81%AEFloyd-Warshall%E3%81%A8Dijkstra%E3%81%AE%E3%81%99%E3%81%99%E3%82%81_Python_%E7%AB%B6%E6%8A%80%E3%83%97%E3%83%AD%E3%82%B0%E3%83%A9%E3%83%9F%E3%83%B3%E3%82%B0_Atcoder_1
# 1つ目の記事にあるようにdijkstraでなくshortest_path関数に引数を入れるのが実用的な使い方か
INF = 10**6
from scipy.sparse.csgraph import dijkstra
from scipy.sparse import csr_matrix, lil_matrix
# scipy.sparse.csgraphでは基本的に隣接行列の入力を想定している
# 機械学習ではcsrが基本的だがlil_matrixのほうがデータを打ち込むのが早いらしい
# load datas
N = int(input())
# adj_mat = csr_matrix((N, N))
adj_mat = lil_matrix((N, N))
# print(adj_mat.shape)
# print(adj_mat)
for _ in range(N):
tmp = list(map(int, input().split()))
if tmp[1] != 0:
node = tmp[0]
for i in range(2, 2 + tmp[1] * 2, 2):
adj_mat[node, tmp[i]] = tmp[i + 1]
D = dijkstra(adj_mat)[0]
# 行ごとにその行を始点としたときの各ノードへの最短経路が計算されるのでそれを取り出すだけ
for i in range(N):
print(i, int(D[i]))
| [
"aotamasakimail@gmail.com"
] | aotamasakimail@gmail.com |
554d1fdcac2ae51244747b651cfd9c9970d8637f | e6ef2915c35faa8d09e846708323b4b79786a5fe | /nobrainer/cli_click.py | 9d82f7e8bb816f394ee7ee8983f2047cbdb5e71d | [
"Apache-2.0"
] | permissive | yarikoptic/kwyk | 1bbe1ed795ffecac1157d67c5f3f40003dc5e379 | 7e2b2bdee58962f229befe9375fc5afeadd4aa3c | refs/heads/master | 2020-07-09T01:44:37.646045 | 2019-08-21T02:32:41 | 2019-08-21T02:32:41 | 203,839,875 | 0 | 0 | null | 2019-08-22T17:13:21 | 2019-08-22T17:13:20 | null | UTF-8 | Python | false | false | 4,726 | py | from pathlib import Path
import subprocess
import tempfile
import click
import nibabel as nib
import numpy as np
from nobrainer.io import read_volume
from nobrainer.predict import _get_predictor
from nobrainer.predict import predict_from_filepath
from nobrainer.volume import from_blocks
from nobrainer.volume import to_blocks
from nobrainer.volume import zscore
_here = Path(__file__).parent
_models = {
'bwn': _here.parent / 'saved_models' / 'all_50_wn' / '1555341859',
'bwn_multi': _here.parent / 'saved_models' / 'all_50_bwn_09_multi' / '1555963478',
'bvwn_multi_prior': _here.parent / 'saved_models' / 'all_50_bvwn_multi_prior' / '1556816070',
}
@click.command()
@click.argument('infile')
@click.argument('outprefix')
@click.option('-m', '--model', type=click.Choice(_models.keys()), default="bwn_multi", required=True, help='Model to use for prediction.')
@click.option('-n', '--n-samples', type=int, default=1, help='Number of samples to predict.')
@click.option('-b', '--batch-size', type=int, default=8, help='Batch size during prediction.')
@click.option('--save-variance', is_flag=True, help='Save volume with variance across `n-samples` predictions.')
@click.option('--save-entropy', is_flag=True, help='Save volume of entropy values.')
def predict(*, infile, outprefix, model, n_samples, batch_size, save_variance, save_entropy):
"""Predict labels from features using a trained model.
The predictions are saved to OUTPREFIX_* with the same extension as the input file.
If you encounter out-of-memory issues, use a lower batch size value.
"""
_orig_infile = infile
# Are there other neuroimaging file extensions with multiple periods?
if infile.lower().endswith('.nii.gz'):
outfile_ext = '.nii.gz'
else:
outfile_ext = Path(infile).suffix
outfile_stem = outprefix
outfile_means = "{}_means{}".format(outfile_stem, outfile_ext)
outfile_variance = "{}_variance{}".format(outfile_stem, outfile_ext)
outfile_entropy = "{}_entropy{}".format(outfile_stem, outfile_ext)
for ff in [outfile_means, outfile_variance, outfile_entropy]:
if Path(ff).exists():
raise FileExistsError("file exists: {}".format(ff))
required_shape = (256, 256, 256)
block_shape = (32, 32, 32)
img = nib.load(infile)
ndim = len(img.shape)
if ndim != 3:
raise ValueError("Input volume must have three dimensions but got {}.".format(ndim))
if img.shape != required_shape:
tmp = tempfile.NamedTemporaryFile(suffix='.nii.gz')
print("++ Conforming volume to 1mm^3 voxels and size 256x256x256.")
_conform(infile, tmp.name)
infile = tmp.name
else:
tmp = None
savedmodel_path = _models[model]
print("++ Running forward pass of model.")
predictor = _get_predictor(savedmodel_path)
outputs = predict_from_filepath(
infile,
predictor=predictor,
block_shape=block_shape,
return_variance=True,
return_entropy=True,
n_samples=n_samples,
batch_size=batch_size,
normalizer=zscore)
# Delete temporary file.
if tmp is not None:
tmp.close()
if n_samples > 1:
means, variance, entropy = outputs
else:
means, entropy = outputs
variance = None
outfile_means_orig = "{}_means_orig{}".format(outfile_stem, outfile_ext)
outfile_variance_orig = "{}_variance_orig{}".format(outfile_stem, outfile_ext)
outfile_entropy_orig = "{}_entropy_orig{}".format(outfile_stem, outfile_ext)
print("++ Saving results.")
data = np.round(means.get_fdata()).astype(np.uint8)
means = nib.Nifti1Image(data, header=means.header, affine=means.affine)
means.header.set_data_dtype(np.uint8)
nib.save(means, outfile_means)
_reslice(outfile_means, outfile_means_orig, _orig_infile, True)
if save_variance and variance is not None:
nib.save(variance, outfile_variance)
_reslice(outfile_variance, outfile_variance_orig, _orig_infile)
if save_entropy:
nib.save(entropy, outfile_entropy)
_reslice(outfile_entropy, outfile_entropy_orig, _orig_infile)
def _conform(input, output):
"""Conform volume using FreeSurfer."""
subprocess.run(['mri_convert', '--conform', input, output], check=True)
return output
def _reslice(input, output, reference, labels=False):
"""Conform volume using FreeSurfer."""
if labels:
subprocess.run(['mri_convert', '-rl', reference, '-rt', 'nearest', '-ns', '1',
input, output],
check=True)
else:
subprocess.run(['mri_convert', '-rl', reference, input, output], check=True)
return output
| [
"satra@mit.edu"
] | satra@mit.edu |
8954a181b5e8d7b31145e5c139935b9780e4d1eb | a7f442bc306d1a8366a3e30db50af0c2c90e9091 | /blockchain-env/Lib/site-packages/Cryptodome/Util/number.pyi | f9864ae390b6a99941307c3bfa0524d8bd651e30 | [] | no_license | Patreva/Python-flask-react-blockchain | cbdce3e0f55d4ba68be6ecfba35620585894bbbc | 474a9795820d8a4b5a370d400d55b52580055a2e | refs/heads/main | 2023-03-29T01:18:53.985398 | 2021-04-06T08:01:24 | 2021-04-06T08:01:24 | 318,560,922 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | pyi | from typing import List, Optional, Callable
def ceil_div(n: int, d: int) -> int: ...
def size (N: int) -> int: ...
def getRandomInteger(N: int, randfunc: Optional[Callable]=None) -> int: ...
def getRandomRange(a: int, b: int, randfunc: Optional[Callable]=None) -> int: ...
def getRandomNBitInteger(N: int, randfunc: Optional[Callable]=None) -> int: ...
def GCD(x: int,y: int) -> int: ...
def inverse(u: int, v: int) -> int: ...
def getPrime(N: int, randfunc: Optional[Callable]=None) -> int: ...
def getStrongPrime(N: int, e: Optional[int]=0, false_positive_prob: Optional[float]=1e-6, randfunc: Optional[Callable]=None) -> int: ...
def isPrime(N: int, false_positive_prob: Optional[float]=1e-6, randfunc: Optional[Callable]=None) -> bool: ...
def long_to_bytes(n: int, blocksize: Optional[int]=0) -> bytes: ...
def bytes_to_long(s: bytes) -> int: ...
def long2str(n: int, blocksize: Optional[int]=0) -> bytes: ...
def str2long(s: bytes) -> int: ...
sieve_base: List[int]
| [
"patrickwahome74@gmail.com"
] | patrickwahome74@gmail.com |
1e02a37c7f9aaf6c82b9607e145b16c47b81a547 | 5280cb50a8b61615a2b92474944c721b1f222aba | /main_projects/aspaceify_extents/scripts/test_aspace_split.py | dddc7a655aa138304b44e7ad6f462b6a471a3104 | [] | no_license | bentley-historical-library/bentley_code | 944125c58c0b9bceef5f424fd58a5282fea52a6f | 61bdfc5b12c088b605e25c9835bf50ab14cfbc14 | refs/heads/master | 2020-07-10T01:05:32.433263 | 2015-07-24T20:58:44 | 2015-07-24T20:58:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,868 | py | from __future__ import absolute_import
import unittest
from collections import namedtuple
from aspaceify_extents.scripts.make_aspace_extent_distinctions import split_into_aspace_components
class TestASpaceSplit(unittest.TestCase):
def setUp(self):
self.ASpaceExtent = namedtuple("ASpaceExtent", ["type_", "portion", "container_summary", "dimensions", "physfacet"])
def check_output_equality(self, extent, type_="", container_summary="", dimensions="", physfacet="", portion="whole", multiple=False):
target_namedtuple = self.ASpaceExtent(type_=type_, portion=portion, physfacet=physfacet, container_summary=container_summary, dimensions=dimensions)
generated_namedtuple = split_into_aspace_components(extent, multiple)
self.assertEqual(generated_namedtuple, target_namedtuple)
def test_type_only(self):
self.check_output_equality("5 volumes", type_="5 volumes")
def test_paren_placed_in_container_summary(self):
self.check_output_equality("5 linear feet (in 5 boxes)", type_="5 linear feet", container_summary="(in 5 boxes)")
def test_in_section_placed_in_container_summary(self):
self.check_output_equality("5 linear feet in 5 boxes", type_="5 linear feet", container_summary="(in 5 boxes)")
def test_dimension_placed_in_dimensions(self):
dimension_examples = [
("p, 2x4in.", "2x4in."),
("p, 2x4 in.", "2x4 in."),
("p, 2x4-5x8 cm.", "2x4-5x8 cm."),
("p, 20 x 40 cm", "20 x 40 cm"),
("p, 3-1/2x5 to 4x6-inch", "3-1/2x5 to 4x6-inch"),
("p, 79.5 x 113.8 cm. (31 x 44-3/8 inches)", "79.5 x 113.8 cm.; 31 x 44-3/8 inches")
]
for dimension_example in dimension_examples:
original_text, dimension = dimension_example
self.check_output_equality(original_text, type_="p", dimensions=dimension)
def test_complex_dimensions(self):
self.check_output_equality("228 3-1/2x5 to 4x6-inch, prints in 5 boxes",
type_="228 prints", dimensions="3-1/2x5 to 4x6-inch", container_summary="(in 5 boxes)")
def test_black_and_white_put_in_phys_facet(self):
self.check_output_equality("48 black and white 8x10-inch prints", type_="48 prints", dimensions="8x10-inch", physfacet="black and white")
def test_horrific_extent_1(self):
self.check_output_equality("26 3-1/4x4-1/4-inch, color and black-and-white; Polaroid prints",
type_="26 Polaroid prints",
dimensions="3-1/4x4-1/4-inch",
physfacet="color and black-and-white",
portion="whole")
def test_horrific_extent_2(self):
self.check_output_equality("236 3-1/2x5-1/2 and 4x6-inch, color prints",
type_="236 prints",
dimensions="3-1/2x5-1/2 and 4x6-inch",
physfacet="color",
portion="whole")
def test_in_edge_case_1(self):
self.check_output_equality("14 folders; formerly in binders", type_="14 folders", container_summary="(formerly in binders)")
def test_in_edge_case_2(self):
self.check_output_equality("(in 4 boxes)", type_="", container_summary="(in 4 boxes)")
def test_reel_special_cases(self):
self.check_output_equality("5 inch reel, 3 3/4 ips", type_="reel", physfacet="5 inch; 3 3/4 ips")
self.check_output_equality('7" reel, 3.75 ips.', type_="reel", physfacet='7"; 3.75 ips')
self.check_output_equality('1 10 1/2" reel', type_="1 reel", physfacet='10 1/2"')
self.check_output_equality("3/4-inch reel", type_="reel", physfacet="3/4-inch")
self.check_output_equality("1 sound tape reel: 7 1/2 ips; 5 inches", type_="1 sound tape reel", physfacet="7 1/2 ips; 5 inches")
self.check_output_equality("2 sound tape reels: 3 3/4 ips; 7 inches", type_="2 sound tape reels", physfacet="3 3/4 ips; 7 inches")
self.check_output_equality("5 sound tape reels (dual track): 7 1/2 ips; 7 inches", type_="5 sound tape reels", physfacet="dual track; 7 1/2 ips; 7 inches")
self.check_output_equality('2 tapes, 3-3/4 ips', type_="2 tapes", physfacet="3-3/4 ips")
self.check_output_equality("147 sound tape reels : 3 3/4 - 7 1/2 ips ; 5-10 inches", type_="147 sound tape reels", physfacet="3 3/4 - 7 1/2 ips ; 5-10 inches")
def test_rpm(self):
self.check_output_equality("33 1/3 rpm Phonograph Records", type_="Phonograph Records", physfacet="33 1/3 rpm")
self.check_output_equality("set of 4 records, 45 rpm,33 1/3 rpm", type_="set of 4 records", physfacet="45 rpm; 33 1/3 rpm")
def test_time_dimensions(self):
self.check_output_equality("50:59", type_="", dimensions="50:59")
self.check_output_equality("2:18 min.", type_="", dimensions="2:18 min.")
self.check_output_equality("ca. 15 min.", type_="", dimensions="ca. 15 min.")
self.check_output_equality("1 sound tape reel (13:08)", type_="1 sound tape reel", dimensions="13:08")
self.check_output_equality("1 sound tape reel (ca. 12 min.)", type_="1 sound tape reel", dimensions="ca. 12 min.")
self.check_output_equality("1 sound tape reel: ca. 3 min.", type_="1 sound tape reel", dimensions="ca. 3 min.")
def test_color_not_removed_when_part_of_other_words(self):
self.check_output_equality("original drawings, pencil and colored pencil on tracing paper", type_="original drawings, pencil and colored pencil on tracing paper")
def test_portion_assigns_part_correctly(self):
self.check_output_equality("1 linear foot", type_="1 linear foot", portion="part", multiple=True)
if __name__ == "__main__":
unittest.main()
| [
"wboyle13@gmail.com"
] | wboyle13@gmail.com |
f81c8d924008de759cda6f47958157a9921dd4e6 | a5c4ea16042a8078e360c32636c00e3163ac99a8 | /PractitionerBundle/practice/chap11-deepergooglenet/pyimagesearch/nn/conv/minigooglenet.py | 04f4ebdcd102bf3c334524029ba9bb8cd187c6f9 | [] | no_license | lykhahaha/Mine | 3b74571b116f72ee17721038ca4c58796610cedd | 1439e7b161a7cd612b0d6fa4403b4c8c61648060 | refs/heads/master | 2020-07-15T05:16:13.808047 | 2019-06-01T07:30:01 | 2019-06-01T07:30:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,760 | py | from keras.layers import Conv2D, BatchNormalization, Dropout, MaxPooling2D, concatenate, Dense, AveragePooling2D, Flatten
from keras.models import Input, Model
from keras.regularizers import l2
from keras import backend as K
class MiniGoogLeNet:
@staticmethod
def conv_module(x, K, k_x, k_y, stride, padding='same', chan_dim, reg=None):
x = Conv2D(K, (k_x, k_y), strides=stride, padding=padding, activation='relu', kernel_regularizer=l2(reg))(x)
x = BatchNormalization(axis=chan_dim)(x)
return x
@staticmethod
def inception_module(x, num_1x1, num_3x3, chan_dim, reg=None):
first = MiniGoogLeNet.conv_module(x, num_1x1, 1, 1, (1, 1), chan_dim=chan_dim, reg=reg)
second = MiniGoogLeNet.conv_module(x, num_3x3, 3, 3, (1, 1), chan_dim=chan_dim, reg=reg)
return concatenate([first, second], axis=chan_dim)
@staticmethod
def downsample_module(x, num_3x3, chan_dim, reg=None):
first = MiniGoogLeNet.conv_module(x, num_3x3, 3, 3, (2, 2), padding='valid', chan_dim, reg=reg)
second = MaxPooling2D((3, 3), strides=(2, 2))(x)
return concatenate([first, second], axis=chan_dim)
@staticmethod
def build(width, height, depth, classes, reg=None):
# define input shape and set channels last
input_shape = (width, height, depth)
chan_dim = -1
if K.image_data_format == 'channels_first':
input_shape = (depth, width, height)
chan_dim = 1
# define input and first convolution
inputs = Input(shape=input_shape)
x = MiniGoogLeNet.conv_module(inputs, 96, 3, 3, (1, 1), chan_dim=chan_dim, reg=reg)
# define inception - inception - downsample
x = MiniGoogLeNet.inception_module(x, 32, 32, chan_dim, reg)
x = MiniGoogLeNet.inception_module(x, 32, 48, chan_dim, reg)
x = MiniGoogLeNet.downsample_module(x, 80, chan_dim, reg)
# define inception - inception - inception - inception- downsample
x = MiniGoogLeNet.inception_module(x, 112, 48, chan_dim, reg)
x = MiniGoogLeNet.inception_module(x, 96, 64, chan_dim, reg)
x = MiniGoogLeNet.inception_module(x, 80, 80, chan_dim, reg)
x = MiniGoogLeNet.inception_module(x, 48, 96, chan_dim, reg)
x = MiniGoogLeNet.downsample_module(x, 96, chan_dim, reg)
# define inception - inception
x = MiniGoogLeNet.inception_module(x, 176, 160, chan_dim, reg)
x = MiniGoogLeNet.inception_module(x, 176, 160, chan_dim, reg)
x = AveragePooling2D((7, 7))(x)
x = Dropout(0.5)(x)
# final layers
x = Flatten()(x)
x = Dense(classes, activation='softmax', kernel_regularizer=l2(reg))(x)
return Model(inputs, x) | [
"ITITIU15033@student.hcmiu.edu.vn"
] | ITITIU15033@student.hcmiu.edu.vn |
e5bf07685aa283a83e0748b3c2bb4bfe06af782e | 0e0cb55fb13e5ee6c60869a4da7812120817eb33 | /ruantong/概率分布/几何分布.py | c274c6b72d37a529e680cbb0eb23ecdd2a6fab97 | [] | no_license | xjwhhh/Probability-Theory | 7d2a89fdeb37b4112da5a52583df149ce119d389 | 1ce2e7843de9c5ed44c189992003f88c66abd13b | refs/heads/master | 2020-12-02T18:12:51.775550 | 2017-08-09T05:42:42 | 2017-08-09T05:42:42 | 96,496,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import geom as G
rv=G(0.2)#p=0.2
x=np.arange(1,11,1)
y=rv.pmf(x)
plt.bar(x,y,width=0.6,color='grey')
plt.show()
print(y) | [
"151250171@smail.nju.edu.cn"
] | 151250171@smail.nju.edu.cn |
df118c794a0cc62832e6333daf6389c9a655c01d | b87f66b13293782321e20c39aebc05defd8d4b48 | /maps/build/Traits/enthought/traits/ui/editors/dnd_editor.py | 22afe66e05115ed4899e7412692ac979d6dcade2 | [] | no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py | #------------------------------------------------------------------------------
#
# Copyright (c) 2008, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 06/25/2006
#
#------------------------------------------------------------------------------
""" Defines the editor factory for a drag-and-drop editor. A drag-and-drop
editor represents its value as a simple image which, depending upon the
editor style, can be a drag source only, a drop target only, or both a
drag source and a drop target.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from __future__ import absolute_import
from ..ui_traits import Image
from ..editor_factory import EditorFactory
#-------------------------------------------------------------------------------
# 'ToolkitEditorFactory' class:
#-------------------------------------------------------------------------------
class ToolkitEditorFactory ( EditorFactory ):
""" Editor factory for drag-and-drop editors.
"""
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# The image to use for the target:
image = Image
# The image to use when the target is disabled:
disabled_image = Image
# Define the DNDEditor class.
DNDEditor = ToolkitEditorFactory
# EOF #########################################################################
| [
"fspaolo@gmail.com"
] | fspaolo@gmail.com |
d0a119ae6529af7f07541b10b9c8ecb1ff6390ed | 26d802852cdfef2b57902a350b4fa17d5aa07f50 | /13_revp/solution3_revp.py | 018e307228530fa71eb403b8b57157332268547b | [
"MIT"
] | permissive | Vstrains/biofx_python | 7f5e27707f11e77f08d1516381a08a267fd81a1c | 7a2821dba36f1dae8404efbe35f44242833d6180 | refs/heads/main | 2023-03-12T09:51:57.766441 | 2021-02-25T03:22:15 | 2021-02-25T03:22:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | #!/usr/bin/env python3
""" Locating Restriction Sites """
import argparse
import sys
import operator
from typing import List, NamedTuple, TextIO
from Bio import SeqIO, Seq
from common import find_kmers
class Args(NamedTuple):
""" Command-line arguments """
file: TextIO
# --------------------------------------------------
def get_args() -> Args:
""" Get command-line arguments """
parser = argparse.ArgumentParser(
description='Locating Restriction Sites',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file',
help='Input FASTA file',
metavar='FILE',
type=argparse.FileType('rt'))
args = parser.parse_args()
return Args(args.file)
# --------------------------------------------------
def main() -> None:
""" Make a jazz noise here """
args = get_args()
recs = SeqIO.parse(args.file, 'fasta')
if rec := next(recs):
for k in range(4, 13):
for pos in revp(str(rec.seq), k):
print(pos, k)
# for k, pos in [(k, p) for k in range(4, 13) for p in revp(seq, k)]:
# print(pos, k)
else:
sys.exit(f'"{args.file.name}" contains no sequences.')
# --------------------------------------------------
def revp(seq: str, k: int) -> List[int]:
""" Return positions of reverse palindromes """
kmers = find_kmers(seq, k)
revc = map(Seq.reverse_complement, kmers)
pairs = enumerate(zip(kmers, revc))
return [pos + 1 for pos, pair in pairs if operator.eq(*pair)]
# --------------------------------------------------
def test_revp() -> None:
""" Test revp """
assert revp('CGCATGCATTGA', 4) == [3, 5]
assert revp('CGCATGCATTGA', 5) == []
assert revp('CGCATGCATTGA', 6) == [2, 4]
assert revp('CGCATGCATTGA', 7) == []
assert revp('CCCGCATGCATT', 4) == [5, 7]
assert revp('CCCGCATGCATT', 5) == []
assert revp('CCCGCATGCATT', 6) == [4, 6]
# --------------------------------------------------
if __name__ == '__main__':
main()
| [
"kyclark@gmail.com"
] | kyclark@gmail.com |
64cb85a8ce03348ff0b349a19422abd9fe6515af | 1aa357433cad86c1c42eeaece7109094cdf790a6 | /12_extensions/ext/custom_ext_pygments.py | 134a984984bcae1b2676618d0b6ef706fa115976 | [
"MIT"
] | permissive | hooj0/jinja2-template-examples | 89ab4466d69ad2232922126a3f208f02b0ed3798 | 20481153a964d46e007e807fd2be9a0c42201dd0 | refs/heads/master | 2020-03-28T11:02:13.451556 | 2018-09-12T07:50:40 | 2018-09-12T07:50:40 | 148,171,173 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,559 | py | #!/usr/bin/env python3
# encoding: utf-8
# @author: hoojo
# @email: hoojo_@126.com
# @github: https://github.com/hooj0
# @create date: 2018-09-12
# @copyright by hoojo @2018
# @link http://www.bjhee.com/jinja2-extension.html
# @changelog user custom extension
# ===============================================================================
# 标题:利用 pygments 库开发 jinja2 template的扩展,做到代码高亮的效果
# ===============================================================================
# 使用:pip install pygments
# -------------------------------------------------------------------------------
# 描述:Pygments是Python提供语法高亮的工具,官网是pygments.org。
# http://pygments.org/docs/
# -------------------------------------------------------------------------------
from jinja2 import nodes
from jinja2.ext import Extension
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import guess_lexer, get_lexer_by_name
# -------------------------------------------------------------------------------
# 创建一个自定义扩展类,继承jinja2.ext.Extension
# -------------------------------------------------------------------------------
class PygmentsExtension(Extension):
# 定义该扩展的语句关键字,这里表示模板中的{% code %}语句会该扩展处理
tags = set(['code'])
def __init__(self, environment):
# 初始化父类,必须这样写
super(PygmentsExtension, self).__init__(environment)
# 在Jinja2的环境变量中添加属性,
# 这样在Flask中,就可以用app.jinja_env.pygments来访问
environment.extend(
pygments=self,
pygments_support=True
)
# 重写jinja2.ext.Extension类的parse函数
# 这是处理模板中{% code %}语句的主程序
def parse(self, parser):
# 进入此函数时,即表示{% code %}标签被找到了
# 下面的代码会获取当前{% code %}语句在模板文件中的行号
lineno = next(parser.stream).lineno
# 获取{% code %}语句中的参数,比如我们调用{% code 'python' %},
# 这里就会返回一个jinja2.nodes.Const类型的对象,值为'python'
lang_type = parser.parse_expression()
# 将参数封装为列表
args = []
if lang_type is not None:
args.append(lang_type)
# 下面的代码可以支持两个参数,参数之间用逗号分隔,不过本例中用不到
# 这里先检查当前处理流的位置是不是个逗号,是的话就再获取一个参数
# 不是的话,就在参数列表最后加个空值对象
# if parser.stream.skip_if('comma'):
# args.append(parser.parse_expression())
# else:
# args.append(nodes.Const(None))
# 解析从{% code %}标志开始,到{% endcode %}为止中间的所有语句
# 将解析完后的内容存在body里,并将当前流位置移到{% endcode %}之后
body = parser.parse_statements(['name:endcode'], drop_needle=True)
# 返回一个CallBlock类型的节点,并将其之前取得的行号设置在该节点中
# 初始化CallBlock节点时,传入我们自定义的"_pygmentize"方法的调用,
# 两个空列表,还有刚才解析后的语句内容body
return nodes.CallBlock(self.call_method('_pygmentize', args), [], [], body).set_lineno(lineno)
# 这个自定义的内部函数,包含了本扩展的主要逻辑。
# 其实上面parse()函数内容,大部分扩展都可以重用
def _pygmentize(self, lang_type, caller):
# 初始化HTML格式器
formatter = HtmlFormatter(linenos='table')
# 获取{% code %}语句中的内容
# 这里caller()对应了上面调用CallBlock()时传入的body
content = caller()
# 将模板语句中解析到了lang_type设置为我们要高亮的语言类型
# 如果这个变量不存在,则让Pygmentize猜测可能的语言类型
lexer = None
if lang_type is None:
lexer = guess_lexer(content)
else:
lexer = get_lexer_by_name(lang_type)
# 将{% code %}语句中的内容高亮,即添加各种<span>, class等标签属性
return highlight(content, lexer, formatter) | [
"hoojo@qq.com"
] | hoojo@qq.com |
adf58aff82e8028fa40481c8c506bcd1b433c7bb | 5cb7627fc47d57ba7c1fc402a3671c17625c8965 | /python/paddle_fl/mpc/examples/test_add.py | 05895b3a832912e4b5bfaa0c77eb5f68b1854ad7 | [] | no_license | NetHole/PaddleFL | c5951e32027dff714baead6e7a6f6135e1ca01a0 | e0c7192c90dda91f64167cf01e79628fd81dc981 | refs/heads/master | 2022-06-24T19:41:42.640865 | 2020-05-11T01:57:07 | 2020-05-11T01:57:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test mpc add op
"""
# set proper path for fluid_encrypted without install, should be first line
import env_set
import sys
import numpy as np
import paddle.fluid as fluid
import paddle_fl.mpc as pfl_mpc
role, server, port = env_set.TestOptions().values()
# call mpc add
pfl_mpc.init("aby3", int(role), "localhost", server, int(port))
data_1 = pfl_mpc.data(name='data_1', shape=[8], dtype='int64')
data_2 = pfl_mpc.data(name='data_2', shape=[8], dtype='int64')
d_1 = np.array(
[[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]]).astype('int64')
d_2 = np.array(
[[7, 6, 5, 4, 3, 2, 1, 0], [7, 6, 5, 4, 3, 2, 1, 0]]).astype('int64')
out_add = data_1 + data_2
exe = fluid.Executor(place=fluid.CPUPlace())
out_add = exe.run(feed={
'data_1': d_1,
'data_2': d_2,
}, fetch_list=[out_add])
print(out_add)
| [
"jingqinghe@baidu.com"
] | jingqinghe@baidu.com |
72a6652e0e2791748a30e32624658798735448df | 0b9e588b3d6ddf95d87a0a0f02d10ef6efcccf51 | /eduapi/api/migrations/0053_auto_20151215_1221.py | 12e1e5e1a4431fdff8d44dbe908b16b8fc5b03a4 | [] | no_license | omni360/inspiration-edu-api | b5d07a7fe3a473689d5323e60e6f88dd3d6fb4cb | 6e1bbf8d895082d4c44af4ae35b9f5aa5cc9addc | refs/heads/master | 2022-01-22T23:30:09.879433 | 2016-04-28T02:02:46 | 2016-04-28T02:02:46 | 57,559,736 | 0 | 0 | null | 2022-01-06T22:24:03 | 2016-05-01T06:35:12 | Python | UTF-8 | Python | false | false | 5,507 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
class Migration(migrations.Migration):
dependencies = [
('api', '0052_merge'),
]
operations = [
migrations.RenameField(
model_name='project',
old_name='teacher_info',
new_name='teacher_additional_resources',
),
migrations.AddField(
model_name='project',
name='ccss',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=10, choices=[(b'RL', b'Reading Literature'), (b'RI', b'Reading Informational Text'), (b'RF', b'Reading Foundational Skills'), (b'W', b'Writing'), (b'SL', b'Speaking & Listening'), (b'L', b'Language'), (b'RST', b'Reading Science & Technical Subjects'), (b'WHST', b'Writing in History, Science, & Technical Subjects'), (b'CC', b'Counting and Cardinality'), (b'OA', b'Operations & Algebraic Thinking'), (b'NBT', b'Number & Operation in Base Ten'), (b'NF', b'Number & operations-Fractions'), (b'MD', b'Measurement and Data'), (b'G', b'Geometry'), (b'RP', b'Ratios and Proportional Relationships'), (b'NS', b'Number System'), (b'EE', b'Expressions and Equations'), (b'F', b'Functions'), (b'SP', b'Statistics and Probability'), (b'MP', b'Math Practices')]), blank=True),
),
migrations.AddField(
model_name='project',
name='four_cs_collaboration',
field=models.TextField(help_text=b'4 cs collaboration', max_length=250, null=True, blank=True),
),
migrations.AddField(
model_name='project',
name='four_cs_communication',
field=models.TextField(help_text=b'4 cs communication', max_length=250, null=True, blank=True),
),
migrations.AddField(
model_name='project',
name='four_cs_creativity',
field=models.TextField(help_text=b'4 cs creativity', max_length=250, null=True, blank=True),
),
migrations.AddField(
model_name='project',
name='four_cs_critical',
field=models.TextField(help_text=b'4 cs critical', max_length=250, null=True, blank=True),
),
migrations.AddField(
model_name='project',
name='grades_range',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=10, choices=[(b'K', b'K'), (b'1', b'1'), (b'2', b'2'), (b'3', b'3'), (b'4', b'4'), (b'5', b'5'), (b'6', b'6'), (b'7', b'7'), (b'8', b'8'), (b'9', b'9'), (b'10', b'10'), (b'11', b'11'), (b'12', b'12')]), blank=True),
),
migrations.AddField(
model_name='project',
name='learning_objectives',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=25), blank=True),
),
migrations.AddField(
model_name='project',
name='ngss',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=10, choices=[(b'PS1', b'Matter and Its Interactions'), (b'PS2', b'Motion and Stability: Forces and Interactions'), (b'PS3', b'Energy'), (b'PS4', b'Waves and Their Applications in Technologies for Information Transfer'), (b'LS1', b'From Molecules to Organisms: Structures and Processes'), (b'LS2', b'Ecosystems: Interactions, Energy, and Dynamics'), (b'LS3', b'Heredity: Inheritance and Variation of Traits'), (b'LS4', b'Biological Evolution: Unity and Diversity'), (b'ESS1', b"Earth's Place in the Universe"), (b'ESS2', b"Earth's Systems"), (b'ESS3', b'Earth and Human Activity'), (b'ETS1', b'Engineering Design'), (b'ETS2', b'Links Among Engineering, Technology, Science, and Society')]), blank=True),
),
migrations.AddField(
model_name='project',
name='prerequisites',
field=models.TextField(default=b'', max_length=1000, null=True, help_text=b'Course prerequisites', blank=True),
),
migrations.AddField(
model_name='project',
name='skills_acquired',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=25), blank=True),
),
migrations.AddField(
model_name='project',
name='subject',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=25, choices=[(b'art', b'Art'), (b'drama', b'Drama'), (b'geography', b'Geography'), (b'history', b'History'), (b'language art', b'Language Arts'), (b'math', b'Math'), (b'music', b'Music'), (b'science', b'Science'), (b'social studies', b'Social Studies'), (b'technology', b'Technology')]), blank=True),
),
migrations.AddField(
model_name='project',
name='teacher_tips',
field=models.TextField(default=b'', max_length=1000, null=True, help_text=b'Tips for teachers', blank=True),
),
migrations.AddField(
model_name='project',
name='technology',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=25, choices=[(b'3d printing', b'3D Printing'), (b'electronics', b'Electronics'), (b'3d design', b'3D Design')]), blank=True),
),
]
| [
"frida.cai@autodesk.com"
] | frida.cai@autodesk.com |
fcca02f2f1adbc8f766107ecdaa0c0ff86a0d061 | d4e573e8eae32db155fe5931b3e2dcd3aa48969b | /indigo/bin/rocon_uri | 5eb4af63dbf48f015786da051418f1767653e6b9 | [] | no_license | javierdiazp/myros | ee52b0a7c972d559a1a377f8de4eb37878b8a99b | 7571febdfa881872cae6378bf7266deca7901529 | refs/heads/master | 2022-11-09T09:24:47.708988 | 2016-11-10T16:56:28 | 2016-11-10T16:56:28 | 73,733,895 | 0 | 1 | null | 2022-10-25T05:16:35 | 2016-11-14T18:19:06 | C++ | UTF-8 | Python | false | false | 5,048 | #!/usr/bin/python
#
# License: BSD
# https://raw.github.com/robotics-in-concert/tools/license/LICENSE
#
##############################################################################
# Imports
##############################################################################
import argparse
import os
import copy
import re
import sys
import rospy
import rocon_uri
import rocon_console.console as console
##############################################################################
# Methods
##############################################################################
def usage():
    """Assemble the colourised help text listing the available sub-commands."""
    segments = [
        console.green + "\nUtility for introspecting on rocon uri strings.\n\n" + console.reset,
        console.bold + "Commands:\n" + console.reset,
        console.cyan + "\trocon_uri parse URI\t" + console.yellow + "parse and attempt to validate a rocon URI.\n" + console.reset,
        console.cyan + "\trocon_uri fields\t" + console.yellow + "print a full list of permitted fields in a rocon uri string.\n" + console.reset,
        console.cyan + "\trocon_uri rules\t\t" + console.yellow + "print a full list of the ebnf rules for a rocon uri string.\n" + console.reset,
        "\n",
    ]
    return "".join(segments)
def _rocon_uri_cmd_parse(rocon_uri_string):
    """Parse ``rocon_uri_string`` and print either its fields or the parse error."""
    try:
        uri = rocon_uri.RoconURI(rocon_uri_string)
        # Valid URI: print each decoded field on its own line.
        print("\n\t" + console.bold + "'" + rocon_uri_string + "'" + console.reset + console.green + " is a valid rocon uri\n" + console.reset)
        print("\t" + console.bold + "'Concert Name' "+ console.reset + console.green + ": %s" % uri.concert_name+ console.reset)
        print("\t" + console.bold + "'Hardware Platform' "+ console.reset + console.green + ": %s" % uri.hardware_platform.string + console.reset)
        print("\t" + console.bold + "'Name' "+ console.reset + console.green + ": %s" % uri.name.string + console.reset)
        print("\t" + console.bold + "'Application Framework' "+ console.reset + console.green + ": %s" % uri.application_framework.string + console.reset)
        print("\t" + console.bold + "'Operating System' "+ console.reset + console.green + ": %s" % uri.operating_system.string + console.reset)
        if uri.rapp:
            # The rapp fragment (after '#') is optional.
            print("\t" + console.bold + "'Rapp' "+ console.reset + console.green + ": %s" % uri.rapp + console.reset)
    except rocon_uri.RoconURIValueError as e:
        # Invalid URI: report the offending string and the parser's reason.
        print(console.bold + "\nError" + console.reset)
        print(console.red + "\tFailed to parse " + console.cyan + rocon_uri_string + console.reset)
        print(console.bold + "Reason" + console.reset)
        print(console.red + "\t%s\n" % str(e) + console.reset)
def _rocon_uri_cmd_fields():
    """Print the permitted values for each rocon uri field, as a nested tree."""
    print("\nA rocon uri string typically takes the form:\n")
    print(console.green + "\trocon://concert_name/hardware_platform/name/application_framework/operating_system#rocon_app\n" + console.reset)
    print("where permitted values for each of the fields include:\n")
    # Merge the per-field yaml rule dictionaries into one mapping.
    yaml_rules = rocon_uri.rules.load_rules_into_dictionary()
    rule_sets = {}
    for yaml_rule_set in yaml_rules: # each of hardware_platform, name, application_framework, os
        rule_sets.update(yaml_rule_set)
    for rule_set_name, rule_set in rule_sets.iteritems():
        for name, group, elements in rocon_uri.rules.walk_yaml_rules(rule_set_name, rule_set):
            split_name = name.split('/')
            # Indent two spaces per nesting level of the rule path.
            prefix = ''
            for i in range(0, 2*(len(split_name)-1)):
                prefix += ' '
            print(prefix + console.cyan + "+ %s" % split_name[-1] + console.reset)
            for element in elements:
                print(prefix + console.yellow + "  - " + element + console.reset)
    print("\nYou can modify or extend the list of permitted fields with a pull request at \n")
    print(console.green + "\thttps://github.com/robotics-in-concert/rocon_tools/blob/indigo/rocon_uri/src/rocon_uri/rules/rules.yaml\n" + console.reset)
def _rocon_uri_cmd_rules():
    """Print the ebnf rule sets used internally to parse a rocon uri string.

    Fix: the original rebound the ``rules`` dict as its own loop variable
    (``for name, rules in rules.iteritems()``); distinct names are used here
    so the mapping and each rule list can no longer be confused.
    """
    print("\nThe " + console.bold + "ebnf" + console.reset + " rules used to internally parse a rocon_uri string:\n" + console.reset)
    ebnf_rules = rocon_uri.rules.load_ebnf_rules()
    for name, rule_list in ebnf_rules.iteritems():
        print(console.cyan + "  " + name + console.reset)
        for rule in rule_list:
            print(console.green + "    " + rule + console.reset)
##############################################################################
# Simple Printout of Rocon URI Rules
##############################################################################
if __name__ == '__main__':
    # filter out remapping arguments in case we are being invoked via roslaunch
    argv = rospy.myargv(sys.argv)
    # First positional argument selects the sub-command: parse / fields / rules.
    command = argv[1] if len(argv) > 1 else None
    if command == 'parse':
        if len(argv) < 3:
            # 'parse' requires a URI argument; fall back to the help text.
            print("%s" % usage())
        else:
            _rocon_uri_cmd_parse(argv[2])
    elif command == 'fields':
        _rocon_uri_cmd_fields()
    elif command == 'rules':
        _rocon_uri_cmd_rules()
    else:
        # Unknown or missing command: print the help text.
        print("%s" % usage())
| [
"javier.diaz.palacios@gmail.com"
] | javier.diaz.palacios@gmail.com | |
3c1f6f4f9674dcd0e8d8e3bc8a5cfece6c2d762c | 9b722ca41671eb2cea19bac5126d0920639261bd | /.history/app_20201126190857.py | 0337e34e3eeeed73fd0d77cb0603c145682dcff3 | [] | no_license | thawalk/db_flask_server | 7928fd481f99d30bdccc60d97f02db78324cfdbe | cd55f1c9bf84c734457ee02d9f64a6833e295fad | refs/heads/master | 2023-01-25T02:40:19.097457 | 2020-12-06T07:45:50 | 2020-12-06T07:45:50 | 314,229,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,163 | py | import json
import pymongo
from flask import Flask, jsonify, url_for, request, redirect,Response,Request
import pymongo  # NOTE(review): duplicate of the import above; harmless but redundant.
from bson.json_util import dumps
import mysql.connector
from werkzeug.serving import run_simple
import os
from dotenv import load_dotenv
import datetime
import time
from flask_cors import CORS
# Flask application with CORS enabled for all routes.
app = Flask(__name__)
CORS(app)
load_dotenv()
print(os.getenv('mongo_url'))
test_collection='test_collection'
# MongoDB holds book metadata and the user-analytics log.
mongo = pymongo.MongoClient('mongodb://18.209.236.31:27017/?readPreference=primary&appname=MongoDB%20Compass&ssl=false')
metadata_db = pymongo.database.Database(mongo, 'test')
metadata_col = pymongo.collection.Collection(metadata_db, 'test_collection')
userlogging_db = pymongo.database.Database(mongo,'user_analytics')
userlogging_col = pymongo.collection.Collection(userlogging_db,'logging')
# NOTE(review): this rebinds `metadata_db` from the Mongo database above to the
# MySQL connection; the handlers below rely on the MySQL binding for commit().
metadata_db = mysql.connector.connect(
    host ='54.163.143.77',
    user = 'root',
    password = '',
    database = 'reviews',
)
cur = metadata_db.cursor()
def user_logging(userid, timestamp, req, res):
    """Persist one request/response record in the user-analytics collection."""
    record = {
        "id": userid,
        "timestamp": timestamp,
        "request": req,
        "response": res,
    }
    return userlogging_col.insert(record)
@app.route('/',methods=["GET"])
def api_root():
    """Landing endpoint: return a JSON welcome message and log the hit."""
    greeting = {
        'message': 'Welcome to our website. Where reviews are our number one priority'
    }
    body = json.dumps(greeting)
    user_logging(123, datetime.datetime.now().isoformat(), "GET", 200)
    return Response(body, status=200, mimetype='application/json')
@app.route('/categories', methods = ['GET']) #TODO: #returns list of categories
def get_categories():
    """Return the list of book categories as JSON (currently empty, see TODO).

    Bug fix: the original serialised an undefined name ``data``, raising a
    NameError on every request; it now serialises the ``categories`` list.
    """
    categories = []
    js = json.dumps(categories)
    response = Response(js, status=200, mimetype='application/json')
    user_logging(123, datetime.datetime.now().isoformat(), "GET", 200)
    return response
@app.route('/search', methods=['GET']) #now it only searches for TITLE. the mongo metadata does not have author
def search_book():
    """Search book metadata by exact title; returns up to 10 matches as JSON."""
    try:
        title = request.args.get("title")
        # Exact-match lookup on title; a text index search is noted as an alternative.
        result = metadata_col.find({"title":title}).limit(10) #{ $text: { $search: title } }
        result_array = dumps(list(result))
        response = Response(result_array, status=200, mimetype='application/json')
        user_logging(123,datetime.datetime.now().isoformat(),"GET",200)
        return response
    except:
        # Any failure (missing param, DB error) is reported as a 400.
        errMsg = "Please include title."
        js = json.dumps(errMsg)
        user_logging(123,datetime.datetime.now().isoformat(),"GET",400)
        response = Response(js, status=400, mimetype='application/json')
        return response
# @app.route('/review', methods=['POST'])
# def add_review():
# if not request.json or not request.json['asin'] or type(request.json['asin']) != str or not request.json['overall'] or not request.json['reviewText'] or type(request.json['reviewText']) != str or not request.json['reviewTime'] or type(request.json['reviewTime']) != str or not request.json['reviewerID'] or type(request.json['reviewerID']) != str or not request.json['reviewerName'] or type(request.json['reviewerName']) != str or not request.json['summary'] or type(request.json['summary']) != str or not request.json['unixReviewTime'] or type(request.json['unixReviewTime']) != int :
# return 'invalid request msg', 404
# txt = "INSERT INTO 'kindle_reviews' ('id', 'asin', 'overall', 'reviewText', 'reviewTime', 'reviewerID', 'reviewerName', 'summary', 'unixReviewTime') VALUES (%s)"
# values = (None, request.json['asin'], request.json['overall'], request.json['reviewText'], request.json['reviewTime'], request.json['reviewerID'], request.json['reviewerName'], request.json['summary'], request.json['unixReviewTime'])
# cur.execute(txt, values)
# return 'successfully uploaded new review', 200
@app.route('/addBook',methods= ['POST'])
def add_book():
    # TODO: validate the request body explicitly instead of relying on the
    # blanket except below (the original kept a long commented-out check here).
    """Insert a new book document (title/asin/description/price/categories) into Mongo."""
    try:
        data = request.json
        title = data['title']
        asin = data['asin']
        description = data['description']
        price = data['price']
        categories = data['categories']
        message = "Book added successfully"
        metadata_col.insert({"title":title,"asin":asin,"description":description,"price":price,"categories":categories})
        js = json.dumps(message)
        response = Response(js, status=201, mimetype='application/json')
        user_logging(123,datetime.datetime.now().isoformat(),"POST",201)
        return response
    except:
        # Missing keys (KeyError) or DB failures end up here as a 400.
        errMsg = "Please include title, asin, description, price and categories."
        js = json.dumps(errMsg)
        response = Response(js, status=400, mimetype='application/json')
        user_logging(123,datetime.datetime.now().isoformat(),"POST",400)
        return response
@app.route('/addReview',methods = ['POST'])
def add_review():
    """Insert a review row into MySQL from the posted JSON body.

    Security fix: the original interpolated request fields straight into the
    SQL statement with an f-string, which allows SQL injection. The statement
    now uses a parameterised query so the driver escapes every value.
    """
    try:
        data = request.json
        asin = data["asin"]
        helpful = [0,0]  # new reviews start with no helpfulness votes
        overall = data["overall"]
        reviewText = data["reviewText"]
        reviewTime = data["reviewTime"]
        reviewerID = data["reviewerID"]
        reviewerName = data["reviewerName"]
        summary = data["summary"]
        unixReviewTime = int(time.time())
        insert_query = (
            "INSERT INTO reviews.kindle_reviews "
            "(asin, helpful, overall, reviewText, reviewTime, reviewerID, "
            "reviewerName, summary, unixReviewTime) "
            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);"
        )
        # str(helpful) / str(unixReviewTime) preserve the original stored
        # representations ("[0, 0]" and the epoch seconds as text).
        values = (asin, str(helpful), overall, reviewText, reviewTime,
                  reviewerID, reviewerName, summary, str(unixReviewTime))
        cur.execute(insert_query, values)
        metadata_db.commit()
        message = "Successfully uploaded review"
        js = json.dumps(message)
        response = Response(js, status=201, mimetype='application/json')
        user_logging(123, datetime.datetime.now().isoformat(), "POST", 201)
        return response
    except Exception:
        # Missing keys or DB failures are reported as a 400.
        errMsg = "An error occurred. Please check if you have all fields."
        js = json.dumps(errMsg)
        response = Response(js, status=400, mimetype='application/json')
        user_logging(123, datetime.datetime.now().isoformat(), "POST", 400)
        return response
@app.route('/sortByGenres', methods= ['GET']) #TODO: sort by genres from mongo metadata categories
def sort_by_genres():
    """Unimplemented: will sort books by the Mongo metadata `categories` field."""
    pass
@app.route('/sortByRating' , methods = ['GET'])
def sort_by_ratings(): #sort by increasing ratings, decreasing rating
    """Return 10 reviews ordered by rating; `rating_preference=increasing` for ascending."""
    try:
        rating_preference = request.args.get("rating_preference")
        if(rating_preference == 'increasing'): #means rating 1 will come out first
            mySQL_sort_query = """SELECT * FROM reviews.kindle_reviews ORDER BY overall ASC LIMIT 10;"""
        else: #means rating 5 will come out first
            mySQL_sort_query = """SELECT * FROM reviews.kindle_reviews ORDER BY overall DESC LIMIT 10;"""
        cur.execute(mySQL_sort_query)
        result_set = cur.fetchall()
        # Zip each row with the column names from cursor.description -> list of dicts.
        r = [dict((cur.description[i][0], value) \
            for i, value in enumerate(row)) for row in result_set]
        js = json.dumps(r)
        response = Response(js, status=200, mimetype='application/json')
        user_logging(123,datetime.datetime.now().isoformat(),"GET",200)
        return response
    except:
        errMsg = "An error occurred. Please check if you have all fields."
        js = json.dumps(errMsg)
        response = Response(js, status=400, mimetype='application/json')
        user_logging(123,datetime.datetime.now().isoformat(),"GET",400)
        return response
if __name__ == '__main__':
    # Bind on all interfaces so the API is reachable from outside this host.
    app.run(host="0.0.0.0", port=5000) #remember to change this part
    # app.run(debug=True)
| [
"akmal_hakim_teo@hotmail.com"
] | akmal_hakim_teo@hotmail.com |
a8e2942c9f26c11e10d17a96fc317b0a47531ceb | 08e052c0c2ee4ad0cd4980fbc8a692c407118659 | /Ex. do Paca/Aula 6/P_6.8.py | 3efaea60835d67a96b62049edab2f317739b66ae | [] | no_license | miltonleal/MAC0110_Introduction_Computer_Science_IME_USP | 6fad182f7fbb3e83403080800074bf57456cb0b5 | b47936ce66e715dba79dff44779a750d795192a0 | refs/heads/master | 2023-03-04T11:06:48.092980 | 2021-02-17T14:47:46 | 2021-02-17T14:47:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | '''Dado um valor inteiro em reais (R$), determinar quantas notas de R$100, R$50, R$20, R$10, R$5, R$2 e R$1
são necessárias para compor esse valor. A solução procurada é aquela com o máximo de notas de cada tipo.'''
def contar_notas(valor):
    """Return the note counts (R$100, 50, 20, 10, 5, 2, 1) composing ``valor``.

    Greedy decomposition: take as many of each note as possible, largest first.

    Bug fix: the original computed the R$2 and R$1 counts from the literal
    ``100`` instead of the remaining amount, so they were always zero.
    """
    total_100, resto = divmod(valor, 100)
    total_50, resto = divmod(resto, 50)
    total_20, resto = divmod(resto, 20)
    total_10, resto = divmod(resto, 10)
    total_5, resto = divmod(resto, 5)
    total_2, total_1 = divmod(resto, 2)
    return (total_100, total_50, total_20, total_10, total_5, total_2, total_1)


if __name__ == "__main__":
    N = int(input("Digite o valor total em R$: "))
    total_100, total_50, total_20, total_10, total_5, total_2, total_1 = contar_notas(N)
    print ("Serão necessárias", total_100, "nota(s) de R$100", total_50, "nota(s) de R$50", total_20, "nota(s) de R$20", total_10, "nota(s) de R$10", total_5, "nota(s) de R$5", total_2, "nota(s) de R$2 e", total_1, "nota(s) de R$1")
"milton.leal@usp.br"
] | milton.leal@usp.br |
24c4ca54be9905e365f6ef7fb5630cad21cabfb9 | d1aa3a3dc4b05a82ccc6497a75d243c89ecf3c95 | /example-scripts/fitsextract.py | 0f8f9ce4c028d7366b3b399ffc04edefbf4dd60c | [
"MIT"
] | permissive | barentsen/dotastro-argparse-tutorial | d8926ddb1ed1eac943985dc414e40f09da3460a0 | d57a155af428ab1c0c2bf918f09f5381b09a3ad4 | refs/heads/master | 2020-12-24T06:36:19.129131 | 2016-06-20T13:17:23 | 2016-06-20T13:17:23 | 61,532,871 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | import argparse
from astropy.io import fits
def write_fits_extension(input_fn, extension, output_fn):
    """Copy HDU number ``extension`` from ``input_fn`` into a new file ``output_fn``."""
    hdu_list = fits.open(input_fn)
    hdu_list[extension].writeto(output_fn)
if __name__ == '__main__':
    # Command line: a FITS file path and the index of the HDU to extract.
    parser = argparse.ArgumentParser(description='Writes a FITS extension to a new file')
    parser.add_argument('filename', help='FITS filename')
    parser.add_argument('extension', help='Extension number', type=int)
    args = parser.parse_args()
    # Output name is derived from the input, e.g. "img.fits-ext1.fits".
    output_fn = '{}-ext{}.fits'.format(args.filename, args.extension)
    write_fits_extension(input_fn=args.filename,
                         extension=args.extension,
                         output_fn=output_fn)
| [
"geert@barentsen.be"
] | geert@barentsen.be |
7c9a871b78369da66f4ca93a6817a8b15af9723e | 67309cbca4ead3623c86647ac7bfaa067b029fdc | /BOJ/dp_greedy/12869.py | d8b6744243185aae3a4e6b6fb2eec5a373ed8816 | [] | no_license | Jeukoh/OJ | b6df132927ec15ab816fee8681952240b5a69e13 | 182b54554896d9860d5e5d09f8eccc07d99aa8e8 | refs/heads/master | 2023-08-30T19:18:10.812225 | 2021-10-15T09:57:14 | 2021-10-15T09:57:14 | 402,799,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | import math
# BOJ 12869: minimum number of attacks (damage 9/3/1 in some order) to reduce
# up to three SCV healths to zero.
N = int(input())
K = [0,0,0]
tmp = list(map(int,input().split()))
# dp[s1][s2][s3] = best (smallest) attack count seen so far for this health state.
dp = [[[0]*61 for __ in range(61)] for _ in range(61)]
for _ in range(N):
    K[_] = tmp[_]
def sol(N,K):
    """Return the minimum attacks needed; N SCVs with healths in K (zero-padded)."""
    if N == 1:
        # Single SCV: only the 9-damage hit matters.
        return math.ceil(K[0]/9)
    min_cnt = 500
    def recur(s1,s2,s3,n):
        # Depth-first search over the 6 permutations of (9, 3, 1) damage.
        nonlocal min_cnt
        if s1 <= 0 and s2 <= 0 and s3 <= 0:
            min_cnt = min(min_cnt, n)
            return
        # Clamp healths at 0 so they index into dp.
        s1, s2, s3 = map(lambda x: max(0,x), [s1,s2,s3])
        # Prune: this state was already reached in as few or fewer attacks.
        if dp[s1][s2][s3] != 0 and dp[s1][s2][s3] <= n:
            return
        dp[s1][s2][s3] = n
        recur(s1-9,s2-3,s3-1,n+1)
        recur(s1 - 9, s2 - 1, s3 - 3, n + 1)
        recur(s1 - 3, s2 - 9, s3 - 1, n + 1)
        recur(s1 - 1, s2 - 9, s3 - 3, n + 1)
        recur(s1 - 1, s2 - 3, s3 - 9, n + 1)
        recur(s1 - 3, s2 - 1, s3 - 9, n + 1)
    recur(*K,0)
    return min_cnt
print(sol(N,K))
| [
"jeukoh@gmail.com"
] | jeukoh@gmail.com |
6ca34e5f4844fa6cb43b8dd01c856368d7d5d6f7 | c4939f03996ba18b678813ba7c65f519a6532051 | /home/migrations/0003_auto_20200608_1229.py | 0e33b523bd15faee5f21ad9b8f00623ee9b88ac0 | [] | no_license | crowdbotics-apps/tony-template-1-dev-5633 | 030f99cfe366bcbde9506b3679dd104d052f2bcf | e127168ad7a08be5805e9da73530f1ddd8c6ad0e | refs/heads/master | 2022-10-04T19:07:46.565100 | 2020-06-08T12:29:10 | 2020-06-08T12:29:10 | 270,660,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,142 | py | # Generated by Django 2.2.13 on 2020-06-08 12:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: adds Test/Testing/Testtt models and new CustomText fields."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('home', '0002_load_initial_data'),
    ]
    operations = [
        migrations.CreateModel(
            name='Test',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('test', models.BigIntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Testing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('test', models.BigIntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Testtt',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('testt', models.BinaryField()),
            ],
        ),
        # New nullable relations and fields on the existing CustomText model.
        migrations.AddField(
            model_name='customtext',
            name='emp',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customtext_emp', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='customtext',
            name='name',
            field=models.BinaryField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='customtext',
            name='subpage',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customtext_subpage', to='home.CustomText'),
        ),
        migrations.AddField(
            model_name='customtext',
            name='test',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customtext_test', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
184cce916b7a1eff4b9fb73c933cb7878194682c | 726a548766a9db586806ef540dcf8ea4d0a82a60 | /Python3/examples/bin/loop_for.py | f097bfd41a67b0094776eed6b5cbc71de54d37ab | [] | no_license | ArseniD/learn_python | 6fd735a594ff83ea97888d6688e474e94182ea74 | d73fc790514f50a2f61c5cc198073299b0c71277 | refs/heads/master | 2022-05-28T04:53:54.603475 | 2019-08-27T10:15:29 | 2019-08-27T10:15:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | #!/usr/bin/env python3.6
# Demonstrations of Python's for-loop forms; printed output is unchanged.

colors = ['blue', 'green', 'red', 'purple']
# 'blue' is skipped, iteration stops at 'red' -> only 'green' is printed.
for color in colors:
    if color == 'red':
        break
    if color == 'blue':
        continue
    print(color)

# Tuples iterate just like lists.
point = (2.1, 3.2, 7.6)
for coordinate in point:
    print(coordinate)

# Iterating a dict yields its keys.
ages = {'kevin': 59, 'bob': 40, 'kayla': 21}
for person in ages.keys():
    print(person)

# Strings iterate character by character.
for character in "my_string":
    print(character)

# Tuple unpacking inside the loop header.
list_of_points = [(1, 2), (2, 3), (3, 4)]
for px, py in list_of_points:
    print(f"x: {px}, y: {py}")

# .items() yields (key, value) pairs.
for person, years in ages.items():
    print(f"Person Named: {person}")
    print(f"Age of: {years}")
| [
"arsenidudko@mail.ru"
] | arsenidudko@mail.ru |
39aadb1cb4d1a708dd82fba9aa3b7584f07754e0 | 122f9bf0d996c104f541453ab35c56f6ff3fc7cd | /z수업용문제/JunminLim/2445_별찍기8.py | ef4a526df4d72399eb817e24a5b6ee5a8395f8bd | [] | no_license | JannaKim/PS | 1302e9b6bc529d582ecc7d7fe4f249a52311ff30 | b9c3ce6a7a47afeaa0c62d952b5936d407da129b | refs/heads/master | 2023-08-10T17:49:00.925460 | 2021-09-13T02:21:34 | 2021-09-13T02:21:34 | 312,822,458 | 0 | 0 | null | 2021-04-23T15:31:11 | 2020-11-14T13:27:34 | Python | UTF-8 | Python | false | false | 410 | py | n=int(input())
# BOJ 2445-style star pattern: two overlapping diamonds drawn on a grid.
manh=n-1  # NOTE(review): unused, as is `a` below.
a=[]
m=2*n-1   # grid height
p=2*n     # grid width
paper=[[' ']*p for _ in range (m)]
'''
for i in range (len(paper)):
    print(''.join(paper[i]))
'''
# Diamond centres: left at (n-1, 0), right at (n-1, 2n).
y1, x1 = n-1, 0
y2, x2= n-1, 2*n
for i in range (m):
    for z in range (p):
        # Manhattan distance from each centre decides whether to draw '*'.
        if abs(x1-z)+abs(y1-i)<n:
            paper[i][z]='*'
        elif abs(x2-z)+abs(y2-i)<n+1:
            paper[i][z]='*'
for i in range (len(paper)):
    print(''.join(paper[i]))
| [
"baradamoh@gmail.com"
] | baradamoh@gmail.com |
d467c2620d84b57c8dbb8f3d38b8aa65aa49a062 | 3d88748960deb31c674525df2bd9d79ba1d2db1a | /pythonlib/bin/mcmcint | bfd0d1f647224043bd373dc5c1c16f030af8f49f | [
"BSD-2-Clause"
] | permissive | johnkerl/scripts-math | 1a0eb6ce86fd09d593c82540638252af5036c535 | cb29e52fec10dd00b33c3a697dec0267a87ab8bb | refs/heads/main | 2022-01-31T17:46:05.002494 | 2022-01-17T20:40:31 | 2022-01-17T20:40:31 | 13,338,494 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 410 | #!/usr/bin/python -Wall
from __future__ import division
import random
# ----------------------------------------------------------------
def f(x):
	# Unnormalised target density (proportional to a standard Cauchy pdf).
	return 1 / (1 + x**2)
# ----------------------------------------------------------------
# Metropolis random-walk sampler (Python 2): N steps, Gaussian proposals of
# width b, deliberately started far in the tail at x0 to show burn-in.
N = 100000
x0 = 1000.0
b = 10
x = x0
for i in range(0, N):
	print x
	y = random.normalvariate(x, b)
	# Accept the proposal with probability min(1, f(y)/f(x)).
	p = f(y) / f(x)
	u = random.uniform(0.0, 1.0)
	if (p > u):
		x = y
| [
"kerl.john.r@gmail.com"
] | kerl.john.r@gmail.com | |
7867d807e90f24148853f20bc485ff8b66158e3a | 01f7ed47677805e3dcf39b75c657ebdfdf1e89a5 | /scripts/test_cartpole.py | f6802b2fe76a487f2d899151cb8276f40f5d3eb9 | [] | no_license | aoyan27/openai_gym_tutorial | 3d59a1080be8925cc7242128066dff4e4fcfb895 | 4015b4b9b3c2b38948909e4d20e37dca67e6ed19 | refs/heads/master | 2021-09-08T01:21:38.534371 | 2018-03-05T03:23:57 | 2018-03-05T03:23:57 | 98,200,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,702 | py | #!/usr/bin/env python
#coding:utf-8
#################################################
#
# Tutorial-1('Running an environment' section)
#
#################################################
# import gym
# env = gym.make('CartPole-v0')
# env.reset()
# for _ in range(1000):
# env.render()
# observation, reward, done, info = env.step(env.action_space.sample())
# print "observation : ", observation, " reward : ", reward, " done : ", done, " info : ", info
# """
# observation : 行動した後の状態(next_state)
# reward : 報酬
# done : 'env'において定義されたしきい値に応じて、そのエピソードを打ち切るためのフラグ
# info : 学習を行う際の詳細情報を記載できる(自分の自由に?公式に提供されている環境では何も入っていない)
# """
# # print "env.observation_space : ", env.observation_space
# # print "env.action_space. : ", env.observation_space
#################################################
#
# Tutorial-2('Observations' section)
#
#################################################
# import gym
# env = gym.make('CartPole-v0')
# for i_epidode in range(20):
# observation = env.reset()
# for t in range(100):
# env.render()
# print (observation)
# action = env.action_space.sample()
# observation, reward, done, info = env.step(action)
# if done:
# print ("Episode finished after {} timesteps.".format(t+1))
# break
#################################################
#
# Tutorial-3('Spaces' section)
#
#################################################
# import gym
# env = gym.make('CartPole-v0')
# print (env.action_space)
# print (env.observation_space)
# print (env.observation_space.high)
# print (env.observation_space.low)
# """
# spaceというパッケージがある、このパッケージにあるモジュールを使って、OpenAI Gymは状態や行動の定義を表している。
# Discrete : 0からn-1までの負の数を含まない範囲の値を表す。
# 使い方は、
# >> from gym import spaces
# >> a = spaces.Discrete(10) #0~9の範囲の値を考慮する
# >> a.sample() #0~10の中からランダムに値を選択する
# >> a.contains(5) #引数が設定した範囲内にあるかTrue or Falseで返してくる
# Boxes : n次元のboxをあつかう。
# 例えば、
# 1. Box(-1.0, 1.0, (3,4)) # low and high are scalars, and shape is provided
# 2. Box(np.array([-1.0,-2.0]), np.array([2.0,4.0])) # low and high are arrays of the same shape
# のような使い方においては、
# 1. 3x4次元の配列が確保され、それぞれの要素の最小値、最大値が第一引数、第二引数となっている
# 2. 2x2次元の配列が確保され、それぞれの要素の最大値、最小値は引数である配列の値となっている
# --->envでは、'observation_space'や'action_space'として利用されており、
# 状態や行動の型(何次元配列で定義されているか)や、その値の上限、下限値を知ることができる
# """
#################################################
#
# Tutorial-4('Environments' section)
#
#################################################
# import gym
# print (gym.envs.registry.all())
# """
# gym.envs.registry.all()で登録されている環境のリストを確認できる。
# 大元のgym/gym/envs/の中にある__init__.pyの中でgym.envsがimportされた時の初期化処理として、
# 'registration.py'におけるregistor関数を利用して、環境のidや報酬のしきい値、最大episode数、などを登録している。
# そうすると、'env = gym.make('自分で登録した環境の名前')'で環境を利用できるようになる。
# """
#################################################
#
# Tutorial-5(Recoding results'' section)
#
#################################################
import gym
from gym import wrappers
env = gym.make('CartPole-v0')
env = wrappers.Monitor(env, '/tmp/cartpole-experiment-1') # wrap (rebind) the env above with wrappers.Monitor so episodes are recorded
for i_episode in range(20):
    observation = env.reset()
    for t in range(100):
        env.render()
        print(observation)
        # Random policy: sample an action from the action space each step.
        action = env.action_space.sample()
        observation, reward, done, info = env.step(action)
        if done:
            # `done` is set by the environment's termination thresholds.
            print("Episode finished after {} timesteps.".format(t+1))
            break
| [
"ce62001@meiji.ac.jp"
] | ce62001@meiji.ac.jp |
5c49d4ef1e8ec65c666469c3d1f5055e5808aa1e | 63327e8feb2166ee49d92e874655fa8244cf4d4a | /astropy/wcs/wcsapi/conftest.py | bf89f8cb5ced72d84d11004ac7817ea5f11ffba3 | [
"BSD-3-Clause"
] | permissive | bmerry/astropy | 29c317f806138fd830f1b10fd5a33c98f09be6a0 | 437ef257f317c099613bbac78def59f7b011fa09 | refs/heads/master | 2023-02-14T15:35:38.246389 | 2020-06-24T12:09:21 | 2020-06-24T12:09:21 | 275,123,787 | 0 | 0 | BSD-3-Clause | 2020-06-26T09:53:11 | 2020-06-26T09:53:11 | null | UTF-8 | Python | false | false | 3,958 | py | import pytest
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.units import Quantity
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseLowLevelWCS
@pytest.fixture
def spectral_1d_fitswcs():
    """1-D spectral (frequency, Hz) FITS WCS fixture."""
    wcs = WCS(naxis=1)
    # Trailing commas: wcs attributes expect one value per axis (1-tuples here).
    wcs.wcs.ctype = 'FREQ',
    wcs.wcs.cunit = 'Hz',
    wcs.wcs.cdelt = 3.e9,
    wcs.wcs.crval = 4.e9,
    wcs.wcs.crpix = 11.,
    wcs.wcs.cname = 'Frequency',
    return wcs
@pytest.fixture
def time_1d_fitswcs():
    """1-D time FITS WCS fixture (seconds, MJD reference epoch 30042)."""
    wcs = WCS(naxis=1)
    wcs.wcs.ctype = 'TIME',
    # (integer MJD, fractional day) reference for the time axis.
    wcs.wcs.mjdref = (30042, 0)
    wcs.wcs.crval = 3.,
    wcs.wcs.crpix = 11.,
    wcs.wcs.cname = 'Time',
    wcs.wcs.cunit = 's'
    return wcs
@pytest.fixture
def celestial_2d_fitswcs():
    """2-D celestial (RA/Dec, plate carree) FITS WCS fixture with shape/bounds set."""
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = 'RA---CAR', 'DEC--CAR'
    wcs.wcs.cunit = 'deg', 'deg'
    wcs.wcs.cdelt = -2., 2.
    wcs.wcs.crval = 4., 0.
    wcs.wcs.crpix = 6., 7.
    wcs.wcs.cname = 'Right Ascension', 'Declination'
    wcs.pixel_shape = (6, 7)
    wcs.pixel_bounds = [(-1, 5), (1, 7)]
    return wcs
@pytest.fixture
def spectral_cube_3d_fitswcs():
    """3-D spectral-cube FITS WCS fixture (RA, Dec, frequency)."""
    wcs = WCS(naxis=3)
    wcs.wcs.ctype = 'RA---CAR', 'DEC--CAR', 'FREQ'
    wcs.wcs.cunit = 'deg', 'deg', 'Hz'
    wcs.wcs.cdelt = -2., 2., 3.e9
    wcs.wcs.crval = 4., 0., 4.e9
    wcs.wcs.crpix = 6., 7., 11.
    wcs.wcs.cname = 'Right Ascension', 'Declination', 'Frequency'
    wcs.pixel_shape = (6, 7, 3)
    wcs.pixel_bounds = [(-1, 5), (1, 7), (1, 2.5)]
    return wcs
class Spectral1DLowLevelWCS(BaseLowLevelWCS):
    """Minimal hand-written APE 14 low-level WCS: one spectral (Hz) axis."""
    @property
    def pixel_n_dim(self):
        return 1
    @property
    def world_n_dim(self):
        return 1
    @property
    def world_axis_physical_types(self):
        # Trailing commas make these 1-tuples, one entry per world axis.
        return 'em.freq',
    @property
    def world_axis_units(self):
        return 'Hz',
    @property
    def world_axis_names(self):
        return 'Frequency',
    # Backing storage for the settable pixel_shape property.
    _pixel_shape = None
    @property
    def pixel_shape(self):
        return self._pixel_shape
    @pixel_shape.setter
    def pixel_shape(self, value):
        self._pixel_shape = value
    # Backing storage for the settable pixel_bounds property.
    _pixel_bounds = None
    @property
    def pixel_bounds(self):
        return self._pixel_bounds
    @pixel_bounds.setter
    def pixel_bounds(self, value):
        self._pixel_bounds = value
    def pixel_to_world_values(self, pixel_array):
        # Linear transform: reference pixel 10 maps to 4 GHz, step 3 GHz/pixel.
        return np.asarray(pixel_array - 10) * 3e9 + 4e9
    def world_to_pixel_values(self, world_array):
        # Exact inverse of pixel_to_world_values.
        return np.asarray(world_array - 4e9) / 3e9 + 10
    @property
    def world_axis_object_components(self):
        return ('test', 0, 'value'),
    @property
    def world_axis_object_classes(self):
        return {'test': (Quantity, (), {'unit': 'Hz'})}
@pytest.fixture
def spectral_1d_ape14_wcs():
    """Fixture wrapping the hand-written 1-D APE 14 low-level WCS."""
    return Spectral1DLowLevelWCS()
class Celestial2DLowLevelWCS(BaseLowLevelWCS):
    """Minimal hand-written APE 14 low-level WCS: two celestial (deg) axes."""
    @property
    def pixel_n_dim(self):
        return 2
    @property
    def world_n_dim(self):
        return 2
    @property
    def world_axis_physical_types(self):
        return 'pos.eq.ra', 'pos.eq.dec'
    @property
    def world_axis_units(self):
        return 'deg', 'deg'
    @property
    def world_axis_names(self):
        return 'Right Ascension', 'Declination'
    @property
    def pixel_shape(self):
        return (6, 7)
    @property
    def pixel_bounds(self):
        return (-1, 5), (1, 7)
    def pixel_to_world_values(self, px, py):
        # Linear transform mirroring the celestial_2d_fitswcs fixture
        # (RA decreases with pixel x; reference pixel (5, 6) -> (4, 0) deg).
        return (-(np.asarray(px) - 5.) * 2 + 4.,
                (np.asarray(py) - 6.) * 2)
    def world_to_pixel_values(self, wx, wy):
        # Exact inverse of pixel_to_world_values.
        return (-(np.asarray(wx) - 4.) / 2 + 5.,
                np.asarray(wy) / 2 + 6.)
    @property
    def world_axis_object_components(self):
        return [('test', 0, 'spherical.lon.degree'),
                ('test', 1, 'spherical.lat.degree')]
    @property
    def world_axis_object_classes(self):
        return {'test': (SkyCoord, (), {'unit': 'deg'})}
@pytest.fixture
def celestial_2d_ape14_wcs():
    """Fixture wrapping the hand-written 2-D APE 14 low-level WCS."""
    return Celestial2DLowLevelWCS()
| [
"stuart@cadair.com"
] | stuart@cadair.com |
1b0c462d03f9dd859f73c4ff724cf7035687f484 | 50bec959bbddcb255efefeb7477c5c0193e74301 | /workflowengine/workflowserializers/UserFlowSerializer.py | eea7302dd2c39ad02ee11d66e1ded5efc4d1e84d | [] | no_license | rupin/WorkflowEngine | bd7805f04c688967a92dfc5b5341b275ad798338 | bf1c250ba69ac3b1cef4a5f2c4fe0ff46660db68 | refs/heads/master | 2022-04-26T10:43:09.868851 | 2020-05-24T10:10:42 | 2020-05-24T10:10:42 | 202,849,528 | 6 | 2 | null | 2022-04-22T23:16:38 | 2019-08-17T07:22:28 | Python | UTF-8 | Python | false | false | 492 | py | from workflowengine.models.UserFlowModel import UserFlow
from rest_framework import serializers
from workflowengine.workflowserializers.FlowSerializer import FlowSerializer
from workflowengine.workflowserializers.CustomUserSerializer import CustomUserSerializer
from django.conf import settings
class UserFlowSerializer(serializers.ModelSerializer):
    """Serialise UserFlow instances for the REST API (all model fields)."""
    # Render created_at with the project-wide date format; clients cannot set it.
    created_at = serializers.DateTimeField(format=settings.SITE_DATE_FORMAT, read_only=True)
    class Meta:
        model = UserFlow
        fields = "__all__"
"rupin.chheda@gmail.com"
] | rupin.chheda@gmail.com |
f73af99cb6cc9e31bde95659d290c53144101545 | aadad415f425b9f45fed14290235488a46687a4f | /2011/avanzato/trunk/relazione-labo-mmm/calibrazione/potenziometro_temperatura.py | 89845915aeb14f920354c218967b218515af5abe | [] | no_license | enucatl-university/lab-unipd | c1fdae198ccc3af3f75ad07554e148427a9cc096 | c197bb92f479913c1183375fa22fd1619e6bbad4 | refs/heads/master | 2023-08-15T01:59:55.502505 | 2016-11-11T19:20:13 | 2016-11-11T19:20:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | #!/usr/bin/env python
#coding=utf-8
from __future__ import division, print_function
import sys
from math import sqrt
from potenziometro_resistenza import PotenziometroResistenza
from resistenza_temperatura import ResistenzaTemperatura
class PotenziometroTemperatura:
    """Convert a potentiometer reading into a temperature.

    Chains PotenziometroResistenza (reading -> resistance with uncertainty)
    with ResistenzaTemperatura (resistance -> temperature via the PT table).
    """
    def __init__(self, fit_file, tabella_pt, p):
        self.c = PotenziometroResistenza(fit_file, p)
        self.t = ResistenzaTemperatura(self.c.R, self.c.sigma_R, tabella_pt)
        # Expose the final temperature and its uncertainty directly.
        self.T = self.t.T
        self.sigma_T = self.t.sigma_T
    def __str__(self):
        # LaTeX-style "value \pm error" output for reports.
        return "{0:.2f} \pm {1:.2f}".format(self.T, self.sigma_T)
if __name__ == "__main__":
fit_file = sys.argv[1]
p = float(sys.argv[2])
try:
file_tabella = sys.argv[3]
except IndexError:
file_tabella = "tabellaPT"
PT = PotenziometroTemperatura(fit_file, file_tabella, p)
print(PT)
| [
"gmatteo..abis@gmail.com"
] | gmatteo..abis@gmail.com |
25879cfcf48bc23fda52ebd265166f73220dd9d7 | 3432efd194137e1d0cb05656eb547c9992229f02 | /笔记/201711/星爷/day06/01test.py | 7be1476cd30d75ce3fded67c775a8ec18be7a855 | [] | no_license | zhanganxia/other_code | 31747d7689ae1e91fcf3f9f758df130246e7d495 | 8d09d9d0b6d6a1a9b8755487f926ac6fafd761fa | refs/heads/master | 2021-09-04T02:22:38.632685 | 2018-01-14T15:37:14 | 2018-01-14T15:37:14 | 107,007,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | def line_conf(a,b):
    def line(x):
        # Closure over a and b: evaluates the line y = a*x + b.
        return a*x + b
    return line
# Two concrete line functions produced by the factory.
line1 = line_conf(1, 1)
line2 = line_conf(4, 5)
# Evaluate both at x = 5 (prints 6, then 25).
for fn in (line1, line2):
    print(fn(5))
| [
"kk@kk.rhel.cc"
] | kk@kk.rhel.cc |
f8af275a225c509612e09d977869c62d2deda5be | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/benchmarks/prime-86.py | 579635d38e5b51be6d926171616f072a4a327384 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # Get the n-th prime starting from 2
def get_prime(n:int) -> int:
    # Return the n-th prime (1-based), counting upward from 2 by trial.
    candidate:int = 2
    found:int = 0
    while True:
        if is_prime(candidate):
            found = found + 1
            if found == n:
                return candidate
        # NOTE(review): "$Exp" is a benchmark-template placeholder that the
        # harness substitutes with an increment expression before compiling.
        candidate = candidate + $Exp
    return 0 # Never happens
def is_prime(x:int) -> bool:
    # Trial-division primality test.
    # Robustness fix: values below 2 (0, 1, negatives) have no divisor in
    # [2, x), so the original loop fell through and reported them as prime;
    # they are rejected explicitly now. (Declaration kept first for ChocoPy.)
    div:int = 2
    if x < 2:
        return False
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
# Input parameter: how many primes to print
n:int = 15
# Loop counter over [1, n]
i:int = 1
# Crunch: print the first n primes, one per line
while i <= n:
    print(get_prime(i))
    i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.