blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73ff13809669834f4c7b58d502e2a2ec8d0f8a55 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_271/ch153_2020_04_13_20_28_57_339505.py | bc782a307772aadce3e74cda6678a4513b48cff7 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | def agrupa_por_idade(dicionario1):
    # Build a mapping of age-band label -> comma-joined names from a
    # {name: age} dict.
    # NOTE(review): each band's entry is READ before it is first assigned
    # (dicionario2['criança'] on the right-hand side) -> KeyError for the
    # first name in every band. Also the trailing `else` pairs only with
    # the 18-59 check, so ages <= 17 fall through into 'idoso' as well.
    dicionario2={}
    for nome, idade in dicionario1.items():
        if idade<=11:
            dicionario2['criança']='{0}, {1}'.format(dicionario2['criança'], nome)
        if 12<=idade<=17:
            dicionario2['adolescente']='{0}, {1}'.format(dicionario2['adolescente'], nome)
        if 18<=idade<=59:
            dicionario2['adulto']='{0}, {1}'.format(dicionario2['adulto'], nome)
        else:
            dicionario2['idoso']='{0}, {1}'.format(dicionario2['idoso'], nome)
    return dicionario2
| [
"you@example.com"
] | you@example.com |
caf5b92bfa055fb5a5ae7e31b1e15c75f38f4e95 | 946d72e6b44e5fdad5b10a9cbca40260d3202413 | /old/titles.py | a16f19c9755a0039d21f53755b06b58c9cd7eb10 | [] | no_license | pudo/wahlprogramme | 323527b7271a5a2af53530a8b2e2357b3bf1144e | 2cf794f10001d183678c3cc1a39b73f4c87035c3 | refs/heads/master | 2020-06-06T04:56:09.890984 | 2013-08-09T14:58:39 | 2013-08-09T14:58:39 | 11,253,695 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | from common import PARTIES, load_doc, nomenklatura
def all_titles():
    """Print every h1/h2 heading of each party's document and try to match
    it against nomenklatura.

    Python 2 code (print statements, `except Exception, e` syntax).
    """
    for party in PARTIES:
        doc = load_doc(party)
        for i, h in enumerate(doc.findall('.//*')):
            # Only top-level headings are of interest.
            if not h.tag in ['h1', 'h2']:
                continue
            #titles.upsert({
            #    'party': party,
            #    'index': i,
            #    'element': h.tag,
            #    'text': h.text
            #}, ['party', 'text'])
            print [party, h.tag, h.text]
            # Fingerprint used as the nomenklatura lookup key.
            fp = '[%s:%s] %s' % (party, h.tag, h.text)
            try:
                entity = nomenklatura.lookup(fp)
                print [h.text, entity.name]
            except Exception, e:
                # Best-effort: lookup failures are only printed, not raised.
                print e
if __name__ == '__main__':
    all_titles()
| [
"friedrich@pudo.org"
] | friedrich@pudo.org |
9975c0b7a9b56900ff37076e86ba832eeb79c265 | 9477ff0926416001b7c801ff36fbc8e74009e3ae | /excel-reformat/excelhandler/drmamma/migrations/0015_auto_20201029_1314.py | 28260e6b99d3124c802f72495b3e9c0dcc7121cd | [] | no_license | Tedhoon/business-automation | ab0cbcc405f646bcce7cdb4904073f1b0cd7ca9d | 17247fe8979060db3c00d3ff6a2ff2918a4c7ea5 | refs/heads/master | 2023-03-25T13:48:22.755946 | 2021-03-23T08:43:45 | 2021-03-23T08:43:45 | 285,768,349 | 0 | 0 | null | 2021-03-23T08:43:46 | 2020-08-07T07:37:09 | Python | UTF-8 | Python | false | false | 3,137 | py | # Generated by Django 3.0.8 on 2020-10-29 04:14
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: introduces NaverFarmTemp, drops the
    old Cafe24 model, and rewires DeliveryExcel/NaverFarmTemp fields.
    Korean verbose_name strings are user-facing admin labels.
    """
    dependencies = [
        ('drmamma', '0014_auto_20201029_1115'),
    ]
    operations = [
        migrations.CreateModel(
            name='NaverFarmTemp',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('store_code', models.TextField(blank=True, null=True, verbose_name='발주처 코드')),
                ('order_pk', models.TextField(blank=True, null=True, verbose_name='주문번호')),
                ('product_num', models.TextField(blank=True, null=True, verbose_name='품목번호')),
                ('product_order_num', models.TextField(blank=True, null=True, verbose_name='품목별 주문번호')),
                ('receiver', models.TextField(blank=True, null=True, verbose_name='수령인')),
                ('address', models.TextField(blank=True, null=True, verbose_name='수령인 주소(전체)')),
                ('post_num', models.TextField(blank=True, null=True, verbose_name='수령인 우편번호')),
                ('phone_num', models.TextField(blank=True, null=True, verbose_name='수령인 휴대전화')),
                ('phone_num2', models.TextField(blank=True, null=True, verbose_name='수령인 전화번호')),
                ('message', models.TextField(blank=True, null=True, verbose_name='배송메시지')),
                ('product_name', models.TextField(blank=True, null=True, verbose_name='주문상품명(옵션포함)')),
                ('product_code', models.TextField(blank=True, null=True, verbose_name='자체품목코드')),
                ('amount', models.TextField(blank=True, null=True, verbose_name='수량')),
                ('price', models.TextField(blank=True, null=True, verbose_name='상품구매금액(KRW)')),
                ('discount', models.TextField(blank=True, null=True, verbose_name='상품별 추가할인금액')),
                ('total_price', models.TextField(blank=True, null=True, verbose_name='총 주문 금액')),
            ],
            options={
                'verbose_name': '네이버 스토어팜',
                'verbose_name_plural': '네이버 스토어팜',
            },
        ),
        migrations.DeleteModel(
            name='Cafe24',
        ),
        migrations.AlterModelOptions(
            name='cafe24temp',
            options={'verbose_name': 'Cafe24', 'verbose_name_plural': 'Cafe24'},
        ),
        migrations.AlterField(
            model_name='deliveryexcel',
            name='uploaded_at',
            # The default is the datetime captured when makemigrations ran —
            # a fixed value, not "now" at insert time.
            field=models.DateField(default=datetime.datetime(2020, 10, 29, 13, 14, 48, 524036), verbose_name='업로드 날짜'),
        ),
        migrations.AddField(
            model_name='naverfarmtemp',
            name='made_by_source',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='drmamma.DeliveryExcel', verbose_name='연관 엑셀'),
        ),
    ]
| [
"gt0305@likelion.org"
] | gt0305@likelion.org |
c68dc8b29ed0c767a60726b18d7726f906aed1b2 | 878a3094430bb914717d641a4f4b06574e872518 | /hm_03_面向对象/hm_11_上架管理4.py | 4e2cbca0ab8b874e2983fc1dc9d370b7edfc341a | [] | no_license | 2020668/python2019 | 3f33eea85fdd3f2866d867859d5694abb71effe9 | f8a98389fa09f95e72914afa4935afc5c68eaccd | refs/heads/master | 2020-06-07T23:36:17.871376 | 2019-08-29T09:45:10 | 2019-08-29T09:45:10 | 193,116,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py |
# Empty storage slots (slot codes), consumed by the allocator below.
bcdefgh_list = ["1B022","1B023","1B013","1B033","1B043","1B053","1B063","1B073","1B083"]
mnpq_list = ["1M053","1Q023"]
# SKU base data (fixed packing dimensions per SKU)
class SkuData():
    """Static packing data for one SKU.

    Attribute names (including the original `heigth`/`weigth` spellings)
    are part of the public interface used by the allocator below.
    """
    def __init__(self,sku,layer_num,layer,box_num,length,width,heigth,weigth):
        # Bind every constructor argument to an identically named attribute.
        for attr_name, attr_value in (
            ("sku", sku),
            ("layer_num", layer_num),
            ("layer", layer),
            ("box_num", box_num),
            ("length", length),
            ("width", width),
            ("heigth", heigth),
            ("weigth", weigth),
        ):
            setattr(self, attr_name, attr_value)
# Maintained base data records:
# SkuData(sku, layer_num, layer, box_num, length, width, heigth, weigth)
sku11059905 = SkuData("11059905",10,6,16,36,28,21,15.5)
sku11039131 = SkuData("11039131",9,3,1,23.5,18,25,5.5)
# Main program
class Sku(object):
    """Putaway allocator: prints a slot assignment for `total` units of the
    given SKU, popping slots from the module-level slot lists as it goes.

    NOTE(review): all work happens in __init__ (nothing but self.sku is
    stored); slots are removed from the shared lists as a side effect; and
    an unrecognized SKU leaves total_heigth/t_num/t_height unbound, so the
    `if total_heigth > 80` check raises NameError.
    """
    def __init__(self,sku,total):
        self.sku = sku
        # Per-SKU figures: overall stack height for `total` units, units per
        # full large slot, and height consumed per slot's worth of layers.
        if self.sku == "11059905":
            total_heigth = total/sku11059905.box_num/sku11059905.layer_num*sku11059905.heigth
            t_num = sku11059905.layer_num*sku11059905.layer*sku11059905.box_num
            t_height = sku11059905.layer*sku11059905.heigth
        if self.sku == "11039131":
            total_heigth = total/sku11039131.box_num/sku11039131.layer_num*sku11039131.heigth
            t_num = sku11039131.layer_num*sku11039131.layer*sku11039131.box_num
            t_height = sku11039131.layer*sku11039131.heigth
        # Tall stacks: fill the large B..H slots until the remainder fits.
        if total_heigth > 80:
            for i in range(len(bcdefgh_list)-1,-1,-1):
                print("数量%d,请入库位%s"% (t_num,bcdefgh_list[i]))
                bcdefgh_list.pop(i)
                # print(bcdefgh_list)
                total_heigth -= t_height
                total -= t_num
                # Remainder is short enough for the small M/Q slots.
                if total_heigth <= 80:
                    for r in range(len(mnpq_list)-1,-1,-1):
                        print("数量%d,请入库位%s" % (total, mnpq_list[r]))
                        mnpq_list.pop(r)
                    return
        else:
            # Short stack from the start: use the small M/Q slots directly.
            for r in range(len(mnpq_list)-1,-1,-1):
                print("数量%d,请入库位%s"% (total,mnpq_list[r]))
                mnpq_list.pop(r)
            return
# Interactive entry point: prompt for SKU and quantity, then allocate.
input_sku = input("请输入SKU:")
input_total = int(input("请输入数量:"))
# NOTE(review): this rebinding shadows the Sku class with an instance,
# so a second allocation in the same run is impossible.
Sku = Sku(input_sku,input_total)
| [
"keen2020@outlook.com"
] | keen2020@outlook.com |
f0c5e3397709ca4ce121c0f482289221424aac74 | 9bb83bf5f6c2b5d2da4dda711591ef9987490c66 | /3DdetectionPrototype/Yolo-Pytorch-nms-updated/dataset/__init__.py | d919b198f149734f06916b220e93721264ed2db8 | [] | no_license | nudlesoup/DeepLearning | cb5b7039a9de6098194b56143d1a72a564fed1c9 | 336e415b0353d6e18d106f894a97d8873a55e544 | refs/heads/master | 2021-06-25T00:50:40.339768 | 2020-12-23T02:38:58 | 2020-12-23T02:38:58 | 172,002,661 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | from .coco import get_dataset as get_COCO
from .pascal import get_dataset as get_VOC
from dataset.augment.bbox import bbox_flip
from dataset.augment.image import makeImgPyramids
import os
import torch
from torch.utils.data import DataLoader
import numpy as np
def get_imgdir(dataset_root, batch_size, net_size):
    """Build a DataLoader over all files in `dataset_root`.

    Each item is (absolute path, transformed image tensor, original image
    size (w, h) as a float32 tensor). Images are resized to
    net_size x net_size; Normalize(mean=0, std=1) leaves values unchanged.
    """
    from torchvision.transforms import transforms
    from PIL import Image
    class dataset:
        # Minimal map-style dataset over a flat directory of images.
        def __init__(self, root, transform):
            self.imglist = os.listdir(root)
            self.root = root
            self.transform = transform
        def __len__(self):
            return len(self.imglist)
        def __getitem__(self, item):
            path=os.path.join(self.root,self.imglist[item])
            img=Image.open(path)
            # Original (width, height) before the resize transform.
            ori_shape=np.array(img.size)
            img=self.transform(img)
            return path,img,torch.from_numpy(ori_shape.astype(np.float32))
    transform=transforms.Compose([
        transforms.Resize((net_size,net_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0,0,0),std=(1,1,1))
    ])
    # No shuffling: keep deterministic file order for inference.
    dataloader=DataLoader(dataset=dataset(dataset_root,transform),shuffle=False,batch_size=batch_size)
return dataloader | [
"ameyad1995@gmail.com"
] | ameyad1995@gmail.com |
d9d22ee6b4dc770f86c458638636032fe0fcc083 | ffc563a34204ee65c5a518de07c78310c668c316 | /opennem/monitors/aemo_intervals.py | 221c76b1d35e35ce8cbd489b7544c2ef495630db | [
"MIT"
] | permissive | MarnieShaw/opennem | 45924ac132d199751958eade224684d867118145 | 062178a9e64764e2bd89352b223280c8eeff60e4 | refs/heads/master | 2023-03-29T07:04:02.435282 | 2021-03-25T12:57:08 | 2021-03-25T12:57:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | import logging
from datetime import datetime, timedelta
from opennem.monitors.aemo_wem_live_intervals import (
get_aemo_wem_live_facility_intervals_recent_date,
)
from opennem.notifications.slack import slack_message
from opennem.schema.network import NetworkWEM
from opennem.settings import settings
from opennem.utils.dates import chop_microseconds
# Module-level logger for the AEMO monitors.
logger = logging.getLogger("opennem.monitors.aemo")
def aemo_wem_live_interval() -> bool:
    """
    Monitors the delay from the AEMO live scada data on the portal
    """
    network = NetworkWEM
    local_tz = network.get_timezone()
    current_time = datetime.now().astimezone(local_tz)
    latest_interval = get_aemo_wem_live_facility_intervals_recent_date()
    delay = chop_microseconds(current_time - latest_interval)
    logger.debug(
        "Live time: {}, delay: {}".format(latest_interval, delay)
    )
    # @TODO move the minutes into settings
    alert_threshold = timedelta(minutes=90)
    # Within tolerance: nothing to report.
    if delay <= alert_threshold:
        return False
    # Feed is stale — raise a Slack alert and signal the delay.
    slack_message(
        "*WARNING*: AEMO Live intervals for WEM on {} curently delayed by {}\n\nAEMO feed most recent: {}".format(
            settings.env, delay, latest_interval
        )
    )
    return True
# Ad-hoc entry point for running the monitor check manually.
if __name__ == "__main__":
    delay = aemo_wem_live_interval()
| [
"git@nikcub.me"
] | git@nikcub.me |
6402742e91d560e57aa48ce294e2c9c7563695af | 39f8535e6b4aaf313278d65c2561c57db1425a44 | /web/register/migrations/0010_registeredperson_borough_gss.py | 97bbba8cf8f0192ef65af1880694fbc437324756 | [] | no_license | DemocracyClub/TakePart.london | 39c61f58740400597a24bd525eff78939a3f30ed | 816d427c37ad4f485aa392ff1d376f0f2681746a | refs/heads/master | 2022-02-11T15:01:04.782754 | 2017-11-09T15:10:25 | 2017-11-09T15:10:25 | 60,079,282 | 0 | 2 | null | 2022-01-21T19:27:25 | 2016-05-31T10:12:47 | Python | UTF-8 | Python | false | false | 492 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-26 12:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds an indexed, optional
    `borough_gss` code to RegisteredPerson."""
    dependencies = [
        ('register', '0009_auto_20160526_1010'),
    ]
    operations = [
        migrations.AddField(
            model_name='registeredperson',
            name='borough_gss',
            field=models.CharField(blank=True, db_index=True, max_length=100),
        ),
    ]
| [
"sym.roe@talusdesign.co.uk"
] | sym.roe@talusdesign.co.uk |
b5152f8763fc21f32e7530a4d1b55be7ae98c02a | faabe34af6297530617395bcc6811350765da847 | /platforms/leetcode/PalindromeLinkedList.py | bb7cf2be3376bd29d8d2a59cce309f2d4124781c | [] | no_license | pqnguyen/CompetitiveProgramming | 44a542aea299bd553dd022a9e737e087285b8b6d | 27330e7ff79c4ac883d7e1fcdf2f0d30939c3f78 | refs/heads/master | 2021-07-21T12:15:47.366599 | 2021-06-27T14:58:48 | 2021-06-27T14:58:48 | 132,837,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | # https://leetcode.com/explore/learn/card/linked-list/219/classic-problems/1209/
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """Check whether a singly linked list is a palindrome in O(n) time and
    O(1) extra space: split at the middle, reverse the second half, then
    compare both halves node by node. The input list is modified in place.
    """
    def isPalindrome(self, head: ListNode) -> bool:
        if not head: return True
        # `mid` is the head of the second half; for odd lengths findMid
        # drops the (self-matching) middle node.
        mid = self.findMid(head)
        lastNode = self.reverse(mid)
        # The reversed second half is never longer than the first half, so
        # iterating until it runs out compares every relevant pair.
        while lastNode:
            if head.val != lastNode.val: return False
            lastNode = lastNode.next
            head = head.next
        return True
    def reverse(self, head):
        # Iteratively reverse the list starting at `head`; returns the new
        # head and terminates the old head with None.
        if not head: return None
        prev, next = head, head.next
        while next:
            tmp = next.next
            next.next = prev
            prev = next
            next = tmp
        head.next = None
        return prev
    def findMid(self, head):
        # Slow/fast pointers: `slow` lands on the middle node.
        slow = fast = head
        while fast and fast.next:
            fast = fast.next.next
            slow = slow.next
        # Odd length (fast non-None): skip the middle node and detach it
        # from the second half before returning.
        if fast:
            tmp = slow.next
            slow.next = None
            slow = tmp
        return slow
| [
"pqnguyen1996@gmail.com"
] | pqnguyen1996@gmail.com |
ecadf6700b1b850bb101d6a8ca65912bbfd57019 | 49e87fd199287ea7234c9bbfb3ec40447b9ed3d4 | /app/main/forms.py | cdf55969ee17ccd31d37bf6e400ca91820058ac5 | [
"MIT"
] | permissive | mornicamwende/Pitching-site | ae732d3d9ba38f1878b49113a6adc81769d2c1b6 | 04336aa97f2a1c806ad6668f104ff64d27c8d995 | refs/heads/master | 2022-12-20T10:48:11.003418 | 2020-09-25T04:59:35 | 2020-09-25T04:59:35 | 296,598,453 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py |
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField, RadioField,SelectField
from wtforms.validators import Required
class CommentsForm(FlaskForm):
    """WTForm for posting a comment on a pitch."""
    comment = TextAreaField('Comment', validators=[Required()])
    # Disabled draft of an inline vote widget; note both choices share value '1'.
    # vote=RadioField('default field arguments', choices=[('1', 'UpVote'), ('1', 'DownVote')])
    submit = SubmitField('SUBMIT')
class PitchForm(FlaskForm):
    """WTForm for creating a new pitch in a chosen category."""
    # Choice values are stringified category ids; labels are shown to the user.
    category_id = SelectField('Select Category', choices=[('1', 'Interview'), ('2', 'Pick Up Lines'), ('3', 'Promotion'),('4','Product')])
    content = TextAreaField('make a pitch', validators=[Required()])
    submit = SubmitField('Create Pitch')
class UpvoteForm(FlaskForm):
    '''
    WTForm with a single submit button, used to upvote a pitch.
    '''
    submit = SubmitField('Upvote')
class DownvoteForm(FlaskForm):
    '''
    WTForm with a single submit button, used to downvote a pitch.
    '''
    submit = SubmitField('Downvote')
class UpdateProfile(FlaskForm):
    """WTForm for updating the user's profile bio."""
    bio = TextAreaField('Tell us about you.',validators = [Required()])
submit = SubmitField('Submit') | [
"mornicamwende@gmail.com"
] | mornicamwende@gmail.com |
539ae6e0ecf887e6d1d29a89a317d920bcd8899a | c6f946097032432c787a4e3f2b1c7839a5966bfa | /problem 0120.py | 0e0fc987f8d8b2772162ed041c676b86767c43ef | [] | no_license | a100kpm/daily_training | 70a3bfdc1a773025bc03dad64310f7ad9f58eb22 | dc80f1708cba6f46a51d2e385bc16613acb5e710 | refs/heads/master | 2020-05-17T21:20:21.067730 | 2019-05-21T16:41:52 | 2019-05-21T16:41:52 | 183,969,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | '''
Good morning! Here's your coding interview problem for today.
This problem was asked by Microsoft.
Implement the singleton pattern with a twist.
First, instead of storing one instance, store two instances.
And in every even call of getInstance(), return the first instance and
in every odd call of getInstance(), return the second instance.
'''
class Singleton:
    """Two-instance 'singleton' with alternating access.

    getInstance() returns val1 on the 1st, 3rd, 5th... call and val2 on
    the 2nd, 4th, 6th... call.
    """
    def __init__(self, val1, val2):
        self.val1 = val1
        self.val2 = val2
        # Parity flag: 1 means the next call should return val1.
        self.compteur = 1
    def getInstance(self):
        """Return the stored values in strict alternation, starting with val1."""
        use_first = (self.compteur == 1)
        # Flip the flag for the next call.
        self.compteur = 2 if use_first else 1
        return self.val1 if use_first else self.val2
| [
"iannis3toussaint@gmail.com"
] | iannis3toussaint@gmail.com |
c146b0e1db17224998ba1eda4eafc3f9d27dd06b | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/EightTeV/RSGraviton/RSGravitonToWW_kMpl04_M_4000_TuneZ2star_8TeV_pythia6_cff.py | 227007a6d47128f5bc79ef6b8f9004b7add104ff | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 894 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
# Pythia6 generator filter for RS Graviton (M = 4 TeV, k/Mpl = 0.4) -> WW
# at sqrt(s) = 8 TeV. Pythia parameter strings must stay exactly as
# generated — they are parsed by Pythia at runtime.
generator = cms.EDFilter("Pythia6GeneratorFilter",
                         pythiaHepMCVerbosity = cms.untracked.bool(False),
                         maxEventsToPrint = cms.untracked.int32(0),
                         pythiaPylistVerbosity = cms.untracked.int32(0),
                         filterEfficiency = cms.untracked.double(1),
                         comEnergy = cms.double(8000.0),
                         crossSection = cms.untracked.double(1.137e-4),
                         PythiaParameters = cms.PSet(
        pythiaUESettingsBlock,
        processParameters = cms.vstring(
            'MSEL = 0',
            'MSUB(391) = 1',
            'MSUB(392) = 1',
            'PMAS(347,1) = 4000',
            'PARP(50) = 2.16',
            '5000039:ALLOFF',
            '5000039:ONIFANY 24',
        ),
        parameterSets = cms.vstring(
            'pythiaUESettings',
            'processParameters')
    )
)
# Sequence consumed by the production configuration.
ProductionFilterSequence = cms.Sequence(generator)
| [
"sha1-481a5a2ac9973b7cab990140e7a395466f50a31e@cern.ch"
] | sha1-481a5a2ac9973b7cab990140e7a395466f50a31e@cern.ch |
7418efa19b2b99b91268d1c1c3eaecb2564be978 | 069c90561a55dbb050102914ba92a786137f3dd4 | /setup.py | 0d4d001769f4749905acc1374f364ba1baf66cff | [] | no_license | reorx/yaml2pac | a9075d8d706e26b4629e8fb98398bf0a5321c758 | 78a42e9bb5f2f27bdef5a3236cf242d3828ba5a1 | refs/heads/master | 2021-01-10T05:06:10.319371 | 2017-04-30T13:07:41 | 2017-04-30T13:07:41 | 48,587,654 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | #!/usr/bin/env python
# coding=utf-8
from setuptools import setup
# Use semantic versioning: MAJOR.MINOR.PATCH
# Single source of the package version, passed to setup() below.
version = '0.1.0'
def get_requires():
    """Read requirements.txt and return its non-empty lines, stripped.

    Returns an empty list when the file is missing or unreadable.
    """
    try:
        with open('requirements.txt', 'r') as f:
            lines = f.readlines()
    except IOError:
        return []
    stripped = (line.strip() for line in lines)
    return [line for line in stripped if line]
def get_long_description():
    """Return the contents of README.md, or '' if it cannot be read."""
    try:
        with open('README.md', 'r') as f:
            content = f.read()
    except IOError:
        return ''
    return content
# Package metadata; requirements and long description are read from the
# adjacent requirements.txt / README.md at build time.
setup(
    # license='License :: OSI Approved :: MIT License',
    name='yaml2pac',
    version=version,
    author='reorx',
    author_email='novoreorx@gmail.com',
    description='Generate decent pac file from a set of yaml rules',
    url='https://github.com/reorx/yaml2pac',
    long_description=get_long_description(),
    packages=['yaml2pac'],
    # Or use (make sure find_packages is imported from setuptools):
    # packages=find_packages()
    install_requires=get_requires(),
    package_data={
        'yaml2pac': ['template.pac']
    },
    entry_points={
        'console_scripts': [
            'yaml2pac = yaml2pac.__main__:main'
        ]
    }
)
| [
"novoreorx@gmail.com"
] | novoreorx@gmail.com |
a0e3e218a5b7ec2bf7ffd20d00fc6f0888cf6ea0 | 3c6fc92c8de309cd287e6f1b1b6e0af1e1aaf5c9 | /fix_date.py | 7e72e5bba097726d748338a2f8b498060b29976c | [
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"CC-PDDC"
] | permissive | caltechlibrary/caltechdata_migrate | 37e2b0fff3814eb69e413796a2ef8513959ebfda | 839feab326eb0b412d740db64e96d7ad0048c295 | refs/heads/master | 2021-10-27T08:38:30.755678 | 2021-10-20T19:06:28 | 2021-10-20T19:06:28 | 106,311,559 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 961 | py | from caltechdata_api import get_metadata,caltechdata_edit,decustomize_schema
import requests
import os
# Record ids of the CaltechDATA entries whose date metadata needs fixing.
idvs = [1163, 1164, 1165, 1166, 1167, 1168, 1169]
# Get access token for TIND, set as an environment variable
# (load it with `source token.bash`).
token = os.environ['TINDTOK']
metadata = {}
for idv in idvs:
    api_url = "https://data.caltech.edu/api/record/"
    r = requests.get(api_url + str(idv))
    r_data = r.json()
    if 'message' in r_data:
        # BUG FIX: the original concatenated the int `idv` into the message
        # (TypeError) and read `.status`/`.message` attributes off a dict.
        raise AssertionError(
            'id ' + str(idv) + ' expected http status 200, got '
            + str(r_data.get('status')) + ' ' + str(r_data.get('message')))
    if 'metadata' not in r_data:
        # BUG FIX: `+ r_data` on a dict raised TypeError instead of showing
        # the intended assertion message.
        raise AssertionError(
            'expected a metadata property in response, got ' + str(r_data))
    metadata = r_data['metadata']
    # Normalize the capitalization of the 'created' relevant-date type.
    for d in metadata['relevantDates']:
        if d['relevantDateType'] == 'created':
            d['relevantDateType'] = 'Created'
    metadata = decustomize_schema(metadata)
    response = caltechdata_edit(token, idv, metadata, production=True)
    print(response)
| [
"tmorrell@caltech.edu"
] | tmorrell@caltech.edu |
34e6fc1924b4bf98002f93525d9a60d3efac008f | fb8cbebdf034b2f478943752d5443afc82c6eef5 | /tuirer/venv/lib/python3.6/site-packages/IPython/core/tests/test_paths.py | a7dc1fddc233e5b268dcea9533b91a8ace9c0cd8 | [] | no_license | fariasjr/CitiTuirer | f64e0ec93ef088f8140bb0961d2ad4ed3b59448a | deb3f7a9c2d45b8a7f54639037f097b99abdac11 | refs/heads/master | 2020-03-24T05:10:36.261050 | 2018-08-01T20:24:30 | 2018-08-01T20:24:30 | 142,477,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,394 | py | import errno
import os
import shutil
import sys
import tempfile
import warnings
from unittest.mock import patch
from IPython import paths
from IPython.testing.decorators import skip_win32
from IPython.utils.tempdir import TemporaryDirectory
import nose.tools as nt
from testpath import assert_isdir, assert_isfile, modified_env
# Temporary directory tree used as a fake $HOME / XDG layout by the tests
# below; created in setup() and removed in teardown().
TMP_TEST_DIR = os.path.realpath(tempfile.mkdtemp())
HOME_TEST_DIR = os.path.join(TMP_TEST_DIR, "home_test_dir")
XDG_TEST_DIR = os.path.join(HOME_TEST_DIR, "xdg_test_dir")
XDG_CACHE_DIR = os.path.join(HOME_TEST_DIR, "xdg_cache_dir")
IP_TEST_DIR = os.path.join(HOME_TEST_DIR,'.ipython')
def setup():
    """Setup test environment for the module:
            - Adds dummy home dir tree
    """
    # Do not mask exceptions here. In particular, catching WindowsError is a
    # problem because that exception is only defined on Windows...
    os.makedirs(IP_TEST_DIR)
    os.makedirs(os.path.join(XDG_TEST_DIR, 'ipython'))
    os.makedirs(os.path.join(XDG_CACHE_DIR, 'ipython'))
def teardown():
    """Teardown test environment for the module:
            - Remove dummy home dir tree
    """
    # Note: we remove the parent test dir, which is the root of all test
    # subdirs we may have created. Use shutil instead of os.removedirs, so
    # that non-empty directories are all recursively removed.
    shutil.rmtree(TMP_TEST_DIR)
def patch_get_home_dir(dirpath):
    # Context-manager helper: make paths.get_home_dir() return `dirpath`.
    return patch.object(paths, 'get_home_dir', return_value=dirpath)
def test_get_ipython_dir_1():
    """test_get_ipython_dir_1, Testcase to see if we can call get_ipython_dir without Exceptions."""
    # An explicit IPYTHONDIR env var wins (writability is stubbed to True).
    env_ipdir = os.path.join("someplace", ".ipython")
    with patch.object(paths, '_writable_dir', return_value=True), \
        modified_env({'IPYTHONDIR': env_ipdir}):
        ipdir = paths.get_ipython_dir()
        nt.assert_equal(ipdir, env_ipdir)
def test_get_ipython_dir_2():
    """test_get_ipython_dir_2, Testcase to see if we can call get_ipython_dir without Exceptions."""
    # No env vars and no XDG dir: fall back to ~/.ipython under the home dir.
    with patch_get_home_dir('someplace'), \
        patch.object(paths, 'get_xdg_dir', return_value=None), \
        patch.object(paths, '_writable_dir', return_value=True), \
        patch('os.name', "posix"), \
        modified_env({'IPYTHON_DIR': None,
                      'IPYTHONDIR': None,
                      'XDG_CONFIG_HOME': None
                      }):
        ipdir = paths.get_ipython_dir()
        nt.assert_equal(ipdir, os.path.join("someplace", ".ipython"))
def test_get_ipython_dir_3():
    """test_get_ipython_dir_3, move XDG if defined, and .ipython doesn't exist."""
    # Fresh home dir: the XDG config dir exists but ~/.ipython does not,
    # so the XDG dir should be migrated (with a "Moving" warning on Linux).
    tmphome = TemporaryDirectory()
    try:
        with patch_get_home_dir(tmphome.name), \
            patch('os.name', 'posix'), \
            modified_env({
                'IPYTHON_DIR': None,
                'IPYTHONDIR': None,
                'XDG_CONFIG_HOME': XDG_TEST_DIR,
            }), warnings.catch_warnings(record=True) as w:
            ipdir = paths.get_ipython_dir()
            nt.assert_equal(ipdir, os.path.join(tmphome.name, ".ipython"))
            if sys.platform != 'darwin':
                nt.assert_equal(len(w), 1)
                nt.assert_in('Moving', str(w[0]))
    finally:
        tmphome.cleanup()
def test_get_ipython_dir_4():
    """test_get_ipython_dir_4, warn if XDG and home both exist."""
    with patch_get_home_dir(HOME_TEST_DIR), \
        patch('os.name', 'posix'):
        # Make sure the XDG ipython dir exists alongside ~/.ipython.
        try:
            os.mkdir(os.path.join(XDG_TEST_DIR, 'ipython'))
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        # Both exist: home wins and the XDG copy is ignored with a warning.
        with modified_env({
            'IPYTHON_DIR': None,
            'IPYTHONDIR': None,
            'XDG_CONFIG_HOME': XDG_TEST_DIR,
        }), warnings.catch_warnings(record=True) as w:
            ipdir = paths.get_ipython_dir()
            nt.assert_equal(ipdir, os.path.join(HOME_TEST_DIR, ".ipython"))
            if sys.platform != 'darwin':
                nt.assert_equal(len(w), 1)
                nt.assert_in('Ignoring', str(w[0]))
def test_get_ipython_dir_5():
    """test_get_ipython_dir_5, use .ipython if exists and XDG defined, but doesn't exist."""
    with patch_get_home_dir(HOME_TEST_DIR), \
        patch('os.name', 'posix'):
        # Remove the XDG ipython dir if present so only ~/.ipython exists.
        try:
            os.rmdir(os.path.join(XDG_TEST_DIR, 'ipython'))
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        with modified_env({
            'IPYTHON_DIR': None,
            'IPYTHONDIR': None,
            'XDG_CONFIG_HOME': XDG_TEST_DIR,
        }):
            ipdir = paths.get_ipython_dir()
            nt.assert_equal(ipdir, IP_TEST_DIR)
def test_get_ipython_dir_6():
    """test_get_ipython_dir_6, use home over XDG if defined and neither exist."""
    # Neither ~/.ipython nor an XDG ipython dir exists; expect the home
    # location to be chosen without any warning.
    xdg = os.path.join(HOME_TEST_DIR, 'somexdg')
    os.mkdir(xdg)
    shutil.rmtree(os.path.join(HOME_TEST_DIR, '.ipython'))
    print(paths._writable_dir)
    with patch_get_home_dir(HOME_TEST_DIR), \
        patch.object(paths, 'get_xdg_dir', return_value=xdg), \
        patch('os.name', 'posix'), \
        modified_env({
            'IPYTHON_DIR': None,
            'IPYTHONDIR': None,
            'XDG_CONFIG_HOME': None,
        }), warnings.catch_warnings(record=True) as w:
        ipdir = paths.get_ipython_dir()
        nt.assert_equal(ipdir, os.path.join(HOME_TEST_DIR, '.ipython'))
    nt.assert_equal(len(w), 0)
def test_get_ipython_dir_7():
    """test_get_ipython_dir_7, test home directory expansion on IPYTHONDIR"""
    # A '~' in IPYTHONDIR must be expanded to the real home directory.
    home_dir = os.path.normpath(os.path.expanduser('~'))
    with modified_env({'IPYTHONDIR': os.path.join('~', 'somewhere')}), \
        patch.object(paths, '_writable_dir', return_value=True):
        ipdir = paths.get_ipython_dir()
        nt.assert_equal(ipdir, os.path.join(home_dir, 'somewhere'))
@skip_win32
def test_get_ipython_dir_8():
    """test_get_ipython_dir_8, test / home directory"""
    # With HOME set to the filesystem root, expect the literal '/.ipython'.
    with patch.object(paths, '_writable_dir', lambda path: bool(path)), \
        patch.object(paths, 'get_xdg_dir', return_value=None), \
        modified_env({
            'IPYTHON_DIR': None,
            'IPYTHONDIR': None,
            'HOME': '/',
        }):
        nt.assert_equal(paths.get_ipython_dir(), '/.ipython')
def test_get_ipython_cache_dir():
    # On Linux the cache dir honors XDG_CACHE_HOME (defaulting to
    # ~/.cache/ipython); elsewhere it falls back to the IPython dir.
    with modified_env({'HOME': HOME_TEST_DIR}):
        if os.name == 'posix' and sys.platform != 'darwin':
            # test default
            os.makedirs(os.path.join(HOME_TEST_DIR, ".cache"))
            with modified_env({'XDG_CACHE_HOME': None}):
                ipdir = paths.get_ipython_cache_dir()
            nt.assert_equal(os.path.join(HOME_TEST_DIR, ".cache", "ipython"),
                            ipdir)
            assert_isdir(ipdir)
            # test env override
            with modified_env({"XDG_CACHE_HOME": XDG_CACHE_DIR}):
                ipdir = paths.get_ipython_cache_dir()
            assert_isdir(ipdir)
            nt.assert_equal(ipdir, os.path.join(XDG_CACHE_DIR, "ipython"))
        else:
            nt.assert_equal(paths.get_ipython_cache_dir(),
                            paths.get_ipython_dir())
def test_get_ipython_package_dir():
    # The installed IPython package location must be an existing directory.
    ipdir = paths.get_ipython_package_dir()
    assert_isdir(ipdir)
def test_get_ipython_module_path():
    # Resolving a dotted module name must yield an existing file path.
    ipapp_path = paths.get_ipython_module_path('IPython.terminal.ipapp')
    assert_isfile(ipapp_path)
| [
"jornadaciti@ug4c08.windows.cin.ufpe.br"
] | jornadaciti@ug4c08.windows.cin.ufpe.br |
b98d7f3a306f0489f526d005559bda871a9dae63 | e18da3301b53da6792f581159a953da71c00422b | /aula_1/ex_3.py | 989a228d4c99271fe93379b33bf46298099135f3 | [] | no_license | fernandolago/python-521 | 6c4998b60784d966e8ed4aff0e500f1ab4f1fab0 | e103b055c9cf04da889a380d55192307f66d34f4 | refs/heads/master | 2022-12-09T20:13:47.982648 | 2019-08-08T20:02:27 | 2019-08-08T20:02:27 | 200,073,861 | 0 | 0 | null | 2022-12-08T05:57:51 | 2019-08-01T15:16:36 | Python | UTF-8 | Python | false | false | 270 | py | import requests
# Learning exercise: query the ViaCEP API for the address behind a
# Brazilian postal code (CEP) typed by the user, printing each step.
URL = 'https://viacep.com.br/ws/{}/json'
cep = input('Digite seu cep: ')
print(cep)
URL_FORMATADA = URL.format(cep)
print(URL_FORMATADA)
response = requests.get(URL_FORMATADA)
print(response)
# dir() is printed only to explore the Response object's API.
print(dir(response))
x = response.json()
print(type(x)) | [
"leonardo.mendes@4linux.com.br"
] | leonardo.mendes@4linux.com.br |
43129de68025bc596b16c6a2f1e3e13749c0063e | 4892b326dfd98d9513ba583868b1298c8a4e276c | /experiments/04_benchmark_memory/shared.py | ec654e7e21ec078f1a29ec876fa6a3fd6eef3cce | [
"MIT"
] | permissive | MeNicefellow/cockpit | 019640a610b05c46426d35eb9e6c8d49a3d970c7 | 5bd5ab3cda03eda0b0bf276f29d5c28b83d70b06 | refs/heads/master | 2023-01-23T06:59:43.329922 | 2020-11-24T07:56:03 | 2020-11-24T07:56:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,413 | py | """Compare memory footprint w/o individual gradient transformations."""
import os
import sys
import warnings
import pandas
from memory_profiler import memory_usage
from torch.optim import SGD
from cockpit.runners.scheduled_runner import _ScheduleCockpitRunner
# Resolve data/figure output directories next to this file and ensure
# they exist before any benchmark writes results.
HERE = os.path.abspath(__file__)
DIR = os.path.join(os.path.dirname(HERE), "data")
FIG_DIR = os.path.join(os.path.dirname(HERE), "fig")
os.makedirs(DIR, exist_ok=True)
os.makedirs(FIG_DIR, exist_ok=True)
def set_up():
    """Prepare DeepOBS: fix its data directory and force CPU execution."""
    from cockpit.utils import fix_deepobs_data_dir
    from deepobs.pytorch.config import set_default_device
    fix_deepobs_data_dir()
    # Pin DeepOBS to the CPU so all benchmark runs use the same device.
    FORCE_CPU = True
    if FORCE_CPU:
        set_default_device("cpu")
# Sampling interval (seconds) used by memory_usage below.
INTERVAL = 0.01
def report_memory(f):
    """Run `f` under memory_profiler and return a DataFrame with columns
    'time' (seconds, reconstructed from INTERVAL) and 'usage' (the raw
    samples returned by memory_usage)."""
    mem_usage = memory_usage(f, interval=INTERVAL)
    mem_time = [INTERVAL * idx for idx in range(len(mem_usage))]
    return pandas.DataFrame(data={"time": mem_time, "usage": mem_usage})
def lr_schedule(num_epochs):
    """Build the learning-rate schedule used by the benchmark.

    The returned callable maps an epoch index to a multiplicative factor
    on the initial learning rate; here it is constant (1.0), i.e. the
    initial rate is kept for the whole run. `num_epochs` is accepted for
    interface compatibility but unused.

    Example alternatives:
    >>> # Halving the learning rate every epoch:
    >>> lambda epoch: 0.5 ** epoch
    >>> # A less aggressive decay:
    >>> lambda epoch: 0.9 ** epoch
    """
    def constant_factor(epoch):
        # Always scale by 1.0 -> constant learning rate.
        return 1.0
    return constant_factor
def run(quants, testproblem):
    """Benchmark `testproblem` with SGD while tracking the `quants`
    cockpit quantities; all plotting is disabled."""
    optimizer_class = SGD
    # DeepOBS-style hyperparameter spec for the optimizer.
    hyperparams = {
        "lr": {"type": float, "default": 0.001},
        "momentum": {"type": float, "default": 0.0},
        "nesterov": {"type": bool, "default": False},
    }
    def plot_schedule(global_step):
        # Never plot during the memory benchmark.
        return False
    runner = MemoryBenchmarkRunner(
        optimizer_class,
        hyperparams,
        quantities=quants,
        plot=False,
        plot_schedule=plot_schedule,
    )
    # One epoch only; the runner itself stops after the first batch.
    runner.run(
        testproblem=testproblem,
        num_epochs=1,
        l2_reg=0.0,  # necessary for backobs!
        track_interval=1,
        plot_interval=1,
        show_plots=False,
        save_plots=False,
        save_final_plot=False,
        save_animation=False,
        lr_schedule=lr_schedule,
    )
class MemoryBenchmarkRunner(_ScheduleCockpitRunner):
    """Run first forward-backward pass and update step of training, then quit.
    Note:
        Disables DeepOBS' additional metrics. Performs one step per epoch.
    """
    # Number of batches to execute before aborting the epoch.
    STOP_BATCH_COUNT_PER_EPOCH = 1
    def _maybe_stop_iteration(self, global_step, batch_count):
        """Stop after first step of each epoch."""
        if batch_count == self.STOP_BATCH_COUNT_PER_EPOCH:
            warnings.warn(
                "The memory benchmark runner performs only "
                + f"{self.STOP_BATCH_COUNT_PER_EPOCH} steps per epoch."
            )
            # Raising StopIteration ends the epoch early — presumably caught
            # by the base runner's training loop; confirm in
            # _ScheduleCockpitRunner.
            raise StopIteration
    def _should_eval(self):
        """Disable DeepOBS' evaluation of test/train/valid losses and accuracies."""
        return False
def hotfix_deepobs_argparse():
    """Truncate command line arguments from pytest call to make DeepOBS argparse work.

    TODO Think about good alternatives.
    """
    # Keep only the program name; drop every extra CLI token.
    program_only = sys.argv[:1]
    sys.argv = program_only
def parse():
    """Read the test problem (argv[1]) and an optional run number (argv[2]).

    Afterwards truncates sys.argv so DeepOBS' argparse does not choke on
    the extra arguments. Returns ``(testproblem, num_run_or_None)``.
    """
    testproblem = sys.argv[1]
    num_run = None
    if len(sys.argv) > 2:
        num_run = int(sys.argv[2])
    hotfix_deepobs_argparse()
    return testproblem, num_run
def skip_if_exists(filename):
    """Exit the script early (status 0) if `filename` already exists.

    Makes benchmark runs idempotent: results that were already produced
    are not recomputed.
    """
    if os.path.exists(filename):
        # BUG FIX: the message was an f-string without a placeholder and
        # never said WHICH file triggered the skip.
        print(f"Skipping as file already exists: {filename}")
        sys.exit(0)
| [
"Anonymous"
] | Anonymous |
bc4d811f9de41de53688ff53da3865f5f47c9c4f | 926b3c52070f6e309567c8598248fd5c57095be9 | /src/mmgeneration/tests/test_cores/test_fp16_utils.py | a4cf5a42c2cf57c0146ba1cc930cd875facaca6c | [
"Apache-2.0"
] | permissive | fengbingchun/PyTorch_Test | 410f7cd2303707b0141d433fb9d144a961e1f4c8 | df5c2169f0b699bcd6e74adb4cb0e57f7dcd9348 | refs/heads/master | 2023-05-23T16:42:29.711338 | 2023-03-25T11:31:43 | 2023-03-25T11:31:43 | 167,339,907 | 15 | 4 | null | 2023-03-25T11:31:45 | 2019-01-24T09:24:59 | C++ | UTF-8 | Python | false | false | 6,975 | py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.utils import TORCH_VERSION
from mmgen.core.runners.fp16_utils import (auto_fp16, cast_tensor_type,
nan_to_num)
def test_nan_to_num():
    """Check replacement of non-finite values and input type validation."""
    dirty = torch.tensor([float('inf'), float('nan'), 2.])
    # Explicit replacement values for +inf / -inf; nan becomes 0.
    cleaned = nan_to_num(dirty, posinf=255., neginf=-255.)
    assert (cleaned == torch.tensor([255., 0., 2.])).all()
    # Default replacements must keep the shape intact.
    assert nan_to_num(dirty).shape == (3, )
    # Non-tensor input is rejected.
    with pytest.raises(TypeError):
        nan_to_num(1)
def test_cast_tensor_type():
    """Check dtype casting for tensors, containers and pass-through types."""
    f32, i32 = torch.float32, torch.int32
    # Plain tensor: the dtype is converted.
    converted = cast_tensor_type(torch.FloatTensor([5.]), f32, i32)
    assert isinstance(converted, torch.Tensor)
    assert converted.dtype == i32
    # Strings and numpy arrays pass through with their type preserved.
    assert isinstance(cast_tensor_type('tensor', str, str), str)
    assert isinstance(
        cast_tensor_type(np.array([5.]), np.ndarray, np.ndarray), np.ndarray)
    # Dicts are converted value-wise.
    mapping = dict(
        tensor_a=torch.FloatTensor([1.]), tensor_b=torch.FloatTensor([2.]))
    converted = cast_tensor_type(mapping, f32, i32)
    assert isinstance(converted, dict)
    assert converted['tensor_a'].dtype == i32
    assert converted['tensor_b'].dtype == i32
    # Lists are converted element-wise.
    converted = cast_tensor_type(
        [torch.FloatTensor([1.]), torch.FloatTensor([2.])], f32, i32)
    assert isinstance(converted, list)
    assert converted[0].dtype == i32
    assert converted[1].dtype == i32
    # Unhandled builtins and modules are returned as-is.
    assert isinstance(cast_tensor_type(5, None, None), int)
    module = nn.Sequential(nn.Conv2d(2, 2, 3), nn.ReLU())
    assert isinstance(cast_tensor_type(module, None, None), nn.Module)
@pytest.mark.skipif(
    not TORCH_VERSION >= '1.6.0', reason='Lower PyTorch version')
def test_auto_fp16_func():
    """Check @auto_fp16 casting for plain, selective and out_fp32 usage."""
    with pytest.raises(TypeError):
        # ExampleObject is not a subclass of nn.Module
        class ExampleObject(object):
            @auto_fp16()
            def __call__(self, x):
                return x
        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)
    # apply to all input args
    class ExampleModule(nn.Module):
        @auto_fp16()
        def forward(self, x, y):
            return x, y
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    model.fp16_enabled = True
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y = model(input_x.cuda(), input_y.cuda())
        assert output_x.dtype == torch.float32
        assert output_y.dtype == torch.float32
    # apply to specified input args
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', ))
        def forward(self, x, y):
            return x, y
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    model.fp16_enabled = True
    # only 'x' is listed in apply_to, so only output_x is cast to half
    output_x, output_y = model(input_x, input_y)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.float32
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y = model(input_x.cuda(), input_y.cuda())
        assert output_x.dtype == torch.half
        assert output_y.dtype == torch.float32
    # apply to optional input args
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', 'y'))
        def forward(self, x, y=None, z=None):
            return x, y, z
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    assert output_z.dtype == torch.float32
    model.fp16_enabled = True
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.half
    assert output_z.dtype == torch.float32
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y, output_z = model(
            input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert output_x.dtype == torch.half
        assert output_y.dtype == torch.half
        assert output_z.dtype == torch.float32
    # out_fp32 set as an instance attribute
    class ExampleModule(nn.Module):
        def __init__(self):
            super().__init__()
            self.out_fp32 = True
        @auto_fp16(apply_to=('x', 'y'))
        def forward(self, x, y=None, z=None):
            return x, y, z
    model = ExampleModule()
    model.fp16_enabled = True
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    assert output_z.dtype == torch.float32
    # out_fp32=True passed to the decorator itself
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', 'y'), out_fp32=True)
        def forward(self, x, y=None, z=None):
            return x, y, z
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.half
    assert output_y.dtype == torch.float32
    assert output_z.dtype == torch.float32
    model.fp16_enabled = True
    output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
    assert output_x.dtype == torch.float32
    assert output_y.dtype == torch.float32
    assert output_z.dtype == torch.float32
    if torch.cuda.is_available():
        model.cuda()
        output_x, output_y, output_z = model(
            input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert output_x.dtype == torch.float32
        assert output_y.dtype == torch.float32
        assert output_z.dtype == torch.float32
| [
"fengbingchun@163.com"
] | fengbingchun@163.com |
1ccc6390564eddf80e6cf1914171d5f378c81954 | 2fe726cefb4448adf900dfe1242238a4622026ea | /simple_library/library_app/admin.py | bf6c366952106be918901ebb17550a835e83ca79 | [] | no_license | romankarki/Library-Book-App | 2623e1f3f0d2b3bc04ab7386a81e521bd2907305 | 571d3d843bb612e8fdbdb78a02cde1be781ab5fa | refs/heads/master | 2022-07-25T04:19:50.510807 | 2020-05-11T06:22:55 | 2020-05-11T06:22:55 | 262,956,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from django.contrib import admin
from library_app.models import Books, Student, BookUser
# Register your models here.
# Registering the models exposes them in Django's built-in admin UI so
# records can be created, edited and deleted without custom views.
admin.site.register(Books)
admin.site.register(Student)
admin.site.register(BookUser)
"romnkrki@gmail.com"
] | romnkrki@gmail.com |
7ce6a01f3c5650d970ef32d681f884d359a3815e | e3c17b642b827e798f3f2e3eb3ba108aeb1e5769 | /Unit 26/judge_set_intersection.py | 06ad5d53382c41d1d1937daa0bff2cab437d0d5f | [] | no_license | ckiekim/Python-Lecture-1903 | d6c2eecbf56168e5e5da26dc31f1740979ea7c8c | 4fa84301f9f1b567ba240823309e9c8d0f3f5c64 | refs/heads/master | 2020-04-28T15:13:58.892548 | 2019-03-27T14:03:40 | 2019-03-27T14:03:40 | 175,365,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | x, y = map(int, input().split())
a = {i for i in range(1, x+1) if x % i == 0}
b = {i for i in range(1, x+1) if y % i == 0}
divisor = a & b
print(a, b, divisor)
result = 0
if type(divisor) == set:
result = sum(divisor)
print(result)
| [
"ckiekim@naver.com"
] | ckiekim@naver.com |
be1275ae642e75f3b7013809e57c2dd830e8801e | f531c56db4cd2776c765b9aca0c4cebaea864ec2 | /ABC152/b.py | 67ad0d250e393f4fab6a6c5be90863c24befc29a | [] | no_license | SatoKeiju/AtCoder-Python3 | 1c76f8ec5d99470b6e316115f0433b4b3cb64024 | 9c2860e2cfda490d5848b0557876ef616eff01a2 | refs/heads/master | 2021-06-23T05:59:46.911733 | 2021-03-30T08:00:34 | 2021-03-30T08:00:34 | 212,088,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | def main():
a, b = map(int, input().split())
print(str(min(a, b)) * max(a, b))
if __name__ == '__main__':
main()
| [
"keiju.sato@abeja.asia"
] | keiju.sato@abeja.asia |
7e045832ecae12c48e1e1b63db2b5658de3529ec | 2293c76c3d18e2fcd44ded90bd40113d26285663 | /pyeccodes/defs/grib1/localConcepts/eswi/sort_table.py | 03e1224d57722c17f3827e62f20c2c5f19e1c0d2 | [
"Apache-2.0"
] | permissive | ecmwf/pyeccodes | b1f121dbddf68d176a03805ed5144ba0b37ac211 | dce2c72d3adcc0cb801731366be53327ce13a00b | refs/heads/master | 2022-04-23T10:37:40.524078 | 2020-04-18T06:30:29 | 2020-04-18T06:30:29 | 255,554,540 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,112 | py | def load(h):
return ({'abbr': 0, 'code': 0, 'title': 'none'},
{'abbr': 1, 'code': 1, 'title': 'Atm. conc. [g/kg]'},
{'abbr': 2, 'code': 2, 'title': 'Log Atm. conc. [g/kg]'},
{'abbr': 3, 'code': 3, 'title': 'Atm. conc. [g/m3]'},
{'abbr': 3, 'code': 3, 'title': 'Atm. conc. [g/m3]'},
{'abbr': 4, 'code': 4, 'title': 'Log Atm. conc. [g/m3]'},
{'abbr': 7, 'code': 7, 'title': 'Atm. conc. [number/m3]'},
{'abbr': 9, 'code': 9, 'title': 'Atm. conc. [Bq/m3]'},
{'abbr': 10, 'code': 10, 'title': 'Log Atm. conc. [Bq/m3]'},
{'abbr': 11, 'code': 11, 'title': 'Atm. conc. at reference height [g/kg]'},
{'abbr': 12, 'code': 12, 'title': 'Atm. conc. at reference height [g/m3]'},
{'abbr': 13, 'code': 13, 'title': 'Log Atm. conc. at reference height [g/m3]'},
{'abbr': 14, 'code': 14, 'title': 'Total column [g/m2]'},
{'abbr': 14, 'code': 14, 'title': 'Column up to 6000m [g/m2]'},
{'abbr': 14, 'code': 14, 'title': 'Column up above 6000m [g/m2]'},
{'abbr': 14, 'code': 14, 'title': 'Max in column up to 6000m [g/m3]'},
{'abbr': 14, 'code': 14, 'title': 'Max in column above 6000m [g/m3]'},
{'abbr': 15, 'code': 15, 'title': 'Level at max in column up to 6000m [m]'},
{'abbr': 15, 'code': 15, 'title': 'Level at max in column above 6000m [m]'},
{'abbr': 21, 'code': 21, 'title': 'Integrated atm. conc. s [g/kg]'},
{'abbr': 22, 'code': 22, 'title': 'Log Integrated atm. conc. s [g/kg]'},
{'abbr': 23, 'code': 23, 'title': 'Integrated atm. conc. s [g/m3]'},
{'abbr': 24,
'code': 24,
'title': 'Logarith of Integrated atm. conc. s [g/m3]'},
{'abbr': 27, 'code': 27, 'title': 'Integrated atm. conc. s [number/m3]'},
{'abbr': 29, 'code': 29, 'title': 'Integrated atm. conc. s [Bq/m3]'},
{'abbr': 30, 'code': 30, 'title': 'Log Integrated atm. conc. s [Bq/m3]'},
{'abbr': 51, 'code': 51, 'title': 'Conc. in liquid water [g/m3]'},
{'abbr': 53, 'code': 53, 'title': 'Conc. in liquid water Equivalents/m3'},
{'abbr': 54, 'code': 54, 'title': 'Conc. in liquid water [number/m3]'},
{'abbr': 55, 'code': 55, 'title': 'Conc. in liquid water [Bq/m3]'},
{'abbr': 61, 'code': 61, 'title': 'Conc. in ice water [g/m3]'},
{'abbr': 63, 'code': 63, 'title': 'Conc. in ice water Equivalents/m3'},
{'abbr': 64, 'code': 64, 'title': 'Conc. in ice water [number/m3]'},
{'abbr': 65, 'code': 65, 'title': 'Conc. in ice water [Bq/m3]'},
{'abbr': 71,
'code': 71,
'title': 'Conc. in precipitation [g/m3]',
'units': 'mg/l'},
{'abbr': 73, 'code': 73, 'title': 'Conc. in precipitation Equivalents/m3'},
{'abbr': 74, 'code': 74, 'title': 'Conc. in precipitation [number/m3]'},
{'abbr': 75, 'code': 75, 'title': 'Conc. in precipitation [Bq/m3]'},
{'abbr': 81, 'code': 81, 'title': 'Dry deposition [g/m2]'},
{'abbr': 82, 'code': 82, 'title': 'Log Dry deposition [g/m2]'},
{'abbr': 84, 'code': 84, 'title': 'Dry deposition [number/m2]'},
{'abbr': 85, 'code': 85, 'title': 'Dry deposition [Bq/m2]'},
{'abbr': 91, 'code': 91, 'title': 'Wet deposition [g/m2]'},
{'abbr': 92, 'code': 92, 'title': 'Log Wet deposition [g/m2]'},
{'abbr': 94, 'code': 94, 'title': 'Wet deposition [number/m2]'},
{'abbr': 95, 'code': 95, 'title': 'Wet deposition [Bq/m2]'},
{'abbr': 101, 'code': 101, 'title': 'Total deposition [g/m2]'},
{'abbr': 102, 'code': 102, 'title': 'Log Total deposition [g/m2]'},
{'abbr': 104, 'code': 104, 'title': 'Total deposition [number/m2]'},
{'abbr': 105, 'code': 105, 'title': 'Total deposition [Bq/m2]'},
{'abbr': 110, 'code': 110, 'title': 'Emissions [ton]'},
{'abbr': 111, 'code': 111, 'title': 'Emissions [kg]'},
{'abbr': 112, 'code': 112, 'title': 'Emissions [g]'},
{'abbr': 114, 'code': 114, 'title': 'Emissions [number]'},
{'abbr': 115, 'code': 115, 'title': 'Emissions [Bq]'},
{'abbr': 121, 'code': 121, 'title': 'Emissions [kg/s]'},
{'abbr': 122, 'code': 122, 'title': 'Emissions [g/s]'},
{'abbr': 124, 'code': 124, 'title': 'Emissions [number/s]'},
{'abbr': 125, 'code': 125, 'title': 'Emissions [Bq/s]'},
{'abbr': 131, 'code': 131, 'title': 'Emissions [kg/(m2 s)]'},
{'abbr': 132, 'code': 132, 'title': 'Emissions [g/(m2 s)]'},
{'abbr': 134, 'code': 134, 'title': 'Emissions [number/(m2 s)]'},
{'abbr': 135, 'code': 135, 'title': 'Emissions [Bq/(m2 s)]'},
{'abbr': 136, 'code': 136, 'title': 'Surface emissions [kg/(m2 s)]'},
{'abbr': 137, 'code': 137, 'title': 'Surface emissions [g/(m2 s)]'},
{'abbr': 138, 'code': 138, 'title': 'Surface emissions [number/(m2 s)]'},
{'abbr': 139, 'code': 139, 'title': 'Surface emissions [Bq/(m2 s)]'},
{'abbr': 150, 'code': 150, 'title': 'Inhalation dose [nSv]'},
{'abbr': 151, 'code': 151, 'title': 'Ground dose [nSv]'},
{'abbr': 152, 'code': 152, 'title': 'Infinite cloud dose [nSv]'},
{'abbr': 153, 'code': 153, 'title': 'Sum of cloud and ground dose [nSv]'},
{'abbr': 201, 'code': 201, 'title': 'Dry deposition velocity [m/s]'},
{'abbr': 202, 'code': 202, 'title': 'Settling velocity [m/s]'},
{'abbr': 203, 'code': 203, 'title': 'Scavenging coefficient [1/s]'},
{'abbr': 205, 'code': 205, 'title': 'Degree hours or days for last day [K]'},
{'abbr': 206, 'code': 206, 'title': 'Current degree days [K]'},
{'abbr': 207, 'code': 207, 'title': 'Critical degree days [K]'},
{'abbr': 208, 'code': 208, 'title': 'Accum pollen emission [grains/m2]'},
{'abbr': 209, 'code': 209, 'title': 'Correction factor [fraction]'},
{'abbr': 210, 'code': 210, 'title': 'Aerosol optical depth []'},
{'abbr': 240,
'code': 240,
'title': 'Deposition arrival since 1 Jan 1971 [days]'},
{'abbr': 241,
'code': 241,
'title': 'Latest deposition since 1 Jan 1971 [days]'},
{'abbr': 242,
'code': 242,
'title': 'Time of max activity since 1 Jan 1971 [days]'},
{'abbr': 243, 'code': 243, 'title': 'Max radioactive activity [Bq/m2]'},
{'abbr': 244, 'code': 244, 'title': 'Log Max radioactive activity'},
{'abbr': 250, 'code': 250, 'title': 'Relative occurrence []'},
{'abbr': 251, 'code': 251, 'title': 'statistics [kg]'},
{'abbr': 252, 'code': 252, 'title': 'statistics [mol]'},
{'abbr': None, 'code': 255, 'title': 'missing value'})
| [
"baudouin.raoult@ecmwf.int"
] | baudouin.raoult@ecmwf.int |
d76c7ac7230899afca2ec03da09f81a9ab6d1f03 | b7939b343e52d2633857a93e19108dde49109008 | /setup_finish.py | 3bd9921ead93001611ff878fb4301620787d7fd6 | [] | no_license | hobson/safety-monitor | 040e74e0ac6d153b084860b3cdd6d9739fd0c10e | 122b09bc7f55302cdd5fda576358b56dcd8ee03e | refs/heads/master | 2021-01-22T05:28:23.350603 | 2012-04-15T04:45:09 | 2012-04-15T04:45:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | import os
import glob
dist_dir = 'dist'
files = ['C:/Python25/lib/site-packages/wx-2.8-msw-ansi/wx/MSVCP71.dll',
'C:/Python25/lib/site-packages/wx-2.8-msw-ansi/wx/gdiplus.dll',
'./rev20.log',
'images']
for f in files:
os.system("cp -r "+f+" "+dist_dir+"/")
os.system("rm -r "+dist_dir+"/images/.svn")
os.system("rm -r "+dist_dir+"/images/Thumbs.db")
#os.system("gzip -r -S .zip -9 "+dist_dir)
os.system("7z a -tzip -mx=9 "+dist_dir+".zip "+dist_dir) | [
"hobsonlane@gmail.com"
] | hobsonlane@gmail.com |
a1c2632983e12ba8f5a00201653014204fb6181f | 14a853584c0c1c703ffd8176889395e51c25f428 | /sem1/fop/lab9/static/strings.py | 5a11cfaee8f14accb2823f0183f03c5641998655 | [] | no_license | harababurel/homework | d0128f76adddbb29ac3d805c235cdedc9af0de71 | 16919f3b144de2d170cd6683d54b54bb95c82df9 | refs/heads/master | 2020-05-21T12:25:29.248857 | 2018-06-03T12:04:45 | 2018-06-03T12:04:45 | 43,573,199 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | """
Most long messages displayed by the UI will be found here.
"""
from util.Color import *
STRINGS = {
'helpPrompt':
'Commands:\n' +
'\t%s - displays this prompt.\n' % Color.strong('help') +
'\t%s - adds a new student or assignment.\n' % Color.strong('add') +
'\t%s - removes an existing student or assignment.\n' % Color.strong('remove') +
'\t%s - displays all students or assignments.\n' % Color.strong('list') +
'\t%s - goes to previous state.\n' % Color.strong('undo') +
'\t%s - goes to next state.\n' % Color.strong('redo') +
'\t%s - displays statistics.\n' % Color.strong('stats') +
'\t%s - clears the screen.\n' % Color.strong('clear') +
'\t%s - saves the work session and exits the application.' % Color.strong('exit')
}
| [
"srg.pscs@gmail.com"
] | srg.pscs@gmail.com |
664537e820a66d7f15136e41787f159ac3ab7b86 | ba80ca143ba35fd481730786a27ebdb1f88ce835 | /algorithm/f/test/RomanNumerals.py | 50f14a15f212f49ed60fcc34487da6bd4748ab19 | [] | no_license | uiandwe/TIL | c541020b65adc53578aeb1c3ba4c6770b3b2e8b3 | 186544469374dd0279099c6c6aa7555ee23e42fe | refs/heads/master | 2022-02-15T08:33:07.270573 | 2022-01-01T15:22:54 | 2022-01-01T15:22:54 | 63,420,931 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | '''
I(1), V(5), X(10), L(50), C(100), D(500), M(1000)
1 -> I
2 -> II
3 -> III
4 -> IV
7 -> VII
10 -> X
39 -> XXXIX
246 -> CCXLVI
207 -> CCVII
1066 -> MLXVI
1776 -> MDCCLXXVI
1954 -> MCMLIV
'''
def roman(n):
    """Convert a positive integer to its Roman-numeral string."""
    # Value/symbol pairs in descending order, including the subtractive
    # forms (CM, CD, XC, XL, IX, IV) so e.g. 900 becomes 'CM', not 'DCCCC'.
    numerals = [
        (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
        (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
        (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
    ]
    pieces = []
    for value, symbol in numerals:
        # Greedily take the largest numeral that still fits.
        while n >= value:
            pieces.append(symbol)
            n -= value
    return ''.join(pieces)
| [
"noreply@github.com"
] | uiandwe.noreply@github.com |
c292451580a6c057267d586c1da1f9416fa0cefc | 882cc558e786a82f3c0f11d3b332b2c26c59e8d0 | /funker/handle.py | 0d0870e0e8a539cfdf27e1bc1354498910403167 | [
"Apache-2.0"
] | permissive | bfirsh/funker-python | 79fbff296a909cf58ae8817ab42c6f8df1432122 | e7e4f598a6ec2b0c14adde2f49af89aa88ec4dea | refs/heads/master | 2020-06-21T18:53:11.309645 | 2016-11-29T16:13:35 | 2016-11-29T16:13:35 | 74,773,577 | 9 | 2 | null | 2017-01-29T17:23:46 | 2016-11-25T16:31:02 | Python | UTF-8 | Python | false | false | 658 | py | import json
import six
from six.moves import socketserver
class HandleHandler(socketserver.StreamRequestHandler):
    """Serve one funker call: decode JSON kwargs from the request stream,
    invoke the function attached to the server, and write the JSON-encoded
    return value back to the client."""
    def handle(self):
        # The request body is a JSON object of keyword arguments;
        # six.text_type decodes the raw bytes on both Python 2 and 3.
        kwargs = json.loads(six.text_type(self.rfile.read(), "utf-8"))
        return_value = self.server.func(**kwargs)
        return_str = json.dumps(return_value)
        self.wfile.write(return_str.encode("utf-8"))
        self.wfile.close()
def handle(func):
    """Serve exactly one funker invocation of ``func`` on TCP port 9999.

    The server accepts a single connection, dispatches it to
    ``HandleHandler`` and then shuts down.
    """
    server = socketserver.TCPServer(("0.0.0.0", 9999), HandleHandler)
    server.request_queue_size = 1  # drop any connections except from the first
    server.timeout = None  # block indefinitely until a request arrives
    server.func = func  # make the target callable reachable from the handler
    server.handle_request()
    server.server_close()
| [
"ben@firshman.co.uk"
] | ben@firshman.co.uk |
55ffce84c42c62bceb73afa45c1838869dcec02b | 2fa102b20ea99d796cc3677c9305f1a80be18e6b | /cf_1154_A.py | 4edb3e6632103f67e9975c1130ae3888571ab9c4 | [] | no_license | pronob1010/Codeforces_Solve | e5186b2379230790459328964d291f6b40a4bb07 | 457b92879a04f30aa0003626ead865b0583edeb2 | refs/heads/master | 2023-03-12T11:38:31.114189 | 2021-03-03T05:49:17 | 2021-03-03T05:49:17 | 302,124,730 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | s = list(map(int, input().split()))
s.sort()
apb=s[0]
apc = s[1]
bpc = s[2]
apbpc = s[3]
c = apbpc - apb
b = bpc - c
a = apb-b
print(a,b,c) | [
"pronobmozumder.info@gmail.com"
] | pronobmozumder.info@gmail.com |
3147f1152d915a56edd4b17322b801527d96c1ea | ed0dd577f03a804cdc274f6c7558fafaac574dff | /python/pyre/applications/Executive.py | 3205a28c66169bb035d90b03ef2457a2a234159b | [
"Apache-2.0"
] | permissive | leandromoreira/vmaf | fd26e2859136126ecc8e9feeebe38a51d14db3de | a4cf599444701ea168f966162194f608b4e68697 | refs/heads/master | 2021-01-19T03:43:15.677322 | 2016-10-08T18:02:22 | 2016-10-08T18:02:22 | 70,248,500 | 3 | 0 | null | 2016-10-07T13:21:28 | 2016-10-07T13:21:27 | null | UTF-8 | Python | false | false | 2,919 | py | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
class Executive(object):
    """Drive a pyre application's configuration life cycle: command line
    parsing, registry/curator creation, validation and default actions.
    NOTE: this is Python 2 code (print statements).
    """
    # factories
    def createCommandlineParser(self):
        """create a command line parser"""
        import pyre.applications
        return pyre.applications.commandlineParser()
    def createRegistry(self, name=None):
        """create a registry instance to store my configuration"""
        # NOTE(review): self.name is presumably provided by the subclass
        # or a mixin -- it is not defined in this class; confirm.
        if name is None:
            name = self.name
        import pyre.inventory
        return pyre.inventory.registry(name)
    def createCurator(self, name=None):
        """create a curator to handle the persistent store"""
        if name is None:
            name = self.name
        import pyre.inventory
        curator = pyre.inventory.curator(name)
        return curator
    # configuration
    def processCommandline(self, registry, parser=None):
        """convert the command line arguments to a trait registry"""
        if parser is None:
            parser = self.createCommandlineParser()
        help, unprocessedArguments = parser.parse(registry)
        return help, unprocessedArguments
    def verifyConfiguration(self, unknownProperties, unknownComponents, mode='strict'):
        """verify that the user input did not contain any typos"""
        # 'relaxed' accepts anything; 'strict' rejects unknown properties;
        # 'pedantic' additionally rejects unknown components.
        if mode == 'relaxed':
            return True
        if unknownProperties:
            print " ## unrecognized properties:"
            for key, value, locator in unknownProperties:
                print " %s <- '%s' from %s" % (key, value, locator)
            self.usage()
            return False
        if mode == 'pedantic' and unknownComponents:
            print ' ## unknown components: %s' % ", ".join(unknownComponents)
            self.usage()
            return False
        return True
    def pruneRegistry(self):
        # Remove entries that correspond to my own traits so only
        # unclaimed configuration remains in the registry.
        registry = self.registry
        for trait in self.inventory.properties():
            name = trait.name
            registry.deleteProperty(name)
        for trait in self.inventory.components():
            for name in trait.aliases:
                registry.extractNode(name)
        return registry
    # the default application action
    def main(self, *args, **kwds):
        return
    # user assistance
    def help(self):
        print 'Please consider writing a help screen for this application'
        return
    def usage(self):
        print 'Please consider writing a usage screen for this application'
        return
# version
__id__ = "$Id: Executive.py,v 1.1.1.1 2006-11-27 00:09:54 aivazis Exp $"
# End of file
| [
"zli@netflix.com"
] | zli@netflix.com |
027107bfc9df6f0ba6ae047043cc917f2ccf3edb | 9d7ae4ba781a06d96fb1f4cc51b42abcc0928da6 | /sqlalchemy_utils/listeners.py | 29970ffb5b90253f2ab092e7f3c4779177c3cd81 | [] | no_license | tonyseek/sqlalchemy-utils | 5a4d6d1ebaf1d72af04fce30ff5473210cfdbef3 | 0ef12b0a070694fb8ef4c177f9816305110114b1 | refs/heads/master | 2021-01-15T08:47:40.983766 | 2015-01-14T13:39:04 | 2015-01-14T13:39:04 | 29,289,223 | 1 | 0 | null | 2015-01-15T09:03:24 | 2015-01-15T09:03:24 | null | UTF-8 | Python | false | false | 6,855 | py | import sqlalchemy as sa
from .exceptions import ImproperlyConfigured
def coercion_listener(mapper, class_):
    """Hook an automatic value-coercion listener onto every
    coercion-capable column property of the mapped class.
    """
    for prop in mapper.iterate_properties:
        try:
            coerce_ = prop.columns[0].type.coercion_listener
        except AttributeError:
            # Not a column property, or its type cannot coerce values.
            continue
        attribute = getattr(class_, prop.key)
        # retval=True lets the listener replace the value being set.
        sa.event.listen(attribute, 'set', coerce_, retval=True)
def instant_defaults_listener(target, args, kwargs):
    """Assign column defaults to ``target`` at instantiation time."""
    columns = sa.inspect(target.__class__).columns
    for key, column in columns.items():
        if column.default is None:
            continue
        default = column.default.arg
        # Callable defaults receive the instance; scalars are used as-is.
        value = default(target) if callable(default) else default
        setattr(target, key, value)
def force_auto_coercion(mapper=None):
    """
    Function that assigns automatic data type coercion for all classes which
    are of type of given mapper. The coercion is applied to all coercion
    capable properties. By default coercion is applied to all SQLAlchemy
    mappers.
    Before initializing your models you need to call force_auto_coercion.
    ::
        from sqlalchemy_utils import force_auto_coercion
        force_auto_coercion()
    Then define your models the usual way::
        class Document(Base):
            __tablename__ = 'document'
            id = sa.Column(sa.Integer, autoincrement=True)
            name = sa.Column(sa.Unicode(50))
            background_color = sa.Column(ColorType)
    Now scalar values for coercion capable data types will convert to
    appropriate value objects::
        document = Document()
        document.background_color = 'F5F5F5'
        document.background_color # Color object
        session.commit()
    :param mapper: The mapper which the automatic data type coercion should be
        applied to
    """
    if mapper is None:
        # Fall back to the global mapper so coercion covers every mapped class.
        mapper = sa.orm.mapper
    sa.event.listen(mapper, 'mapper_configured', coercion_listener)
def force_instant_defaults(mapper=None):
    """
    Function that assigns object column defaults on object initialization
    time. By default calling this function applies instant defaults to all
    your models.
    Setting up instant defaults::
        from sqlalchemy_utils import force_instant_defaults
        force_instant_defaults()
    Example usage::
        class Document(Base):
            __tablename__ = 'document'
            id = sa.Column(sa.Integer, autoincrement=True)
            name = sa.Column(sa.Unicode(50))
            created_at = sa.Column(sa.DateTime, default=datetime.now)
        document = Document()
        document.created_at # datetime object
    :param mapper: The mapper which the automatic instant defaults forcing
        should be applied to
    """
    if mapper is None:
        # Fall back to the global mapper so defaults apply to every mapped class.
        mapper = sa.orm.mapper
    sa.event.listen(mapper, 'init', instant_defaults_listener)
def auto_delete_orphans(attr):
    """
    Delete orphans for given SQLAlchemy model attribute. This function can be
    used for deleting many-to-many associated orphans easily. For more
    information see
    https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/ManyToManyOrphan.
    Consider the following model definition:
    ::
        from sqlalchemy.ext.associationproxy import association_proxy
        from sqlalchemy import *
        from sqlalchemy.orm import *
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy import event
        Base = declarative_base()
        tagging = Table(
            'tagging',
            Base.metadata,
            Column(
                'tag_id',
                Integer,
                ForeignKey('tag.id', ondelete='CASCADE'),
                primary_key=True
            ),
            Column(
                'entry_id',
                Integer,
                ForeignKey('entry.id', ondelete='CASCADE'),
                primary_key=True
            )
        )
        class Tag(Base):
            __tablename__ = 'tag'
            id = Column(Integer, primary_key=True)
            name = Column(String(100), unique=True, nullable=False)
            def __init__(self, name=None):
                self.name = name
        class Entry(Base):
            __tablename__ = 'entry'
            id = Column(Integer, primary_key=True)
            tags = relationship(
                'Tag',
                secondary=tagging,
                backref='entries'
            )
    Now lets say we want to delete the tags if all their parents get deleted (
    all Entry objects get deleted). This can be achieved as follows:
    ::
        from sqlalchemy_utils import auto_delete_orphans
        auto_delete_orphans(Entry.tags)
    After we've set up this listener we can see it in action.
    ::
        e = create_engine('sqlite://')
        Base.metadata.create_all(e)
        s = Session(e)
        r1 = Entry()
        r2 = Entry()
        r3 = Entry()
        t1, t2, t3, t4 = Tag('t1'), Tag('t2'), Tag('t3'), Tag('t4')
        r1.tags.extend([t1, t2])
        r2.tags.extend([t2, t3])
        r3.tags.extend([t4])
        s.add_all([r1, r2, r3])
        assert s.query(Tag).count() == 4
        r2.tags.remove(t2)
        assert s.query(Tag).count() == 4
        r1.tags.remove(t2)
        assert s.query(Tag).count() == 3
        r1.tags.remove(t1)
        assert s.query(Tag).count() == 2
    .. versionadded: 0.26.4
    :param attr: Association relationship attribute to auto delete orphans from
    """
    parent_class = attr.parent.class_
    target_class = attr.property.mapper.class_
    backref = attr.property.backref
    if not backref:
        raise ImproperlyConfigured(
            'The relationship argument given for auto_delete_orphans needs to '
            'have a backref relationship set.'
        )
    # NOTE(review): backref may also be configured as a (name, kwargs) tuple;
    # the getattr below assumes a plain string name -- confirm.
    @sa.event.listens_for(sa.orm.Session, 'after_flush')
    def delete_orphan_listener(session, ctx):
        # Look through Session state to see if we want to emit a DELETE for
        # orphans
        orphans_found = (
            any(
                isinstance(obj, parent_class) and
                sa.orm.attributes.get_history(obj, attr.key).deleted
                for obj in session.dirty
            ) or
            any(
                isinstance(obj, parent_class)
                for obj in session.deleted
            )
        )
        if orphans_found:
            # Emit a DELETE for all orphans
            (
                session.query(target_class)
                .filter(
                    ~getattr(target_class, attr.property.backref).any()
                )
                .delete(synchronize_session=False)
            )
| [
"konsta.vesterinen@gmail.com"
] | konsta.vesterinen@gmail.com |
d746fe4996011cd8732a10c60d661dbd2bc53eff | bb1e0e89fcf1f1ffb61214ddf262ba327dd10757 | /plotly_study/graph_objs/waterfall/increasing/__init__.py | bacb03f40d90248bd3aad08622978e5ad5d72d1e | [
"MIT"
] | permissive | lucasiscovici/plotly_py | ccb8c3ced89a0f7eccf1ae98551fa712460033fe | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | refs/heads/master | 2020-09-12T05:43:12.363609 | 2019-12-02T15:13:13 | 2019-12-02T15:13:13 | 222,328,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,415 | py | from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
Sets the marker color of all increasing values.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of plotly_study.graph_objs.waterfall.increasing.marker.Line
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the line color of all increasing values.
width
Sets the line width of all increasing values.
Returns
-------
plotly_study.graph_objs.waterfall.increasing.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "waterfall.increasing"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the marker color of all increasing values.
line
plotly_study.graph_objects.waterfall.increasing.marker.Line
instance or dict with compatible properties
"""
def __init__(self, arg=None, color=None, line=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.waterfall.increasing.Marker
color
Sets the marker color of all increasing values.
line
plotly_study.graph_objects.waterfall.increasing.marker.Line
instance or dict with compatible properties
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.waterfall.increasing.Marker
constructor must be a dict or
an instance of plotly_study.graph_objs.waterfall.increasing.Marker"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.waterfall.increasing import marker as v_marker
# Initialize validators
# ---------------------
self._validators["color"] = v_marker.ColorValidator()
self._validators["line"] = v_marker.LineValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("line", None)
self["line"] = line if line is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Marker", "marker"]
from plotly_study.graph_objs.waterfall.increasing import marker
| [
"you@example.com"
] | you@example.com |
789c0fd16ea82c8264607f3c813fe7c1703ceb81 | 660e35c822423685aea19d038daa8356722dc744 | /party/category.py | a35b8795bf2973d2c3aa84d30eef643b06b6e910 | [] | no_license | saifkazi/tryton_modules | a05cb4a90ae2c46ba39d60d2005ffc18ce5e44bb | 94bd3a4e3fd86556725cdff33b314274dcb20afd | refs/heads/main | 2023-05-05T12:20:02.059236 | 2021-05-19T10:46:37 | 2021-05-19T10:46:37 | 368,768,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,462 | py | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from sql.conditionals import Coalesce
from sql.operators import Equal
from trytond.model import (
ModelView, ModelSQL, DeactivableMixin, fields, Exclude, tree)
class Category(DeactivableMixin, tree(separator=' / '), ModelSQL, ModelView):
"Category"
__name__ = 'party.category'
name = fields.Char(
"Name", required=True, translate=True,
help="The main identifier of the category.")
parent = fields.Many2One(
'party.category', "Parent", select=True,
help="Add the category below the parent.")
childs = fields.One2Many(
'party.category', 'parent', "Children",
help="Add children below the category.")
@classmethod
def __setup__(cls):
super(Category, cls).__setup__()
t = cls.__table__()
cls._sql_constraints = [
('name_parent_exclude',
Exclude(t, (t.name, Equal), (Coalesce(t.parent, -1), Equal)),
'party.msg_category_name_unique'),
]
cls._order.insert(0, ('name', 'ASC'))
@classmethod
def __register__(cls, module_name):
super(Category, cls).__register__(module_name)
table_h = cls.__table_handler__(module_name)
# Migration from 4.6: replace unique by exclude
table_h.drop_constraint('name_parent_uniq')
| [
"saif.kazi76@gmail.com"
] | saif.kazi76@gmail.com |
cf53335750949034c20cc3ef5786467e683305df | 9811904ef72f0832c5fce44444f8f3b106dea165 | /admin_tools_stats/migrations/0001_initial.py | 36a32afa4b3c225c0ee7912bdce6010276ffb40e | [
"MIT"
] | permissive | areski/django-admin-tools-stats | 3b8d9f39ba41dbe733076e6d1f62c69d328637ff | 20fb537388895ed1f0913805bca18b97723b7dc1 | refs/heads/develop | 2023-02-22T18:53:01.672623 | 2020-01-16T14:48:21 | 2020-01-16T14:48:21 | 2,348,691 | 214 | 41 | NOASSERTION | 2023-02-11T17:57:17 | 2011-09-08T14:11:07 | Python | UTF-8 | Python | false | false | 3,865 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-13 11:29
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DashboardStats',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('graph_key', models.CharField(help_text='it needs to be one word unique. ex. auth, mygraph', max_length=90, unique=True, verbose_name='graph key')),
('graph_title', models.CharField(db_index=True, help_text='heading title of graph box', max_length=90, verbose_name='graph title')),
('model_app_name', models.CharField(help_text='ex. auth / dialer_cdr', max_length=90, verbose_name='app name')),
('model_name', models.CharField(help_text='ex. User', max_length=90, verbose_name='model name')),
('date_field_name', models.CharField(help_text='ex. date_joined', max_length=90, verbose_name='date field name')),
('operation_field_name', models.CharField(blank=True, help_text='The field you want to aggregate, ex. amount', max_length=90, null=True, verbose_name='Operate field name')),
('type_operation_field_name', models.CharField(blank=True, choices=[(b'Count', b'Count'), (b'Sum', b'Sum'), (b'Avg', b'Avg'), (b'Max', b'Max'), (b'Min', b'Min'), (b'StdDev', b'StdDev'), (b'Variance', b'Variance')], help_text='choose the type operation what you want to aggregate, ex. Sum', max_length=90, null=True, verbose_name='Choose Type operation')),
('is_visible', models.BooleanField(default=True, verbose_name='visible')),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='date')),
('updated_date', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'dashboard_stats',
'verbose_name': 'dashboard stats',
'verbose_name_plural': 'dashboard stats',
},
),
migrations.CreateModel(
name='DashboardStatsCriteria',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('criteria_name', models.CharField(db_index=True, help_text='it needs to be one word unique. Ex. status, yesno', max_length=90, verbose_name='criteria name')),
('criteria_fix_mapping', jsonfield.fields.JSONField(blank=True, help_text='a JSON dictionary of key-value pairs that will be used for the criteria', null=True, verbose_name='fixed criteria / value')),
('dynamic_criteria_field_name', models.CharField(blank=True, help_text='ex. for call records - disposition', max_length=90, null=True, verbose_name='dynamic criteria field name')),
('criteria_dynamic_mapping', jsonfield.fields.JSONField(blank=True, help_text='a JSON dictionary of key-value pairs that will be used for the criteria', null=True, verbose_name='dynamic criteria / value')),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='date')),
('updated_date', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'dash_stats_criteria',
'verbose_name': 'dashboard stats criteria',
'verbose_name_plural': 'dashboard stats criteria',
},
),
migrations.AddField(
model_name='dashboardstats',
name='criteria',
field=models.ManyToManyField(blank=True, to='admin_tools_stats.DashboardStatsCriteria'),
),
]
| [
"areski@gmail.com"
] | areski@gmail.com |
cbce07708e421612df9f4943eec5efae3a595746 | 2a68ce2f0f47370e2f57b9279cc8e1aab85e26da | /trojsten/dbsanitizer/tests.py | 45308305cf9ed8198fdb56412d4331066df8d443 | [
"MIT"
] | permissive | trojsten/web | 52007c3d575b21603bf205c1e7294a482eedbf85 | 97b7b3ae3ac46be786bde9c49a2cae6609dbf50f | refs/heads/master | 2023-08-17T23:30:16.857469 | 2023-07-30T16:31:34 | 2023-07-30T16:31:34 | 10,618,952 | 6 | 10 | MIT | 2023-09-04T19:09:09 | 2013-06-11T10:04:10 | Python | UTF-8 | Python | false | false | 2,543 | py | import datetime
from django.test import TestCase
from trojsten.contests.models import Competition, Round, Semester, Task
from trojsten.people.models import User, UserProperty, UserPropertyKey
from .model_sanitizers import (
GeneratorFieldSanitizer,
TaskSanitizer,
UserPropertySanitizer,
UserSanitizer,
)
class GeneratorFieldSanitizerTest(TestCase):
def test_data_replaced_by_generated_data(self):
def fake_generator():
return "generated_data"
sanitized_data = GeneratorFieldSanitizer(fake_generator).sanitize("original_data")
self.assertEquals(sanitized_data, "generated_data")
class TaskSanitizerTest(TestCase):
def test_task_data_sanitized(self):
c = Competition.objects.create(name="ABCD")
s = Semester.objects.create(year=47, competition=c, number=1)
r = Round.objects.create(number=3, semester=s, visible=True, solutions_visible=True)
Task.objects.create(number=2, name="foo", round=r)
TaskSanitizer().sanitize()
sanitized_task = Task.objects.get()
self.assertNotEquals(sanitized_task.name, "foo")
class UserSanitizerTest(TestCase):
def test_user_data_sanitized(self):
User.objects.create(
username="foo",
password="pwd",
first_name="Ferko",
last_name="Mrkvicka",
birth_date=datetime.date(year=2000, month=1, day=1),
email="ferko@example.com",
)
UserSanitizer().sanitize()
sanitized_user = User.objects.get()
self.assertNotEquals(sanitized_user.username, "foo")
self.assertEquals(sanitized_user.password, "")
self.assertNotEquals(sanitized_user.first_name, "Ferko")
self.assertNotEquals(sanitized_user.last_name, "Mrkvicka")
self.assertNotEquals(sanitized_user.birth_date, datetime.date(year=2000, month=1, day=1))
self.assertNotEquals(sanitized_user.last_name, "ferko@example.com")
class UserPropertySanitizerTest(TestCase):
def test_userproperty_data_sanitized(self):
key = UserPropertyKey.objects.create(key_name="foo")
user = User.objects.create(username="user")
UserProperty.objects.create(user=user, key=key, value="bar")
UserPropertySanitizer().sanitize()
sanitized_userproperty = UserProperty.objects.get()
self.assertEquals(sanitized_userproperty.key, key)
self.assertNotEquals(sanitized_userproperty.value, "bar")
self.assertEquals(len(sanitized_userproperty.value), 3)
| [
"mhozza@gmail.com"
] | mhozza@gmail.com |
15737f4817cf53b326364b6d29fe9fd568947d70 | 517d461257edd1d6b239200b931c6c001b99f6da | /Circuit_Playground/CircuitPython/libraries/adafruit-circuitpython-bundle-6.x-mpy-20211013/examples/emc2101_lut_example.py | e40521c8205807f188e5d8c7ce4b8be76f169be6 | [] | no_license | cmontalvo251/Microcontrollers | 7911e173badff93fc29e52fbdce287aab1314608 | 09ff976f2ee042b9182fb5a732978225561d151a | refs/heads/master | 2023-06-23T16:35:51.940859 | 2023-06-16T19:29:30 | 2023-06-16T19:29:30 | 229,314,291 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | # SPDX-FileCopyrightText: 2020 Bryan Siepert, written for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
import board
from adafruit_emc2101.emc2101_lut import EMC2101_LUT as EMC2101
i2c = board.I2C() # uses board.SCL and board.SDA
FAN_MAX_RPM = 1700
emc = EMC2101(i2c)
emc.manual_fan_speed = 50
time.sleep(1)
emc.lut[27] = 25
emc.lut[34] = 50
emc.lut[42] = 75
emc.lut_enabled = True
emc.forced_temp_enabled = True
print("Lut:", emc.lut)
emc.forced_ext_temp = 28 # over 25, should be 25%
time.sleep(3)
print("25%% duty cycle is %f RPM:" % emc.fan_speed)
emc.forced_ext_temp = 35 # over 30, should be 50%
time.sleep(3)
print("50%% duty cycle is %f RPM:" % emc.fan_speed)
emc.forced_ext_temp = 43 # over 42, should be 75%
time.sleep(3)
print("75%% duty cycle is %f RPM:" % emc.fan_speed)
| [
"cmontalvo@southalabama.edu"
] | cmontalvo@southalabama.edu |
4445e59c151d0526998628fc5e6f48826e731efe | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_132556.93-032329.6/sdB_sdssj_132556.93-032329.6_lc.py | 40f5f972dc38f9ea75f637b807ff08d9e12c39a9 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[201.487208,-3.391556], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_sdssj_132556.93-032329.6/sdB_sdssj_132556.93-032329.6_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
553f97fb014b172cd5365d8b22bf2ebbb4f7bd97 | 0d0b8236ff06027037d2a8a724d13a1866a9999c | /0x11-python-network_1/100-github_commits.py | 246b7b51fe87ce6eb9f1acda243b8b3831205621 | [] | no_license | Danucas/holbertonschool-higher_level_programming | 3f8e81a610bf80890280b764362b56ad8803e2df | b963d41af8bccf764dff67f80ea16f1184c0a96d | refs/heads/master | 2022-07-31T05:53:57.046789 | 2020-05-21T21:29:54 | 2020-05-21T21:29:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | #!/usr/bin/python3
"""
Python script to fetch an https request
"""
import requests
import sys
def main():
url = "https://api.github.com/repos/{}/{}/commits"
url = url.format(sys.argv[2], sys.argv[1])
response = requests.get(url)
commits = response.json()
for com in commits[:10]:
sha = com['sha']
author = com['commit']['author']['name']
print('{}: {}'.format(sha, author))
if __name__ == '__main__':
main()
| [
"danrodcastillo1994@gmail.com"
] | danrodcastillo1994@gmail.com |
4994f8e2ea7c14a3f49a1cc6ec20ccf81e3033c5 | 5779d964d5ee42b586697a640ff0f977e0fa1e55 | /synclient/model/paginated_results_of_submission_status.py | dfbe22b68739ed6283ac18d415da2dae69a78377 | [] | no_license | thomasyu888/synpy-sdk-client | 03db42c3c8411c8c1f8808e1145d7c2a8bcc3df1 | d1e19e26db5376c78c4ce0ff181ac3c4e0709cbb | refs/heads/main | 2023-02-28T09:33:12.386220 | 2021-02-02T15:09:59 | 2021-02-02T15:09:59 | 333,744,741 | 3 | 0 | null | 2021-01-30T12:10:50 | 2021-01-28T11:57:48 | Python | UTF-8 | Python | false | false | 7,401 | py | """
Platform Repository Service
Platform Repository Service - Sage Bionetworks Platform # noqa: E501
The version of the OpenAPI document: develop-SNAPSHOT
Contact: thomas.yu@sagebionetworks.org
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from synclient.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from synclient.model.submission_status_model import SubmissionStatusModel
globals()['SubmissionStatusModel'] = SubmissionStatusModel
class PaginatedResultsOfSubmissionStatus(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'results': ([SubmissionStatusModel],), # noqa: E501
'total_number_of_results': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'results': 'results', # noqa: E501
'total_number_of_results': 'totalNumberOfResults', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""PaginatedResultsOfSubmissionStatus - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
results ([SubmissionStatusModel]): The the id of the entity to which this reference refers. [optional] # noqa: E501
total_number_of_results (int): Calculating the actual totalNumberOfResults is not longer supported. Therefore, for each page, the totalNumberOfResults is estimated using the current page, limit, and offset. When the page size equals the limit, the totalNumberOfResults will be offset+pageSize+ 1. Otherwise, the totalNumberOfResults will be offset+pageSize. . [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| [
"thomas.yu@sagebase.org"
] | thomas.yu@sagebase.org |
77dbd599f3c0b21c50f8a630b45f745368c7c237 | 663d429e1f552ef958d37cfe4a0707354b544a9a | /rimi_linux_mysql/tcp_ip_socket/Io_test/io_select_test/test5.py | 5305eb8f5071b28540bcab38f841a1e131f9fe30 | [] | no_license | nie000/mylinuxlearn | 72a33024648fc4393442511c85d7c439e169a960 | 813ed75a0018446cd661001e8803f50880d09fff | refs/heads/main | 2023-06-20T07:46:11.842538 | 2021-07-15T13:46:43 | 2021-07-15T13:46:43 | 307,377,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | import socket
import select
r = []
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server.bind(('127.0.0.1',28889))
server.listen(1)
r.append(server)
while True:
rlist,wlist,xlist = select.select(r,[],[])
for i in rlist:
if i is server:
con,add = i.accept()
print(add)
r.append(con)
else:
try:
m = i.recv(1024)
if not m:
i.close()
r.remove(i)
continue
except:
r.remove(i) | [
"1073438012@qq.com"
] | 1073438012@qq.com |
6aa870cef6388448f00bc19edcd80f5e465a6ca7 | fa4df5c5790b4c7af37c46ef82aeac1230e36a0e | /VRD/backend/factory/modifier_factory.py | b0b36c65ff883eab7821c81539e9fc22790904d7 | [] | no_license | Gorgious56/VRD | 32d548b6f2e096b151c49f83b80c48c351b1265d | f7d5bbb665ebaa4f3b1d274909c15fffb7b74bf5 | refs/heads/master | 2022-11-15T10:14:39.196704 | 2020-07-18T15:29:30 | 2020-07-18T15:29:30 | 280,684,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | import bpy
from typing import Iterable
class ModSettings:
def __init__(self, mod_type: str, mod_name: str, attributes: dict = None):
self.mod_type = mod_type
self.mod_name = mod_name
self.attributes = attributes
def add_modifier(
obj: bpy.types.Object,
settings: ModSettings,
replace: bool = True) -> None:
if settings.mod_name in obj.modifiers and not replace:
return
new_mod = obj.modifiers.new(name=settings.mod_name, type=settings.mod_type)
if settings.attributes:
for attr, value in settings.attributes.items():
if hasattr(new_mod, attr):
setattr(new_mod, attr, value)
new_mod.show_expanded = False
def add_modifiers(
obj: bpy.types.Object,
mods: Iterable[ModSettings],
replace: bool = True) -> None:
(add_modifier(obj, mod, replace) for mod in mods)
| [
"nathan.hild@gmail.com"
] | nathan.hild@gmail.com |
2986f1a06afe7c78517613346f8667e2a57ab23a | 525a0588ed3eb7ae5843e55522b6cc83ac2abd59 | /biodivhack/urls.py | 4febb1b8d565cc1db71a64ad24b5e976e748dafb | [] | no_license | dvoong/biodivhack | 1601d51dc2a34b5a8002bbf7efd3faccfd5b93e1 | 618ce01016d212ed0463957180ee06c0b9d62fa0 | refs/heads/master | 2020-12-24T16:23:42.746356 | 2015-06-20T19:22:04 | 2015-06-20T19:22:04 | 37,766,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | """biodivhack URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from biodivhack import views
urlpatterns = [
url(r'^$', views.index),
url(r'^keyword-summary/(\d+)', views.keyword_summary),
url(r'^reviews/(\d+)/(\d+)', views.review),
url(r'^statuses/(\d+)', views.status),
url(r'^add-to-database', views.add_to_database),
url(r'^admin/', include(admin.site.urls)),
]
| [
"voong.david@gmail.com"
] | voong.david@gmail.com |
d1a50006728152eb14ee0200eb479b5264089dc0 | 28b1036824bfa4c3290d285090f073c3676a4389 | /my1stsite/settings/testing.py | 2d67d73d80806b9830c7bdded15661987ba366f5 | [] | no_license | zoie0312/my1stsite | ee0154409d2ac9ed1408f1b8736ef6a1547e82fb | b147102e332f789ee430e4666717189ae6e88d51 | refs/heads/master | 2021-01-22T04:41:15.006631 | 2013-11-29T02:36:30 | 2013-11-29T02:36:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | from .base import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
TEST_RUNNER = 'discover_runner.DiscoverRunner'
TEST_DISCOVER_TOP_LEVEL = root('..')
TEST_DISCOVER_ROOT = root('..')
TEST_DISCOVER_PATTERN = 'test_*'
| [
"vagrant@precise64.(none)"
] | vagrant@precise64.(none) |
c4681ca77b4913f01fadb65694fba7264ec93bb4 | f2b172f7c1dcf0ac28fe7465b5844b48facade18 | /12/1207/1207.py | 5d2fce8683456a4a169c17b65ac51d8606aae2cc | [] | no_license | 0gravity000/IntroducingPython | 2fde12485d0597e72a7da801a08d5048a47f2ff5 | 5d3281dbe37ed1a08d71cb6a36841781f9ac0ccf | refs/heads/master | 2023-07-19T02:53:23.081806 | 2021-09-30T01:51:44 | 2021-09-30T01:51:44 | 403,935,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | # 12.7 Pythonコードのデバッグ
# デバックでもっとも単純なのは、文字列を表示すること
# vars()は、関数への引数を含むローカル変数の値を抽出する
def func(*args, **kwargs):
print(vars())
func(1, 2, 3)
func(['a', 'b', 'argh'])
| [
"0gravity000@gmail.com"
] | 0gravity000@gmail.com |
fbfb74e10109085225bb38751b05a62c682c4b98 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /701/701.insert-into-a-binary-search-tree.291646803.Wrong-Answer.leetcode.python3.py | 3687f08550163587025901f57d3b5e1f85a4cfef | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | class Solution():
def insertIntoBST(self, root, val):
root_head = root
root_val = TreeNode(val)
while True:
if val < root.val:
if not root.left:
root = root.left
else:
root.left = root_val
break
else:
if not root.right:
root = root.right
else:
root.right = root_val
break
return root_head
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
cce56bc46a072cd4a07a8d3c7507c78ed15de20d | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D98B/REQOTED98BUN.py | 830d94b77397c27e9f72d80b435724d8e803eb91 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 7,917 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD98BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 35},
{ID: 'PAI', MIN: 0, MAX: 1},
{ID: 'ALI', MIN: 0, MAX: 5},
{ID: 'IMD', MIN: 0, MAX: 999},
{ID: 'IRQ', MIN: 0, MAX: 10},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'RFF', MIN: 0, MAX: 9999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'AJT', MIN: 0, MAX: 1, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 5},
]},
{ID: 'TAX', MIN: 0, MAX: 5, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 5},
]},
{ID: 'CUX', MIN: 0, MAX: 5, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'PAT', MIN: 0, MAX: 10, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'PCD', MIN: 0, MAX: 1},
{ID: 'MOA', MIN: 0, MAX: 1},
]},
{ID: 'TOD', MIN: 0, MAX: 10, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 2},
]},
{ID: 'EQD', MIN: 0, MAX: 10, LEVEL: [
{ID: 'HAN', MIN: 0, MAX: 5},
{ID: 'MEA', MIN: 0, MAX: 5},
{ID: 'FTX', MIN: 0, MAX: 5},
]},
{ID: 'RCS', MIN: 0, MAX: 999, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'FTX', MIN: 0, MAX: 99999},
]},
{ID: 'APR', MIN: 0, MAX: 25, LEVEL: [
{ID: 'PRI', MIN: 0, MAX: 1},
{ID: 'QTY', MIN: 0, MAX: 2},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'MOA', MIN: 0, MAX: 2},
{ID: 'RNG', MIN: 0, MAX: 2},
]},
{ID: 'DLM', MIN: 0, MAX: 1, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'NAD', MIN: 0, MAX: 99, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 25},
{ID: 'FII', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'DOC', MIN: 0, MAX: 5, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
{ID: 'TDT', MIN: 0, MAX: 10, LEVEL: [
{ID: 'QTY', MIN: 0, MAX: 5},
{ID: 'LOC', MIN: 0, MAX: 10, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
]},
{ID: 'PAC', MIN: 0, MAX: 99, LEVEL: [
{ID: 'MEA', MIN: 0, MAX: 5},
{ID: 'PCI', MIN: 0, MAX: 10, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'GIN', MIN: 0, MAX: 10},
]},
]},
{ID: 'SCC', MIN: 0, MAX: 10, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 5},
{ID: 'QTY', MIN: 0, MAX: 10, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
]},
{ID: 'ALC', MIN: 0, MAX: 99, LEVEL: [
{ID: 'ALI', MIN: 0, MAX: 5},
{ID: 'QTY', MIN: 0, MAX: 1, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
{ID: 'PCD', MIN: 0, MAX: 1, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
{ID: 'MOA', MIN: 0, MAX: 2, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
{ID: 'RTE', MIN: 0, MAX: 1, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
{ID: 'TAX', MIN: 0, MAX: 5, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
]},
]},
{ID: 'LIN', MIN: 1, MAX: 200000, LEVEL: [
{ID: 'PIA', MIN: 0, MAX: 25},
{ID: 'IMD', MIN: 0, MAX: 99},
{ID: 'MEA', MIN: 0, MAX: 5},
{ID: 'QTY', MIN: 0, MAX: 99},
{ID: 'PCD', MIN: 0, MAX: 1},
{ID: 'ALI', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 35},
{ID: 'GIN', MIN: 0, MAX: 1000},
{ID: 'GIR', MIN: 0, MAX: 1000},
{ID: 'QVR', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'PAI', MIN: 0, MAX: 1},
{ID: 'DOC', MIN: 0, MAX: 99},
{ID: 'CCI', MIN: 0, MAX: 999, LEVEL: [
{ID: 'CAV', MIN: 0, MAX: 10},
{ID: 'MEA', MIN: 0, MAX: 10},
]},
{ID: 'MOA', MIN: 0, MAX: 100, LEVEL: [
{ID: 'QTY', MIN: 0, MAX: 2},
{ID: 'IMD', MIN: 0, MAX: 1},
{ID: 'CUX', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 2},
]},
{ID: 'AJT', MIN: 0, MAX: 1, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 5},
]},
{ID: 'PRI', MIN: 0, MAX: 99, LEVEL: [
{ID: 'APR', MIN: 0, MAX: 1},
{ID: 'RNG', MIN: 0, MAX: 1},
{ID: 'CUX', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'RFF', MIN: 0, MAX: 9999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'LOC', MIN: 0, MAX: 100, LEVEL: [
{ID: 'QTY', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'TAX', MIN: 0, MAX: 10, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 5},
]},
{ID: 'TOD', MIN: 0, MAX: 5, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 2},
]},
{ID: 'EQD', MIN: 0, MAX: 10, LEVEL: [
{ID: 'HAN', MIN: 0, MAX: 5},
{ID: 'MEA', MIN: 0, MAX: 5},
{ID: 'FTX', MIN: 0, MAX: 5},
]},
{ID: 'RCS', MIN: 0, MAX: 999, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'FTX', MIN: 0, MAX: 99999},
]},
{ID: 'PAT', MIN: 0, MAX: 10, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'PCD', MIN: 0, MAX: 1},
{ID: 'MOA', MIN: 0, MAX: 1},
]},
{ID: 'PAC', MIN: 0, MAX: 99, LEVEL: [
{ID: 'MEA', MIN: 0, MAX: 5},
{ID: 'QTY', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 5, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'PCI', MIN: 0, MAX: 10, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'GIN', MIN: 0, MAX: 10},
]},
]},
{ID: 'NAD', MIN: 0, MAX: 999, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'DOC', MIN: 0, MAX: 5, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
{ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
{ID: 'ALC', MIN: 0, MAX: 99, LEVEL: [
{ID: 'ALI', MIN: 0, MAX: 5},
{ID: 'QTY', MIN: 0, MAX: 1, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
{ID: 'PCD', MIN: 0, MAX: 1, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
{ID: 'MOA', MIN: 0, MAX: 2, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
{ID: 'RTE', MIN: 0, MAX: 1, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
{ID: 'TAX', MIN: 0, MAX: 5, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
]},
]},
{ID: 'TDT', MIN: 0, MAX: 10, LEVEL: [
{ID: 'QTY', MIN: 0, MAX: 5},
{ID: 'LOC', MIN: 0, MAX: 10, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
]},
{ID: 'SCC', MIN: 0, MAX: 100, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 5},
{ID: 'QTY', MIN: 0, MAX: 10, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 5},
]},
]},
]},
{ID: 'UNS', MIN: 1, MAX: 1},
{ID: 'MOA', MIN: 0, MAX: 15},
{ID: 'CNT', MIN: 0, MAX: 10},
{ID: 'ALC', MIN: 0, MAX: 10, LEVEL: [
{ID: 'MOA', MIN: 1, MAX: 1},
{ID: 'ALI', MIN: 0, MAX: 1},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
c07f51e53dc8e49f9eef1416a8fd830023679490 | 9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612 | /exercises/1901040051/day11/mymodule/main1.py | 6d64c07eba011299f1539a221de2d0c5df683f51 | [] | no_license | shen-huang/selfteaching-python-camp | e8410bfc06eca24ee2866c5d890fd063e9d4be89 | 459f90c9f09bd3a3df9e776fc64dfd64ac65f976 | refs/heads/master | 2022-05-02T05:39:08.932008 | 2022-03-17T07:56:30 | 2022-03-17T07:56:30 | 201,287,222 | 9 | 6 | null | 2019-08-08T15:34:26 | 2019-08-08T15:34:25 | null | UTF-8 | Python | false | false | 2,360 | py | import jieba
import re
import json
import sys
import collections
from collections import Counter
sys.path.append("c:")
# import stats_word
with open("mymodule/tang300.json", "r", encoding="utf-8") as file:
try:
read_data = file.read()
except ValueError as e:
print(e)
def stats_text_en(en,count) :
''' 1. 英文词频统计:使用正则表达式过滤英文字符,使用Counter统计并排序。
2. 参数类型检查,不为字符串抛出异常。
'''
if type(en) == str :
text_en = re.sub("[^A-Za-z]", " ", en.strip())
# text_en = ''.join(text_en)
enList = text_en.split( )
return collections.Counter(enList).most_common(count)
else :
raise ValueError ('type of argumengt is not str')
# print(stats_text_en(read_data, 4))
def stats_text_cn(cn,count) :
''' 1. 使用jieba第三方库精确模式分词。
2. 使用正则表达式过滤汉字字符。
3. 使用for循环判断分词后词频列表元素长度大于等于2的生成新列表。
4. 使用标准库collections.Counter()统计词频并限制统计数量。
5. 参数类型检查,不为字符串抛出异常。
'''
if type(cn) == str :
cnList = re.findall(u'[\u4e00-\u9fff]+', cn.strip())
cnString = ''.join(cnList)
segList = jieba.cut(cnString,cut_all=False)
cnnewList = []
for i in segList :
if len(i) >= 2 :
cnnewList.append(i)
else :
pass
return collections.Counter(cnnewList).most_common(count)
else :
raise ValueError ('type of argumengt is not str')
# print(stats_text_cn(read_data, 2))
def stats_text(text_en_cn,count_en_cn) :
''' 1. 合并英汉词频统计:调用stats_text_en()和stats_text_cn()并合并其结果。
2. 参数类型检查,不为字符串抛出异常。
'''
if type(text_en_cn) == str :
return stats_text_en(text_en_cn,count_en_cn)+stats_text_cn(text_en_cn,count_en_cn)
else :
raise ValueError ('type of argumengt is not str')
print('输出词频最高的前20个中文词:\n ', stats_text_cn(read_data,20)) | [
"40155646+seven-tears@users.noreply.github.com"
] | 40155646+seven-tears@users.noreply.github.com |
a8ddc101a423861f9bad5474af8d432f66c1eb80 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /RzrKedEonc3BJGhY5_12.py | a021d787d5af49db12fceee016b509e732de9173 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | """
**Mubashir** needs your help to plant some trees. He can give you three
parameters of the land:
* **width** of the land `w`
* **length** of the land `l`
* **gap** between the trees `g`
You have to create an algorithm to return the **number of trees** which can be
planted **on the edges** of the given land in a **symmetrical layout** shown
below (unsymmetrical gap = x, tree = o, gap = -):
w=3, l=3, g=1
plant_trees(w, l, g) ➞ 4
o - o
- -
o - o
# Mubashir can plant 4 trees.
w=3, l=3, g=3
plant_trees(w, l, g) ➞ 2
o - -
- -
- - o
# Mubashir can plant 2 trees.
If the layout is not symmetrical, you have to return `0`:
w=3, l=3, g=2
plant_trees(w, l, g) ➞ 0
o - -
x o
x x x
# Planting 2 trees mean the gap of two trees will be greater than 2.
o - -
x o
o - -
# Planting 3 trees mean the gap of two trees will be less than 2.
Another Example for better understanding:
w=3, l=3, g=0
plant_trees(w, l, g) ➞ 8
o o o
o o
o o o
# Mubashir can plant 8 trees.
### Notes
N/A
"""
def plant_trees(w, l, g):
perimeter = 2*w + 2*l - 4
if w == 0 or l == 0:
return 0
elif perimeter%(g+1) == 0:
return int(perimeter/(g+1))
else:
return 0
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
49b4f104d0a3ffacd0a6920cd0f12b858d6768e7 | 83b9910372a8246c947c7365b9eb39d9b20cde13 | /src/model/plainEffNet.py | c7dddc5608db4d3bd223d3d118df0da818b4556e | [
"MIT"
] | permissive | comword/TCD20-DP-DeepModel | 977394a9b1c9ce350efdf944919f034a28ff878a | 7dca097957b745cf6345d8ac218ff28f306a5218 | refs/heads/main | 2023-07-09T15:12:45.228999 | 2021-08-22T14:05:01 | 2021-08-22T14:05:01 | 338,325,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,633 | py | import tensorflow as tf
class PlainEffNet(tf.keras.Model):
def __init__(self, input_shape=[3, 15, 224, 224], backbone='EfficientNetB0',
MLP_DIM=256, n_classes=16, MLP_DROPOUT_RATE=0.4, **kwargs):
super(PlainEffNet, self).__init__(**kwargs)
self.img_input = tf.keras.layers.Input(input_shape)
self.pos_input = tf.keras.layers.Input(input_shape[1])
self.backbone = getattr(tf.keras.applications, backbone)(
include_top=False, weights='imagenet', classes=n_classes,
input_shape=[input_shape[2], input_shape[3], input_shape[0]])
self.pool = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')
self.dense = tf.keras.layers.Dense(MLP_DIM, activation='relu')
self.dropout = tf.keras.layers.Dropout(
MLP_DROPOUT_RATE, name='top_dropout')
self.dense_out = tf.keras.layers.Dense(n_classes, activation='softmax')
# self.out = self.call([self.img_input, self.pos_input])
# super(PlainEffNet, self).__init__(
# inputs=[self.img_input, self.pos_input], outputs=self.out, **kwargs)
def call(self, x, training=False):
x, position_ids = x
shape = tf.shape(x)
B, C, F, H, W = shape[0], shape[1], shape[2], shape[3], shape[4]
x = tf.transpose(x, perm=[0, 2, 3, 4, 1]) # B, F, H, W, C
x = tf.reshape(x, (B * F, H, W, C))
x = self.backbone(x, training=training)
x = self.pool(x)
x = tf.reshape(x, (B, -1))
x = self.dense(x)
x = self.dropout(x, training=training)
x = self.dense_out(x)
return x
| [
"comword@live.com"
] | comword@live.com |
cf3cf7a93963bbf5a088a5e1c72b59a4549b56c9 | 145f57f0418924d982444598f12b291f9c280657 | /roboticstoolbox/tools/models.py | 3fdfee260d919f196663e064d2178c7f06ebc3fa | [
"MIT"
] | permissive | HaoWangSir/robotics-toolbox-python | 3b56fd7abc094df1555f7c0aa8d015ef6c344c53 | a93165018e9fa165bde353193af2eb1534bba992 | refs/heads/master | 2022-12-18T22:41:18.642417 | 2020-09-18T01:15:13 | 2020-09-18T01:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | import roboticstoolbox.models as m
def models():
"""
Display all robot models in summary form
``models()`` displays a list of all models provided by the Toolbox. It
lists the name, manufacturer, and number of joints.
"""
for category in ['DH', 'URDF', 'ETS']:
print(category + ':')
group = m.__dict__[category]
for cls in group.__dict__.values():
# TODO should check that cls issubclass of Robot superclass (when there is one)
try:
robot = cls()
except:
continue
s = robot.name
if robot.manufacturer is not None:
s += ' (' + robot.manufacturer + ')'
print(f" {s:40s} {robot.n:d} dof")
models() | [
"peter.i.corke@gmail.com"
] | peter.i.corke@gmail.com |
ff539193b88130070464951bfe9d6c30cd6969a6 | 9833cd31d96f2c38fd4d6291d660c534cbee638e | /code/visualize-dataset.py | 2fbb6a72df8418329a2ba66bf6f4ea5f9ef0ff06 | [] | no_license | AspirinCode/drug-discovery-feature-selection | 35129cdeb6665db0d04111364925dc5f62dd0661 | 69ae43ef498aaf1e3523220547732c5d3f7f310e | refs/heads/master | 2020-03-27T20:17:39.215137 | 2018-07-20T06:20:11 | 2018-07-20T06:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,733 | py | """
Visualize dataset:
* Feature importances using Extra Trees
* 2D plot using t-SNE with various perplexities
Execution time:
real 15m50.854s
user 14m30.858s
sys 0m30.375s
@author yohanes.gultom@gmail.com
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas
import os
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.ensemble import ExtraTreesClassifier
from time import time
# config
dataset_file = '../dataset/dataset.csv'
n_components = 2
perplexities = [5, 30, 50, 100]
chart_importances_filename = 'visualize-dataset_importances.png'
chart_filename_tpl = 'visualize-dataset_tsne_{}.png'
# check if display available
if os.name == 'posix' and "DISPLAY" not in os.environ:
matplotlib.use('Agg')
# read dataset
df = pandas.read_csv(dataset_file, index_col=0)
feature_names = list(df[df.columns.drop('Class')])
# split to data X and labels y
X = df[df.columns.drop('Class')].values.astype('float32')
y = df['Class'].values
# separate data by class
red = y == 0
green = y == 1
# scale data
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
# feature importance check using Extra Trees
forest = ExtraTreesClassifier(n_estimators=250, random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1] # reverse
n = 10
print("Top {} most important features:".format(n))
for f in range(min(X.shape[1], n)):
print("{}. feature {} ({}): {:.4g}".format(f + 1, indices[f], feature_names[indices[f]], importances[indices[f]]))
# Set figure size to 1200 x 880 px
plt.figure(figsize=(15, 11))
# Plot the feature importances of the forest
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances, color="r", align="center")
plt.xlim([-1, X.shape[1]])
plt.ylabel("Importance")
plt.xlabel("Feature Index")
plt.savefig(chart_importances_filename)
# visualize dataset with TSNE
for i, perplexity in enumerate(perplexities):
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='random', random_state=0, perplexity=perplexity)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE perplexity={} in {:.2g} sec".format(perplexity, t1 - t0))
# plot
fig, ax = plt.subplots()
ax.set_title("Perplexity=%d" % perplexity)
ax.scatter(Y[red, 0], Y[red, 1], c="r")
ax.scatter(Y[green, 0], Y[green, 1], c="g")
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
ax.axis('tight')
filename = chart_filename_tpl.format(perplexity)
plt.savefig(filename)
print("chart saved in {}".format(filename))
plt.show() | [
"yohanes.gultom@gmail.com"
] | yohanes.gultom@gmail.com |
ced609ff3700746a595cba6e854a51c4d16c80b6 | cfd7cd86b7098952910e7addf84ee96bbe463c4b | /iprPy-tools/process/process_structure_static.py | eeff1ef1535c0198bc273cd0230f24091a97cd67 | [] | no_license | vtran61/iprPy | 58519896abfd59bb7477bd8943e8a72ae0cce6cc | 53bc2b82863ac381710c3b20e90fd6f21db946f5 | refs/heads/master | 2021-01-14T14:23:16.459995 | 2016-03-30T19:32:11 | 2016-03-30T19:32:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,202 | py | import os
import glob
from DataModelDict import DataModelDict
def structure_static(xml_lib_dir):
calc_name = 'structure_static'
groups = os.path.join(xml_lib_dir, '*', calc_name, '*')
error_dict = DataModelDict()
for group_dir in glob.iglob(groups):
if os.path.isdir(group_dir):
calc_dir, group_name = os.path.split(group_dir)
pot_name = os.path.basename(os.path.dirname(calc_dir))
print pot_name
try:
with open(os.path.join(calc_dir, 'badlist.txt'), 'r') as f:
badlist = f.read().split()
except:
badlist = []
data = DataModelDict()
for sim_path in glob.iglob(os.path.join(group_dir, '*.xml')):
sim_file = os.path.basename(sim_path)
sim_name = sim_file[:-4]
if sim_name in badlist:
continue
with open(sim_path) as f:
sim = DataModelDict(f)['calculation-crystal-phase']
if 'error' in sim:
badlist.append(sim_name)
error_message = sim['error']
error = 'Unknown error'
for line in error_message.split('\n'):
if 'Error' in line:
error = line
error_dict.append(error, sim_name)
continue
try:
cell = sim['relaxed-atomic-system']['cell']
except:
tar_gz_path = sim_path[:-4] + '.tar.gz'
if os.isfile(tar_gz_path):
error_dict.append('Unknown error', sim_name)
continue
data.append('key', sim.get('calculation-id', ''))
data.append('file', sim['crystal-info'].get('artifact', ''))
data.append('symbols', '_'.join(sim['crystal-info'].aslist('symbols')))
data.append('Temperature (K)', sim['phase-state']['temperature']['value'])
data.append('Pressure (GPa)', sim['phase-state']['pressure']['value'])
cell = cell[cell.keys()[0]]
data.append('Ecoh (eV)', sim['cohesive-energy']['value'] )
if 'a' in cell:
data.append('a (A)', cell['a']['value'])
else:
data.append('a (A)', '')
if 'b' in cell:
data.append('b (A)', cell['b']['value'])
else:
data.append('b (A)', '')
if 'c' in cell:
data.append('c (A)', cell['c']['value'])
else:
data.append('c (A)', '')
C_dict = {}
for C in sim['elastic-constants'].iteraslist('C'):
C_dict[C['ij']] = C['stiffness']['value']
data.append('C11 (GPa)', C_dict.get('1 1', ''))
data.append('C22 (GPa)', C_dict.get('2 2', ''))
data.append('C33 (GPa)', C_dict.get('3 3', ''))
data.append('C12 (GPa)', C_dict.get('1 2', ''))
data.append('C13 (GPa)', C_dict.get('1 3', ''))
data.append('C23 (GPa)', C_dict.get('2 3', ''))
data.append('C44 (GPa)', C_dict.get('4 4', ''))
data.append('C55 (GPa)', C_dict.get('5 5', ''))
data.append('C66 (GPa)', C_dict.get('6 6', ''))
if len(data.keys()) > 0:
with open(os.path.join(calc_dir, 'structure_static_'+group_name+'.csv'), 'w') as f:
f.write(','.join(data.keys())+'\n')
for i in xrange(len(data.aslist('key'))):
f.write(','.join([str(data.aslist(k)[i]) for k in data.keys()]) + '\n')
with open(os.path.join(calc_dir, 'badlist.txt'), 'w') as f:
for bad in badlist:
f.write(bad+'\n')
| [
"lucas.hale@nist.gov"
] | lucas.hale@nist.gov |
1c6f5308ee148577f7f5e8389a9945efc8506c3e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_96/514.py | caadd24b565d411ff0959939b90e3e6c793fb7b7 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | #!/usr/bin/python
import sys
ns = [0]*31
s = [-1]*31
for i in xrange(1,31):
if i % 3 == 0:
ns[i] = i // 3
s[i] = i // 3 + 1
elif i % 3 == 1:
ns[i] = i // 3 + 1
s[i] = i // 3 + 1
elif i % 3 == 2:
ns[i] = i // 3 + 1
s[i] = i // 3 + 2
i = iter(map(int, sys.stdin.read().split()))
T = next(i)
for case in xrange(1,T+1):
N = next(i)
S = next(i)
p = next(i)
t = iter(sorted((next(i) for n in xrange(N)), reverse=True))
result = 0
while True:
try:
ti = next(t)
if ns[ti] >= p:
result += 1
elif s[ti] >= p and S > 0:
result += 1
S -= 1
else:
break
except:
break
print "Case #%d:" % case, result
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
e48daae6ec102de56d95f2ca83541aae1805e989 | d3e2a5fec27ae2272ff9191f45af84c8eacc7693 | /snakemakelib/graphics/axes.py | cefa25eb6f0b4fa0d06c5d8c7230925f1388ef4e | [
"MIT"
] | permissive | jfear/snakemakelib-core | c49b9308a66361722b75869b7f461e340fef188b | b2d3cf1ecb84d630d0cc04646f86859ccac7f4c1 | refs/heads/master | 2021-01-21T00:44:32.396115 | 2016-02-07T22:49:08 | 2016-02-07T22:49:08 | 50,690,606 | 0 | 0 | null | 2016-01-29T20:53:48 | 2016-01-29T20:53:48 | null | UTF-8 | Python | false | false | 4,374 | py | '''
Author: Per Unneberg
Created: Wed Dec 2 07:52:08 2015
'''
from . import utils
from snakemakelib.log import LoggerManager
smllogger = LoggerManager().getLogger(__name__)
__all__ = ['xaxis', 'yaxis', 'main', 'grid', 'legend']
def xaxis(fig, i=None, **kwargs):
"""xaxis - modify the xaxis
Args:
fig (:py:class:`~bokeh.plotting.Plot`): bokeh Plot object
i (int): index to use if setting tick formatters and the like; see `tick label formats <http://bokeh.pydata.org/en/latest/docs/user_guide/styling.html#tick-label-formats>`_
kwargs: keyword arguments to pass to figure xaxis
Example:
.. bokeh-plot::
:source-position: above
import pandas as pd
import numpy as np
from bokeh.plotting import figure, show, hplot
from snakemakelib.graphics import points, xaxis, yaxis, grid, main, legend
df = pd.DataFrame([[1,2], [2,5], [3,9]], columns=["x", "y"])
f = figure(title="Test", plot_width=300, plot_height=300)
points(f, "x", "y", df, color="red")
points(f, "y", "x", df, legend="y")
xaxis(f, axis_label="x", major_label_orientation=np.pi/3)
yaxis(f, axis_label=None, axis_line_color=None)
grid(f, grid_line_color="black")
main(f, title="My plot", title_text_font_style="italic",
title_text_color="olive", title_text_font="times")
legend(f, orientation="bottom_left")
show(f)
"""
kwargs = {k.replace("x_", ""):v for k,v in kwargs.items()}
try:
props = fig.xaxis[0].properties()
except:
raise
kwaxis = utils.fig_args(kwargs, props)
try:
if i is None:
for i in range(len(fig.xaxis)):
fig.xaxis[i].set(**kwaxis)
else:
fig.xaxis[i].set(**kwaxis)
except AttributeError:
raise
def yaxis(fig, i=None, **kwargs):
"""yaxis - modify the yaxis
Args:
fig (:py:class:`~bokeh.plotting.Plot`): bokeh Plot object
i (int): index to use if setting tick formatters and the like; see `tick label formats <http://bokeh.pydata.org/en/latest/docs/user_guide/styling.html#tick-label-formats>`_
kwargs: keyword arguments to pass to figure yaxis
Example:
see xaxis example
"""
kwargs = {k.replace("y_", ""):v for k,v in kwargs.items()}
try:
props = fig.yaxis[0].properties()
except:
raise
kwaxis = utils.fig_args(kwargs, props)
try:
if i is None:
for i in range(len(fig.yaxis)):
fig.yaxis[i].set(**kwaxis)
else:
fig.yaxis[i].set(**kwaxis)
except AttributeError:
raise
def main(fig, **kwargs):
"""main - modify the title
Args:
fig (:py:class:`~bokeh.plotting.Plot`): bokeh Plot object
kwargs: keyword arguments to pass to figure.title
Example:
"""
for k, v in kwargs.items():
if not k.startswith("title"):
smllogger.warn("trying to set attribute {} via title".format(k))
continue
try:
setattr(fig, k, v)
except AttributeError:
smllogger.error("unexpected attribute {} to 'main'".format(k))
raise
def legend(fig, **kwargs):
"""legend - modify the legend
Args:
fig (:py:class:`~bokeh.plotting.Plot`): bokeh Plot object
kwargs: keyword arguments to pass to figure.legend
Example:
See xaxis.
"""
if len(fig.legend) == 0:
smllogger.warn("no legend defined in figure; creation of new legend currently not supported")
return
for k, v in kwargs.items():
try:
setattr(fig.legend, k, v)
except AttributeError:
smllogger.error("unexpected attribute {} to {}".format(k, fig.legend))
raise
except:
raise
def grid(fig, **kwargs):
"""grid - modify the grid
Args:
fig (:py:class:`~bokeh.plotting.Plot`): bokeh Plot object
kwargs: keyword arguments to pass to figure grid
Example:
see xaxis example
"""
for k, v in kwargs.items():
try:
setattr(fig.grid, k, v)
except AttributeError:
smllogger.error("unexpected attribute {} to {}".format(k, fig.grid))
raise
| [
"per.unneberg@scilifelab.se"
] | per.unneberg@scilifelab.se |
686480212b8364d3ab57598f0cbbe63d471e740d | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/translation/azure-ai-translation-document/tests/asynctestcase.py | b1c7a3b9c6813d0e7969edfc55beae0aed351f78 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 4,458 | py | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
from testcase import DocumentTranslationTest, Document
from azure.ai.translation.document import DocumentTranslationInput, TranslationTarget
class AsyncDocumentTranslationTest(DocumentTranslationTest):
def __init__(self, method_name):
super(AsyncDocumentTranslationTest, self).__init__(method_name)
def generate_oauth_token(self):
if self.is_live:
from azure.identity.aio import ClientSecretCredential
return ClientSecretCredential(
os.getenv("TRANSLATION_TENANT_ID"),
os.getenv("TRANSLATION_CLIENT_ID"),
os.getenv("TRANSLATION_CLIENT_SECRET"),
)
async def _begin_and_validate_translation_async(self, async_client, translation_inputs, total_docs_count, language=None):
# submit operation
poller = await async_client.begin_translation(translation_inputs)
self.assertIsNotNone(poller.id)
# wait for result
doc_statuses = await poller.result()
# validate
self._validate_translation_metadata(poller=poller, status='Succeeded', total=total_docs_count, succeeded=total_docs_count)
async for doc in doc_statuses:
self._validate_doc_status(doc, language)
return poller.id
# client helpers
async def _begin_multiple_translations_async(self, async_client, operations_count, **kwargs):
wait_for_operation = kwargs.pop('wait', True)
language_code = kwargs.pop('language_code', "es")
docs_per_operation = kwargs.pop('docs_per_operation', 2)
result_ids = []
for i in range(operations_count):
# prepare containers and test data
'''
# note
since we're only testing the client library
we can use sync container calls in here
no need for async container clients!
'''
blob_data = Document.create_dummy_docs(docs_per_operation)
source_container_sas_url = self.create_source_container(data=blob_data)
target_container_sas_url = self.create_target_container()
# prepare translation inputs
translation_inputs = [
DocumentTranslationInput(
source_url=source_container_sas_url,
targets=[
TranslationTarget(
target_url=target_container_sas_url,
language_code=language_code
)
]
)
]
# submit multiple operations
poller = await async_client.begin_translation(translation_inputs)
self.assertIsNotNone(poller.id)
if wait_for_operation:
await poller.result()
else:
await poller.wait()
result_ids.append(poller.id)
return result_ids
async def _begin_and_validate_translation_with_multiple_docs_async(self, async_client, docs_count, **kwargs):
# get input parms
wait_for_operation = kwargs.pop('wait', False)
language_code = kwargs.pop('language_code', "es")
# prepare containers and test data
blob_data = Document.create_dummy_docs(docs_count=docs_count)
source_container_sas_url = self.create_source_container(data=blob_data)
target_container_sas_url = self.create_target_container()
# prepare translation inputs
translation_inputs = [
DocumentTranslationInput(
source_url=source_container_sas_url,
targets=[
TranslationTarget(
target_url=target_container_sas_url,
language_code=language_code
)
]
)
]
# submit operation
poller = await async_client.begin_translation(translation_inputs)
self.assertIsNotNone(poller.id)
# wait for result
if wait_for_operation:
result = await poller.result()
async for doc in result:
self._validate_doc_status(doc, "es")
# validate
self._validate_translation_metadata(poller=poller)
return poller
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
ae7f88407fe7d2451eb8774356d45933f90af59a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03340/s635836646.py | 2c571bf6e84d7cac823b0f125ae5db4cf672e286 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | n = int(input())
A = list(map(int,input().split()))
ans = 0
l = 0
r = 0
bit = A[0]
total = A[0]
while True:
if bit == total:
ans += r-l+1
r += 1
if r == n:
break
total += A[r]
bit ^= A[r]
else:
total -= A[l]
bit ^= A[l]
l += 1
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ed684da54409de93089b30bba581a10520c3695c | d12c7b974285a9ca0d4ddd06bd43223a03db5126 | /bkup_files/switchdb2.py | de040cb1d162e22ed404edfb5a92408a5984534d | [] | no_license | xod442/sad | 6b0006bdeb0ca31dc383b15de8197433c1a21733 | 0a1179b2730ee5a47c6e2d888b8bd748c9a46a0a | refs/heads/master | 2020-04-25T06:38:26.934968 | 2019-02-25T21:31:20 | 2019-02-25T21:31:20 | 172,587,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,120 | py | #!/usr/bin/env python
'''
Copyright 2016 Hewlett Packard Enterprise Development LP.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
__author__ = "@netwookie"
__copyright__ = "Copyright 2016, Hewlett Packard Enterprise Development LP."
__credits__ = ["Rick Kauffman"]
__license__ = "Apache2"
__version__ = "1.0.0"
__maintainer__ = "Rick Kauffman"
__email__ = "rick@rickkauffman.com"
__status__ = "Prototype"
switchdb A database tool for managing switches in the Ansible VAR file
'''
from flask import Flask, request, render_template, redirect, url_for, flash, session
from flask.ext.bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from sdb_maker import db
from sdb_maker import Students
app = Flask(__name__)
bootstrap = Bootstrap(app)
# Moving on
@app.route('/')
@app.route('/index')
def show_all():
return render_template('show_all.html', students = Students.query.all() )
@app.route('/new', methods = ['GET', 'POST'])
def new():
if request.method == 'POST':
if not request.form['name'] or not request.form['city'] or not request.form['addr']:
flash('Please enter all the fields', 'error')
else:
student = Students(request.form['name'], request.form['city'],
request.form['addr'], request.form['pin'])
db.session.add(student)
db.session.commit()
flash('Record was successfully added')
return redirect(url_for('show_all'))
return render_template('new.html')
if __name__ == '__main__':
db.create_all()
app.secret_key = 'SuperSecret'
app.debug = True
app.run(host='0.0.0.0')
| [
"rick@rickkauffman.com"
] | rick@rickkauffman.com |
85c357808c48b54144a7b87a95e364a7db447d23 | 3eeee2ab87695b5e9f209ba4601dbcebd5d00036 | /AndroidApp/app_hello.py | 8fc554a906525fa547bbd1abcd92ebc3908161f0 | [] | no_license | pangxie1987/WebApp | 483fbdd6c65f78e35ab2f1bd98701a7fb1fbb8f9 | 7d3e679bf1af4a5a4d4e89866789bb6f583eae71 | refs/heads/master | 2020-03-18T17:53:01.030177 | 2018-07-02T11:09:32 | 2018-07-02T11:09:32 | 135,057,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | # -*- coding:utf-8 -*-
#qpy:kivy
import kivy
kivy.require('1.10.0')
from kivy.app import App
from kivy.uix.button import Button
class TestApp(App):
def build(self):
return Button(text='hello,kivy')
TestApp().run() | [
"lpb.waln@outlook.com"
] | lpb.waln@outlook.com |
ecda99ac45a8c50659a0c956bd055386e192a895 | 27455af4306bdb2d470bc7aa6a412ffb7950e1e1 | /cask/accounts/models.py | cd6dbae3e1fef3870e44d171a3457f357b2ed62c | [
"Apache-2.0"
] | permissive | dcramer/cask-server | 7a647a31cb798273ee9d3d8c7e43c28e829dec80 | 32535229a907479c3645aa34b75755d3e2b12dda | refs/heads/master | 2022-12-09T10:00:57.842269 | 2018-08-30T15:50:44 | 2018-08-30T15:50:44 | 143,897,850 | 3 | 0 | Apache-2.0 | 2022-10-18T19:15:29 | 2018-08-07T16:08:56 | Python | UTF-8 | Python | false | false | 2,658 | py | from uuid import uuid4
from django.conf import settings
from django.contrib.auth.models import AbstractUser, BaseUserManager
from django.db import models
from django.utils.translation import ugettext_lazy as _
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, email, password, **extra_fields):
if not email:
raise ValueError("The given email must be set")
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
extra_fields.setdefault("is_staff", False)
extra_fields.setdefault("is_superuser", False)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault("is_staff", True)
extra_fields.setdefault("is_superuser", True)
if extra_fields.get("is_staff") is not True:
raise ValueError("Superuser must have is_staff=True.")
if extra_fields.get("is_superuser") is not True:
raise ValueError("Superuser must have is_superuser=True.")
return self._create_user(email, password, **extra_fields)
class Follower(models.Model):
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
from_user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="following"
)
to_user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="followers"
)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = (("from_user", "to_user"),)
class Identity(models.Model):
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
provider = models.CharField(max_length=32)
external_id = models.CharField(max_length=32)
class Meta:
unique_together = (("provider", "external_id"),)
class User(AbstractUser):
id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
username = None
first_name = None
last_name = None
name = models.CharField(max_length=128, null=True)
email = models.EmailField(_("email address"), unique=True)
USERNAME_FIELD = "email"
REQUIRED_FIELDS = []
objects = UserManager()
def get_full_name(self):
return self.name
def get_short_name(self):
return self.name
| [
"dcramer@gmail.com"
] | dcramer@gmail.com |
e6179298367cbd15cd408ea1aff67299148fb15c | 531947701b18907ab1646bc1666ad3129f20ccec | /ttp/formatters/n2g_formatter.py | d22e2fdf8b02047f75ae55b30b9f059645d323e5 | [
"MIT"
] | permissive | dmulyalin/ttp | 45a0df04c089874f677670e1105dd2c544b095b2 | 483863e7966f9ab2be5e8cbd8b6316c82e380f1a | refs/heads/master | 2023-07-06T01:46:40.799147 | 2023-06-25T00:40:39 | 2023-06-25T00:40:39 | 216,000,389 | 322 | 44 | MIT | 2022-10-21T09:32:32 | 2019-10-18T10:33:52 | Python | UTF-8 | Python | false | false | 1,501 | py | import logging
log = logging.getLogger(__name__)
def n2g(data, **kwargs):
# load kwargs
module = kwargs.get("module", "yed")
method = kwargs.get("method", "from_list")
path = kwargs.get("path", [])
node_dups = kwargs.get("node_duplicates", "skip")
link_dups = kwargs.get("link_duplicates", "skip")
method_kwargs = kwargs.get("method_kwargs", {})
algo = kwargs.get("algo", None)
# import N2G library
try:
if module.lower() == "yed":
from N2G import yed_diagram as create_diagram
elif module.lower() == "drawio":
from N2G import drawio_diagram as create_diagram
else:
log.error(
"No N2G module '{}', supported values are 'yEd', 'DrawIO'".format(
module
)
)
return data
except ImportError:
log.error("Failed to import N2G '{}' module".format(module))
return data
diagram_obj = create_diagram(node_duplicates=node_dups, link_duplicates=link_dups)
# normalize results_data to list:
if isinstance(data, dict): # handle the case for group specific output
data = [data]
# make graph
for result in data:
result_datum = _ttp_["output"]["traverse"](result, path)
getattr(diagram_obj, method)(result_datum, **method_kwargs)
# layout graph
if algo:
diagram_obj.layout(algo=algo)
# return results XML
data = diagram_obj.dump_xml()
return data
| [
"d.mulyalin@gmail.com"
] | d.mulyalin@gmail.com |
16d282009891a2c4adc9dddaac41212fbcddde77 | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v4/services/transports/user_data_service_grpc_transport.py | ec4db4762b7980b03241c7cdc39e9b4037068321 | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | false | 4,394 | py | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.ads.google_ads.v4.proto.services import user_data_service_pb2_grpc
class UserDataServiceGrpcTransport(object):
    """gRPC transport for the google.ads.googleads.v4.services
    UserDataService API.

    Exposes the raw gRPC stubs directly so callers can take advantage
    of advanced gRPC features when needed.
    """

    # OAuth scopes required by every RPC on this service (none).
    _OAUTH_SCOPES = (
    )

    def __init__(self, channel=None, credentials=None,
                 address='googleads.googleapis.com:443'):
        """Instantiate the transport class.

        Args:
            channel (grpc.Channel): An existing ``Channel`` to make calls
                through. Mutually exclusive with ``credentials``.
            credentials (google.auth.credentials.Credentials): Credentials
                identifying this application to the service. Resolved from
                the environment when omitted.
            address (str): Host and port the service is served from.
        """
        # A channel already carries credentials baked in, so accepting
        # both arguments would be ambiguous.
        if channel is not None and credentials is not None:
            raise ValueError(
                'The `channel` and `credentials` arguments are mutually '
                'exclusive.',
            )

        # Build a channel only when the caller did not supply one.
        self._channel = channel if channel is not None else self.create_channel(
            address=address,
            credentials=credentials,
        )

        # gRPC "stubs" bind to the channel and expose one callable per RPC.
        self._stubs = {
            'user_data_service_stub': user_data_service_pb2_grpc.UserDataServiceStub(self._channel),
        }

    @classmethod
    def create_channel(
            cls,
            address='googleads.googleapis.com:443',
            credentials=None,
            **kwargs):
        """Create and return a gRPC channel object.

        Args:
            address (str): The host for the channel to use.
            credentials (~.Credentials): Credentials identifying this
                application to the service; resolved from the environment
                when omitted.
            kwargs (dict): Extra keyword arguments forwarded to channel
                creation.

        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return google.api_core.grpc_helpers.create_channel(
            address,
            credentials=credentials,
            scopes=cls._OAUTH_SCOPES,
            **kwargs
        )

    @property
    def channel(self):
        """grpc.Channel: the channel this transport calls through."""
        return self._channel

    @property
    def upload_user_data(self):
        """Return the gRPC stub for :meth:`UserDataServiceClient.upload_user_data`.

        Uploads the given user data.

        Returns:
            Callable: accepts the appropriate deserialized request object
            and returns a deserialized response object.
        """
        return self._stubs['user_data_service_stub'].UploadUserData
"noreply@github.com"
] | fiboknacky.noreply@github.com |
0d2350576ecf378e8d10e1a1b24cde6cb267ba87 | c7967ec500b210513aa0b1f540144c931ca687ac | /알고리즘 스터디/개인공부/BinarySearch/LIS2.py | 779a83856bf648e87df66face366164bdbfb6827 | [] | no_license | sunminky/algorythmStudy | 9a88e02c444b10904cebae94170eba456320f8e8 | 2ee1b5cf1f2e5f7ef87b44643210f407c4aa90e2 | refs/heads/master | 2023-08-17T01:49:43.528021 | 2023-08-13T08:11:37 | 2023-08-13T08:11:37 | 225,085,243 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # https://www.acmicpc.net/problem/12015
# https://www.acmicpc.net/problem/12738
#세그먼트 트리로도 구현 가능
import sys
from bisect import bisect_left
if __name__ == '__main__':
n_number = int(sys.stdin.readline())
numbers = list(map(int, sys.stdin.readline().split()))
lis = []
for n in numbers:
idx = bisect_left(lis, n)
if idx == len(lis):
lis.append(n)
else:
lis[idx] = n
print(len(lis))
| [
"suns1502@gmail.com"
] | suns1502@gmail.com |
e998cf66ff60d424742afe61f74535b053faffa7 | cc9a87e975546e2ee2957039cceffcb795850d4f | /venv/lib/python3.7/site-packages/pip-19.0.3-py3.7.egg/pip/_internal/models/link.py | 141f2d2c35393534ffdd72be7baa027e24c5b1d5 | [] | no_license | CodeHunterDev/Belajar-Python | 304d3243801b91b3605d2b9bd09e49a30735e51b | 9dd2ffb556eed6b2540da19c5f206fedb218ae99 | refs/heads/master | 2023-03-19T22:12:46.330272 | 2020-02-04T08:02:00 | 2020-02-04T08:02:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,860 | py | # Copyright (c) 2020. Adam Arthur Faizal
import posixpath
import re
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._internal.download import path_to_url
from pip._internal.utils.misc import (
WHEEL_EXTENSION, redact_password_from_url, splitext,
)
from pip._internal.utils.models import KeyBasedCompareMixin
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, Tuple, Union, Text # noqa: F401
from pip._internal.index import HTMLPage # noqa: F401
class Link(KeyBasedCompareMixin):
"""Represents a parsed link from a Package Index's simple URL
"""
def __init__(self, url, comes_from=None, requires_python=None):
# type: (str, Optional[Union[str, HTMLPage]], Optional[str]) -> None
"""
url:
url of the resource pointed to (href of the link)
comes_from:
instance of HTMLPage where the link was found, or string.
requires_python:
String containing the `Requires-Python` metadata field, specified
in PEP 345. This may be specified by a data-requires-python
attribute in the HTML link tag, as described in PEP 503.
"""
# url can be a UNC windows share
if url.startswith('\\\\'):
url = path_to_url(url)
self.url = url
self.comes_from = comes_from
self.requires_python = requires_python if requires_python else None
super(Link, self).__init__(
key=(self.url),
defining_class=Link
)
def __str__(self):
if self.requires_python:
rp = ' (requires-python:%s)' % self.requires_python
else:
rp = ''
if self.comes_from:
return '%s (from %s)%s' % (redact_password_from_url(self.url),
self.comes_from, rp)
else:
return redact_password_from_url(str(self.url))
def __repr__(self):
return '<Link %s>' % self
@property
def filename(self):
# type: () -> str
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
name = urllib_parse.unquote(name)
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
# type: () -> str
return urllib_parse.urlsplit(self.url)[0]
@property
def netloc(self):
# type: () -> str
return urllib_parse.urlsplit(self.url)[1]
@property
def path(self):
# type: () -> str
return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
def splitext(self):
# type: () -> Tuple[str, str]
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
# type: () -> str
return self.splitext()[1]
@property
def url_without_fragment(self):
# type: () -> str
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'[#&]egg=([^&]*)')
@property
def egg_fragment(self):
# type: () -> Optional[str]
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')
@property
def subdirectory_fragment(self):
# type: () -> Optional[str]
match = self._subdirectory_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
# type: () -> Optional[str]
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
# type: () -> Optional[str]
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
# type: () -> Optional[str]
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def is_wheel(self):
# type: () -> bool
return self.ext == WHEEL_EXTENSION
@property
def is_artifact(self):
# type: () -> bool
"""
Determines if this points to an actual artifact (e.g. a tarball) or if
it points to an "abstract" thing like a path or a VCS location.
"""
from pip._internal.vcs import vcs
if self.scheme in vcs.all_schemes:
return False
return True
| [
"adam.faizal.af6@gmail.com"
] | adam.faizal.af6@gmail.com |
0cb2292823c2f8a42d1cff2808998981ab7b4e92 | 0ff91fa3bcd9cc115d5f9e73d82dca4d777143aa | /hackerrank/python/Strings/find-a-string-English.py | 59f84647150fc1b03d914bd883a9ebdb033c0039 | [] | no_license | Cekurok/codes-competition | 1b335851b3e07b58a276b29c72df16ddbeff6b80 | 834afa2cc50549c82c72f5b0285661cd81f9a837 | refs/heads/master | 2021-09-16T05:08:58.689661 | 2018-06-16T18:35:16 | 2018-06-16T18:35:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | def count_substring(string, sub_string):
count = 0
subLen = len(sub_string)
strLen = len(string)
strLen -= subLen
for i in range(0,strLen+1):
if (sub_string == string[i:i+subLen]):
count += 1
return count | [
"rrangarajan.85@gmail.com"
] | rrangarajan.85@gmail.com |
353c0067d968cb6f3ae5b0d88ad3817aec595d26 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/KbAdvertChannelResponse.py | 4713f34ba023cae3655b08598ab7e21203a5508d | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 2,569 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KbAdvertChannelResponse(object):
    """Koubei advert channel record (id, memo, name, status, type).

    Follows the alipay SDK model convention: private backing fields
    exposed through properties plus dict (de)serialization helpers.
    """

    # Attributes handled by the dict serializers below, in output order.
    _FIELD_NAMES = ('channel_id', 'memo', 'name', 'status', 'type')

    def __init__(self):
        self._channel_id = None
        self._memo = None
        self._name = None
        self._status = None
        self._type = None

    @property
    def channel_id(self):
        return self._channel_id

    @channel_id.setter
    def channel_id(self, value):
        self._channel_id = value

    @property
    def memo(self):
        return self._memo

    @memo.setter
    def memo(self, value):
        self._memo = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        """Serialize to a plain dict.

        Falsy fields are omitted; values that are themselves alipay
        models (expose ``to_alipay_dict``) are serialized recursively.
        """
        params = dict()
        for field in self._FIELD_NAMES:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a dict; returns None for falsy input."""
        if not d:
            return None
        o = KbAdvertChannelResponse()
        for field in KbAdvertChannelResponse._FIELD_NAMES:
            if field in d:
                setattr(o, field, d[field])
        return o
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
b3d499fb081e6e3f518bdc86e8f89fb91acb6430 | 754f2e0cc83a16efda4f7a9c76b34dceb082bec6 | /myblog/project/blog/models.py | 90f126c98a839a791c3f635a734c0273983e8491 | [] | no_license | veujs/myblog | b520c7742c6e761c851bbe9be13b235ef49587ea | 326613e1563d3e63af35604c6592f014b35177d2 | refs/heads/master | 2020-04-15T17:14:34.161131 | 2019-02-27T06:51:17 | 2019-02-27T06:51:17 | 164,866,687 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | from django.db import models
# Create your models here.
from django.utils import timezone
from django.contrib.auth.models import User
class BlogArticles(models.Model):
title = models.CharField(max_length=100)
author = models.ForeignKey(User,related_name="blog_posts")
body = models.TextField()
publish = models.DateTimeField(default=timezone.now)
class Meta():
db_table = "blog_articles"
ordering = ["-publish"]
def _str_(self):
return self.title
| [
"624040034@qq.com"
] | 624040034@qq.com |
eb7c616950b5af0ac046e3f4e20c015ceb71c733 | b1b77bb1ed47586f96d8f2554a65bcbd0c7162cc | /NETFLIX/NfWebCrypto/plugin/ppapi/ppapi/ppapi_ipc_untrusted.gyp | f1711feb5727c6987e611615c579f35dc1ca2e24 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | DanHefrman/stuff | b3624d7089909972ee806211666374a261c02d08 | b98a5c80cfe7041d8908dcfd4230cf065c17f3f6 | refs/heads/master | 2023-07-10T09:47:04.780112 | 2021-08-13T09:55:17 | 2021-08-13T09:55:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,510 | gyp | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'../native_client/build/untrusted.gypi',
'ppapi_ipc.gypi',
],
'conditions': [
['disable_nacl==0 and disable_nacl_untrusted==0', {
'targets': [
{
'target_name': 'ppapi_ipc_untrusted',
'type': 'none',
'variables': {
'ppapi_ipc_target': 1,
'nacl_win64_target': 0,
'nacl_untrusted_build': 1,
'nlib_target': 'libppapi_ipc_untrusted.a',
'build_glibc': 0,
'build_newlib': 1,
'defines': [
'NACL_PPAPI_IPC_PROXY',
# Enable threading for the untrusted side of the proxy.
# TODO(bbudge) remove when this is the default.
'ENABLE_PEPPER_THREADING',
],
},
'include_dirs': [
'..',
],
'dependencies': [
'../native_client/tools.gyp:prep_toolchain',
'../base/base_untrusted.gyp:base_untrusted',
'../gpu/gpu_untrusted.gyp:gpu_ipc_untrusted',
'../ipc/ipc_untrusted.gyp:ipc_untrusted',
'../ppapi/ppapi_shared_untrusted.gyp:ppapi_shared_untrusted',
'../components/components_tracing_untrusted.gyp:tracing_untrusted',
],
},
],
}],
],
}
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
0515d3547fc81361b6a0ce562a54675280caf442 | f4dcde4b7b91bf293d9f1f626ff2d609c29fbd79 | /common/configDB.py | 1cffde0d8724676b17179dc96b4add15aa6f93b5 | [] | no_license | hanzhichao/interfaceTest | ce182486336f276431f849e5b7b49978b22a37a2 | bc75261ed246e3b18433a98ab91700281dca45ca | refs/heads/master | 2020-03-08T02:17:52.004295 | 2018-04-03T05:28:26 | 2018-04-03T05:28:26 | 127,855,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | """数据库操作"""
import pymysql
import sys
sys.path.append("..")
import readConfig as readConfig
from common.Log import MyLog as Log
localReadConfig = readConfig.ReadConfig()
class MyDB(object):
    """Thin wrapper around a pymysql connection configured via readConfig.

    Connection settings are read once at class-definition time; the
    instance exposes connect / execute / fetch / close helpers.
    """
    global host, username, password, port, database, readConfig
    host = localReadConfig.get_db("host")
    username = localReadConfig.get_db("username")
    password = localReadConfig.get_db("password")
    port = localReadConfig.get_db("port")
    database = localReadConfig.get_db("database")
    config = {
        'host': str(host),
        'user': username,
        'passwd': password,
        'db': database
    }

    def __init__(self):
        self.log = Log.get_log()
        self.logger = self.log.get_logger()

    def connectDB(self):
        """Open the database connection and create a cursor."""
        try:
            # Fix: the module is `pymysql` (was the typo `pymsql`), and the
            # class attribute must be referenced explicitly -- a bare
            # `config` is not visible from inside a method body.
            self.db = pymysql.connect(**MyDB.config)
            self.cursor = self.db.cursor()
            print("连接数据库成功")
        except pymysql.Error as ex:
            # pymysql failures derive from pymysql.Error, not the builtin
            # ConnectionError the original caught (which never matched).
            self.logger.error(str(ex))

    def executeSQL(self, sql, params):
        """Connect, run one parameterized statement, and commit.

        Returns the cursor so callers can fetch rows from it.
        """
        self.connectDB()
        # Fix: was `self.cursor = excute(sql, params)` (NameError, and it
        # would have clobbered the cursor with execute's return value).
        self.cursor.execute(sql, params)
        self.db.commit()
        return self.cursor

    def get_all(self, cursor):
        """Fetch every remaining row from the given cursor."""
        value = cursor.fetchall()
        return value

    def get_one(self, cursor):
        """Fetch a single row from the given cursor."""
        value = cursor.fetchone()
        return value

    def closeDB(self):
        """Close the underlying connection."""
        self.db.close()
        print("数据库关闭")
| [
"han_zhichao@sina.cn"
] | han_zhichao@sina.cn |
bdc0f69515b694afde031a265ab90f53cd14d3b0 | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/USPS/DeliveryInformationAPI/TrackConfirmFields.py | b3eb3882fb518cf16065c175fb1206576e708c22 | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,238 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# TrackConfirmFields
# Track a package sent via USPS and return tracking information with details in separate XML tags.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class TrackConfirmFields(Choreography):
    """Choreo that tracks a USPS package and returns the tracking
    information with details split into separate XML tags."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the TrackConfirmFields Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/USPS/DeliveryInformationAPI/TrackConfirmFields')

    def new_input_set(self):
        # Fresh, empty input set to populate before executing this choreo.
        return TrackConfirmFieldsInputSet()

    def _make_result_set(self, result, path):
        # Wrap raw execution output in the typed result set.
        return TrackConfirmFieldsResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle for a (possibly asynchronous) execution of this choreo.
        return TrackConfirmFieldsChoreographyExecution(session, exec_id, path)
class TrackConfirmFieldsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the TrackConfirmFields
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_Endpoint(self, value):
        """
        Set the value of the Endpoint input for this Choreo. ((optional, string) Set to 'production' to hit the production server; defaults to 'testing', i.e. the sandbox.)
        """
        InputSet._set_input(self, 'Endpoint', value)
    def set_Password(self, value):
        """
        Set the value of the Password input for this Choreo. ((required, password) The password assigned by USPS.)
        """
        InputSet._set_input(self, 'Password', value)
    def set_TrackID(self, value):
        """
        Set the value of the TrackID input for this Choreo. ((required, string) The tracking number; may contain alphanumeric characters.)
        """
        InputSet._set_input(self, 'TrackID', value)
    def set_UserId(self, value):
        """
        Set the value of the UserId input for this Choreo. ((required, string) Alphanumeric user ID assigned by USPS.)
        """
        InputSet._set_input(self, 'UserId', value)
class TrackConfirmFieldsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the TrackConfirmFields Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # Parse a JSON string into Python objects (note: the parameter
        # name shadows the builtin `str`; kept for API compatibility).
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from USPS Web Service)
        """
        return self._output.get('Response', None)
class TrackConfirmFieldsChoreographyExecution(ChoreographyExecution):
    """Represents one execution of the TrackConfirmFields Choreo."""
    def _make_result_set(self, response, path):
        # Wrap the raw response payload in the typed result set.
        return TrackConfirmFieldsResultSet(response, path)
| [
"dattasaurabh82@gmail.com"
] | dattasaurabh82@gmail.com |
017e7f2ed891206d9d845f61c7bdc5467026b6d5 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_148/218.py | d6a472b10295e9b40fed81edb93442d25c164db7 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | import sys
def prework(argv):
    """Placeholder for argv-dependent setup.

    Returns a short message describing what was done (currently nothing).
    """
    return "nothing"
def once():
    '''Solve one test case read from stdin.

    Greedy pairing: repeatedly take the heaviest remaining item and,
    if it fits together with the lightest one under the limit, ship
    them together; each shipment counts once.
    '''
    _, limit = [int(tok) for tok in input().split()]
    weights = sorted(int(tok) for tok in input().split())
    trips = 0
    while len(weights) > 1:
        heaviest = weights.pop()
        # pair the heaviest with the lightest when they fit together
        if weights[0] + heaviest <= limit:
            weights.pop(0)
        trips += 1
    if weights:
        trips += 1
    return trips
def printerr(*v):
    '''Print the given values to stderr (progress/debug output).'''
    print(*v, file=sys.stderr)
def main():
    '''Read the case count, solve each case with once(), and print
    answers in Code Jam "Case #i: ans" format (progress to stderr).'''
    TT = int(input())
    for tt in range(1,TT+1):
        printerr("coping Case %d.."%(tt))
        ans = once()
        print("Case #%d: %s"%(tt, (ans)))
msg = prework(sys.argv)
print("prework down with", msg, file=sys.stderr)
main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
f99b17b6e18c5be816cfc3a835d3e975bcc42435 | 85b402cd9e762b2749c978105ea362b10d335e5c | /219-unet_model_with_functions_of_blocks.py | 2fe553b815fa93ba3cdb1597aaf31e80f749f4f1 | [] | no_license | bnsreenu/python_for_microscopists | 29c08f17461baca95b5161fd4cd905be515605c4 | 4b8c0bd4274bc4d5e906a4952988c7f3e8db74c5 | refs/heads/master | 2023-09-04T21:11:25.524753 | 2023-08-24T18:40:53 | 2023-08-24T18:40:53 | 191,218,511 | 3,010 | 2,206 | null | 2023-07-25T07:15:22 | 2019-06-10T17:53:14 | Jupyter Notebook | UTF-8 | Python | false | false | 1,734 | py | # Building Unet by dividing encoder and decoder into blocks
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda
from keras.optimizers import Adam
from keras.layers import Activation, MaxPool2D, Concatenate
def conv_block(input, num_filters):
    """Two 3x3 convolutions, each followed by batch norm and ReLU."""
    out = Conv2D(num_filters, 3, padding="same")(input)
    out = BatchNormalization()(out)  # not in the original U-Net paper
    out = Activation("relu")(out)
    out = Conv2D(num_filters, 3, padding="same")(out)
    out = BatchNormalization()(out)  # not in the original U-Net paper
    out = Activation("relu")(out)
    return out
#Encoder block: Conv block followed by maxpooling
def encoder_block(input, num_filters):
    """Conv block followed by 2x2 max pooling.

    Returns (features, pooled): the pre-pool features feed the matching
    decoder skip connection.
    """
    features = conv_block(input, num_filters)
    pooled = MaxPool2D((2, 2))(features)
    return features, pooled
#Decoder block
#skip features gets input from encoder for concatenation
def decoder_block(input, skip_features, num_filters):
    """Upsample, concatenate with the encoder skip tensor, then conv block."""
    upsampled = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(input)
    merged = Concatenate()([upsampled, skip_features])
    return conv_block(merged, num_filters)
#Build Unet using the blocks
def build_unet(input_shape):
    """Assemble a U-Net from the encoder/decoder blocks above.

    Single-channel sigmoid head, i.e. binary segmentation (swap the
    final activation for softmax to go multiclass).
    """
    inputs = Input(input_shape)
    # Contracting path
    skip1, pool1 = encoder_block(inputs, 64)
    skip2, pool2 = encoder_block(pool1, 128)
    skip3, pool3 = encoder_block(pool2, 256)
    skip4, pool4 = encoder_block(pool3, 512)
    # Bottleneck bridge
    bridge = conv_block(pool4, 1024)
    # Expansive path with skip connections
    up1 = decoder_block(bridge, skip4, 512)
    up2 = decoder_block(up1, skip3, 256)
    up3 = decoder_block(up2, skip2, 128)
    up4 = decoder_block(up3, skip1, 64)
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(up4)
    return Model(inputs, outputs, name="U-Net")
| [
"noreply@github.com"
] | bnsreenu.noreply@github.com |
697935f155a7bdd2c57c63062706724a1ee2d1c3 | c264153f9188d3af187905d846fa20296a0af85d | /Python/Python3网络爬虫开发实战/《Python3网络爬虫开发实战》随书源代码/scrapyseleniumtest/scrapyseleniumtest/items.py | 383caae92fceee363746119211b6714f0b0d361a | [] | no_license | IS-OSCAR-YU/ebooks | 5cd3c1089a221759793524df647e231a582b19ba | b125204c4fe69b9ca9ff774c7bc166d3cb2a875b | refs/heads/master | 2023-05-23T02:46:58.718636 | 2021-06-16T12:15:13 | 2021-06-16T12:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class ProductItem(Item):
    """Scrapy item for one product scraped from a listing page.

    NOTE(review): `collection` is presumably the storage collection name
    consumed by a pipeline (e.g. MongoDB) -- confirm against pipelines.py.
    """
    collection = 'products'
    # Scraped fields: image URL, price, deal count, title, shop name,
    # and shop location.
    image = Field()
    price = Field()
    deal = Field()
    title = Field()
    shop = Field()
    location = Field()
| [
"jiangzhangha@163.com"
] | jiangzhangha@163.com |
60194a6d0d99911e72ce15440b1a894e43f1cb31 | c20a356220d3f66d49bbad88e6bd56a26aac1465 | /tf_test_4_4_2.py | 2c4f0eb10427a9ae2fd3635bea1995c04a5a6e4e | [] | no_license | liuwei881/tensorflow_example | 5f7164f94a3cec63b47c78764fd6a3023de3247e | 5bed141ee0c64f3e62d508a171ed735edbfbffff | refs/heads/master | 2020-06-12T04:27:02.016333 | 2019-05-30T09:58:05 | 2019-05-30T09:58:05 | 194,194,167 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,990 | py | # coding=utf-8
import tensorflow as tensorflow
import tensorflow as tf  # the rest of the file refers to the module as `tf`
def get_weight(shape, lambd):
    """Create a weight variable and register its L2 regularization loss.

    Args:
        shape: shape of the weight tensor.
        lambd: L2 regularization coefficient.

    Returns:
        The newly created tf.Variable.
    """
    # Create a new variable with random-normal initialization.
    var = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    # add_to_collection appends this variable's L2 regularization loss to
    # a collection: the first argument 'losses' is the collection name,
    # the second is the value to add.
    tf.add_to_collection(
        'losses', tf.contrib.layers.l2_regularizer(lambd)(var))
    return var
x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))
batch_size = 8
# Number of nodes in each layer of the network.
layer_dimension = [2, 10, 10, 10, 1]
# Number of layers in the network.
n_layers = len(layer_dimension)
# Tracks the deepest layer built so far during forward propagation;
# starts out as the input layer.
cur_layer = x
# Number of nodes in the current layer.
in_dimension = layer_dimension[0]
for i in range(1, n_layers):
    # layer_dimension[i] is the node count of the next layer.
    out_dimension = layer_dimension[i]
    # Create this layer's weight variable and add its L2 regularization
    # loss to the graph's 'losses' collection.
    weight = get_weight([in_dimension, out_dimension], 0.001)
    bias = tf.Variable(tf.constant(0.1, shape=[out_dimension]))
    # Apply the ReLU activation.
    cur_layer = tf.nn.relu(tf.matmul(cur_layer, weight) + bias)
    # Before moving to the next layer, record its node count as the
    # new input dimension.
    in_dimension = layer_dimension[i]
# All L2 regularization losses were added to the collection while the
# forward pass was defined; here we only need the loss that measures
# the model's fit on the training data.
mse_loss = tf.reduce_mean(tf.square(y_ - cur_layer))
# Add the mean-squared-error loss to the same collection.
tf.add_to_collection('losses', mse_loss)
# get_collection returns a list of everything in the collection -- here,
# the individual pieces of the loss; summing them yields the final loss.
loss = tf.add_n(tf.get_collection('losses'))
| [
"liuweia@mail.open.com.cn"
] | liuweia@mail.open.com.cn |
c5b4385fbf41eb155af8c374bb450da4b85d0662 | 98efe1aee73bd9fbec640132e6fb2e54ff444904 | /loldib/getratings/models/NA/na_nocturne/na_nocturne_mid.py | e6ba0b8402afbf89e98fd709a52024e8f59a786d | [
"Apache-2.0"
] | permissive | koliupy/loldib | be4a1702c26546d6ae1b4a14943a416f73171718 | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | refs/heads/master | 2021-07-04T03:34:43.615423 | 2017-09-21T15:44:10 | 2017-09-21T15:44:10 | 104,359,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,821 | py | from getratings.models.ratings import Ratings
class NA_Nocturne_Mid_Aatrox(Ratings):
pass
class NA_Nocturne_Mid_Ahri(Ratings):
pass
class NA_Nocturne_Mid_Akali(Ratings):
pass
class NA_Nocturne_Mid_Alistar(Ratings):
pass
class NA_Nocturne_Mid_Amumu(Ratings):
pass
class NA_Nocturne_Mid_Anivia(Ratings):
pass
class NA_Nocturne_Mid_Annie(Ratings):
pass
class NA_Nocturne_Mid_Ashe(Ratings):
pass
class NA_Nocturne_Mid_AurelionSol(Ratings):
pass
class NA_Nocturne_Mid_Azir(Ratings):
pass
class NA_Nocturne_Mid_Bard(Ratings):
pass
class NA_Nocturne_Mid_Blitzcrank(Ratings):
pass
class NA_Nocturne_Mid_Brand(Ratings):
pass
class NA_Nocturne_Mid_Braum(Ratings):
pass
class NA_Nocturne_Mid_Caitlyn(Ratings):
pass
class NA_Nocturne_Mid_Camille(Ratings):
pass
class NA_Nocturne_Mid_Cassiopeia(Ratings):
pass
class NA_Nocturne_Mid_Chogath(Ratings):
pass
class NA_Nocturne_Mid_Corki(Ratings):
pass
class NA_Nocturne_Mid_Darius(Ratings):
pass
class NA_Nocturne_Mid_Diana(Ratings):
pass
class NA_Nocturne_Mid_Draven(Ratings):
pass
class NA_Nocturne_Mid_DrMundo(Ratings):
pass
class NA_Nocturne_Mid_Ekko(Ratings):
pass
class NA_Nocturne_Mid_Elise(Ratings):
pass
class NA_Nocturne_Mid_Evelynn(Ratings):
pass
class NA_Nocturne_Mid_Ezreal(Ratings):
pass
class NA_Nocturne_Mid_Fiddlesticks(Ratings):
pass
class NA_Nocturne_Mid_Fiora(Ratings):
pass
class NA_Nocturne_Mid_Fizz(Ratings):
pass
class NA_Nocturne_Mid_Galio(Ratings):
pass
class NA_Nocturne_Mid_Gangplank(Ratings):
pass
class NA_Nocturne_Mid_Garen(Ratings):
pass
class NA_Nocturne_Mid_Gnar(Ratings):
pass
class NA_Nocturne_Mid_Gragas(Ratings):
pass
class NA_Nocturne_Mid_Graves(Ratings):
pass
class NA_Nocturne_Mid_Hecarim(Ratings):
pass
class NA_Nocturne_Mid_Heimerdinger(Ratings):
pass
class NA_Nocturne_Mid_Illaoi(Ratings):
pass
class NA_Nocturne_Mid_Irelia(Ratings):
pass
class NA_Nocturne_Mid_Ivern(Ratings):
pass
class NA_Nocturne_Mid_Janna(Ratings):
pass
class NA_Nocturne_Mid_JarvanIV(Ratings):
pass
class NA_Nocturne_Mid_Jax(Ratings):
pass
class NA_Nocturne_Mid_Jayce(Ratings):
pass
class NA_Nocturne_Mid_Jhin(Ratings):
pass
class NA_Nocturne_Mid_Jinx(Ratings):
pass
class NA_Nocturne_Mid_Kalista(Ratings):
pass
class NA_Nocturne_Mid_Karma(Ratings):
pass
class NA_Nocturne_Mid_Karthus(Ratings):
pass
class NA_Nocturne_Mid_Kassadin(Ratings):
pass
class NA_Nocturne_Mid_Katarina(Ratings):
pass
class NA_Nocturne_Mid_Kayle(Ratings):
pass
class NA_Nocturne_Mid_Kayn(Ratings):
pass
class NA_Nocturne_Mid_Kennen(Ratings):
pass
class NA_Nocturne_Mid_Khazix(Ratings):
pass
class NA_Nocturne_Mid_Kindred(Ratings):
pass
class NA_Nocturne_Mid_Kled(Ratings):
pass
class NA_Nocturne_Mid_KogMaw(Ratings):
pass
class NA_Nocturne_Mid_Leblanc(Ratings):
pass
class NA_Nocturne_Mid_LeeSin(Ratings):
pass
class NA_Nocturne_Mid_Leona(Ratings):
pass
class NA_Nocturne_Mid_Lissandra(Ratings):
pass
class NA_Nocturne_Mid_Lucian(Ratings):
pass
class NA_Nocturne_Mid_Lulu(Ratings):
pass
class NA_Nocturne_Mid_Lux(Ratings):
pass
class NA_Nocturne_Mid_Malphite(Ratings):
pass
class NA_Nocturne_Mid_Malzahar(Ratings):
pass
class NA_Nocturne_Mid_Maokai(Ratings):
pass
class NA_Nocturne_Mid_MasterYi(Ratings):
pass
class NA_Nocturne_Mid_MissFortune(Ratings):
pass
class NA_Nocturne_Mid_MonkeyKing(Ratings):
pass
class NA_Nocturne_Mid_Mordekaiser(Ratings):
pass
class NA_Nocturne_Mid_Morgana(Ratings):
pass
class NA_Nocturne_Mid_Nami(Ratings):
pass
class NA_Nocturne_Mid_Nasus(Ratings):
pass
class NA_Nocturne_Mid_Nautilus(Ratings):
pass
class NA_Nocturne_Mid_Nidalee(Ratings):
pass
class NA_Nocturne_Mid_Nocturne(Ratings):
pass
class NA_Nocturne_Mid_Nunu(Ratings):
pass
class NA_Nocturne_Mid_Olaf(Ratings):
pass
class NA_Nocturne_Mid_Orianna(Ratings):
pass
class NA_Nocturne_Mid_Ornn(Ratings):
pass
class NA_Nocturne_Mid_Pantheon(Ratings):
pass
class NA_Nocturne_Mid_Poppy(Ratings):
pass
class NA_Nocturne_Mid_Quinn(Ratings):
pass
class NA_Nocturne_Mid_Rakan(Ratings):
pass
class NA_Nocturne_Mid_Rammus(Ratings):
pass
class NA_Nocturne_Mid_RekSai(Ratings):
pass
class NA_Nocturne_Mid_Renekton(Ratings):
pass
class NA_Nocturne_Mid_Rengar(Ratings):
pass
class NA_Nocturne_Mid_Riven(Ratings):
pass
class NA_Nocturne_Mid_Rumble(Ratings):
pass
class NA_Nocturne_Mid_Ryze(Ratings):
pass
class NA_Nocturne_Mid_Sejuani(Ratings):
pass
class NA_Nocturne_Mid_Shaco(Ratings):
pass
class NA_Nocturne_Mid_Shen(Ratings):
pass
class NA_Nocturne_Mid_Shyvana(Ratings):
pass
class NA_Nocturne_Mid_Singed(Ratings):
pass
class NA_Nocturne_Mid_Sion(Ratings):
pass
class NA_Nocturne_Mid_Sivir(Ratings):
pass
class NA_Nocturne_Mid_Skarner(Ratings):
pass
class NA_Nocturne_Mid_Sona(Ratings):
pass
class NA_Nocturne_Mid_Soraka(Ratings):
pass
class NA_Nocturne_Mid_Swain(Ratings):
pass
class NA_Nocturne_Mid_Syndra(Ratings):
pass
class NA_Nocturne_Mid_TahmKench(Ratings):
pass
class NA_Nocturne_Mid_Taliyah(Ratings):
pass
class NA_Nocturne_Mid_Talon(Ratings):
pass
class NA_Nocturne_Mid_Taric(Ratings):
pass
class NA_Nocturne_Mid_Teemo(Ratings):
pass
class NA_Nocturne_Mid_Thresh(Ratings):
pass
class NA_Nocturne_Mid_Tristana(Ratings):
pass
class NA_Nocturne_Mid_Trundle(Ratings):
pass
class NA_Nocturne_Mid_Tryndamere(Ratings):
pass
class NA_Nocturne_Mid_TwistedFate(Ratings):
pass
class NA_Nocturne_Mid_Twitch(Ratings):
pass
class NA_Nocturne_Mid_Udyr(Ratings):
pass
class NA_Nocturne_Mid_Urgot(Ratings):
pass
class NA_Nocturne_Mid_Varus(Ratings):
pass
class NA_Nocturne_Mid_Vayne(Ratings):
pass
class NA_Nocturne_Mid_Veigar(Ratings):
pass
class NA_Nocturne_Mid_Velkoz(Ratings):
pass
class NA_Nocturne_Mid_Vi(Ratings):
pass
class NA_Nocturne_Mid_Viktor(Ratings):
pass
class NA_Nocturne_Mid_Vladimir(Ratings):
pass
class NA_Nocturne_Mid_Volibear(Ratings):
pass
class NA_Nocturne_Mid_Warwick(Ratings):
pass
class NA_Nocturne_Mid_Xayah(Ratings):
pass
class NA_Nocturne_Mid_Xerath(Ratings):
pass
class NA_Nocturne_Mid_XinZhao(Ratings):
pass
class NA_Nocturne_Mid_Yasuo(Ratings):
pass
class NA_Nocturne_Mid_Yorick(Ratings):
pass
class NA_Nocturne_Mid_Zac(Ratings):
pass
class NA_Nocturne_Mid_Zed(Ratings):
pass
class NA_Nocturne_Mid_Ziggs(Ratings):
pass
class NA_Nocturne_Mid_Zilean(Ratings):
pass
class NA_Nocturne_Mid_Zyra(Ratings):
pass
| [
"noreply@github.com"
] | koliupy.noreply@github.com |
820d18976ceb036fc4e268a237ec43a25998aa45 | f5f771cd8600c2aeb7fc9b192d9084ec5fdf3616 | /lux/extensions/rest/user.py | 8c79233356ca1acf51458387f80667140e8413dc | [
"BSD-3-Clause"
] | permissive | SirZazu/lux | 75fe9fde4ddaee1c9c17e55c6e6d07a289ea2f5b | d647c34d11d1172d40e16b6afaba4ee67950fb5a | refs/heads/master | 2021-01-21T19:40:46.536485 | 2015-06-02T16:30:18 | 2015-06-02T16:30:18 | 36,931,033 | 0 | 3 | null | 2015-10-09T14:08:26 | 2015-06-05T12:15:21 | Python | UTF-8 | Python | false | false | 4,779 | py | import time
from importlib import import_module
from datetime import datetime, timedelta
from pulsar import PermissionDenied, Http404
from pulsar.utils.pep import to_bytes, to_string
import lux
from lux.utils.crypt import get_random_string, digest
__all__ = ['AuthenticationError', 'LoginError', 'LogoutError',
'MessageMixin', 'UserMixin', 'normalise_email', 'PasswordMixin',
'Anonymous', 'CREATE', 'READ', 'UPDATE', 'DELETE']
UNUSABLE_PASSWORD = '!'
CREATE = 30 # C
READ = 10 # R
UPDATE = 20 # U
DELETE = 40 # D
class AuthenticationError(ValueError):
pass
class LoginError(RuntimeError):
pass
class LogoutError(RuntimeError):
pass
class MessageMixin(object):
'''Mixin for models which support messages
'''
def success(self, message):
'''Store a ``success`` message to show to the web user
'''
self.message('success', message)
def info(self, message):
'''Store an ``info`` message to show to the web user
'''
self.message('info', message)
def warning(self, message):
'''Store a ``warning`` message to show to the web user
'''
self.message('warning', message)
def error(self, message):
'''Store an ``error`` message to show to the web user
'''
self.message('danger', message)
def message(self, level, message):
'''Store a ``message`` of ``level`` to show to the web user.
Must be implemented by session classes.
'''
raise NotImplementedError
def remove_message(self, data):
'''Remove a message from the list of messages'''
raise NotImplementedError
def get_messages(self):
'''Retrieve messages
'''
return ()
class UserMixin(MessageMixin):
'''Mixin for a User model
'''
email = None
def is_superuser(self):
return False
def is_authenticated(self):
'''Return ``True`` if the user is is_authenticated
'''
return True
def is_active(self):
return False
def is_anonymous(self):
return False
def get_id(self):
raise NotImplementedError
def get_oauths(self):
'''Return a dictionary of oauths account'''
return {}
def set_oauth(self, name, data):
raise NotImplementedError
def remove_oauth(self, name):
'''Remove a connected oauth account.
Return ``True`` if successfully removed
'''
raise NotImplementedError
def todict(self):
'''Return a dictionary with information about the user'''
def email_user(self, app, subject, body, sender=None):
backend = app.email_backend
backend.send_mail(app, sender, self.email, subject, body)
@classmethod
def get_by_username(cls, username):
'''Retrieve a user from username
'''
raise NotImplementedError
@classmethod
def get_by_email(cls, email):
raise NotImplementedError
@classmethod
def get_by_oauth(cls, name, identifier):
'''Retrieve a user from OAuth ``name`` with ``identifier``
'''
raise NotImplementedError
class Anonymous(UserMixin):
def is_authenticated(self):
return False
def is_anonymous(self):
return True
def get_id(self):
return 0
class PasswordMixin:
def on_config(self, app):
cfg = app.config
self.encoding = cfg['ENCODING']
self.secret_key = cfg['SECRET_KEY'].encode()
self.salt_size = cfg['AUTH_SALT_SIZE']
algorithm = cfg['CRYPT_ALGORITHM']
self.crypt_module = import_module(algorithm)
def decript(self, password=None):
if password:
p = self.crypt_module.decrypt(to_bytes(password, self.encoding),
self.secret_key)
return to_string(p, self.encoding)
else:
return UNUSABLE_PASSWORD
def encript(self, password):
p = self.crypt_module.encrypt(to_bytes(password, self.encoding),
self.secret_key, self.salt_size)
return to_string(p, self.encoding)
def password(self, raw_password=None):
if raw_password:
return self.encript(raw_password)
else:
return UNUSABLE_PASSWORD
def set_password(self, user, password):
'''Set the password for ``user``.
This method should commit changes.'''
pass
def normalise_email(email):
"""
Normalise the address by lowercasing the domain part of the email
address.
"""
email_name, domain_part = email.strip().rsplit('@', 1)
email = '@'.join([email_name, domain_part.lower()])
return email
| [
"luca.sbardella@gmail.com"
] | luca.sbardella@gmail.com |
7b9ed189a2e8a042b0c9614fde530a77d4c760df | 897d3299ef2eb9747ed21b9857b3c5dfda841f97 | /cnns/graphs/distortion_graph/distortion2.py | 6514b5b161779ddc90b89a85fcc4f0f9c3ebc379 | [
"Apache-2.0"
] | permissive | stjordanis/bandlimited-cnns | 59a286cdae16fb07d4418ac2008c34f7849c35da | e2b20efd391a971e128d62acc1801c81dc1bf4d7 | refs/heads/master | 2020-07-03T07:01:17.732061 | 2019-08-07T23:33:40 | 2019-08-07T23:33:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,469 | py | import matplotlib
# matplotlib.use('TkAgg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import csv
import os
print(matplotlib.get_backend())
# plt.interactive(True)
# http://ksrowell.com/blog-visualizing-data/2012/02/02/optimal-colors-for-graphs/
MY_BLUE = (56, 106, 177)
MY_RED = (204, 37, 41)
MY_ORANGE = (218, 124, 48)
MY_GREEN = (62, 150, 81)
MY_BLACK = (83, 81, 84)
MY_GOLD = (148, 139, 61)
def get_color(COLOR_TUPLE_255):
return [x / 255 for x in COLOR_TUPLE_255]
# fontsize=20
fontsize = 30
legend_size = 22
title_size = 30
font = {'size': fontsize}
matplotlib.rc('font', **font)
dir_path = os.path.dirname(os.path.realpath(__file__))
print("dir path: ", dir_path)
GPU_MEM_SIZE = 16280
def read_columns(dataset, columns=5):
file_name = dir_path + "/" + dataset + ".csv"
with open(file_name) as csvfile:
data = csv.reader(csvfile, delimiter=",", quotechar='|')
cols = []
for column in range(columns):
cols.append([])
for i, row in enumerate(data):
if i > 0: # skip header
for column in range(columns):
try:
cols[column].append(float(row[column]))
except ValueError as ex:
print("Exception: ", ex)
return cols
ylabel = "ylabel"
title = "title"
legend_pos = "center_pos"
bbox = "bbox"
file_name = "file_name"
column_nr = "column_nr"
labels = "labels"
legend_cols = "legend_cols"
xlim = "xlim"
ylim = "ylim"
carlini_cifar10 = {ylabel: "Accuracy (%)",
file_name: "distortionCarliniCifar",
title: "C&W L$_2$ CIFAR-10",
legend_pos: "upper right",
# bbox: (0.0, 0.0),
column_nr: 12,
legend_cols: 2,
labels: ['FC', 'CD', 'Unif', 'Gauss', 'Laplace', 'SVD'],
xlim: (0, 12),
ylim: (0, 100)}
carlini_imagenet = {ylabel: "Accuracy (%)",
file_name: "distortionCarliniImageNet",
title: "C&W L$_2$ ImageNet",
# legend_pos: "lower left",
legend_pos: "upper right",
# bbox: (0.0, 0.0),
column_nr: 12,
legend_cols: 2,
labels: ['FC', 'CD', 'Unif', 'Gauss', 'Laplace', 'SVD'],
xlim: (0, 100),
ylim: (0, 100)}
pgd_cifar10 = {ylabel: "Accuracy (%)",
file_name: "distortionPGDCifar",
title: "PGD L$_{\infty}$ CIFAR-10",
# legend_pos: "lower left",
legend_pos: "upper right",
# bbox: (0.0, 0.0),
column_nr: 12,
legend_cols: 2,
labels: ['FC', 'CD', 'Unif', 'Gauss', 'Laplace', 'SVD'],
xlim: (0, 12),
ylim: (0, 100)}
random_pgd_cifar10 = {ylabel: "Accuracy (%)",
file_name: "distortionRandomPGDCifar",
title: "PGD (random start) L$_{\infty}$ CIFAR-10",
# legend_pos: "lower left",
legend_pos: "upper right",
# bbox: (0.0, 0.0),
column_nr: 12,
legend_cols: 2,
labels: ['FC', 'CD', 'Unif', 'Gauss', 'Laplace', 'SVD'],
xlim: (0, 12),
ylim: (0, 100)}
pgd_imagenet = {ylabel: "Accuracy (%)",
file_name: "distortionPGDImageNet",
title: "PGD L$_{\infty}$ ImageNet",
# legend_pos: "lower left",
legend_pos: "upper right",
# bbox: (0.0, 0.0),
column_nr: 12,
legend_cols: 2,
labels: ['FC', 'CD', 'Unif', 'Gauss', 'Laplace', 'SVD'],
xlim: (0, 100),
ylim: (0, 100)}
fgsm_imagenet = {ylabel: "Accuracy (%)",
file_name: "distortionFGSMImageNet2",
title: "FGSM L$_{\infty}$ ImageNet",
# legend_pos: "lower left",
legend_pos: "upper right",
# bbox: (0.0, 0.0),
column_nr: 12,
legend_cols: 2,
labels: ['FC', 'CD', 'Unif', 'Gauss', 'Laplace', 'SVD'],
xlim: (0, 100),
ylim: (0, 100)}
colors = [get_color(color) for color in
[MY_GREEN, MY_BLUE, MY_ORANGE, MY_RED, MY_BLACK, MY_GOLD]]
markers = ["+", "o", "v", "s", "D", "^", "+"]
linestyles = [":", "-", "--", ":", "-", "--", ":", "-"]
datasets = [carlini_cifar10,
carlini_imagenet,
# pgd_cifar10,
random_pgd_cifar10,
pgd_imagenet,
fgsm_imagenet]
# width = 12
# height = 5
# lw = 3
fig_size = 10
width = 10
height = 10
line_width = 4
layout = "horizontal" # "horizontal" or "vertical"
fig = plt.figure(figsize=(len(datasets) * width, height))
for j, dataset in enumerate(datasets):
if layout == "vertical":
plt.subplot(len(datasets), 1, j + 1)
else:
plt.subplot(1, len(datasets), j + 1)
print("dataset: ", dataset)
columns = dataset[column_nr]
cols = read_columns(dataset[file_name], columns=columns)
print("col 0: ", cols[0])
print("col 1: ", cols[1])
for col in range(0, columns, 2):
if col == 8: # skip Laplace
continue
i = col // 2
plt.plot(cols[col], cols[col + 1], label=f"{dataset[labels][i]}",
lw=line_width,
color=colors[i], linestyle=linestyles[i])
plt.grid()
plt.legend(loc=dataset[legend_pos], ncol=dataset[legend_cols],
frameon=False,
prop={'size': legend_size},
# bbox_to_anchor=dataset[bbox]
)
plt.xlabel('L2 distortion')
plt.title(dataset[title], fontsize=title_size)
if j == 0:
plt.ylabel(dataset[ylabel])
plt.ylim(dataset[ylim])
plt.xlim(dataset[xlim])
# plt.gcf().autofmt_xdate()
# plt.xticks(rotation=0)
# plt.interactive(False)
# plt.imshow()
plt.subplots_adjust(hspace=0.3)
format = "pdf" # "pdf" or "png"
destination = dir_path + "/" + "distortionCarliniPgdFgsm2." + format
print("destination: ", destination)
fig.savefig(destination,
bbox_inches='tight',
# transparent=True
)
# plt.show(block=False)
# plt.interactive(False)
plt.close()
| [
"adam.dziedzi@gmail.com"
] | adam.dziedzi@gmail.com |
a3206f4e1b6da273c4478040585ac8b75ed083b0 | df2cbe914f463ad050d7ed26194424afbe3a0a52 | /addons/website_event_track_quiz/controllers/community.py | 7748a2dc1db19bac7ade948bd9df677a27aada67 | [
"Apache-2.0"
] | permissive | SHIVJITH/Odoo_Machine_Test | 019ed339e995be980606a2d87a63312ddc18e706 | 310497a9872db7844b521e6dab5f7a9f61d365a4 | refs/heads/main | 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 | Apache-2.0 | 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null | UTF-8 | Python | false | false | 4,477 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import math
from odoo import http
from odoo.addons.http_routing.models.ir_http import slug
from odoo.addons.website_event.controllers.community import EventCommunityController
from odoo.http import request
class WebsiteEventTrackQuizCommunityController(EventCommunityController):
_visitors_per_page = 30
_pager_max_pages = 5
@http.route(['/event/<model("event.event"):event>/community/leaderboard/results',
'/event/<model("event.event"):event>/community/leaderboard/results/page/<int:page>'],
type='http', auth="public", website=True, sitemap=False)
def leaderboard(self, event, page=1, lang=None, **kwargs):
values = self._get_community_leaderboard_render_values(event, kwargs.get('search'), page)
return request.render('website_event_track_quiz.event_leaderboard', values)
@http.route('/event/<model("event.event"):event>/community/leaderboard',
type='http', auth="public", website=True, sitemap=False)
def community_leaderboard(self, event, **kwargs):
values = self._get_community_leaderboard_render_values(event, None, None)
return request.render('website_event_track_quiz.event_leaderboard', values)
@http.route('/event/<model("event.event"):event>/community',
type='http', auth="public", website=True, sitemap=False)
def community(self, event, **kwargs):
values = self._get_community_leaderboard_render_values(event, None, None)
return request.render('website_event_track_quiz.event_leaderboard', values)
def _get_community_leaderboard_render_values(self, event, search_term, page):
values = self._get_leaderboard(event, search_term)
values.update({'event': event, 'search': search_term})
user_count = len(values['visitors'])
if user_count:
page_count = math.ceil(user_count / self._visitors_per_page)
url = '/event/%s/community/leaderboard/results' % (slug(event))
if values.get('current_visitor_position') and not page:
values['scroll_to_position'] = True
page = math.ceil(values['current_visitor_position'] / self._visitors_per_page)
elif not page:
page = 1
pager = request.website.pager(url=url, total=user_count, page=page, step=self._visitors_per_page,
scope=page_count if page_count < self._pager_max_pages else self._pager_max_pages)
values['visitors'] = values['visitors'][(page - 1) * self._visitors_per_page: (page) * self._visitors_per_page]
else:
pager = {'page_count': 0}
values.update({'pager': pager})
return values
def _get_leaderboard(self, event, searched_name=None):
current_visitor = request.env['website.visitor']._get_visitor_from_request(force_create=False)
track_visitor_data = request.env['event.track.visitor'].sudo().read_group(
[('track_id', 'in', event.track_ids.ids),
('visitor_id', '!=', False),
('quiz_points', '>', 0)],
['id', 'visitor_id', 'points:sum(quiz_points)'],
['visitor_id'], orderby="points DESC")
data_map = {datum['visitor_id'][0]: datum['points'] for datum in track_visitor_data if datum.get('visitor_id')}
leaderboard = []
position = 1
current_visitor_position = False
visitors_by_id = {
visitor.id: visitor
for visitor in request.env['website.visitor'].sudo().browse(data_map.keys())
}
for visitor_id, points in data_map.items():
visitor = visitors_by_id.get(visitor_id)
if not visitor:
continue
if (searched_name and searched_name.lower() in visitor.display_name.lower()) or not searched_name:
leaderboard.append({'visitor': visitor, 'points': points, 'position': position})
if current_visitor and current_visitor == visitor:
current_visitor_position = position
position = position + 1
return {
'top3_visitors': leaderboard[:3],
'visitors': leaderboard,
'current_visitor_position': current_visitor_position,
'current_visitor': current_visitor,
'searched_name': searched_name
}
| [
"36736117+SHIVJITH@users.noreply.github.com"
] | 36736117+SHIVJITH@users.noreply.github.com |
23a549649db32408d622701f911b724d3231db54 | 1d60c5a7b8ce6277bff514e376f79848f706344c | /Data Scientist with Python - Career Track /15. Interactive Data Visualization with Bokeh/02. Layouts, Interactions, and Annotations/03. Nesting rows and columns of plots.py | 4a34c4b1509d2e81c41dfc8db282991c297c849f | [] | no_license | DidiMilikina/DataCamp | 338c6e6d3b4f5b6c541c1aba155a36e9ee24949d | 3bf2cf3c1430190a7f8e54efda7d50a5fd66f244 | refs/heads/master | 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | '''
Nesting rows and columns of plots
You can create nested layouts of plots by combining row and column layouts. In this exercise, you'll make a 3-plot layout in two rows using the auto-mpg data set. Three plots have been created for you of average mpg vs year (avg_mpg), mpg vs hp (mpg_hp), and mpg vs weight (mpg_weight).
Your job is to use the row() and column() functions to make a two-row layout where the first row will have only the average mpg vs year plot and the second row will have mpg vs hp and mpg vs weight plots as columns.
By using the sizing_mode argument, you can scale the widths to fill the whole figure.
Instructions
100 XP
Import row and column from bokeh.layouts.
Create a row layout called row2 with the figures mpg_hp and mpg_weight in a list and set sizing_mode='scale_width'.
Create a column layout called layout with the figure avg_mpg and the row layout row2 in a list and set sizing_mode='scale_width'.
'''
SOLUTION
| [
"didimilikina8@gmail.com"
] | didimilikina8@gmail.com |
29c223f3be9ebfbf847f6f23addf34933a47d3bc | 0ef8d98726078a75bc9e4d7001ca3eb4d0dd43f4 | /tests/queries/select/where/expressive_tests.py | 3e8f8a6776f5a5bc95892070529b102fc1e6346f | [] | no_license | fuzeman/byte | b32a5ff02cb5aa37aa9f86b0ec2fa1814fa8838e | cfd552583a20afded620058e18b950fe344b0245 | refs/heads/master | 2021-01-20T11:56:07.164270 | 2017-05-19T05:47:48 | 2017-05-19T05:47:48 | 82,638,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,501 | py | from __future__ import absolute_import, division, print_function
from byte.collection import Collection
from byte.core.models.expressions.proxy import ProxyEqual, ProxyGreaterThanOrEqual, ProxyLessThan, ProxyNotEqual
from tests.base.models.dynamic.user import User
from hamcrest import *
users = Collection(User)
def test_simple():
"""Test select() query can be created with expressions."""
query = users.select().where(
User['id'] < 35,
User['username'] != 'alpha'
)
assert_that(query, has_property('state', has_entries({
# where()
'where': all_of(
has_length(2),
has_items(
# User['id'] < 35
all_of(instance_of(ProxyLessThan), has_properties({
'lhs': User['id'],
'rhs': 35
})),
# User['username'] != 'alpha'
all_of(instance_of(ProxyNotEqual), has_properties({
'lhs': User['username'],
'rhs': 'alpha'
}))
)
)
})))
def test_chain():
"""Test select() query can be created with chained expressions."""
query = users.select().where(
User['id'] >= 12
).where(
User['username'] != 'beta'
)
assert_that(query, has_property('state', has_entries({
# where()
'where': all_of(
has_length(2),
has_items(
# User['id'] >= 12
all_of(instance_of(ProxyGreaterThanOrEqual), has_properties({
'lhs': User['id'],
'rhs': 12
})),
# User['username'] != 'beta'
all_of(instance_of(ProxyNotEqual), has_properties({
'lhs': User['username'],
'rhs': 'beta'
}))
)
)
})))
def test_match():
"""Test select() query can be created with property matching expressions."""
query = users.select().where(
User['username'] == User['password']
)
assert_that(query, has_property('state', has_entries({
# where()
'where': all_of(
has_length(1),
has_items(
# User['username'] == User['password']
all_of(instance_of(ProxyEqual), has_properties({
'lhs': User['username'],
'rhs': User['password']
}))
)
)
})))
| [
"me@dgardiner.net"
] | me@dgardiner.net |
f606251b65cb39a42ee14338420816cf79dc988b | 67ebe31bd561bad451f4cc1274f89b06c3c4f1e5 | /ldLib/GUI/Button.py | 3ed66d1ee9e7f4b842bc93039004f422536b86dd | [] | no_license | Bobsleigh/LDEngine | 9c7e60a887c1c118fa5348aaf2891ea800bb26b6 | 110aaf53f7843e9f18579f156b38f57c0fcc0ba6 | refs/heads/master | 2020-12-24T10:23:58.963627 | 2020-08-25T20:42:33 | 2020-08-25T20:42:33 | 73,085,051 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,975 | py | import pygame
from app.settings import *
class Button(pygame.sprite.Sprite):
def __init__(self, pos, size, text, callback):
super().__init__()
self.method = callback
self.fontSize = 24
self.buttonFont = pygame.font.SysFont(FONT_NAME, self.fontSize)
self.width = size[0]
self.height = size[1]
self.image = pygame.Surface((self.width, self.height))
self.rect = self.image.get_rect()
self.rect.x = pos[0]
self.rect.y = pos[1]
self.borderButton = 5
self.interior = pygame.Rect(self.borderButton, self.borderButton, self.width - 2 * self.borderButton,
self.height - 2 * self.borderButton)
self.text = text
self.textPos = [0, 0]
self.isSelected = False
# Color
self.color1 = COLOR_MENU_1
self.color2 = COLOR_MENU_2
def doNothing(self):
print('You did nothing')
def update(self):
if self.isSelected:
self.color1 = COLOR_MENU_SELECT_1
self.color2 = COLOR_MENU_SELECT_2
self.printedText = self.buttonFont.render(self.text, True, COLOR_MENU_FONTS_SELECT)
else:
self.color1 = COLOR_MENU_1
self.color2 = COLOR_MENU_2
self.printedText = self.buttonFont.render(self.text, True, COLOR_MENU_FONTS)
self.setUpgradeSpec()
self.image.fill(self.color2)
self.image.fill(self.color1, self.interior)
self.image.blit(self.printedText, self.textPos)
def setUpgradeSpec(self):
self.textPos = [(self.image.get_width() - self.printedText.get_width()) / 2,
(self.image.get_height() - self.printedText.get_height()) / 2]
def notify(self, event):
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == MOUSE_LEFT:
if self.rect.collidepoint(event.pos):
self.method() | [
"philippe.gendreau.2@ulaval.ca"
] | philippe.gendreau.2@ulaval.ca |
b35a279c534a0599d23de680ac9e6fad6a6ead3c | b46f5825b809c0166622149fc5561c23750b379c | /AppImageBuilder/app_dir/bundlers/apt/package_lists.py | 2a9ebdbd72908f76f446d8de376dd6127592ef65 | [
"MIT"
] | permissive | gouchi/appimage-builder | 22b85cb682f1b126515a6debd34874bd152a4211 | 40e9851c573179e066af116fb906e9cad8099b59 | refs/heads/master | 2022-09-28T09:46:11.783837 | 2020-06-07T19:44:48 | 2020-06-07T19:44:48 | 267,360,199 | 0 | 0 | MIT | 2020-05-27T15:42:25 | 2020-05-27T15:42:24 | null | UTF-8 | Python | false | false | 1,936 | py | # Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
apt_core_packages = [
'util-linux',
'coreutils',
'adduser',
'avahi-daemon',
'base-files',
'bind9-host',
'consolekit',
'dbus',
'debconf',
'dpkg',
'lsb-base',
'libcap2-bin',
'libinput-bin',
'multiarch-support',
'passwd',
'systemd',
'systemd-sysv',
'ucf',
'iso-codes',
'shared-mime-info',
'mount',
'xdg-user-dirs',
'sysvinit-utils',
'debianutils',
'init-system-helpers',
'libpam-runtime',
'libpam-modules-bin',
]
apt_font_config_packages = [
'libfontconfig*',
'fontconfig',
'fontconfig-config',
'libfreetype*',
]
apt_xclient_packages = [
'x11-common',
'libx11-*',
'libxcb1',
'libxcb-shape0',
'libxcb-shm0',
'libxcb-glx0',
'libxcb-xfixes0',
'libxcb-present0',
'libxcb-render0',
'libxcb-dri2-0',
'libxcb-dri3-0',
]
apt_graphics_stack_packages = [
'libglvnd*',
'libglx*',
'libgl1*',
'libdrm*',
'libegl1*',
'libegl1-*',
'libglapi*',
'libgles2*',
'libgbm*',
'mesa-*',
]
apt_glibc_packages = ['libc6', 'zlib1g', 'libstdc++6']
# packages required by the runtime generators
apt_proot_apprun_packages = ['proot', 'coreutils']
apt_classic_apprun_packages = ['coreutils']
apt_wrapper_apprun_packages = []
| [
"contact@azubieta.net"
] | contact@azubieta.net |
40fa34f31d61c6e5ac53b3bd7e6e3f4adeb6fd93 | 79661312d54643ce9dcfe3474058f514b01bfbe6 | /model/main_window_8_btc.py | 7259dfb9dd2b86c0e24dfdde1dc30162d7187831 | [] | no_license | davis-9fv/Project | 5c4c8ac03f5bf9db28704e63de9b004f56a52f10 | f2bd22b3ac440b91d1d1defc8da9e2ba2e67265e | refs/heads/master | 2020-03-20T22:24:07.244521 | 2019-02-28T16:58:04 | 2019-02-28T16:58:04 | 137,796,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,750 | py | from sklearn.utils import shuffle
import datetime
from Util import algorithm
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from math import sqrt
from Util import misc
from Util import data_misc
import numpy
def compare(y_test, y_predicted):
predictions = list()
for i in range(len(y_test)):
X = x_test[i]
yhat = y_predicted[i]
yhat = data_misc.invert_scale(scaler, X, yhat)
#Stationary
d = avg_values[split + window_size - 1:]
yhat = data_misc.inverse_difference(d, yhat, len(y_test) + 1 - i)
predictions.append(yhat)
d = avg_values[split + window_size + 1:]
#d = avg_values[split + window_size :]
rmse = sqrt(mean_squared_error(d, predictions))
#rmse = sqrt(mean_squared_error(y_test, y_predicted))
return rmse, predictions
seed = 5
numpy.random.seed(seed)
time_start = datetime.datetime.now()
result = list()
shuffle_data = False
write_file = False
print('Start time: %s' % str(time_start.strftime('%Y-%m-%d %H:%M:%S')))
print('Shuffle: %i' % (shuffle_data))
path = 'C:/tmp/bitcoin/'
#input_file = 'bitcoin_usd_11_10_2018.csv'
input_file = 'bitcoin_usd_bitcoin_block_chain_trend_by_day.csv'
window_size = 7 # 7
result = list()
print('')
print('')
print('Window Size: %i' % (window_size))
# To pair with the other models, this model gets 1438 first rows.
series = read_csv(path + input_file, header=0, sep=',', nrows=1438)
series = series.iloc[::-1]
date = series['Date']
avg = series['Avg']
date = date.iloc[window_size:]
date = date.values
avg_values = avg.values
# Stationary Data
diff_values = data_misc.difference(avg_values, 1)
#diff_values= avg_values
supervised = data_misc.timeseries_to_supervised(diff_values, window_size)
# The first [Window size number] contains zeros which need to be cut.
supervised = supervised.values[window_size:, :]
if shuffle_data:
supervised = shuffle(supervised, random_state=9)
size_supervised = len(supervised)
split = int(size_supervised * 0.80)
train, test = supervised[0:split], supervised[split:]
# transform the scale of the data
scaler, train_scaled, test_scaled = data_misc.scale(train, test)
x_train, y_train = train_scaled[:, 0:-1], train_scaled[:, -1]
x_test, y_test = test_scaled[:, 0:-1], test_scaled[:, -1]
print('Size size_supervised %i' % (size_supervised))
print('------- Test --------')
# No Prediction
y_hat_predicted_es = y_test
rmse, y_hat_predicted = compare(y_test, y_hat_predicted_es)
print('RMSE NoPredic %.3f' % (rmse))
# Dummy
y_predicted_dummy_es = x_test[:, 0]
rmse, y_predicted_dummy = compare(y_test, y_predicted_dummy_es)
print('RMSE Dummy %.3f' % (rmse))
# ElasticNet
y_predicted_en_es, y_future_en_es = algorithm.elastic_net(x_train, y_train, x_test, y_test, normalize=False)
rmse, y_predicted_en = compare(y_test, y_predicted_en_es)
print('RMSE Elastic %.3f' % (rmse))
# y_future_en = compare(y_test, y_future_en_es)
# KNN5
y_predicted_knn5_es = algorithm.knn_regressor(x_train, y_train, x_test, 5)
rmse, y_predicted_knn5 = compare(y_test, y_predicted_knn5_es)
print('RMSE KNN(5) %.3f' % (rmse))
# KNN10
y_predicted_knn10_es = algorithm.knn_regressor(x_train, y_train, x_test, 10)
rmse, y_predicted_knn10 = compare(y_test, y_predicted_knn10_es)
print('RMSE KNN(10) %.3f' % (rmse))
# SGD
y_predicted_sgd_es = algorithm.sgd_regressor(x_train, y_train, x_test)
rmse, y_predicted_sgd = compare(y_test, y_predicted_sgd_es)
print('RMSE SGD %.3f' % (rmse))
# Lasso
y_predicted_la_sc = algorithm.lasso(x_train, y_train, x_test, normalize=False)
rmse, y_predicted_la = compare(y_test, y_predicted_la_sc)
print('RMSE Lasso %.3f' % (rmse))
# LSTM
y_predicted_lstm = algorithm.lstm(x_train, y_train, x_test, batch_size=1, nb_epoch=60, neurons=14)
rmse, y_predicted_lstm = compare(y_test, y_predicted_lstm)
print('RMSE LSTM %.3f' % (rmse))
titles = ['Y', 'ElasticNet', 'KNN5', 'KNN10', 'SGD', 'Lasso', 'LSTM']
data = [y_hat_predicted, y_predicted_en, y_predicted_knn5, y_predicted_knn10, y_predicted_sgd, y_predicted_la,
y_predicted_lstm]
# titles = ['Y', 'ElasticNet', 'ElasticNet Future', 'KNN5', 'KNN10', 'SGD']
# data = [y_test, y_predicted_en, y_future_en, y_predicted_knn5, y_predicted_knn10]
# y_future_en = y_future_en[1]
# data = [y_hat_predicted, y_predicted_en, y_future_en, y_predicted_knn5, y_predicted_knn10, y_predicted_sgd]
date_test = date[split + 1:]
print('Length date test:' + str(len(date_test)))
print('Length data test:' + str(len(y_test)))
misc.plot_lines_graph('Stationary - Normalization,Test Data ', date_test, titles, data)
time_end = datetime.datetime.now()
print('End time: %s' % str(time_end.strftime('%Y-%m-%d %H:%M:%S')))
print('Duration of the script: %s' % (str(time_end - time_start)))
| [
"francisco.vinueza@alterbios.com"
] | francisco.vinueza@alterbios.com |
f4cfce085f2bee40324b89a91182e3026dbc3fec | 4fd84e0e1097d1153ed477a5e76b4972f14d273a | /myvirtualenv/lib/python3.7/site-packages/azure/servicefabric/models/cluster_health_report_expired_event.py | d09441c8e9559e4bdf055e5d5613c9698a8815f4 | [
"MIT"
] | permissive | peterchun2000/TerpV-U | c045f4a68f025f1f34b89689e0265c3f6da8b084 | 6dc78819ae0262aeefdebd93a5e7b931b241f549 | refs/heads/master | 2022-12-10T09:31:00.250409 | 2019-09-15T15:54:40 | 2019-09-15T15:54:40 | 208,471,905 | 0 | 2 | MIT | 2022-12-08T06:09:33 | 2019-09-14T16:49:41 | Python | UTF-8 | Python | false | false | 3,889 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .cluster_event import ClusterEvent
class ClusterHealthReportExpiredEvent(ClusterEvent):
"""Cluster Health Report Expired event.
All required parameters must be populated in order to send to Azure.
:param event_instance_id: Required. The identifier for the FabricEvent
instance.
:type event_instance_id: str
:param time_stamp: Required. The time event was logged.
:type time_stamp: datetime
:param has_correlated_events: Shows there is existing related events
available.
:type has_correlated_events: bool
:param kind: Required. Constant filled by server.
:type kind: str
:param source_id: Required. Id of report source.
:type source_id: str
:param property: Required. Describes the property.
:type property: str
:param health_state: Required. Describes the property health state.
:type health_state: str
:param time_to_live_ms: Required. Time to live in milli-seconds.
:type time_to_live_ms: long
:param sequence_number: Required. Sequence number of report.
:type sequence_number: long
:param description: Required. Description of report.
:type description: str
:param remove_when_expired: Required. Indicates the removal when it
expires.
:type remove_when_expired: bool
:param source_utc_timestamp: Required. Source time.
:type source_utc_timestamp: datetime
"""
_validation = {
'event_instance_id': {'required': True},
'time_stamp': {'required': True},
'kind': {'required': True},
'source_id': {'required': True},
'property': {'required': True},
'health_state': {'required': True},
'time_to_live_ms': {'required': True},
'sequence_number': {'required': True},
'description': {'required': True},
'remove_when_expired': {'required': True},
'source_utc_timestamp': {'required': True},
}
_attribute_map = {
'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
'kind': {'key': 'Kind', 'type': 'str'},
'source_id': {'key': 'SourceId', 'type': 'str'},
'property': {'key': 'Property', 'type': 'str'},
'health_state': {'key': 'HealthState', 'type': 'str'},
'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'},
'sequence_number': {'key': 'SequenceNumber', 'type': 'long'},
'description': {'key': 'Description', 'type': 'str'},
'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'},
'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'},
}
def __init__(self, **kwargs):
super(ClusterHealthReportExpiredEvent, self).__init__(**kwargs)
self.source_id = kwargs.get('source_id', None)
self.property = kwargs.get('property', None)
self.health_state = kwargs.get('health_state', None)
self.time_to_live_ms = kwargs.get('time_to_live_ms', None)
self.sequence_number = kwargs.get('sequence_number', None)
self.description = kwargs.get('description', None)
self.remove_when_expired = kwargs.get('remove_when_expired', None)
self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None)
self.kind = 'ClusterHealthReportExpired'
| [
"peterchun2000@gmail.com"
] | peterchun2000@gmail.com |
c1d6d7777b160f039547f7ae9d7740a8f555281d | 9e8e8026e575bbe791770ec4b8630c818b1aab61 | /backend/perfil/models.py | d07743edf026397cb4aa4515b4a336db51bd98fc | [
"MIT"
] | permissive | marcusgabrields/gabr | d4b47e0df35dfca4e8ce1e657c0e4e77cded18ec | 95ade6094ed7675ca267f2f16f77f0033eae9c1f | refs/heads/master | 2023-01-12T16:25:16.610427 | 2020-04-16T23:55:37 | 2020-04-16T23:55:37 | 249,736,516 | 0 | 0 | MIT | 2023-01-05T16:58:31 | 2020-03-24T14:53:42 | Python | UTF-8 | Python | false | false | 542 | py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from common.models import TimeStampedModel
class Perfil(TimeStampedModel):
    """User profile: a one-to-one extension of ``users.User`` carrying a
    display name, a unique slug and an optional avatar URL.  Creation and
    modification timestamps are inherited from ``TimeStampedModel``."""
    # URL-friendly unique identifier for the profile.
    slug = models.SlugField(_('slug'), max_length=300, unique=True)
    # The profile shares its primary key with the owning user and is
    # deleted together with it (CASCADE).
    user = models.OneToOneField(
        'users.User',
        on_delete=models.CASCADE,
        primary_key=True,
        verbose_name=_('user'),
    )
    name = models.CharField(_('name'), max_length=255)
    # NOTE(review): null=True without blank=True lets the DB store NULL
    # while forms still require a value — confirm this is intended.
    avatar = models.URLField(_('avatar'), null=True)
    def __str__(self):
        return self.name
| [
"marcusgabriel.ds@gmail.com"
] | marcusgabriel.ds@gmail.com |
03d43e7eadc35e0ce127897a908e6e2af12eedee | 832eec4d9e618f9f3bdaeec259a79884283ac817 | /books/admin.py | 53abe34d35bd74067b5ad741bcf89a34861fc492 | [] | no_license | mconstantin/books-project | d0a5035014c9e61c5331b64b8879fce694e06540 | aa9acc64bf9a4bd654e98eaad5afbc23adbea312 | refs/heads/master | 2021-01-23T04:28:44.006164 | 2017-03-26T00:51:53 | 2017-03-26T00:51:53 | 86,197,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,202 | py | from django.contrib import admin
from .models import Publisher, Author, Book, BookCategory, BookFormat
# class BooksInLine(admin.StackedInline):
class BooksInLine(admin.TabularInline):
    """
    Inline editor that lists a publisher's books in tabular (multicolumn)
    form inside the parent admin page.
    """
    model = Book
    # Render forms for 2 extra, empty books.
    extra = 2
    # 'fieldsets' groups the model's fields as a list of 2-tuples:
    # (fieldset title or None for no title, {'fields': [field names]}).
    fieldsets = [
        (None, {'fields': ['title', 'authors']}),
        ("Publishing Information", {'fields': ['publisher', 'pub_date', 'isbn']}),
        ("Book Information", {'fields': ['format', 'category']})
    ]
    # Add a filter sidebar on the right-hand side; for date fields Django
    # provides predefined filters, e.g. "last 7 days".
    list_filter = ['pub_date']
class PublisherAdmin(admin.ModelAdmin):
    """Admin page for publishers, with an inline table of their books."""
    # Change-list columns ('published_books' is presumably a Publisher
    # model method/attribute — verify it exists).
    list_display = ['name', 'website', 'published_books', 'address', 'city', 'state', 'country']
    # 'fieldsets' grouping for an individual publisher's edit form.
    fieldsets = [
        (None, {'fields': ['name', 'website']}),
        ('Address', {'fields': ['address', 'city', 'state', 'country']})
    ]
    # For a specific publisher, show its books using the BooksInLine UI.
    inlines = [BooksInLine]
    list_filter = ['name', 'city']
class BookAdmin(admin.ModelAdmin):
    """Admin page for individual books."""
    model = Book
    # Group the edit form's fields into titled sections.
    fieldsets = [
        (None, {'fields': ['title', 'authors']}),
        ("Publishing Information", {'fields': ['publisher', 'pub_date', 'isbn']}),
        ("Book Information", {'fields': ['format', 'category']})
    ]
    # Change-list columns ('authors_names' / 'publisher_name' are
    # presumably Book methods or attributes — verify they exist).
    list_display = ['title', 'authors_names', 'publisher_name', 'pub_date', 'isbn']
    list_filter = ['pub_date']
# register all the models with the corresponding new templates (if any), with the admin site
admin.site.register(Publisher, PublisherAdmin)
admin.site.register(Author)
admin.site.register(BookCategory)
admin.site.register(BookFormat)
admin.site.register(Book, BookAdmin) | [
"constantinm@sharplabs.com"
] | constantinm@sharplabs.com |
7788549a3662ca1e2a17c904ac5f22ecd49ac69b | 67416177cd9e221db0b20332c02dcc7680fcdd0e | /이것이 취업을 위한 코딩 테스트다/Chapter06_Sorting/Q02.py | 5f904fd8060abd698a7a21fca906fefb6368fa34 | [] | no_license | svclaw2000/Algorithm | 4fe5e3bf50888b974df4f3d87387a003b5249352 | b6d92cf0d18997e9e973d5f731ecb44a7935d93a | refs/heads/main | 2023-06-21T21:50:13.089719 | 2021-07-11T14:18:47 | 2021-07-11T14:18:47 | 363,825,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | # 00:02 / 00:20
# Read one student per line as "name score", sort ascending by score
# (ties broken alphabetically by name), then print the sorted names on a
# single space-separated line.
n = int(input())
records = []
for _ in range(n):
    fields = input().split()
    # Store as (score, name) so plain tuple ordering sorts by score first.
    records.append((int(fields[1]), fields[0]))
print(" ".join(name for _, name in sorted(records)))
"svclaw2000@gmail.com"
] | svclaw2000@gmail.com |
24e45112ef804b859c5fac4694945f26f9faf26d | f1738cd603e0b2e31143f4ebf7eba403402aecd6 | /ucs/test/ucs-test/tests/52_s4connector/s4connector.py | 4931adb730b3043036fbd0357bf0b3e45c5f64b7 | [] | no_license | m-narayan/smart | 92f42bf90d7d2b24f61915fac8abab70dd8282bc | 1a6765deafd8679079b64dcc35f91933d37cf2dd | refs/heads/master | 2016-08-05T17:29:30.847382 | 2013-01-04T04:50:26 | 2013-01-04T04:50:26 | 7,079,786 | 8 | 6 | null | 2015-04-29T08:54:12 | 2012-12-09T14:56:27 | Python | UTF-8 | Python | false | false | 3,597 | py | import ldap
import univention.config_registry
from ldap.controls import LDAPControl
import ldap.modlist as modlist
import time
import ldap_glue_s4
import univention.s4connector.s4 as s4
configRegistry = univention.config_registry.ConfigRegistry()
configRegistry.load()
class S4Connection(ldap_glue_s4.LDAPConnection):
    '''Helper functions to create and modify objects in a Samba4/AD LDAP.

    All connection parameters are read from the Univention Config
    Registry below ``<configbase>/s4/ldap/...`` (default config base:
    "connector").
    '''

    def __init__(self, configbase='connector', no_starttls=False):
        """Read connection settings from UCR and connect to the S4 LDAP."""
        self.configbase = configbase
        self.adldapbase = configRegistry['%s/s4/ldap/base' % configbase]
        # Derive the DNS-style domain name from the LDAP base,
        # e.g. "DC=example,DC=com" -> "example.com".
        self.addomain = self.adldapbase.replace(',DC=', '.').replace('DC=', '')
        self.login_dn = configRegistry['%s/s4/ldap/binddn' % configbase]
        self.pw_file = configRegistry['%s/s4/ldap/bindpw' % configbase]
        self.host = configRegistry['%s/s4/ldap/host' % configbase]
        self.port = configRegistry['%s/s4/ldap/port' % configbase]
        # BUGFIX: the key was previously looked up literally as
        # '%s/s4/ldap/ssl' (configbase was never interpolated), so the
        # registry value was ignored and the default "no" always used.
        self.ssl = configRegistry.get('%s/s4/ldap/ssl' % configbase, "no")
        self.ca_file = configRegistry['%s/s4/ldap/certificate' % configbase]
        self.protocol = configRegistry.get('%s/s4/ldap/protocol' % self.configbase, 'ldap').lower()
        self.socket = configRegistry.get('%s/s4/ldap/socket' % self.configbase, '')
        self.connect(no_starttls)

    def createuser(self, username, position=None, cn=None, sn=None, description=None):
        """Create an AD user below *position* (default: cn=users,<base>)."""
        if not position:
            position = 'cn=users,%s' % self.adldapbase
        if not cn:
            cn = username
        if not sn:
            sn = 'SomeSurName'
        newdn = 'cn=%s,%s' % (cn, position)
        attrs = {}
        attrs['objectclass'] = ['top', 'user', 'person', 'organizationalPerson']
        attrs['cn'] = cn
        attrs['sn'] = sn
        attrs['sAMAccountName'] = username
        attrs['userPrincipalName'] = '%s@%s' % (username, self.addomain)
        attrs['displayName'] = '%s %s' % (username, sn)
        if description:
            attrs['description'] = description
        self.create(newdn, attrs)

    def group_create(self, groupname, position=None, description=None):
        """Create an AD group below *position* (default: cn=groups,<base>)."""
        if not position:
            position = 'cn=groups,%s' % self.adldapbase
        attrs = {}
        attrs['objectclass'] = ['top', 'group']
        attrs['sAMAccountName'] = groupname
        if description:
            attrs['description'] = description
        self.create('cn=%s,%s' % (groupname, position), attrs)

    def getprimarygroup(self, user_dn):
        """Return the DN of *user_dn*'s primary group, or None if the user
        lookup fails."""
        import re
        try:
            res = self.lo.search_ext_s(user_dn, ldap.SCOPE_BASE, timeout=10)
        except ldap.LDAPError:
            # Narrowed from a bare "except:", which also swallowed
            # KeyboardInterrupt and programming errors.
            return None
        primaryGroupID = res[0][1]['primaryGroupID'][0]
        res = self.lo.search_ext_s(self.adldapbase,
                                   ldap.SCOPE_SUBTREE,
                                   'objectClass=group'.encode('utf8'),
                                   timeout=10)
        # The primary group is the one whose SID ends in the user's
        # primaryGroupID (the RID).
        regex = '^(.*?)-%s$' % primaryGroupID
        for r in res:
            if r[0] is None or r[0] == 'None':
                continue  # Referral
            if re.search(regex, s4.decode_sid(r[1]['objectSid'][0])):
                return r[0]

    def setprimarygroup(self, user_dn, group_dn):
        """Make *group_dn* the primary group of *user_dn* (by its RID)."""
        import re
        res = self.lo.search_ext_s(group_dn, ldap.SCOPE_BASE, timeout=10)
        # The RID is the last dash-separated component of the group's SID.
        groupid = (re.search('^(.*)-(.*?)$', s4.decode_sid(res[0][1]['objectSid'][0]))).group(2)
        self.set_attribute(user_dn, 'primaryGroupID', groupid)

    def container_create(self, name, position=None, description=None):
        """Create a cn=<name> container below *position* (default: LDAP base)."""
        if not position:
            position = self.adldapbase
        attrs = {}
        attrs['objectClass'] = ['top', 'container']
        attrs['cn'] = name
        if description:
            attrs['description'] = description
        self.create('cn=%s,%s' % (name, position), attrs)

    def createou(self, name, position=None, description=None):
        """Create an ou=<name> organizational unit below *position*
        (default: LDAP base)."""
        if not position:
            position = self.adldapbase
        attrs = {}
        attrs['objectClass'] = ['top', 'organizationalUnit']
        attrs['ou'] = name
        if description:
            attrs['description'] = description
        self.create('ou=%s,%s' % (name, position), attrs)
| [
"kartik@debian.org"
] | kartik@debian.org |
b5c44cbbab50c96d3ed02f99623993dae708a4fa | fce280d1a9ef78784d28409c47865ec92402fad4 | /019Echarts/Demo_Geo.py | b5e92a127be827c15759193c16ca3deb9aa35c9c | [] | no_license | angus138/--- | 204fa9f5713fc3cee1ec814b0d600e5e4f413ab1 | 39ea3e51f32e093c01afae6984363afaaa5e120f | refs/heads/master | 2020-12-12T13:39:56.871807 | 2020-01-15T11:06:20 | 2020-01-15T11:06:20 | 234,139,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | # coding:utf-8
"""
create on Jan 2, 2020 By Wayne Yu
Function:
基于Pyecharts的Geo图
"""
from pyecharts.faker import Faker
from pyecharts import options as opts
from pyecharts.charts import Geo
from pyecharts.globals import ChartType, SymbolType
from pyecharts.charts import Map
def geo_base() -> Geo:
    """Build a basic Geo chart over a map of China.

    Data points come from pyecharts' ``Faker`` demo helper (one value per
    province); per-point labels are hidden and a visual-map legend is shown.
    """
    c = (
        Geo()
        .add_schema(maptype="china")
        # One (province, demo value) pair per data point.
        .add("geo", [list(z) for z in zip(Faker.provinces, Faker.values())])
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            visualmap_opts=opts.VisualMapOpts(),
            title_opts=opts.TitleOpts(title="Geo-基本示例"),
        )
    )
    return c
def map_world() -> Map:
    """Build a world map chart with one demo value per country
    (data from pyecharts' ``Faker`` helper); per-country labels hidden,
    visual-map legend capped at 200."""
    c = (
        Map()
        # One (country, demo value) pair per map region.
        .add("商家A", [list(z) for z in zip(Faker.country, Faker.values())], "world")
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            title_opts=opts.TitleOpts(title="Map-世界地图"),
            visualmap_opts=opts.VisualMapOpts(max_=200),
        )
    )
    return c
map_world().render("world_2D_render.html")
| [
"ieeflsyu@outlook.com"
] | ieeflsyu@outlook.com |
305c1d5c727d5961cbda6d9218376a6e0b1f7e8c | 7944d2fd5d885a034347a986f3114f0b81166447 | /facebookads/adobjects/transactioncurrencyamount.py | a8258dea5f141931966835b4349ea71a84f93500 | [] | no_license | it-devros/django-facebook-api | 4fd94d1bbbff664f0314e046f50d91ee959f5664 | ee2d91af49bc2be116bd10bd079c321bbf6af721 | refs/heads/master | 2021-06-23T06:29:07.664905 | 2019-06-25T07:47:50 | 2019-06-25T07:47:50 | 191,458,626 | 2 | 0 | null | 2021-06-10T21:33:08 | 2019-06-11T22:22:47 | Python | UTF-8 | Python | false | false | 1,864 | py | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebookads.adobjects.abstractobject import AbstractObject
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class TransactionCurrencyAmount(
    AbstractObject,
):
    """Auto-generated Graph API model for a money amount: an amount plus
    its currency and a total amount (all transported as strings)."""
    def __init__(self, api=None):
        super(TransactionCurrencyAmount, self).__init__()
        self._isTransactionCurrencyAmount = True
        self._api = api
    class Field(AbstractObject.Field):
        # Wire-level field names for this model.
        amount = 'amount'
        currency = 'currency'
        total_amount = 'total_amount'
    # Maps field names to their wire types; all plain strings here.
    _field_types = {
        'amount': 'string',
        'currency': 'string',
        'total_amount': 'string',
    }
    @classmethod
    def _get_field_enum_info(cls):
        # No enum-typed fields on this model.
        field_enum_info = {}
        return field_enum_info
| [
"it-devros@outlook.com"
] | it-devros@outlook.com |
6306d40d8de8fa5309662dd4786bb511bd60a1f6 | 4182f5c371c15b8f79bc744b8bed0965ffd13c79 | /backend/pytx/views.py | fc5d4aa8a7a85c01fd20e630902f9ff9259702e4 | [] | no_license | pytexas/PyTexas2015 | f4db37a7d43ee523272311139f480189ecba02cd | f4648581a197e2f9387f61c2b94a8f178298becc | refs/heads/master | 2021-05-30T18:47:47.431250 | 2015-11-21T20:52:43 | 2015-11-21T20:52:43 | 17,159,683 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | import os
import subprocess
from django import http
from django.views import static
from django.conf import settings
from pytx.utils import index_generator
def favicon(request):
return static.serve(request, 'favicon.ico', settings.FRONT_ROOT)
def default_conf(request):
return http.HttpResponseRedirect(settings.DEFAULT_CONF + '/')
def index(request, conf_slug):
conf_slug = conf_slug.split('/')[0]
html = index_generator(conf_slug, dev=True)
return http.HttpResponse(html)
def less_view(request):
less = os.path.join(settings.BASE_DIR, '..', 'frontend', 'css', 'pytx.less')
pipe = subprocess.Popen(
"lessc {}".format(less),
shell=True,
stdout=subprocess.PIPE)
return http.HttpResponse(pipe.stdout, content_type="text/css")
def frontend(request, *args, **kwargs):
return http.HttpResponse(
"Front-End Should Serve This URL",
content_type="text/plain")
| [
"paul.m.bailey@gmail.com"
] | paul.m.bailey@gmail.com |
a6faf47fb558554f6674362748930f2d99227172 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-sae/aliyunsdksae/request/v20190506/UpdateNamespaceRequest.py | dccc5256a59542e159be6e9d83c4f970cab07b3c | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,483 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdksae.endpoint import endpoint_data
class UpdateNamespaceRequest(RoaRequest):
    """ROA-style request for the SAE ``UpdateNamespace`` operation
    (API version 2019-05-06): updates a namespace's name, description and
    micro-registration flag via PUT /pop/v1/paas/namespace."""
    def __init__(self):
        RoaRequest.__init__(self, 'sae', '2019-05-06', 'UpdateNamespace','serverless')
        self.set_uri_pattern('/pop/v1/paas/namespace')
        self.set_method('PUT')
        # Wire up per-region endpoint data when the base class supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
    # Plain query-parameter accessors (auto-generated SDK style).
    def get_NamespaceName(self): # String
        return self.get_query_params().get('NamespaceName')
    def set_NamespaceName(self, NamespaceName): # String
        self.add_query_param('NamespaceName', NamespaceName)
    def get_NamespaceDescription(self): # String
        return self.get_query_params().get('NamespaceDescription')
    def set_NamespaceDescription(self, NamespaceDescription): # String
        self.add_query_param('NamespaceDescription', NamespaceDescription)
    def get_EnableMicroRegistration(self): # Boolean
        return self.get_query_params().get('EnableMicroRegistration')
    def set_EnableMicroRegistration(self, EnableMicroRegistration): # Boolean
        self.add_query_param('EnableMicroRegistration', EnableMicroRegistration)
    def get_NamespaceId(self): # String
        return self.get_query_params().get('NamespaceId')
    def set_NamespaceId(self, NamespaceId): # String
        self.add_query_param('NamespaceId', NamespaceId)
    def get_NameSpaceShortId(self): # String
        return self.get_query_params().get('NameSpaceShortId')
    def set_NameSpaceShortId(self, NameSpaceShortId): # String
        self.add_query_param('NameSpaceShortId', NameSpaceShortId)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
ed26c4174c273e492eba64f98d9537ec1ad20864 | f3d38d0e1d50234ce5f17948361a50090ea8cddf | /CodeUp/파이썬 풀이/1230번 ; 터널 통과하기 2.py | 3d02cf7a4fd78a7e1ee60df5860d0451ae9e76c7 | [] | no_license | bright-night-sky/algorithm_study | 967c512040c183d56c5cd923912a5e8f1c584546 | 8fd46644129e92137a62db657187b9b707d06985 | refs/heads/main | 2023-08-01T10:27:33.857897 | 2021-10-04T14:36:21 | 2021-10-04T14:36:21 | 323,322,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | # https://codeup.kr/problem.php?id=1230
# readline을 사용하기 위해 import합니다.
from sys import stdin
# 세 터널의 높이 a, b, c를 공백으로 구분해 입력합니다.
# 각각 int형으로 변환합니다.
a, b, c = map(int, stdin.readline().split())
# 세 터널의 높이 a, b, c를 차례대로 170 보다 큰 지 검사해봅니다.
# 첫 번째 터널의 높이가 170 초과라면
if a > 170:
# 두 번째 터널의 높이가 170 초과라면
if b > 170:
# 세 번째 터널의 높이가 170 초과라면
if c > 170:
# 세 터널 모두 170 초과이므로 차가 잘 통과합니다.
# 문자열 'PASS'를 출력합니다.
print('PASS')
# 첫 번째, 두 번째 터널은 통과했는데
# 세 번째 터널의 높이가 170 이하라면
else:
# 세 번째 터널에서 사고가 나므로 문자열 'CRASH'와
# 세 번째 터널의 높이 c값을 공백으로 구분해 출력합니다.
print('CRASH', c)
# 첫 번째 터널은 통과했는데
# 두 번째 터널의 높이가 170 이하라면
else:
# 두 번째 터널에서 사고가 나므로 문자열 'CRASH'와
# 두 번째 터널의 높이 b값을 공백으로 구분해 출력합니다.
print('CRASH', b)
# 첫 번째 터널의 높이가 170 이하라면
else:
# 첫 번째 터널에서 사고가 나므로 문자열 'CRASH'와
# 첫 번째 터널의 높이 a값을 공백으로 구분해 출력합니다.
print('CRASH', a) | [
"bright_night_sky@naver.com"
] | bright_night_sky@naver.com |
a19e8ca6e4d0aacdea80fd56b4f663d4369843c5 | 9795dda526b3436de26c73353021a0651a6762f9 | /pyefun/typeConv.py | 6d16e72c8caf0defa6bb38fc430f6bf3af2868de | [
"Apache-2.0"
] | permissive | brucekk4/pyefun | 891fc08897e4662823cf9016a680c07b31a8d5be | 1b4d8e13ee2c59574fded792e3f2a77e0b5e11a2 | refs/heads/master | 2023-07-10T03:54:11.437283 | 2021-08-23T17:46:19 | 2021-08-23T17:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | """
.. Hint::
类型转换
.. literalinclude:: ../../../pyefun/typeConv_test.py
:language: python
:caption: 代码示例
:linenos:
"""
from .timeBase import *
import json
def 到文本(bytes):
    # Decode a UTF-8 byte string to text.  The parameter name shadows the
    # builtin ``bytes``; the builtin ``str`` is still reachable.
    return str(bytes, encoding="utf-8")
def 到字节集(str):
    # Encode text to a UTF-8 byte string.  The parameter name shadows the
    # builtin ``str``; the builtin ``bytes`` is still reachable.
    return bytes(str, encoding='utf-8')
def 到数值(val):
    # Convert a value to float.
    return float(val)
def 到整数(val):
    # Convert to int, accepting numeric strings like "3.7" by going
    # through float first (truncates toward zero).
    return int(float(val))
def 到时间(str):
    # Parse a date/time string via 创建日期时间 from timeBase.
    return 创建日期时间(str)
def json到文本(obj):
    # Serialize a Python object to a JSON string.
    return json.dumps(obj)
def json解析(obj):
    # Parse a JSON string into Python objects.
    return json.loads(obj)
"ll@163.com"
] | ll@163.com |
41017bba584e3df78520c70725466e1e02d28e2d | ad0e853db635edc578d58891b90f8e45a72a724f | /python/ray/train/lightning/__init__.py | a827dcd064ab3cc2b3b8ce362535db6e20395342 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | ericl/ray | 8c93fc713af3b753215d4fe6221278700936e2db | e9a1c6d814fb1a81033809f56695030d651388f5 | refs/heads/master | 2023-08-31T11:53:23.584855 | 2023-06-07T21:04:28 | 2023-06-07T21:04:28 | 91,077,004 | 2 | 4 | Apache-2.0 | 2023-01-11T17:19:10 | 2017-05-12T09:51:04 | Python | UTF-8 | Python | false | false | 638 | py | # isort: off
try:
import pytorch_lightning # noqa: F401
except ModuleNotFoundError:
raise ModuleNotFoundError(
"PyTorch Lightning isn't installed. To install PyTorch Lightning, "
"please run 'pip install pytorch-lightning'"
)
# isort: on
from ray.train.lightning.lightning_checkpoint import LightningCheckpoint
from ray.train.lightning.lightning_predictor import LightningPredictor
from ray.train.lightning.lightning_trainer import (
LightningTrainer,
LightningConfigBuilder,
)
__all__ = [
"LightningTrainer",
"LightningConfigBuilder",
"LightningCheckpoint",
"LightningPredictor",
]
| [
"noreply@github.com"
] | ericl.noreply@github.com |
b73ea34e6add92014bf1249f36194a4bccd11194 | 6921b29c09905e910c97c799fdb1c5249dff0274 | /pyocd/coresight/gpr.py | b0ecaff545f1a43b824d27b15bc33ad403f98186 | [
"Apache-2.0"
] | permissive | huaqli/pyOCD | b476a0d58cf55cc4855bea33b2c7a3afc37f7f35 | ee8324de9e0219a0e6e28e686c81fa5af3637479 | refs/heads/master | 2022-04-16T17:22:33.861865 | 2020-04-20T16:44:07 | 2020-04-20T16:44:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,215 | py | # pyOCD debugger
# Copyright (c) 2018 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .component import CoreSightComponent
from ..utility.timeout import Timeout
ACK_TIMEOUT = 5.0
class GPR(CoreSightComponent):
    """! @brief Granular Power Requestor.
    Currently only supports enabling power domains.
    """
    # Register offsets within the GPR component.
    # NOTE(review): request and ack are both declared at offset 0x0 here —
    # confirm the CPWRUPACK offset against the ADI specification.
    CPWRUPREQ = 0x0
    CPWRUPACK = 0x0
    # Mask for the DEVID field holding the number of power domains.
    CPWRUPM_COUNT_MASK = 0x3f
    @classmethod
    def factory(cls, ap, cmpid, address):
        """! @brief Return a GPR for this component, preferring the
        instance created during ROM table scanning."""
        # Attempt to return the same instance that was created during ROM table scanning.
        if cmpid.parent_rom_table is not None:
            rom_gpr = cmpid.parent_rom_table.gpr
            if rom_gpr is not None and rom_gpr.address == address:
                return rom_gpr
        # No luck, create a new instance.
        gpr = cls(ap, cmpid, address)
        return gpr
    def __init__(self, ap, cmpid=None, addr=None):
        super(GPR, self).__init__(ap, cmpid, addr)
    def init(self):
        """! @brief Inits the GPR by reading the domain count from DEVID."""
        self.domain_count = self.cmpid.devid[2] & self.CPWRUPM_COUNT_MASK
    def _power_up(self, mask):
        """! @brief Enable power to power domains by mask.
        @param self
        @param mask Bitmask of the domains to power up.
        @retval True Requested domains were successfully powered on.
        @return False Timeout waiting for power ack bit(s) to set.
        """
        # Enable power up request bits.
        self.ap.write32(self.address + self.CPWRUPREQ, mask)
        # Poll until every requested ack bit is set, or time out.
        with Timeout(ACK_TIMEOUT) as t_o:
            while t_o.check():
                value = self.ap.read32(self.address + self.CPWRUPACK)
                if (value & mask) == mask:
                    return True
            else:
                # while-else: reached only when the timeout loop exits
                # without returning True.
                return False
    def power_up_all(self):
        """! @brief Enable power to all available power domains.
        @param self
        @retval True All domains were successfully powered on.
        @return False Timeout waiting for power ack bit(s) to set.
        """
        # One request bit per domain reported by init().
        mask = (1 << self.domain_count) - 1
        return self._power_up(mask)
    def power_up_one(self, domain_id):
        """! @brief Power up a single power domain by domain ID.
        @param self
        @param domain_id Integer power domain ID.
        @retval True Requested domain was powered on successfully.
        @return False Timeout waiting for power ack bit to set.
        """
        mask = 1 << domain_id
        return self._power_up(mask)
    def __repr__(self):
        return "<GPR @ %x: count=%d>" % (id(self), self.domain_count)
| [
"flit@me.com"
] | flit@me.com |
69c02c736463a9b65e274fce99476ca679810c4f | 2603f28e3dc17ae2409554ee6e1cbd315a28b732 | /ABC181/prob_d.py | 4907374640a893c516218398622363e984976b2f | [] | no_license | steinstadt/AtCoder | 69f172280e89f4249e673cae9beab9428e2a4369 | cd6c7f577fcf0cb4c57ff184afdc163f7501acf5 | refs/heads/master | 2020-12-23T12:03:29.124134 | 2020-11-22T10:47:40 | 2020-11-22T10:47:40 | 237,144,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | # Problem D - Hachi
from itertools import permutations
# input
S = list(input())
# initialization
ans = False
def conv_nums(nums):
    """Collapse a list of digits: return digits 1-9 that occur in *nums*,
    as strings in ascending order, each repeated at most three times
    (zeros are dropped)."""
    collapsed = []
    for digit in range(1, 10):
        # Cap each digit at three occurrences.
        occurrences = min(nums.count(digit), 3)
        collapsed.extend([str(digit)] * occurrences)
    return collapsed
def check_eight(num):
    """Return True iff *num* is divisible by 8.

    The previous implementation halved the number and inspected the last
    two digits of the half (valid because 100 is a multiple of 4), but it
    raised IndexError whenever num // 2 had fewer than two digits
    (e.g. num == 8).  A direct modulo test is both correct and simpler.
    """
    return num % 8 == 0
# check
if len(S)==1:
tmp = int(S[0])
if tmp%8==0:
ans = True
elif len(S)==2:
tmp = int(S[0] + S[1])
if tmp%8==0:
ans = True
tmp = int(S[1] + S[0])
if tmp%8==0:
ans = True
else:
T = list(map(int, S))
T = conv_nums(T) # 数字の集合をまとめる
T_len = len(T)
for i in range(T_len-2):
for j in range(i+1, T_len-1):
for k in range(j+1, T_len):
tmp_list = [T[i], T[j], T[k]]
for tmp_p in permutations(tmp_list):
check_result = check_eight(int("".join(tmp_p)))
if check_result:
ans = True
# output
if ans:
print("Yes")
else:
print("No")
| [
"steinstadt@keio.jp"
] | steinstadt@keio.jp |
a5e75047a5d44c391ab9fe5a3c6909b31a774f11 | 0f54a2a03fba8e231bfd2a14785a7f091b4b88ac | /WeeklyProcessor/3. Analysing & Cleaning Content/column_cleaners/business_cleaner.py | 6d08b30c9eb7d94fa33c09233a891de80532a431 | [] | no_license | LukaszMalucha/WeeklyProcessor | ced08869397e54fb7c0a26a53a760c74868942c8 | b9c787248f41f6a30e34c4c13db08ce4d0834f52 | refs/heads/master | 2022-06-01T08:40:25.377037 | 2020-05-03T18:13:21 | 2020-05-03T18:13:21 | 243,991,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 19 17:55:52 2020
@author: jmalucl
"""
def business_cleaner(dataset):
    """Normalise the 'Business' column of *dataset* (a pandas DataFrame).

    Steps:
    - Fill missing values with "Not Specified".
    - Remove the boiler-plate phrase "other choices available" and any
      stray parentheses, then trim surrounding whitespace.
    - Copy the result into a new lowercase 'business' column and drop the
      original 'Business' column.

    Returns the modified DataFrame.
    """
    # BUGFIX: the original called fillna() without assigning the result,
    # so missing values were never actually filled.
    dataset['Business'] = dataset['Business'].fillna("Not Specified")
    # Literal replacements: regex=False avoids escaping '(' / ')' and is
    # stable across pandas versions (str.replace's default changed to
    # regex=False in pandas 2.0, which broke the old "\\(" patterns).
    dataset['Business'] = dataset['Business'].str.replace("other choices available", "", regex=False)
    dataset['Business'] = dataset['Business'].str.replace("(", "", regex=False)
    dataset['Business'] = dataset['Business'].str.replace(")", "", regex=False)
    dataset['Business'] = dataset['Business'].str.strip()
    dataset['business'] = dataset['Business']
    dataset = dataset.drop(['Business'], axis=1)
    return dataset
| [
"lucasmalucha@gmail.com"
] | lucasmalucha@gmail.com |
1a15250cb5546c6b48ee83829dba429154c20d41 | c9bb8998bde76bf88117a5d8f710621cd824df14 | /tests/cupy_tests/cuda_tests/test_driver.py | fbc1e33d94198e127f8636923c068e6bf7df5cd6 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hephaex/cupy | 4a0d8198b889a26d9b958ec9c31346ec58598e49 | 5cf50a93bbdebe825337ed7996c464e84b1495ba | refs/heads/master | 2020-06-06T18:55:49.827170 | 2019-06-19T21:00:21 | 2019-06-19T21:00:21 | 192,827,521 | 1 | 0 | MIT | 2019-06-20T01:28:56 | 2019-06-20T01:28:56 | null | UTF-8 | Python | false | false | 990 | py | import threading
import unittest
import cupy
from cupy.cuda import driver
class TestDriver(unittest.TestCase):
    """Tests for the CUDA driver wrapper's current-context query."""
    def test_ctxGetCurrent(self):
        """ctxGetCurrent returns a non-NULL context once one exists."""
        # Make sure to create context.
        cupy.arange(1)
        self.assertNotEqual(0, driver.ctxGetCurrent())
    def test_ctxGetCurrent_thread(self):
        """ctxGetCurrent is per-thread: NULL on a fresh thread, non-NULL
        after that thread performs its first CUDA operation."""
        # Make sure to create context in main thread.
        cupy.arange(1)
        def f(self):
            # Query before and after the first CUDA call on this thread.
            self._result0 = driver.ctxGetCurrent()
            cupy.arange(1)
            self._result1 = driver.ctxGetCurrent()
        self._result0 = None
        self._result1 = None
        t = threading.Thread(target=f, args=(self,))
        t.daemon = True
        t.start()
        t.join()
        # The returned context pointer must be NULL on sub thread
        # without valid context.
        self.assertEqual(0, self._result0)
        # After the context is created, it should return the valid
        # context pointer.
        self.assertNotEqual(0, self._result1)
| [
"webmaster@kenichimaehashi.com"
] | webmaster@kenichimaehashi.com |
6c3187ec0a176e1dda16a1d8fa32a1350f49b595 | e1c14c3b3ed552f1af97f427c342be70d8e3b27f | /src/yMaths/print-combinations-integers-sum-given-number.py | d21539fee7022d456e69899e5a0e5284fbccefa3 | [
"MIT"
] | permissive | mohitsaroha03/The-Py-Algorithms | 4ab7285b6ea2ce0a008203b425ec3f459995664b | b5ba58602c0ef02c7664ea0be8bf272a8bd5239c | refs/heads/master | 2023-01-28T09:25:33.317439 | 2020-12-05T06:17:00 | 2020-12-05T06:17:00 | 280,189,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | # Link: https://www.techiedelight.com/print-combinations-integers-sum-given-number/
# IsDone: 1
# Recursive function to print all combination of positive integers
# in increasing order that sum to a given number
def printCombinations(A, i, sum, sum_left):
    """Recursively print every non-decreasing sequence of positive
    integers summing to `sum`; A[0:i] holds the choices made so far and
    `sum_left` is the remainder still to be reached."""
    # Candidates start at the previously chosen value so the sequence
    # stays non-decreasing (1 when nothing has been chosen yet).
    start = A[i - 1] if i > 0 else 1
    for candidate in range(start, sum + 1):
        A[i] = candidate
        if sum_left > candidate:
            # Remainder left: fill the next slot recursively.
            printCombinations(A, i + 1, sum, sum_left - candidate)
        elif sum_left == candidate:
            # Remainder exactly consumed: A[0..i] is one combination.
            print(A[:i + 1])


def findCombinations(sum):
    """Print all combinations of positive integers that sum to `sum`,
    each in increasing order."""
    # Scratch list reused by the recursion; `sum` slots is the maximum
    # possible length (all ones).
    scratch = [0] * sum
    printCombinations(scratch, 0, sum, sum)
if __name__ == '__main__':
sum = 5
findCombinations(sum) | [
"MohitSaroha@Etechaces.com"
] | MohitSaroha@Etechaces.com |
0b2fd01ae9041f32ba9b913c03009fd5954e14ec | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02574/s478211086.py | e348f33b7bcd5b939cd80827f0dd178d0a30ffa6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py | from math import gcd
from functools import reduce
from collections import defaultdict
n=int(input())
a=list(map(int,input().split()))
g=reduce(gcd,a)
if g!=1:
print("not coprime")
exit()
def isPrimeMR(n):
d = n - 1
d = d // (d & -d)
L = [2]
for a in L:
t = d
y = pow(a, t, n)
if y == 1: continue
while y != n - 1:
y = (y * y) % n
if y == 1 or t == n - 1: return 0
t <<= 1
return 1
def findFactorRho(n):
m = 1 << n.bit_length() // 8
for c in range(1, 99):
f = lambda x: (x * x + c) % n
y, r, q, g = 2, 1, 1, 1
while g == 1:
x = y
for i in range(r):
y = f(y)
k = 0
while k < r and g == 1:
ys = y
for i in range(min(m, r - k)):
y = f(y)
q = q * abs(x - y) % n
g = gcd(q, n)
k += m
r <<= 1
if g == n:
g = 1
while g == 1:
ys = f(ys)
g = gcd(abs(x - ys), n)
if g < n:
if isPrimeMR(g): return g
elif isPrimeMR(n // g): return n // g
return findFactorRho(g)
def primeFactor(n):
i = 2
ret = {}
rhoFlg = 0
while i*i <= n:
k = 0
while n % i == 0:
n //= i
k += 1
if k: ret[i] = k
i += 1 + i % 2
if i == 101 and n >= 2 ** 20:
while n > 1:
if isPrimeMR(n):
ret[n], n = 1, 1
else:
rhoFlg = 1
j = findFactorRho(n)
k = 0
while n % j == 0:
n //= j
k += 1
ret[j] = k
if n > 1: ret[n] = 1
if rhoFlg: ret = {x: ret[x] for x in sorted(ret)}
return ret
#d=defaultdict(int)
s=set()
for q in a:
p=primeFactor(q)
for j in p:
if j in s:
print("setwise coprime")
exit()
s.add(j)
print("pairwise coprime") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f0595be963dc4b4499181013831a9970ba963495 | 180e1e947f3f824cb2c466f51900aa12a9428e1c | /pattern4/hamburg_store_v5/src/SichuanIngredientsFactory.py | db6d7e34726ae46ecf5b21659ff3352834e2f368 | [
"MIT"
] | permissive | icexmoon/design-pattern-with-python | 216f43a63dc87ef28a12d5a9a915bf0df3b64f50 | bb897e886fe52bb620db0edc6ad9d2e5ecb067af | refs/heads/main | 2023-06-15T11:54:19.357798 | 2021-07-21T08:46:16 | 2021-07-21T08:46:16 | 376,543,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | #######################################################
#
# SichuanIngredientsFactory.py
# Python implementation of the Class SichuanIngredientsFactory
# Generated by Enterprise Architect
# Created on: 19-6��-2021 21:34:31
# Original author: 70748
#
#######################################################
from .Chicken import Chicken
from .Pepper import Pepper
from .SichuanPepper import SichuanPepper
from .ThreeYellowChicken import ThreeYellowChicken
from .IngredientsFactory import IngredientsFactory
class SichuanIngredientsFactory(IngredientsFactory):
    """Concrete IngredientsFactory supplying Sichuan-style ingredients."""
    def getChicken(self) -> Chicken:
        # Sichuan stores use three-yellow chicken.
        return ThreeYellowChicken()
    def getPepper(self) -> Pepper:
        # Sichuan stores use Sichuan pepper.
        return SichuanPepper()
| [
"icexmoon@qq.com"
] | icexmoon@qq.com |
fdd539cdf1889df24696c662f74796e12e6ae49e | b8ed6b49f25d08a0a313d749f3e40d7a5b59dfc9 | /torch/fx/experimental/fx_acc/acc_op_properties.py | e2f53d7c48194b54b5f9ff834e1f73c13267da68 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | yinchimaoliang/pytorch | 191f154ede241d6c61f3600f2987f2a9ec637c92 | ecf7e96969dec08f5e0091f1584557f13c290c18 | refs/heads/master | 2023-08-22T07:05:37.055667 | 2021-10-26T00:42:38 | 2021-10-26T00:44:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | from collections import defaultdict
from enum import Flag, auto
from typing import Callable, DefaultDict, Set
import torch
import torch.fx
class AccOpProperty(Flag):
    """Optimization-relevant traits that an acc_op may carry."""
    pointwise = auto()
    quantized = auto()


# Forward map: acc_op callable -> set of its registered properties.
acc_op_properties: DefaultDict[Callable, Set[AccOpProperty]] = defaultdict(set)
# Reverse map: property -> set of acc_ops that carry it.
acc_ops_with_property: DefaultDict[AccOpProperty, Set[Callable]] = defaultdict(set)


def register_acc_op_properties(*properties: AccOpProperty):
    """
    Decorator factory: record *properties* for the decorated acc_op in
    both lookup tables and return the op unchanged.
    """
    def decorator(acc_op: Callable):
        acc_op_properties[acc_op].update(properties)
        for trait in properties:
            acc_ops_with_property[trait].add(acc_op)
        return acc_op
    return decorator


def add_optimization_properties_to_meta(mod: torch.fx.GraphModule) -> None:
    """
    Copy each node's registered acc_op properties into ``node.meta`` so
    optimization passes can consult them without the global tables.
    """
    for node in mod.graph.nodes:
        node.meta['acc_op_properties'] = acc_op_properties[node.target]
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
d3ec5b088a135db2f9b6e929ee4d35e3c6dee45b | 4f510470b3093ab2c60f929221af82c79b121ca7 | /python_net/day6/clock.py | c2d5b6098c3805e3c03231421859c85e08e5ae48 | [] | no_license | q737645224/python3 | ce98926c701214f0fc7da964af45ba0baf8edacf | 4bfabe3f4bf5ba4133a16102c51bf079d500e4eb | refs/heads/master | 2020-03-30T07:11:17.202996 | 2018-10-30T06:14:51 | 2018-10-30T06:14:51 | 150,921,088 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | from multiprocessing import Process
import time
class ClockProcess(Process):
    """Process that, once started, prints the current time five times,
    sleeping *value* seconds before each print."""
    def __init__(self,value):
        # Call the parent class' __init__.
        super().__init__()
        self.value = value
    # Override the run method.
    def run(self):
        for i in range(5):
            time.sleep(self.value)
            print("The time is {}".format(time.ctime()))
p = ClockProcess(2)
# start() executes run() automatically in the child process.
p.start()
p.join()
| [
"764375224@qq.com"
] | 764375224@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.