blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
341f476266c35eab353587eb301e389470a302a9 | 0fdb402809188c34702bc70e4d106e56ca8e2bd0 | /Algorithms/tkinter.py | 68887798977aa716073634bbb8f0b59f3dc37a4a | [] | no_license | the07/Python | 356f2018a85caeb9dd6ccb251636ff697eb613b6 | af34cf3ffe01504632cf3654a0a5f89653e163cb | refs/heads/master | 2021-01-06T20:36:33.718087 | 2017-11-24T06:58:32 | 2017-11-24T06:58:32 | 90,789,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | from tkinter import *
from PIL import Image, ImageTk
class Window(Frame):
def __init__(self, master = None):
Frame.__init__(self, master)
self.master = master
self.init_window()
def init_window(self):
self.master.title("GUI")
self.pack(fill=BOTH, expand=1)
#quitButton = Button(self, text="X",command=self.client_exit)
#quitButton.place(x=0,y=0)
menu = Menu(self.master)
self.master.config(menu=menu)
file = Menu(menu)
file.add_command(label='Exit', command=self.client_exit)
file.add_command(label='Save', command=self.client_exit)
menu.add_cascade(label='File', menu=file)
edit = Menu(menu)
edit.add_command(label='Show Image', command=self.showImg)
edit.add_command(label='Show Text', command=self.showTxt)
menu.add_cascade(label='Edit', menu=edit)
def client_exit(self):
exit()
def showImg(self):
load=Image.open('pic.jpg')
render = ImageTk.PhotoImage(load)
img = Label(self, image=render)
img.image = render
img.place(x=10,y=10)
def showTxt(self):
text = Label(self, text='Hey there good looking')
text.pack()
root = Tk()
root.geometry("400x300")
app = Window(root)
root.mainloop()
| [
"thegauravks@gmail.com"
] | thegauravks@gmail.com |
1a16098041fbca03cf063a8d634ba651c06669a2 | 1c83920efda583d0dcedda2ac9d91235094685e2 | /web/appauth/constants.py | c9214cd0195bd4ac937081777d0947385459c7d5 | [] | no_license | eshandas/django_project_template | d866d2d8c5e206b0430e6130bc470042af50b7fa | 09786f6201d8e83199a2c0b7a83b6b6b0c8fd285 | refs/heads/master | 2022-07-22T14:39:50.521081 | 2019-08-06T11:00:19 | 2019-08-06T11:00:19 | 65,455,207 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | class ResponseKeys(object):
SESSION_ID = 'sessionId'
USER = 'user'
class SuccessMessages(object):
PASSWORD_RESET = 'An email has been sent to change the password'
class FailMessages(object):
USER_INACTIVE = 'This user is not active'
INVALID_CREDENTIALS = 'Wrong username or password'
INVALID_EMAIL = 'This email does not exist'
INVALID_PASSWORD = 'Invalid password'
USER_ALREADY_EXISTS = 'This user already exists'
INVALID_SESSION_ID = 'Invalid Session Id'
TOKEN_MISSING = 'Token missing'
INVALID_TOKEN = 'Invalid token'
NOT_ADMIN = 'User is not an admin'
AUTH_HEADER_INVALID = 'Invalid Authorization'
INVALID_VALUE = 'Invalid merchant id or app id or secret'
class RequestKeys(object):
EMAIL = 'email'
PASSWORD = 'password'
CONFIRM_PASSWORD = 'confirm_password'
NEXT = 'next'
TOKEN = 'token'
class ResponseKeys(object):
SESSION_ID = 'sessionId'
USER = 'user'
LOGGED_OUT = 'loggedOut'
| [
"eshandasnit@gmail.com"
] | eshandasnit@gmail.com |
16a12bf01a5c8f66c081745f20fe8d9e7257cbfc | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /eve-8.51.857815/trinutils/bindings.py | e5220ff7fcd107047c5e0b787445c9b1c58c06ab | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,701 | py | #Embedded file name: trinutils\bindings.py
"""
Functions for dealing with bad value bindings on objects.
"""
import logging
import osutils.subst as subst
import trinity
logger = logging.getLogger(__name__)
def HasBrokenBindings(trinObj):
bindings = trinObj.Find('trinity.TriValueBinding')
for binding in bindings:
if not binding.destinationObject or not binding.sourceObject:
return True
if binding.destinationObject.GetRefCounts()[1] == 2:
return True
return False
def FixBrokenBindings(trinObj):
curveSets = trinObj.Find('trinity.TriCurveSet')
allBindings = trinObj.Find('trinity.TriValueBinding')
deleteCs = []
knownUsedCurves = []
deleteBinds = []
for cs in curveSets:
for binding in cs.bindings:
if not binding.destinationObject or not binding.sourceObject:
deleteBinds.append(binding)
elif binding.destinationObject.GetRefCounts()[1] == 2:
deleteBinds.append(binding)
else:
knownUsedCurves.append(binding.sourceObject)
for d in deleteBinds:
logger.info('Deleting binding: %s' % d.name)
cs.bindings.remove(d)
for cs in curveSets:
deleteCurves = []
for curve in cs.curves:
if curve not in knownUsedCurves:
usedElsewhere = False
for b in allBindings:
if b.sourceObject == curve and b not in deleteBinds:
usedElsewhere = True
logger.info('Curve found being used outside its curveset: %s' % curve.name)
break
if not usedElsewhere:
deleteCurves.append(curve)
for d in deleteCurves:
logger.info('Deleting curve: %s' % d.name)
cs.curves.remove(d)
for cs in curveSets:
if not cs.curves and not cs.bindings:
deleteCs.append(cs)
for d in deleteCs:
if hasattr(trinObj, 'curveSets'):
for cs in trinObj.curveSets:
if d == cs:
logger.info('Deleting curve set: %s' % d.name)
trinObj.curveSets.remove(d)
continue
return trinObj
def RepairFile(filePath):
filePath = subst.GetUnsubstedPath(filePath)
logger.info('==== File:%s====' % filePath)
original = trinity.Load(filePath)
if original:
if HasBrokenBindings(original):
logger.info('Broken bindings found!')
new = FixBrokenBindings(original)
trinity.Save(new, filePath)
else:
logger.info('No broken bindings found!')
| [
"billchang.e@gmail.com"
] | billchang.e@gmail.com |
1fb7deb6e862121c82bd11e1f35fb92ae1ba4494 | 4e5b112b32cc2eeffb39f7111122d0df13da4117 | /Cap 9/Ex9.7.py | 00af0098ccf8d32da4be756c75dd5d51b9252471 | [
"MIT"
] | permissive | FelipeDreissig/PenseEmPy | 13c194f307a8ade747872efb1f4e50848f3c71a3 | 158a55d0e6bd06c8eadaa9159e816a1e4beb0ff7 | refs/heads/main | 2023-02-10T13:30:41.254544 | 2020-12-28T13:05:05 | 2020-12-28T13:05:05 | 324,915,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | # exercício 9.7
def repeat():
caminho = open(r'C:\Users\dreis\Desktop\Estudos\Projetos\words.txt', 'r')
for palavras in caminho:
if len(palavras) > 6:
for i in range(0, len(palavras) - 6):
if palavras[i] == palavras[i + 1]:
if palavras[i + 2] == palavras[i + 3]:
if palavras[i + 4] == palavras[i + 5]:
print(palavras)
repeat()
| [
"58836663+FelipeDreissig@users.noreply.github.com"
] | 58836663+FelipeDreissig@users.noreply.github.com |
3557efa72c93cd24fed3afd99d4cc0064394697e | ad518c153efb6ced9744d1df7e8bbd91820c5048 | /fm-api/fm_api_test.py | e748dfdb31e5010f99906f208c4fd8e5ac7fe0a6 | [
"Apache-2.0"
] | permissive | starlingx-staging/x.stx-fault | eccefc69c5015872da26869a07efd36464a1ae5d | 6cd8940170c1799f9aa2fd05a38b84de0e7d87b3 | refs/heads/master | 2020-03-19T02:03:52.136983 | 2018-05-30T23:16:06 | 2018-05-31T14:36:00 | 135,595,467 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,450 | py | # -*- encoding: utf-8 -*-
#
# Copyright (c) 2014 Wind River Systems, Inc.
#
# Author:
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from fm_api import *
from fm_api import constants
def print_alarm(alarm):
alarm_str = "alarm_id: " + alarm.alarm_id + ", " + "uuid: " + alarm.uuid + ", "
alarm_str += "alarm_type: " + alarm.alarm_type + "\n"
alarm_str += "state: " + alarm.alarm_state + ", ""severity: " + alarm.severity + ", " \
+ "entity_type_id: " + alarm.entity_type_id + ", timestamp: "+ alarm.timestamp + "\n"
alarm_str += "entity_instance_id: " + alarm.entity_instance_id + ", "
alarm_str += "probable cause:" + alarm.probable_cause + "\n"
print alarm_str
def create():
ser = FaultAPIs()
fault = Fault(alarm_id=constants.FM_ALARM_ID_VM_RESCUED,
alarm_state=constants.FM_ALARM_STATE_SET,
entity_type_id=constants.FM_ENTITY_TYPE_INSTANCE,
entity_instance_id=constants.FM_ENTITY_TYPE_INSTANCE + '=' + 'a4e4cdb7-2ee6-4818-84c8-5310fcd67b5d',
severity = constants.FM_ALARM_SEVERITY_CRITICAL,
reason_text = "Unknown",
alarm_type = constants.FM_ALARM_TYPE_5,
probable_cause = constants.ALARM_PROBABLE_CAUSE_8,
proposed_repair_action = None,
service_affecting = False,
suppression = False)
uuid =ser.set_fault(fault)
print uuid
def delete(alarm_id, instance_id):
ser=FaultAPIs()
ret = ser.clear_fault(alarm_id,instance_id)
print "Delete fault return %s" % str(ret)
def del_all(instance_id):
ser=FaultAPIs()
ret= ser.clear_all(instance_id)
print "Delete faults return: %s" % str(ret)
def get(alarm_id, instance_id):
ser=FaultAPIs()
a = ser.get_fault(alarm_id, instance_id)
if a is not None:
print_alarm(a)
else:
print "Alarm not found"
def get_all(instance_id):
ser=FaultAPIs()
ll= ser.get_faults(instance_id)
if ll is not None:
print "Total alarm returned: %d\n" % len(ll)
for i in ll:
print_alarm(i)
else:
print "No alarm returned"
def get_list(alarm_id):
ser=FaultAPIs()
ll= ser.get_faults_by_id(alarm_id)
if ll is not None:
print "Total alarm returned: %d\n" % len(ll)
for i in ll:
print_alarm(i)
else:
print "No alarm returned"
if __name__ == "__main__":
if sys.argv[1] == "create":
sys.exit(create())
elif sys.argv[1] == "del":
sys.exit(delete(sys.argv[2],sys.argv[3]))
elif sys.argv[1] == "get":
sys.exit(get(sys.argv[2],sys.argv[3]))
elif sys.argv[1] == "get_all":
sys.exit(get_all(sys.argv[2]))
elif sys.argv[1] == "del_all":
sys.exit(del_all(sys.argv[2]))
elif sys.argv[1] == "get_list":
sys.exit(get_list(sys.argv[2]))
| [
"dtroyer@gmail.com"
] | dtroyer@gmail.com |
81ef3a1abaf74c63ccf1b403a791df16705a2301 | dbaa45978f3392c200f8576a82e7f0ed063b9906 | /home/blocks.py | d0968d1a95395de3c61f9843e28b0b6821588e5d | [] | no_license | dentemm/yourin | 033d29c6f946bb805f240f4c51bfabf1fa206dca | ec61fe8dfe1397ff1ee2fc76dc45caed529d7aa1 | refs/heads/master | 2022-12-02T12:59:39.431498 | 2017-04-27T19:34:29 | 2017-04-27T19:34:29 | 71,818,880 | 0 | 0 | null | 2022-11-22T01:29:32 | 2016-10-24T18:26:14 | CSS | UTF-8 | Python | false | false | 4,082 | py | from django import forms
from wagtail.wagtailcore import blocks
from wagtail.wagtailimages.blocks import ImageChooserBlock
from wagtail.wagtailembeds.blocks import EmbedBlock
TEXT_ALIGNMENT_CHOICES = (
('text-left', 'Links'),
('text-right', 'Rechts'),
('text-center', 'Centreer'),
)
class CarouselImageBlock(blocks.StructBlock):
afbeelding = ImageChooserBlock()
#tekst = blocks.CharBlock(required=False)
class Meta:
icon = 'image'
label = 'carousel afbeelding'
class BlogTitleBlock(blocks.StructBlock):
image = ImageChooserBlock(label='afbeelding', required=True)
title = blocks.CharBlock(label='titel', required=True)
class Meta:
template = 'home/blocks/title_block.html'
label = 'titel'
icon = 'title'
class SubtitleBlock(blocks.CharBlock):
class Meta:
template = 'home/blocks/subtitle_block.html'
label = 'ondertitel'
icon = 'pilcrow'
class IntroTextBlock(blocks.TextBlock):
class Meta:
template = 'home/blocks/introtext_block.html'
label = 'intro'
icon = 'snippet'
class ParagraphBlock(blocks.StructBlock):
text_alignment = blocks.ChoiceBlock(label='Tekst uitlijning', choices=TEXT_ALIGNMENT_CHOICES, default='text-left')
text_width = blocks.IntegerBlock(label='Tekst breedte',default=12, min_value=1, max_value=12, help_text="Geeft de breedte van de paragraaf aan, waarbij 12 maximaal is. Som van tekst breedte en tekst offset is ook best maximaal 12")
text_offset = blocks.IntegerBlock(label='Tekst offset', default=0, min_value=0, max_value=10, help_text="Geeft de offset van de paragraaf aan, dus hoever de paragraaf naar rechts wordt verschoven (0 = volledig links)")
text = blocks.TextBlock(label='Paragraaf tekst', min_length=160, required=False, help_text='Plaats hier de tekst voor 1 paragraaf, en voeg zoveel paragrafen toe als nodig')
richtext = blocks.RichTextBlock(label='Richtext (= alternatief)', required=False, help_text="Deze wordt enkel getoond indien de 'Paragraaf tekst' leeg is")
class Meta:
template = 'home/blocks/paragraph_block.html'
label = 'paragraaf'
icon = 'edit'
class BlogEmbedBlock(blocks.URLBlock):
class Meta:
template = 'home/blocks/embed_block.html'
label = 'video embed'
icon = 'media'
class ImageWithCaptionBlock(blocks.StructBlock):
class Meta:
template = 'home/blocks/imagewithcaption_block.html'
label = 'afbeelding met tekst'
icon = 'image'
class PullQuoteBlock(blocks.StructBlock):
quote = blocks.CharBlock(label='Citaat', required=True, max_length=164, help_text='Geef hier een citaat in')
class Meta:
template = 'home/blocks/pullquote_block.html'
label = 'citaat'
icon = 'openquote'
#('slider', ListBlock(customblocks.CarouselImageBlock(), template='home/blocks/carousel_block.html', icon='image')),
class SliderBlock(blocks.StructBlock):
afbeeldingen = blocks.ListBlock(CarouselImageBlock())
bijhorende_tekst = blocks.RichTextBlock()
class Meta:
template = 'home/blocks/slider_block.html'
label = 'slider'
icon = 'image'
class TabbedContentItem(blocks.StructBlock):
tab_name = blocks.CharBlock(label='tabblad titel', required=True, max_length=32, help_text='de titel voor het tabblad')
rich_content = blocks.RichTextBlock(required=True)
text_width = blocks.IntegerBlock(label='Breedte',default=12, min_value=1, max_value=12, help_text="Geeft de breedte van de tabs + inhoud aan, waarbij 12 maximaal is.")
class TwoColsBlock(blocks.StructBlock):
#left = blocks.RichTextBlock(label='linkse kolom', required=True)
#right = blocks.RichTextBlock(label='rechtse kolom', required=True)
content = blocks.StreamBlock([
('linkse_kolom', blocks.RichTextBlock()),
('rechtse_kolom', blocks.RichTextBlock()),
], icon='arrow-left', label='inhoud')
# left = blocks.StreamBlock([
# ('linkse_kolom', blocks.RichTextBlock()),
# ], icon='arrow-left', label='inhoud')
# right = blocks.StreamBlock([
# ('rechtse_kolom', blocks.RichTextBlock()),
# ], icon='arrow-right', label='inhoud')
class Meta:
template = 'home/blocks/two_cols.html'
icon = 'placeholder'
label = '2 kolommen'
form_classname = 'range' | [
"tim.claes@me.com"
] | tim.claes@me.com |
f41e0c2c87253f5af0e523fec0a04fdcef77d705 | 00b762e37ecef30ed04698033f719f04be9c5545 | /scripts/test_results/scikit-learn_test_results/conflicts/127_mlcomp_sparse_document_classification_conflict.py | 8596dabdca286879c364138e4a68ba1148370e77 | [] | no_license | kenji-nicholson/smerge | 4f9af17e2e516333b041727b77b8330e3255b7c2 | 3da9ebfdee02f9b4c882af1f26fe2e15d037271b | refs/heads/master | 2020-07-22T02:32:03.579003 | 2018-06-08T00:40:53 | 2018-06-08T00:40:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,171 | py | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how the scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the arhive somewhere on your filesystem. For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata`` and
subfolders ``raw``, ``train`` and ``test`` holding the text documents organized by
newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from scikits.learn.datasets import load_mlcomp
from scikits.learn.feature_extraction.text.sparse import Vectorizer
from scikits.learn.svm.sparse import LinearSVC
from scikits.learn.metrics import confusion_matrix
<<<<<<< HEAD
from scikits.learn.metrics import f1_score
from scikits.learn.metrics import precision
from scikits.learn.metrics import recall
=======
from scikits.learn.metrics import classification_report
>>>>>>> remote
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print "Please follow those instructions to get started:"
print __doc__
sys.exit(0)
# Load the training set
print "Loading 20 newsgroups training set... "
news_train = load_mlcomp('20news-18828', 'train')
print news_train.DESCR
print "%d documents" % len(news_train.filenames)
print "%d categories" % len(news_train.target_names)
print "Extracting features from the dataset using a sparse vectorizer"
t0 = time()
vectorizer = Vectorizer()
X_train = vectorizer.fit_transform((open(f).read() for f in news_train.filenames))
print "done in %fs" % (time() - t0)
print "n_samples: %d, n_features: %d" % X_train.shape
assert sp.issparse(X_train)
y_train = news_train.target
print "Training a linear SVM (hinge loss and L2 regularizer)..."
parameters = {
'loss': 'l2',
'penalty': 'l2',
'C': 10,
'dual': False,
'eps': 1e-4,
}
print "parameters:", parameters
t0 = time()
clf = LinearSVC(**parameters).fit(X_train, y_train)
print "done in %fs" % (time() - t0)
print "Percentage of non zeros coef: %f" % (np.mean(clf.coef_ != 0) * 100)
print "Loading 20 newsgroups test set... "
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print "done in %fs" % (time() - t0)
print "Predicting the labels of the test set..."
print "%d documents" % len(news_test.filenames)
print "%d categories" % len(news_test.target_names)
print "Extracting features from the dataset using the same vectorizer"
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print "done in %fs" % (time() - t0)
print "n_samples: %d, n_features: %d" % X_test.shape
print "Predicting the outcomes of the testing set"
t0 = time()
pred = clf.predict(X_test)
print "done in %fs" % (time() - t0)
<<<<<<< HEAD
print "precision: %0.3f" % precision(y_test, pred)
print "recall: %0.3f" % recall(y_test, pred)
print "f1_score: %0.3f" % f1_score(y_test, pred)
=======
print "Classification report on test set:"
print classification_report(news_test.target, pred,
class_names=news_test.target_names)
>>>>>>> remote
cm = confusion_matrix(y_test, pred)
print "Confusion matrix:"
print cm
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix')
pl.colorbar()
pl.show()
| [
"srhee4@cs.washington.edu"
] | srhee4@cs.washington.edu |
95d44a6fd33c75ebdef986787fe08d50a5f247d3 | b1d4a62b60cedaf0b88613b4c9f6e1c37a79ccef | /app/migrations/0002_auto_20200302_1040.py | ce110466c912ca49e91643733bd7b3cf8e61d897 | [] | no_license | juned8236/primary_foreign_based_onfront | 05ac97730ecdb184c96f44e1c2fb67d40cd521c5 | 4fc3be613c246a7853b2896a120b924451673124 | refs/heads/master | 2021-02-07T02:25:18.923855 | 2020-03-13T03:56:17 | 2020-03-13T03:56:17 | 243,972,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # Generated by Django 3.0.3 on 2020-03-02 10:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='product',
name='company',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='companydb', to='app.Company'),
),
]
| [
"juned8236@gmail.com"
] | juned8236@gmail.com |
beb525892b9b3398ef96bd78e6412b6090711a55 | 443aba47108d7b35984a18f8bdf8cf90a98af428 | /src/test_sst.py | 8823784b674cee5bcaf1c63c022e812bf4b2ce3e | [
"Apache-2.0"
] | permissive | bgshin/mxnet_cnn | 947898490d3845a3d4d5b89cbeab8857bb97b730 | 19ebc13f4990ee29612a479325cf13d3bd9723ec | refs/heads/master | 2021-01-19T19:45:42.301126 | 2017-09-29T18:03:38 | 2017-09-29T18:03:38 | 101,208,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,148 | py | # https://faroit.github.io/keras-docs/1.2.2/models/model/#methods
import os
# os.environ['KERAS_BACKEND']='mxnet'
os.environ['KERAS_BACKEND']='tensorflow'
from keras.layers import Convolution1D
from keras.layers import Dense, Dropout, Flatten, Input, MaxPooling1D, Embedding
from keras.layers import merge
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from sst import load_all, Timer
import os
import argparse
def run(w2vdim, attempt, gpunum):
filter_sizes = (2, 3, 4, 5)
num_filters = 32
dropout_prob = 0.8
hidden_dims = 50
maxlen = 60
batch_size = 32
epochs = 30
os.environ["CUDA_VISIBLE_DEVICES"] = gpunum
def CNNv1(model_input, max_features, model_path):
z = Embedding(max_features,
w2vdim,
input_length=maxlen,
trainable=False)(model_input)
conv_blocks = []
for sz in filter_sizes:
conv = Convolution1D(nb_filter=num_filters,
filter_length=sz,
border_mode="valid",
activation="relu",
subsample_length=1)(z)
print(conv)
conv = MaxPooling1D(pool_length=2)(conv)
print(conv)
conv = Flatten()(conv)
conv_blocks.append(conv)
z = merge(conv_blocks, mode='concat')
z = Dropout(dropout_prob)(z)
z = Dense(hidden_dims, activation="relu")(z)
model_output = Dense(5, activation="softmax")(z)
model = Model(model_input, model_output)
model.load_weights(model_path)
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"],
context=["gpu(0)"])
return model
with Timer("load_all..."):
(x_trn, y_trn), (x_dev, y_dev), (x_tst, y_tst), embedding, max_features = \
load_all(w2vdim, maxlen, source='shm')
with Timer("Build model..."):
input_shape = (maxlen,)
model_input = Input(shape=input_shape)
modelpath = './model/newbest-%d-%d' % (w2vdim, attempt)
model = CNNv1(model_input, max_features, modelpath)
model.summary()
score_list = []
score = model.evaluate(x_trn, y_trn, batch_size=4, verbose=1)
print 'dev score=%f' % score[1]
score_list.append(score[1])
score = model.evaluate(x_dev, y_dev, batch_size=4, verbose=1)
print 'dev score=%f' % score[1]
score_list.append(score[1])
score = model.evaluate(x_tst, y_tst, batch_size=4, verbose=1)
print 'tst score=%f' % score[1]
score_list.append(score[1])
print '[summary]'
print 'trn\tdev\ttst'
print '\t'.join(map(str, score_list))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-d', default=400, choices=[50, 300, 400], type=int)
parser.add_argument('-t', default=2, choices=range(10), type=int)
parser.add_argument('-g', default="1", choices=["0", "1", "2", "3"], type=str)
args = parser.parse_args()
run(args.d, args.t, args.g)
| [
"nomolos79@gmail.com"
] | nomolos79@gmail.com |
06af8d9c34e3dadaebe4c707aa4f98b6d8c9c7c3 | 9f9ec8bebfe8b7ac8e60dcaa23153abe976585e6 | /dataCommons/reporting/reports/postingQueueSize.py | 84e6c0ffdd52f0ac5f0f6934c46737a76bcf1d73 | [] | no_license | erikwestra/data-commons | bbf32cd9b4b64ace28bcb049190d8272a23ed891 | e3ed33fad104157ff505bb02bc7ae981f8ba3b11 | refs/heads/master | 2020-04-11T12:03:19.996644 | 2013-02-14T17:08:24 | 2013-02-14T17:08:24 | 8,188,655 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,775 | py | """ dataCommons.reporting.reports.postingQueueSize
This module implements the "Posting Queue Size" report for the 3taps
Reporting system.
"""
from django.db.models import *
from dataCommons.shared.lib import dateHelpers,reportHelpers
from dataCommons.monitoringAPI.models import *
#############################################################################
# The unique type code for this report:
type = "postingQueueSize"
#############################################################################
# A user-visible name for this report:
name = "Posting Queue Size"
#############################################################################
# A user-visible description for this report:
description = "This report shows the size of the posting queue over a " \
+ "given timeframe."
#############################################################################
# The list of parameters used by this report:
params = [{'name' : "timeframe",
'label' : "View posting queue size for the last",
'required' : True,
'type' : "timeframe",
'default' : "1h"},
]
#############################################################################
# The function to generate our report from a given set of parameters:
def generator(params, timezone_offset):
startTime,endTime = reportHelpers.calc_timeframe(params['timeframe'])
# Get the "POSTINGS_QUEUED" and "POSTINGS_DEQUEUED" event types. We'll
# need these for our various database queries.
try:
postings_queued_event = EventType.objects.get(type="POSTINGS_QUEUED")
except EventType.DoesNotExist:
postings_queued_event = None
try:
postings_dequeued_event = EventType.objects.get(
type="POSTINGS_DEQUEUED")
except EventType.DoesNotExist:
postings_dequeued_event = None
# Now calculate the queue size at the start of the time period. We get
# this by summing up the total value of the POSTINGS_QUEUED events, and
# then subtract the total value of the POSTINGS_DEQUEUED events, prior to
# the starting time period.
if postings_queued_event != None:
query = Event.objects.filter(timestamp__lt=startTime,
type=postings_queued_event)
num_postings_added = \
query.aggregate(Sum("primary_value"))['primary_value__sum']
if num_postings_added == None: num_postings_added = 0
else:
num_postings_added = 0
if postings_dequeued_event != None:
query = Event.objects.filter(timestamp__lt=startTime,
type=postings_dequeued_event)
num_postings_removed = \
query.aggregate(Sum("primary_value"))['primary_value__sum']
if num_postings_removed == None: num_postings_removed = 0
else:
num_postings_removed = 0
starting_queue_size = num_postings_added - num_postings_removed
# Calculate the data to return to the caller. Note that we use a data
# reducer to simplify the data as necessary.
reducer = reportHelpers.DataReducer()
reducer.set_max_num_data_points(1000)
reducer.set_period(startTime, endTime)
reducer.set_value_combiner(sum)
if postings_queued_event != None:
for event in Event.objects.filter(timestamp__gte=startTime,
timestamp__lte=endTime,
type=postings_queued_event):
reducer.add(event.timestamp, event.primary_value)
if postings_dequeued_event != None:
for event in Event.objects.filter(timestamp__gte=startTime,
timestamp__lte=endTime,
type=postings_dequeued_event):
reducer.add(event.timestamp, -event.primary_value)
reduced_data = reducer.get_reduced_data()
# We now have a (possibly reduced) list of the changes to the queue size
# for the desired time period. Use these calculated values to build a
# running total of the queue size over the time period.
results = {'startTime' : reportHelpers.datetime_to_seconds(startTime,
timezone_offset),
'endTime' : reportHelpers.datetime_to_seconds(endTime,
timezone_offset),
'periods' : []}
running_total = starting_queue_size
for period_start,period_end,period_total in reduced_data:
running_total = running_total + period_total
timestamp = reportHelpers.datetime_to_seconds(period_start,
timezone_offset)
results['periods'].append((timestamp, running_total))
# Finally, return the calculated data back to the caller.
return (True, results)
#############################################################################
# The Javascript function to render the generated report into the web page:
renderer = """
function render(data) {
var points = [];
for (var i=0; i < data.periods.length; i++) {
var row = data.periods[i];
var timestamp = row[0];
var queue_size = row[1];
points.push([timestamp * 1000, queue_size]);
}
$.plot($("#report"), [
{data: points}
],
{xaxis: {mode: "time",
axisLabel: "Time of Day",
min: data.startTime * 1000,
max: data.endTime * 1000},
yaxis: {axisLabel: "Size of Posting Queue"}});
}
"""
| [
"ewestra@gmail.com"
] | ewestra@gmail.com |
7c745e331a254a56767884ec5b62a8ad36581097 | aba9b00edec394f1389a7ecf88a290112303414d | /energetyka/inżynieria_materiałowa/lab/06/polprzewodnik.py | b090e4821c75dec9e3f3bfa8e5f885a841a9095b | [] | no_license | torgiren/szkola | 2aca12807f0030f8e2ae2dfcb808bf7cae5e2e27 | 5ed18bed273ab25b8e52a488e28af239b8beb89c | refs/heads/master | 2020-12-25T18:18:36.317496 | 2014-04-27T23:43:21 | 2014-04-27T23:43:21 | 3,892,030 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | #!/usr/bin/env python
import math
f=open("data2.dat","r")
while True:
line=f.readline()
if not line:
break;
line=line.split()
print 1.0/(float(line[0])+273),
print math.log((0.0004*1000*float(line[1])/(0.00000016*float(line[2]))),math.e)
| [
"torgiren@gmail.com"
] | torgiren@gmail.com |
7238271ba0141772838ea30adbcf8d57f5070af2 | c91eac635507950941003dd79a494a95cd39dc77 | /test/data_formater/test_ttf_stage_parameters.py | 775a2212fc596871b1b9b7604ff84a3adccd31a4 | [] | no_license | GabrielPenaU3F/confiabilidad-software | 29b064cc9f866c06833cf6afc0bc424fd20619c6 | c57572ec3f9fba01331718d892d94d720cc5d04d | refs/heads/master | 2023-03-19T01:47:40.939503 | 2021-03-17T02:03:39 | 2021-03-17T02:03:39 | 193,144,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,330 | py | import unittest
from src.data.data_formater import TTFDataFormater
from src.data.data_repository import DataRepository
class TestTTFStageParameters(unittest.TestCase):
    """Tests for TTFDataFormater's stage-parameter helpers, exercised against
    the 'ntds' project data set loaded from the repository."""

    # Shared fixtures, populated once by setUpClass.
    ntds_data = None
    ttf_formater = None

    @classmethod
    def setUpClass(cls):
        # Load the data set and the singleton formatter once for the class.
        cls.ntds_data = DataRepository.provide_project_data('ntds')
        cls.ttf_formater = TTFDataFormater.get_instance()

    # --- determine_stage_t0 -----------------------------------------------
    def test_ttf_formater_determine_stage_t0_should_return_0_if_initial_t_is_0(self):
        t0 = self.ttf_formater.determine_stage_t0(self.ntds_data, 0)
        self.assertEqual(0, t0)

    def test_ttf_formater_determine_stage_t0_should_return_21_if_initial_t_is_32(self):
        t0 = self.ttf_formater.determine_stage_t0(self.ntds_data, 32)
        self.assertEqual(21, t0)

    # --- determine_stage_initial_sample -----------------------------------
    def test_ttf_formater_determine_initial_sample_should_return_0_if_initial_t_is_0(self):
        initial_sample = self.ttf_formater.determine_stage_initial_sample(self.ntds_data, 0)
        self.assertEqual(0, initial_sample)

    def test_ttf_formater_determine_initial_sample_should_return_0_if_initial_t_is_9(self):
        initial_sample = self.ttf_formater.determine_stage_initial_sample(self.ntds_data, 9)
        self.assertEqual(0, initial_sample)

    def test_ttf_formater_determine_initial_sample_should_return_1_if_initial_t_is_11(self):
        initial_sample = self.ttf_formater.determine_stage_initial_sample(self.ntds_data, 11)
        self.assertEqual(1, initial_sample)

    def test_ttf_formater_determine_initial_sample_should_return_1_if_initial_t_is_21(self):
        initial_sample = self.ttf_formater.determine_stage_initial_sample(self.ntds_data, 21)
        self.assertEqual(1, initial_sample)

    # --- determine_stage_end_sample ---------------------------------------
    def test_ttf_formater_determine_end_sample_should_return_2_if_end_t_is_32(self):
        end_sample = self.ttf_formater.determine_stage_end_sample(self.ntds_data, 32)
        self.assertEqual(2, end_sample)

    def test_ttf_formater_determine_end_sample_should_return_2_if_end_t_is_35(self):
        end_sample = self.ttf_formater.determine_stage_end_sample(self.ntds_data, 35)
        self.assertEqual(2, end_sample)

    def test_ttf_formater_determine_end_sample_should_return_25_if_end_t_is_260(self):
        # NOTE(review): the method name says 260 but the call passes 250 --
        # confirm which value was intended.
        end_sample = self.ttf_formater.determine_stage_end_sample(self.ntds_data, 250)
        self.assertEqual(25, end_sample)
| [
"gpena@untref.edu.ar"
] | gpena@untref.edu.ar |
2722bfbf80756d42355e953b07dc2b3411eb23a4 | 244ecfc2017a48c70b74556be8c188e7a4815848 | /res_bw/scripts/common/lib/plat-irix5/in.py | 81f04b7f3125011fcdad536e53c891298a4c1d7b | [] | no_license | webiumsk/WOT-0.9.12 | c1e1259411ba1e6c7b02cd6408b731419d3174e5 | 5be5fd9186f335e7bae88c9761c378ff5fbf5351 | refs/heads/master | 2021-01-10T01:38:36.523788 | 2015-11-18T11:33:37 | 2015-11-18T11:33:37 | 46,414,438 | 1 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 3,280 | py | # 2015.11.18 12:05:35 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/plat-irix5/IN.py
from warnings import warnpy3k
warnpy3k('the IN module has been removed in Python 3.0', stacklevel=2)
del warnpy3k
LITTLE_ENDIAN = 1234
BIG_ENDIAN = 4321
PDP_ENDIAN = 3412
BYTE_ORDER = BIG_ENDIAN
BYTE_ORDER = LITTLE_ENDIAN
def ntohl(x):
return x
def ntohs(x):
return x
def htonl(x):
return x
def htons(x):
return x
def htonl(x):
return ntohl(x)
def htons(x):
return ntohs(x)
# Device-number (major/minor) encoding constants: the "O"-prefixed names are
# the old layout, the "N"-prefixed ones the new layout.
ONBITSMAJOR = 7
ONBITSMINOR = 8
OMAXMAJ = 127
OMAXMIN = 255
NBITSMAJOR = 14
NBITSMINOR = 18
MAXMAJ = 511
MAXMIN = 262143
OLDDEV = 0
NEWDEV = 1
MKDEV_VER = NEWDEV
# NOTE(review): __major/__minor are not defined anywhere in this module, so
# calling major()/minor() would raise NameError -- presumably they existed
# in the C header these bindings were generated from; confirm before use.
def major(dev):
    return __major(MKDEV_VER, dev)
def minor(dev):
    return __minor(MKDEV_VER, dev)
# select() descriptor limit and bits-per-byte.
FD_SETSIZE = 1024
NBBY = 8
# IP protocol numbers (IPPROTO_*), as in <netinet/in.h>.
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_ENCAP = 4
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_XTP = 36
IPPROTO_HELLO = 63
IPPROTO_ND = 77
IPPROTO_EON = 80
IPPROTO_RAW = 255
IPPROTO_MAX = 256
# Well-known port boundaries.
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPORT_MAXPORT = 65535
# IPv4 classful-address predicates and masks.  Python 2 syntax: long() and
# trailing-L integer literals.
def IN_CLASSA(i):
    return long(i) & 2147483648L == 0
IN_CLASSA_NET = 4278190080L
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 16777215
IN_CLASSA_MAX = 128
def IN_CLASSB(i):
    return long(i) & 3221225472L == 2147483648L
IN_CLASSB_NET = 4294901760L
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 65535
IN_CLASSB_MAX = 65536
def IN_CLASSC(i):
    return long(i) & 3758096384L == 3221225472L
IN_CLASSC_NET = 4294967040L
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 255
def IN_CLASSD(i):
    return long(i) & 4026531840L == 3758096384L
IN_CLASSD_NET = 4026531840L
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 268435455
def IN_MULTICAST(i):
    # Class D addresses are the multicast range.
    return IN_CLASSD(i)
def IN_EXPERIMENTAL(i):
    return long(i) & 4026531840L == 4026531840L
def IN_BADCLASS(i):
    return long(i) & 4026531840L == 4026531840L
# Special IPv4 addresses.
INADDR_ANY = 0
INADDR_BROADCAST = 4294967295L
INADDR_LOOPBACK = 2130706433
INADDR_UNSPEC_GROUP = 3758096384L
INADDR_ALLHOSTS_GROUP = 3758096385L
INADDR_MAX_LOCAL_GROUP = 3758096639L
INADDR_NONE = 4294967295L
IN_LOOPBACKNET = 127
# IP socket-option numbers.  NOTE: three conflicting blocks follow (two
# plain IP_* sets plus IRIX4_* compatibility names); later assignments
# overwrite earlier ones, so only the *last* IP_* values are effective.
IP_OPTIONS = 1
IP_MULTICAST_IF = 2
IP_MULTICAST_TTL = 3
IP_MULTICAST_LOOP = 4
IP_ADD_MEMBERSHIP = 5
IP_DROP_MEMBERSHIP = 6
IP_HDRINCL = 7
IP_TOS = 8
IP_TTL = 9
IP_RECVOPTS = 10
IP_RECVRETOPTS = 11
IP_RECVDSTADDR = 12
IP_RETOPTS = 13
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 20
IP_MULTICAST_TTL = 21
IP_MULTICAST_LOOP = 22
IP_ADD_MEMBERSHIP = 23
IP_DROP_MEMBERSHIP = 24
# IRIX4-compatibility socket-option numbers.
IRIX4_IP_OPTIONS = 1
IRIX4_IP_MULTICAST_IF = 2
IRIX4_IP_MULTICAST_TTL = 3
IRIX4_IP_MULTICAST_LOOP = 4
IRIX4_IP_ADD_MEMBERSHIP = 5
IRIX4_IP_DROP_MEMBERSHIP = 6
IRIX4_IP_HDRINCL = 7
IRIX4_IP_TOS = 8
IRIX4_IP_TTL = 9
IRIX4_IP_RECVOPTS = 10
IRIX4_IP_RECVRETOPTS = 11
IRIX4_IP_RECVDSTADDR = 12
IRIX4_IP_RETOPTS = 13
# Multicast defaults and limits.
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\plat-irix5\in.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 12:05:35 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
536183f4949f6e92e8c56105ea1e5dfe526556a9 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/word-count/5af1e40a97664eb786adf47ea78a5857.py | e3c48f698e30288abe8547792608b7366661c877 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 400 | py | """
Write a program that given a phrase can count the occurrences of each word in that phrase.
For example for the input `"olly olly in come free"`
plain
olly: 2
in: 1
come: 1
free: 1
"""
from collections import defaultdict
def word_count(phrase):
    """Count how often each whitespace-separated word occurs in *phrase*.

    Returns a plain dict mapping word -> occurrence count; an empty phrase
    yields an empty dict.
    """
    counts = {}
    for token in phrase.split():
        counts[token] = counts.get(token, 0) + 1
    return counts
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
4a474bffc05946755f60e75c53115e83ec96ac48 | 57caf9e323d5771c6463bb67a7e1a774f5315e5b | /setup.py | 9cdd5dcfa36969a58547e0f10e3d4a660b6ad322 | [
"Apache-2.0"
] | permissive | geziaka/rater | 5de97851d4207f03f996324b99b8fdc5881306e9 | 8437dea8baf0137ab3c07dd19c5f2bb8c15b4435 | refs/heads/master | 2022-12-06T00:33:33.867132 | 2020-09-01T15:04:37 | 2020-09-01T15:04:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,652 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from setuptools import setup, find_packages
import rater
if sys.version_info < (3,):
sys.exit('Sorry, Python3 is required.')
with open('README.md', 'r', encoding='utf-8') as f:
readme = f.read()
with open('LICENSE', 'r', encoding='utf-8') as f:
license = f.read()
with open('requirements.txt', 'r', encoding='utf-8') as f:
reqs = f.read()
setup(
name='rater',
version=rater.__version__,
description='rater',
long_description=readme,
long_description_content_type='text/markdown',
author='XuMing',
author_email='xuming624@qq.com',
url='https://github.com/shibing624/rater',
license="Apache License 2.0",
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Natural Language :: Chinese (Simplified)',
'Natural Language :: Chinese (Traditional)',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Text Processing',
'Topic :: Text Processing :: Indexing',
'Topic :: Text Processing :: Linguistic',
],
keywords='rater,recommender,Recommendation System,recommendation model',
install_requires=reqs.strip().split('\n'),
packages=find_packages(exclude=['tests']),
package_dir={'rater': 'rater'},
package_data={
'rater': ['*.*', '../LICENSE', '../*.txt', '../README.*'],
},
test_suite='tests',
)
| [
"xuming624@qq.com"
] | xuming624@qq.com |
6a0effe99eed37ac3ee489761699b67ae14ef643 | 4aae2df13bfd53a8b16aa5f941f2cc8b8ac144b7 | /torch/utils/data/_utils/collate.py | e520de6ebee91da435f8e8cb8bab02ebc24f851b | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | computerguy2030/pytorch-rocm-amd | e9f2718c470b505325d396baf6513e71bcf0a7ca | 38da53d721fcb335dedb1b52f14fd89718e90bef | refs/heads/master | 2023-04-08T00:55:01.542663 | 2021-04-16T11:33:39 | 2021-04-16T11:33:39 | 334,288,140 | 3 | 0 | NOASSERTION | 2021-04-16T11:27:55 | 2021-01-29T23:40:06 | C++ | UTF-8 | Python | false | false | 3,656 | py | r""""Contains definitions of the methods used by the _BaseDataLoaderIter workers to
collate samples fetched from dataset into Tensor(s).
These **needs** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import torch
import re
import collections
from torch._six import string_classes
# Matches numpy dtype kind codes that cannot be turned into tensors:
# S/a (byte strings), U (unicode strings) and O (arbitrary Python objects).
np_str_obj_array_pattern = re.compile(r'[SaUO]')
def default_convert(data):
    r"""Converts each NumPy array data field into a tensor"""
    # Recursively walks mappings/namedtuples/sequences; the branch order is
    # significant (tensor check first, then numpy, then containers).
    elem_type = type(data)
    if isinstance(data, torch.Tensor):
        # Already a tensor: pass through unchanged.
        return data
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        # array of string classes and object
        if elem_type.__name__ == 'ndarray' \
                and np_str_obj_array_pattern.search(data.dtype.str) is not None:
            # String/object ndarrays have no tensor equivalent; return as-is.
            return data
        return torch.as_tensor(data)
    elif isinstance(data, collections.abc.Mapping):
        # Convert each value, preserving the mapping's keys.
        return {key: default_convert(data[key]) for key in data}
    elif isinstance(data, tuple) and hasattr(data, '_fields'):  # namedtuple
        # Rebuild the namedtuple type with converted fields.
        return elem_type(*(default_convert(d) for d in data))
    elif isinstance(data, collections.abc.Sequence) and not isinstance(data, string_classes):
        return [default_convert(d) for d in data]
    else:
        # Scalars, strings and anything unrecognised are returned unchanged.
        return data
# Error template used by default_collate for unsupported element types.
default_collate_err_msg_format = (
    "default_collate: batch must contain tensors, numpy arrays, numbers, "
    "dicts or lists; found {}")
def default_collate(batch):
    r"""Puts each data field into a tensor with outer dimension batch size"""
    # The first element dictates how the whole batch is collated; all
    # elements are assumed to share its type and structure.
    elem = batch[0]
    elem_type = type(elem)
    if isinstance(elem, torch.Tensor):
        out = None
        if torch.utils.data.get_worker_info() is not None:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = elem.storage()._new_shared(numel)
            out = elem.new(storage)
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
            # array of string classes and object
            if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
                raise TypeError(default_collate_err_msg_format.format(elem.dtype))
            # Numeric ndarrays: convert each to a tensor and recurse.
            return default_collate([torch.as_tensor(b) for b in batch])
        elif elem.shape == ():  # scalars
            return torch.as_tensor(batch)
    elif isinstance(elem, float):
        # Python floats become a float64 tensor.
        return torch.tensor(batch, dtype=torch.float64)
    elif isinstance(elem, int):
        return torch.tensor(batch)
    elif isinstance(elem, string_classes):
        # Strings are left as a plain Python list.
        return batch
    elif isinstance(elem, collections.abc.Mapping):
        # Collate per key, producing one batched value per key.
        return {key: default_collate([d[key] for d in batch]) for key in elem}
    elif isinstance(elem, tuple) and hasattr(elem, '_fields'):  # namedtuple
        return elem_type(*(default_collate(samples) for samples in zip(*batch)))
    elif isinstance(elem, collections.abc.Sequence):
        # check to make sure that the elements in batch have consistent size
        it = iter(batch)
        elem_size = len(next(it))
        if not all(len(elem) == elem_size for elem in it):
            raise RuntimeError('each element in list of batch should be of equal size')
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]
    raise TypeError(default_collate_err_msg_format.format(elem_type))
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
b1d34706132b3d17ed479db76bad66ce56f1572b | 29c58b3bec6ac0fcdb3070efc118600ee92004da | /mailslurp_client/models/domain_issues_dto.py | 308f604c368d5fc913ec8fb9b4289513ad448937 | [
"MIT"
] | permissive | mailslurp/mailslurp-client-python | a2b5a0545206714bd4462ae517f242852b52aaf9 | 5c9a7cfdd5ea8bf671928023e7263847353d92c4 | refs/heads/master | 2023-06-23T00:41:36.257212 | 2023-06-14T10:10:14 | 2023-06-14T10:10:14 | 204,662,133 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,963 | py | # coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Contact: contact@mailslurp.dev
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from mailslurp_client.configuration import Configuration
class DomainIssuesDto(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps Python attribute name -> declared OpenAPI type.
    openapi_types = {
        'has_issues': 'bool'
    }

    # Maps Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'has_issues': 'hasIssues'
    }

    def __init__(self, has_issues=None, local_vars_configuration=None):  # noqa: E501
        """DomainIssuesDto - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._has_issues = None
        self.discriminator = None

        # Goes through the property setter, which validates non-None.
        self.has_issues = has_issues

    @property
    def has_issues(self):
        """Gets the has_issues of this DomainIssuesDto.  # noqa: E501


        :return: The has_issues of this DomainIssuesDto.  # noqa: E501
        :rtype: bool
        """
        return self._has_issues

    @has_issues.setter
    def has_issues(self, has_issues):
        """Sets the has_issues of this DomainIssuesDto.


        :param has_issues: The has_issues of this DomainIssuesDto.  # noqa: E501
        :type: bool
        """
        # Required field: reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and has_issues is None:  # noqa: E501
            raise ValueError("Invalid value for `has_issues`, must not be `None`")  # noqa: E501

        self._has_issues = has_issues

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DomainIssuesDto):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, DomainIssuesDto):
            return True

        return self.to_dict() != other.to_dict()
| [
"contact@mailslurp.dev"
] | contact@mailslurp.dev |
5bfd5f3f684821bf71868a10ddb26ba44701fba3 | 4acc08d2c165b5d88119df6bb4081bcfaca684f7 | /python_program/NCR_NPR_find_value.py | 5610825ece1676d469f61a64ce35330106ab9c4e | [] | no_license | xiaotuzixuedaima/PythonProgramDucat | 9059648f070db7304f9aaa45657c8d3df75f3cc2 | 90c6947e6dfa8ebb6c8758735960379a81d88ae3 | refs/heads/master | 2022-01-16T04:13:17.849130 | 2019-02-22T15:43:18 | 2019-02-22T15:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | # Python Find ncR & nPr ????
# Find nCr and nPr for user-supplied n and r.
import math

n = int(input("enter the nth term : " ))
r = int(input("enter the rth term : " ))

# nCr = n! / (r! * (n-r)!).  math.factorial replaces the three hand-rolled
# while-loops of the original, which destructively counted n, r and n-r
# down to zero; results are identical for valid 0 <= r <= n.
NCR = math.factorial(n) // (math.factorial(n - r) * math.factorial(r))
print("total value of the given numbers its combination : ", NCR)

# nPr = n! / (n-r)!.
NPR = math.factorial(n) // math.factorial(n - r)
print("total value of the given numbers its permutation : ", NPR)
'''
output ===
enter the nth term : 10
enter the rth term : 7
total value of the given numbers its combination : 120
total value of the given numbers its permutation : 604800
''' | [
"ss7838094755@gmail.com"
] | ss7838094755@gmail.com |
144255196241663945336ba45beadedc72c62646 | 3dd43ff0dab514a39f611487ab421256b3b5b13b | /scripts/client/gui/Scaleform/daapi/view/meta/ClanSearchWindowMeta.py | e2816eaaf99fd1ffd00a31c5816c417a46e1f342 | [] | no_license | kusaku/wotscripts | 04ab289e3fec134e290355ecf81cf703af189f72 | a89c2f825d3c7dade7bc5163a6c04e7f5bab587d | refs/heads/master | 2023-08-20T00:17:36.852522 | 2018-02-26T14:53:44 | 2018-02-26T14:53:44 | 80,610,354 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,518 | py | # Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/ClanSearchWindowMeta.py
"""
This file was generated using the wgpygen.
Please, don't edit this file manually.
"""
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class ClanSearchWindowMeta(AbstractWindowView):
    """Python <-> ActionScript (DAAPI) bridge for the clan-search window.

    The plain methods are entry points that Flash invokes on Python;
    subclasses are expected to override them (these base implementations
    only log an override error).  The ``as_*S`` methods call back into the
    Flash object and are guarded by ``_isDAAPIInited`` so nothing runs
    before the UI side is ready.
    """

    def search(self, text):
        self._printOverrideError('search')

    def previousPage(self):
        self._printOverrideError('previousPage')

    def nextPage(self):
        self._printOverrideError('nextPage')

    def dummyButtonPress(self):
        self._printOverrideError('dummyButtonPress')

    def as_getDPS(self):
        # Returns None when DAAPI is not yet initialised.
        if self._isDAAPIInited():
            return self.flashObject.as_getDP()

    def as_setInitDataS(self, data):
        """
        :param data: Represented by ClanSearchWindowInitDataVO (AS)
        """
        if self._isDAAPIInited():
            return self.flashObject.as_setInitData(data)

    def as_setStateDataS(self, data):
        """
        :param data: Represented by ClanSearchWindowStateDataVO (AS)
        """
        if self._isDAAPIInited():
            return self.flashObject.as_setStateData(data)

    def as_setDummyS(self, data):
        """
        :param data: Represented by DummyVO (AS)
        """
        if self._isDAAPIInited():
            return self.flashObject.as_setDummy(data)
def as_setDummyVisibleS(self, visible):
if self._isDAAPIInited():
return self.flashObject.as_setDummyVisible(visible) | [
"kirill.a@aggrostudios.com"
] | kirill.a@aggrostudios.com |
e65a195d861dbd5a95fad58ad3981875fc4713b5 | 86335a0ba622ffc1ef9392fa45190123599c92de | /ecpy_pulses/infos.py | d37602a9eb241dc1d80c8259df0bc937201c0482 | [
"BSD-3-Clause"
] | permissive | PhilipVinc/ecpy_pulses | 4e75d2fc4a977ec1f80761609412b453451f967d | 3ca72e5739e36ac203381ca6ed46a5b18184bd7c | refs/heads/master | 2021-01-17T22:27:44.395230 | 2016-06-08T16:16:59 | 2016-06-08T16:16:59 | 51,396,985 | 1 | 0 | null | 2016-02-09T20:23:16 | 2016-02-09T20:23:15 | Python | UTF-8 | Python | false | false | 3,162 | py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015 by Ecpy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Objects used to store filters, sequences and configs in the manager.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
from atom.api import Atom, Subclass, Dict, Coerced
import enaml
from .sequences.base_sequences import AbstractSequence
from .configs.base_config import AbstractConfig
from .contexts.base_context import BaseContext
from .shapes.base_shapes import AbstractShape
from .pulse import Pulse
with enaml.imports():
from .sequences.views.abstract_sequence_view import AbstractSequenceView
from .configs.base_config_views import AbstractConfigView
from .shapes.views.base_shapes_views import AbstractShapeView
from .contexts.views.base_context_view import BaseContextView
from .pulse_view import PulseView
class ObjectDependentInfos(Atom):
    """ Base info object for everything with dependencies.
    """
    #: Runtime dependencies ids of this object.  Coerced to a set; defaults
    #: to an empty set.
    dependencies = Coerced(set, ())
class SequenceInfos(ObjectDependentInfos):
    """An object used to store the informations about a sequence.
    """
    #: Class representing this sequence.
    cls = Subclass(AbstractSequence)
    #: Widget associated with this sequence.
    view = Subclass(AbstractSequenceView)
    #: Metadata associated with this sequence such as group, looping
    #: capabilities, etc.
    metadata = Dict()
class PulseInfos(ObjectDependentInfos):
    """An object used to store the informations about a pulse.
    """
    #: Class representing this pulse.
    cls = Subclass(Pulse)
    #: Widget associated with this pulse.
    view = Subclass(PulseView)
    #: Metadata associated with this pulse such as group, looping
    #: capabilities, etc.
    metadata = Dict()
class ConfigInfos(Atom):
    """An object used to store the informations about a sequence configurer.
    """
    #: Class representing this configurer.
    cls = Subclass(AbstractConfig)
    #: Widget associated with this configurer.
    view = Subclass(AbstractConfigView)
class ContextInfos(ObjectDependentInfos):
    """Object used to store informations about a Context, declared in a manifest.
    """
    #: Class representing this context.
    cls = Subclass(BaseContext)
    #: Widget associated with this context.
    view = Subclass(BaseContextView)
    #: Free-form metadata associated with this context.
    metadata = Dict()
class ShapeInfos(ObjectDependentInfos):
    """Object used to store informations about a shape.
    """
    #: Class representing this Shape.
    cls = Subclass(AbstractShape)
    #: Widget associated with this Shape.
    view = Subclass(AbstractShapeView)
    #: Free-form metadata associated with this shape.
    metadata = Dict()
| [
"filippovicentini@gmail.com"
] | filippovicentini@gmail.com |
2ccdbae7171dde011530efd66c5e27234901063d | d1aa9dc649209d2172c01f19f5121261fb5d6e9e | /Monitoring/Monitor/Monitor/Monitor_process.py | 44d11a5abccb11b471d11af3a6b449ed6d0003cd | [] | no_license | Alexanderklau/Amusing_python | 484e97806bc45ecbe0220f899723fa091a0f088b | 9ce288eac7eeabb0e21f62936b6eb5ac2a0c934e | refs/heads/master | 2021-12-27T03:33:37.535288 | 2021-12-20T08:00:11 | 2021-12-20T08:00:11 | 107,672,208 | 45 | 11 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | # coding: utf-8
__author__ = 'lau.wenbo'
"""
Monitoring comes in two flavours: continuous and custom.

Continuous monitoring collects the top-ten processes (by resource share)
every 60 seconds and keeps appending them to the log.

Custom monitoring lets you choose the polling frequency and a specific
process to watch, printing only the data you need -- for example a fixed
process's CPU, memory and thread usage.
"""
import sys
sys.path.append("..")
from Check import check_cpu, check_memory, check_process, check_threading
from Log import monitor_log
import getopt
import json
import time

# Load thresholds once at import time.  A context manager is used so the
# settings file handle is not leaked (the original left it open forever).
with open("/Monitor/setting.json", "r") as f:
    setting = json.load(f)
cpu_max = float(setting["CPU_max"])
memeory_max = float(setting["Memory_max"])  # (sic) name kept for compatibility
check_time = setting["time"]


def run_process_have():
    # Processes currently exceeding the CPU/memory thresholds.
    return check_threading.process_have(cpu_max, memeory_max)


def run_check_process(name):
    # Look up a process by name.
    return check_process.get_process(name)


def run_check_process_thread(name):
    # NOTE(review): identical to run_check_process -- presumably meant to
    # call a thread-level getter; confirm against check_process's API.
    return check_process.get_process(name)


def run_get_cpu():
    return check_cpu.get_cpu_none()


def run_get_memory():
    return check_memory.get_memory()


def run_get_cpu_have():
    # CPU readings filtered against the configured maximum.
    return check_cpu.get_cpu_have(cpu_max)


def run_get_memory_have():
    # Memory readings filtered against the configured maximum.
    return check_memory.get_memory_have(memeory_max)
"429095816@qq.com"
] | 429095816@qq.com |
ed39fe3c9346697d2fd9e046484b54ce38a345b5 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R1/benchmark/startQiskit142.py | bcf544e34e2f6180c27c19e7ab373d5da0db3295 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,447 | py | # qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
    """Build the (partially hard-coded) QAOA-style circuit on *n* qubits.

    NOTE: reads the module-level globals ``E`` (weighted edge list), ``V``
    (vertex array), ``gamma`` and ``beta`` (angles), which are only defined
    inside the ``__main__`` block below -- call only after they exist.
    The fixed gate prefix indexes qubits 0..3, so n must be at least 4.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    prog = QuantumCircuit(input_qubit)
    prog.h(input_qubit[0]) # number=1
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[0]) # number=9
    prog.cz(input_qubit[3],input_qubit[0]) # number=10
    prog.h(input_qubit[0]) # number=11
    prog.z(input_qubit[3]) # number=7
    prog.cx(input_qubit[3],input_qubit[0]) # number=8
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    # Cost layer: a controlled-phase per edge plus single-qubit phases.
    # NOTE(review): cp() indexes qubits as k-1/l-1 while p() uses the raw
    # vertex number k/l (edges start at vertex 0, so k-1 can be -1) --
    # confirm the intended indexing convention.
    for edge in E:
        k = edge[0]
        l = edge[1]
        prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
        prog.p(gamma, k)
        prog.p(gamma, l)
    # Mixer layer: RX rotation on every vertex qubit.
    prog.rx(2 * beta, range(len(V)))
    # circuit end
    return prog
if __name__ == '__main__':
    # Problem graph: 4 vertices, 5 weighted edges.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]

    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)

    # Grid-search an analytic objective F1 over (gamma, beta) in [0, pi)
    # and keep the first maximising pair.
    step_size = 0.1

    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)

    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
            1 + np.cos(4 * a_gamma) ** 2)

    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]

    gamma = a[0] * step_size
    beta = a[1] * step_size

    # Build the circuit (reads E, V, gamma, beta from this scope).
    prog = make_circuit(4)

    sample_shot =5200
    writefile = open("../data/startQiskit142.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = BasicAer.get_backend('qasm_simulator')

    # Transpile against a fake 5-qubit device layout, then sample on the
    # local qasm simulator and dump counts/depth/diagram to the CSV file.
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.measure_all()
    prog = circuit1

    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()

    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
d34bbc0300e2610300bf3f8097c18d4608516f1f | 94bd78e63de94859eb076e52683f73f6ea91eae3 | /199.py | 93d67efac4b025be260275ea48441696363cd9de | [] | no_license | MadSkittles/leetcode | 70598c1c861a8ff5d2f7c921a311307d55770acc | 817bbb73dfe095b9c9358dc459ba6605a2a9a256 | refs/heads/master | 2021-11-30T04:56:02.432749 | 2021-11-12T03:28:47 | 2021-11-12T03:28:47 | 123,558,601 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | class Solution:
def rightSideView(self, root):
if not root:
return []
from queue import Queue
q, m = Queue(), {}
q.put((root, 0))
while not q.empty():
node, floor = q.get()
m[floor] = node.val
if node.left:
q.put((node.left, floor + 1))
if node.right:
q.put((node.right, floor + 1))
return [m[i] for i in range(len(m))]
def rightSideView1(self, root):
self.result = []
res = ()
if root:
self.f(root, (root.val,))
for p in self.result:
if len(p) > len(res):
res += p[len(res) - len(p) :]
return res
def f(self, node, path):
if not node.left and not node.right:
self.result.append(path)
return
if node.right:
self.f(node.right, (*path, node.right.val))
if node.left:
self.f(node.left, (*path, node.left.val))
| [
"likanwen@icloud.com"
] | likanwen@icloud.com |
03005788a36d01937289261a7e288f6883d64b8a | bd8400dae9bf43922d043c22999dcfdea08b3797 | /5 Matplotlib/51.scatter_plot.py | 7feeea8712ca3fe366dd650f48d5411a11831a7a | [] | no_license | srikeshnagoji/Python_Fundamental_DataScience | dfc3bd5003a3bc357f8b08f0084cb5b2fc766bda | 3982cce2b69bed7128aeb7ce8adbd22f71890fcf | refs/heads/master | 2020-07-04T20:12:38.779942 | 2019-08-14T04:36:53 | 2019-08-14T04:36:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py |
import matplotlib.pyplot as plt
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y = [5, 3, 6, 1, 7, 9, 3, 3, 2]
plt.scatter(x, y, label='Tes', color='k', marker='*', s=200)
# plt.plot(x,y,'*')
# marker = '*' 'x' 'o' baca docs
plt.title('Tes Plotting Data\nby Lintang Wisesa')
plt.xlabel('Nilai x')
plt.ylabel('Nilai y')
# plt.legend()
plt.show() | [
"lintangwisesa@ymail.com"
] | lintangwisesa@ymail.com |
b7ef1fb30f414e36f09eb45f57a68beaea974a31 | 18239524612cf572bfeaa3e001a3f5d1b872690c | /clients/keto/python/test/test_remove_ory_access_control_policy_role_members.py | 6b90ab385d91bf24e7c7509df87b9ae9bddd4327 | [
"Apache-2.0"
] | permissive | simoneromano96/sdk | 2d7af9425dabc30df830a09b26841fb2e8781bf8 | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | refs/heads/master | 2023-05-09T13:50:45.485951 | 2021-05-28T12:18:27 | 2021-05-28T12:18:27 | 371,689,133 | 0 | 0 | Apache-2.0 | 2021-05-28T12:11:41 | 2021-05-28T12:11:40 | null | UTF-8 | Python | false | false | 1,208 | py | # coding: utf-8
"""
ORY Keto
A cloud native access control server providing best-practice patterns (RBAC, ABAC, ACL, AWS IAM Policies, Kubernetes Roles, ...) via REST APIs. # noqa: E501
The version of the OpenAPI document: v0.0.0-alpha.1
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import ory_keto_client
from ory_keto_client.models.remove_ory_access_control_policy_role_members import RemoveOryAccessControlPolicyRoleMembers # noqa: E501
from ory_keto_client.rest import ApiException
class TestRemoveOryAccessControlPolicyRoleMembers(unittest.TestCase):
"""RemoveOryAccessControlPolicyRoleMembers unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRemoveOryAccessControlPolicyRoleMembers(self):
"""Test RemoveOryAccessControlPolicyRoleMembers"""
# FIXME: construct object with mandatory attributes with example values
# model = ory_keto_client.models.remove_ory_access_control_policy_role_members.RemoveOryAccessControlPolicyRoleMembers() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | simoneromano96.noreply@github.com |
0b17e6a5303ffe9a2c6a6cdd3cb22ae5d6303f11 | 8ffcf5ce3f3861300f5ca6ba355600c1c65a9ede | /release/scripts/mgear/shifter_classic_components/chain_FK_spline_02/guide.py | ad2ead23257d2a7401ca5a26401e2542bc140b80 | [
"MIT"
] | permissive | mottosso/mgear4 | a5db6d712e07fcec607aa877576e7beee6b8b45e | e84362aa86e2049cf160dc516e023070e3071e53 | refs/heads/master | 2023-05-23T20:47:46.761469 | 2021-06-09T00:13:39 | 2021-06-09T00:13:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,973 | py | """Guide chain FK spline 01 module"""
from functools import partial
from mgear.shifter.component import guide
from mgear.core import pyqt
from mgear.vendor.Qt import QtWidgets, QtCore
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from maya.app.general.mayaMixin import MayaQDockWidget
from . import settingsUI as sui
# guide info
AUTHOR = "Miquel Campos"
URL = "www.miquel-campos.com"
EMAIL = ""
VERSION = [1, 0, 0]
TYPE = "chain_FK_spline_02"
NAME = "chain"
DESCRIPTION = "FK chain with a spline driven joints. And Extra IK controls \
for each segment. ADD option for extra Tweak for each joint"
##########################################################
# CLASS
##########################################################
class Guide(guide.ComponentGuide):
    """Component Guide Class"""

    compType = TYPE
    compName = NAME
    description = DESCRIPTION

    author = AUTHOR
    url = URL
    email = EMAIL
    version = VERSION

    def postInit(self):
        """Initialize the position for the guide"""
        # A root plus a variable-length chain of "#_loc" locators (at least
        # one, no upper bound); the blade fixes the up-vector orientation.
        self.save_transform = ["root", "#_loc"]
        self.save_blade = ["blade"]
        self.addMinMax("#_loc", 1, -1)

    def addObjects(self):
        """Add the Guide Root, blade and locators"""
        self.root = self.addRoot()
        self.locs = self.addLocMulti("#_loc", self.root)
        self.blade = self.addBlade("blade", self.root, self.locs[0])
        centers = [self.root]
        centers.extend(self.locs)
        # Display curve through every center, plus a degree-3 reference
        # curve used by the spline setup.
        self.dispcrv = self.addDispCurve("crv", centers)
        self.addDispCurve("crvRef", centers, 3)

    def addParameters(self):
        """Add the configurations settings"""
        self.pNeutralPose = self.addParam("neutralpose", "bool", False)
        self.pOverrideNegate = self.addParam("overrideNegate", "bool", False)
        self.pKeepLength = self.addParam("keepLength", "bool", False)
        # Optionally override the number of deform joints (default 3, min 1).
        self.pOverrideJointNb = self.addParam("overrideJntNb", "bool", False)
        self.pJntNb = self.addParam("jntNb", "long", 3, 1)
        self.pExtraTweak = self.addParam("extraTweak", "bool", False)
        self.pUseIndex = self.addParam("useIndex", "bool", False)
        self.pParentJointIndex = self.addParam(
            "parentJointIndex", "long", -1, None, None)
##########################################################
# Setting Page
##########################################################
class settingsTab(QtWidgets.QDialog, sui.Ui_Form):
def __init__(self, parent=None):
super(settingsTab, self).__init__(parent)
self.setupUi(self)
class componentSettings(MayaQWidgetDockableMixin, guide.componentMainSettings):
def __init__(self, parent=None):
self.toolName = TYPE
# Delete old instances of the componet settings window.
pyqt.deleteInstances(self, MayaQDockWidget)
super(self.__class__, self).__init__(parent=parent)
self.settingsTab = settingsTab()
self.setup_componentSettingWindow()
self.create_componentControls()
self.populate_componentControls()
self.create_componentLayout()
self.create_componentConnections()
def setup_componentSettingWindow(self):
self.mayaMainWindow = pyqt.maya_main_window()
self.setObjectName(self.toolName)
self.setWindowFlags(QtCore.Qt.Window)
self.setWindowTitle(TYPE)
self.resize(280, 350)
def create_componentControls(self):
return
def populate_componentControls(self):
"""Populate Controls
Populate the controls values from the custom attributes of the
component.
"""
# populate tab
self.tabs.insertTab(1, self.settingsTab, "Component Settings")
# populate component settings
self.populateCheck(self.settingsTab.neutralPose_checkBox,
"neutralpose")
self.populateCheck(self.settingsTab.overrideNegate_checkBox,
"overrideNegate")
self.populateCheck(self.settingsTab.keepLength_checkBox,
"keepLength")
self.populateCheck(self.settingsTab.overrideJntNb_checkBox,
"overrideJntNb")
self.populateCheck(self.settingsTab.extraTweak_checkBox,
"extraTweak")
self.settingsTab.jntNb_spinBox.setValue(self.root.attr("jntNb").get())
def create_componentLayout(self):
self.settings_layout = QtWidgets.QVBoxLayout()
self.settings_layout.addWidget(self.tabs)
self.settings_layout.addWidget(self.close_button)
self.setLayout(self.settings_layout)
def create_componentConnections(self):
self.settingsTab.neutralPose_checkBox.stateChanged.connect(
partial(self.updateCheck,
self.settingsTab.neutralPose_checkBox,
"neutralpose"))
self.settingsTab.overrideNegate_checkBox.stateChanged.connect(
partial(self.updateCheck,
self.settingsTab.overrideNegate_checkBox,
"overrideNegate"))
self.settingsTab.keepLength_checkBox.stateChanged.connect(
partial(self.updateCheck,
self.settingsTab.keepLength_checkBox,
"keepLength"))
self.settingsTab.overrideJntNb_checkBox.stateChanged.connect(
partial(self.updateCheck,
self.settingsTab.overrideJntNb_checkBox,
"overrideJntNb"))
self.settingsTab.jntNb_spinBox.valueChanged.connect(
partial(self.updateSpinBox,
self.settingsTab.jntNb_spinBox,
"jntNb"))
self.settingsTab.extraTweak_checkBox.stateChanged.connect(
partial(self.updateCheck,
self.settingsTab.extraTweak_checkBox,
"extraTweak"))
def dockCloseEventTriggered(self):
pyqt.deleteInstances(self, MayaQDockWidget)
| [
"miquel.campos@gmail.com"
] | miquel.campos@gmail.com |
f1d583cd5a8a3a870e8664adc5fb87da3b4769f1 | 24cf6992d9c9b8523a0a7d3a7a45e701cd49fce3 | /djangotest/base/migrations/0017_auto_20200901_1402.py | da6b531e64b9f8dfb91ac1b8f9d08a3dd8c75441 | [] | no_license | yannickkiki/stuffs | f4e150a61eb0426791753f5da558dba09940d240 | d46e1ec56eb4f0f3486e72ffce5c7bba7f2a1796 | refs/heads/master | 2023-08-10T20:05:38.255427 | 2021-10-04T05:38:39 | 2021-10-04T05:38:39 | 353,716,213 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | # Generated by Django 2.2.11 on 2020-09-01 14:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('base', '0016_auto_20200901_1233'),
]
operations = [
migrations.RemoveField(
model_name='foxxdisplay',
name='card',
),
migrations.RemoveField(
model_name='lianacard',
name='metacard_ptr',
),
migrations.AlterField(
model_name='product',
name='metacard',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product', to='base.MetaCard'),
),
migrations.AlterField(
model_name='trelliscard',
name='metacard',
field=models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, related_name='trellis', to='base.MetaCard'),
),
migrations.DeleteModel(
name='FoxxCard',
),
migrations.DeleteModel(
name='FoxxDisplay',
),
migrations.DeleteModel(
name='LianaCard',
),
]
| [
"seyive.kiki@gmail.com"
] | seyive.kiki@gmail.com |
5dea6c310618ced92764a30c813fc80187d1ff6d | aee144770c8f4ec5987777aebe5b064e558fc474 | /doc/integrations/pytorch/projects/wizard_of_wikipedia/wizard_transformer_ranker/wizard_dict.py | 3765563494288bbf158bfd857c651ffd0d48e010 | [
"CC-BY-SA-3.0",
"Apache-2.0",
"AGPL-3.0-only",
"MIT"
] | permissive | adgang/cortx | 1d8e6314643baae0e6ee93d4136013840ead9f3b | a73e1476833fa3b281124d2cb9231ee0ca89278d | refs/heads/main | 2023-04-22T04:54:43.836690 | 2021-05-11T00:39:34 | 2021-05-11T00:39:34 | 361,394,462 | 1 | 0 | Apache-2.0 | 2021-04-25T10:12:59 | 2021-04-25T10:12:59 | null | UTF-8 | Python | false | false | 3,299 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.build_data import modelzoo_path
from parlai.core.dict import DictionaryAgent
from collections import defaultdict
import copy
import os
import re
RETOK = re.compile(r'\w+|[^\w\s]|\n', re.UNICODE)
class WizardDictAgent(DictionaryAgent):
def __init__(self, opt, shared=None):
# initialize fields
self.opt = copy.deepcopy(opt)
self.minfreq = opt.get('dict_minfreq', DictionaryAgent.default_minfreq)
self.null_token = '__PAD__'
self.end_token = '__SOC__'
self.unk_token = '__UNK__'
self.start_token = '__SOC__'
self.tokenizer = opt.get('dict_tokenizer', 'whitespace')
self.lower = opt.get('dict_lower', DictionaryAgent.default_lower)
self.maxtokens = opt.get('dict_maxtokens', DictionaryAgent.default_maxtokens)
self.textfields = opt.get(
'dict_textfields', DictionaryAgent.default_textfields
).split(",")
if shared:
self.freq = shared.get('freq', {})
self.tok2ind = shared.get('tok2ind', {})
self.ind2tok = shared.get('ind2tok', {})
else:
self.freq = defaultdict(int)
self.tok2ind = {}
self.ind2tok = {}
if opt.get('dict_file') and os.path.isfile(opt['dict_file']):
# load pre-existing dictionary
self.load(opt['dict_file'])
elif opt.get('dict_initpath'):
# load seed dictionary
opt['dict_initpath'] = modelzoo_path(
opt.get('datapath'), opt['dict_initpath']
)
self.load(opt['dict_initpath'])
self.add_token(self.null_token)
self.add_token(self.start_token)
self.add_token(self.end_token)
self.add_token(self.unk_token)
if not shared:
if opt.get('dict_file'):
self.save_path = opt['dict_file']
# cache unk token for later
self._unk_token_idx = self.tok2ind.get(self.unk_token)
def tokenize(self, text, building=False):
"""
Returns a sequence of tokens from the iterable.
"""
if self.lower:
text = text.lower()
if self.tokenizer == 're':
return self.re_tokenize(text)
elif self.tokenizer == 'whitespace':
return text.split(' ')
word_tokens = (
text.replace('.', ' . ')
.replace('. . .', '...')
.replace(',', ' , ')
.replace(';', ' ; ')
.replace(':', ' : ')
.replace('!', ' ! ')
.replace('?', ' ? ')
.replace(' ', ' ')
.replace(' ', ' ')
.strip()
.split(" ")
)
return word_tokens
def re_tokenize(self, text):
"""
This splits along whitespace and punctuation and keeps the newline as a token in
the returned list.
"""
return RETOK.findall(text)
| [
"noreply@github.com"
] | adgang.noreply@github.com |
990659f28fe89f9d10375d9edc85a48e910e91f4 | 52b5fa23f79d76883728d8de0bfd202c741e9c43 | /kubernetes/client/models/v1beta2_scale_status.py | af1d075c65c7c657052a68400f1a3e347c20cab5 | [] | no_license | kippandrew/client-python-tornado | 5d00810f57035825a84e37ff8fc89a7e79aed8da | d479dfeb348c5dd2e929327d800fe033b5b3b010 | refs/heads/master | 2021-09-04T13:01:28.275677 | 2018-01-18T23:27:34 | 2018-01-18T23:27:34 | 114,912,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,119 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.8.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1beta2ScaleStatus(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'replicas': 'int',
'selector': 'dict(str, str)',
'target_selector': 'str'
}
attribute_map = {
'replicas': 'replicas',
'selector': 'selector',
'target_selector': 'targetSelector'
}
def __init__(self, replicas=None, selector=None, target_selector=None): # noqa: E501
"""V1beta2ScaleStatus - a model defined in Swagger""" # noqa: E501
self._replicas = None
self._selector = None
self._target_selector = None
self.discriminator = None
self.replicas = replicas
if selector is not None:
self.selector = selector
if target_selector is not None:
self.target_selector = target_selector
@property
def replicas(self):
"""Gets the replicas of this V1beta2ScaleStatus. # noqa: E501
actual number of observed instances of the scaled object. # noqa: E501
:return: The replicas of this V1beta2ScaleStatus. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1beta2ScaleStatus.
actual number of observed instances of the scaled object. # noqa: E501
:param replicas: The replicas of this V1beta2ScaleStatus. # noqa: E501
:type: int
"""
if replicas is None:
raise ValueError("Invalid value for `replicas`, must not be `None`") # noqa: E501
self._replicas = replicas
@property
def selector(self):
"""Gets the selector of this V1beta2ScaleStatus. # noqa: E501
label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors # noqa: E501
:return: The selector of this V1beta2ScaleStatus. # noqa: E501
:rtype: dict(str, str)
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1beta2ScaleStatus.
label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors # noqa: E501
:param selector: The selector of this V1beta2ScaleStatus. # noqa: E501
:type: dict(str, str)
"""
self._selector = selector
@property
def target_selector(self):
"""Gets the target_selector of this V1beta2ScaleStatus. # noqa: E501
label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors # noqa: E501
:return: The target_selector of this V1beta2ScaleStatus. # noqa: E501
:rtype: str
"""
return self._target_selector
@target_selector.setter
def target_selector(self, target_selector):
"""Sets the target_selector of this V1beta2ScaleStatus.
label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors # noqa: E501
:param target_selector: The target_selector of this V1beta2ScaleStatus. # noqa: E501
:type: str
"""
self._target_selector = target_selector
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2ScaleStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"andy@rstudio.com"
] | andy@rstudio.com |
ff25fb6064326423077dde69fc7e8673e9a34bff | 1c2428489013d96ee21bcf434868358312f9d2af | /ultracart/models/conversation_twilio_account.py | a649021e1f36c58540847bbbd1732a8064137324 | [
"Apache-2.0"
] | permissive | UltraCart/rest_api_v2_sdk_python | 7821a0f6e0e19317ee03c4926bec05972900c534 | 8529c0bceffa2070e04d467fcb2b0096a92e8be4 | refs/heads/master | 2023-09-01T00:09:31.332925 | 2023-08-31T12:52:10 | 2023-08-31T12:52:10 | 67,047,356 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,199 | py | # coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ConversationTwilioAccount(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'merchant_id': 'str',
'twilio_phone_numbers': 'list[str]'
}
attribute_map = {
'merchant_id': 'merchant_id',
'twilio_phone_numbers': 'twilio_phone_numbers'
}
def __init__(self, merchant_id=None, twilio_phone_numbers=None): # noqa: E501
"""ConversationTwilioAccount - a model defined in Swagger""" # noqa: E501
self._merchant_id = None
self._twilio_phone_numbers = None
self.discriminator = None
if merchant_id is not None:
self.merchant_id = merchant_id
if twilio_phone_numbers is not None:
self.twilio_phone_numbers = twilio_phone_numbers
@property
def merchant_id(self):
"""Gets the merchant_id of this ConversationTwilioAccount. # noqa: E501
:return: The merchant_id of this ConversationTwilioAccount. # noqa: E501
:rtype: str
"""
return self._merchant_id
@merchant_id.setter
def merchant_id(self, merchant_id):
"""Sets the merchant_id of this ConversationTwilioAccount.
:param merchant_id: The merchant_id of this ConversationTwilioAccount. # noqa: E501
:type: str
"""
self._merchant_id = merchant_id
@property
def twilio_phone_numbers(self):
"""Gets the twilio_phone_numbers of this ConversationTwilioAccount. # noqa: E501
:return: The twilio_phone_numbers of this ConversationTwilioAccount. # noqa: E501
:rtype: list[str]
"""
return self._twilio_phone_numbers
@twilio_phone_numbers.setter
def twilio_phone_numbers(self, twilio_phone_numbers):
"""Sets the twilio_phone_numbers of this ConversationTwilioAccount.
:param twilio_phone_numbers: The twilio_phone_numbers of this ConversationTwilioAccount. # noqa: E501
:type: list[str]
"""
self._twilio_phone_numbers = twilio_phone_numbers
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ConversationTwilioAccount, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConversationTwilioAccount):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"perry@ultracart.com"
] | perry@ultracart.com |
10639ce27fe471d8d20685065f3271f45c21380c | 4eac217f1a9c175ee370d83446c6ae763c69a26f | /Level 1/Task3.py | 2cbc0809746a1495e4be9f85b91b33bc3ff7de91 | [] | no_license | abhinavsharma629/Data-Structures-And-Algorithms-Udacity-Nanodegree | 411a74f042a671c6e7d3123bc63716d2d3748cc6 | 6e7645a9afb6065d12524a94734aeda022438f10 | refs/heads/master | 2022-11-06T00:49:11.863218 | 2020-06-20T11:52:42 | 2020-06-20T11:52:42 | 273,698,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,074 | py | """
Read file into texts and calls.
It's ok if you don't understand how to read files.
"""
import csv
with open('texts.csv', 'r') as f:
reader = csv.reader(f)
texts = list(reader)
def checkCaller(phone_no):
if(len(phone_no.split("("))>0):
return True
def checkCaller(phone_no):
if(phone_no[0]=="("):
return True if phone_no.split("(")[1].split(")")[0]=="080" else False
return False
def getReceiver(phone_no):
# if telephone no
if(phone_no[0]=="("):
return phone_no.split("(")[1].split(")")[0]
# if mobile no
elif(len(phone_no.split(" "))>0):
return phone_no.split(" ")[0][0:4]
# if telemarketers no
else:
return phone_no.split("140")[1]
list_of_codes=set([])
calls_total=0
call_and_receive_total=0
with open('calls.csv', 'r') as f:
reader = csv.reader(f)
calls = list(reader)
for call in calls:
# check if caller from banglore
isCallerFromBanglore=checkCaller(call[0])
getReceiverNo=getReceiver(call[1])
# if caller from banglore
if(isCallerFromBanglore):
list_of_codes.add(getReceiverNo)
# check if receiver from banglore
isReceiverFromBanglore=checkCaller(call[1])
if(isReceiverFromBanglore):
# inc banglore -> banglore calls count
call_and_receive_total+=1
# inc total banglore calls count
calls_total+=1
print("The numbers called by people in Bangalore have codes:")
list_of_codes=sorted(list_of_codes)
for list_code in list_of_codes:
print(list_code)
percent=round((float)((call_and_receive_total/calls_total))*100,2)
print("{} percent of calls from fixed lines in Bangalore are calls to other fixed lines in Bangalore.".format(percent))
"""
TASK 3:
(080) is the area code for fixed line telephones in Bangalore.
Fixed line numbers include parentheses, so Bangalore numbers
have the form (080)xxxxxxx.)
Part A: Find all of the area codes and mobile prefixes called by people
in Bangalore.
- Fixed lines start with an area code enclosed in brackets. The area
codes vary in length but always begin with 0.
- Mobile numbers have no parentheses, but have a space in the middle
of the number to help readability. The prefix of a mobile number
is its first four digits, and they always start with 7, 8 or 9.
- Telemarketers' numbers have no parentheses or space, but they start
with the area code 140.
Print the answer as part of a message:
"The numbers called by people in Bangalore have codes:"
<list of codes>
The list of codes should be print out one per line in lexicographic order with no duplicates.
Part B: What percentage of calls from fixed lines in Bangalore are made
to fixed lines also in Bangalore? In other words, of all the calls made
from a number starting with "(080)", what percentage of these calls
were made to a number also starting with "(080)"?
Print the answer as a part of a message::
"<percentage> percent of calls from fixed lines in Bangalore are calls
to other fixed lines in Bangalore."
The percentage should have 2 decimal digits
"""
| [
"abhinavsharma629@gmail.com"
] | abhinavsharma629@gmail.com |
f352d8ce22dd736ee5f0204bbde9717188e6d87c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_groped.py | 9e6f4052a029097be877a2408813e55edef8a879 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.nouns._grope import _GROPE
#calss header
class _GROPED(_GROPE, ):
def __init__(self,):
_GROPE.__init__(self)
self.name = "GROPED"
self.specie = 'nouns'
self.basic = "grope"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
c326f428fae13c4af9cb46ab323d1a372aa587a4 | b8a9b1204627c7d6b4123f4dba54631251e27f49 | /accounts/migrations/0006_auto_20210313_1624.py | b672084659b2994715f8cc3ffbc278a53f407904 | [] | no_license | ianmanalo1026/Coffee-Shop | 53aee5b4ff26294ead1808006c7d9ec258aca8d9 | f61a94ee416aed436d236c7243625417c7214479 | refs/heads/master | 2023-03-16T12:24:02.354223 | 2021-03-16T14:10:23 | 2021-03-16T14:10:23 | 347,078,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | # Generated by Django 3.1.4 on 2021-03-13 08:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_auto_20210313_1606'),
]
operations = [
migrations.RenameField(
model_name='profile',
old_name='first_name',
new_name='name',
),
migrations.RemoveField(
model_name='profile',
name='last_name',
),
]
| [
"ian.manalo1026@gmail.com"
] | ian.manalo1026@gmail.com |
80410beb2f4850b79e647b255bfd7626e96e2884 | d3fa61d28cdc0c515ebd4f610122a9141cf69471 | /gui/demoDlg-21.py | e155e49b73365362dea088ee8e5824694470688d | [] | no_license | iorilan/python-samples | 0bd2d66461bc5580de607c5e9984f713bc506c56 | 1db836d90731763e30a109c28948734727194232 | refs/heads/master | 2022-04-05T03:06:18.830332 | 2020-02-23T16:49:06 | 2020-02-23T16:49:06 | 213,014,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | from tkinter import * # get base widget set
from dialogTable import demos # button callback handlers
from quitter import Quitter # attach a quit object to me
class Demo(Frame):
def __init__(self, parent=None):
Frame.__init__(self, parent)
self.pack()
Label(self, text="Basic demos").pack()
for key in demos:
func = (lambda key=key: self.printit(key))
#func = (lambda self=self, name=key: self.printit(name))
#func = (lambda handler=self.printit, name=key: handler(name))
Button(self, text=key, command=func).pack(side=TOP, fill=BOTH)
Quitter(self).pack(side=TOP, fill=BOTH)
def printit(self, name):
print(name, 'returns =>', demos[name]()) # fetch, call, print
if __name__ == '__main__': Demo().mainloop() | [
"iorilan@hotmail.com"
] | iorilan@hotmail.com |
f432c581c95ce2dab16294b0388c1934ff86ade5 | 9abd182d02355ddf0b79afd4a35f7127a4a66f7a | /tests/auto/test_auto_tasks.py | 31cf99827d6d2993bc4ec767aa5e9f469a8fa5ce | [
"Apache-2.0"
] | permissive | dmlc/gluon-cv | e1303086419a5733661d0fcb9095c09d4f2382ad | 567775619f3b97d47e7c360748912a4fd883ff52 | refs/heads/master | 2023-07-19T12:02:36.824294 | 2023-01-19T00:37:33 | 2023-01-19T00:37:33 | 122,896,249 | 6,064 | 1,458 | Apache-2.0 | 2023-01-19T00:37:35 | 2018-02-26T01:33:21 | Python | UTF-8 | Python | false | false | 3,054 | py | from gluoncv.auto.tasks import ImageClassification
from gluoncv.auto.tasks import ObjectDetection
import autogluon.core as ag
import time
from nose.tools import nottest
IMAGE_CLASS_DATASET, _, IMAGE_CLASS_TEST = ImageClassification.Dataset.from_folders(
'https://autogluon.s3.amazonaws.com/datasets/shopee-iet.zip')
OBJECT_DETCTION_DATASET = ObjectDetection.Dataset.from_voc('https://autogluon.s3.amazonaws.com/datasets/tiny_motorbike.zip')
OBJECT_DETECTION_TRAIN, OBJECT_DETECTION_VAL, OBJECT_DETECTION_TEST = OBJECT_DETCTION_DATASET.random_split(val_size=0.3, test_size=0.2)
def test_image_classification():
from gluoncv.auto.tasks import ImageClassification
task = ImageClassification({'model': 'resnet18_v1', 'num_trials': 1, 'epochs': 1, 'batch_size': 8})
classifier = task.fit(IMAGE_CLASS_DATASET)
assert task.fit_summary().get('valid_acc', 0) > 0
test_result = classifier.predict(IMAGE_CLASS_TEST)
def test_image_classification_custom_net():
from gluoncv.auto.tasks import ImageClassification
from gluoncv.model_zoo import get_model
net = get_model('resnet18_v1')
task = ImageClassification({'num_trials': 1, 'epochs': 1, 'custom_net': net, 'batch_size': 8})
classifier = task.fit(IMAGE_CLASS_DATASET)
assert task.fit_summary().get('valid_acc', 0) > 0
test_result = classifier.predict(IMAGE_CLASS_TEST)
def test_object_detection_estimator():
from gluoncv.auto.tasks import ObjectDetection
task = ObjectDetection({'num_trials': 1, 'epochs': 1, 'batch_size': 4})
detector = task.fit(OBJECT_DETECTION_TRAIN)
assert task.fit_summary().get('valid_map', 0) > 0
test_result = detector.predict(OBJECT_DETECTION_TEST)
def test_object_detection_estimator_transfer():
from gluoncv.auto.tasks import ObjectDetection
task = ObjectDetection({'num_trials': 1, 'epochs': 1, 'transfer': ag.Categorical('yolo3_darknet53_coco', 'ssd_512_resnet50_v1_voc'), 'estimator': 'ssd', 'batch_size': 4})
detector = task.fit(OBJECT_DETECTION_TRAIN)
assert task.fit_summary().get('valid_map', 0) > 0
test_result = detector.predict(OBJECT_DETECTION_TEST)
import unittest
@unittest.skip("temporarily disabled")
def test_time_out_image_classification():
time_limit = 15
from gluoncv.auto.tasks import ImageClassification
task = ImageClassification({'num_trials': 1, 'epochs': 10, 'batch_size': 8})
tic = time.time()
classifier = task.fit(IMAGE_CLASS_DATASET, time_limit=time_limit)
# check time_limit with a little bit overhead
assert (time.time() - tic) < time_limit + 180
@unittest.skip("temporarily disabled")
def test_time_out_detection():
time_limit = 15
from gluoncv.auto.tasks import ObjectDetection
task = ObjectDetection({'num_trials': 1, 'epochs': 5, 'time_limits': time_limit, 'batch_size': 4})
tic = time.time()
detector = task.fit(OBJECT_DETECTION_TRAIN)
# check time_limit with a little bit overhead
assert (time.time() - tic) < time_limit + 180
if __name__ == '__main__':
import nose
nose.runmodule()
| [
"noreply@github.com"
] | dmlc.noreply@github.com |
8b283162d9edbf8dca0e7c46dc70bd9b59e8967e | 153fb205395605f631e92950fc86ba205bd85665 | /wagtail/wagtailcore/blocks/__init__.py | 1b7a1740cfd2dc54b81002c49016129a51f88256 | [
"BSD-3-Clause"
] | permissive | YoungSphere/Wagtail_Young | 8e385ab37263acf4b609bb6aa1f75d3e9035eee0 | 536b137446ef5bff464cbe8a82175ba099d4a15a | refs/heads/master | 2020-04-23T07:10:45.479469 | 2019-02-16T14:09:24 | 2019-02-16T14:09:24 | 170,998,679 | 0 | 0 | BSD-3-Clause | 2019-02-16T14:05:12 | 2019-02-16T11:55:39 | Python | UTF-8 | Python | false | false | 334 | py | from __future__ import absolute_import
# Import block types defined in submodules into the wagtail.wagtailcore.blocks namespace
from .base import * # NOQA
from .field_block import * # NOQA
from .struct_block import * # NOQA
from .list_block import * # NOQA
from .stream_block import * # NOQA
from .static_block import * # NOQA
| [
"nikhil684@gmail.com"
] | nikhil684@gmail.com |
56d15c6806ad5e594f7fc3174603378618fc75f9 | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/arc/arc086/D/answers/859172_ahho.py | 9dd224c8cbab6c1535202a94895bc8989a7d1f82 | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | N = int(input())
A = list(map(int,input().split()))
# max absolute value
_,mi = max((abs(v),i) for i,v in enumerate(A))
mi += 1
print(2*N-2)
for i in range(1,N+1):
if i != mi:
print(mi,i)
if A[mi-1] > 0:
for i in range(1,N):
print(i,i+1)
else:
for i in reversed(range(1,N)):
print(i+1,i)
| [
"kojinho10@gmail.com"
] | kojinho10@gmail.com |
7a88d194dc4f6647d5d15e898799151518385985 | f131222013fd1c23bf23a9af44dbaf2cd2dfbe72 | /python 好用库/lib/dundeemt-pysftp-ad3aefc8ec42/dundeemt-pysftp-ad3aefc8ec42/tests/test_walktree.py | 467b7fe50b5df8c3aaf901fec09749cb730bcb4a | [] | no_license | shortdistance/workdir | e0bdadcb9d6b5e61e62434d574afad36afa60ba9 | 7c4a23fdbb8ae14b67aeda47ce53be1bd24ae2d1 | refs/heads/master | 2021-01-19T23:21:40.885964 | 2017-04-21T12:55:45 | 2017-04-21T12:55:45 | 88,968,923 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,680 | py | '''test pysftp.Connection.walktree and pysftp.walktree - uses py.test'''
from __future__ import print_function
# pylint: disable = W0142
from common import *
from mock import Mock, call
def test_walktree_cbclass():
'''test the walktree function with callbacks from a class'''
with pysftp.Connection(**SFTP_PUBLIC) as sftp:
wtcb = pysftp.WTCallbacks()
sftp.walktree('.',
fcallback=wtcb.file_cb,
dcallback=wtcb.dir_cb,
ucallback=wtcb.unk_cb)
assert './pub/build/build01/build01a/build-results.txt' in wtcb.flist
assert './readme.txt' in wtcb.flist
assert len(wtcb.flist) > 3
dlist = ['./pub', './pub/build', './pub/build/build01',
'./pub/build/build01/build01a', './pub/build/build01/build01b',
'./pub/build/build01/build01c', './pub/example', './pub/src',
'./pub/src/libs', './pub/src/media', './pub/src/tests']
assert wtcb.dlist == dlist
assert wtcb.ulist == []
def test_walktree_cbmock():
'''test the walktree function, with mocked callbacks (standalone functions)
'''
file_cb = Mock(return_value=None)
dir_cb = Mock(return_value=None)
unk_cb = Mock(return_value=None)
with pysftp.Connection(**SFTP_PUBLIC) as sftp:
sftp.walktree('.',
fcallback=file_cb,
dcallback=dir_cb,
ucallback=unk_cb)
# check calls to the file callback
file_cb.assert_called_with('./readme.txt')
thecall = call('./pub/build/build01/build01a/build-results.txt')
assert thecall in file_cb.mock_calls
assert file_cb.call_count > 3
# check calls to the directory callback
assert [call('./pub'),
call('./pub/build'),
call('./pub/build/build01'),
call('./pub/build/build01/build01a'),
call('./pub/build/build01/build01b'),
call('./pub/build/build01/build01c'),
call('./pub/example'),
call('./pub/src'),
call('./pub/src/libs'),
call('./pub/src/media'),
call('./pub/src/tests')] == dir_cb.mock_calls
# check calls to the unknown callback
assert [] == unk_cb.mock_calls
def test_walktree_no_recurse():
'''test the walktree function, with mocked callbacks (standalone functions)
'''
file_cb = Mock(return_value=None)
dir_cb = Mock(return_value=None)
unk_cb = Mock(return_value=None)
with pysftp.Connection(**SFTP_PUBLIC) as sftp:
sftp.walktree('.',
fcallback=file_cb,
dcallback=dir_cb,
ucallback=unk_cb,
recurse=False)
# check calls to the file callback
file_cb.assert_called_with('./readme.txt')
thecall = call('./readme.sym')
assert thecall in file_cb.mock_calls
assert file_cb.call_count == 2
# check calls to the directory callback
assert [call('./pub'),] == dir_cb.mock_calls
# check calls to the unknown callback
assert [] == unk_cb.mock_calls
def test_walktree_local():
    '''test the capability of walktree to walk a local directory structure'''
    cbacks = pysftp.WTCallbacks()
    pysftp.walktree('.',
                    fcallback=cbacks.file_cb,
                    dcallback=cbacks.dir_cb,
                    ucallback=cbacks.unk_cb)
    print(cbacks.dlist)
    # both project directories must have been visited
    for dname in ('./docs', './tests'):
        assert dname in cbacks.dlist
    print(cbacks.ulist)
    assert cbacks.ulist == []
    print(cbacks.flist)
    # files at the top level and nested inside tests/ must all be present
    for fname in ('./release.sh', './MANIFEST.in', './tests/test_execute.py'):
        assert fname in cbacks.flist
def test_walktree_local_no_recurse():
    '''test the capability of walktree with recurse=False to walk a local
    directory structure'''
    cbacks = pysftp.WTCallbacks()
    pysftp.walktree('.',
                    fcallback=cbacks.file_cb,
                    dcallback=cbacks.dir_cb,
                    ucallback=cbacks.unk_cb,
                    recurse=False)
    print(cbacks.dlist)
    for dname in ('./docs', './tests'):
        assert dname in cbacks.dlist
    print(cbacks.ulist)
    assert cbacks.ulist == []
    print(cbacks.flist)
    for fname in ('./release.sh', './MANIFEST.in'):
        assert fname in cbacks.flist
    # nested files must NOT be visited when recursion is off
    assert './tests/test_execute.py' not in cbacks.flist
def test_walktree_local_bad():
    '''test pysftp.walktree on a non-existing directory'''
    cbacks = pysftp.WTCallbacks()
    # a missing root must surface as OSError rather than silently doing nothing
    with pytest.raises(OSError):
        pysftp.walktree('/non-existing',
                        fcallback=cbacks.file_cb,
                        dcallback=cbacks.dir_cb,
                        ucallback=cbacks.unk_cb)
| [
"zhanglei520vip@163.com"
] | zhanglei520vip@163.com |
8e53fc2821f50c18518010717b0e82b25950cac2 | 89155ebee895cbd04e4eb7a9d079a820d90ffd7e | /viewset_modelviewset_application/viewset_modelviewset_application/settings.py | 9861cd4ac9f55a125dc627f00b6bae60651a2efc | [] | no_license | mahmudgithub/Rest-api-playground | 822c0671b534fc057461703711ef980d9d31ce56 | a452a329d60c9104afdeadde13f7493741e4914a | refs/heads/master | 2023-03-31T17:23:13.605754 | 2021-04-11T14:10:31 | 2021-04-11T14:10:31 | 331,842,045 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,851 | py | import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; for any real
# deployment it should be loaded from the environment instead.
SECRET_KEY = '9lzw6zp^-1(=b#u!$w%7x(7_$7alx_nrvz4kd+gkl$&1q1!%m2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'app',  # local project application
    'rest_framework'  # Django REST framework (used by the viewset demo)
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'viewset_modelviewset_application.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'viewset_modelviewset_application.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Development default: a file-based SQLite database next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"mahmudhossain838@gmail.com"
] | mahmudhossain838@gmail.com |
adb170a554dd4d70c7c27dcdfa73e45fe7a48dd5 | 63304bd3fd27aca73e949579a732e183ba0c88af | /httprunner/client.py | fb3161acdfbff488d6136512fa89db89b25b4442 | [
"MIT"
] | permissive | janice1027/HttpRunner | 330de17485654041cf2c07022c8860364d742362 | 7fa1057f1675dc73640bb90c4a22e8811153226a | refs/heads/master | 2021-04-27T11:50:58.682691 | 2018-02-22T10:53:24 | 2018-02-22T10:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,481 | py | import json
import re
import time
import requests
import urllib3
from httprunner import logger
from httprunner.exception import ParamsError
from requests import Request, Response
from requests.exceptions import (InvalidSchema, InvalidURL, MissingSchema,
RequestException)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
absolute_http_url_regexp = re.compile(r"^https?://", re.I)
def prepare_kwargs(method, kwargs):
    """ Mutate request kwargs in place before the request is sent.

    For POST requests declaring a JSON content type, the ``data`` payload
    is serialized to a JSON string so ``requests`` sends a real JSON body
    instead of form-encoding the dict.

    @param method: upper-case HTTP method name, e.g. "POST"
    @param kwargs: keyword arguments about to be passed to requests
    """
    if method == "POST":
        # if request content-type is application/json, request data should be dumped
        # HTTP header names are case-insensitive: accept "Content-Type" as
        # well as "content-type" (the old lookup only matched lower-case).
        content_type = ""
        for header_name, header_value in kwargs.get("headers", {}).items():
            if header_name.lower() == "content-type":
                content_type = header_value
                break
        if content_type.startswith("application/json") and "data" in kwargs:
            data = kwargs["data"]
            # an already-serialized payload must not be dumped a second time
            if not isinstance(data, (str, bytes)):
                kwargs["data"] = json.dumps(data)
class ApiResponse(Response):
    """Response subclass that re-raises a transport error stored on it."""

    def raise_for_status(self):
        # surface any exception captured during safe-mode sending first,
        # then fall back to the regular HTTP status check
        err = getattr(self, 'error', None)
        if err:
            raise err
        Response.raise_for_status(self)
class HttpSession(requests.Session):
    """
    Class for performing HTTP requests and holding (session-) cookies between requests (in order
    to be able to log in and out of websites). Each request is logged so that HttpRunner can
    display statistics.
    This is a slightly extended version of `python-request <http://python-requests.org>`_'s
    :py:class:`requests.Session` class and mostly this class works exactly the same. However
    the methods for making requests (get, post, delete, put, head, options, patch, request)
    can now take a *url* argument that's only the path part of the URL, in which case the host
    part of the URL will be prepended with the HttpSession.base_url which is normally inherited
    from a HttpRunner class' host property.
    """
    def __init__(self, base_url=None, *args, **kwargs):
        # base_url: host prefix prepended to relative request paths by _build_url()
        super(HttpSession, self).__init__(*args, **kwargs)
        self.base_url = base_url if base_url else ""
    def _build_url(self, path):
        """ prepend url with hostname unless it's already an absolute URL """
        if absolute_http_url_regexp.match(path):
            return path
        elif self.base_url:
            return "%s%s" % (self.base_url, path)
        else:
            # a relative path cannot be resolved without a configured base url
            raise ParamsError("base url missed!")
    def request(self, method, url, name=None, **kwargs):
        """
        Constructs and sends a :py:class:`requests.Request`.
        Returns :py:class:`requests.Response` object.
        :param method:
            method for the new :class:`Request` object.
        :param url:
            URL for the new :class:`Request` object.
        :param name: (optional)
            Placeholder, make compatible with Locust's HttpSession
        :param params: (optional)
            Dictionary or bytes to be sent in the query string for the :class:`Request`.
        :param data: (optional)
            Dictionary or bytes to send in the body of the :class:`Request`.
        :param headers: (optional)
            Dictionary of HTTP Headers to send with the :class:`Request`.
        :param cookies: (optional)
            Dict or CookieJar object to send with the :class:`Request`.
        :param files: (optional)
            Dictionary of ``'filename': file-like-objects`` for multipart encoding upload.
        :param auth: (optional)
            Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional)
            How long to wait for the server to send data before giving up, as a float, or \
            a (`connect timeout, read timeout <user/advanced.html#timeouts>`_) tuple.
        :type timeout: float or tuple
        :param allow_redirects: (optional)
            Set to True by default.
        :type allow_redirects: bool
        :param proxies: (optional)
            Dictionary mapping protocol to the URL of the proxy.
        :param stream: (optional)
            whether to immediately download the response content. Defaults to ``False``.
        :param verify: (optional)
            if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
        :param cert: (optional)
            if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
        """
        # prepend url with hostname unless it's already an absolute URL
        url = self._build_url(url)
        logger.log_info("{method} {url}".format(method=method, url=url))
        logger.log_debug("request kwargs: {kwargs}".format(kwargs=kwargs))
        # store meta data that is used when reporting the request to locust's statistics
        request_meta = {}
        # set up pre_request hook for attaching meta data to the request object
        request_meta["method"] = method
        request_meta["start_time"] = time.time()
        if "httpntlmauth" in kwargs:
            # optional dependency: imported lazily, only when NTLM auth is requested
            from requests_ntlm import HttpNtlmAuth
            auth_account = kwargs.pop("httpntlmauth")
            kwargs["auth"] = HttpNtlmAuth(
                auth_account["username"], auth_account["password"])
        # default request timeout of 120 seconds unless the caller overrides it
        kwargs.setdefault("timeout", 120)
        response = self._send_request_safe_mode(method, url, **kwargs)
        # if redirects occurred, report the path of the FIRST request in the chain
        request_meta["url"] = (response.history and response.history[0] or response)\
            .request.path_url
        # record the consumed time
        request_meta["response_time"] = int((time.time() - request_meta["start_time"]) * 1000)
        # get the length of the content, but if the argument stream is set to True, we take
        # the size from the content-length header, in order to not trigger fetching of the body
        if kwargs.get("stream", False):
            request_meta["content_size"] = int(response.headers.get("content-length") or 0)
        else:
            request_meta["content_size"] = len(response.content or "")
        request_meta["request_headers"] = response.request.headers
        request_meta["request_body"] = response.request.body
        request_meta["status_code"] = response.status_code
        request_meta["response_headers"] = response.headers
        request_meta["response_content"] = response.content
        logger.log_debug("response status_code: {}".format(response.status_code))
        logger.log_debug("response headers: {}".format(response.headers))
        logger.log_debug("response body: {}".format(response.text))
        try:
            # log HTTP-level failures but still return the response to the caller
            response.raise_for_status()
        except RequestException as e:
            logger.log_error(u"{exception}".format(exception=str(e)))
        else:
            logger.log_info(
                """status_code: {}, response_time: {} ms, response_length: {} bytes"""\
                .format(request_meta["status_code"], request_meta["response_time"], \
                request_meta["content_size"]))
        return response
    def _send_request_safe_mode(self, method, url, **kwargs):
        """
        Send a HTTP request, and catch any exception that might occur due to connection problems.
        Safe mode has been removed from requests 1.x.
        """
        try:
            prepare_kwargs(method, kwargs)
            return requests.Session.request(self, method, url, **kwargs)
        except (MissingSchema, InvalidSchema, InvalidURL):
            # malformed URLs are programming errors -- let them propagate
            raise
        except RequestException as ex:
            # connection-level failures become a synthetic response carrying
            # the exception, re-raised later by ApiResponse.raise_for_status()
            resp = ApiResponse()
            resp.error = ex
            resp.status_code = 0  # with this status_code, content returns None
            resp.request = Request(method, url).prepare()
            return resp
| [
"httprunner"
] | httprunner |
b8fc676b24f897ed85ac06ddd8a5dcf79961772a | bc441bb06b8948288f110af63feda4e798f30225 | /api_gateway_sdk/model/easy_flow/deploy_batch_pb2.py | d10e3afba86214003b6501929a438872c85b6ba4 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 5,564 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: deploy_batch.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from api_gateway_sdk.model.easy_flow import deploy_target_pb2 as api__gateway__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='deploy_batch.proto',
package='easy_flow',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flow'),
serialized_pb=_b('\n\x12\x64\x65ploy_batch.proto\x12\teasy_flow\x1a\x33\x61pi_gateway_sdk/model/easy_flow/deploy_target.proto\"\xbe\x01\n\x0b\x44\x65ployBatch\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x10\n\x08\x62\x61tchNum\x18\x02 \x01(\x05\x12\x15\n\rbatchInterval\x18\x03 \x01(\x05\x12/\n\x07\x62\x61tches\x18\x04 \x03(\x0b\x32\x1e.easy_flow.DeployBatch.Batches\x12\x12\n\nfailedStop\x18\x05 \x01(\x08\x1a\x33\n\x07\x42\x61tches\x12(\n\x07targets\x18\x01 \x03(\x0b\x32\x17.easy_flow.DeployTargetBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flowb\x06proto3')
,
dependencies=[api__gateway__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2.DESCRIPTOR,])
_DEPLOYBATCH_BATCHES = _descriptor.Descriptor(
name='Batches',
full_name='easy_flow.DeployBatch.Batches',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='targets', full_name='easy_flow.DeployBatch.Batches.targets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=226,
serialized_end=277,
)
_DEPLOYBATCH = _descriptor.Descriptor(
name='DeployBatch',
full_name='easy_flow.DeployBatch',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='easy_flow.DeployBatch.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchNum', full_name='easy_flow.DeployBatch.batchNum', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchInterval', full_name='easy_flow.DeployBatch.batchInterval', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batches', full_name='easy_flow.DeployBatch.batches', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='failedStop', full_name='easy_flow.DeployBatch.failedStop', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DEPLOYBATCH_BATCHES, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=87,
serialized_end=277,
)
_DEPLOYBATCH_BATCHES.fields_by_name['targets'].message_type = api__gateway__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2._DEPLOYTARGET
_DEPLOYBATCH_BATCHES.containing_type = _DEPLOYBATCH
_DEPLOYBATCH.fields_by_name['batches'].message_type = _DEPLOYBATCH_BATCHES
DESCRIPTOR.message_types_by_name['DeployBatch'] = _DEPLOYBATCH
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeployBatch = _reflection.GeneratedProtocolMessageType('DeployBatch', (_message.Message,), {
'Batches' : _reflection.GeneratedProtocolMessageType('Batches', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYBATCH_BATCHES,
'__module__' : 'deploy_batch_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployBatch.Batches)
})
,
'DESCRIPTOR' : _DEPLOYBATCH,
'__module__' : 'deploy_batch_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployBatch)
})
_sym_db.RegisterMessage(DeployBatch)
_sym_db.RegisterMessage(DeployBatch.Batches)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
5ad76af557aa31011c1d9fd48b790dd553e175b8 | 02d1d89ed3c2a71a4f5a36f3a19f0683a0ae37e5 | /navigation/terrain_id/terrain_training/build/milk/milk/tests/test_perceptron.py | 6069de0370dbe07b08d521102bee703edf4c253c | [
"MIT"
] | permissive | lforet/robomow | 49dbb0a1c873f75e11228e24878b1e977073118b | eca69d000dc77681a30734b073b2383c97ccc02e | refs/heads/master | 2016-09-06T10:12:14.528565 | 2015-05-19T16:20:24 | 2015-05-19T16:20:24 | 820,388 | 11 | 6 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | import numpy as np
from milk.supervised.perceptron import perceptron_learner
from milk.supervised import _perceptron
from milksets.yeast import load
def test_raw():
    """Train the low-level perceptron on two shifted random clusters and
    check that the final epoch makes few mistakes.

    Uses ``range`` instead of the Python 2-only ``xrange`` (which raises
    NameError on Python 3; on Python 2 the loop behaves identically).
    """
    np.random.seed(23)
    data = np.random.random((100, 10))
    # shift the second half so the two classes are (roughly) separable
    data[50:] += .5
    labels = np.repeat((0, 1), 50)
    weights = np.zeros((11))
    eta = 0.1
    # 20 training epochs of the C-level perceptron update
    for i in range(20):
        _perceptron.perceptron(data, labels, weights, eta)
    # one more epoch: the returned error count should now be small
    errs = _perceptron.perceptron(data, labels, weights, eta)
    assert errs < 10
def test_wrapper():
    """Train the perceptron learner on the yeast dataset and check the
    training error stays below 35%."""
    features, labels = load()
    labels = (labels >= 5)
    learner = perceptron_learner()
    model = learner.train(features, labels)
    # materialize predictions explicitly: on Python 3 ``map`` returns a lazy
    # iterator, and ``ndarray != iterator`` does not broadcast elementwise,
    # which silently broke the error-rate computation below.
    test = [model.apply(f) for f in features]
    assert np.mean(labels != test) < .35
| [
"laird@isotope11.com"
] | laird@isotope11.com |
e82043fb547aed02d1c9a63e9a349ebb3ecee747 | 9163d7b7f9301b4a334ced0a91e28348fdaa8882 | /other_function/generate_image_without_rule.py | d3223d3c6cfcb34fd6acd170520b6a7976211050 | [
"Apache-2.0"
] | permissive | frankiegu/generate_click_captcha | 2c9c551bec69d5c40e6a1354ec6f7dbef18e6447 | 7fdb2cafe4c2b5d0245b9b8c4fc9a8b8dee5f3a9 | refs/heads/master | 2021-03-03T14:56:30.486088 | 2019-01-03T16:03:00 | 2019-01-03T16:03:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from PIL import Image
import random
# Fill a 640x480 image with per-pixel random RGB noise (then shown below).
img = Image.new("RGB", (640, 480), (0, 255, 0))
w, h = img.size
# putdata() replaces every pixel in a single C-level call, which is far
# faster than one putpixel() call per pixel; the comprehension visits pixels
# in the same row-major order (and same random-call order) as the original
# nested (row, column) loops, so output is identical for a fixed seed.
img.putdata([(random.randint(10, 230),
              random.randint(10, 230),
              random.randint(10, 230))
             for _ in range(w * h)])
img.show() | [
"nickliqian@outlook.com"
] | nickliqian@outlook.com |
d3296ed2a784f2ba7881764db316ee68412339b7 | 60aac823e576a1c415bc25901e113ad0f52fbd9a | /abc204/b.py | a639b661cf948d6b00950b036601a1220c600983 | [] | no_license | nishiwakki/atCoder | d4209e717529ab606d0e6fceb0ce170d228b1532 | fc5a6b667aa8c11c368fc712c5633da5ebf6bdf2 | refs/heads/main | 2023-06-23T08:08:12.072211 | 2021-07-24T14:01:52 | 2021-07-24T14:01:52 | 309,862,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | # -*- coding: UTF-8 -*-
N = int(input())
A = list(map(int, input().split()))
# every pile above 10 contributes its excess over 10 to the answer
ans = sum(a - 10 for a in A if a > 10)
print(ans) | [
"glotply@gmail.com"
] | glotply@gmail.com |
54da71a35a2983c730ede3d625d2a5f53256bc8f | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/125_GetSpotlightNewPics/GetSpotlightNewPics.py | 4940becf12590fdad010000d2471096aef24c6a1 | [] | no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | # GetSpotlightNewPics
# Retrieves new Spotlight (microsoft wallpapers) pictures
#
# 2023-03-25 PV
# 2023-04-12 PV Logfile
# 2023-07-19 PV Added missing import
import datetime
import shutil
import os
from common_fs import get_files, extension_part
# Source: the Windows Spotlight asset cache; destination: wallpaper folder.
source = r'C:\Users\Pierr\AppData\Local\Packages\Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\LocalState\Assets'
dest = r'C:\Users\Pierr\OneDrive\PicturesODMisc\Papiers peints\Spotlight'
logfile = r'C:\temp\GetSpotlightNewPics-' + datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S.log')
doit = True

# Wallpapers already collected, keyed by lower-cased *.jpg file name.
known = {name.lower() for name in get_files(dest)
         if extension_part(name.lower()) == '.jpg'}

print("GetSpotlightNewPics started")
with open(logfile, 'w') as log:
    copied = 0
    for asset in get_files(source):
        target = asset.lower() + '.jpg'
        if target in known:
            continue
        print('Add', target)
        log.write(f'Add {target}\n')
        copied += 1
        if doit:
            shutil.copyfile(os.path.join(source, asset), os.path.join(dest, target))
    print('Copied:', copied)
    log.write(f'Copied: {copied}\n')
| [
"FrenchBear38@outlook.com"
] | FrenchBear38@outlook.com |
a340f7261fc62eeabc63f2815bac12c4125010b6 | 3b2940c38412e5216527e35093396470060cca2f | /top/api/rest/FuwuScoresGetRequest.py | 74b17bad19a9821410b0b86c51d54b0c0426e9c7 | [] | no_license | akingthink/goods | 842eb09daddc2611868b01ebd6e330e5dd7d50be | ffdb5868a8df5c2935fc6142edcdf4c661c84dca | refs/heads/master | 2021-01-10T14:22:54.061570 | 2016-03-04T09:48:24 | 2016-03-04T09:48:24 | 45,093,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | '''
Created by auto_sdk on 2015-01-20 12:44:31
'''
from top.api.base import RestApi
class FuwuScoresGetRequest(RestApi):
    """Request wrapper for the ``taobao.fuwu.scores.get`` TOP API."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # optional query parameters, filled in by the caller before sending
        self.current_page = None
        self.date = None
        self.page_size = None

    def getapiname(self):
        # API method name used by the TOP gateway to route the request
        return 'taobao.fuwu.scores.get'
| [
"yangwenjin@T4F-MBP-17.local"
] | yangwenjin@T4F-MBP-17.local |
997ca3426e4c754a39df1f9a351c36b3da37e50a | 64bdedbe9ede8c21f8daef2234faf248e8bcad2f | /flask_app/flask_blog/main/routes.py | 5940ba8d9dd8fefe1821b71dae8384a32317b247 | [
"MIT"
] | permissive | Ziang-Lu/Flask-Blog | c02b5f6501af2d7f55350e337b5eed6a7f3d528b | aa0aa4d019de47e122cded8d4ff637d1b6edc410 | refs/heads/master | 2023-08-05T00:59:35.440152 | 2023-07-15T07:08:54 | 2023-07-15T07:08:54 | 203,568,155 | 1 | 0 | MIT | 2023-07-25T17:50:37 | 2019-08-21T11:15:05 | Python | UTF-8 | Python | false | false | 2,022 | py | # -*- coding: utf-8 -*-
"""
Flask main-related routes module.
"""
from datetime import datetime
import requests
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import current_user
from ..utils import POST_SERVICE, get_iter_pages
# Create a main-related blueprint
main_bp = Blueprint(name='main', import_name=__name__)
@main_bp.route('/')
@main_bp.route('/home')
def home():
"""
Home page.
:return:
"""
page = request.args.get('page', type=int, default=1)
request_url = f'{POST_SERVICE}/posts?page={page}&per_page=5'
username = request.args.get('user')
if username:
# Try to fetch all the posts by all the users that this user follows as
# well as this user himself
if not current_user.is_authenticated:
flash('Please log in first.', category='danger')
return redirect(url_for('auth.login'))
elif current_user.username != username:
flash(
'You can only view your own followed posts.', category='danger'
)
return redirect(url_for('main.home', user=current_user.username))
request_url += f'&user={username}'
r = requests.get(request_url)
paginated_data = r.json()
posts_data = paginated_data['data']['posts']
# Convert the datetime strings back to objects
for post in posts_data:
post['date_posted'] = datetime.fromisoformat(post['date_posted'])
pages = paginated_data['pagination_meta']['pages']
context = {
'p': {
'items': posts_data,
'page': page,
'pages': pages,
'total': paginated_data['pagination_meta']['total'],
'iter_pages': get_iter_pages(pages, page)
}
}
return render_template('home.html', **context)
@main_bp.route('/about')
def about():
"""
About page.
:return:
"""
context = {
'title': 'About'
}
return render_template('about.html', **context)
| [
"ziangl@alumni.cmu.edu"
] | ziangl@alumni.cmu.edu |
d34a9292cb308aac1c26003f0a06be2a49244505 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02953/s447346234.py | 650a3b88da697c512c29fd85204e7c24463e8dae | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | N=int(input())
H=list(map(int,input().split()))
if N>1:
for i in range(N-1):
if H[i]<=H[i+1]-1:
H[i+1]-=1
if H[i]>H[i+1]:
print('No')
exit()
print('Yes')
else:
print('Yes') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
43c481740de68c095956f67ca9dab167d5cca6b9 | 2cf560477807e9f3e869474defda47f2638347b4 | /glitter_news/urls.py | 35d0dcf2342ae5c739e2fabf9d84b2241c1e3c0d | [] | no_license | axsapronov/django-glitter-news | 37035d45fd1edbf619659b9451184500ab2fce33 | 000d548bafa8c777a3721611ba4620173713b87d | refs/heads/master | 2021-06-06T10:06:39.859177 | 2016-04-13T11:50:18 | 2016-04-13T11:50:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | # -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views, feeds
urlpatterns = [
    # news post index
    url(r'^$', views.PostListView.as_view(), name='list'),
    # posts filtered by category slug
    url(r'^category/(?P<slug>[-\w]+)/$',
        views.PostListCategoryView.as_view(), name='post-list-category'),
    # individual post, addressed by publication date and slug
    url(r'^(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$',
        views.PostDetailView.as_view(), name='post-detail'),
    # RSS feeds: all news, and per-category
    url(r'^feed/$', feeds.NewsFeed(), name='feed'),
    url(r'^feed/(?P<slug>[-\w]+)/$', feeds.NewsCategoryFeed(),
        name='category-feed'),
]
| [
"ikonitas@gmail.com"
] | ikonitas@gmail.com |
7c3dfc04897f0d4d50c778ed9925e8a9a3c4fdb4 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p2DJ/New/R2/benchmark/startCirq84.py | a797a7c4d5d7e49cf7b5aedb3b512c541b22a87e | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,711 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=2
# total number=9
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the fixed gate sequence on *input_qubit* and return it as a
    cirq.Circuit terminated by a full measurement keyed 'result'."""
    gate_sequence = [
        cirq.H.on(input_qubit[0]),                     # number=1
        cirq.CNOT.on(input_qubit[0], input_qubit[1]),  # number=6
        cirq.X.on(input_qubit[1]),                     # number=7
        cirq.CNOT.on(input_qubit[0], input_qubit[1]),  # number=8
        cirq.CNOT.on(input_qubit[0], input_qubit[1]),  # number=4
        cirq.X.on(input_qubit[1]),                     # number=2
        cirq.X.on(input_qubit[1]),                     # number=3
    ]
    c = cirq.Circuit()  # circuit begin
    for op in gate_sequence:
        c.append(op)
    # circuit end
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Concatenate measurement values into a digit string, e.g. [1, 0] -> '10'."""
    digits = [str(int(b)) for b in bits]
    return ''.join(digits)
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]

    # build the circuit, then lower it to Sycamore-native sqrt-iSWAP gates
    circuit = make_circuit(qubit_count, input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    # sample the measurement 2000 times on the simulator
    circuit_sample_count = 2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)

    # dump the histogram and the optimized circuit to the report file
    # (closed by the trailing writefile.close())
    writefile = open("../data/startCirq84.csv", "w+")
    print(format(frequencies), file=writefile)
    print("results end", file=writefile)
    print(len(circuit), file=writefile)
    print(circuit, file=writefile)
writefile.close() | [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
f8bf83673f352023ed8c7419fc205c8eee6cfc42 | e9f096e564afc9f0cfabaeaac67c2ff2b1c46d24 | /pymysql01/update.py | db767436d4285c1e11b745dfe591e848422f87e9 | [] | no_license | zh199609/pythonLearning | dcb4bfb4560fab0ac66a88934af278489abff38d | 430c70f1892966cf8f6b01e30e3a7996e83cc7ff | refs/heads/master | 2021-11-21T12:49:21.522780 | 2021-08-31T13:14:55 | 2021-08-31T13:14:55 | 238,373,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # 创建连接
import pymysql
# create the connection (autocommit so the UPDATE is persisted immediately)
conn = pymysql.connect(host="localhost", user='root', password='root', database='books', autocommit=True)
# obtain a cursor
cursor = conn.cursor()
# execute the SQL statement (the title literal is intentionally Chinese data)
sql = "update t_book set title = '西游记修改01' where id = 4"
cursor.execute(sql)
# "影响的记录数" = "number of affected rows"
print("影响的记录数:", cursor.rowcount)
# close the cursor
cursor.close()
# close the connection
conn.close()
# standalone demo of try/except/finally control flow (the try body
# cannot raise here, so this prints 'try' then 'finally')
try:
    print('try')
except Exception as e:
    print(e)
finally:
    print('finally')
| [
"1120123073@qq.com"
] | 1120123073@qq.com |
b8f99ce6a6829a95a5e4779296dbd76a1d416365 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-bms/huaweicloudsdkbms/v1/model/address_info.py | 5ab5c287376294e8571e0f93ba2a0651682fba42 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 6,832 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class AddressInfo:
    # SDK model for one IP-address entry of a bare-metal server; the
    # openapi_types/attribute_map pattern suggests this file is generated
    # from an OpenAPI spec -- prefer regenerating over hand edits.
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'version': 'str',
        'addr': 'str',
        'os_ext_ip_stype': 'str',
        'os_ext_ips_ma_cmac_addr': 'str',
        'os_ext_ip_sport_id': 'str'
    }
    # mapping from pythonic attribute names to the JSON wire-format keys
    attribute_map = {
        'version': 'version',
        'addr': 'addr',
        'os_ext_ip_stype': 'OS-EXT-IPS:type',
        'os_ext_ips_ma_cmac_addr': 'OS-EXT-IPS-MAC:mac_addr',
        'os_ext_ip_sport_id': 'OS-EXT-IPS:port_id'
    }
    def __init__(self, version=None, addr=None, os_ext_ip_stype=None, os_ext_ips_ma_cmac_addr=None, os_ext_ip_sport_id=None):
        """AddressInfo
        The model defined in huaweicloud sdk
        :param version: IP address version: ``4`` means IPv4, ``6`` means IPv6.
        :type version: str
        :param addr: The IP address.
        :type addr: str
        :param os_ext_ip_stype: IP address type: ``fixed`` is a private IP, ``floating`` a floating IP.
        :type os_ext_ip_stype: str
        :param os_ext_ips_ma_cmac_addr: The MAC address.
        :type os_ext_ips_ma_cmac_addr: str
        :param os_ext_ip_sport_id: ID of the port this IP address is bound to.
        :type os_ext_ip_sport_id: str
        """
        # private backing fields for the properties below
        self._version = None
        self._addr = None
        self._os_ext_ip_stype = None
        self._os_ext_ips_ma_cmac_addr = None
        self._os_ext_ip_sport_id = None
        self.discriminator = None
        # version/addr are always assigned; the rest only when provided
        self.version = version
        self.addr = addr
        if os_ext_ip_stype is not None:
            self.os_ext_ip_stype = os_ext_ip_stype
        if os_ext_ips_ma_cmac_addr is not None:
            self.os_ext_ips_ma_cmac_addr = os_ext_ips_ma_cmac_addr
        if os_ext_ip_sport_id is not None:
            self.os_ext_ip_sport_id = os_ext_ip_sport_id
    @property
    def version(self):
        """Gets the version of this AddressInfo.
        IP address version: ``4`` means IPv4, ``6`` means IPv6.
        :return: The version of this AddressInfo.
        :rtype: str
        """
        return self._version
    @version.setter
    def version(self, version):
        """Sets the version of this AddressInfo.
        IP address version: ``4`` means IPv4, ``6`` means IPv6.
        :param version: The version of this AddressInfo.
        :type version: str
        """
        self._version = version
    @property
    def addr(self):
        """Gets the addr of this AddressInfo.
        The IP address.
        :return: The addr of this AddressInfo.
        :rtype: str
        """
        return self._addr
    @addr.setter
    def addr(self, addr):
        """Sets the addr of this AddressInfo.
        The IP address.
        :param addr: The addr of this AddressInfo.
        :type addr: str
        """
        self._addr = addr
    @property
    def os_ext_ip_stype(self):
        """Gets the os_ext_ip_stype of this AddressInfo.

        IP address type; "fixed" means a private IP, "floating" means a
        floating IP.

        :return: The os_ext_ip_stype of this AddressInfo.
        :rtype: str
        """
        return self._os_ext_ip_stype

    @os_ext_ip_stype.setter
    def os_ext_ip_stype(self, os_ext_ip_stype):
        """Sets the os_ext_ip_stype of this AddressInfo.

        IP address type; "fixed" means a private IP, "floating" means a
        floating IP.

        :param os_ext_ip_stype: The os_ext_ip_stype of this AddressInfo.
        :type os_ext_ip_stype: str
        """
        self._os_ext_ip_stype = os_ext_ip_stype
    @property
    def os_ext_ips_ma_cmac_addr(self):
        """Gets the os_ext_ips_ma_cmac_addr of this AddressInfo.

        MAC address associated with this IP address.

        :return: The os_ext_ips_ma_cmac_addr of this AddressInfo.
        :rtype: str
        """
        return self._os_ext_ips_ma_cmac_addr

    @os_ext_ips_ma_cmac_addr.setter
    def os_ext_ips_ma_cmac_addr(self, os_ext_ips_ma_cmac_addr):
        """Sets the os_ext_ips_ma_cmac_addr of this AddressInfo.

        MAC address associated with this IP address.

        :param os_ext_ips_ma_cmac_addr: The os_ext_ips_ma_cmac_addr of this AddressInfo.
        :type os_ext_ips_ma_cmac_addr: str
        """
        self._os_ext_ips_ma_cmac_addr = os_ext_ips_ma_cmac_addr
    @property
    def os_ext_ip_sport_id(self):
        """Gets the os_ext_ip_sport_id of this AddressInfo.

        ID of the port this IP address is bound to.

        :return: The os_ext_ip_sport_id of this AddressInfo.
        :rtype: str
        """
        return self._os_ext_ip_sport_id

    @os_ext_ip_sport_id.setter
    def os_ext_ip_sport_id(self, os_ext_ip_sport_id):
        """Sets the os_ext_ip_sport_id of this AddressInfo.

        ID of the port this IP address is bound to.

        :param os_ext_ip_sport_id: The os_ext_ip_sport_id of this AddressInfo.
        :type os_ext_ip_sport_id: str
        """
        self._os_ext_ip_sport_id = os_ext_ip_sport_id
    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including models inside lists and dict values.
        Attributes listed in ``sensitive_list`` are masked as ``"****"``.
        """
        result = {}
        # Iterate over the declared attributes only, not arbitrary instance state.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each element that is itself a model.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize model-typed dict values, keep other items as-is.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model as JSON."""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 as the default encoding so non-ASCII
            # attribute values can be serialized without UnicodeDecodeError.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`; delegates to the JSON representation from to_str()."""
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AddressInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
f3e7038704cb777b458dce69e5130f105f8dd4d7 | 6ecff67d6103ddbd787f78c35182722b83b8a37e | /백준/Python/카테고리/DFS/11724(연결 요소의 개수).py | 7a4091ce202ede0784f000483c362a969b976680 | [] | no_license | jsungmin6/Algorithm | 9ef2339aa00921e7df756a8dff569954a008c118 | bc1ea9de9f7ba3f1aa6616ebef8719540d72e0bf | refs/heads/master | 2023-05-27T06:24:16.123307 | 2021-06-11T09:22:21 | 2021-06-11T09:22:21 | 259,299,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | '''
visited 와 graph를 만들어 구하는게 아닐까
'''
from collections import deque
import sys
input = sys.stdin.readline
# N: number of vertices (1-indexed), M: number of undirected edges.
N,M = map(int,input().split())
visited = [0]*(N+1)  # visit flags; index 0 is unused
graph = [[] for i in range(N+1)]  # adjacency lists, filled from stdin below
def dfs(i):
    """Breadth-first traversal marking every vertex reachable from i.

    Mutates the module-level ``visited`` flags using ``graph`` adjacency
    lists; despite the name, the queue makes this a BFS.
    """
    queue = deque([i])
    visited[i] = 1
    while queue:
        current = queue.popleft()
        for neighbor in graph[current]:
            if not visited[neighbor]:
                visited[neighbor] = 1
                queue.append(neighbor)
# Read the M undirected edges into the adjacency lists.
for _ in range(M):
    u,v = map(int,input().split())
    graph[u].append(v)
    graph[v].append(u)
# Each traversal from an unvisited vertex covers one connected component.
cnt=0
for i in range(1,N+1):
    if visited[i] != 1:
        dfs(i)
        cnt+=1
print(cnt)
"jsungmin506@gmail.com"
] | jsungmin506@gmail.com |
26b2fcf42fe20f5c02c69785b561a485bae9c91f | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/PhysicsAnalysis/D3PDMaker/QcdD3PDMaker/python/JSTrackJets.py | f04421edf54f821f6b1056046460cf90fb8a50b8 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,137 | py | # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
###############################################################
#
# JetTrackZClusterTool jobOptions file
# # PhysicsAnalysis/JetEtMissDPDModifier/share/JetTrackZClusterTool_jobOptions.py
# # https://svnweb.cern.ch/trac/atlasoff/browser/PhysicsAnalysis/D3PDMaker/SUSYD3PDMaker/trunk/share/JetTrackZClusterTool_jobOptions.py
#==============================================================
#--------------------------------------------------------------
# JetTrackZClusterTool Options
#--------------------------------------------------------------
# Import configurable for tool
from JetRecTools.JetRecToolsConf import JetTrackZClusterTool
from JetRec.JetGetters import *
from JetRec.JetRecConf import *
def createJSTrackJets ( theseq, myjetfinder, myjetdr ):
  """Configure and register a track-jet finder in the given Athena sequence.

  Builds a JetTrackZClusterTool (with a detailed InDet track selector and a
  FastJet finder), then wraps everything in a standard jet getter.

  :param theseq: Athena algorithm sequence the jet getter is attached to.
  :param myjetfinder: jet algorithm name, 'AntiKt' or 'CamKt'.
  :param myjetdr: jet radius parameter (e.g. 0.4); also used in tool names.
  :return: the configured jet getter from make_StandardJetGetter.
  """
  # Setup tool so that it can be used
  JS_MyJetTrackZClusterTool = JetTrackZClusterTool( "JS_JetTrackZClusterTool_%s%d" % (myjetfinder,myjetdr*10) )
  JS_MyJetTrackZClusterTool.TrackJetMinMulti = 2
  JS_MyJetTrackZClusterTool.TrackJetMinPt = 4000 # MeV
  JS_MyJetTrackZClusterTool.UseVtxSeeding = True
  JS_MyJetTrackZClusterTool.DeltaZRange = 10000.0
  JS_MyJetTrackZClusterTool.TrackParticleContainerName = "TrackParticleCandidate"
  JS_MyJetTrackZClusterTool.VxContainerName = "VxPrimaryCandidate"
  JS_MyJetTrackZClusterTool.OutputLevel = 3
  #--------------------------------------------------------------
  # TrackSelector Tool Options
  #--------------------------------------------------------------
  from InDetTrackSelectorTool.InDetTrackSelectorToolConf import InDet__InDetDetailedTrackSelectorTool
  from AthenaCommon.AppMgr import ToolSvc
  trackSelector = InDet__InDetDetailedTrackSelectorTool( "JS_MyDetailedTrackSelectorTool" )
  ToolSvc += trackSelector
  #See InDetDetailedTrackSelectorTool.h for additional options and defaults
  trackSelector.pTMin = 500 # MeV
  trackSelector.etaMax = 2.5
  trackSelector.nHitBLayer = 0
  trackSelector.nHitPix = 0
  trackSelector.nHitBLayerPlusPix = 1 #0
  trackSelector.nHitSct = 6 #0
  trackSelector.nHitSi = 7 #7
  trackSelector.nHitTrt = 0
  trackSelector.IPd0Max = 1.0 #1 # d0 cut
  trackSelector.IPz0Max = 1.5 # z0*sin(theta) cut
  trackSelector.z0Max = 200 # z0 cut
  trackSelector.fitChi2OnNdfMax = 10000 #1000 #3.5 #3
  trackSelector.d0significanceMax = -1.
  trackSelector.z0significanceMax = -1.
  # Try to set InDet default tools to avoid strange bugs
  try:
    trackSelector.Extrapolator = ToolSvc.InDetExtrapolator
  except:
    from AthenaCommon.Logging import logging
    l = logging.getLogger("TrackSelectionForJets::setupTrackSelectorTool")
    l.warning("No ToolSvc.InDetExtrapolator available. Tracking might cause infinite loop")
    pass
  #trackSelector.OutputLevel = 3
  from TrkTrackSummaryTool.AtlasTrackSummaryTool import AtlasTrackSummaryTool
  atst = AtlasTrackSummaryTool()
  ToolSvc += atst
  trackSelector.TrackSummaryTool = atst
  ##
  # Cache track-selection decisions so repeated queries are cheap.
  from JetSubStructure.JetSubStructureConf import JetSubStructure__CachedTrackSelectorTool
  CachedTrackSelector = JetSubStructure__CachedTrackSelectorTool("JS_CachedTrackSelectorTool")
  ToolSvc += CachedTrackSelector
  CachedTrackSelector.TrackSelector = trackSelector
  ##
  # Tell "JetTrackZClusterTool" to use this tool
  JS_MyJetTrackZClusterTool.TrackSelector = CachedTrackSelector.TrackSelector
  #--------------------------------------------------------------
  # JetFinder Tool Options (Anti-Kt)
  #--------------------------------------------------------------
  from JetRec.JetRecConf import JetFastJetFinderTool
  myfastfinder = JetFastJetFinderTool("JS_%s%dTrackJetFinder" % (myjetfinder,myjetdr*10))
  if myjetfinder == 'AntiKt':
    myfastfinder.Algorithm = "anti-kt"
  elif myjetfinder == 'CamKt':
    myfastfinder.Algorithm = "cambridge"
  myfastfinder.Radius = myjetdr
  myfastfinder.RecombScheme = "E"
  myfastfinder.Strategy = "Best"
  myfastfinder.FailIfMisconfigured = True
  myfastfinder.Inclusive = True
  myfastfinder.CalculateJetArea = False
  myfastfinder.StoreNFlipValues = 0
  ToolSvc += myfastfinder
  # Tell "TrackZClusterTool" to use this tool
  JS_MyJetTrackZClusterTool.JetFinder = myfastfinder
  #-------------------------------------------------------------
  # Jet Getter
  #-------------------------------------------------------------
  JS_TrackZToolList = [JS_MyJetTrackZClusterTool,
                       JetSignalSelectorTool('JSTZ_JetFinalPtCut',UseTransverseMomentum = True,MinimumSignal= jetFlags.finalMinEt()),
                       JetSorterTool('JSTZ_JetSorter',SortOrder="ByPtDown") ]
  mytrackzjetgetter = make_StandardJetGetter(myjetfinder, myjetdr,'TrackZ',seq = theseq, allTools = JS_TrackZToolList)
  return mytrackzjetgetter
#==============================================================
#
# End of job options file
#
###############################################################
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
16173f46fa2faecd70a0cdf9cbd51a926f590924 | 17268419060d62dabb6e9b9ca70742f0a5ba1494 | /pp/types.py | 4dd35737400a72f4e304189cf8101cc22720358d | [
"MIT"
] | permissive | TrendingTechnology/gdsfactory | a19124423b12cbbb4f35b61f33303e9a012f82e5 | c968558dba1bae7a0421bdf49dc192068147b776 | refs/heads/master | 2023-02-22T03:05:16.412440 | 2021-01-24T03:38:00 | 2021-01-24T03:38:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | """Common data types."""
from typing import Callable, Dict, Tuple, Union
from pp.component import Component
# (layer, datatype) pair identifying a GDS layer.
Layer = Tuple[int, int]
# Either a Component instance or a factory callable that builds one.
ComponentOrFunction = Union[Callable, Component]
# Mapping from a function's name to the function itself.
NameToFunctionDict = Dict[str, Callable]
# Any plain numeric value.
Number = Union[float, int]
def get_name_to_function_dict(*functions) -> Dict[str, Callable]:
    """Build a dict mapping each function's ``__name__`` to the function."""
    mapping: Dict[str, Callable] = {}
    for func in functions:
        mapping[func.__name__] = func
    return mapping
# Public API of this module.
__all__ = [
    "Layer",
    "ComponentOrFunction",
    "NameToFunctionDict",
    "Number",
    "get_name_to_function_dict",
]
"noreply@github.com"
] | TrendingTechnology.noreply@github.com |
c5f27f2348c5d7b812eaf52d13e8b3cb56d2b862 | fcc7dd9d9c7f22808f907759cec6a339c5a1421c | /my_workstation/my-v2/core.PrePostInitMeta.py | 3edabd35eb190368ee17c81725ad1b667c000f50 | [] | no_license | EmbraceLife/fastai_treasures | 3ae792771af3510848c7bb19003b04cff8001e1e | 4230be915e70a7e5a22f2f7e5137cca7045754fd | refs/heads/master | 2022-10-30T07:46:46.397037 | 2020-09-09T04:09:14 | 2020-09-09T04:09:14 | 173,718,178 | 21 | 6 | null | 2022-10-06T05:32:28 | 2019-03-04T09:52:12 | Jupyter Notebook | UTF-8 | Python | false | false | 2,111 | py | from local.test import *
from local.imports import *
from local.notebook.showdoc import show_doc
from local.core import *
# check the official source
show_doc(PrePostInitMeta, title_level=3)
class PrePostInitMeta(type):
    """Metaclass that wraps ``__init__`` with optional hook methods.

    Instances of classes built with this metaclass run ``__pre_init__``,
    then the class's own ``__init__``, then ``__post_init__``.  Any hook
    the class does not define is replaced by a no-op, so all three calls
    are always safe.
    """
    def __new__(cls, name, bases, dct):
        created = super().__new__(cls, name, bases, dct)

        def _noop(self, *args, **kwargs): pass

        # Guarantee all three hooks exist so the wrapper below never fails.
        for hook in ('__init__', '__pre_init__', '__post_init__'):
            if not hasattr(created, hook):
                setattr(created, hook, _noop)

        wrapped_init = created.__init__

        @functools.wraps(wrapped_init)
        def _init(self, *args, **kwargs):
            self.__pre_init__()
            wrapped_init(self, *args, **kwargs)
            self.__post_init__()

        created.__init__ = _init
        return created
# simple but standard example: a == 2 after all three hooks run in order
class _T(metaclass=PrePostInitMeta):
    def __pre_init__(self): self.a = 0; assert self.a==0
    def __init__(self): self.a += 1; assert self.a==1
    def __post_init__(self): self.a += 1; assert self.a==2
t = _T() #pdb
t.a
# what would happen when lacking __post_init__: the missing hook becomes a
# no-op, so a stays at 1
class _T(metaclass=PrePostInitMeta):
    def __pre_init__(self): self.a = 0; assert self.a==0
    def __init__(self): self.a += 1; assert self.a==1
    # def __post_init__(self): self.a += 1; assert self.a==2
t = _T()#pdb
t.a
| [
"1227561934@qq.com"
] | 1227561934@qq.com |
0ead5a6450132dc13eb9ea466a731aed48251aa1 | 0880faa6ef7f30da63a74739203b0f9d7d4fe10e | /wesbanco/items.py | b03a2b1076a10cef81ffb6f9dcaca01f7f808313 | [] | no_license | wesleybowen/wesbanco | 11c930d9facd5f2ee64b31316192796635529eb9 | 75e29924f2b57cd4b8da84a04e74fe10a5547942 | refs/heads/main | 2023-03-26T14:01:32.826370 | 2021-03-25T08:56:09 | 2021-03-25T08:56:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | import scrapy
class WesbancoItem(scrapy.Item):
    """Scraped item for one Wesbanco article/post."""
    title = scrapy.Field()        # headline text
    description = scrapy.Field()  # body/summary text
    date = scrapy.Field()         # publication date
| [
"hr.grudev@gmail.com"
] | hr.grudev@gmail.com |
8be82b1ecb01d00e2c59861f5272e0287388f6c9 | eebe1e43538bcc11a0558b58f2e6a6d22abc6a4a | /DGesQuad/manage.py | bc8d80251e82e44eba5bd6aa3df20298527c7f17 | [] | no_license | zurcx/GesQuad | 9718843be1e24e5a11572ad90a7a0da1065f15f6 | e1b5413ecfb740fd92e2dac8858b9e86fbb6efad | refs/heads/master | 2021-01-10T20:11:22.635706 | 2013-04-25T16:02:02 | 2013-04-25T16:02:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module (unless already set),
    # then hand the CLI arguments to Django's management command runner.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DGesQuad.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"luizfabiodacruz@gmail.com"
] | luizfabiodacruz@gmail.com |
595539f9f15b03ff08b9a21351d7fb2c6c6668e1 | 405d5ab969287f184ea589e73d61cc3be1c5d12b | /kinparse/kinparse.py | 4ebc18408467834295ad57576d63765bbbb410ce | [
"MIT"
] | permissive | CBJamo/kinparse | a847c594f1bbed015797bb31ea8c15cc339f9d85 | 3f4e8e9d881290cbac8e7b71886b0ed6cab0cd9b | refs/heads/master | 2020-03-09T06:32:02.646762 | 2018-04-08T13:02:52 | 2018-04-08T13:02:52 | 128,641,751 | 0 | 0 | null | 2018-04-08T13:01:53 | 2018-04-08T13:01:53 | null | UTF-8 | Python | false | false | 7,150 | py | # MIT license
#
# Copyright (C) 2016 by XESS Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Parsers for netlist files of various formats (only KiCad, at present).
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import open
from future import standard_library
standard_library.install_aliases()
from .py_2_3 import *
from pyparsing import *
THIS_MODULE = locals()  # lets parse_netlist() look up per-tool parser functions by name
def _parse_netlist_kicad(text):
    """
    Return a pyparsing object storing the contents of a KiCad netlist.

    The grammar below mirrors the s-expression structure of KiCad's
    ``(export ...)`` netlist format: a version clause followed by design,
    components, libparts, libraries and nets sections.
    """

    def _paren_clause(keyword, value):
        """
        Create a parser for a parenthesized list with an initial keyword.
        """
        lp = Literal('(').suppress()
        rp = Literal(')').suppress()
        # The parsed value is stored under both 'val' and the keyword name.
        return (lp + Keyword(keyword, caseless=True).suppress() + value('val') + rp
                )(keyword)

    #++++++++++++++++++++++++++++ Parser Definition +++++++++++++++++++++++++++
    # Basic elements.
    word = Word(alphas)
    inum = Word(nums)
    fnum = Word(nums) + Optional(Literal('.') + Optional(Word(nums)))
    string = ZeroOrMore(White()).suppress() + CharsNotIn('()') + ZeroOrMore(White()).suppress()
    qstring = dblQuotedString() ^ sglQuotedString()
    qstring.addParseAction(removeQuotes)
    anystring = qstring ^ string
    # Design section.
    source = _paren_clause('source', Optional(anystring))
    date = _paren_clause('date', Optional(anystring))
    tool = _paren_clause('tool', Optional(anystring))
    number = _paren_clause('number', inum)
    name = _paren_clause('name', anystring)
    names = _paren_clause('names', anystring)
    tstamp = _paren_clause('tstamp', anystring)
    tstamps = _paren_clause('tstamps', anystring)
    title = _paren_clause('title', Optional(anystring))
    company = _paren_clause('company', Optional(anystring))
    rev = _paren_clause('rev', Optional(anystring))
    value = _paren_clause('value', anystring)
    comment = Group(_paren_clause('comment', number & value))
    comments = Group(OneOrMore(comment))('comments')
    title_block = _paren_clause('title_block', Optional(title) &
                                Optional(company) & Optional(rev) &
                                Optional(date) & Optional(source) & comments)
    sheet = Group(_paren_clause('sheet', number + name + tstamps + Optional(title_block)))
    sheets = Group(OneOrMore(sheet))('sheets')
    design = _paren_clause('design', Optional(source) & Optional(date) &
                           Optional(tool) & Optional(sheets))
    # Components section.
    ref = _paren_clause('ref', anystring)
    datasheet = _paren_clause('datasheet', anystring)
    field = Group(_paren_clause('field', name & anystring('text')))
    fields = _paren_clause('fields', ZeroOrMore(field))
    lib = _paren_clause('lib', anystring)
    part = _paren_clause('part', anystring)
    footprint = _paren_clause('footprint', anystring)
    libsource = _paren_clause('libsource', lib & part)
    sheetpath = _paren_clause('sheetpath', names & tstamps)
    comp = Group(_paren_clause('comp', ref & value & Optional(datasheet) &
                 Optional(fields) & Optional(libsource) & Optional(footprint) &
                 Optional(sheetpath) & Optional(tstamp)))
    components = _paren_clause('components', ZeroOrMore(comp))
    # Part library section.
    description = _paren_clause('description', anystring)
    docs = _paren_clause('docs', anystring)
    pnum = _paren_clause('num', anystring)
    ptype = _paren_clause('type', anystring)
    pin = Group(_paren_clause('pin', pnum & name & ptype))
    pins = _paren_clause('pins', ZeroOrMore(pin))
    alias = Group(_paren_clause('alias', anystring))
    aliases = _paren_clause('aliases', ZeroOrMore(alias))
    fp = Group(_paren_clause('fp', anystring))
    footprints = _paren_clause('footprints', ZeroOrMore(fp))
    libpart = Group(_paren_clause('libpart', lib & part & Optional(
        fields) & Optional(pins) & Optional(footprints) & Optional(aliases) &
        Optional(description) & Optional(docs)))
    libparts = _paren_clause('libparts', ZeroOrMore(libpart))
    # Libraries section.
    logical = _paren_clause('logical', anystring)
    uri = _paren_clause('uri', anystring)
    library = Group(_paren_clause('library', logical & uri))
    libraries = _paren_clause('libraries', ZeroOrMore(library))
    # Nets section.
    code = _paren_clause('code', inum)
    part_pin = _paren_clause('pin', anystring)
    node = Group(_paren_clause('node', ref & part_pin))
    nodes = Group(OneOrMore(node))('nodes')
    net = Group(_paren_clause('net', code & name & nodes))
    nets = _paren_clause('nets', ZeroOrMore(net))
    # Entire netlist.
    version = _paren_clause('version', word)
    end_of_file = ZeroOrMore(White()) + stringEnd
    parser = _paren_clause('export', version +
                           (design & components & Optional(libparts) & Optional(libraries) & nets
                            ))('netlist') + end_of_file.suppress()
    return parser.parseString(text)
def parse_netlist(src, tool='kicad'):
    """
    Return a pyparsing object storing the contents of a netlist.

    Args:
        src: Either a text string, or a filename, or a file object that stores
            the netlist.
        tool: Name of the ECAD tool that generated the netlist. Only 'kicad'
            is currently supported.

    Returns:
        A pyparsing object that stores the netlist contents.

    Raises:
        Exception: If the netlist source is unreadable or the tool is
            unsupported.
    """
    # Accept, in order of preference: an open file object, a filename, or
    # the netlist text itself.
    try:
        text = src.read()
    except Exception:
        try:
            # Use a context manager so the file handle is not leaked.
            with open(src, 'r') as netlist_file:
                text = netlist_file.read()
        except Exception:
            text = src

    if not isinstance(text, basestring):
        raise Exception("Can't read netlist from [{}].\n".format(src))

    # Use the tool name to find the function for parsing the netlist.
    func_name = '_parse_netlist_{}'.format(tool)
    try:
        parse_func = THIS_MODULE[func_name]
    except KeyError:
        # The original handler called an undefined `logger`, which raised a
        # NameError instead of reporting the real problem.
        raise Exception('Unsupported ECAD tool library: {}'.format(tool))
    return parse_func(text)
| [
"devb@xess.com"
] | devb@xess.com |
695b7501286789e70267ff4ce0dcf1ccb349a120 | 6a63a3b241e161d1e69f1521077617ad86f31eab | /release/rllib_tests/multi_gpu_learning_tests/run.py | 7835956daf7972e9fe0bad297fab58c3f0d175a8 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | jovany-wang/ray | 47a9df67e8ea26337517d625df50eb0b8b892135 | 227aef381a605cb1ebccbba4e84b840634196a35 | refs/heads/master | 2023-09-03T23:53:00.050619 | 2022-08-20T21:50:52 | 2022-08-20T21:50:52 | 240,190,407 | 1 | 1 | Apache-2.0 | 2023-03-04T08:57:04 | 2020-02-13T06:13:19 | Python | UTF-8 | Python | false | false | 818 | py | """Multi-GPU learning tests for RLlib (torch and tf).
"""
import json
import os
from pathlib import Path
from ray.rllib.utils.test_utils import run_learning_tests_from_yaml
if __name__ == "__main__":
    # Get path of this very script to look for yaml files.
    abs_yaml_path = Path(__file__).parent
    print("abs_yaml_path={}".format(abs_yaml_path))
    # Collect every learning-test YAML next to this script (recursively),
    # sorted in reverse name order for a deterministic run order.
    yaml_files = abs_yaml_path.rglob("*.yaml")
    yaml_files = sorted(
        map(lambda path: str(path.absolute()), yaml_files), reverse=True
    )
    # Run all tests in the found yaml files.
    results = run_learning_tests_from_yaml(yaml_files)
    # Write the results where the CI harness expects them (overridable).
    test_output_json = os.environ.get(
        "TEST_OUTPUT_JSON", "/tmp/rllib_multi_gpu_learning_tests.json"
    )
    with open(test_output_json, "wt") as f:
        json.dump(results, f)
    print("Ok.")
"noreply@github.com"
] | jovany-wang.noreply@github.com |
7edca1e18a6672b3567128648ab71cf7a75a0200 | e6d55aa3c68644bdfe37a9472931c01950e27609 | /ceggelab/ce/models.py | 4fef63b2b397da38f1d012d9759b53a3e5d2848a | [] | no_license | KamonratNg/cegge | 2c5597554f183479d4f18a10d9e4132a299ea591 | 64e3b3012ea96f2fd1c25f1581e83a9c193f7092 | refs/heads/master | 2023-01-06T18:23:46.668810 | 2020-11-11T17:56:54 | 2020-11-11T17:56:54 | 309,621,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | from django.db import models
# Create your models here.
class Studentrecord(models.Model):
    """Record of one lab member (student, postdoc or researcher)."""
    student_name_en = models.CharField(max_length=200)  # name in English
    student_name_th = models.CharField(max_length=200)  # name in Thai
    student_ID = models.IntegerField(default=0)
    # Choices for student_level; NOTE(review): `type` shadows the builtin —
    # consider renaming (e.g. LEVEL_CHOICES) in a follow-up migration-safe change.
    type = (('M','Master programs'),
            ('D','Doctoral programs'),
            ('P','Postdoctorate'),
            ('R','Researcher'))
    student_level = models.CharField(max_length=30, choices= type)
    def __str__(self):
        # Human-readable label: "<english name> <id> <level code>"
        return self.student_name_en +' '+ str(self.student_ID) +' '+ self.student_level
| [
"you@example.com"
] | you@example.com |
9f5a25ac8aac1ab659663cbe98f1f78fec020788 | b3e42025194b81680086d097fed9aa6c84bfce9a | /apps/vendors/locations_urls.py | 98387686246dcb4232cee0b5d2523610b972344e | [
"MIT"
] | permissive | superdev999/ProvenBanking | 95c65698d9f3a552d04edfd4fd9d4469fb43a47f | 2153e9d737e2b235e502c848986ca35b6f310b8d | refs/heads/master | 2021-01-12T05:17:31.864890 | 2017-08-17T00:11:59 | 2017-08-17T00:11:59 | 77,897,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | from django.conf.urls import patterns, include, url
from med_social.decorators import member_required
from vendors.views import (CreateVendorLocation,
EditVendorLocation, DeleteVendorLocation, VendorLocationList)
# namespace = locations
# URL routes for vendor locations (namespace = locations); every view
# requires an authenticated member. <pk> is the vendor id, <loc_pk> the location id.
urlpatterns = patterns('',
    url(r'^(?P<pk>\d+)/create/$', member_required(CreateVendorLocation.as_view()), name='create'),
    url(r'^(?P<pk>\d+)/list/$', member_required(VendorLocationList.as_view()), name='list'),
    url(r'^(?P<pk>\d+)/(?P<loc_pk>\d+)/edit/$', member_required(EditVendorLocation.as_view()), name='edit'),
    url(r'^(?P<pk>\d+)/delete/$', member_required(DeleteVendorLocation.as_view()), name='delete'),
)
| [
"wanghaoming820@outlook.com"
] | wanghaoming820@outlook.com |
088511264beb5b545d0e43bf09441aa35f1c34e7 | f845225329fa9750c838bf511fed3beb48cc86af | /listings/migrations/0001_initial.py | 9ea3d0e0a6433a346296dafd185e3909e32ea71a | [] | no_license | Fabricourt/btre_project- | ac8c2b84cc8b7f4f5368a204dc23b378d488b356 | 13defd495ba309ac31550d22ad7d6306638f91eb | refs/heads/master | 2020-04-15T11:03:05.980170 | 2019-01-08T11:16:56 | 2019-01-08T11:16:56 | 164,611,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,168 | py | # Generated by Django 2.1.4 on 2018-12-23 13:47
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Listing model.

    Generated code — prefer creating a new migration over editing this one.
    """
    initial = True
    # Listing.realtor points at realtors.Realtor, so that app's initial
    # migration must run first.
    dependencies = [
        ('realtors', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Listing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('address', models.CharField(max_length=200)),
                ('city', models.CharField(max_length=100)),
                ('state', models.CharField(max_length=100)),
                ('zipcode', models.CharField(max_length=20)),
                ('description', models.TextField(blank=True)),
                ('price', models.IntegerField()),
                ('bedrooms', models.IntegerField()),
                ('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
                ('garage', models.IntegerField(default=0)),
                ('sqft', models.IntegerField()),
                ('lot_size', models.DecimalField(decimal_places=1, max_digits=5)),
                ('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d/')),
                ('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('is_published', models.BooleanField(default=True)),
                ('list_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.Realtor')),
            ],
        ),
    ]
| [
"mfalme2030@gmail.com"
] | mfalme2030@gmail.com |
4dd1a54ae37966be0ac487524f3fed672f577f6a | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201023164513.py | 720eace3df9f2dc075a573f46a06fa46c97fa7f1 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,931 | py | from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
class TitleBlock(blocks.StructBlock):
    """Centred title text rendered by streams/title_block.html."""

    text = blocks.CharBlock(
        required=True,
        # Fix: the keyword was misspelled `elp_text`, so the help text was
        # never attached to the field.
        help_text='Tekst do wyświetlenia',
    )

    class Meta:
        template = 'streams/title_block.html'
        # Fix: 'edycja' is not a registered Wagtail icon name; use 'edit'.
        icon = 'edit'
        label = 'Tytuł'
        help_text = 'Wyśrodkowany tekst do wyświetlenia na stronie.'
class LinkValue(blocks.StructValue):
    """Extra logic for link values."""
    def url(self) -> str:
        """Return the resolved URL: internal page first, then external, else ''."""
        # NOTE(review): this looks up the child block named 'internal_page';
        # the Link block below must declare a child with exactly that name.
        internal_page = self.get('internal_page')
        external_link = self.get('external_link')
        if internal_page:
            return internal_page.url
        elif external_link:
            return external_link
        return ''
class Link(blocks.StructBlock):
    """A link to either an internal page or an external URL."""

    link_text = blocks.CharBlock(
        max_length=50,
        default='Więcej szczegółów'
    )
    # Fix: this child was misspelled `interal_page`, so LinkValue.url()
    # (which looks up 'internal_page') always fell through to the external
    # link. The corrected name matches LinkValue.
    internal_page = blocks.PageChooserBlock(
        required=False
    )
    external_link = blocks.URLBlock(
        required=False
    )

    class Meta:
        # Expose LinkValue so templates can call {{ value.url }}.
        value_class = LinkValue
class Card(blocks.StructBlock):
    """One card: title, optional text, a cropped image and a link."""
    title = blocks.CharBlock(
        max_length=100,
        help_text = 'Pogrubiony tytuł tej karty. Maksymalnie 100 znaków.'
    )
    text = blocks.TextBlock(
        max_length=255,
        help_text='Opcjonalny tekst tej karty. Maksymalnie 255 znaków.'
    )
    image = ImageChooserBlock(
        help_text = 'Obraz zostanie automatycznie przycięty o 570 na 370 pikseli'
    )
    # NOTE(review): 'Wwybierz' looks like a typo for 'Wybierz' — confirm and fix.
    link = Link(help_text = 'Wwybierz link')
class CardsBlock(blocks.StructBlock):
    """A repeatable list of Card blocks rendered by streams/card_block.html."""
    cards = blocks.ListBlock(
        Card()
    )
    class Meta:
        template = 'streams/card_block.html'
        icon = 'image'
        label = 'Karty standardowe'
class ImageAndTextBlock(blocks.StructBlock):
    """An image alongside a title, text and a link."""

    image = ImageChooserBlock(help_text='Obraz automatycznie ')
    # The snapshot listed the remaining field names without definitions
    # (`image_alignment`, `title`, `text`) and had `link - Link()` instead of
    # an assignment, which raises NameError at import time. Give each a
    # concrete block so the module loads.
    image_alignment = blocks.ChoiceBlock(
        choices=(('left', 'left'), ('right', 'right')),
        default='left',
    )
    title = blocks.CharBlock(max_length=100)
    text = blocks.TextBlock(max_length=255, required=False)
    link = Link()
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
01a3a428407e02aaf5a3e666649d68eaa1e1e1b1 | 8a4f0d4aad4a901bd08fd5eb92b2a31fb1bac167 | /dizoo/smac/config/smac_MMM_coma_config.py | b99dd285cf57a67513b658576e9a62336a1d8eaa | [
"Apache-2.0"
] | permissive | lichuminglcm/DI-engine | 3977eed854dc634f8796764e0a7e0b71b615747f | e9052f195d231a9875afb053ba815c6341857571 | refs/heads/main | 2023-08-21T05:09:49.931351 | 2021-10-11T12:32:36 | 2021-10-11T12:32:36 | 415,903,070 | 0 | 0 | Apache-2.0 | 2021-10-11T12:32:37 | 2021-10-11T11:48:26 | Python | UTF-8 | Python | false | false | 2,855 | py | import sys
from copy import deepcopy
from ding.entry import serial_pipeline
from easydict import EasyDict
# SMAC map "MMM" has 10 controllable agents.
agent_num = 10
collector_env_num = 16  # parallel envs used for data collection
evaluator_env_num = 8   # parallel envs used for evaluation
main_config = dict(
    env=dict(
        map_name='MMM',
        difficulty=7,
        reward_only_positive=True,
        mirror_opponent=False,
        agent_num=agent_num,
        collector_env_num=collector_env_num,
        evaluator_env_num=evaluator_env_num,
        shared_memory=False,
        # Training stops once the evaluation win rate reaches this value.
        stop_value=0.999,
        n_evaluator_episode=32,
    ),
    policy=dict(
        model=dict(
            # (int) agent_num: The number of the agent.
            # For SMAC 3s5z, agent_num=8; for 2c_vs_64zg, agent_num=2.
            agent_num=agent_num,
            # (int) obs_shape: The shapeension of observation of each agent.
            # For 3s5z, obs_shape=150; for 2c_vs_64zg, agent_num=404.
            # (int) global_obs_shape: The shapeension of global observation.
            # For 3s5z, obs_shape=216; for 2c_vs_64zg, agent_num=342.
            obs_shape=dict(
                agent_state=186,
                global_state=290,
            ),
            # (int) action_shape: The number of action which each agent can take.
            # action_shape= the number of common action (6) + the number of enemies.
            # For 3s5z, obs_shape=14 (6+8); for 2c_vs_64zg, agent_num=70 (6+64).
            action_shape=16,
            # (List[int]) The size of hidden layer
            actor_hidden_size_list=[64],
        ),
        # used in state_num of hidden_state
        collect=dict(
            n_episode=32,
            unroll_len=10,
            env_num=collector_env_num,
        ),
        eval=dict(env_num=evaluator_env_num, evaluator=dict(eval_freq=100, )),
        other=dict(
            # Epsilon-greedy exploration schedule (exponential decay).
            eps=dict(
                type='exp',
                start=0.5,
                end=0.01,
                decay=200000,
            ),
            replay_buffer=dict(
                # (int) max size of replay buffer
                replay_buffer_size=5000,
                # (int) max use count of data, if count is bigger than this value, the data will be removed from buffer
                max_use=10,
            ),
        ),
    ),
)
main_config = EasyDict(main_config)
# Registry-style config: selects the env, env manager, policy and collector
# implementations by their registered type names.
create_config = dict(
    env=dict(
        type='smac',
        import_names=['dizoo.smac.envs.smac_env'],
    ),
    env_manager=dict(type='subprocess'),
    policy=dict(type='coma'),
    collector=dict(type='episode', get_train_sample=True),
)
create_config = EasyDict(create_config)
def train(args):
    """Launch the serial training pipeline with the configs defined above."""
    serial_pipeline([main_config, create_config], seed=args.seed)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--seed', '-s', type=int, default=0)
args = parser.parse_args()
train(args)
| [
"niuyazhe@sensetime.com"
] | niuyazhe@sensetime.com |
00f80ba31ddfb7053bb5c584ada8ce11612618d3 | 1baf76e19a719ebb2207f2af2924fc53349d6a60 | /internship3_env/bin/black | 7ae627598807eb9072010262c912bdf4e16afbd3 | [
"MIT"
] | permissive | Zamy97/internship_3 | 4deb0df914e68930b23faa6bf7e0ca7fd342fbd8 | 9c9db252b6818316e9864839075bb1d23714f7e4 | refs/heads/master | 2023-01-01T15:33:45.980776 | 2020-10-28T02:47:34 | 2020-10-28T02:47:34 | 307,861,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | #!/Users/zamy/Desktop/Python_Projects/excl_intrnship_projects/excl_internship_0/internship_3/internship_3/internship3_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from black import patched_main
if __name__ == '__main__':
    # Strip the installer's "-script.py"/".exe" wrapper suffix so black
    # reports a clean program name in argv[0] before delegating to its CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(patched_main())
| [
"aktarzaman@berkeley.edu"
] | aktarzaman@berkeley.edu | |
e92f37657c6db9b8535a58da0709431147348625 | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/validators/parcats/line/colorbar/_lenmode.py | bcb4aa6f6f7b818eda6ecb5c198c420b34567bf3 | [
"MIT"
] | permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 568 | py | import _plotly_utils.basevalidators
class LenmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``parcats.line.colorbar.lenmode``
    property, restricted to ``'fraction'`` or ``'pixels'``."""

    def __init__(
        self,
        plotly_name='lenmode',
        parent_name='parcats.line.colorbar',
        **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating upward.
        edit_type = kwargs.pop('edit_type', 'colorbars')
        role = kwargs.pop('role', 'info')
        allowed_values = kwargs.pop('values', ['fraction', 'pixels'])
        super(LenmodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=allowed_values,
            **kwargs
        )
"jmswank7@gmail.com"
] | jmswank7@gmail.com |
b124bf0ba59ebf9333a3780b526609d07c55a3e6 | ac1938e7513d8e58f2228962b10caa1044a3d8ff | /python-fundamentals/39-find_the_duplicate/solution.py | eb8b89c18dca0a01d877565a4f8a2dce20c66afe | [] | no_license | annikaslund/python_practice | fb211cfec725573a3e9f5f358c869e1edd8608a3 | a6a1586ebbb1883afc8d7920848167955fa258a0 | refs/heads/master | 2020-04-22T07:33:32.389883 | 2019-02-14T16:53:53 | 2019-02-14T16:53:53 | 170,220,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | def find_the_duplicate(nums):
def find_the_duplicate(nums):
    """Return the first value in nums that repeats an earlier one.

    Returns None (implicitly) when every value is unique.
    """
    encountered = set()
    for candidate in nums:
        if candidate not in encountered:
            encountered.add(candidate)
            continue
        return candidate
"joel@joelburton.com"
] | joel@joelburton.com |
659c8d2a968faea0223a4e997f381d01e84cd5bb | 49edd8549054f63a73c846d0bdf48930703b9aed | /app/core/tests/test_commands.py | 62b7167bf5c90ce7dbf86dd6f42b6fa7e37f8d6e | [] | no_license | AlekseiChirkov/recipe-app-api | 370ccc8239197d700407449e892abd0a804e1504 | e5d1a0561951b46e0766c96e28f5f4ad707a9bc9 | refs/heads/main | 2023-08-23T10:14:50.337020 | 2021-10-04T07:02:19 | 2021-10-04T07:02:19 | 381,278,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""
Test wating for db when db is available
:return:
"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""
Test waiting for db
:return:
"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
| [
"tektonikboy98@gmail.com"
] | tektonikboy98@gmail.com |
54431f70529ad477776902aaf6afb9bed0e1f2d0 | 7ae20e08e736e6df546cb5a80df2baf067686b52 | /tasks/sprint-3/Финал B - Effective Quick Sort/effective_quick_sort_test.py | 4938fba3eee95666adf5ae8f300c6c7e8bbbcacb | [] | no_license | Grey2k/yandex.praktikum-alghoritms | faf466374c932733cc1c5049a2df719d8fd33ac7 | 97b1b4858265b44266a33b834e1e9a1349739048 | refs/heads/master | 2023-08-28T02:46:16.502298 | 2021-09-28T19:08:35 | 2021-09-28T19:08:35 | 334,646,281 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,881 | py | import unittest
import io
from unittest.mock import patch
from effective_quick_sort import main
class EffectiveQuickSortTest(unittest.TestCase):
@patch('sys.stdin', io.StringIO("\n".join([
'5',
'alla 4 100',
'gena 6 1000',
'gosha 2 90',
'rita 2 90',
'timofey 4 80',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_one(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'gena',
'timofey',
'alla',
'gosha',
'rita',
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'5',
'alla 0 0',
'gena 0 0',
'rita 0 0',
'timofey 0 0',
'gosha 0 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_two(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena',
'gosha',
'rita',
'timofey',
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'5',
'alla 1 0',
'gena 0 0',
'gosha 1 100',
'rita 0 0',
'timofey 0 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_three(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gosha',
'gena',
'rita',
'timofey',
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'5',
'alla 1 0',
'gena 0 0',
'gosha 1 100',
'rita 2 0',
'timofey 2 100',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_four(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'rita',
'timofey',
'alla',
'gosha',
'gena',
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'1',
'alla 1 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_five(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'alla 1 0',
'gena 1 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_six(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'gena 1 0',
'alla 1 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_seven(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'gena 2 10',
'alla 2 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_eight(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'gena 1 10',
'alla 2 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_nine(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'5',
'alla 1 10',
'gena 1 10',
'gosha 1 10',
'rita 2 100',
'timofey 2 100',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_ten(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'rita',
'timofey',
'alla',
'gena',
'gosha',
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'alla 1 0',
'gena 1 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_eleven(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'alla 2 0',
'gena 2 10',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_twelve(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'alla 2 0',
'gena 1 10',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_thirteen(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'3',
'alla 1 0',
'gosha 1 0',
'gena 1 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_fourteen(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena',
'gosha'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'3',
'alla 2 0',
'gosha 2 0',
'gena 2 10',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_fifteen(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gosha',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'3',
'alla 2 0',
'gosha 1 10',
'gena 1 10',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_sixteen(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena',
'gosha'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'5',
'alla 1 100',
'gena 1 1000',
'gosha 1 90',
'rita 1 90',
'timofey 10 80',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_seventeen(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'timofey',
'gosha',
'rita',
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'13',
'tufhdbi 76 58',
'rqyoazgbmv 59 78',
'qvgtrlkmyrm 35 27',
'tgcytmfpj 70 27',
'xvf 84 19',
'jzpnpgpcqbsmczrgvsu 30 3',
'evjphqnevjqakze 92 15',
'wwzwv 87 8',
'tfpiqpwmkkduhcupp 1 82',
'tzamkyqadmybky 5 81',
'amotrxgba 0 6',
'easfsifbzkfezn 100 28',
'kivdiy 70 47',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_eighteen(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'easfsifbzkfezn',
'evjphqnevjqakze',
'wwzwv',
'xvf',
'tufhdbi',
'tgcytmfpj',
'kivdiy',
'rqyoazgbmv',
'qvgtrlkmyrm',
'jzpnpgpcqbsmczrgvsu',
'tzamkyqadmybky',
'tfpiqpwmkkduhcupp',
'amotrxgba',
]) + '\n')
def test_comparator(self):
self.assertEqual((-1, 0, 'alla') > (-2, 0, 'gosha'), True)
self.assertEqual((-1, 0, 'alla') < (-1, 0, 'gosha'), True)
self.assertEqual((-1, 10, 'alla') > (-1, 0, 'gosha'), True)
| [
"grey2k@gmail.com"
] | grey2k@gmail.com |
3f5d882eb5c278177e7ea02ad9b0bae7cf3d56a7 | dd80a584130ef1a0333429ba76c1cee0eb40df73 | /external/chromium_org/chrome/common/extensions/docs/server2/features_bundle_test.py | 50767975bc1ae25f4024e0185a990181d1544b71 | [
"BSD-3-Clause",
"MIT"
] | permissive | karunmatharu/Android-4.4-Pay-by-Data | 466f4e169ede13c5835424c78e8c30ce58f885c1 | fcb778e92d4aad525ef7a995660580f948d40bc9 | refs/heads/master | 2021-03-24T13:33:01.721868 | 2017-02-18T17:48:49 | 2017-02-18T17:48:49 | 81,847,777 | 0 | 2 | MIT | 2020-03-09T00:02:12 | 2017-02-13T16:47:00 | null | UTF-8 | Python | false | false | 7,104 | py | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from extensions_paths import EXTENSIONS
from server_instance import ServerInstance
from test_file_system import TestFileSystem
_TEST_FILESYSTEM = {
'api': {
'_api_features.json': json.dumps({
'audioCapture': {
'channel': 'stable',
'extension_types': ['platform_app']
},
'background': [
{
'channel': 'stable',
'extension_types': ['extension']
},
{
'channel': 'stable',
'extension_types': ['platform_app'],
'whitelist': ['im not here']
}
],
'omnibox': {
'dependencies': ['manifest:omnibox'],
'contexts': ['blessed_extension']
},
'syncFileSystem': {
'dependencies': ['permission:syncFileSystem'],
'contexts': ['blessed_extension']
},
'tabs': {
'channel': 'stable',
'extension_types': ['extension', 'legacy_packaged_app'],
'contexts': ['blessed_extension']
},
'test': {
'channel': 'stable',
'extension_types': 'all',
'contexts': [
'blessed_extension', 'unblessed_extension', 'content_script']
},
'windows': {
'dependencies': ['api:tabs'],
'contexts': ['blessed_extension']
}
}),
'_manifest_features.json': json.dumps({
'app.content_security_policy': {
'channel': 'stable',
'extension_types': ['platform_app'],
'min_manifest_version': 2,
'whitelist': ['this isnt happening']
},
'background': {
'channel': 'stable',
'extension_types': ['extension', 'legacy_packaged_app', 'hosted_app']
},
'manifest_version': {
'channel': 'stable',
'extension_types': 'all'
},
'omnibox': {
'channel': 'stable',
'extension_types': ['extension']
},
'page_action': {
'channel': 'stable',
'extension_types': ['extension']
},
'sockets': {
'channel': 'dev',
'extension_types': ['platform_app']
}
}),
'_permission_features.json': json.dumps({
'bluetooth': {
'channel': 'dev',
'extension_types': ['platform_app']
},
'power': {
'channel': 'stable',
'extension_types': [
'extension', 'legacy_packaged_app', 'platform_app'
]
},
'syncFileSystem': {
'channel': 'stable',
'extension_types': ['platform_app']
},
'tabs': {
'channel': 'stable',
'extension_types': ['extension']
}
})
},
'docs': {
'templates': {
'json': {
'manifest.json': json.dumps({
'background': {
'documentation': 'background_pages.html'
},
'manifest_version': {
'documentation': 'manifest/manifest_version.html',
'example': 2,
'level': 'required'
},
'page_action': {
'documentation': 'pageAction.html',
'example': {},
'level': 'only_one'
}
}),
'permissions.json': json.dumps({
'fakeUnsupportedFeature': {},
'syncFileSystem': {
'partial': 'permissions/sync_file_system.html'
},
'tabs': {
'partial': 'permissions/tabs.html'
},
})
}
}
}
}
class FeaturesBundleTest(unittest.TestCase):
def setUp(self):
self._server = ServerInstance.ForTest(
TestFileSystem(_TEST_FILESYSTEM, relative_to=EXTENSIONS))
def testManifestFeatures(self):
expected_features = {
'background': {
'name': 'background',
'channel': 'stable',
'platforms': ['extensions'],
'documentation': 'background_pages.html'
},
'manifest_version': {
'name': 'manifest_version',
'channel': 'stable',
'platforms': ['apps', 'extensions'],
'documentation': 'manifest/manifest_version.html',
'level': 'required',
'example': 2
},
'omnibox': {
'name': 'omnibox',
'channel': 'stable',
'platforms': ['extensions']
},
'page_action': {
'name': 'page_action',
'channel': 'stable',
'platforms': ['extensions'],
'documentation': 'pageAction.html',
'level': 'only_one',
'example': {}
},
'sockets': {
'name': 'sockets',
'channel': 'dev',
'platforms': ['apps']
}
}
self.assertEqual(
expected_features,
self._server.features_bundle.GetManifestFeatures().Get())
def testPermissionFeatures(self):
expected_features = {
'bluetooth': {
'name': 'bluetooth',
'channel': 'dev',
'platforms': ['apps'],
},
'fakeUnsupportedFeature': {
'name': 'fakeUnsupportedFeature',
'platforms': []
},
'power': {
'name': 'power',
'channel': 'stable',
'platforms': ['apps', 'extensions'],
},
'syncFileSystem': {
'name': 'syncFileSystem',
'channel': 'stable',
'platforms': ['apps'],
'partial': 'permissions/sync_file_system.html'
},
'tabs': {
'name': 'tabs',
'channel': 'stable',
'platforms': ['extensions'],
'partial': 'permissions/tabs.html'
}
}
self.assertEqual(
expected_features,
self._server.features_bundle.GetPermissionFeatures().Get())
def testAPIFeatures(self):
expected_features = {
'audioCapture': {
'name': 'audioCapture',
'channel': 'stable',
'platforms': ['apps']
},
'background': {
'name': 'background',
'channel': 'stable',
'platforms': ['extensions']
},
'omnibox': {
'name': 'omnibox',
'platforms': ['extensions'],
'contexts': ['blessed_extension'],
'dependencies': ['manifest:omnibox']
},
'syncFileSystem': {
'name': 'syncFileSystem',
'platforms': ['apps'],
'contexts': ['blessed_extension'],
'dependencies': ['permission:syncFileSystem']
},
'tabs': {
'name': 'tabs',
'channel': 'stable',
'platforms': ['extensions'],
'contexts': ['blessed_extension'],
},
'test': {
'name': 'test',
'channel': 'stable',
'platforms': ['apps', 'extensions'],
'contexts': [
'blessed_extension', 'unblessed_extension', 'content_script'],
},
'windows': {
'name': 'windows',
'platforms': ['extensions'],
'contexts': ['blessed_extension'],
'dependencies': ['api:tabs']
}
}
self.assertEqual(
expected_features,
self._server.features_bundle.GetAPIFeatures().Get())
if __name__ == '__main__':
unittest.main()
| [
"karun.matharu@gmail.com"
] | karun.matharu@gmail.com |
279e0d1e2d470454199c547c791cb5ef62e33742 | 18305efd1edeb68db69880e03411df37fc83b58b | /pdb_files3000rot/yw/1ywr/tractability_450/pymol_results_file.py | ebb72d9d9960837c67a5b78068b76528df0d8d86 | [] | no_license | Cradoux/hotspot_pipline | 22e604974c8e38c9ffa979092267a77c6e1dc458 | 88f7fab8611ebf67334474c6e9ea8fc5e52d27da | refs/heads/master | 2021-11-03T16:21:12.837229 | 2019-03-28T08:31:39 | 2019-03-28T08:31:39 | 170,106,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,618 | py |
from os.path import join
import tempfile
import zipfile
from pymol import cmd, finish_launching
from pymol.cgo import *
finish_launching()
dirpath = None
def cgo_arrow(atom1='pk1', atom2='pk2', radius=0.07, gap=0.0, hlength=-1, hradius=-1, color='blue red', name=''):
    """Build a CGO arrow (cylinder shaft + cone head) from atom1 to atom2.

    Parameters:
        atom1, atom2: PyMOL selection name, "[x,y,z]" string, or [x, y, z] list.
        radius: shaft radius.
        gap: distance to shrink the arrow away from each endpoint.
        hlength, hradius: head length/radius; negative means derive from radius.
        color: one color name, or two space-separated names (shaft, head).
        name: unused; kept for backward API compatibility.

    Returns:
        A flat CGO primitive list suitable for cmd.load_cgo().
    """
    from chempy import cpv

    radius, gap = float(radius), float(gap)
    hlength, hradius = float(hlength), float(hradius)

    # "color" may be a single name or two space-separated names.
    try:
        color1, color2 = color.split()
    except ValueError:  # single color: use it for both shaft and head
        color1 = color2 = color
    color1 = list(cmd.get_color_tuple(color1))
    color2 = list(cmd.get_color_tuple(color2))

    def get_coord(v):
        # Accept raw [x, y, z], a "[x,y,z]" string, or a selection name.
        if not isinstance(v, str):
            return v
        if v.startswith('['):
            return cmd.safe_list_eval(v)
        return cmd.get_atom_coords(v)

    xyz1 = get_coord(atom1)
    xyz2 = get_coord(atom2)
    normal = cpv.normalize(cpv.sub(xyz1, xyz2))

    # Derive head dimensions from the shaft radius when not given.
    if hlength < 0:
        hlength = radius * 3.0
    if hradius < 0:
        hradius = hlength * 0.6

    if gap:
        diff = cpv.scale(normal, gap)
        xyz1 = cpv.sub(xyz1, diff)
        xyz2 = cpv.add(xyz2, diff)

    # xyz3 is where the cylinder ends and the cone begins.
    xyz3 = cpv.add(cpv.scale(normal, hlength), xyz2)

    obj = [cgo.CYLINDER] + xyz1 + xyz3 + [radius] + color1 + color2 + [cgo.CONE] + xyz3 + xyz2 + [hradius, 0.0] + color2 + color2 + [1.0, 0.0]
    return obj
dirpath = tempfile.mkdtemp()
zip_dir = 'out.zip'
with zipfile.ZipFile(zip_dir) as hs_zip:
hs_zip.extractall(dirpath)
cmd.load(join(dirpath,"protein.pdb"), "protein")
cmd.show("cartoon", "protein")
if dirpath:
f = join(dirpath, "label_threshold_10.mol2")
else:
f = "label_threshold_10.mol2"
cmd.load(f, 'label_threshold_10')
cmd.hide('everything', 'label_threshold_10')
cmd.label("label_threshold_10", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_14.mol2")
else:
f = "label_threshold_14.mol2"
cmd.load(f, 'label_threshold_14')
cmd.hide('everything', 'label_threshold_14')
cmd.label("label_threshold_14", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_17.mol2")
else:
f = "label_threshold_17.mol2"
cmd.load(f, 'label_threshold_17')
cmd.hide('everything', 'label_threshold_17')
cmd.label("label_threshold_17", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
colour_dict = {'acceptor':'red', 'donor':'blue', 'apolar':'yellow', 'negative':'purple', 'positive':'cyan'}
threshold_list = [10, 14, 17]
gfiles = ['donor.grd', 'apolar.grd', 'acceptor.grd']
grids = ['donor', 'apolar', 'acceptor']
num = 0
surf_transparency = 0.2
if dirpath:
gfiles = [join(dirpath, g) for g in gfiles]
for t in threshold_list:
for i in range(len(grids)):
try:
cmd.load(r'%s'%(gfiles[i]), '%s_%s'%(grids[i], str(num)))
cmd.isosurface('surface_%s_%s_%s'%(grids[i], t, num), '%s_%s'%(grids[i], num), t)
cmd.set('transparency', surf_transparency, 'surface_%s_%s_%s'%(grids[i], t, num))
cmd.color(colour_dict['%s'%(grids[i])], 'surface_%s_%s_%s'%(grids[i], t, num))
cmd.group('threshold_%s'%(t), members = 'surface_%s_%s_%s'%(grids[i],t, num))
cmd.group('threshold_%s' % (t), members='label_threshold_%s' % (t))
except:
continue
try:
cmd.group('hotspot_%s' % (num), members='threshold_%s' % (t))
except:
continue
for g in grids:
cmd.group('hotspot_%s' % (num), members='%s_%s' % (g,num))
cluster_dict = {"16.1989994049":[], "16.1989994049_arrows":[]}
cluster_dict["16.1989994049"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(-1.0), float(-3.0), float(23.5), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([-1.0,-3.0,23.5], [-4.133,-2.917,23.047], color="blue red", name="Arrows_16.1989994049_1")
cluster_dict["16.1989994049"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(2.5), float(-3.0), float(15.0), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([2.5,-3.0,15.0], [3.509,-5.374,14.235], color="blue red", name="Arrows_16.1989994049_2")
cluster_dict["16.1989994049"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(2.0), float(3.5), float(19.5), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([2.0,3.5,19.5], [4.113,5.844,19.273], color="blue red", name="Arrows_16.1989994049_3")
cluster_dict["16.1989994049"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(4.0), float(3.5), float(26.5), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([4.0,3.5,26.5], [2.411,5.22,24.84], color="blue red", name="Arrows_16.1989994049_4")
cluster_dict["16.1989994049"] += [COLOR, 1.00, 1.000, 0.000] + [ALPHA, 0.6] + [SPHERE, float(2.85864055667), float(-0.56159484147), float(21.1266280078), float(1.0)]
cluster_dict["16.1989994049"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(2.5), float(-3.0), float(16.5), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([2.5,-3.0,16.5], [3.933,-5.403,17.063], color="red blue", name="Arrows_16.1989994049_5")
cluster_dict["16.1989994049"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(2.5), float(-3.0), float(16.5), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([2.5,-3.0,16.5], [3.933,-5.403,17.063], color="red blue", name="Arrows_16.1989994049_6")
cluster_dict["16.1989994049"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(4.0), float(3.0), float(22.5), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([4.0,3.0,22.5], [2.892,6.029,22.446], color="red blue", name="Arrows_16.1989994049_7")
cmd.load_cgo(cluster_dict["16.1989994049"], "Features_16.1989994049", 1)
cmd.load_cgo(cluster_dict["16.1989994049_arrows"], "Arrows_16.1989994049")
cmd.set("transparency", 0.2,"Features_16.1989994049")
cmd.group("Pharmacophore_16.1989994049", members="Features_16.1989994049")
cmd.group("Pharmacophore_16.1989994049", members="Arrows_16.1989994049")
if dirpath:
f = join(dirpath, "label_threshold_16.1989994049.mol2")
else:
f = "label_threshold_16.1989994049.mol2"
cmd.load(f, 'label_threshold_16.1989994049')
cmd.hide('everything', 'label_threshold_16.1989994049')
cmd.label("label_threshold_16.1989994049", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
cmd.group('Pharmacophore_16.1989994049', members= 'label_threshold_16.1989994049')
cmd.bg_color("white")
cmd.show("cartoon", "protein")
cmd.color("slate", "protein")
cmd.show("sticks", "organic")
cmd.hide("lines", "protein")
| [
"cradoux.cr@gmail.com"
] | cradoux.cr@gmail.com |
e829b3c4b85543d0c217195b930a908a47eb42ec | 386d1b6557f4cbaf20794cd222f3b7b8598ef6a6 | /data/clean_data/A1/81.py | 8adc1c38d7e03dff806e025dd070c4524f27725c | [] | no_license | woowei0102/code2pro | 3baf86985f911264362963c503f12d20bdc1f89f | 0b16c62a1cb9053ab59edd7a52e1b3b39fdf66dc | refs/heads/main | 2023-06-28T23:09:23.998798 | 2021-07-13T11:49:27 | 2021-07-13T11:49:27 | 385,585,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | class Account:
class Account:
    """A toy bank account holding an owner's name and an NT$ balance."""

    def __init__(self, name):
        self.name = name
        self.balance = 0

    def deposit(self, amount):
        """Add amount to the balance and announce the deposit."""
        self.balance = self.balance + amount
        print('{}存了NT$'.format(self.name) + str(amount) + '元')

    def withdraw(self, amount):
        """Deduct amount when the balance covers it; otherwise report
        insufficient funds and leave the balance untouched."""
        if amount > self.balance:
            print('{}的存款不足.'.format(self.name))
        else:
            self.balance = self.balance - amount

    def show(self):
        """Print the owner's formatted balance."""
        print("{}餘額NT${:,.0f}元".format(self.name, self.balance))


userA = Account("Jack")
userA.withdraw(1000)
userA.deposit(5000)
userA.withdraw(1000)
userA.show()
| [
"54971984+woowei0102@users.noreply.github.com"
] | 54971984+woowei0102@users.noreply.github.com |
d1c982f88855f761cac1e63ac2a25c7026cee10c | 9daf1ecdfc69a1a97998465fae2102f0f2845eb0 | /deepbond/models/rcnn_crf.py | 3118eefd7a1f3809a359955b8e9450ee478a342e | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mtreviso/deepbond | f3d23de8955f8ff1085b24fe53ebb7ff722a2a7f | a36ccb71e4457889d340920260f18666835d703f | refs/heads/master | 2023-04-07T22:58:34.650115 | 2023-03-15T16:27:58 | 2023-03-15T16:27:58 | 114,040,073 | 17 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,573 | py | import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from deepbond import constants
from deepbond.initialization import init_xavier, init_kaiming
from deepbond.models.model import Model
from deepbond.modules.crf import CRF
class RCNNCRF(Model):
    """Recurrent Convolutional Neural Network + CRF.

    Embedding -> Conv1d + max-pool -> (bi)RNN -> linear emissions -> CRF.
    As described in: https://arxiv.org/pdf/1610.00211.pdf
    """

    def __init__(self, words_field, tags_field, options):
        """Build all layers from the vocab fields and the options namespace."""
        super().__init__(words_field, tags_field)
        #
        # Embeddings
        #
        word_embeddings = None
        if self.words_field.vocab.vectors is not None:
            # Pretrained vectors override the configured embedding size.
            word_embeddings = self.words_field.vocab.vectors
            options.word_embeddings_size = word_embeddings.size(1)
        self.word_emb = nn.Embedding(
            num_embeddings=len(self.words_field.vocab),
            embedding_dim=options.word_embeddings_size,
            padding_idx=constants.PAD_ID,
            _weight=word_embeddings,
        )
        self.dropout_emb = nn.Dropout(options.emb_dropout)
        if options.freeze_embeddings:
            self.word_emb.weight.requires_grad = False
        features_size = options.word_embeddings_size
        #
        # CNN 1D
        #
        self.cnn_1d = nn.Conv1d(in_channels=features_size,
                                out_channels=options.conv_size,
                                kernel_size=options.kernel_size,
                                padding=options.kernel_size // 2)
        self.max_pool = nn.MaxPool1d(options.pool_length,
                                     padding=options.pool_length // 2)
        self.dropout_cnn = nn.Dropout(options.cnn_dropout)
        self.relu = torch.nn.ReLU()
        # Feature size after pooling: floor division by pool length plus
        # the extra positions introduced by the pooling padding.
        features_size = (options.conv_size // options.pool_length +
                         options.pool_length // 2)
        #
        # RNN
        #
        self.is_bidir = options.bidirectional
        self.sum_bidir = options.sum_bidir
        self.rnn_type = options.rnn_type
        if self.rnn_type == 'gru':
            rnn_class = nn.GRU
        elif self.rnn_type == 'lstm':
            rnn_class = nn.LSTM
        else:
            rnn_class = nn.RNN
        hidden_size = options.hidden_size[0]
        self.rnn = rnn_class(features_size,
                             hidden_size,
                             bidirectional=self.is_bidir,
                             batch_first=True)
        self.dropout_rnn = nn.Dropout(options.rnn_dropout)
        self.sigmoid = torch.nn.Sigmoid()
        features_size = hidden_size
        # Use '.' as the CRF end tag when present in the tag vocab,
        # otherwise fall back to '_'.
        eos_tag_id = self.tags_field.vocab.stoi['.'] if '.' in self.tags_field.vocab.stoi else self.tags_field.vocab.stoi['_']
        self.crf = CRF(
            self.nb_classes,
            bos_tag_id=self.tags_field.vocab.stoi['_'],  # hack
            eos_tag_id=eos_tag_id,
            pad_tag_id=None,
            batch_first=True,
        )
        #
        # Linear
        #
        # Double the input width only when bidirectional outputs are
        # concatenated rather than summed.
        n = 1 if not self.is_bidir or self.sum_bidir else 2
        self.linear_out = nn.Linear(n * features_size, self.nb_classes)
        self.init_weights()
        self.is_built = True

    def init_weights(self):
        """Initialize conv (Kaiming) and RNN/linear (Xavier) weights."""
        if self.cnn_1d is not None:
            init_kaiming(self.cnn_1d, dist='uniform', nonlinearity='relu')
        if self.rnn is not None:
            init_xavier(self.rnn, dist='uniform')
        if self.linear_out is not None:
            init_xavier(self.linear_out, dist='uniform')

    def build_loss(self, loss_weights=None):
        """Use the CRF module itself as the loss; loss_weights is unused here."""
        self._loss = self.crf

    def loss(self, emissions, gold):
        """Return the CRF negative log-likelihood for the gold tag sequence."""
        mask = gold != constants.TAGS_PAD_ID
        # CRF expects valid tag ids everywhere; zero out padded positions.
        crf_gold = gold.clone()
        crf_gold[mask == 0] = 0
        return self._loss(emissions, crf_gold, mask=mask.float())

    def predict_classes(self, batch):
        """Viterbi-decode the best tag path for each sequence in the batch."""
        emissions = self.forward(batch)
        mask = batch.words != constants.PAD_ID
        # forward() drops 2 tokens (<bos>/<eos>); trim the mask to match.
        _, path = self.crf.decode(emissions, mask=mask[:, 2:].float())
        return [torch.tensor(p) for p in path]

    def predict_proba(self, batch):
        """Per-class probabilities are not defined for CRF decoding."""
        raise Exception('Predict() probability is not available.')

    def forward(self, batch):
        """Compute CRF emission scores, shape (bs, ts-2, nb_classes)."""
        assert self.is_built
        assert self._loss is not None
        h = batch.words
        mask = h != constants.PAD_ID
        lengths = mask.int().sum(dim=-1)
        # (bs, ts) -> (bs, ts, emb_dim)
        h = self.word_emb(h)
        h = self.dropout_emb(h)
        # Turn (bs, ts, emb_dim) into (bs, emb_dim, ts) for CNN
        h = h.transpose(1, 2)
        # (bs, emb_dim, ts) -> (bs, conv_size, ts)
        h = self.relu(self.cnn_1d(h))
        # Turn (bs, conv_size, ts) into (bs, ts, conv_size) for Pooling
        h = h.transpose(1, 2)
        # (bs, ts, conv_size) -> (bs, ts, pool_size)
        h = self.max_pool(h)
        h = self.dropout_cnn(h)
        # (bs, ts, pool_size) -> (bs, ts, hidden_size)
        h = pack(h, lengths, batch_first=True, enforce_sorted=False)
        h, _ = self.rnn(h)
        h, _ = unpack(h, batch_first=True)
        # if you'd like to sum instead of concatenate:
        if self.sum_bidir:
            h = (h[:, :, :self.rnn.hidden_size] +
                 h[:, :, self.rnn.hidden_size:])
        h = self.sigmoid(h)
        # apply dropout
        h = self.dropout_rnn(h)
        # (bs, ts, hidden_size) -> (bs, ts, nb_classes)
        h = self.linear_out(h)
        # remove <bos> and <eos> tokens
        # (bs, ts, nb_classes) -> (bs, ts-2, nb_classes)
        h = h[:, 1:-1, :]
        return h
| [
"marcosvtreviso@gmail.com"
] | marcosvtreviso@gmail.com |
1cd352d1bca1e800029113e0addaac329f0597b1 | 4f7dc1bd5a5561c9f3fb693f0d6f4c6b13504db6 | /library/v0.5/analysis_tools/kinase_enrichment/kinase_enrichment.py | 00278ff36b510fa6436063457fa111a67be34ad7 | [] | no_license | bluejek128/microglia | 296b02d21f82f0769c18e3fa7e63eadd374e4965 | f9f2281c277d1b71ca80e26cc071fa096e653e68 | refs/heads/master | 2020-03-13T21:48:14.750943 | 2018-05-07T20:43:32 | 2018-05-07T20:43:32 | 131,304,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,204 | py | #################################################################
#################################################################
############### DE
#################################################################
#################################################################
#############################################
########## 1. Load libraries
#############################################
##### 1. General support #####
import qgrid, requests, json
import pandas as pd
import numpy as np
from IPython.display import display, Markdown, HTML
##### 2. Other libraries #####
#######################################################
#######################################################
########## Support
#######################################################
#######################################################
#############################################
########## 1. Get Enrichr Results
#############################################
def get_enrichr_results(user_list_id, gene_set_libraries, overlappingGenes=True):
    """Fetch enrichment results from the Enrichr web API.

    Queries each library in gene_set_libraries for the uploaded list
    identified by user_list_id and returns one concatenated DataFrame
    with a 'gene_set_library' column marking each row's origin.
    overlappingGenes toggles whether the overlapping_genes column is kept.
    Raises Exception on any non-OK HTTP response.
    """
    ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/enrich'
    query_string = '?userListId=%s&backgroundType=%s'
    results = []
    for gene_set_library in gene_set_libraries:
        response = requests.get(
            ENRICHR_URL + query_string % (user_list_id, gene_set_library)
        )
        if not response.ok:
            raise Exception('Error fetching enrichment results')
        # The payload maps the library name to a list of result rows.
        data = json.loads(response.text)
        resultDataframe = pd.DataFrame(data[gene_set_library], columns=['rank', 'term_name', 'pvalue', 'zscore', 'combined_score', 'overlapping_genes', 'FDR', 'old_pvalue', 'old_FDR'])
        selectedColumns = ['term_name','zscore','combined_score','pvalue', 'FDR'] if not overlappingGenes else ['term_name','zscore','combined_score','FDR', 'pvalue', 'overlapping_genes']
        resultDataframe = resultDataframe.loc[:,selectedColumns]
        resultDataframe['gene_set_library'] = gene_set_library
        results.append(resultDataframe)
    concatenatedDataframe = pd.concat(results)
    return concatenatedDataframe
#############################################
########## 2. Display Result Table
#############################################
def results_table(enrichment_dataframe, source_label, target_label):
# Get libraries
for gene_set_library in enrichment_dataframe['gene_set_library'].unique():
# Get subset
enrichment_dataframe_subset = enrichment_dataframe[enrichment_dataframe['gene_set_library'] == gene_set_library].copy()
# Get unique values from source column
enrichment_dataframe_subset[source_label] = [x.split('_')[0] for x in enrichment_dataframe_subset['term_name']]
enrichment_dataframe_subset = enrichment_dataframe_subset.sort_values(['FDR', 'pvalue']).rename(columns={'pvalue': 'P-value'}).drop_duplicates(source_label)
# Add links and bold for significant results
enrichment_dataframe_subset[source_label] = ['<a href="http://amp.pharm.mssm.edu/Harmonizome/gene/{x}" target="_blank">{x}</a>'.format(**locals()) for x in enrichment_dataframe_subset[source_label]]
enrichment_dataframe_subset[source_label] = [rowData[source_label].replace('target="_blank">', 'target="_blank"><b>').replace('</a>', '*</b></a>') if rowData['FDR'] < 0.05 else rowData[source_label] for index, rowData in enrichment_dataframe_subset.iterrows()]
# Add rank
enrichment_dataframe_subset['Rank'] = ['<b>'+str(x+1)+'</b>' for x in range(len(enrichment_dataframe_subset.index))]
# Add overlapping genes with tooltip
enrichment_dataframe_subset['nr_overlapping_genes'] = [len(x) for x in enrichment_dataframe_subset['overlapping_genes']]
enrichment_dataframe_subset['overlapping_genes'] = [', '.join(x) for x in enrichment_dataframe_subset['overlapping_genes']]
enrichment_dataframe_subset[target_label.title()] = ['{nr_overlapping_genes} {geneset} '.format(**rowData)+target_label+'s' for index, rowData in enrichment_dataframe_subset.iterrows()]
# enrichment_dataframe[target_label.title()] = ['<span class="gene-tooltip">{nr_overlapping_genes} {geneset} '.format(**rowData)+target_label+'s<div class="gene-tooltip-text">{overlapping_genes}</div></span>'.format(**rowData) for index, rowData in enrichment_dataframe.iterrows()]
# Convert to HTML
pd.set_option('max.colwidth', -1)
html_table = enrichment_dataframe_subset.head(50)[['Rank', source_label, 'P-value', 'FDR', target_label.title()]].to_html(escape=False, index=False, classes='w-100')
html_results = '<div style="max-height: 200px; overflow-y: scroll;">{}</div>'.format(html_table)
# Add CSS
display(HTML('<style>.w-100{width: 100%;} .text-left th{text-align: left !important;}</style>'))
display(HTML('<style>.slick-cell{overflow: visible;}.gene-tooltip{text-decoration: underline; text-decoration-style: dotted;}.gene-tooltip .gene-tooltip-text{visibility: hidden; position: absolute; left: 60%; width: 250px; z-index: 1000; text-align: center; background-color: black; color: white; padding: 5px 10px; border-radius: 5px;} .gene-tooltip:hover .gene-tooltip-text{visibility: visible;} .gene-tooltip .gene-tooltip-text::after {content: " ";position: absolute;bottom: 100%;left: 50%;margin-left: -5px;border-width: 5px;border-style: solid;border-color: transparent transparent black transparent;}</style>'))
# Display gene set
display(Markdown('### A. KEA (experimentally validated substrates)' if gene_set_library == 'KEA_2015' else '### B. ARCHS4 (coexpressed genes)'))
# Display table
display(HTML(html_results))
#######################################################
#######################################################
########## S1. Function
#######################################################
#######################################################
#############################################
########## 1. Run
#############################################
def run(enrichr_results, signature_label):
# Initialize results
results = []
# Loop through genesets
for geneset in ['upregulated', 'downregulated']:
# Append ChEA results
enrichment_dataframe = get_enrichr_results(enrichr_results[geneset]['userListId'], gene_set_libraries=['KEA_2015', 'ARCHS4_Kinases_Coexp'])
enrichment_dataframe['geneset'] = geneset
results.append(enrichment_dataframe)
# Concatenate results
tf_dataframe = pd.concat(results)
return {'tf_dataframe': tf_dataframe, 'signature_label': signature_label}
#############################################
########## 2. Plot
#############################################
def plot(kinase_enrichment_results, plot_counter):
results_table(kinase_enrichment_results['tf_dataframe'].copy(), source_label='Kinase', target_label='substrate')
# Figure Legend
display(Markdown('** Table '+plot_counter('table')+' | Kinase Enrichment Analysis Results. **The figure contains browsable tables displaying the results of the Protein Kinase (PK) enrichment analysis generated using Enrichr. Every row represents a PK; significant PKs are highlighted in bold. A displays results generated using KEA, indicating PKs whose experimentally validated substrates are enriched. C displays results generated using the ARCHS4 library, indicating PKs whose top coexpressed genes (according to the ARCHS4 dataset) are enriched.'.format(**locals()))) | [
"denis.torre@mssm.edu"
] | denis.torre@mssm.edu |
43a4b48b9af6e391d2d94d872ba672bbdee47e83 | bc441bb06b8948288f110af63feda4e798f30225 | /agent_admin_sdk/model/topology/link_pb2.py | 57425310345e86fec127525b11152a17ac288f7c | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,202 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: link.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from agent_admin_sdk.model.topology import linkStyle_pb2 as agent__admin__sdk_dot_model_dot_topology_dot_linkStyle__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='link.proto',
package='topology',
syntax='proto3',
serialized_options=_b('ZBgo.easyops.local/contracts/protorepo-models/easyops/model/topology'),
serialized_pb=_b('\n\nlink.proto\x12\x08topology\x1a.agent_admin_sdk/model/topology/linkStyle.proto\"J\n\x04Link\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\x12\"\n\x05style\x18\x03 \x01(\x0b\x32\x13.topology.LinkStyleBDZBgo.easyops.local/contracts/protorepo-models/easyops/model/topologyb\x06proto3')
,
dependencies=[agent__admin__sdk_dot_model_dot_topology_dot_linkStyle__pb2.DESCRIPTOR,])
_LINK = _descriptor.Descriptor(
name='Link',
full_name='topology.Link',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='topology.Link.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target', full_name='topology.Link.target', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='style', full_name='topology.Link.style', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=72,
serialized_end=146,
)
_LINK.fields_by_name['style'].message_type = agent__admin__sdk_dot_model_dot_topology_dot_linkStyle__pb2._LINKSTYLE
DESCRIPTOR.message_types_by_name['Link'] = _LINK
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Link = _reflection.GeneratedProtocolMessageType('Link', (_message.Message,), {
'DESCRIPTOR' : _LINK,
'__module__' : 'link_pb2'
# @@protoc_insertion_point(class_scope:topology.Link)
})
_sym_db.RegisterMessage(Link)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
9a8385ebac75bfafb7f8a0eded1d52e017c2102c | fd281c5c50d31c32ff3724d6cfc9534d8bf65b06 | /artigos/migrations/0002_auto_20170922_1603.py | fcd70e91a35936ae7094e5bb9b521c2d1b548346 | [] | no_license | thiagorocha06/mairimed-site | 72ef24cdf0bdc016dac821bb3d8117283a6d9f52 | 5537755ced8c1e4ff8641686acf241b254e50670 | refs/heads/master | 2021-01-22T17:48:17.227737 | 2017-10-29T10:39:26 | 2017-10-29T10:39:26 | 100,734,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,052 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-22 19:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('artigos', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='artigo',
name='ef_top1',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='ef_top2',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='ef_top3',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='ef_top4',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='etio_top1',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='etio_top2',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='etio_top3',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='etio_top4',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='etio_top5',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='etio_top6',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='exames_top1',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='exames_top2',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='exames_top3',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='exames_top4',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='exames_top5',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='exames_top6',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='med_top1',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='med_top2',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='med_top3',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='med_top4',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='med_top5',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='med_top6',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='top1',
field=models.TextField(blank=True, null=True),
),
]
| [
"thiagorocha06@gmail.com"
] | thiagorocha06@gmail.com |
a4f6c54a5d544fc14b830f131426060871b97721 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/161/48984/submittedfiles/testes.py | aa8ccc2aca043bef467a8eafb70d9d924c95f684 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | n=int(input('numero:'))
soma=0
for i in range(0,n+1,1):
fat=1
for a in range(1,i+1,1):
fat=fat*a
soma=soma+(1/math.factorial)
print(soma)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
6ccd2c5d4edca415ab24a1f3efd1a03ab45a84e7 | 773f6abee91e5368e43b34d8ad179c4ab9056da1 | /gen/wellknownfiletype.py | fb3c550fa4fa8b577314a4ae3c13e8a54548196e | [] | no_license | richstoner/aibs | 3dc9489ee6a1db836d58ec736b13d35a7cffc215 | bfc7e732b53b4dff55f7c3edccdd0703f4bab25f | refs/heads/master | 2021-01-10T05:11:09.484238 | 2013-03-03T06:19:34 | 2013-03-03T06:19:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | # -*- coding: utf-8 -*-
# Rich Stoner, 2013
class WellKnownFileType(object):
'''aibs.model.wellknownfiletype (autogen)'''
# Fields
self.id = 0
self.name = ''
# Associations
self.well_known_files = [] # has_many WellKnownFile
def __init__(self, initialData={}):
for k,v in initData.iteritems():
setattr(self, k, v)
# add class methods and private methods here | [
"stonerri@gmail.com"
] | stonerri@gmail.com |
1368e722eb797feff1eca7bb87f37bd18411b067 | 150d9e4cee92be00251625b7f9ff231cc8306e9f | /RemoveDupLL.py | 356fd8431d9a51ac2bd0a44d1c699d17ce8499ff | [] | no_license | JerinPaulS/Python-Programs | 0d3724ce277794be597104d9e8f8becb67282cb0 | d0778178d89d39a93ddb9b95ca18706554eb7655 | refs/heads/master | 2022-05-12T02:18:12.599648 | 2022-04-20T18:02:15 | 2022-04-20T18:02:15 | 216,547,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | '''
Given the head of a sorted linked list, delete all duplicates such that each element appears only once. Return the linked list sorted as well.
Example 1:
Input: head = [1,1,2]
Output: [1,2]
Example 2:
Input: head = [1,1,2,3,3]
Output: [1,2,3]
Constraints:
The number of nodes in the list is in the range [0, 300].
-100 <= Node.val <= 100
The list is guaranteed to be sorted in ascending order.
'''
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
current = head
start = head
if current is None:
return(head)
else:
current = current.next
while current:
if current.val == start.val:
current = current.next
else:
start.next = current
start = start.next
start.next = None
return(head)
def print_l(head):
while head:
print(head.val)
head = head.next
obj = Solution()
head = ListNode(1)
#head.next = ListNode(1)
#head.next.next = ListNode(2)
#head.next.next.next = ListNode(3)
#head.next.next.next.next = ListNode(3)
print_l(obj.deleteDuplicates(head)) | [
"jerinsprograms@gmail.com"
] | jerinsprograms@gmail.com |
2e5aa056829a7e404aebb2f952bfc8b7aa726fe6 | 9b5d0b7d7c9cdaef2851b675292e5eef651ab257 | /tools/extract/liftOver_wrapper.py | 6b23580be746f0f57f2fa12ee10ca9983d07cffb | [
"CC-BY-2.5",
"MIT"
] | permissive | msGenDev/Yeps-EURAC | 392fd497a6891a5a22204b236c26dcd133793f21 | 7b679ea17ba294893cc560354d759cfd61c0b450 | refs/heads/master | 2021-01-16T21:49:26.499975 | 2010-04-05T17:52:50 | 2010-04-05T17:52:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | #!/usr/bin/env python
#Guruprasad Ananda
"""
Converts coordinates from one build/assembly to another using liftOver binary and mapping files downloaded from UCSC.
"""
import sys, os, string
import tempfile
import re
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
def safe_bed_file(infile):
"""Make a BED file with track and browser lines ready for liftOver.
liftOver will fail with track or browser lines. We can make it happy
by converting these to comments. See:
https://lists.soe.ucsc.edu/pipermail/genome/2007-May/013561.html
"""
fix_pat = re.compile("^(track|browser)")
(fd, fname) = tempfile.mkstemp()
in_handle = open(infile)
out_handle = open(fname, "w")
for line in in_handle:
if fix_pat.match(line):
line = "#" + line
out_handle.write(line)
in_handle.close()
out_handle.close()
return fname
if len( sys.argv ) != 7:
stop_err( "USAGE: prog input out_file1 out_file2 input_dbkey output_dbkey minMatch" )
infile = sys.argv[1]
outfile1 = sys.argv[2]
outfile2 = sys.argv[3]
in_dbkey = sys.argv[4]
mapfilepath = sys.argv[5]
minMatch = sys.argv[6]
try:
assert float(minMatch)
except:
minMatch = 0.1
#ensure dbkey is set
if in_dbkey == "?":
stop_err( "Input dataset genome build unspecified, click the pencil icon in the history item to specify it." )
if not os.path.isfile( mapfilepath ):
stop_err( "%s mapping is not currently available." % ( mapfilepath.split('/')[-1].split('.')[0] ) )
safe_infile = safe_bed_file(infile)
cmd_line = "liftOver -minMatch=" + str(minMatch) + " " + safe_infile + " " + mapfilepath + " " + outfile1 + " " + outfile2 + " > /dev/null 2>&1"
try:
os.system( cmd_line )
except Exception, exc:
stop_err( "Exception caught attempting conversion: %s" % str( exc ) )
finally:
os.remove(safe_infile)
| [
"fox91@anche.no"
] | fox91@anche.no |
017a4d9314cc977d5c80644063b57eaa990b050d | b0174911702ab63f7ba0d0ca4cb03ae6453dc182 | /calas7262/service/interfaces.py | 9c34585091728f72ed26c481c929d96a16b1d145 | [
"MIT"
] | permissive | astrorafael/calas7262 | 4001bffdc586b91677095ac4f112170911c93e7c | 8ff4c0ce5bf670fe0bf6fde218ecd7c993c41d0e | refs/heads/master | 2020-08-01T18:12:39.020668 | 2019-10-28T10:07:18 | 2019-10-28T10:07:18 | 211,072,096 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,426 | py | # ----------------------------------------------------------------------
# Copyright (c) 2014 Rafael Gonzalez.
#
# See the LICENSE file for details
# ----------------------------------------------------------------------
#--------------------
# System wide imports
# -------------------
from __future__ import division, absolute_import
# ---------------
# Twisted imports
# ---------------
from zope.interface import implementer, Interface
class IPausable(Interface):
"""
A pausable interface for services.
Run pause/resume code at the appropriate times.
@type paused: C{boolean}
@ivar paused: Whether the service is paused.
"""
def pauseService():
"""
Pauses the service. It can take a while, so it returns a Deferred
@rtype: L{Deferred<defer.Deferred>}
@return: a L{Deferred<defer.Deferred>} which is triggered when the
service has finished shutting down. If shutting down is immediate,
a value can be returned (usually, C{None}).
"""
def resumeService():
"""
Resumes the service. It can take a while, so it returns a Deferred
@rtype: L{Deferred<defer.Deferred>}
@return: a L{Deferred<defer.Deferred>} which is triggered when the
service has finished shutting down. If shutting down is immediate,
a value can be returned (usually, C{None}).
"""
class IReloadable(Interface):
"""
A reloadable interface for services.
Run reload code at the appropriate times.
"""
def reloadService(config=None):
"""
Reloads the service by reading on the fly its service configuration.
Configuration can be stored be a file (more likely) or a database.
If C{config} is C{None}, then the service must find out what changed
may be reading a configuration file (most likely) or a database.
Otherwise, C{config} as an object or data type meaningful for the
service itself passeb by a container I{IReloadable} C{MultiCervice}.
@type config: any meaningful datatype or object.
@rtype: L{Deferred<defer.Deferred>}
@return: a L{Deferred<defer.Deferred>} which is triggered when the
service has finished reloading. If reloading is immediate,
a value can be returned (usually, C{None}).
"""
__all__ = [ "IReloadable", "IPausable" ] | [
"astrorafael@yahoo.es"
] | astrorafael@yahoo.es |
12617fabbf89c88ca061ddde97c6781271a3d367 | 7357d367b0af4650ccc5b783b7a59090fdde47bb | /neo/Core/TX/IssueTransaction.py | 8113d4c98a96ad128375898867a454f12b1368c5 | [
"MIT"
] | permissive | BarracudaPff/code-golf-data-python | fb0cfc74d1777c4246d56a5db8525432bf37ab1a | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | refs/heads/main | 2023-05-29T05:52:22.856551 | 2020-05-23T22:12:48 | 2020-05-23T22:12:48 | 378,832,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | """
Description:
Issue Transaction
Usage:
from neo.Core.TX.IssueTransaction import IssueTransaction
"""
class IssueTransaction(Transaction):
"""docstring for IssueTransaction"""
def __init__(self, *args, **kwargs):
"""
Create an instance.
Args:
*args:
**kwargs:
"""
super(IssueTransaction, self).__init__(*args, **kwargs)
self.Type = TransactionType.IssueTransaction
self.Nonce = None
def SystemFee(self):
"""
Get the system fee.
Returns:
Fixed8:
"""
if self.Version >= 1:
return Fixed8.Zero()
all_neo_gas = True
for output in self.outputs:
if output.AssetId != GetSystemCoin().Hash and output.AssetId != GetSystemShare().Hash:
all_neo_gas = False
if all_neo_gas:
return Fixed8.Zero()
return super(IssueTransaction, self).SystemFee()
def GetScriptHashesForVerifying(self, snapshot):
pass
def DeserializeExclusiveData(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
self.Type = TransactionType.IssueTransaction
if self.Version > 1:
raise Exception("Invalid TX Type")
def SerializeExclusiveData(self, writer):
pass | [
"sokolov.yas@gmail.com"
] | sokolov.yas@gmail.com |
d7d3e50712748c0c8737e836bb75ea879e62ba06 | f5bfdaccf014b9a986a8d1e58a4655c21b8368ce | /send_recv/basic_conn/client.py | 2e1bae8dfa62e99c7500ac9dd12c31c24b8e853e | [] | no_license | wlgud0402/class | a6029bb51160cb2ba39dd59b3826532becd61895 | ae84bfe4bb832d1a5a8434f3a6f78a57da272d62 | refs/heads/master | 2022-10-09T18:47:53.165134 | 2020-06-13T07:46:21 | 2020-06-13T07:46:21 | 271,963,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | import socket
import time
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # ipv4, TCP
client_socket.connect(('127.0.0.1', 3000))
# client_socket.sendall(bytes([234, 185 ,128]))
client_socket.sendall("김지형".encode())
client_socket.close() | [
"wlgudrlgus@naver.com"
] | wlgudrlgus@naver.com |
9341990ffd55f00376c0f6771d2fff7b135601e0 | 76938f270e6165514162856b2ed33c78e3c3bcb5 | /lib/coginvasion/minigame/CameraShyHeadPanels.py | d54c8dccdc92354cc6b09258cd61a99ec93f52ad | [] | no_license | coginvasion/src | 9a5ec682845cc4c9c013fcc35e9b379bd4360b6c | 2d7fcdb0cd073050250cb51292ee48300a9fe19f | refs/heads/master | 2021-01-19T06:50:11.786112 | 2015-11-08T12:28:52 | 2015-11-08T12:28:52 | 61,545,543 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,548 | py | # Embedded file name: lib.coginvasion.minigame.CameraShyHeadPanels
from panda3d.core import Point3, VBase4
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.gui.DirectGui import DirectFrame
from lib.coginvasion.toon.ToonHead import ToonHead
from HeadPanels import HeadPanels
class CameraShyHeadPanels(HeadPanels):
notify = directNotify.newCategory('CameraShyHeadPanels')
def __init__(self):
HeadPanels.__init__(self)
self.framePositions = [Point3(0.15, 0, -0.15),
Point3(0.15, 0, -0.43),
Point3(0.15, 0, -0.71),
Point3(0.15, 0, -0.99)]
self.otherPlayerHeadHolderTransforms = {'scale': Point3(2, 1, 0.5),
'pos': Point3(1.03, 0, 0)}
self.otherPlayerHeadXValues = [-0.45, 0, 0.45]
self.state2Color = {0: VBase4(0.05, 0.05, 0.05, 1.0),
1: VBase4(0.5, 0.5, 0.5, 1.0),
2: VBase4(0.75, 0.75, 0.75, 1.0),
3: VBase4(1.0, 1.0, 1.0, 1.0)}
self.avId2otherPlayerAvIds2otherPlayerHeadsFrame = {}
def generate(self, gender, head, headtype, color, doId, name):
HeadPanels.generate(self, gender, head, headtype, color, doId, name, 0)
def generateOtherPlayerGui(self):
for avId in self.doId2Frame.keys():
self.avId2otherPlayerAvIds2otherPlayerHeadsFrame[avId] = {}
headNumber = -1
frame = self.doId2Frame[avId][0]
otherPlayerHeadsFrame = DirectFrame(relief=None, scale=0.85, parent=frame)
otherPlayerHeadsFrame['image'] = frame['image']
otherPlayerHeadsFrame['image_color'] = frame['image_color']
otherPlayerHeadsFrame['image_scale'] = self.otherPlayerHeadHolderTransforms['scale']
otherPlayerHeadsFrame.setPos(self.otherPlayerHeadHolderTransforms['pos'])
otherPlayerHeadsFrame.setBin('gui-popup', 70)
self.frameList.append(otherPlayerHeadsFrame)
for otherAvId in self.doId2Frame.keys():
if otherAvId != avId:
headNumber += 1
otherAv = base.cr.doId2do.get(otherAvId)
gender = otherAv.getGender()
head, color = otherAv.getHeadStyle()
animal = otherAv.getAnimal()
headFrame = otherPlayerHeadsFrame.attachNewNode('otherPlayerHeadFrame')
headFrame.setPosHprScale(self.otherPlayerHeadXValues[headNumber], 5, -0.1, 180, 0, 0, 0.2, 0.2, 0.2)
headFrame.setColorScale(self.state2Color[0])
toon = ToonHead(None)
toon.generateHead(gender, animal, head)
r, g, b, _ = color
color = (r,
g,
b,
1.0)
toon.setHeadColor(color)
toon.setDepthWrite(1)
toon.setDepthTest(1)
toon.reparentTo(headFrame)
self.avId2otherPlayerAvIds2otherPlayerHeadsFrame[avId][otherAvId] = headFrame
return
def updateOtherPlayerHead(self, avId, otherPlayerAvId, state):
frame = self.avId2otherPlayerAvIds2otherPlayerHeadsFrame[avId][otherPlayerAvId]
frame.setColorScale(self.state2Color[state])
def delete(self):
self.otherPlayerHeadHolderTransforms = None
self.otherPlayerHeadXValues = None
self.state2Color = None
self.avId2otherPlayerAvIds2otherPlayerHeadsFrame = None
HeadPanels.delete(self)
return | [
"ttarchive@yandex.com"
] | ttarchive@yandex.com |
0764441710a1e2bc191eba04dedbcb39accb063a | 502af3505e4e670c507ee6a5dedbc41995cefa09 | /deep_generative_models/tasks/arae/sample.py | 8206a693522c003f87c778e649a89ad618c03744 | [] | no_license | manoj04418/deep-generative-models | c1e8062e280ac6d1f3fb8ab359a21e870a2276df | 402d06773320231d9135c88d8a6033f916a68f89 | refs/heads/master | 2022-10-02T10:44:14.857680 | 2020-06-10T14:50:48 | 2020-06-10T14:50:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | import argparse
from deep_generative_models.configuration import load_configuration
from deep_generative_models.tasks.gan_with_autoencoder.sample import SampleGANWithAutoEncoder
if __name__ == '__main__':
options_parser = argparse.ArgumentParser(description="Sample from ARAE.")
options_parser.add_argument("configuration", type=str, help="Configuration json file.")
options = options_parser.parse_args()
SampleGANWithAutoEncoder().timed_run(load_configuration(options.configuration))
| [
"ramirocamino@gmail.com"
] | ramirocamino@gmail.com |
8c0f067abeaf7da4af4794f4fd818c33ee8870ef | 3f7240da3dc81205a0a3bf3428ee4e7ae74fb3a2 | /src/Week10/Recursion/Recursive Math/recursiveMath.py | 3971edeec5e2ac01cd6538f02737243d4440daeb | [] | no_license | theguyoverthere/CMU15-112-Spring17 | b4ab8e29c31410b4c68d7b2c696a76b9d85ab4d8 | b8287092b14e82d2a3aeac6c27bffbc95382eb34 | refs/heads/master | 2021-04-27T08:52:45.237631 | 2018-10-02T15:38:18 | 2018-10-02T15:38:18 | 107,882,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,603 | py | # A few example recursive functions.
# Can you figure out what each one does, in general?
import math
def f1(x):
if x == 0: return 0
else: return 1 + f1(x-1) #f(0) = 0, f(1) = 1, f(2) = 2, f(3) = 3
def f2(x):
if x == 0: return 40
else: return 1 + f2(x-1) #f(0) = 40, f(1) = 41, f(2) = 42
def f3(x):
if x == 0: return 0
else: return 2 + f3(x-1) #f(0) = 0, f(1) = 2, f(2) = 4, f(3) = 6
def f4(x):
if x == 0: return 40
else: return 2 + f4(x-1) #f(0) = 40, f(1) = 42, f(2) = 44, f(3) = 46
def f5(x):
if x == 0: return 0 #Triangular Numbers
else: return x + f5(x-1) #f(0) = 0, f(1) = 1, f(2) = 3, f(3) = 6
def f6(x):
if x == 0: return 0
else: return 2*x-1 + f6(x-1) #f(0) = 0, f(1) = 1, f(2)= 4, f(3) = 9
# (x - 1)** 2 = x**2 - 2 * x + 1
# 2 *x - 1 + (x -1) ** 2 = x ** 2
def f7(x):
if x == 0: return 1
else: return 2*f7(x-1) #f(0) = 1, f(1) = 2, f(2) = 4 , f(3) = 8, f(4) = 16
def f8(x):
if x < 2: return 0
else: return 1 + f8(x//2) #f(0) = 1, f(1) = 0, f(2) = 1, f(4) = 2, f(8) = 3
def f9(x):
if x < 2: return 1
else: return f9(x-1) + f9(x-2) #Fibonacci Numbers
def f10(x):
if x == 0: return 1 # Factorials!
else: return x*f10(x-1) #f(0) = 1, f(1) = 1, f(2) = 2, f(3) = 6, f(4) = 24
def f11(x, y):
if y < 0: return -f11(x, -y)
elif y == 0: return 0
else: return x + f11(x, y-1) #f(2,3) = 2 + f(2, 2)
# = 2 + 2 + f(2, 1)
# = 2 + 2 + 2 + f(2, 0)
# = 2 + 2 + 2 + 0
# = 6
def f12(x,y):
if (x < 0) and (y < 0): return f12(-x,-y)
elif (x == 0) or (y == 0): return 0
else: return x+y-1 + f12(x-1, y-1) #Returns product of x and y
# (x - 1)*(y - 1) = x * y - (x + y - 1)
def f13(L):
assert(type(L) == list)
if len(L) < 2: return [ ]
else: return f13(L[2:]) + [L[1]] # [0, 1, 2, 3, 4, 5] ---> [5, 3, 1]
# [2, 3, 4, 5] ---> [5, 3]
# [4, 5] ---> [] + [5] = [5]
# [] ---> []
def go():
while True:
n = input("Enter function # (1-13, or 0 to quit): ")
if n == "0": break
elif n == "11": print("f11(5, 7) ==", f11(5, 7))
elif n == "12": print("f12(5, 7) ==", f12(5, 7))
elif n == "13": print("f13(list(range(20)) ==", f13(list(range(20))))
else:
f = globals()["f"+n]
print("f"+n+": ", [f(x) for x in range(10)])
print()
go() | [
"tariqueanwer@outlook.com"
] | tariqueanwer@outlook.com |
2d4e5375e79cc35c8674acf1f09ed3ea017a8102 | 01bf95e0c0d57e3a1392f9d7e20580376c9e39a2 | /keystone/backends/sqlalchemy/migrate_repo/versions/002_rename_token_table.py | 1d15d9dac471ffd9d2e3887ea331871420c26b32 | [
"Apache-2.0"
] | permissive | oubiwann/keystone | 433713dd5d542484fc754ecfd097dc02759555b2 | 5c70d24462d75256fb6167d58e13d9c0a3d60427 | refs/heads/master | 2021-01-15T16:00:31.822891 | 2011-12-02T14:39:59 | 2011-12-02T14:39:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | """
Addresses bug 854425
Renames the 'token' table to 'tokens',
in order to appear more consistent with
other table names.
"""
# pylint: disable=C0103
import sqlalchemy
meta = sqlalchemy.MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
sqlalchemy.Table('token', meta).rename('tokens')
def downgrade(migrate_engine):
meta.bind = migrate_engine
sqlalchemy.Table('tokens', meta).rename('token')
| [
"dolph.mathews@gmail.com"
] | dolph.mathews@gmail.com |
162ca16f1c1766a7e0eba5b50a4d4e47a7f382d6 | 6b09043b97fb379aebd4363ff07d4cc53e8ec0b9 | /Day 8/08-DailyFlash_Solutions/22_Jan_Solutions_Three/Python/p1.py | 95966c06b2a0fe2542e4ee03e372c0259eb8dc23 | [] | no_license | Aadesh-Shigavan/Python_Daily_Flash | 6a4bdd73a33f533f3b121fae9eef973e10bf3945 | b118beeca3f4c97de54ae1a610f83da81157009a | refs/heads/master | 2022-11-28T13:03:17.573906 | 2020-08-06T15:36:36 | 2020-08-06T15:36:36 | 276,581,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | '''
Program 1: Write a program that accepts two integers from user and prints
addition & Subtraction of them.
{Note: checks for greater number to subtracts with while subtracting numbers}
Input: 10 20
Output:
Addition is 20
Subtraction is 10
'''
var1=int(input("enter first integer "))
var2=int(input("enter second integer "))
add=var1+var2
print("Addition is ",add)
if(var1 > var2):
sub=var1-var2
print("Subtraction is ",sub)
elif(var2>var1):
sub=var2-var1
print("Subtraction is ",sub)
| [
"aadesh.shigavan01@gmail.com"
] | aadesh.shigavan01@gmail.com |
b04295b6bd02ded41471966b990097969fe52ff6 | 55628a9a08a6b6646b4a8aa74bedbf2e3fd7d850 | /.history/master_20200108222014.py | f0ad1ad6e18e50c3c5af481bb7301cae10b1b643 | [] | no_license | StRobertCHSCS/final-project-team | c115dc11b318f7ac782c94860a8801bb558bd107 | 48907e72813c4dd3b48ff36f794f6fce04533219 | refs/heads/master | 2020-12-03T22:35:37.833893 | 2020-01-31T04:05:38 | 2020-01-31T04:05:38 | 231,506,873 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,220 | py | import arcade
import random
# Set how many rows and columns we will have
ROW_COUNT = 29
COLUMN_COUNT = 51

# This sets the WIDTH and HEIGHT of each grid location
WIDTH = 20
HEIGHT = 20

# This sets the margin between each cell
# and on the edges of the screen.
MARGIN = 5

# Do the math to figure out our screen dimensions
SCREEN_WIDTH = (WIDTH + MARGIN) * COLUMN_COUNT + MARGIN
SCREEN_HEIGHT = (HEIGHT + MARGIN) * ROW_COUNT + MARGIN

# Direction flags for the snake.  The original wrote '"right" = False',
# which is a SyntaxError; a dict literal needs ':' between key and value.
movedic = {"up": False, "down": False, "left": False, "right": False}

# Snake's current cell in grid coordinates.
player_x_column = 5
player_y_row = 5

texture = arcade.load_texture("griddd.jpg")
grid = []
def on_update(delta_time):
    # Scheduled by arcade in setup() (every 1/10 s); advances the snake one cell.
    snake_move()
def on_draw():
    # Redraw the whole frame: background first, then the sprites on top.
    arcade.start_render()
    grid_background()
    snake()
    apple()
    # Debug output of the direction-flag dict on every frame.
    print (movedic)
def grid_background():
    # Draw the pre-loaded grid texture centred on the screen at its native size.
    arcade.draw_texture_rectangle(SCREEN_WIDTH//2, SCREEN_HEIGHT//2, texture.width, texture.height, texture, 0)
def snake_move():
    """Advance the snake one cell in the active direction, then recompute
    its pixel position (player_x / player_y) for drawing."""
    global player_x, player_y, player_x_column, player_y_row
    global up, down, left, right
    # The direction flags are only created inside on_key_press, but this
    # function runs on a timer from the very first tick; default them so the
    # scheduled update does not raise NameError before any key is pressed.
    if 'up' not in globals():
        up = down = left = right = False
    if (0 < player_x_column < COLUMN_COUNT) and (0 < player_y_row < ROW_COUNT):
        if up:
            player_y_row += 1
        elif down:
            player_y_row -= 1
        elif right:
            player_x_column += 1
        elif left:
            player_x_column -= 1
    else:
        # Respawn at the start cell after leaving the board.
        player_x_column = 5
        player_y_row = 5
    # Convert grid coordinates to the pixel centre of the cell.
    player_x = (MARGIN + WIDTH) * player_x_column + MARGIN + WIDTH // 2
    player_y = (MARGIN + HEIGHT) * player_y_row + MARGIN + HEIGHT // 2
def snake():
    # Draw the snake's single segment at its current pixel position.
    arcade.draw_rectangle_filled(player_x, player_y, WIDTH, HEIGHT, arcade.color.BLUE)
def apple():
    """Draw an apple at a random grid cell, converted to pixel coordinates."""
    # randint is inclusive on both ends, so the upper bound must be COUNT - 1.
    apple_column = random.randint(0, COLUMN_COUNT - 1)
    apple_row = random.randint(0, ROW_COUNT - 1)
    # Same grid-to-pixel mapping used for the snake.  The original passed the
    # raw grid indices straight to draw_rectangle_filled, which pinned every
    # apple into the bottom-left corner of the window.
    apple_x = (MARGIN + WIDTH) * apple_column + MARGIN + WIDTH // 2
    apple_y = (MARGIN + HEIGHT) * apple_row + MARGIN + HEIGHT // 2
    arcade.draw_rectangle_filled(apple_x, apple_y, WIDTH, HEIGHT, arcade.color.RED)
def wall():
    # Placeholder: wall/obstacle drawing is not implemented yet.
    pass
def on_key_press(key, modifiers):
    """Set exactly one direction flag according to the WASD key pressed."""
    global up, down, left, right
    key_to_direction = {
        arcade.key.W: 'up',
        arcade.key.S: 'down',
        arcade.key.A: 'left',
        arcade.key.D: 'right',
    }
    direction = key_to_direction.get(key)
    # Any other key leaves the current direction untouched.
    if direction is not None:
        up = direction == 'up'
        down = direction == 'down'
        left = direction == 'left'
        right = direction == 'right'
def on_key_release(key, modifiers):
    # Key releases are ignored; the snake keeps moving in the last direction.
    pass
def on_mouse_press(x, y, button, modifiers):
    # Mouse input is not used by this game.
    pass
def setup():
    """Create the window, schedule the update tick, wire the arcade
    callbacks, build the empty board grid and start the event loop."""
    global grid
    arcade.open_window(SCREEN_WIDTH, SCREEN_HEIGHT, "snake")
    arcade.set_background_color(arcade.color.BLACK)
    arcade.schedule(on_update, 1/10)
    # Override arcade window methods with this module's handlers.
    window = arcade.get_window()
    window.on_draw = on_draw
    window.on_key_press = on_key_press
    window.on_key_release = on_key_release
    window.on_mouse_press = on_mouse_press
    # The grid is a list of rows; every cell starts at 0.
    for _ in range(ROW_COUNT):
        grid.append([0] * COLUMN_COUNT)
    arcade.run()
if __name__ == '__main__':
setup() | [
"clementina1023@gmail.com"
] | clementina1023@gmail.com |
038ed0403663029f64d78bc9575373753b2fc331 | bf28036f99ee0d94ac6c5172659018c5b55fa337 | /drum.py | b4f15451e9124ed7a1e29d1c8559af5b9569f58c | [] | no_license | shantinavgurukul/Dictionary_questions | 1fb2da829675fb8e5ef23b7259e2de29f58ce505 | ac79ec33901de4414359e48a88cf2cc882d79b5c | refs/heads/master | 2022-12-26T13:15:33.477165 | 2020-10-05T16:21:00 | 2020-10-05T16:21:00 | 301,469,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | import json
# Two sample employee records keyed by employee id.
dict1 = {
    "emp1": {
        "name": "Lisa",
        "designation": "programmer",
        "age": "34",
        "salary": "54000"
    },
    "emp2": {
        "name": "Elis",
        "designation": "Trainee",
        "age": "24",
        "salary": "40000"
    },
}

# json.dump serialises the dictionary straight into the file.  The original
# called json.dumps(out_file, indent=4) -- i.e. tried to serialise the file
# object itself, which raises TypeError and leaves the file open and empty.
with open("myfile.json", "w") as out_file:
    json.dump(dict1, out_file, indent=4)

# Echo the serialised form for inspection (the original printed the file
# object's repr instead of the JSON text).
print(json.dumps(dict1, indent=4))
"you@example.com"
] | you@example.com |
f5f87e659a58abd555e1e571e39cf2b5eedc1cd1 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/weecology_retriever/retriever-master/scripts/prism_climate.py | ed1da4bfd28d1a966b678c31c30ee3d3244572a9 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 2,745 | py | #retriever
"""Retriever script for direct download of PRISM climate data"""
from future import standard_library
standard_library.install_aliases()
from builtins import range
from retriever.lib.templates import Script
import urllib.request, urllib.parse, urllib.error
class main(Script):
    """Direct-download retriever script for the 4 km PRISM climate grids."""

    def __init__(self, **kwargs):
        Script.__init__(self, **kwargs)
        self.name = "PRISM Climate Data"
        self.shortname = "prism-climate"
        self.retriever_minimum_version = '2.0.dev'
        self.version = '1.1.1'
        self.ref = "http://prism.oregonstate.edu/"
        self.urls = {"climate": "http://services.nacse.org/prism/data/public/4km/"}
        self.description = "The PRISM data set represents climate observations from a wide range of monitoring networks, applies sophisticated quality control measures, and develops spatial climate datasets to reveal short- and long-term climate patterns. "

    def get_file_names(self, clim_var, mval, year, month):
        """Return every filename contained in one monthly PRISM zip archive."""
        extensions = ['bil', 'bil.aux.xml', 'hdr', 'info.txt', 'prj', 'stx', 'xml']
        stem = "PRISM_{}_stable_4km{}_{}{}_bil".format(clim_var, mval, year, month)
        return ["{}.{}".format(stem, ext) for ext in extensions]

    def download(self, engine=None, debug=False):
        """Fetch all monthly archives (1981-2014, four climate variables).

        Only usable with the 'download only' engine because the data files
        are non-tabular rasters.
        """
        if engine.name != "Download Only":
            raise Exception("The PRISM dataset contains only non-tabular data files, and can only be used with the 'download only' engine.")
        Script.download(self, engine, debug)
        months = ["{:02d}".format(m) for m in range(1, 13)]
        for clim_var in ['ppt', 'tmax', 'tmean', 'tmin']:
            # Precipitation archives use the "M3" marker, temperatures "M2".
            mval = "M3" if clim_var == 'ppt' else "M2"
            for year in range(1981, 2015):
                for month in months:
                    file_names = self.get_file_names(clim_var, mval, year, month)
                    file_url = urllib.parse.urljoin(self.urls["climate"], "{}/{}{}".format(clim_var, year, month))
                    archivename = "PRISM_{}_stable_4km{}_{}{}_bil.zip".format(clim_var, mval, year, month)
                    self.engine.download_files_from_archive(file_url, file_names, archivename=archivename, keep_in_dir=True)
                    self.engine.register_files(file_names)
# Module-level instance discovered and executed by the retriever framework.
SCRIPT = main()
| [
"659338505@qq.com"
] | 659338505@qq.com |
59fcde378e9247778415b7848b2705ccfe8e3385 | 11a0fab712b139bcba9e90f6acdc7597dff68dbb | /mestrado/ppgmcs/m07-elaboracao-de-dissertacao-i/projeto/codigo/teste1/grade/grade.py | daaca83d32160a4d36b59cbfca4b0cb9ba952eb3 | [] | no_license | fapers/MeusTreinamentos | 17ba096d518df533433ae2528b70d18717f3cf96 | 32a6b791b0c3dbb8b29ffd177597919e768b09b5 | refs/heads/master | 2023-06-04T14:00:37.847808 | 2021-06-28T02:37:11 | 2021-06-28T02:37:11 | 292,962,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | import numpy as np
def newgrade(h, d, t, p):
grade = np.arange(len(h)*len(d)*len(t)).reshape(len(d), len(h), len(t))
for i in range(len(d)):
for j in range(len(h)):
for k in range(len(t)):
aleatorio = np.random.choice(p)
grade[i][j][k] = int(aleatorio)
return grade
def grade(t, p):
grade = np.arrange(len(t)*len(p)*np.sum(p).reshape(len(t), np.sum(p))
| [
"fpsmoc@yahoo.com.br"
] | fpsmoc@yahoo.com.br |
05e423199bcd4237ba3acf5f863237c356aa85d7 | 95289559871f328cbed764cee33f85994599ef1f | /my_package/SeparableConvFlow/SeparableConvFlowLayer.py | 3428875de42fdbb72f8502bd817f5a3fa7a06b03 | [
"MIT"
] | permissive | Bostwickenator/Dain-App | 10a6f725e8c82b2a9a4f59060521d675f5b63e40 | 27a9dc83f36b549129a1815a095da9c782c8752e | refs/heads/master | 2023-03-18T16:41:31.839901 | 2021-03-20T20:00:53 | 2021-03-20T20:00:53 | 349,823,152 | 0 | 0 | MIT | 2021-03-20T20:59:25 | 2021-03-20T19:58:53 | Python | UTF-8 | Python | false | false | 4,368 | py | # this is for wrapping the customized layer
import torch
from torch.autograd import Function
import separableconvflow_cuda as my_lib
import warnings
#Please check how the STN FUNCTION is written :
#https://github.com/fxia22/stn.pytorch/blob/master/script/functions/gridgen.py
#https://github.com/fxia22/stn.pytorch/blob/master/script/functions/stn.py
class SeparableConvFlowLayer(Function):
    """Legacy autograd Function wrapping the separable-conv-flow kernels.

    NOTE(review): the constructor warns that the produced optical flow is
    imprecise because of a division inside the kernel.
    """

    def __init__(self, filtersize):
        self.filtersize = filtersize
        warnings.warn("\nSeparable Conv Flow Layer is not precise enough for optical flow due to a divison operation")
        super(SeparableConvFlowLayer, self).__init__()

    def forward(self, input1, input2, input3):
        """Run the forward kernel; caches the inputs for backward()."""
        batch_count = input1.size(0)
        in_height = input1.size(2)
        in_width = input1.size(3)
        kernel_size = min(input2.size(1), input3.size(1))
        out_height = min(input2.size(2), input3.size(2))
        out_width = min(input2.size(3), input3.size(3))
        # Geometry: the output shrinks by (filtersize - 1) in each dimension.
        assert in_height - self.filtersize == out_height - 1
        assert in_width - self.filtersize == out_width - 1
        assert kernel_size == self.filtersize
        # The C kernels require contiguous storage.
        assert input1.is_contiguous()
        assert input2.is_contiguous()
        assert input3.is_contiguous()
        # Flow is a by-product of SepConv here, allocated zero-initialised.
        flow_output = torch.zeros(batch_count, 2, out_height, out_width)
        # Shallow-cache the inputs; backward() reads them again.
        self.input1 = input1.contiguous()
        self.input2 = input2.contiguous()
        self.input3 = input3.contiguous()
        self.device = torch.cuda.current_device() if input1.is_cuda else -1
        if input1.is_cuda:
            flow_output = flow_output.cuda()
            err = my_lib.SeparableConvFlowLayer_gpu_forward(input1, input2, input3, flow_output)
        else:
            err = my_lib.SeparableConvFlowLayer_cpu_forward(input1, input2, input3, flow_output)
        # The kernels signal failure through a non-zero return code.
        if err != 0:
            print(err)
        return flow_output

    def backward(self, gradoutput):
        """Run the backward kernel on the cached inputs.

        input1 receives a zero gradient because the flow does not
        back-propagate anything into it.
        """
        gradinput1 = torch.zeros(self.input1.size())
        gradinput2 = torch.zeros(self.input2.size())
        gradinput3 = torch.zeros(self.input3.size())
        if self.input1.is_cuda:
            gradinput1 = gradinput1.cuda(self.device)
            gradinput2 = gradinput2.cuda(self.device)
            gradinput3 = gradinput3.cuda(self.device)
            err = my_lib.SeparableConvFlowLayer_gpu_backward(self.input1, self.input2, self.input3, gradoutput, gradinput1, gradinput2, gradinput3)
        else:
            err = my_lib.SeparableConvFlowLayer_cpu_backward(self.input1, self.input2, self.input3, gradoutput, gradinput1, gradinput2, gradinput3)
        if err != 0:
            print(err)
        return gradinput1, gradinput2, gradinput3
"user@user.com"
] | user@user.com |
87c07543dd40fb4839f8bd146fa7eb9bd2b4adca | 0fefd630aa4b500a1a218f5f12d351dfeb79d4a7 | /Class-HomeWork/03.RectanglePosition.py | 8253241100608d02637c7cb68e4136c21fa3125e | [
"MIT"
] | permissive | bozhikovstanislav/Python-Fundamentals | a7e7659d7ce8996f9e5dc17a8a0c5fcd5fbab65f | 072fd2c8bc962d20d4c526947349fdeae0bc94a5 | refs/heads/master | 2020-04-15T00:22:02.395202 | 2019-03-10T15:46:48 | 2019-03-10T15:46:48 | 164,237,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,532 | py |
def is_inside(rect_a, rect_b):
b_left = rect_a.get_x() >= rect_b.get_x()
b_top = rect_a.get_y() <= rect_b.get_y()
b_get_right_ = rect_a.get_x1() <= rect_b.get_x1()
get_bottum_ = rect_a.get_y1() <= rect_b.get_y1()
if b_left and b_get_right_ and b_top and get_bottum_:
return 'Inside'
return 'Not inside'
class Rectungle:
def __init__(self, x, y, width, height):
self.__x = x
self.__y = y
self.__width = width
self.__height = height
def set_x(self, x):
if isinstance(x, int):
return x
def get_x(self):
return self.__x
def set_y(self, y):
if isinstance(y, int):
return y
def get_y(self):
return self.__y
def set_width(self, width):
if isinstance(width, int):
return width
def get_width(self):
return self.__width
def set_height(self, height):
if isinstance(height, int):
return height
def get_height(self):
return self.__height
def get_x1(self):
h = self.get_x() + abs(self.get_width())
return h
def get_y1(self):
return self.get_y() + self.get_height()
rectungle_one = list(map(int, input().split()))
rectungle_tow = list(map(int, input().split()))
rect_one = Rectungle(rectungle_one[0], rectungle_one[1], rectungle_one[2], rectungle_one[3])
rect_tow = Rectungle(rectungle_tow[0], rectungle_tow[1], rectungle_tow[2], rectungle_tow[3])
print(is_inside(rect_one, rect_tow))
| [
"bozhikov.stanislav@gmail.com"
] | bozhikov.stanislav@gmail.com |
b5cffee6d892f73cbea112ed9209626b511c5b1e | 7c1be5665bf193281a90ba44ce0c7fe2215c2630 | /拼多多/pin_04.py | a9eb15ca196e2c0e424ceed3f119dcf366c71c46 | [] | no_license | TechInTech/Interview_Codes | 47a8748ff0b70b37949034926fdc01ec1f912584 | 24145a34de7a80b8dd7379914ab27e0017541b25 | refs/heads/master | 2020-07-08T22:37:27.952537 | 2019-10-14T05:42:00 | 2019-10-14T05:42:00 | 203,798,729 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | # -*- coding:utf-8 -*-
import sys
if __name__ == '__main__':
str_input = sys.stdin.readline().strip().split(' ')
sub_str = []
for i in str_input:
if len(i) > 1:
sub_str.append(i[0] + i[-1])
else:
sub_str.append(i[0])
# print(sub_str)
str_dict = {}
flag = True
while flag:
for item in sub_str:
if len(item) == 1:
if item in str_dict.values():
key = list(str_dict.keys())[list(str_dict.values()).index(item)]
str_dict[key] = item
else:
str_dict[item] = item
else:
if item[0] in str_dict.values():
key = list(str_dict.keys())[list(str_dict.values()).index(item[0])]
str_dict[key] = item[-1]
else:
str_dict[item[0]] = item[-1]
list_dict = []
for k1, it1 in str_dict.items():
list_dict.extend([k1, it1])
if len(str_dict) == 1 or len(set(list_dict)) >= 3:
flag = False
else:
sub_str = []
for k, it in str_dict.items():
sub_str.append(k + it)
str_dict = {}
if len(str_dict) > 1 or (len(str_dict) == 1 and list(str_dict.keys())[0] != list(str_dict.values())[0]):
print(False)
else:
print(True)
| [
"wdw_bluesky@163.com"
] | wdw_bluesky@163.com |
56c83d148ef48487ce438c5eb9f69b92baa0f3bb | 6b1dd40d16ae6169e7ed780c5062e88d10502c85 | /Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/trash/922_predict_829-1.py | 13011603b4e56baf37e707fbd78dcb77ebcf9844 | [
"MIT"
] | permissive | hehuanlin123/DeepLearning | 8a59680a341cfc525d50aa5afc3e44202ca4acc4 | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | refs/heads/master | 2022-07-12T09:26:08.617883 | 2019-06-10T11:31:37 | 2019-06-10T11:31:37 | 183,748,407 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,733 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 29 01:21:16 2018
@author: Kazuki
"""
import numpy as np
import pandas as pd
from tqdm import tqdm
import gc, os
import sys
argv = sys.argv
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
from sklearn.model_selection import GroupKFold
from sklearn.metrics import roc_auc_score
from glob import glob
import utils, utils_cat
utils.start(__file__)
# ==============================================================================
# Run configuration
# ==============================================================================
SEED = 71
LOOP = 5    # number of differently-seeded CV rounds to average
NFOLD = 7   # folds per GroupKFold
SUBMIT_FILE_PATH = '../output/829-1.csv.gz'
HEAD = 600  # how many top-ranked features to keep
EXE_SUBMIT = False
COMMENT = 'CV(single): 0.806418 + 0.002499 600features'

# LightGBM parameters shared by every CV run ('seed' is injected per loop).
param = {
    'objective': 'binary',
    'metric': 'auc',
    'learning_rate': 0.01,
    'max_depth': 6,
    'num_leaves': 63,
    'max_bin': 255,
    'min_child_weight': 10,
    'min_data_in_leaf': 150,
    'reg_lambda': 0.5,  # L2 regularization term on weights.
    'reg_alpha': 0.5,  # L1 regularization term on weights.
    'colsample_bytree': 0.9,
    'subsample': 0.9,
    'nthread': cpu_count(),
    'bagging_freq': 1,
    'verbose': -1,
}

np.random.seed(SEED)

# Feature-importance ranking produced by an earlier step; used to pick the
# top-HEAD features.
imp = pd.read_csv('LOG/imp_801_imp_lgb.py-2.csv')
imp.sort_values('total', ascending=False, inplace=True)
def mk_submit(HEAD=HEAD):
    """Train LOOP x NFOLD grouped-CV LightGBM models on the top-HEAD
    features, rank-average their test predictions and write the submission
    file (optionally submitting it)."""
    feature_names = imp.head(HEAD).feature
    train_files = ('../feature/train_' + feature_names + '.f').tolist()
    test_files = ('../feature/test_' + feature_names + '.f').tolist()

    # ----- load train -----
    X_train = pd.concat([pd.read_feather(f) for f in tqdm(train_files, mininterval=60)],
                        axis=1)
    y_train = utils.read_pickles('../data/label').TARGET
    # Keep a sample of the feature matrix next to the submission file.
    X_train.head().to_csv(SUBMIT_FILE_PATH.replace('.csv', '_X.csv'),
                          index=False, compression='gzip')
    if X_train.columns.duplicated().sum() > 0:
        raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')
    print('no dup :) ')
    print(f'X_train.shape {X_train.shape}')
    gc.collect()

    CAT = list(set(X_train.columns) & set(utils_cat.ALL))
    COL = X_train.columns.tolist()

    # ----- load test, forced into the same column order as train -----
    X_test = pd.concat([pd.read_feather(f) for f in tqdm(test_files, mininterval=60)],
                       axis=1)[COL]

    # ----- grouped folds keyed on user_id so one user never spans folds -----
    sk_tbl = pd.read_csv('../data/user_id_v7.csv.gz')
    user_tbl = sk_tbl.user_id.drop_duplicates().reset_index(drop=True).to_frame()
    sub_train = pd.read_csv('../input/application_train.csv.zip', usecols=['SK_ID_CURR']).set_index('SK_ID_CURR')
    sub_train['y'] = y_train.values
    group_kfold = GroupKFold(n_splits=NFOLD)

    # ----- training with cv -----
    model_all = []
    auc_mean = 0
    for i in range(LOOP):
        dtrain = lgb.Dataset(X_train, y_train, categorical_feature=CAT, free_raw_data=False)
        # Re-shuffle which users land in which fold for this seed.
        ids = list(range(user_tbl.shape[0]))
        np.random.shuffle(ids)
        user_tbl['g'] = np.array(ids) % NFOLD
        sk_tbl_ = pd.merge(sk_tbl, user_tbl, on='user_id', how='left').set_index('SK_ID_CURR')
        sub_train['g'] = sk_tbl_.g
        folds = group_kfold.split(X_train, sub_train['y'], sub_train['g'])
        gc.collect()
        param['seed'] = i
        ret, models = lgb.cv(param, dtrain, 9999, folds=folds,
                             early_stopping_rounds=100, verbose_eval=50,
                             seed=i)
        model_all += models
        auc_mean += ret['auc-mean'][-1]
    auc_mean /= LOOP

    result = f"CV auc-mean(feature {HEAD}): {auc_mean}"
    print(result)
    utils.send_line(result)

    # ----- predict: average each row's per-model rank, then rescale to (0,1] -----
    sub = pd.read_pickle('../data/sub.p')
    gc.collect()
    label_name = 'TARGET'
    sub[label_name] = 0
    for model in model_all:
        y_pred = model.predict(X_test)
        sub[label_name] += pd.Series(y_pred).rank()
    sub[label_name] /= len(model_all)
    sub[label_name] /= sub[label_name].max()
    sub['SK_ID_CURR'] = sub['SK_ID_CURR'].map(int)
    sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')

    # ----- submission -----
    if EXE_SUBMIT:
        print('submit')
        utils.submit(SUBMIT_FILE_PATH, COMMENT)
# =============================================================================
# main
# =============================================================================
mk_submit(HEAD)
#==============================================================================
utils.end(__file__)
# NOTE(review): this powers the cloud instance off once the run finishes --
# confirm before executing the script locally.
utils.stop_instance()
| [
"szkfzx@szkfzxdeiMac.local"
] | szkfzx@szkfzxdeiMac.local |
ba1831997efc65fdd8de32d918565cd280a23b1f | a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea | /airflow/decorators/python.py | 3f00681ccfdde818a19511335dab7fefa7db6aa4 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | ishiis/airflow | 4305794e36b611d01f49e3f2401be3dc49782670 | 292440d54f4db84aaf0c5a98cf5fcf34303f2fa8 | refs/heads/master | 2022-07-30T00:51:28.806940 | 2022-07-14T12:07:11 | 2022-07-14T12:07:11 | 209,801,072 | 1 | 0 | Apache-2.0 | 2019-09-20T13:47:26 | 2019-09-20T13:47:26 | null | UTF-8 | Python | false | false | 3,205 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Callable, Optional, Sequence
from airflow.decorators.base import DecoratedOperator, TaskDecorator, task_decorator_factory
from airflow.operators.python import PythonOperator
class _PythonDecoratedOperator(DecoratedOperator, PythonOperator):
    """
    Wraps a Python callable and captures args/kwargs when called for execution.

    :param python_callable: A reference to an object that is callable
    :param op_kwargs: a dictionary of keyword arguments that will get unpacked
        in your function (templated)
    :param op_args: a list of positional arguments that will get unpacked when
        calling your callable (templated)
    :param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
        multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
    """

    template_fields: Sequence[str] = ('op_args', 'op_kwargs')
    template_fields_renderers = {"op_args": "py", "op_kwargs": "py"}

    # A shallow copy suffices because the arguments are never mutated, and
    # some payloads (e.g. protobuf objects) cannot be deep-copied at all.
    shallow_copy_attrs: Sequence[str] = ('python_callable',)

    def __init__(self, *, python_callable, op_args, op_kwargs, **kwargs) -> None:
        super().__init__(
            kwargs_to_upstream={
                "python_callable": python_callable,
                "op_args": op_args,
                "op_kwargs": op_kwargs,
            },
            python_callable=python_callable,
            op_args=op_args,
            op_kwargs=op_kwargs,
            **kwargs,
        )
def python_task(
    python_callable: Optional[Callable] = None,
    multiple_outputs: Optional[bool] = None,
    **kwargs,
) -> TaskDecorator:
    """Wrap a function into an Airflow operator.

    Accepts kwargs for operator kwarg. Can be reused in a single DAG.

    :param python_callable: Function to decorate
    :param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
        multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
    """
    # Delegate to the shared factory, binding this module's operator class.
    return task_decorator_factory(
        decorated_operator_class=_PythonDecoratedOperator,
        python_callable=python_callable,
        multiple_outputs=multiple_outputs,
        **kwargs,
    )
| [
"noreply@github.com"
] | ishiis.noreply@github.com |
de8f80151a4960a8f4e5d28c3ea758062ee104bf | e8c3e7964f4b448e94481704d29508e9d6bd1798 | /CommonTools/python/HagiwaraAndZeppenfeldTwoDimensionalModel_wz_f5z_ifLessThen0SetTo0_0505Files_1SetTo1.py | a08f9f4a69a9ca55ba3d7591cf98e7a06773d704 | [] | no_license | senka/ZZ_2l2nu_4l_CMS_combination | 1401f81dc255ea0ae4a0a5c73b022670849a1152 | 197655fa2143ffe1665cd7a1c6e5af2a2f48e57a | refs/heads/master | 2021-01-13T02:06:27.885996 | 2014-08-09T16:15:14 | 2014-08-09T16:15:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,872 | py | from CombinedEWKAnalysis.CommonTools.AnomalousCouplingModel import *
import ROOT as r
import os
basepath = '%s/src/CombinedEWKAnalysis/CommonTools/data/WV_semileptonic'%os.environ['CMSSW_BASE']
#filename = '%s/ATGC_shape_coefficients.root'%basepath
#filename = '%s/signal_WV.root'%basepath
#this model is in the equal couplings scenario of HISZ or something similar
#it does the old style limits of setting the other parameter to zero
class HagiwaraAndZeppenfeldTwoDimensionalModel_wz_f5z_ifLessThen0SetTo0_0505Files_1SetTo1(AnomalousCouplingModel):
def __init__(self,mode):
AnomalousCouplingModel.__init__(self)
self.processes = ['WWgammaZ']
self.channels = ['WV_atgc_semileptonic']
# self.lepchannels = ['ch1','ch2','ch3','ch4']
self.lepchannels = ['ch1','ch2','ch3','ch4','ch5','ch6']
# self.lepchannels = ['ch1','ch2']
self.pois = ['dkg','dg1','lZ']
self.mode = mode
self.anomCoupSearchWindows = {'dkg':['-0.006','0.006'],
'dg1':['-0.006','0.006'],
'lZ' :['-0.006','0.006'] }
# self.anomCoupSearchWindows = {'dkg':['-0.015','0.015'],
# 'dg1':['-0.015','0.015'],
# 'lZ' :['-0.015','0.015'] }
self.verbose = False
def buildScaling(self,process,channel,lepchannel):
scalerName = '%s_%s_%s'%(process,channel,lepchannel)
print '\t\t *********************** Hagiwara reading: %s/signal_WV_%s_f5z_ifLessThen1SetTo1.root'%(basepath,lepchannel)
filename = '%s/signal_WV_%s_f5z_ifLessThen1SetTo1_0505Files.root'%(basepath,lepchannel)
# f = r.TFile('%s/mu_boosted.root'%basepath,'READ')
print '\t\t *********************** Hagiwara reading: %s/%s_boosted.root'%(basepath,lepchannel)
f = r.TFile('%s/%s_boosted.root'%(basepath,lepchannel),'READ')
# SM_diboson_shape = f.Get('diboson').Clone('SM_wv_semil_mu_shape_for_scale')
if ('ch' in lepchannel):
print 'reading ZZ2l2nu for %s'%lepchannel
SM_diboson_shape = f.Get('zz2l2nu').Clone('SM_wv_semil_%s_shape_for_scale'%lepchannel)
else:
print 'reading diboson %s'%lepchannel
SM_diboson_shape = f.Get('diboson').Clone('SM_wv_semil_%s_shape_for_scale'%lepchannel)
SM_diboson_shape.SetDirectory(0)
f.Close()
self.modelBuilder.out._import(SM_diboson_shape)
SM_diboson_shape_dhist = r.RooDataHist('DHIST_SM_wv_semil_%s_shape_for_scale'%lepchannel,
'DHIST_SM_wv_semil_%s_shape_for_scale'%lepchannel,
r.RooArgList(self.modelBuilder.out.var('W_pt_%s'%lepchannel)),
self.modelBuilder.out.obj('SM_wv_semil_%s_shape_for_scale'%lepchannel))
self.modelBuilder.out._import(SM_diboson_shape_dhist)
# self.modelBuilder.factory_('RooHistFunc::Scaling_base_pdf_%s({W_pt},DHIST_SM_wv_semil_mu_shape_for_scale)'%(scalerName))
self.modelBuilder.factory_('RooHistFunc::Scaling_base_pdf_%s({W_pt_%s},DHIST_SM_wv_semil_%s_shape_for_scale)'%(scalerName,lepchannel,lepchannel))
self.modelBuilder.factory_('RooATGCProcessScaling_wz::Scaling_%s(W_pt_%s,dkg,lZ,dg1,Scaling_base_pdf_%s,"%s")'%(scalerName,lepchannel,scalerName,filename))
if ( self.mode == 'dkglZ' ):
self.modelBuilder.out.function('Scaling_%s'%scalerName).setLimitType(0)
self.modelBuilder.out.var('dg1').setVal(0)
self.modelBuilder.out.var('dg1').setConstant(True)
elif ( self.mode == 'dg1lZ' ):
self.modelBuilder.out.function('Scaling_%s'%scalerName).setLimitType(1)
self.modelBuilder.out.var('dkg').setVal(0)
self.modelBuilder.out.var('dkg').setConstant(True)
elif ( self.mode == 'dkgdg1' ):
self.modelBuilder.out.function('Scaling_%s'%scalerName).setLimitType(2)
self.modelBuilder.out.var('lZ').setVal(0)
self.modelBuilder.out.var('lZ').setConstant(True)
else:
raise RuntimeError('InvalidCouplingChoice',
'We can only use [dkg,lZ], [dg1,lZ], and [dkg,dg1]'\
' as POIs right now!')
return scalerName
# NOTE(review): these lines instantiated a class named
# "...ifLessThen1SetTo1_0505Files_1SetTo1", which is never defined in this
# module, so importing it raised NameError.  Bind the exported variable names
# (unchanged, so external "module:variable" references keep working) to the
# class actually defined above.
dkglZModel_wz_f5z_ifLessThen1SetTo1_0505Files_1SetTo1 = HagiwaraAndZeppenfeldTwoDimensionalModel_wz_f5z_ifLessThen0SetTo0_0505Files_1SetTo1('dkglZ')
dg1lZModel_wz_f5z_ifLessThen1SetTo1_0505Files_1SetTo1 = HagiwaraAndZeppenfeldTwoDimensionalModel_wz_f5z_ifLessThen0SetTo0_0505Files_1SetTo1('dg1lZ')
dkgdg1Model_wz_f5z_ifLessThen1SetTo1_0505Files_1SetTo1 = HagiwaraAndZeppenfeldTwoDimensionalModel_wz_f5z_ifLessThen0SetTo0_0505Files_1SetTo1('dkgdg1')
| [
"senka.duric@cern.ch"
] | senka.duric@cern.ch |
333db2238260b5ce45d4d105fa1e5cac5933855d | dfc686228834750216b2cd6eea14d2a6d12422e4 | /Hackerrank_Python_solution/RegexandParsing/DetectHTMLTagsAttributesandAttributeValues.py | 920d82f47e5c38a13364c5af8b4e6a11668ec42b | [] | no_license | Parth-Ps/python | 8466e8856bf301908544eb60ae4a68338ccf4550 | bb448c2a7996d17883214fe8eb11caa61e211400 | refs/heads/master | 2023-01-22T13:30:50.507021 | 2020-12-02T07:59:53 | 2020-12-02T07:59:53 | 317,788,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | '''
Title : Detect HTML Tags, Attributes and Attribute Values
Subdomain : Regex and Parsing
Domain : Python
Author : Ahmedur Rahman Shovon
Created : 15 July 2016
'''
from html.parser import HTMLParser
class CustomHTMLParser(HTMLParser):
    """HTMLParser that prints each tag name followed by its attributes."""

    def handle_starttag(self, tag, attrs):
        # Opening tag, e.g. <a href='...'>.
        print(tag)
        self.handle_attrs(attrs)

    def handle_startendtag(self, tag, attrs):
        # Self-closing tag, e.g. <br/>.
        print(tag)
        self.handle_attrs(attrs)

    def handle_attrs(self, attrs):
        """Print '-> name > value' for every attribute pair.

        Valueless attributes (e.g. <input disabled>) arrive with value None;
        the original unconditionally called .strip() on the value and crashed
        with AttributeError.  Printing None matches the expected
        '-> attr > None' output.
        """
        for name, value in attrs:
            print('->', name.strip(), '>', value.strip() if value is not None else value)
# Read n lines of HTML from stdin, concatenate them, and feed the whole
# document through the parser in one pass.
n = int(input())
html_string = ''
for i in range(n):
    html_string += input()
customHTMLParser = CustomHTMLParser()
customHTMLParser.feed(html_string)
customHTMLParser.close()
| [
"parth0129.certificate@gmail.com"
] | parth0129.certificate@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.