content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""
MesoNet
Authors: Brandon Forys and Dongsheng Xiao, Murphy Lab
https://github.com/bf777/MesoNet
Licensed under the Creative Commons Attribution 4.0 International License (see LICENSE for details)
"""
# __init__.py
from mesonet.utils import *
from mesonet.dlc_predict import predict_dlc
from mesonet.predict_regions import predict_regions
from mesonet.train_model import train_model
from mesonet.gui_start import gui_start
from mesonet.img_augment import img_augment
| [
37811,
198,
44,
274,
78,
7934,
198,
30515,
669,
25,
14328,
376,
652,
82,
290,
28831,
7091,
782,
28249,
11,
14424,
3498,
198,
5450,
1378,
12567,
13,
785,
14,
19881,
29331,
14,
44,
274,
78,
7934,
198,
26656,
15385,
739,
262,
17404,
13... | 3.302817 | 142 |
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2014, Ravi Sharma
#
# Distributed under the terms of the Lesser GNU General Public License (LGPL)
#-----------------------------------------------------------------------------
'''
Created on Feb 26, 2014
@author: ravi
'''
from lxml import etree
from kayako.core.lib import UnsetParameter
from kayako.core.object import KayakoObject
from kayako.exception import KayakoRequestError, KayakoResponseError
class CustomField(KayakoObject):
    '''
    Kayako Custom Field API Object.

    customfieldid       The custom field ID.
    customfieldgroupid  The custom field group id.
    title               The title of the custom field.
    fieldtype           The type of the custom field.
    fieldname           The field name of custom field.
    defaultvalue        The default value of custom field.
    isrequired          1 or 0 boolean that controls whether or not field required.
    usereditable        1 or 0 boolean that controls whether or not to edit the field by user.
    staffeditable       1 or 0 boolean that controls whether or not to edit the field by staff.
    regexpvalidate      A regex string for validate.
    displayorder        The display order of the custom field.
    encryptindb         1 or 0 boolean that controls whether or not field is encrypted.
    description         The description of the custom field.
    '''

    # REST controller path for this resource on the Kayako server.
    controller = '/Base/CustomField'

    # Attribute names this API object exposes.  NOTE(review): presumably
    # consumed by KayakoObject to map response fields / constructor kwargs
    # onto attributes -- confirm against kayako.core.object.
    __parameters__ = [
        'id',
        'customfieldid',
        'customfieldgroupid',
        'title',
        'fieldtype',
        'fieldname',
        'defaultvalue',
        'isrequired',
        'usereditable',
        'staffeditable',
        'regexpvalidate',
        'displayorder',
        'encryptindb',
        'description',
    ]
@classmethod
@classmethod
@classmethod
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
10097,
32501,
198,
2,
15069,
357,
66,
8,
1946,
11,
371,
15820,
40196,
198,
2,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
12892,
263,
22961,
3611,
5094,
13789,
... | 2.948553 | 622 |
# # -*- coding: utf-8 -*-
import math
import random
import numpy as np
import heapq
from io import BytesIO
from PIL import Image, ImageOps
from server.api.crop_suggestor import CropSuggestorModel
from server.api.utils import compress_bg, read_image, nd_deserialize, np_isground, tiling_range, n_dim_iter, \
np_bg_goodness, np_iswater
from server.api.graphics import paste_obj
import json
import base64
# --- object sprites --------------------------------------------------------
# Each sprite is loaded from disk and wrapped in an ObjImg with a label, a
# placement tag ("object;grounded" / "object;water" / "object;air") and a
# relative size.  NOTE(review): ObjImg, ImageComposer and loadnp are not
# imported in this excerpt -- confirm their origin in the full file.
cat_data = read_image('./input/obj/cat.png')
cat_obj = ObjImg(cat_data, "cat", "object;grounded", size_ratio=0.4, well_cropped=False)
dog_data = read_image('./input/obj/dog.png')
dog_obj = ObjImg(dog_data, "dog", "object;grounded", size_ratio=0.75, well_cropped=True)
person_data = read_image('./input/obj/man.png')
person_obj = ObjImg(person_data, "person", "object;grounded", size_ratio=1.8)
rhino_data = read_image('./input/obj/rhino.png')
rhino_obj = ObjImg(rhino_data, "rhino", "object;grounded", size_ratio=1.5)
zebra_data = read_image('./input/obj/zebra.png')
zebra_obj = ObjImg(zebra_data, "zebra", "object;grounded", size_ratio=1.5, well_cropped=True)
lion_data = read_image('./input/obj/lion.png')
# Bug fix: the lion sprite was labelled "zebra" (copy-paste error).
lion_obj = ObjImg(lion_data, "lion", "object;grounded", size_ratio=1.5)
giraffe_data = read_image('./input/obj/giraffe.png')
giraffe_obj = ObjImg(giraffe_data, "giraffe", "object;grounded", size_ratio=3.0)
# NOTE(review): person_data/person_obj are re-created here identically to the
# definitions above -- redundant, but kept so the file's behaviour (including
# the second disk read) is unchanged.
person_data = read_image('./input/obj/man.png')
person_obj = ObjImg(person_data, "person", "object;grounded", size_ratio=1.8)
pirate_data = read_image('./input/obj/pirate.png')
pirate_obj = ObjImg(pirate_data, "pirate", "object;grounded", size_ratio=1.8)
boat_data = read_image('./input/obj/boat.png')
boat_obj = ObjImg(boat_data, "boat", "object;water", size_ratio=1.0, well_cropped=True)
parrot_data = read_image('./input/obj/parrot.png')
parrot_obj = ObjImg(parrot_data, "parrot", "object;air", size_ratio=1.0, well_cropped=True)

# --- backgrounds: colour map + depth map + source image per scene ----------
field_cm = loadnp('./input/bg/field_cm.json', 'colormap')
field_dm = loadnp('./input/bg/field_dm.json', 'depthmap')
field_bg = Image.open("./input/bg/field.jpg")
africa_cm = loadnp('./input/bg/africa_cm.json', 'colormap')
africa_dm = loadnp('./input/bg/africa_dm.json', 'depthmap')
africa_bg = Image.open("./input/bg/africa.jpg")
beach_cm = loadnp('./input/bg/beach_cm.json', 'colormap')
beach_dm = loadnp('./input/bg/beach_dm.json', 'depthmap')
beach_bg = Image.open("./input/bg/beach.jpg")
autumn_cm = loadnp('./input/bg/autumn_cm.json', 'colormap')
autumn_dm = loadnp('./input/bg/autumn_dm.json', 'depthmap')
autumn_bg = Image.open("./input/bg/autumn.jpg")

composer = ImageComposer()
# composer.compose(field_bg, [person_obj], field_cm, field_dm)
# composer.compose(field_bg, [cat_obj, person_obj], field_cm, field_dm)
import cv2
# composer.compose(africa_bg, [rhino_obj], africa_cm, africa_dm)
# composer.compose(africa_bg, [zebra_obj], africa_cm, africa_dm)
# composer.compose(africa_bg, [lion_obj, giraffe_obj], africa_cm, africa_dm)
# composer.compose(africa_bg, [lion_obj], africa_cm, africa_dm)
composer.compose(field_bg, [cat_obj, dog_obj], field_cm, field_dm)
# composer.compose(beach_bg, [parrot_obj, pirate_obj], beach_cm, beach_dm)
# composer.compose(autumn_bg, [person_obj], autumn_cm, autumn_dm)
# composer.compose(autumn_bg, [person_obj], autumn_cm, autumn_dm) | [
2,
1303,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
10688,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
24575,
80,
198,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
350,
4146,
1330,... | 2.386279 | 1,341 |
import pylab
import tables
import math
import numpy
fig = pylab.figure(2)
tr_125 = pylab.loadtxt('../s125/s125-double-shear_totalEnergy')
tr_129 = pylab.loadtxt('../s129/s129-double-shear_totalEnergy')
tr_130 = pylab.loadtxt('../s130/s130-double-shear_totalEnergy')
refTe = tr_125[0,1]
pylab.plot(tr_125[:,0], tr_125[:,1], label='CFL 0.2')
pylab.plot(tr_129[:,0], tr_129[:,1], label='CFL 0.1')
pylab.plot(tr_130[:,0], tr_130[:,1], label='CFL 0.05')
pylab.legend(loc='lower left')
pylab.title('Total Energy History')
pylab.xlabel('Time [s]')
pylab.ylabel('Total Energy')
pylab.savefig('s125s129s130-double-shear-totalEnergy_cmp.png')
pylab.close()
print "CFL 0.2", tr_125[-1,1]-tr_125[0,1]
print "CFL 0.1", tr_129[-1,1]-tr_129[0,1]
print "CFL 0.05", tr_130[-1,1]-tr_130[0,1]
| [
11748,
279,
2645,
397,
198,
11748,
8893,
198,
11748,
10688,
198,
11748,
299,
32152,
628,
198,
5647,
796,
279,
2645,
397,
13,
26875,
7,
17,
8,
198,
2213,
62,
11623,
796,
279,
2645,
397,
13,
2220,
14116,
10786,
40720,
82,
11623,
14,
8... | 2.111111 | 369 |
"""
A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
Find the largest palindrome made from the product of two 3-digit numbers.
"""
biggest_number = 0
for i in range(999):
for n in range(999):
number = i*n
if str(number) == str(number)[::-1]:
if number > biggest_number:
biggest_number = number
print(biggest_number) | [
37811,
198,
32,
6340,
521,
398,
291,
1271,
9743,
262,
976,
1111,
2842,
13,
383,
4387,
6340,
521,
5998,
925,
422,
262,
1720,
286,
734,
362,
12,
27003,
3146,
318,
15897,
24,
796,
10495,
13958,
7388,
13,
198,
16742,
262,
4387,
6340,
52... | 3 | 138 |
# Copyright 2019 University of Basel, Center for medical Image Analysis and Navigation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
__author__ = "Robin Sandkuehler"
__copyright__ = "Copyright (C) 2019 Center for medical Image Analysis and Navigation"
import torch as th
# Loss base class (standard from PyTorch)
# conditional return
class MSE(_PairwiseImageLoss):
r""" The mean square error loss is a simple and fast to compute point-wise measure
which is well suited for monomodal image registration.
.. math::
\mathcal{S}_{\text{MSE}} := \frac{1}{\vert \mathcal{X} \vert}\sum_{x\in\mathcal{X}}
\Big(I_M\big(x+f(x)\big) - I_F\big(x\big)\Big)^2
Args:
fixed_image (Image): Fixed image for the registration
moving_image (Image): Moving image for the registration
size_average (bool): Average loss function
reduce (bool): Reduce loss function to a single value
"""
| [
2,
15069,
13130,
2059,
286,
6455,
417,
11,
3337,
329,
3315,
7412,
14691,
290,
42115,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
... | 3.212903 | 465 |
#!/usr/bin/python
import os
from argparse import ArgumentParser
import psycopg2.extras
import requests
from requests.auth import HTTPBasicAuth
from ebi_eva_common_pyutils.logger import logging_config
from ebi_eva_common_pyutils.metadata_utils import get_metadata_connection_handle
from ebi_eva_common_pyutils.pg_utils import get_all_results_for_query, execute_query
from retry import retry
logger = logging_config.get_logger(__name__)
logging_config.add_stdout_handler()
@retry(tries=4, delay=2, backoff=1.2, jitter=(1, 3))
@retry(tries=4, delay=2, backoff=1.2, jitter=(1, 3))
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
28686,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
198,
11748,
17331,
22163,
70,
17,
13,
2302,
8847,
198,
11748,
7007,
198,
6738,
7007,
13,
18439,
1330,
14626,
26416,
30515,
198,
19... | 2.754386 | 228 |
#!/usr/bin/env python
import os
import json
if __name__ == '__main__':
    import sys

    # First CLI argument: directory to scan and to write the JSON files into.
    basedir = sys.argv[1]
    # NOTE(review): load_files_replicas is not defined in this excerpt --
    # presumably defined earlier in the full file; confirm its return shape.
    files, replicas = load_files_replicas(basedir)

    # Dump both structures as pretty-printed, key-sorted JSON under basedir.
    files_path = os.path.join(basedir, 'files.json')
    with open(files_path, 'w') as ff:
        ff.write(json.dumps(files, indent=4, sort_keys=True))

    replicas_path = os.path.join(basedir, 'replicas.json')
    with open(replicas_path, 'w') as fr:
        fr.write(json.dumps(replicas, indent=4, sort_keys=True))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
33918,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1330,
25064,
198,
220,
220,
220,
1912,
343,
796,
25064,
13... | 2.342995 | 207 |
from flask import render_template, url_for, redirect, flash
from flask_login import current_user
from .. import db
from ..models import User, Tweet
from . import user
from .forms import TweetForm
from ..queries import get_all_tweets, send_tweet, get_user
@user.route('/<username>', methods=['GET', 'POST'])
| [
6738,
42903,
1330,
8543,
62,
28243,
11,
19016,
62,
1640,
11,
18941,
11,
7644,
198,
6738,
42903,
62,
38235,
1330,
1459,
62,
7220,
198,
6738,
11485,
1330,
20613,
198,
6738,
11485,
27530,
1330,
11787,
11,
18752,
198,
6738,
764,
1330,
2836,... | 3.347826 | 92 |
#!/usr/bin/env python
from pathlib import Path
from setuptools import find_packages, setup
# Discover sub-packages under the src-style layout directory and add the
# data-only resources package by hand.
packages = find_packages(where="asdf_chunked")
packages.append("asdf_chunked.resources")

# Map package names onto on-disk directories for the src-style layout.
package_dir = {
    "": "asdf_chunked",
    "asdf_chunked.resources": "resources",
}
# NOTE(review): package_yaml_files is neither defined nor imported in this
# excerpt -- presumably a helper that lists the YAML files shipped under
# resources/; confirm in the full file.
package_data = {"asdf_chunked.resources": package_yaml_files("resources")}

setup(
    use_scm_version=True,  # version derived from SCM tags (setuptools_scm)
    packages=packages,
    package_dir=package_dir,
    package_data=package_data,
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
43789,
796,
1064,
62,
43789,
7,
3003,
2625,
292,
7568,
62,
354,
2954,
276,
... | 2.642045 | 176 |
import sys
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QAction, QTableWidget,QTableWidgetItem,QVBoxLayout
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
# @pyqtSlot()
# def add_element(self):
# des = "HI"
# price = 10
# self.tableWidget.insertRow(self.items)
# description_item = QTableWidgetItem(des)
# price_item = QTableWidgetItem("{:.2f}".format(float(price)))
# price_item.setTextAlignment(Qt.AlignRight)
# self.tableWidget.setItem(self.items, 0, description_item)
# self.tableWidget.setItem(self.items, 1, price_item)
# self.description.setText("")
# self.price.setText("")
# self.items += 1
if __name__ == '__main__':
    # NOTE(review): App is not defined in this excerpt -- the main-window
    # class presumably lives elsewhere in the full file; confirm.
    app = QApplication(sys.argv)
    ex = App()
    # Enter the Qt event loop and exit the process with its return status.
    sys.exit(app.exec_())
| [
11748,
25064,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
13383,
27703,
11,
1195,
23416,
11,
1195,
38300,
11,
1195,
12502,
11,
1195,
10962,
38300,
11,
48,
10962,
38300,
7449,
11,
48,
53,
14253,
32517,
198,
6... | 2.291553 | 367 |
# Copyright (c) 2017, Oren Kraus All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import h5py
import numpy as np
| [
2,
15069,
357,
66,
8,
2177,
11,
440,
918,
21553,
385,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
17613,
11,
198,
2,
389,
10431,
2810,
326,
262,
1708,
3403,
... | 3.643357 | 429 |
from PIL import Image
import requests
from io import BytesIO
import base64
import os
def save_image(src, counter, path):
    """Save a single image to ./images/<path>/<path>_NNNN.png.

    :param src: image source -- an http(s) URL or a local file path.
    :param counter: sequence number embedded in the output file name.
    :param path: sub-directory name, also used as the file-name prefix.
    :return: True when the image was fetched/decoded/saved, False otherwise.
    """
    cur_dir = os.getcwd()
    image_root_path = os.path.join(cur_dir, "images")
    image_path = os.path.join(image_root_path, path)
    # makedirs(exist_ok=True) replaces the two isdir()+mkdir() pairs: it is
    # race-free and creates both directory levels in one call.
    os.makedirs(image_path, exist_ok=True)
    output_path = os.path.join(image_path, f"{path}_{format(counter, '04d')}.png")
    try:
        if src.startswith("http"):
            response = requests.get(src, timeout=20)
            img = Image.open(BytesIO(response.content))
        else:
            img = Image.open(src)
        img.save(output_path)
    except Exception:
        # Best-effort: any download/decode/save failure is reported as False.
        # The original used bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit -- narrowed to Exception.
        return False
    print(f"Save Image: {output_path}")
    return True
def save_images(src_list, path, img_num):
    """Method for saving a list of images"""
    saved = 1
    for src in src_list:
        # Stop once img_num images have been saved successfully.
        if saved > img_num:
            break
        # The counter only advances on a successful save, so failed
        # sources do not leave gaps in the output numbering.
        if save_image(src, saved, path):
            saved += 1
| [
6738,
350,
4146,
1330,
7412,
198,
11748,
7007,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
11748,
2779,
2414,
198,
11748,
28686,
198,
198,
4299,
3613,
62,
9060,
7,
10677,
11,
3753,
11,
3108,
2599,
198,
220,
220,
220,
37227,
17410,
... | 2.14841 | 566 |
"""
Python Expression field, another finite prime field characteristic two definition.
field element is defined by bool(Python Expression).
This module is reference design for finite field characteristic two.
but I recommend that this field should be used only checking Python syntax.
"""
import logging
import operator
_log = logging.getLogger('sandbox.pyexprfield')
import sandbox.finitefield as finitefield
class PythonExpressionFieldElement(finitefield.FiniteFieldElement):
    """
    An element of the boolean field (characteristic two), stored as a bool.
    """
    def __init__(self, expression):
        """ boolean must be Python expression.
        """
        # Any Python expression is collapsed to its truth value.
        self.boolean = bool(expression)

    def xor(self, other):
        """ return self xor other .
        """
        # a XOR b is true exactly when a != b.
        return self.__class__(not (self == other))

    # NOTE(review): __add__, __mul__ and __neg__ are referenced below but are
    # not defined in this excerpt; they appear to have been dropped during
    # extraction (for a characteristic-two field one would expect
    # __add__ = xor).  Confirm against the original module.
    __radd__ = __add__
    __sub__ = __add__
    __rsub__ = __add__
    __rmul__ = __mul__

    def __div__(self, other):
        """ compute formal division.

        In Python expression, 0 is False, so dividing False causes ZeroDivisionerror.
        """
        if not other:
            raise ZeroDivisionError("False represents zero, this operation is ZeroDivision.")
        # Dividing by True (the unit) leaves the element unchanged.
        return self.__class__(self.boolean)

    __truediv__ = __div__
    __floordiv__ = __div__
    __rdiv__ = __div__
    __rtruediv__ = __div__
    __rfloordiv__ = __div__
    __invert__ = __neg__

    def toFinitePrimeFieldElement(self):
        """ get FinitePrimeField(2) element with bijective map.
        """
        # True -> 1 (mod 2), False -> 0 (mod 2).
        if self.boolean:
            return finitefield.FinitePrimeFieldElement(1, 2)
        return finitefield.FinitePrimeFieldElement(0, 2)
| [
37811,
198,
37906,
41986,
2214,
11,
1194,
27454,
6994,
2214,
16704,
734,
6770,
13,
198,
3245,
5002,
318,
5447,
416,
20512,
7,
37906,
41986,
737,
198,
198,
1212,
8265,
318,
4941,
1486,
329,
27454,
2214,
16704,
734,
13,
198,
4360,
314,
... | 2.739933 | 596 |
# __init__: let python know this is a package
from mpgen import mpgen
| [
2,
220,
11593,
15003,
834,
25,
1309,
21015,
760,
428,
318,
257,
5301,
198,
6738,
29034,
5235,
1330,
29034,
5235,
628,
197
] | 3.318182 | 22 |
from flask import Flask, render_template, redirect, jsonify
from flask_pymongo import PyMongo
from datetime import datetime
import json
import pandas as pd
import os
import numpy as np
import datetime
import csv
import pymongo
import request
# function to save dataframe to collection_name in MongoDB 'wines'
# In[2]:
# Load CSV file
csv_path = os.path.join('..',"rawdata", "gun-violence-data_01-2013_12-2015.csv")
# Read the first half of the gun violence file and store into Pandas data frame
gun_violence_df_2015 = pd.read_csv(csv_path, encoding = "ISO-8859-1")
gun_violence_df_2015.head()
# In[3]:
# Load CSV file
csv_path = os.path.join('..',"rawdata", "gun-violence-data_01-2016_03-2018.csv")
# Read the second half of the gun violence file and store into Pandas data frame
gun_violence_df_2018 = pd.read_csv(csv_path, encoding = "ISO-8859-1")
gun_violence_df_2018.head()
# In[4]:
# Recomine the two files
gun_violence_df= pd.concat([gun_violence_df_2015, gun_violence_df_2018])
gun_violence_df.head()
# In[5]:
# Convert the date field to date/time and removed unnecessary columns
gun_violence_df['date']= pd.to_datetime(gun_violence_df['date'])
gun_violence_df=gun_violence_df.loc[(gun_violence_df['date'] <'2018-01-01') & (gun_violence_df['date']>'2013-12-31') ]
gun_violence_df.drop(columns=['address', 'incident_url', 'incident_url_fields_missing', 'source_url', 'participant_name','sources', 'location_description','notes'], inplace=True, axis=1)
gun_violence_df.head()
# In[6]:
# Search the incident_characteristics for specific incident types and set that incident type to True
# Each flag column is True when the free-text characteristics field mentions
# the keyword (case-insensitive; NaN entries count as no match via na=False).
gun_violence_df["mass"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Mass Shooting", case=False, na=False), True, False)
gun_violence_df["gang"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Gang", case=False, na=False), True, False)
gun_violence_df["domestic"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Domestic Violence", case=False, na=False), True, False)
gun_violence_df["non-shooting"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Non-Shooting", case=False, na=False), True, False)
gun_violence_df["accidental"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Accidental", case=False, na=False), True, False)
gun_violence_df["prohibited"]=np.where(gun_violence_df['incident_characteristics'].str.contains("prohibited", case=False, na=False), True, False)
# "Officer|TSA" is a regex alternation: matches either term.
gun_violence_df['officer'] = np.where(gun_violence_df['incident_characteristics'].str.contains("Officer|TSA", case=False, na=False), True, False)
gun_violence_df.head()
# ## Load csv files into pandas dataframes, clean, save to mongo db
# In[7]:
# read in cities data
cities_path = os.path.join("..","Data","Cities.csv")
df_cities = pd.read_csv(cities_path, encoding="UTF-8")
df_cities.head()
# # save to/replace collection "cities" in "guns" mongo db
saveMongo(df_cities, "cities", replace=True)
# In[8]:
# read in state data
states_path = os.path.join("..","Data","States.csv")
df_states = pd.read_csv(states_path, encoding="UTF-8")
df_states = df_states[["state","census_2010","pop_estimate_2015","2015_median_income", "age18longgunpossess","age21longgunpossess","assault","mentalhealth","universal"]]
df_states.head()
# # save to/replace collection "states" in "guns" mongo db
saveMongo(df_states, "states", replace=True)
# In[12]:
# Loading gun violence
df_guns = gun_violence_df
df_guns = df_guns[["incident_id","date","state","city_or_county","n_killed","n_injured","incident_characteristics","latitude","longitude","mass","gang","domestic","non-shooting","accidental","prohibited","officer"]]
df_guns["n_involved"] = df_guns["n_killed"]+df_guns["n_injured"]
df_guns["year"]= pd.DatetimeIndex(df_guns['date']).year
# Create a column to record type of shooting
conditions = [
(df_guns["mass"]==1),
(df_guns["n_involved"] == 0),
(df_guns["n_killed"]==0)]
choices = ["mass shooting", "no injuries","injuries only"]
df_guns["shoot_type"] = np.select(conditions, choices, default="some dead")
df_guns.head()
# Add in state level data for filtering purposes
df_guns_complete = pd.merge(df_guns, df_states, on="state", how="left")
df_guns_complete["count"] = 1
df_guns_complete.head()
# save to/replace collection "guns" in "guns" mongo db
saveMongo(df_guns_complete, "guns", replace=True)
# In[10]:
summary_guns_df = df_guns_complete.groupby("shoot_type",as_index=False).sum()[["pop_estimate_2015"]]
summary_guns_df["shoot_type"] = df_guns_complete.groupby("shoot_type",as_index=False).first()["shoot_type"]
summary_guns_df["Count"] = df_guns_complete.groupby("shoot_type",as_index=False).sum()[["count"]]
summary_guns_df["n_killed"]= df_guns_complete.groupby("shoot_type",as_index=False).sum()[["n_killed"]]
summary_guns_df["Incidents_per_100M"] = summary_guns_df ["Count"]/summary_guns_df["pop_estimate_2015"]*100000000
summary_guns_df["Killed_per_100M"] = summary_guns_df ["n_killed"]/summary_guns_df["pop_estimate_2015"]*100000000
summary_guns_df.reset_index()
summary_guns_df.head()
# save to/replace collection "guns_summary" in "guns" mongo db
saveMongo(summary_guns_df, "guns_summary", replace=True)
# In[17]:
summary_states_df = df_guns_complete.groupby(["shoot_type","state"], as_index=False).sum()[["pop_estimate_2015"]]
summary_states_df["state"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["state"]
summary_states_df["shoot_type"] = df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["shoot_type"]
summary_states_df["Count"] = df_guns_complete.groupby(["shoot_type", "state"],as_index=False).sum()[["count"]]
summary_states_df["n_killed"]= df_guns_complete.groupby(["shoot_type","state"],as_index=False).sum()[["n_killed"]]
summary_states_df["Incidents_per_100M"] = summary_states_df ["Count"]/summary_states_df["pop_estimate_2015"]*100000000
summary_states_df["Killed_per_100M"] = summary_states_df ["n_killed"]/summary_states_df["pop_estimate_2015"]*100000000
summary_states_df["2015_median_income"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["2015_median_income"]
summary_states_df["age18longgunpossess"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["age18longgunpossess"]
summary_states_df["age21longgunpossess"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["age21longgunpossess"]
summary_states_df["assault"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["assault"]
summary_states_df["mentalhealth"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["mentalhealth"]
summary_states_df["universal"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["universal"]
summary_states_df.reset_index()
summary_states_df.head()
# save to/replace collection "state_summary" in "guns" mongo db
saveMongo(summary_states_df, "state_summary", replace=True)
s
# from bson.json_util import loads
# Create an instance of Flask
app = Flask(__name__)
# Use PyMongo to establish Mongo connection
mongo = PyMongo(app, uri="mongodb://localhost:27017/guns")
# Define shooting list
ShootList = ["mass shooting", "no injuries", "injuries only", "some dead"]
# ShootList = ["mass shooting"]
@app.route("/")
@app.route("/maps")
@app.route("/benchmark")
@app.route("/interactive_chart")
@app.route("/jsonifiedcities")
@app.route("/jsonifiedguns")
@app.route("/jsonifiedguns/<yr>")
@app.route("/jsonifiedstates")
@app.route("/jsonifiedsummary")
@app.route("/jsonifiedstatesummary")
if __name__ == "__main__":
app.run(debug=True)
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
18941,
11,
33918,
1958,
198,
6738,
42903,
62,
79,
4948,
25162,
1330,
9485,
44,
25162,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
33918,
198,
11748,
19798,
292,
355,
279,
67,
... | 2.825577 | 2,729 |
import unittest
from docker_image import digest
from docker_image import reference
| [
11748,
555,
715,
395,
198,
198,
6738,
36253,
62,
9060,
1330,
16274,
198,
6738,
36253,
62,
9060,
1330,
4941,
628,
198
] | 4.095238 | 21 |
import time
import sha
from pyftpdlib import ftpserver
# NOTE(review): Python 2 code (print statements; the `sha` module was removed
# in Python 3).  `mysha` is not defined in this excerpt -- presumably a
# helper around sha/hexdigest; confirm against the full file.
username="user"

authorizer = ftpserver.DummyAuthorizer()
# Derive a short throw-away password from the current time.
password = mysha((str(time.time()) + "babble"))[:7]
print "user:",username
print "password:",password
# Grant the user full permissions ("elradfmw") on the ./data directory.
authorizer.add_user(username, password, "./data", perm="elradfmw")
#authorizer.add_anonymous(".")

ftp_handler = ftpserver.FTPHandler
ftp_handler.authorizer = authorizer

#address = ("127.0.0.1", 21) # listen only on localhost
address = ("", 21) # listen on all interfaces

ftpd = ftpserver.FTPServer(address, ftp_handler)
# Blocks here serving FTP until interrupted.
ftpd.serve_forever()
11748,
640,
198,
11748,
427,
64,
198,
6738,
12972,
701,
30094,
8019,
1330,
10117,
862,
18497,
628,
198,
198,
29460,
2625,
7220,
1,
198,
198,
9800,
7509,
796,
10117,
862,
18497,
13,
35,
13513,
13838,
7509,
3419,
198,
198,
28712,
796,
6... | 2.871795 | 195 |
import os
import socket
import subprocess
import time
import signal
import sys
import struct
while True:
time.sleep(0.1)
main()
| [
11748,
28686,
198,
11748,
17802,
198,
11748,
850,
14681,
198,
11748,
640,
198,
11748,
6737,
198,
11748,
25064,
198,
11748,
2878,
628,
198,
198,
4514,
6407,
25,
198,
220,
220,
220,
640,
13,
42832,
7,
15,
13,
16,
8,
198,
220,
220,
220... | 3.021739 | 46 |
"""
Author: CaptCorpMURICA
Project: 100DaysPython
File: module1_day07_ranges.py
Creation Date: 6/2/2019, 8:55 AM
Description: Basic instruction of ranges in python.
"""
# A range counts from 0 up to -- but never including -- its stop value:
# range(10) yields the indices 0 through 9, and 42 would not appear in a
# range stopping at 42.
print(range(10))
print(list(range(10)))
print(range(0, 9, 2) == range(0, 10, 2))

# The full constructor is range(start, stop, step).
evens = range(0, 10, 2)
odds = range(1, 10, 2)
print("The even range is {} and the values are {}".format(evens, list(evens)))
print("The odd range is {} and the values are {}".format(odds, list(odds)))

# A negative step walks backwards; the larger bound must then come first.
evens = range(10, 0, -2)
odds = range(9, 0, -2)
print("The even range is {} and the values are {}".format(evens, list(evens)))
print("The odd range is {} and the values are {}".format(odds, list(odds)))

# Stepping by `divisor` enumerates its multiples, giving a quick membership
# test for divisibility.  input() prompts the user; if/elif/else branches on
# the answer.
divisor = int(input("Please provide a whole number for the divisibility check: "))
candidate = int(input("Please provide a whole number, less than 1 million, that is to be tested for divisibility: "))
multiples = range(divisor, 1000000, divisor)

if candidate > 1000000:
    print("Please select a number less than 1 million and try again. Thank you")
elif candidate in multiples:
    print("{} is divisible by {}.".format(candidate, divisor))
else:
    print("{} is not divisible by {}.".format(candidate, divisor))
| [
37811,
198,
220,
220,
220,
6434,
25,
220,
220,
220,
220,
220,
220,
220,
220,
6790,
45680,
44,
4261,
25241,
198,
220,
220,
220,
4935,
25,
220,
220,
220,
220,
220,
220,
220,
1802,
38770,
37906,
198,
220,
220,
220,
9220,
25,
220,
220... | 3.177066 | 593 |
# -*- coding: utf-8 -*-
from .input_argument import InputArgument
from .input_option import InputOption
def argument(name, description='',
             required=False, default=None, is_list=False,
             validator=None):
    """
    Helper function to create a new argument.

    :param name: The name of the argument.
    :type name: str

    :param description: A helpful description of the argument.
    :type description: str

    :param required: Whether the argument is required or not.
    :type required: bool

    :param default: The default value of the argument.
    :type default: mixed

    :param is_list: Whether the argument should be a list or not.
    :type is_list: bool

    :param validator: An optional validator.
    :type validator: Validator or str

    :rtype: InputArgument
    """
    # Arguments are optional unless explicitly marked required.
    mode = InputArgument.OPTIONAL
    if required:
        mode = InputArgument.REQUIRED

    # IS_LIST is a flag bit combined with the required/optional mode.
    if is_list:
        mode |= InputArgument.IS_LIST

    return InputArgument(name, mode, description, default, validator)
def option(name, shortcut=None, description='',
           flag=True, value_required=None, is_list=False,
           default=None, validator=None):
    """
    Helper function to create an option.

    :param name: The name of the option
    :type name: str

    :param shortcut: The shortcut (Optional)
    :type shortcut: str or None

    :param description: The description of the option.
    :type description: str

    :param flag: Whether the option is a flag or not.
    :type flag: bool

    :param value_required: Whether a value is required or not.
    :type value_required: bool or None

    :param is_list: Whether the option is a list or not.
    :type is_list: bool

    :param default: The default value.
    :type default: mixed

    :param validator: An optional validator.
    :type validator: Validator or str

    :rtype: InputOption
    """
    # value_required is tri-state: True -> a value must be given,
    # False -> a value may be given, None (default) -> plain flag.
    if value_required is True:
        mode = InputOption.VALUE_REQUIRED
    elif value_required is False:
        mode = InputOption.VALUE_OPTIONAL
    else:
        mode = InputOption.VALUE_IS_FLAG

    # List-ness is an extra flag bit on top of the value mode.
    if is_list:
        mode |= InputOption.VALUE_IS_LIST

    return InputOption(
        name, shortcut, mode, description,
        default, validator
    )
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
764,
15414,
62,
49140,
1330,
23412,
28100,
1713,
198,
6738,
764,
15414,
62,
18076,
1330,
23412,
19722,
628,
198,
4299,
4578,
7,
3672,
11,
6764,
11639,
3256,
1... | 2.809524 | 798 |
from django import forms
| [
6738,
42625,
14208,
1330,
5107,
628
] | 4.333333 | 6 |
from . import unittest
from shapely.algorithms.polylabel import polylabel, Cell
from shapely.geometry import LineString, Point, Polygon
from shapely.errors import TopologicalError
| [
6738,
764,
1330,
555,
715,
395,
198,
6738,
5485,
306,
13,
282,
7727,
907,
13,
35428,
18242,
1330,
7514,
18242,
11,
12440,
198,
6738,
5485,
306,
13,
469,
15748,
1330,
6910,
10100,
11,
6252,
11,
12280,
14520,
198,
6738,
5485,
306,
13,
... | 3.770833 | 48 |
import json
import unittest
import webtest
import os
from google.appengine.ext import testbed
from api.src import main
| [
11748,
33918,
198,
11748,
555,
715,
395,
198,
11748,
3992,
9288,
198,
11748,
28686,
198,
198,
6738,
23645,
13,
1324,
18392,
13,
2302,
1330,
1332,
3077,
628,
198,
6738,
40391,
13,
10677,
1330,
1388,
628,
220,
220,
220,
220,
198,
220,
2... | 2.151899 | 79 |
import yaml
from joblib import load
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from helpers import preprocess_single_text, load_mapping
# Sample document (a German veterinary-invoice reimbursement e-mail plus file
# metadata) used to smoke-test the trained classification pipelines end to end.
text = "ki_erstattung_test_topf2.txt Details Activity ki_erstattung_test_topf2.txt Sharing Info. Who has access M General Info. System properties Type Text Size 496 bytes Storage used 496 bytes Location testcases Owner Marc Bachmann Modified Dec 15, 2021 by Marc Bachmann Opened 6:32 PM by me Created Dec 15, 2021 Description. No description Download permissions. Viewers can download From: Marijke Holtkamp <m.etzrodtgweb.de> To: tierarztrechnung@barmenia.de Subject Tierarztrechungen Sent Thu, 21 Oct 2021 14:28:46+0200 IMG 2798.JPG IMG_2799.JPG Sehr geehrte Damen und Herren, anbei sende ich Ihnen die Tierarztrechnung unserer Hündin Clara Tari mit der bitte um Erstattung: KreisSparkasse Köln DE 74 3705 0299 1152 0271 47 BIC COKSDE33xxX Vielen Dank.! Mit freundlichem Gruß Marijke Holtkamp"
# German stop-word list and stemmer, matching the sample text's language.
stopwords_locale = 'german'
stemmer = SnowballStemmer(stopwords_locale)
stop_words = set(stopwords.words(stopwords_locale))
# Project-curated additional stop words shipped with the dataset.
with open('../dataset/stopwords.yaml', 'r') as f:
    curated_stop_words = yaml.safe_load(f)
# Normalize, remove stop words and stem — presumably mirroring the training
# preprocessing; confirm against the training script.
text = preprocess_single_text(text, stop_words=stop_words, curated_stop_words=curated_stop_words, stemming=True, stemmer=stemmer)
# Maps model class ids to human-readable labels.
mapping_dict = load_mapping(mapping_file='../dataset/mapping.yaml')
# load the saved pipeline models and report per-class probabilities for each
for filename in ["../trained_models/model_logreg.sav", "../trained_models/model_sgd.sav"]:
    pipeline = load(filename)
    # predict on the text
    json_result = {}
    for cls, prob in zip(pipeline.classes_.tolist(), pipeline.predict_proba([text]).tolist().pop()):
        json_result[mapping_dict[cls]] = prob
    print(filename, '\n', json_result)
11748,
331,
43695,
198,
6738,
1693,
8019,
1330,
3440,
198,
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
2245,
10879,
198,
6738,
299,
2528,
74,
13,
927,
13,
82,
2197,
1894,
1330,
7967,
1894,
1273,
368,
647,
198,
6738,
49385,
1330,
... | 2.848485 | 627 |
# Modify the previous program to also show the sum of the numbers at the end.
n1 = int(input("Digite um número: "))
n2 = int(input("Digite outro número: "))
# Accumulate the sum while printing; the original printed ``i + i`` (twice the
# last loop index), which is not the sum, and raised NameError when both
# ranges were empty (e.g. n1 == n2).
soma = 0
# Only one of the two ranges is non-empty, depending on which input is larger.
for i in range(n1 + 1, n2):
    print(i)
    soma += i
for i in range(n2 + 1, n1):
    print(i)
    soma += i
print("Soma dos números: ", soma)
| [
2,
29161,
567,
267,
1430,
64,
32700,
31215,
749,
20040,
645,
2457,
257,
3870,
64,
23430,
299,
21356,
647,
418,
13,
201,
198,
201,
198,
77,
16,
796,
493,
7,
15414,
7203,
19511,
578,
23781,
299,
21356,
647,
78,
25,
366,
4008,
201,
1... | 2.027972 | 143 |
#
# Provider information sources for `Attitude' - a false horizon display using
# accelerometer information. (c) Andrew Flegg 2009
# Released under the Artistic Licence
import os.path
from math import sin, cos, pi
class Dummy:
    """One of the simplest providers: returns dead-on, flat."""
    # NOTE(review): no methods are defined here — presumably the provider
    # protocol treats missing readings as zero/flat; confirm against the
    # consumer of these provider classes.
class Demo:
    """A demonstration provider which will take the user on a tour through
       the air."""
    # Simulated acceleration components; start level (all zero). Presumably
    # advanced over time by code outside this view — confirm.
    x = 0.0
    y = 0.0
    z = 0.0
class NokiaAccelerometer:
"""An accelerometer provider which actually reads an RX-51's
accelerometers, based on http://wiki.maemo.org/Accelerometers"""
global ACCELEROMETER_PATH
ACCELEROMETER_PATH = '/sys/class/i2c-adapter/i2c-3/3-001d/coord'
@classmethod
| [
2,
198,
2,
32549,
1321,
4237,
329,
4600,
8086,
3984,
6,
532,
257,
3991,
17810,
3359,
1262,
198,
2,
8320,
15635,
1321,
13,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
357,
66,
8,
6858,
12005,
1130,
... | 2.507987 | 313 |
# эти строки введены для IDE, их нужно закомментировать или удалить
"""
Список метрик и соответствующих им методов
"""
metrics = {
'productType': {
'method': getProductType,
'name': 'productType', # данное поле нужно использовать для названия вложенного поля
},
'productColor': {
'method': getProductColor,
'name': 'productColor',
},
'productCondition': {
'method': getProductCondition,
'name': 'productCondition',
}
}
"""
Список веток
"""
branches = {
'branch1': 'price==Low,condition==New',
'branch2': 'price==Medium,color==Red',
'branch3': 'color==Blue,price==High',
}
def getStatistics(strStats, values, globalFilter, limit=None, offset=None):
    """
    Build (possibly nested) statistics for a list of metrics and branches.

    :param strStats: 'productType;branch1;productColor;branch2;branch3;productCondition'
    :param values: 'sklad1,sklad2'
    :param globalFilter: ''
    :param limit:
    :param offset:
    :return:
    """
    # NOTE(review): mutable default argument; appears safe here because
    # listStatFilter is never mutated in place (recursive calls always pass a
    # freshly built list) — but confirm before reusing this helper.
    def getRecursive(lvl=0, listStatFilter=[], statIndex=0):
        """
        :param lvl: current nesting depth
        :param listStatFilter: segments of the previously processed metrics
        :return: a two-element list: ['metric name', [statistics for that metric]]
            For example: ['productType', [{'label': 'Окна', 'segment': 'productType==Окна',...}, {...}, ...]
            listStats and listValues are taken from the enclosing function
        """
        # recursion exit condition — needed when listStats is empty to begin with
        if statIndex == len(listStats):
            return None
        # build the filter
        # NOTE(review): list + globalFilter only works if globalFilter is a
        # list, yet the docstring above shows a string ('') — verify callers.
        # Also, ``filter`` shadows the built-in of the same name.
        filter = ';'.join(listStatFilter + globalFilter)
        # take the next metric for which statistics are required
        curStat = listStats[statIndex]
        # if the current metric is a branch
        if curStat.startswith('branch'):
            nameMetric = 'branch'
            listBranches = [curStat]
            # collect all consecutive branch entries
            while statIndex < (len(listStats) - 1):
                statIndex += 1
                if not listStats[statIndex].startswith('branch'):
                    statIndex -= 1
                    break
                listBranches.append(listStats[statIndex])
            listBranches.append('All data') # for the 'All data' branch
            # at this point we have the branch names and can fetch their statistics
            data = []
            for branch in listBranches:
                query = {
                    'method': 'branch', # marks this as a branch request
                    'branch': branch,
                    'values': values,
                    'filter': filter,
                    'limit': None, # limit and offset are meaningless in this branch as well
                    'offset': None,
                }
                data.append(get_stat_api([query])[0][0])
        # if the current metric is an ordinary one, e.g. 'productType'
        else:
            nameMetric = metrics[curStat]['name']
            list_of_queries = [{
                'method': metrics[nameMetric]['method'],
                'values': values,
                'filter': filter,
                'limit': limit if lvl == 0 else None,
                'offset': offset if lvl == 0 else None,
            }]
            data = get_stat_api(list_of_queries)[0]
        # recursion exit condition — reached the end of listStats
        if (statIndex + 1) == len(listStats):
            return [nameMetric, data]
        # if there are nested branches, this is the place to handle them
        if listStats[statIndex + 1] == 'subbranch':
            statIndex += 1 # simply skip this synthetic list element
        # here we have the statistics in data and the matching metric name
        for item in data:
            # items without a 'segment' field end the recursion — they are the last in the chain
            if 'segment' not in item:
                continue
            result = getRecursive(lvl + 1, listStatFilter + [item['segment']], statIndex + 1)
            # result[0] here is exactly metrics[metricName]['name']
            item[result[0]] = result[1]
        return [nameMetric, data]
    # for correct handling of nested branches like branch1,branch2|branch3,
    # replace the separator with an extra "virtual" metric, producing roughly
    # a list like ['branch1', 'branch2', 'subbranch', "branch3"]
    strStats = strStats.replace('|', ',subbranch,')
    # convert the string of requested statistics into a list,
    # first stripping all spaces from the string
    listStats = strStats.replace(' ', '').split(',')
    return {
        'stats': getRecursive()[1],
    }
| [
2,
220,
141,
235,
20375,
18849,
220,
21727,
20375,
21169,
25443,
118,
18849,
12466,
110,
38857,
16843,
43666,
16843,
22177,
45035,
12466,
112,
30143,
40623,
33497,
11,
12466,
116,
141,
227,
12466,
121,
35072,
140,
114,
22177,
15166,
12466,
... | 1.474488 | 3,273 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import time
import os
from sklearn.svm import SVR
import joblib
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit_utils import smiles_dataset
from utils import save_dataset
# Trained regression model — presumably predicting a log-scale activity value
# (pKi/pIC50); confirm against the training script.
model_load = joblib.load('./models/model.pkl')
# Raw screening library and its precomputed fingerprint bit matrix.
database = pd.read_csv('./screening_base/in-vitro_zinc/in-vitro.csv')
screen_database = pd.read_csv('./datasets/screen_results/in-vitro_zinc/in-vitro_bits.csv')
# Predict activity for every compound and attach it to the library table
# (concat relies on both frames sharing the same row order/index).
screen_result = model_load.predict(screen_database)
screen_result_fp = pd.DataFrame({'Predictive Results': screen_result})
database_result = pd.concat([database, screen_result_fp], axis = 1)
# Keep predicted actives above the threshold of 7.
threshold_7 = database_result[database_result['Predictive Results'] > 7]
# Remove hits that already occur in the reference dataset of known structures.
original_dataset = pd.read_csv('./datasets/all_structures.csv')
de_threshold_7 = threshold_7
for smile in original_dataset['Smiles']:
    for new_structure in threshold_7['smiles']:
        if smile == new_structure:
            # NOTE(review): .index[0] only targets the FIRST row with this
            # SMILES, and dropping the same index twice raises KeyError —
            # confirm SMILES values are unique in threshold_7.
            index = threshold_7[threshold_7['smiles'] == smile].index[0]
            print('overlap found at position: {:01d}'.format(index))
            de_threshold_7 = de_threshold_7.drop(index = index, axis = 0)
        else:
            pass
# Persist both the raw hit list and the de-duplicated one.
save_dataset(threshold_7, path = './datasets/screen_results/in-vitro_zinc/', file_name = 'threshold_7', idx = False)
save_dataset(de_threshold_7, path = './datasets/screen_results/in-vitro_zinc/', file_name = 'de_threshold_7', idx = False)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
302,
198,
11748,
640,
198,
11748,
28686,
198,
6738,
1341,
35720,
13,
82,
14761,
1330,
311,
... | 2.501706 | 586 |
# Generated by Django 3.1.12 on 2021-08-13 00:35
from django.db import migrations, models
import django.utils.timezone
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
1065,
319,
33448,
12,
2919,
12,
1485,
3571,
25,
2327,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
26791,
13,
2435,
11340,
628
] | 2.95122 | 41 |
r"""
Six Vertex Model
"""
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.list_clone import ClonableArray
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.combinat.combinatorial_map import combinatorial_map
class SixVertexConfiguration(ClonableArray):
"""
A configuration in the six vertex model.
"""
    def check(self):
        """
        Check if ``self`` is a valid 6 vertex configuration.

        EXAMPLES::

            sage: M = SixVertexModel(3, boundary_conditions='ice')
            sage: M[0].check()
        """
        # Membership in the parent model enforces the boundary conditions and
        # the local arrow rules; anything else is rejected.
        if self not in self.parent():
            raise ValueError("invalid configuration")
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: M[0]
^ ^ ^
| | |
--> # <- # <- # <--
| ^ ^
V | |
--> # -> # <- # <--
| | ^
V V |
--> # -> # -> # <--
| | |
V V V
"""
# List are in the order of URDL
ascii = [[r' V ', ' -', r' ^ ', '- '], # LR
[r' | ', ' <', r' ^ ', '- '], # LU
[r' V ', ' <', r' | ', '- '], # LD
[r' | ', ' <', r' | ', '> '], # UD
[r' | ', ' -', r' ^ ', '> '], # UR
[r' V ', ' -', r' | ', '> ']] # RD
ret = ' '
# Do the top line
for entry in self[0]:
if entry == 1 or entry == 3 or entry == 4:
ret += ' ^ '
else:
ret += ' | '
# Do the meat of the ascii art
for row in self:
ret += '\n '
# Do the top row
for entry in row:
ret += ascii[entry][0]
ret += '\n'
# Do the left-most entry
if row[0] == 0 or row[0] == 1 or row[0] == 2:
ret += '<-'
else:
ret += '--'
# Do the middle row
for entry in row:
ret += ascii[entry][3] + '#' + ascii[entry][1]
# Do the right-most entry
if row[-1] == 0 or row[-1] == 4 or row[-1] == 5:
ret += '->'
else:
ret += '--'
# Do the bottom row
ret += '\n '
for entry in row:
ret += ascii[entry][2]
# Do the bottom line
ret += '\n '
for entry in self[-1]:
if entry == 2 or entry == 3 or entry == 5:
ret += ' V '
else:
ret += ' | '
return ret
def to_signed_matrix(self):
"""
Return the signed matrix of ``self``.
The signed matrix corresponding to a six vertex configuration is
given by `0` if there is a cross flow, a `1` if the outward arrows
are vertical and `-1` if the outward arrows are horizonal.
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: map(lambda x: x.to_signed_matrix(), M)
[
[1 0 0] [1 0 0] [ 0 1 0] [0 1 0] [0 1 0] [0 0 1] [0 0 1]
[0 1 0] [0 0 1] [ 1 -1 1] [1 0 0] [0 0 1] [1 0 0] [0 1 0]
[0 0 1], [0 1 0], [ 0 1 0], [0 0 1], [1 0 0], [0 1 0], [1 0 0]
]
"""
from sage.matrix.constructor import matrix
# verts = ['LR', 'LU', 'LD', 'UD', 'UR', 'RD']
return matrix([[matrix_sign(_) for _ in row] for row in self])
def plot(self, color='sign'):
"""
Return a plot of ``self``.
INPUT:
- ``color`` -- can be any of the following:
* ``4`` - use 4 colors: black, red, blue, and green with each
corresponding to up, right, down, and left respectively
* ``2`` - use 2 colors: red for horizontal, blue for vertical arrows
* ``'sign'`` - use red for right and down arrows, blue for left
and up arrows
* a list of 4 colors for each direction
* a function which takes a direction and a boolean corresponding
to the sign
EXAMPLES::
sage: M = SixVertexModel(2, boundary_conditions='ice')
sage: print M[0].plot().description()
Arrow from (-1.0,0.0) to (0.0,0.0)
Arrow from (-1.0,1.0) to (0.0,1.0)
Arrow from (0.0,0.0) to (0.0,-1.0)
Arrow from (0.0,0.0) to (1.0,0.0)
Arrow from (0.0,1.0) to (0.0,0.0)
Arrow from (0.0,1.0) to (0.0,2.0)
Arrow from (1.0,0.0) to (1.0,-1.0)
Arrow from (1.0,0.0) to (1.0,1.0)
Arrow from (1.0,1.0) to (0.0,1.0)
Arrow from (1.0,1.0) to (1.0,2.0)
Arrow from (2.0,0.0) to (1.0,0.0)
Arrow from (2.0,1.0) to (1.0,1.0)
"""
from sage.plot.graphics import Graphics
from sage.plot.circle import circle
from sage.plot.arrow import arrow
if color == 4:
color_list = ['black', 'red', 'blue', 'green']
cfunc = lambda d,pm: color_list[d]
elif color == 2:
cfunc = lambda d,pm: 'red' if d % 2 == 0 else 'blue'
elif color == 1 or color is None:
cfunc = lambda d,pm: 'black'
elif color == 'sign':
cfunc = lambda d,pm: 'red' if pm else 'blue' # RD are True
elif isinstance(color, (list, tuple)):
cfunc = lambda d,pm: color[d]
else:
cfunc = color
G = Graphics()
for j,row in enumerate(reversed(self)):
for i,entry in enumerate(row):
if entry == 0: # LR
G += arrow((i,j+1), (i,j), color=cfunc(2, True))
G += arrow((i,j), (i+1,j), color=cfunc(1, True))
if j == 0:
G += arrow((i,j-1), (i,j), color=cfunc(0, False))
if i == 0:
G += arrow((i,j), (i-1,j), color=cfunc(3, False))
elif entry == 1: # LU
G += arrow((i,j), (i,j+1), color=cfunc(0, False))
G += arrow((i+1,j), (i,j), color=cfunc(3, False))
if j == 0:
G += arrow((i,j-1), (i,j), color=cfunc(0, False))
if i == 0:
G += arrow((i,j), (i-1,j), color=cfunc(3, False))
elif entry == 2: # LD
G += arrow((i,j+1), (i,j), color=cfunc(2, True))
G += arrow((i+1,j), (i,j), color=cfunc(3, False))
if j == 0:
G += arrow((i,j), (i,j-1), color=cfunc(2, True))
if i == 0:
G += arrow((i,j), (i-1,j), color=cfunc(3, False))
elif entry == 3: # UD
G += arrow((i,j), (i,j+1), color=cfunc(0, False))
G += arrow((i+1,j), (i,j), color=cfunc(3, False))
if j == 0:
G += arrow((i,j), (i,j-1), color=cfunc(2, True))
if i == 0:
G += arrow((i-1,j), (i,j), color=cfunc(1, True))
elif entry == 4: # UR
G += arrow((i,j), (i,j+1), color=cfunc(0, False))
G += arrow((i,j), (i+1,j), color=cfunc(1, True))
if j == 0:
G += arrow((i,j-1), (i,j), color=cfunc(0, False))
if i == 0:
G += arrow((i-1,j), (i,j), color=cfunc(1, True))
elif entry == 5: # RD
G += arrow((i,j+1), (i,j), color=cfunc(2, True))
G += arrow((i,j), (i+1,j), color=cfunc(1, True))
if j == 0:
G += arrow((i,j), (i,j-1), color=cfunc(2, True))
if i == 0:
G += arrow((i-1,j), (i,j), color=cfunc(1, True))
G.axes(False)
return G
def energy(self, epsilon):
r"""
Return the energy of the configuration.
The energy of a configuration `\nu` is defined as
.. MATH::
E(\nu) = n_0 \epsilon_0 + n_1 \epsilon_1 + \cdots + n_5 \epsilon_5
where `n_i` is the number of vertices of type `i` and
`\epsilon_i` is the `i`-th energy constant.
.. NOTE::
We number our configurations as:
0. LR
1. LU
2. LD
3. UD
4. UR
5. RD
which differs from :wikipedia:`Ice-type_model`.
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: nu = M[2]; nu
^ ^ ^
| | |
--> # -> # <- # <--
^ | ^
| V |
--> # <- # -> # <--
| ^ |
V | V
--> # -> # <- # <--
| | |
V V V
sage: nu.energy([1,2,1,2,1,2])
15
A KDP energy::
sage: nu.energy([1,1,0,1,0,1])
7
A Rys `F` energy::
sage: nu.energy([0,1,1,0,1,1])
4
The zero field assumption::
sage: nu.energy([1,2,3,1,3,2])
15
"""
if len(epsilon) != 6:
raise ValueError("there must be 6 energy constants")
return sum(epsilon[entry] for row in self for entry in row)
class SixVertexModel(UniqueRepresentation, Parent):
"""
The six vertex model.
We model a configuration by indicating which configuration by the
following six configurations which are determined by the two outgoing
arrows in the Up, Right, Down, Left directions:
1. LR::
|
V
<-- # -->
^
|
2. LU::
^
|
<-- # <--
^
|
3. LD::
|
V
<-- # <--
|
V
4. UD::
^
|
--> # <--
|
V
5. UR::
^
|
--> # -->
^
|
6. RD::
|
V
--> # -->
|
V
INPUT:
- ``n`` -- the number of rows
- ``m`` -- (optional) the number of columns, if not specified, then
the number of columns is the number of rows
- ``boundary_conditions`` -- (optional) a quadruple of tuples whose
entries are either:
* ``True`` for an inward arrow,
* ``False`` for an outward arrow, or
* ``None`` for no boundary condition.
There are also the following predefined boundary conditions:
* ``'ice'`` - The top and bottom boundary conditions are outward and the
left and right boundary conditions are inward; this gives the square
ice model. Also called domain wall boundary conditions.
* ``'domain wall'`` - Same as ``'ice'``.
* ``'alternating'`` - The boundary conditions alternate between inward
and outward.
* ``'free'`` - There are no boundary conditions.
EXAMPLES:
Here are the six types of vertices that can be created::
sage: M = SixVertexModel(1)
sage: list(M)
[
| ^ | ^ ^ |
V | V | | V
<-- # --> <-- # <-- <-- # <-- --> # <-- --> # --> --> # -->
^ ^ | | ^ |
| , | , V , V , | , V
]
When using the square ice model, it is known that the number of
configurations is equal to the number of alternating sign matrices::
sage: M = SixVertexModel(1, boundary_conditions='ice')
sage: len(M)
1
sage: M = SixVertexModel(4, boundary_conditions='ice')
sage: len(M)
42
sage: all(len(SixVertexModel(n, boundary_conditions='ice'))
....: == AlternatingSignMatrices(n).cardinality() for n in range(1, 7))
True
An example with a specified non-standard boundary condition and
non-rectangular shape::
sage: M = SixVertexModel(2, 1, [[None], [True,True], [None], [None,None]])
sage: list(M)
[
^ ^ | ^
| | V |
<-- # <-- <-- # <-- <-- # <-- --> # <--
^ ^ | |
| | V V
<-- # <-- --> # <-- <-- # <-- <-- # <--
^ | | |
| , V , V , V
]
REFERENCES:
- :wikipedia:`Vertex_model`
- :wikipedia:`Ice-type_model`
"""
@staticmethod
def __classcall_private__(cls, n, m=None, boundary_conditions=None):
"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: M1 = SixVertexModel(1, boundary_conditions=[[False],[True],[False],[True]])
sage: M2 = SixVertexModel(1, 1, ((False,),(True,),(False,),(True,)))
sage: M1 is M2
True
"""
if m is None:
m = n
if boundary_conditions is None or boundary_conditions == 'free':
boundary_conditions = ((None,)*m, (None,)*n)*2
elif boundary_conditions == 'alternating':
bdry = True
cond = []
for dummy in range(2):
val = []
for k in range(m):
val.append(bdry)
bdry = not bdry
cond.append(tuple(val))
val = []
for k in range(n):
val.append(bdry)
bdry = not bdry
cond.append(tuple(val))
boundary_conditions = tuple(cond)
elif boundary_conditions == 'ice' or boundary_conditions == 'domain wall':
if m == n:
return SquareIceModel(n)
boundary_conditions = ((False,)*m, (True,)*n)*2
else:
boundary_conditions = tuple(tuple(x) for x in boundary_conditions)
return super(SixVertexModel, cls).__classcall__(cls, n, m, boundary_conditions)
def __init__(self, n, m, boundary_conditions):
"""
Initialize ``self``.
EXAMPLES::
sage: M = SixVertexModel(2, boundary_conditions='ice')
sage: TestSuite(M).run()
"""
self._nrows = n
self._ncols = m
self._bdry_cond = boundary_conditions # Ordered URDL
Parent.__init__(self, category=FiniteEnumeratedSets())
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: SixVertexModel(2, boundary_conditions='ice')
The six vertex model on a 2 by 2 grid
"""
return "The six vertex model on a {} by {} grid".format(self._nrows, self._ncols)
def _repr_option(self, key):
"""
Metadata about the ``_repr_()`` output.
See :meth:`sage.structure.parent._repr_option` for details.
EXAMPLES::
sage: M = SixVertexModel(2, boundary_conditions='ice')
sage: M._repr_option('element_ascii_art')
True
"""
if key == 'element_ascii_art':
return True
return Parent._repr_option(self, key)
def _element_constructor_(self, x):
"""
Construct an element of ``self``.
EXAMPLES::
sage: M = SixVertexModel(2, boundary_conditions='ice')
sage: M([[3,1],[5,3]])
^ ^
| |
--> # <- # <--
| ^
V |
--> # -> # <--
| |
V V
"""
if isinstance(x, SixVertexConfiguration):
if x.parent() is not self:
return self.element_class(self, tuple(x))
return x
verts = ['LR', 'LU', 'LD', 'UD', 'UR', 'RD']
elt = []
for row in x:
elt.append([])
for entry in row:
if entry in verts:
elt[-1].append(verts.index(entry))
elif entry in range(6):
elt[-1].append(entry)
else:
raise ValueError("invalid entry")
elt[-1] = tuple(elt[-1])
return self.element_class(self, tuple(elt))
Element = SixVertexConfiguration
def __iter__(self):
"""
Iterate through ``self``.
EXAMPLES::
sage: M = SixVertexModel(2, boundary_conditions='ice')
sage: list(M)
[
^ ^ ^ ^
| | | |
--> # <- # <-- --> # -> # <--
| ^ ^ |
V | | V
--> # -> # <-- --> # <- # <--
| | | |
V V , V V
]
"""
# Boundary conditions ordered URDL
# The top row boundary condition of True is a downward arrow
# The left condition of True is a right arrow
# verts = ['LR', 'LU', 'LD', 'UD', 'UR', 'RD']
next_top = [False, False, True, True, False, True]
next_left = [True, False, False, False, True, True]
check_top = [True, False, True, False, False, True]
check_left = [False, False, False, True, True, True]
bdry = [self._bdry_cond[0]]
lbd = list(self._bdry_cond[3]) + [None] # Dummy
left = [ [lbd[0]] ]
cur = [[-1]]
n = self._nrows
m = self._ncols
# [[3, 1], [5, 3]]
# [[4, 3], [3, 2]]
while len(cur) > 0:
# If we're at the last row
if len(cur) > n:
cur.pop()
left.pop()
# Check if all our bottom boundry conditions are statisfied
if all(x is not self._bdry_cond[2][i]
for i,x in enumerate(bdry[-1])):
yield self.element_class(self, tuple(tuple(x) for x in cur))
bdry.pop()
# Find the next row
row = cur[-1]
l = left[-1]
i = len(cur) - 1
while len(row) > 0:
row[-1] += 1
# Check to see if we have more vertices
if row[-1] > 5:
row.pop()
l.pop()
continue
# Check to see if we can add the vertex
if (check_left[row[-1]] is l[-1] or l[-1] is None) \
and (check_top[row[-1]] is bdry[-1][len(row)-1]
or bdry[-1][len(row)-1] is None):
if len(row) != m:
l.append(next_left[row[-1]])
row.append(-1)
# Check the right bdry condition since we are at the rightmost entry
elif next_left[row[-1]] is not self._bdry_cond[1][i]:
bdry.append([next_top[x] for x in row])
cur.append([-1])
left.append([lbd[i+1]])
break
# If we've killed this row, backup
if len(row) == 0:
cur.pop()
bdry.pop()
left.pop()
def boundary_conditions(self):
"""
Return the boundary conditions of ``self``.
EXAMPLES::
sage: M = SixVertexModel(2, boundary_conditions='ice')
sage: M.boundary_conditions()
((False, False), (True, True), (False, False), (True, True))
"""
return self._bdry_cond
def partition_function(self, beta, epsilon):
r"""
Return the partition function of ``self``.
The partition function of a 6 vertex model is defined by:
.. MATH::
Z = \sum_{\nu} e^{-\beta E(\nu)}
where we sum over all configurations and `E` is the energy function.
The constant `\beta` is known as the *inverse temperature* and is
equal to `1 / k_B T` where `k_B` is Boltzmann's constant and `T` is
the system's temperature.
INPUT:
- ``beta`` -- the inverse temperature constant `\beta`
- ``epsilon`` -- the energy constants, see
:meth:`~sage.combinat.six_vertex_model.SixVertexConfiguration.energy()`
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: M.partition_function(2, [1,2,1,2,1,2])
e^(-24) + 2*e^(-28) + e^(-30) + 2*e^(-32) + e^(-36)
REFERENCES:
:wikipedia:`Partition_function_(statistical_mechanics)`
"""
from sage.functions.log import exp
return sum(exp(-beta * nu.energy(epsilon)) for nu in self)
class SquareIceModel(SixVertexModel):
r"""
The square ice model.
The square ice model is a 6 vertex model on an `n \times n` grid with
the boundary conditions that the top and bottom boundaries are pointing
outward and the left and right boundaries are pointing inward. These
boundary conditions are also called domain wall boundary conditions.
Configurations of the 6 vertex model with domain wall boundary conditions
are in bijection with alternating sign matrices.
"""
    def __init__(self, n):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: M = SixVertexModel(3, boundary_conditions='ice')
            sage: TestSuite(M).run()
        """
        # Domain wall boundary conditions, ordered U, R, D, L: top/bottom
        # arrows point outward (False), left/right arrows point inward (True).
        boundary_conditions = ((False,)*n, (True,)*n)*2
        SixVertexModel.__init__(self, n, n, boundary_conditions)
def from_alternating_sign_matrix(self, asm):
"""
Return a configuration from the alternating sign matrix ``asm``.
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: asm = AlternatingSignMatrix([[0,1,0],[1,-1,1],[0,1,0]])
sage: M.from_alternating_sign_matrix(asm)
^ ^ ^
| | |
--> # -> # <- # <--
^ | ^
| V |
--> # <- # -> # <--
| ^ |
V | V
--> # -> # <- # <--
| | |
V V V
TESTS::
sage: M = SixVertexModel(5, boundary_conditions='ice')
sage: ASM = AlternatingSignMatrices(5)
sage: all(M.from_alternating_sign_matrix(x.to_alternating_sign_matrix()) == x
....: for x in M)
True
sage: all(M.from_alternating_sign_matrix(x).to_alternating_sign_matrix() == x
....: for x in ASM)
True
"""
if asm.parent().size() != self._nrows:
raise ValueError("mismatched size")
#verts = ['LR', 'LU', 'LD', 'UD', 'UR', 'RD']
ret = []
bdry = [False]*self._nrows # False = up
for row in asm.to_matrix():
cur = []
right = True # True = right
for j,entry in enumerate(row):
if entry == -1:
cur.append(0)
right = True
bdry[j] = False
elif entry == 1:
cur.append(3)
right = False
bdry[j] = True
else: # entry == 0
if bdry[j]:
if right:
cur.append(5)
else:
cur.append(2)
else:
if right:
cur.append(4)
else:
cur.append(1)
ret.append(tuple(cur))
return self.element_class(self, tuple(ret))
class Element(SixVertexConfiguration):
"""
An element in the square ice model.
"""
@combinatorial_map(name='to alternating sign matrix')
def to_alternating_sign_matrix(self):
"""
Return an alternating sign matrix of ``self``.
.. SEEALSO::
:meth:`~sage.combinat.six_vertex_model.SixVertexConfiguration.to_signed_matrix()`
EXAMPLES::
sage: M = SixVertexModel(4, boundary_conditions='ice')
sage: M[6].to_alternating_sign_matrix()
[1 0 0 0]
[0 0 0 1]
[0 0 1 0]
[0 1 0 0]
sage: M[7].to_alternating_sign_matrix()
[ 0 1 0 0]
[ 1 -1 1 0]
[ 0 1 -1 1]
[ 0 0 1 0]
"""
from sage.combinat.alternating_sign_matrix import AlternatingSignMatrix #AlternatingSignMatrices
#ASM = AlternatingSignMatrices(self.parent()._nrows)
#return ASM(self.to_signed_matrix())
return AlternatingSignMatrix(self.to_signed_matrix())
| [
81,
37811,
198,
21447,
4643,
16886,
9104,
198,
37811,
198,
198,
6738,
35021,
13,
301,
5620,
13,
8000,
1330,
16774,
198,
6738,
35021,
13,
301,
5620,
13,
34642,
62,
15603,
341,
1330,
30015,
40171,
341,
198,
6738,
35021,
13,
301,
5620,
1... | 1.792524 | 14,286 |
import numpy as np
import random
import scipy
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
220,
198,
11748,
629,
541,
88,
628,
198
] | 3.0625 | 16 |
from builtins import object
from cloudmesh_base.base import HEADING
from cloudmesh_pbs.database import pbs_db, pbs_shelve
import os
| [
6738,
3170,
1040,
1330,
2134,
198,
6738,
6279,
76,
5069,
62,
8692,
13,
8692,
1330,
39837,
2751,
198,
6738,
6279,
76,
5069,
62,
79,
1443,
13,
48806,
1330,
279,
1443,
62,
9945,
11,
279,
1443,
62,
82,
2978,
303,
198,
11748,
28686,
198
... | 3.069767 | 43 |
# -*- coding: utf-8 -*-
"""Unit tests for Response class"""
import pytest
from ga4gh.refget.http.response import Response
from ga4gh.refget.http.status_codes import StatusCodes as SC
from ga4gh.refget.config.constants import CONTENT_TYPE_JSON_REFGET_VND, \
    CONTENT_TYPE_TEXT_REFGET_VND
# Response bodies: a raw sequence, an error payload, and service-info JSON.
# NOTE(review): the parentheses do not make tuples — these are plain strings.
testdata_body = [
    ("ACGT"),
    ('{"message": "NOTFOUND"}'),
    ('''{"service": {"circular_supported": false}}''')
]
# HTTP status codes exercised by the tests.
testdata_status_code = [
    (SC.OK),
    (SC.PARTIAL_CONTENT),
    (SC.NOT_ACCEPTABLE)
]
# (header name, header value) pairs to set on the response.
testdata_header = [
    ("Content-Type", CONTENT_TYPE_JSON_REFGET_VND),
    ("Content-Type", CONTENT_TYPE_TEXT_REFGET_VND),
    ("Content-Type", "application/json")
]
# (data key, data value) pairs stored in the response's data dict.
testdata_data = [
    ("seqid", "ga4gh:SQ.HKyMuwwEWbdUDXfk5o1EGxGeqBmon6Sp"),
    ("subseq-type", "start-end"),
    ("subseq-type", "range")
]
# Redirect target URLs.
testdata_redirect = [
    ("https://ga4gh.org"),
    ("https://example.com"),
    ("https://anotherexample.com")
]
@pytest.mark.parametrize("body", testdata_body)
@pytest.mark.parametrize("status_code", testdata_status_code)
@pytest.mark.parametrize("key,value", testdata_header)
@pytest.mark.parametrize("key,value", testdata_data)
@pytest.mark.parametrize("url", testdata_redirect)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
26453,
5254,
329,
18261,
1398,
37811,
198,
198,
11748,
12972,
9288,
198,
6738,
31986,
19,
456,
13,
5420,
1136,
13,
4023,
13,
26209,
1330,
18261,
198,
6738,
31986,
... | 2.375984 | 508 |
#!/usr/bin/env python
import time
import pygame
from pygame.locals import *
# Milliseconds between latch polls.
CHECK_LATCHES_INTERVAL = 100
# Custom pygame event id used to schedule periodic latch checks.
CHECK_LATCHES = pygame.USEREVENT + 1
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this file as shown — running the
    # script raises NameError; confirm main is defined/imported elsewhere.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
640,
198,
198,
11748,
12972,
6057,
198,
6738,
12972,
6057,
13,
17946,
874,
1330,
1635,
198,
198,
50084,
62,
43,
11417,
1546,
62,
41358,
23428,
796,
1802,
198,
50084,
62,
43... | 2.534247 | 73 |
from django.shortcuts import redirect
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
628
] | 4.333333 | 9 |
import numpy as np
# import _proj as proj_lib
import scipy.sparse as sparse
import scipy.sparse.linalg as splinalg
# SCS cone identifiers (SCS's single-letter cone keys).
ZERO = "f"       # zero cone
POS = "l"        # nonnegative orthant
SOC = "q"        # second-order cone
PSD = "s"        # positive semidefinite cone
EXP = "ep"       # exponential cone
EXP_DUAL = "ed"  # dual exponential cone
POWER = "p"      # power cone
# The ordering of CONES matches SCS.
CONES = [ZERO, POS, SOC, PSD, EXP, EXP_DUAL, POWER]


def parse_cone_dict(cone_dict):
    """Return ``(cone, value)`` pairs from an SCS-style cone dictionary.

    Cones absent from ``cone_dict`` are omitted; the result follows the
    canonical SCS ordering in ``CONES``, not the dict's insertion order.
    """
    present = (cone for cone in CONES if cone in cone_dict)
    return [(cone, cone_dict[cone]) for cone in present]
def as_block_diag_linear_operator(matrices):
    """Return the block-diagonal LinearOperator of ``matrices``.

    :param matrices: SciPy sparse matrices and/or ``LinearOperator``s; each
        becomes one diagonal block, in order.
    :return: a ``scipy.sparse.linalg.LinearOperator`` of shape
        ``(sum of row counts, sum of column counts)`` with working
        ``matvec`` and ``rmatvec``.
    """
    linear_operators = [splinalg.aslinearoperator(
        op) if not isinstance(op, splinalg.LinearOperator) else op
        for op in matrices]
    nrows = [op.shape[0] for op in linear_operators]
    ncols = [op.shape[1] for op in linear_operators]
    m, n = sum(nrows), sum(ncols)
    # Offsets of each block inside the stacked input/output vectors.
    row_indices = np.append(0, np.cumsum(nrows))
    col_indices = np.append(0, np.cumsum(ncols))

    # The original returned LinearOperator(..., matvec=matvec, rmatvec=rmatvec)
    # without ever defining matvec/rmatvec, raising NameError; define them.
    def matvec(x):
        """Apply each block to its slice of ``x`` and stack the results."""
        x = np.asarray(x).ravel()
        y = np.zeros(m)
        for i, op in enumerate(linear_operators):
            y[row_indices[i]:row_indices[i + 1]] = op.matvec(
                x[col_indices[i]:col_indices[i + 1]])
        return y

    def rmatvec(y):
        """Apply each block's adjoint to its slice of ``y`` and stack."""
        y = np.asarray(y).ravel()
        x = np.zeros(n)
        for i, op in enumerate(linear_operators):
            x[col_indices[i]:col_indices[i + 1]] = op.rmatvec(
                y[row_indices[i]:row_indices[i + 1]])
        return x

    return splinalg.LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec)
def unvec_symm(x, dim):
    """Return the ``dim``-by-``dim`` symmetric matrix whose SCS vectorization is ``x``.

    ``x`` has length ``dim * (dim + 1) / 2`` and stores the lower triangle in
    column-major order with off-diagonal entries scaled by ``sqrt(2)``, as in
    SCS:

        vec(X) = (X11, sqrt(2)*X21, ..., sqrt(2)*Xk1, X22, sqrt(2)*X32, ..., Xkk)
    """
    X = np.zeros((dim, dim))
    # triu_indices enumerates the upper triangle in row-major order; swapping
    # the two index arrays targets the lower triangle in the column-major
    # order used by the SCS vectorization.
    rows, cols = np.triu_indices(dim)
    X[cols, rows] = x
    X += X.T                  # symmetrize; the diagonal is now doubled
    X /= np.sqrt(2)           # undo the sqrt(2) scaling of the off-diagonals
    diag = np.diag_indices(dim)
    X[diag] = np.diagonal(X) * np.sqrt(2) / 2   # fix up the doubled diagonal
    return X
def vec_symm(X):
"""Returns a vectorized representation of a symmetric matrix `X`.
Vectorization (including scaling) as per SCS.
vec(X) = (X11, sqrt(2)*X21, ..., sqrt(2)*Xk1, X22, sqrt(2)*X32, ..., Xkk)
"""
X = X.copy()
X *= np.sqrt(2)
X[np.diag_indices(X.shape[0])] = np.diagonal(X) / np.sqrt(2)
col_idx, row_idx = np.triu_indices(X.shape[0])
return X[(row_idx, col_idx)]
def _proj(x, cone, dual=False):
"""Returns the projection of x onto a cone or its dual cone."""
if cone == ZERO:
return x if dual else np.zeros(x.shape)
elif cone == POS:
return np.maximum(x, 0)
elif cone == SOC:
# print("Second Order Cone: x = {}".format(x))
t = x[0]
z = x[1:]
norm_z = np.linalg.norm(z, 2)
if norm_z <= t or np.isclose(norm_z, t, atol=1e-8):
return x
elif norm_z <= -t:
return np.zeros(x.shape)
else:
return 0.5 * (1 + t / norm_z) * np.append(norm_z, z)
elif cone == PSD:
dim = psd_dim(x)
X = unvec_symm(x, dim)
lambd, Q = np.linalg.eig(X)
return vec_symm(Q @ sparse.diags(np.maximum(lambd, 0)) @ Q.T)
elif cone == EXP:
raise NotImplementedError("exp cone is not implemented here yet {}".format(EXP))
num_cones = int(x.size / 3)
out = np.zeros(x.size)
offset = 0
for _ in range(num_cones):
x_i = x[offset:offset + 3]
r, s, t, _ = proj_lib.proj_exp_cone(
float(x_i[0]), float(x_i[1]), float(x_i[2]))
out[offset:offset + 3] = np.array([r, s, t])
offset += 3
# via Moreau
return x - out if dual else out
else:
raise NotImplementedError(f"{cone} not implemented")
def _dproj(x, cone, dual=False):
"""Returns the derivative of projecting onto a cone (or its dual cone) at x.
The derivative is represented as either a sparse matrix or linear operator.
"""
shape = (x.size, x.size)
if cone == ZERO:
return sparse.eye(*shape) if dual else sparse.csc_matrix(shape)
elif cone == POS:
return sparse.diags(.5 * (np.sign(x) + 1), format="csc")
elif cone == SOC:
t = x[0]
z = x[1:]
norm_z = np.linalg.norm(z, 2)
if norm_z <= t:
return sparse.eye(*shape)
elif norm_z <= -t:
return sparse.csc_matrix(shape)
else:
z = z.reshape(z.size)
unit_z = z / norm_z
scale_factor = 1.0 / (2 * norm_z)
t_plus_norm_z = t + norm_z
# derivative is symmetric
return splinalg.LinearOperator(shape, matvec=matvec,
rmatvec=matvec)
elif cone == PSD:
dim = psd_dim(x)
X = unvec_symm(x, dim)
lambd, Q = np.linalg.eig(X)
if np.all(lambd >= 0):
matvec = lambda y: y
return splinalg.LinearOperator(shape, matvec=matvec, rmatvec=matvec)
# Sort eigenvalues, eigenvectors in ascending order, so that
# we can obtain the index k such that lambd[k-1] < 0 < lambd[k]
idx = lambd.argsort()
lambd = lambd[idx]
Q = Q[:, idx]
k = np.searchsorted(lambd, 0)
B = np.zeros((dim, dim))
pos_gt_k = np.outer(np.maximum(lambd, 0)[k:], np.ones(k))
neg_lt_k = np.outer(np.ones(dim - k), np.minimum(lambd, 0)[:k])
B[k:, :k] = pos_gt_k / (neg_lt_k + pos_gt_k)
B[:k, k:] = B[k:, :k].T
B[k:, k:] = 1
matvec = lambda y: vec_symm(
Q @ (B * (Q.T @ unvec_symm(y, dim) @ Q)) @ Q.T)
return splinalg.LinearOperator(shape, matvec=matvec, rmatvec=matvec)
elif cone == EXP:
raise NotImplementedError("EXP cone is not implemented here yet {}".format(EXP))
num_cones = int(x.size / 3)
ops = []
offset = 0
for _ in range(num_cones):
x_i = x[offset:offset + 3]
offset += 3
if in_exp(x_i):
ops.append(splinalg.aslinearoperator(sparse.eye(3)))
elif in_exp_dual(-x_i):
ops.append(splinalg.aslinearoperator(
sparse.csc_matrix((3, 3))))
elif x_i[0] < 0 and x_i[1] and not np.isclose(x_i[2], 0):
matvec = lambda y: np.array([
y[0], 0, y[2] * 0.5 * (1 + np.sign(x_i[2]))])
ops.append(splinalg.LinearOperator((3, 3), matvec=matvec,
rmatvec=matvec))
else:
# TODO(akshayka): Cache projection if this is a bottleneck
# TODO(akshayka): y_st is sometimes zero ...
x_st, y_st, _, mu = proj_lib.proj_exp_cone(x_i[0], x_i[1],
x_i[2])
if np.equal(y_st, 0):
y_st = np.abs(x_st)
exp_x_y = np.exp(x_st / y_st)
mu_exp_x_y = mu * exp_x_y
x_mu_exp_x_y = x_st * mu_exp_x_y
M = np.zeros((4, 4))
M[:, 0] = np.array([
1 + mu_exp_x_y / y_st, -x_mu_exp_x_y / (y_st ** 2),
0,
exp_x_y])
M[:, 1] = np.array([
-x_mu_exp_x_y / (y_st ** 2),
1 + x_st * x_mu_exp_x_y / (y_st ** 3),
0, exp_x_y - x_st * exp_x_y / y_st])
M[:, 2] = np.array([0, 0, 1, -1])
M[:, 3] = np.array([
exp_x_y, exp_x_y - x_st * exp_x_y / y_st, -1, 0])
ops.append(splinalg.aslinearoperator(np.linalg.inv(M)[:3, :3]))
D = as_block_diag_linear_operator(ops)
if dual:
return splinalg.LinearOperator((x.size, x.size),
matvec=lambda v: v - D.matvec(v),
rmatvec=lambda v: v - D.rmatvec(v))
else:
return D
else:
raise NotImplementedError(f"{cone} not implemented")
def pi(x, cones, dual=False):
"""Projects x onto product of cones (or their duals)
Args:
x: NumPy array (with PSD data formatted in SCS convention)
cones: list of (cone name, size)
dual: whether to project onto the dual cone
Returns:
NumPy array that is the projection of `x` onto the (dual) cones
"""
projection = np.zeros(x.shape)
offset = 0
for cone, sz in cones:
# ===============================
# print(cone, sz) # only uncomment for debug
sz = sz if isinstance(sz, (tuple, list)) else (sz,)
if sum(sz) == 0:
continue
for dim in sz:
if cone == PSD:
dim = vec_psd_dim(dim)
elif cone == EXP:
raise NotImplementedError("exp cone is not supported here yet {}".format(EXP))
dim *= 3
# ===============================
# print("offset:", offset)
# ===============================
projection[offset:offset + dim] = _proj(
x[offset:offset + dim], cone, dual=dual)
offset += dim
# ===============================
# debug for deep analysis
# ===============================
# print("cone type: {:s}, offset: {:d} ".format(cone, offset))
return projection
def dpi(x, cones, dual=False):
"""Derivative of projection onto product of cones (or their duals), at x
Args:
x: NumPy array
cones: list of (cone name, size)
dual: whether to project onto the dual cone
Returns:
An abstract linear map representing the derivative, with methods
`matvec` and `rmatvec`
"""
dprojections = []
offset = 0
for cone, sz in cones:
sz = sz if isinstance(sz, (tuple, list)) else (sz,)
if sum(sz) == 0:
continue
for dim in sz:
if cone == PSD:
dim = vec_psd_dim(dim)
elif cone == EXP:
raise NotImplementedError("exp cone is not supported here yet {}".format(EXP))
dim *= 3
dprojections.append(
_dproj(x[offset:offset + dim], cone, dual=dual))
offset += dim
return as_block_diag_linear_operator(dprojections)
| [
11748,
299,
32152,
355,
45941,
198,
2,
1330,
4808,
1676,
73,
355,
386,
73,
62,
8019,
198,
11748,
629,
541,
88,
13,
82,
29572,
355,
29877,
198,
11748,
629,
541,
88,
13,
82,
29572,
13,
75,
1292,
70,
355,
4328,
1292,
70,
198,
198,
... | 1.908954 | 5,327 |
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='Flask-WTF-Decorators',
version='0.1.2',
license='MIT',
url='https://github.com/simpleapples/flask-wtf-decorators/',
author='Zhiya Zang',
author_email='zangzhiya@gmail.com',
description='Decorators for flask-wtf',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(exclude=['tests']),
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License'
],
include_package_data=True,
platforms='any',
install_requires=['Flask>=0.7', 'Flask-WTF>=0.9'],
)
| [
6738,
28686,
1330,
3108,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
628,
198,
1456,
796,
3108,
13,
397,
2777,
776,
7,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
4480,
1280,
7,
6978,
13,
22179,
7,
1456,
... | 2.524631 | 406 |
from astrolib.solar_system.celestial_objects import CelestialObject
from astrolib.solar_system.motion_models import OriginFixedMotionModel
from astrolib.solar_system.orientation_models import InertiallyFixedOrientationModel
Sun = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Mercury = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Venus = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Earth = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Mars = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Jupiter = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Saturn = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Neptune = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Uranus = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Pluto = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
| [
6738,
6468,
3225,
571,
13,
82,
6192,
62,
10057,
13,
5276,
21711,
62,
48205,
1330,
37231,
10267,
198,
6738,
6468,
3225,
571,
13,
82,
6192,
62,
10057,
13,
38714,
62,
27530,
1330,
19349,
13715,
45740,
17633,
198,
6738,
6468,
3225,
571,
1... | 3.948339 | 271 |
#python program - make simple calculator
print("1. adiition")
print("2. subtraction")
print("3. multiplication")
print("4. division")
print("5. exit")
choice = int(input("enter your choice: "))
if (choice>=1 and choice<=4):
print("enter two numbers: ")
num1 = int(input())
num2 = int(input())
if choice ==1:
res = num1 + num2
print("result = ", res)
elif choice == 2:
res = num1 - num2
print("result=",res)
elif choice==3:
res=num1*num2
print("result=",res)
else:
res=num1/num2
print("result=",res)
elif choice==5:
exit()
else:
print("wrong input..!!")
| [
2,
29412,
1430,
532,
787,
2829,
28260,
198,
198,
4798,
7203,
16,
13,
512,
72,
653,
4943,
198,
4798,
7203,
17,
13,
13284,
7861,
4943,
198,
4798,
7203,
18,
13,
48473,
4943,
198,
4798,
7203,
19,
13,
7297,
4943,
198,
4798,
7203,
20,
1... | 2.042857 | 350 |
"""
Code for a database of MOA light curves including non-microlensing, single lensing, and binary lensing collcetions.
"""
from ramjet.data_interface.moa_data_interface import MoaDataInterface
from ramjet.photometric_database.derived.moa_survey_light_curve_collection import MoaSurveyLightCurveCollection
from ramjet.photometric_database.standard_and_injected_light_curve_database import \
StandardAndInjectedLightCurveDatabase, OutOfBoundsInjectionHandlingMethod, BaselineFluxEstimationMethod
class MoaSurveyNoneSingleAndBinaryDatabase(StandardAndInjectedLightCurveDatabase):
"""
A class for a database of MOA light curves including non-microlensing, single lensing, and binary lensing
collections.
"""
moa_data_interface = MoaDataInterface()
| [
37811,
198,
10669,
329,
257,
6831,
286,
13070,
32,
1657,
23759,
1390,
1729,
12,
24055,
75,
26426,
11,
2060,
10317,
278,
11,
290,
13934,
10317,
278,
2927,
66,
316,
507,
13,
198,
37811,
198,
6738,
15770,
31173,
13,
7890,
62,
39994,
13,
... | 3.411504 | 226 |
valor = str(input('Valor para saque: ').strip())
last = valor[len(valor) - 1]
valor = int(valor)
if last == '1':
valor += 1
print(f'o valor precisou ser corrigido para R${valor},00 pois não há notas de R$ 1,00 disponíveis')
print()
tot100 = tot50 = tot20 = tot10 = tot5 = tot2 = 0
while True:
if valor // 100 > 0:
tot100 += 1
valor -= 100
elif valor // 50 > 0:
tot50 += 1
valor -= 50
elif valor // 20 > 0:
tot20 += 1
valor -= 20
elif valor // 10 > 0:
tot10 += 1
valor -= 10
elif valor // 5 > 0 and ((valor % 2) == 1):
tot5 += 1
valor -= 5
elif valor // 2 > 0 and ((valor % 2) == 0):
tot2 += 1
valor -= 2
else:
break
if tot100 > 0:
if tot100 == 1:
A = 'nota'
else:
A = 'notas'
print(f'{tot100} {A} de R$ 100,00')
if tot50 > 0:
if tot50 == 1:
A = 'nota'
else:
A = 'notas'
print(f'{tot50} {A} de R$ 50,00')
if tot20 > 0:
if tot20 == 1:
A = 'nota'
else:
A = 'notas'
print(f'{tot20} {A} de R$ 20,00')
if tot10 > 0:
if tot10 == 1:
A = 'nota'
else:
A = 'notas'
print(f'{tot10} {A} de R$ 10,00')
if tot5 > 0:
if tot5 == 1:
A = 'nota'
else:
A = 'notas'
print(f'{tot5} {A} de R$ 5,00')
if tot2 > 0:
if tot2 == 1:
A = 'nota'
else:
A = 'notas'
print(f'{tot2} {A} de R$ 2,00')
print()
| [
2100,
273,
796,
965,
7,
15414,
10786,
7762,
273,
31215,
473,
4188,
25,
705,
737,
36311,
28955,
201,
198,
12957,
796,
1188,
273,
58,
11925,
7,
2100,
273,
8,
532,
352,
60,
201,
198,
2100,
273,
796,
493,
7,
2100,
273,
8,
201,
198,
... | 1.703784 | 925 |
#!/usr/bin/python3
'''
Module for Whitelisting Access Points
'''
# -*- coding: utf-8 -*-
# (C) Copyright 2020 Hewlett Packard Enterprise Development LP.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aos_cap_whitelist
version_added: 2.8.1
short_description: Whitelist Campus Access Points (CAP)
description: Module for whitelisting Campus Access Points on the controller under
the Mobility Master or a Standalone Controller
options:
action:
description:
- Type of action to be performed for whitelisting Campus Acess Points
require: true
choices:
- add
- delete
type: str
ap_name:
description:
- Name you would like to give to the the Access Point
required: false
type: str
ap_group:
description:
- Name of AP group where the Access Point needs to be added
required: false
type: str
mac_address:
description:
- MAC address of the Campus Access Point
required: true
type: str
description:
description:
- Short description for the Access Point
required: false
type: str
"""
EXAMPLES = """
#Usage Examples
- name: Whitelist an Access Point to default AP-Group
aos_cap_whitelist:
action: add
ap_name: test-ap-1
ap_group: default
mac_address: "ab:32:32:32:32:32"
description: Boston office, building 6, 2nd floor
- name: Whitelist an Access Point to configured AP-Group
aos_cap_whitelist:
ap_name: test-ap-2
ap_group: test-ap-group
mac_address: "zx:32:32:32:32:33"
description: This is just for testing
- name: Delete an Access Point from Whitelist
aos_cap_whitelist:
action: delete
mac_address: "ab:32:32:32:32:32"
- name: Delete an Access Point from Whitelist
aos_cap_whitelist:
ap_name: test-ap-2
ap_group: test-ap-group
mac_address: "zx:32:32:32:32:33"
description: This is just for testing
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aos_http import AosApi
if __name__ == '__main__':
main()
| [
198,
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
7061,
6,
198,
26796,
329,
13183,
417,
9665,
8798,
11045,
198,
7061,
6,
198,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
357,
34,
8,
15069,
12131... | 2.551959 | 1,174 |
from configuration import *
from features.base import BaseFeature, Feature, ConstantFeature
from utils import _pprint, get_single_column
from pandas import concat, DataFrame, Series, Index
import numpy as np
| [
6738,
8398,
1330,
1635,
198,
6738,
3033,
13,
8692,
1330,
7308,
38816,
11,
27018,
11,
20217,
38816,
198,
6738,
3384,
4487,
1330,
4808,
381,
22272,
11,
651,
62,
29762,
62,
28665,
198,
6738,
19798,
292,
1330,
1673,
265,
11,
6060,
19778,
... | 3.944444 | 54 |
# coding: utf-8
from django import forms
from .models import Transaction
from .models import Customer
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
764,
27530,
1330,
45389,
198,
6738,
764,
27530,
1330,
22092,
628
] | 3.961538 | 26 |
import wx
from guikit.plugins import PluginBase, Tab
| [
11748,
266,
87,
198,
198,
6738,
915,
1134,
270,
13,
37390,
1330,
42636,
14881,
11,
16904,
628
] | 3.235294 | 17 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PreventionDeidentifyTemplateArgs', 'PreventionDeidentifyTemplate']
@pulumi.input_type
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.496503 | 143 |
from sklearn.preprocessing import MinMaxScaler
from numpy import loadtxt
import numpy as np
import matplotlib as plt
import pandas as pd
from numpy import reshape
data = loadtxt('data-time.txt')
print(data)
#redata = np.reshape(-1,1)
#print(redata)
scaler = MinMaxScaler()
print(scaler.fit(data))
MinMaxScaler(copy=True, feature_range=(0, 100))
print(scaler.data_max_)
a = scaler.transform(data)
np.savetxt('time-scale.txt', a,fmt='%.6f')
print(a)
#print(scaler.transform([[2, 2]]))
| [
6738,
1341,
35720,
13,
3866,
36948,
1330,
1855,
11518,
3351,
36213,
198,
6738,
299,
32152,
1330,
3440,
14116,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
355,
458,
83,
220,
198,
11748,
19798,
292,
355,
279,
67,
19... | 2.59893 | 187 |
#Copyright(c) 2015, Nathan Miller
# The Proving Ground, http://theprovingground.org
#Edited and modified by Mehmet Cenk Tunaboylu, to better suit his needs. Removed boundary curves extraction. Added department extraction.
import clr
# Import RevitAPI
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *
# Import DocumentManager and TransactionManager
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
# Import ToDSType(bool) extension method
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.ImportExtensions(Revit.GeometryConversion)
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
#The input to this node will be stored in the IN[0] variable.
doc = DocumentManager.Instance.CurrentDBDocument
app = DocumentManager.Instance.CurrentUIApplication.Application
toggle = IN[0]
output = []
rooms = ['TYPE']
names = ['ROOM NAME']
numbers = ['ROOM NUMBER']
areas = ['AREA']
levels = ['LEVEL']
locations = ['LOCATION']
elementids = ['ELEMENT ID']
uniqueids = ['UNIQUE ID']
roomStyles = ['ROOM STYLE']
baseFinishes = ['BASE FINISH']
floorFinishes = ['FLOOR FINISH']
wallFinishes = ['WALL FINISH']
ceilingFinishes = ['CEILING FINISH']
if toggle == True:
collector = FilteredElementCollector(doc)
collector.OfCategory(BuiltInCategory.OST_Rooms)
famtypeitr = collector.GetElementIdIterator()
famtypeitr.Reset()
for item in famtypeitr:
elmID = item
eleminst = doc.GetElement(elmID)
#print eleminst
if eleminst.Area > 0:
room = eleminst
roomname = ''
for p in room.Parameters:
if p.Definition.Name == 'Name':
roomname = p.AsString()
if p.Definition.Name == 'Level':
level = p.AsValueString()
if (level is None):
level = p.AsString()
if p.Definition.Name == 'Base Finish':
baseFinish = p.AsValueString()
if (baseFinish is None):
baseFinish = p.AsString()
if p.Definition.Name == 'Wall Finish':
wallFinish = p.AsValueString()
if (wallFinish is None):
wallFinish = p.AsString()
if p.Definition.Name == 'Floor Finish':
floorFinish = p.AsValueString()
if (floorFinish is None):
floorFinish = p.AsString()
if p.Definition.Name == 'Ceiling Finish':
ceilingFinish = p.AsValueString()
if (ceilingFinish is None):
ceilingFinish = p.AsString()
if p.Definition.Name == 'Room Style':
roomStyle = p.AsValueString()
if (roomStyle is None):
roomStyle = p.AsString()
number = eleminst.Number
area = eleminst.Area
location = eleminst.Location.Point.ToPoint()
elementid = eleminst.Id.ToString()
uniqueid = eleminst.UniqueId
uniqueids.append(uniqueid)
rooms.append(room)
numbers.append("xxx_"+number)
names.append(roomname)
areas.append(area)
levels.append(level)
roomStyles.append(roomStyle)
baseFinishes.append(baseFinish)
floorFinishes.append(floorFinish)
wallFinishes.append(wallFinish)
ceilingFinishes.append(ceilingFinish)
locations.append(location)
output.append(uniqueids)
output.append(rooms)
output.append(numbers)
output.append(names)
output.append(areas)
output.append(levels)
output.append(roomStyles)
output.append(baseFinishes)
output.append(floorFinishes)
output.append(wallFinishes)
output.append(ceilingFinishes)
output.append(locations)
#Assign your output to the OUT variable
OUT = output | [
2,
15269,
7,
66,
8,
1853,
11,
18106,
7920,
201,
198,
2,
383,
1041,
1075,
13706,
11,
2638,
1378,
1169,
1676,
1075,
2833,
13,
2398,
201,
198,
2,
45882,
290,
9518,
416,
37070,
4164,
327,
268,
74,
13932,
397,
726,
2290,
11,
284,
1365,... | 2.511848 | 1,477 |
#!python3
# -*- coding: utf-8 -*-
# author: yinkaisheng@foxmail.com
import os
import sys
import json
import ctypes
import pickle
import shutil
# import socket
import zipfile
import datetime
from typing import Any, Callable, Iterator, Dict, List, Tuple
_SelfFileName = os.path.split(__file__)[1]
def getStrBetween(src: str, left: str, right: str = None, start: int = 0, end: int = None) -> Tuple[str, int]:
'''return tuple (str, index), index is -1 if not found'''
if left:
s1start = src.find(left, start, end)
if s1start >= 0:
s1end = s1start + len(left)
if right:
s2start = src.find(right, s1end, end)
if s2start >= 0:
return src[s1end:s2start], s1end
else:
return '', -1
else:
return src[s1end:], s1end
else:
return '', -1
else:
if right:
s2start = src.find(right, end)
if s2start >= 0:
return src[:s2start], 0
else:
return '', -1
else:
return '', -1
TreeNode = Any
def walkTree(root, getChildren: Callable[[TreeNode], List[TreeNode]] = None,
getFirstChild: Callable[[TreeNode], TreeNode] = None, getNextSibling: Callable[[TreeNode], TreeNode] = None,
yieldCondition: Callable[[TreeNode, int], bool] = None, includeRoot: bool = False, maxDepth: int = 0xFFFFFFFF) -> Iterator:
"""
Walk a tree not using recursive algorithm.
root: a tree node.
getChildren: Callable[[TreeNode], List[TreeNode]], function(treeNode: TreeNode) -> List[TreeNode].
getNextSibling: Callable[[TreeNode], TreeNode], function(treeNode: TreeNode) -> TreeNode.
getNextSibling: Callable[[TreeNode], TreeNode], function(treeNode: TreeNode) -> TreeNode.
yieldCondition: Callable[[TreeNode, int], bool], function(treeNode: TreeNode, depth: int) -> bool.
includeRoot: bool, if True yield root first.
maxDepth: int, enum depth.
If getChildren is valid, ignore getFirstChild and getNextSibling,
yield 3 items tuple: (treeNode, depth, remain children count in current depth).
If getChildren is not valid, using getFirstChild and getNextSibling,
yield 2 items tuple: (treeNode, depth).
If yieldCondition is not None, only yield tree nodes that yieldCondition(treeNode: TreeNode, depth: int)->bool returns True.
For example:
def GetDirChildren(dir_):
if os.path.isdir(dir_):
return [os.path.join(dir_, it) for it in os.listdir(dir_)]
for it, depth, leftCount in WalkTree('D:\\', getChildren= GetDirChildren):
print(it, depth, leftCount)
"""
if maxDepth <= 0:
return
depth = 0
if getChildren:
if includeRoot:
if not yieldCondition or yieldCondition(root, 0):
yield root, 0, 0
children = getChildren(root)
childList = [children]
while depth >= 0: # or while childList:
lastItems = childList[-1]
if lastItems:
if not yieldCondition or yieldCondition(lastItems[0], depth + 1):
yield lastItems[0], depth + 1, len(lastItems) - 1
if depth + 1 < maxDepth:
children = getChildren(lastItems[0])
if children:
depth += 1
childList.append(children)
del lastItems[0]
else:
del childList[depth]
depth -= 1
elif getFirstChild and getNextSibling:
if includeRoot:
if not yieldCondition or yieldCondition(root, 0):
yield root, 0
child = getFirstChild(root)
childList = [child]
while depth >= 0: # or while childList:
lastItem = childList[-1]
if lastItem:
if not yieldCondition or yieldCondition(lastItem, depth + 1):
yield lastItem, depth + 1
child = getNextSibling(lastItem)
childList[depth] = child
if depth + 1 < maxDepth:
child = getFirstChild(lastItem)
if child:
depth += 1
childList.append(child)
else:
del childList[depth]
depth -= 1
def listDir(path: Tuple[str, bool, str]) -> List[Tuple[str, bool, str]]:
'''returns Tuple[filePath:str, isDir:bool, fileName:str]'''
if path[1]:
files = []
files2 = []
for it in os.listdir(path[0]):
childPath = os.path.join(path[0], it)
if os.path.isdir(childPath):
files.append((childPath, True, it))
else:
files2.append((childPath, False, it))
files.extend(files2)
return files
def copyDir(src: str, dst: str, log: bool = True) -> int:
"""return int, files count"""
if src[-1] == os.path.sep:
src = src[:-1]
if dst[-1] != os.path.sep:
dst = dst + os.sep
srcLen = len(src)
if not os.path.exists(dst):
os.makedirs(dst)
fileCount = 0
for filePath, isDir, fileName, depth, remainCount in walkDir(src):
relativeName = filePath[srcLen + 1:]
dstPath = dst + relativeName
if isDir:
if not os.path.exists(dstPath):
os.makedirs(dstPath)
if log:
print(f'create dir: {dstPath}')
else:
shutil.copyfile(filePath, dstPath) # dstPath's dir must exists, will over write dstPath if dstPath exists
fileCount += 1
if log:
print(f'copy file {fileCount}: {dstPath}')
def renameFilesInDir(src: str, find: str, replace: str, log: bool = True) -> int:
"""return int, files count that are renamed"""
fileCount = 0
for filePath, isDir, fileName, depth, remainCount in walkDir(src):
if not isDir:
newFileName = fileName.replace(find, replace)
if fileName != newFileName:
newFilePath = filePath[:len(filePath) - len(fileName)] + newFileName
if os.path.exists(newFilePath):
os.remove(newFilePath)
os.rename(filePath, newFilePath)
fileCount += 1
if log:
print(f'{fileCount}: {filePath}\n -> {newFilePath}, file renamed')
def walkZip(zipPath: str, getFileObjCondition: Callable[[zipfile.ZipInfo], bool] = None) -> Iterator[Tuple[bool, zipfile.ZipInfo, zipfile.ZipExtFile]]:
"""
getFileObjCondition: getFileObjCondition(fileName:str)->bool
return tuple(isDir:bool, zipInfo:ZipInfo, fileObj:ZipExtFile)
zipInfo.is_dir(), zipInfo.filename, ...
"""
with zipfile.ZipFile(zipPath, 'r') as zin:
for zipInfo in zin.infolist():
if zipInfo.is_dir():
yield True, zipInfo, None
else:
if getFileObjCondition and getFileObjCondition(zipInfo):
with zin.open(zipInfo.filename, 'r') as fin:
yield False, zipInfo, fin
# shutil.copyfileobj(fin, fout, 512000) # avoid too much memory, default 1MB if pass 0 to 3rd parameter
else:
yield False, zipInfo, None
def extractOneFileInZip(zipPath: str, dstDir: str, fileEnd: str, log: bool = True) -> bool:
"""
fileEnd: str.
dstDir: str, should end with \\(not must).
"""
if dstDir[-1] != os.sep:
dstDir = dstDir + os.sep
if not os.path.exists(dstDir):
os.makedirs(dstDir)
for isDir, zipInfo, zipFile in walkZip(zipPath, lambda zInfo: zInfo.filename.endswith(fileEnd)):
if zipFile:
dstPath = dstDir + os.path.basename(fileEnd)
with open(dstPath, 'wb') as fout:
shutil.copyfileobj(zipFile, fout)
if log:
print(f'copy file: {dstPath}')
return True
return False
def extractZip(zipPath: str, dstDir: str, subDir: str = None, log: bool = True) -> int:
"""
subDir: str, if None, extrac all contents to dstDir, if not None, must not be end with / and can not use \\ in subDir.
dstDir: str, should end with \\(not must).
returns int, files count.
"""
if dstDir[-1] != os.sep:
dstDir = dstDir + os.sep
fileCount = 0
if not subDir:
for isDir, zipInfo, zipFile in walkZip(zipPath, lambda zInfo: True):
if isDir:
dstPath = dstDir + zipInfo.filename
if not os.path.exists(dstPath):
os.makedirs(dstPath)
if log:
print(f'create dir: {dstPath}')
else:
dstPath = dstDir + zipInfo.filename
with open(dstPath, 'wb') as fout:
shutil.copyfileobj(zipFile, fout)
fileCount += 1
if log:
print(f'copy file {fileCount}: {dstPath}')
return fileCount
foundDir = False
for isDir, zipInfo, zipFile in walkZip(zipPath, checkFunc):
if isDir:
index = zipInfo.filename.find(subDir)
if not foundDir and index >= 0:
foundDir = True
if foundDir:
if index < 0:
break
createDir = dstDir + zipInfo.filename[index + len(subDir) + 1:]
if not os.path.exists(createDir):
os.makedirs(createDir)
if log:
print(f'create dir: {createDir}')
else:
if zipFile:
index = zipInfo.filename.find(subDir)
dstPath = dstDir + zipInfo.filename[index + len(subDir) + 1:]
with open(dstPath, 'wb') as fout:
shutil.copyfileobj(zipFile, fout)
fileCount += 1
if log:
print(f'copy file {fileCount}: {dstPath}')
else:
if foundDir:
break
return fileCount
# def getLocalIP() -> str:
# ip = ''
# try:
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# s.connect(('8.8.8.8', 80))
# ip = s.getsockname()[0]
# finally:
# s.close()
# return ip
if __name__ == '__main__':
print(1, 2, 3)
| [
2,
0,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
1772,
25,
331,
676,
15152,
31753,
31,
12792,
4529,
13,
785,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
269,
19199,
1... | 2.036526 | 5,147 |
import torch
| [
11748,
28034,
628
] | 4.666667 | 3 |
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module of utility functions.
Functions:
rotation_matrix: Returns a two-dimensional rotation matrix.
"""
import numpy as np
def rotation_matrix(theta):
"""Return 2D rotation matrix.
Arguments:
theta: Scalar value indicating radians of rotation.
"""
return np.array((
(np.cos(theta), -np.sin(theta)),
(np.sin(theta), np.cos(theta)),
))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
12131,
383,
350,
13396,
57,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
... | 3.334347 | 329 |
from discord.ext import commands
from loguru import logger
from mainDiscord import embedCreator
| [
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
2604,
14717,
1330,
49706,
198,
6738,
1388,
15642,
585,
1330,
11525,
16719,
273,
628,
198
] | 4.26087 | 23 |
exclude_patterns = ['_build']
latex_elements = {
'maxlistdepth': '10',
}
| [
1069,
9152,
62,
33279,
82,
796,
37250,
62,
11249,
20520,
198,
198,
17660,
87,
62,
68,
3639,
796,
1391,
198,
220,
220,
220,
705,
9806,
4868,
18053,
10354,
705,
940,
3256,
198,
92,
198
] | 2.294118 | 34 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-07-11 04:08
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1959,
319,
33448,
12,
2998,
12,
1157,
8702,
25,
2919,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198... | 2.754386 | 57 |
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
chrome_src = os.environ['CHROME_SRC']
chrome_tool_path = os.path.join(chrome_src, 'build', 'android')
sys.path.append(chrome_tool_path)
# Below two modules should be imported at runtime, but pylint can not find it,
# add below pylint attribute to ignore this error.
#
# pylint: disable=F0401
import adb_profile_chrome
from pylib import constants
# Wrapper for package info, the key 'stable' is needed by adb_profile_chrome.
PACKAGE_INFO = {
'xwalk_embedded_shell': constants.PackageInfo(
'org.xwalk.runtime.client.embedded.shell',
'org.xwalk.runtime.client.embedded.shell'
'.XWalkRuntimeClientEmbeddedShellActivity',
'/data/local/tmp/xwview-shell-command-line',
None,
None),
}
if __name__ == '__main__':
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
2211,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
... | 2.8875 | 400 |
from setuptools import setup
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="enhanced",
version="4.0.0",
description="Enhanced interactions for interactions.py",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/interactions-py/enhanced",
author="Toricane",
author_email="prjwl028@gmail.com",
license="MIT",
packages=["interactions.ext.enhanced"],
classifiers=[
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"discord-py-interactions>=4.1.1rc.1",
"typing_extensions",
],
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
1600,
21004,
2625,
40477,
12,
23,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
... | 2.567823 | 317 |
import itertools
from typing import List, Dict
from .config import PullRequestsConstants
from .domain import PullRequest, PullRequestSort, PullRequestStatus, PullRequestsOverview, PullRequestException
from .notification import send_notification_pr
from ..common.config import get_logger
from ..common.icons import Icon, Icons
from ..common.util import get_absolute_path_to_repo_file
logger = get_logger(__name__)
open_multiple_urls = get_absolute_path_to_repo_file('src/open-multiple-urls.sh')
| [
11748,
340,
861,
10141,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
198,
198,
6738,
764,
11250,
1330,
21429,
16844,
3558,
34184,
1187,
198,
6738,
764,
27830,
1330,
21429,
18453,
11,
21429,
18453,
42758,
11,
21429,
18453,
19580,
11,
21429,... | 3.362416 | 149 |
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.test import SimpleTestCase
from yepes.test_mixins import TemplateTagsMixin
from marchena.modules.posts.templatetags.posts import (
CalendarTag,
GetArchivesTag,
GetCategoryTag,
GetCategoriesTag,
GetNextPostTag,
GetPopularPostsTag,
GetPostTag,
GetPostsTag,
GetPreviousPostTag,
GetRecentPostsTag,
GetRelatedPostsTag,
GetTagTag,
GetTagsTag,
LastModificationTag,
LastPublicationTag,
NextPostLinkTag,
PostAuthorsTag,
PostCategoriesTag,
PostTagsTag,
PreviousPostLinkTag,
TagCloudTag,
)
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9288,
1330,
17427,
14402,
20448,
198,
198,
6738,
331,
538,
274,
13,
9288,
... | 2.515625 | 256 |
"""DeepNeuro: A deep learning python package for neuroimaging data.
Created by the Quantitative Tumor Imaging Lab at the Martinos Center
(Harvard-MIT Program in Health, Sciences, and Technology / Massachussets General Hospital).
"""
DOCLINES = __doc__.split("\n")
import sys
from setuptools import setup, find_packages
from codecs import open
from os import path
import os
os.environ["MPLCONFIGDIR"] = "."
if sys.version_info[:2] < (3, 5):
raise RuntimeError("Python version 3.5 or greater required.")
setup(
name='deepneuro',
version='0.2.3',
description=DOCLINES[0],
packages=find_packages(),
entry_points= {
"console_scripts": ['segment_gbm = deepneuro.pipelines.Segment_GBM.cli:main',
'skull_stripping = deepneuro.pipelines.Skull_Stripping.cli:main',
'segment_mets = deepneuro.pipelines.Segment_Brain_Mets.cli:main',
'segment_ischemic_stroke = deepneuro.pipelines.Ischemic_Stroke.cli:main'],
},
author='Andrew Beers',
author_email='abeers@mgh.harvard.edu',
url='https://github.com/QTIM-Lab/DeepNeuro', # use the URL to the github repo
download_url='https://github.com/QTIM-Lab/DeepNeuro/tarball/0.2.3',
keywords=['neuroimaging', 'neuroncology', 'neural networks', 'neuroscience', 'neurology', 'deep learning', 'fmri', 'pet', 'mri', 'dce', 'dsc', 'dti', 'machine learning', 'computer vision', 'learning', 'keras', 'theano', 'tensorflow', 'nifti', 'nrrd', 'dicom'],
install_requires=['tables', 'pydicom', 'pynrrd', 'nibabel', 'pyyaml', 'six', 'imageio', 'matplotlib', 'pydot', 'scipy', 'numpy', 'scikit-image', 'imageio', 'tqdm'],
classifiers=[],
)
| [
37811,
29744,
8199,
1434,
25,
317,
2769,
4673,
21015,
5301,
329,
7669,
320,
3039,
1366,
13,
198,
198,
41972,
416,
262,
16972,
12464,
309,
388,
273,
48656,
3498,
379,
262,
5780,
418,
3337,
220,
198,
7,
13587,
10187,
12,
36393,
6118,
28... | 2.367347 | 735 |
from coralquant.models.odl_model import BS_SZ50_Stocks
import baostock as bs
import pandas as pd
from sqlalchemy import String
from coralquant.database import engine
from coralquant.settings import CQ_Config
def get_sz50_stocks():
"""
获取上证50成分股数据
"""
#删除数据
BS_SZ50_Stocks.del_all_data()
# 登陆系统
lg = bs.login()
# 显示登陆返回信息
print('login respond error_code:' + lg.error_code)
print('login respond error_msg:' + lg.error_msg)
# 获取上证50成分股
rs = bs.query_sz50_stocks()
print('query_sz50 error_code:'+rs.error_code)
print('query_sz50 error_msg:'+rs.error_msg)
# 打印结果集
sz50_stocks = []
while (rs.error_code == '0') & rs.next():
# 获取一条记录,将记录合并在一起
sz50_stocks.append(rs.get_row_data())
result = pd.DataFrame(sz50_stocks, columns=rs.fields)
result.to_sql('odl_bs_sz50_stocks', engine, schema=CQ_Config.DB_SCHEMA, if_exists='append', index=False)
# 登出系统
bs.logout() | [
6738,
29537,
40972,
13,
27530,
13,
375,
75,
62,
19849,
1330,
24218,
62,
50,
57,
1120,
62,
1273,
3320,
198,
11748,
26605,
455,
735,
355,
275,
82,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
44161,
282,
26599,
1330,
10903,
198,
6... | 1.80791 | 531 |
"""
The :mod:`pyswarms.utils.search` module implements various techniques in
hyperparameter value optimization.
"""
from .grid_search import GridSearch
from .random_search import RandomSearch
__all__ = ["GridSearch", "RandomSearch"]
| [
37811,
198,
464,
1058,
4666,
25,
63,
79,
893,
5767,
907,
13,
26791,
13,
12947,
63,
8265,
23986,
2972,
7605,
287,
198,
49229,
17143,
2357,
1988,
23989,
13,
198,
37811,
198,
198,
6738,
764,
25928,
62,
12947,
1330,
24846,
18243,
198,
673... | 3.671875 | 64 |
# -------------------------------------------------------------------------------
# Copyright 2018-2020, Christian Pilato <christian.pilato@polimi.it>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
| [
2,
16529,
24305,
198,
2,
15069,
2864,
12,
42334,
11,
4302,
12693,
5549,
1279,
43533,
666,
13,
79,
346,
5549,
31,
16104,
25236,
13,
270,
29,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
341... | 4.555556 | 171 |
heatmap_1_r = imresize(heatmap_1, (50,80)).astype("float32")
heatmap_2_r = imresize(heatmap_2, (50,80)).astype("float32")
heatmap_3_r = imresize(heatmap_3, (50,80)).astype("float32")
heatmap_geom_avg = np.power(heatmap_1_r * heatmap_2_r * heatmap_3_r, 0.333)
display_img_and_heatmap("dog.jpg", heatmap_geom_avg)
| [
25080,
8899,
62,
16,
62,
81,
796,
545,
411,
1096,
7,
25080,
8899,
62,
16,
11,
357,
1120,
11,
1795,
29720,
459,
2981,
7203,
22468,
2624,
4943,
198,
25080,
8899,
62,
17,
62,
81,
796,
545,
411,
1096,
7,
25080,
8899,
62,
17,
11,
357... | 2.129252 | 147 |
import codecs
import time
import cPickle
import gzip
import random
import os
import math
import modules.token as tk
import modules.perceptron as perceptron
import modules.lmi as lmi
from modules.evaluation import evaluate
from modules.affixes import find_affixes
# save the model (weight vectors) to a file:
# load the model (weight vectors) from a file:
# train the classifiers using the perceptron algorithm:
# apply the classifiers to test data:
# build mapping of features to vector dimensions (key=feature, value=dimension index):
if __name__ == '__main__':
t0 = time.time()
import argparse
argpar = argparse.ArgumentParser(description='')
mode = argpar.add_mutually_exclusive_group(required=True)
mode.add_argument('-train', dest='train', action='store_true', help='run in training mode')
mode.add_argument('-test', dest='test', action='store_true', help='run in test mode')
mode.add_argument('-ev', dest='evaluate', action='store_true', help='run in evaluation mode')
mode.add_argument('-tag', dest='tag', action='store_true', help='run in tagging mode')
argpar.add_argument('-i', '--infile', dest='in_file', help='in file', required=True)
argpar.add_argument('-e', '--epochs', dest='epochs', help='epochs', default='1')
argpar.add_argument('-m', '--model', dest='model', help='model', default='model')
argpar.add_argument('-o', '--output', dest='output_file', help='output file', default='output.txt')
argpar.add_argument('-t1', '--topxform', dest='top_x_form', help='top x form', default=None)
argpar.add_argument('-t2', '--topxwordlen', dest='top_x_word_len', help='top x word len', default=None)
argpar.add_argument('-t3', '--topxposition', dest='top_x_position', help='top x position', default=None)
argpar.add_argument('-t4', '--topxprefix', dest='top_x_prefix', help='top x prefix', default=None)
argpar.add_argument('-t5', '--topxsuffix', dest='top_x_suffix', help='top x suffix', default=None)
argpar.add_argument('-t6', '--topxlettercombs', dest='top_x_lettercombs', help='top x letter combs', default=None)
argpar.add_argument('-decrease-alpha', dest='decrease_alpha', action='store_true', help='decrease alpha',
default=False)
argpar.add_argument('-shuffle-sentences', dest='shuffle_sentences', action='store_true', help='shuffle sentences',
default=False)
argpar.add_argument('-batch-training', dest='batch_training', action='store_true', help='batch training',
default=False)
args = argpar.parse_args()
t = posTagger()
if os.stat(args.in_file).st_size == 0:
print "Input file is empty"
else:
if args.train:
print "Running in training mode\n"
if not args.top_x_form:
print args.top_x_form
top_x = [args.top_x_form, args.top_x_word_len, args.top_x_position, args.top_x_prefix, args.top_x_suffix,
args.top_x_lettercombs]
t.train(args.in_file, args.model, int(args.epochs), top_x, args.decrease_alpha, args.shuffle_sentences,
args.batch_training)
elif args.test:
print "Running in test mode\n"
t.test(args.in_file, args.model, args.output_file)
elif args.evaluate:
print "Running in evaluation mode\n"
out_stream = open(args.output_file, 'w')
evaluate(args.in_file, out_stream)
out_stream.close()
elif args.tag:
print "Running in tag mode\n"
t.tag(args.in_file, args.model, args.output_file)
t1 = time.time()
print "\n\tDone. Total time: " + str(t1 - t0) + "sec.\n"
| [
11748,
40481,
82,
198,
11748,
640,
198,
11748,
269,
31686,
293,
198,
11748,
308,
13344,
198,
11748,
4738,
198,
11748,
28686,
198,
11748,
10688,
198,
198,
11748,
13103,
13,
30001,
355,
256,
74,
198,
11748,
13103,
13,
525,
984,
1313,
355,... | 2.463158 | 1,520 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from pants.engine.internals.scheduler import Scheduler
@dataclass(frozen=True)
class BootstrapScheduler:
"""A Scheduler that has been configured with only the rules for bootstrapping."""
scheduler: Scheduler
| [
2,
15069,
33448,
41689,
1628,
20420,
357,
3826,
27342,
9865,
3843,
20673,
13,
9132,
737,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
3826,
38559,
24290,
737,
198,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
3... | 3.509091 | 110 |
from django import template
from django.utils.html import format_html
register = template.Library()
@register.inclusion_tag('django_bootstrap_dynamic_formsets/dynamic_formsets.html')
@register.inclusion_tag('django_bootstrap_dynamic_formsets/dynamic_formsets_js.html',takes_context=True)
| [
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
26791,
13,
6494,
1330,
5794,
62,
6494,
198,
198,
30238,
796,
11055,
13,
23377,
3419,
198,
198,
31,
30238,
13,
259,
4717,
62,
12985,
10786,
28241,
14208,
62,
18769,
26418,
62,... | 3.063158 | 95 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: acl.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
import hdfs_pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='acl.proto',
package='hadoop.hdfs',
serialized_pb='\n\tacl.proto\x12\x0bhadoop.hdfs\x1a\nhdfs.proto\"\xc4\x03\n\rAclEntryProto\x12:\n\x04type\x18\x01 \x02(\x0e\x32,.hadoop.hdfs.AclEntryProto.AclEntryTypeProto\x12<\n\x05scope\x18\x02 \x02(\x0e\x32-.hadoop.hdfs.AclEntryProto.AclEntryScopeProto\x12=\n\x0bpermissions\x18\x03 \x02(\x0e\x32(.hadoop.hdfs.AclEntryProto.FsActionProto\x12\x0c\n\x04name\x18\x04 \x01(\t\"-\n\x12\x41\x63lEntryScopeProto\x12\n\n\x06\x41\x43\x43\x45SS\x10\x00\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x01\"=\n\x11\x41\x63lEntryTypeProto\x12\x08\n\x04USER\x10\x00\x12\t\n\x05GROUP\x10\x01\x12\x08\n\x04MASK\x10\x02\x12\t\n\x05OTHER\x10\x03\"~\n\rFsActionProto\x12\x08\n\x04NONE\x10\x00\x12\x0b\n\x07\x45XECUTE\x10\x01\x12\t\n\x05WRITE\x10\x02\x12\x11\n\rWRITE_EXECUTE\x10\x03\x12\x08\n\x04READ\x10\x04\x12\x10\n\x0cREAD_EXECUTE\x10\x05\x12\x0e\n\nREAD_WRITE\x10\x06\x12\x0c\n\x08PERM_ALL\x10\x07\"\x9f\x01\n\x0e\x41\x63lStatusProto\x12\r\n\x05owner\x18\x01 \x02(\t\x12\r\n\x05group\x18\x02 \x02(\t\x12\x0e\n\x06sticky\x18\x03 \x02(\x08\x12+\n\x07\x65ntries\x18\x04 \x03(\x0b\x32\x1a.hadoop.hdfs.AclEntryProto\x12\x32\n\npermission\x18\x05 \x01(\x0b\x32\x1e.hadoop.hdfs.FsPermissionProto\"X\n\x1cModifyAclEntriesRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\x12+\n\x07\x61\x63lSpec\x18\x02 \x03(\x0b\x32\x1a.hadoop.hdfs.AclEntryProto\"\x1f\n\x1dModifyAclEntriesResponseProto\"$\n\x15RemoveAclRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\"\x18\n\x16RemoveAclResponseProto\"X\n\x1cRemoveAclEntriesRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\x12+\n\x07\x61\x63lSpec\x18\x02 \x03(\x0b\x32\x1a.hadoop.hdfs.AclEntryProto\"\x1f\n\x1dRemoveAclEntriesResponseProto\"+\n\x1cRemoveDefaultAclRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\"\x1f\n\x1dRemoveDefaultAclResponseProto\"N\n\x12SetAclRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\x12+\n\x07\x61\x63lSpec\x18\x02 \x03(\x0b\x32\x1a.hadoop.hdfs.AclEntryProto\"\x15\n\x13SetAclResponseProto\"\'\n\x18GetAclStatusRequestProto\x12\x0b\n\x03src\x18\x01 
\x02(\t\"H\n\x19GetAclStatusResponseProto\x12+\n\x06result\x18\x01 \x02(\x0b\x32\x1b.hadoop.hdfs.AclStatusProtoB5\n%org.apache.hadoop.hdfs.protocol.protoB\tAclProtos\xa0\x01\x01')
_ACLENTRYPROTO_ACLENTRYSCOPEPROTO = _descriptor.EnumDescriptor(
name='AclEntryScopeProto',
full_name='hadoop.hdfs.AclEntryProto.AclEntryScopeProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ACCESS', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=255,
serialized_end=300,
)
_ACLENTRYPROTO_ACLENTRYTYPEPROTO = _descriptor.EnumDescriptor(
name='AclEntryTypeProto',
full_name='hadoop.hdfs.AclEntryProto.AclEntryTypeProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='USER', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GROUP', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MASK', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OTHER', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=302,
serialized_end=363,
)
_ACLENTRYPROTO_FSACTIONPROTO = _descriptor.EnumDescriptor(
name='FsActionProto',
full_name='hadoop.hdfs.AclEntryProto.FsActionProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXECUTE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE_EXECUTE', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_EXECUTE', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_WRITE', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PERM_ALL', index=7, number=7,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=365,
serialized_end=491,
)
_ACLENTRYPROTO = _descriptor.Descriptor(
name='AclEntryProto',
full_name='hadoop.hdfs.AclEntryProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='hadoop.hdfs.AclEntryProto.type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scope', full_name='hadoop.hdfs.AclEntryProto.scope', index=1,
number=2, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='permissions', full_name='hadoop.hdfs.AclEntryProto.permissions', index=2,
number=3, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='hadoop.hdfs.AclEntryProto.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_ACLENTRYPROTO_ACLENTRYSCOPEPROTO,
_ACLENTRYPROTO_ACLENTRYTYPEPROTO,
_ACLENTRYPROTO_FSACTIONPROTO,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=39,
serialized_end=491,
)
_ACLSTATUSPROTO = _descriptor.Descriptor(
name='AclStatusProto',
full_name='hadoop.hdfs.AclStatusProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='owner', full_name='hadoop.hdfs.AclStatusProto.owner', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='group', full_name='hadoop.hdfs.AclStatusProto.group', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sticky', full_name='hadoop.hdfs.AclStatusProto.sticky', index=2,
number=3, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='entries', full_name='hadoop.hdfs.AclStatusProto.entries', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='permission', full_name='hadoop.hdfs.AclStatusProto.permission', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=494,
serialized_end=653,
)
_MODIFYACLENTRIESREQUESTPROTO = _descriptor.Descriptor(
name='ModifyAclEntriesRequestProto',
full_name='hadoop.hdfs.ModifyAclEntriesRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.ModifyAclEntriesRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aclSpec', full_name='hadoop.hdfs.ModifyAclEntriesRequestProto.aclSpec', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=655,
serialized_end=743,
)
_MODIFYACLENTRIESRESPONSEPROTO = _descriptor.Descriptor(
name='ModifyAclEntriesResponseProto',
full_name='hadoop.hdfs.ModifyAclEntriesResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=745,
serialized_end=776,
)
_REMOVEACLREQUESTPROTO = _descriptor.Descriptor(
name='RemoveAclRequestProto',
full_name='hadoop.hdfs.RemoveAclRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.RemoveAclRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=778,
serialized_end=814,
)
_REMOVEACLRESPONSEPROTO = _descriptor.Descriptor(
name='RemoveAclResponseProto',
full_name='hadoop.hdfs.RemoveAclResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=816,
serialized_end=840,
)
_REMOVEACLENTRIESREQUESTPROTO = _descriptor.Descriptor(
name='RemoveAclEntriesRequestProto',
full_name='hadoop.hdfs.RemoveAclEntriesRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.RemoveAclEntriesRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aclSpec', full_name='hadoop.hdfs.RemoveAclEntriesRequestProto.aclSpec', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=842,
serialized_end=930,
)
_REMOVEACLENTRIESRESPONSEPROTO = _descriptor.Descriptor(
name='RemoveAclEntriesResponseProto',
full_name='hadoop.hdfs.RemoveAclEntriesResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=932,
serialized_end=963,
)
_REMOVEDEFAULTACLREQUESTPROTO = _descriptor.Descriptor(
name='RemoveDefaultAclRequestProto',
full_name='hadoop.hdfs.RemoveDefaultAclRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.RemoveDefaultAclRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=965,
serialized_end=1008,
)
_REMOVEDEFAULTACLRESPONSEPROTO = _descriptor.Descriptor(
name='RemoveDefaultAclResponseProto',
full_name='hadoop.hdfs.RemoveDefaultAclResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1010,
serialized_end=1041,
)
_SETACLREQUESTPROTO = _descriptor.Descriptor(
name='SetAclRequestProto',
full_name='hadoop.hdfs.SetAclRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.SetAclRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aclSpec', full_name='hadoop.hdfs.SetAclRequestProto.aclSpec', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1043,
serialized_end=1121,
)
_SETACLRESPONSEPROTO = _descriptor.Descriptor(
name='SetAclResponseProto',
full_name='hadoop.hdfs.SetAclResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1123,
serialized_end=1144,
)
_GETACLSTATUSREQUESTPROTO = _descriptor.Descriptor(
name='GetAclStatusRequestProto',
full_name='hadoop.hdfs.GetAclStatusRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.GetAclStatusRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1146,
serialized_end=1185,
)
_GETACLSTATUSRESPONSEPROTO = _descriptor.Descriptor(
name='GetAclStatusResponseProto',
full_name='hadoop.hdfs.GetAclStatusResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='hadoop.hdfs.GetAclStatusResponseProto.result', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1187,
serialized_end=1259,
)
_ACLENTRYPROTO.fields_by_name['type'].enum_type = _ACLENTRYPROTO_ACLENTRYTYPEPROTO
_ACLENTRYPROTO.fields_by_name['scope'].enum_type = _ACLENTRYPROTO_ACLENTRYSCOPEPROTO
_ACLENTRYPROTO.fields_by_name['permissions'].enum_type = _ACLENTRYPROTO_FSACTIONPROTO
_ACLENTRYPROTO_ACLENTRYSCOPEPROTO.containing_type = _ACLENTRYPROTO;
_ACLENTRYPROTO_ACLENTRYTYPEPROTO.containing_type = _ACLENTRYPROTO;
_ACLENTRYPROTO_FSACTIONPROTO.containing_type = _ACLENTRYPROTO;
_ACLSTATUSPROTO.fields_by_name['entries'].message_type = _ACLENTRYPROTO
_ACLSTATUSPROTO.fields_by_name['permission'].message_type = hdfs_pb2._FSPERMISSIONPROTO
_MODIFYACLENTRIESREQUESTPROTO.fields_by_name['aclSpec'].message_type = _ACLENTRYPROTO
_REMOVEACLENTRIESREQUESTPROTO.fields_by_name['aclSpec'].message_type = _ACLENTRYPROTO
_SETACLREQUESTPROTO.fields_by_name['aclSpec'].message_type = _ACLENTRYPROTO
_GETACLSTATUSRESPONSEPROTO.fields_by_name['result'].message_type = _ACLSTATUSPROTO
DESCRIPTOR.message_types_by_name['AclEntryProto'] = _ACLENTRYPROTO
DESCRIPTOR.message_types_by_name['AclStatusProto'] = _ACLSTATUSPROTO
DESCRIPTOR.message_types_by_name['ModifyAclEntriesRequestProto'] = _MODIFYACLENTRIESREQUESTPROTO
DESCRIPTOR.message_types_by_name['ModifyAclEntriesResponseProto'] = _MODIFYACLENTRIESRESPONSEPROTO
DESCRIPTOR.message_types_by_name['RemoveAclRequestProto'] = _REMOVEACLREQUESTPROTO
DESCRIPTOR.message_types_by_name['RemoveAclResponseProto'] = _REMOVEACLRESPONSEPROTO
DESCRIPTOR.message_types_by_name['RemoveAclEntriesRequestProto'] = _REMOVEACLENTRIESREQUESTPROTO
DESCRIPTOR.message_types_by_name['RemoveAclEntriesResponseProto'] = _REMOVEACLENTRIESRESPONSEPROTO
DESCRIPTOR.message_types_by_name['RemoveDefaultAclRequestProto'] = _REMOVEDEFAULTACLREQUESTPROTO
DESCRIPTOR.message_types_by_name['RemoveDefaultAclResponseProto'] = _REMOVEDEFAULTACLRESPONSEPROTO
DESCRIPTOR.message_types_by_name['SetAclRequestProto'] = _SETACLREQUESTPROTO
DESCRIPTOR.message_types_by_name['SetAclResponseProto'] = _SETACLRESPONSEPROTO
DESCRIPTOR.message_types_by_name['GetAclStatusRequestProto'] = _GETACLSTATUSREQUESTPROTO
DESCRIPTOR.message_types_by_name['GetAclStatusResponseProto'] = _GETACLSTATUSRESPONSEPROTO
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), '\n%org.apache.hadoop.hdfs.protocol.protoB\tAclProtos\240\001\001')
# @@protoc_insertion_point(module_scope)
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
257,
565,
13,
1676,
1462,
198,
198,
6738,
23645,
13,
11235,
672,
3046,
1330,
43087,
355,
4808,
20147,
1968,
273,
198,
6738,
23645,
13,
1123... | 2.269338 | 8,636 |
"""
Generate certain RST files used in documentation.
"""
from __future__ import print_function
import sys
import argparse
from collections import OrderedDict, defaultdict
import os
from os.path import join, exists
from os import mkdir
import pandas
import logomaker
from matplotlib import pyplot
from mhcflurry.downloads import get_path
from mhcflurry.amino_acid import COMMON_AMINO_ACIDS
AMINO_ACIDS = sorted(COMMON_AMINO_ACIDS)
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"--class1-models-dir-with-ms",
metavar="DIR",
default=get_path(
"models_class1_pan", "models.with_mass_spec", test_exists=False),
help="Class1 models. Default: %(default)s",
)
parser.add_argument(
"--class1-models-dir-no-ms",
metavar="DIR",
default=get_path(
"models_class1_pan", "models.no_mass_spec", test_exists=False),
help="Class1 models. Default: %(default)s",
)
parser.add_argument(
"--logo-cutoff",
default=0.01,
type=float,
help="Fraction of top to use for motifs",
)
parser.add_argument(
"--length-cutoff",
default=0.01,
type=float,
help="Fraction of top to use for length distribution",
)
parser.add_argument(
"--length-distribution-lengths",
nargs="+",
default=[8, 9, 10, 11, 12, 13, 14, 15],
type=int,
help="Peptide lengths for length distribution plots",
)
parser.add_argument(
"--motif-lengths",
nargs="+",
default=[8, 9, 10, 11],
type=int,
help="Peptide lengths for motif plots",
)
parser.add_argument(
"--out-dir",
metavar="DIR",
required=True,
help="Directory to write RSTs and images to",
)
parser.add_argument(
"--max-alleles",
default=None,
type=int,
metavar="N",
help="Only use N alleles (for testing)",
)
if __name__ == "__main__":
go(sys.argv[1:])
| [
37811,
198,
8645,
378,
1728,
371,
2257,
3696,
973,
287,
10314,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
11,
4277,
11600... | 2.500678 | 737 |
# face_detector_google3.py
#
# This version expects a source folder containing folders of images,
# as you get with the LFW distribution.
# Each folder is named for one person and that folder should contain
# only photos of that person.
#
# Environment Variable: GOOGLE_APPLICATION_CREDENTIALS
# C:\pyDev\__My Scripts\face_detector_google\Face-Detection-3dc1b370d617.json
from __future__ import print_function
"""Draws squares around faces in the given image."""
import sys
import os
import os.path
parent_dir = os.path.dirname(os.getcwd())
sys.path.insert(0, parent_dir)
from mcm_lib2 import exception_utils as eu
from mcm_lib2 import fname as fnm
from mcm_lib2 import files_and_folders as ff
from mcm_lib2 import enum as en
import argparse
import base64
import json
import fnmatch
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from PIL import Image
from PIL import ImageDraw
import numpy as np
import kairos_face
kairos_face.settings.app_id = "56aab423"
kairos_face.settings.app_key = "faa3e1412c97b3171dd7dcda3382313a"
RADIUS = 2
API_KEY = 'AIzaSyD3HsHlSOrQXhmqjpph9R9Di1pl_4WVNEY'
def get_list_of_matching_files(root_dir, image_ext_tuple):
'''
Returns a list of filenames in the specified directory that match the specified tuple.
Example tuple: ('*.jpg', '*.jpeg', '*.j2k', '*.png')
'''
x_matching_files = []
# Read the directories in the root_dir and then iterate over them
dirs = os.listdir(root_dir)
for dir in dirs:
full_dir = os.path.join(root_dir, dir)
for root, dirs, files in os.walk(full_dir):
for extension in image_ext_tuple:
for filename in fnmatch.filter(files, extension):
full_filename = os.path.join(full_dir, filename)
x_matching_files.append(full_filename)
return x_matching_files
def save_as_json(d, full_json_filename):
'''
'''
try:
json_string = json.dumps(d, indent=4)
except Exception as e:
return(False, 'Exception in save calling json.dumps. Details: %s' % (str(e)))
try:
with open(full_json_filename, "w") as text_file:
text_file.write("%s" % (json_string))
except Exception as e:
return (False, 'Exception in save() writing to output file: %s. Details: %s' % (full_json_filename, str(e)))
return (True, '')
def get_elipse_bounding_box(x, y, radius):
'''
Returns as a list the bounding box around the specified point
'''
#left = (int(x)-radius, int(y))
#right = (int(x)+radius, int(y))
#top = (int(x), int(y)-radius)
#bottom = (int(x), int(y)+radius)
top_left = (int(x)-radius, int(y)-radius)
bottom_right = (int(x)+radius, int(y)+radius)
#return [left, right, top, bottom]
return [top_left, bottom_right]
# [START get_vision_service]
# [END get_vision_service]
def detect_face(face_file, service, max_results=4):
'''
Uses the Vision API to detect faces in the face_file image
object that was opened by the client with the following:
with open(input_filename, 'rb') as face_file:
detect_face(face_file, 3)
RETURNS: the following tuple (result_flag, err_msg, response_obj, face_data)
'''
# Read the previously-opened image file, base64-encode it and then decode it
image_content = face_file.read()
batch_request = [{
'image': { 'content': base64.b64encode(image_content).decode('utf-8') },
'features': [{ 'type': 'FACE_DETECTION', 'maxResults': max_results }]
}]
#service = get_vision_service()
# Exception details: <HttpError 403 when requesting https://vision.googleapis.com/v1/images:annotate?alt=json
# returned "The request cannot be identified with a client project. Please pass a valid API key with the request.">
#service = discovery.build('vision', 'v1')
#API_KEY = 'AIzaSyD3HsHlSOrQXhmqjpph9R9Di1pl_4WVNEY'
#service = discovery.build('vision', 'v1', developerKey = API_KEY)
try:
request = service.images().annotate(body={ 'requests': batch_request })
except Exceptiion as e:
msg = 'Exception calling annotate service. Details: %s' % (str(e))
return (False, msg, None, None)
try:
response = request.execute()
except Exception as e:
msg = 'Exception calling request.execute. Details: %s' % (str(e))
return (False, msg, None, None)
try:
face_data = None
if 'faceAnnotations' in response['responses'][0].keys():
face_data = response['responses'][0]['faceAnnotations']
except Exception as e:
msg = 'Exception accessing response object for face_data. Details: %s' % (str(e))
return (False, msg, response, None)
return (True, '', response, face_data)
def draw_landmark_boxes(image, xz_landmarks, output_filename):
'''
draws a polygon around each landmark and then save out the file to the specified
output_filename.
'''
im = Image.open(image)
draw = ImageDraw.Draw(im)
fill='#00ff00'
for z_landmark in xz_landmarks:
x = z_landmark['position']['x']
y = z_landmark['position']['y']
x_bbox = get_elipse_bounding_box(x, y, RADIUS)
draw.ellipse(x_bbox, fill=fill)
im.save(output_filename)
def highlight_faces(image, faces, output_filename):
'''
Draws a polygon around the faces, then saves to output_filename.
Args:
image: a file containing the image with the faces.
faces: a list of faces found in the file. This should be in the format
returned by the Vision API.
output_filename: the name of the image file to be created, where the
faces have polygons drawn around them.
'''
im = Image.open(image)
draw = ImageDraw.Draw(im)
for face in faces:
box1 = [(v.get('x', 0.0), v.get('y', 0.0)) for v in face['fdBoundingPoly']['vertices']]
#box2 = [(v.get('x', 0.0), v.get('y', 0.0)) for v in face['boundingPoly']['vertices']]
draw.line(box1 + [box1[0]], width=3, fill='#00ff00')
#draw.line(box2 + [box2[0]], width=3, fill='#00ff0f')
im.save(output_filename)
def detect_and_annotate(input_filename, face_filename, json_filename, service, max_results):
'''
RETURNS: the following tuple: (result_flag, base_filename, num_faces, headwear_likelihood, msg)
'''
num_faces = 0
base_filename = os.path.basename(input_filename)
tmp_output = os.path.join(os.path.dirname(face_filename),'tmp.jpg')
# First detect the face, then draw a box around it, then save it
with open(input_filename, 'rb') as source_image:
(result, err_msg, response, face_data) = detect_face(source_image, service, max_results)
if not result:
msg = 'Error in detect_and_annotate calling detect_face. Details: %s' % err_msg
return (False, base_filename, 0, msg)
# the call didn't return face data
if not face_data:
msg = 'No face annotation data returned for %s' % (face_filename)
return (False, base_filename, 0, msg)
# The call to detect_face succeeded and we have face_data
num_faces = len(face_data)
#print('Found {} face{}'.format(num_faces, '' if num_faces == 1 else 's'))
#print('Writing face rectangle to file {}'.format(face_filename))
# Reset the file pointer, so we can read the file again to draw the face rectangle
try:
source_image.seek(0)
highlight_faces(source_image, face_data, tmp_output)
except Exception as e:
msg = 'Exception in highlight_faces. Details: %s' % (str(e))
return (False, base_filename, num_faces, msg)
try:
# Draw ellipses for the landmarks on the image and save it to a different filename
with open(tmp_output, 'rb') as source_image:
xz_landmarks = face_data[0]['landmarks']
draw_landmark_boxes(tmp_output, xz_landmarks, face_filename)
except Exception as e:
msg = 'Exception in draw_landmark_boxes. Details: %s' % (str(e))
return (False, base_filename, num_faces, msg)
try:
# Save the JSON file
(result, errmsg) = save_as_json(response, json_filename)
if not result:
return (False, base_filename, num_faces, errmsg)
except Exception as e:
msg = 'Exception calling save_as_json. Details: %s' % (str(e))
return (False, base_filename, num_faces, msg)
# Iterate over the list of face_data
xz_face_data = []
for idx in xrange(0, num_faces):
z_face_data = {}
# Pull out headwearLikelihood value from json
headwear_likelihood = face_data[0]['headwearLikelihood']
z_face_data['headwear_likelihood'] = face_data[idx]['headwearLikelihood']
# Pull out eye locations
eye_data = get_eye_locations(face_data, idx)
if len(eye_data) == 2:
try:
d = compute_eye_distance(eye_data)
except Exception as e:
print('Exception calling compute_eye_distance: %s' % (str(e)))
return (False, base_filename, xz_face_data, '')
if d > 0.0:
z_face_data['eye_distance'] = d
else:
z_face_data['eye_distance'] = 0.0
else:
z_face_data['eye_distance'] = 0.0
# Pull out pan angles
try:
z_face_data['face_angles'] = get_face_angles(face_data, idx)
except Exception as e:
print('Exception calling get_face_angles: %s' % (str(e)))
return (False, base_filename, z_face_data, '')
# Add this dictionary to our list
xz_face_data.append(z_face_data)
return (True, base_filename, xz_face_data, '')
def compute_eye_distance(eye_data):
'''
eye_data is a list of two lists
eye_data[0]: [left_eye_x, left_eye_y, left_eye_z]
eye_data[1]: [right_eye_x, right_eye_y, right_eye_z]
'''
left_eye_x, left_eye_y, left_eye_z = eye_data[0]
right_eye_x, right_eye_y, right_eye_z = eye_data[1]
if left_eye_x and right_eye_x:
x = left_eye_x - right_eye_x
x = x * x
else:
return -1.0
if left_eye_y and right_eye_y:
y = left_eye_y - right_eye_y
y = y * y
else:
return -1.0
if left_eye_z and right_eye_z:
z = left_eye_z - right_eye_z
z = z * z
else:
return -1.0
d = np.sqrt(x + y + z)
return d
def select_faces_to_keep(filename, xz_face_data):
'''
xz_face_data is a list of dictionaries.
Each list item is a dictionary with the following key/values:
'eye_distance' : distance (float)
'headwear_likelihood' : likelihood enum (string)
'face_angles' : dictionary with the following keys: 'pan', 'roll', 'pitch'
Using the data we use some simple heuristics to determine which faces to keep.
Rule 1. In an ideal condition, the best face is the one that is relatively much larger than
the runner-up and has a pan angle close to 0.
If both faces are about the same size, then the best face is the one with the pan
angle closest to 0.
'''
x_faces_to_keep = []
x_distances = []
for z in xz_face_data:
d = z['eye_distance']
x_distances.append(d)
# Get the largest face (largest eye distance) and its index in the list
idx_of_largest_face = get_index_of_largest_eye_distance(x_distances)
largest_eye_distance = x_distances[idx_of_largest_face]
z_angles_of_largest_face = xz_face_data[idx_of_largest_face]['face_angles']
pan_angle_of_largest_face = abs(z_angles_of_largest_face['pan'])
# Null out the largest value so we can get runner-up
x_distances[idx_of_largest_face] = 0.0
# Get the runner-up face distance and its index in the list
idx_of_second_largest_eye_distance = get_index_of_largest_eye_distance(x_distances)
second_largest_eye_distance = x_distances[idx_of_second_largest_eye_distance]
z_angles_of_runner_up_face = xz_face_data[idx_of_second_largest_eye_distance]['face_angles']
pan_angle_of_runner_up = abs(z_angles_of_runner_up_face['pan'])
# Calculate the relative difference between these two distances
# This is a float between 0 and 1 in which a larger value indicates a greater relative difference
try:
relative_difference = calculate_relative_difference(largest_eye_distance, second_largest_eye_distance)
except Exception as e:
print('Exception thrown calling calculate_relative_difference. Details: %s' % (str(e)))
return []
rel_face_diff = face_difference(relative_difference)
face_dir_largest = face_direction(pan_angle_of_largest_face)
face_dir_runnerup = face_direction(pan_angle_of_runner_up)
# ---------------------------------------------------------------------------------------------------------------
# Rules for how we deal with other faces detected:
# R0: Large relative difference in face size and forward-facing ==> Only keep largest face
# R1: Medium relative difference in face size, but only largest face is forward-facing ==> Only keep largest face
# R2:
# R3
# Rule R0: If much larger and forward-facing, then only keep the largest face
if (rel_face_diff.name == 'LARGE' or rel_face_diff.name == 'EXTRA_LARGE') and \
face_dir_largest.d == 'FORWARD':
print('%s: Rule-0: relative difference: %s (%f), pan of largest face: %s (%f), pan of runner-up: %s (%f)' %
(filename, rel_face_diff.name, relative_difference, face_dir_largest.d, pan_angle_of_largest_face, face_dir_runnerup.d, pan_angle_of_runner_up))
# Rule R1: If larger, forward-facing face and runner-up is not forward-facing, then keep only largest face
elif rel_face_diff.name == 'MEDIUM' and \
face_dir_largest.d == 'FORWARD' and \
(face_dir_runnerup.d == 'ANGLED' or face_dir_runnerup.d == 'SIDE_VIEW'):
print('%s: Rule-0: relative difference: %s (%f), pan of largest face: %s (%f), pan of runner-up: %s (%f)' %
(filename, rel_face_diff.name, relative_difference, face_dir_largest.d, pan_angle_of_largest_face, face_dir_runnerup.d, pan_angle_of_runner_up))
# Rule R2: If approx same size faces, largest is forward-facing and runner-up is not forward-facing, keep the face that is forward-facing
elif (rel_face_diff.name == 'EXTRA_SMALL' or rel_face_diff.name == 'SMALL') and \
face_dir_largest.d == 'FORWARD' and \
(face_dir_runnerup.d == 'ANGLED' or face_dir_runnerup.d == 'SIDE_VIEW'):
print('%s: Rule-0: relative difference: %s (%f), pan of largest face: %s (%f), pan of runner-up: %s (%f)' %
(filename, rel_face_diff.name, relative_difference, face_dir_largest.d, pan_angle_of_largest_face, face_dir_runnerup.d, pan_angle_of_runner_up))
# Rule R3: If approx same size faces, largest is forward-facing and runner-up is forward-facing, keep both faces
elif (rel_face_diff.name == 'EXTRA_SMALL' or rel_face_diff.name == 'SMALL') and \
face_dir_largest.d == 'FORWARD' and face_dir_runnerup.d == 'FORWARD':
print('%s: Rule-0: relative difference: %s (%f), pan of largest face: %s (%f), pan of runner-up: %s (%f)' %
(filename, rel_face_diff.name, relative_difference, face_dir_largest.d, pan_angle_of_largest_face, face_dir_runnerup.d, pan_angle_of_runner_up))
# Rule R4: If approx same size faces and largest face is not forward-facing, runner-up is forward-facing ==> Keep only runner-up face
elif (rel_face_diff.name == 'EXTRA_SMALL' or rel_face_diff.name == 'SMALL') and \
(face_dir_largest.d == 'ANGLED' or face_dir_largest.d == 'SIDE_VIEW') and \
face_dir_runnerup.d == 'FORWARD':
print('%s: Rule-0: relative difference: %s (%f), pan of largest face: %s (%f), pan of runner-up: %s (%f)' %
(filename, rel_face_diff.name, relative_difference, face_dir_largest.d, pan_angle_of_largest_face, face_dir_runnerup.d, pan_angle_of_runner_up))
else:
print('%s: Rule-0: relative difference: %s (%f), pan of largest face: %s (%f), pan of runner-up: %s (%f)' %
(filename, rel_face_diff.name, relative_difference, face_dir_largest.d, pan_angle_of_largest_face, face_dir_runnerup.d, pan_angle_of_runner_up))
return x_faces_to_keep
def get_face_angles(face_data, face_idx):
'''
Using the json face_data returned from the Google Vision detection call,
get the pan, tilt and roll angles of the face and return in a dictionary,
with these values keyed on the angle name.
'''
face_angles = {}
try:
face_angles['pan'] = face_data[face_idx]['panAngle']
face_angles['tilt'] = face_data[face_idx]['tiltAngle']
face_angles['roll'] = face_data[face_idx]['rollAngle']
except Exception as e:
face_angles['pan'] = None
face_angles['tilt'] = None
face_angles['roll'] = None
return face_angles
def get_location_from_landmark_dict(z_lm):
'''
z_lm is the landmark dictionary and z['position'] is the dictionary
holding the coordinate values. It appears that sometimes the Google
service doesn't return a full dictionary.
'''
x = None
if 'x' in z_lm['position'].keys():
x = z_lm['position']['x']
y = None
if 'y' in z_lm['position'].keys():
y = z_lm['position']['y']
z = None
if 'z' in z_lm['position'].keys():
z = z_lm['position']['z']
return [x, y, z]
def get_eye_locations(face_data, face_idx):
'''
Using the json face_data returned from Google Vision detection call,
get the location of the left and right eye for the specified face index.
face_idx:0 is the 0th face detected.
face_idx:1 is the 1st face detected. ...
'''
eye_data = [[], []]
# face_data[idx]['landmarks'] is a list of dictionaries.
# We iterate over the list looking for the one that has the value of LEFT_EYE or
# RIGHT_EYE for the key 'type'.
for lm in face_data[face_idx]['landmarks']:
if lm['type'] == 'LEFT_EYE':
#left_eye_x = lm['position']['x']
#left_eye_y = lm['position']['y']
#left_eye_z = lm['position']['z']
#eye_data[0] = [left_eye_x, left_eye_y, left_eye_z]
[left_eye_x, left_eye_y, left_eye_z] = get_location_from_landmark_dict(lm)
continue
if lm['type'] == 'RIGHT_EYE':
#right_eye_x = lm['position']['x']
#right_eye_y = lm['position']['y']
#right_eye_z = lm['position']['z']
#eye_data[1] = [right_eye_x, right_eye_y, right_eye_z]
[right_eye_x, right_eye_y, right_eye_z] = get_location_from_landmark_dict(lm)
continue
return [[left_eye_x, left_eye_y, left_eye_z], [right_eye_x, right_eye_y, right_eye_z]]
def get_index_of_largest_eye_distance(x_distances):
'''
Returns the index in the x_distances list containing the maximum value.
'''
max_value = max(x_distances)
max_index = x_distances.index(max_value)
return max_index
def calculate_relative_difference(max_distance, runner_up_distance):
'''
Returns the relative difference between the max distance and the runner-up:
Rel_Diff = (max_distance - runner_up) / max_distance
'''
return (max_distance - runner_up_distance) / max_distance
def create_exclude_list(exclude_filename):
'''
Returns a list of filenames in the exclude_filename file.
These are the filenames that should be exluded from processing.
'''
with open(exclude_filename) as f:
x_names = [line.strip() for line in f]
return x_names
if __name__ == '__main__':
# Visual Studio script arguments:
# tst1\00AB500A-0006-0000-0000-000000000000.jpg --out 00AB500A-0006-0000-0000-000000000000_out.jpg --max-results 5
# tst1\demo-image.jpg --out tst1\dog_out.jpg --max-results 3
# tst1\00AB500A-0006-0000-0000-000000000000.jpg --face 00AB500A-0006-0000-0000-000000000000_face.jpg --land 00AB500A-0006-0000-0000-000000000000_land.jpg --max-results 5
# tst1\02ED2000-0006-0000-0000-000000000000.jpg --face 02ED2000-0006-0000-0000-000000000000_face.jpg --land 02ED2000-0006-0000-0000-000000000000_land.jpg --max-results 5
# fd = face_difference(0.30)
# print(fd.name)
print(sys.prefix)
print(sys.version)
print(sys.path)
src_root_dir = r'E:\_Ancestry\lfw\lfw_tmp_efghijk_orig'
out_dir = r'E:\_Ancestry\lfw\lfw_output'
out_suffix = '_face'
#exclude_list_filename = 'exclude1.txt'
#x_exclude = create_exclude_list(exclude_list_filename)
service = discovery.build('vision', 'v1', developerKey = API_KEY)
id = 0
x_files = get_list_of_matching_files(src_root_dir, ('*.jpg', '*.jpeg'))
#for fn in x_files:
# input_face_fni = fni.fname_info(fullname=fn)
# basename =
# output_face_fni = fni.fname_info(dirname=out_dir, basename=input_face_fni.basename, suffix=out_suffix)
# (dir, filename) = os.path.split(fn)
# (basename, ext) = os.path.splitext(filename)
for fn in x_files:
#if fn in x_exclude:
# msg = '%s | %s' % ('Exclude', fn)
# print(msg)
# continue
# Name of the output image file (with the out_suffix)
face_fn = basename + out_suffix + '.jpg'
full_output_face_fn = os.path.join(out_dir, face_fn)
# Name of the output JSON file (.json ext)
json_fn = basename + '.json'
full_output_json_fn = os.path.join(out_dir, json_fn)
try:
(result, base_filename, xz_face_data, errmsg) = detect_and_annotate(fn, full_output_face_fn, full_output_json_fn, service, 3)
except Exception as e:
print('Exception calling detect_and_annotate on: %s, Details: %s' % (filename, str(e)))
continue
if not result:
google_result = 'Failure'
else:
google_result = 'Success'
num_faces = len(xz_face_data)
if num_faces == 1:
try:
(response_code, z_attributes) = kairos_face.enroll_face(id, 'gallery13', file=fn)
kairos_result = 'Success'
except Exception as e:
msg = 'Exception in enroll_face for %s. Details: %s' % (basename, str(e))
face_idx = 0
gender = z_attributes['gender']['type']
age = z_attributes['age']
confidence = z_attributes['confidence']
headwear_likelihood = xz_face_data[0]['headwear_likelihood']
eye_distance = xz_face_data[0]['eye_distance']
pan_angle = xz_face_data[0]['face_angles']['pan']
#pan_angle = xz_face_angles[0]['pan']
#eye_distance = x_distances[0]
msg = '%s | %s | %s | %d | %s | %s | %s | %s | %s | %s | %s | %s' % (google_result, kairos_result, basename, face_idx, headwear_likelihood, gender, age, confidence, str(pan_angle), str(eye_distance), fn, errmsg)
print(msg)
id += 1
# More than 1 face slightly complicates things ...
else:
# We only care about the "extra" face if it meets certain conditions ...
try:
x_faces_to_keep = select_faces_to_keep(base_filename, xz_face_data)
except Exception as e:
print('Exception in select_faces_to_keep on %s. Details: %s' % (base_filename, str(e)))
continue
if False:
# Iterate over the faces we are going to keep...
for face_idx in xrange(0, num_faces):
try:
(response_code, z_attributes) = kairos_face.enroll_face(id, 'gallery13', file=fn)
kairos_result = 'Success'
except Exception as e:
msg = 'Exception in enroll_face for %s. Details: %s' % (basename, str(e))
gender = z_attributes['gender']['type']
age = z_attributes['age']
confidence = z_attributes['confidence']
msg = '%s | %s | %s | %d | %s | %s | %s | %s | %s | %s' % (google_result, kairos_result, basename, face_idx, headwear_likelihood, gender, age, confidence, fn, errmsg)
print(msg)
id += 1
# if num_faces == 1:
# try:
# (response_code, z_attributes) = kairos_face.enroll_face(id, 'gallery13', file=fn)
# kairos_result = 'Success'
# except Exception as e:
# msg = 'Exception in enroll_face for %s. Details: %s' % (basename, str(e))
# gender = z_attributes['gender']['type']
# age = z_attributes['age']
# confidence = z_attributes['confidence']
# else:
# gender = 'UNKNOWN_DUE_TO_MULTIPLE_FACES'
# age = 'UNKNOWN_DUE_TO_MULTIPLE_FACES'
# confidence = 'UNKNOWN_DUE_TO_MULTIPLE_FACES'
#msg = '%s | %s | %s | %d | %s | %s | %s | %s | %s | %s' % (google_result, kairos_result, basename, num_faces, headwear_likelihood, gender, age, confidence, fn, errmsg)
#print(msg)
#id += 1
#parser = argparse.ArgumentParser(description='Detects faces in the given image.')
#parser.add_argument('input_image', help='the image you\'d like to detect faces in.')
#parser.add_argument('--face', dest='face_output', default='face.jpg', help='the name of the face output file.')
#parser.add_argument('--land', dest='land_output', default='face.jpg', help='the name of the landmark output file.')
#parser.add_argument('--max-results', dest='max_results', default=4, help='the max results of face detection.')
#args = parser.parse_args()
#main(args.input_image, args.face_output, args.land_output, args.max_results)
print('Done!') | [
2,
1986,
62,
15255,
9250,
62,
13297,
18,
13,
9078,
198,
2,
198,
2,
770,
2196,
13423,
257,
2723,
9483,
7268,
24512,
286,
4263,
11,
198,
2,
355,
345,
651,
351,
262,
406,
24160,
6082,
13,
198,
2,
5501,
9483,
318,
3706,
329,
530,
10... | 2.552983 | 9,673 |
from pii_crypt.pii_crypt import PIICrypt
| [
6738,
279,
4178,
62,
29609,
13,
79,
4178,
62,
29609,
1330,
30434,
2149,
6012,
198
] | 2.733333 | 15 |
from universal import process, clean_csv, add_trans_chunk
import sys
import re
# The infile is the system trancript.
infile = sys.argv[1]
# Using the system output name, the relevant universal format and full transcripts are gathered.
filename_prep = re.search(r"(?<=system-output\/)(.*?)(?=\.txt)", infile).group(0)
outfile = "./results/msoft/universal/msoft-" + filename_prep + ".csv"
trans_file = "./results/msoft/system-trans-text/msoft-" + filename_prep + "-trans.txt"
# setting initial utterance as jiwer can't handle empty strings.
# tsoft = the start of the file.
prev = "tsotf"
utt = ""
# Microsoft specific processing.
# This function extracts each new hypothesis with its time and processes it.
# Simultaneously, finalised hypotheses are stored for final WER calculations.
with open(infile, 'r') as f:
for line in f:
if line.startswith("RECOGNIZING"):
relevant_info = re.search(r"\{(.*?)\}", line).group(0)
dictionary = eval(relevant_info)
time = dictionary.get("Duration") + dictionary.get("Offset")
utt = dictionary.get("Text")
process(outfile, time, prev, utt)
prev = utt
elif line.startswith("JSON"):
prev = "tsotf"
transcript = re.search(r"(?<=DisplayText\":\")(.*?)(?=\")", line)
if transcript:
transcript = transcript.group(0)
add_trans_chunk(trans_file, transcript.lower())
# Universal output finalised.
clean_csv(outfile) | [
6738,
10112,
1330,
1429,
11,
3424,
62,
40664,
11,
751,
62,
7645,
62,
354,
2954,
198,
11748,
25064,
198,
11748,
302,
198,
198,
2,
383,
1167,
576,
318,
262,
1080,
491,
1192,
1968,
13,
198,
259,
7753,
796,
25064,
13,
853,
85,
58,
16,... | 2.594128 | 579 |
/home/sheldon/anaconda3/lib/python3.6/tokenize.py | [
14,
11195,
14,
7091,
25900,
14,
272,
330,
13533,
18,
14,
8019,
14,
29412,
18,
13,
21,
14,
30001,
1096,
13,
9078
] | 2.227273 | 22 |
import pytest
import copy
from pyconcrete import rebar
@pytest.fixture
@pytest.fixture
@pytest.fixture
# def test_real_length(r1, lr1, ur1):
# assert r1.real_length == 5
# assert lr1.real_length == 200
# assert ur1.real_length == 250
| [
11748,
12972,
9288,
198,
11748,
4866,
198,
198,
6738,
12972,
1102,
38669,
1330,
3405,
283,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
628,
628,
628,
... | 2.368421 | 114 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Victor\Dropbox\DFR\film2dose\qt_ui\edit_grid.ui'
#
# Created: Tue Sep 29 14:53:43 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
34,
7479,
14490,
59,
21944,
273,
59,
26932,
3524,
59,
8068,
49,
59,
26240,
17,
34436,
59,
39568,
62,
9019,
5... | 2.56 | 125 |
#-------------------------------------------------------------------------------
# Name: mwts_makedat
# Purpose: mwts dat file creation
#
# Author: isken
#
# Created: 28/09/2011
# Copyright: (c) isken 2011
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import sys
import StringIO
import yaml
from numpy import *
import csv
import json
import itertools
"""
mwts_makedat is a module for reading input files for mwts problems
and creating an AMPL/GMPL data file.
It is a replacement for the ancient createssdat.c program that was
used to create AMPL/GMPL dat files for one week tour scheduling problems.
"""
def create_weekend_base(n_weeks):
"""
Generate basis for cartesion product of [0,1] lists based
on number of weeks in scheduling problem. Each list
element is one week. The tuple of binary values represent the
first and second day of the weekend for that week. A 1 means
the day is worked, a 0 means it is off.
Input:
n_weeks - number of weeks in scheduling horizon
Output:
Result is all the possible n_weeks weekends worked patterns.
Example: n_weeks = 4 --> 256 possible weekends worked patterns. This
exhaustive list can later be filtered to only include desirable patterns.
"""
basis_list = [[0,0],[1,0],[0,1],[1,1]]
mw_basis_list = []
for i in range(n_weeks):
mw_basis_list.append(basis_list)
# Use itertools to create the n_weeks cartesion product of the basis_list.
return list(itertools.product(*mw_basis_list))
def filterpatterns(pattern,ttnum,wkendtype,ttspec):
"""
Creates a sequence of binary values to be used for list filtering. This
function will contain the various rules used to filter out weekend days
worked patterns that we don't want to allow.
For now I'm hard coding in rules but need to develop an approach to
flexibly specifiying rules to apply to filter out undesirable weekends
worked patterns.
Inputs:
x - list of 2-tuples representing weekend days worked. Each list
element is one week. The tuple of binary values represent the
first and second day of the weekend for that week. A 1 means
the day is worked, a 0 means it is off.
type - 1 --> weekend consists of Saturday and Sunday
2 --> weekend consists of Friday and Saturday
max_days_worked - max # of weekend days worked over horizon
max_wkends_worked - max # of weekends in which >= 1 day worked
half_weekends_ok - True or False
max_consec_wkends - max consecutive weeks with >= 1 day worked
Examples:
(1) Type 1, work every other weekend
pattern = [(0,1),(1,0),(0,1),(1,0)], type = 1
(2) Type 1, work every other weekend
pattern = [(1,1),(0,0),(1,1),(0,0)], type = 2
Output: True --> keep pattern
False --> discard pattern
"""
n_weeks = len(pattern)
keep = True
tourtype = [t for t in ttspec['tourtypes'] if t['ttnum'] == ttnum]
# No more than max_days_worked over the scheduling horizon
max_days_worked = tourtype[0]['max_days_worked']
if not (sum(pattern) <= max_days_worked):
keep = False
# No consecutive weekends with one or more days worked
window = ntuples(pattern,2)
for pair in window:
if sum(pair) > 2:
keep = False
# No half-weekends
if not tourtype[0]['half_weekends_ok'] and num_half_weekends(pattern,wkendtype) > 0:
keep = False
return keep
def num_full_weekends(x,wkendtype):
"""
Returns number of full weekends (both days) worked in a given weekends worked pattern.
Inputs:
x - list of 2-tuples representing weekend days worked. Each list
element is one week. The tuple of binary values represent the
first and second day of the weekend for that week. A 1 means
the day is worked, a 0 means it is off.
wkend_type - 1 --> weekend consists of Saturday and Sunday
2 --> weekend consists of Friday and Saturday
Output:
Number of full weekends worked
Example:
n = num_full_weekends([(0,1),(1,0),(0,1),(1,0)],1)
# n = 2
n = num_full_weekends([(0,1),(1,0),(0,1),(0,0)],1)
# n = 1
n = num_full_weekends([(1,1),(1,0),(1,1),(1,0)],2)
# n = 2
n = num_full_weekends([(0,1),(1,0),(0,1),(0,0)],2)
# n = 0
"""
if wkendtype == 2:
L1 = [sum(j) for j in x]
n = sum([(1 if j == 2 else 0) for j in L1])
else:
n = 0
for j in range(len(x)):
if j < len(x) - 1:
if x[j][1] == 1 and x[j+1][0] == 1:
n += 1
else:
if x[j][1] == 1 and x[0][0] == 1:
n += 1
return n
def num_half_weekends(x,wkendtype):
    """
    Count the half weekends (exactly one weekend day worked) in a weekends
    worked pattern.

    Inputs:
        x - list of 2-tuples of 0/1 values, one tuple per week. The tuple
            holds the first and second weekend day of that week; 1 means
            the day is worked, 0 means it is off.
        wkendtype - 1 --> weekend consists of Saturday and Sunday; the pair
                          spans the second day of week j and the first day
                          of week j+1 (the final week wraps to week 0)
                    2 --> weekend consists of Friday and Saturday; both
                          days fall within the same week tuple
    Output:
        Number of half weekends worked

    Example:
        n = num_half_weekends([(0,1),(1,0),(0,1),(0,0)],1)   # n = 1
        n = num_half_weekends([(0,1),(1,0),(0,1),(0,0)],2)   # n = 3
    """
    if wkendtype == 2:
        # Fri-Sat weekend: half weekend <=> exactly one day of the week worked.
        return sum([1 for wk in x if sum(wk) == 1])
    # Sat-Sun weekend: pair day 2 of week j with day 1 of week j+1,
    # wrapping the last week around to the first.
    n_weeks = len(x)
    total = 0
    for j in range(n_weeks):
        nxt = (j + 1) % n_weeks
        if x[j][1] + x[nxt][0] == 1:
            total += 1
    return total
##param dmd_staff := [*,*,1] :
## 1 2 3 4 5 6 7 :=
## 1 5.0 4.0 4.0 4.0 5.0 5.0 5.0
## 2 5.0 4.0 4.0 4.0 5.0 5.0 5.0
## 3 5.0 4.0 4.0 4.0 5.0 5.0 5.0
## 4 5.0 4.0 4.0 4.0 5.0 5.0 5.0
def scalar_to_param(pname,pvalue,isStringIO=True):
    """
    Render a scalar value as a GMPL dat parameter statement.

    Inputs:
        pname - string name of the parameter in the GMPL file
        pvalue - scalar value of the parameter
        isStringIO - True to route the text through a StringIO buffer before
            returning its contents, False to return the string directly
    Output:
        GMPL dat code for the scalar parameter, e.g.::

            param n_prds_per_day := 48;
    """
    statement = 'param {0} := {1};\n'.format(pname, str(pvalue))
    if not isStringIO:
        return statement
    buf = StringIO.StringIO()
    buf.write(statement)
    return buf.getvalue()
def list_to_param(pname,plist,reverseidx=False,isStringIO=True):
    """
    Render a (possibly multi-dimensional) list as a GMPL dat parameter.

    Inputs:
        pname - string name of the parameter in the GMPL file
        plist - list containing the parameter values (may be N-dimensional)
        reverseidx - True to emit each index tuple in reversed order
            (essentially transposing the matrix)
        isStringIO - True to route the text through a StringIO buffer before
            returning its contents, False to return the string directly
    Output:
        GMPL dat code for the list parameter, e.g.::

            param midnight_thresh:=
            1 100
            2 100
            3 100
            ;
    """
    lines = ['param ' + pname + ':=\n']
    # ndenumerate walks the array yielding (index-tuple, value) pairs;
    # GMPL indexes are 1-based, hence the +1 shift.
    for idx, val in ndenumerate(array(plist)):
        gmpl_idx = [str(i + 1) for i in idx]
        if reverseidx:
            gmpl_idx.reverse()
        lines.append(' '.join(gmpl_idx) + ' ' + str(val) + '\n')
    lines.append(';\n')
    param = ''.join(lines)
    if not isStringIO:
        return param
    buf = StringIO.StringIO()
    buf.write(param)
    return buf.getvalue()
def shiftlencons_to_param(pname,ttspec,plist,isStringIO=True):
    """
    Render shift length specific days/periods worked bounds as a GMPL dat
    parameter.

    The per tour type lists in plist may be jagged (each tour type can allow
    a different number of shift lengths), so the generic list_to_param()
    function, which needs a rectangular numpy array, cannot be used here.

    Inputs:
        pname - string name of the parameter in the GMPL file
        ttspec - tour type spec object created from the YAML mix file
        plist - jagged list indexed as [tour type][shift length][week]
        isStringIO - True to route the text through a StringIO buffer before
            returning its contents, False to return the string directly
    Output:
        GMPL dat code, e.g.::

            param tt_shiftlen_min_dys_weeks:=
            1 6 1 3
            1 6 2 5
            ;
    """
    lengths = get_lengths_from_mix(ttspec)
    rows = ['param ' + pname + ':=\n']
    for t in range(len(plist)):                    # tour types in the mix
        ttnum = ttspec['tourtypes'][t]['ttnum']
        for s in range(len(plist[t])):             # shift lengths of this tour type
            numbins = ttspec['tourtypes'][t]['shiftlengths'][s]['numbins']
            len_x = lengths.index(numbins) + 1     # 1-based GMPL length index
            for w, bound in enumerate(plist[t][s]):
                fields = [str(ttnum), str(len_x), str(w + 1), str(bound)]
                # The trailing space before the newline reproduces the
                # original output byte for byte.
                rows.append(' '.join(fields) + ' ' + '\n')
    rows.append(';\n')
    param = ''.join(rows)
    if not isStringIO:
        return param
    buf = StringIO.StringIO()
    buf.write(param)
    return buf.getvalue()
def list_to_indexedset(sname,slist,isStringIO=True):
    """
    Render a list of lists as a family of GMPL indexed sets.

    Inputs:
        sname - string name of the set in the GMPL file
        slist - list of lists; element k holds the members of set sname[k+1]
        isStringIO - True to route the text through a StringIO buffer before
            returning its contents, False to return the string directly
    Output:
        GMPL dat code, e.g.::

            set tt_length_x[1] :=
            5 6;
    """
    chunks = []
    for pos, members in enumerate(slist):
        # GMPL set indexes are 1-based.
        chunks.append('set ' + sname + '[' + str(pos + 1) + '] :=\n')
        chunks.append(' '.join([str(m) for m in members]) + ';\n')
    gset = ''.join(chunks)
    if not isStringIO:
        return gset
    buf = StringIO.StringIO()
    buf.write(gset)
    return buf.getvalue()
def mix_days_prds_params(ttspec,pname,nonshiftlen_pname,shiftlen_pname,isStringIO=True):
    """
    Render one of the tour type days/periods worked bounds (cumulative or
    non-cumulative, shift length specific or not) as its GMPL parameter
    representation.

    This is a wrapper: shift length specific parameters (those whose GMPL
    name contains 'shiftlen') are delegated to shiftlencons_to_param(),
    everything else to list_to_param().

    Inputs:
        ttspec - tour type spec object created from the YAML mix file
        pname - string name of the parameter in the GMPL file
        nonshiftlen_pname - key of the non shift length specific value in the
            YAML mix file
        shiftlen_pname - key of the shift length specific value in the YAML
            mix file
        isStringIO - accepted for interface symmetry with the other *_to_param
            helpers (the delegate functions are called with their defaults)
    Output:
        GMPL dat code for the requested parameter.
    """
    bounds = []
    shiftlen_specific = False
    for tt in ttspec['tourtypes']:
        if 'shiftlen' in pname:
            # Shift length specific: collect one value list per shift length.
            shiftlen_specific = True
            bounds.append([sl[shiftlen_pname] for sl in tt['shiftlengths']])
        elif nonshiftlen_pname in tt:
            bounds.append(tt[nonshiftlen_pname])
        else:
            # Fall back to the first shift length's value when the tour type
            # does not carry the non shift length key.
            bounds.append(tt['shiftlengths'][0][shiftlen_pname])
    if shiftlen_specific:
        return shiftlencons_to_param(pname, ttspec, bounds)
    return list_to_param(pname, bounds)
def mix_to_dat(probspec,isStringIO=True):
    """
    Read the YAML tour type mix file referenced by the problem spec and
    generate all of the GMPL dat components associated with the mix inputs.

    Inputs:
        probspec - problem spec object (parsed YAML scenario file). Must
            contain reqd_files/filename_mix; may contain
            opt_files/filename_ttbounds with tour type bound overrides.
        isStringIO - retained for interface compatibility; the dat code is
            returned as a plain string in either case (the original's False
            branch returned mixout.read(), which is always '' because the
            buffer position sits at the end after writing).
    Output:
        GMPL dat code (string) for all mix related sets and parameters.
    """
    # Open the mix file and load it into a YAML object.
    # NOTE(review): yaml.load on untrusted input is unsafe; consider safe_load.
    fn_mix = probspec['reqd_files']['filename_mix']
    fin = open(fn_mix, "r")
    ttspec = yaml.load(fin)
    fin.close()  # FIX: the file handle was never closed in the original

    mixout = StringIO.StringIO()

    # Distinct shift lengths, ascending. FIX (consistency): use the shared
    # helper instead of duplicating the set-building logic inline.
    lengths = get_lengths_from_mix(ttspec)
    len_param = list_to_param('lengths', lengths)

    # Number of shift lengths
    n_lengths = size(lengths)
    numlen_param = scalar_to_param('n_lengths', n_lengths)

    # Number of tour types
    n_ttypes = size(ttspec['tourtypes'])
    numttypes_param = scalar_to_param('n_tts', n_ttypes)

    # Tour type length sets (1-based indexes into the global length list)
    lenxset = get_length_x_from_mix(ttspec)
    lenxset_set = list_to_indexedset('tt_length_x', lenxset)

    # Midnight threshold for weekend assignments
    midthresholds = [m['midnight_thresh'] for m in ttspec['tourtypes']]
    midthresh_param = list_to_param('midnight_thresh', midthresholds)

    # Parttime flag and parttime fraction bound
    ptflags = [m['is_parttime'] for m in ttspec['tourtypes']]
    ptflags_param = list_to_param('tt_parttime', ptflags)
    ptfrac_param = scalar_to_param('max_parttime_frac',
                                   ttspec['max_parttime_frac'])

    # Global start window width
    width_param = scalar_to_param('g_start_window_width',
                                  ttspec['g_start_window_width'])

    # Lower and upper bounds on number scheduled. An optional tour type
    # bounds file overrides the bounds carried in the mix file.
    if 'opt_files' in probspec and 'filename_ttbounds' in probspec['opt_files']:
        fn_ttbnds = probspec['opt_files']['filename_ttbounds']
        fin_ttbnds = open(fn_ttbnds, "r")
        ttbndsspec = yaml.load(fin_ttbnds)
        fin_ttbnds.close()  # FIX: handle was never closed in the original
        bnd_tourtypes = ttbndsspec['tourtypes']
    else:
        bnd_tourtypes = ttspec['tourtypes']
    tt_lb_param = list_to_param('tt_lb', [m['tt_lb'] for m in bnd_tourtypes])
    tt_ub_param = list_to_param('tt_ub', [m['tt_ub'] for m in bnd_tourtypes])

    # Cost multiplier
    tt_cost_multiplier = [m['tt_cost_multiplier'] for m in ttspec['tourtypes']]
    tt_cost_multiplier_param = list_to_param('tt_cost_multiplier',
                                             tt_cost_multiplier)

    # Min and max days and periods worked in each individual week.
    # (FIX: the original comments for this group and the cumulative group
    # below were swapped.)
    tt_min_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_dys_weeks', 'min_days_week', 'min_shiftlen_days_week')
    tt_max_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_dys_weeks', 'max_days_week', 'max_shiftlen_days_week')
    tt_min_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_prds_weeks', 'min_prds_week', 'min_shiftlen_prds_week')
    tt_max_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_prds_weeks', 'max_prds_week', 'max_shiftlen_prds_week')

    # Same weekly bounds, per shift length workable in the tour type
    tt_shiftlen_min_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_dys_weeks', 'min_days_week', 'min_shiftlen_days_week')
    tt_shiftlen_max_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_dys_weeks', 'max_days_week', 'max_shiftlen_days_week')
    tt_shiftlen_min_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_prds_weeks', 'min_prds_week', 'min_shiftlen_prds_week')
    tt_shiftlen_max_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_prds_weeks', 'max_prds_week', 'max_shiftlen_prds_week')

    # Min and max cumulative days and periods worked over the weeks
    tt_min_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_cumul_dys_weeks', 'min_cumul_days_week',
        'min_shiftlen_cumul_days_week')
    tt_max_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_cumul_dys_weeks', 'max_cumul_days_week',
        'max_shiftlen_cumul_days_week')
    tt_min_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_cumul_prds_weeks', 'min_cumul_prds_week',
        'min_shiftlen_cumul_prds_week')
    tt_max_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_cumul_prds_weeks', 'max_cumul_prds_week',
        'max_shiftlen_cumul_prds_week')

    # Same cumulative bounds, per shift length workable in the tour type
    tt_shiftlen_min_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_cumul_dys_weeks', 'min_cumul_days_week',
        'min_shiftlen_cumul_days_week')
    tt_shiftlen_max_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_cumul_dys_weeks', 'max_cumul_days_week',
        'max_shiftlen_cumul_days_week')
    tt_shiftlen_min_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_cumul_prds_weeks', 'min_cumul_prds_week',
        'min_shiftlen_cumul_prds_week')
    tt_shiftlen_max_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_cumul_prds_weeks', 'max_cumul_prds_week',
        'max_shiftlen_cumul_prds_week')

    # Emit the pieces in the exact order the original print >> sequence used.
    # Each piece already ends in a newline; the extra '\n' reproduces the
    # blank line print emitted after each piece. (".write(piece + '\n')" is a
    # portable, behavior-identical replacement for the Python 2-only
    # "print >>mixout, piece" statements.)
    pieces = [numlen_param, len_param, numttypes_param, lenxset_set,
              midthresh_param, tt_lb_param, tt_ub_param,
              tt_cost_multiplier_param, ptflags_param, ptfrac_param,
              width_param,
              tt_min_cumul_dys_weeks_param, tt_max_cumul_dys_weeks_param,
              tt_min_cumul_prds_weeks_param, tt_max_cumul_prds_weeks_param,
              tt_min_dys_weeks_param, tt_max_dys_weeks_param,
              tt_min_prds_weeks_param, tt_max_prds_weeks_param,
              tt_shiftlen_min_dys_weeks_param,
              tt_shiftlen_max_dys_weeks_param,
              tt_shiftlen_min_prds_weeks_param,
              tt_shiftlen_max_prds_weeks_param,
              tt_shiftlen_min_cumul_dys_weeks_param,
              tt_shiftlen_max_cumul_dys_weeks_param,
              tt_shiftlen_min_cumul_prds_weeks_param,
              tt_shiftlen_max_cumul_prds_weeks_param]
    for piece in pieces:
        mixout.write(piece + '\n')

    # FIX: the original returned mixout.read() when isStringIO was False,
    # which always yields '' (buffer position at end). getvalue() returns the
    # complete contents regardless of position.
    return mixout.getvalue()
def get_length_x_from_mix(ttspec):
    """
    Build, for each tour type, the sorted list of 1-based shift length
    indexes (into the global ascending shift length list) that the tour type
    may use.

    Inputs:
        ttspec - yaml representation of tour type mix parameters
    Output:
        A list of lists whose elements are the 1-based shift length indexes
        for each tour type.

    Example: [[1,2],[2]]
    """
    # FIX (consistency): reuse the shared helper instead of duplicating the
    # distinct-lengths/sort logic inline as the original did.
    lengths = get_lengths_from_mix(ttspec)
    lenxset = []
    for tt in ttspec['tourtypes']:
        # +1 converts the 0-based Python index into the 1-based GMPL index.
        shifts = sorted(lengths.index(sl['numbins']) + 1
                        for sl in tt['shiftlengths'])
        lenxset.append(shifts)
    return lenxset
def get_lengths_from_mix(ttspec):
    """
    Return the distinct shift lengths appearing anywhere in the tour type
    mix, sorted ascending.

    Inputs:
        ttspec - yaml representation of tour type mix parameters
    Output:
        A sorted list of shift lengths.

    Example: [8, 16, 20, 24]
    """
    distinct = set()
    for tt in ttspec['tourtypes']:
        for sl in tt['shiftlengths']:
            distinct.add(sl['numbins'])
    return sorted(distinct)
def csvrow_to_yaml(fn_csv, isStringIO=True):
    """
    Convert the rows of a comma (or semicolon) delimited file into YAML
    sequence entries that can be inserted into the YAML mix file.

    This procedure does not know or care what each row means; it simply
    converts each delimited row of numbers into a YAML list entry.

    Inputs:
        fn_csv - csv filename containing rows of size n_periods_per_day
        isStringIO - True to route the text through a StringIO buffer before
            returning its contents, False to return the string directly
    Output:
        YAML version of the csv rows, e.g. the row ``0, 1, 0, 0`` becomes
        `` - [0.0, 1.0, 0.0, 0.0]``.
    """
    # "with" guarantees the handle is released.
    # FIX: the original ended with "fin.close" (no parentheses), which merely
    # evaluates the bound method without calling it, leaking the file handle.
    with open(fn_csv, 'r') as fin:
        # Detect comma vs semicolon delimiting from a sample of the file,
        # then rewind for the real parse.
        dialect = csv.Sniffer().sniff(fin.read(1024), delimiters=',;')
        fin.seek(0)
        # FIX: list comprehension instead of map() so each row is a concrete
        # list of floats -- identical under Python 2 and also correct under
        # Python 3, where map() returns an iterator and str(row) would emit
        # "<map object ...>".
        rows = [[float(v) for v in row] for row in csv.reader(fin, dialect)]
    yamlstr = ''
    for row in rows:
        yamlstr += (' - ' + str(row) + '\n')
    if isStringIO:
        yamlout = StringIO.StringIO()
        yamlout.write(yamlstr)
        return yamlout.getvalue()
    else:
        return yamlstr
def ash_to_dat(fn_yni,fn_mix,isStringIO=True):
    """
    Convert the allowable shift start time inputs from the tour type mix
    file into a GMPL dat parameter.

    Inputs:
        fn_yni - filename of yaml ini scenario file
        fn_mix - filename of yaml tour type mix file
        isStringIO - True to route the text through a StringIO buffer before
            returning its contents, False to return the string directly
    Output:
        GMPL dat code for param allow_start. Each emitted row holds:
        period, day, shift length index, tour type number, allowed value.

        Example::

            param allow_start:=
            1 1 1 2 0.0
            ...
            13 1 1 2 1.0
    """
    # NOTE(review): probspec is loaded but never used below; kept so the
    # scenario file is still required/parsed exactly as before -- confirm
    # before removing.
    fin_yni = open(fn_yni, "r")
    probspec = yaml.load(fin_yni)
    fin_mix = open(fn_mix, "r")
    ttspec = yaml.load(fin_mix)

    # Sorted distinct shift lengths; positions give the GMPL length index.
    lengths = get_lengths_from_mix(ttspec)

    rows = []
    for tt in ttspec['tourtypes']:
        for sl in tt['shiftlengths']:
            # Invariant for all (day, period) entries of this shift length.
            length_x = lengths.index(sl['numbins']) + 1
            starttimes = sl['allowable_starttimes']
            for j, day in enumerate(starttimes):
                for i, allowed in enumerate(day):
                    rows.append([i + 1, j + 1, length_x, tt['ttnum'], allowed])

    lines = ['param allow_start:=\n']
    for row in rows:
        lines.append(' '.join(map(str, row)) + '\n')
    lines.append(';\n')
    param = ''.join(lines)
    if not isStringIO:
        return param
    buf = StringIO.StringIO()
    buf.write(param)
    return buf.getvalue()
## p = [(0,1),(1,1),(0,0),(1,0)]
## n = num_full_weekends(p,1)
def mwts_createdat(fn_yni,fn_dat):
    """
    Create a GMPL dat file for an mwts problem instance.

    Inputs:
        fn_yni - Name of the YAML scenario (ini) input file for the problem
        fn_dat - Name of the GMPL dat file to create
    Output:
        void -- the assembled dat code is written to fn_dat.
    """
    # NOTE(review): yaml.load on untrusted input is unsafe; consider safe_load.
    fin = open(fn_yni, "r")
    probspec = yaml.load(fin)
    fin.close()  # FIX: the file handle was never closed in the original

    # General (time) section
    num_prds_per_day_param = scalar_to_param('n_prds_per_day',
                                             probspec['time']['n_prds_per_day'])
    num_days_per_week_param = scalar_to_param('n_days_per_week',
                                              probspec['time']['n_days_per_week'])
    num_weeks_param = scalar_to_param('n_weeks',
                                      probspec['time']['n_weeks'])

    # Cost related
    labor_budget_param = scalar_to_param('labor_budget',
                                         probspec['cost']['labor_budget'])
    cu1_param = scalar_to_param('cu1', probspec['cost']['understaff_cost_1'])
    cu2_param = scalar_to_param('cu2', probspec['cost']['understaff_cost_2'])
    usb_param = scalar_to_param('usb', probspec['cost']['understaff_1_lb'])

    # Demand and minimum staffing sections
    dmd_dat = dmd_min_to_dat('dmd_staff',
                             probspec['reqd_files']['filename_dmd'],
                             mode='unsliced')
    min_dat = dmd_min_to_dat('min_staff',
                             probspec['reqd_files']['filename_min'],
                             mode='unsliced')

    # Mix, weekends-worked patterns and allowable shift start time sections
    mix_dat = mix_to_dat(probspec)
    wkends_dat = wkends_to_dat(fn_yni, probspec['reqd_files']['filename_mix'])
    ash_dat = ash_to_dat(fn_yni, probspec['reqd_files']['filename_mix'])

    # Put the pieces together in the order the original emitted them. The
    # extra '\n' per piece reproduces the blank line the Python 2-only
    # "print >>dat, piece" statements wrote after each piece.
    pieces = [num_prds_per_day_param, num_days_per_week_param,
              num_weeks_param, labor_budget_param, cu1_param, cu2_param,
              usb_param, mix_dat, dmd_dat, min_dat, wkends_dat, ash_dat]
    dat = StringIO.StringIO()
    for piece in pieces:
        dat.write(piece + '\n')

    fout = open(fn_dat, "w")
    fout.write(dat.getvalue() + '\n')
    fout.close()
# Script entry point; main() is defined earlier in this file (not shown here).
if __name__ == '__main__':
    main()
| [
2,
10097,
24305,
198,
2,
6530,
25,
220,
220,
220,
220,
220,
220,
220,
285,
86,
912,
62,
76,
4335,
265,
198,
2,
32039,
25,
220,
220,
220,
220,
285,
86,
912,
4818,
2393,
6282,
198,
2,
198,
2,
6434,
25,
220,
220,
220,
220,
220,
... | 2.139221 | 12,728 |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:bandit_38]
# language: python
# name: conda-env-bandit_38-py
# ---
# Exploratory jupytext (percent-format) notebook script: load a PRMS6
# control file, inspect a few control variables/modules, and write the
# control file back out.
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
from pyPRMS.ControlFile import ControlFile
# %%
# NOTE(review): hard-coded local paths -- adjust for your environment.
base_dir = '/Users/pnorton/Projects/National_Hydrology_Model/src/tests_prms6/sagehen/prms6'
control_dir = f'{base_dir}'
control_file = f'{control_dir}/prms6.control'
# %%
# Parse the control file into a ControlFile object.
ctl = ControlFile(control_file)
# %%
ctl.control_variables.keys()
# %%
print(ctl.get('windspeed_day'))
# %%
modules_used = ctl.modules.values()
print(modules_used)
# %%
ctl.modules.items()
# %%
ctl.get('temp_module').values
# %%
# %%
# Write the control file back to the same path it was read from.
ctl.write(f'{control_dir}/prms6.control')
# %%
| [
2,
11420,
198,
2,
474,
929,
88,
353,
25,
198,
2,
220,
220,
474,
929,
88,
5239,
25,
198,
2,
220,
220,
220,
220,
17519,
25,
20966,
2047,
65,
11,
9078,
25,
25067,
198,
2,
220,
220,
220,
220,
2420,
62,
15603,
341,
25,
198,
2,
22... | 2.285 | 400 |
import random

# Simple arithmetic quiz driver: presents a level menu, runs the quiz and
# optionally appends the score to results.txt.
# NOTE(review): choose_level() and check_result() are not visible here --
# presumably defined earlier in this file; confirm.
level1 = "simple operations with numbers 2-9"
level2 = "integral squares of 11-29"
message = f"Which level do you want? Enter a number:\n1 - {level1}\n2 - {level2}\n "
# Re-prompt until choose_level() accepts the input.
user_choice = choose_level(input(message))
while user_choice == "Incorrect format.":
    print(user_choice)
    user_choice = choose_level(input(message))
# n is the user's mark out of 5.
n = check_result(user_choice)
save_res = input(f"Your mark is {n}/5. Would you like to save the result? Enter yes or no.\n")
if save_res.lower() == "yes" or save_res.lower() == "y":
    name = input("What is your name?\n")
    with open("results.txt", "a") as file:
        file.write(f"{name}: {n}/5 in level {user_choice} ({level1 if user_choice == 1 else level2})")
    print(f'The results are saved in "{file.name}"')
11748,
4738,
628,
628,
198,
198,
5715,
16,
796,
366,
36439,
4560,
351,
3146,
362,
12,
24,
1,
198,
5715,
17,
796,
366,
18908,
1373,
24438,
286,
1367,
12,
1959,
1,
198,
20500,
796,
277,
1,
13828,
1241,
466,
345,
765,
30,
6062,
257,
... | 2.730496 | 282 |
import gc
import mock
import pytest
import pygear
from . import noop_serializer
@pytest.fixture
| [
11748,
308,
66,
198,
198,
11748,
15290,
198,
11748,
12972,
9288,
198,
11748,
12972,
31763,
198,
198,
6738,
764,
1330,
645,
404,
62,
46911,
7509,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
628,
628,
628,
628,
628,
628,
198
] | 2.780488 | 41 |
import unittest

# NOTE(review): 'vocab' appears unused in this visible portion -- presumably
# exercised by test cases defined elsewhere in the module; confirm.
from happyentropy import vocab

if __name__ == '__main__':
    # Discover and run any unittest.TestCase classes defined in this module.
    unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
3772,
298,
28338,
1330,
12776,
397,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.818182 | 33 |
import os
import sys
import importlib.util
from typing import Any, Callable, Tuple
from collections import defaultdict
"""
PLANNING
So I want the plugin manager to work really similar to how the hook system in Wordpress works:
https://developer.wordpress.org/plugins/hooks/
Thats because I already have a lot of experience with that system and I kind of really like it. It just makes sense
and is rather intuitive to use. The main point is that it uses hooks: A hook is essentially a point in the execution
of the main program where a plugin can insert custom functionality to be executed. Wordpress differs between two
types of hooks: action hooks simply allow the execution of code, they dont have a return value. filter hooks allow
the modification of certain important values of the main program.
"""
class PluginManager:
    """
    This class represents the plugin manager which is responsible for managing the plugin related functionality for
    ufotest. This mainly includes the dynamic discovery and loading of the plugins at the beginning of the program
    execution and the management and application of the additional action and filter hooks added by those plugins.

    **UFOTEST PLUGIN SYSTEM**

    The ufotest plugin system is strongly influenced by the Wordpress plugin system
    (https://developer.wordpress.org/plugins/hooks/). It uses so called hooks to enable plugins to insert custom
    functions to be executed at vital points during the ufotest main program routine. A plugin simply has to decorate
    a function with the according hook decorator and supply a string identifier for which hook to use. The function
    will then be registered within the plugin manager and wait there until the according hook is actually called from
    within the main routine.

    The plugin system differentiates between two types of hooks: *action* hooks don't have a return value; if a
    function is hooked into an action hook, this just means that it will be executed at a certain point. *filter*
    hooks on the other side have a return value. Filter hooks present the possibility to modify certain key data
    structures during the execution of the main ufotest routine.

    **USING THE PLUGIN MANAGER**

    Alongside the config instance for ufotest, the plugin manager instance is the second most important thing. It has
    to be accessible by all parts of the code at any time. This is because the individual parts of the code actually
    invoke the special hooks by referencing the plugin manager. To create a new instance of the pm it only needs the
    folder which is supposed to contain the plugins. After creating the instance, the "load_plugins" method has to be
    used to actually load the plugins from that folder. At this point the internal dicts "filters" and "actions"
    already contain all the callable instances linked to the specific hooks, just waiting to be executed. Invoking a
    hook within the main routine can be done with the "do_action" and "apply_filter" methods.

    .. code-block:: python

        pm = PluginManager("/path/to/plugins")
        pm.load_plugins()

        # Some time later
        data = {}
        data_filtered = pm.apply_filter("custom_filter", data)
        pm.do_action("custom_action")

    **LOADING THE PLUGINS**

    The plugins themselves are dynamically imported during the runtime of the ufotest routine. The plugin manager
    will attempt to import the plugins from the folder which was passed to its constructor. Some important
    assumptions are made about what constitutes a valid plugin:

    - Each plugin is assumed to be a FOLDER. The folder name will be used as the plugin name, by which it will be
      identified
    - Within each plugin folder there has to be at least a "main.py" python module. This is what is actually imported
      by the plugin system. Consequently, all of its top level code will be executed on import time.
    - Important detail: Folders starting with an underscore will be ignored! This is mainly a pragmatic choice to
      make sure that the plugin system does not attempt to import __pycache__ but can also be used to quickly
      disable plugins
    """

    def __init__(self, plugin_folder_path: str) -> None:
        """
        Creates a new plugin manager for the plugin folder *plugin_folder_path*.

        FIX: the class previously defined no __init__ at all, even though its
        own docstring documents ``PluginManager("/path/to/plugins")`` and the
        methods read self.plugin_folder_path / self.filters / self.actions /
        self.plugins -- a fresh instance raised AttributeError (and passing
        the path raised TypeError). The initial values mirror reset().

        :param plugin_folder_path: the path of the folder containing the plugin folders
        """
        self.plugin_folder_path = plugin_folder_path
        # hook name -> list of {'callback': Callable, 'priority': int} specs
        self.filters = defaultdict(list)
        self.actions = defaultdict(list)
        # plugin name -> imported "main" module instance
        self.plugins = {}

    # -- For invoking hooks in the main system --

    def do_action(self, hook_name: str, *args, **kwargs) -> None:
        """
        Executes all the plugin functions which have been hooked to the action hook identified by *hook_name*.

        The hook call may include additional positional and keyword arguments which are passed as they are to the
        registered callbacks. Callbacks registered with a higher priority value run first.

        :param hook_name: The string name identifying the hook to be executed.

        :return: void
        """
        if hook_name in self.actions.keys():
            callback_specs = sorted(self.actions[hook_name], key=lambda spec: spec['priority'], reverse=True)
            callbacks = [spec['callback'] for spec in callback_specs]
            for callback in callbacks:
                callback(*args, **kwargs)

    def apply_filter(self, hook_name: str, value: Any, *args, **kwargs) -> Any:
        """
        Applies all the plugin callback functions which have been hooked to the filter hook identified by
        *hook_name* to filter the given *value*. The result of each filter operation is then passed as the value
        argument to the next filter callback in order of priority (higher priority values run first).

        The hook call may include additional positional and keyword arguments which are passed as they are to the
        registered callbacks. If no callback is registered for the hook, *value* is returned unchanged.

        :param hook_name: The string name identifying the hook to be executed.
        :param value: Whatever value that specific hook is supposed to manipulate

        :return: The manipulated version of the passed value argument
        """
        filtered_value = value

        if hook_name in self.filters.keys():
            callback_specs = sorted(self.filters[hook_name], key=lambda spec: spec['priority'], reverse=True)
            callbacks = [spec['callback'] for spec in callback_specs]
            for callback in callbacks:
                filtered_value = callback(filtered_value, *args, **kwargs)

        return filtered_value

    # -- For registering hook callbacks in the plugins --

    def register_filter(self, hook_name: str, callback: Callable, priority: int = 10) -> None:
        """
        Registers a new filter *callback* function for the hook identified by *hook_name* with the given *priority*.

        :param hook_name: The name of the hook for which to register the function
        :param callback: A callable object, which is then actually supposed to be executed when the according hook
            is invoked. Since this is a filter hook, the callback needs to accept at least one argument which is the
            value to be filtered and it also needs to return a manipulated version of this value.
        :param priority: The integer defining the priority of this particular callback. Default is 10.

        :return: void
        """
        self.filters[hook_name].append({
            'callback': callback,
            'priority': priority
        })

    def register_action(self, hook_name: str, callback: Callable, priority: int = 10) -> None:
        """
        Registers a new action *callback* function for the hook identified by *hook_name* with the given *priority*.

        :param hook_name: The name of the hook for which to register the function
        :param callback: A callable object, which is then actually supposed to be executed when the according hook
            is invoked.
        :param priority: The integer defining the priority of this particular callback. Default is 10.

        :return: void
        """
        self.actions[hook_name].append({
            'callback': callback,
            'priority': priority
        })

    # -- Loading the plugins --

    def load_plugins(self):
        """
        Loads all the plugins from the plugin folder which was passed to the constructor of the manager instance.
        After this method was executed, it can be assumed that the internal dicts "filters" and "actions" contain
        all the callable instances linked to the according hook names.

        :return: void
        """
        for root, folders, files in os.walk(self.plugin_folder_path, topdown=True):
            for folder_name in folders:
                # IMPORTANT: We will ignore all folders which start with an underscore. The very practical reason
                # for this is that the plugins folder will almost certainly contain a __pycache__ folder which
                # obviously is not a ufotest plugin and thus would cause an error. But this behaviour is also nice
                # to disable certain plugins without removing them completely: simply rename them to start with an
                # underscore.
                # 2.0.0 - 29.11.2021: We also need to ignore folders which start with a dot, as these are the linux
                # hidden folders. There were issues with runaway .idea and .git folders being attempted for import.
                if folder_name[0] in ['_', '.']:
                    continue

                plugin_path = os.path.join(root, folder_name)
                plugin_name, module = self.import_plugin_by_path(plugin_path)
                self.plugins[plugin_name] = module

            # 2.0.0 - 29.11.2021: So as to not accidentally attempt to import all plugin subfolders as plugins as
            # well. This was previously a bug.
            break

    def reset(self):
        """
        Resets the plugin manager, which means that it unloads all registered filter and action hooks. Also clears
        the internal reference to all the plugin modules.

        :returns: void
        """
        self.filters = defaultdict(list)
        self.actions = defaultdict(list)
        self.plugins = {}

    @classmethod
    def import_plugin_by_path(cls, path: str) -> Tuple[str, Any]:
        """
        Given the path of a folder, this method will attempt to dynamically import a "main.py" module within this
        folder, interpreting it as a ufotest plugin.

        :raise FileNotFoundError: if the folder does not contain a main.py module.

        :return: A tuple of two elements, where the first is the string name of the plugin and the second is the
            imported module instance.
        """
        plugin_name = os.path.basename(path)
        plugin_main_module_path = os.path.join(path, 'main.py')
        if not os.path.exists(plugin_main_module_path):
            raise FileNotFoundError((
                f'Cannot import folder "{plugin_name}" as an ufotest plugin, because the folder does not contain a '
                f'main.py python module. All ufotest plugins need to have a main.py file! This is the top level file '
                f'which is imported to import the plugins functionality into the ufotest system.\n '
                f'Path being checked: {plugin_main_module_path}'
            ))

        # 29.11.2021
        # This will add the parent folder in which the actual plugin folder resides to the python path. This is due
        # to a problem with the plugins: Prior imports of another plugin module from the plugin's main.py module did
        # not work.
        plugin_folder = os.path.dirname(path)
        if plugin_folder not in sys.path:
            sys.path.append(plugin_folder)

        spec = importlib.util.spec_from_file_location(plugin_name, plugin_main_module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        sys.modules[plugin_name] = module

        return plugin_name, module
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
1330,
8019,
13,
22602,
198,
198,
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
309,
29291,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
37811,
198,
6489,
1565,
15871,
198,
198,
2396,
314,
765... | 3.070899 | 3,780 |
"""
"""
import argparse
import os
import h5py
import keras.backend.tensorflow_backend as ktf
import tensorflow as tf
from applications.config import get_siamese_config
from applications.siamesenet import run_net
from core.data import build_siamese_data, load_siamese_data
from core.util import get_session
# add directories in src/ to path
# sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),'..')))
# PARSE ARGUMENTS
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, help='gpu number to use', default='')
parser.add_argument('--gpu_memory_fraction', type=float, help='gpu percentage to use', default='1.0')
parser.add_argument('--dset', type=str, help='dataset to use', default='mnist')
args = parser.parse_args()
ktf.set_session(get_session(args.gpu_memory_fraction))
params = get_siamese_config(args)
data = load_siamese_data(params['data_path'], args.dset)
# RUN Train
run_net(data, params)
| [
37811,
198,
37811,
198,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
198,
11748,
289,
20,
9078,
198,
11748,
41927,
292,
13,
1891,
437,
13,
83,
22854,
11125,
62,
1891,
437,
355,
479,
27110,
198,
11748,
11192,
273,
11125,
355,
48700,
... | 2.864458 | 332 |
# Copyright (c) 2017-2018 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
# AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from django.urls import path, include
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework import permissions
from consent_manager import settings, views
from gui import views as fr_views
from hgw_common.settings import VERSION_REGEX
urlpatterns = [
path(r'', fr_views.home),
path(r'login/', fr_views.perform_login),
path(r'logout/', fr_views.perform_logout),
path(r'admin/', admin.site.urls),
path(r'saml2/', include('djangosaml2.urls')),
path(r'oauth2/', include('oauth2_provider.urls')),
path(r'protocol/', include('hgw_common.urls')),
path(r'confirm_consents/', views.confirm_consent),
path(r'{}/consents/confirm/'.format(VERSION_REGEX), views.ConsentView.as_view({'post': 'confirm'})),
path(r'{}/consents/revoke/'.format(VERSION_REGEX), views.ConsentView.as_view({'post': 'revoke_list'}),
name='consents_revoke'),
path(r'{}/consents/find/'.format(VERSION_REGEX), views.ConsentView.as_view({'get': 'find'}),
name='consents_find'),
path(r'{}/consents/'.format(VERSION_REGEX), views.ConsentView.as_view({'get': 'list', 'post': 'create'}),
name='consents'),
path(r'{}/consents/<str:consent_id>/revoke/'.format(VERSION_REGEX), views.ConsentView.as_view({'post': 'revoke'}),
name='consents_retrieve'),
path(r'{}/consents/<str:consent_id>/'.format(VERSION_REGEX),
views.ConsentView.as_view({'get': 'retrieve', 'put': 'update'}),
name='consents_retrieve'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
2,
15069,
357,
66,
8,
2177,
12,
7908,
327,
6998,
19,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
286,
198,
2,
428,
3788,
290,
3917,
10314,
3696,
357,
1169,
366,
25423,
... | 2.863147 | 928 |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from django.http import HttpResponseRedirect
from sentry.integrations.pipeline import IntegrationPipeline
from sentry.web.frontend.base import BaseView
from sentry.models import Organization
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
6738,
42625,
14208,
13,
26791,
13,
4023,
1330,
2956,
11925,
8189,
198,
6738,
42625,
14208,
13,
4023,
1330,... | 3.666667 | 87 |
lis=[(1,2,3),[1,2],['a','hit','less']]
ils=[x for i in lis for x in i]
print(ils)
| [
27999,
41888,
7,
16,
11,
17,
11,
18,
828,
58,
16,
11,
17,
4357,
17816,
64,
41707,
17945,
41707,
1203,
6,
11907,
201,
198,
201,
198,
201,
198,
4487,
41888,
87,
329,
1312,
287,
300,
271,
329,
2124,
287,
1312,
60,
220,
201,
198,
47... | 1.8 | 50 |
from PyPDF2 import PdfFileReader, PdfFileWriter
file1 = open('pdf1.pdf','rb')
file2 = open('output.pdf','wb')
filer = PdfFileReader(file1)
filew = PdfFileWriter()
#Cloning PDF reader with its properties ; see notes to see other ways to do this
try:
filew.cloneDocumentFromReader(filer)
except Exception as e:
print('Not possible to clone PDF File:',e)
try:
filew.addBookmark('user name',1,color='1',bold=True,italic=False,fit='/Fit')
filew.addLink(1,3,[30,30,70,70],border=['2','2','4','4'],fit='/Fit')
except Exception as e:
print('Not possible to addBookmark or to addLink:',e)
#AddsJava script,executes when user opens it, here : printing windows
try:
filew.addJS("this.print({bUI:true,bSilent:false,bShrinkToFit:true});")
filew.addMetadata({'/Producer':'/User1','/CreationDate':'/30.08.1996','/CreationProgram':'Adobe Acrobat Reader DC (Windows)'})
except Exception as e:
print('Not possible to addJS or Metadata:',e)
#Function updatePageFormFieldValues never worked
#try:
# page = filew.getPage(2)
# filew.updatePageFormFieldValues(page,{'/Texte1':'/Bond'})
#except Exception as e:
# print('Not possible to update Field Values:',e)
try:
filew.removeImages()
filew.removeText()
filew.removeLinks()
except:
print('Not possible to remove Img,Text or Links:',e)
try:
filew.encrypt('1234',owner_pwd='1234',use_128bit=True)
print('File encrypted')
except Exception as e:
print('Not possible to encrypt file:',e)
filew.write(file2)
file2.close()
file1.close()
#MERGING SEVERAL PDF Files :
"""def scan_several_pdf(file1):
global file_c
file_c = open(file1,'rb')
filer = PdfFileReader(file_c)
filew.appendPagesFromReader(filer)
def main():
global filew
filew = PdfFileWriter()
for item in os.listdir():
if '.pdf' in item:
print(item)
scan_several_pdf(item)
output = open('output_file.pdf','wb')
filew.write(output)
output.close()
file_c.close()
main()
Most important Metadata Fields on a PDF File :
1. /Producer
2. /CreationDate
3. /Author
4. /Location
"""
| [
6738,
9485,
20456,
17,
1330,
350,
7568,
8979,
33634,
11,
350,
7568,
8979,
34379,
198,
198,
7753,
16,
796,
1280,
10786,
12315,
16,
13,
12315,
41707,
26145,
11537,
198,
7753,
17,
796,
1280,
10786,
22915,
13,
12315,
41707,
39346,
11537,
19... | 2.665782 | 754 |
import unittest
import commentjson
| [
11748,
555,
715,
395,
198,
198,
11748,
2912,
17752,
628
] | 3.7 | 10 |
import discord # type: ignore
from discord.ext import commands # type: ignore
from discord.commands import permissions # type: ignore
from utils import guild_ids
#'''
#'''
'''
def custom_check():
print("Check!")
print((ctx.channel.permissions_for(ctx.guild.me)).send_messages)
return (ctx.channel.permissions_for(ctx.guild.me)).send_messages
#'''
'''
This is what I used for commands
def allowed_channels(allowed_channels_list):
async def predicate(ctx):
return ctx.guild and (ctx.channel.id in allowed_channels_list)
return commands.check(predicate)
@allowed_channels([PREFIX_COMMAND])
'''
| [
11748,
36446,
220,
1303,
2099,
25,
8856,
198,
6738,
36446,
13,
2302,
1330,
9729,
220,
1303,
2099,
25,
8856,
198,
6738,
36446,
13,
9503,
1746,
1330,
21627,
220,
1303,
2099,
25,
8856,
198,
198,
6738,
3384,
4487,
1330,
19806,
62,
2340,
1... | 2.764192 | 229 |
from typing import Any, Dict, Optional
import base64
import json
import logging
import os
from shlex import quote as shq
from gear.cloud_config import get_global_config
from ....batch_configuration import (DOCKER_ROOT_IMAGE, DOCKER_PREFIX, DEFAULT_NAMESPACE,
INTERNAL_GATEWAY_IP)
from ....file_store import FileStore
from ....instance_config import InstanceConfig
from ...resource_utils import unreserved_worker_data_disk_size_gib
from ..resource_utils import azure_machine_type_to_worker_type_and_cores
log = logging.getLogger('create_instance')
BATCH_WORKER_IMAGE = os.environ['HAIL_BATCH_WORKER_IMAGE']
log.info(f'BATCH_WORKER_IMAGE {BATCH_WORKER_IMAGE}')
| [
6738,
19720,
1330,
4377,
11,
360,
713,
11,
32233,
198,
11748,
2779,
2414,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
6738,
427,
2588,
1330,
9577,
355,
427,
80,
198,
198,
6738,
7733,
13,
17721,
62,
11250,
1330,
651,
... | 2.687023 | 262 |
# coding: utf-8
"""
AVACloud API 1.17.3
AVACloud API specification # noqa: E501
OpenAPI spec version: 1.17.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class STLBReferenceDto(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'version_date': 'datetime',
'catalogue_name': 'str',
'group': 'str',
'cost_group': 'str',
'service_area': 'str',
'keys': 'list[STLBKeyDto]'
}
attribute_map = {
'version_date': 'versionDate',
'catalogue_name': 'catalogueName',
'group': 'group',
'cost_group': 'costGroup',
'service_area': 'serviceArea',
'keys': 'keys'
}
def __init__(self, version_date=None, catalogue_name=None, group=None, cost_group=None, service_area=None, keys=None): # noqa: E501
"""STLBReferenceDto - a model defined in Swagger""" # noqa: E501
self._version_date = None
self._catalogue_name = None
self._group = None
self._cost_group = None
self._service_area = None
self._keys = None
self.discriminator = None
if version_date is not None:
self.version_date = version_date
if catalogue_name is not None:
self.catalogue_name = catalogue_name
if group is not None:
self.group = group
if cost_group is not None:
self.cost_group = cost_group
if service_area is not None:
self.service_area = service_area
if keys is not None:
self.keys = keys
@property
def version_date(self):
"""Gets the version_date of this STLBReferenceDto. # noqa: E501
The date of the STLB version. Typically, only the Year and Month are used # noqa: E501
:return: The version_date of this STLBReferenceDto. # noqa: E501
:rtype: datetime
"""
return self._version_date
@version_date.setter
def version_date(self, version_date):
"""Sets the version_date of this STLBReferenceDto.
The date of the STLB version. Typically, only the Year and Month are used # noqa: E501
:param version_date: The version_date of this STLBReferenceDto. # noqa: E501
:type: datetime
"""
self._version_date = version_date
@property
def catalogue_name(self):
"""Gets the catalogue_name of this STLBReferenceDto. # noqa: E501
The name of the catalogue within the STLB # noqa: E501
:return: The catalogue_name of this STLBReferenceDto. # noqa: E501
:rtype: str
"""
return self._catalogue_name
@catalogue_name.setter
def catalogue_name(self, catalogue_name):
"""Sets the catalogue_name of this STLBReferenceDto.
The name of the catalogue within the STLB # noqa: E501
:param catalogue_name: The catalogue_name of this STLBReferenceDto. # noqa: E501
:type: str
"""
self._catalogue_name = catalogue_name
@property
def group(self):
"""Gets the group of this STLBReferenceDto. # noqa: E501
The name of the group in STLB # noqa: E501
:return: The group of this STLBReferenceDto. # noqa: E501
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this STLBReferenceDto.
The name of the group in STLB # noqa: E501
:param group: The group of this STLBReferenceDto. # noqa: E501
:type: str
"""
self._group = group
@property
def cost_group(self):
"""Gets the cost_group of this STLBReferenceDto. # noqa: E501
The cost group this service is associated with # noqa: E501
:return: The cost_group of this STLBReferenceDto. # noqa: E501
:rtype: str
"""
return self._cost_group
@cost_group.setter
def cost_group(self, cost_group):
"""Sets the cost_group of this STLBReferenceDto.
The cost group this service is associated with # noqa: E501
:param cost_group: The cost_group of this STLBReferenceDto. # noqa: E501
:type: str
"""
self._cost_group = cost_group
@property
def service_area(self):
"""Gets the service_area of this STLBReferenceDto. # noqa: E501
The service area (or type) in the STLB # noqa: E501
:return: The service_area of this STLBReferenceDto. # noqa: E501
:rtype: str
"""
return self._service_area
@service_area.setter
def service_area(self, service_area):
"""Sets the service_area of this STLBReferenceDto.
The service area (or type) in the STLB # noqa: E501
:param service_area: The service_area of this STLBReferenceDto. # noqa: E501
:type: str
"""
self._service_area = service_area
@property
def keys(self):
"""Gets the keys of this STLBReferenceDto. # noqa: E501
These keys may optionally be used to further reference multiple, specific items within the STLB # noqa: E501
:return: The keys of this STLBReferenceDto. # noqa: E501
:rtype: list[STLBKeyDto]
"""
return self._keys
@keys.setter
def keys(self, keys):
"""Sets the keys of this STLBReferenceDto.
These keys may optionally be used to further reference multiple, specific items within the STLB # noqa: E501
:param keys: The keys of this STLBReferenceDto. # noqa: E501
:type: list[STLBKeyDto]
"""
self._keys = keys
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(STLBReferenceDto, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, STLBReferenceDto):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
14661,
2246,
75,
2778,
7824,
352,
13,
1558,
13,
18,
628,
220,
220,
220,
14661,
2246,
75,
2778,
7824,
20855,
220,
1303,
645,
20402,
25,
412,
33548,
628,
220,
220,... | 2.267344 | 3,344 |
import aiodownload
from aiodownload.example import logger
if __name__ == '__main__':
main()
| [
11748,
257,
2101,
593,
2220,
198,
6738,
257,
2101,
593,
2220,
13,
20688,
1330,
49706,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.857143 | 35 |
# Aula 16 - Vaiaveis compostas: Tuplas
lanche = ('hamburger', 'suco', 'pizza', 'pudim') # tupla criada, pode ser criada sem parenteses np python 3
print(lanche) # mostra a tupla em parenteses e os seus elementos
print(lanche[1]) # mostra o elemento 1 da tupla
print(lanche[-1]) # mostra o elemento -1 da tupla
# Tuplas recebem indices numericos crescentes e decrescentes ex: 0,1,2 e -1,-2,-3 respectivamente
print(lanche[3]) # mostra o elemento 3 da tupla
print(lanche[-2]) # mostra o elemento - 2 da tupla
print(lanche[1:3]) # imprime do elemento 1 ao elemento 2 desconsiderando o elemento 3
print(lanche[2:]) # imprime o elemento 2 até o ultimo
print(lanche[:2]) # imprime do início até o elemento 1 ignorando o elemento 2
print(lanche[-2:]) # imprime do -2 até o final, pizza até pudim
print(lanche[3:-5:-1]) # imprime do elemento 3 ao 0 em ordem decrescente
## lanche[1] = refrigerante # retorna erro, tuplas são imutáveis durante a execução do programa.
print(lanche)
# Percorre o for para cada elemento indexado da Tupla
for comida in lanche:
print(f'Eu vou comer {comida};')
print('Comi pra caramba')
print(len(lanche)) # mostra a quantidade de elementos da tupla
# O range vai de 0 à quantidade de elementos da tupla lanche = (4)
# Por tanto mostra o indice correspondentes de 0 a 3
for cont1 in range(0, 4): # pode ser lido, 'mostre 4 resultados iniciando do 0. 0,1,2,3 = 4 resultados
print(cont1)
for cont in range(0, len(lanche)):
print(lanche[cont])
for cont2 in range(0, len(lanche)):
print(f'Eu vou comer {lanche[cont2]} na posição {cont2}')
# Metodo enumerate retorna uma tupla para cada elemento na tupla lanche
# no primeiro indice ele aloca o número no segundo indice aloca o elemento
# após o for aponto duas variaveis se quiser guadar separadamente
for pos, comida in enumerate(lanche):
print(f'Eu vou comer {comida} na posição {pos}')
# O metoto sorted cria lista com a tupla e organiza em ordem alfabetica
print(sorted(lanche))
a = (2, 5, 4)
b = (5, 8, 1, 2)
c = a + b
# soma de tuplas realiza a comutação de elementos, ou seja, apenas junta os elementos numa terceira tupla
print(c)
# Nesse caso, por tanto, a + b não sera igual b + a.. A ordem da soma influencia em como os elementos serão indexados
c = b + a
print(c)
print(len(c)) # imprime o número de elementos de c
print(c)
print(c.count(5)) # mosta a quantidade de elementos 5 que a tupla possui
print(c.count(4)) # mostra a quantidade de elementos 4 na tupla
print(c.count(9)) # mostra que não possui o elemento
print(c.index(8)) # mostra o indice do elemento
print(c.index(4)) # mostra o indice do elemento
print(c.index(5)) # mostra o indice do elemento, na caso do primeiro encontrado
print(c.index(5, 1)) # mostra o indice do elemento a partir do indice 1
# Uma tupla pode receber elementos de tipos diferentes
pessoa = ('Flavio', 32, 'Casado', 'peso', 110)
print(pessoa)
# É possivel deletar uma tupla durante a execução do programa com o comando del
del lanche
print(lanche)
| [
2,
317,
4712,
1467,
532,
17668,
544,
303,
271,
36541,
292,
25,
16749,
489,
292,
628,
198,
75,
6362,
796,
19203,
2763,
6236,
1362,
3256,
705,
2385,
1073,
3256,
705,
79,
9990,
3256,
705,
79,
463,
320,
11537,
220,
1303,
12777,
489,
64,... | 2.507897 | 1,203 |
VERSION="1.9.4"
LOGLEVEL = "INFO" | [
198,
198,
43717,
2625,
16,
13,
24,
13,
19,
1,
198,
25294,
2538,
18697,
796,
366,
10778,
1
] | 1.944444 | 18 |
"""Constants for Airly integration."""
ATTR_API_ADVICE = "ADVICE"
ATTR_API_CAQI = "CAQI"
ATTR_API_CAQI_DESCRIPTION = "DESCRIPTION"
ATTR_API_CAQI_LEVEL = "LEVEL"
ATTR_API_HUMIDITY = "HUMIDITY"
ATTR_API_PM1 = "PM1"
ATTR_API_PM10 = "PM10"
ATTR_API_PM10_LIMIT = "PM10_LIMIT"
ATTR_API_PM10_PERCENT = "PM10_PERCENT"
ATTR_API_PM25 = "PM25"
ATTR_API_PM25_LIMIT = "PM25_LIMIT"
ATTR_API_PM25_PERCENT = "PM25_PERCENT"
ATTR_API_PRESSURE = "PRESSURE"
ATTR_API_TEMPERATURE = "TEMPERATURE"
CONF_USE_NEAREST = "use_nearest"
DEFAULT_NAME = "Airly"
DOMAIN = "airly"
MANUFACTURER = "Airly sp. z o.o."
MAX_REQUESTS_PER_DAY = 100
NO_AIRLY_SENSORS = "There are no Airly sensors in this area yet."
| [
37811,
34184,
1187,
329,
3701,
306,
11812,
526,
15931,
198,
1404,
5446,
62,
17614,
62,
2885,
27389,
796,
366,
2885,
27389,
1,
198,
1404,
5446,
62,
17614,
62,
8141,
48,
40,
796,
366,
8141,
48,
40,
1,
198,
1404,
5446,
62,
17614,
62,
... | 2.083333 | 324 |
from typing import List, Optional
from dataclasses import dataclass
@dataclass
@dataclass
@dataclass
@dataclass | [
6738,
19720,
1330,
7343,
11,
32233,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
628,
198,
31,
19608,
330,
31172,
628,
198,
31,
19608,
330,
31172,
628,
198,
31,
19608,
330,
31172,
628,
198,
31,
19608,
330,
31172
] | 3.051282 | 39 |
import json
import api
import pymongo
import spur
from api.common import (check, InternalException, safe_fail, validate,
WebException)
from voluptuous import Length, Required, Schema
server_schema = Schema(
{
Required("name"):
check(
("Name must be a reasonable string.", [str,
Length(min=1, max=128)])),
Required("host"):
check(
("Host must be a reasonable string", [str,
Length(min=1, max=128)])),
Required("port"):
check(("You have to supply a valid integer for your port.", [int]),
("Your port number must be in the valid range 1-65535.",
[lambda x: 1 <= int(x) and int(x) <= 65535])),
Required("username"):
check(("Username must be a reasonable string",
[str, Length(min=1, max=128)])),
Required("password"):
check(("Username must be a reasonable string",
[str, Length(min=1, max=128)])),
Required("protocol"):
check(("Protocol must be either HTTP or HTTPS",
[lambda x: x in ['HTTP', 'HTTPS']])),
"server_number":
check(("Server number must be an integer.", [int]),
("Server number must be a positive integer.",
[lambda x: 0 < int(x)])),
},
extra=True)
def get_server(sid=None, name=None):
"""
Returns the server object corresponding to the sid provided
Args:
sid: the server id to lookup
Returns:
The server object
"""
db = api.common.get_conn()
if sid is None:
if name is None:
raise InternalException("You must specify either an sid or name")
else:
sid = api.common.hash(name)
server = db.shell_servers.find_one({"sid": sid})
if server is None:
raise InternalException(
"Server with sid '{}' does not exist".format(sid))
return server
def get_server_number(sid):
"""
Gets the server_number designation from sid
"""
if sid is None:
raise InternalException("You must specify a sid")
server = get_server(sid=sid)
if server is None:
raise InternalException(
"Server with sid '{}' does not exist".format(sid))
return server.get("server_number")
def get_connection(sid):
"""
Attempts to connect to the given server and returns a connection.
"""
server = get_server(sid)
try:
shell = spur.SshShell(
hostname=server["host"],
username=server["username"],
password=server["password"],
port=server["port"],
missing_host_key=spur.ssh.MissingHostKey.accept,
connect_timeout=10)
shell.run(["echo", "connected"])
except spur.ssh.ConnectionError as e:
raise WebException(
"Cannot connect to {}@{}:{} with the specified password".format(
server["username"], server["host"], server["port"]))
return shell
def ensure_setup(shell):
"""
Runs sanity checks on the shell connection to ensure that
shell_manager is set up correctly.
Leaves connection open.
"""
result = shell.run(
["sudo", "/picoCTF-env/bin/shell_manager", "status"], allow_error=True)
if result.return_code == 1 and "command not found" in result.stderr_output.decode(
"utf-8"):
raise WebException("shell_manager not installed on server.")
def add_server(params):
"""
Add a shell server to the pool of servers. First server is
automatically assigned server_number 1 (yes, 1-based numbering)
if not otherwise specified.
Args:
params: A dict containing:
host
port
username
password
server_number
Returns:
The sid.
"""
db = api.common.get_conn()
validate(server_schema, params)
if isinstance(params["port"], str):
params["port"] = int(params["port"])
if isinstance(params.get("server_number"), str):
params["server_number"] = int(params["server_number"])
if safe_fail(get_server, name=params["name"]) is not None:
raise WebException("Shell server with this name already exists")
params["sid"] = api.common.hash(params["name"])
# Automatically set first added server as server_number 1
if db.shell_servers.count() == 0:
params["server_number"] = params.get("server_number", 1)
db.shell_servers.insert(params)
return params["sid"]
# Probably do not need/want the sid here anymore.
def update_server(sid, params):
"""
Update a shell server from the pool of servers.
Args:
sid: The sid of the server to update
params: A dict containing:
port
username
password
server_number
"""
db = api.common.get_conn()
validate(server_schema, params)
server = safe_fail(get_server, sid=sid)
if server is None:
raise WebException(
"Shell server with sid '{}' does not exist.".format(sid))
params["name"] = server["name"]
validate(server_schema, params)
if isinstance(params["port"], str):
params["port"] = int(params["port"])
if isinstance(params.get("server_number"), str):
params["server_number"] = int(params["server_number"])
db.shell_servers.update({"sid": server["sid"]}, {"$set": params})
def remove_server(sid):
"""
Remove a shell server from the pool of servers.
Args:
sid: the sid of the server to be removed
"""
db = api.common.get_conn()
if db.shell_servers.find_one({"sid": sid}) is None:
raise WebException(
"Shell server with sid '{}' does not exist.".format(sid))
db.shell_servers.remove({"sid": sid})
def get_servers(get_all=False):
"""
Returns the list of added shell servers, or the assigned shell server
shard if sharding is enabled. Defaults to server 1 if not assigned
"""
db = api.common.get_conn()
settings = api.config.get_settings()
match = {}
if not get_all and settings["shell_servers"]["enable_sharding"]:
team = api.team.get_team()
match = {"server_number": team.get("server_number", 1)}
servers = list(db.shell_servers.find(match, {"_id": 0}))
if len(servers) == 0 and settings["shell_servers"]["enable_sharding"]:
raise InternalException(
"Your assigned shell server is currently down. Please contact an admin."
)
return servers
def get_problem_status_from_server(sid):
"""
Connects to the server and checks the status of the problems running there.
Runs `sudo shell_manager status --json` and parses its output.
Closes connection after running command.
Args:
sid: The sid of the server to check
Returns:
A tuple containing:
- True if all problems are online and false otherwise
- The output data of shell_manager status --json
"""
shell = get_connection(sid)
ensure_setup(shell)
with shell:
output = shell.run(
["sudo", "/picoCTF-env/bin/shell_manager", "status",
"--json"]).output.decode("utf-8")
data = json.loads(output)
all_online = True
for problem in data["problems"]:
for instance in problem["instances"]:
# if the service is not working
if not instance["service"]:
all_online = False
# if the connection is not working and it is a remote challenge
if not instance["connection"] and instance["port"] is not None:
all_online = False
return (all_online, data)
def load_problems_from_server(sid):
"""
Connects to the server and loads the problems from its deployment state.
Runs `sudo shell_manager publish` and captures its output.
Closes connection after running command.
Args:
sid: The sid of the server to load problems from.
Returns:
The number of problems loaded
"""
shell = get_connection(sid)
with shell:
result = shell.run(["sudo", "/picoCTF-env/bin/shell_manager", "publish"])
data = json.loads(result.output.decode("utf-8"))
# Pass along the server
data["sid"] = sid
api.problem.load_published(data)
has_instances = lambda p: len(p["instances"]) > 0
return len(list(filter(has_instances, data["problems"])))
def get_assigned_server_number(new_team=True, tid=None):
"""
Assigns a server number based on current teams count and
configured stepping
Returns:
(int) server_number
"""
settings = api.config.get_settings()["shell_servers"]
db = api.common.get_conn()
if new_team:
team_count = db.teams.count()
else:
if not tid:
raise InternalException("tid must be specified.")
oid = db.teams.find_one({"tid": tid}, {"_id": 1})
if not oid:
raise InternalException("Invalid tid.")
team_count = db.teams.count({"_id": {"$lt": oid["_id"]}})
assigned_number = 1
steps = settings["steps"]
if steps:
if team_count < steps[-1]:
for i, step in enumerate(steps):
if team_count < step:
assigned_number = i + 1
break
else:
assigned_number = 1 + len(steps) + (
team_count - steps[-1]) // settings["default_stepping"]
else:
assigned_number = team_count // settings["default_stepping"] + 1
if settings["limit_added_range"]:
max_number = list(
db.shell_servers.find({}, {
"server_number": 1
}).sort("server_number", -1).limit(1))[0]["server_number"]
return min(max_number, assigned_number)
else:
return assigned_number
| [
11748,
33918,
198,
198,
11748,
40391,
198,
11748,
279,
4948,
25162,
198,
11748,
26724,
198,
6738,
40391,
13,
11321,
1330,
357,
9122,
11,
18628,
16922,
11,
3338,
62,
32165,
11,
26571,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.422416 | 4,131 |
maxn = 10000
isprime = [False] * 2 + [True] * maxn
for i in range(2, maxn):
if isprime[i]:
j = i*i
while j < maxn:
isprime[j] = False
j += i
for a in range(1000, 10000):
if isprime[a]:
code = encode(a)
for inc in range(1, 4500):
b = a + inc
c = a + inc * 2
if c >= 10000:
break
if isprime[b] and isprime[c]:
if encode(b) == code and encode(c) == code:
print a, b, c
| [
9806,
77,
796,
33028,
198,
271,
35505,
796,
685,
25101,
60,
1635,
362,
1343,
685,
17821,
60,
1635,
3509,
77,
198,
1640,
1312,
287,
2837,
7,
17,
11,
3509,
77,
2599,
198,
197,
361,
318,
35505,
58,
72,
5974,
198,
197,
197,
73,
796,
... | 2.055276 | 199 |
from django.db import models
from datetime import datetime
# date = models.DateField(default=datetime.date.today)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
4818,
8079,
1330,
4818,
8079,
628,
628,
220,
220,
220,
220,
1303,
220,
220,
3128,
796,
4981,
13,
10430,
15878,
7,
12286,
28,
19608,
8079,
13,
4475,
13,
40838,
8,
198
] | 3.1 | 40 |
from project.motorcycle import Motorcycle
| [
6738,
1628,
13,
76,
20965,
13696,
1330,
12533,
13696,
198
] | 4.2 | 10 |
import argparse
import logging
import os
from src.algorithm import deep_q_learning
from torch.utils.tensorboard import SummaryWriter
import warnings
import gym
from src.agent import DQNAgent
from src.environment import DQNEnvironment
from datetime import datetime
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# see Extended Data Table 1
parser.add_argument('--mini_batch_size', default=32)
parser.add_argument('--replay_memory_size', default=100000) # 1000000
parser.add_argument('--agent_history_length', default=4)
parser.add_argument('--target_update_frequency', default=10000) # target_network_update_frequency
parser.add_argument('--gamma', default=0.99) # discount factor
parser.add_argument('--action_repeat', default=4)
parser.add_argument('--update_frequency', default=4)
parser.add_argument('--learning_rate', default=0.00025)
parser.add_argument('--gradient_momentum', default=0.95)
parser.add_argument('--squared_gradient_momentum', default=0.95)
parser.add_argument('--min_squared_gradient', default=0.01)
parser.add_argument('--epsilon_start', default=1) # initial_epsilon
parser.add_argument('--epsilon_end', default=0.1) # final_epsilon
parser.add_argument('--epsilon_decay', default=1000000) # final_epsilon_frame
parser.add_argument('--replay_start_size', default=25000) # 50000
parser.add_argument('--max_n_wait_actions', default=30) # no_op_max
# see Caption of Extended Data Table 3
parser.add_argument('--n_training_steps', default=10000000)
parser.add_argument('--evaluation_frequency', default=250000)
parser.add_argument('--n_evaluation_steps', default=135000)
args = parser.parse_args()
games = ['Breakout', 'Enduro', 'Riverraid', 'Seaquest', 'Spaceinvaders']
for game in games:
experiment_name = datetime.today().strftime('%Y-%m-%d') + '_' + game
# NoFrameskip - ensures no frames are skipped by the emulator
# v4 - ensures actions are executed, whereas v0 would ignore an action with 0.25 probability
max_avg_episode_score = deep_q_learning(environment_name=game, experiment_name=experiment_name, args=args)
print(f'{game} Score: {max_avg_episode_score}')
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
6738,
12351,
13,
282,
42289,
1330,
2769,
62,
80,
62,
40684,
198,
6738,
28034,
13,
26791,
13,
83,
22854,
3526,
1330,
21293,
34379,
198,
11748,
14601,
198,
11748,
11550,
... | 2.864385 | 789 |