| column | type | length / values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–281 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M (nullable) |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1–1 |
| author | string | length 0–175 |
6795b7e92f3819173d554c15491b3ea9ad12a4e6
|
035f1df119ce80ee57bd39baabaf2f163f4117ad
|
/intergenic_regions_extractor.py
|
b65e668742d4c1a24dca580ed838b6b8b5b5e8a8
|
[] |
no_license
|
Ethuer/Scripts
|
58659cc7852e8eb0c5d07ba19a0675db04f1aa4f
|
57ff92d09f08d7f2303cf41fadbfc4b3e4157a9f
|
refs/heads/master
| 2020-05-17T01:12:48.677178
| 2016-10-06T16:21:02
| 2016-10-06T16:21:02
| 18,836,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,308
|
py
|
import csv
from Bio import SeqIO
import re as re
from Bio.Blast.Applications import NcbiblastxCommandline
import sys,argparse
import os.path
#####################################################################################
# this script will extract intergenic regions from two multifasta files, #
# one containing ORFs only, the other containing ORFs and intergenic regions #
# a cutoff of 1000bp upstream and downstream is recommended, and removed as well #
# the remaining fragments will be blasted against the ORF database, #
# to see if they do not match #
# output is for teaching purposes in CPAT #
# #
###############(c) Ernst Thuer 2014 #################################################
# arguments for commandline input and help
####################################################
parser = argparse.ArgumentParser(description='This script takes two files in fasta format, one containing ORFs only and another containing ORFs + intergenic regions; it returns the intergenic regions')
parser.add_argument('-orfs',
dest='orfs',
required = True,
help='Input a fasta file containing the orfs only',
metavar = 'FILE',
#type=lambda x: is_valid_file(parser,x)
)
parser.add_argument('-inter',
dest='intergen',
required = True,
help='input a fasta file containing the orfs and intergenic regions',
metavar = 'FILE',
#type=lambda x: is_valid_file(parser,x)
)
parser.add_argument('-out',
dest='output',
required = False,
default='output.fasta',
help='Output a fasta file containing the intergenic regions beyond the threshold',
metavar = 'FILE',
#type=argparse.FileType('w')
)
parser.add_argument('-overhead',
dest='overhead',
required = False,
default='1000',
help='overhead of upstream and downstream bp beyond open reading frame will be cut off. Default 1000',
metavar = 'integer',
#type=argparse.FileType('w')
)
args = parser.parse_args()
#####################################################
def match_string(large,small,ident):
""" REGEX via python re. looking for bp upstream downstream"""
count_string = 0
collectstring = {}
overhead = int(args.overhead)
string = r'\w{1,%i}%s\w{1,%i}' % (overhead, small, overhead)
reg = re.compile(string)
large = str(large)
reg_string = reg.sub('',large)
return reg_string
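# A worked illustration of the pattern built above (an overhead of 10 is
# assumed here for brevity): with small = "ATGCCC" the compiled regex is
# r'\w{1,10}ATGCCC\w{1,10}', so re.sub strips the ORF plus up to 10 flanking
# characters on each side from the larger sequence; the default overhead of
# 1000 removes up to 1000 bp upstream and downstream in the same way.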
def compare(infile,compare):
""" compares two files according to their row[0] field"""
counter = 0
collect_seq={}
for row,seq in infile.items():
for rown,seqn in compare.items():
if row == rown:
length = (len(seqn.seq) - len(seq.seq))
if length > 2000:
string = match_string(seqn.seq,seq.seq,row)
if len(string) < len(seqn.seq):
collect_seq[row] = string
counter +=1
print '%i transcripts found' %(counter)
return collect_seq
with open('%s' %(args.orfs) ,'r') as handle_orf, open('%s' % (args.intergen),'r') as handle_inter, open('%s'% (args.output) ,'w') as out_raw :
orf = SeqIO.to_dict(SeqIO.parse(handle_orf,'fasta'))
inter = SeqIO.to_dict(SeqIO.parse(handle_inter,'fasta'))
out = csv.writer(out_raw,delimiter='\n')
print ' Processing files ...'
collection = compare(orf,inter)
print '%i of which possess acceptable overhead' %(len(collection))
count = 0
for key in collection:
if len(collection[key]) > 100:
out.writerow(['> %s intergenic region after 1000bp overhead' %(key),collection[key]])
count += len(collection[key])
print 'average length = %i' %(count/len(collection))
|
[
"thuer.ernst@gmail.com"
] |
thuer.ernst@gmail.com
|
f6a183c0cc5dfdfe2dc8c9f6c14190c2e3110004
|
149c286465f1cb01ac0583c1d4a717de9fa15671
|
/L5/aggregate.py
|
a9ce4d4eaf46e3e7ffc4cf2c442f38fc82cbb18e
|
[] |
no_license
|
leejaeka/Data-Wrangling-Udacity
|
ff9d980d98c01aa3ac5895bc8cf5f7bf3fe9172b
|
6ef17b949b794aec268707f01eb2d072dc0e75b3
|
refs/heads/main
| 2023-02-06T03:04:07.237039
| 2020-12-27T05:00:33
| 2020-12-27T05:00:33
| 312,496,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,261
|
py
|
#!/usr/bin/env python
"""
Write an aggregation query to answer this question:
Of the users in the "Brasilia" timezone who have tweeted 100 times or more,
who has the largest number of followers?
The following hints will help you solve this problem:
- Time zone is found in the "time_zone" field of the user object in each tweet.
- The number of tweets for each user is found in the "statuses_count" field.
To access these fields you will need to use dot notation (from Lesson 4)
- Your aggregation query should return something like the following:
{u'ok': 1.0,
u'result': [{u'_id': ObjectId('52fd2490bac3fa1975477702'),
u'followers': 2597,
u'screen_name': u'marbles',
u'tweets': 12334}]}
Note that you will need to create the fields 'followers', 'screen_name' and 'tweets'.
Please modify only the 'make_pipeline' function so that it creates and returns an aggregation
pipeline that can be passed to the MongoDB aggregate function. As in our examples in this lesson,
the aggregation pipeline should be a list of one or more dictionary objects.
Please review the lesson examples if you are unsure of the syntax.
Your code will be run against a MongoDB instance that we have provided. If you want to run this code
locally on your machine, you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
Please note that the dataset you are using here is a smaller version of the twitter dataset used
in examples in this lesson. If you attempt some of the same queries that we looked at in the lesson
examples, your results will be different.
"""
def get_db(db_name):
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client[db_name]
return db
def make_pipeline():
# complete the aggregation pipeline
pipeline = [ ]
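# A hedged sketch of one possible pipeline for the question above; it is not
# part of the original exercise. "user.time_zone" and "user.statuses_count"
# come from the docstring hints, while "user.followers_count" and
# "user.screen_name" are assumed Twitter user fields.
# pipeline = [
#     {"$match": {"user.time_zone": "Brasilia",
#                 "user.statuses_count": {"$gte": 100}}},
#     {"$project": {"followers": "$user.followers_count",
#                   "screen_name": "$user.screen_name",
#                   "tweets": "$user.statuses_count"}},
#     {"$sort": {"followers": -1}},
#     {"$limit": 1}
# ]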
return pipeline
def aggregate(db, pipeline):
return [doc for doc in db.tweets.aggregate(pipeline)]
if __name__ == '__main__':
db = get_db('twitter')
pipeline = make_pipeline()
result = aggregate(db, pipeline)
import pprint
pprint.pprint(result)
assert len(result) == 1
assert result[0]["followers"] == 17209
|
[
"jaekang.lee@mail.utoronto.ca"
] |
jaekang.lee@mail.utoronto.ca
|
97d64b5f37844028007a1cfea36d95e2507f501c
|
e212d9b85df5962c8ebf9e737b825fa3fe89f3d6
|
/WaveRNN/utility/text/__init__.py
|
0b9d8cc2401057deb5c8f63699658b5395afb09c
|
[
"MIT"
] |
permissive
|
sankar-mukherjee/Expressive-Speech-Synthesis-Research
|
9c3ea564509324dbfe033a328edd45aa7bffeffa
|
d85a067a131c04944f5bbe7fa7ab8c26e7d83800
|
refs/heads/master
| 2023-01-28T05:01:17.371683
| 2020-12-16T11:43:15
| 2020-12-16T11:43:15
| 294,351,192
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,187
|
py
|
""" from https://github.com/keithito/tacotron """
import re
from utility.text import cleaners
from utility.text.symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
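# Hypothetical usage sketch (the cleaner name below is an assumption, not
# taken from this repository's configuration):
# ids = text_to_sequence("Turn left on {HH AW1 S S T AH0 N} Street.",
#                        ["english_cleaners"])
# sequence_to_text(ids) then returns the cleaned text with the ARPAbet span
# re-wrapped in curly braces.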
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
return s in _symbol_to_id and s != '_' and s != '~'
|
[
"sankar1535@gmail.com"
] |
sankar1535@gmail.com
|
a23cc982e760acbf55a579b0d8829327af32289b
|
e9a33230671bd7e099c11943ec056f84b6a9e24b
|
/jaal_call.py
|
b2ae8e2a72ebbbaeda09cb9d5df50b979976757a
|
[
"MIT"
] |
permissive
|
Olshansk/jaal
|
52c49dcaaa1d4be21d1474c7739f4e5af9eb971a
|
2b1e4696ca0d3d8f074827e5ae2943817eaa88e7
|
refs/heads/main
| 2023-05-31T13:45:57.443046
| 2021-05-23T09:42:57
| 2021-05-23T09:42:57
| 340,232,097
| 1
| 0
|
MIT
| 2021-02-19T02:04:16
| 2021-02-19T02:04:16
| null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# import
from jaal import Jaal
from jaal.datasets import load_got
# load the data
edge_df, node_df = load_got()
# define vis options
vis_opts = {'height': '600px', # change height
'interaction':{'hover': True}, # turn on-off the hover
'physics':{'stabilization':{'iterations': 100}}} # define the convergence iteration of network
# init Jaal and run server (with opts)
Jaal(edge_df, node_df).plot(vis_opts=vis_opts)
# init Jaal and run server (with default options)
# Jaal(edge_df, node_df).plot()
|
[
"mohitmayank1@gmail.com"
] |
mohitmayank1@gmail.com
|
e7d7eb5fdbca34075d62575cd4055b8f3df101c3
|
211e4920cc57e300a0c1f85e9d08f79c41d6fd6c
|
/ICP/ICP1/SOURCE/print number of letters and strings.py
|
7b5eb3153eb5cff4165efefb6deca0eb92e4c7ad
|
[] |
no_license
|
srividyavn/Python-DL
|
34d2497c8a5b395b4426e9e90989e57f3854d297
|
85f4c64e28e0003ef17a1f8ecb49a90b11bafddd
|
refs/heads/master
| 2020-04-18T16:04:23.407642
| 2019-05-18T03:47:53
| 2019-05-18T03:47:53
| 167,626,124
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
s = input("Input a string")
d = l = 0
for c in s:
if c.isdigit():
d = d+1
elif c.isalpha():
l = l+1
else:
pass
print("Letters", l)
print("Digits", d)
|
[
"vnsrividya1994@gmail.com"
] |
vnsrividya1994@gmail.com
|
1693c758f2c5cf600463f7be6a97c24efec33c8a
|
79a5a03461ff0c8905ced690b5c900bc2c031525
|
/visualize.py
|
d6dc0e7cca9ea31634538410556fa76e3549c34b
|
[] |
no_license
|
himanshucodz55/Social_Distancing_Ai_COVID19
|
f5ba1146acf8ead00b944e558aad46313a549076
|
ff138b7f3d6d109722a19fbad1b87d68e0da3a5d
|
refs/heads/master
| 2022-12-10T22:58:39.752672
| 2020-09-02T17:43:57
| 2020-09-02T17:43:57
| 292,346,633
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,401
|
py
|
"""
Mask R-CNN
Display and Visualization Functions.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import random
import itertools
import colorsys
import numpy as np
from skimage.measure import find_contours
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as lines
from matplotlib.patches import Polygon
import IPython.display
import utils
import cv2
############################################################
# Visualization
############################################################
def display_images(images, titles=None, cols=4, cmap=None, norm=None,
interpolation=None):
"""Display the given set of images, optionally with titles.
images: list or array of image tensors in HWC format.
titles: optional. A list of titles to display with each image.
cols: number of images per row
cmap: Optional. Color map to use. For example, "Blues".
norm: Optional. A Normalize instance to map values to colors.
interpolation: Optional. Image interpolation to use for display.
"""
titles = titles if titles is not None else [""] * len(images)
rows = len(images) // cols + 1
plt.figure(figsize=(14, 14 * rows // cols))
i = 1
for image, title in zip(images, titles):
plt.subplot(rows, cols, i)
plt.title(title, fontsize=9)
plt.axis('off')
# plt.imshow(image.astype(np.uint8), cmap=cmap,
# norm=norm, interpolation=interpolation)
i += 1
plt.show()
random.seed(0)
N=90
brightness = 1.0
hsv = [(i / N, 1, brightness) for i in range(N)]
random.shuffle(hsv)
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
all_colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
return all_colors
def class_color(id,prob):
_hsv = list(hsv[id])
# _hsv[2]=random.uniform(0.8, 1)
_hsv[2]=prob
color = colorsys.hsv_to_rgb(*_hsv)
return color
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
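# Minimal usage sketch with hypothetical shapes: blend a small binary mask
# into an all-black RGB frame at 50% opacity.
# frame = np.zeros((4, 4, 3), dtype=np.float32)
# blob = np.zeros((4, 4), dtype=np.uint8)
# blob[1:3, 1:3] = 1
# tinted = apply_mask(frame, blob, color=(1.0, 0.0, 0.0), alpha=0.5)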
def display_instances(image, boxes, masks, class_ids, class_names,scores=None, title="",figsize=(16, 16), ax=None,risky=None,index=None):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [num_instances, height, width]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
figsize: (optional) the size of the image.
"""
if index is not None:
# Number of instances
N = boxes.shape[0]
# if not N:
# print("\n*** No instances to display *** \n")
# else:
# assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
l=0
masked_image = image.astype(np.uint32).copy()
for i in index:
# color = colors[i]
# print("##################################",i,color)
color=(0.26666666666666683, 1.0, 0.25)
color1=(0.0,0.0,1.0)
# Bounding box
if not np.any(boxes[l]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[l]
l+=1
# p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
# alpha=0.7, linestyle="dashed",
# edgecolor=None, facecolor='none')
# ax.add_patch(p)
# ax.circle()
# ax.Circle( ((x1+x2)/2,y2), 5, (0, 0, 255), -1)
# center= plt.Circle(((x1+x2)/2,y2),5,color="blue")
# ax.add_patch(center)
if class_ids[i]==1:
# Label
class_id = class_ids[i]
score = scores[i] if scores is not None else None
label = class_names[class_id]
x = random.randint(x1, (x1 + x2) // 2)
caption = "{} {:.3f}".format(label, score) if score else label
ax.text(x1, y1 + 8, caption,color='w', size=11, backgroundcolor="none")
# Mask
if (risky is not None) and (i in risky):
# ii=risky[i]
# print("risky_ids: ",i)
mask = masks[:, :, i]
masked_image = apply_mask(masked_image, mask, color1)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color1)
ax.add_patch(p)
else:
mask = masks[:, :, i]
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
# ax.imshow(masked_image.astype(np.uint8))
return masked_image.astype(np.uint8)
def draw_instances(image, boxes, masks, class_ids, class_names,
scores=None, title="",
figsize=(16, 16), ax=None):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [num_instances, height, width]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
figsize: (optional) the size of the image.
"""
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
# if not ax:
# _, ax = plt.subplots(1, figsize=figsize)
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
masked_image = image.copy()
for i in range(N):
class_id = class_ids[i]
score = scores[i] if scores is not None else None
# color = colors[i]
color = class_color(class_id,score*score*score*score)
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
# p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
# alpha=0.7, linestyle="dashed",
# edgecolor=color, facecolor='none')
cv2.rectangle(masked_image, (x1, y1),(x2, y2), [int(x*255) for x in (color)],4)
# Label
label = class_names[class_id]
x = random.randint(x1, (x1 + x2) // 2)
caption = "%s %d%%"%(label, int(score*100)) if score else label
# ax.text(x1, y1 + 8, caption,
# color='w', size=11, backgroundcolor="none")
yyy=y1 -16
if yyy <0:
yyy=0
cv2.putText(masked_image, caption, (x1, yyy), cv2.FONT_HERSHEY_SIMPLEX, 1.5, [int(x*255) for x in (color)],4)
# Mask
mask = masks[:, :, i]
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
# ax.add_patch(p)
pts = np.array(verts.tolist(), np.int32)
pts = pts.reshape((-1,1,2))
cv2.polylines(masked_image,[pts],True,[int(x*255) for x in (color)],4)
return masked_image.astype(np.uint8)
def draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):
"""
anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.
proposals: [n, 4] the same anchors but refined to fit objects better.
"""
masked_image = image.copy()
# Pick random anchors in case there are too many.
ids = np.arange(rois.shape[0], dtype=np.int32)
ids = np.random.choice(
ids, limit, replace=False) if ids.shape[0] > limit else ids
fig, ax = plt.subplots(1, figsize=(12, 12))
if rois.shape[0] > limit:
plt.title("Showing {} random ROIs out of {}".format(
len(ids), rois.shape[0]))
else:
plt.title("{} ROIs".format(len(ids)))
# Show area outside image boundaries.
ax.set_ylim(image.shape[0] + 20, -20)
ax.set_xlim(-50, image.shape[1] + 20)
ax.axis('off')
for i, id in enumerate(ids):
color = np.random.rand(3)
class_id = class_ids[id]
# ROI
y1, x1, y2, x2 = rois[id]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
edgecolor=color if class_id else "gray",
facecolor='none', linestyle="dashed")
ax.add_patch(p)
# Refined ROI
if class_id:
ry1, rx1, ry2, rx2 = refined_rois[id]
p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal for easy visualization
ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Label
label = class_names[class_id]
ax.text(rx1, ry1 + 8, "{}".format(label),
color='w', size=11, backgroundcolor="none")
# Mask
m = utils.unmold_mask(mask[id], rois[id]
[:4].astype(np.int32), image.shape)
masked_image = apply_mask(masked_image, m, color)
# ax.imshow(masked_image)
# Print stats
print("Positive ROIs: ", class_ids[class_ids > 0].shape[0])
print("Negative ROIs: ", class_ids[class_ids == 0].shape[0])
print("Positive Ratio: {:.2f}".format(
class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))
# TODO: Replace with matplotlib equivalent?
def draw_box(image, box, color):
"""Draw 3-pixel width bounding boxes on the given image array.
color: list of 3 int values for RGB.
"""
y1, x1, y2, x2 = box
image[y1:y1 + 2, x1:x2] = color
image[y2:y2 + 2, x1:x2] = color
image[y1:y2, x1:x1 + 2] = color
image[y1:y2, x2:x2 + 2] = color
return image
def display_detections(image, gt_boxes, boxes, masks, class_ids, class_names, scores=None):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [num_instances, height, width]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
"""
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
fig, ax = plt.subplots(1, figsize=(20,20))
N = boxes.shape[0] # number of instances
colors = random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height+10, -10)
ax.set_xlim(-10, width+10)
ax.axis('off')
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
p = patches.Rectangle((x1, y1), x2-x1, y2-y1, linewidth=2, alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
class_id = class_ids[i]
score = scores[i] if scores is not None else None
label = class_names[class_id]
x = random.randint(x1, (x1+x2)//2)
ax.text(x1, y1+8, "{} {:.3f}".format(label, score) if score else label,
color='w', size=11, backgroundcolor="none")
# Mask
mask = masks[:,:,i]
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad the mask to ensure proper polygons for mask that touch image edges.
padded_mask = np.zeros((mask.shape[0]+2, mask.shape[1]+2), dtype=np.uint8)
padded_mask[1:-1,1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
return plt.imshow(masked_image.astype(np.uint8))
def display_top_masks(image, mask, class_ids, class_names, limit=4):
"""Display the given image and the top few class masks."""
to_display = []
titles = []
to_display.append(image)
titles.append("H x W={}x{}".format(image.shape[0], image.shape[1]))
# Pick top prominent classes in this image
unique_class_ids = np.unique(class_ids)
mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])
for i in unique_class_ids]
top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),
key=lambda r: r[1], reverse=True) if v[1] > 0]
# Generate images and titles
for i in range(limit):
class_id = top_ids[i] if i < len(top_ids) else -1
# Pull masks of instances belonging to the same class.
m = mask[:, :, np.where(class_ids == class_id)[0]]
m = np.sum(m * np.arange(1, m.shape[-1]+1), -1)
to_display.append(m)
titles.append(class_names[class_id] if class_id != -1 else "-")
display_images(to_display, titles=titles, cols=limit+1, cmap="Blues_r")
def plot_precision_recall(AP, precisions, recalls):
"""Draw the precision-recall curve.
AP: Average precision at IoU >= 0.5
precisions: list of precision values
recalls: list of recall values
"""
# Plot the Precision-Recall curve
_, ax = plt.subplots(1)
ax.set_title("Precision-Recall Curve. AP@50 = {:.3f}".format(AP))
ax.set_ylim(0, 1.1)
ax.set_xlim(0, 1.1)
_ = ax.plot(recalls, precisions)
def plot_overlaps(gt_class_ids, pred_class_ids, pred_scores,
overlaps, class_names, threshold=0.5):
"""Draw a grid showing how ground truth objects are classified.
gt_class_ids: [N] int. Ground truth class IDs
pred_class_id: [N] int. Predicted class IDs
pred_scores: [N] float. The probability scores of predicted classes
overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes.
class_names: list of all class names in the dataset
threshold: Float. The prediction probability required to predict a class
"""
gt_class_ids = gt_class_ids[gt_class_ids != 0]
pred_class_ids = pred_class_ids[pred_class_ids != 0]
plt.figure(figsize=(12, 10))
plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)
plt.yticks(np.arange(len(pred_class_ids)),
["{} ({:.2f})".format(class_names[int(id)], pred_scores[i])
for i, id in enumerate(pred_class_ids)])
plt.xticks(np.arange(len(gt_class_ids)),
[class_names[int(id)] for id in gt_class_ids], rotation=90)
thresh = overlaps.max() / 2.
for i, j in itertools.product(range(overlaps.shape[0]),
range(overlaps.shape[1])):
text = ""
if overlaps[i, j] > threshold:
text = "match" if gt_class_ids[j] == pred_class_ids[i] else "wrong"
color = ("white" if overlaps[i, j] > thresh
else "black" if overlaps[i, j] > 0
else "grey")
plt.text(j, i, "{:.3f}\n{}".format(overlaps[i, j], text),
horizontalalignment="center", verticalalignment="center",
fontsize=9, color=color)
plt.tight_layout()
plt.xlabel("Ground Truth")
plt.ylabel("Predictions")
def draw_boxes(image, boxes=None, refined_boxes=None,
masks=None, captions=None, visibilities=None,
title="", ax=None):
"""Draw bounding boxes and segmentation masks with differnt
customizations.
boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.
refined_boxes: Like boxes, but draw with solid lines to show
that they're the result of refining 'boxes'.
masks: [N, height, width]
captions: List of N titles to display on each box
visibilities: (optional) List of values of 0, 1, or 2. Determine how
prominent each bounding box should be.
title: An optional title to show over the image
ax: (optional) Matplotlib axis to draw on.
"""
# Number of boxes
assert boxes is not None or refined_boxes is not None
N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]
# Matplotlib Axis
if not ax:
_, ax = plt.subplots(1, figsize=(12, 12))
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
margin = image.shape[0] // 10
ax.set_ylim(image.shape[0] + margin, -margin)
ax.set_xlim(-margin, image.shape[1] + margin)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
# Box visibility
visibility = visibilities[i] if visibilities is not None else 1
if visibility == 0:
color = "gray"
style = "dotted"
alpha = 0.5
elif visibility == 1:
color = colors[i]
style = "dotted"
alpha = 1
elif visibility == 2:
color = colors[i]
style = "solid"
alpha = 1
# Boxes
if boxes is not None:
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in cropping.
continue
y1, x1, y2, x2 = boxes[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=alpha, linestyle=style,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Refined boxes
if refined_boxes is not None and visibility > 0:
ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)
p = patches.Rectangle((rx1, ry1), rx2-rx1, ry2-ry1, linewidth=2,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal
if boxes is not None:
ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Captions
if captions is not None:
caption = captions[i]
# If there are refined boxes, display captions on them
if refined_boxes is not None:
y1, x1, y2, x2 = ry1, rx1, ry2, rx2
x = random.randint(x1, (x1 + x2) // 2)
ax.text(x1, y1, caption, size=11, verticalalignment='top',
color='w', backgroundcolor="none",
bbox={'facecolor': color, 'alpha': 0.5,
'pad': 2, 'edgecolor': 'none'})
# Masks
if masks is not None:
mask = masks[:, :, i]
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
# ax.imshow(masked_image.astype(np.uint8))
def display_table(table):
"""Display values in a table format.
table: an iterable of rows, and each row is an iterable of values.
"""
html = ""
for row in table:
row_html = ""
for col in row:
row_html += "<td>{:40}</td>".format(str(col))
html += "<tr>" + row_html + "</tr>"
html = "<table>" + html + "</table>"
IPython.display.display(IPython.display.HTML(html))
def display_weight_stats(model):
"""Scans all the weights in the model and returns a list of tuples
that contain stats about each weight.
"""
layers = model.get_trainable_layers()
table = [["WEIGHT NAME", "SHAPE", "MIN", "MAX", "STD"]]
for l in layers:
weight_values = l.get_weights() # list of Numpy arrays
weight_tensors = l.weights # list of TF tensors
for i, w in enumerate(weight_values):
weight_name = weight_tensors[i].name
# Detect problematic layers. Exclude biases of conv layers.
alert = ""
if w.min() == w.max() and not (l.__class__.__name__ == "Conv2D" and i == 1):
alert += "<span style='color:red'>*** dead?</span>"
if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:
alert += "<span style='color:red'>*** Overflow?</span>"
# Add row
table.append([
weight_name + alert,
str(w.shape),
"{:+9.4f}".format(w.min()),
"{:+10.4f}".format(w.max()),
"{:+9.4f}".format(w.std()),
])
display_table(table)
|
[
"noreply@github.com"
] |
noreply@github.com
|
26a487162edb894b5eb3cb1710694ac467522456
|
6c4722bd98c9fde85862774b6de78c93be5f3513
|
/gen_unprocessed_RAW_for_ExDark.py
|
c2139db6cfe7d9a41136415bd63b9578f504212b
|
[] |
no_license
|
wakananai/unprocessing
|
f34ec72a877784f861a3c86e65972388d9c83140
|
8296df535e27ab9ef615b1735e7136181e1c18bf
|
refs/heads/master
| 2021-01-01T08:47:33.719273
| 2020-07-02T20:22:38
| 2020-07-02T20:22:38
| 239,204,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,723
|
py
|
from unprocess import unprocess
import glob
import cv2
import tensorflow as tf
import glob
from tqdm import tqdm
import os
import numpy as np
import pickle
import argparse
IMG_DIR= f'/tmp3/r07922076/ExDark_data'
OUT_DIR= f'/tmp3/r07922076/unprocessed_ExDark_data'
obj_class_dir = next(os.walk( os.path.join(IMG_DIR)))[1]
# obj_class_dir.remove('__MACOSX')
for obj_class in obj_class_dir:
if not os.path.exists(os.path.join(OUT_DIR, obj_class)):
os.makedirs(os.path.join(OUT_DIR, obj_class))
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
input_image = tf.placeholder(tf.float32, shape=[None, None, 3])
un_raw, meta = unprocess(input_image)
sess = tf.Session(config=config)
with sess.as_default():
for imgpath in tqdm(sorted(glob.glob(os.path.join(IMG_DIR, '*', '*')))):
img = cv2.imread(imgpath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# check if img contains an odd height / width
h, w, _ = img.shape
if img.shape[0] % 2 == 1:
h = img.shape[0] + 1
if img.shape[1] % 2 == 1:
w = img.shape[1] + 1
plane = np.zeros((h,w,3))
plane[:img.shape[0],:img.shape[1],:] = img[:,:,:]
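# e.g. a 423x640 RGB frame is copied into a zero-padded 424x640 plane so that
# both spatial dimensions are even before unprocessing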
plane = plane.astype(np.float32) / 255.0
un, metadata = sess.run([un_raw, meta], feed_dict={input_image: plane})
file_name, file_ext = os.path.splitext(imgpath)
obj_class = imgpath.split('/')[-2]
path_raw = os.path.join(OUT_DIR, obj_class, os.path.basename(imgpath).replace(file_ext,'.pkl'))
with open(path_raw, 'wb') as pf:
content = dict()
content['raw'] = un
content['metadata'] = metadata
pickle.dump(content, pf)
|
[
"wakananai@gmail.com"
] |
wakananai@gmail.com
|
26184749943556b8182e4acc6b275c7e7dc14266
|
1cbc22287df1134337fe3f440f8e73a08ee41fb6
|
/projectapp/models.py
|
9336fb48ee0dbea5534aebaf2f77686082619739
|
[
"MIT"
] |
permissive
|
FranciscoStanley/pythonXlsx
|
cb0cc5c3df66ba959c854a5902982b2c3704db8c
|
f8ed52bd3c1a6b07edf8d046d0ace3e7287f83ef
|
refs/heads/master
| 2023-08-27T13:34:10.757988
| 2021-11-08T21:36:50
| 2021-11-08T21:36:50
| 424,464,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 882
|
py
|
from django.db import models
# Create your models here.
SEXO_CHOICES = (
('M', 'masculino'),
('F', 'feminino'),
)
class Person(models.Model):
nome = models.CharField(max_length=50)
sobrenome = models.CharField(max_length=50, null=True, blank=True)
sexo = models.CharField(max_length=2, choices=SEXO_CHOICES)
altura = models.FloatField(null=True, blank=True, default=None)
peso = models.FloatField(null=True, blank=True, default=None)
nascimento = models.DateTimeField(verbose_name="Data de Nascimento", null=True)
bairro = models.CharField(max_length=30)
cidade = models.CharField(max_length=20)
estado = models.CharField(max_length=20)
numero = models.DecimalField(max_digits=8, decimal_places=0)
def __str__(self):
return self.nome
def get_nascimento(self):
return self.nascimento.strftime('%d/%m/%Y')
|
[
"franciscothestanley@gmail.com"
] |
franciscothestanley@gmail.com
|
3cc7c17ee582aaba4ab4d5771286ac2e1ae8b9e8
|
1b45d1162bd60a356844fc4dced068da2e6cc438
|
/Arrays/Merge.py
|
8ee66ae39f1687b433e476fa1b9e3be1d2e31015
|
[
"MIT"
] |
permissive
|
AnkitAvi11/Data-Structures-And-Algorithms
|
de9584e439861254cdce265af789c8b484c01c69
|
703f78819a41d4dd88caf71156a4a515651edc1b
|
refs/heads/master
| 2023-02-19T21:53:39.405934
| 2021-01-24T17:27:21
| 2021-01-24T17:27:21
| 297,752,655
| 6
| 3
|
MIT
| 2021-01-24T17:27:22
| 2020-09-22T19:33:55
|
Python
|
UTF-8
|
Python
| false
| false
| 669
|
py
|
"""
QUESTION STATEMENT : MERGE TWO SORTED ARRAYS WITHOUT USING ANY EXTRA SPACE
example :
arr1 = {1,3,5,7,9} size = n
arr2 = {2,4,6,8,10} size = m
arr1 after merging = {1,2,3,4,5,6,7,8,9,10}
"""
def mergeArrays(arr : list, arr2 : list) :
i, j = 0, 0
while i < len(arr) : # O(n)
if arr[i] > arr2[j] :
arr[i], arr2[j] = arr2[j], arr[i] # swapping the elements
arr2.sort() # O(mlog2m)
i+=1
# total complexity = (n*m)log2m
for el in arr2 :
arr.append(el)
if __name__ == '__main__' :
arr = [1,3,5,7,9]
arr2 = [2,4,6,8,10]
mergeArrays(arr, arr2)
print(arr)
|
[
"kumar.ankit383@gmail.com"
] |
kumar.ankit383@gmail.com
|
6c78fccd11b2ca769683b6527aa888e158fea647
|
d9e26e516ab3863b6e7d00c4e3cdecf1af7028eb
|
/src/oaklib/io/streaming_nl_writer.py
|
ecde169932c3e55baa59bfdfd1aef1e274f6109a
|
[
"Apache-2.0"
] |
permissive
|
INCATools/ontology-access-kit
|
2f08a64b7308e8307d1aaac2a81764e7d98b5928
|
8d2a124f7af66fe2e796f9e0ece55585438796a5
|
refs/heads/main
| 2023-08-30T14:28:57.201198
| 2023-08-29T17:40:19
| 2023-08-29T17:40:19
| 475,072,415
| 67
| 15
|
Apache-2.0
| 2023-09-07T01:06:04
| 2022-03-28T15:50:45
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
from dataclasses import dataclass
from linkml_runtime.utils.yamlutils import YAMLRoot
from oaklib.datamodels import obograph
from oaklib.io.streaming_writer import StreamingWriter
from oaklib.utilities.nlp.natual_language_generation import NaturalLanguageGenerator
@dataclass
class StreamingNaturalLanguageWriter(StreamingWriter):
"""
A writer that streams basic line by line reporting info
"""
natural_language_generator: NaturalLanguageGenerator = None
def emit_curie(self, curie, label=None, **kwargs):
self._ensure_init()
self.file.write(self.natural_language_generator.render_entity(curie))
self.file.write("\n")
def emit_obj(self, obj: YAMLRoot):
self._ensure_init()
if isinstance(obj, obograph.LogicalDefinitionAxiom):
self.file.write(self.natural_language_generator.render_logical_definition(obj))
self.file.write("\n")
else:
raise NotImplementedError
def _ensure_init(self):
if self.natural_language_generator is None:
self.natural_language_generator = NaturalLanguageGenerator(self.ontology_interface)
|
[
"noreply@github.com"
] |
noreply@github.com
|
d49bd6a43b3f609a7edeca36319384afb91d1e8b
|
9c91412897e8c38698b707fd68241f5f90dad8ca
|
/faculty/models.py
|
d964ebe95cf256b3df082b290e8d312c16bccd8f
|
[] |
no_license
|
zhaoz2017/cpa2019
|
ae78d6beb3f26c8d681fb7bfb1665dad34b30dc3
|
795325edc435201f2b4d2c823500dee9f55d9b96
|
refs/heads/master
| 2020-04-02T15:37:09.146904
| 2019-04-11T21:13:33
| 2019-04-11T21:13:33
| 153,498,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 940
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models import Q
# Create your models here.
class Solution(models.Model):
title = models.CharField(max_length=255)
pub_date = models.DateTimeField()
body_q = models.TextField()
body_a = models.TextField()
votes_total = models.IntegerField(default=1)
publisher = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.title
# def summary(self):
# return self.body[:100]
def pub_date_pretty(self):
return self.pub_date.strftime('%b %e %Y')
class Professor(models.Model):
first = models.CharField(max_length=50)
last = models.CharField(max_length=50)
email = models.EmailField(max_length=50)
website = models.CharField(max_length=50)
def __str__(self): #__unicode__(self):
return "{} {} {} {}".format(self.first, self.last, self.email, self.website)
|
[
"zhaoz2017@my.fit.edu"
] |
zhaoz2017@my.fit.edu
|
f49b8c133be1b81786886982ff14bce39ed0ed34
|
03cbffb6c8f80070704a9c3290ad053f10535a16
|
/project_utils/utilities.py
|
7f8ec7b67694f43e051399ce12909ce5e8947978
|
[] |
no_license
|
TanviSahay/Rl-for-NLP-project
|
dabc4be62dd6f8f3812ff7ac40075871d78b2839
|
496f654b0497e67b5d3c1f6d77b9457266248252
|
refs/heads/master
| 2021-08-31T07:49:19.754461
| 2017-12-20T17:27:53
| 2017-12-20T17:27:53
| 114,515,736
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,981
|
py
|
import numpy as np
import os
import matplotlib.pyplot as plt
import warnings
import pickle
from collections import defaultdict
from nltk import pos_tag, word_tokenize
warnings.simplefilter("ignore")
def dd():
return defaultdict(int)
def get_actions():
with open('./Data/vocab.pkl','rb') as f:
actions = pickle.load(f)
actions = {k:i for i,k in enumerate(actions)}
return actions
def getReward(reward_func):
if reward_func == 1:
#print('Reward will be: word-word co-occurrence')
return word_cooc_reward()
if reward_func == 2:
#print('Reward will be: pos-pos co-occurrence')
return pos_cooc_reward()
if reward_func == 3:
#print('Reward will be: product of word-word and pos-pos cooccurrence')
return word_pos_reward('prod')
if reward_func == 4:
#print('reward will be: average of word-word and pos-pos cooccurrence')
return word_pos_reward('avg')
def word_cooc_reward():
with open('./Data/word_cooccurrence.pkl','rb') as f:
return pickle.load(f)
def pos_cooc_reward():
with open('./Data/pos_cooccurrence.pkl','rb') as f:
return pickle.load(f)
def word_pos_reward(combine):
if os.path.exists('./Data/word_pos_%s.pickle' % combine):
with open('./Data/word_pos_%s.pickle' % combine, 'rb') as f:
rewards = pickle.load(f)
else:
with open('./Data/pos_cooccurrence.pkl','rb') as f:
pos_cooc = pickle.load(f)
with open('./Data/word_cooccurrence.pkl','rb') as f:
word_cooc = pickle.load(f)
rewards = defaultdict(dd)
for key, val in word_cooc.items():
for word, score in val.items():
bigram = [key, word]
tagged_bigram = pos_tag(bigram)
if combine == 'prod':
rewards[key][word] = pos_cooc[tagged_bigram[0][1]][tagged_bigram[1][1]] * score
if combine == 'avg':
rewards[key][word] = (pos_cooc[tagged_bigram[0][1]][tagged_bigram[1][1]] + score) / 2
with open('./Data/word_pos_%s.pickle'%combine, 'wb') as f:
pickle.dump(rewards, f)
return rewards
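# Hypothetical illustration of the two combine modes above: for a bigram whose
# word-word score is 0.4 and whose POS-POS score is 0.2, 'prod' stores
# 0.4 * 0.2 = 0.08 while 'avg' stores (0.4 + 0.2) / 2 = 0.3.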
#def scale(val, old_min, old_max, new_min, new_max):
# new_val = (val - old_min)/(old_max - old_min)
# return new_val
#def count(number, base, shape):
# c = np.zeros(shape=shape)
# i = c.shape[0] - 1
# while number >= base:
# remainder = number % base
# c[i] = remainder
# i -= 1
# number = number / base
# if number != 0 and number < base:
# c[i] = number
# return c
def plot(data, method, trials, NEPS,eps,alp,g):
mean = np.mean(data, axis=1)
#print mean.shape
variance = np.mean(np.square(data.T-mean).T, axis=1)
#print variance
std = np.sqrt(variance)
#print std
x = list(np.arange(0,NEPS,1))
y = list(mean)
print 'Length of x: {} length of y: {}'.format(len(x), len(y))
err = list(std)
plt.axis((0,NEPS,0,15))
plt.errorbar(x, y, yerr=err, fmt='-ro')
#plt.plot(y)
plt.xlabel('Episode')
plt.ylabel('Expected return of reward')
plt.title('%s for %d trials, epsilon: %.4f, alpha: %.2f, gamma: %.2f' % (method, trials, float(eps), float(alp), float(g)))
plt.savefig('Expected_Return_%s_%d_unclipped.jpg' % (method, trials))
plt.show()
return mean[-1]
def log(method, trials, eps, gamma, alpha, maxima=None, time=0):
if os.path.exists('log'):
with open('log','r') as f:
data = f.readlines()
data.append('method: {0}, trials: {1}, epsilon: {2}, gamma: {3}, alpha: {4}, maximum value: {5}, time taken: {6}\n'.format(method, trials, eps, gamma, alpha, maxima, time))
else:
data = 'method: {0}, trials: {1}, epsilon: {2}, gamma: {3}, alpha: {4}, maximum value: {5}, time taken: {6}\n'.format(method, trials, eps, gamma, alpha, maxima, time)
with open('log','w') as f:
for line in data:
f.write(line)
|
[
"tsahay@umass.edu"
] |
tsahay@umass.edu
|
fc02fda54534594dd3a8358ecf562fc2cbd36a7e
|
0a1716384ac3425b0f457e210e43c0a499bd66d2
|
/process_files/_old/fix_processed_names.py
|
27e83d345283a04bd753cafb4edbf2a7f9b3850a
|
[] |
no_license
|
ilbarlow/process-rig-data
|
d54d0489ad42ef92e422915d01ac43feeb62bed3
|
89fc296628eb7f9260b099ee3cb2f25680905686
|
refs/heads/master
| 2020-03-18T21:50:05.775230
| 2018-03-28T20:13:41
| 2018-03-28T20:13:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,596
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 27 16:15:39 2016
@author: worm_rig
"""
import os
import shutil
import glob
import numpy as np
import pandas as pd
import warnings
from functools import partial
if __name__ == '__main__':
output_root = '/Volumes/behavgenom_archive$/Avelino/Worm_Rig_Tests/short_movies_new/'
#'/Volumes/behavgenom_archive$/Avelino/PeterAskjaer/'
exp_name = 'Double_pick_090217'#'Mutant_worm_screening_Y32H12A.7(ok3452)_220217'
tsv_file = os.path.join(output_root, 'ExtraFiles', exp_name + '_renamed.tsv')
tab = pd.read_table(tsv_file, names=['old', 'new'])
for _, row in tab.iterrows():
parts = row['old'].split(os.sep)
delP = [int(x[2:]) for x in parts if x.startswith('PC')][0]
old_base_name = os.path.splitext(os.path.basename(row['old']))[0]
old_ch = [int(x[2:]) for x in old_base_name.split('_') if x.startswith('Ch')][0]
base_name = os.path.splitext(os.path.basename(row['new']))[0]
real_ch = 'Ch{}'.format(2*(delP-1)+old_ch)
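# worked example of the line above: a file from PC2 whose old name carried Ch1
# maps to 'Ch{}'.format(2*(2-1)+1) == 'Ch3'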
fparts = base_name.split('_')
ff = [x.strip() if not x.startswith('Ch') else real_ch for x in fparts ]
new_base_name = '_'.join(ff)
search_str = os.path.join(output_root,'**', exp_name, base_name + '*')
fnames = glob.glob(search_str)
for bad_name in fnames:
good_name = bad_name.replace(base_name, new_base_name)
print(bad_name, good_name)
#shutil.move(bad_name, good_name)
|
[
"ajaver@MRC-8791.local"
] |
ajaver@MRC-8791.local
|
d172365081306da15a884cc5c29f601bd27ef325
|
1de6d55bf8c4d9333c9b21f9f8ee154c2aef3c7f
|
/phi/migrations/0033_auto_20180920_1225.py
|
8a10fe86ddf75f16db687871795e94e5d0be753b
|
[] |
no_license
|
FloCare/hha-backendtest
|
ad675c5da2fa23ec5d8ea58223bef28c4142483a
|
0918b932dcc5c44fae9799c05c17519abc54f7a7
|
refs/heads/master
| 2022-12-10T02:20:56.200101
| 2019-05-06T11:14:09
| 2019-05-06T11:14:09
| 184,367,994
| 0
| 0
| null | 2022-12-08T02:31:35
| 2019-05-01T04:19:43
|
Python
|
UTF-8
|
Python
| false
| false
| 572
|
py
|
# Generated by Django 2.0.6 on 2018-09-20 12:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_auth', '0013_auto_20180817_1008'),
('phi', '0032_physician_organization'),
]
operations = [
migrations.AlterField(
model_name='physician',
name='npi',
field=models.CharField(max_length=10),
),
migrations.AlterUniqueTogether(
name='physician',
unique_together={('organization', 'npi')},
),
]
|
[
"nikhil@flocare.health"
] |
nikhil@flocare.health
|
4178241c956b41e6c04cec3ba18389b1a237ab68
|
17beb9d3062db25c430acd0435953305431cbbf1
|
/binding.gyp
|
eaa8a16873c7dfa83bf3fe7dc0429b99d62a8463
|
[] |
no_license
|
hansmalherbe/node-opencv2
|
fb114157b9e60d474e17471ad737461eca4f5d62
|
d41d327fc9fd6104f1c24ec2a0fa5d835cbcb89f
|
refs/heads/master
| 2016-09-06T11:58:59.641102
| 2012-10-22T22:49:22
| 2012-10-22T22:49:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,801
|
gyp
|
{
'variables' : {
'opencv_dir': 'c:/opencv242/build/',
'boost_dir': 'c:/boost_1_49_0',
'conf': '$(ConfigurationName)',
},
'conditions': [
['OS=="win"', {
'conditions': [
['target_arch=="x64"', {
'variables': {
'opencv_libs': '<(opencv_dir)x64/vc10/',
'opencv_tbb': '<(opencv_dir)common/tbb/intel64/vc10/',
},
},{
'variables': {
'opencv_libs': '<(opencv_dir)x86/vc10/',
'opencv_tbb': '<(opencv_dir)common/tbb/ia32/vc10/',
},
}],
],
}],
],
'targets': [
{
'target_name': 'opencv2',
'sources': [
'./src/opencv2.cc',
'./src/help.cc',
'./src/mat.cc',
'./src/object_proxy.cc',
],
'msbuild_props': [
'node.vsprops'
],
'include_dirs': [ './src', '<(opencv_dir)include', '<(boost_dir)' ],
'link_settings': {
'libraries': [
'<(opencv_libs)lib/opencv_calib3d242.lib',
'<(opencv_libs)lib/opencv_contrib242.lib',
'<(opencv_libs)lib/opencv_core242.lib',
'<(opencv_libs)lib/opencv_features2d242.lib',
'<(opencv_libs)lib/opencv_flann242.lib',
'<(opencv_libs)lib/opencv_gpu242.lib',
'<(opencv_libs)lib/opencv_haartraining_engine.lib',
'<(opencv_libs)lib/opencv_highgui242.lib',
'<(opencv_libs)lib/opencv_imgproc242.lib',
'<(opencv_libs)lib/opencv_legacy242.lib',
'<(opencv_libs)lib/opencv_ml242.lib',
'<(opencv_libs)lib/opencv_nonfree242.lib',
'<(opencv_libs)lib/opencv_objdetect242.lib',
'<(opencv_libs)lib/opencv_photo242.lib',
'<(opencv_libs)lib/opencv_stitching242.lib',
'<(opencv_libs)lib/opencv_ts242.lib',
'<(opencv_libs)lib/opencv_video242.lib',
'<(opencv_libs)lib/opencv_videostab242.lib',
],
'conditions': [
['OS=="win"', {
'libraries/': [
['exclude', '\\.a$'],
],
}],
],
},
'conditions': [
['OS=="win"', {
'msvs_guid': 'FC93254D-884A-4FE7-B74F-2301D842BB78',
#'msvs_disabled_warnings': [4351, 4355, 4800],
'copies': [
{
'destination': './build/$(ConfigurationName)/',
'files': [
'<(opencv_tbb)tbb.dll',
'<(opencv_tbb)tbb_preview.dll',
'<(opencv_tbb)tbbmalloc.dll',
'<(opencv_tbb)tbbmalloc_proxy.dll',
'<(opencv_libs)bin/opencv_calib3d242.dll',
'<(opencv_libs)bin/opencv_contrib242.dll',
'<(opencv_libs)bin/opencv_core242.dll',
'<(opencv_libs)bin/opencv_features2d242.dll',
'<(opencv_libs)bin/opencv_flann242.dll',
'<(opencv_libs)bin/opencv_gpu242.dll',
'<(opencv_libs)bin/opencv_highgui242.dll',
'<(opencv_libs)bin/opencv_imgproc242.dll',
'<(opencv_libs)bin/opencv_legacy242.dll',
'<(opencv_libs)bin/opencv_ml242.dll',
'<(opencv_libs)bin/opencv_nonfree242.dll',
'<(opencv_libs)bin/opencv_objdetect242.dll',
'<(opencv_libs)bin/opencv_photo242.dll',
'<(opencv_libs)bin/opencv_stitching242.dll',
'<(opencv_libs)bin/opencv_ts242.dll',
'<(opencv_libs)bin/opencv_video242.dll',
'<(opencv_libs)bin/opencv_videostab242.dll',
],
'conditions': [
['target_arch=="x64"', {
'files': [
'<(opencv_libs)bin/opencv_ffmpeg242_64.dll',
],
}, {
'files': [
'<(opencv_libs)bin/opencv_ffmpeg242.dll',
],
}]
],
},
],
'configurations': {
'Debug': {
'msvs_settings': {
'VCLinkerTool': {
'AdditionalDependencies': [
'vfw32.lib',
'comctl32.lib',
'<(opencv_libs)staticlib/zlib.lib',
'<(opencv_libs)staticlib/libtiff.lib',
'<(opencv_libs)staticlib/libpng.lib',
'<(opencv_libs)staticlib/libjpeg.lib',
'<(opencv_libs)staticlib/libjasper.lib'
],
},
},
},
'Release': {
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': '2', # /EHsc
},
'VCLinkerTool': {
'AdditionalDependencies': [
'vfw32.lib',
'comctl32.lib',
'<(opencv_libs)staticlib/zlib.lib',
'<(opencv_libs)staticlib/libtiff.lib',
'<(opencv_libs)staticlib/libpng.lib',
'<(opencv_libs)staticlib/libjpeg.lib',
'<(opencv_libs)staticlib/libjasper.lib'
],
# LinkIncremental values:
# 0 == default
# 1 == /INCREMENTAL:NO
# 2 == /INCREMENTAL
#'LinkIncremental': '1',
},
},
},
},
'defines': [
'WINDOWS_SUPPRESS_WARNINGS',
],
'include_dirs': [],
}]
]
}
]
}
|
[
"hans.malherbe@gmail.com"
] |
hans.malherbe@gmail.com
|
37448d7967ed493b56ddd9b94af1582157f26f15
|
3d016301728a4428ec466653587f0f80c4f7eb11
|
/plugin/lighthouse/metadata.py
|
ea0afcf3dc444279943f34cbf0dc8925b321eb9b
|
[
"MIT"
] |
permissive
|
MosheWagner/lighthouse
|
bcb16e24612645cdcf441011b430d6b8408b0687
|
ca1454b2680b31d882339ff56efd34b546ba908d
|
refs/heads/master
| 2020-04-26T23:50:43.001011
| 2019-03-05T09:32:30
| 2019-03-05T09:32:30
| 173,915,816
| 2
| 0
|
MIT
| 2019-03-05T09:26:59
| 2019-03-05T09:26:55
|
Python
|
UTF-8
|
Python
| false
| false
| 35,794
|
py
|
import time
import Queue
import bisect
import logging
import weakref
import threading
import collections
from lighthouse.util.misc import *
from lighthouse.util.disassembler import disassembler
logger = logging.getLogger("Lighthouse.Metadata")
#------------------------------------------------------------------------------
# Metadata
#------------------------------------------------------------------------------
#
# To aid in performance, Lighthouse lifts and indexes an in-memory limited
# representation of the disassembler's open database. This is commonly
# referred to as 'metadata' throughout this codebase.
#
# Once built, the lifted metadata cache stands completely independent of
# the disassembler. This effectively eliminates the need for Lighthouse to
# communicate with the underlying disassembler / API (which is slow) when
# mapping coverage, or doing coverage composition logic.
#
# With this model, we have been able to move the heavy director based
# coverage composition logic to python-only threads without disrupting the
# user, or IDA. (added in v0.4.0)
#
# However, there are two main caveats of this model -
#
# 1. The cached 'metadata' representation may not always be true to state
# of the database. For example, if the user defines/undefines functions,
# the metadata cache will not be aware of such changes.
#
# Lighthouse will try to update the director's metadata cache when
# applicable, but there are instances when it will be in the best
# interest of the user to manually trigger a refresh of the metadata.
#
# 2. Building the metadata comes with an upfront cost, but this cost has
# been reduced as much as possible. For example, generating metadata for
# a database with ~17k functions, ~95k nodes (basic blocks), and ~563k
# instructions takes only ~6 seconds.
#
# This will be negligible for small-medium sized databases, but may still
# be jarring for larger databases.
#
# Ultimately, this model provides a more responsive user experience at
# the expense of occasional inaccuracies that can be corrected by a
# reasonably low-cost refresh.
#
#------------------------------------------------------------------------------
# Database Metadata
#------------------------------------------------------------------------------
class DatabaseMetadata(object):
"""
Database level metadata cache.
"""
def __init__(self):
# name & imagebase of the executable this metadata is based on
self.filename = ""
self.imagebase = -1
# database metadata cache status
self.cached = False
# the cache of key database structures
self.nodes = {}
self.functions = {}
self.instructions = []
# internal members to help index & navigate the cached metadata
self._stale_lookup = False
self._name2func = {}
self._last_node = [] # HACK: blank iterable for now
self._node_addresses = []
self._function_addresses = []
# placeholder attribute for disassembler event hooks
self._rename_hooks = None
# metadata callbacks (see director for more info)
self._function_renamed_callbacks = []
# asynchronous metadata collection thread
self._refresh_worker = None
self._stop_threads = False
def terminate(self):
"""
Cleanup & terminate the metadata object.
"""
self.abort_refresh(join=True)
if self._rename_hooks:
self._rename_hooks.unhook()
#--------------------------------------------------------------------------
# Providers
#--------------------------------------------------------------------------
def get_instructions_slice(self, start_address, end_address):
"""
Get the instructions addresses that fall within a given range.
"""
index_start = bisect.bisect_left(self.instructions, start_address)
index_end = bisect.bisect_left(self.instructions, end_address)
return self.instructions[index_start:index_end]
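# e.g. with self.instructions == [0x10, 0x14, 0x18, 0x20], a request for the
# range (0x12, 0x20) bisects to indexes 1 and 3 and returns [0x14, 0x18]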
def get_node(self, address):
"""
Get the node (basic block) metadata for a given address.
"""
assert not self._stale_lookup, "Stale metadata is unsafe to use..."
# fast path, effectively a LRU cache of 1 ;P
if address in self._last_node:
return self._last_node
#
# use the lookup lists to do a 'fuzzy' lookup of the given address,
# locating the index of the closest known node address (rounding down)
#
index = bisect.bisect_right(self._node_addresses, address) - 1
node_metadata = self.nodes.get(self._node_addresses[index], None)
#
# if the given address does not fall within the selected node (or the
# node simply does not exist), then we have no match/metadata to return
#
if not (node_metadata and address in node_metadata):
return None
#
# if the selected node metadata contains the given target address, it
# is a positive hit and we should cache this node (in last_node) for
# faster consecutive lookups
#
self._last_node = node_metadata
# return the located node_metadata
return node_metadata
def get_function(self, address):
"""
Get the function metadata for a given address.
"""
node_metadata = self.get_node(address)
if not node_metadata:
return None
return node_metadata.function
def get_function_by_name(self, function_name):
"""
Get the function metadata for a given function name.
"""
try:
return self.functions[self._name2func[function_name]]
except (IndexError, KeyError):
return None
def get_function_by_index(self, index):
"""
Get the function metadata for a given function index.
"""
try:
return self.functions[self._function_addresses[index]]
except (IndexError, KeyError):
return None
def get_function_index(self, address):
"""
Get the function index for a given address.
"""
return self._function_addresses.index(address)
def get_closest_function(self, address):
"""
Get the function metadata for the function closest to the given address.
"""
# sanity check
if not self._function_addresses:
return None
# get the closest insertion point of the given address
index = bisect.bisect_left(self._function_addresses, address)
# the given address is a min, return the first known function
if index == 0:
return self.functions[self._function_addresses[0]]
# given address is a max, return the last known function
if index == len(self._function_addresses):
return self.functions[self._function_addresses[-1]]
# select the two candidate addresses
before = self._function_addresses[index - 1]
after = self._function_addresses[index]
# return the function closest to the given address
if after - address < address - before:
return self.functions[after]
else:
return self.functions[before]
def flatten_blocks(self, basic_blocks):
"""
Flatten a list of basic blocks (address, size) to instruction addresses.
This function provides a way to convert a list of (address, size) basic
block entries into a list of individual instruction (or byte) addresses
based on the current metadata.
"""
output = []
for address, size in basic_blocks:
instructions = self.get_instructions_slice(address, address+size)
output.extend(instructions)
return output
def is_big(self):
"""
Return a bool indicating whether we think the database is 'big'.
"""
return len(self.functions) > 50000
#--------------------------------------------------------------------------
# Refresh
#--------------------------------------------------------------------------
def refresh(self, function_addresses=None, progress_callback=None):
"""
Request an asynchronous refresh of the database metadata.
TODO/FUTURE: we should make a synchronous refresh available
"""
assert self._refresh_worker == None, 'Refresh already running'
result_queue = Queue.Queue()
#
# reset the async abort/stop flag that can be used to cancel the
# ongoing refresh task
#
self._stop_threads = False
#
# kick off an asynchronous metadata collection task
#
self._refresh_worker = threading.Thread(
target=self._async_refresh,
args=(result_queue, function_addresses, progress_callback,)
)
self._refresh_worker.start()
#
# immediately return a queue to the caller which it can use to listen
# on and wait for a refresh completion message
#
return result_queue
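#
# sketch of how a caller might consume the returned queue (the caller-side
# names are hypothetical; metadata_progress is defined at the end of this
# file):
#
#   result_queue = metadata.refresh(progress_callback=metadata_progress)
#   success = result_queue.get()   # blocks until the refresh completes
#   if not success:
#       pass  # the refresh was aborted before completion
#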
def abort_refresh(self, join=False):
"""
Abort an asynchronous refresh.
To guarantee an asynchronous refresh has been canceled, the caller can
optionally wait for the result_queue from refresh() to return 'None'.
Alternatively, the `join` parameter can be set to `True`, making this
function block until the refresh is canceled.
"""
#
# the refresh worker (if it exists) can be ripped away at any time.
# take a local reference to avoid double-fetch problems
#
worker = self._refresh_worker
#
# if there is no worker present or running (cleaning up?) there is
# nothing for us to abort. Simply reset the abort flag (just in case)
# and return immediately
#
if not (worker and worker.is_alive()):
self._stop_threads = False
self._refresh_worker = None
return
# signal the worker thread to stop
self._stop_threads = True
# if requested, don't return until the worker thread has stopped...
if join:
worker.join()
def _refresh_instructions(self):
"""
Refresh the list of database instructions (from function metadata).
"""
instructions = []
for function_metadata in self.functions.itervalues():
instructions.extend(function_metadata.instructions)
instructions = list(set(instructions))
instructions.sort()
# commit the updated instruction list
self.instructions = instructions
def _refresh_lookup(self):
"""
Refresh the internal fast lookup address lists.
Fast lookup lists are simply sorted address lists of function metadata,
node metadata, or possibly other forms of metadata (in the future). We
create sorted lists of metadata object addresses so that we can use them
for fast, fuzzy address lookup (eg, bisect).
c.f:
- get_node(ea)
- get_function(ea)
"""
self._last_node = []
self._name2func = { f.name: f.address for f in self.functions.itervalues() }
self._node_addresses = sorted(self.nodes.keys())
self._function_addresses = sorted(self.functions.keys())
self._stale_lookup = False
#--------------------------------------------------------------------------
# Metadata Collection
#--------------------------------------------------------------------------
@not_mainthread
def _async_refresh(self, result_queue, function_addresses, progress_callback):
"""
The main routine for the asynchronous metadata refresh worker.
TODO/FUTURE: this should be cleaned up / refactored
"""
# pause our rename listening hooks (more performant collection)
if self._rename_hooks:
self._rename_hooks.unhook()
#
# if the caller provided no function addresses to target for refresh,
# we will perform a complete metadata refresh of all database defined
# functions. let's retrieve that list from the disassembler now...
#
if not function_addresses:
function_addresses = disassembler.execute_read(
disassembler.get_function_addresses
)()
# refresh database properties that we wish to cache
self._async_refresh_properties()
# refresh the core database metadata asynchronously
completed = self._async_collect_metadata(
function_addresses,
progress_callback
)
# regenerate the instruction list from collected metadata
self._refresh_instructions()
# refresh the internal function/node fast lookup lists
self._refresh_lookup()
#
# NOTE:
#
# creating the hooks inline like this is less than ideal, but they
# have been moved here (from the metadata constructor) to
# accommodate shortcomings of the Binary Ninja API.
#
# TODO/FUTURE/V35:
#
# it would be nice to move these back to the constructor once the
# Binary Ninja API allows us to detect BV / sessions as they are
# created, and able to load plugins on such events.
#
#----------------------------------------------------------------------
# create the disassembler hooks to listen for rename events
if not self._rename_hooks:
self._rename_hooks = disassembler.create_rename_hooks()
self._rename_hooks.renamed = self._name_changed
self._rename_hooks.metadata = weakref.proxy(self)
#----------------------------------------------------------------------
# reinstall the rename listener hooks now that the refresh is done
self._rename_hooks.hook()
# send the refresh result (good/bad) incase anyone is still listening
if completed:
self.cached = True
result_queue.put(True)
else:
result_queue.put(False)
# clean up our thread's reference as it is basically done/dead
self._refresh_worker = None
# thread exit...
return
@disassembler.execute_read
def _async_refresh_properties(self):
"""
Refresh a selection of interesting database properties.
"""
self.filename = disassembler.get_root_filename()
self.imagebase = disassembler.get_imagebase()
@not_mainthread
def _async_collect_metadata(self, function_addresses, progress_callback):
"""
Collect metadata from the underlying database (interruptable).
"""
CHUNK_SIZE = 150
completed = 0
start = time.time()
#----------------------------------------------------------------------
for addresses_chunk in chunks(function_addresses, CHUNK_SIZE):
#
# collect function metadata from the open database in groups of
# CHUNK_SIZE. collect_function_metadata() takes a list of function
# addresses and collects their metadata in a thread-safe manner
#
fresh_metadata = collect_function_metadata(addresses_chunk)
# update our database metadata cache with the new function metadata
self._update_functions(fresh_metadata)
# report incremental progress to an optional progress_callback
if progress_callback:
completed += len(addresses_chunk)
progress_callback(completed, len(function_addresses))
# if the refresh was canceled, stop collecting metadata and bail
if self._stop_threads:
return False
# sleep some so we don't choke the mainthread
time.sleep(.0015)
#----------------------------------------------------------------------
end = time.time()
logger.debug("Metadata collection took %s seconds" % (end - start))
# refresh completed normally / was not interrupted
return True
def _update_functions(self, fresh_metadata):
"""
Update stored function metadata with the given fresh metadata.
Returns a map of {address: function metadata} that has been updated.
"""
blank_function = FunctionMetadata(-1)
#
# the first step is to loop through the 'fresh' function metadata that
# has been given to us, and identify what is truly new or different
# from any existing metadata we hold.
#
for function_address, new_metadata in fresh_metadata.iteritems():
# extract the 'old' metadata from the database metadata cache
old_metadata = self.functions.get(function_address, blank_function)
#
# if the fresh metadata for this function is identical to the
# existing metadata we have collected for it, there's nothing
# else for us to do -- just ignore it.
#
if old_metadata == new_metadata:
continue
# delete nodes that explicitly no longer exist
old = old_metadata.nodes.viewkeys() - new_metadata.nodes.viewkeys()
for node_address in old:
del self.nodes[node_address]
#
# if the newly collected metadata for a given function is empty, this
# indicates that the function has been deleted. we go ahead and
# remove its old function metadata from the db metadata entirely
#
if new_metadata.empty:
del self.functions[function_address]
continue
# add or overwrite the new/updated basic blocks
self.nodes.update(new_metadata.nodes)
# save the new/updated function
self.functions[function_address] = new_metadata
#
# since the node / function metadata cache has probably changed, we
# will need to refresh the internal fast lookup lists. this flag is
# only really used for debugging, and will probably be removed
# in the TODO/FUTURE collection refactor (v0.9?)
#
self._stale_lookup = True
#--------------------------------------------------------------------------
# Signal Handlers
#--------------------------------------------------------------------------
@mainthread
def _name_changed(self, address, new_name, local_name=None):
"""
Handler for rename event in IDA.
TODO/FUTURE: refactor this to not be so IDA-specific
"""
# we should never care about local renames (eg, loc_40804b), ignore
if local_name or new_name.startswith("loc_"):
return 0
# get the function that this address falls within
function = self.get_function(address)
# if the address does not fall within a function (might happen?), ignore
if not function:
return 0
#
# ensure the renamed address matches the function start before
# renaming the function in our metadata cache.
#
# I am not sure when this would not be the case (globals? maybe)
# but I'd rather not find out.
#
if address != function.address:
return
# if the name isn't actually changing (misfire?) nothing to do
if new_name == function.name:
return
logger.debug("Name changing @ 0x%X" % address)
logger.debug(" Old name: %s" % function.name)
logger.debug(" New name: %s" % new_name)
# rename the function, and notify metadata listeners
#function.name = new_name
function.refresh_name()
self._notify_function_renamed()
# necessary for IDP/IDB_Hooks
return 0
#--------------------------------------------------------------------------
# Callbacks
#--------------------------------------------------------------------------
def function_renamed(self, callback):
"""
Subscribe a callback for function rename events.
"""
register_callback(self._function_renamed_callbacks, callback)
def _notify_function_renamed(self):
"""
Notify listeners of a function rename event.
"""
notify_callback(self._function_renamed_callbacks)
#------------------------------------------------------------------------------
# Function Metadata
#------------------------------------------------------------------------------
class FunctionMetadata(object):
"""
Function level metadata cache.
"""
def __init__(self, address):
# function metadata
self.address = address
self.name = None
# node metadata
self.nodes = {}
self.edges = collections.defaultdict(list)
# fixed/baked/computed metrics
self.size = 0
self.node_count = 0
self.edge_count = 0
self.instruction_count = 0
self.cyclomatic_complexity = 0
# collect metadata from the underlying database
if address != -1:
self._build_metadata()
#--------------------------------------------------------------------------
# Properties
#--------------------------------------------------------------------------
@property
def instructions(self):
"""
Return the instruction addresses in this function.
"""
return set([ea for node in self.nodes.itervalues() for ea in node.instructions])
@property
def empty(self):
"""
Return a bool indicating whether the object is populated.
"""
return len(self.nodes) == 0
#--------------------------------------------------------------------------
# Public
#--------------------------------------------------------------------------
@disassembler.execute_read
def refresh_name(self):
"""
Refresh the function name against the open database.
"""
self.name = disassembler.get_function_name_at(self.address)
#--------------------------------------------------------------------------
# Metadata Population
#--------------------------------------------------------------------------
def _build_metadata(self):
"""
Collect function metadata from the underlying database.
"""
self.name = disassembler.get_function_name_at(self.address)
self._refresh_nodes()
self._finalize()
def _refresh_nodes(self):
"""
This will be replaced with a disassembler-specific function at runtime.
NOTE: Read the 'MONKEY PATCHING' section at the end of this file.
"""
raise RuntimeError("This function should have been monkey patched...")
def _ida_refresh_nodes(self):
"""
Refresh function node metadata against an open IDA database.
"""
function_metadata = self
function_metadata.nodes = {}
# get function & flowchart object from IDA database
function = idaapi.get_func(self.address)
flowchart = idaapi.qflow_chart_t("", function, idaapi.BADADDR, idaapi.BADADDR, 0)
#
# now we will walk the flowchart for this function, collecting
# information on each of its nodes (basic blocks) and populating
# the function & node metadata objects.
#
for node_id in xrange(flowchart.size()):
node = flowchart[node_id]
# NOTE/COMPAT
if disassembler.USING_IDA7API:
node_start = node.start_ea
node_end = node.end_ea
else:
node_start = node.startEA
node_end = node.endEA
#
# the current node appears to have a size of zero. This means
# that another flowchart / function owns this node so we can just
# ignore it...
#
if node_start == node_end:
continue
# create a new metadata object for this node
node_metadata = NodeMetadata(node_start, node_end, node_id)
#
# establish a relationship between this node (basic block) and
# this function metadata (its parent)
#
node_metadata.function = function_metadata
function_metadata.nodes[node_start] = node_metadata
# compute all of the edges between nodes in the current function
for node_metadata in function_metadata.nodes.itervalues():
edge_src = node_metadata.instructions[-1]
for edge_dst in idautils.CodeRefsFrom(edge_src, True):
if edge_dst in function_metadata.nodes:
function_metadata.edges[edge_src].append(edge_dst)
def _binja_refresh_nodes(self):
"""
Refresh function node metadata against an open Binary Ninja database.
"""
function_metadata = self
function_metadata.nodes = {}
# get the function from the Binja database
function = disassembler.bv.get_function_at(self.address)
#
# now we will walk the flowchart for this function, collecting
# information on each of its nodes (basic blocks) and populating
# the function & node metadata objects.
#
for node in function.basic_blocks:
# create a new metadata object for this node
node_metadata = NodeMetadata(node.start, node.end, node.index)
#
# establish a relationship between this node (basic block) and
# this function metadata (its parent)
#
node_metadata.function = function_metadata
function_metadata.nodes[node.start] = node_metadata
#
# enumerate the edges produced by this node (basic block) with a
# destination that falls within this function.
#
edge_src = node_metadata.instructions[-1]
for edge in node.outgoing_edges:
function_metadata.edges[edge_src].append(edge.target.start)
def _compute_complexity(self):
"""
Walk the function CFG to determine approximate cyclomatic complexity.
The purpose of this function is mostly to account for IDA's inclusion
of additional floating nodes in function flowcharts. These blocks tend
to be for exception handlers, but can manifest in various other cases.
By walking the function CFG, we can identify these 'disembodied'
blocks that have no incoming edge and ignore them in our cyclomatic
complexity calculation. Not doing so will radically throw off the
cyclomatic complexity score.
"""
confirmed_nodes = set()
confirmed_edges = {}
#
# to_walk contains a list of node addresses. we draw from this list
# one at a time, walking across all of the outgoing edges from the
# current node (node_address) to walk the function graph
#
to_walk = set([self.address])
while to_walk:
# this is the address of the node we will 'walk' from
node_address = to_walk.pop()
confirmed_nodes.add(node_address)
# now we loop through all edges that originate from this block
current_src = self.nodes[node_address].instructions[-1]
for current_dest in self.edges[current_src]:
# ignore nodes we have already visited
if current_dest in confirmed_nodes:
continue
#
# it appears that this node has not been visited yet, so we
# will want to walk its edges sometime soon to continue the
# graph exploration
#
to_walk.add(current_dest)
# update the map of confirmed (walked) edges
confirmed_edges[current_src] = self.edges.pop(current_src)
# compute the final cyclomatic complexity for the function
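# (standard formula: V(G) = E - N + 2P, where P, the number of connected
# components, is 1 for the single function graph walked above)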
num_edges = sum(len(x) for x in confirmed_edges.itervalues())
num_nodes = len(confirmed_nodes)
return num_edges - num_nodes + 2
def _finalize(self):
"""
Finalize function metadata for use.
"""
self.size = sum(node.size for node in self.nodes.itervalues())
self.node_count = len(self.nodes)
self.edge_count = len(self.edges)
self.instruction_count = sum(node.instruction_count for node in self.nodes.itervalues())
self.cyclomatic_complexity = self._compute_complexity()
#--------------------------------------------------------------------------
# Operator Overloads
#--------------------------------------------------------------------------
def __eq__(self, other):
"""
Compute function metadata equality (==)
"""
result = True
result &= self.name == other.name
result &= self.size == other.size
result &= self.address == other.address
result &= self.node_count == other.node_count
result &= self.instruction_count == other.instruction_count
result &= self.nodes.viewkeys() == other.nodes.viewkeys()
return result
#------------------------------------------------------------------------------
# Node Metadata
#------------------------------------------------------------------------------
class NodeMetadata(object):
"""
Node (basic block) level metadata cache.
"""
def __init__(self, start_ea, end_ea, node_id=None):
# node metadata
self.size = end_ea - start_ea
self.address = start_ea
self.instruction_count = 0
# flowchart node_id
self.id = node_id
# parent function_metadata
self.function = None
# instruction addresses
self.instructions = []
#----------------------------------------------------------------------
# collect metadata from the underlying database
self._build_metadata()
#--------------------------------------------------------------------------
# Metadata Population
#--------------------------------------------------------------------------
def _build_metadata(self):
"""
This will be replaced with a disassembler-specific function at runtime.
NOTE: Read the 'MONKEY PATCHING' section at the end of this file.
"""
raise RuntimeError("This function should have been monkey patched...")
def _ida_build_metadata(self):
"""
Collect node metadata from the underlying database.
"""
current_address = self.address
node_end = self.address + self.size
#
# loop through the node's entire address range and count its
# instructions. Note that we are assuming that every defined
# 'head' (in IDA) is an instruction
#
while current_address < node_end:
instruction_size = idaapi.get_item_end(current_address) - current_address
self.instructions.append(current_address)
current_address += instruction_size
# save the number of instructions in this block
self.instruction_count = len(self.instructions)
def _binja_build_metadata(self):
"""
Collect node metadata from the underlying database.
"""
bv = disassembler.bv
current_address = self.address
node_end = self.address + self.size
#
# Note that we 'iterate over' the instructions using their byte length
# because it is far more performant than Binary Ninja's instruction
# generators which also produce instruction text, tokens etc...
#
while current_address < node_end:
self.instructions.append(current_address)
current_address += bv.get_instruction_length(current_address)
# save the number of instructions in this block
self.instruction_count = len(self.instructions)
#--------------------------------------------------------------------------
# Operator Overloads
#--------------------------------------------------------------------------
def __str__(self):
"""
Printable NodeMetadata.
"""
output = ""
output += "Node 0x%08X Info:\n" % self.address
output += " Address: 0x%08X\n" % self.address
output += " Size: %u\n" % self.size
output += " Instruction Count: %u\n" % self.instruction_count
output += " Id: %u\n" % self.id
output += " Function: %s\n" % self.function
output += " Instructions: %s" % self.instructions
return output
def __contains__(self, address):
"""
Overload python's 'in' keyword for this object.
This allows us to use `in` to check if an address falls within a node.
"""
if self.address <= address < self.address + self.size:
return True
return False
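#
# for example, with a node at 0x401000 of size 0x20 (hypothetical values),
# `0x401010 in node_metadata` is True while `0x401020 in node_metadata` is
# False, because the end address is exclusive
#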
def __eq__(self, other):
"""
Compute node equality (==)
"""
result = True
result &= self.size == other.size
result &= self.address == other.address
result &= self.instruction_count == other.instruction_count
result &= self.function == other.function
result &= self.id == other.id
return result
#------------------------------------------------------------------------------
# Async Metadata Helpers
#------------------------------------------------------------------------------
@disassembler.execute_read
def collect_function_metadata(function_addresses):
"""
Collect function metadata for a list of addresses.
"""
return { ea: FunctionMetadata(ea) for ea in function_addresses }
@disassembler.execute_ui
def metadata_progress(completed, total):
"""
Handler for metadata collection callback, updates progress dialog.
"""
disassembler.replace_wait_box(
"Collected metadata for %u/%u Functions" % (completed, total)
)
#------------------------------------------------------------------------------
# MONKEY PATCHING
#------------------------------------------------------------------------------
#
# We use 'monkey patching' to modify the Metadata class definitions at
# runtime. Specifically, we use it to swap in metadata collection routines
# that have been carefully tailored for a given disassembler.
#
# The reason for this is that the metadata collection code is very
# disassembler-specific, and that it needs to be as performant as possible.
# Shimming metadata collection code to be disassembler agnostic is going
# to be messy and slow.
#
if disassembler.NAME == "IDA":
import idaapi
import idautils
FunctionMetadata._refresh_nodes = FunctionMetadata._ida_refresh_nodes
NodeMetadata._build_metadata = NodeMetadata._ida_build_metadata
elif disassembler.NAME == "BINJA":
import binaryninja
FunctionMetadata._refresh_nodes = FunctionMetadata._binja_refresh_nodes
NodeMetadata._build_metadata = NodeMetadata._binja_build_metadata
else:
raise NotImplementedError("DISASSEMBLER-SPECIFIC SHIM MISSING")
# [author of the file above: markus.gaasedelen@gmail.com]
# ---- next file: azclibot/azure-cli-extensions :: /src/sqlvm-preview/azext_sqlvm_preview/vendored_sdks/sqlvirtualmachine/models/wsfc_domain_profile_py3.py (Python, MIT, 3,274 bytes) ----
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WsfcDomainProfile(Model):
"""Active Directory account details to operate Windows Server Failover
Cluster.
:param domain_fqdn: Fully qualified name of the domain.
:type domain_fqdn: str
:param ou_path: Organizational Unit path in which the nodes and cluster
will be present.
:type ou_path: str
:param cluster_bootstrap_account: Account name used for creating cluster
(at minimum needs permissions to 'Create Computer Objects' in domain).
:type cluster_bootstrap_account: str
:param cluster_operator_account: Account name used for operating cluster
i.e. will be part of administrators group on all the participating virtual
machines in the cluster.
:type cluster_operator_account: str
:param sql_service_account: Account name under which SQL service will run
on all participating SQL virtual machines in the cluster.
:type sql_service_account: str
:param file_share_witness_path: Optional path for fileshare witness.
:type file_share_witness_path: str
:param storage_account_url: Fully qualified ARM resource id of the witness
storage account.
:type storage_account_url: str
:param storage_account_primary_key: Primary key of the witness storage
account.
:type storage_account_primary_key: str
"""
_attribute_map = {
'domain_fqdn': {'key': 'domainFqdn', 'type': 'str'},
'ou_path': {'key': 'ouPath', 'type': 'str'},
'cluster_bootstrap_account': {'key': 'clusterBootstrapAccount', 'type': 'str'},
'cluster_operator_account': {'key': 'clusterOperatorAccount', 'type': 'str'},
'sql_service_account': {'key': 'sqlServiceAccount', 'type': 'str'},
'file_share_witness_path': {'key': 'fileShareWitnessPath', 'type': 'str'},
'storage_account_url': {'key': 'storageAccountUrl', 'type': 'str'},
'storage_account_primary_key': {'key': 'storageAccountPrimaryKey', 'type': 'str'},
}
def __init__(self, *, domain_fqdn: str=None, ou_path: str=None, cluster_bootstrap_account: str=None, cluster_operator_account: str=None, sql_service_account: str=None, file_share_witness_path: str=None, storage_account_url: str=None, storage_account_primary_key: str=None, **kwargs) -> None:
super(WsfcDomainProfile, self).__init__(**kwargs)
self.domain_fqdn = domain_fqdn
self.ou_path = ou_path
self.cluster_bootstrap_account = cluster_bootstrap_account
self.cluster_operator_account = cluster_operator_account
self.sql_service_account = sql_service_account
self.file_share_witness_path = file_share_witness_path
self.storage_account_url = storage_account_url
self.storage_account_primary_key = storage_account_primary_key
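# example construction using a subset of the keyword-only parameters (the
# values below are purely illustrative; every parameter defaults to None):
#
#   profile = WsfcDomainProfile(
#       domain_fqdn='contoso.local',
#       cluster_bootstrap_account='bootstrap@contoso.local',
#       sql_service_account='sqlsvc@contoso.local',
#   )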
# [author of the file above: wx44@cornell.edu]
# ---- next file: HumanAlone/stepik_tours_week_2 :: /stepik_tours/settings.py (Python, no license, 3,215 bytes) ----
"""
Django settings for stepik_tours project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u79-$0^365(t&nqv6z*@he*(v3n7o__$xkd*68mt!9vp7fmqzv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
STATIC_ROOT = 'static'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tours',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'stepik_tours.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'tours/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'tours.context_processors.departure_processor',
],
},
},
]
WSGI_APPLICATION = 'stepik_tours.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# [author of the file above: humanalone@ya.ru]
# ---- next file: HyeongRae/cloud_computing :: /HW3/mysite/db/urls.py (Python, no license, 761 bytes) ----
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='db_index'),
]
# [author of the file above: noreply@github.com]
# ---- next file: vatsalmehta-3009/MLZS-CLASS-11-PRACTICALS :: /Bhakti/math variable.py (Python, no license, 31 bytes) ----
import math
print (math.pi)
# [author of the file above: vatsalmehta3009@gmail.com]
# ---- next file: MorganRuffell/ue4-docker :: /ue4docker/test.py (Python, MIT, 2,845 bytes) ----
from .infrastructure import DockerUtils, GlobalConfiguration, Logger
from container_utils import ContainerUtils, ImageUtils
import docker, os, platform, sys
def test():
# Create our logger to generate coloured output on stderr
logger = Logger(prefix='[{} test] '.format(sys.argv[0]))
# Create our Docker API client
client = docker.from_env()
# Check that an image tag has been specified
if len(sys.argv) > 1 and sys.argv[1].strip('-') not in ['h', 'help']:
# Verify that the specified container image exists
tag = sys.argv[1]
image = GlobalConfiguration.resolveTag('ue4-full:{}'.format(tag) if ':' not in tag else tag)
if DockerUtils.exists(image) == False:
logger.error('Error: the specified container image "{}" does not exist.'.format(image))
sys.exit(1)
# Use process isolation mode when testing Windows containers, since running Hyper-V containers don't currently support manipulating the filesystem
platform = ImageUtils.image_platform(client, image)
isolation = 'process' if platform == 'windows' else None
# Start a container to run our tests in, automatically stopping and removing the container when we finish
logger.action('Starting a container using the "{}" image...'.format(image), False)
container = ContainerUtils.start_for_exec(client, image, isolation=isolation)
with ContainerUtils.automatically_stop(container):
# Create the workspace directory in the container
workspaceDir = ContainerUtils.workspace_dir(container)
ContainerUtils.exec(container, ContainerUtils.shell_prefix(container) + ['mkdir ' + workspaceDir])
# Copy our test scripts into the container
testDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests')
ContainerUtils.copy_from_host(container, testDir, workspaceDir)
# Create a harness to invoke individual tests
containerPath = ContainerUtils.path(container)
pythonCommand = 'python' if ContainerUtils.container_platform(container) == 'windows' else 'python3'
def runTest(script):
logger.action('Running test "{}"...'.format(script), False)
try:
ContainerUtils.exec(container, [pythonCommand, containerPath.join(workspaceDir, script)], workdir=workspaceDir)
logger.action('Passed test "{}"'.format(script), False)
except RuntimeError as e:
logger.error('Error: test "{}" failed!'.format(script))
raise e from None
# Run each of our tests in turn
runTest('build-and-package.py')
runTest('consume-external-deps.py')
# If we've reached this point then all of the tests passed
logger.action('All tests passed.', False)
else:
# Print usage syntax
print('Usage: {} test TAG'.format(sys.argv[0]))
print('Runs tests to verify the correctness of built container images\n')
print('TAG should specify the tag of the ue4-full image to test.')
# [author of the file above: adam@adamrehn.com]
# ---- next file: huangyingw/submissions :: /252/252.meeting-rooms.234346443.Runtime-Error.leetcode.py (Python, no license, 316 bytes) ----
class Solution:
def canAttendMeetings(self, intervals):
overlap = []
for interval in sorted(intervals, key=lambda x: x.start):
if overlap and overlap[-1].end > interval.start:
return False
else:
overlap.append(interval)
return True
# [author of the file above: huangyingw@gmail.com]
# ---- next file: zhangxingshuo/py-robot :: /Camera/cam.py (Python, no license, 996 bytes) ----
'''
Video Camera
============
Simple video capture program.
Usage:
------
cam.py [<video source>]
Press 's' to save an image.
Press ESC to exit.
'''
import cv2
import numpy as np
from datetime import datetime
class Cam(object):
def __init__(self, src):
self.cam = cv2.VideoCapture(src)
ret, self.frame = self.cam.read()
cv2.namedWindow('Camera')
def save(self):
filename = 'cam_img/frame_' + str(datetime.now()).replace('/','-')[:19] + '.jpg'
cv2.imwrite(filename, self.frame)
def run(self):
while True:
ret, self.frame = self.cam.read()
cv2.imshow('Camera', self.frame)
k = 0xFF & cv2.waitKey(5)
if k == 27:
break
if k == ord('s'):
self.save()
cv2.destroyAllWindows()
if __name__ == '__main__':
import sys
try:
src = sys.argv[1]
except:
src = 0
print(__doc__)
Cam(src).run()
# [author of the file above: axzhang@hmc.edu]
# ---- next file: loonaticx/ToonTrouble :: /src/world/Landwalker.py (Python, no license, 8,028 bytes) ----
from panda3d.core import *
from panda3d.core import CollisionTraverser
from panda3d.core import PandaNode, NodePath
from direct.controls.GravityWalker import GravityWalker
from direct.gui.DirectButton import DirectButton
from direct.gui.DirectScrolledList import DirectScrolledList
from direct.task import Task
from src.actor import ActorDict, ActorManager, LandwalkerAvatarControls
from src.gamebase import LandwalkerGlobals
from src.scenefx import EffectsManager
#borrowed the xray mod from /samples/culling/portal_culling.py
# https://www.panda3d.org/manual/?title=Common_Image_Filters
#possibly make a slider for bloom
#YO WHAT IF I MAKE A BENCHMARK PROGRAM
objectList = list()
actor = ActorManager
#filters = CommonFilters(base.win, base.cam)
graphicShaders = EffectsManager
class Landwalker():
def __init__(self):
self.onScreenDebug = onScreenDebug
pass
def loadGame(self):
# Setting up key maps and the instruction set into the scene...
LandwalkerGlobals.setKeys()
LandwalkerGlobals.setInstructions()
# Loads our world.
scene = self.loadWorld()
# Makes our local avatar.
localAvatar = actor.makeActor()
base.localAvatar = localAvatar
base.localAvatar.reparentTo(render)
# Load our buttons.
self.LoadButtons()
# Load our shaders.
#fog = loadFog()
#print(fogStats(fog))
EffectsManager.loadShaders()
#FogDensity = EffectsManager.loadFog(1)
# Floater Object (For camera)
floater = NodePath(PandaNode("floater"))
floater.reparentTo(localAvatar)
floater.setY(-10)
floater.setZ(8.5)
floater.setHpr(0, -10, 0)
# Set Camera
camera.reparentTo(floater)
wallBitmask = BitMask32(1)
floorBitmask = BitMask32(2)
base.cTrav = CollisionTraverser()
# Walk controls
walkControls = GravityWalker(legacyLifter=True)
walkControls.setWallBitMask(wallBitmask)
walkControls.setFloorBitMask(floorBitmask)
walkControls.setWalkSpeed(16.0, 24.0, 8.0, 80.0)
walkControls.initializeCollisions(base.cTrav, localAvatar, floorOffset=0.025, reach=4.0)
walkControls.setAirborneHeightFunc(LandwalkerAvatarControls.getAirborneHeight())
walkControls.enableAvatarControls()
# controlManager.add(walkControls, 'walk')
localAvatar.physControls = walkControls
localAvatar.physControls.placeOnFloor()
# Some debug stuff, should be moved later once I can toggle stuff from different files.
self.onScreenDebug.enabled = True
base.setFrameRateMeter(True)
base.taskMgr.add(LandwalkerAvatarControls.move, "moveTask")
base.taskMgr.add(self.updateOnScreenDebug, 'UpdateOSD')
# Loading our world.
def loadWorld(self):
# Loading our Scene
background = loader.loadModel('phase_4/models/neighborhoods/toontown_central.bam')
background.reparentTo(render)
background.show()
objectList.append(background)
print("Loading world")
return background
def removeWorld(scene):
scene.removeNode()
# This shouldn't exist in the future for this class.
def loadFog(self):
fog = Fog('distanceFog')
fog.setColor(0, 0, 0)
fog.setExpDensity(.07)
render.setFog(fog)
fog.setOverallHidden(False)
return fog
def fogStats(fog):
return [fog, fog.getExpDensity(), LandwalkerGlobals.fogEnabled]
# Loading our actor.
def getActor(self):
actorStartPos = self.scene.find("**/start_point").getPos()
actorBody = ActorDict.playerBody
actorBody.reparentTo(render)
actorBody.loop('neutral')
actorBody.setPos(actorStartPos + (0, 0, 1.5))
actorBody.setScale(0.3)
actorBody.setH(-180)
def ActorHead():
actorHead = loader.loadModel("custom/def_m.bam")
actorHead.reparentTo(actorBody.find('**/to_head'))
actorHead.setScale(0.20)
actorHead.setZ(0)
actorHead.setH(-180)
ActorHead()
return actorBody
# Loading onscreen buttons.
def LoadButtons(self):
Button_Up = loader.loadModel('phase_3/models/gui/quit_button.bam').find('**/QuitBtn_UP')
Button_Down = loader.loadModel('phase_3/models/gui/quit_button.bam').find('**/QuitBtn_DN')
Button_Rlvr = loader.loadModel('phase_3/models/gui/quit_button.bam').find('**/QuitBtn_RLVR')
# https://pastebin.com/agdb8260
Arrow_Up = loader.loadModel('phase_3/models/gui/nameshop_gui.bam').find('**/triangleButtonUp')
Arrow_Down = loader.loadModel('phase_3/models/gui/nameshop_gui.bam').find('**/triangleButtonDwn')
Arrow_Rlvr = loader.loadModel('phase_3/models/gui/nameshop_gui.bam').find('**/triangleButtonRllvr')
Buttons = [Button_Up, Button_Down, Button_Rlvr]
numItemsVisible = 4
itemHeight = 0.11
myScrolledList = DirectScrolledList(
decButton_pos=(0.35, 0, 0.54),
decButton_text_scale=0.04,
decButton_relief=None,
decButton_image=(Arrow_Up, Arrow_Down, Arrow_Rlvr),
incButton_pos=(0.35, 0, -0.01),
incButton_hpr=(0, 0, 180),
incButton_text_scale=0.04,
incButton_relief=None,
incButton_image=(Arrow_Up, Arrow_Down, Arrow_Rlvr),
pos=(0.74, 0, 0.4),
numItemsVisible=numItemsVisible,
forceHeight=itemHeight,
itemFrame_pos=(0.35, 0, 0.43))
modelArray = ['phase_4/models/neighborhoods/toontown_central.bam',
'phase_13/models/parties/partyGrounds.bam',
'models/world.egg.pz',
'custom/ship/ship.egg']
nameArray = ['Toontown Central', 'Party Grounds', 'Default World', 'Ship Test']
for index, name in enumerate(nameArray):
l = DirectButton(text=name, image=(Buttons), extraArgs=[modelArray[index]], command=self.spawnObject,
text_scale=0.045, text_pos=(0, -0.007, 0), relief=None)
myScrolledList.addItem(l)
# Used to spawn objects within the scene.
def spawnObject(self, modelName):
# If spawned object already exists, we're gonna need to remove it
while len(objectList) >= 1:
for world in objectList:
world.removeNode()
objectList.pop(0)
self.spawnObject = loader.loadModel(modelName)
self.spawnObject.reparentTo(render)
self.spawnObject.setPos(base.localAvatar.getPos())
objectList.append(self.spawnObject)
print("Model Name: " + repr(modelName))
print("Spawned Object: " + repr(self.spawnObject))
def toggle_osd(self):
self.OSD = not self.OSD
if self.OSD:
self.onScreenDebug.enabled = True
else:
self.onScreenDebug.enabled = False
def updateOnScreenDebug(self, task):
if(onScreenDebug.enabled):
onScreenDebug.add('Avatar Position', base.localAvatar.getPos())
onScreenDebug.add('Avatar Angle', base.localAvatar.getHpr())
onScreenDebug.add('Camera Position', base.camera.getPos())
onScreenDebug.add('Camera Angle', base.camera.getHpr())
return Task.cont
def unloadShaders(self):
if self.shadersLoaded:
self.drawnScene.hide()
self.shadersLoaded = False
def loadCartoonShaders(self):
if not self.shadersLoaded:
separation = 0.0015
cutoff = 0.35
inkGen = loader.loadShader("shaders/inkGen.sha")
self.drawnScene.setShader(inkGen)
self.drawnScene.setShaderInput("separation", LVecBase4(separation, 0, separation, 0))
self.drawnScene.setShaderInput("cutoff", LVecBase4(cutoff))
self.drawnScene.show()
self.shadersLoaded = True
# [author of the file above: l.oony@aol.com]
# ---- next file: k3itaro-k/CRUD :: /clients/commands.py (Python, no license, 2,834 bytes) ----
import click
from clients.services import ClientService
from clients.models import Client
@click.group()
def clients():
"""Manages the clients lifecycles"""
pass
@clients.command()
@click.option('-n','--name', type=str, prompt=True, help= 'The client name')
@click.option('-c','--company', type=str, prompt=True, help= 'The client company')
@click.option('-e','--email', type=str, prompt=True, help= 'The client email')
@click.option('-p','--position', type=str, prompt=True, help= 'The client position')
@click.pass_context
def create(ctx, name, company, email, position):
"""Create a new client"""
client = Client(name, company, email, position)
client_service = ClientService(ctx.obj['clients_table'])
client_service.create_client(client)
click.echo('*'*25+' Client created. '+'*'*25)
@clients.command()
@click.pass_context
def list(ctx):
"""list all clients"""
client_service = ClientService(ctx.obj['clients_table'])
clients = client_service.list_clients()
click.echo('ID | NAME | COMPANY | EMAIL | POSITION')
click.echo('*'*100)
for client in clients:
click.echo(f' {client["uid"]} | {client["name"]} | {client["company"]} | {client["email"]} | {client["position"]}')
@clients.command()
@click.argument('client_uid', type=str)
@click.pass_context
def update(ctx, client_uid):
"""update a client"""
client_service = ClientService(ctx.obj['clients_table'])
client_list = client_service.list_clients()
client = [client for client in client_list if client['uid']==client_uid]
if client:
client = _update_client_flow(Client(**client[0]))
client_service.update_client(client)
click.echo('*'*25+' Client updated. '+'*'*25)
else:
click.echo('*'*25+' Client not found. '+'*'*25)
def _update_client_flow(client):
click.echo("Leave empty if you don't want to modify the value.")
client.name = click.prompt('New name: ', type=str, default=client.name)
client.company = click.prompt('New company: ', type=str, default=client.company)
client.email = click.prompt('New email: ', type=str, default=client.email)
client.position = click.prompt('New position: ', type=str, default=client.position)
return client
@clients.command()
@click.argument('client_uid', type=str)
@click.pass_context
def delete(ctx, client_uid):
"""delete a client"""
client_service = ClientService(ctx.obj['clients_table'])
client = [client for client in client_service.list_clients() if client['uid'] == client_uid]
if client:
client_service.delete_client(client_uid)
click.echo('*'*25+' Client deleted. '+'*'*25)
else:
click.echo('*'*25+' Client not found. '+'*'*25)
all = clients
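# example invocations, assuming this group is registered on a root click CLI
# (the root command name below is a placeholder):
#
#   $ <cli> clients create -n "Ada Lovelace" -c "ACME" -e ada@acme.io -p CTO
#   $ <cli> clients list
#   $ <cli> clients update <client_uid>
#   $ <cli> clients delete <client_uid>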
# [author of the file above: alejandrocc42@gmail.com]
# ---- next file: Se-eun84/algorithm :: /암호해독.py (Python, no license, 484 bytes) ----
#1001010 J
#1000101 E
#1001010 J
#1010101 U
#strip removes leading and trailing whitespace
#replace() swaps out the matched substring
#ord() : character -> number
#chr() : number -> character
text = ['+ -- + - + -',
'+ --- + - +',
'+ -- + - + -',
'+ - + - + - +']
l=[]
for i in text:
print(chr(int(i.strip().replace(' ','').replace('+',"1").replace('-','0'),2)))
l.append(chr(int(i.strip().replace(' ','').replace('+',"1").replace('-','0'),2)))
''.join(l)
# [author of the file above: noreply@github.com]
# ---- next file: boldijar/python-labs :: /lab4-6/DeleteBillsOfApartamentsInRange.py (Python, no license, 1,957 bytes) ----
import wx
from apartament_controller import ApartamentController
from validator import IntValidator
class DeleteBillsOfApartamentsInRange(wx.Panel):
def __init__(self,parent,apartamentController,position,size):
super(DeleteBillsOfApartamentsInRange,self).__init__(parent,pos=position,size=size)
self.apartamentController = apartamentController
wx.StaticText(self, label="Apartament first number", style=wx.ALIGN_CENTRE,pos=(10,10))
self.leftNumber=wx.TextCtrl(self,pos=(10,30),size=(50,20))
wx.StaticText(self, label="Apartament second number", style=wx.ALIGN_CENTRE,pos=(10,50))
self.rightNumber=wx.TextCtrl(self,pos=(10,70),size=(50,20))
self.addButton = wx.Button(self, label='Delete apartaments bills in range', pos=(20, 100))
self.addButton.Bind(wx.EVT_BUTTON, self.OnEditBill)
def OnEditBill(self,e):
if IntValidator.valid(self.leftNumber.GetValue(),0,99) == False:
dlg = wx.MessageDialog(None, "Invalid input!", "Info", wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
return
if IntValidator.valid(self.rightNumber.GetValue(),0,99) == False:
dlg = wx.MessageDialog(None, "Invalid input!", "Info", wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
return
leftNumberInt = int(self.leftNumber.GetValue())
rightNumberInt = int(self.rightNumber.GetValue())
if leftNumberInt>rightNumberInt:
dlg = wx.MessageDialog(None, "Invalid input!", "Info", wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
return
self.apartamentController.deleteAllBillsFromApartamentsInRange(leftNumberInt,rightNumberInt)
dlg = wx.MessageDialog(None, "Success", "Info", wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
# [author of the file above: paul.bv96@yahoo.com]
# ---- next file: pnsn/squacapi :: /app/dashboard/migrations/0002_auto_20191016_2341.py (Python, MIT, 1,306 bytes) ----
# Generated by Django 2.2.6 on 2019-10-16 23:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('dashboard', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='widgettype',
name='type',
field=models.CharField(max_length=255, unique=True),
),
migrations.CreateModel(
name='StatType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.CharField(blank=True, default='', max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('type', models.CharField(max_length=255, unique=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
# [author of the file above: jontconnolly@gmail.com]
# ---- next file: gopalakrishnanngk/gopal :: /reversenum.py (Python, no license, 45 bytes) ----
number=raw_input()
a=number[::-1]
print(a)
# [author of the file above: noreply@github.com]
# ---- next file: carlomazzaferro/mhc_parser :: /mhc_parser/msa_utils.py (Python, no license, 10,826 bytes) ----
import numpy as np
import pandas
from mhc_parser import pep_utils
from skbio import TabularMSA, Protein
import webbrowser
from urllib import request
from subprocess import Popen, PIPE
import os
class Alignment(object):
def __init__(self, fasta_file, ref_protein_id):
"""
Manipulation of alignment data. Works in conjunction with scikit-bio's TabularMSA and Protein modules.
:param fasta_file: multiple sequence alignment input file in fasta format (Clustal Omega recommended)
:param ref_protein_id: ID of the reference protein within the fasta file
"""
self.fasta = fasta_file
self.project_dir = os.path.dirname(self.fasta)
self.msa_file = self._create_and_viz_alignment()
self.msa = self.read_msa_file()
self.reference_protein_id = ref_protein_id
self.reference_protein_string = self._get_ref_prot_from_id()
self.positional_conservation = self._get_positional_conservation()
def read_msa_file(self):
msa = TabularMSA.read(self.msa_file, constructor=Protein)
msa.reassign_index(minter='id')
return msa
def create_score_df_from_scikit_bio(self, nmers):
"""
Function to generate a pandas dataframe containing information about how conserved each peptide in the
reference protein is. Conservation scores are calculated for each nmer passed.
:param nmers: list of nmers of interest
:return: dataframe with columns 'Score', 'Peptide', 'n-mer', that is the conservation score for each peptide
identified
"""
list_of_dfs = []
for nmer in nmers:
scores = []
peptides = []
for j in range(0, len(self.reference_protein_string) - nmer):
scores.append(np.mean(self.positional_conservation[j:j + nmer])) #Mean score for peptide
peptides.append(self.reference_protein_string[j:j + nmer])
df = pandas.DataFrame([scores, peptides], index=['Score', 'Peptide'])
df = df.T
df['n-mer'] = nmer
list_of_dfs.append(df)
return pandas.concat(list_of_dfs)
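# sketch of a hypothetical call: create_score_df_from_scikit_bio([9, 15])
# returns one row per 9-mer and 15-mer window of the reference protein, with
# the mean positional conservation of that window in the 'Score' column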
def visualize_alignemnt(self):
url_ = 'file:{}'.format(request.pathname2url(os.path.abspath(self.project_dir + '/alignment/' + 'MSA_easy_viewing.html')))
webbrowser.open(url_)
def _get_positional_conservation(self):
"""
Apply metric to compute conservation for all alignment positions
:return: conservation at each position, nan's replaced by zeros.
"""
positional_conservation = self.msa.conservation(metric='inverse_shannon_uncertainty',
degenerate_mode='nan', gap_mode='include')
return np.nan_to_num(positional_conservation)
def _get_ref_prot_from_id(self):
"""
Returns ref protein string from fasta
"""
prot_id, prot_seqs = pep_utils.create_separate_lists(self.msa_file)
prot_id = [prot.strip('>') for prot in prot_id]
as_tuple = list(zip(prot_id, prot_seqs))
ref_seq = None
for tpl in as_tuple:
if tpl[0] == self.reference_protein_id:
ref_seq = tpl[1]
if not ref_seq:
raise ValueError('Protein ID provided not found in fasta file')
else:
return ref_seq
def _create_and_viz_alignment(self):
out_dir = os.path.dirname(self.fasta)
if not os.path.isdir(out_dir + '/alignment'):
os.mkdir(out_dir + '/alignment')
out_align = out_dir + '/alignment' + '/MSA.fasta'
if os.path.isfile(out_align):
raise FileExistsError('Alignment already exists. Delete it or select another project location')
self._create_fasta_and_html(out_align)
return out_align
def _create_fasta_and_html(self, out_align):
print('clustalo', '-i', self.fasta, '--residuenumber', '-o', out_align, '--outfmt=fasta')
process = Popen(['clustalo', '-i', self.fasta, '--residuenumber', '-o', out_align, '--outfmt=fasta'],
stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
if not stderr:
print('MSA in fasta created to %s' % out_align)
self._create_html(out_align)
else:
print(stderr)
@staticmethod
def _create_html(out_dir):
html_dir = os.path.dirname(out_dir) + '/MSA_easy_viewing.html'
process = Popen(" ".join(['perl /Applications/BINF_Tools/mview-master/bin/mview', '-in', 'fasta', '-ruler', 'on', '-html', 'head', '-coloring', 'any',
out_dir, '>', html_dir]), shell=True)
stdout, stderr = process.communicate()
if not stderr:
print('MSA in html created to %s' % html_dir)
else:
print(stderr)
class AddData (object):
def __init__(self, msa_file_input, msa_file_output, scores_df, positional_conservation,
all_alleles=True, list_alleles=None, pos_cons_treshold=None):
"""
:param msa_file_input:
:param msa_file_output:
:param scores_df:
:param positional_conservation:
:param all_alleles:
:param list_alleles:
:param pos_cons_treshold:
"""
if pos_cons_treshold is None:
self.pos_cons_treshold = 0.1
else:
self.pos_cons_treshold = pos_cons_treshold
self.msa_file_input = msa_file_input
self.msa_file_output = msa_file_output
self.scores_df = scores_df
self.all_alleles = all_alleles
self.list_alleles = list_alleles
self.positional_conservation = positional_conservation
self.alleles = self._check_return_alleles(self.scores_df, self.all_alleles, self.list_alleles)
self.nmers = self._get_nmers_from_affinity_df(self.scores_df)
self.high_aa_low_cons_df = self._high_aff_low_cons_to_df(self.return_high_affinity_and_not_conserved())
def _create_html(self):
html_dir = os.path.dirname(self.msa_file_output) + '/MSA_easy_viewing.html'
process = Popen(" ".join(['mview', '-in', 'fasta', '-ruler', 'on', '-html', 'head', '-coloring', 'any',
self.msa_file_output, '>', html_dir]), shell=True)
stdout, stderr = process.communicate()
if not stderr:
print('MSA in html created to %s' % html_dir)
else:
print(stderr)
return html_dir
def visualize_alignemnt(self):
html_dir = self._create_html()
url_ = 'file:{}'.format(request.pathname2url(html_dir))
webbrowser.open(url_)
def open_files(self):
with open(self.msa_file_input) as inf, open(self.msa_file_output, 'w') as out:
self.write_conservation_scores(inf, out)
self.write_affinity_scores(out)
def write_conservation_scores(self, inf, out):
for line in inf:
line = line.replace('X', '-')
out.write(line)
out.write('>CONSERVATION_INFO\n')
for i in self.positional_conservation:
if i > self.pos_cons_treshold:
out.write('O')
else:
out.write('-')
def write_affinity_scores(self, out):
for nmer in self.nmers:
for allele in self.alleles:
to_print = self._slice_df(nmer, allele, self.scores_df)
peps = self._get_peptides(to_print)
for idx in range(0, len(peps)):
if idx > 3250:
continue
if '--' in peps[idx]:
continue
if not self._get_affinity_per_peptide(peps[idx], to_print):
continue
else:
self._write_out(nmer, allele, idx, out, peps)
def high_affinity_low_cons_df(self):
selected_df = self.scores_df.loc[(self.scores_df['Affinity Level'] == 'High') &
(self.scores_df['Score'] < self.pos_cons_treshold)]
selected_df = selected_df.loc[(selected_df['Pos'] < 3250) & (selected_df['Peptide'].str.contains('--') == False)]
return selected_df
def return_high_affinity_and_not_conserved(self):
high_aff_not_cons = []
for nmer in self.nmers:
for allele in self.alleles:
to_print = self._slice_df(nmer, allele, self.scores_df)
peps = self._get_peptides(to_print)
for idx in range(0, len(peps)):
mean_cons = self._get_mean_pos_cons_per_pep(nmer, idx)
if self._get_affinity_per_peptide(peps[idx], to_print):
if mean_cons < self.pos_cons_treshold:
print (mean_cons)
high_aff_not_cons.append([idx, peps[idx]])
return high_aff_not_cons
@staticmethod
def _high_aff_low_cons_to_df(list_of_lists):
return pandas.DataFrame(list_of_lists, columns=['Peptide Position', 'Peptide'])
def _get_mean_pos_cons_per_pep(self, nmer, index):
        initial_amino_acid = index*nmer
        ending_amino_acid = (index+1)*nmer
        return np.mean(self.positional_conservation[initial_amino_acid:ending_amino_acid])
@staticmethod
def _write_out(nmer, allele, idx, out, peps):
out.write('\n>High_Affinity_Loc|n-mer=%i|allele=%s\n' % (nmer, allele))
out.write('-' * idx)
out.write(peps[idx])
out.write('-' * (len(peps) - idx - 1))
@staticmethod
def _get_affinity_per_peptide(pep, df):
aff_per_pep = df.loc[df['Peptide'] == pep]
if len(aff_per_pep) > 1:
return False
if list(aff_per_pep['Affinity Level'].values)[0] == 'High':
return True
else:
return False
@staticmethod
def _slice_df(nmer, allele, df):
to_print = df.loc[(df['n-mer'] == nmer) & (df['Allele'] == allele)]
to_print['Peptide'] = to_print['Peptide'].str.replace('X', '-')
return to_print
@staticmethod
def _get_peptides(df):
return list(df['Peptide'].values)
@staticmethod
def _check_return_alleles(scores_df, all_alleles, list_alleles):
if all_alleles:
alls = list(scores_df.Allele.unique())
else:
alls = list_alleles
if (all_alleles is False) & (list_alleles is None):
raise ValueError('No allele provided')
return alls
@staticmethod
def _get_nmers_from_affinity_df(scores_df):
return list(scores_df['n-mer'].unique())
"""
class PyhloTree(object):
def __init__(self):
self.msa_file
"""
|
[
"carlo.mazzaferro@gmail.com"
] |
carlo.mazzaferro@gmail.com
|
4ba5f1f05c063992f8dda481db06849e4d26d27d
|
350d89a0e1f7b8f4a0f90d11e9be12a639996d0d
|
/react/compiler.py
|
ae80a548eeca0a093c0a2ce941cf6a894fcf847b
|
[] |
no_license
|
rizkivmaster/joernalia
|
709220118c6c3726146021505a801d8a01d15b80
|
758e17bb88463f1d2c8750ff5eb7bdd59c7e7eef
|
refs/heads/master
| 2021-01-10T05:14:38.902812
| 2016-01-27T12:53:23
| 2016-01-27T12:53:23
| 50,297,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
def compile(filenames, target):
alljsx=''
for filename in filenames:
lines=open(filename, 'r').readlines()
for line in lines:
alljsx+=line
from react import jsx
transformer = jsx.JSXTransformer()
js = transformer.transform_string(alljsx)
open(target, 'w').write(js)
print('all written')
|
[
"rrangkuti@traveloka.com"
] |
rrangkuti@traveloka.com
|
a4ab47fc05aa9c7990c605ef9911fd6a95175d23
|
f1869b0709ed8ad27625c1dc299a79595929e72e
|
/portal/app/common/enum_field_handler.py
|
ab710c4c57f00e3740e7a7843ae4c7cf02435a4b
|
[] |
no_license
|
powernet-project/powernet
|
57b26b4734297d868f635ab0929ef9c845beb2aa
|
75700848dbd05f141f481b40cdbc2d4b629b98c5
|
refs/heads/master
| 2022-06-26T08:03:56.967304
| 2021-04-13T06:31:06
| 2021-04-13T06:31:06
| 74,173,526
| 2
| 1
| null | 2022-06-21T21:31:02
| 2016-11-18T23:07:46
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,209
|
py
|
from enumfields.fields import EnumFieldMixin
from rest_framework.fields import ChoiceField
from django.utils.translation import ugettext_lazy as _
class EnumField(ChoiceField):
default_error_messages = {
'invalid': _("No matching enum type.")
}
def __init__(self, **kwargs):
self.enum_type = kwargs.pop("enum_type")
kwargs.pop("choices", None)
super(EnumField, self).__init__(self.enum_type.choices(), **kwargs)
def to_internal_value(self, data):
for choice in self.enum_type:
if choice.name == data or choice.value == data:
return choice
self.fail('invalid')
def to_representation(self, value):
if not value:
return None
return value.name
class EnumFieldSerializerMixin(object):
def build_standard_field(self, field_name, model_field):
field_class, field_kwargs = super(EnumFieldSerializerMixin, self).build_standard_field(field_name, model_field)
if field_class == ChoiceField and isinstance(model_field, EnumFieldMixin):
field_class = EnumField
field_kwargs['enum_type'] = model_field.enum
return field_class, field_kwargs
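# Hypothetical usage sketch (not part of this module): wiring EnumField into a serializer by
# hand. The Color enum and ColorSerializer below are invented for illustration, and Color is
# assumed to expose the choices() classmethod that EnumField.__init__ expects.
#
# from enumfields import Enum
# from rest_framework import serializers
#
# class Color(Enum):
#     RED = 'red'
#     BLUE = 'blue'
#
# class ColorSerializer(serializers.Serializer):
#     color = EnumField(enum_type=Color)
#
# serializer = ColorSerializer(data={'color': 'red'})
# serializer.is_valid()   # to_internal_value maps 'red' -> Color.RED; to_representation returns 'RED'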
|
[
"jonathan@rhumbix.com"
] |
jonathan@rhumbix.com
|
fac5e4f2113c176254285486cf83496ad8294af2
|
587f90dd34f72dbf8c616edcd42a2ee69a945ad5
|
/solution.py
|
6d158a0182174041f17d2c8582bde3f02165414c
|
[] |
no_license
|
UtkarshMishra04/python_challenge
|
7d60d0ac68af9f626d71bfe6826113aad31cc011
|
eaf04caa8c8011b6cdbe6396318ce25b41263489
|
refs/heads/master
| 2023-03-20T04:52:41.992318
| 2021-03-16T08:54:42
| 2021-03-16T08:54:42
| 348,275,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
from game_class import Game
if __name__ == '__main__':
env = Game()
env.start_game()
|
[
"utkarsh75477@gmail.com"
] |
utkarsh75477@gmail.com
|
3bb4b250c9e481e8342d3d85a655fadd62014d8a
|
82c7adb0bfaa667c50ac7b336bb815863b378fa9
|
/finace/items.py
|
60984524386545327a13568ee270fe67c087fc4d
|
[
"Apache-2.0"
] |
permissive
|
pythonyhd/finace
|
c8a7dca65dfe33cabcb90630d8791d3a5b942bc9
|
614d98ad92e1bbaa6cf7dc1d6dfaba4f24431688
|
refs/heads/master
| 2022-11-30T17:53:40.947747
| 2020-08-14T03:47:26
| 2020-08-14T03:47:26
| 287,253,978
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class FinaceItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
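# A hypothetical sketch of how concrete fields could be declared on a scrapy.Item subclass;
# the item name and field names below are invented, not part of this project.
class ExampleFinanceItem(scrapy.Item):
    title = scrapy.Field()
    price = scrapy.Field()
    published_at = scrapy.Field()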
|
[
"18353626676@163.com"
] |
18353626676@163.com
|
af3394311c1d32ffcc9e695c0984fe28f230ec2f
|
e999f2229d8e5831fe0dd732134a54d358872cfb
|
/scripts/classicalAB.py
|
6df0529a63ee2d7d992ed8917992766f55d744fc
|
[] |
no_license
|
Blvisse/ABtesting
|
b177673563557397d197444d6ddbc370b7d4c585
|
ef0f98f54273eefa40ca190178eb860770cc3c15
|
refs/heads/main
| 2023-06-24T07:55:59.788502
| 2021-07-25T12:46:35
| 2021-07-25T12:46:35
| 387,365,291
| 0
| 0
| null | 2021-07-25T12:46:35
| 2021-07-19T06:43:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,441
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as ss
import logging
logging.basicConfig(filename='../applogs/classical.log', filemode='w', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',level=logging.DEBUG)
class classicalAB:
def __init__(self,data):
self.data=data
def convertData(self):
#creating distinctive groups for exposed and control groups
exposed=self.data[self.data['experiment']=='exposed']
control=self.data[self.data['experiment']=='control']
print("The number of users in each experiment is as follows \n")
logging.debug("Calulating distribution of data")
print("The number of exposed users {} \n".format(len(exposed)))
print("The number of control users {} \n".format(len(control)))
#calculating positive engagments
positiveEngagmentExposed=exposed[exposed['yes']==1]
positiveEngagmentControl=control[control['yes']==1]
logging.debug("Calculating positive interactions")
print("Those with a positive interaction with the ad \n ")
print("From the exposed group {} \n".format(len(positiveEngagmentExposed)))
print("From the control group {} \n".format(len(positiveEngagmentControl)))
noPositiveExposed=len(positiveEngagmentExposed)
noPositiveControl=len(positiveEngagmentControl)
logging.debug("Calculating conversion rate")
probPosExposed,probPosControl=noPositiveExposed/len(exposed),noPositiveControl/len(control)
print("The conversion rate is \n")
print("Exposed {} \n".format(probPosExposed))
print("Control {} \n ".format(probPosControl))
print("The lift from the experiment is {} ".format(probPosExposed-probPosControl))
summary=self.data.pivot_table(values='yes',index='experiment',aggfunc=np.sum)
return exposed,control,noPositiveExposed,noPositiveControl,probPosExposed,probPosControl,summary
def compareSamples(self):
        exposed, control, noPositiveExposed, noPositiveControl, probPosExposed, probPosControl, summary = self.convertData()
        # expected positive engagements in the exposed group under the control conversion rate
        expectedPositiveExposed = probPosControl * len(exposed)
        print("Expected positives in exposed group at control rate {} \n".format(expectedPositiveExposed))
        # scipy.stats has no 'binomial'; binomtest (SciPy >= 1.7) is the closest equivalent for this comparison
        result = ss.binomtest(noPositiveExposed, n=len(exposed), p=probPosControl)
        print("Binomial test p-value {} \n".format(result.pvalue))
        return result
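# A standalone sketch (with made-up counts in the example) of the classical two-proportion
# z-test that usually accompanies the lift reported by convertData(); it only relies on the
# numpy and scipy.stats imports already present in this module.
def two_proportion_ztest(success_a, n_a, success_b, n_b):
    p_pool = (success_a + success_b) / (n_a + n_b)
    se = np.sqrt(p_pool * (1 - p_pool) * (1.0 / n_a + 1.0 / n_b))
    z = (success_a / n_a - success_b / n_b) / se
    p_value = 2 * ss.norm.sf(abs(z))  # two-sided
    return z, p_value

# Example: two_proportion_ztest(308, 6000, 264, 6000)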
|
[
"blaisepke@gmail.com"
] |
blaisepke@gmail.com
|
148ea8e659b1f395932dd56bb4319bd9d6022474
|
9ec58308459dc95405d1a32fcf8fae7f687a207b
|
/test/test_k_bank.py
|
71dc290f6f4630d2eaa7649866a90201a40f7e18
|
[
"MIT"
] |
permissive
|
ivanlyon/exercises
|
067aed812486dbd7a3d7de6e47a692c8b9383163
|
0792976ae2acb85187b26a52812f9ebdd119b5e8
|
refs/heads/master
| 2021-05-24T04:17:29.012329
| 2021-05-11T17:26:50
| 2021-05-11T17:26:50
| 65,584,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,597
|
py
|
import io
import unittest
from unittest.mock import patch
from kattis import k_bank
###############################################################################
class SampleInput(unittest.TestCase):
'''Problem statement sample inputs and outputs'''
def test_sample_input_1(self):
'''Run and assert problem statement sample 1 input and output.'''
inputs = []
inputs.append('4 4')
inputs.append('1000 1')
inputs.append('2000 2')
inputs.append('500 2')
inputs.append('1200 0')
inputs = '\n'.join(inputs) + '\n'
outputs = '4200\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_bank.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
def test_sample_input_2(self):
'''Run and assert problem statement sample 2 input and output.'''
inputs = []
inputs.append('3 4')
inputs.append('1000 0')
inputs.append('2000 1')
inputs.append('500 1')
inputs = '\n'.join(inputs) + '\n'
outputs = '3000\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_bank.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
###############################################################################
if __name__ == '__main__':
unittest.main()
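# A small reusable sketch of the stdin/stdout patching pattern used in the tests above,
# factored into a helper; `main_callable` is any zero-argument entry point (hypothetical name).
def run_with_io(main_callable, input_text):
    with patch('sys.stdin', io.StringIO(input_text)), \
         patch('sys.stdout', new_callable=io.StringIO) as stdout:
        main_callable()
        return stdout.getvalue()

# Example: run_with_io(k_bank.main, '3 4\n1000 0\n2000 1\n500 1\n') == '3000\n'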
|
[
"roblyon00@gmail.com"
] |
roblyon00@gmail.com
|
c2f07a0caa76746377059d4cbb4ef5b3cd643a2c
|
71c58e0449aabb550ed69b72d368e90017575631
|
/grades.py
|
7bedd765d1a88b720821faa861626eeeb5b4fc84
|
[
"Apache-2.0"
] |
permissive
|
ChangedNameTo/MPDiscordBot
|
7e6534d2d1b6260faa955c3733abdd3ec4553d50
|
7d2de57c1b25166bfdf08a9d362fba680a4f76f1
|
refs/heads/master
| 2022-12-30T18:04:19.751534
| 2020-10-23T14:26:55
| 2020-10-23T14:26:55
| 298,614,171
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,111
|
py
|
from discord.ext import commands
french = ['2-3', '3-4', '4', '4+', '5a', '5b', '6a', '6a+', '6b', '6b+', '6c', '6c+', '7a', '7a+', '7b', '7b+', '7c', '7c+', '8a', '8a+', '8b', '8b+', '8c', '8c+', '9a', '9a+', '9b', '9b+', '9c']
uk = ['HVD', 'MS', 'S', 'VS', 'HVS', 'E1 5a/HVS 5b', 'E1 5b', 'E2 5c', 'E3 5c/6a', 'E3 6a', 'E4 6a', 'E4 6b/E5 6a', 'E5 6b', 'E5 6c/E6 6b', 'E6 6b', 'E6 6b/6c', 'E6 6c/E7 6c', 'E7 7a', 'E7 7a/E8 6c', 'E8 6c', 'E8 7a/E9 7a', 'E9 7b/E10 7a', 'E10 7a', 'E10 7b', 'E10 7c/E11 7a', 'E11 7b', 'fuck off mate', 'get out u fuckin nonce', 'oi you got a loicense for that grade?']
yds = ['5.2-3', '5.4-5', '5.6', '5.7', '5.8', '5.9', '5.10a', '5.10b', '5.10c', '5.10d', '5.11a', '5.11b', '5.11c/d', '5.12a', '5.12b', '5.12c', '5.12d', '5.13a', '5.13b', '5.13c', '5.13d', '5.14a', '5.14b', '5.14c', '5.14d', '5.15a', '5.15b', '5.15c', '5.15d']
hueco = ['VB', 'VB', 'VB', 'VB', 'VB', 'V0-', 'V0', 'V0+', 'V1', 'V2', 'V3', '', 'V4', '', 'V5', '', 'V6', 'V7', 'V8', '', 'V9', 'V10', 'V11', 'V12', 'V13', 'V14', 'V15', 'V16', 'V17']
font = ['3', '3', '3', '3', '3', '4-', '4', '4+', '5', '5+', '6A', '6A+', '6B', '6B+', '6C', '6C+', '7A', '7A+', '7B', '7B+', '7C', '7C+', '8A', '8A+', '8B', '8B+', '8C', '8C+', '9A']
def convert_grade(source, destination, grade):
source_scale = get_scales(source)
dest_scale = get_scales(destination)
if grade in source_scale:
original = source_scale.index(grade)
return dest_scale[original]
else:
        raise ValueError('Not a valid grade')
def get_scales(system):
return {
'french':french,
'sport':french,
'french sport':french,
'fr':french,
'france':french,
'eu':french,
'euro':french,
'francia':french,
'uk':uk,
'british':uk,
'british tech':uk,
'brit tech':uk,
'british trad':uk,
'gb':uk,
'uk tech':uk,
'yds':yds,
'yosemite':yds,
'us':yds,
'hueco':hueco,
'v':hueco,
'vermin':hueco,
'font':font,
'fontainebleau':font
}[system]
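# Hypothetical usage sketch: converting grades with the tables above. '7a' sits at index 12 of
# the french list, so the Hueco result is 'V4'; '5.12a' maps to Fontainebleau '6B+'.
# convert_grade('french', 'hueco', '7a')   # -> 'V4'
# convert_grade('yds', 'font', '5.12a')    # -> '6B+'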
|
[
"ChangedNameTo@users.noreply.github.com"
] |
ChangedNameTo@users.noreply.github.com
|
ddcf8c6e14ca3be28eb4cda3f448ff4a21d6cd8d
|
1e436ee39f4a063d13fc5c3c07eca80dcd830b70
|
/build/lib/app.py
|
8b51808d32c28ab713120ad8710d065476532dd8
|
[] |
no_license
|
Mineria/Jaccard-similarity
|
5f8c72a833b01f9a39aaa184f1cbd83a91c95860
|
f3ede7794d388e59ed48aaa7eb5905b2f8117685
|
refs/heads/master
| 2021-01-10T16:25:43.242005
| 2016-03-15T13:10:29
| 2016-03-15T13:10:29
| 53,945,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,706
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import math
from random import randint
import api as api
config = json.load(open('config.json')) # Loading configuration from file
rows = config['clients'] # Number of clientes
columns = config['products'] # Number of products max
categories = config['categories'] # list with all the possible values for a product
categories_num = len(categories)
max_products_result = 10
def main():
total_clients = rows
total_products = columns
clients = api.init_clients(rows, categories_num, columns)
products = api.init_products(total_products, categories)
matrix = api.init_random_matrix(total_clients, total_products, clients, categories_num)
products_related = api.get_related_products(matrix, rows, columns)
print matrix[0]
for index_user in range(0, 20):
print "Recommendations for user %d" % index_user
total_displayed_products = 0
        api.define_profile(clients[index_user])  # print information about the user
recommendations = api.get_user_recommendations(index_user, products_related, matrix, rows, columns)
for r in recommendations:
if total_displayed_products >= max_products_result:
break
product_name = r[0]
product_distance = r[1]
accuracy = product_distance * 100
if accuracy < 60:
pass # don't recommend products with less than 50% of accuracy
else:
print "Product_id(" + str(product_name) + ") - Accuracy: " + str(int(product_distance * 100)) + "% | " + str(products[product_name])
total_displayed_products += 1
#get_user_preferred_category() # returns a list of the categories the user prefer (based on the probabilities for each category)
if total_displayed_products == 0:
print "¡Hola!. De momento no tienes productos recomendados"
print "¿Qué te parece si le echas un vistazo a nuestra tienda?"
print "Cuanto más compres, más productos únicos vas a encontrar ;)"
print "-----------"
#
# for user_index in range(0, total_clients):
# print "---\n\nRecommendations for user %d are: " % user_index
# print api.define_profile(clients[0])
# print "\n"
# user_recommendations = api.get_user_recommendations(user_index, products_related, matrix, rows, columns)
# print user_recommendations
# total_products_displayed = 0
#
# for r in user_recommendations:
# if total_products_displayed >= max_products_result:
# break
# product_name = r[0]
# product_distance = r[1]
# accuracy = product_distance * 100
# #print product_name
# if (accuracy < 50):
# pass # don't recommend products with less than 50% of accuracy
# else:
# total_products_displayed += 1
# print "Product: " + str(product_name) + ". Accuracy " + str(product_distance * 100) + "%"
# print "Type of product " + str(products[product_name])
#
# #get_user_preferred_category() # returns a list of the categories the user prefer (based on the probabilities for each category)
#
# if total_products_displayed == 0:
# print "¡Hola!. De momento no tienes productos recomendados"
# print "¿Qué te parece si le echas un vistazo a nuestra tienda?"
# print "Cuanto más compres, más productos únicos vas a encontrar ;)"
#
#
#
#
# from mynewmodule import hola
#hola.hola()
main()
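# The api module imported above is not shown here; as a reference point, a minimal Jaccard
# similarity between two clients' item sets (standalone sketch, hypothetical inputs) is:
def jaccard_similarity(items_a, items_b):
    a, b = set(items_a), set(items_b)
    if not a and not b:
        return 0.0
    return len(a & b) / float(len(a | b))

# Example: jaccard_similarity([1, 2, 3], [2, 3, 4]) -> 0.5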
|
[
"me@jgferreiro.com"
] |
me@jgferreiro.com
|
9088845ee4cd9fc4f784727bc6f020bc4213b6a6
|
786de89be635eb21295070a6a3452f3a7fe6712c
|
/Detector/tags/V00-00-05/SConscript
|
d6fb3976c08526bf2e9adb925905a3b3a1b85635
|
[] |
no_license
|
connectthefuture/psdmrepo
|
85267cfe8d54564f99e17035efe931077c8f7a37
|
f32870a987a7493e7bf0f0a5c1712a5a030ef199
|
refs/heads/master
| 2021-01-13T03:26:35.494026
| 2015-09-03T22:22:11
| 2015-09-03T22:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,454
|
#--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# SConscript file for package Detector
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')
#
# For the standard SIT packages which build libraries, applications,
# and Python modules it is usually sufficient to call
# standardSConscript() function which defines rules for all
# above targets. Many standard packages do not need any special options,
# but those which need can modify standardSConscript() behavior using
# a number of arguments, here is a complete list:
#
# LIBS - list of additional libraries needed by this package
# LIBPATH - list of directories for additional libraries
# BINS - dictionary of executables and their corresponding source files
# TESTS - dictionary of test applications and their corresponding source files
# SCRIPTS - list of scripts in app/ directory
# UTESTS - names of the unit tests to run, if not given then all tests are unit tests
# PYEXTMOD - name of the Python extension module, package name used by default
# CCFLAGS - additional flags passed to C/C++ compilers
# NEED_QT - set to True to enable Qt support
#
#
#standardSConscript()
standardSConscript(PYEXTMOD="detector_ext")
#, DOCGEN="doxy-all psana-modules-doxy")
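# A hypothetical invocation (not used by this package) showing several of the optional
# arguments documented above; the library, binary and source names are invented:
#standardSConscript(LIBS=['boost_thread'], BINS={'myapp': 'app/myapp.cpp'}, CCFLAGS='-O2')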
|
[
"dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] |
dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7
|
|
5a60e0394e9f9480b97481c167aa7af809b7d4c2
|
281d50a81837793ec9d563ed1fa9caf9af354d16
|
/Zbirka2 - zadatak28, strana3 - prirodan broj k - pecurke.py
|
dbdb10a2d551188acd3c076d9b47c9874c53b971
|
[] |
no_license
|
AdnanRedzic/Uvod-u-programiranje
|
d095e6f1393ad3d27525cf8f957f45bad3c97dfc
|
1c6c259409f7622a7ee857cb5e333cbb43067e59
|
refs/heads/main
| 2023-08-23T09:49:20.811929
| 2021-10-26T06:28:02
| 2021-10-26T06:28:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
"""
Za prirodan broj k, štampati frazu „Na izletu smo ubrali k pecuraka“, gdje završetak rijeci
„pecurka“ prilagodite broju k. Npr. 101 pecurku, 1204 pecurke, 506 pecuraka
"""
broj_pecuraka = int(input('Unesite broj pecuraka:'))
def mijenjanje_rijeci_pecurka_u_odnosu_na_unijeti_broj(broj_pecuraka):
if broj_pecuraka%10 == 1:
print('Na izletu smo ubrali', broj_pecuraka,'pecurku')
elif broj_pecuraka%10 > 1 and broj_pecuraka%10 < 5:
print('Na izletu smo ubrali', broj_pecuraka,'pecurke')
else:
print('Na izletu smo ubrali', broj_pecuraka,'pecuraka')
mijenjanje_rijeci_pecurka_u_odnosu_na_unijeti_broj(broj_pecuraka)
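# A compact sketch of the same ending rule that returns the phrase instead of printing it;
# the behaviour matches the function above (numbers ending in 0 or 5-9 fall through to 'pecuraka').
def fraza_o_pecurkama(k):
    if k % 10 == 1:
        zavrsetak = 'pecurku'
    elif 1 < k % 10 < 5:
        zavrsetak = 'pecurke'
    else:
        zavrsetak = 'pecuraka'
    return 'Na izletu smo ubrali {} {}'.format(k, zavrsetak)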
|
[
"noreply@github.com"
] |
noreply@github.com
|
218c8af1d22be553515a68a82499c5e24d1fc27f
|
f690b0a68e51e29a87840a4db01842fdf410b30d
|
/dependency_parser.py
|
9c06d22765dfdbf767728f16f52b4cb8c0f9c5fe
|
[] |
no_license
|
nyutal/nlp02_dependency_parser
|
396f8884aec8a03a20d5968176e17b715bdd71d5
|
bf9333b8ba91ce2e8a23ee1504697844514682f3
|
refs/heads/master
| 2021-01-11T16:45:22.052408
| 2017-01-29T22:10:16
| 2017-01-30T09:15:18
| 79,666,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,869
|
py
|
import os
import time
from basic_features.feature import *
from basic_features.unigrams import *
from basic_features.bigrams import *
from basic_features.complex import *
import perceptron as pr
from conf import Conf
from dataParser import *
def train(fv: FeatureVec, trainer: pr.Perceptron, train_corpus: Corpus, out_file):
out_file.write('start training at ' + time.asctime())
weights = trainer.train(train_corpus, fv, Conf.train_niter)
out_file.write('finish training at ' + time.asctime())
return weights
def comp(dp: DataParser, fv: FeatureVec, trainer: pr.Perceptron):
comp_corpus = dp.parse(Conf.comp_file_name, Conf.comp_max_samples, True)
weights = np.asarray(list(map(float, [line.strip() for line in open(Conf.weights_src_comp)])))
trainer.predict(comp_corpus, fv, weights)
comp_out_file = open(Conf.comp_output_file_name, 'w')
for s in comp_corpus.get_sentences():
for k in s.words[1:]:
comp_out_file.write(str(k.counter) + '\t' + str(k.token) + '\t' + '_' + '\t' + str(
k.pos) + '\t' + '_' + '\t' + '_' + '\t' + str(
k.head) + '\t' + '_' + '\t' + '_' + '\t' + '_' + '\n') # tabs[0], tabs[1], tabs[3], tabs[6]
comp_out_file.write('\n')
comp_out_file.close()
def test_from_train(dp: DataParser, fv: FeatureVec, trainer: pr.Perceptron, weights, out_file):
if Conf.test_file_name is None: return
test_corpus = dp.parse(Conf.test_file_name, Conf.test_max_samples)
out_file.write('start testing weights from train at ' + time.asctime())
accuracy = trainer.test(test_corpus, fv, weights)
out_file.write(', finish testing at ' + time.asctime() + ', ')
out_file.write('accuracy=' + str(accuracy) + "\n")
def test_from_path(dp: DataParser, fv: FeatureVec, trainer: pr.Perceptron, out_file):
if Conf.test_file_name is None: return
test_corpus = dp.parse(Conf.test_file_name, Conf.test_max_samples)
if os.path.isdir(Conf.weights_src):
files = os.listdir(Conf.weights_src)
wlist = []
iter = 1
print('start multiple iteration tests:' + Conf.test_name + ' at ' + time.asctime())
while True:
curr = [ f for f in files if 'weights_' + str(iter) + '_' in f]
if len(curr) == 0: break
src = curr[0]
weights = np.asarray(list(map(float, [line.strip() for line in open(Conf.weights_src + src)])))
out_file.write('start testing weights from ' + src + ' at ' + time.asctime())
accuracy = trainer.test(test_corpus, fv, weights)
out_file.write(', finish testing at ' + time.asctime() + ', ')
out_file.write('accuracy=' + str(accuracy) + "\n")
wlist.append(str(iter) + ', ' + str(accuracy))
print('test iteration ' + str(iter) + ', accuracy=' + str(accuracy) + ' time: ' + time.asctime())
iter += 1
print(wlist)
out_acc_file = open(Conf.weights_src + Conf.test_name + '_accuracy_data.txt', 'w')
for l in wlist:
out_acc_file.write(l)
out_acc_file.close()
# weights = np.asarray(list(map(float, [line.strip() for line in open(Conf.weights_src)])))
# out_file.write('start testing weights from ' + Conf.weights_src+ ' at ' + time.asctime())
# accuracy = trainer.test(test_corpus, fv, weights)
# out_file.write(', finish testing at ' + time.asctime() + ', ')
# out_file.write('accuracy=' + str(accuracy) + "\n")
else:
weights = np.asarray(list(map(float, [line.strip() for line in open(Conf.weights_src)])))
out_file.write('start testing weights from ' + Conf.weights_src + ' at ' + time.asctime())
accuracy = trainer.test(test_corpus, fv, weights)
out_file.write(', finish testing at ' + time.asctime() + ', ')
out_file.write('accuracy=' + str(accuracy) + "\n")
def main():
out_file = open(Conf.output_file_name, 'w')
dp = DataParser()
train_corpus = dp.parse(Conf.train_file_name, Conf.train_max_samples)
fv = FeatureVec()
add_unigrams(fv)
add_bigrams(fv)
if Conf.is_complex:
add_complex(fv)
fv.generate_features(train_corpus)
out_file.write(Conf.get_conf_str() + "\n")
out_file.write(str(fv.get_feature_gen_count()) + "\n")
trainer = pr.Perceptron()
if Conf.is_competition:
comp(dp, fv, trainer)
elif Conf.weights_src is None:
weights = train(fv, trainer, train_corpus, out_file)
out_weight_file = open(Conf.output_weight_file_name, 'w')
for i in weights:
out_weight_file.write("%s\n" % i)
out_weight_file.close()
test_from_train(dp, fv, trainer, weights, out_file)
else:
test_from_path(dp, fv, trainer, out_file)
out_file.close()
if __name__ == '__main__':
main()
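# The one-float-per-line weights files written by train() are re-read with the same expression
# in comp() and test_from_path(); a small helper mirroring that pattern (sketch) could be:
def load_weights(path):
    return np.asarray([float(line.strip()) for line in open(path)])

# Example: weights = load_weights(Conf.weights_src)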
|
[
"nyutal@yahoo-inc.com"
] |
nyutal@yahoo-inc.com
|
f15d181e2a3f37e31f85d1871156a11d42b83881
|
5cb77252081eec8c700eb294f4d674c88b23bf49
|
/gitlab-backup.py
|
4ae4ea108d136e153d27945e92f1f400161e11a2
|
[] |
no_license
|
joyceqiao/gitlab-backup
|
15fb3f05b2cc2093521f474d7d94b74ebfb7cef9
|
43798e1703002df19dda003165dd842aaed14632
|
refs/heads/master
| 2021-01-17T21:15:48.756706
| 2016-04-08T11:10:54
| 2016-04-08T11:10:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,004
|
py
|
# -*- coding: utf-8 -*-
import os
import logging
import gitlab
from settings import GIT_SETTINGS
from settings import MAIL_SETTINGS
from settings import LOG_SETTINGS
from settings import MAIL_NOTIFY_ENABLE
from custome_logging import BufferingSMTPHandler
from custome_logging import ConsoleHandler
def get_gitlab_instance():
gitlab_url = GIT_SETTINGS.get('gitlab_url')
private_token = GIT_SETTINGS.get('private_token')
gitlab_server = gitlab.Gitlab(gitlab_url, private_token=private_token)
gitlab_server.auth()
return gitlab_server
def record_log_with_level(logger, output):
if output.strip().startswith("fatal") or output.strip().startswith("error"):
logger.error(output.strip())
else:
logger.info(output.strip())
def backup_git_repo(logger):
# backup git repo by paging
page = 1
while True:
backup_git_by_page(page, logger)
page += 1
def backup_git_by_page(page, logger):
git = get_gitlab_instance()
projects = git.projects.all(page=page, per_page=100)
git_data_path = GIT_SETTINGS.get('git_data_path')
if 0 == len(projects):
logger.info("All projects backup completed !")
exit(0)
else:
logger.info("There are %s projects on page %s." % (len(projects), page))
try:
for project in projects:
git_repo_path = os.path.join(git_data_path, project.path_with_namespace + ".git")
logger.debug("begin to backup git repo %s !" % project.path_with_namespace)
# if the project has been cloned,then exec git fetch command,else exec git clone command.
if os.path.exists(git_repo_path):
os.chdir(git_repo_path)
for output in os.popen("git fetch 2>&1"):
record_log_with_level(logger, output)
else:
for output in os.popen("git clone --mirror %s %s 2>&1" % (project.http_url_to_repo, git_repo_path)):
record_log_with_level(logger, output)
except:
logger.exception('Got exception on logger handler:')
raise
logger.info("The projects of page %s backup completed !" % page)
def main():
# get log level from settings
log_level = LOG_SETTINGS.get('level')
# setup logger and handler
logger = logging.getLogger(__name__)
logger.setLevel(log_level)
logger.addHandler(ConsoleHandler())
if MAIL_NOTIFY_ENABLE:
mailhost = MAIL_SETTINGS.get('mailhost')
mail_username = MAIL_SETTINGS.get('username')
mail_password = MAIL_SETTINGS.get('password')
fromaddr = MAIL_SETTINGS.get('fromaddr')
toaddrs = MAIL_SETTINGS.get('toaddrs')
subject = MAIL_SETTINGS.get('subject')
logger.addHandler(BufferingSMTPHandler(mailhost, fromaddr, toaddrs, subject, mail_username, mail_password, 10000))
# backup git repo
backup_git_repo(logger)
if __name__ == "__main__":
main()
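# A standalone sketch of the fetch-or-clone decision made in backup_git_by_page(), using
# subprocess instead of os.popen; the repository URL and path in the example are hypothetical.
import subprocess

def mirror_repo(http_url, repo_path):
    if os.path.exists(repo_path):
        # existing mirror: just fetch the latest refs
        return subprocess.run(['git', '-C', repo_path, 'fetch'], capture_output=True, text=True)
    # first run: create a bare mirror clone
    return subprocess.run(['git', 'clone', '--mirror', http_url, repo_path], capture_output=True, text=True)

# Example: mirror_repo('https://gitlab.example.com/group/project.git', '/backups/group/project.git')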
|
[
"977675308@qq.com"
] |
977675308@qq.com
|
ba8c4775490031f4b1abd9541e76e7d99773e96c
|
44845df9198ae8c80fabecb6ed3ae6a44e43f38c
|
/modo/admin.py
|
4aa582f42f92bbc0b441d3019c6b6fb02550a96f
|
[] |
no_license
|
CarlosSanz81/cima
|
570da404bddd0a813a025163a9e94676b9d0b4a9
|
3ad9b37af4a2d8a5789915208afffec7b6af3c0e
|
refs/heads/master
| 2021-01-23T08:00:04.964713
| 2017-03-28T14:33:09
| 2017-03-28T14:33:09
| 72,184,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
from django.contrib import admin
from .models import Modo
@admin.register(Modo)
class AdminModo(admin.ModelAdmin):
list_display = ('nombre',)
|
[
"carlossanzgarcia81@gmail.com"
] |
carlossanzgarcia81@gmail.com
|
9fd2adff33eb37163fba31027204557321194233
|
6320fef2ea7376c2b35f97f1a5af004e90f09098
|
/1-2주차 실습(복습)/venv/Lib/site-packages/pygments/formatters/irc.py
|
49f8b3d13114e627e86ef8bdd693496bd155fd7f
|
[] |
no_license
|
Dplo1514/ploaistudy
|
7aa08d7f71653748a9e32dcc09ee8f6cec0aaed9
|
e35e42b1e5f0c90cc1e2a59993a1ef73d8872d0c
|
refs/heads/master
| 2023-09-03T00:45:55.601651
| 2021-10-24T12:19:38
| 2021-10-24T12:19:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,026
|
py
|
"""
pygments.formatters.irc
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for IRC output
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pygments.util import get_choice_opt
__all__ = ['IRCFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
IRC_COLORS = {
Token: ('', ''),
Whitespace: ('gray', 'brightblack'),
Comment: ('gray', 'brightblack'),
Comment.Preproc: ('cyan', 'brightcyan'),
Keyword: ('blue', 'brightblue'),
Keyword.Type: ('cyan', 'brightcyan'),
Operator.Word: ('magenta', 'brightcyan'),
Name.Builtin: ('cyan', 'brightcyan'),
Name.Function: ('green', 'brightgreen'),
Name.Namespace: ('_cyan_', '_brightcyan_'),
Name.Class: ('_green_', '_brightgreen_'),
Name.Exception: ('cyan', 'brightcyan'),
Name.Decorator: ('brightblack', 'gray'),
Name.Variable: ('red', 'brightred'),
Name.Constant: ('red', 'brightred'),
Name.Attribute: ('cyan', 'brightcyan'),
Name.Tag: ('brightblue', 'brightblue'),
String: ('yellow', 'yellow'),
Number: ('blue', 'brightblue'),
Generic.Deleted: ('brightred', 'brightred'),
Generic.Inserted: ('green', 'brightgreen'),
Generic.Heading: ('**', '**'),
Generic.Subheading: ('*magenta*', '*brightmagenta*'),
Generic.Error: ('brightred', 'brightred'),
Error: ('_brightred_', '_brightred_'),
}
IRC_COLOR_MAP = {
'white': 0,
'black': 1,
'blue': 2,
'brightgreen': 3,
'brightred': 4,
'yellow': 5,
'magenta': 6,
'orange': 7,
'green': 7, #compat w/ ansi
'brightyellow': 8,
'lightgreen': 9,
'brightcyan': 9, # compat w/ ansi
'cyan': 10,
'lightblue': 11,
'red': 11, # compat w/ ansi
'brightblue': 12,
'brightmagenta': 13,
'brightblack': 14,
'gray': 15,
}
def ircformat(color, text):
if len(color) < 1:
return text
add = sub = ''
if '_' in color: # italic
add += '\x1D'
sub = '\x1D' + sub
color = color.strip('_')
if '*' in color: # bold
add += '\x02'
sub = '\x02' + sub
color = color.strip('*')
# underline (\x1F) not supported
# backgrounds (\x03FF,BB) not supported
if len(color) > 0: # actual color - may have issues with ircformat("red", "blah")+"10" type stuff
add += '\x03' + str(IRC_COLOR_MAP[color]).zfill(2)
sub = '\x03' + sub
return add + text + sub
return '<'+add+'>'+text+'</'+sub+'>'
class IRCFormatter(Formatter):
r"""
Format tokens with IRC color sequences
The `get_style_defs()` method doesn't do anything special since there is
no support for common styles.
Options accepted:
`bg`
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
(default: ``"light"``).
`colorscheme`
A dictionary mapping token types to (lightbg, darkbg) color names or
``None`` (default: ``None`` = use builtin colorscheme).
`linenos`
Set to ``True`` to have line numbers in the output as well
(default: ``False`` = no line numbers).
"""
name = 'IRC'
aliases = ['irc', 'IRC']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.darkbg = get_choice_opt(options, 'bg',
['light', 'dark'], 'light') == 'dark'
self.colorscheme = options.get('colorscheme', None) or IRC_COLORS
self.linenos = options.get('linenos', False)
self._lineno = 0
def _write_lineno(self, outfile):
self._lineno += 1
outfile.write("\n%04d: " % self._lineno)
def _format_unencoded_with_lineno(self, tokensource, outfile):
self._write_lineno(outfile)
for ttype, value in tokensource:
if value.endswith("\n"):
self._write_lineno(outfile)
value = value[:-1]
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
self._write_lineno(outfile)
if line:
outfile.write(ircformat(color, line[:-1]))
if spl[-1]:
outfile.write(ircformat(color, spl[-1]))
else:
outfile.write(value)
outfile.write("\n")
def format_unencoded(self, tokensource, outfile):
if self.linenos:
self._format_unencoded_with_lineno(tokensource, outfile)
return
for ttype, value in tokensource:
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(ircformat(color, line))
outfile.write('\n')
if spl[-1]:
outfile.write(ircformat(color, spl[-1]))
else:
outfile.write(value)
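# Hypothetical usage sketch (not part of this module): rendering IRC-colored output through the
# public pygments API, with the options documented in the IRCFormatter docstring above.
# from pygments import highlight
# from pygments.lexers import PythonLexer
# print(highlight('print("hi")', PythonLexer(), IRCFormatter(bg='dark', linenos=True)))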
|
[
"dladlsgur3334@gmail.com"
] |
dladlsgur3334@gmail.com
|
b531600002bc42640cd2caa1c95dd69689267dae
|
e153f0d5b97c9b5706856e47187968ded1ec3b0a
|
/client_code/old_code/PublishTest.py
|
047e3995f50675ffa1762380f56dc3f1564696cf
|
[] |
no_license
|
msynth/artist_app
|
e1ea7b7401b31c2d574b7153aebb0da20d350972
|
06edf4d44e518067e5a7b9d656f214b797722e63
|
refs/heads/master
| 2021-01-01T20:06:42.116314
| 2017-08-11T15:36:25
| 2017-08-11T15:36:25
| 98,764,193
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
# PubNub imports
from pubnub.callbacks import SubscribeCallback
from pubnub.enums import PNStatusCategory
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub
# MIDI and Music-related imports
import mido
# Time imports for capturing roundtrip delay
# Verbose printing if DEBUG is true
DEBUG = False
# Define Channel name
channel_name = 'sensor_data'
# Standard PubNub object configuration under V4 API
pnconfig = PNConfiguration()
pnconfig.publish_key = 'pub-c-ff1da703-9b2a-41df-bdd4-96e21bbfb0b8'
pnconfig.subscribe_key = 'sub-c-d1024ca8-74bb-11e7-8153-0619f8945a4f'
pubnub = PubNub(pnconfig)
# New V4 Python API requires a callback
def publish_callback(result, status):
print(result)
pass # Do nothing
# Handle PNPublishResult and PNStatus
print("Entering main loop. Press Control-C to exit.")
with mido.open_input('Midi Fighter Twister') as inport:
print ("Succesfully connected to MIDI FIGHTER TWISTER")
for message in inport:
        # Only consider control_change messages; note_on, note_off and everything else is filtered out
if message.type == "control_change":
# Data to be transmitted. Parse "message" list into constituent parts
data = {
'type': message.type,
'channel': message.channel,
'control': message.control,
'value': message.value
}
if DEBUG:
print ("Sending data: ", data)
# Publish to PubNub channel
pubnub.publish().channel(channel_name).message(data).async(publish_callback)
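# SubscribeCallback and PNStatusCategory are imported above but unused by this publisher; a
# minimal matching subscriber for the same channel (hedged sketch against the V4 API) would
# look roughly like this:
# class SensorListener(SubscribeCallback):
#     def message(self, pubnub, message):
#         print("Received data: ", message.message)
# pubnub.add_listener(SensorListener())
# pubnub.subscribe().channels(channel_name).execute()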
|
[
"hanoi@lamtharn-hantrakul.sfb.lyft-corp.net"
] |
hanoi@lamtharn-hantrakul.sfb.lyft-corp.net
|
5b80a2ef686bef03895fc623fe22bb41d632eb86
|
f2ca96d4e9319f1df17f7b6853fe6f832fd25b23
|
/main.py
|
c06f48af229311f883e5c010bcd9dc02d168db48
|
[] |
no_license
|
chinarahul04/heroku_practi
|
0b2da0b5b662023fdc012f6339c39f32a3052e33
|
4107d1cd11234ad5cd6c7212653ecdcb4a53e50b
|
refs/heads/main
| 2023-06-05T22:19:11.520882
| 2021-06-25T05:25:11
| 2021-06-25T05:25:11
| 380,130,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
from flask import Flask
app = Flask(__name__)
@app.route('/', methods=['GET','POST'])
def index():
return "hy this rahul web"
if __name__=="__main__":
app.run()
|
[
"bandaruchinarahul04@gmail.com"
] |
bandaruchinarahul04@gmail.com
|
195a19e8ab62566d58ec241180b4cbe050d87f27
|
f8dd9d621cfd3703df9f206cf8bd4b815ca91f6f
|
/.ycm_extra_conf.py
|
7f3cd021725cc9750d4cac52a214e8b405dfd291
|
[
"Apache-2.0"
] |
permissive
|
ezchi/virtio
|
f0937dc7bd39ad57032566f49bcb6e5c4caf7539
|
dd975d96dfdaf176a54ceafc239501a96dbed571
|
refs/heads/master
| 2020-03-20T03:28:41.961704
| 2018-01-23T22:10:28
| 2018-01-23T22:10:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,401
|
py
|
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Weverything',
'-Werror',
'-Wno-padded',
'-Wno-c++98-compat',
'-Wno-c++98-compat-pedantic',
'-Wno-global-constructors',
'-Wno-exit-time-destructors',
'-Wno-covered-switch-default',
'-fexceptions',
'-std=c++11',
'-xc++',
'-Iinclude',
'-Ilogic/include',
'-isystem/usr/local/systemc/2.3.1/include',
'-isystem/usr/local/share/verilator/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return { 'flags': final_flags }
|
[
"tymoteusz.blazejczyk.pl@gmail.com"
] |
tymoteusz.blazejczyk.pl@gmail.com
|
5da193ab8f0e2efa5b0645b1029e0314fd56b029
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_011/ch92_2019_10_02_17_54_14_425785.py
|
043154a806fa8650cc4d1a71882bef7df3c5440f
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
def simplifica_dict(dicionario):
lista = []
for chave in dicionario:
if chave not in lista:
lista.append(chave)
for valor in dicionario[chave]:
            if valor not in lista:
                lista.append(valor)
return lista
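# Example with a hypothetical input: keys are kept, then each value is appended once.
# simplifica_dict({'a': [1, 2], 'b': [2]})  # -> ['a', 1, 2, 'b']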
|
[
"you@example.com"
] |
you@example.com
|
dcdfd17496925a85400ab2e195a3c8e50d5401e6
|
d7f486eebaa164bf3274c843e1932c7eef596e5e
|
/importer/facebook.py
|
352a80e06ffca4048160d7b028cf173373aa9667
|
[
"MIT"
] |
permissive
|
Galaxyvintage/journal-1
|
aafe107645a6dde038b0010496c041ac635e966d
|
f666a3b38f0eeb2cc1f5576e0668f174bf1cbd8d
|
refs/heads/master
| 2020-03-20T09:15:09.269993
| 2018-07-05T16:31:17
| 2018-07-05T16:31:17
| 137,332,462
| 0
| 0
| null | 2018-06-14T08:54:38
| 2018-06-14T08:54:37
| null |
UTF-8
|
Python
| false
| false
| 7,091
|
py
|
import events
from database import db
import json
import datetime
import os
def load_to_json(filename):
json_data = open(filename).read()
return json.loads(json_data)
def read_app_posts(directory):
data = load_to_json(directory + "apps/posts_from_apps.json")
for post in data["app_posts"]:
attachment_data = post["attachments"][0]["data"][0]["external_context"]
time = datetime.datetime.fromtimestamp(post["timestamp"])
message = attachment_data["name"]
title = post["title"]
app_name = "unknown app"
if "via" in title:
app_name = title[title.index("via") + 4 : -1]
kvps = {"message": message, "title": title, "app": app_name}
if attachment_data.has_key("url"):
kvps["url"] = attachment_data["url"]
events.add("Facebook post via " + app_name + ": " + message, time, ["facebook", "post", "app"], kvps)
def read_app_installs(directory):
data = load_to_json(directory + "apps/installed_apps.json")
for item in data["installed_apps"]:
events.add("Added Facebook app " + item["name"] + ".", datetime.datetime.fromtimestamp(item["time_added"]), ["facebook", "app"], {"app": item["name"]})
def read_comments(directory):
data = load_to_json(directory + "comments/comments.json")
for comment in data["comments"]:
time = datetime.datetime.fromtimestamp(comment["timestamp"])
message = comment["data"][0]["comment"]["comment"]
events.add("Facebook: " + comment["title"], time, ["facebook", "comment"], {"message": message})
def read_events(directory):
data = load_to_json(directory + "events/event_responses.json")
for event in data["event_responses"]["events_joined"]:
time = datetime.datetime.fromtimestamp(event["start_timestamp"])
name = event["name"]
events.add("Participated in Facebook event: " + name, time, ["facebook", "event"], {"name": name})
data = load_to_json(directory + "events/your_events.json")
for event in data["your_events"]:
time = datetime.datetime.fromtimestamp(event["start_timestamp"])
name = event["name"]
location = event["place"]["name"]
events.add("Hosted Facebook event: " + name, time, ["facebook", "event"], {"name": name, "location": location, "message": event["description"]})
def read_friends(directory):
data = load_to_json(directory + "friends/friends_added.json")
for friend in data["friends"]:
time = datetime.datetime.fromtimestamp(friend["timestamp"])
name = friend["name"]
events.add("Added Facebook friend " + name + ".", time, ["facebook", "friend"], {"name": name})
def create_conversation_event(title, message_count, time, participants, history, first):
kvps = {"participants": participants, "message": history}
if first:
events.add(
"Started a Facebook conversation with " + title + " (" + str(message_count) + " message" + (
"s" if message_count > 1 else "") + ").",
time, ["facebook", "message"], kvps)
else:
events.add(
"Exchanged " + str(message_count) + " Facebook message" + (
"s" if message_count > 1 else "") + " with " + title + ".",
time, ["facebook", "message"], kvps)
def read_messages(directory):
message_directory = directory + "messages/"
for conversation in [os.path.join(message_directory, name) for name in os.listdir(message_directory) if os.path.isdir(os.path.join(message_directory, name)) and name != "stickers_used"]:
data = load_to_json(conversation + "/message.json")
if not data.has_key("title"):
continue
title = data["title"]
participants = [title]
if data.has_key("participants"):
participants = data["participants"]
messages = data["messages"]
session_start_time = None
last_message_time = None
history = ""
message_count = 0
session_count = 0
for message in reversed(messages):
if message.has_key("content"):
message_time = datetime.datetime.fromtimestamp(message["timestamp"])
if session_start_time is None:
session_start_time = message_time
elif (message_time - last_message_time).total_seconds() > 4 * 60 * 60:
create_conversation_event(title, message_count, session_start_time, ", ".join(participants), history, session_count == 0)
session_start_time = message_time
message_count = 0
session_count += 1
history = ""
last_message_time = message_time
message_count += 1
history += message["sender_name"] + ": " + message["content"] + "\n"
if message.has_key("photos") and not message["sender_name"] in participants:
events.add("Sent " + (str(len(message["photos"])) + " images" if len(message["photos"]) > 1 else "an image") + " to " + title + ".",
datetime.datetime.fromtimestamp(message["timestamp"]),
["facebook", "message", "image"], kvps={"participants": ", ".join(participants)}, images=[directory + photo["uri"] for photo in message["photos"]])
if message.has_key("photos") and message["sender_name"] in participants:
events.add("Received " + (str(len(message["photos"])) + " images" if len(
message["photos"]) > 1 else "an image") + " from " + message["sender_name"] + ".",
datetime.datetime.fromtimestamp(message["timestamp"]),
["facebook", "message", "image"], kvps={"participants": ", ".join(participants)},
images=[directory + photo["uri"] for photo in message["photos"]])
create_conversation_event(title, message_count, session_start_time, ", ".join(participants), history, session_count == 0)
def read_photos(directory):
photo_directory = directory + "photos/album/"
for album_file in [os.path.join(photo_directory, name) for name in os.listdir(photo_directory)]:
data = load_to_json(album_file)
album_name = data["name"]
for photo in data["photos"]:
file = directory + photo["uri"]
metadata = photo["media_metadata"]["photo_metadata"]
time = datetime.datetime.fromtimestamp(metadata["taken_timestamp"]) if metadata.has_key("taken_timestamp") else datetime.datetime.fromtimestamp(metadata["modified_timestamp"])
tags = ["facebook", "photo"]
kvps = {}
if metadata.has_key("camera_make") and metadata.has_key("camera_model"):
camera = metadata["camera_make"] + " " + metadata["camera_model"]
tags.append(camera)
kvps["camera"] = camera
events.add("Added photo to Facebook album " + album_name + ".",
time,
tags,
kvps,
hash=file,
latitude=(metadata["latitude"] if metadata.has_key("latitude") else None),
longitude=(metadata["longitude"] if metadata.has_key("longitude") else None),
images=[file])
def import_facebook_data(directory = "data/facebook/"):
with db.atomic():
print "Reading Facebook app posts..."
read_app_posts(directory)
read_app_installs(directory)
print "Reading Facebook comments..."
read_comments(directory)
print "Reading Facebook events..."
read_events(directory)
print "Reading Facebook friends..."
read_friends(directory)
print "Reading Facebook messages..."
read_messages(directory)
print "Reading Facebook photos..."
read_photos(directory)
if __name__ == "__main__":
import_facebook_data()
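# A standalone sketch of the session-splitting rule used in read_messages() above: messages
# more than four hours apart start a new conversation session. The timestamps in the example
# are hypothetical datetime objects.
def split_sessions(timestamps, gap_seconds=4 * 60 * 60):
    sessions = []
    for ts in sorted(timestamps):
        if not sessions or (ts - sessions[-1][-1]).total_seconds() > gap_seconds:
            sessions.append([ts])   # gap exceeded: start a new session
        else:
            sessions[-1].append(ts)
    return sessions

# Example: split_sessions([datetime.datetime(2018, 1, 1, 9), datetime.datetime(2018, 1, 1, 20)])
# -> two sessions, because the eleven-hour gap exceeds four hours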
|
[
"mail@marian42.de"
] |
mail@marian42.de
|
d905ee37aa6ecea6a752fbc54249897a44a54d0e
|
66e6360325b781ed0791868765f1fd8a6303726f
|
/TB2009/WorkDirectory/5223 All Charges/ExportCharge.py
|
0256e8dcc77eb233c47742a482097e9b389b68a6
|
[] |
no_license
|
alintulu/FHead2011PhysicsProject
|
c969639b212d569198d8fce2f424ce866dcfa881
|
2568633d349810574354ad61b0abab24a40e510e
|
refs/heads/master
| 2022-04-28T14:19:30.534282
| 2020-04-23T17:17:32
| 2020-04-23T17:17:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,613
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("PrintCharges")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(50000))
process.source = cms.Source("HcalTBSource",
fileNames = cms.untracked.vstring('file:/tmp/chenyi/HTB_.root'),
streams = cms.untracked.vstring('HCAL_Trigger','HCAL_SlowData','HCAL_QADCTDC','HCAL_DCC021','Chunk699')
)
process.hcal_db_producer = cms.ESProducer("HcalDbProducer",
dump = cms.untracked.vstring(''),
file = cms.untracked.string('')
)
process.es_hardcode = cms.ESSource("HcalHardcodeCalibrations",
toGet = cms.untracked.vstring('GainWidths','PedestalWidths','QIEData','ChannelQuality','ZSThresholds','RespCorrs')
)
process.es_ascii = cms.ESSource("HcalTextCalibrations",
input = cms.VPSet(
cms.PSet(
object = cms.string('ElectronicsMap'),
file = cms.FileInPath('emap_TB2009_A.txt')
),
cms.PSet(
object = cms.string('Pedestals'),
file = cms.FileInPath('pedestals_TB2009_.txt')
),
cms.PSet(
object = cms.string('Gains'),
file = cms.FileInPath('gains_TB2009_LMIP_newpedestal.txt')
)
)
)
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.tbUnpacker = cms.EDFilter("HcalTBObjectUnpacker",
IncludeUnmatchedHits = cms.untracked.bool(False),
HcalTDCFED = cms.untracked.int32(8),
HcalQADCFED = cms.untracked.int32(8),
HcalSlowDataFED = cms.untracked.int32(3),
HcalTriggerFED = cms.untracked.int32(1),
HcalVLSBFED = cms.untracked.int32(699),
ConfigurationFile = cms.untracked.string('configQADCTDC_TB2009.txt')
)
process.hcalDigis = cms.EDFilter("HcalRawToDigi",
UnpackZDC = cms.untracked.bool(True),
FilterDataQuality = cms.bool(True),
ExceptionEmptyData = cms.untracked.bool(True),
InputLabel = cms.InputTag("source"),
ComplainEmptyData = cms.untracked.bool(False),
UnpackCalib = cms.untracked.bool(False),
firstSample = cms.int32(0),
lastSample = cms.int32(9),
FEDs = cms.untracked.vint32(21),
HcalFirstFED = cms.untracked.int32(21)
)
process.load("RecoLocalCalo.HcalRecProducers.HcalSimpleReconstructor_hbhe_cfi")
process.hbhereco.firstSample = 5
process.hbhereco.samplesToAdd = 4
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound',
'TooManyProducts',
'TooFewProducts')
)
process.triggerfilter = cms.EDFilter("TriggerFilter",
allowBeamTrigger = cms.untracked.bool(True),
allowOutOfSpillPedestalTrigger = cms.untracked.bool(False),
allowOthers = cms.untracked.bool(False)
)
process.oneparticle = cms.EDFilter("SingleTowerParticleFilter",
particleNumber = cms.untracked.int32(1)
)
process.muonveto = cms.EDFilter("MuonVetoFilter")
process.export = cms.EDAnalyzer("ExportChargeAnalyzer",
normalModule = cms.untracked.string('hbhereco')
)
process.vlsbinfo = cms.EDProducer("VLSBInformationProducer",
minSample = cms.untracked.uint32(0),
maxSample = cms.untracked.uint32(31),
baselineSamples = cms.untracked.uint32(2),
useMotherBoard0 = cms.untracked.bool(True),
useMotherBoard1 = cms.untracked.bool(True),
useMotherBoard2 = cms.untracked.bool(False),
useMotherBoard3 = cms.untracked.bool(True),
usePedestalMean = cms.untracked.bool(False),
mip = cms.untracked.string('MIP_EarlyRejection_Median.txt'),
adcMap = cms.untracked.string('FinalAdcMapping_All.txt'),
beamEnergy = cms.untracked.double()
)
process.vlsbreco = cms.EDProducer("HcalTBVLSBReconstructor",
minSample = cms.untracked.uint32(0),
maxSample = cms.untracked.uint32(31),
mipFileName = cms.untracked.string("MIP_EarlyRejection_Median.txt"),
adcMapFileName = cms.untracked.string("FinalAdcMapping_All.txt")
)
process.energydistribution = cms.EDAnalyzer("FillRHEnergyDistributionAnalyzer",
vlsbModule = cms.untracked.string("vlsbreco"),
normalModule = cms.untracked.string("hbhereco"),
output = cms.untracked.string("EnergyDistribution_ABC_.root")
)
process.timecut = cms.EDFilter("HighestSampleTimeFilter",
minimum = cms.untracked.double(7.5),
threshold = cms.untracked.double(100)
)
process.hitcut = cms.EDFilter("HitXFilter",
maximum = cms.untracked.double(-5)
)
process.mincut = cms.EDFilter("RHTotalEnergyCut",
minimum = cms.untracked.double(),
vlsbModule = cms.untracked.string("vlsbreco"),
normalModule = cms.untracked.string("hbhereco")
)
process.maxcut = cms.EDFilter("RHTotalEnergyCut",
minimum = cms.untracked.double(),
vlsbModule = cms.untracked.string("vlsbreco"),
normalModule = cms.untracked.string("hbhereco")
)
process.merge = cms.EDProducer("CombineCollectionProducer",
vlsbModule = cms.untracked.string("vlsbreco"),
normalModule = cms.untracked.string("hbhereco")
# interCalibration = cms.untracked.string("InterCalibration_Secondary.txt")
)
process.export = cms.EDAnalyzer("CExportChargeAnalyzer",
moduleName = cms.untracked.string('merge'),
simplified = cms.untracked.bool(True),
exportVlsb = cms.untracked.bool(True)
)
process.runinfo = cms.EDProducer("RunInformationProducer",
beamEnergy = cms.untracked.double()
)
process.p = cms.Path(
process.tbUnpacker *
process.vlsbinfo *
process.runinfo *
process.vlsbreco *
process.hcalDigis *
process.hbhereco *
process.triggerfilter *
process.oneparticle *
process.muonveto *
process.timecut *
process.hitcut *
process.mincut *
~process.maxcut *
process.merge *
process.export
)
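As a hedged aside, the tilde in the Path above inverts a filter's decision, so combining mincut with ~maxcut keeps only events whose reconstructed energy lies inside a window. A minimal sketch of that pattern follows; the process name, thresholds and variable names are invented, and it assumes a CMSSW environment where FWCore is available.
# Hypothetical sketch of the filter-window pattern; thresholds are invented
# and running this requires a CMSSW release providing FWCore.
import FWCore.ParameterSet.Config as cms

sketch = cms.Process("WINDOWSKETCH")
sketch.low = cms.EDFilter("RHTotalEnergyCut",
    minimum = cms.untracked.double(50.0),
    vlsbModule = cms.untracked.string("vlsbreco"),
    normalModule = cms.untracked.string("hbhereco")
)
sketch.high = cms.EDFilter("RHTotalEnergyCut",
    minimum = cms.untracked.double(150.0),
    vlsbModule = cms.untracked.string("vlsbreco"),
    normalModule = cms.untracked.string("hbhereco")
)
# Events pass only when 'low' accepts (energy above 50) and 'high' rejects
# (energy not above 150), i.e. the total energy falls inside the window.
sketch.window = cms.Path(sketch.low * ~sketch.high)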
|
[
"yichen@positron01.hep.caltech.edu"
] |
yichen@positron01.hep.caltech.edu
|
808f3c9b4270aa88ba057ffff60dbf1a55d19ad3
|
c3f6c2f9a2e35ede54f48979770c8b42fd390089
|
/submodules/python-stats/cross_correlation_algs.py
|
6fe7d4712a50bef4b3796fe47e0518cddae4e868
|
[] |
no_license
|
VideoMem/CRT_filter
|
4469fa34dc19e1fa096bdef609629759cc8ed11a
|
ebe81f50bc2402f1a17cac405a8e8c6984483d07
|
refs/heads/master
| 2023-01-01T22:14:06.734071
| 2020-10-27T18:52:54
| 2020-10-27T18:52:54
| 247,828,511
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,321
|
py
|
from scipy.io import wavfile as wav
from scipy.signal import butter, filtfilt, resample
import pandas as pd
import numpy as np
#matplotlib inline
import matplotlib.pyplot as plt
#import seaborn as sns
import scipy.stats as stats
def log3_2( data ):
return np.log(data) / np.log ( 1.5 )
def log_compress ( data ):
data_max = np.max(np.abs(data))
data_norm = data / (data_max * 1.2)
return log3_2( ( data_norm + 2 ) / 2)
def loop_compress( data, level ):
loop = data
for i in range(level):
loop = log_compress( loop )
return loop
def downsample(data, srate, newrate):
newshape = round(data.size * newrate / srate)
if srate != newrate:
return resample(data, newshape)
else:
return data
def rms(data):
audiodata = data.astype(np.float64)
rms = np.sqrt(audiodata**2)
return rms.reshape(data.shape)
def mono_mix(data):
audiodata = data.astype(np.float64)
return audiodata.sum(axis=1) / 0xFFFF
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = filtfilt(b, a, data)
return y
def load_wave( filename ):
rate, samples = wav.read( filename )
print(f"Sample Rate: {rate}")
channels = samples.shape[1]
print(f"Number of channels = {channels}")
length = samples.shape[0] / rate
print(f"length = {length}s")
if channels > 1:
mono = mono_mix(samples)
else:
mono = samples
return rate, mono
def hash_rms( data ):
rmsv = rms(data)
rmslp = butter_bandpass_filter( rmsv, lowcut=lowest, highcut=mid_lo, fs=rate, order=2)
return log_compress(rmslp)
def pandas_pearson_r( df ):
overall_pearson_r = df.corr().iloc[0,1]
print(f"Pandas computed Pearson r: {overall_pearson_r}")
return overall_pearson_r
def pearson( df ):
overall_pearson_r = pandas_pearson_r(df)
r, p = stats.pearsonr( df.dropna()['original'], df.dropna()['transform'] )
print(f"Scipy computed Pearson r: {r} and p-value: {p}")
f,ax=plt.subplots(figsize=(14,3))
df.rolling(window=30,center=True).median().plot(ax=ax)
ax.set(xlabel='Sample',ylabel='Correlation',title=f"Overall Pearson r = {np.round(overall_pearson_r,2)}")
#plt.show()
def threshold_sync( data, level ):
i = 0
for sample in data:
if np.abs( sample ) > level: return i
i+=1
def crosscorr(datax, datay, lag=0, wrap=False):
""" Lag-N cross correlation.
Shifted data filled with NaNs
Parameters
----------
lag : int, default 0
datax, datay : pandas.Series objects of equal length
Returns
----------
crosscorr : float
"""
if wrap:
shiftedy = datay.shift(lag)
shiftedy.iloc[:lag] = datay.iloc[-lag:].values
return datax.corr(shiftedy)
else:
return datax.corr(datay.shift(lag))
def slice_part( data, start, end, margin ):
if start > margin:
if data.size - end > margin:
return data[start - margin: end + margin ]
else:
return data[start - margin: end ]
else:
if data.size - end > margin:
return data[start: end + margin ]
else:
return data[start: end ]
def crosscorr_offset( d1, d2, downrate, seconds ):
rs = [ crosscorr( d1 , d2, lag ) for lag in range( -int(seconds*downrate), int(seconds*downrate+1) ) ]
return rs, np.ceil( len(rs) / 2 ) - np.argmax(rs)
def crosscorr_plot( rs, offset ):
f , ax = plt.subplots( figsize=( 14, 3 ) )
ax.plot(rs)
ax.axvline(np.ceil(len(rs)/2),color='k',linestyle='--',label='Center')
ax.axvline(np.argmax(rs),color='r',linestyle='--',label='Peak synchrony')
ax.set(title=f'Offset = {offset} frames\nS1 leads <> S2 leads',ylim=[.1,.31],xlim=[0,301], xlabel='Offset',ylabel='Pearson r')
ax.set_xticks([0, 50, 100, 151, 201, 251, 301])
ax.set_xticklabels([-150, -100, -50, 0, 50, 100, 150]);
plt.legend()
def to_dataframe( d0, d1, org_ini, org_end, cpy_ini, cpy_end, rate, downrate ):
original = downsample( slice_part( d0, org_ini, org_end, 1024 ), rate, downrate)
transform = downsample( slice_part( d1, cpy_ini, cpy_end, 1024 ), rate, downrate)
p_original = original[:transform.size]
df = pd.DataFrame({ 'original':p_original, 'transform':transform })
return df
def pearson_slicer( df, start, step ):
pearson_r = 1
while np.abs(pearson_r) > 0.05:
slice = df[ start: start + step ]
pearson_r = pandas_pearson_r( slice )
start += step
return int( start - step ), pearson_r
def pearson_filter( df ):
bits = int(np.log(df.size) / np.log( 2 ))
print( f'bits {bits}')
newstart = 0
for i in range(1, bits):
step = int( df.size / 2**i )
if newstart - step > 0:
start = newstart - step
else:
start = newstart
print( f'start {start}, step {step}' )
newstart, pearson_r = pearson_slicer( df, start, step )
if np.abs(pearson_r) < 0.05: break
return int( newstart )
def gain_range( original, transform, start, end, divisor ):
error_f = []
for gain in range( start, end ):
error_f.append( np.mean( rms( original - transform * gain / divisor ) ) )
return error_f
def gain_min( error, start, end ):
min_error = np.min( error )
id = 0
for gain in range( start, end ):
if error[id] == min_error:
return gain
id+=1
return None
def autogain( original, transform ):
error_f = gain_range( original, transform, 2, 18, 10 )
tens = gain_min( error_f, 2, 18 )
print( f'10: min error at gain:{ tens }')
error_f = gain_range( original, transform, (tens - 1) * 10, (tens + 1) * 10, 100 )
hundreds = gain_min( error_f, (tens - 1) * 10, (tens + 1) * 10 )
print( f'100: min error at gain:{ hundreds }')
error_f = gain_range( original, transform, (hundreds - 1) * 10, (hundreds + 1) * 10, 1000 )
thousands = gain_min( error_f, (hundreds - 1) * 10, (hundreds + 1) * 10 )
print( f'1000: min error at gain:{ thousands }')
return thousands / 1000
rate, mono = load_wave( 'sample00.wav' )
none, copy = load_wave( 'correlated.wav' )
lowest= 5
mid_lo = 80
mid_hi = 3000
highest = rate /2 -1
lowband = butter_bandpass_filter( mono, lowcut=lowest, highcut=mid_lo, fs=rate, order=2)
lowbcpy = butter_bandpass_filter( copy, lowcut=lowest, highcut=mid_lo, fs=rate, order=2)
#midband = butter_bandpass_filter( mono, lowcut=mid_lo, highcut=mid_hi, fs=rate, order=3)
#higband = butter_bandpass_filter( mono, lowcut=mid_hi, highcut=highest, fs=rate, order=2)
rmslog = hash_rms( lowband )
rmscpy = hash_rms( lowbcpy )
reversed_rmslog = rmslog[::-1]
reversed_rmscpy = rmscpy[::-1]
wav.write( "rmslo.wav", rate, rmslog )
wav.write( "rmscp.wav", rate, rmscpy )
#thresold sync focus
th_org_ini = threshold_sync( rmslog, 0.01 )
th_cpy_ini = threshold_sync( rmscpy, 0.01 )
th_org_end = rmslog.size - threshold_sync( reversed_rmslog, 0.01 )
th_cpy_end = rmscpy.size - threshold_sync( reversed_rmscpy, 0.01 )
if th_org_end - th_org_ini < th_cpy_end - th_cpy_ini:
copy_len = th_org_end - th_org_ini
th_cpy_end = th_cpy_ini + copy_len
print( f'original ini: {th_org_ini} ~ {th_org_end} end' )
print( f'transform ini: {th_cpy_ini} ~ {th_cpy_end} end' )
downrate = round( mid_lo * 2.2 )
df = to_dataframe( rmslog, rmscpy, th_org_ini, th_org_end, th_cpy_ini, th_cpy_end, rate, downrate )
pearson( df )
seconds = 1
rs, offset = crosscorr_offset( df['original'], df['transform'], downrate, seconds )
crosscorr_plot( rs, offset )
print( f'offset: {offset}' )
##offset error of threshold sync done
# sync correction
th_org_ini -= int( offset * rate / downrate )
dmx = to_dataframe( rmslog, rmscpy, th_org_ini, th_org_end, th_cpy_ini, th_cpy_end, rate, downrate )
pearson( dmx )
drs, doffset = crosscorr_offset( dmx['original'], dmx['transform'], downrate, seconds )
crosscorr_plot( drs, doffset )
print( f'offset after: {doffset}' )
newending = pearson_filter( dmx )
print( f'original ending: {dmx.size}, new ending: {newending}' )
total_len = int(newending * rate / downrate) - th_cpy_ini
th_cpy_end = th_cpy_ini + total_len
th_org_end = th_org_ini + total_len
newsynced = copy[th_cpy_ini: th_cpy_end ]
orgsynced = mono[th_org_ini: th_org_end ]
dfs = to_dataframe( orgsynced, newsynced, 0, seconds * rate, 0, seconds * rate, rate, rate )
rss, hi_offset = crosscorr_offset( dfs['original'], dfs['transform'], rate, seconds / 2 )
#crosscorr_plot( rs, hi_offset )
print( f'hi offset: {hi_offset}' )
th_org_ini -= int( hi_offset ) -1
th_org_end -= int( hi_offset ) -1
orgsynced = mono[th_org_ini: th_org_end ]
c_gain = autogain( orgsynced[:rate*seconds], newsynced[:rate*seconds] )
print( f'min error at gain: {c_gain }')
print( f'len {total_len} newsync: {newsynced.size} orgsync: {orgsynced.size}' )
synced = np.transpose ( np.asarray ( (orgsynced, newsynced * c_gain) ) )
print( f'synced shape: {synced.shape}' )
wav.write( "resynced.wav", rate, synced )
error = orgsynced - newsynced * c_gain
wav.write( "error.wav", rate, error )
#plt.show()
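A small, self-contained sketch of the lag scan that crosscorr_offset performs above: shift one series against the other and keep the lag with the highest Pearson correlation. The synthetic data and the 7-sample lead are invented for illustration.
# Hypothetical lag scan on synthetic data (the 7-sample lead is made up).
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
base = rng.normal(size=500)
s1 = pd.Series(base)
s2 = pd.Series(np.roll(base, -7))        # s2 runs 7 samples ahead of s1

def lag_corr(x, y, lag):
    return x.corr(y.shift(lag))          # same idea as crosscorr(..., wrap=False)

lags = range(-20, 21)
rs = [lag_corr(s1, s2, lag) for lag in lags]
print(list(lags)[int(np.argmax(rs))])    # 7: shifting s2 back by 7 realigns it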
|
[
"sebastian.wilwerth@gmail.com"
] |
sebastian.wilwerth@gmail.com
|
10402105f08426d7754983b383bf147cf2c0611e
|
8c415b773604c2ac17e18c7ba116d720d9403fef
|
/vision/vision-iface.gypi
|
12db2b67726e0e919d30da307fdcc395e2cc7ce7
|
[] |
no_license
|
kiik/libtuum
|
2df4668ac01b4c37cadac3ca391b1359967eb0f0
|
844566b8715dba99c3d75c702491c8b1834573a3
|
refs/heads/master
| 2020-05-23T08:08:57.048312
| 2016-12-01T12:42:32
| 2016-12-01T12:42:32
| 69,343,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39
|
gypi
|
{
'include_dirs': [
'inc'
],
}
|
[
"kiik.meelik@gmail.com"
] |
kiik.meelik@gmail.com
|
339f9df0dd568b0dac0574b4653c263cc9d9af76
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/come_old_problem_to_point/ask_thing/see_day/seem_problem/time/find_few_week_over_point.py
|
b79ee7a454f58433209a2c9c27edba7f4f38079b
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
#! /usr/bin/env python
def government(str_arg):
use_public_day(str_arg)
print('small_man_and_long_world')
def use_public_day(str_arg):
print(str_arg)
if __name__ == '__main__':
government('ask_day_from_year')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
cfd998ce54da9c295abf2acbdda401a867013882
|
bef5d82150ea8db3297e92fdd6e276d82645e2fe
|
/code/code1a.py
|
d2451c659f1b086872e6af93cd704cf1c5ebf748
|
[] |
no_license
|
dcruzp/proyectoProb
|
20892eb76dc1e06c36e4f99e0498c8f53fd57028
|
8ef7e71de064ea35c605a63a4d1af0d2e86e3ef0
|
refs/heads/master
| 2022-12-27T19:38:10.030621
| 2020-10-17T10:46:28
| 2020-10-17T10:46:28
| 304,534,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
import numpy as np
import random as rdm
def exponencial (l):
U = rdm.random()
return -1 / l * np.log(U)
def gammasimulation (l , n ): # lambda, number of calls
total = 0
for i in range (n):
total += exponencial(l)
return total
def media (l,n,s): # lambda, number of calls, number of simulations
promedio = 0 ;
for i in range (s):
promedio += gammasimulation(l,n)
return promedio / s
s = 1000 # number of simulations
table = [[1/2 , 30 , s ],
[1/4 , 20 , s ],
[1/6 , 10 , s ],
[1/8 , 36 , s ],]
for x in table :
    print ( 'lambda= ' + str(x[0]) + ' , number of calls ' + str(x[1]) + ' mean -> ' + str(media(x[0],x[1],x[2])) )
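A quick, hypothetical sanity check of the construction above: the sum of n Exponential(lambda) draws is Gamma (Erlang) distributed with mean n/lambda, so the simulated average should approach that value as the number of simulations grows.
# Hypothetical check that the simulated mean approaches n / lambda.
import numpy as np
import random as rdm

def exponential_draw(l):
    return -1 / l * np.log(rdm.random())

def gamma_draw(l, n):
    return sum(exponential_draw(l) for _ in range(n))

l, n, s = 1/2, 30, 5000
mean = sum(gamma_draw(l, n) for _ in range(s)) / s
print(mean, 'vs theoretical', n / l)     # both close to 60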
|
[
"danieldelacruzprieto@gmail.com"
] |
danieldelacruzprieto@gmail.com
|
2725dddf88956fdbeb3e30bc7d9b47f2079f6b71
|
1b622808bd714e3c770c811bfa6aed0b36693928
|
/18.py
|
33d89ee4745bf39087f28df537e20e5094b298ac
|
[] |
no_license
|
dinob0t/project_euler
|
a4d9a28b2994b64ea6ad064b05553f13ad38fc6d
|
b0fed278ae2bfc1bfe44043f2b02482ebc210a56
|
refs/heads/master
| 2020-09-12T14:07:16.137051
| 2014-09-01T16:16:49
| 2014-09-01T16:16:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
def read_file_return_list(name):
list = []
with open(name,'r') as f:
for line in f:
line = line.split('\n')
list.append(line[0])
if 'str' in line:
break
return list
def triangle_to_dict(triangle):
tri_dict = {}
row_count = 0
for i in triangle:
tri_dict.update({row_count: i.split(' ')})
row_count = row_count + 1
return tri_dict
def find_max_path_sum(tri_dict):
end = max(tri_dict.keys()) + 1
for row in range(end-2, -1, -1):
for index in range(len(tri_dict[row])):
(tri_dict[row])[index] = int((tri_dict[row])[index]) + max(int((tri_dict[row+1])[index+1]),int((tri_dict[row+1])[index]))
return tri_dict[0]
if __name__ == "__main__":
triangle = read_file_return_list('18_input.dat')
tri_dict = triangle_to_dict(triangle)
print find_max_path_sum(tri_dict)
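A tiny, hypothetical trace of the bottom-up reduction used in find_max_path_sum: each cell absorbs the larger of its two children, so after the last pass the apex holds the maximum path sum. The four-row triangle below is a small invented example, not the 18_input.dat data.
# Hypothetical worked example of the bottom-up pass (not the real input file).
triangle = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for row in range(len(triangle) - 2, -1, -1):
    for i in range(len(triangle[row])):
        triangle[row][i] += max(triangle[row + 1][i], triangle[row + 1][i + 1])
print(triangle[0][0])   # 23, via the path 3 -> 7 -> 4 -> 9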
|
[
"dean.hillan@gmail.com"
] |
dean.hillan@gmail.com
|
5dd9d036390973bb4274c0546b4809ac2988db87
|
18b4252fc653583528d3171ab04281801341836b
|
/projects/simparc/mbase/IP.py
|
06e27221e01b0c433434de2b48c291d2dddcb57d
|
[
"Apache-2.0"
] |
permissive
|
sumannam/DEVS-Python
|
dd45a7e6b17fd904a20691effa78711d2aa05ca4
|
919ddc77f15cf61af7b2351a681b6de703d11c08
|
refs/heads/master
| 2023-08-31T23:21:16.410193
| 2023-08-19T11:37:30
| 2023-08-19T11:37:30
| 216,816,867
| 1
| 1
|
Apache-2.0
| 2023-08-19T11:37:31
| 2019-10-22T13:10:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,184
|
py
|
from src.ATOMIC_MODELS import *
class IP(ATOMIC_MODELS):
def __init__(self):
ATOMIC_MODELS.__init__(self)
self.setName(self.__class__.__name__)
self.addInPorts("in")
self.addInPorts("urgent")
self.addOutPorts("out")
self.addOutPorts("message")
self.state["sigma"]=math.inf
self.state["phase"]="passive"
self.addState("job-id", "")
self.addState("temp", "")
self.addState("processing_time", 10)
self.addState("time_remaining", 0)
self.addState("interrupthandling_time", 0.1)
def externalTransitionFunc(self, e, x):
if x.port == "in":
if self.state["phase"] == "passive":
self.state["job-id"] = x.value
self.state["time_remaining"] = self.state["processing_time"]
self.holdIn("busy", self.state["processing_time"])
elif self.state["phase"] == "busy":
self.state["time_remaining"] = self.state["time_remaining"] - e
self.state["temp"] = x.value
self.holdIn("interrupted", self.state["interrupthandling_time"])
elif self.state["phase"] == "interrupted":
self.Continue(e)
elif x.port == "urgent":
if self.state["phase"] == "passive":
self.state["job-id"] = x.value
self.state["time_remaining"] = self.state["processing_time"]
self.holdIn("busy", self.state["processing_time"])
else:
self.Continue(e)
def internalTransitionFunc(self):
if self.state["phase"] == "busy":
self.passviate()
elif self.state["phase"] == "interrupted":
self.holdIn("busy", self.state["time_remaining"])
else:
self.passviate()
def outputFunc(self):
content = CONTENT()
if self.state["phase"] == "busy":
content.setContent("out", self.state["job-id"])
elif self.state["phase"] == "interrupted":
id = "interrupted by " + self.state["temp"]
content.setContent("message", id)
return content
|
[
"sumannam@gmail.com"
] |
sumannam@gmail.com
|
6083b94a569ab6fb475e6053152cd4ad9492c856
|
44437fe0d8394d8ece0c0fe1b2b1e3b4f1fd0e04
|
/teste.py
|
3caa030da550198abd4f4d8c86e2b6c43032522c
|
[
"MIT"
] |
permissive
|
daherk2/telikong
|
d4893ad3b907e54bb69e55e48dbba0b245604ecd
|
fd4ae90df6e6d32c5205c5dc5c17fa9eccc7fe7b
|
refs/heads/master
| 2021-01-20T13:12:19.899970
| 2017-06-12T20:36:17
| 2017-06-12T20:36:17
| 90,463,299
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
import telikong_cli as tcli
###################### Exemplo ########################
try:
a = 3/0
print a
except Exception as e:
try:
print tcli.chck_stackoverflow(e)
except Exception as e:
print tcli.chck_stackoverflow(e)
|
[
"fabio.rosindo.daher.de.barros@gmail.com"
] |
fabio.rosindo.daher.de.barros@gmail.com
|
d2c4eb944aeb7b5201cd68a2fb149375fbaeff34
|
1bcdefe3798ab2d04be323b5834f0df987c5a1ad
|
/assignment_1_5.py
|
8c394ec090d76ac457817e3f20bd3ad6ecf023b5
|
[] |
no_license
|
kristjanleifur4/kristjan
|
7229894a9d781c408db5289e6aca403d7784e9f0
|
167220fc00e7aba72fcbea7c0cf14d6ed3ed34ad
|
refs/heads/master
| 2020-07-20T00:11:02.504445
| 2019-09-17T13:23:22
| 2019-09-17T13:23:22
| 206,537,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
x_str = input("Input x: ")
x_int = int(x_str)
first_three = int(x_int / 1000)
last_two = (x_int % 100)
middle_two = (x_int % 10000)
middle_two = (middle_two // 100)
print("original input:", x_str)
print("first_three:", first_three)
print("last_two:", last_two)
print("middle_two:", middle_two)
|
[
"kristjanls18@ru.is"
] |
kristjanls18@ru.is
|
fcb878a2819bc83a0ed79bdb5b844916fa3fbdbe
|
794e14945c0521b4eab03e8b9a3f93b8fa14e021
|
/src/compas_rhino/utilities/constructors.py
|
e71275fa0d0e525a4bf92e58e2154310209ae1c9
|
[
"MIT"
] |
permissive
|
KEERTHANAUDAY/compas
|
5e8ada865bc87ee48ba77b3f6fd03661a9b9c17d
|
4d1101cf302f95a4472a01a1265cc64eaec6aa4a
|
refs/heads/master
| 2021-07-11T16:26:19.452926
| 2020-09-10T14:27:11
| 2020-09-10T14:27:11
| 294,453,684
| 0
| 0
|
MIT
| 2020-09-10T15:47:31
| 2020-09-10T15:47:30
| null |
UTF-8
|
Python
| false
| false
| 2,494
|
py
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.utilities import geometric_key
import Rhino
import scriptcontext as sc
__all__ = ['volmesh_from_polysurfaces']
def volmesh_from_polysurfaces(cls, guids):
"""Construct a volumetric mesh from given polysurfaces.
Essentially, this function does the following:
* find each of the polysurfaces and check if they have a boundary representation (b-rep)
* convert to b-rep and extract the edge loops
* make a face of each loop by referring to vertices using their geometric keys
* add a cell per brep
* and add the faces of a brep to the cell
* create a volmesh from the found vertices and cells
Parameters
----------
cls : :class:`compas.datastructures.VolMesh`
The class of volmesh.
guids : sequence of str or System.Guid
The *globally unique identifiers* of the polysurfaces.
Returns
-------
:class:`compas.datastructures.Volmesh`
The volumetric mesh object.
"""
gkey_xyz = {}
cells = []
for guid in guids:
cell = []
obj = sc.doc.Objects.Find(guid)
if not obj.Geometry.HasBrepForm:
continue
brep = Rhino.Geometry.Brep.TryConvertBrep(obj.Geometry)
for loop in brep.Loops:
curve = loop.To3dCurve()
segments = curve.Explode()
face = []
sp = segments[0].PointAtStart
ep = segments[0].PointAtEnd
sp_gkey = geometric_key(sp)
ep_gkey = geometric_key(ep)
gkey_xyz[sp_gkey] = sp
gkey_xyz[ep_gkey] = ep
face.append(sp_gkey)
face.append(ep_gkey)
for segment in segments[1:-1]:
ep = segment.PointAtEnd
ep_gkey = geometric_key(ep)
face.append(ep_gkey)
gkey_xyz[ep_gkey] = ep
cell.append(face)
cells.append(cell)
gkey_index = dict((gkey, index) for index, gkey in enumerate(gkey_xyz))
vertices = [list(xyz) for gkey, xyz in gkey_xyz.items()]
cells = [[[gkey_index[gkey] for gkey in face] for face in cell] for cell in cells]
return cls.from_vertices_and_cells(vertices, cells)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
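The heart of volmesh_from_polysurfaces is the geometric-key lookup that collapses coincident corner points into shared vertex indices. Below is a hypothetical, Rhino-free sketch of that step; the simplified key function stands in for compas.utilities.geometric_key and the two faces are invented.
# Hypothetical, Rhino-free sketch of the geometric-key deduplication step.
def simple_geometric_key(xyz, precision=3):
    # simplified stand-in for compas.utilities.geometric_key
    return "{0:.{3}f},{1:.{3}f},{2:.{3}f}".format(*xyz, precision)

faces = [
    [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (1.0, 1.0, 0.0)],
    [(1.0, 1.0, 0.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0)],  # shares two corners
]
gkey_xyz = {}
keyed_faces = []
for face in faces:
    keys = []
    for xyz in face:
        gkey = simple_geometric_key(xyz)
        gkey_xyz[gkey] = xyz
        keys.append(gkey)
    keyed_faces.append(keys)
gkey_index = {gkey: index for index, gkey in enumerate(gkey_xyz)}
vertices = [list(xyz) for xyz in gkey_xyz.values()]
indexed_faces = [[gkey_index[k] for k in keys] for keys in keyed_faces]
print(len(vertices), indexed_faces)   # 4 shared vertices, faces as index lists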
|
[
"vanmelet@ethz.ch"
] |
vanmelet@ethz.ch
|
98eb8e7dc1fb21ee50b9002c5f691820120ba470
|
19aa291198622834cc0fc04610d419189a098f24
|
/stock/jupyterAlgo/AlgoTest/showPrice_Vol.py
|
3326dfefc8dfe1e14589419be6d85e88bb4a0837
|
[] |
no_license
|
johnsonhongyi/pyQuant
|
3127cc30a7fa07a9ca58a1a067c8ee1d289c29a3
|
a1873ff29383c4f3a1cfb7206c2cb72ab0da8b3b
|
refs/heads/master
| 2023-01-23T15:58:59.332695
| 2023-01-18T13:05:13
| 2023-01-18T13:05:13
| 47,158,933
| 6
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,780
|
py
|
import sys,logging
stdout=sys.stdout
sys.path.append('../../')
import JSONData.tdx_data_Day as tdd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick
from matplotlib.finance import volume_overlay3
from matplotlib.dates import num2date
from matplotlib.dates import date2num
import matplotlib.mlab as mlab
import datetime
stock_code = '000002'
start = None
end= None
dl=60
df = tdd.get_tdx_append_now_df_api(code=stock_code, start=start, end=end, dl=dl).sort_index(ascending=True)
# print df.close.T
fig = plt.figure()
# ax = fig.add_subplot(211, sharex=None, sharey=None)
ax = fig.add_subplot(211)
ax.plot(df.close)
ax.set_xticklabels(df.index)
plt.xticks(rotation=30, horizontalalignment='center')
# plt.subplots_adjust(left=0.05, bottom=0.08, right=0.95, top=0.95, wspace=0.15, hspace=0.25)
pad = 0.25
yl = ax.get_ylim()
ax.set_ylim(yl[0]-(yl[1]-yl[0])*pad,yl[1])
# ax2 = ax.twinx()
ax2 = fig.add_subplot(211,sharex=ax)
# ax2.set_position(matplotlib.transforms.Bbox([[0.125,0.1],[0.9,0.32]]))
# ax2.bar([x for x in range(len(df.index))],df.vol)
volume = np.asarray(df.vol)
pos = df['open']-df['close']<0
neg = df['open']-df['close']>=0
idx = df.reset_index().index
ax2.bar(idx[pos],volume[pos],color='red',width=1,align='center')
ax2.bar(idx[neg],volume[neg],color='green',width=1,align='center')
# plt.subplots_adjust(left=0.05, bottom=0.08, right=0.95, top=0.95, wspace=0.15, hspace=0.25)
# ax2 = ax.twinx()
# width = 0.4
# df.vol.plot(kind='bar', color='red', ax=ax, width=width, position=1, sharex=False, sharey=False)
# df.vol.plot(kind='bar', color='red', ax=ax, width=width, position=1)
# df.close.plot(kind='bar', color='blue', ax=ax2, width=width, position=0, sharex=False, sharey=False)
ax_2 = fig.add_subplot(212, sharex=ax, sharey=None)
ax_22 = ax_2.twinx()
ax_2.plot([1, 3, 5, 7, 9])
ax_22.plot([1.0/x for x in [1, 3, 5, 7, 9]])
ax_2.set_xlabel("AX2 X Lablel")
ax_2.set_ylabel("AX2 Y Lablel")
ax_22.set_ylabel("AX2_Twin Y Lablel")
# ax_2 = fig.add_subplot(223, sharex=None, sharey=None)
# ax_22 = ax_2.twinx()
# ax_2.plot([100, 300, 500, 700, 900])
# ax_22.plot([x*x for x in [100, 300, 500, 700, 900]])
# ax_2.set_xlabel("AX3 X Lablel")
# ax_2.set_ylabel("AX3 Y Lablel")
# ax_22.set_ylabel("AX3_Twin Y Lablel")
# ax_2 = fig.add_subplot(224, sharex=None, sharey=None)
# ax_22 = ax_2.twinx()
# ax_2.set_xlabel("AX4 X Lablel")
# ax_2.set_ylabel("AX4 Y Lablel")
# ax_22.set_ylabel("AX4_Twin Y Lablel")
# ax.set_xlabel("Alphabets")
# ax.set_ylabel('Amount')
# ax2.set_ylabel('Price')
plt.subplots_adjust(wspace=0.8, hspace=0.8)
# plt.savefig("t1.png", dpi=300)
plt.show()
'''
show price and vol
datafile = 'data.csv'
r = mlab.csv2rec(datafile, delimiter=';')
# the dates in my example file-set are very sparse (and annoying) change the dates to be sequential
for i in range(len(r)-1):
r['date'][i+1] = r['date'][i] + datetime.timedelta(days=1)
stock_code = '000002'
start = None
end= None
dl=60
df = tdd.get_tdx_append_now_df_api(code=stock_code, start=start, end=end, dl=dl).sort_index(ascending=True)
# r = r.reset_index()
date = df.index.to_datetime().to_pydatetime()
import pdb;pdb.set_trace();
candlesticks = zip(date2num(date),df['open'],df['high'],df['low'],df['close'],df['vol'])
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_ylabel('Quote ($)', size=20)
candlestick(ax, candlesticks,width=1,colorup='g', colordown='r')
# shift y-limits of the candlestick plot so that there is space at the bottom for the volume bar chart
pad = 0.25
yl = ax.get_ylim()
ax.set_ylim(yl[0]-(yl[1]-yl[0])*pad,yl[1])
# create the second axis for the volume bar-plot
ax2 = ax.twinx()
# set the position of ax2 so that it is short (y2=0.32) but otherwise the same size as ax
ax2.set_position(matplotlib.transforms.Bbox([[0.125,0.1],[0.9,0.32]]))
# get data from candlesticks for a bar plot
dates = [x[0] for x in candlesticks]
dates = np.asarray(dates)
volume = [x[5] for x in candlesticks]
volume = np.asarray(volume)
# make bar plots and color differently depending on up/down for the day
pos = df['open']-df['close']<0
neg = df['open']-df['close']>0
ax2.bar(dates[pos],volume[pos],color='green',width=1,align='center')
ax2.bar(dates[neg],volume[neg],color='red',width=1,align='center')
#scale the x-axis tight
ax2.set_xlim(min(dates),max(dates))
# the y-ticks for the bar were too dense, keep only every third one
yticks = ax2.get_yticks()
ax2.set_yticks(yticks[::3])
ax2.yaxis.set_label_position("right")
ax2.set_ylabel('Volume', size=20)
# format the x-ticks with a human-readable date.
xt = ax.get_xticks()
new_xticks = [datetime.date.isoformat(num2date(d)) for d in xt]
ax.set_xticklabels(new_xticks,rotation=45, horizontalalignment='right')
# plt.ion()
plt.show()
'''
|
[
"5208115@qq.com"
] |
5208115@qq.com
|
b5b5bcafe86af53a80ef5011154eda8a1f569e97
|
eb229f1109dd58108b8c9fde432933c713e88ede
|
/touchstone/utilities/__init__.py
|
331d91870f900269c0e5fb2474698194ebd49a8c
|
[
"MIT"
] |
permissive
|
LechuzaAI/touchstone
|
21c07c0f81e1b7152562758c7585532ab3e255ad
|
6893b199f14f34986b475c79b4a41934fcf8e7a5
|
refs/heads/main
| 2023-01-11T03:37:56.350552
| 2020-11-13T18:49:32
| 2020-11-13T18:49:32
| 302,409,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 68
|
py
|
from touchstone.utilities.gae import generalized_advantage_estimate
|
[
"drmiguelalonsojr@gmail.com"
] |
drmiguelalonsojr@gmail.com
|
91892ec892c4c5f1f18e75e2985112b83be033ca
|
c02a742d17ee15b72e010bcd8f3b6f86ed8ebfbf
|
/setup.py
|
224a90a601a3ad600300359b62349b8bcff280e8
|
[] |
no_license
|
jky88/weixinsougou
|
439296aacd8b5ef4e2e4566ba7eeceef0ee4d92a
|
0427cd3f5aa04be356cda5d87146d88ca907e952
|
refs/heads/master
| 2020-04-09T04:09:58.817032
| 2017-11-13T14:12:49
| 2017-11-13T14:12:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
# Automatically created by: shub deploy
from setuptools import setup, find_packages
setup(
name = 'project',
version = '1.0',
packages = find_packages(),
entry_points = {'scrapy': ['settings = weixinsougou.settings']},
)
|
[
"windows.lxy"
] |
windows.lxy
|
40bdf31a657ea2bf2f78e3add7ecf6e3c4c36e96
|
5285d87989438f8635dd41bea94b298a384bc2f1
|
/airline0/list.py
|
742a5d8dd3b2d4549746375d42ada276ab9fcaa2
|
[] |
no_license
|
saedyousef/flask-apps
|
a65d17a3d9b3ed07f87ce536df6f27b364a231fc
|
ba7c50dd43dcf190cefe7208e2b61b49a8ea26aa
|
refs/heads/master
| 2022-04-14T03:14:59.729383
| 2020-04-27T23:59:53
| 2020-04-27T23:59:53
| 259,441,497
| 11
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
import os
from flask import Flask, render_template, request
from models import *
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("DATABASE_URL")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.init_app(app)
def main():
flights = Flight.query.all()
for flight in flights:
print(f"{flight.origin} to {flight.destination} lasting {flight.duration}")
if __name__ == "__main__":
with app.app_context():
main()
|
[
"saed.alzaben@gmail.com"
] |
saed.alzaben@gmail.com
|
8afca40a8cd8620496c43177263ba9b063b969f0
|
6a81e51745fc9598d24fb51e8a3d817c707435d7
|
/EDU 3.0 TUTORIAIS 1 2020/Edu 3.0_TAC-master/educacao_3_0/wsgi.py
|
a330243d7f653c076392fa4b526fc446e4c172fd
|
[] |
no_license
|
bmedeirosneto/TacProgWeb
|
fbd7ecb9191ef2703580edc9300d15ee71d37058
|
1daeda3b97bb901bd4f69d649c1d735c3509d9de
|
refs/heads/master
| 2021-12-03T21:19:58.212572
| 2021-08-25T12:35:36
| 2021-08-25T12:35:36
| 226,856,253
| 0
| 13
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for educacao_3_0 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'educacao_3_0.settings')
application = get_wsgi_application()
|
[
"medeiros@filosofiacienciaarte.org"
] |
medeiros@filosofiacienciaarte.org
|
a30686b6eabb2cac56f288acadb5c196580ebf70
|
e6947a8ecc14ddb3c078321958856f888953f4fa
|
/my_project.py
|
d96f10703e7f2af3f045b4ee516f87f077c77cb7
|
[] |
no_license
|
raja073/SimpleMovieDB
|
a5dd4b924f1ecb8d04a61c9884e25e6a51af5c3c
|
4d28dba684ea0ebf6ad4b78af4c2bdd13b072406
|
refs/heads/master
| 2021-09-05T13:59:35.372062
| 2018-01-28T14:06:57
| 2018-01-28T14:06:57
| 118,252,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,032
|
py
|
from flask import Flask, render_template, request, redirect, url_for
app = Flask(__name__) ### Instance of the Flask with name of the running application as an argument
#################################################################################################
# Adding database to Flask application
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Movie, Actor
engine = create_engine('sqlite:///movieactors.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind = engine)
session = DBSession()
#################################################################################################
@app.route('/')
@app.route('/movies')
def movieList():
movies = session.query(Movie).all()
return render_template('full_movie_list.html', movies = movies)
@app.route('/movie/<int:movie_id>/')
def movieActors(movie_id):
movie = session.query(Movie).filter_by(id = movie_id).one()
actors = session.query(Actor).filter_by(movie_id = movie.id)
return render_template('menu.html', movie = movie, actors = actors)
@app.route('/movie/new/', methods=['GET','POST'])
def newMovie():
if request.method == 'POST':
newMov = Movie(name=request.form['name'])
session.add(newMov)
session.commit()
return redirect(url_for('movieList'))
else:
return render_template('new_movie.html')
# Task 1: Create route for newActor function here
@app.route('/movie/<int:movie_id>/new/', methods=['GET','POST'])
def newActor(movie_id):
if request.method == 'POST':
newAct = Actor(name=request.form['name'], gender=request.form['gender'], \
age=request.form['age'], biography=request.form['bio'], movie_id=movie_id)
session.add(newAct)
session.commit()
return redirect(url_for('movieActors', movie_id=movie_id))
else:
return render_template('new_actor.html', movie_id=movie_id)
# Task 2: Create route for editActor function here
@app.route('/movie/<int:movie_id>/<int:actor_id>/edit/', methods=['GET','POST'])
def editActor(movie_id, actor_id):
editedActor = session.query(Actor).filter_by(id=actor_id).one()
if request.method == 'POST':
if request.form['name']:
editedActor.name = request.form['name']
session.add(editedActor)
session.commit()
return redirect(url_for('movieActors', movie_id=movie_id))
else:
return render_template('edit_actors.html', movie_id=movie_id, actor_id=actor_id, i=editedActor)
# Task 3: Create route for deleteActor function here
@app.route('/movie/<int:movie_id>/<int:actor_id>/delete/', methods=['GET','POST'])
def deleteActor(movie_id, actor_id):
actorToDelete = session.query(Actor).filter_by(id=actor_id).one()
if request.method == 'POST':
session.delete(actorToDelete)
session.commit()
return redirect(url_for('movieActors', movie_id=movie_id))
else:
return render_template('delete_actor.html', i=actorToDelete)
if __name__ == '__main__':
app.debug = True
app.run(host = '0.0.0.0', port = 5000)
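A small, hypothetical smoke test for the routes above using Flask's built-in test client; it assumes the module is importable as my_project and that movieactors.db has already been created via database_setup.
# Hypothetical smoke test; importing 'my_project' and an existing
# movieactors.db created by database_setup.py are assumed.
from my_project import app

with app.test_client() as client:
    print(client.get('/movies').status_code)                                       # expect 200
    print(client.post('/movie/new/', data={'name': 'Example Movie'}).status_code)  # expect 302 redirect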
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
092c2800ed58c6b45479c1f65d4df860935259da
|
6c7b36f69ad7a55e2ad3262f8a1082e9f1dd0d58
|
/src/opal/core/log.py
|
bc966c8dbd1566523d835f54bbaf2b8fae4b1ec0
|
[] |
no_license
|
kiendc/opal
|
d338805f5d38fc67441813cd7507757291db2790
|
99a16ff4f6da396d9a02f5f4cfe0a24c1fb7051d
|
refs/heads/master
| 2020-05-20T06:28:53.314038
| 2012-10-24T14:18:35
| 2012-10-24T14:18:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,516
|
py
|
import re
import logging
class HandlerDescription:
def __init__(self, handler):
self.file_name = handler.baseFilename
self.level = handler.level
def generate_handler(self):
handler = logging.FileHandler(filename=self.file_name)
        handler.setLevel(self.level)
return handler
class OPALLogger:
    '''
    We specialize Python's logging facility with this class to support
    pickling a logger whose handlers are streaming objects (which cannot
    be pickled directly).
    '''
def __init__(self, name=None, handlers=[]):
self.name = name
self.initialize()
# Initialize an empty list of descriptions
# of the user-required handlers
self.handler_descriptions = []
# Get the description of the user-required handlers
# and add it to logger
for hdlr in handlers:
self.handler_descriptions.append(HandlerDescription(hdlr))
self.logger.addHandler(hdlr)
return
def initialize(self):
self.logger = logging.getLogger(self.name)
        self.logger.setLevel(logging.DEBUG) # Most verbose level, so the
                                            # effective level is decided by
                                            # each handler
# A default handler is created for logging to file with INFO level
handler = logging.FileHandler(filename='/var/tmp/opal.log')
handler.setFormatter(logging.Formatter('%(asctime)s - ' +
'%(name)s: ' +
'%(message)s'))
handler.setLevel(logging.INFO)
self.logger.addHandler(handler)
return
def __getstate__(self):
# To serialize a OPALLogger object, we save only
# the name and the descriptions of the user-required handlers
dict = {}
dict['handler_descriptions'] = self.handler_descriptions
dict['name'] = self.name
return dict
def __setstate__(self, dict):
        # The expected dict is a two-element dictionary.
        # The first element has the key 'handler_descriptions' and its value
        # is a list of handler descriptions. The second element is the name
        # of the logger.
self.name = dict['name']
# Initialize the logger with the specified name
self.initialize()
# Create the handler descriptions for unpickled object
# and create handlers for the logger
self.handler_descriptions = dict['handler_descriptions']
for desc in self.handler_descriptions:
handler = desc.generate_handler()
self.logger.addHandler(handler)
return
def log(self, message, level=logging.INFO):
self.logger.log(level, message + '\n')
return
class Debugger:
def __init__(self, fileName='/var/tmp/opal-debug.log'):
self.logger = logging.getLogger('DEBUG')
self.logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename=fileName)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter('%(asctime)s - ' +
'%(name)s: ' +
'%(message)s'))
self.logger.addHandler(handler)
return
def log(self, message, level=logging.DEBUG):
self.logger.log(level, message)
return
debugger = Debugger()
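A brief, hypothetical round trip showing what the __getstate__/__setstate__ pair above enables: only the logger name and the handler descriptions are pickled, and the file handlers are rebuilt on load. The log path is invented and /var/tmp is assumed writable.
# Hypothetical round trip; the OPALLogger class above is assumed to be in scope
# and /var/tmp is assumed writable.
import logging
import pickle

extra = logging.FileHandler(filename='/var/tmp/opal-example.log')
extra.setLevel(logging.DEBUG)

logger = OPALLogger(name='example', handlers=[extra])
logger.log('before pickling')

restored = pickle.loads(pickle.dumps(logger))   # handlers regenerated from descriptions
restored.log('after unpickling', level=logging.DEBUG)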
|
[
"kiendc@Cong-Kien-Dangs-Mac-mini.local"
] |
kiendc@Cong-Kien-Dangs-Mac-mini.local
|
874faf954ae174bedcfe8ce4f42f219ac04bd355
|
14449108de18a8e956830cd7d5107bb38de41c5d
|
/workshopvenues/venues/migrations/0009_auto__del_field_venue_address.py
|
45329577f0f1c85666401d3a4ba848f7477f2436
|
[
"BSD-3-Clause"
] |
permissive
|
andreagrandi/workshopvenues
|
736e53ccb6ff0b15503e92a5246b945f615d2ff8
|
21978de36f443296788727d709f7f42676b24484
|
refs/heads/master
| 2021-05-16T03:00:23.879925
| 2014-03-18T15:10:00
| 2014-03-18T15:10:00
| 8,843,235
| 1
| 3
| null | 2015-10-26T11:11:20
| 2013-03-17T23:19:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,698
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Venue.address'
db.delete_column(u'venues_venue', 'address_id')
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Venue.address'
raise RuntimeError("Cannot reverse this migration. 'Venue.address' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Venue.address'
db.add_column(u'venues_venue', 'address',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['venues.Address']),
keep_default=False)
models = {
u'venues.address': {
'Meta': {'object_name': 'Address'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Country']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.country': {
'Meta': {'object_name': 'Country'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.facility': {
'Meta': {'object_name': 'Facility'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.image': {
'Meta': {'object_name': 'Image'},
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Venue']"})
},
u'venues.venue': {
'Meta': {'object_name': 'Venue'},
'capacity': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'contact': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contact_twitter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'cost': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'facilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['venues.Facility']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['venues']
|
[
"a.grandi@gmail.com"
] |
a.grandi@gmail.com
|
ff9d5d5e5194ae62f6f8a2888b5e8c36abe265af
|
8cb6d50076c527b4c81d21b992fc93f77263adc5
|
/orden/models.py
|
f0b736f5697c7219518a2d729725177f23df2fa5
|
[] |
no_license
|
alrvivas/CrevenApp
|
6b9fefc4661a32cdf00ebb4a3eb869bf778f67e7
|
190291cfc798cbc52ba4cdbfa258ef0b983f7249
|
refs/heads/master
| 2020-06-04T20:24:03.685451
| 2015-02-12T17:36:01
| 2015-02-12T17:36:01
| 30,713,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,473
|
py
|
from django.db import models
from distutils.version import LooseVersion
from django.contrib.auth.models import User
from cliente.models import Cliente
from producto.models import Product
from orden.managers import OrderManager
from util.fields import CurrencyField
from jsonfield.fields import JSONField
from django.db.models.aggregates import Sum
from django.db.models.signals import pre_delete
from django.core.urlresolvers import reverse
from decimal import Decimal
import django
# Create your models here.
class Order(models.Model):
objects = OrderManager()
"""
A model representing an Order.
An order is the "in process" counterpart of the shopping cart, which holds
stuff like the shipping and billing addresses (copied from the User
profile) when the Order is first created), list of items, and holds stuff
like the status, shipping costs, taxes, etc...
"""
PROCESSING = 10 # New order, addresses and shipping/payment methods chosen (user is in the shipping backend)
CONFIRMING = 20 # The order is pending confirmation (user is on the confirm view)
CONFIRMED = 30 # The order was confirmed (user is in the payment backend)
COMPLETED = 40 # Payment backend successfully completed
SHIPPED = 50 # The order was shipped to client
CANCELED = 60 # The order was canceled
CANCELLED = CANCELED # DEPRECATED SPELLING
PAYMENT = 30 # DEPRECATED!
STATUS_CODES = (
(PROCESSING, ('Procesando')),
(CONFIRMING, ('Confirmando')),
(CONFIRMED, ('Confirmada')),
(COMPLETED, ('Completada')),
(SHIPPED, ('Enviada')),
(CANCELED, ('Cancelada')),
)
# If the user is null, the order was created with a session
user = models.ForeignKey(User, blank=True, null=True, verbose_name=('User'))
cliente = models.ForeignKey(Cliente,null=True, blank=True)
status = models.IntegerField(choices=STATUS_CODES, default=PROCESSING,verbose_name=('Status'))
order_subtotal = CurrencyField(verbose_name=('Orden subtotal'))
order_total = CurrencyField(verbose_name=('Orden Total'))
order_totalpeso = models.DecimalField(max_digits=10,decimal_places=3,null=True)
shipping_address_text = models.TextField(('Direccion de Envio'), blank=True, null=True)
billing_address_text = models.TextField(('Direccion de Facturacion'), blank=True, null=True)
created = models.DateTimeField(auto_now_add=True,verbose_name=('Creado'))
modified = models.DateTimeField(auto_now=True, verbose_name=('Updated'))
cart_pk = models.PositiveIntegerField(('Cart primary key'), blank=True, null=True)
class Meta(object):
verbose_name = ('Orden')
verbose_name_plural = ('Ordenes')
def __unicode__(self):
return ('Orden ID: %(id)s') % {'id': self.pk}
def get_absolute_url(self):
return reverse('order_detail', kwargs={'pk': self.pk})
def is_paid(self):
"""Has this order been integrally paid for?"""
return self.amount_paid >= self.order_total
    is_payed = is_paid #Backward compatibility, deprecated spelling
def is_completed(self):
return self.status == self.COMPLETED
def get_status_name(self):
return dict(self.STATUS_CODES)[self.status]
@property
def amount_paid(self):
"""
The amount paid is the sum of related orderpayments
"""
from .models import OrderPayment
sum_ = OrderPayment.objects.filter(order=self).aggregate(sum=Sum('amount'))
result = sum_.get('sum')
if result is None:
result = Decimal(0)
return result
    amount_payed = amount_paid #Backward compatibility, deprecated spelling
@property
def shipping_costs(self):
from .models import ExtraOrderPriceField
sum_ = Decimal('0.00')
cost_list = ExtraOrderPriceField.objects.filter(order=self).filter(
is_shipping=True)
for cost in cost_list:
sum_ += cost.value
return sum_
@property
def short_name(self):
"""
A short name for the order, to be displayed on the payment processor's
website. Should be human-readable, as much as possible
"""
return "%s-%s" % (self.pk, self.order_total)
def set_billing_address(self, billing_address):
"""
        Copy the billing address onto the order, using the address object's
        as_text() method when it is available.
        You can override this method to process the address at a finer
        granularity, e.g. copy the address instance and save a FK to it in
        your order class.
"""
if hasattr(billing_address, 'as_text') and callable(billing_address.as_text):
self.billing_address_text = billing_address.as_text()
self.save()
def set_shipping_address(self, shipping_address):
"""
        Copy the shipping address onto the order, using the address object's
        as_text() method when it is available.
        You can override this method to process the address at a finer
        granularity, e.g. copy the address instance and save a FK to it in
        your order class.
"""
if hasattr(shipping_address, 'as_text') and callable(shipping_address.as_text):
self.shipping_address_text = shipping_address.as_text()
self.save()
# We need some magic to support django < 1.3 that has no support
# models.on_delete option
f_kwargs = {}
if LooseVersion(django.get_version()) >= LooseVersion('1.3'):
f_kwargs['on_delete'] = models.SET_NULL
class OrderItem(models.Model):
"""
A line Item for an order. """
order = models.ForeignKey(Order, related_name='items', verbose_name=('Orden'))
product_reference = models.CharField(max_length=255, verbose_name=('Product reference'))
product_name = models.CharField(max_length=255, null=True, blank=True, verbose_name=('Product name'))
product = models.ForeignKey(Product, verbose_name=('Producto'), null=True, blank=True, **f_kwargs)
unit_price = CurrencyField(verbose_name=('Unit price'))
quantity = models.IntegerField(verbose_name=('Cantidad'))
line_subtotal = CurrencyField(verbose_name=('Line subtotal'))
line_total = CurrencyField(verbose_name=('Line total'))
line_subtotalpeso = models.DecimalField(max_digits = 30,decimal_places = 3,null=True)
line_totalpeso = models.DecimalField(max_digits = 30,decimal_places = 3,null=True)
class Meta(object):
verbose_name = ('Orden item')
verbose_name_plural = ('Orden items')
def save(self, *args, **kwargs):
if not self.product_name and self.product:
self.product_name = self.product.get_name()
super(OrderItem, self).save(*args, **kwargs)
def clear_products(sender, instance, using, **kwargs):
for oi in OrderItem.objects.filter(product=instance):
oi.product = None
oi.save()
if LooseVersion(django.get_version()) < LooseVersion('1.3'):
pre_delete.connect(clear_products, sender=Product)
class OrderExtraInfo(models.Model):
order = models.ForeignKey(Order, related_name="extra_info",verbose_name=('Order'))
text = models.TextField(verbose_name=('Extra info'), blank=True)
class Meta(object):
verbose_name = ('Orden informacion extra')
verbose_name_plural = ('Orden informacion extra')
class ExtraOrderPriceField(models.Model):
"""
This will make Cart-provided extra price fields persistent since we want
to "snapshot" their statuses at the time when the order was made
"""
order = models.ForeignKey(Order, verbose_name=('Order'))
label = models.CharField(max_length=255, verbose_name=('Label'))
value = CurrencyField(verbose_name=('Amount'))
data = JSONField(null=True, blank=True, verbose_name=('Serialized extra data'))
# Does this represent shipping costs?
is_shipping = models.BooleanField(default=False, editable=False, verbose_name=('Is shipping'))
class Meta(object):
verbose_name = ('Extra order price field')
verbose_name_plural = ('Extra order price fields')
class ExtraOrderItemPriceField(models.Model):
"""
This will make Cart-provided extra price fields persistent since we want
to "snapshot" their statuses at the time when the order was made
"""
order_item = models.ForeignKey(OrderItem, verbose_name=('Order item'))
label = models.CharField(max_length=255, verbose_name=('Label'))
value = CurrencyField(verbose_name=('Amount'))
data = JSONField(null=True, blank=True, verbose_name=('Serialized extra data'))
class Meta(object):
verbose_name = ('Extra order item price field')
verbose_name_plural = ('Extra order item price fields')
class OrderPayment(models.Model):
"""
A class to hold basic payment information. Backends should define their own
    more complex payment types should they need to store more information
"""
order = models.ForeignKey(Order, verbose_name=('Order'))
# How much was paid with this particular transfer
amount = CurrencyField(verbose_name=('Amount'))
transaction_id = models.CharField(max_length=255, verbose_name=('Transaction ID'), help_text=("The transaction processor's reference"))
payment_method = models.CharField(max_length=255, verbose_name=('Payment method'), help_text=("The payment backend used to process the purchase"))
class Meta(object):
verbose_name = ('Order payment')
verbose_name_plural = ('Order payments')
|
[
"alr.vivas@gmail.com"
] |
alr.vivas@gmail.com
|
c5a314056e8cf06ac9db444cce8d020213784d5d
|
217a76bf468ec80547f5d59ff2a560c794ad7800
|
/instibuddydjango/scrapdata/apps.py
|
6c320b2307eebb2b56b3c9411423a92eae1af745
|
[] |
no_license
|
SahilKumar2203/instibuddy
|
c92ce135bb5820fdc30bc93d602f71af229eaef4
|
ea3a38d7ceb44959451191eaed96b8f45f1317d3
|
refs/heads/master
| 2021-05-16T22:25:12.265459
| 2020-07-08T16:40:11
| 2020-07-08T16:40:11
| 250,494,547
| 0
| 0
| null | 2020-03-27T09:38:38
| 2020-03-27T09:38:38
| null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
from django.apps import AppConfig
class ScrapdataConfig(AppConfig):
name = 'scrapdata'
|
[
"rishabharya32@gmail.com"
] |
rishabharya32@gmail.com
|
38ca5c408a737d5d08a18256429c005182c0e566
|
f01d6884bb99ddf0c8d9c76d39d9480be78a5581
|
/tests/test_parser_cli.py
|
0e962925e028677134a685e03d0c70055ea0e254
|
[
"MIT"
] |
permissive
|
manageacloud/manageacloud-cli
|
906e0617d01c6561e1e51d99d12e1f854825afa3
|
e782bb4f207b84a10d4d96fa421227d6fe53d3dc
|
refs/heads/master
| 2022-05-28T17:03:45.169824
| 2022-04-07T00:44:50
| 2022-04-07T00:44:50
| 36,004,075
| 6
| 4
| null | 2015-10-27T08:08:32
| 2015-05-21T09:34:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,114
|
py
|
import unittest
import mock
from argparse import ArgumentTypeError
from tests.mock_data import *
import maccli.parser_cli
class ParserCliTestCase(unittest.TestCase):
def test_validate_environment(self):
self.assertRaises(ArgumentTypeError, maccli.parser_cli.validate_environment, "invalid")
self.assertRaises(ArgumentTypeError, maccli.parser_cli.validate_environment, "invalid = spaces")
self.assertRaises(ArgumentTypeError, maccli.parser_cli.validate_environment, "BENCH_CREATION=")
self.assertEqual(maccli.parser_cli.validate_environment("UNO=dos"), {'UNO':'dos'})
self.assertEqual(maccli.parser_cli.validate_environment("uno=dos"), {'uno':'dos'})
self.assertEqual(maccli.parser_cli.validate_environment("A_VALUE=dos2"), {'A_VALUE':'dos2'})
self.assertEqual(maccli.parser_cli.validate_environment("a_value=dos2"), {'a_value':'dos2'})
self.assertEqual(maccli.parser_cli.validate_environment("a_value=dos2=3"), {'a_value':'dos2=3'})
self.assertEqual(maccli.parser_cli.validate_environment("""a_value=UNO
DOS
TRES"""), {'a_value':'''UNO
DOS
TRES'''})
self.assertEqual(maccli.parser_cli.validate_environment("BENCH_CREATION=-i -s 70"), {'BENCH_CREATION':'-i -s 70'})
def test_validate_hd(self):
self.assertEqual(maccli.parser_cli.validate_hd("/dev/sda1:100"), {'/dev/sda1':'100'})
self.assertEqual(maccli.parser_cli.validate_hd("/dev/sda1:50"), {'/dev/sda1':'50'})
self.assertEqual(maccli.parser_cli.validate_hd("attachment:50:ssd"), {'attachment':'50:ssd'})
self.assertEqual(maccli.parser_cli.validate_hd("/dev/ok:100"), {'/dev/ok':'100'})
self.assertEqual(maccli.parser_cli.validate_hd("/dev/sda1:100:ok"), {'/dev/sda1':'100:ok'})
self.assertEqual(maccli.parser_cli.validate_hd("/dev/sda1:100:ok:1000"), {'/dev/sda1':'100:ok:1000'})
#self.assertRaises(ArgumentTypeError, maccli.parser_cli.validate_hd, "/dev/not/ok:100")
#self.assertRaises(ArgumentTypeError, maccli.parser_cli.validate_hd, "/not/ok:100")
self.assertRaises(ArgumentTypeError, maccli.parser_cli.validate_hd, "/dev/ok:wtf")
self.assertRaises(ArgumentTypeError, maccli.parser_cli.validate_hd, "/dev/ok")
self.assertRaises(ArgumentTypeError, maccli.parser_cli.validate_hd, "100")
self.assertRaises(ArgumentTypeError, maccli.parser_cli.validate_hd, "/dev/sda1:100:not-ok")
def test_validate_port(self):
self.assertEqual(maccli.parser_cli.validate_port("22"), [22])
self.assertEqual(maccli.parser_cli.validate_port("22,8080"), [22,8080])
self.assertEqual(maccli.parser_cli.validate_port("1,22,8080,65535"), [1,22,8080,65535])
self.assertRaises(ArgumentTypeError, maccli.parser_cli.validate_port, "0,22,8080,65535")
self.assertRaises(ArgumentTypeError, maccli.parser_cli.validate_port, "22,8080,65536")
self.assertRaises(ArgumentTypeError, maccli.parser_cli.validate_port, "22,sssss8080")
self.assertRaises(ArgumentTypeError, maccli.parser_cli.validate_port, "sssss8080")
|
[
"ruben@manageacloud.com"
] |
ruben@manageacloud.com
|
4d312a1a57d49be61935deedc393a5994769225e
|
90a756a8a0e470761dfad47e67293e5f880882cd
|
/hex.py
|
600bb477fbc89e37b7b4ab35ac9f93b1decf8bd8
|
[] |
no_license
|
Gaspi/panda3d-draft
|
d62d3b2624adfeaccc0cbb6a6e70d552a36b261e
|
665cc2ca6da6b9366ce1952bf4ec2d3eb426d904
|
refs/heads/master
| 2022-12-04T03:21:15.196026
| 2020-08-22T15:59:24
| 2020-08-22T15:59:24
| 287,313,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,462
|
py
|
import math, random
class Point:
__slots__ = ['x', 'y']
def __init__(self,x,y):
self.x=x
self.y=y
def __repr__(self):
return "Point(%d,%d)" % (self.x, self.y)
def __str__(self):
return "(%d,%d)" % (self.x, self.y)
def __hash__(self):
return hash( (self.x,self.y) )
def __eq__(self,other):
return (self.x,self.y) == (other.x,other.y)
def isVertex(self):
return self.y % 3 == 0 and (self.x + self.y // 3) % 2 == 0
def isCenter(self):
return self.y % 3 != 0 and (self.x + self.y // 3) % 2 + self.y % 3 == 2
def getTriangles(self):
ic = self.x
jc = self.y // 3
return [ Triangle(ic ,jc ),
Triangle(ic-1,jc ),
Triangle(ic-1,jc-1),
Triangle(ic ,jc-1),
Triangle(ic+1,jc-1),
Triangle(ic+1,jc ) ]
def getAdjacents(self):
return [ Point(self.x+2,self.y ),
Point(self.x+1,self.y+3),
Point(self.x-1,self.y+3),
Point(self.x-2,self.y ),
Point(self.x-1,self.y-3),
Point(self.x+1,self.y-3) ]
def getEdges(self):
return [ Edge(self,p) for p in self.getAdjacents() ]
def d2(self,pt):
return (self.x-pt.x)*(self.x-pt.x)+(self.y-pt.y)*(self.y-pt.y)
def d(self,pt):
return math.sqrt(self.d2(pt))
def d1(self,pt):
return abs(self.x-pt.x) + abs(self.y-pt.y)
def getVertices(self):
return [ self ]
class Edge:
__slots__ = ['a', 'b']
def __init__(self,a,b):
self.a=a
self.b=b
def __repr__(self):
return "Edge(%d,%d)" % (self.a, self.b)
def __str__(self):
return "(%d -> %d)" % (self.a, self.b)
def __hash__(self):
return hash( (self.a,self.b) )
def __eq__(self,other):
return (self.a,self.b) == (other.a,other.b)
def getVertices(self):
        return [self.a, self.b]
# Triangle (0,0) is facing down with center (0,2)
# - vertices are (-1,3) , (1,3) , (0,0)
# - adjacent triangles are (1,0) , (-1,0) , (0,1)
class Triangle:
__slots__ = ['i', 'j']
def __init__(self,i,j):
self.i=i
self.j=j
def __repr__(self):
return "Triangle(%d,%d)" % (self.i, self.j)
def __str__(self):
return self.__repr__()
def __hash__(self):
return hash( (self.i,self.j) )
def __eq__(self,other):
return (self.i,self.j) == (other.i,other.j)
def isDown(self):
return (self.i ^ self.j) % 2 == 0
def isUp(self):
return not self.isDown()
def getCenter(self):
return Point(self.i, 3*self.j + (2 if self.isDown() else 1))
def getVertices(self):
i = self.i
j3 = 3*self.j
if self.isDown():
return [ Point(i+1,j3+3 ), Point(i-1,j3+3), Point(i,j3) ]
else:
return [ Point(i-1,j3 ), Point(i+1,j3), Point(i,j3+3) ]
def getBase(self):
j3 = 3*self.j + (0 if self.isDown() else 3)
return (Point(self.i+1,j3), Point(self.i-1,j3))
def getEdges(self):
v = self.getVertices()
return [ Edge(v[i],v[(i+1)%3]) for i in range(3) ]
    def getHex(self):
        i = (self.i+1)//3
        return Hex( i , (self.j+1-(i%2))//2 )
def getAdjacents(self):
return [
Triangle(self.i+1,self.j),
Triangle(self.i-1,self.j),
Triangle(self.i ,self.j + (1 if self.isDown() else -1) ) ]
# Hex (0,0) has center (0,0)
# Its triangles are
# - N ( 0, 0)
# - NW (-1, 0)
# - SW (-1,-1)
# - S ( 0,-1)
# - SE ( 1,-1)
# - NE ( 1, 0)
# Hex (0,1) is directly north of (0,0):
# - center is (0,6)
# Hex (1,0) is north east of (0,0) and south-east of (0,1):
# - center is (3,3)
class Hex:
__slots__ = ['i', 'j']
def __init__(self,i,j):
self.i=i
self.j=j
def __repr__(self):
return "Hex(%d,%d)" % (self.i, self.j)
def __str__(self):
return self.__repr__()
def __hash__(self):
return hash( (self.i,self.j) )
def __eq__(self,other):
return (self.i,self.j) == (other.i,other.j)
# NE: i + n, j + (n + (i%2) )//2
# SE: i + n, j - (n - (i%2) + 1)//2
# N : i , j + n
def path_NE_N(self,h):
n = h.i - self.i
m = self.j + (n+(self.i%2))//2 - h.j
return (n,m)
def path_SE_N(self,h):
n = h.i - self.i
m = self.j - (n+(self.i%2)+1)//2 - h.j
return (n,m)
def path_NE_SE(self,h):
m = h.j - self.j + (self.i-1)//2 - (h.i-1)//2
n = h.i - self.i + m
return (n,m)
def dist(self,h):
dnen = self.path_NE_N(h)
dsen = self.path_SE_N(h)
dnese= self.path_NE_SE(h)
return min( abs(dnen[0])+abs(dnen[1]),
abs(dsen[0])+abs(dsen[1]),
abs(dnese[0])+abs(dnese[1]) )
def _center(self):
return (3*self.i, 6*self.j + 3*(self.i%2))
def getCenter(self):
return Point(*self._center())
def getVertices(self):
xc,yc=self._center()
return [ Point(xc+dx,yc+dy)
for (dx,dy) in
[ (2,0), (1,3), (-1,3), (-2,0),(-1,-3), (1,-3)] ]
def getEdges(self):
v = self.getVertices()
return [ Edge(v[i],v[(i+1)%6]) for i in range(6) ]
def getTriangles(self):
ic = 3*self.i
jc = 2*self.j + (self.i % 2)
return [ Triangle(ic ,jc ),
Triangle(ic-1,jc ),
Triangle(ic-1,jc-1),
Triangle(ic ,jc-1),
Triangle(ic+1,jc-1),
Triangle(ic+1,jc ) ]
def getN(self):
return Hex(self.i,self.j+1)
def getS(self):
return Hex(self.i,self.j-1)
def getNE(self):
return Hex(self.i+1,self.j+(self.i%2))
def getNW(self):
return Hex(self.i-1,self.j+(self.i%2))
def getSE(self):
return Hex(self.i+1,self.j-1+(self.i%2))
def getSW(self):
return Hex(self.i-1,self.j-1+(self.i%2))
def getAdjacents(self):
return [ self.getN(), self.getNW(), self.getSW(),
self.getS(), self.getSE(), self.getNE() ]
def hexGrid(i0,irange,j0,jrange):
    # irange / jrange are extents (number of columns / rows starting at i0 / j0),
    # which is how hexCircle below calls this helper
    return [ Hex(i,j) for i in range(i0,i0+irange) for j in range(j0,j0+jrange) ]
def hexCircle(center,radius):
return [ h for h in hexGrid(center.i-radius,2*radius+1,center.j-radius,2*radius+1)
if center.dist(h) <= radius ]
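# Minimal usage sketch of the classes above. Point is assumed to be the 2-D point
# class defined earlier in this module (it is what getCenter/getVertices return);
# the expected values follow directly from the layout comments above.
if __name__ == "__main__":
    t = Triangle(0, 0)
    print(t.isDown())             # True: (0 ^ 0) % 2 == 0
    print(t.getVertices())        # the three Points (1,3), (-1,3), (0,0)
    print(t.getAdjacents())       # [Triangle(1,0), Triangle(-1,0), Triangle(0,1)]
    h = Hex(0, 0)
    print(h.getCenter())          # the Point at (0, 0)
    print(h.getAdjacents())       # [Hex(0,1), Hex(-1,0), Hex(-1,-1), Hex(0,-1), Hex(1,-1), Hex(1,0)]
    print(len(h.getTriangles()))  # 6 -- a hexagon is tiled by six triangles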
|
[
"gaspard.ferey@inria.fr"
] |
gaspard.ferey@inria.fr
|
74367f4ca6450969099765912f745207351a2c9c
|
e44c21d65e13a976e16ccabe4eccd952adfdddac
|
/08/b.py
|
2803d919358ac113b72303320432e3b0c3b4cdfa
|
[] |
no_license
|
kscharlund/aoc_2020
|
f5295226543fe1afd5b0eb79f21cfafe65cfbf58
|
7500b7761de618c513c781b00ccb6c72fc597f2e
|
refs/heads/master
| 2023-02-08T00:08:56.752126
| 2020-12-26T08:45:05
| 2020-12-26T08:45:05
| 319,719,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,413
|
py
|
import sys
from pprint import pprint
def parse_op(line):
op, arg = line.strip().split()
return op, int(arg.replace('+', ''))
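# e.g. parse_op("jmp +4\n") -> ("jmp", 4), parse_op("acc -3\n") -> ("acc", -3)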
next_pc = {
'acc': lambda pc, arg: pc + 1,
'nop': lambda pc, arg: pc + 1,
'jmp': lambda pc, arg: pc + arg,
}
alt_next_pc = {
'acc': lambda pc, arg: pc + 1,
'jmp': lambda pc, arg: pc + 1,
'nop': lambda pc, arg: pc + arg,
}
def find_terminals(parents, child):
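    """Walk the parents map (instruction index -> its predecessors) backwards from
    child and return the set of ancestor instructions that have no predecessor,
    i.e. the entry points from which child can be reached."""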
if child not in parents:
return {child}
terminals = set()
for parent in parents[child]:
terminals |= find_terminals(parents, parent)
return terminals
def main():
input_operations = [parse_op(line) for line in sys.stdin.readlines()]
for altered_index in range(len(input_operations)):
if input_operations[altered_index][0] == 'acc':
continue
operations = input_operations[:]
old_op, old_arg = input_operations[altered_index]
operations[altered_index] = ('jmp' if old_op == 'nop' else 'nop', old_arg)
next_pcs = [next_pc[op[0]](pc, op[1]) for pc, op in enumerate(operations)]
parents = {}
for parent, child in enumerate(next_pcs):
parents.setdefault(child, []).append(parent)
terminals = find_terminals(parents, len(operations))
if 0 in terminals:
print(altered_index, old_op, old_arg)
break
if __name__ == '__main__':
main()
|
[
"kalle@scharlund.se"
] |
kalle@scharlund.se
|
fec5dfca15a354781094c991dd2f486c90f6b869
|
f6f247c836c708969568506e70103e87dc20c584
|
/урок 1/stroki.py
|
b6f58c6da7d63c9e638850606b3e38d821eb9991
|
[] |
no_license
|
exel14/first_gitproject
|
4ad589023c7287e589ac19675a6589e0e0bfb09d
|
aca5687b092a5176516d0e839ce4cd3e13d41770
|
refs/heads/master
| 2022-12-31T13:35:00.612393
| 2020-10-01T12:13:26
| 2020-10-01T12:13:26
| 294,680,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
text = "fjdisjfisdfj Vasya fsijfoisd"
imya = 'Vasya'
if imya in text:
print('Yes')
else:
print('No')
|
[
"showmetheway220@gmail.com"
] |
showmetheway220@gmail.com
|
bb2e2e6db053a76895cf456bd9e0322b88fad9c1
|
68ac5bf4a7e4ad7478b7e1ac45b8540a14826402
|
/ergo/publishconf.py
|
e8df03c1c829ce52ba4ce5de454581c95c0a9ea0
|
[] |
no_license
|
doobeh/ergo
|
3833d8c5663c6a9d3aaac9904dff430eee00110b
|
ee075146d4cbb4eed2297d60436ea000af34812a
|
refs/heads/master
| 2020-05-30T10:41:47.420500
| 2015-10-09T15:17:59
| 2015-10-09T15:17:59
| 27,210,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
this_directory = os.path.dirname(os.path.abspath(__file__))
from pelicanconf import *
SITEURL = 'http://ergo.io'
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
|
[
"anthony@thefort.org"
] |
anthony@thefort.org
|
d5f34735f201edeb1130c4cb2a9efc396cbf184e
|
1ec8734beba25739979cbd4a9414a95273cce6aa
|
/10.9/移除元素.py
|
f3a3c26997d12fbc85a770412e56ce40c9f3a40b
|
[] |
no_license
|
MATATAxD/untitled1
|
4431e4bc504e74d9a96f54fd6065ce46d5d9de40
|
18463f88ce60036959aabedabf721e9d938bacfb
|
refs/heads/master
| 2023-01-01T23:16:30.140947
| 2020-10-23T04:32:38
| 2020-10-23T04:32:38
| 306,529,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
from typing import List
def removeElement(nums:List[int],val:int)->int:
fast = 0
slow = 0
while fast < len(nums):
if nums[fast]== val:
fast +=1
else:
nums[slow] = nums [fast]
slow +=1
fast +=1
return slow
a = [1,2,3,4,5,6]
print(removeElement(a,1))
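# prints 5; after the call the first five slots of a hold [2, 3, 4, 5, 6]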
|
[
"502513072@qq.com"
] |
502513072@qq.com
|
6fcc525132976c116ea70511282befacca492375
|
573a516233447c8384f26ed56ae4e356e3995153
|
/ques6.py
|
c06b87f3ab0dae128a898dd372ba780d807a5d97
|
[] |
no_license
|
BhagyashreeKarale/if-else
|
437b0867247f827c44f469a90efeecbf9444803d
|
1224fca2bdda389b22897f17b22f21320260e75f
|
refs/heads/main
| 2023-07-19T15:03:03.351825
| 2021-09-11T19:16:07
| 2021-09-11T19:16:07
| 397,150,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
# Question 6
# Draw a flowchart for this question and write the program.
# Take two numbers as input from the user in variables varx and vary.
# Check whether varx is divisible by vary.
# If yes, print Divisible else print Not Divisible.
varx=int(input("Enter dividend:\n"))
vary=int(input("Enter divisor:\n"))
if varx % vary == 0:
print(varx,"is completely divisible by",vary)
else:
print(varx,"isn't completely divisible by",vary)
|
[
"noreply@github.com"
] |
noreply@github.com
|
2e56f5cdcb6487d4631e61f2dd8ee8baa69b504c
|
0fb3b73f8e6bb9e931afe4dcfd5cdf4ba888d664
|
/Web-UI/scrapyproject/migrations/0010_auto_20170406_1835.py
|
28afbaa4d39615071f49bc6050e0d270de3e4686
|
[
"MIT"
] |
permissive
|
mrpal39/ev_code
|
6c56b1a4412503604260b3346a04ef53a2ba8bf2
|
ffa0cf482fa8604b2121957b7b1d68ba63b89522
|
refs/heads/master
| 2023-03-24T03:43:56.778039
| 2021-03-08T17:48:39
| 2021-03-08T17:48:39
| 345,743,264
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scrapyproject', '0009_auto_20170215_0657'),
]
operations = [
migrations.RemoveField(
model_name='mongopass',
name='user',
),
migrations.DeleteModel(
name='MongoPass',
),
]
|
[
"rp9545416@gmail.com"
] |
rp9545416@gmail.com
|
716b77deb7f8f935eada888a20f2b54d08a47dd3
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v9/services/services/hotel_group_view_service/client.py
|
b9c83f2d85aa2f2fdc21c12add578c9da910ff31
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 18,873
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import hotel_group_view
from google.ads.googleads.v9.services.types import hotel_group_view_service
from .transports.base import HotelGroupViewServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import HotelGroupViewServiceGrpcTransport
class HotelGroupViewServiceClientMeta(type):
"""Metaclass for the HotelGroupViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[HotelGroupViewServiceTransport]]
_transport_registry["grpc"] = HotelGroupViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[HotelGroupViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class HotelGroupViewServiceClient(metaclass=HotelGroupViewServiceClientMeta):
"""Service to manage Hotel Group Views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
HotelGroupViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
HotelGroupViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> HotelGroupViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
HotelGroupViewServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def hotel_group_view_path(
customer_id: str, ad_group_id: str, criterion_id: str,
) -> str:
"""Return a fully-qualified hotel_group_view string."""
return "customers/{customer_id}/hotelGroupViews/{ad_group_id}~{criterion_id}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
criterion_id=criterion_id,
)
@staticmethod
def parse_hotel_group_view_path(path: str) -> Dict[str, str]:
"""Parse a hotel_group_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/hotelGroupViews/(?P<ad_group_id>.+?)~(?P<criterion_id>.+?)$",
path,
)
return m.groupdict() if m else {}
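        # e.g. "customers/123/hotelGroupViews/456~789" parses to
        # {"customer_id": "123", "ad_group_id": "456", "criterion_id": "789"}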
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse an organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, HotelGroupViewServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the hotel group view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.HotelGroupViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, HotelGroupViewServiceTransport):
# transport is a HotelGroupViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = HotelGroupViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_hotel_group_view(
self,
request: Union[
hotel_group_view_service.GetHotelGroupViewRequest, dict
] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> hotel_group_view.HotelGroupView:
r"""Returns the requested Hotel Group View in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetHotelGroupViewRequest, dict]):
The request object. Request message for
[HotelGroupViewService.GetHotelGroupView][google.ads.googleads.v9.services.HotelGroupViewService.GetHotelGroupView].
resource_name (:class:`str`):
Required. Resource name of the Hotel
Group View to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.HotelGroupView:
A hotel group view.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a hotel_group_view_service.GetHotelGroupViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, hotel_group_view_service.GetHotelGroupViewRequest
):
request = hotel_group_view_service.GetHotelGroupViewRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_hotel_group_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("HotelGroupViewServiceClient",)
|
[
"noreply@github.com"
] |
noreply@github.com
|
0c1ccf1b2f6432152b0ac72f070dae2a20ff3d35
|
6e4dcc4ebbbf337d33981e1b72eb61232f796c66
|
/create_sql.py
|
1e802cb316795e49ff52e629d7566e0c3b0eef91
|
[] |
no_license
|
kelechiP/AgencyNet
|
ba9fe69292e4e19b920b4673ab4de84901ec16a3
|
4f9923b03129e9f3dd381a89fe9b7ffb413760c6
|
refs/heads/master
| 2021-03-31T11:41:53.930709
| 2020-05-24T00:18:35
| 2020-05-24T00:18:35
| 248,104,430
| 0
| 1
| null | 2020-05-24T00:18:36
| 2020-03-18T00:46:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,814
|
py
|
import json
import requests
def get_search():
g = input("Enter the name of Author: ")
print("Searching for the Author name ", g, " in the API Call")
return g
def get_request():
# Replace this with file access
response = requests.get(
"https://api.crossref.org/works?filter=has-full-text:true&mailto=GroovyBib@example.org")
items = response.json()["message"]["items"]
return items
# Pretty printing JSON string back
def jprint(obj):
text = json.dumps(obj, sort_keys=True, indent=4)
print(text)
def author_search(items, g):
author_store = []
found_author = False
# For each element of list
for item in items:
# item (not itemS) is a dict
# Check if authors exist
if "author" in item.keys():
for author in item["author"]:
for key in author:
search_item = author[key]
# if type(search_item) is not list and g.lower() in author[key].lower():
if type(search_item) is list:
continue
elif g.lower() in str(author[key]).lower():
found_author = True
print("Author found and")
print("Author Exists in given line--->>>",
key, ":", author[key])
author_store.append((key, author[key], item))
if not found_author:
print('Author name is NOT found in given API call')
return False
else:
return author_store
def author_save(author_store):
return True
def main():
g = get_search()
items = get_request()
author_search(items, g)
# Calling the main function which runs everything
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
aa4b45c03c37c440887f601fdd49a108f1479990
|
83c2cfd249e2e3c6fce223c9279e7d99e1596eda
|
/tf_gan.py
|
1631a07f29bd815bd6c822c169bd0e59f810ae1c
|
[
"Apache-2.0"
] |
permissive
|
hackthecrisis21/nist_differential_privacy_synthetic_data_challenge
|
ec5eff7102d65b3fc3406039eed80f587ef40062
|
09e93201e36e20f25bebd82cd68cd4c837789297
|
refs/heads/master
| 2022-02-16T19:31:44.739948
| 2019-05-30T00:55:32
| 2019-05-30T01:19:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,866
|
py
|
"""
Author: Moustafa Alzantot (malzantot@ucla.edu)
All rights reserved.
"""
import sys
import pdb
import math
import numpy as np
import data_utils
import pandas as pd
import json
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
import tensorflow as tf
from tensorflow.core.framework import summary_pb2
import time
from tensorflow.distributions import Bernoulli, Categorical
from differential_privacy.dp_sgd.dp_optimizer import dp_optimizer
from differential_privacy.dp_sgd.dp_optimizer import sanitizer
from differential_privacy.dp_sgd.dp_optimizer import utils
from differential_privacy.privacy_accountant.tf import accountant
flags = tf.app.flags
flags.DEFINE_string('input_file', 'input.csv', 'Input file')
flags.DEFINE_string('output_file', 'output.csv', 'output file')
flags.DEFINE_string('meta_file', 'metadata.json', 'metadata file')
flags.DEFINE_float('epsilon', 8.0, 'Target eps')
flags.DEFINE_float('delta', None, 'maximum delta')
# Training parameters
flags.DEFINE_integer('batch_size', 64, 'Batch size')
flags.DEFINE_float('lr', 1e-3, 'learning rate')
flags.DEFINE_integer('num_epochs', 20, 'Number of training epochs')
flags.DEFINE_integer(
'save_every', 1, 'Save training logs every how many epochs')
flags.DEFINE_float('weight_clip', 0.01, 'weight clipping value')
# Model parameters
flags.DEFINE_integer('z_size', 64, 'Size of input size')
flags.DEFINE_integer('hidden_dim', 1024, 'Size of hidden layer')
# Privacy parameters
flags.DEFINE_bool('with_privacy', False, 'Turn on/off differential privacy')
flags.DEFINE_float('gradient_l2norm_bound', 1.0, 'l2 norm clipping')
# Sampling and model restore
flags.DEFINE_integer('sampling_size', 100000, 'Number of examples to sample')
flags.DEFINE_string('checkpoint', None, 'Checkpoint to restore')
flags.DEFINE_bool('sample', False, 'Perform sampling')
flags.DEFINE_bool('dummy', False,
'If True, then test our model using dummy data ')
#########################################################################
# Utility functions for building the WGAN model
#########################################################################
def lrelu(x, alpha=0.01):
""" leaky relu activation function """
return tf.nn.leaky_relu(x, alpha)
def fully_connected(input_node, output_dim, activation=tf.nn.relu, scope='None'):
""" returns both the projection and output activation """
with tf.variable_scope(scope or 'FC'):
w = tf.get_variable('w', shape=[input_node.get_shape()[1], output_dim],
initializer=tf.truncated_normal_initializer(stddev=0.1))
b = tf.get_variable('b', shape=[output_dim],
initializer=tf.constant_initializer())
tf.summary.histogram('w', w)
tf.summary.histogram('b', b)
z = tf.matmul(input_node, w) + b
h = activation(z)
return z, h
def critic_f(input_node, hidden_dim):
""" Defines the critic model architecture """
z1, h1 = fully_connected(input_node, hidden_dim, lrelu, scope='fc1')
# z2, h2 = fully_connected(h1, hidden_dim, lrelu, scope='fc2')
z3, _ = fully_connected(h1, 1, tf.identity, scope='fc3')
return z3
def generator(input_node, hidden_dim, output_dim):
""" Defines the generator model architecture """
z1, h1 = fully_connected(input_node, hidden_dim, lrelu, scope='fc1')
# z2, h2 = fully_connected(h1, hidden_dim, lrelu, scope='fc2')
z3, _ = fully_connected(h1, output_dim, tf.identity, scope='fc3')
return z3
def nist_data_format(output, metadata, columns_list, col_maps):
""" Output layer format for generator data """
with tf.name_scope('nist_format'):
output_list = []
cur_idx = 0
for k in columns_list:
v = col_maps[k]
if isinstance(v, dict):
if len(v) == 2:
output_list.append(tf.nn.sigmoid(
output[:, cur_idx:cur_idx+1]))
cur_idx += 1
else:
output_list.append(
tf.nn.softmax(output[:, cur_idx: cur_idx+len(v)]))
cur_idx += len(v)
elif v == 'int':
output_list.append(output[:, cur_idx:cur_idx+1])
cur_idx += 1
elif v == 'int_v':
output_list.append(tf.nn.sigmoid(output[:, cur_idx:cur_idx+1]))
output_list.append(output[:, cur_idx+1:cur_idx+2])
cur_idx += 2
elif v == 'void':
pass
else:
raise Exception('invalid mapping for col {}'.format(k))
return tf.concat(output_list, axis=1)
def nist_sampling_format(output, metadata, columns_list, col_maps):
"""
Output layer format for generator data plus performing random sampling
from the output softmax and bernoulli distributions.
"""
with tf.name_scope('nist_sampling_format'):
output_list = []
cur_idx = 0
for k in columns_list:
v = col_maps[k]
if isinstance(v, dict):
if len(v) == 2:
output_list.append(
tf.cast(
tf.expand_dims(
Bernoulli(logits=output[:, cur_idx]).sample(), axis=1), tf.float32)
)
cur_idx += 1
else:
output_list.append(
tf.cast(tf.expand_dims(
Categorical(logits=output[:, cur_idx: cur_idx+len(v)]).sample(), axis=1), tf.float32))
cur_idx += len(v)
elif v == 'int':
output_list.append(
tf.nn.relu(output[:, cur_idx:cur_idx+1]))
cur_idx += 1
elif v == 'int_v':
output_list.append(tf.nn.sigmoid(output[:, cur_idx:cur_idx+1]))
output_list.append(tf.nn.relu(output[:, cur_idx+1:cur_idx+2]))
cur_idx += 2
elif v == 'void':
pass
return tf.concat(output_list, axis=1)
def sample_dataset(sess, sampling_output, output_fname, columns_list, sampling_size):
""" Performs sampling to output synthetic data from the generative model.
Saves the result to output_fname file.
"""
sampling_result = []
num_samples = 0
while num_samples < sampling_size:
batch_samples = sess.run(sampling_output)
num_samples += batch_samples.shape[0]
sampling_result.append(batch_samples)
sampling_result = np.concatenate(sampling_result, axis=0)
print(sampling_result.shape)
final_df = data_utils.postprocess_data(
sampling_result, metadata, col_maps, columns_list, greedy=False)
print(final_df.shape)
final_df = pd.DataFrame(
data=final_df, columns=original_df.columns, index=None)
final_df.to_csv(output_fname, index=False)
if __name__ == '__main__':
FLAGS = flags.FLAGS
# Reading input data
original_df, input_data, metadata, col_maps, columns_list = data_utils.preprocess_nist_data(
FLAGS.input_file, FLAGS.meta_file, subsample=False)
input_data = input_data.values # .astype(np.float32)
data_dim = input_data.shape[1]
format_fun = nist_data_format
num_examples = input_data.shape[0]
print('** Reading input ** ')
print('-- Read {} rows, {} columns ----'.format(num_examples, data_dim))
batch_size = FLAGS.batch_size
print('Batch size = ', batch_size)
num_batches = math.ceil(num_examples / batch_size)
T = FLAGS.num_epochs * num_batches
q = float(FLAGS.batch_size) / num_examples
max_eps = FLAGS.epsilon
if FLAGS.delta is None:
max_delta = 1.0 / (num_examples**2)
else:
max_delta = FLAGS.delta
print('Privacy budget = ({}, {})'.format(max_eps, max_delta))
# Decide which accountant to use
use_moments_accountant = max_eps > 0.7
if use_moments_accountant:
if max_eps > 5.0:
sigma = 1.0
else:
sigma = 3.0
eps_per_step = None # unused for moments accountant
delta_per_step = None # unused for moments accountant
print('Using moments accountant (\sigma = {})'.format(sigma))
else:
sigma = None # unused for amortized accountant
# bound of eps_per_step from lemma 2.3 in https://arxiv.org/pdf/1405.7085v2.pdf
eps_per_step = max_eps / (q * math.sqrt(2 * T * math.log(1/max_delta)))
delta_per_step = max_delta / (T * q)
print('Using amortized accountant (\eps, \delta)-per step = ({},{})'.format(
eps_per_step, delta_per_step))
with tf.name_scope('inputs'):
x_holder = tf.placeholder(tf.float32, [None, data_dim], 'x')
z_holder = tf.random_normal(shape=[FLAGS.batch_size, FLAGS.z_size],
dtype=tf.float32, name='z')
sampling_noise = tf.random_normal([FLAGS.batch_size, FLAGS.z_size],
dtype=tf.float32, name='sample_z')
eps_holder = tf.placeholder(tf.float32, [], 'eps')
delta_holder = tf.placeholder(tf.float32, [], 'delta')
print("Data Dimention: ", data_dim)
print("X Holder: ", x_holder)
print("Z Holder: ", z_holder)
with tf.variable_scope('generator') as scope:
gen_output = generator(z_holder, FLAGS.hidden_dim, data_dim)
print(gen_output)
gen_output = format_fun(gen_output, metadata, columns_list, col_maps)
print(gen_output)
scope.reuse_variables()
sampling_output = generator(sampling_noise, FLAGS.hidden_dim, data_dim)
sampling_output = nist_sampling_format(
sampling_output, metadata, columns_list, col_maps)
print(sampling_output)
with tf.variable_scope('critic') as scope:
critic_real = critic_f(x_holder, FLAGS.hidden_dim)
scope.reuse_variables()
critic_fake = critic_f(gen_output, FLAGS.hidden_dim)
with tf.name_scope('train'):
global_step = tf.Variable(
0, dtype=tf.int32, trainable=False, name='global_step')
loss_critic_real = - tf.reduce_mean(critic_real)
loss_critic_fake = tf.reduce_mean(critic_fake)
loss_critic = loss_critic_real + loss_critic_fake
critic_vars = [x for x in tf.trainable_variables()
if x.name.startswith('critic')]
if FLAGS.with_privacy:
# assert FLAGS.sigma > 0, 'Sigma has to be positive when with_privacy=True'
with tf.name_scope('privacy_accountant'):
if use_moments_accountant:
# Moments accountant introduced in (https://arxiv.org/abs/1607.00133)
# we use the same implementation as
# https://github.com/tensorflow/models/blob/master/research/differential_privacy/privacy_accountant/tf/accountant.py
priv_accountant = accountant.GaussianMomentsAccountant(
num_examples)
else:
# AmortizedAccountant which tracks the privacy spending in the amortized way.
# It uses privacy amplification via sampling to compute the privacy spending for each
# batch and strong composition (specialized for Gaussian noise) to
# accumulate the privacy spending (http://arxiv.org/pdf/1405.7085v2.pdf)
# we use the implementation of
# https://github.com/tensorflow/models/blob/master/research/differential_privacy/privacy_accountant/tf/accountant.py
priv_accountant = accountant.AmortizedAccountant(
num_examples)
# per-example Gradient l_2 norm bound.
example_gradient_l2norm_bound = FLAGS.gradient_l2norm_bound / FLAGS.batch_size
# The Gaussian sanitizer enforces differential privacy by clipping the per-example gradients,
# adding Gaussian noise, and summing the noisy gradients at each weight update step.
# It will also notify the privacy accountant to update the privacy spending.
gaussian_sanitizer = sanitizer.AmortizedGaussianSanitizer(
priv_accountant,
[example_gradient_l2norm_bound, True])
critic_step = dp_optimizer.DPGradientDescentOptimizer(
FLAGS.lr,
# (eps, delta) unused parameters for the moments accountant which we are using
[eps_holder, delta_holder],
gaussian_sanitizer,
sigma=sigma,
batches_per_lot=1,
var_list=critic_vars).minimize((loss_critic_real, loss_critic_fake),
global_step=global_step, var_list=critic_vars)
else:
# This is used when we train without privacy.
critic_step = tf.train.RMSPropOptimizer(FLAGS.lr).minimize(
loss_critic, var_list=critic_vars)
# Weight clipping to ensure the critic function is K-Lipschitz as required
# for WGAN training.
clip_c = [tf.assign(var, tf.clip_by_value(
var, -FLAGS.weight_clip, FLAGS.weight_clip)) for var in critic_vars]
with tf.control_dependencies([critic_step]):
critic_step = tf.tuple(clip_c)
# Traing step of generator
generator_vars = [x for x in tf.trainable_variables()
if x.name.startswith('generator')]
loss_generator = -tf.reduce_mean(critic_fake)
generator_step = tf.train.RMSPropOptimizer(FLAGS.lr).minimize(
loss_generator, var_list=generator_vars)
weight_summaries = tf.summary.merge_all()
tb_c_op = tf.summary.scalar('critic_loss', loss_critic)
tb_g_op = tf.summary.scalar('generator_loss', loss_generator)
final_eps = 0.0
final_delta = 0.0
critic_iters = 10
with tf.Session() as sess:
summary_writer = tf.summary.FileWriter('./logs', sess.graph)
summary_writer.flush()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
if FLAGS.checkpoint:
# Load the model
saver.restore(sess, FLAGS.checkpoint)
        if FLAGS.sample:
            assert FLAGS.checkpoint is not None, "You must provide a checkpoint."
            sample_dataset(sess, sampling_output,
                           FLAGS.output_file, columns_list, FLAGS.sampling_size)
            sys.exit(0)
abort_early = False # Flag that will be changed to True if we exceed the privacy budget
for e in range(FLAGS.num_epochs):
if abort_early:
break
# One epoch is one full pass over the whole training data
start_time = time.time()
# Randomly shuffle the data at the beginning of each epoch
rand_idxs = np.arange(num_examples)
np.random.shuffle(rand_idxs)
idx = 0
abort_early = False
while idx < num_batches and not abort_early:
if idx % 10 == 0:
sys.stdout.write('\r{}/{}'.format(idx, num_batches))
sys.stdout.flush()
critic_i = 0
while critic_i < critic_iters and idx < num_batches and not abort_early:
# Train the critic.
batch_idxs = rand_idxs[idx*batch_size: (idx+1)*batch_size]
batch_xs = input_data[batch_idxs, :]
feed_dict = {x_holder: batch_xs,
eps_holder: eps_per_step,
delta_holder: delta_per_step
}
_, tb_c = sess.run(
[critic_step, tb_c_op], feed_dict=feed_dict)
critic_i += 1
idx += 1
if FLAGS.with_privacy:
if use_moments_accountant:
spent_eps_deltas = priv_accountant.get_privacy_spent(
sess, target_deltas=[max_delta])[0]
else:
spent_eps_deltas = priv_accountant.get_privacy_spent(
sess, target_eps=None)[0]
# Check whether we exceed the privacy budget
if (spent_eps_deltas.spent_delta > max_delta or
spent_eps_deltas.spent_eps > max_eps):
abort_early = True
print(
"\n*** Discriminator training exceeded privacy budget, aborting the training of generator ****")
else:
final_eps = spent_eps_deltas.spent_eps
final_delta = spent_eps_deltas.spent_delta
else:
# Training without privacy
spent_eps_deltas = accountant.EpsDelta(np.inf, 1)
# Train the generator
if not abort_early:
# Check for abort_early because we stop updating the generator
# once we exceeded privacy budget.
privacy_summary = summary_pb2.Summary(value=[
summary_pb2.Summary.Value(tag='eps',
simple_value=final_eps)])
summary_writer.add_summary(privacy_summary, e)
_, tb_g = sess.run([generator_step, tb_g_op])
if e % FLAGS.save_every == 0 or (e == FLAGS.num_epochs-1):
summary_writer.add_summary(tb_g, e)
end_time = time.time()
if (e % FLAGS.save_every == 0) or (e == FLAGS.num_epochs-1) or abort_early:
summary_writer.add_summary(tb_c, e)
weight_summary_out = sess.run(
weight_summaries, feed_dict=feed_dict)
summary_writer.add_summary(weight_summary_out, e)
print('\nEpoch {} took {} seconds. Privacy = ({}, {}).'.format(
e, (end_time-start_time), spent_eps_deltas.spent_eps, spent_eps_deltas.spent_delta))
summary_writer.flush()
if FLAGS.with_privacy:
print('\nTotal (\eps, \delta) privacy loss spent in training = ({}, {})'.format(
final_eps, final_delta))
summary_writer.close()
# Sample synthetic data from the model after training is done.
sample_dataset(sess, sampling_output,
FLAGS.output_file, columns_list, FLAGS.sampling_size)
|
[
"malzantot@ucla.edu"
] |
malzantot@ucla.edu
|
e8af1aab3d551f098082a72a6eb666b0423f557d
|
21514017d4208e9a20e9e625b146e5c0b51bce68
|
/app/blog/migrations/0001_initial.py
|
2f3d3a3830c412aa62800dd9a885a1ca583f3aae
|
[
"MIT"
] |
permissive
|
Thewest123/blog-app-api
|
d419152557dd10c1d37df7281bf0b09be875aeab
|
c4654318100ec65995df1c6775390c32dda15307
|
refs/heads/main
| 2023-07-01T13:25:28.294979
| 2021-08-06T10:06:26
| 2021-08-06T10:06:26
| 392,237,230
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,985
|
py
|
# Generated by Django 3.2.6 on 2021-08-03 10:39
import blog.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80)),
('slug', models.SlugField()),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80)),
('slug', models.SlugField()),
],
),
migrations.CreateModel(
name='BlogPost',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=80)),
('slug', models.SlugField()),
('content', models.TextField()),
('image', models.ImageField(null=True, upload_to=blog.models.blog_image_file_path)),
('is_for_logged_users_only', models.BooleanField(default=False, help_text='When selected, only logged-in users can view this post or see it in posts list')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, to='blog.category')),
('tags', models.ManyToManyField(to='blog.Tag')),
],
),
]
|
[
"info@jancerny.dev"
] |
info@jancerny.dev
|
d3813671c7b96dd94e66342390d4574c412700a3
|
ef32b87973a8dc08ba46bf03c5601548675de649
|
/pytglib/api/functions/search_user_by_phone_number.py
|
218f9710f017f0467ab39dc7429e7841c3300db5
|
[
"MIT"
] |
permissive
|
iTeam-co/pytglib
|
1a7580f0e0c9e317fbb0de1d3259c8c4cb90e721
|
d3b52d7c74ee5d82f4c3e15e4aa8c9caa007b4b5
|
refs/heads/master
| 2022-07-26T09:17:08.622398
| 2022-07-14T11:24:22
| 2022-07-14T11:24:22
| 178,060,880
| 10
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
from ..utils import Object
class SearchUserByPhoneNumber(Object):
"""
Searches a user by their phone number. Returns a 404 error if the user can't be found
Attributes:
ID (:obj:`str`): ``SearchUserByPhoneNumber``
Args:
phone_number (:obj:`str`):
Phone number to search for
Returns:
User
Raises:
:class:`telegram.Error`
"""
ID = "searchUserByPhoneNumber"
def __init__(self, phone_number, extra=None, **kwargs):
self.extra = extra
self.phone_number = phone_number # str
@staticmethod
def read(q: dict, *args) -> "SearchUserByPhoneNumber":
phone_number = q.get('phone_number')
return SearchUserByPhoneNumber(phone_number)
|
[
"arshshia@gmail.com"
] |
arshshia@gmail.com
|
1c23cd9bec50756f3a2bea2745a173ac45cdc882
|
c278b06f77cac0a2942fa2ca0636f2dc72b52505
|
/4.blog_project/mydjangoproject/blog/migrations/0004_auto_20190320_0504.py
|
f8a4d492ef89b65190cfc991db5c4e1a9cab6c16
|
[] |
no_license
|
hooong/Django_study
|
2d27bc7d5b2ad53fa4c9e1bcd808437af98cbe09
|
b760ace8f562d538ad18d552388e48ed52cc78d1
|
refs/heads/master
| 2022-12-02T15:51:24.510596
| 2019-11-02T07:38:37
| 2019-11-02T07:38:37
| 165,012,883
| 5
| 1
| null | 2022-11-22T03:26:18
| 2019-01-10T07:35:07
|
Python
|
UTF-8
|
Python
| false
| false
| 318
|
py
|
# Generated by Django 2.1.5 on 2019-03-20 05:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_blog_blog_hit'),
]
operations = [
migrations.AlterModelOptions(
name='comment',
options={},
),
]
|
[
"tjrwns0529@gmail.com"
] |
tjrwns0529@gmail.com
|
3ddc20aebdc144d9693019af06524d5ea4513712
|
78a28bd6b95041bfe67d8aa6a3a3c111911afaab
|
/18.Web Scraping with Python Scrapy - RM/03_Advanced_Techniques/news_scraper/news_scraper/settings.py
|
dec217105fcd124cbb665b4076642b6d93bf5eb9
|
[
"MIT"
] |
permissive
|
jailukanna/Python-Projects-Dojo
|
8200a60ab925bf796bd39cb1977e6f0e0a575c23
|
98c7234b84f0afea99a091c7198342d66bbdff5b
|
refs/heads/master
| 2023-03-15T06:54:38.141189
| 2021-03-11T08:17:02
| 2021-03-11T08:17:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,212
|
py
|
# Scrapy settings for news_scraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'news_scraper'
SPIDER_MODULES = ['news_scraper.spiders']
NEWSPIDER_MODULE = 'news_scraper.spiders'
CLOSESPIDER_PAGECOUNT = 10
FEED_URI = 'news_articles.json'
FEED_FORMAT = 'json'
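# Close the spider after roughly 10 crawled responses and export the scraped
# items to news_articles.json in JSON format.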
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'news_scraper (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'news_scraper.middlewares.NewsScraperSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'news_scraper.middlewares.NewsScraperDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'news_scraper.pipelines.NewsScraperPipeline': 300,
# }
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"ptyadana@users.noreply.github.com"
] |
ptyadana@users.noreply.github.com
|
d9dcdca62c24190cf42aa047155d88fcee2cd81a
|
775264659445173665b153cfdc450071845939e8
|
/repos/models.py
|
257c46155a938a911e083ce14728e8c09b3e7d40
|
[] |
no_license
|
ovokpus/flask-app
|
e19abba21413956408be8f11e99eda936de199b5
|
a89202ac95a9ecc37fbf1297aa0dee1b070f2dbd
|
refs/heads/main
| 2023-06-04T07:15:19.206402
| 2021-07-01T15:25:34
| 2021-07-01T15:25:34
| 382,059,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
class GitHubRepo:
def __init__(self, name, language, num_stars):
self.name = name
self.language = language
self.num_stars = num_stars
def __str__(self):
return f"-> {self.name} is a {self.language} repo with {self.num_stars} stars."
def __repr__(self):
return f"GitHubRepo({self.name}, {self.language}, {self.num_stars})"
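# Minimal usage sketch (values are hypothetical):
#   repo = GitHubRepo("flask-app", "Python", 0)
#   print(repo)   # -> flask-app is a Python repo with 0 stars.
#   repr(repo)    # 'GitHubRepo(flask-app, Python, 0)'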
|
[
"ovo@live.ca"
] |
ovo@live.ca
|
81d70837b62ed7c9dbad2ad8927c5d723e1d4953
|
63e2bed7329c79bf67279f9071194c9cba88a82c
|
/SevOneApi/python-client/test/test_performance_metrics_settings.py
|
5471c91fa1f10ac623252fd1733b391f5e288962
|
[] |
no_license
|
jsthomason/LearningPython
|
12422b969dbef89578ed326852dd65f65ab77496
|
2f71223250b6a198f2736bcb1b8681c51aa12c03
|
refs/heads/master
| 2021-01-21T01:05:46.208994
| 2019-06-27T13:40:37
| 2019-06-27T13:40:37
| 63,447,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,001
|
py
|
# coding: utf-8
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.performance_metrics_settings import PerformanceMetricsSettings # noqa: E501
from swagger_client.rest import ApiException
class TestPerformanceMetricsSettings(unittest.TestCase):
"""PerformanceMetricsSettings unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPerformanceMetricsSettings(self):
"""Test PerformanceMetricsSettings"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.performance_metrics_settings.PerformanceMetricsSettings() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"johnsthomason@gmail.com"
] |
johnsthomason@gmail.com
|
fda8ec9ce60357ce957ed4b4aab4f9a27ee7f128
|
5e213c666164548b338a519f59d8d2cb733ef289
|
/Tests/con_vaccini_test/epiMOX_new_model/epi/parameters_const.py
|
af1f3d17204a6c355a1f9977c78733303bd36781
|
[] |
no_license
|
giovanniziarelli/epiMOX_SUIHTER
|
73fb0fd556f9c7c07d4c8d7d348cc9e206d1849b
|
aeaf25674ebc33ef824512fd67de471c7522868a
|
refs/heads/main
| 2023-09-04T18:11:34.193844
| 2021-11-16T14:55:53
| 2021-11-16T14:55:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,736
|
py
|
# Definition of the parameters of the models
# At the moment parameters depend on space and time
import numpy as np
from scipy.special import erfc
import scipy.interpolate as si
from lmfit import Model
import functools
import datetime
import pandas as pd
# Mask function
def maskParams(params,m_mask):
m_mask = np.invert(m_mask)
return(np.ma.compressed( np.ma.masked_array( params, mask=m_mask) ))
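# e.g. maskParams([p0, p1, p2], [True, False, True]) keeps only the entries whose
# mask entry is True and returns array([p0, p2])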
def expgaussian(x, amplitude=1, center=0, sigma=1.0, gamma=1.0):
""" an alternative exponentially modified Gaussian."""
dx = center-x
return amplitude* np.exp(gamma*dx) * erfc( dx/(np.sqrt(2)*sigma))
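# expgaussian above evaluates f(x) = amplitude * exp(gamma*(center - x)) * erfc((center - x)/(sqrt(2)*sigma));
# EMGextrapol below fits this curve to a parameter's history so it can be extrapolated in time.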
def EMGextrapol(x,y):
model = Model(expgaussian)
params = model.make_params(sigma=10, gamma=0.01, amplitude=y.max(), center=y.argmax())
result = model.fit(y, params, x=x, nan_policy='propagate')
return result
# Utility for reading a section
def readSection(content,section,values):
counter = 0
data = np.zeros(values)
sec_content = []
found = False
if values == 0:
return()
# extract from section
for line in content:
if line.startswith(section) or found:
found = True
sec_content.append(line)
else:
pass
for line in sec_content:
if line.startswith(section):
pass
elif line.startswith(b'#'):
pass
elif not line:
pass
else:
tokens = line.split()
for v in tokens:
data[counter] = float(v)
counter = counter + 1
if counter == values:
return data
return
def readTimes(content,section,values):
counter = 0
data = []
sec_content = []
found = False
if values == 0:
return()
# extract from section
for line in content:
if line.startswith(section) or found:
found = True
sec_content.append(line)
else:
pass
for line in sec_content:
if line.startswith(section):
pass
elif line.startswith(b'#'):
pass
elif not line:
pass
else:
data.append(datetime.date.fromisoformat(line.decode("utf-8").replace('\n','').replace('\n','')))
counter = counter + 1
if counter == values:
return data
return
# Params class
class Params():
def __init__(self,dataStart):
self.nParams = 0
self.nSites = 0
self.nPhases = 0
self.estimated = False
self.times = np.zeros(0)
self.dataStart = dataStart
self.dataEnd = 0
self.degree = 0
self.extrapolator = []
self.scenario = np.zeros((0,2))
self.constant = np.zeros(0)
self.constantSites = np.zeros(0)
self.params = np.zeros((0, 0))
self.params_time = np.zeros((0,0))
self.mask = np.zeros((0, 0))
self.lower_bounds = np.zeros((0, 0))
self.upper_bounds = np.zeros((0, 0))
def omegaI_vaccines(self,t): return 1
def gammaT_vaccines(self,t): return 1
def gammaH_vaccines(self,t): return 1
def get(self):
return(self.params)
def getMask(self):
return( np.array(self.mask, dtype=bool) )
def getConstant(self):
return( np.array(self.constant, dtype=bool) )
def getConstantSites(self):
return( np.array(self.constantSites, dtype=bool) )
def getLowerBounds(self):
return(self.lower_bounds)
def getUpperBounds(self):
return(self.upper_bounds)
def save(self,paramFileName):
if paramFileName.lower().endswith(('.csv', '.txt')):
self.__saveCsv__(paramFileName)
elif paramFileName.lower().endswith('.npy'):
self.__saveNpy__(paramFileName)
return()
def load(self,paramFileName):
if paramFileName.lower().endswith(('.csv', '.txt')):
self.__loadCsv__(paramFileName)
elif paramFileName.lower().endswith('.npy'):
self.__loadNpy__(paramFileName)
return()
def define_params_time(self, Tf):
self.params_time = np.zeros((Tf+1,self.nParams,self.nSites)).squeeze()
def compute_param_over_time(self,Tf):
times = np.arange(0,Tf+1,).astype(int)
for i,t in enumerate(times):
self.params_time[i,self.getMask()[0]] = self.atTime(t)[self.getMask()[0]]
#self.params_time[i] = self.atTime(t)
def addPhase(self,ndays):
self.nPhases += 1
self.times = np.append(self.times,ndays)
self.params = np.append(self.params,[self.params[-1]],axis=0)
self.mask = np.append(self.mask,[self.mask[-1]],axis=0)
self.lower_bounds = np.append(self.lower_bounds,[self.lower_bounds[-1]],axis=0)
self.upper_bounds = np.append(self.upper_bounds,[self.upper_bounds[-1]],axis=0)
def getPhase(self,p,t):
if self.constant[p]:
phase = 0
else:
phase = self.nPhases-1
for i, interval in enumerate(self.times):
if ( t <= interval ):
phase = i
break
return (phase)
def atTime(self,t):
params_time = np.zeros((self.nParams,self.nSites)).squeeze()
transient = 3
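        # When a phase boundary was crossed less than `transient` days ago, parameters
        # are blended linearly between the previous and the current phase values.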
if self.nSites==1:
if self.dataEnd>0 and t>self.dataEnd:
m = 1
if len(self.scenario) > 0:
d,s = self.scenario.transpose()
i = np.searchsorted(d,t,side='right')-1
if i>=0:
if len(d)==1:
for q in range(self.nParams):
if i==0 and q==0 and (t-d[0])<=4:
transient = 4
params_time[0] = self.params[-1, 0] * (1 - (t - d[0]) / transient) + \
self.params[-1, 0] * s[0] * (t - d[0]) / transient
#elif q==3:
# params_time[q] = np.maximum(self.scenario_extrapolator[q](t)*self.omegaI_vaccines(t), 0)
elif q==9:
params_time[q] = np.maximum(self.scenario_extrapolator[q](t)*self.gammaT_vaccines(t), 0)
elif q==10:
params_time[q] = np.maximum(self.scenario_extrapolator[q](t)*self.gammaH_vaccines(t), 0)
else:
params_time[q] = np.maximum(self.scenario_extrapolator[q](t), 0)
return params_time
else:
t = d[0] - 1
m = s[i]
#if len(d)==1:
# for q in range(self.nParams):
# params_time[q] = np.maximum(self.scenario_extrapolator[q](t), 0)
# return params_time
params_time = np.array(self.params[-1])
if type(self.degree)==int:
for q in range(self.nParams):
if q==0:
params_time[q] = np.maximum(self.extrapolator[q](t) * m,0)
elif q==3:
params_time[q] = np.maximum(self.extrapolator[q](t)*self.omegaI_vaccines(t), 0)
elif q==9:
params_time[q] = np.maximum(self.extrapolator[q](t)*self.gammaT_vaccines(t), 0)
elif q==10:
params_time[q] = np.maximum(self.extrapolator[q](t)*self.gammaH_vaccines(t), 0)
else:
params_time[q] = np.maximum(self.extrapolator[q](t),0)
else:
params_time[0] = self.extrapolator(x=t) * m
params_time[3] *= self.omegaI_vaccines(t)
params_time[9] *= self.gammaT_vaccines(t)
params_time[10] *= self.gammaH_vaccines(t)
else:
for p in range(self.nParams):
phase = self.getPhase(p,t)
phasetime = self.times[phase - 1]
if (t > phasetime + transient) or (phase == 0) or (abs(t-self.dataEnd)<6):
params_time[p] = self.params[phase,p]
else:
params_time[p] = self.params[ phase-1 , p ]*(1-(t-phasetime)/transient)+self.params[ phase , p ]*(t-phasetime)/transient
if p==9:
params_time[p] *= self.gammaT_vaccines(t)
elif p==10:
params_time[p] *= self.gammaH_vaccines(t)
else:
if self.dataEnd>0 and t>self.dataEnd:
for p in range(self.nSites):
m = 1
if len(self.scenario) > 0:
d,s = self.scenario[p].transpose()
i = np.searchsorted(d,t,side='right')-1
if i>=0:
if len(d) == 1:
for q in range(self.nParams):
params_time[q,p] = np.maximum(self.scenario_extrapolator[p][q](t), 0)
return params_time
else:
t = d[0] - 1
m = s[i]
params_time[:,p] = np.array(self.params[-1,:,p])
if type(self.degree)==int:
for q in range(self.nParams):
if q==0:
params_time[q,p] = np.maximum(self.extrapolator[p][q](t) * m,0)
else:
params_time[q,p] = np.maximum(self.extrapolator[p][q](t),0)
else:
params_time[0,p] = self.extrapolator[p](x=t) * m
else:
for p in range(self.nParams):
if self.constantSites[p]:
phase = self.getPhase(p, t)
phasetime = self.times[phase - 1]
if (t > phasetime + transient) or phase == 0 or (abs(t-self.dataEnd)<6):
params_time[p,:] = self.params[phase, p, 0]
else:
params_time[p,:] = self.params[phase - 1, p, 0] * (1 - (t - phasetime) / transient) + self.params[phase, p, 0] * (t - phasetime) / transient
else:
for s in range(self.nSites):
phase = self.getPhase(p, t)
phasetime = self.times[phase - 1]
if (t > phasetime + transient) or phase == 0 or (abs(t-self.dataEnd)<6):
params_time[p,s] = self.params[phase, p, s]
else:
params_time[p,s] = self.params[phase - 1, p, s] * (1 - (t - phasetime) / transient) + self.params[phase, p, s] * (t - phasetime) / transient
return params_time
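    # Descriptive note: atTime(t) returns the parameter vector (or the
    # parameter-by-site matrix) at day t. Inside the calibration window each
    # parameter is piecewise constant over the phases, with a short linear
    # ramp of `transient` days when a new phase begins; past dataEnd the
    # values come from the fitted extrapolators, optionally rescaled by a
    # scenario factor (transmission parameter) and by the vaccine-effect
    # corrections applied to the omegaI, gammaT and gammaH parameters.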
def atPhase(self,i):
return(self.params[ i , ...])
def atSite(self,i): # works only if more than 1 site
if self.nSites > 1:
return(self.params[ ... , i ])
return ()
def forecast(self, DPC_time, Tf, deg, scenarios=None):
if DPC_time>=Tf:
return ()
self.degree = deg
self.dataEnd = DPC_time
tmp_times = np.concatenate(([0],self.times,[self.dataEnd]))
if self.nSites == 1:
if type(self.degree)==int:
x = tmp_times[-(deg+1):]
self.extrapolator = []
for q in range(self.nParams):
y = self.get()[-(deg+1):,q]
self.extrapolator.append(np.poly1d(np.polyfit(x,y,self.degree)))
elif self.degree == 'exp':
x = tmp_times[1:]
y = self.get()[:,0]
EMG = EMGextrapol(x,y)
self.extrapolator = functools.partial(EMG.eval,**EMG.best_values)
else:
self.extrapolator = []
if type(self.degree)==int:
for p in range(self.nSites):
tmp_extrapolator = []
x = tmp_times[-(deg+1):]
for q in range(self.nParams):
y = self.get()[-(deg+1):,q,p]
tmp_extrapolator.append(np.poly1d(np.polyfit(x,y,self.degree)))
self.extrapolator.append(tmp_extrapolator)
elif self.degree == 'exp':
x = tmp_times[1:]
for p in range(self.nSites):
y = self.get()[:,0,p]
EMG = EMGextrapol(x,y)
self.extrapolator.append(functools.partial(EMG.eval,**EMG.best_values))
if scenarios is not None:
self.scenario = scenarios
if self.nSites != 1:
if len(scenarios.shape) == 2:
self.scenario = np.tile(self.scenario,(self.nSites,1,1))
return ()
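    # Descriptive note: forecast() builds the extrapolators that atTime() uses
    # beyond the data window. With an integer degree, each parameter gets a
    # polynomial (np.polyfit/np.poly1d) fitted through its last degree+1 phase
    # values; with degree == 'exp', only the first (transmission) parameter is
    # extrapolated, through an exponentially modified Gaussian fit
    # (EMGextrapol), while the other parameters keep their last-phase values.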
def extrapolate_scenario(self):
if self.nSites == 1:
if self.scenario.shape[0] != 1:
return()
d,s = self.scenario.transpose()
tmp_times = np.concatenate(([0],self.times,[self.dataEnd],d))
if type(self.degree)==int:
x = tmp_times[-(self.degree+1):]
#x = tmp_times[-1:]
self.scenario_extrapolator = []
for q in range(self.nParams):
if q==0:
y = np.concatenate((self.get()[:,q],self.extrapolator[q](d-1)*s))
else:
y = np.concatenate((self.get()[:,q],self.extrapolator[q](d)))
self.scenario_extrapolator.append(np.poly1d(np.polyfit(x,y[-(self.degree+1):],self.degree)))
#self.scenario_extrapolator.append(np.poly1d(np.polyfit(x,y[-1:],0)))
else:
if self.scenario.shape[1] != 1:
return()
self.scenario_extrapolator = []
for p in range(self.nSites):
d,s = self.scenario[p].transpose()
tmp_times = np.concatenate(([0],self.times,[self.dataEnd],d))
if type(self.degree)==int:
x = tmp_times[-(self.degree+1):]
tmp_scenario_extrapolator = []
for q in range(self.nParams):
if q==0:
y = np.concatenate((self.get()[:,q,p],self.extrapolator[p][q](d-1)*s))
else:
y = np.concatenate((self.get()[:,q,p],self.extrapolator[p][q](d)))
tmp_scenario_extrapolator.append(np.poly1d(np.polyfit(x,y[-(self.degree+1):],self.degree)))
self.scenario_extrapolator.append(tmp_scenario_extrapolator)
return ()
# def vaccines_effect_omega(self):
# age_data = pd.read_csv('https://raw.githubusercontent.com/giovanniardenghi/dpc-covid-data/main/SUIHTER/stato_clinico.csv')
# vaccines = pd.read_csv('https://raw.githubusercontent.com/italia/covid19-opendata-vaccini/master/dati/somministrazioni-vaccini-latest.csv')
# age_groups = {'0-9': '0-19',
# '10-19': '0-19',
# '20-29': '20-39',
# '30-39': '20-39',
# '40-49': '40-59',
# '50-59': '40-59',
# '60-69': '60-79',
# '70-79': '60-79',
# '80-89': '80-89',
# '>90': '90+'}
# vaccines['data_somministrazione'] = pd.to_datetime(vaccines.data_somministrazione)
# vaccines[
# vaccines = vaccines.groupby(['data_somministrazione',age_groups],level=[0,1]).sum()
# print(vaccines)
# age_data['Data'] = pd.to_datetime(age_data.Data)
# age_data = age_data[age_data['Data']>=pd.to_datetime(self.dataStart)]
# age_data = age_data[age_data['Data']<=pd.to_datetime(self.dataStart)+pd.Timedelta(self.dataEnd,'days')]
# ages_dfs = [x[['Data','Isolated','Hospitalized']].set_index('Data') for ages,x in age_data.groupby('Età')]
# f_I = [si.interp1d(range(len(x)),x.Isolated.rolling(window=7,min_periods=1,center=True).mean(),fill_value="extrapolate") for x in ages_dfs]
# f_H = [si.interp1d(range(len(x)),x.Hospitalized.rolling(window=7,min_periods=1,center=True).mean(),fill_value="extrapolate") for x in ages_dfs]
# ages_dfs = [x.reset_index(drop=True) for x in ages_dfs]
# medie = pd.DataFrame(columns=['Isolated','Hospitalized'])
# for i,x in enumerate(ages_dfs):
# medie = medie.append(x[int(self.times[-1])+1:].mean(),ignore_index=True)
# def omegaI_reduction(t):
# multiplier=0
# for i,x in enumerate(ages_dfs):
# multiplier += np.clip(f_H[i](t),0,1)**2/np.clip(f_I[i](t-5),0,1)
# return multiplier/np.sum(medie.Hospitalized.values**2/medie.Isolated.values)
# self.omegaI_vaccines = omegaI_reduction
def vaccines_effect_omega(self):
age_data = pd.read_csv('https://raw.githubusercontent.com/giovanniardenghi/dpc-covid-data/main/SUIHTER/stato_clinico.csv')
age_data['Data'] = pd.to_datetime(age_data.Data)
age_data = age_data[age_data['Data']>=pd.to_datetime(self.dataStart)]
age_data = age_data[age_data['Data']<=pd.to_datetime(self.dataStart)+pd.Timedelta(self.dataEnd,'days')]
ages_dfs = [x[['Data','Isolated','Hospitalized']].set_index('Data') for ages,x in age_data.groupby('Età')]
f_I = [si.interp1d(range(len(x)),x.Isolated.rolling(window=7,min_periods=1,center=True).mean(),fill_value="extrapolate") for x in ages_dfs]
f_H = [si.interp1d(range(len(x)),x.Hospitalized.rolling(window=7,min_periods=1,center=True).mean(),fill_value="extrapolate") for x in ages_dfs]
ages_dfs = [x.reset_index(drop=True) for x in ages_dfs]
medie = pd.DataFrame(columns=['Isolated','Hospitalized'])
for i,x in enumerate(ages_dfs):
medie = medie.append(x[int(self.times[-1])+1:].mean(),ignore_index=True)
def omegaI_reduction(t):
multiplier=0
for i,x in enumerate(ages_dfs):
multiplier += np.clip(f_H[i](t),0,1)**2/np.clip(f_I[i](t-5),0,1)
return multiplier/np.sum(medie.Hospitalized.values**2/medie.Isolated.values)
self.omegaI_vaccines = omegaI_reduction
def vaccines_effect_gammaT(self):
age_data = pd.read_csv('https://raw.githubusercontent.com/giovanniardenghi/dpc-covid-data/main/SUIHTER/stato_clinico.csv')
age_data['Data'] = pd.to_datetime(age_data.Data)
age_data = age_data[age_data['Data']>=pd.to_datetime(self.dataStart)]
age_data = age_data[age_data['Data']<=pd.to_datetime(self.dataStart)+pd.Timedelta(self.dataEnd,'days')]
ages_dfs = [x[['Data','Threatened','Extinct','Daily_extinct']].set_index('Data') for ages,x in age_data.groupby('Età')]
f_T = [si.interp1d(range(len(x)),x.Threatened.rolling(window=7,min_periods=1,center=True).mean(),kind='nearest',fill_value="extrapolate") for x in ages_dfs]
f_dE = [si.interp1d(range(len(x)),x.Daily_extinct.rolling(window=14,min_periods=1,center=True).mean(),kind='nearest',fill_value="extrapolate") for x in ages_dfs]
f_E = [si.interp1d(range(len(x)),x.Extinct.rolling(window=7,min_periods=1,center=True).mean(),kind='nearest',fill_value="extrapolate") for x in ages_dfs]
ages_dfs = [x.reset_index(drop=True) for x in ages_dfs]
        medie = pd.DataFrame(columns=['Threatened','Extinct','Daily_extinct'])
for i,x in enumerate(ages_dfs):
medie = medie.append(x[:int(self.times[6])].mean(),ignore_index=True)
global gammaT_reduction
def gammaT_reduction(t):
multiplier=0
for i,x in enumerate(ages_dfs):
#multiplier += np.clip(f_E[i](t),0,1)*np.clip(f_dE[i](t),0,1)/np.clip(f_T[i](t-5),1e-5,1)
multiplier += np.clip(f_dE[i](t),0,1)*medie.iloc[i].Daily_extinct/medie.iloc[i].Threatened
            return np.maximum(multiplier/np.sum(medie.Extinct.values*medie.Daily_extinct.values/medie.Threatened),0)
self.gammaT_vaccines = gammaT_reduction
def vaccines_effect_gammaH(self):
age_data = pd.read_csv('https://raw.githubusercontent.com/giovanniardenghi/dpc-covid-data/main/SUIHTER/stato_clinico.csv')
age_data['Data'] = pd.to_datetime(age_data.Data)
age_data = age_data[age_data['Data']>=pd.to_datetime(self.dataStart)]
age_data = age_data[age_data['Data']<=pd.to_datetime(self.dataStart)+pd.Timedelta(self.dataEnd,'days')]
ages_dfs = [x[['Data','Hospitalized','Extinct','Daily_extinct']].set_index('Data') for ages,x in age_data.groupby('Età')]
f_H = [si.interp1d(range(len(x)),x.Hospitalized.rolling(window=7,min_periods=1,center=True).mean(),kind='nearest',fill_value="extrapolate") for x in ages_dfs]
f_dE = [si.interp1d(range(len(x)),x.Daily_extinct.rolling(window=14,min_periods=1,center=True).mean(),kind='nearest',fill_value="extrapolate") for x in ages_dfs]
f_E = [si.interp1d(range(len(x)),x.Extinct.rolling(window=7,min_periods=1,center=True).mean(),kind='nearest',fill_value="extrapolate") for x in ages_dfs]
ages_dfs = [x.reset_index(drop=True) for x in ages_dfs]
medie = pd.DataFrame(columns=['Hospitalized','Extinct','Daily_extinct'])
for i,x in enumerate(ages_dfs):
medie = medie.append(x[:int(self.times[6])].mean(),ignore_index=True)
global gammaH_reduction
def gammaH_reduction(t):
multiplier=0
for i,x in enumerate(ages_dfs):
#multiplier += np.clip(f_E[i](t),0,1)*np.clip(f_dE[i](t),0,1)/np.clip(f_T[i](t-5),1e-5,1)
multiplier += np.clip(f_dE[i](t),0,1)*medie.iloc[i].Daily_extinct/medie.iloc[i].Hospitalized
            return np.maximum(multiplier/np.sum(medie.Extinct.values*medie.Daily_extinct.values/medie.Hospitalized),0)
self.gammaH_vaccines = gammaH_reduction
def __saveCsv__(self,paramFileName):
with open(paramFileName, "w") as f:
print(f"[nParams]", file=f)
print(self.nParams, file=f)
print(f"[nSites]", file=f)
print(self.nSites, file=f)
print(f"[nPhases]", file=f)
print(self.nPhases, file=f)
print(f"[times]", file=f)
if len(self.times) != 0:
tmp = '\n'.join(map(lambda x: (self.dataStart + datetime.timedelta(days=int(x))).isoformat(), self.times))
print(tmp, file=f)
print(f"[constant]", file=f)
if len(self.constant) != 0:
tmp = ' '.join(('%f' % x).rstrip('0').rstrip('.') \
for x in self.constant)
print(tmp, file=f)
if len(self.constantSites) != 0:
print(f"[constantSites]", file=f)
tmp = ' '.join(('%f' % x).rstrip('0').rstrip('.') \
for x in self.constantSites)
print(tmp, file=f)
print(f"[Estimated]", file=f)
print(int(self.estimated),file=f)
print("", file=f)
print(f"[params]", file=f)
if len(self.params) != 0:
if self.nSites==1:
tmp = '\n'.join(' '.join(np.format_float_positional(x,precision=8,pad_right=8).rstrip('0').rstrip('.') \
for x in y) for y in self.params)
else:
tmp = '\n\n'.join('\n'.join(' '.join(np.format_float_positional(x,precision=8,pad_right=8).rstrip('0').rstrip('.') \
for x in y) for y in z) for z in np.moveaxis(self.params,-1,0))
print(tmp, file=f)
print("", file=f)
print(f"[mask]", file=f)
if len(self.mask) != 0:
if self.nSites == 1:
tmp = '\n'.join(' '.join(('%f' % x).rstrip('0').rstrip('.') \
for x in y) for y in self.mask)
else:
tmp = '\n\n'.join('\n'.join(' '.join(('%f' % x).rstrip('0').rstrip('.') \
for x in y) for y in z) for z in np.moveaxis(self.mask,-1,0))
print(tmp, file=f)
print("", file=f)
print(f"[l_bounds]", file=f)
if len(self.lower_bounds) != 0:
if self.nSites == 1:
tmp = '\n'.join(' '.join(('%f' % x).rstrip('0').rstrip('.') \
for x in y) for y in self.lower_bounds)
else:
tmp = '\n\n'.join('\n'.join(' '.join(('%f' % x).rstrip('0').rstrip('.') \
for x in y) for y in z) for z in np.moveaxis(self.lower_bounds,-1,0))
print(tmp, file=f)
print("", file=f)
print(f"[u_bounds]", file=f)
if len(self.upper_bounds) != 0:
if self.nSites == 1:
tmp = '\n'.join(' '.join(('%f' % x).rstrip('0').rstrip('.') \
for x in y) for y in self.upper_bounds)
else:
tmp = '\n\n'.join('\n'.join(' '.join(('%f' % x).rstrip('0').rstrip('.') \
for x in y) for y in z) for z in np.moveaxis(self.upper_bounds,-1,0))
print(tmp, file=f)
def __saveNpy__(self,paramFileName):
with open(paramFileName, 'wb') as f:
np.savez(f, nParams = self.nParams, \
nSites = self.nSites, \
nPhases = self.nPhases, \
estimated = self.estimated,\
times = self.times, \
constant = self.constant, \
params = self.params, \
mask = self.mask, \
lower_bounds = self.lower_bounds, \
upper_bounds = self.upper_bounds )
def __loadCsv__(self,paramFileName):
with open(paramFileName, 'rb') as f:
content = f.readlines()
self.nParams = int(readSection(content,b'[nParams]',1))
try:
self.nSites = int(readSection(content,b'[nSites]',1))
except:
self.nSites = 1
self.nPhases = int(readSection(content,b'[nPhases]',1))
tmp = readTimes(content, b'[times]', self.nPhases - 1)
self.times = np.reshape([int((x-self.dataStart).days) for x in tmp],self.nPhases - 1)
try:
self.constant = np.reshape( \
readSection(content, b'[constant]', self.nParams), \
self.nParams)
except:
self.constant = np.zeros(self.nParams)
if self.nSites > 1:
try:
self.constantSites = np.reshape( \
readSection(content, b'[constantSites]', self.nParams), \
self.nParams)
except:
self.constantSites = np.zeros(self.nParams)
try:
self.estimated = bool(readSection(content,b'[Estimated]',1))
except:
self.estimated = False
nParams = self.nParams * self.nPhases if not self.estimated else self.nParams * self.nPhases * self.nSites
self.params = readSection(content, b'[params]', nParams)
if not self.estimated:
self.params = np.tile(self.params, (self.nSites,1))
self.params = np.reshape(self.params, (self.nSites, self.nPhases, self.nParams))
self.params=np.moveaxis(self.params,0,-1).squeeze()
self.mask = readSection(content, b'[mask]', nParams)
if not self.estimated:
self.mask = np.tile(self.mask, (self.nSites,1))
self.mask = np.reshape(self.mask, (self.nSites, self.nPhases, self.nParams))
self.mask=np.moveaxis(self.mask,0,-1).squeeze()
self.lower_bounds = readSection(content, b'[l_bounds]', nParams)
if not self.estimated:
self.lower_bounds = np.tile(self.lower_bounds, (self.nSites,1))
self.lower_bounds = np.reshape(self.lower_bounds, (self.nSites,self.nPhases, self.nParams))
self.lower_bounds = np.moveaxis(self.lower_bounds,0,-1).squeeze()
self.upper_bounds = readSection(content, b'[u_bounds]', nParams)
if not self.estimated:
self.upper_bounds = np.tile(self.upper_bounds, (self.nSites,1))
self.upper_bounds = np.reshape(self.upper_bounds, (self.nSites, self.nPhases, self.nParams))
self.upper_bounds = np.moveaxis(self.upper_bounds,0,-1).squeeze()
def __loadNpy__(self,paramFileName):
with open(paramFileName, 'rb') as f:
data = np.load(f)
self.nParams = data['nParams']
try:
self.nSites = data['nSites']
except:
self.nSites = 1
self.nPhases = data['nPhases']
self.times = data['times']
try:
self.constant = data['constant']
except:
                self.constant = np.zeros(self.nParams)
try:
self.estimated = data['estimated']
except:
self.estimated = False
self.params = data['params']
self.mask = data['mask']
self.lower_bounds = data['lower_bounds']
self.upper_bounds = data['upper_bounds']
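# --- Illustrative sketch, not part of the original class --------------------
# A minimal, self-contained example of the extrapolation idea used in
# forecast()/atTime(): fit a low-degree polynomial through the last few
# (phase time, parameter value) pairs and evaluate it past the calibration
# window, clamping at zero as the np.maximum calls above do. The arrays and
# names below are invented for the illustration only.
if __name__ == '__main__':
    import numpy as np
    phase_times = np.array([0.0, 14.0, 28.0, 42.0])   # days at which phases change
    beta_values = np.array([0.30, 0.22, 0.18, 0.15])   # one parameter value per phase
    deg = 1                                             # linear extrapolation
    fit = np.poly1d(np.polyfit(phase_times[-(deg + 1):], beta_values[-(deg + 1):], deg))
    for t in (45, 50, 60):
        print(t, max(float(fit(t)), 0.0))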
|
[
"giovanni.ardenghi@polimi.it"
] |
giovanni.ardenghi@polimi.it
|
c45e8e0400ff9d0a80d3861ee9d4f16481928447
|
845e3c428e18232777f17b701212dcbb1b72acc1
|
/lib/fast_rcnn/test_upper_body.py
|
a0d72a59ea4af92083ed2328b831359d2c136799
|
[
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
chuanxinlan/ohem-1
|
dd10b2f5ff15e81ab9e42e936bb44d98e01c6795
|
b7552ceb8ed1e9768e0d522258caa64b79834b54
|
refs/heads/master
| 2021-09-16T18:31:25.651432
| 2018-06-23T10:09:24
| 2018-06-23T10:09:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,043
|
py
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from fast_rcnn.nms_wrapper import nms
import cPickle
from utils.blob import im_list_to_blob
import os
from utils.cython_bbox import bbox_vote
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
# Make width and height be multiples of a specified number
im_scale_x = np.floor(im.shape[1] * im_scale / cfg.TEST.SCALE_MULTIPLE_OF) * cfg.TEST.SCALE_MULTIPLE_OF / im.shape[1]
im_scale_y = np.floor(im.shape[0] * im_scale / cfg.TEST.SCALE_MULTIPLE_OF) * cfg.TEST.SCALE_MULTIPLE_OF / im.shape[0]
im = cv2.resize(im_orig, None, None, fx=im_scale_x, fy=im_scale_y,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(np.array([im_scale_x, im_scale_y, im_scale_x, im_scale_y]))
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois, levels = _project_im_rois(im_rois, im_scale_factors)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
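# Note on the level selection above: with several pyramid scales, each RoI is
# assigned to the scale whose scaled area is closest to 224 x 224 (the typical
# training crop area); with a single scale every RoI simply uses level 0.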
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
if not cfg.TEST.HAS_RPN:
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
return blobs, im_scale_factors
def im_detect(net, im, _t, boxes=None):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals or None (for RPN)
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
_t['im_preproc'].tic()
blobs, im_scales = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.TEST.HAS_RPN:
im_blob = blobs['data']
blobs['im_info'] = np.array(
[np.hstack((im_blob.shape[2], im_blob.shape[3], im_scales[0]))],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
if cfg.TEST.HAS_RPN:
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
else:
net.blobs['rois'].reshape(*(blobs['rois'].shape))
# do forward
net.blobs['data'].data[...] = blobs['data']
#forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
if cfg.TEST.HAS_RPN:
net.blobs['im_info'].data[...] = blobs['im_info']
#forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
else:
net.blobs['rois'].data[...] = blobs['rois']
#forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
_t['im_preproc'].toc()
_t['im_net'].tic()
blobs_out = net.forward()
_t['im_net'].toc()
#blobs_out = net.forward(**forward_kwargs)
_t['im_postproc'].tic()
if cfg.TEST.HAS_RPN:
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
#---------------_cg_ added upper body --------------------
scores_upper_body = blobs_out['cls_prob_upper_body']
rois_upper_body = rois.copy()
rois_upper_body[:, 4] = \
(rois_upper_body[:, 2] + rois_upper_body[:, 4]) / 2
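        # rois columns are (batch_idx, x1, y1, x2, y2); replacing y2 with the
        # midpoint of y1 and y2 keeps only the top half of each proposal as
        # the "upper body" box before regression and clipping.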
boxes_upper_body = rois_upper_body[:, 1:5] / im_scales[0]
upper_body_deltas = blobs_out['upper_body_pred']
pred_upper_body = bbox_transform_inv(boxes_upper_body, \
upper_body_deltas)
pred_upper_body = clip_boxes(pred_upper_body, im.shape)
#---------------end _cg_ added upper body --------------------
_t['im_postproc'].toc()
return scores, pred_boxes, scores_upper_body, pred_upper_body
def vis_detections(im, class_name, dets, thresh=0.3):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
for i in xrange(np.minimum(10, dets.shape[0])):
bbox = dets[i, :4]
score = dets[i, -1]
if score > thresh:
plt.cla()
plt.imshow(im)
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=3)
)
plt.title('{} {:.3f}'.format(class_name, score))
plt.show()
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(num_classes)]
for cls_ind in xrange(num_classes):
for im_ind in xrange(num_images):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
# CPU NMS is much faster than GPU NMS when the number of boxes
# is relative small (e.g., < 10k)
# TODO(rbg): autotune NMS dispatch
keep = nms(dets, thresh, force_cpu=True)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
def test_net(net, imdb, max_per_image=100, thresh=0.05, vis=False):
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes + 1)]
output_dir = get_output_dir(imdb, net)
# timers
_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
if not cfg.TEST.HAS_RPN:
roidb = imdb.roidb
for i in xrange(num_images):
# filter out any ground truth boxes
if cfg.TEST.HAS_RPN:
box_proposals = None
else:
# The roidb may contain ground-truth rois (for example, if the roidb
# comes from the training or val split). We only want to evaluate
# detection on the *non*-ground-truth rois. We select those the rois
# that have the gt_classes field set to 0, which means there's no
# ground truth.
box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
im = cv2.imread(imdb.image_path_at(i))
scores, boxes, scores_upper_body, boxes_upper_body = \
im_detect(net, im, _t, box_proposals)
_t['misc'].tic()
# skip j = 0, because it's the background class
for j in xrange(1, imdb.num_classes):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, cfg.TEST.NMS)
dets_NMSed = cls_dets[keep, :]
'''
if cfg.TEST.BBOX_VOTE:
cls_dets = bbox_vote(dets_NMSed, cls_dets)
else:
cls_dets = dets_NMSed
'''
cls_dets = dets_NMSed
#--------------- _cg_ added upper body --------------------
inds = np.where(scores_upper_body[:, j] > thresh)[0]
cls_scores_upper_body = scores_upper_body[inds, j]
cls_boxes_upper_body = boxes_upper_body[inds, j*4:(j+1)*4]
cls_dets_upper_body = np.hstack((cls_boxes_upper_body,
cls_scores_upper_body[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets_upper_body, cfg.TEST.NMS)
dets_NMSed = cls_dets_upper_body[keep, :]
cls_dets_upper_body = dets_NMSed
#--------------- end _cg_ added upper body --------------------
if vis:
vis_detections(im, imdb.classes[j], cls_dets)
all_boxes[j][i] = cls_dets
all_boxes[j + 1][i] = cls_dets_upper_body
'''
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in xrange(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
'''
_t['misc'].toc()
print 'im_detect: {:d}/{:d} net {:.3f}s preproc {:.3f}s postproc {:.3f}s misc {:.3f}s' \
.format(i + 1, num_images, _t['im_net'].average_time,
_t['im_preproc'].average_time, _t['im_postproc'].average_time,
_t['misc'].average_time)
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
# print 'Evaluating detections'
# imdb.evaluate_detections(all_boxes, output_dir)
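# --- Illustrative sketch, not part of the original file ---------------------
# Plain-NumPy greedy non-maximum suppression, included only to clarify what
# the nms() calls above do conceptually; the project itself dispatches to the
# Cython/CUDA implementation behind fast_rcnn.nms_wrapper.
def _numpy_nms_sketch(dets, thresh):
    """dets: N x 5 array of (x1, y1, x2, y2, score); returns kept row indices."""
    import numpy as np
    x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]          # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # intersection of the kept box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # drop boxes overlapping the kept one by more than the threshold
        order = order[1:][iou <= thresh]
    return keep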
|
[
"cg@example.com"
] |
cg@example.com
|
1e318f5508cf947742b1b1bc218b4f29dba2cbbb
|
611129837d052598d1d310149dda24b252616d0c
|
/enroll/models.py
|
16bae801bf969caf48d6cf4622470cc8f340abc1
|
[] |
no_license
|
Monalipython/Student
|
b1e169b1ff9550dbde494e0f30f3d79d8cabe6fa
|
94a470ad1c28acfbe13ed833725c8e5f3d98d077
|
refs/heads/master
| 2023-08-17T13:08:13.039098
| 2021-09-19T08:04:41
| 2021-09-19T08:04:41
| 408,050,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
from django.db import models
# Create your models here.
class Profile(models.Model):
name = models.CharField(max_length=100)
email = models.EmailField(max_length=100)
password = models.CharField(max_length=100)
class Book(models.Model):
title = models.CharField(max_length=100)
author = models.CharField(max_length=100)
pdf = models.FileField(upload_to='books/pdfs/')
cover = models.ImageField(upload_to='books/covers/', null=True, blank=True)
def __str__(self):
return self.title
|
[
"monali.nimkar7@gmail.com"
] |
monali.nimkar7@gmail.com
|
e8ca07a932c0963eadc432912f1b306cfd4bce63
|
efb3f14e40cd89135aa2ee53c504da96844f74d1
|
/productsapi/views.py
|
8b7e5a4421bff7afe844605cb4f1fbb5c1f47876
|
[] |
no_license
|
ganesh7856/Assignment
|
da8424cdd01892f74a01adfeb2709e460e468d96
|
1fd7ce36223dd72d6e218c8b7b4ac89f2e9411da
|
refs/heads/master
| 2023-01-13T00:37:42.484196
| 2020-11-21T20:06:12
| 2020-11-21T20:06:12
| 314,884,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,963
|
py
|
from django.shortcuts import render
# Create your views here.
from rest_framework import viewsets
from rest_framework.views import APIView
from django.views.generic import ListView
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from productsapi.serializer import ProductSerializer, CategorySerializer
from products.models import Product, Category
class CategoryViewSet(viewsets.ModelViewSet):
queryset = Category.objects.all()
serializer_class = CategorySerializer
search_fields=('name',)
ordering_fields = ('name',)
authentication_classes = [TokenAuthentication, ]
permission_classes = [IsAuthenticated, ]
class ProductViewSet(viewsets.ModelViewSet):
queryset = Product.objects.all()
serializer_class = ProductSerializer
lookup_field = "slug"
search_fields = ('name','slug','price')
ordering_fields = ('name','slug','price')
authentication_classes = [TokenAuthentication, ]
permission_classes = [IsAuthenticated, ]
# class ProductsDetailView(APIView):
# queryset = Product.objects.all()
# serializer_class = ProductSerializer
#
# def get(self, request, *args, **kwargs):
# object = self.get_object()
# object.count = int(object.count) + 1
# object.save()
# return super(ProductsDetailView, self).get(self, request, *args, **kwargs)
class CategoryDetailView(viewsets.ModelViewSet):
queryset = Category.objects.order_by('name')
serializer_class = CategorySerializer
authentication_classes = [TokenAuthentication, ]
permission_classes = [IsAuthenticated, ]
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
for obj in queryset:
obj.view_count = int(obj.view_count) + 1
obj.save(update_fields=("view_count", ))
return super().list(request, *args, **kwargs)
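# --- Illustrative sketch, not part of the original file ---------------------
# How these ModelViewSets are typically exposed through a DRF router; the
# route prefixes and basenames below are assumptions, not taken from this
# project's urls.py. Note also that search_fields/ordering_fields only take
# effect when SearchFilter and OrderingFilter are enabled, either per-view via
# filter_backends or globally via REST_FRAMEWORK['DEFAULT_FILTER_BACKENDS'].
#
#   from rest_framework.routers import DefaultRouter
#   from productsapi.views import ProductViewSet, CategoryViewSet
#
#   router = DefaultRouter()
#   router.register(r'products', ProductViewSet, basename='product')
#   router.register(r'categories', CategoryViewSet, basename='category')
#   urlpatterns = router.urls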
|
[
"ganesh.a.jadhav7856"
] |
ganesh.a.jadhav7856
|
8675e6a6dfc3b446db0711fdad5cf5ba6734b1b7
|
3e2616d26d909634a8dd05877281368872d01ade
|
/Backup/BigbrotherClass.py
|
89b8613de19b418d662d98e551a27f2a61d35555
|
[] |
no_license
|
ENSAKIC/BigBrother
|
a627f6ab2253d8f87c7fb9cb8de83cdd6ae6f3ad
|
b8c9d889a5b27ce0517b23e329a0f6e91a83f836
|
refs/heads/master
| 2021-01-19T08:20:32.059715
| 2013-04-28T21:30:38
| 2013-04-28T21:30:38
| 9,736,449
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
"""Subclass of BBMain, which is generated by wxFormBuilder."""
import wx
import Bigbrother
import tesseract
import locale
# Implementing BBMain
class BigbrotherClass( Bigbrother.BBMain ):
def __init__( self, parent ):
Bigbrother.BBMain.__init__( self, parent )
locale.setlocale(locale.LC_ALL, 'C')
# Init the Tesseract API
api = tesseract.TessBaseAPI()
api.Init(".","fra",tesseract.OEM_DEFAULT)
api.SetVariable("tessedit_char_whitelist", "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ")
api.SetPageSegMode(tesseract.PSM_AUTO)
# Handlers for BBMain events.
def EventFileChanged( self, event ):
# TODO: Implement EventFileChanged
pass
|
[
"darkvador@DeathStar.(none)"
] |
darkvador@DeathStar.(none)
|
9febec6bd7f0a74d7a44f2976d85b2d2cc702447
|
baaef08af947854bbdcb6d7f92292fbb786d9014
|
/bridge_skeleton/models/core/product_template.py
|
e2f24767c7eaf05748e21cacff0cc55c8640cdfa
|
[] |
no_license
|
hafzalabbas/crm_demo
|
b0b5e2df79eddb4455c84d893ea24fb1836955bf
|
d14012a6dff1abd51aebe33a4c08ac8713ae05e9
|
refs/heads/master
| 2022-11-30T22:11:05.224969
| 2020-05-27T16:39:58
| 2020-05-27T16:39:58
| 254,611,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,345
|
py
|
# -*- coding: utf-8 -*-
##########################################################################
#
# Copyright (c) 2015-Present Webkul Software Pvt. Ltd. (<https://webkul.com/>)
# See LICENSE file for full copyright and licensing details.
# License URL : <https://store.webkul.com/license.html/>
#
##########################################################################
import binascii
import requests
from odoo import fields, api, models
from ..core.res_partner import _unescape
import logging
_logger = logging.getLogger(__name__)
class ProductTemplate(models.Model):
_inherit = "product.template"
config_sku = fields.Char(string='SKU')
connector_mapping_ids = fields.One2many(
string='Ecomm Channel Mappings',
comodel_name='connector.template.mapping',
inverse_name='name',
copy=False
)
connector_categ_ids = fields.One2many(
string='Connector Extra Category',
comodel_name='connector.extra.category',
inverse_name='product_tmpl_id',
copy=False
)
@api.model
def create(self, vals):
_logger.info("****create*******ProductTemplate**************** : %r", vals.keys())
ctx = dict(self._context or {})
_logger.info("*****create*****ctx******ProductTemplate*********** : %r", ctx.keys())
ecomm_cannels = dict(self.env['connector.snippet']._get_ecomm_extensions()).keys()
instance_id = ctx.get('instance_id')
if any(key in ctx for key in ecomm_cannels):
ecomm_id = vals.pop('ecomm_id', 0)
vals = self.update_vals(vals, instance_id, True)
response = super(ProductTemplate, self).create(vals)
if any(key in ctx for key in ecomm_cannels) and 'configurable' in ctx:
channel = "".join(list(set(ctx.keys())&set(ecomm_cannels))) or 'Ecommerce' + str(instance_id)
self.env['connector.snippet'].create_odoo_connector_mapping('connector.template.mapping', ecomm_id, response.id, instance_id, is_variants=True, created_by=channel)
return response
def write(self, vals):
_logger.info("****write*******ProductTemplate**************** : %r", vals.keys())
ctx = dict(self._context or {})
_logger.info("*****write*****ctx******ProductTemplate*********** : %r", ctx.keys())
instance_id = ctx.get('instance_id')
ecomm_cannels = dict(self.env['connector.snippet']._get_ecomm_extensions()).keys()
if any(key in ctx for key in ecomm_cannels):
vals.pop('ecomm_id', 0)
vals = self.update_vals(vals, instance_id)
for tempObj in self:
for tempMapObj in tempObj.connector_mapping_ids:
tempMapObj.need_sync = 'No' if instance_id and tempMapObj.instance_id.id == instance_id else 'Yes'
return super(ProductTemplate, self).write(vals)
def _create_variant_ids(self):
ctx = dict(self._context or {})
ecomm_cannels = dict(self.env['connector.snippet']._get_ecomm_extensions()).keys()
_logger.info("****self*******_create_variant_ids**************** : %r", [self, ctx, ecomm_cannels])
if any(key in ctx for key in ecomm_cannels):
_logger.info("--------ecomm_cannels----------- : %r", ecomm_cannels)
return True
else:
_logger.info("****Else******************** : %r", [self, ctx, ecomm_cannels])
return super(ProductTemplate, self)._create_variant_ids()
def update_vals(self, vals, instance_id, create=False):
if vals.get('default_code'):
vals['config_sku'] = _unescape(vals.pop('default_code', ''))
if 'name' in vals:
vals['name'] = _unescape(vals['name'])
if 'description' in vals:
vals['description'] = _unescape(vals['description'])
if 'description_sale' in vals:
vals['description_sale'] = _unescape(vals['description_sale'])
category_ids = vals.pop('category_ids', None)
if category_ids:
categ_ids = list(set(category_ids))
default_categ_obj = self.env["connector.instance"].browse(instance_id).category
if default_categ_obj and create:
vals['categ_id'] = default_categ_obj.id
if create:
extra_categ_objs = self.env['connector.extra.category'].create({
'instance_id':instance_id, 'categ_ids': [(6, 0, categ_ids)]
})
vals['connector_categ_ids'] = [(6, 0, [extra_categ_objs.id])]
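                # (6, 0, ids) is the Odoo ORM "replace" command for x2many
                # fields: the linked set becomes exactly the records in ids.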
else:
extra_categ_objs = self.connector_categ_ids.filtered(lambda obj: obj.instance_id.id == instance_id)
if extra_categ_objs:
extra_categ_objs.write({'categ_ids': [(6, 0, categ_ids)]})
else:
extra_categ_objs = self.env['connector.extra.category'].create({
'instance_id':instance_id, 'categ_ids': [(6, 0, categ_ids)]
})
vals['connector_categ_ids'] = [(6, 0, [extra_categ_objs.id])]
image_url = vals.pop('image_url', False)
if image_url:
vals['image_1920'] = binascii.b2a_base64(requests.get(image_url, verify=False).content)
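            # The cover image is downloaded with SSL certificate verification
            # disabled (verify=False) and stored base64-encoded in image_1920.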
vals.pop('attribute_list', None)
vals.pop('magento_stock_id', None)
return vals
|
[
"noreply@github.com"
] |
noreply@github.com
|
a7438ca02484cd42c1d46f32f2b6415efa83040e
|
cadb25b610777d1a91404c7dcfe3d29ca1ddd542
|
/apps/localidades/migrations/0010_alter_localidade_nomelocalidade.py
|
cb9f7aeb7196267ac6b6462739e16d51937b8d84
|
[] |
no_license
|
luanaAlm/sistema_ebd
|
851b8d98979e33187ec68b301910fe0c309a1ce2
|
ec6a97ddf413e5b10ddff20a781e37ddce77794d
|
refs/heads/main
| 2023-08-28T01:10:27.381064
| 2021-10-18T23:11:25
| 2021-10-18T23:11:25
| 415,992,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
# Generated by Django 3.2.7 on 2021-10-06 18:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('localidades', '0009_alter_localidade_nomelocalidade'),
]
operations = [
migrations.AlterField(
model_name='localidade',
name='nomeLocalidade',
field=models.CharField(max_length=100, verbose_name='Igreja'),
),
]
|
[
"luanarodrigues3211@gmail.com"
] |
luanarodrigues3211@gmail.com
|
13279672b8c47331a37e9052b40787fc939702ac
|
5b85703aa0dd5a6944d99370a5dde2b6844517ec
|
/03.Python/15.ZerosandOnes.py
|
4d5e2053608bce9ef159ceccd2e274087611e083
|
[] |
no_license
|
alda07/hackerrank
|
255329196e6a4b9d598c3f51790caf4a99a755bc
|
a09091f859e87462c95ee856cbbd0ad9b5992159
|
refs/heads/master
| 2021-10-24T07:38:34.795632
| 2019-03-23T17:29:32
| 2019-03-23T17:29:32
| 90,329,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
# zeros
# import numpy
# print (numpy.zeros((1, 2)))
# print (numpy.zeros((1, 2), dtype = numpy.int))
# ones
# import numpy
# print (numpy.ones((1, 2)))
# print (numpy.ones((1, 2), dtype = numpy.int))
import numpy
list_i = list(map(int,input().split()))
print(numpy.zeros(list_i, dtype = numpy.int))
print(numpy.ones(list_i, dtype = numpy.int))
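# Illustrative note (not part of the original solution): for the input "1 2"
# the shape becomes (1, 2) and the two prints produce
#   [[0 0]]
#   [[1 1]]
# numpy.int was still an alias for the builtin int in the NumPy versions this
# exercise targets; recent NumPy releases have removed that alias.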
|
[
"hanh.vo.programmer@gmail.com"
] |
hanh.vo.programmer@gmail.com
|
3b68926d2b085942c1fa005f821aa58397bc197f
|
0d38d4b4f9f179724f2fbf685e8381a2bac0912f
|
/tests/test_response.py
|
abb87f59787116c3bb92ae48078c90fc6983b060
|
[] |
permissive
|
grantmcconnaughey/django-reports
|
20d047df704b2dc2adc9e486220549d8f0412ac6
|
34fbd723fc5907e6f87c95cba8f11724e03d89ab
|
refs/heads/master
| 2023-01-09T22:46:42.065299
| 2016-01-18T04:14:49
| 2016-01-18T04:14:49
| 49,586,842
| 2
| 0
|
BSD-3-Clause
| 2022-12-26T20:00:25
| 2016-01-13T16:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,214
|
py
|
from django.test import TestCase
from djreports import Report
from djreports.response import CSVReportResponse
class CSVReportResponseTests(TestCase):
def test_response_has_200_status_code(self):
report = Report([['Hello', 'World'], ['Hello', 'World']])
response = CSVReportResponse(report)
self.assertEqual(response.status_code, 200)
def test_response_has_csv_content_type(self):
report = Report([['Hello', 'World'], ['Hello', 'World']])
response = CSVReportResponse(report)
self.assertEqual(response._headers['content-type'],
('Content-Type', 'text/csv'))
def test_response_uses_default_file_name(self):
report = Report([['Hello', 'World'], ['Hello', 'World']])
response = CSVReportResponse(report)
self.assertEqual(response._headers['content-disposition'],
('Content-Disposition', 'attachment; filename="report.csv"'))
def test_response_has_csv_file_content(self):
report = Report([['Col1', 'Col2'], ['Cell1', 'Cell2']])
response = CSVReportResponse(report)
self.assertEqual(response.content.decode(), 'Col1,Col2\r\nCell1,Cell2\r\n')
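# Note: response._headers is a private attribute of Django's HttpResponse that
# was removed in Django 3.2; on newer Django versions the equivalent checks
# would read response.headers['Content-Type'] or response['Content-Type'].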
|
[
"grantmcconnaughey@gmail.com"
] |
grantmcconnaughey@gmail.com
|
3ad1f03b5b5f2d7eca5e84e51e13b8539c377bfa
|
aae908c86413f51c717c031f82d502828f9fd0fd
|
/regular_expression_part1.py
|
65ac85fdd0e3eed8c3b33f5d31cb5cf7d8447c34
|
[] |
no_license
|
ramyashree581/Python_Code
|
2e27c4761ec8d06894575c62f1b6fddf868d332e
|
50e72c7acdaf97b4d71b80d51a1d4012dcdf3a94
|
refs/heads/master
| 2020-03-23T20:00:50.878361
| 2019-01-16T06:02:03
| 2019-01-16T06:02:03
| 142,015,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,026
|
py
|
import re
pattern = r'Ramya'
sequence = 'Ramya'
if re.match(pattern,sequence):
print "Match"
else:
print "Not a match "
r = re.match(pattern,sequence)
print r.group()
####################################
print re.search(r'R....', 'Ramya').group() #. matches any single character
print re.search(r'Ra\wy\w', 'RaMya').group() # \w matches a single word character (upper case, lower case, digit or '_')
print re.search(r'C\Wke', 'C@ke').group() # \W matches a single non-word (special) character
print re.search(r'Eat\scake', 'Eat cake').group() # \s matches whitespace (space, tab, newline)
print re.search(r'Cook\Se', 'Cookie').group() # \S matches a single non-whitespace character
print re.search(r'Eat\tcake', 'Eat\tcake').group() # \t matches a tab
print re.search(r'c\d\dkie', 'c00kie').group() # \d matches a digit
print re.search(r'^Eat', 'Eat cake').group() # ^ anchors the match at the start
print re.search(r'cake$', 'Eat cake').group() # $ anchors the match at the end
print re.search(r'Number: [^5]', 'Number: 3').group() # [^5] matches any character except 5
#######################greedy vs non greedy##################
heading = r'<h1>TITLE</h1>'
print re.match(r'<.*>', heading).group() #Prints everything, is greedy
print re.match(r'<.*?>', heading).group() #? makes it non greedy and prints only first few chatachters possible wil be matcehed
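# For the heading above, the greedy pattern r'<.*>' matches the entire string
# '<h1>TITLE</h1>', while the non-greedy r'<.*?>' stops at the first '>' and
# matches only '<h1>'.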
email_address = "Please contact us at: xyz@datacamp.com"
NEW_email_address = re.sub(r'[\w\.-]+@[\w\.-]+', r'ramyashree581@gmail.com', email_address)
print NEW_email_address
pattern = re.compile(r"cookie")
sequence = "Cake and cookie"
print pattern.search(sequence).group()
######################*************************
import re
import requests
the_idiot_url = 'https://www.gutenberg.org/files/2638/2638-0.txt'
def get_book(url):
raw = requests.get(url).text
start = re.search(r"\*\*\* START OF THIS PROJECT GUTENBERG EBOOK .* \*\*\*",raw ).end()
stop = re.search(r"II", raw).start()
text = raw[start:stop]
return text
def preprocess(sentence):
return re.sub('[^A-Za-z0-9.]+' , ' ', sentence).lower()
book = get_book(the_idiot_url)
processed_book = preprocess(book)
print(processed_book)
|
[
"ramyashree581@gmail.com"
] |
ramyashree581@gmail.com
|
42d2ccd0a08c1520cae02783637eee771aedda4f
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_196/ch31_2020_03_14_15_42_06_957078.py
|
7229a92343174b1d0b472e5e5af883e664d7d8d9
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
def eh_primo(a):
    # Checks whether a is prime by trial division over the odd divisors.
    if a == 2:
        return True
    if (a == 0) or (a == 1) or (a % 2 == 0):
        return False
    x = 3
    while x * x <= a:
        if a % x == 0:
            return False
        x += 2
    return True
|
[
"you@example.com"
] |
you@example.com
|
dd7c42bf3677ff4d5c0535593c8a3d205b5bbb4f
|
9404a8593ff2d82133897c9e187523d301df7888
|
/0x09-Unittests_and_integration_tests/client.py
|
09fe617f4bf9b728195056ec7874888a22e52d18
|
[] |
no_license
|
emna7/holbertonschool-web_back_end
|
ac2bc16e47f464530c4dee23497488c77377977e
|
744e6cb3bb67b2caa30f967708243b5474046961
|
refs/heads/main
| 2023-03-06T17:56:10.699982
| 2021-02-12T21:24:04
| 2021-02-12T21:24:04
| 305,394,170
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,473
|
py
|
#!/usr/bin/env python3
"""A github org client
"""
from typing import (
List,
Dict,
)
from utils import (
get_json,
access_nested_map,
memoize,
)
class GithubOrgClient:
"""A Githib org client
"""
ORG_URL = "https://api.github.com/orgs/{org}"
def __init__(self, org_name: str) -> None:
"""Init method of GithubOrgClient"""
self._org_name = org_name
@memoize
def org(self) -> Dict:
"""Memoize org"""
return get_json(self.ORG_URL.format(org=self._org_name))
@property
def _public_repos_url(self) -> str:
"""Public repos URL"""
return self.org["repos_url"]
@memoize
def repos_payload(self) -> Dict:
"""Memoize repos payload"""
return get_json(self._public_repos_url)
def public_repos(self, license: str = None) -> List[str]:
"""Public repos"""
json_payload = self.repos_payload
public_repos = [
repo["name"] for repo in json_payload
if license is None or self.has_license(repo, license)
]
return public_repos
@staticmethod
def has_license(repo: Dict[str, Dict], license_key: str) -> bool:
"""Static: has_license"""
assert license_key is not None, "license_key cannot be None"
try:
has_license = access_nested_map(repo, ("license", "key")) == license_key
except KeyError:
return False
return has_license
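# --- Illustrative sketch, not part of the original module -------------------
# Typical usage (it performs live GitHub API calls, so in the exercise it is
# normally exercised through mocked unit and integration tests instead):
#
#   client = GithubOrgClient("google")
#   client.org                                  # memoized org payload
#   client.public_repos()                       # all repository names
#   client.public_repos(license="apache-2.0")   # only repos with that license key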
|
[
"bhmemna7@gmail.com"
] |
bhmemna7@gmail.com
|
f136e4143c095943a038c5d18d26267dcce3717d
|
7950b777b68ff97d7ade05c0cc23d5b2b847c447
|
/mysimulation.py
|
d9f762c354163fad2befd89ea7881a1f0c1c1322
|
[] |
no_license
|
zhandongdong/PyPlan
|
08ffa79c7779f13f32a391dc0f8b633203f7770f
|
61240ce41899d112ebabaac8f628fd873f62e322
|
refs/heads/master
| 2021-05-28T22:32:54.581675
| 2015-08-25T23:06:30
| 2015-08-25T23:06:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,685
|
py
|
from agents import *
from simulators import *
# --------------------------------------------------------
# SET THESE VARIABLES BEFORE RUNNING A CUSTOM SIMULATION.
# --------------------------------------------------------
game_name = "-TEMP-NAME-"
output_file_name = "TEMPFILE.txt"
players_count = 2
simulation_count = 5
simulation_horizon = 20
# --------------------------------------------------------
# THESE VARIABLES SHOULD NOT BE MODIFIED HERE.
# --------------------------------------------------------
agents_list = []
simulator_obj = None
# --------------------------------------------------------
# USE THIS FUNCTION TO CREATE YOUR OWN SIMULATION.
# THIS FUNCTION SHOULD RETURN AN ARRAY WITH TWO VALUES.
# VALUE 0 - THE SIMULATOR OBJECT
# VALUE 1 - THE AGENTS LIST
# EXAMPLE : return [simulator_obj, agents_list]
# --------------------------------------------------------
def create_simulation():
# EXAMPLE CODE TO RUN A CONNECT4 GAME BETWEEN A RANDOM AND UCT AGENT (WITH SIMCOUNT = 100)
simulator_obj = connect4simulator.Connect4SimulatorClass(num_players = players_count)
agent_random = randomagent.RandomAgentClass(simulator=simulator_obj)
agent_uct = uctagent.UCTAgentClass(simulator=simulator_obj, rollout_policy=agent_random, tree_policy="UCB",
num_simulations=100,
uct_constant=0.8,
horizon=100,
time_limit=-1) #TIME LIMIT SHOULD BE -1 IF ONLY SIM COUNT IS TO BE CONSIDERED.
agents_list.append(agent_random)
agents_list.append(agent_uct)
return [simulator_obj, agents_list]
|
[
"shankarj@outlook.com"
] |
shankarj@outlook.com
|
62b9400ae29452a90e4bfe5f3f5a343dd988242d
|
cf25738acc2a44d7a77c20ef44b9bbcb5508b1ca
|
/second/migrations/0003_auto_20210719_1244.py
|
21f61f980e74da0f1bf8abd1202c3e622247acd3
|
[] |
no_license
|
liyaaugustine/djangoproject
|
8e1377dc46ebb907fa0db28b55a398c6178985e8
|
257ae04eb6a1797d500bf8dc11608ccf4f010f3e
|
refs/heads/master
| 2023-08-16T11:19:16.946606
| 2021-10-07T16:54:14
| 2021-10-07T16:54:14
| 373,740,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
# Generated by Django 3.2.3 on 2021-07-19 07:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('second', '0002_userdetails'),
]
operations = [
migrations.AddField(
model_name='userdetails',
name='parentname',
field=models.CharField(default='default', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='userdetails',
name='phone',
field=models.BigIntegerField(default=15),
preserve_default=False,
),
migrations.AddField(
model_name='userdetails',
name='place',
field=models.CharField(default='default', max_length=30),
preserve_default=False,
),
]
|
[
"liyaaugustinek@gmail.com"
] |
liyaaugustinek@gmail.com
|