Column schema (name, dtype, observed range / values):

| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 220 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 257 classes |
| content | string | length 2–10.3M |
| authors | list | length 1–1 |
| author_id | string | length 0–212 |
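The rows below stream from a code dataset with this schema hosted on the Hugging Face Hub. A minimal, hedged sketch of reading a few rows with the `datasets` library; the dataset path is a placeholder, not something this page confirms:

```python
# Hypothetical loading sketch: "user/dataset-name" is a placeholder path.
from datasets import load_dataset

ds = load_dataset("user/dataset-name", split="train", streaming=True)
for row in ds.take(2):
    print(row["repo_name"], row["path"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the source file
```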
a34d40955607245d7cd5152f6173d3ba1d85f7cd
|
0e867a76d0263d40f78a643d466ebfc10b0ac4f0
|
/activities/nmz/nmz_setup.py
|
bd9401b72f5fcf89e245c9ae516efefd4d687f25
|
[
"MIT"
] |
permissive
|
anordin95/replay_mouse
|
b49d29b0ce0c72ed347e178b982c96b93af678b9
|
569abe771cac3b639317b1ca97c98b0c486a4714
|
refs/heads/master
| 2022-07-04T18:28:57.299865
| 2020-05-16T14:33:41
| 2020-05-16T14:33:41
| 259,782,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
from pathlib import Path
import logging

from primitives.potion_tracker import setup_potions_tracker
from primitives.get_quick_pray_location import get_quick_pray_location
from primitives.record import record

PICKLE_FOLDER = Path('pickled_objects')
PRAYER_POTS_FILENAME = PICKLE_FOLDER / 'prayer_pots.pkl'
RANGE_POTS_FILENAME = PICKLE_FOLDER / 'ranging_pots.pkl'
ABSORPTION_POTS_FILENAME = PICKLE_FOLDER / 'absorption_pots.pkl'
QUICK_PRAY_LOC_FILE = PICKLE_FOLDER / 'quick_pray_loc.pkl'
ROCK_CAKE_ACTION_LIST_FILE = PICKLE_FOLDER / 'rock_cake_action_list.pkl'

log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
log_level = logging.INFO
logging.basicConfig(level=log_level, format=log_format)

# for use with prayer pots
# def setup():
#     setup_potions_tracker(filename=RANGE_POTS_FILENAME, potion_type='range')
#     setup_potions_tracker(filename=PRAYER_POTS_FILENAME, potion_type='prayer')

# for use with absorption pots
logger = logging.getLogger(__name__)  # was getLogger('__name__'), which names the logger literally "__name__"

def setup():
    logger.info("Record guzzling a rock cake. When done, press esc.")
    record(use_potions=False, filename=ROCK_CAKE_ACTION_LIST_FILE)
    get_quick_pray_location(filename=QUICK_PRAY_LOC_FILE)
    setup_potions_tracker(filename=RANGE_POTS_FILENAME, potion_type='range')
    setup_potions_tracker(filename=ABSORPTION_POTS_FILENAME, potion_type='absorption')

if __name__ == '__main__':
    setup()
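
# --- Hedged editor sketch (not part of the original file) ---
# Why the getLogger fix above matters: the original passed the string
# '__name__', which names the logger literally "__name__"; passing the module
# attribute __name__ gives the conventional per-module logger name.
if __name__ == '__main__':
    print(logging.getLogger('__name__').name)  # -> __name__
    print(logging.getLogger(__name__).name)    # -> __main__ when run as a script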
|
[
"anordin@butterflynetinc.com"
] |
anordin@butterflynetinc.com
|
6a27868511bae2d8a9d10a768aa6fea1b3b93397
|
7e246c308597762dccb129883706fb5f827b1f05
|
/examples/cli_debug.py
|
7cf1bec440e4396e14220a1389a6e98210f17e55
|
[] |
no_license
|
NGnius/casl
|
b54bdd26003e582d77bb04b4e80e13c34074b4ad
|
db5bc4fbf6819ba89d0258e4c24a7fa85273d145
|
refs/heads/master
| 2023-03-01T08:52:31.681391
| 2021-02-05T03:12:43
| 2021-02-05T03:12:43
| 330,711,583
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
#!/usr/bin/python3
import sys
import json

# Debug stub: read one JSON payload from stdin, ignore it, and reply with a
# canned error/action response.
data = input()
payload = json.loads(data)  # parsed only to validate the incoming JSON
response = json.dumps({"error": "debug error", "action": {"type": "Custom"}})
print(response)
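
# --- Hedged editor usage sketch (not part of the original file) ---
# The stub is meant to be driven by casl over a pipe; from a shell it can be
# exercised as (path assumed from the repo layout):
#   echo '{"query": "anything"}' | python3 examples/cli_debug.py
#   # -> {"error": "debug error", "action": {"type": "Custom"}}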
|
[
"ngniusness@gmail.com"
] |
ngniusness@gmail.com
|
1d539066706ca4f69d3130d49688deb922c477b3
|
98311c7b2b2257f14f0f4a0657363e893872798e
|
/project/src/python/practicum.py
|
e3f1dfcf9ef76f4b71a4dd1106d26832dc48802f
|
[
"MIT"
] |
permissive
|
aslupin/Yak-Ngaen-Project
|
fed9a264a863e1174c00ec8ad360f1c03422f393
|
c91b3cc83d2eda22b62fe877276bdd1a8a1b24fd
|
refs/heads/master
| 2022-01-28T02:44:39.385903
| 2019-05-09T13:36:04
| 2019-05-09T13:36:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,616
|
py
|
import usb

# RQ_SET_LED = 0
# RQ_SET_LED_VALUE = 1
# RQ_GET_SWITCH = 2
# RQ_GET_LIGHT = 3
RQ_GET_SOUND_PLAYER_I = 1
RQ_GET_SOUND_PLAYER_II = 2

####################################
def find_mcu_boards():
    '''
    Find all Practicum MCU boards attached to the machine, then return a list
    of USB device handles for all the boards

    >>> devices = find_mcu_boards()
    >>> first_board = McuBoard(devices[0])
    '''
    boards = [dev for bus in usb.busses()
              for dev in bus.devices
              if (dev.idVendor, dev.idProduct) == (0x16c0, 0x05dc)]
    return boards

####################################
class McuBoard:
    '''
    Generic class for accessing Practicum MCU board via USB connection.
    '''

    ################################
    def __init__(self, dev):
        self.device = dev
        self.handle = dev.open()

    ################################
    def usb_write(self, request, data=[], index=0, value=0):
        '''
        Send data output to the USB device (i.e., MCU board)
          request: request number to appear as bRequest field on the USB device
          index: 16-bit value to appear as wIndex field on the USB device
          value: 16-bit value to appear as wValue field on the USB device
        '''
        reqType = usb.TYPE_VENDOR | usb.RECIP_DEVICE | usb.ENDPOINT_OUT
        self.handle.controlMsg(
            reqType, request, data, value=value, index=index)

    ################################
    def usb_read(self, request, length=1, index=0, value=0):
        '''
        Request data input from the USB device (i.e., MCU board)
          request: request number to appear as bRequest field on the USB device
          length: number of bytes to read from the USB device
          index: 16-bit value to appear as wIndex field on the USB device
          value: 16-bit value to appear as wValue field on the USB device

        If successful, the method returns a tuple of the length specified,
        containing data returned from the MCU board.
        '''
        reqType = usb.TYPE_VENDOR | usb.RECIP_DEVICE | usb.ENDPOINT_IN
        buf = self.handle.controlMsg(
            reqType, request, length, value=value, index=index)
        return buf

####################################
class PeriBoard:

    ################################
    def __init__(self, mcu):
        self.mcu = mcu

    ################################
    # def get_sound_playeri(self):
    #     sound = self.mcu.usb_read(request=RQ_GET_SOUND_PLAYER_I, length=2)
    #     return sound[0]

    # def get_sound_playerii(self):
    #     sound = self.mcu.usb_read(request=RQ_GET_SOUND_PLAYER_II, length=2)
    #     return sound[0]

    def get_sound(self, player):
        '''
        Return the current reading of the given player's sound sensor on the
        peripheral board
        '''
        if player == RQ_GET_SOUND_PLAYER_I:
            sound = self.mcu.usb_read(request=RQ_GET_SOUND_PLAYER_I, length=2)
            return sound[0]
        elif player == RQ_GET_SOUND_PLAYER_II:
            sound = self.mcu.usb_read(request=RQ_GET_SOUND_PLAYER_II, length=2)
            return sound[0]
        # return (sound[1] * 256) + sound[0]
# ################################
# def set_led(self, led_no, led_state):
# '''
# Set status of LED led_no on peripheral board to led_state
# (0 => off, 1 => on)
# '''
# self.mcu.usb_write(request=RQ_SET_LED, index=led_no, value=led_state)
# # return
# ################################
# def set_led_value(self, value):
# '''
# Display right 3 bits of value on peripheral board's LEDs
# '''
# self.mcu.usb_write(request=RQ_SET_LED_VALUE, value=value)
# # return
# ################################
# def get_switch(self):
# '''
# Return a boolean value indicating whether the switch on the peripheral
# board is currently pressed
# '''
# state = self.mcu.usb_read(request=RQ_GET_SWITCH, length=1)
# return state[0] == 1
# ################################
# def get_light(self):
# '''
# Return the current reading of light sensor on peripheral board
# '''
# light = self.mcu.usb_read(request=RQ_GET_LIGHT, length=2)
# # light[1] *= 256
# # result = light[1] + light[0]
# return ( light[1] * 256 ) + light[0]
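
# --- Hedged editor usage sketch (not part of the original file) ---
# Typical use of the API above, assuming a Practicum board is attached and the
# legacy pyusb 0.x API (usb.busses / controlMsg) is installed:
if __name__ == '__main__':
    devices = find_mcu_boards()
    if devices:
        peri = PeriBoard(McuBoard(devices[0]))
        print(peri.get_sound(RQ_GET_SOUND_PLAYER_I))
    else:
        print('no Practicum MCU board found')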
|
[
"poon_arsene_lupin@hotmail.com"
] |
poon_arsene_lupin@hotmail.com
|
5dcf1531f3266b5a1c867bd6a62ba36a36b2bbc2
|
7b08ceb8c901a09e41d4a67804e2adf94142cb17
|
/description2process/__init__.py
|
2f99a8019b7c0dace78658a646cc5d28bfb7d318
|
[] |
no_license
|
simrit1/Description2Process
|
1e7cfcc4dc6bb762d69f27bbe1eedd4e0cef6a38
|
223372f3588f7ac67537eae3012667951b5543e0
|
refs/heads/master
| 2023-08-25T23:12:50.838804
| 2019-05-16T16:51:51
| 2019-05-16T16:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,370
|
py
|
import tensorflow as tf

# We need to enable eager execution for inference at the end of this notebook.
tfe = tf.contrib.eager
tfe.enable_eager_execution()

TFVERSION = '1.13'
import os
os.environ['TFVERSION'] = TFVERSION

# Import the pipeline stages
from description2process import data_generation
from description2process import contraction_expansion
from description2process import coreference_resolution
from description2process import clause_extraction
from description2process import activity_recognition
from description2process import activity_extraction
from description2process import structured_description
from description2process import xml_model
from description2process import visualization
from description2process import evaluation


# Returns the visualisation of a process description
# INPUT: process description in string format
def description2model(description, png=False):
    # step 1: contraction expansion
    description = contraction_expansion.expand_contractions(description)
    print("Step 1/8 DONE: contraction expansion")
    # step 2: coreference resolution
    description = coreference_resolution.resolve_coreferences(description)
    print("Step 2/8 DONE: coreference resolution")
    # step 3: clause extraction
    subsentences = clause_extraction.get_clauses(description)
    print("Step 3/8 DONE: extracted clauses")
    # step 4: label clauses
    labeled_clauses_df = activity_recognition.contains_activity_list(subsentences)
    print("Step 4/8 DONE: labeled clauses")
    # step 5: activity extraction
    df_activities = activity_extraction.get_activity_df(labeled_clauses_df)
    print("Step 5/8 DONE: extracted activities")
    # step 6: get a structured description
    str_descr = structured_description.get_structured_description(description, df_activities)
    print("Step 6/8 DONE: semi-structured descriptions")
    # step 7: get the XML format of the model
    xml = xml_model.structured2xml(str_descr)
    print("Step 7/8 DONE: model in XML")
    # step 8: visualize the model in XML
    model = visualization.xml2model(xml, png)
    print("Step 8/8 DONE: visualized model")
    return model


# Returns the XML format of the process description
# INPUT: process description in string format
def description2xml(description):
    # step 1: contraction expansion
    description = contraction_expansion.expand_contractions(description)
    print("Step 1/7 DONE: contraction expansion")
    # step 2: coreference resolution
    description = coreference_resolution.resolve_coreferences(description)
    print("Step 2/7 DONE: coreference resolution")
    # step 3: clause extraction
    subsentences = clause_extraction.get_clauses(description)
    print("Step 3/7 DONE: extracted clauses")
    # step 4: label clauses
    labeled_clauses_df = activity_recognition.contains_activity_list(subsentences)
    print("Step 4/7 DONE: labeled clauses")
    # step 5: activity extraction
    df_activities = activity_extraction.get_activity_df(labeled_clauses_df)
    print("Step 5/7 DONE: extracted activities")
    # step 6: get a structured description
    str_descr = structured_description.get_structured_description(description, df_activities)
    print("Step 6/7 DONE: semi-structured descriptions")
    # step 7: get the XML format of the model
    xml = xml_model.structured2xml(str_descr)
    print("Step 7/7 DONE: model in XML")
    return xml


# Returns the structured descriptions of raw process descriptions
# INPUT: pandas dataframe of process descriptions
def description2structured_df(description_df):
    # step 1: contraction expansion
    description_df = contraction_expansion.expand_contractions_df(description_df)
    print("Step 1/6 DONE: contraction expansion")
    # step 2: coreference resolution
    description_df = coreference_resolution.resolve_coreferences_df(description_df)
    print("Step 2/6 DONE: coreference resolution")
    # step 3: clause extraction
    description_df = clause_extraction.get_clauses_df(description_df)
    print("Step 3/6 DONE: extracted clauses")
    # step 4: label clauses
    labeled_clauses = activity_recognition.contains_activity_df(description_df)
    print("Step 4/6 DONE: labeled clauses")
    # step 5: activity extraction
    df_activities = activity_extraction.get_activity_df(labeled_clauses)
    print("Step 5/6 DONE: extracted activities")
    # step 6: get the structured descriptions
    str_descr = structured_description.get_structured_description_df(description_df, df_activities)
    print("Step 6/6 DONE: returned structured descriptions")
    return str_descr


# Returns the description after contraction expansion and coreference resolution.
# This type of description can be seen as a cleaned version of the original one.
# INPUT: pandas dataframe of process descriptions
def description2referenceresolved_df(description_df):
    # step 1: contraction expansion
    description_df = contraction_expansion.expand_contractions_df(description_df)
    # step 2: coreference resolution
    description_df = coreference_resolution.resolve_coreferences_df(description_df)
    return description_df


# Returns the description with a list containing the description's extracted clauses
# INPUT: pandas dataframe of process descriptions
def description2clauses_df(description_df):
    # step 1: contraction expansion
    description_df = contraction_expansion.expand_contractions_df(description_df)
    # step 2: coreference resolution
    description_df = coreference_resolution.resolve_coreferences_df(description_df)
    # step 3: clause extraction
    description_df = clause_extraction.get_clauses_df(description_df)
    return description_df


# Returns the description with a list containing the description's extracted clauses,
# plus an extra dataframe with all its labeled clauses
# INPUT: pandas dataframe of process descriptions
def description2labeledclauses_df(description_df):
    # step 1: contraction expansion
    description_df = contraction_expansion.expand_contractions_df(description_df)
    # step 2: coreference resolution
    description_df = coreference_resolution.resolve_coreferences_df(description_df)
    # step 3: clause extraction
    description_df = clause_extraction.get_clauses_df(description_df)
    # step 4: label clauses
    labeled_clauses = activity_recognition.contains_activity_df(description_df)
    return labeled_clauses, description_df
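
# --- Hedged editor usage sketch (not part of the original file) ---
# End-to-end use of the package, assuming its models are installed and
# configured (heavyweight, so shown as commented usage rather than run here):
#
#   from description2process import description2xml
#   xml = description2xml("The clerk checks the order. Then the clerk ships it.")
#
# Every description2* entry point above follows the same staged pattern:
# contraction expansion, coreference resolution, clause extraction, activity
# recognition/extraction, then (optionally) structuring, XML, and visualization,
# printing a "Step i/N DONE" line after each stage.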
|
[
"noreply@github.com"
] |
simrit1.noreply@github.com
|
c77f59e3b90ce19c50bd0a77c092b148f74acab0
|
9d30a8c8620640b5e18c6aa5aa4bca6c01a60182
|
/Code/utils/inference1.py
|
13954a3246b767b1a5ad098f07456a4cfcff6c9b
|
[
"MIT"
] |
permissive
|
zhouzhiyuan1/RADANet
|
00ed5e2380007b53f918788d9e44fcec26c9ce21
|
f0db67e5b16b5b566efd40402b7b2b2a5342d5ad
|
refs/heads/main
| 2023-08-31T18:50:05.560253
| 2021-11-02T11:37:59
| 2021-11-02T11:37:59
| 423,765,661
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,803
|
py
|
#!/usr/bin/env python3
# coding: utf-8

__author__ = 'cleardusk'

import numpy as np
from math import sqrt
import scipy.io as sio
import matplotlib.pyplot as plt

from .ddfa import reconstruct_vertex


def get_suffix(filename):
    """a.jpg -> jpg"""
    pos = filename.rfind('.')
    if pos == -1:
        return ''
    return filename[pos:]


def crop_img(img, roi_box):
    h, w = img.shape[:2]
    sx, sy, ex, ey = [int(round(_)) for _ in roi_box]
    dh, dw = ey - sy, ex - sx
    if len(img.shape) == 3:
        res = np.zeros((dh, dw, 3), dtype=np.uint8)
    else:
        res = np.zeros((dh, dw), dtype=np.uint8)
    if sx < 0:
        sx, dsx = 0, -sx
    else:
        dsx = 0
    if ex > w:
        ex, dex = w, dw - (ex - w)
    else:
        dex = dw
    if sy < 0:
        sy, dsy = 0, -sy
    else:
        dsy = 0
    if ey > h:
        ey, dey = h, dh - (ey - h)
    else:
        dey = dh
    res[dsy:dey, dsx:dex] = img[sy:ey, sx:ex]
    return res


def calc_hypotenuse(pts):
    bbox = [min(pts[0, :]), min(pts[1, :]), max(pts[0, :]), max(pts[1, :])]
    center = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]
    radius = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) / 2
    bbox = [center[0] - radius, center[1] - radius, center[0] + radius, center[1] + radius]
    llength = sqrt((bbox[2] - bbox[0]) ** 2 + (bbox[3] - bbox[1]) ** 2)
    return llength / 3


def parse_roi_box_from_landmark(pts):
    """calc roi box from landmark"""
    bbox = [min(pts[0, :]), min(pts[1, :]), max(pts[0, :]), max(pts[1, :])]
    center = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]
    radius = max(bbox[2] - bbox[0], bbox[3] - bbox[1]) / 2
    bbox = [center[0] - radius, center[1] - radius, center[0] + radius, center[1] + radius]
    llength = sqrt((bbox[2] - bbox[0]) ** 2 + (bbox[3] - bbox[1]) ** 2)
    center_x = (bbox[2] + bbox[0]) / 2
    center_y = (bbox[3] + bbox[1]) / 2
    roi_box = [0] * 4
    roi_box[0] = center_x - llength / 2
    roi_box[1] = center_y - llength / 2
    roi_box[2] = roi_box[0] + llength
    roi_box[3] = roi_box[1] + llength
    return roi_box


def parse_roi_box_from_bbox(bbox):
    left, top, right, bottom = bbox
    old_size = (right - left + bottom - top) / 2
    center_x = right - (right - left) / 2.0
    center_y = bottom - (bottom - top) / 2.0 + old_size * 0.14
    size = int(old_size * 1.58)
    roi_box = [0] * 4
    roi_box[0] = center_x - size / 2
    roi_box[1] = center_y - size / 2
    roi_box[2] = roi_box[0] + size
    roi_box[3] = roi_box[1] + size
    return roi_box


def dump_to_ply(vertex, tri, wfp):
    header = """ply
format ascii 1.0
element vertex {}
property float x
property float y
property float z
element face {}
property list uchar int vertex_indices
end_header"""
    n_vertex = vertex.shape[1]
    n_face = tri.shape[1]
    header = header.format(n_vertex, n_face)
    with open(wfp, 'w') as f:
        f.write(header + '\n')
        for i in range(n_vertex):
            x, y, z = vertex[:, i]
            f.write('{:.4f} {:.4f} {:.4f}\n'.format(x, y, z))
        for i in range(n_face):
            idx1, idx2, idx3 = tri[:, i]
            f.write('3 {} {} {}\n'.format(idx1 - 1, idx2 - 1, idx3 - 1))
    print('Dump to {}'.format(wfp))


def dump_vertex(vertex, wfp):
    sio.savemat(wfp, {'vertex': vertex})
    print('Dump to {}'.format(wfp))


def _predict_vertices(param, roi_bbox, dense, transform=True):
    vertex = reconstruct_vertex(param, dense=dense)
    sx, sy, ex, ey = roi_bbox
    scale_x = (ex - sx) / 120
    scale_y = (ey - sy) / 120
    vertex[0, :] = vertex[0, :] * scale_x + sx
    vertex[1, :] = vertex[1, :] * scale_y + sy
    s = (scale_x + scale_y) / 2
    vertex[2, :] *= s
    return vertex


def predict_68pts(param, roi_box):
    return _predict_vertices(param, roi_box, dense=False)


def predict_dense(param, roi_box):
    return _predict_vertices(param, roi_box, dense=True)


def draw_landmarks(img, pts, style='fancy', wfp=None, show_flg=False, **kwargs):
    """Draw landmarks using matplotlib"""
    height, width = img.shape[:2]
    plt.figure(figsize=(12, height / width * 12))
    plt.imshow(img[:, :, ::-1])
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
    plt.axis('off')
    if not isinstance(pts, (tuple, list)):
        pts = [pts]
    for i in range(len(pts)):
        if style == 'simple':
            plt.plot(pts[i][0, :], pts[i][1, :], 'o', markersize=4, color='g')
        elif style == 'fancy':
            alpha = 0.8
            markersize = 10
            lw = 1.5
            color = kwargs.get('color', 'r')
            markeredgecolor = kwargs.get('markeredgecolor', 'red')
            nums = [0, 17, 22, 27, 31, 36, 42, 48, 60, 68]
            # close eyes and mouths
            plot_close = lambda i1, i2: plt.plot([pts[i][0, i1], pts[i][0, i2]], [pts[i][1, i1], pts[i][1, i2]],
                                                 color=color, lw=lw, alpha=alpha - 0.1)
            plot_close(41, 36)
            plot_close(47, 42)
            plot_close(59, 48)
            plot_close(67, 60)
            for ind in range(len(nums) - 1):
                l, r = nums[ind], nums[ind + 1]
                plt.plot(pts[i][0, l:r], pts[i][1, l:r], color=color, lw=lw, alpha=alpha - 0.1)
                plt.plot(pts[i][0, l:r], pts[i][1, l:r], marker='o', linestyle='None', markersize=markersize,
                         color=color, markeredgecolor=markeredgecolor, alpha=alpha)
    if wfp is not None:
        plt.savefig(wfp, dpi=200)
        print('Save visualization result to {}'.format(wfp))
    if show_flg:
        plt.show()


def get_colors(image, vertices):
    [h, w, _] = image.shape
    vertices[0, :] = np.minimum(np.maximum(vertices[0, :], 0), w - 1)  # x
    vertices[1, :] = np.minimum(np.maximum(vertices[1, :], 0), h - 1)  # y
    ind = np.round(vertices).astype(np.int32)
    colors = image[ind[1, :], ind[0, :], :]  # n x 3
    return colors


def write_obj_with_colors(obj_name, vertices, triangles, colors):
    triangles = triangles.copy()  # meshlab starts with 1
    if obj_name.split('.')[-1] != 'obj':
        obj_name = obj_name + '.obj'
    # write obj
    with open(obj_name, 'w') as f:
        # write vertices & colors
        for i in range(vertices.shape[1]):
            s = 'v {:.4f} {:.4f} {:.4f} {} {} {}\n'.format(vertices[1, i], vertices[0, i], vertices[2, i],
                                                           colors[i, 2], colors[i, 1], colors[i, 0])
            f.write(s)
        # write f: ver ind / uv ind
        for i in range(triangles.shape[1]):
            s = 'f {} {} {}\n'.format(triangles[0, i], triangles[1, i], triangles[2, i])
            f.write(s)


def main():
    pass


if __name__ == '__main__':
    main()
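
# --- Hedged editor sketch (not part of the original file) ---
# crop_img pads out-of-range regions with zeros instead of raising, e.g. for a
# box that extends past the top-left corner of a 4x4 image:
if __name__ == '__main__':
    demo = np.arange(16, dtype=np.uint8).reshape(4, 4)
    print(crop_img(demo, [-2, -2, 2, 2]))  # zeros everywhere except the bottom-right 2x2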
|
[
"zy980203123@163.com"
] |
zy980203123@163.com
|
d008e616c943f18e5f7f5c090bc112e713db99cf
|
c4b7b5a9c56a9b6394a14704d2faf76754175473
|
/rooms/templatetags/is_booked.py
|
da615b5d82465d9cb146e16beb8eeaefaf53bbc4
|
[] |
no_license
|
seungjinhan/airbnb_clone_django
|
71a15e5242bad28fd96d5f47652a049a77f12f61
|
4c38780746409ea1ed9b4f5b02abca60326752c2
|
refs/heads/master
| 2022-12-02T15:14:39.341441
| 2020-08-23T13:50:42
| 2020-08-23T13:50:42
| 280,878,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
import datetime

from django import template

from reservations import models as reservation_model

register = template.Library()


@register.simple_tag
def is_booked(room, day):
    if day.number == 0:
        return False
    try:
        date = datetime.datetime(
            year=day.year, month=day.month, day=day.number)
        reservation_model.BookedDay.objects.get(
            day=date, reservation__room=room)
        print(date)
        print(room)
        return True
    except reservation_model.BookedDay.DoesNotExist:
        return False
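
# --- Hedged editor usage sketch (not part of the original file) ---
# In a template, after loading the tag library, the tag marks calendar days:
#   {% load is_booked %}
#   {% is_booked room day as booked %}
#   {% if booked %}<span class="booked">{{ day }}</span>{% endif %}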
|
[
"hanblues@gmail.com"
] |
hanblues@gmail.com
|
e5f7852757d20d9217562defb3d22da0c1893cb6
|
5e809acc62b080f1adad2c34e647241cdc5ad297
|
/myenv/bin/markdown_py
|
fa2c63491a1647ccda5e1725538898c521cfc6a8
|
[
"MIT"
] |
permissive
|
RootenberG/My-blog-project
|
f520af79a2f3eb416b3dadee46813a812ce9d53d
|
7ef4670cfa9d54d9345d52ca008aae5fed5605bc
|
refs/heads/master
| 2020-08-15T20:04:29.478049
| 2020-02-08T21:57:46
| 2020-02-08T21:57:46
| 215,400,930
| 0
| 0
|
MIT
| 2019-10-30T20:54:38
| 2019-10-15T21:34:30
|
Python
|
UTF-8
|
Python
| false
| false
| 255
|
#!/home/den/devspace/My-blog-project/myenv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys

from markdown.__main__ import run

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
|
[
"savichevdenis244@gmail.com"
] |
savichevdenis244@gmail.com
|
|
ac65b404dace4784df733dfdfafadcc28eb379aa
|
c135da511684bfb267a8bac5a84b3f032b2d7b26
|
/algorithms/delete-columns-to-make-sorted/DeleteColumnsToMakeSorted.py
|
b25bbcf217bb13ce0ca196ee9f751ee7ef66193b
|
[] |
no_license
|
hieudtrinh/coursera_python
|
0bc9841a8be7ea38b5fdf9bf1d2bcd6e40387e60
|
6bd01f898eca617ec3c6ad215f47f7f41265dd4f
|
refs/heads/main
| 2023-02-16T20:59:46.617531
| 2021-01-19T19:05:35
| 2021-01-19T19:05:35
| 319,189,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,249
|
py
|
import sys
from typing import List


class DeleteColumnsToMakeSorted:
    def minDeletionSize(self, A: List[str]) -> int:
        return sum(col != tuple(sorted(col)) for col in zip(*A))

    def minDeletionSize1(self, A: List[str]) -> int:
        res = 0
        for pos in range(len(A[0])):
            for word in range(len(A) - 1):
                if A[word][pos] > A[word + 1][pos]:
                    res += 1
                    break
        return res

    def minDeletionSize2(self, A: List[str]) -> int:
        strings = []
        for i in range(0, len(A[0])):
            temp = "".join([item[i] for item in A])
            if "".join(sorted(temp)) == temp:
                pass
            else:
                strings.append(1)
        return len(strings)

    def minDeletionSize3(self, A: List[str]) -> int:
        l = []
        k = []
        for i in range(len(A[0])):
            for j in range(len(A)):
                l.append(A[j][i])
            if l != sorted(l):
                k.append(i)
            l = []
        return len(k)


def main(argv, arc):
    A = ["cba", "daf", "ghi"]
    solution = DeleteColumnsToMakeSorted()
    solution.minDeletionSize(A)


if __name__ == '__main__':
    main(sys.argv, len(sys.argv))
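
# --- Hedged editor sketch (not part of the original file) ---
# zip(*A) iterates columns as tuples: for A = ["cba", "daf", "ghi"] they are
# ('c','d','g'), ('b','a','h'), ('a','f','i'); only the middle one is unsorted.
if __name__ == '__main__':
    print(DeleteColumnsToMakeSorted().minDeletionSize(["cba", "daf", "ghi"]))  # -> 1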
|
[
"user.namecd"
] |
user.namecd
|
98ae73f5af580dce3fc708af8516af5e1c67bbf3
|
50e03dae243af6bfab19f8cf42494284ff70fbd3
|
/BIG-BIRD/RelGAN.py
|
05e0634536e46c4d7140e7c904e0f5d7773baeb5
|
[] |
no_license
|
BritneyMuller/Summarization-Lab
|
bf2d79abe724e999e4017d4ffe6220863fe7f162
|
4b40f5ac7a629f509c323bf426d3058268628186
|
refs/heads/master
| 2021-01-25T23:13:13.669487
| 2019-09-30T14:38:13
| 2019-09-30T14:38:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,010
|
py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import matplotlib.pyplot as plt
import os
import torch.autograd as autograd
from RelationalMemory import *
from Transformer import *


class BigBird():
    # generator is translator here
    def __init__(self, generator, discriminator, reconstructor, dictionary, gamma=0.99, clip_value=0.1,
                 lr_G=5e-5, lr_D=5e-5, lr_R=1e-4, LAMBDA=10, TEMP_END=0.5, vq_coef=0.8,
                 device=torch.device("cuda" if torch.cuda.is_available() else "cpu")):
        super(BigBird, self).__init__()
        self.device = device
        self.dictionary = dictionary
        self.generator = generator.to(self.device)
        self.reconstructor = reconstructor.to(self.device)
        self.discriminator = discriminator.to(self.device)
        self.gamma = gamma
        self.eps = np.finfo(np.float32).eps.item()
        self.optimizer_R = torch.optim.Adam(list(self.generator.parameters()) + list(self.reconstructor.parameters()), lr=lr_R)
        # normal WGAN
        self.optimizer_G = torch.optim.RMSprop(self.generator.parameters(), lr=lr_G)
        self.optimizer_D = torch.optim.RMSprop(self.discriminator.parameters(), lr=lr_D)
        # WGAN-GP
        # self.LAMBDA = LAMBDA  # gradient penalty lambda hyperparameter
        # self.optimizer_G = torch.optim.Adam(self.generator.parameters(), lr=lr_G, betas=(0.0, 0.9))
        # self.optimizer_D = torch.optim.Adam(self.discriminator.parameters(), lr=lr_D, betas=(0.0, 0.9))
        self.clip_value = clip_value
        self.TEMP_END = TEMP_END
        self.lr_G = lr_G
        self.lr_D = lr_D
        self.lr_R = lr_R
        self.total_steps = 0
        self.vq_coef = 0.8
        self.epoch = 0

    def calc_gradient_penalty(self, netD, real_data, fake_data):
        BATCH_SIZE = real_data.shape[0]
        dim_1 = real_data.shape[1]
        dim_2 = real_data.shape[2]
        alpha = torch.rand(BATCH_SIZE, dim_1)
        alpha = alpha.view(-1, 1).expand(dim_1 * BATCH_SIZE, dim_2).view(BATCH_SIZE, dim_1, dim_2)
        alpha = alpha.to(self.device)
        # real_data / fake_data: [BATCH_SIZE, 19, vocab_sz]
        interpolates_data = (alpha * real_data.float() + ((1 - alpha) * fake_data.float()))
        interpolates = interpolates_data.to(self.device)
        # interpolates = netD.disguised_embed(interpolates_data)
        interpolates = autograd.Variable(interpolates, requires_grad=True)
        src_mask = (interpolates_data.argmax(-1) != netD.padding_index).type_as(interpolates_data).unsqueeze(-2)
        disc_interpolates = netD.transformer_encoder(interpolates, src_mask)
        gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                                  grad_outputs=torch.ones(disc_interpolates.size()).to(self.device),
                                  create_graph=True, retain_graph=True, only_inputs=True)[0]
        gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * self.LAMBDA
        return gradient_penalty

    def _to_one_hot(self, y, n_dims):
        scatter_dim = len(y.size())
        y_tensor = y.to(self.device).long().view(*y.size(), -1)
        zeros = torch.zeros(*y.size(), n_dims).to(self.device)
        return zeros.scatter(scatter_dim, y_tensor, 1)

    def train_D(self, fake_datas, real_datas):
        ## train discriminator
        real_score = torch.mean(self.discriminator(real_datas))
        fake_score = torch.mean(self.discriminator(fake_datas))
        batch_d_loss = -real_score + fake_score  # + self.calc_gradient_penalty(self.discriminator, real_datas, fake_datas)
        return batch_d_loss, real_score.item(), fake_score.item()

    def train_G(self, fake_datas):
        self.optimizer_G.zero_grad()
        batch_g_loss = -torch.mean(self.discriminator(fake_datas))
        batch_g_loss.backward(retain_graph=True)
        self.optimizer_G.step()
        return batch_g_loss.item()

    def indicies2string(self, indices):
        inv_map = {v: k for k, v in self.dictionary.items()}
        return ' '.join([inv_map[i.item()] for i in indices])

    def train(self):
        self.generator.train()
        self.reconstructor.train()
        self.discriminator.train()

    def eval(self):
        self.generator.eval()
        self.reconstructor.eval()
        self.discriminator.eval()

    def load(self, load_path):
        print('load Bird from', load_path)
        loader = torch.load(load_path)
        self.generator.load_state_dict(loader['generator'])
        self.discriminator.load_state_dict(loader['discriminator'])
        self.reconstructor.load_state_dict(loader['reconstructor'])
        self.total_steps = loader['total_steps']
        self.epoch = loader['epoch']
        self.gumbel_temperature = loader['gumbel_temperature']

    def save(self, save_path):
        print('lay egg to ./Nest ... save as', save_path)
        torch.save({'generator': self.generator.state_dict(),
                    'reconstructor': self.reconstructor.state_dict(),
                    'discriminator': self.discriminator.state_dict(),
                    'total_steps': self.total_steps,
                    'epoch': self.epoch,
                    'gumbel_temperature': self.gumbel_temperature
                    }, save_path)

    def eval_iter(self, src, src_mask, max_len, real_data, ct, verbose=1):
        with torch.no_grad():
            batch_size = src.shape[0]
            memory = self.generator.initial_state(batch_size, trainable=True).to(self.device)
            summary_sample, summary_log_values, summary_probs, gumbel_one_hot = self.generator(src, max_len, memory, self.dictionary['[CLS]'], temperature=self.gumbel_temperature)
            memory = self.reconstructor.initial_state(batch_size, trainable=True).to(self.device)
            CE_loss, acc, out = self.reconstructor.reconstruct_forward(gumbel_one_hot, src, memory, self.dictionary['[CLS]'])
            if verbose == 1 and ct % 1 == 0:
                print("origin:")
                print(self.indicies2string(src[0]))
                print("summary:")
                print(self.indicies2string(summary_sample[0]))
                print("real summary:")
                print(self.indicies2string(real_data[0]))
                print("reconstruct out:")
                print(self.indicies2string(out[0]))
                print("")
        return acc, CE_loss.item()

    def pretrainGAN_run_iter(self, src, src_mask, max_len, real_data, D_iters=5, D_toggle='On', verbose=1):
        batch_size = src.shape[0]
        memory = self.generator.initial_state(batch_size, trainable=True).to(self.device)
        self.gumbel_temperature = max(self.TEMP_END, math.exp(-1e-4 * self.total_steps))
        summary_sample, summary_log_values, summary_probs, gumbel_one_hot = self.generator(src, max_len, memory, self.dictionary['[CLS]'], temperature=self.gumbel_temperature)
        batch_G_loss = 0
        NNcriterion = nn.NLLLoss().to(self.device)
        batch_G_loss = NNcriterion(summary_probs.log().contiguous().view(batch_size * max_len, -1), real_data.contiguous().view(-1))
        self.optimizer_G.zero_grad()
        batch_G_loss.backward()
        self.optimizer_G.step()
        self.total_steps += 1
        if self.total_steps % 500 == 0:
            if not os.path.exists("./Nest"):
                os.makedirs("./Nest")
            self.save("./Nest/Pretrain_RelGAN")
        if verbose == 1 and self.total_steps % 1000 == 0:
            print("origin:")
            print(self.indicies2string(src[0]))
            print("summary:")
            print(self.indicies2string(summary_sample[0]))
            print("real summary:")
            print(self.indicies2string(real_data[0]))
            print("")
        distrib = summary_probs[0, 0, :100].cpu().detach().numpy()
        one_hot_out = gumbel_one_hot[0, 0, :100].cpu().detach().numpy()
        return [batch_G_loss, 0], [0], [0, 0, 0], [self.indicies2string(src[0]), self.indicies2string(summary_sample[0]), 0], distrib, one_hot_out

    def run_iter(self, src, src_mask, max_len, real_data, D_iters=5, D_toggle='On', verbose=1, writer=None):
        # summary_logits have some problem
        # summary = self.generator(src, src_mask, max_len, self.dictionary['[CLS]'])
        batch_size = src.shape[0]
        memory = self.generator.initial_state(batch_size, trainable=True).to(self.device)
        self.gumbel_temperature = max(self.TEMP_END, math.exp(-1e-4 * self.total_steps))
        summary_sample, summary_log_values, summary_probs, gumbel_one_hot = self.generator(src, max_len, memory, self.dictionary['[CLS]'], temperature=self.gumbel_temperature)
        batch_D_loss = 0
        if D_toggle == 'On':
            for i in range(D_iters):
                self.optimizer_D.zero_grad()
                batch_d_loss, real_score, fake_score = self.train_D(gumbel_one_hot, self._to_one_hot(real_data, len(self.dictionary)))
                batch_D_loss += batch_d_loss
                batch_d_loss.backward(retain_graph=True)
                # clip critic weights
                for p in self.discriminator.parameters():
                    p.data.clamp_(-self.clip_value, self.clip_value)
                self.optimizer_D.step()
            batch_D_loss = batch_D_loss.item() / D_iters
        batch_G_loss = 0
        if D_toggle == 'On':
            # print(gumbel_one_hot.shape)
            batch_G_loss = self.train_G(gumbel_one_hot)
        self.gumbel_temperature = max(self.TEMP_END, math.exp(-1e-4 * self.total_steps))
        memory = self.reconstructor.initial_state(batch_size, trainable=True).to(self.device)
        CE_loss, acc, out = self.reconstructor.reconstruct_forward(gumbel_one_hot, src, memory, self.dictionary['[CLS]'])
        rec_loss = CE_loss  # + self.vq_coef * vq_loss + 0.25 * self.vq_coef * commit_loss
        self.optimizer_R.zero_grad()
        rec_loss.backward()
        nn.utils.clip_grad_norm_(list(self.generator.parameters()) + list(self.reconstructor.parameters()), 0.1)
        self.optimizer_R.step()
        self.total_steps += 1
        if self.total_steps % 500 == 0:
            if not os.path.exists("./Nest"):
                os.makedirs("./Nest")
            self.save("./Nest/DoubleRelationMEM_GAN")
        # for i in range(5):
        #     plt.plot(range(1000), summary_probs.cpu().detach().numpy()[0, i, :1000])
        #     wandb.log({"prob {}".format(i): wandb.Histogram(summary_probs.cpu().detach().numpy()[0, i, :1000])}, step=step)
        if verbose == 1 and self.total_steps % 100 == 0:
            print("origin:")
            print(self.indicies2string(src[0]))
            print("summary:")
            print(self.indicies2string(summary_sample[0]))
            print("real summary:")
            print(self.indicies2string(real_data[0]))
            print("reconstruct out:")
            print(self.indicies2string(out[0]))
            # print("sentiment:", label[0].item())
            # print("y:", sentiment_label[0].item())
            # print("reward:", rewards[0].item())
            print("")
        # for name, param in self.generator.named_parameters():
        #     writer.add_histogram(name, param.clone().cpu().data.numpy(), self.total_steps)
        # for name, param in self.reconstructor.named_parameters():
        #     writer.add_histogram(name, param.clone().cpu().data.numpy(), self.total_steps)
        distrib = summary_probs.cpu().detach().numpy()[0, 0, :100]
        one_hot_out = gumbel_one_hot.cpu().detach().numpy()[0, 0, :100]
        return [batch_G_loss, batch_D_loss], [CE_loss.item()], [real_score, fake_score, acc], [self.indicies2string(src[0]), self.indicies2string(summary_sample[0]), self.indicies2string(out[0])], distrib, one_hot_out


class LSTMEncoder(nn.Module):
    def __init__(self, vocab_sz, hidden_dim, padding_index):
        super().__init__()
        self.src_embed = nn.Embedding(vocab_sz, hidden_dim)
        self.rnn_cell = nn.LSTM(hidden_dim, hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
        self.padding_index = padding_index
        self.outsize = hidden_dim * 2

    def forward(self, x):
        # src_mask = (x != self.padding_index).type_as(x).unsqueeze(-2)
        out, (h, c) = self.rnn_cell(self.src_embed(x))
        return out
# class LSTM_Gumbel_Encoder_Decoder(nn.Module):
# def __init__(self, hidden_dim, emb_dim, input_len, output_len, voc_size, device, eps=1e-8, num_layers = 2):
# super().__init__()
# self.hidden_dim = hidden_dim
# self.emb_dim = emb_dim
# #self.input_len = input_len
# #self.output_len = output_len
# #self.voc_size = voc_size
# #self.teacher_prob = 1.
# #self.epsilon = eps
# self.emb_layer = nn.Embedding(voc_size, emb_dim)
# self.num_layers = num_layers
# self.encoder = nn.LSTM(emb_dim, hidden_dim, num_layers=num_layers, batch_first=True, bidirectional=True)
# self.decoder = nn.LSTM(emb_dim, hidden_dim*2, num_layers=num_layers, batch_first=True)
# self.device = device
# self.attention_softmax = nn.Softmax(dim=1)
# # self.pro_layer = nn.Sequential(
# # nn.Linear(hidden_dim*4, voc_size, bias=True)
# # )
# self.adaptive_softmax = torch.nn.AdaptiveLogSoftmaxWithLoss(hidden_dim*4, voc_size, [100, 1000, 10000], div_value=4.0, head_bias=False)
# def forward(self, x, src_mask, max_len, start_symbol, mode = 'argmax', temp = 2.0):
# batch_size = x.shape[0]
# input_len = x.shape[1]
# device = x.device
# # encoder
# x_emb = self.emb_layer(x)
# memory, (h, c) = self.encoder(x_emb)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# h = h.view(batch_size, self.num_layers, h.shape[-1]*2)
# c = c.view(batch_size, self.num_layers, c.shape[-1]*2)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# ## decoder
# out_h, out_c = (h, c)
# ys = torch.ones(batch_size, 1).fill_(start_symbol).type_as(x.data)
# values = []
# all_probs = []
# gumbel_one_hots = []
# for i in range(max_len-1):
# ans_emb = self.emb_layer(ys[:,-1]).view(batch_size, 1, self.emb_dim)
# out, (out_h, out_c) = self.decoder(ans_emb, (out_h, out_c))
# attention = torch.bmm(memory, out.transpose(1, 2)).view(batch_size, input_len)
# attention = self.attention_softmax(attention)
# context_vector = torch.bmm(attention.view(batch_size, 1, input_len), memory)
# logits = torch.cat((out, context_vector), -1).view(batch_size, -1)
# one_hot, next_words, value, prob = self.gumbel_softmax(logits, temp)
# # print(feature.shape)
# # print(one_hot.shape)
# # print(next_words.shape)
# # print(values.shape)
# # print(log_probs.shape)
# # input("")
# ys = torch.cat((ys, next_words.view(batch_size, 1)), dim=1)
# values.append(value)
# all_probs.append(prob)
# gumbel_one_hots.append(one_hot)
# values = torch.stack(values,1)
# all_probs = torch.stack(all_probs,1)
# gumbel_one_hots = torch.stack(gumbel_one_hots, 1)
# return ys, values, all_probs, gumbel_one_hots
# def sample_gumbel(self, shape, eps=1e-20):
# U = torch.rand(shape).to(self.device)
# return -Variable(torch.log(-torch.log(U + eps) + eps))
# def gumbel_softmax_sample(self, logits, temperature):
# y = logits + self.sample_gumbel(logits.size())
# #the formula should be prob not logprob, I guess it still works
# return self.adaptive_softmax.log_prob(logits).exp()
# #return F.softmax(y / temperature, dim=-1)
# def gumbel_softmax(self, logits, temperature):
# """
# ST-gumple-softmax
# input: [*, n_class]
# return: flatten --> [*, n_class] an one-hot vector
# """
# y = self.gumbel_softmax_sample(logits, temperature)
# shape = y.size()
# values, ind = y.max(dim=-1)
# y_hard = torch.zeros_like(y).view(-1, shape[-1])
# y_hard.scatter_(1, ind.view(-1, 1), 1)
# y_hard = y_hard.view(*shape)
# y_hard = (y_hard - y).detach() + y
# return y_hard.view(logits.shape[0], -1), ind, values, y
# class LSTM_Normal_Encoder_Decoder(nn.Module):
# def __init__(self, hidden_dim, emb_dim, input_len, output_len, voc_size, pad_index, device, eps=1e-8, num_layers = 2):
# super().__init__()
# self.hidden_dim = hidden_dim
# self.emb_dim = emb_dim
# self.device = device
# #self.input_len = input_len
# #self.output_len = output_len
# #self.voc_size = voc_size
# #self.teacher_prob = 1.
# #self.epsilon = eps
# self.num_layers = num_layers
# #self.emb_layer = nn.Embedding(voc_size, emb_dim)
# self.disguise_embed = nn.Linear(voc_size, emb_dim)
# self.encoder = nn.LSTM(emb_dim, hidden_dim, num_layers=num_layers, batch_first=True, bidirectional=True)
# self.decoder = nn.LSTM(emb_dim, hidden_dim*2, num_layers=num_layers, batch_first=True)
# self.attention_softmax = nn.Softmax(dim=1)
# self.vocab_sz = voc_size
# self.criterion = torch.nn.AdaptiveLogSoftmaxWithLoss(hidden_dim*4, voc_size, [1000, 5000, 20000], div_value=4.0, head_bias=False)
# def forward(self, x, src_mask, max_len, start_symbol, y, mode = 'argmax', temp = 2.0):
# batch_size = x.shape[0]
# input_len = x.shape[1]
# device = x.device
# # encoder
# x_emb = self.disguise_embed(x)
# memory, (h, c) = self.encoder(x_emb)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# h = h.view(batch_size, self.num_layers, h.shape[-1]*2)
# c = c.view(batch_size, self.num_layers, c.shape[-1]*2)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# ## decoder
# out_h, out_c = (h, c)
# logits = []
# for i in range(max_len):
# ans_emb = self.disguise_embed(self._to_one_hot(y[:,i], self.vocab_sz)).view(batch_size, 1, self.emb_dim)
# out, (out_h, out_c) = self.decoder(ans_emb, (out_h, out_c))
# attention = torch.bmm(memory, out.transpose(1, 2)).view(batch_size, input_len)
# attention = self.attention_softmax(attention)
# context_vector = torch.bmm(attention.view(batch_size, 1, input_len), memory)
# logit = torch.cat((out, context_vector), -1).view(batch_size, -1)
# # if mode == 'argmax':
# # values, next_words = torch.max(log_probs, dim=-1, keepdim=True)
# # if mode == 'sample':
# # m = torch.distributions.Categorical(logits=log_probs)
# # next_words = m.sample()
# # values = m.log_prob(next_words)
# logits.append(logit)
# logits = torch.stack(logits, 1)
# _ ,loss = self.criterion(logits[:,:-1].contiguous().view(batch_size * (max_len - 1), -1), y[:,1:].contiguous().view(batch_size * (max_len-1)))
# #y from one to get rid of [CLS]
# log_argmaxs = self.criterion.predict(logits[:,:-1].contiguous().view(batch_size * (max_len - 1), -1)).view(batch_size, max_len-1)
# acc = ( log_argmaxs== y[:,1:]).float().mean()
# return loss, acc, log_argmaxs
# def _to_one_hot(self, y, n_dims):
# scatter_dim = len(y.size())
# y_tensor = y.to(self.device).long().view(*y.size(), -1)
# zeros = torch.zeros(*y.size(), n_dims).to(self.device)
# return zeros.scatter(scatter_dim, y_tensor, 1)
class Discriminator(nn.Module):
    def __init__(self, transformer_encoder, hidden_dim, vocab_sz, padding_index):
        super(Discriminator, self).__init__()
        self.padding_index = padding_index
        self.disguise_embed = nn.Linear(vocab_sz, hidden_dim)
        self.transformer_encoder = transformer_encoder
        self.linear = nn.Linear(self.transformer_encoder.layers[-1].size, 1)
        # self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        src_mask = (x.argmax(-1) != self.padding_index).type_as(x).unsqueeze(-2)
        x = self.transformer_encoder(self.disguise_embed(x), src_mask)
        score = self.linear(x)
        return score
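
# --- Hedged editor sketch (not part of the original file) ---
# The generator passed into BigBird is expected to return straight-through
# Gumbel-softmax one-hots ("gumbel_one_hot") so discrete tokens keep gradients;
# a minimal standalone version of that estimator, using only the imports above:
def _st_gumbel_softmax_sketch(logits, tau=1.0, eps=1e-20):
    # sample Gumbel(0, 1) noise and relax the categorical sample
    gumbel = -torch.log(-torch.log(torch.rand_like(logits) + eps) + eps)
    y_soft = F.softmax((logits + gumbel) / tau, dim=-1)
    # discretize in the forward pass, keep soft gradients in the backward pass
    index = y_soft.argmax(dim=-1, keepdim=True)
    y_hard = torch.zeros_like(y_soft).scatter_(-1, index, 1.0)
    return (y_hard - y_soft).detach() + y_soft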
|
[
"you@example.com"
] |
you@example.com
|
d1716fb4ec493d1b89f08262b63cd4a0ccee5a05
|
af8cb7ec280573b58a16ae6e92a938828ffc052d
|
/Recurrent_network/Recurrent_network2.py
|
d0ec9b7584d15643b4ca53b345c7e20dda0a2df4
|
[] |
no_license
|
ninetailskim/Tensorflow_Tutorial
|
65e44ecce976fdd469fc8c34b0d1ed975e5b9989
|
cb7d8fcd12e57de80f5cded091e014b425e3467f
|
refs/heads/master
| 2021-07-02T22:46:55.503318
| 2017-09-23T08:31:03
| 2017-09-23T08:31:03
| 104,552,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,383
|
py
|
'''
static_rnn
input: A length T list of inputs, each a Tensor of shape [batch_size, input_size], or a nested tuple of such elements.
    x = tf.unstack(x, timesteps, 1)
output: A list of outputs (one for each input), or a nested tuple of such elements.
    output[-1]
'''
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('/tmp/data', one_hot=True)

learning_rate = 0.001
training_steps = 10000
batch_size = 128
display_step = 200

num_input = 28
timesteps = 28
num_hidden = 128
num_classes = 10

X = tf.placeholder(tf.float32, [None, timesteps, num_input])
Y = tf.placeholder(tf.float32, [None, num_classes])

weights = {
    'out': tf.get_variable('weight_out', [num_hidden, num_classes], tf.float32)
}
biases = {
    'out': tf.get_variable('biases_out', [num_classes], tf.float32)
}


def RNN(x, weight, biases):
    x = tf.unstack(x, timesteps, 1)
    lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
    # h0 = lstm_cell.zero_state(batch_size, tf.float32)
    output, state = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    out = tf.nn.bias_add(tf.matmul(output[-1], weight['out']), biases['out'])
    return out


logits = RNN(X, weights, biases)
prediction = tf.nn.softmax(logits)
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1, training_steps + 1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        batch_x = batch_x.reshape((-1, timesteps, num_input))
        _, loss, acc = sess.run([train_op, loss_op, accuracy], feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0:
            print("Step:", step, "loss:", loss, "Accuracy:", acc)
    print("Training finished!")
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
    test_label = mnist.test.labels[:test_len]
    print("Test:", sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))
|
[
"ninetailsyeon@163.com"
] |
ninetailsyeon@163.com
|
8bb434118d8a33e713f63403eaff1c739d22dc81
|
144c0ec2098c1a79f277729550262ab953a34aed
|
/logisticka.py
|
32ff6f38f03be4fd21104c731494fc99743dc6be
|
[] |
no_license
|
mkonjikovac/logistickaRegresija
|
ad7142d38824c70e992213ae8d4f6bb6ab2d74c7
|
37e58a40526a3679aeeaacbe49d5ad0e06d8881c
|
refs/heads/master
| 2022-03-07T03:30:00.813775
| 2019-11-08T13:57:02
| 2019-11-08T13:57:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,989
|
py
|
# logistic regression
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sn


def standardization(x):
    xs = np.copy(x)
    for i in range(1, n):
        xa = np.average(xs[:, i])
        std = np.std(xs[:, i])
        for j in range(m):
            xs[j, i] = (xs[j, i] - xa) / std
    return xs


def h(theta, x):
    return 1 / (1 + math.exp(-theta.T.dot(x)))


# batch gradient ascent, maximizing the log-likelihood
def gradient(x, y, theta):
    gl = np.zeros((n, 1))
    for i in range(m):
        h_theta = h(theta, x[i].T)
        for j in range(n):
            gl[j] = gl[j] + (y[i] - h_theta) * x[i, j]
    return gl


def gradient_loss(x, y):
    alpha_l, alpha, alpha_h = 0.01, 0.02, 0.04
    theta_l, J_l = gradient_descent_j(x, y, alpha_l, 1)
    theta, J = gradient_descent_j(x, y, alpha, 1)
    theta_h, J_h = gradient_descent_j(x, y, alpha_h, 1)
    plt.plot(range(len(J_l)), J_l)
    plt.plot(range(len(J)), J)
    plt.plot(range(len(J_h)), J_h)
    plt.legend(['alpha = 0.01', 'alpha = 0.02', 'alpha = 0.04'], loc='upper right')
    plt.xlabel('iter')
    plt.ylabel('J')
    plt.show()


def gradient_descent_j(x, y, alpha=0.02, flag=0):
    theta = np.zeros((n, 1))
    bound = 2e-2
    J = []
    dl = gradient(x, y, theta)
    while np.linalg.norm(dl) > bound:
        theta = theta + alpha * dl
        dl = gradient(x, y, theta)
        if flag:
            dJ = 0
            for i in range(m):
                ht = h(theta, xs[i].T)
                dJ = dJ + y[i] * math.log(ht) + (1 - y[i]) * math.log(1 - ht)
            J.append(-dJ)
    return theta, J


def gradient_descent(x, y):
    theta, J = gradient_descent_j(x, y)
    return theta


# softmax
def delta(x, y, theta):
    m = x.shape[0]
    deltaJ = np.zeros((k, n))
    for r in range(k - 1):
        for i in range(m):
            s = 0
            for j in range(k):
                s = s + math.exp(theta[j].dot(x[i].T))
            deltaJ[r] = deltaJ[r] + ((y[i] == r) - math.exp(theta[r].dot(x[i].T)) / s) * x[i]
    return deltaJ


def gauss(x, my, sigma):
    sigma2 = math.pow(sigma, 2)
    return 1 / math.sqrt(2 * math.pi * sigma2) * math.exp(-math.pow((x - my), 2) / 2 * sigma2)


def gnb(x, my1, sigma1, my0, sigma0):
    invS1, invS0 = np.linalg.inv(sigma1), np.linalg.inv(sigma0)
    return math.exp(0.5 * x.T.dot(invS1).dot(x) - my1.T.dot(invS1).dot(x) + 0.5 * my1.T.dot(invS1).dot(my1)
                    - 0.5 * x.T.dot(invS0).dot(x) + my0.T.dot(invS0).dot(x) - 0.5 * my0.T.dot(invS0).dot(my0))


def plot_conf(conf, reg, train):
    if train == 1:
        print(reg)
        print('conf_train:')
    else:
        print('conf_test:')
    print(conf)
    df_cm = pd.DataFrame(conf, range(k), range(k))
    hm = sn.heatmap(df_cm, annot=True, annot_kws={"size": 12})
    bottom, top = hm.get_ylim()
    hm.set_ylim(bottom + 0.5, top - 0.5)
    plt.show()


df = pd.read_csv('multiclass_data.csv', header=None)
df.columns = ['x1', 'x2', 'x3', 'x4', 'x5', 'y']
df.insert(0, 'one', 1)
boundary_index = round(df.shape[0] * 0.8)
df = df.sample(frac=1)
y = df['y'].to_numpy()
x = df.iloc[:, 0:6].to_numpy()
m, n, k = x.shape[0], x.shape[1], len(np.unique(y))  # n includes the column of ones
xs = standardization(x)

# logistic regression (one-vs-rest)
y0, y1, y2 = np.copy(y), np.copy(y), np.copy(y)
y0[y0 >= 1], y0[y0 == 0], y0[y0 > 1] = 2, 1, 0
y1[y1 != 1] = 0
y2[y2 <= 1], y2[y2 == 2] = 0, 1
theta0, theta1, theta2 = gradient_descent(xs, y0), gradient_descent(xs, y1), gradient_descent(xs, y2)
conf_train, conf_test = np.zeros((k, k)), np.zeros((k, k))
y_guess = np.zeros((m, 1), int)
for i in range(m):
    h0, h1, h2 = h(theta0, xs[i].T), h(theta1, xs[i].T), h(theta2, xs[i].T)
    if h0 > h1 and h0 > h2:
        y_guess[i] = 0
    elif h1 > h0 and h1 > h2:
        y_guess[i] = 1
    else:
        y_guess[i] = 2
    if i < boundary_index:
        conf_train[y[i], y_guess[i]] = conf_train[y[i], y_guess[i]] + 1
    else:
        conf_test[y[i], y_guess[i]] = conf_test[y[i], y_guess[i]] + 1
plot_conf(conf_train, 'LOGISTIC REGRESSION:', 1)
plot_conf(conf_test, 'LOGISTIC REGRESSION:', 0)
gradient_loss(xs, y1)  # loss as a function of the learning rate

# softmax
shuffle = np.arange(m)
row_num = [5, 10, 20]
row_size = row_num[1]
for row in row_num:
    alpha, step, cnt = 0.02, 0, 1000
    theta_row, J = np.zeros((k, n)), []
    for i in range(cnt):
        theta_row = theta_row + alpha * delta(xs[step:min(m, step + row)], y[step:min(m, step + row)], theta_row)
        dJ = 0
        for i in range(m):
            y_guess = 0
            for j in range(k):
                y_guess = y_guess + math.exp(theta_row[j].dot(xs[i].T))
            dJ = dJ + (theta_row[y[i]].dot(xs[i].T) - math.log(y_guess))
        J.append(-dJ)
        step = (step + row) % m
        if step < row:
            step = 0
            np.random.shuffle(shuffle)
            xs, y = xs[shuffle], y[shuffle]
    if row == row_size:
        theta = theta_row
    plt.plot(range(len(J)), J)
plt.legend(['batch = 5', 'batch = 10', 'batch = 20'], loc='upper right')
plt.xlabel('iter')
plt.ylabel('J')
plt.show()
conf_train, conf_test = np.zeros((k, k)), np.zeros((k, k))
for i in range(m):
    phi, s = np.zeros((k, 1)), 0
    for r in range(k):
        phi[r] = math.exp(theta[r].dot(xs[i].T))
        s = s + math.exp(theta[r].dot(xs[i].T))
    phi = phi / s
    phi_max_index = np.argmax(phi)
    if i < boundary_index:
        conf_train[y[i], phi_max_index] = conf_train[y[i], phi_max_index] + 1
    else:
        conf_test[y[i], phi_max_index] = conf_test[y[i], phi_max_index] + 1
plot_conf(conf_train, 'SOFTMAX:', 1)
plot_conf(conf_test, 'SOFTMAX:', 0)

# GDA - Gaussian discriminant analysis
xs = xs[:, 1:]
xs = np.c_[xs, y]
n = n - 1  # the column of ones is no longer needed
xs0, xs1, xs2 = xs[np.where(xs[:, n] == 0)], xs[np.where(xs[:, n] == 1)], xs[np.where(xs[:, n] == 2)]
xs0, xs1, xs2 = xs0[:, :-1], xs1[:, :-1], xs2[:, :-1]
x_sep = [xs0, xs1, xs2]
my, sigma = np.zeros((k, n)), np.zeros((k, n))
# compute my (the mean) and sigma (the standard deviation) per class and feature
for i in range(k):
    for j in range(n):
        my[i, j] = np.mean(x_sep[i][:, j])
        sigma[i, j] = np.std(x_sep[i][:, j])
conf_train, conf_test = np.zeros((k, k)), np.zeros((k, k))
for i in range(m):
    gm, p = np.zeros((k, n)), np.zeros(k)  # gauss matrix
    total = 0
    for l in range(k):
        for j in range(n):
            gm[l, j] = gauss(xs[i, j], my[l, j], sigma[l, j])
        p[l] = np.prod(gm[l])
        total = total + p[l]
    p = p / total
    if i < boundary_index:
        conf_train[y[i], np.argmax(p)] = conf_train[y[i], np.argmax(p)] + 1
    else:
        conf_test[y[i], np.argmax(p)] = conf_test[y[i], np.argmax(p)] + 1
plot_conf(conf_train, 'GDA:', 1)
plot_conf(conf_test, 'GDA:', 0)

# GNB - naive Bayes
MY0 = np.ones((5, xs0.shape[0]))
MY1 = np.ones((5, xs1.shape[0]))
MY2 = np.ones((5, xs2.shape[0]))
for j in range(n):
    MY0[j] = my[0, j]
    MY1[j] = my[1, j]
    MY2[j] = my[2, j]
print(xs0, my)
SIGMA0 = 1 / (xs0.shape[0] - 1) * (xs0.T - MY0).dot((xs0.T - MY0).T)
SIGMA1 = 1 / (xs1.shape[0] - 1) * (xs1.T - MY1).dot((xs1.T - MY1).T)
SIGMA2 = 1 / (xs2.shape[0] - 1) * (xs2.T - MY2).dot((xs2.T - MY2).T)
conf_train, conf_test = np.zeros((k, k)), np.zeros((k, k))
xs = xs[:, :-1]  # drop the appended label column
for i in range(m):
    p = np.zeros(k)
    p[0] = 1 / (1 + gnb(xs[i].T, my[1].T, SIGMA1, my[0].T, SIGMA0) + gnb(xs[i].T, my[2].T, SIGMA2, my[0].T, SIGMA0))
    p[1] = 1 / (1 + gnb(xs[i].T, my[0].T, SIGMA0, my[1].T, SIGMA1) + gnb(xs[i].T, my[2].T, SIGMA2, my[1].T, SIGMA1))
    p[2] = 1 / (1 + gnb(xs[i].T, my[0].T, SIGMA0, my[2].T, SIGMA2) + gnb(xs[i].T, my[1].T, SIGMA1, my[2].T, SIGMA2))
    if i < boundary_index:
        conf_train[y[i], np.argmax(p)] = conf_train[y[i], np.argmax(p)] + 1
    else:
        conf_test[y[i], np.argmax(p)] = conf_test[y[i], np.argmax(p)] + 1
plot_conf(conf_train, 'GNB:', 1)
plot_conf(conf_test, 'GNB:', 0)
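
# --- Hedged editor note (not part of the original file) ---
# The update "theta = theta + alpha * dl" is gradient *ascent* on the Bernoulli
# log-likelihood sum_i [ y_i*log(h(theta, x_i)) + (1 - y_i)*log(1 - h(theta, x_i)) ],
# whose gradient is exactly sum_i (y_i - h(theta, x_i)) * x_i as computed in
# gradient(); the plotted J is the negative log-likelihood, so it decreases.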
|
[
"mkonjikovac12@gmail.com"
] |
mkonjikovac12@gmail.com
|
c35a45aa07d805a1a36b6c9ba503f843f82fe68e
|
3554cedeca0e21a015534290a95d0a3930ff1cc1
|
/spider/spideOnDelegation.py
|
f6be5c0879481f57b3fb6d95aa692fb56bf285b8
|
[] |
no_license
|
baolintian/EZTrade
|
27d2329468f44bbedc610e0f8ab75be05ccfb247
|
72ee63fdcbfd37574a7734bd0991cff114481f79
|
refs/heads/main
| 2023-09-01T04:08:20.086976
| 2021-09-23T08:47:32
| 2021-09-23T08:47:32
| 390,328,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,650
|
py
|
import requests
import json
import time
import datetime


def get_token():
    url = "http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/app/login?password=31c15919&userName=admin"
    payload = {}
    headers = {
        'accept': '*/*',
        'Authorization': 'eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiJhZG1pbiIsImV4cCI6MTYyNTY3NTgyNH0.LkYBQnKfeDoEYJAMs4HOZae_Gq9nyu8kqOVP3T_qkkdmHb9pgRJbw4dlbxjEO69tFh7NQ3-vT-EHLTYo6b8Nyw'
    }
    response = requests.request("GET", url, headers=headers, data=payload)
    return json.loads(response.text)["data"]


def get_delegation_info():
    url = "http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/DelegateInfo/objects"
    payload = json.dumps({
        "condition": "and 1=1",
        "pageSize": 10,
        "startIndex": 0
    })
    token = get_token()
    headers = {
        'accept': '*/*',
        'Authorization': token,
        'Content-Type': 'application/json',
        'Cookie': 'JSESSIONID=8BC976CB994C3656F9AE0E913A2521C9'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    response = json.loads(response.text)
    return response


def get_coin_info(class_name, condition):
    url = "http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/" + class_name + "/objects"
    payload = json.dumps({
        "condition": condition,
        "pageSize": 100,
        "startIndex": 0
    })
    token = get_token()
    headers = {
        'accept': '*/*',
        'Authorization': token,
        'Content-Type': 'application/json',
        'Cookie': 'JSESSIONID=8BC976CB994C3656F9AE0E913A2521C9'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    response = json.loads(response.text)
    return response


def delete_delegation_by_oid(class_name, oid):
    url = "http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/" + class_name + "/objects-delete"
    payload = json.dumps([
        oid
    ])
    token = get_token()
    headers = {
        'accept': '*/*',
        'Authorization': token,
        'Content-Type': 'application/json',
        'Cookie': 'JSESSIONID=8BC976CB994C3656F9AE0E913A2521C9'
    }
    requests.request("POST", url, headers=headers, data=payload)


def create_transaction(class_name, message):
    url = "http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/" + class_name + "/objects-create"
    payload = json.dumps([
        message
    ])
    token = get_token()
    headers = {
        'accept': '*/*',
        'Authorization': token,
        'Content-Type': 'application/json',
        'Cookie': 'JSESSIONID=8BC976CB994C3656F9AE0E913A2521C9'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    if response.status_code == 200:
        return True
    else:
        return False


def get_instance_by_oid(class_name, oid):
    url = "http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/" + class_name + "/objects/oids"
    payload = json.dumps([
        oid
    ])
    token = get_token()
    headers = {
        'accept': '*/*',
        'Authorization': token,
        'Content-Type': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    return json.loads(response.text)


def edit_VirtualAccount_by_oid(class_name, obj):
    url = "http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/" + class_name + "/objects-update?forceUpdate=false"
    payload = json.dumps([
        obj
    ])
    token = get_token()
    headers = {
        'accept': '*/*',
        'Authorization': token,
        'Content-Type': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    print(response.text)


def get_single_coin_info(class_name, condition):
    url = "http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/" + class_name + "/objects"
    payload = json.dumps({
        "condition": condition,
        "pageSize": 100
    })
    token = get_token()
    headers = {
        'accept': '*/*',
        'Authorization': token,
        'Content-Type': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    return json.loads(response.text)


def edit_single_coin_hold(class_name, obj):
    url = "http://i-2o0wkhxv.cloud.nelbds.org.cn:8180/api/app//dwf/v1/omf/entities/" + class_name + "/objects-update?forceUpdate=false"
    payload = json.dumps([
        obj
    ])
    token = get_token()
    headers = {
        'accept': '*/*',
        'Authorization': token,
        'Content-Type': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=payload)
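
# --- Hedged editor note (not part of the original file) ---
# Everything below is a polling loop: each pass refetches the open delegations,
# compares each limit price with the latest coin price, and for a crossed BUY
# order writes a TransactionHistory record, then updates the delegator's
# VirtualAccount balances and SingleCoinInfo holdings.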
while True:
delegation_info = get_delegation_info()
# print(delegation_info)
# print(len(delegation_info["data"]))
delegation_info = delegation_info["data"]
    # Fetch the current price of every coin
coin_info = get_coin_info("CoinInfo", "and 1=1")
coin_info = coin_info["data"]
# print(coin_info)
coin_dict = {}
for i in range(len(coin_info)):
coin_name = coin_info[i]["coinName"]
coin_price = coin_info[i]["coinPrice"]
coin_dict[coin_name] = coin_price
    # Process every outstanding delegation (pending order)
for i in range(len(delegation_info)):
print(delegation_info[i])
delegate_coin_name = delegation_info[i]["delegateCoinName"]
delegate_price = delegation_info[i]["delegatePrice"]
delegate_action = delegation_info[i]["delegateAction"]
delegate_number = delegation_info[i]["delegateAmount"]
delegate_oid = delegation_info[i]["oid"]
delegator_oid = delegation_info[i]["delegatorOID"]
# delegate_type = delegation_info[i]["delegateType"]
        # Discard any delegation whose action is neither BUY nor SELL
        if delegate_action != "BUY" and delegate_action != "SELL":
            delete_delegation_by_oid("DelegateInfo", delegate_oid)
            continue
        if delegate_coin_name in coin_dict.keys():
            if delegate_action == "BUY" and delegate_price < coin_dict[delegate_coin_name]:
                continue
            if delegate_action == "SELL" and delegate_price > coin_dict[delegate_coin_name]:
                continue
        else:
            # no market quote for this coin yet; skip it to avoid a KeyError below
            continue
if delegate_action == "BUY":
transaction_message = {
"transactionCoinName": delegate_coin_name,
"transactionAmount": delegate_number,
"transactionPrice": delegate_price,
"transactionPersonOID": delegator_oid,
"transactionAction": delegate_action,
"transactionTime": str(int(time.mktime(datetime.datetime.now().timetuple()))*1000)
}
print("BUY")
print(transaction_message)
result = create_transaction("TransactionHistory", transaction_message)
if result:
                # Update the buyer's virtual account balances
user = get_instance_by_oid("VirtualAccount", delegator_oid)["data"][0]
user_oid = user["oid"]
user_tot = user["asset"]
user_coin_asset = user["coinAsset"]
user_cash = user["cash"]
user_frozenAsset = user["frozenAsset"]
user_usableAsset = user["usableAsset"]
user_frozenAsset = user_frozenAsset - delegate_number*delegate_price*(1+0.001)
user_coin_asset = user_coin_asset + delegate_number*coin_dict[delegate_coin_name]
user_cash = user_frozenAsset+user_usableAsset
user_tot = user_cash+user_coin_asset
                # TODO: update the rate of return
obj = {
"oid": user_oid,
"asset": user_tot,
"coinAsset": user_coin_asset,
"cash": user_cash,
"frozenAsset": user_frozenAsset,
"usableAsset": user_usableAsset
# "delegatorOID": delegator_oid
}
edit_VirtualAccount_by_oid("VirtualAccount", obj)
                # Create or update the per-coin holding record
hold_info = get_single_coin_info(r"SingleCoinInfo", "and obj.coinHolderOID = '"+str(user_oid)+r"'")
hold_info = hold_info["data"]
hold_coin_dict = {}
flag = False
for j in range(len(hold_info)):
if hold_info[j]["coinName"] == delegate_coin_name:
print("real update")
flag = True
coin_number = hold_info[j]["coinAmount"]
hold_price = hold_info[j]["coinHoldPrice"]
avg_price = hold_info[j]["coinAveragePrice"]
transaction_time = hold_info[j]["coinTime"]
usable_amount = hold_info[j]["coinUsableAmount"]
hold_price = (hold_price * coin_number + delegate_number * delegate_price) / (
coin_number + delegate_number)
avg_price = (hold_price*transaction_time+delegate_price)/(1+transaction_time)
transaction_time = transaction_time+1
coin_number = coin_number+delegate_number
usable_amount = usable_amount+delegate_number
obj = {
"oid": hold_info[j]["oid"],
"coinAmount": coin_number,
"coinHoldPrice": hold_price,
"coinAveragePrice": avg_price,
"coinTime": transaction_time,
"coinUsableAmount": usable_amount
}
edit_single_coin_hold("SingleCoinInfo", obj)
break
if flag == False:
obj = {
"coinAmount": delegate_number,
"coinHoldPrice": delegate_price,
"coinAveragePrice": delegate_price,
"coinTime": 1,
"coinName": delegate_coin_name,
"coinHolderOID": delegator_oid,
"coinUsableAmount": delegate_number
}
create_transaction("SingleCoinInfo", obj)
            # Remove the fulfilled delegation
delete_delegation_by_oid("DelegateInfo", delegate_oid)
if delegate_action == "SELL":
            # Update the user's funds
            # Update or delete the user's holdings
            # Create a transaction record
            # Remove the delegation
transaction_message = {
                # covers both SELL and AUTO SELL
"transactionAction": delegate_action,
"transactionCoinName": delegate_coin_name,
"transactionAmount": delegate_number,
"transactionPrice": delegate_price,
"transactionPersonOID": delegator_oid,
"transactionTime": str(int(time.mktime(datetime.datetime.now().timetuple())) * 1000)
}
print("SELL")
print(transaction_message)
result = create_transaction("TransactionHistory", transaction_message)
if result:
                # Update the seller's virtual account balances
user = get_instance_by_oid("VirtualAccount", delegator_oid)["data"][0]
user_oid = user["oid"]
user_tot = user["asset"]
user_coin_asset = user["coinAsset"]
user_cash = user["cash"]
user_frozenAsset = user["frozenAsset"]
user_usableAsset = user["usableAsset"]
user_coin_asset = user_coin_asset - delegate_number * coin_dict[delegate_coin_name]
user_usableAsset = user_usableAsset+delegate_number * delegate_price*(1-0.001)
user_cash = user_frozenAsset + user_usableAsset
user_tot = user_cash + user_coin_asset
                # TODO: update the rate of return
obj = {
"oid": user_oid,
"asset": user_tot,
"coinAsset": user_coin_asset,
"cash": user_cash,
"frozenAsset": user_frozenAsset,
"usableAsset": user_usableAsset
}
edit_VirtualAccount_by_oid("VirtualAccount", obj)
                # Update the per-coin holding record
hold_info = get_single_coin_info(r"SingleCoinInfo",
"and obj.coinHolderOID = '" + str(user_oid) + r"'")
hold_info = hold_info["data"]
hold_coin_dict = {}
for j in range(len(hold_info)):
if hold_info[j]["coinName"] == delegate_coin_name:
print("real update")
# flag = True
coin_number = hold_info[j]["coinAmount"]
hold_price = hold_info[j]["coinHoldPrice"]
avg_price = hold_info[j]["coinAveragePrice"]
transaction_time = hold_info[j]["coinTime"]
usable_amount = hold_info[j]["coinUsableAmount"]
                        if coin_number - delegate_number != 0:
hold_price = (hold_price * coin_number - delegate_number * delegate_price) / (
coin_number - delegate_number)
else:
hold_price = 0
avg_price = (hold_price * transaction_time + delegate_price) / (1 + transaction_time)
transaction_time = transaction_time+1
coin_number = coin_number - delegate_number
print("剩余币种")
print(coin_number)
if(coin_number <= 0.0001):
# 直接删除持仓记录
delete_delegation_by_oid("SingleCoinInfo", hold_info[j]["oid"])
else:
usable_amount = usable_amount
obj = {
"oid": hold_info[j]["oid"],
"coinAmount": coin_number,
"coinHoldPrice": hold_price,
"coinAveragePrice": avg_price,
"coinTime": transaction_time,
"coinUsableAmount": usable_amount
}
edit_single_coin_hold("SingleCoinInfo", obj)
break
            # Remove the fulfilled delegation
delete_delegation_by_oid("DelegateInfo", delegate_oid)
time.sleep(2)
|
[
"tianbaolin1@gmail.com"
] |
tianbaolin1@gmail.com
|
50317930bb9698c10a56bf2f5e1c9bf9b3f6f36b
|
38f619c6210d77d156c6a9ae2850b30b1d96fd79
|
/gen_winning_paths.py
|
3a4c2ee73521727270a7dd6a1f62200d8b43fc07
|
[] |
no_license
|
ofraam/GTTT
|
3c2d0fc55e17c794b9e4f7078640e86d73b780fc
|
83e4c4f0d4c667bc719239d79daa1ab9417e7d1e
|
refs/heads/master
| 2021-01-20T04:46:15.095923
| 2019-08-08T15:57:04
| 2019-08-08T15:57:04
| 89,724,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,938
|
py
|
if __name__ == "__main__":
dimension = 10
streak = 5
filename = "examples/board_"+str(dimension)+"_"+str(streak)+".txt"
row = 1
col = 1
winning_paths = []
    #check every direction (horizontal, vertical, both diagonals) from each starting cell
for row in range(1,dimension+1):
for col in range(1, dimension + 1):
i = (row-1)*dimension+col
if (i+(streak-1))<=(dimension*row): #horizontal paths
path = []
for s in range(0,streak):
path.append(i+s)
winning_paths.append(path)
if (i+(streak-1)*dimension)<=dimension*(dimension-1)+col: #vertical paths
path = []
for s in range(0,streak):
path.append(i+(s)*dimension)
winning_paths.append(path)
if (i+(streak-1)*(dimension+1))<=dimension*dimension: #diagonal right paths
if (i + (streak - 1) * (dimension + 1)) <= (row + (streak - 1)) * dimension: # diagonal right paths
path = []
for s in range(0,streak):
path.append(i+(s)*(dimension+1))
winning_paths.append(path)
            if (i+(streak-1)*(dimension-1))<=dimension*dimension: #diagonal left paths
                if (i + (streak - 1) * (dimension - 1)) > ((row-1) + (streak - 1)) * dimension: # diagonal left paths
path = []
for s in range(0,streak):
path.append(i+(s)*(dimension-1))
winning_paths.append(path)
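    # Output format: first line is the number of cells, then one winning path per line.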
with open(filename, "w") as text_file:
text_file.write(str(dimension*dimension))
text_file.write("\n")
for path in winning_paths:
for i in range(len(path)):
text_file.write(str(path[i]))
if i<len(path)-1:
text_file.write(" ")
text_file.write("\n")
    print(winning_paths)
|
[
"oamir@seas.harvard.edu"
] |
oamir@seas.harvard.edu
|
cb87f2390f4328b284144e4fa1564341cb8bdcf7
|
c27c51f5c33e0431dbe7db6e18c21b249d476cfa
|
/OpenSource_Python_Code/nova-2013.2/nova/tests/api/ec2/test_faults.py
|
36cee0663bf4ff4b4c640f0b081a869d016d26a6
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/Python_Stuff
|
9bef74e0db17bb5e3ba2d908ced01ee744820d80
|
9aa94a0fa5e4e802090c7b29ec88b840e304d9e5
|
refs/heads/master
| 2022-11-20T06:54:36.581623
| 2017-12-04T18:56:02
| 2017-12-04T18:56:02
| 282,171,169
| 0
| 0
| null | 2020-07-24T08:54:37
| 2020-07-24T08:54:36
| null |
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
import webob
from nova.api.ec2 import faults
from nova import test
from nova import wsgi
class TestFaults(test.NoDBTestCase):
"""Tests covering ec2 Fault class."""
def test_fault_exception(self):
# Ensure the status_int is set correctly on faults.
fault = faults.Fault(webob.exc.HTTPBadRequest(
explanation='test'))
self.assertTrue(isinstance(fault.wrapped_exc,
webob.exc.HTTPBadRequest))
def test_fault_exception_status_int(self):
# Ensure the status_int is set correctly on faults.
fault = faults.Fault(webob.exc.HTTPNotFound(explanation='test'))
self.assertEquals(fault.wrapped_exc.status_int, 404)
def test_fault_call(self):
# Ensure proper EC2 response on faults.
message = 'test message'
ex = webob.exc.HTTPNotFound(explanation=message)
fault = faults.Fault(ex)
req = wsgi.Request.blank('/test')
req.GET['AWSAccessKeyId'] = "test_user_id:test_project_id"
self.mox.StubOutWithMock(faults, 'ec2_error_response')
faults.ec2_error_response(mox.IgnoreArg(), 'HTTPNotFound',
message=message, status=ex.status_int)
self.mox.ReplayAll()
fault(req)
|
[
"thelma1944@gmail.com"
] |
thelma1944@gmail.com
|
fecb95f2df1a15ec0d1133aa0f186e37532e7f1c
|
357ce8dbb7e2ebab438ae90a8f598ba625ee74a1
|
/perticks/api/models.py
|
b2bf85e2e850ce59d32e170e83561f33d1a78fcd
|
[] |
no_license
|
HealthHackAu2016/per-ticks
|
899870f0c3915bb8d0aed9fcfe609674934b1a76
|
03eeaf57ea7e8c1efc07a8ff48c59edc058f7b4d
|
refs/heads/master
| 2021-01-11T02:50:00.246122
| 2016-10-16T07:07:42
| 2016-10-16T07:07:42
| 70,917,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,645
|
py
|
from django.db import models
from django.contrib import admin
from django.core.validators import RegexValidator, EmailValidator
class BiteReport(models.Model):
# Validators
    numeric = RegexValidator(r'^[0-9]*$', 'Only numeric characters are allowed.')
validate_email = EmailValidator()
# Fields
id = models.AutoField(primary_key=True)
auth_id = models.CharField(max_length=20)
auth_code = models.CharField(max_length=20)
email = models.CharField(max_length=200, blank=True, validators=[validate_email])
    phone = models.CharField(max_length=11, blank=True, validators=[numeric])
allows_follow_up = models.BooleanField(default=False)
wants_reminder = models.BooleanField(default=False)
symptom_comments = models.TextField()
submission_date = models.DateField(auto_now_add=True)
bite_date = models.DateField()
lat = models.FloatField()
lon = models.FloatField()
bitten_before = models.BooleanField(default=False)
number_of_bites = models.IntegerField(default=1)
# travel
admin.site.register(BiteReport)
class HospitalData(models.Model):
numeric = RegexValidator(r'^[0-9]*$', 'Only numeric characters are allowed.')
hospital_name = models.CharField(max_length=128)
hospital_address = models.CharField(max_length=512)
hospital_telephone = models.CharField(max_length=11, blank=True, validators=[numeric])
admin.site.register(HospitalData)
class Reminders(models.Model):
report = models.ForeignKey(BiteReport)
reminder_date = models.DateField()
reminder_sent = models.BooleanField(default=False)
admin.site.register(Reminders)
|
[
"mail@trisreed.com"
] |
mail@trisreed.com
|
dfab9c98e6e8c2274dad941069669ae7f05d9833
|
15f438d029528a978383f24f85035c911e314b72
|
/scripts/tile.py
|
4265e245943caf8bebb807cdee81181b01d0187c
|
[
"MIT"
] |
permissive
|
rg314/autoballs
|
91d11315a61d4c088b099744301b3f1b68eecc93
|
21fab5c810f18c0d50c23051928d3bb86fbc6941
|
refs/heads/main
| 2023-05-30T11:48:52.901933
| 2021-06-23T14:48:27
| 2021-06-23T14:48:27
| 341,683,921
| 1
| 0
|
MIT
| 2021-03-18T23:28:23
| 2021-02-23T20:39:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,397
|
py
|
import os
import glob
import cv2
import matplotlib.pyplot as plt
import numpy as np
import math
size = 512
tile_size = (size, size)
offset = (size, size)
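# Non-overlapping tiling: the step equals the tile size, so each image is cut
# into a grid of size x size patches (tiles at the image edges may be smaller).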
image_types = 'data'
origin_path = os.path.abspath(os.path.join(f'train_data/{image_types}/*', os.pardir))
images = glob.glob(origin_path+'/imgs/*.tif')
masks = [x.replace(f'imgs/img_', 'masks/img_') for x in images]
data = [(x, y) for (x, y) in list(zip(images, masks)) if os.path.exists(x) and os.path.exists(y)]
images, masks = zip(*data)
x = []
y = []
idx = 0
non_zero = 0
for img_n, mask_n in list(zip(images, masks)):
mask = cv2.imread(mask_n)
mask = (mask > 255//2) * 255
img = cv2.imread(img_n)
mask = np.asarray(mask).astype('uint8')
mask = mask[:,:,0]
img_shape = img.shape
# cv2.imwrite('test.tif', mask)
# print(mask)
if mask.shape[:2] == img.shape[:2]:
for i in range(int(math.ceil(img_shape[0]/(offset[1] * 1.0)))):
for j in range(int(math.ceil(img_shape[1]/(offset[0] * 1.0)))):
cropped_img = img[offset[1]*i:min(offset[1]*i+tile_size[1], img_shape[0]), offset[0]*j:min(offset[0]*j+tile_size[0], img_shape[1])]
cropped_mask = mask[offset[1]*i:min(offset[1]*i+tile_size[1], img_shape[0]), offset[0]*j:min(offset[0]*j+tile_size[0], img_shape[1])]
#
path = os.getcwd() + f'/train_data/data_tile_{size}/imgs'
if not os.path.exists(path):
os.makedirs(path)
imtgt = 'img_'+str(idx).zfill(5)+'.tif'
img_target = os.path.join(path, imtgt)
path = os.getcwd() + f'/train_data/data_tile_{size}/masks'
if not os.path.exists(path):
os.makedirs(path)
mskgt = imtgt
mask_target = os.path.join(path, mskgt)
# # print(cropped_img.shape, img_target)
# # print(cropped_mask.shape, mask_target)
cv2.imwrite(img_target, cropped_img)
cv2.imwrite(mask_target, ~cropped_mask)
if np.sum(cropped_mask) > 0:
non_zero += 1
idx += 1
print(f'Total {non_zero} out of {idx} which is {(non_zero*100/idx):.2f} %')
|
[
"ryan.greenhalgh@hotmail.co.uk"
] |
ryan.greenhalgh@hotmail.co.uk
|
886183df918841571dc3a1914dbf86b3af70ee3d
|
9ce345af50e58596564a942471c19b17fec5b1b7
|
/venv/Scripts/pip-script.py
|
8ff319223bd8fc27e410ac0fa90fd31b50f27fd7
|
[] |
no_license
|
ArsenTrynko/Python_lab10
|
2f6a4379a53c66f365a85f9db6c818128690d17f
|
8da5281ef60e40e43b31e7a38e1d3739d926b552
|
refs/heads/master
| 2020-05-31T00:47:41.813171
| 2019-06-03T16:26:38
| 2019-06-03T16:26:38
| 190,041,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
#!C:\Users\MI\PycharmProjects\Lab10\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
|
[
"jarkodara@gmail.com"
] |
jarkodara@gmail.com
|
ff06d12c1f57c1abcc60e770b2ba9375591bfd04
|
7ba5e9e271f1199582500bc40334ce4dfff03698
|
/manage.py
|
e1e56792c1b390bb6ae5ff85c7019e487c5a3838
|
[] |
no_license
|
R1Ng0-1488/four-a-docker
|
9ffc0cd2004b06ea9b9871eb2aad778854083bf5
|
2b66ed5baa6df777391343f82c5512b90689b981
|
refs/heads/master
| 2023-04-13T05:04:34.600023
| 2021-04-27T09:23:25
| 2021-04-27T09:23:25
| 357,534,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fourArest.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"57253219+R1Ng0-1488@users.noreply.github.com"
] |
57253219+R1Ng0-1488@users.noreply.github.com
|
af319737ac47b4c0bdc71afb813cb1635135868b
|
8e8f09667b7aae2e8e35e6c130e426aedbe3d565
|
/apps/destination/migrations/0005_auto_20170719_1338.py
|
251620743aee30278a95641ae10effdf2bac21ae
|
[] |
no_license
|
TripHub/_API
|
c33e8b08f43cc45b5d7ed788aaaaed714fdcf802
|
dad85e34e826d951a971088bc77c8e63b403f01f
|
refs/heads/master
| 2021-06-24T05:50:28.964085
| 2017-08-06T11:01:50
| 2017-08-06T11:01:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-19 13:38
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('destination', '0004_auto_20170705_1325'),
]
operations = [
migrations.RemoveField(
model_name='destination',
name='address',
),
migrations.RemoveField(
model_name='destination',
name='latitude',
),
migrations.RemoveField(
model_name='destination',
name='longitude',
),
migrations.AddField(
model_name='destination',
name='data',
field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
preserve_default=False,
),
]
|
[
"Ben@hadfieldfamily.co.uk"
] |
Ben@hadfieldfamily.co.uk
|
955bb168de6e1ab256033fbf68a95eb968b92146
|
180d93304e80e485be81dd06dbbc8a3be0c34365
|
/exporter/opentelemetry-exporter-opencensus/tests/test_otcollector_trace_exporter.py
|
222a94d60b3b85e025d4607da7ff392a7c43b338
|
[
"Apache-2.0"
] |
permissive
|
marcosflobo/opentelemetry-python
|
dbb26b04dbbc813696dbc3f8b3db4543af8cf68c
|
81d80aab5d4fd23d0d75b223d482d491ac86f006
|
refs/heads/main
| 2023-05-04T21:51:24.754989
| 2021-05-06T01:51:26
| 2021-05-06T01:51:26
| 365,263,246
| 1
| 0
|
Apache-2.0
| 2021-05-07T14:40:16
| 2021-05-07T14:40:15
| null |
UTF-8
|
Python
| false
| false
| 11,824
|
py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
import grpc
from google.protobuf.timestamp_pb2 import Timestamp
from opencensus.proto.trace.v1 import trace_pb2
import opentelemetry.exporter.opencensus.util as utils
from opentelemetry import trace as trace_api
from opentelemetry.exporter.opencensus.trace_exporter import (
OpenCensusSpanExporter,
translate_to_collector,
)
from opentelemetry.sdk import trace
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SpanExportResult
from opentelemetry.trace import TraceFlags
# pylint: disable=no-member
class TestCollectorSpanExporter(unittest.TestCase):
def test_constructor(self):
mock_get_node = mock.Mock()
patch = mock.patch(
"opentelemetry.exporter.opencensus.util.get_node",
side_effect=mock_get_node,
)
trace_api.set_tracer_provider(
TracerProvider(
resource=Resource.create({SERVICE_NAME: "testServiceName"})
)
)
host_name = "testHostName"
client = grpc.insecure_channel("")
endpoint = "testEndpoint"
with patch:
exporter = OpenCensusSpanExporter(
host_name=host_name,
endpoint=endpoint,
client=client,
)
self.assertIs(exporter.client, client)
self.assertEqual(exporter.endpoint, endpoint)
mock_get_node.assert_called_with("testServiceName", host_name)
def test_get_collector_span_kind(self):
result = utils.get_collector_span_kind(trace_api.SpanKind.SERVER)
self.assertIs(result, trace_pb2.Span.SpanKind.SERVER)
result = utils.get_collector_span_kind(trace_api.SpanKind.CLIENT)
self.assertIs(result, trace_pb2.Span.SpanKind.CLIENT)
result = utils.get_collector_span_kind(trace_api.SpanKind.CONSUMER)
self.assertIs(result, trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED)
result = utils.get_collector_span_kind(trace_api.SpanKind.PRODUCER)
self.assertIs(result, trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED)
result = utils.get_collector_span_kind(trace_api.SpanKind.INTERNAL)
self.assertIs(result, trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED)
def test_proto_timestamp_from_time_ns(self):
result = utils.proto_timestamp_from_time_ns(12345)
self.assertIsInstance(result, Timestamp)
self.assertEqual(result.nanos, 12345)
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
def test_translate_to_collector(self):
trace_id = 0x6E0C63257DE34C926F9EFCD03927272E
span_id = 0x34BF92DEEFC58C92
parent_id = 0x1111111111111111
base_time = 683647322 * 10 ** 9 # in ns
start_times = (
base_time,
base_time + 150 * 10 ** 6,
base_time + 300 * 10 ** 6,
)
durations = (50 * 10 ** 6, 100 * 10 ** 6, 200 * 10 ** 6)
end_times = (
start_times[0] + durations[0],
start_times[1] + durations[1],
start_times[2] + durations[2],
)
span_context = trace_api.SpanContext(
trace_id,
span_id,
is_remote=False,
trace_flags=TraceFlags(TraceFlags.SAMPLED),
trace_state=trace_api.TraceState([("testkey", "testvalue")]),
)
parent_span_context = trace_api.SpanContext(
trace_id, parent_id, is_remote=False
)
other_context = trace_api.SpanContext(
trace_id, span_id, is_remote=False
)
event_attributes = {
"annotation_bool": True,
"annotation_string": "annotation_test",
"key_float": 0.3,
}
event_timestamp = base_time + 50 * 10 ** 6
event = trace.Event(
name="event0",
timestamp=event_timestamp,
attributes=event_attributes,
)
link_attributes = {"key_bool": True}
link_1 = trace_api.Link(
context=other_context, attributes=link_attributes
)
link_2 = trace_api.Link(
context=parent_span_context, attributes=link_attributes
)
span_1 = trace._Span(
name="test1",
context=span_context,
parent=parent_span_context,
events=(event,),
links=(link_1,),
kind=trace_api.SpanKind.CLIENT,
)
span_2 = trace._Span(
name="test2",
context=parent_span_context,
parent=None,
kind=trace_api.SpanKind.SERVER,
)
span_3 = trace._Span(
name="test3",
context=other_context,
links=(link_2,),
parent=span_2.get_span_context(),
)
otel_spans = [span_1, span_2, span_3]
otel_spans[0].start(start_time=start_times[0])
otel_spans[0].set_attribute("key_bool", False)
otel_spans[0].set_attribute("key_string", "hello_world")
otel_spans[0].set_attribute("key_float", 111.22)
otel_spans[0].set_attribute("key_int", 333)
otel_spans[0].set_status(trace_api.Status(trace_api.StatusCode.OK))
otel_spans[0].end(end_time=end_times[0])
otel_spans[1].start(start_time=start_times[1])
otel_spans[1].set_status(
trace_api.Status(
trace_api.StatusCode.ERROR,
{"test", "val"},
)
)
otel_spans[1].end(end_time=end_times[1])
otel_spans[2].start(start_time=start_times[2])
otel_spans[2].end(end_time=end_times[2])
output_spans = translate_to_collector(otel_spans)
self.assertEqual(len(output_spans), 3)
self.assertEqual(
output_spans[0].trace_id, b"n\x0cc%}\xe3L\x92o\x9e\xfc\xd09''."
)
self.assertEqual(
output_spans[0].span_id, b"4\xbf\x92\xde\xef\xc5\x8c\x92"
)
self.assertEqual(
output_spans[0].name, trace_pb2.TruncatableString(value="test1")
)
self.assertEqual(
output_spans[1].name, trace_pb2.TruncatableString(value="test2")
)
self.assertEqual(
output_spans[2].name, trace_pb2.TruncatableString(value="test3")
)
self.assertEqual(
output_spans[0].start_time.seconds,
int(start_times[0] / 1000000000),
)
self.assertEqual(
output_spans[0].end_time.seconds, int(end_times[0] / 1000000000)
)
self.assertEqual(output_spans[0].kind, trace_api.SpanKind.CLIENT.value)
self.assertEqual(output_spans[1].kind, trace_api.SpanKind.SERVER.value)
self.assertEqual(
output_spans[0].parent_span_id, b"\x11\x11\x11\x11\x11\x11\x11\x11"
)
self.assertEqual(
output_spans[2].parent_span_id, b"\x11\x11\x11\x11\x11\x11\x11\x11"
)
self.assertEqual(
output_spans[0].status.code,
trace_api.StatusCode.OK.value,
)
self.assertEqual(len(output_spans[0].tracestate.entries), 1)
self.assertEqual(output_spans[0].tracestate.entries[0].key, "testkey")
self.assertEqual(
output_spans[0].tracestate.entries[0].value, "testvalue"
)
self.assertEqual(
output_spans[0].attributes.attribute_map["key_bool"].bool_value,
False,
)
self.assertEqual(
output_spans[0]
.attributes.attribute_map["key_string"]
.string_value.value,
"hello_world",
)
self.assertEqual(
output_spans[0].attributes.attribute_map["key_float"].double_value,
111.22,
)
self.assertEqual(
output_spans[0].attributes.attribute_map["key_int"].int_value, 333
)
self.assertEqual(
output_spans[0].time_events.time_event[0].time.seconds, 683647322
)
self.assertEqual(
output_spans[0]
.time_events.time_event[0]
.annotation.description.value,
"event0",
)
self.assertEqual(
output_spans[0]
.time_events.time_event[0]
.annotation.attributes.attribute_map["annotation_bool"]
.bool_value,
True,
)
self.assertEqual(
output_spans[0]
.time_events.time_event[0]
.annotation.attributes.attribute_map["annotation_string"]
.string_value.value,
"annotation_test",
)
self.assertEqual(
output_spans[0]
.time_events.time_event[0]
.annotation.attributes.attribute_map["key_float"]
.double_value,
0.3,
)
self.assertEqual(
output_spans[0].links.link[0].trace_id,
b"n\x0cc%}\xe3L\x92o\x9e\xfc\xd09''.",
)
self.assertEqual(
output_spans[0].links.link[0].span_id,
b"4\xbf\x92\xde\xef\xc5\x8c\x92",
)
self.assertEqual(
output_spans[0].links.link[0].type,
trace_pb2.Span.Link.Type.TYPE_UNSPECIFIED,
)
self.assertEqual(
output_spans[1].status.code,
trace_api.StatusCode.ERROR.value,
)
self.assertEqual(
output_spans[2].links.link[0].type,
trace_pb2.Span.Link.Type.PARENT_LINKED_SPAN,
)
self.assertEqual(
output_spans[0]
.links.link[0]
.attributes.attribute_map["key_bool"]
.bool_value,
True,
)
def test_export(self):
mock_client = mock.MagicMock()
mock_export = mock.MagicMock()
mock_client.Export = mock_export
host_name = "testHostName"
collector_exporter = OpenCensusSpanExporter(
client=mock_client, host_name=host_name
)
trace_id = 0x6E0C63257DE34C926F9EFCD03927272E
span_id = 0x34BF92DEEFC58C92
span_context = trace_api.SpanContext(
trace_id,
span_id,
is_remote=False,
trace_flags=TraceFlags(TraceFlags.SAMPLED),
)
otel_spans = [
trace._Span(
name="test1",
context=span_context,
kind=trace_api.SpanKind.CLIENT,
)
]
result_status = collector_exporter.export(otel_spans)
self.assertEqual(SpanExportResult.SUCCESS, result_status)
# pylint: disable=unsubscriptable-object
export_arg = mock_export.call_args[0]
service_request = next(export_arg[0])
output_spans = getattr(service_request, "spans")
output_node = getattr(service_request, "node")
self.assertEqual(len(output_spans), 1)
self.assertIsNotNone(getattr(output_node, "library_info"))
self.assertIsNotNone(getattr(output_node, "service_info"))
output_identifier = getattr(output_node, "identifier")
self.assertEqual(
getattr(output_identifier, "host_name"), "testHostName"
)
|
[
"noreply@github.com"
] |
marcosflobo.noreply@github.com
|
bcaf5aa98c8edf969dc67d07dbc2b241654d3d1d
|
fb7f04ffbdcdf4f5aa7c0e6ccf83f7671ef10770
|
/server.py
|
cadfd74d1f7a4b805332831f5238f54f81f48b2d
|
[] |
no_license
|
Bthelisma/LandingPage
|
8ce348ece186c57e98d00cb5fdde8149587accae
|
87fdf0bed1ad0e7a978095c47d9ba3ea860b74b7
|
refs/heads/master
| 2020-03-22T18:59:26.266574
| 2018-07-10T23:14:03
| 2018-07-10T23:14:03
| 140,496,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
return render_template("index.html")
@app.route('/ninjas')
def ninjas():
return render_template("ninjas.html")
@app.route('/dojos')
def dojos():
return render_template("dojos.html")
app.run(debug=True)
|
[
"contactjerseysure@gmail.com"
] |
contactjerseysure@gmail.com
|
1af07f337196fda10e15701549e6243804b7e233
|
9467b65606bdeb2ff1417267728f95aac96e2bd9
|
/p24.py
|
ab13db0c38f7d99139ac1511ae52bfb7916bcb43
|
[] |
no_license
|
prince3453/python
|
a7d1e46f0669f50ac4ca74aa11a393a3f69c9471
|
ca31d46dd885b619e4d7cefbf83b813684afad93
|
refs/heads/master
| 2020-12-06T13:31:13.314451
| 2020-05-16T05:53:00
| 2020-05-16T05:53:00
| 232,474,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 901
|
py
|
class Bank:
def __init__(self, balance):
self.balance = balance
self.methods = [self.printBalance,self.deposite,self.withdraw]
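        # dispatch table: menu choice N calls self.methods[N-1]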
def printBalance(self):
print("Current Balance:",self.balance)
def inputAmount(self):
return float(input("Enter Amount:"))
def deposite(self):
amount = self.inputAmount()
self.balance += amount
self.printBalance()
def withdraw(self):
amount = self.inputAmount()
if self.balance - amount <= 500:
print("The Account Does Not Has Sufficient Balance.")
else:
self.balance -= amount
self.printBalance()
var = Bank(10000)
while True:
    choice = int(input("select \n1. for checking balance.\n2. for deposit.\n3. for withdrawal.\n4. for exit."))
if choice == 4: break
else:
var.methods[choice-1]()
|
[
"noreply@github.com"
] |
prince3453.noreply@github.com
|
68e6812af340c1592f989fbc771b1033a152cf91
|
582660ae9d3c21010042bd6262e421a2a6e94e61
|
/python/introduction/python_if-else/python_if_else.py
|
a8c09703dd210c9a51a596580200c33614db93c6
|
[] |
no_license
|
tim-mccabe/hacker-rank
|
5e12bcd9baabb94c98bca8ef906063092279f4a2
|
61480d2f7b4d567ac48d526417afd7dbc5a2329e
|
refs/heads/master
| 2023-01-30T11:57:23.041756
| 2020-12-10T20:50:52
| 2020-12-10T20:50:52
| 320,064,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
n = int(input().strip())
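    # "Weird" for odd n, or even n in 6..20; "Not Weird" for even n in 2..5 or n > 20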
    if n % 2 == 1:
        print('Weird')
    if 2 <= n <= 5 and n % 2 == 0:
        print('Not Weird')
    if 6 <= n <= 20 and n % 2 == 0:
        print('Weird')
    if n > 20 and n % 2 == 0:
        print('Not Weird')
|
[
"timmccabe44@gmail.com"
] |
timmccabe44@gmail.com
|
79dbeaf0b944d391662e5119f73dae9367fe504f
|
bd26284c804ded76f21d25b9c7a355304428e4d7
|
/2/2-4. Cartpole.py
|
e5c1e31c1bf0433c30db2eb054cf87b7c840057f
|
[
"Apache-2.0"
] |
permissive
|
Wonjuseo/Project101
|
0c93171bbd6ab86dfbc32f474e12e7b7229db4da
|
8c49601e34f56035acd198a09428fa71f6606ca7
|
refs/heads/master
| 2021-01-19T23:25:40.658736
| 2018-07-02T13:45:03
| 2018-07-02T13:45:03
| 88,979,764
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 633
|
py
|
# Cart Pole example
import gym
# Environment
env = gym.make('CartPole-v0')
env.reset()
# Parameters
random_episodes = 0
reward_sum = 0
while random_episodes < 10:
# Rendering
env.render()
# Get action
action = env.action_space.sample()
# Update state, reward, done
observation, reward, done, _ = env.step(action)
print(observation,reward,done)
# Add reward
reward_sum += reward
    # when the pole falls (done), report the accumulated episode reward and reset
if done:
random_episodes += 1
print("Reward for this episode was:", reward_sum)
reward_sum = 0
env.reset()
|
[
"noreply@github.com"
] |
Wonjuseo.noreply@github.com
|
9800c7757cdf7213dc56c1006e976f8cfdd3b3f5
|
19e84b3ea7944811b6fd113309b8a7c7b5ae33ba
|
/oec/db_data/views.py
|
cf69e179543b30faae5c5f12887affeaeba22e82
|
[] |
no_license
|
fxcebx/oec
|
cf9c4cfaa3b4d92d4cbd3539ff94b7f910209167
|
cbba5d7513f63cdb5dc761146db784f2a9879ea7
|
refs/heads/master
| 2020-12-06T20:41:17.105920
| 2015-10-10T03:01:56
| 2015-10-10T03:01:56
| 44,028,188
| 0
| 0
| null | 2015-10-25T22:06:13
| 2015-10-10T21:01:46
|
CSS
|
UTF-8
|
Python
| false
| false
| 5,007
|
py
|
from flask import Blueprint, request, jsonify, make_response, g
from oec import db
from oec.utils import make_query
from oec.db_attr.models import Yo as Attr_yo
from oec.db_data import hs92_models
from oec.db_data import hs96_models
from oec.db_data import hs02_models
from oec.db_data import hs07_models
from oec.db_data import sitc_models
from oec.decorators import crossdomain
mod = Blueprint('data', __name__, url_prefix='/<any("sitc","hs","hs92","hs96","hs02","hs07"):classification>')
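# The classification captured from the URL prefix picks the model module: the
# preprocessor below attaches the matching hs92/hs96/hs02/hs07/sitc models to each request.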
@mod.url_value_preprocessor
def get_product_classification_models(endpoint, values):
g.locale="en"
classification = values.pop('classification')
g.prod_classification = classification
if classification == "hs" or classification == "hs92":
g.prod_models = hs92_models
elif classification == "hs96":
g.prod_models = hs96_models
elif classification == "hs02":
g.prod_models = hs02_models
elif classification == "hs07":
g.prod_models = hs07_models
elif classification == "sitc":
g.prod_models = sitc_models
g.output_depth = request.args.get("output_depth")
############################################################
# ----------------------------------------------------------
# 2 variable views
#
############################################################
@mod.route('/<trade_flow>/all/<origin_id>/all/all/')
@mod.route('/<trade_flow>/<year>/<origin_id>/all/all/')
@mod.route('/<trade_flow>/<year>/show/all/all/')
@crossdomain(origin='*')
def yo(**kwargs):
q = db.session.query(Attr_yo, getattr(g.prod_models, "Yo")) \
.filter(Attr_yo.origin_id == getattr(g.prod_models, "Yo").origin_id) \
.filter(Attr_yo.year == getattr(g.prod_models, "Yo").year)
return make_response(make_query(q, request.args, g.locale, getattr(g.prod_models, "Yo"), **kwargs))
@mod.route('/<trade_flow>/all/all/<dest_id>/all/')
@mod.route('/<trade_flow>/<year>/all/<dest_id>/all/')
@mod.route('/<trade_flow>/<year>/all/show/all/')
@crossdomain(origin='*')
def yd(**kwargs):
return make_response(make_query(getattr(g.prod_models, "Yd"), request.args, g.locale, **kwargs))
@mod.route('/<trade_flow>/all/all/all/<prod_id>/')
@mod.route('/<trade_flow>/<year>/all/all/<prod_id>/')
@mod.route('/<trade_flow>/<year>/all/all/show/')
@crossdomain(origin='*')
def yp(**kwargs):
return make_response(make_query(getattr(g.prod_models, "Yp"), \
request.args, g.locale, classification=g.prod_classification, \
output_depth=g.output_depth, **kwargs))
############################################################
# ----------------------------------------------------------
# 3 variable views
#
############################################################
@mod.route('/<trade_flow>/all/<origin_id>/show/all/')
@mod.route('/<trade_flow>/<year>/<origin_id>/show/all/')
@crossdomain(origin='*')
def yod(**kwargs):
return make_response(make_query(getattr(g.prod_models, "Yod"), request.args, g.locale, **kwargs))
@mod.route('/<trade_flow>/all/<origin_id>/all/show/')
@mod.route('/<trade_flow>/<year>/<origin_id>/all/show/')
@crossdomain(origin='*')
def yop(**kwargs):
return make_response(make_query(getattr(g.prod_models, "Yop"), \
request.args, g.locale, classification=g.prod_classification, \
output_depth=g.output_depth, **kwargs))
@mod.route('/<trade_flow>/all/show/all/<prod_id>/')
@mod.route('/<trade_flow>/<year>/show/all/<prod_id>/')
@crossdomain(origin='*')
def yop_dest(**kwargs):
return make_response(make_query(getattr(g.prod_models, "Yop"), \
request.args, g.locale, classification=g.prod_classification, **kwargs))
@mod.route('/<trade_flow>/all/all/<dest_id>/show/')
@mod.route('/<trade_flow>/<year>/all/<dest_id>/show/')
@crossdomain(origin='*')
def ydp(**kwargs):
return make_response(make_query(getattr(g.prod_models, "Ydp"), \
request.args, g.locale, classification=g.prod_classification, \
output_depth=g.output_depth, **kwargs))
############################################################
# ----------------------------------------------------------
# 4 variable views
#
############################################################
@mod.route('/<trade_flow>/all/<origin_id>/<dest_id>/all/')
@mod.route('/<trade_flow>/<year>/<origin_id>/<dest_id>/all/')
@mod.route('/<trade_flow>/all/<origin_id>/<dest_id>/show/')
@mod.route('/<trade_flow>/<year>/<origin_id>/<dest_id>/show/')
@crossdomain(origin='*')
def yodp(**kwargs):
return make_response(make_query(getattr(g.prod_models, "Yodp"), \
request.args, g.locale, classification=g.prod_classification, \
output_depth=g.output_depth, **kwargs))
@mod.route('/<trade_flow>/all/<origin_id>/show/<prod_id>/')
@mod.route('/<trade_flow>/<year>/<origin_id>/show/<prod_id>/')
@crossdomain(origin='*')
def yodp_dest(**kwargs):
return make_response(make_query(getattr(g.prod_models, "Yodp"), \
request.args, g.locale, classification=g.prod_classification, **kwargs))
|
[
"alexandersimoes@gmail.com"
] |
alexandersimoes@gmail.com
|
49ef89ed9847f6370bab12ee14d5b37c98c4382f
|
8cf211cabe8c5169b2c9c3c6b38f69ac6c93b93e
|
/flask_bootcamp/section_6/venv/lib/python3.6/os.py
|
ac0bdb84664fdfc380e6a61aeee8932f5167c92b
|
[] |
no_license
|
bopopescu/Python_Personal_Projects
|
020256cb6835438b1b776eacb1a39d4cb5bc2efc
|
025145130da5ac846b8aa14764783739ff68f64c
|
refs/heads/master
| 2022-11-21T04:47:17.253558
| 2018-10-25T22:12:07
| 2018-10-25T22:12:07
| 281,146,690
| 0
| 0
| null | 2020-07-20T14:57:05
| 2020-07-20T14:57:04
| null |
UTF-8
|
Python
| false
| false
| 44
|
py
|
/home/vinicius/anaconda3/lib/python3.6/os.py
|
[
"vinicius.yosiura@live.com"
] |
vinicius.yosiura@live.com
|
a3832a608ada34da7f6cc1b6ee7f96711396596b
|
00b2e5b0e600dccf0857e00b5710005062df92e3
|
/Fatima/fatima_raman.py
|
3abb8f47a90baebde4c4833d5c9befe4a28ee767
|
[] |
no_license
|
NMI-BMNT/auswertung
|
bf933046df3db729a3769fc50ce8c047d8a86177
|
b9017ac6745764fc4ddf63c9d982a21e30777885
|
refs/heads/master
| 2022-01-10T12:41:10.416401
| 2018-05-23T11:48:55
| 2018-05-23T11:48:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,036
|
py
|
import os
import numpy as np
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
import scipy.optimize as opt
from scipy.optimize import curve_fit, basinhopping
import scipy.sparse as sparse
from scipy.special import *
from plotsettings import *
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import seaborn as sns
from skimage.feature import peak_local_max
from skimage.morphology import watershed
from skimage.filters import threshold_otsu
import re
import scipy.signal as signal
import peakutils as pu
def lorentz(x, amplitude, x0, sigma):
g = (amplitude*2/(np.pi*sigma))/(1+4*np.square((x-x0)/sigma))
return g.ravel()
def gauss(x, amplitude, x0, sigma):
g = amplitude/sigma * np.sqrt(4*np.log(2)/np.pi)*np.exp(-4*np.log(2)*np.square((x-x0)/sigma))
return g.ravel()
# https://www.webpages.uidaho.edu/brauns/vibspect1.pdf
def asymvoigt(x, amplitude, x0, sigma, a , f):
sigma = 2 * sigma/(1 + np.exp(a*(x-x0)) )
g = f*lorentz(x,amplitude,x0,sigma)+(1-f)*gauss(x,amplitude,x0,sigma)
return g.ravel()
def fit_fun(x, amp, x0, sigma,a,f):
return asymvoigt(x, amp, x0, sigma,a,f)
path = '/home/sei/Raman/Fatima3/'
savedir = path + 'plots/'
peak_pos = [1085,1590]
search_width = 100 # cm^-1
try:
os.mkdir(savedir)
except:
pass
files = []
for file in os.listdir(path):
if re.search(r"\.(txt)$", file) is not None:
files.append(file)
print(files)
#file = files[0]
k_max = np.zeros((len(files),len(peak_pos)))
c_max = np.zeros((len(files),len(peak_pos)))
labels = np.array([])
for i,file in enumerate(files):
print(file)
k, counts = np.loadtxt(path + file, unpack=True)
counts = signal.savgol_filter(counts, 31, 1, mode='interp')
base = pu.baseline(counts, 11, max_it=10000, tol=0.00001)
counts -= base
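    # smooth the spectrum (Savitzky-Golay) and subtract the estimated baseline before peak picking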
#newfig(0.9)
plt.plot(k, counts, linewidth=1)
# plt.plot(k, bl, linewidth=1)
# plt.plot(wl[mask], filtered[mask], color="black", linewidth=0.6)
plt.ylabel(r'$I_{\nu}\, /\, counts$')
plt.xlabel(r'$wavenumber\, /\, cm^{-1}$')
# plt.xlim((minwl, maxwl))
# plt.plot(wl, counts)
plt.tight_layout()
#plt.show()
plt.savefig(savedir + file[:-4] + ".pdf", dpi=300)
plt.close()
for j,peak in enumerate(peak_pos):
mask = (k <= peak + search_width) & (k >= peak - search_width)
c1 = counts[mask]
k1 = k[mask]
max_ind = np.argmax(c1)
k_max[i,j] = k1[max_ind]
c_max[i,j] = c1[max_ind]
labels = np.append(labels,file[:-6])
print(c_max)
sort = np.argsort(labels)
labels = labels[sort]
k_max = k_max[sort,:]
c_max = c_max[sort,:]
print(labels)
label = np.unique(labels)
print(label)
for l in label:
mask = labels == l
plt.scatter(k_max[mask], c_max[mask])
plt.savefig(path + "scatter.pdf", dpi=300)
plt.close()
mean = np.zeros((len(label),len(peak_pos)))
err = np.zeros((len(label),len(peak_pos)))
for i,l in enumerate(label):
mask = labels == l
for j in range(len(peak_pos)):
mean[i,j] = np.mean(c_max[mask,j])
err[i,j] = np.std(c_max[mask,j])
print(mean)
print(mean[:,0].ravel())
print(np.arange(0,mean.shape[0],1))
for i in range(mean.shape[1]):
plt.bar(np.arange(0,mean.shape[0],1)*mean.shape[1]+(i+1),mean[:,i].ravel(),yerr=err[:,i].ravel())
plt.xticks((np.arange(0,mean.shape[0],1)*mean.shape[1]+(mean.shape[1]+1)/2), label)
plt.savefig(path + "bar.pdf", dpi=300)
plt.close()
print('-> Writing measured values to file')
with open(path + "raman.csv", 'w') as f:
f.write("label,")
for j in range(mean.shape[1]):
f.write("mean"+str(peak_pos[j])+",err"+str(peak_pos[j])+",")
f.write("\r\n")
for i in range(len(label)):
f.write( label[i] + ",")
for j in range(mean.shape[1]):
f.write( str(mean[i,j])+ "," + str(err[i,j])+"," )
f.write("\r\n")
mean = np.zeros((len(label),len(counts)))
err = np.zeros((len(label),len(counts)))
for i, l in enumerate(label):
buf = []
for j,file in enumerate(files):
if file[:-6] == l:
k, counts = np.loadtxt(path + file, unpack=True)
#counts = signal.savgol_filter(counts, 31, 1, mode='interp')
#base = pu.baseline(counts, 11, max_it=10000, tol=0.00001)
#counts -= base
buf.append(counts)
buf = np.array(buf)
print(buf.shape)
mean[i, :] = np.mean(buf,axis=0)
err[i, :] = np.std(buf,axis=0)
fig, ax = newfig(0.9)
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
legend = ["A: 30 min","B: 30 min","C: 90 min","D: 90 min"]
print(label)
print(legend)
for i, l in enumerate(label):
poly = np.array((k,mean[i,:]+err[i,:]+1000*i))
poly = np.hstack((poly,np.fliplr(np.array((k, mean[i,:] - err[i,:]+1000*i)))))
poly = poly.T
ax.add_patch(Polygon(poly, closed=True,fill=True,alpha = 0.3,facecolor=colors[i]))
#plt.plot(wl, mean_spec, linewidth=0.8)
plt.plot(k,mean[i,:]+1000*i, linewidth=0.8)
plt.ylabel(r'$I_{\nu}\, /\, counts$')
plt.xlabel(r'$\Delta\widetilde{\nu}\, /\, cm^{-1}$')
plt.legend(legend)
plt.tight_layout()
plt.savefig(path + "overview.pdf", dpi=300)
plt.close()
# width = 100
# max_ind = np.argmax(counts)
# indices = np.arange(0, len(k), 1)
# mask = (indices <= max_ind + width) & (indices >= max_ind - width)
# # inds = np.arange(max_ind-width,max_ind+width,1)
# k1 = k[mask]
# counts1 = counts[mask]
# def err_fun(p):
# fit = fit_fun(k1, *p)
# diff = np.abs(counts1 - fit)
# return np.sum(diff)
#
# #def fit_fun(x, amp, x0, sigma,a,f,b,c):
# b = 0# ( np.mean(counts1[20:])-np.mean(counts1[:-20]) )/( np.mean(k1[20:])-np.mean(k1[:-20]) )
# c = 0#np.mean(k1[20:])
# start = [counts[max_ind]*3,k[max_ind],150,0.01,0.1]
# upper = [counts[max_ind]*10, k[max_ind]+width, 500, 1,1]
# lower = [ 0, k[max_ind]-width, 10, 0,0]
# bnds = []
# for i in range(len(upper)):
# bnds.append((lower[i], upper[i]))
#
# #minimizer_kwargs = {"method": "SLSQP","bounds": bnds,"tol":1e-10}
# #res = basinhopping(err_fun, start, minimizer_kwargs=minimizer_kwargs, niter=1000,disp=False)
# res = opt.minimize(err_fun, start, method='SLSQP', options={'disp': True, 'maxiter': 10000},tol=1e-10)
# #res = opt.minimize(err_fun, start, method='L-BFGS-B', options={'disp': True, 'maxiter': 5000})
# #res = opt.minimize(err_fun, start, method='Nelder-Mead', options={'disp': True, 'maxiter': 5000})
#
# popt = res.x
#
# print(popt)
# plt.plot(k1, counts1, linewidth=1)
# plt.plot(k1, fit_fun(k1,popt[0],popt[1],popt[2],popt[3],popt[4]), linewidth=1)
# #plt.plot(k1, popt[5]*k1+popt[6])
# plt.ylabel(r'$I_{\nu}\, /\, counts$')
# plt.xlabel(r'$wavenumber\, /\, cm^{-1}$')
# plt.savefig(savedir + file[:-4] + "fit.pdf", dpi=300)
# #plt.show()
# plt.close()
#
# fit = fit_fun(k1,popt[0],popt[1],popt[2],popt[3],popt[4])
# print(np.max(fit))
|
[
"Simon.Dickreuter@uni-tuebingen.de"
] |
Simon.Dickreuter@uni-tuebingen.de
|
9ae24c0c1c39be6cfa372b401d1b1ebdd5bd2035
|
d15be7017a8d28ad351d2872fdf36b8638a60abd
|
/Solutions/week01/word_counter.py
|
a35bc79b42b58ad6bf3ccc3b99120b527f4f46df
|
[] |
no_license
|
shadydealer/Python-101
|
60ebdd098d38de45bede35905a378e8311e6891a
|
9ec2dccd61f54f4ff8f86fe6dd26cd7dd06f570d
|
refs/heads/master
| 2021-04-15T11:42:57.712723
| 2018-06-04T10:32:30
| 2018-06-04T10:32:30
| 126,197,141
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
word = input()
rows, cols = map(int, input().split())
def get_input():
"""get_input() -> 2D array"""
matrix = []
for i in range(rows):
matrix.append(input().split())
# for subset in matrix:
# print(subset)
return matrix
move_x =[-1,0,1]
move_y =[-1,0,1]
def is_valid_index(x,y):
"""is_valid_index(int, int) -> bool"""
if x >= 0 and y >= 0 and x < cols and y < rows:
return True
return False
def count_occurance(matrix):
    """count_occurance(2D array) -> unsigned int"""
    # Try every starting cell and every direction (skipping the null move) and
    # count the placements where the whole word fits in a straight line.
    count = 0
    for y in range(rows):
        for x in range(cols):
            for dy in move_y:
                for dx in move_x:
                    if dx == 0 and dy == 0:
                        continue
                    if all(is_valid_index(x + k * dx, y + k * dy)
                           and matrix[y + k * dy][x + k * dx] == word[k]
                           for k in range(len(word))):
                        count += 1
    return count
matrix = get_input()
print(count_occurance(matrix))
|
[
"shady"
] |
shady
|
e62a576701748974d99d413ad69f0fa9b0b33e9b
|
21c77c2ff4d5fbb982943a22abd46a18a804621c
|
/flow_control/your_name.py
|
68fec85da56a05006fdeaef5eb410d09972eb812
|
[] |
no_license
|
foleymd/boring-stuff
|
56592f576da19238de5c742b78c34d86688b6319
|
d81f10f801a512c38a713344a2fe1d8b5b7e5a09
|
refs/heads/main
| 2023-01-05T19:21:31.248420
| 2020-10-22T20:24:10
| 2020-10-22T20:24:10
| 302,201,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
# break example
name = ''
while True:
print('Please type your name.')
name = input()
if name == 'your name':
break
print('Thank you!')
# continue example
spam = 0
while spam < 5:
spam = spam + 1
if spam == 3:
continue
print('spam is ' + str(spam))
|
[
"foleymd@gmail.com"
] |
foleymd@gmail.com
|
8de31727528745859574b0a71d4d7f4265c46740
|
2718b6f68a717b24cd6238a20d4116b3dea3201b
|
/BlogTemplate/mysite_env/mysite/apps/blog/views.py
|
39b584eea388bcf248d6a6d595bae4840b4bf60b
|
[] |
no_license
|
tminlun/BlogTemplate
|
e94654e01e170f27c97c197c898c102518ad13ab
|
d475587fdd9e111961bbfa56666255d38cfdc056
|
refs/heads/master
| 2022-12-11T00:51:53.019391
| 2018-12-05T14:54:04
| 2018-12-05T14:54:04
| 138,825,320
| 0
| 0
| null | 2022-12-08T02:25:29
| 2018-06-27T03:30:20
|
Python
|
UTF-8
|
Python
| false
| false
| 4,519
|
py
|
from django.shortcuts import render,get_object_or_404
from django.core.paginator import Paginator
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db.models.aggregates import Count
from read_statistics.utils import read_statistics_once_read
from comment.models import Comment
from .models import Blog,BlogType
#Builds the data shared by every blog list view; blog_all_list is the queryset to paginate, since each view fetches it differently
def get_blog_list_common_data(request, blog_all_list):
    paginator = Paginator(blog_all_list, settings.EACH_PAGE_BLOG_NUMBER) # 10 blog posts per page
    page_num = request.GET.get('page', 1) # page number from the GET query string
    page_of_blogs = paginator.get_page(page_num) # Page object for the requested page
    current_page_num = page_of_blogs.number # current page number
    # max(current_page_num - 2, 1) only clamps the lower bound;
    # the window is still current_page_num - 2 .. current_page_num + 2
    page_range = list(range(max(current_page_num - 2, 1), current_page_num)) + \
                 list(range(current_page_num, min(current_page_num + 2, paginator.num_pages) + 1))
    # insert ellipsis markers where pages are skipped
if page_range[0] - 1 >= 2:
page_range.insert(0, '...')
    # if at least two pages lie between the last shown page and the total page count
if paginator.num_pages - page_range[-1] >= 2:
page_range.append('...')
    # always show the first and the last page
    if page_range[0] != 1:
        page_range.insert(0, 1) # prepend page 1 (insert places it at the front)
    if page_range[-1] != paginator.num_pages:
        page_range.append(paginator.num_pages) # append the final page number
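    # e.g. current page 5 of 9 -> page_range == [1, '...', 3, 4, 5, 6, 7, '...', 9]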
blog_dates = Blog.objects.dates('created_time','month',order="DESC")
blog_dates_dict = {}
for blog_date in blog_dates:
date_count = Blog.objects.filter(created_time__year=blog_date.year,created_time__month=blog_date.month).count()
blog_dates_dict[blog_date] = date_count
context = {}
    context['page_of_blogs'] = page_of_blogs # current page of blogs
    context['page_range'] = page_range # page numbers handed to the template
    context['blogs'] = page_of_blogs.object_list # the blogs on the current page
    # annotate attaches a blog count to every BlogType
    context['blog_types']=BlogType.objects.annotate(type_count = Count('blog')).filter(type_count__gt=0)
    # every archive year and month
    context['blog_dates'] = blog_dates_dict # gotcha: remember to pass both the dates and their counts
    return context # rendered by the caller via render(request,'?.html',context)
def blog_list(request):
    blog_all_list = Blog.objects.all()#the full blog queryset
    context = get_blog_list_common_data(request,blog_all_list) #build the shared context
return render(request, 'blog/blog_list.html', context)
def blogs_with_type(request,blog_with_type_pk):
    blog_type = get_object_or_404(BlogType,pk = blog_with_type_pk)#fetch the category
    blog_all_list = Blog.objects.filter(blog_type=blog_type)#all blogs of this type
context = get_blog_list_common_data(request, blog_all_list)
    context['blog_type'] = blog_type # category name
return render(request, 'blog/blogs_with_type.html', context)
def blogs_with_date(request,year,month):
    #blogs published in the given year and month
blog_all_list = Blog.objects.filter(created_time__year=year, created_time__month=month)
context = get_blog_list_common_data(request, blog_all_list)
    context['blog_with_date'] = "%s年%s月" %(year,month) #label for the current year and month
return render(request, 'blog/blogs_with_date.html', context)
#blog detail view
def blog_detail(request,blog_pk):
context = {}
blog = get_object_or_404(Blog, pk = blog_pk)
    #only count the read when the browser doesn't already carry the read cookie; returns the cookie key
read_cookie_key = read_statistics_once_read(request, blog)
blog_content_type = ContentType.objects.get_for_model(blog)
comments = Comment.objects.filter(content_type=blog_content_type,object_id=blog.pk)
context['blog'] = blog
    #previous (newer) post, filtered with __gt=
    context['previous_blog'] = Blog.objects.filter(created_time__gt=blog.created_time).last()
    #next (older) post, filtered with __lt=
    context['next_blog'] = Blog.objects.filter(created_time__lt=blog.created_time).first()
context['user'] = request.user
context['comments'] = comments
response=render(request, 'blog/blog_detail.html', context)
    response.set_cookie(read_cookie_key, 'true') #gotcha: remember to supply the cookie value
return response
|
[
"1272443075@qq.com"
] |
1272443075@qq.com
|
31c16a62d12f6538275dc374ce02c654b07ba690
|
582b93ca3747f7ec4ce8c00464c26698b0b8b229
|
/DevExa/settings.py
|
658b53ef61b015873d139dc62dd14ca2e8e29f93
|
[] |
no_license
|
BeToOxX/Final
|
7615e0e37c4ca8858687f0293b5058dc75d79a9c
|
f4371207836b4f7cd856c7237ada3cd60a597bce
|
refs/heads/master
| 2023-08-23T18:06:00.585583
| 2021-10-07T06:01:50
| 2021-10-07T06:01:50
| 414,474,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,325
|
py
|
"""
Django settings for DevExa project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=g8j_n6d=)gf_b*vn4hlt%!v5#njdwz_x_u80roi@51qcfze52'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.web'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'DevExa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['template'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DevExa.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
import dj_database_url
from decouple import config
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
"""
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
"""
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"rubengza.98@gmail.com"
] |
rubengza.98@gmail.com
|
ed3e3d70a1fd13a1e41fa4985818c02092340a95
|
413fb29b62fe9ba07362d614ba49e7200482216d
|
/fraud_detection/src/com/mr/data_analysis_python/sampleFraudData.py
|
4a77df03d96dbcfeca3f64da411e6f1ddb5ee5a5
|
[] |
no_license
|
cash2one/fraud_detection
|
ff2cc0a151b16cd3151c584839a227a384716ca7
|
6097e47800394f8659c5d14ab6a6538b2af8d444
|
refs/heads/master
| 2021-01-19T04:46:32.710395
| 2016-07-09T11:58:05
| 2016-07-09T11:58:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
__author__ = 'TakiyaHideto'
import sys
import random
class SampleFraudData:
def __init__(self, input, output):
self.input = input
self.output = output
self.sampleRatio = float(sys.argv[3])/float(sys.argv[4])
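    # Keep every fraud row (label "1"); keep a normal row (label "0") with
    # probability fraudDataQuant/normalDataQuant so the classes come out balanced.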
def __sample(self):
with open(self.output, "w") as fileOut:
with open(self.input, "r") as fileIn:
for line in fileIn:
if line.startswith("0"):
if random.random() < self.sampleRatio:
fileOut.write(line)
elif line.startswith("1"):
fileOut.write(line)
def runMe(self):
self.__sample()
if __name__ == "__main__":
if len(sys.argv) != 5:
print "<inputFile> <outputSampledFile> <fraudDataQuant> <normalDataQuant>"
exit(1)
job = SampleFraudData(sys.argv[1], sys.argv[2])
job.runMe()
|
[
"TakiyaHideto@iekoumatoMacBook-Pro.local"
] |
TakiyaHideto@iekoumatoMacBook-Pro.local
|
b391859b94fd32af4b40bd699c1b6acde8391faf
|
edb884e3f639261f36bbb8f444e2200bb879a9a2
|
/diagfi_compare_singlemonth.py
|
baa02ec94263fd6a9e083e4dbd41e800d3960a62
|
[] |
no_license
|
draelsaid/MGCM-python
|
7df36a783829fadb1d89ec9e54f92470d54c0493
|
9ee1491f009bed5f092c21a9235d61e9612f32f0
|
refs/heads/master
| 2020-06-21T22:41:09.556055
| 2017-05-31T12:22:45
| 2017-05-31T12:22:45
| 74,768,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40,806
|
py
|
# Compares NetCDF data from the Mars GCM for Full Mars Year by combining monthly output of diagfi.nc files
# Adam El-Said 08/2016
import matplotlib as mpl
#mpl.use('Agg') # removes need for X-Server (graphics in linux). For qsub only.
import numpy as np
import pylab as py
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mars_time import MarsTime
from scipy.io import *
from matplotlib import cm,ticker
from plt_timeseries import *
from matplotlib.ticker import FormatStrFormatter
from MidPointNorm import *
# Prints EVERYTHING inside a variable without holding back (intended for diagnostic)
np.set_printoptions(threshold=np.inf)
# Abbreviate sol_ls conversion function
sol_Ls=MarsTime().sol_ls
# Moving average
def moving_average(a, n=3) :
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
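# Worked example of the cumulative-sum trick above:
#   moving_average([1, 2, 3, 4], n=2) -> [1.5, 2.5, 3.5]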
# Initialise dictionaries - due to data size
Ls_m = {}
psa, psb = {}, {}
presa, presb = {}, {}
tempa, tempb = {}, {}
tsurfa, tsurfb = {}, {}
ua, ub = {}, {}
va, vb = {}, {}
dustqa, dustqb = {}, {}
dustNa, dustNb = {}, {}
rhoa, rhob = {}, {}
fluxsurflwa, fluxsurflwb = {}, {}
fluxsurfswa, fluxsurfswb = {}, {}
fluxtoplwa, fluxtoplwb = {}, {}
fluxtopswa, fluxtopswb = {}, {}
taua, taub = {}, {}
rdusta, rdustb = {}, {}
lw_htrta, lw_htrtb = {}, {}
sw_htrta, sw_htrtb = {}, {}
dqsseda, dqssedb = {}, {}
dqsdeva, dqsdevb = {}, {}
# Grab topography from surface.nc or mola32.nc file
ml = netcdf.netcdf_file('/padata/mars/users/aes442/mgcm_data/surface.nc','r')
mola = {}
mola[0] = ml.variables['latitude'][:]
mola[1] = ml.variables['longitude'][:]
mola[2] = ml.variables['zMOL'][:]
# Import data from Luca's TES dust files for comparison
a = netcdf.netcdf_file('/padata/mars/users/aes442/mgcm_data/dust_MY28.nc','r')
d_lat_s = a.variables['latitude'][:]
d_lon_s = a.variables['longitude'][:]
d_t = a.variables['Time'][:]
d_d = a.variables['dustop'][:]
d_lat = np.linspace(-90,90,d_lat_s.shape[0])
d_lon = np.linspace(-180,180,d_lon_s.shape[0])
# Number of months in comparison (always add 1 because of Python indexing)
Months = 2 # No. of months
amth = 1 # Actual month
# This loop assigns the data in both directories to variables here. This is done for each month. The result is a dictionary of dictionaries. One dictionary containing a dictionary for every month.
for i in xrange(1,Months):
mgcm = "MGCM_v5-1"
rundira = "a_ds8"
rundirb = "a_ref4"
month = ("m%s" % (amth)) # CHANGE
filename = "diagfi.nc"
a = netcdf.netcdf_file("/padata/alpha/users/aes442/RUNS/R-%s/%s/%s/%s" % (mgcm,rundira,month,filename),'r')
b = netcdf.netcdf_file("/padata/alpha/users/aes442/RUNS/R-%s/%s/%s/%s" % (mgcm,rundirb,month,filename),'r')
lat = a.variables['lat'][:]
lon = a.variables['lon'][:]
sigma = a.variables['sigma'][:]
t_m = a.variables['time'][:]
Ls_m[i] = a.variables['Ls'][:]
psa[i] = a.variables['ps'][:]
presa[i] = a.variables['pressure'][:]
tempa[i] = a.variables['temp'][:]
tsurfa[i] = a.variables['tsurf'][:]
ua[i] = a.variables['u'][:]
va[i] = a.variables['v'][:]
dustqa[i] = a.variables['dustq'][:]
dustNa[i] = a.variables['dustN'][:]
rhoa[i] = a.variables['rho'][:]
fluxsurflwa[i] = a.variables['fluxsurf_lw'][:]
fluxsurfswa[i] = a.variables['fluxsurf_sw'][:]
fluxtoplwa[i] = a.variables['fluxtop_lw'][:]
fluxtopswa[i] = a.variables['fluxtop_sw'][:]
taua[i] = a.variables['taudustvis'][:]
rdusta[i] = a.variables['reffdust'][:]
lw_htrta[i] = a.variables['lw_htrt'][:]
sw_htrta[i] = a.variables['sw_htrt'][:]
dqsseda[i] = a.variables['dqssed'][:]
dqsdeva[i] = a.variables['dqsdev'][:]
psb[i] = b.variables['ps'][:]
presb[i] = b.variables['pressure'][:]
tempb[i] = b.variables['temp'][:]
tsurfb[i] = b.variables['tsurf'][:]
ub[i] = b.variables['u'][:]
vb[i] = b.variables['v'][:]
dustqb[i] = b.variables['dustq'][:]
dustNb[i] = b.variables['dustN'][:]
rhob[i] = b.variables['rho'][:]
fluxsurflwb[i] = b.variables['fluxsurf_lw'][:]
fluxsurfswb[i] = b.variables['fluxsurf_sw'][:]
fluxtoplwb[i] = b.variables['fluxtop_lw'][:]
fluxtopswb[i] = b.variables['fluxtop_sw'][:]
taub[i] = b.variables['taudustvis'][:]
rdustb[i] = b.variables['reffdust'][:]
lw_htrtb[i] = b.variables['lw_htrt'][:]
sw_htrtb[i] = b.variables['sw_htrt'][:]
dqssedb[i] = b.variables['dqssed'][:]
dqsdevb[i] = b.variables['dqsdev'][:]
# Calculate approximate HEIGHT from sigma (km)
alt = np.zeros((sigma.shape[0]))
for i in xrange(len(sigma)):
alt[i] = -10.8*np.log(sigma[i])
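# The -10.8*log(sigma) form assumes a constant ~10.8 km scale height;
# e.g. sigma = 0.5 maps to roughly 7.5 km above the surface.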
print "Latitude: %i || Longitude: %i || Model levels: %i => Alt Min:%.3f | Alt Max:%.3f | Alt half: %.3f " % (lat.shape[0],lon.shape[0],sigma.shape[0],alt[0],alt[-1],alt[18])
alt_half=18 # 47.8km
# Get time dimension length
n = 0
for i in xrange(1,len(psa)+1,1): # len(psa) gives the number of months
n = n + len(dustqa[i]) # len(dustqa[i]) gives the number of time steps in each month.
print ("Total time steps: %i" % (n))
## Ls vector
Ls_s = (Months-1)*30 # Number of solar longitudes for time vector for comparison
Ls = np.zeros((n))
# Method 2 grabs Ls's from model (has bugs, but can be ironed out)
p=0
for i in xrange(1,len(Ls_m)+1,1):
gg = Ls_m[i]
for j in xrange(gg.shape[0]):
Ls[p] = gg[j]
p = p + 1
Ls = np.roll(Ls,5)
Ls[-1] = np.ceil(Ls[-2])
Ls[:6] = np.linspace(np.floor(Ls[5]),Ls[5],6)
print Ls[:8], Ls[-8:]
## Create all other variables, with altitude dimension removed
ps_a, ps_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
temp_a, temp_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
tsurf_a, tsurf_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
u_a, u_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
v_a, v_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
dustq_a, dustq_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
dustN_a, dustN_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
rho_a, rho_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
fslwa, fslwb = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
fsswa, fsswb = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
ftlwa, ftlwb = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
ftswa, ftswb = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
tau_a, tau_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
rdust_a, rdust_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
lw_htrt_a, lw_htrt_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
sw_htrt_a, sw_htrt_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
pres_a, pres_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
dqssed_a, dqssed_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
dqsdev_a, dqsdev_b = np.zeros((n,lat.shape[0],lon.shape[0])), np.zeros((n,lat.shape[0],lon.shape[0]))
# 3D Vars
ps_a, ps_b = psa[1][:,:,:], psb[1][:,:,:]
fslwa, fslwb = fluxsurflwa[1][:,:,:], fluxsurflwb[1][:,:,:]
fsswa, fsswb = fluxsurfswa[1][:,:,:], fluxsurfswb[1][:,:,:]
ftlwa, ftlwb = fluxtoplwa[1][:,:,:], fluxtoplwb[1][:,:,:]
ftswa, ftswb = fluxtopswa[1][:,:,:], fluxtopswb[1][:,:,:]
tau_a, tau_b = taua[1][:,:,:], taub[1][:,:,:]
tsurf_a, tsurf_b = tsurfa[1][:,:,:], tsurfb[1][:,:,:]
dqssed_a, dqssed_b = dqsseda[1][:,:,:], dqssedb[1][:,:,:]
dqsdev_a, dqsdev_b = dqsdeva[1][:,:,:], dqsdevb[1][:,:,:]
# 4D Vars
temp_a, temp_b = tempa[1][:,1,:,:], tempb[1][:,1,:,:]
u_a, u_b = ua[1][:,1,:,:], ub[1][:,1,:,:]
v_a, v_b = va[1][:,1,:,:], vb[1][:,1,:,:]
dustq_a, dustq_b = dustqa[1][:,1,:,:], dustqb[1][:,1,:,:]
dustN_a, dustN_b = dustNa[1][:,1,:,:], dustNb[1][:,1,:,:]
rho_a, rho_b = rhoa[1][:,1,:,:], rhob[1][:,1,:,:]
rdust_a, rdust_b = rdusta[1][:,1,:,:], rdustb[1][:,1,:,:]
lw_htrt_a, lw_htrt_b = lw_htrta[1][:,1,:,:], lw_htrtb[1][:,1,:,:]
sw_htrt_a, sw_htrt_b = sw_htrta[1][:,1,:,:], sw_htrtb[1][:,1,:,:]
pres_a, pres_b = presa[1][:,1,:,:], presb[1][:,1,:,:]
# Longitudal averaging
# Variables without longitude
temp_aa, temp_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
tsurf_aa, tsurf_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
u_aa, u_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
dustq_aa, dustq_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
dustN_aa, dustN_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
rho_aa, rho_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
rdust_aa, rdust_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
lw_htrt_aa, lw_htrt_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
sw_htrt_aa, sw_htrt_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
pres_aa, pres_bb = np.zeros((n,sigma.shape[0],lat.shape[0])), np.zeros((n,sigma.shape[0],lat.shape[0]))
# 4D Vars
temp_aa, temp_bb = np.sum(tempa[1],axis=3)/tempa[1].shape[3], np.sum(tempb[1],axis=3)/tempb[1].shape[3]
u_aa, u_bb = np.sum(ua[1],axis=3)/ua[1].shape[3], np.sum(ub[1],axis=3)/ub[1].shape[3]
dustq_aa, dustq_bb = np.sum(dustqa[1],axis=3)/dustqa[1].shape[3], np.sum(dustqb[1],axis=3)/dustqb[1].shape[3]
dustN_aa, dustN_bb = np.sum(dustNa[1],axis=3)/dustNa[1].shape[3], np.sum(dustNb[1],axis=3)/dustNb[1].shape[3]
rho_aa, rho_bb = np.sum(rhoa[1],axis=3)/rhoa[1].shape[3], np.sum(rhob[1],axis=3)/rhob[1].shape[3]
rdust_aa, rdust_bb = np.sum(rdusta[1],axis=3)/rdusta[1].shape[3], np.sum(rdustb[1],axis=3)/rdustb[1].shape[3]
lw_htrt_aa, lw_htrt_bb = np.sum(lw_htrta[1],axis=3)/lw_htrta[1].shape[3], np.sum(lw_htrtb[1],axis=3)/lw_htrtb[1].shape[3]
sw_htrt_aa, sw_htrt_bb = np.sum(sw_htrta[1],axis=3)/sw_htrta[1].shape[3], np.sum(sw_htrtb[1],axis=3)/sw_htrtb[1].shape[3]
pres_aa, pres_bb = np.sum(presa[1],axis=3)/presa[1].shape[3], np.sum(presb[1],axis=3)/presb[1].shape[3]
# Calculate differences
dustq_diff = dustq_a - dustq_b
dustN_diff = dustN_a - dustN_b
temp_diff = temp_a - temp_b
tsurf_diff = tsurf_a - tsurf_b
ps_diff = ps_a - ps_b
rho_diff = rho_a - rho_b
u_diff = u_a - u_b
v_diff = v_a - v_b
rdust_diff = rdust_a - rdust_b
lw_htrt_diff = lw_htrt_a - lw_htrt_b
sw_htrt_diff = sw_htrt_a - sw_htrt_b
pres_diff = pres_a - pres_b
dqssed_diff = dqssed_a - dqssed_b
dqsdev_diff = dqsdev_a - dqsdev_b
fslw_diff = fslwa - fslwb
fssw_diff = fsswa - fsswb
ftlw_diff = ftlwa - ftlwb
ftsw_diff = ftswa - ftswb
t_d = temp_aa - temp_bb
pres_d = pres_aa - pres_bb
ts_d = tsurf_aa - tsurf_bb
dq_d = dustq_aa - dustq_bb
dN_d = dustN_aa - dustN_bb
rho_d = rho_aa - rho_bb
u_d = u_aa - u_bb
rdust_d = rdust_aa - rdust_bb
lw_htrt_d = lw_htrt_aa - lw_htrt_bb
sw_htrt_d = sw_htrt_aa - sw_htrt_bb
# Zonal averaging (time,lat)
temp_avg = np.sum(temp_a,axis=2)/temp_a.shape[2] - np.sum(temp_b,axis=2)/temp_b.shape[2]
tsurf_avg = np.sum(tsurf_a,axis=2)/tsurf_a.shape[2] - np.sum(tsurf_b,axis=2)/tsurf_b.shape[2]
ps_avg = np.sum(ps_a,axis=2)/ps_a.shape[2] - np.sum(ps_b,axis=2)/ps_b.shape[2]
pres_avg = np.sum(pres_a,axis=2)/pres_a.shape[2] - np.sum(pres_b,axis=2)/pres_b.shape[2]
u_avg = np.sum(u_a,axis=2)/u_a.shape[2] - np.sum(u_b,axis=2)/u_b.shape[2]
rho_avg = np.sum(rho_a,axis=2)/rho_a.shape[2] - np.sum(rho_b,axis=2)/rho_b.shape[2]
fssw_avg = np.sum(fsswa,axis=2)/fsswa.shape[2] - np.sum(fsswb,axis=2)/fsswb.shape[2]
fslw_avg = np.sum(fslwa,axis=2)/fslwa.shape[2] - np.sum(fslwb,axis=2)/fslwb.shape[2]
ftsw_avg = np.sum(ftswa,axis=2)/ftswa.shape[2] - np.sum(ftswb,axis=2)/ftswb.shape[2]
ftlw_avg = np.sum(ftlwa,axis=2)/ftlwa.shape[2] - np.sum(ftlwb,axis=2)/ftlwb.shape[2]
tau_a_avg = np.sum(tau_a,axis=2)/tau_a.shape[2]
tau_b_avg = np.sum(tau_b,axis=2)/tau_b.shape[2]
rdust_avg = np.sum(rdust_a,axis=2)/rdust_a.shape[2] - np.sum(rdust_b,axis=2)/rdust_b.shape[2]
lw_htrt_avg = np.sum(lw_htrt_a,axis=2)/lw_htrt_a.shape[2] - np.sum(lw_htrt_b,axis=2)/lw_htrt_b.shape[2]
sw_htrt_avg = np.sum(sw_htrt_a,axis=2)/sw_htrt_a.shape[2] - np.sum(sw_htrt_b,axis=2)/sw_htrt_b.shape[2]
temp_avg_ = np.sum(temp_b,axis=2)/temp_b.shape[2]
pres_avg_ = np.sum(pres_b,axis=2)/pres_b.shape[2]
tsurf_avg_ = np.sum(tsurf_b,axis=2)/tsurf_b.shape[2]
ps_avg_ = np.sum(ps_b,axis=2)/ps_b.shape[2]
u_avg_ = np.sum(u_b,axis=2)/u_b.shape[2]
rho_avg_ = np.sum(rho_b,axis=2)/rho_b.shape[2]
fssw_avg_ = np.sum(fsswb,axis=2)/fsswb.shape[2]
fslw_avg_ = np.sum(fslwb,axis=2)/fslwb.shape[2]
ftsw_avg_ = np.sum(ftswb,axis=2)/ftswb.shape[2]
ftlw_avg_ = np.sum(ftlwb,axis=2)/ftlwb.shape[2]
# from 35N to 55N Lat
#tmp_ = np.sum(np.sum(temp_avg_[:,7:11],axis=0)/n,axis=0)/4
#tmps_ = np.sum(np.sum(tsurf_avg_[:,7:11],axis=0)/n,axis=0)/4
#ps_ = np.sum(np.sum(ps_avg_[:,7:11],axis=0)/n,axis=0)/4
#pres_ = np.sum(np.sum(pres_avg_[:,7:11],axis=0)/n,axis=0)/4
#rho_ = np.sum(np.sum(rho_avg_[:,7:11],axis=0)/n,axis=0)/4
#u_ = np.sum(np.sum(np.absolute(u_avg_[:,7:11]),axis=0)/n,axis=0)/4
#fslw_ = np.sum(np.sum(fslw_avg_[:,7:11],axis=0)/n,axis=0)/4
#fssw_ = np.sum(np.sum(fssw_avg_[:,7:11],axis=0)/n,axis=0)/4
#ftlw_ = np.sum(np.sum(ftlw_avg_[:,7:11],axis=0)/n,axis=0)/4
#ftsw_ = np.sum(np.sum(ftsw_avg_[:,7:11],axis=0)/n,axis=0)/4
#tmp_1 = np.sum(np.sum(temp_avg[:,7:11],axis=0)/n,axis=0)/4
#tmps_1 = np.sum(np.sum(tsurf_avg[:,7:11],axis=0)/n,axis=0)/4
#ps_1 = np.sum(np.sum(ps_avg[:,7:11],axis=0)/n,axis=0)/4
#pres_1 = np.sum(np.sum(pres_avg[:,7:11],axis=0)/n,axis=0)/4
#rho_1 = np.sum(np.sum(rho_avg[:,7:11],axis=0)/n,axis=0)/4
#u_1 = np.sum(np.sum(u_avg[:,7:11],axis=0)/n,axis=0)/4
#fslw_1 = np.sum(np.sum(fslw_avg[:,7:11],axis=0)/n,axis=0)/4
#fssw_1 = np.sum(np.sum(fssw_avg[:,7:11],axis=0)/n,axis=0)/4
#ftlw_1 = np.sum(np.sum(ftlw_avg[:,7:11],axis=0)/n,axis=0)/4
#ftsw_1 = np.sum(np.sum(ftsw_avg[:,7:11],axis=0)/n,axis=0)/4
#print "AVERAGES: tmp: %.2f || surf tmp: %.2f || press: %.2f || surf press: %.2f || dens: %.2f || zon wind: #%.2f || fluxes (inLW: %.2f, outLW: %.2f, inSW: %.2f, outSW: %.2f). " % (tmp_, tmps_, pres_, ps_, rho_, u_, #fslw_, ftlw_, fssw_, ftsw_)
#print tmp_1/tmp_, tmps_1/tmps_, pres_1/pres_, ps_1/ps_, rho_1/rho_, u_1/u_, fslw_1/fslw_, fssw_1/fssw_, ftlw_1/ftlw_, ftsw_1/ftsw_
# Time moving-point average of zonal average
nn=2 # Number of points to average over
t_avg = Ls[:-(nn-1)]
temp_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
pres_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
tsurf_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
ps_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
u_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
rho_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
fssw_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
fslw_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
ftsw_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
ftlw_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
rdust_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
lw_htrt_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
sw_htrt_avg_t = np.zeros((t_avg.shape[0],lat.shape[0]))
for i in xrange(0,lat.shape[0]):
temp_avg_t[:,i] = moving_average(temp_avg[:,i],n=nn)
pres_avg_t[:,i] = moving_average(pres_avg[:,i],n=nn)
tsurf_avg_t[:,i] = moving_average(tsurf_avg[:,i],n=nn)
ps_avg_t[:,i] = moving_average(ps_avg[:,i],n=nn)
u_avg_t[:,i] = moving_average(u_avg[:,i],n=nn)
rho_avg_t[:,i] = moving_average(rho_avg[:,i],n=nn)
fssw_avg_t[:,i] = moving_average(fssw_avg[:,i],n=nn)
fslw_avg_t[:,i] = moving_average(fslw_avg[:,i],n=nn)
ftsw_avg_t[:,i] = moving_average(ftsw_avg[:,i],n=nn)
ftlw_avg_t[:,i] = moving_average(ftlw_avg[:,i],n=nn)
rdust_avg_t[:,i] = moving_average(rdust_avg[:,i],n=nn)
lw_htrt_avg_t[:,i] = moving_average(lw_htrt_avg[:,i],n=nn)
sw_htrt_avg_t[:,i] = moving_average(sw_htrt_avg[:,i],n=nn)
############ TIME AVERAGE of differences ###################
nnn=nn
t_av = Ls[:-(nnn-1)]
td_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
presd_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0])) # time-smoothed 3D pressure difference (distinct from the 2D pres_avg above)
tds_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
dqd_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
dNd_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
rhod_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
ud_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
rd_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
lwhr_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
swhr_avg = np.zeros((t_av.shape[0],sigma.shape[0],lat.shape[0]))
for j in xrange(0,lat.shape[0],1):
for i in xrange(0,sigma.shape[0],1):
td_avg[:,i,j] = moving_average(t_d[:,i,j],n=nnn)
        presd_avg[:,i,j] = moving_average(pres_d[:,i,j],n=nnn)
tds_avg[:,i,j] = moving_average(ts_d[:,i,j],n=nnn)
dqd_avg[:,i,j] = moving_average(dq_d[:,i,j],n=nnn)
dNd_avg[:,i,j] = moving_average(dN_d[:,i,j],n=nnn)
rhod_avg[:,i,j] = moving_average(rho_d[:,i,j],n=nnn)
ud_avg[:,i,j] = moving_average(u_d[:,i,j],n=nnn)
rd_avg[:,i,j] = moving_average(rdust_d[:,i,j],n=nnn)
lwhr_avg[:,i,j] = moving_average(lw_htrt_d[:,i,j],n=nnn)
swhr_avg[:,i,j] = moving_average(sw_htrt_d[:,i,j],n=nnn)
# Save destination
fpath = "/home/physastro/aes442/results/Dustruns/m%i/" % (amth)
## Plot settings (MUST CHANGE FROM MONTH TO MONTH)
######################################################################################
# Which Ls do you want to focus on?
Ls_ee= 4.
Ls_e = 5.5
l_1 = np.where(Ls - Ls_ee > 0.001)[0][0]
l_2 = np.where(Ls - Ls_e > 0.001)[0][0]
Ls = Ls[l_1:l_2]
n = l_2 - l_1
## Dust storm insertion points (Ls - tstep_start - centre [lat,lon])
# m1 = 3.95 - 96 - [45, -135]
# m26 = 45.66 - 408 - [45, -90]
# m30 = 153.95 - 84 - [ 0, 0]
# m33 = 244.28 - 84 - [-2, -6]
# m34 = 273.52 - 60 - [-45, 90]
c = np.matrix('4. 45.') # Dust storm mid-points [Ls Lat]
#########################################################################################
######## TES dust files
# Zonal averaging
tau_d_z = d_d.sum(axis=2)/d_d.shape[2]
# Time averaging
nnnn=2
tau_d_avg=np.zeros((tau_d_z.shape[0]-(nnnn-1),tau_d_z.shape[1]))
for i in xrange(0,d_lat_s.shape[0]):
tau_d_avg[:,i] = moving_average(tau_d_z[:,i],nnnn)
# first and last sols
sol_a = int(np.round(669*(Ls_ee/360.)))
sol_s = int(np.round(669*(Ls_e/360.)))
tau_d_avg = tau_d_avg[sol_a:sol_s,:]
d_Ls_avg = np.linspace(Ls_ee,Ls_e,tau_d_avg.shape[0])
#########
## PLOTS
# Common settings (ticks)
t_t = np.linspace(Ls_ee,Ls_e,n)
t_tau = np.linspace(Ls_ee,Ls_e,n)
lat_t = np.linspace(90,-90,lat.shape[0])
lon_t = np.linspace(-180,180,lon.shape[0])
# Solar longitude
i_mj=0.2
i_mn=0.05
major_ticksx = np.arange(Ls_ee, Ls_e+i_mj, i_mj)
minor_ticksx = np.arange(Ls_ee, Ls_e, i_mn)
# Latitude
major_ticksy = np.arange(-90, 91, 30)
minor_ticksy = np.arange(-90, 91, 10)
## tau_ref, tau_ds, tau_tes PLOT
tau_ds = np.matrix.transpose(tau_a_avg)
tau_ds = tau_ds[:,l_1:l_2]
tau_ref = np.matrix.transpose(tau_b_avg)
tau_ref = tau_ref[:,l_1:l_2]
tau_TES = np.matrix.transpose(tau_d_avg)
f, axarr = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(12,12), dpi=100)
x = t_tau
y = lat_t
xx = d_Ls_avg
yy = np.linspace(-90,90,d_lat_s.shape[0])
xlabel = 'Solar longitude / degrees'
ylabel = 'Latitude / degrees'
cb_label = 'Dust optical depth / SI'
# Common axis labels
f.text(0.5, 0.04, '%s' % (xlabel), fontsize=18, ha='center')
f.text(0.06, 0.5, '%s' % (ylabel), fontsize=18, va='center', rotation='vertical')
ax1 = axarr[0].pcolormesh(x, y, tau_ds, cmap='gist_rainbow_r', vmin=np.min((np.min(tau_ds),np.min(tau_ref),np.min(tau_TES))), vmax=np.max((np.max(tau_ds),np.max(tau_ref),np.max(tau_TES))))
axarr[0].axis('tight')
axarr[0].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[0].set_xticks(major_ticksx)
axarr[0].set_xticks(minor_ticksx, minor=True)
axarr[0].set_yticks(major_ticksy)
axarr[0].set_yticks(minor_ticksy, minor=True)
axarr[0].set_title('(a) Dust storm run', fontsize=14)
axarr[0].tick_params(axis='both', labelsize=11, pad=10)
ax2 = axarr[1].pcolormesh(x, y, tau_ref, cmap='gist_rainbow_r', vmin=np.min((np.min(tau_ds),np.min(tau_ref),np.min(tau_TES))), vmax=np.max((np.max(tau_ds),np.max(tau_ref),np.max(tau_TES))))
axarr[1].set_title('(b) Reference run', fontsize=14)
# Colorbar creation and placement
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.85, 0.1, 0.04, 0.8]) # [h_placement, v_placement, h_size, v_size]
cb = f.colorbar(ax1, cax=cbar_ax, format='%.1f', extend='both') # double-edged colorbar
cb.set_label('%s' % (cb_label), fontsize=16) # colorbar label
#f.subplots_adjust(right=0.8)
#cbar_ax = f.add_axes([0.85, 0.665, 0.04, 0.235]) # [h_placement, v_placement, h_size, v_size]
#cb = f.colorbar(ax1, cax=cbar_ax, format='%.1f', extend='both') # double-edged colorbar
#cb.set_label('%s' % (cb_label), fontsize=16) # colorbar label
#f.subplots_adjust(right=0.8)
#cbar_ax2 = f.add_axes([0.85, 0.38, 0.04, 0.235]) # [h_placement, v_placement, h_size, v_size]
#cb = f.colorbar(ax2, cax=cbar_ax2, format='%.1f', extend='both') # double-edged colorbar
#cb.set_label('%s' % (cb_label), fontsize=16) # colorbar label
#f.subplots_adjust(right=0.8)
#cbar_ax3 = f.add_axes([0.85, 0.095, 0.04, 0.235]) # [h_placement, v_placement, h_size, v_size]
#cb = f.colorbar(ax3, cax=cbar_ax3, format='%.1f', extend='both') # double-edged colorbar
#cb.set_label('%s' % (cb_label), fontsize=16) # colorbar label
plt.savefig("%sCDOD_latvsLs_dsrunvsrefrun.png" % (fpath), bbox_inches='tight')
## TEMP/WIND/TOPG map
# DATA
day = 1
hr = 96 # this is actually the tstep (t=96 is storm start)
lvl = 0
# variable[day][hour, elevation, lat, lon]
ut = ua[day][hr,lvl,:,:] - ub[day][hr,lvl,:,:]
vt = va[day][hr,lvl,:,:] - vb[day][hr,lvl,:,:]
#data = tempa[day][hr,lvl,:,:] - tempb[day][hr,lvl,:,:]
data = tsurfa[day][hr,:,:] - tsurfb[day][hr,:,:]
data2= presa[day][hr,:,:] - presb[day][hr,:,:]
# Longitude
major_ticksx = np.arange(np.floor(lon_t[0]), np.ceil(lon_t[-1]), 30)
minor_ticksx = np.arange(np.floor(lon_t[0]), np.ceil(lon_t[-1]), 10)
# Latitude
major_ticksy = np.arange(np.floor(lat_t[-1]), np.ceil(lat_t[0]), 30)
minor_ticksy = np.arange(np.floor(lat_t[-1]), np.ceil(lat_t[0]), 10)
## PLOT temperature/winds/topography
f, axarr = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(10,10), dpi=100)
x = lon_t
y = lat_t
xlabel = 'Longitude / degrees'
ylabel = 'Latitude / degrees'
cblabel= 'Surface temperature difference / K'
plt.xlabel(xlabel, fontsize=14, labelpad=10)
plt.ylabel(ylabel, fontsize=14, labelpad=10)
# Main plot
ax = axarr.pcolormesh(x, y, data, cmap='RdBu_r', norm=MidPointNorm(midpoint=0.))
# Secondary plot
ax2 = axarr.quiver(x, y, ut, vt, scale=2**2, units='y', width=0.1)
aq = axarr.quiverkey(ax2, 0.815, 0.9, 1, r'$1 \frac{m}{s}$', labelpos='E', coordinates='figure')
# Topography
lvls = [-5,0,5,10,15]
ax3 = axarr.contour(mola[1], mola[0], mola[2], lvls, colors='k')
# Ticks
axarr.set_xticks(major_ticksx)
axarr.set_xticks(minor_ticksx, minor=True)
axarr.set_yticks(major_ticksy)
axarr.set_yticks(minor_ticksy, minor=True)
axarr.tick_params(axis='both', labelsize=12, pad=10)
axarr.axis('tight')
# Colour bar
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.85, 0.1, 0.04, 0.8]) # [h_place, v_place, h_size, v_size]
cb = f.colorbar(ax, cax=cbar_ax, format='%.1f', extend='both') # double-edged colorbar
cb.set_label('%s' % (cblabel), fontsize=16) # colorbar label
plt.axis('tight')
plt.savefig("%stemp_uvwind_mola_latvslon.png" % (fpath), bbox_inches='tight')
plt.close('all')
## Temperature PLOT
temp_t = tsurf_avg_t.T
temp_t = temp_t[:,l_1:l_2]
fig = plt.figure(figsize=(10,10), dpi=100)
ax = fig.add_subplot(1,1,1)
plt.pcolormesh(t_t,lat_t,temp_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
plt.xlabel('Solar longitude / degrees', fontsize=14, labelpad=10)
plt.ylabel('Latitude / degrees', fontsize=14, labelpad=10)
# Extra Markers
ax.plot(c[0,0],c[0,1],'o',color='y',markersize=10)
# Ticks
ax.set_xticks(major_ticksx)
ax.set_xticks(minor_ticksx, minor=True)
ax.set_yticks(major_ticksy)
ax.set_yticks(minor_ticksy, minor=True)
ax.tick_params(axis='both', labelsize=12, pad=10)
# Colour bar
cb = plt.colorbar(format='%.2f', extend='both')
cb.set_label('Surface temperature difference / K')
tick_locator = ticker.MaxNLocator(nbins=16)
cb.locator = tick_locator
plt.axis('tight')
plt.savefig("%sSurfTempDiff_LatvsTime_FY_uavg_tavg.png" % (fpath), bbox_inches='tight')
## Surface pressure and Atmospheric density at surface PLOT
ps_t = np.matrix.transpose(pres_avg_t)
ps_t = ps_t[:,l_1:l_2]
rho_t = np.matrix.transpose(rho_avg_t)
rho_t = rho_t[:,l_1:l_2]
f, axarr = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(12,12), dpi=100)
x = t_t
y = lat_t
xlabel = 'Solar longitude / degrees'
ylabel = 'Latitude / degrees'
cb_label = 'Atmospheric pressure difference / Pa'
cb_label2 = 'Atmospheric density difference / kg / $m^3$'
# Common axis labels
f.text(0.5, 0.04, '%s' % (xlabel), fontsize=18, ha='center')
f.text(0.06, 0.5, '%s' % (ylabel), fontsize=18, va='center', rotation='vertical')
ax1 = axarr[0].pcolormesh(x, y, ps_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
axarr[0].axis('tight')
axarr[0].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[0].set_xticks(major_ticksx)
axarr[0].set_xticks(minor_ticksx, minor=True)
axarr[0].set_yticks(major_ticksy)
axarr[0].set_yticks(minor_ticksy, minor=True)
axarr[0].set_title('(a)', fontsize=18)
axarr[0].tick_params(axis='both', labelsize=14)
ax2 = axarr[1].pcolormesh(x, y, rho_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
axarr[1].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[1].set_title('(b)', fontsize=18)
# Colorbar creation and placement
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.85, 0.54, 0.04, 0.36]) # [h_placement, v_placement, h_size, v_size]
cb = f.colorbar(ax1, cax=cbar_ax, format='%.1f', extend='both') # double-edged colorbar
cb.set_label('%s' % (cb_label), fontsize=14) # colorbar label
cbar_ax2 = f.add_axes([0.85, 0.1, 0.04, 0.36]) # [h_placement, v_placement, h_size, v_size]
cb2 = f.colorbar(ax2, cax=cbar_ax2, format='%.1e', extend='both') # double-edged colorbar
cb2.set_label('%s' % (cb_label2), fontsize=14) # colorbar label
plt.savefig("%sPresDensDiff_LatvsLs_zonavg_tavg.png" % (fpath), bbox_inches='tight')
# Zonal wind PLOT
u_t = np.matrix.transpose(u_avg_t)
u_t = u_t[:,l_1:l_2]
fig = plt.figure( figsize=(10,10), dpi=100)
ax = fig.add_subplot(1,1,1)
plt.pcolormesh(t_t,lat_t,u_t,norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
plt.xlabel('Solar longitude / degrees',fontsize=16)
plt.ylabel('Latitude / degrees',fontsize=16)
ax.plot(c[0,0],c[0,1],'o',color='y',markersize=10)
ax.set_xticks(major_ticksx)
ax.set_xticks(minor_ticksx, minor=True)
ax.set_yticks(major_ticksy)
ax.set_yticks(minor_ticksy, minor=True)
cb = plt.colorbar(format='%.1f', extend='both')
cb.set_label('Zonal wind velocity difference / m / s')
tick_locator = ticker.MaxNLocator(nbins=7)
cb.locator = tick_locator
cb.update_ticks()
plt.axis('tight')
plt.savefig("%sZonalWindDiff_LatvsTime_FY_uavg_tavg.png" % (fpath), bbox_inches='tight')
# ALL FLUXES on one plot
fslw_t = np.matrix.transpose(fslw_avg_t[l_1:l_2,:]) # Incoming (surf) long wave (IR) radiation
ftlw_t = np.matrix.transpose(ftlw_avg_t[l_1:l_2,:]) # Outgoing (top) long wave (IR) radiation
fssw_t = np.matrix.transpose(fssw_avg_t[l_1:l_2,:]) # Incoming (surf) short wave (VL) radiation
ftsw_t = np.matrix.transpose(ftsw_avg_t[l_1:l_2,:]) # Outgoing (top) short wave (VL) radiation
f, axarr = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(12,12), dpi=100)
x = t_t
y = lat_t
xlabel = 'Solar longitude / degrees'
ylabel = 'Latitude / degrees'
cb_label = 'Radiative flux difference / W / $m^2$'
# Common axis labels
f.text(0.5, 0.04, '%s' % (xlabel), fontsize=18, ha='center')
f.text(0.06, 0.5, '%s' % (ylabel), fontsize=18, va='center', rotation='vertical')
ax1 = axarr[0,0].pcolormesh(x, y, fslw_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
axarr[0,0].axis('tight')
axarr[0,0].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[0,0].set_xticks(major_ticksx)
axarr[0,0].set_xticks(minor_ticksx, minor=True)
axarr[0,0].set_yticks(major_ticksy)
axarr[0,0].set_yticks(minor_ticksy, minor=True)
axarr[0,0].set_title('Incident flux at surface (LW) (a)', fontsize=10)
axarr[0,0].tick_params(axis='both', labelsize=10)
dv1 = make_axes_locatable(axarr[0,0])
cax1 = dv1.append_axes("right",size="5%",pad=0.05)
cb = f.colorbar(ax1,cax=cax1, format='%.1f', extend='both')
cb.set_label('%s' % (cb_label), fontsize=10)
ax2 = axarr[0,1].pcolormesh(x, y, ftlw_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
axarr[0,1].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[0,1].set_title('Outgoing flux at top (LW) (b)', fontsize=10)
axarr[0,1].tick_params(axis='both', labelsize=10)
dv2 = make_axes_locatable(axarr[0,1])
cax2 = dv2.append_axes("right",size="5%",pad=0.05)
cb2 = f.colorbar(ax2,cax=cax2, format='%.1f', extend='both')
cb2.set_label('%s' % (cb_label), fontsize=10)
ax3 = axarr[1,0].pcolormesh(x, y, fssw_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
axarr[1,0].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[1,0].set_title('Incident flux at surface (SW) (c)', fontsize=10)
axarr[1,0].tick_params(axis='both', labelsize=10)
dv3 = make_axes_locatable(axarr[1,0])
cax3 = dv3.append_axes("right",size="5%",pad=0.05)
cb3 = f.colorbar(ax3,cax=cax3, format='%.1f', extend='both')
cb3.set_label('%s' % (cb_label), fontsize=10)
ax4 = axarr[1,1].pcolormesh(x, y, ftsw_t, norm=MidPointNorm(midpoint=0.), cmap='RdBu_r')
axarr[1,1].plot(c[0,0],c[0,1],'o',color='y',markersize=10)
axarr[1,1].set_title('Outgoing flux at top (SW) (d)', fontsize=10)
axarr[1,1].tick_params(axis='both', labelsize=10)
dv4 = make_axes_locatable(axarr[1,1])
cax4 = dv4.append_axes("right",size="5%",pad=0.05)
cb4 = f.colorbar(ax4,cax=cax4, format='%.1f', extend='both')
cb4.set_label('%s' % (cb_label), fontsize=10)
# Colorbar creation and placement
#f.subplots_adjust(right=0.8)
#cbar_ax = f.add_axes([0.85, 0.1, 0.04, 0.8]) # [h_placement, v_placement, h_size, v_size]
#cb = f.colorbar(ax3, cax=cbar_ax, format='%.1f', extend='both') # double-edged colorbar
#cb.set_label('%s' % (cb_label), fontsize=14) # colorbar label
plt.savefig("%sfluxes_latvsLs_zonavg_tavg.png" % (fpath), bbox_inches='tight')
### Short-term Temperature and Heating rates at exact location vs Altitude (put in particle size or mmr)
# lat = 87.49999, 82.49999, 77.5, 72.5, 67.5, 62.5, 57.5, 52.5, 47.5, 42.5,
# 37.5, 32.5, 27.5, 22.5, 17.5, 12.5, 7.500001, 2.500001, -2.500001,
# -7.500003, -12.5, -17.5, -22.5, -27.5, -32.5, -37.5, -42.5, -47.5, -52.5,
# -57.5, -62.5, -67.5, -72.5, -77.5, -82.49999, -87.49999 ;
# lon = -180, -175, -170, -165, -160, -155, -150, -145, -140, -135, -130,
# -125, -120, -115, -110, -105, -100, -95, -90, -84.99999, -80, -75, -70,
# -65, -60, -55, -50, -45, -40, -35, -30, -25, -20, -15, -10, -5, 0, 5, 10,
# 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 84.99999, 90, 95,
# 100, 105, 110, 115, 120, 125, 130, 135, 140, 145, 150, 155, 160, 165,
# 170, 175 ;
## Dust storm insertion points (Ls - tstep_start - [lat,lon])
# m26 = 45.66 - 408 - [45, -90]
# m30 = 153.95 - 84 - [ 0, 0]
# m33 = 244.28 - 84 - [-2, -6]
# m34 = 273.52 - 60 - [-45, 90]
### Plot explanation
# Storm starts at tstep=96, which is midnight of sol 8 relative to (0,0). However the storm is at 135W (midpoint).
# So 360/24 = 15deg for each hour, meaning local time at 135W is 135/15=9hrs behind (0,0) local time, so at dust storm time insertion it is 15:00 locally.
# We want the plot to start 1 day before the storm, which will be at tstep=84, since each tstep accounts for 2 hours.
# From tstep=84 we push forward 10 hours for approximate midnight plot start
### In reality the plot starts at 01:00 the night before the storm, local time 135W.
f, axarr = plt.subplots(3, 1, sharex=True, sharey=True, figsize=(12,12), dpi=100)
tl1, tl2 =89, 125
al=15
latl, lonl=6, 9
d1 = tempa[1][tl1:tl2,:al,latl,lonl]
d2 = tempb[1][tl1:tl2,:al,latl,lonl]
d3 = (88800/24.)*(sw_htrta[1][tl1:tl2,:al,latl,lonl] + lw_htrta[1][tl1:tl2,:al,latl,lonl]) # NET heat rate (SW cooling, LW heating) changed from K/s to K/hr
d4 = rdusta[1][tl1:tl2,:al,latl,lonl]
d5 = dustqa[1][tl1:tl2,:al,latl,lonl]
data = np.matrix.transpose(d2)
data2 = np.matrix.transpose(d1)
data3 = np.matrix.transpose(d3)
data4 = np.matrix.transpose(d4)
data5 = np.matrix.transpose(d5)
y = alt[:al]
x = np.linspace(0,72,data.shape[1])
xlabel = 'Local time / hr'
ylabel = 'Altitude above surface / km'
cb_label = 'Temperature / K'
cb_label2 = 'Net heating rate / K / hr'
major_ticksx = np.arange(0,np.max(x)+1,6)
minor_ticksx = np.arange(0,np.max(x),2)
major_ticksy = np.arange(0,np.max(y)+1,10)
minor_ticksy = np.arange(0,np.max(y),2)
# Common axis labels
f.text(0.5, 0.04, '%s' % (xlabel), fontsize=16, ha='center')
f.text(0.06, 0.5, '%s' % (ylabel), fontsize=16, va='center', rotation='vertical')
ax1 = axarr[0].pcolormesh(x, y, data, cmap='jet')
axarr[0].axis('tight')
axarr[0].set_xticks(major_ticksx)
axarr[0].set_yticks(major_ticksy)
axarr[0].set_xticks(minor_ticksx, minor=True)
axarr[0].set_yticks(minor_ticksy, minor=True)
axarr[0].set_title('Reference run (a)', fontsize=10)
axarr[0].tick_params(axis='both', labelsize=14)
ax2 = axarr[1].pcolormesh(x, y, data2, cmap='jet')
axarr[1].set_title('Dust storm run (b)', fontsize=10)
axarr[1].tick_params(axis='both', labelsize=14)
axarr[1].add_patch(mpl.patches.Rectangle((14, 0), 24, 9, facecolor="none", linestyle='dashed'))
ax3 = axarr[2].pcolormesh(x, y, data3, cmap='RdBu_r', vmax=10, vmin=-10)
axarr[2].set_title('Dust storm run (c)', fontsize=10)
axarr[2].tick_params(axis='both', labelsize=14)
axarr[2].add_patch(mpl.patches.Rectangle((14, 0), 24, 9, facecolor="none", linestyle='dashed'))
lvl = np.array([1e-6, 1e-5, 1e-4, 1e-3]) # Contour levels
ax = axarr[2].contour(x,y,data5,lvl,colors='k',linewidths=3,locator=ticker.LogLocator())
plt.clabel(ax, fontsize=9, inline=1,fmt='%2.0e')
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.85, 0.38, 0.02, 0.52]) # [h_placement, v_placement, h_size, v_size]
cb = f.colorbar(ax1, cax=cbar_ax, format='%.0f', extend='both') # double-edged colorbar
cb.set_label('%s' % (cb_label), fontsize=16) # colorbar label
f.subplots_adjust(right=0.8)
cbar_ax2 = f.add_axes([0.85, 0.095, 0.02, 0.235]) # [h_placement, v_placement, h_size, v_size]
cb = f.colorbar(ax3, cax=cbar_ax2, format='%.0f', extend='both') # double-edged colorbar
cb.set_label('%s' % (cb_label2), fontsize=16) # colorbar label
#locs,labels = py.xticks()
#py.xticks(locs, map(lambda x: "%02d" % x, locs*1e9))
plt.savefig("%sheating.png" % (fpath), bbox_inches='tight')
plt.close('all')
### Time series plots
# settings
s_l = [-2.05, -6.12, 242.7] # landing site marking on plot (actually for 244.7, Ls is messed up)
ticky_latlon = [60,10,30,10] # tick settings [xmajor,xminor,ymajor,yminor] ticks
ticky_latalt = [60,10,20,10]
int_Ls = int(np.ceil(Ls.shape[0]/(12*Months)))
# Dust particle size contours
rd_ds1 = {}
rd_ds1[0] = alt[:alt_half]
rd_ds1[1] = lat_t
rd_ds1[2] = rd_avg[:,:alt_half,:]
# dust mmr average difference contours
dqd_ds = {}
dqd_ds[0] = alt[:alt_half]
dqd_ds[1] = lat_t
dqd_ds[2] = dqd_avg[:,:alt_half,:]
wind = {}
wind[0] = u_diff
wind[1] = v_diff
## Dust storm 1 Time series dustq (mmr) (time, lat, lon)
plt_timeseries(dustq_diff[l_1:,:,:], lon_t, lat_t, Ls_m[1][l_1:], 4,4, ticky_latlon, 'Longitude / degrees', 'Latitude / degrees', 'Ls: ', 'Dust MMR difference / kg / kg', 3, '%sDustqdiff_latlon_tseries_ds1.png' % (fpath), mola)
alt_t = alt # Height of 20.9km
latll = 26
dustq_diff_altlon = dustqa[1][l_1:,:,latll,:] - dustqb[1][l_1:,:,latll,:]
temp_diff_altlon = tempa[1][l_1:,:,latll,:] - tempb[1][l_1:,:,latll,:]
plt_timeseries(temp_diff_altlon, lon_t, alt_t, Ls, 4,4, ticky_latalt, 'Longitude / degrees', 'Altitude / km', 'Ls: ', 'Temperature difference / K', int_Ls, '%stemp_altlon_tseries_ds1.png' % (fpath))
plt_timeseries(dustq_diff_altlon, lon_t, alt_t, Ls_m[1][l_1:], 4, 4, ticky_latalt, 'Longitude / degrees', 'Altitude / km', 'Ls: ', 'Dust MMR difference / kg / kg', 3, '%sdustq_altlon_tseries_ds1.png' % (fpath))
plt.close('all')
## IMPACT CALCULATIONS
## Dust storm insertion points (Ls - tstep_start - [lat,lon])
### DS1 m1 = 3.95 - (96-120, 2 sol) - [45, -135] (ORIGINAL DS)
llat1, llat2 = 22.5, 67.5
llon1, llon2 = -155., -115.
lalt1, lalt2 = 0., 8.
ts1, ts2 = 120, 132
### DS2 m26 = 45.66 - 408 - [45, -90]
#llat1, llat2 = 22.5, 67.5
#llon1, llon2 = -110., -70.
#lalt1, lalt2 = 0., 8.
#ts1, ts2 = 420, 432
### DS3 m30 = 153.95 - 84 - [ 0, 0]
#llat1, llat2 = -22.5, 22.5
#llon1, llon2 = -20., 20.
#lalt1, lalt2 = 0., 8.
#ts1, ts2 = 96, 108
### DS4 m33 = 244.28 - 84 - [-2, -6] (EXOMARS)
#llat1, llat2 = -22.5, 22.5
#llon1, llon2 = -20., 20.
#lalt1, lalt2 = 0., 8.
#ts1, ts2 = 96, 108
### DS5 m34 = 273.52 - 60 - [-45, 90]
#llat1, llat2 = -67.5, -22.5
#llon1, llon2 = 70., 110.
#lalt1, lalt2 = 0., 8.
#ts1, ts2 = 72, 84
lat_1, lat_2 = np.where(lat - llat2 >= 0.001)[0][-1]+1, np.where(lat - llat1 >= 0.001)[0][-1]+1
lon_1, lon_2 = np.where(lon - llon1 >= 0.001)[0][0]-1, np.where(lon - llon2 >= 0.001)[0][0]-1
alt_1, alt_2 = 0., np.where(alt - lalt2 >= 0.001)[0][0]
# Loop to compute impact
re_err, avg_t = {}, {}
re, avg = {}, {}
day = 1
var_da = [dustqa[1], dustNa[1], tempa[1], tsurfa[1], presa[1], psa[1], ua[1], va[1], rhoa[1], fluxsurflwa[1], fluxsurfswa[1], fluxtoplwa[1], fluxtopswa[1]]
var_db = [dustqb[1], dustNb[1], tempb[1], tsurfb[1], presb[1], psb[1], ub[1], vb[1], rhob[1], fluxsurflwb[1], fluxsurfswb[1], fluxtoplwb[1], fluxtopswb[1]]
re[day] = np.zeros([len(var_da), (ts2-ts1)+1])
avg[day] = np.zeros([len(var_da), (ts2-ts1)+1])
re_err[day] = np.zeros(len(var_da))
avg_t[day] = np.zeros(len(var_da))
for n in xrange(0, len(var_da)):
data_a = var_da[n]
data_b = var_db[n]
if len(data_a.shape)==4:
m=0
for j in xrange(ts1, ts2+1):
aa = data_a[j,alt_1:alt_2,lat_1:lat_2,lon_1:lon_2].flatten() - data_b[j,alt_1:alt_2,lat_1:lat_2,lon_1:lon_2].flatten()
a_ref = data_b[j,alt_1:alt_2,lat_1:lat_2,lon_1:lon_2].flatten()
avg[day][n,m] = sum(a_ref)/a_ref.shape[0]
re[day][n,m] = np.linalg.norm(aa) / np.linalg.norm(a_ref)
m=m+1
else:
m=0
for j in xrange(ts1, ts2+1):
aa = data_a[j,lat_1:lat_2,lon_1:lon_2].flatten() - data_b[j,lat_1:lat_2,lon_1:lon_2].flatten()
a_ref = data_b[j,lat_1:lat_2,lon_1:lon_2].flatten()
avg[day][n,m] = sum(a_ref)/a_ref.shape[0]
re[day][n,m] = np.linalg.norm(aa) / np.linalg.norm(a_ref)
m=m+1
re[day][(np.isnan(re[day])==True)] = 0.
re_err[day][n] = sum(re[day][n,:]) / re[day][n,:].shape[0]
avg_t[day][n] = sum(avg[day][n,:]) / avg[day][n,:].shape[0]
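# The impact metric above is a per-time-step relative L2 error,
#   re = ||storm - reference|| / ||reference||,
# e.g. aa = [1, -1] against a_ref = [3, 4] gives re = sqrt(2)/5 ~ 0.28.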
np.savetxt("%srelative_errors_t.txt" % (fpath), re[1], fmt='%.2e')
np.savetxt("%srelative_errors.txt" % (fpath), re_err[1], fmt='%.2e')
np.savetxt("%saverages.txt" % (fpath), avg[1], fmt='%.2e')
np.savetxt("%saverages_t.txt" % (fpath), avg_t[1], fmt='%.2e')
|
[
"adam.el-said@open.ac.uk"
] |
adam.el-said@open.ac.uk
|
bf4f8be8ccdd998f8098cbf3f6605a7b524c9816
|
f92722620b74644ee0f2e1a7461d4330ea3374da
|
/blog/migrations/0001_initial.py
|
7ade040810ee75b4aa619f2bb513df02743ad060
|
[] |
no_license
|
designwithabhishek/mywebsite
|
c01e2784b733a681f215cac1c449a98554ca8cb0
|
4aa0593cb750330921de4367e2a389c4918845a1
|
refs/heads/master
| 2023-05-11T10:12:24.617089
| 2019-06-25T17:36:08
| 2019-06-25T17:36:08
| 200,085,766
| 0
| 0
| null | 2023-04-21T20:35:26
| 2019-08-01T16:37:09
|
HTML
|
UTF-8
|
Python
| false
| false
| 835
|
py
|
# Generated by Django 2.2.2 on 2019-06-23 04:53
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('author', models.CharField(max_length=50)),
                ('created', models.DateTimeField(default=datetime.datetime(2019, 6, 23, 4, 53, 36, 984687, tzinfo=utc))),  # snapshot timestamp as the field default
('content', models.TextField()),
('image', models.ImageField(upload_to='')),
],
),
]
|
[
"designwithabhishek1996.com"
] |
designwithabhishek1996.com
|
4da9c1e6ca004b93d1f275e2bd86ea3be8e69b31
|
52bb1d25a8c146b81b876343f861025e034fa964
|
/roglick/dungeon/utils.py
|
fcf6a2a864c5ae7cc6c50f2c302b33b63529bf23
|
[
"MIT"
] |
permissive
|
Kromey/roglick
|
b3fc7a6bce7e60a150c9a9ed1cc825ef3988cf8a
|
b76202af71df0c30be0bd5f06a3428c990476e0e
|
refs/heads/master
| 2020-12-14T15:49:53.163385
| 2016-05-24T16:29:06
| 2016-05-24T16:29:06
| 21,549,421
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,519
|
py
|
from roglick.engine import random
from roglick.utils import clamp
def smoothstep(a, b, x):
"""Basic S-curve interpolation function.
Based on reference implementation available at
https://en.wikipedia.org/wiki/Smoothstep
"""
x = clamp((x - a)/(b - a), 0.0, 1.0)
return x*x*(3 - 2*x)
def smootherstep(a, b, x):
"""Improved S-curve interpolation function.
Based on reference implementation of the improved algorithm proposed by
Ken Perlin that is available at https://en.wikipedia.org/wiki/Smoothstep
"""
x = clamp((x - a)/(b - a), 0.0, 1.0)
return x*x*x*(x*(x*6 - 15) + 10);
def lerp(a, b, x):
"""Linear interpolation function."""
return a + x * (b - a)
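# Worked values for the helpers above:
#   lerp(0, 10, 0.25) == 2.5
#   smoothstep(0, 1, 0.25) == 0.15625 (the S-curve lags a straight line early on)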
class PerlinNoise2D(object):
def __init__(self, seed=None):
self.p = [x for x in range(256)]
if seed is None:
seed = random.get_int()
rand = random.Random(seed)
rand.shuffle(self.p)
def octave(self, x, y, octaves=5, persistence=0.5):
total = 0
frequency = 1
amplitude = 1
max_val = 0
for i in range(octaves):
total += self.noise(x*frequency, y*frequency) * amplitude
max_val += amplitude
amplitude *= persistence
frequency *= 2
return total / max_val
def noise(self, x, y):
xi = int(x)
yi = int(y)
xf = x - xi
yf = y - yi
u = self.fade(xf)
v = self.fade(yf)
aa = self.p_hash(self.p_hash( xi )+ yi )
ab = self.p_hash(self.p_hash( xi )+ yi+1)
ba = self.p_hash(self.p_hash(xi+1)+ yi )
bb = self.p_hash(self.p_hash(xi+1)+ yi+1)
x1 = lerp(self.grad(aa, xf , yf), self.grad(ba, xf-1, yf), u)
x2 = lerp(self.grad(ab, xf , yf-1), self.grad(bb, xf-1, yf-1), u)
return (lerp(x1, x2, v) + 1) / 2 # Constrain to [0,1] rather than [-1,1]
def fade(self, t):
return smootherstep(0.0, 1.0, t)
def p_hash(self, i):
i = i & 255
return self.p[i]
def grad(self, h, x, y):
"""This gradient function is based on Riven's optimization
Source: http://riven8192.blogspot.com/2010/08/calculate-perlinnoise-twice-as-fast.html
"""
h = h % 4
if h == 0:
return x + y
elif h == 1:
return -x + y
elif h == 2:
return x - y
elif h == 3:
return -x - y
else:
# Never happens
return 0
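# Usage sketch (coordinates and scale are hypothetical; seeding follows the
# roglick.engine.random wrapper used above):
#   noise = PerlinNoise2D(seed=42)
#   height = noise.octave(x / 64.0, y / 64.0, octaves=5, persistence=0.5)
# Both octave() and noise() return values in [0, 1].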
|
[
"travisvz@gmail.com"
] |
travisvz@gmail.com
|
019e5dd75cb9ff5d93826aea9822c1224063626a
|
3a7b0262d408c8faad77d0710d0bee91e27643b9
|
/Major_Requirements.py
|
a91f9661064cb93b0d8670f6a5c2951172039976
|
[] |
no_license
|
fmixson/testfulldegree
|
c1a13eb89001b017e7800304a3197d042a7e234b
|
de013a7f2171d66ab6a9fd4ed6a1604b53ef79d5
|
refs/heads/main
| 2023-06-27T23:08:22.016373
| 2021-08-08T20:07:37
| 2021-08-08T20:07:37
| 394,053,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,400
|
py
|
import pandas as pd
class MajorRequirements:
# major_requirements_dataframe = pd.read_csv("C:/Users/family/Desktop/Programming/English_PlanA_GE.csv")
# major_requirements_dataframe = pd.read_csv("C:/Users/family/Desktop/Programming/Copy of COMM_AA.csv")
# major_requirements_dict = {}
def __init__(self, revised_course_list, completed_ge_courses, major_requirements):
self.major_requirements = major_requirements
self.revised_course_list = revised_course_list
self.completed_ge_courses = completed_ge_courses
self.major_course_dict = {}
self.major_courses_list = []
self.major_courses_list2 = []
self.major_units_list = []
self.major_units_dict = {}
self.area_units_dict = {}
self.major_requirements_dict = {}
self.major_no_courses_requirement_dict = {}
# self.discipline_list = []
# self.discipline_set = set()
def _two_disciplines(self, course_key, total_area_units, total_units):
discipline = course_key.split()
discipline = discipline[0]
disc = False
# print('area units', total_area_units, 'total units', total_units - 3)
if total_area_units == (total_units - 3):
unique_disciplines = set(self.discipline_list)
# print(len(unique_disciplines))
# print('unique', unique_disciplines)
if len(unique_disciplines) < 2:
if discipline in unique_disciplines:
disc = True
else:
self.discipline_list.append(discipline)
else:
self.discipline_list.append(discipline)
# print('discipline list', self.discipline_list)
return disc
def _three_disciplines(self, course_key, total_area_units, total_units):
discipline = course_key.split()
discipline = discipline[0]
disc = False
# print('area units', total_area_units, 'total units', total_units - 6)
if total_area_units >= (total_units - 6):
unique_disciplines = set(self.discipline_list)
# print(len(unique_disciplines))
# print('unique', unique_disciplines)
if len(unique_disciplines) < 3:
if len(unique_disciplines) == 2:
self.discipline_list.append(discipline)
                elif len(unique_disciplines) == 1:
if discipline in unique_disciplines:
disc = True
else:
self.discipline_list.append(discipline)
else:
self.discipline_list.append(discipline)
return disc
def major_courses_completed(self, area_name, total_units, number_of_disciplines=1):
proficiency_list = ['Writing_Proficiency', 'Math_Proficiency', 'Health_Proficiency', 'Reading_Proficiency']
major_requirements_dataframe = pd.read_csv(self.major_requirements)
self.major_courses_list2 = []
total_area_units = 0
area_units_list = []
ge_course_list = []
self.major_requirements_dict[area_name] = total_units
print('total units', total_units)
if total_units == '':
pass
else:
if total_units < 3:
self.major_no_courses_requirement_dict[area_name] = 1
else:
self.major_no_courses_requirement_dict[area_name] = total_units / 3
disc = False
self.discipline_list = []
self.discipline_set = set()
# print('maj course no', self.major_no_courses_requirement_dict)
# print('maj req dict', self.major_requirements_dict)
for key in self.completed_ge_courses:
if key not in proficiency_list:
ge_course_list.append(self.completed_ge_courses[key])
for i in range(len(major_requirements_dataframe[area_name])):
ge_course = False
major_course = False
if total_area_units < total_units:
for course_key in self.revised_course_list:
if course_key == major_requirements_dataframe.loc[i, area_name]:
if course_key in ge_course_list:
ge_course = True
if course_key in self.major_courses_list:
major_course = True
if not major_course:
if number_of_disciplines > 1:
if number_of_disciplines == 2:
                                    disc = self._two_disciplines(course_key=course_key,
                                                                 total_area_units=total_area_units,
                                                                 total_units=total_units)
                                elif number_of_disciplines == 3:
                                    disc = self._three_disciplines(course_key=course_key,
                                                                   total_area_units=total_area_units,
                                                                   total_units=total_units)
if not disc:
self.area_units_dict[area_name] = self.revised_course_list[course_key]
self.major_courses_list.append(course_key)
self.major_courses_list2.append(course_key)
self.major_course_dict[area_name] = self.major_courses_list2
# print('dict under',self.major_course_dict)
area_units_list.append(self.revised_course_list[course_key])
if not ge_course:
self.major_units_list.append(self.revised_course_list[course_key])
total_area_units = sum(area_units_list)
self.area_units_dict[area_name] = total_area_units
# print('maj course dict', self.major_course_dict)
return self.major_requirements_dict, self.major_course_dict, self.major_no_courses_requirement_dict
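# Usage sketch (all names and values hypothetical): revised_course_list maps a
# course key such as 'ENGL 101' to its unit value, completed_ge_courses maps a
# GE area to the course that satisfied it, and major_requirements is the path
# to a CSV whose columns are area names.
#   mr = MajorRequirements({'ENGL 101': 3, 'COMM 110': 3},
#                          {'Area_A': 'ENGL 101'},
#                          'COMM_AA.csv')
#   reqs, courses, counts = mr.major_courses_completed('Area_1', 6,
#                                                      number_of_disciplines=2)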
|
[
"noreply@github.com"
] |
fmixson.noreply@github.com
|
d3d6757ce1df376dff4c92caaad8942329c824b0
|
801e30ca6313e09ae19c2109604325556edf7e11
|
/validate_hcp_release.py
|
295ec2b714eeaf5286237d8780121fdcfc0be382
|
[] |
no_license
|
MHouse/validate_hcp_release
|
5335dbb531564e52e38b10bf538cced6bc2b1265
|
0c9f98fcd51b5c7e7c64f962c6393019b67790ec
|
refs/heads/master
| 2021-01-16T18:18:22.110563
| 2013-02-22T21:17:38
| 2013-02-22T21:17:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,747
|
py
|
#! /usr/bin/env python
__author__ = 'mhouse01'
import requests
import json
import os
import csv
from lxml import etree
from sys import exit
from operator import itemgetter, attrgetter
import argparse
import ConfigParser
from seriesDetails import seriesDetails, csvOrder, seriesLabels, scanIsPackage
# Declare the XNAT Namespace for use in XML parsing
xnatNS = "{http://nrg.wustl.edu/xnat}"
xmlFormat = {'format': 'xml'}
jsonFormat = {'format': 'json'}
# PARSE INPUT
parser = argparse.ArgumentParser(description="Alpha program to pull Subject session parameters from XNAT for verification")
parser.add_argument("-c", "--config", dest="configFile", default="validate_hcp_release.cfg", type=str, help="config file must be specified")
parser.add_argument("-P", "--project", dest="Project", default="HCP_Phase2", type=str, help="specify project")
parser.add_argument("-S", "--subject", dest="Subject", type=str, help="specify subject of interest")
parser.add_argument("-D", "--destination_dir", dest="destDir", default='/tmp', type=str, help="specify the directory for output")
parser.add_argument("-M", "--output_map", dest="outputMap", default='all', type=str, help="specify the output mapping: all, release, package")
parser.add_argument("-v", "--verbose", dest="verbose", default=False, action="store_true", help="show more verbose output")
parser.add_argument('--version', action='version', version='%(prog)s: v0.1')
args = parser.parse_args()
args.destDir = os.path.normpath( args.destDir )
# Read the config file
config = ConfigParser.ConfigParser()
try:
config.read( args.configFile )
username = config.get('Credentials', 'username')
password = config.get('Credentials', 'password')
restServerName = config.get('Server', 'server')
restSecurity = config.getboolean('Server', 'security')
except ConfigParser.Error as e:
print "Error reading configuration file:"
print " " + str( e )
exit(1)
if restSecurity:
print "Using only secure connections"
restRoot = "https://" + restServerName
else:
print "Security turned off for all connections"
restRoot = "http://" + restServerName + ":8080"
# If we find an OS certificate bundle, use it instead of the built-in bundle
if requests.utils.get_os_ca_bundle_path() and restSecurity:
os.environ['REQUESTS_CA_BUNDLE'] = requests.utils.get_os_ca_bundle_path()
print "Using CA Bundle: %s" % requests.utils.DEFAULT_CA_BUNDLE_PATH
# Establish a Session ID
try:
r = requests.get( restRoot + "/data/JSESSION", auth=(username, password) )
# If we don't get an OK; code: requests.codes.ok
r.raise_for_status()
# Check if the REST Request fails
except (requests.ConnectionError, requests.exceptions.RequestException) as e:
print "Failed to retrieve REST Session ID:"
print " " + str( e )
exit(1)
restSessionID = r.content
print "Rest Session ID: %s " % (restSessionID)
restSessionHeader = {"Cookie": "JSESSIONID=" + restSessionID}
mrSessions = {"xsiType": "xnat:mrSessionData"}
# Get the list of MR Sessions for each Experiment
# Create a URL pointing to the Experiments for this Subject
restExperimentsURL = restRoot + "/data/archive/projects/" + args.Project + "/subjects/" + args.Subject + "/experiments/"
# Get the list of MR Sessions for the Subject in JSON format
try:
# Create a dictionary of parameters for the rest call
restParams = mrSessions.copy()
restParams.update(jsonFormat)
# Make the rest call
r = requests.get( restExperimentsURL, params=restParams, headers=restSessionHeader)
# If we don't get an OK; code: requests.codes.ok
r.raise_for_status()
# Check if the REST Request fails
except (requests.ConnectionError, requests.exceptions.RequestException) as e:
print "Failed to retrieve MR Session list: %s" % e
exit(1)
# Parse the JSON from the GET
experimentJSON = json.loads( r.content )
# Strip off the trash that comes back with it and store it as a list of name/value pairs
experimentResultsJSON = experimentJSON.get('ResultSet').get('Result')
# List Comprehensions Rock! http://docs.python.org/tutorial/datastructures.html
# Create a stripped down version of the results with a new field for seriesList; Store it in the experimentResults object
experimentResults = [ {'label': experimentItem.get('label').encode('ascii', 'ignore'),
'date': experimentItem.get('date').encode('ascii', 'ignore'),
'subjectSessionNum': None,
'seriesList': None } for experimentItem in experimentResultsJSON ]
# Loop over the MR Experiment Results
for experiment in experimentResults:
print "Gathering results for " + experiment['label']
# Compose a rest URL for this Experiment
restSingleExperimentURL = restExperimentsURL + experiment['label']
# Make a rest request to get the complete XNAT Session XML
try:
r = requests.get( restSingleExperimentURL, params=xmlFormat, headers=restSessionHeader, timeout=10.0 )
# If we don't get an OK; code: requests.codes.ok
r.raise_for_status()
# Check if the REST Request fails
except requests.Timeout as e:
print "Timed out while attempting to retrieve XML:"
print " " + str( e )
    if not restSecurity:  # restSecurity comes from the config file, not argparse
print "Note that insecure connections are only allowed locally"
exit(1)
# Check if the REST Request fails
except (requests.ConnectionError, requests.exceptions.RequestException) as e:
print "Failed to retrieve XML: %s" % e
exit(1)
# Parse the XML result into an Element Tree
root = etree.fromstring(r.text.encode(r.encoding))
# Extract the Study Date for the session
if experiment['date'] == "":
experiment['date'] = "2013-01-01"
print "Assuming study date of " + experiment['date']
# Start with an empty series list
seriesList = list()
# Iterate over 'scan' records that contain an 'ID' element
for element in root.iterfind(".//" + xnatNS + "scan[@ID]"):
# Create an empty seriesDetails record
currentSeries = seriesDetails()
#Record some basic experiment level info in each scan
currentSeries.subjectName = args.Subject
currentSeries.sessionLabel = experiment['label']
currentSeries.sessionDate = experiment['date']
currentSeries.fromScanXML( element )
# Add the current series to the end of the list
seriesList.append( currentSeries )
# Sort the series list by DateTime
seriesList.sort( key=attrgetter('DateTime') )
# Store the subjectSessionNum extracted from the first item (first acquired scan) in the sorted list
experiment['subjectSessionNum'] = iter(seriesList).next().subjectSessionNum
# Store the series list along with the experiment label
experiment['seriesList'] = seriesList
# Sort the Experiment Results list by the Subject Session Number
experimentResults.sort( key=itemgetter('subjectSessionNum') )
# Name the CSV file by the Subject name
csvFile = args.destDir + os.sep + args.Subject + "_" + args.outputMap + ".csv"
# Create an empty Series Notes object. This can be populated with field specific notes for each Experiment
seriesNotes = seriesDetails()
# Open the CSV file for write/binary
with open( csvFile, 'wb' ) as f:
# Create a CSV Writer for dictionary formatted objects. Give it the Dictionary order for output.
csvWriter = csv.DictWriter( f, csvOrder( args.outputMap ) )
# Write out the series labels as a Header
if args.outputMap != "package":
csvWriter.writerow( seriesLabels(args.outputMap) )
# Loop over all experiment results
for experiment in experimentResults:
# Populate the Series Notes for this Experiment with the Experiment Date and Label
seriesNotes.scan_ID = experiment['label']
seriesNotes.startTime = experiment['date']
# Write out the notes only on 'all' maps
if args.outputMap == "all":
csvWriter.writerow( seriesNotes.asDictionary(args.outputMap) )
# Loop over all scans in each experiment
for scan in experiment['seriesList']:
# Write each scan by converting it to a Dictionary and pulling the relevant Mapping subset
nextRow = scan.asDictionary(args.outputMap)
# But only if this row should be included
if args.outputMap == "all" or \
(args.outputMap == "release" and scan.targetForRelease == "1") or \
(args.outputMap == "release" and restServerName == "hcpx-demo.humanconnectome.org") or \
(args.outputMap == "package" and scanIsPackage(scan.dbDesc)):
csvWriter.writerow( nextRow )
print "Subject details written to: " + csvFile
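# Example invocation using the flags defined above (subject ID hypothetical):
#   ./validate_hcp_release.py -P HCP_Phase2 -S 100307 -M release -D /tmp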
|
[
"mdhouse@gmail.com"
] |
mdhouse@gmail.com
|
94885939895e110e0050528f9b92b238256a9c00
|
39efbd67fa02ef628bd86051781145b77b8244d9
|
/PLXs/086-01173-00 ok/CSVReader.py
|
2965cac991750f4034087c8861c66e11bd242ba1
|
[] |
no_license
|
hbenr/ProjectBender
|
9df2326df01ec04db93d2311e0107a5ac2706802
|
c0432ae0a9ceaf6442f92f59805bdfbdddc2fd14
|
refs/heads/master
| 2021-05-29T19:36:07.437213
| 2015-09-17T20:02:07
| 2015-09-17T20:02:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,343
|
py
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: MAT.TE
#
# Created: 20/08/2015
# Copyright: (c) MAT.TE 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
import csv
from UtilitiesHB import *
def cadFetch(BOARD_ORIGIN, CAD_FILENAME):
startSaving = False
with open(CAD_FILENAME + '.csv', 'rb') as cadFile:
reader = csv.reader(cadFile)
fiducials = []
devices = []
for row in reader:
if row[0] == 'Name':
startSaving = True
continue
if startSaving and (row[0] != '' and not 'H' in row[0]
and not 'TP' in row[0] and not 'F' in row[0] and not 'N' in row[0]
and not 'P' in row[0] and not 'K' in row[0] and not 'LED' in row[0]
and not 'MOV' in row[0]):
devices.append(['d', unitsConverter(int(row[2]), False, BOARD_ORIGIN, False),
unitsConverter(int(row[1]), False, BOARD_ORIGIN, True), row[0].lower(), 'n0000', row[3],
'partNo', 'f-1', row[0].lower(), 'SHAPE'])
elif startSaving and 'F' in row[0]:
fiducials.append(['f', unitsConverter(int(row[2]), False, BOARD_ORIGIN, False),
unitsConverter(int(row[1]), False, BOARD_ORIGIN, True)])
elif startSaving and (row[0] == ''):
break
return fiducials, devices
def bomFetch(devices, BOM_FILENAME):
startSaving = False
with open(BOM_FILENAME + '.csv', 'rb') as bomFile:
reader = csv.reader(bomFile)
        for row in reader:
            if row[0] == 'Part No':
                startSaving = True
                continue
            # Expand the device references only after the header row, so
            # deviceEnumerator never sees non-device header text.
            currentDevices = []
            for elem in row[8].split(','):
                currentDevices.extend(deviceEnumerator(elem))
if startSaving and row[8] != '':
for elem in currentDevices:
for component in devices:
if elem.lower() == component[3]:
component[6] = row[4]
return sorted(devices, key= lambda x: int(x[2]))
def main():
raw_input("Wrong file! Use plxHelper ")
if __name__ == '__main__':
main()
|
[
"MAT.TE@CTLMAT0301D.Robertshaw.com"
] |
MAT.TE@CTLMAT0301D.Robertshaw.com
|
4e303dd29190aa0d67c81ae34f62c8efeeaa0d0a
|
b17af89152a7305efb915da7c856c744e7dbd4f0
|
/Dict_Project01_NumericIntegrasjon/F6_SendData.py
|
da51e5ba8dbeb3ed8133bee03f93e93dbcebc84e
|
[] |
no_license
|
EspenEig/Bachelor2021-vedlegg
|
6bb8e9efa84710500855f6129ce8f706b16bd690
|
2893f41e9e92f757360fe7d85dc03fd51d497f39
|
refs/heads/main
| 2023-05-01T12:34:45.403503
| 2021-05-12T16:31:59
| 2021-05-12T16:31:59
| 358,355,290
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
import json
def SendData(robot, measurements, online):
data = {}
for key in measurements:
if key == "zeroTime" or key == "ts":
continue
data[key] = (measurements[key][-1])
if online:
msg = json.dumps(data)
robot["connection"].send(msg)
if not robot["connection"].recv(3) == b"ack":
print("No data ack")
robot["out_file"].write("{},{},{},{}\n".format(
data["light"],
data["time"],
data["flow"],
data["volume"]))
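# --- usage sketch (illustrative only; the structures below are assumptions
# about the caller's data, not part of this module) --------------------------
if __name__ == "__main__":
    import sys

    class _FakeFile(object):
        def write(self, s):
            sys.stdout.write(s)

    robot = {"out_file": _FakeFile()}
    measurements = {"zeroTime": [0], "ts": [0.0], "light": [42],
                    "time": [1.5], "flow": [0.3], "volume": [7.1]}
    SendData(robot, measurements, online=False)  # offline: only writes the row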
|
[
"54357741+EspenEig@users.noreply.github.com"
] |
54357741+EspenEig@users.noreply.github.com
|
341fc6379af0b753834833efa91503520488d7fa
|
a3aaf7bb73471c67d4adc40aee159e60e7fc964b
|
/face_pic.py
|
f2573255284ef455f037aef1a2398b61799bfee2
|
[] |
no_license
|
muskaankularia/Gaze-tracker
|
29360516efbe94a8ef19aeefa8805db7224b15df
|
dcae52f85d486ce0f8ec1566814be7136c97df38
|
refs/heads/master
| 2021-08-22T05:53:12.770849
| 2017-11-29T12:09:06
| 2017-11-29T12:09:06
| 112,461,356
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,519
|
py
|
import numpy as np
import cv2
import sys
import os
import shutil
import timm  # eye-centre helper module; only referenced by the commented-out findEyeCenter call below
if os.path.exists('./data'):
shutil.rmtree('./data')
dirname = 'data'
os.mkdir(dirname)
face_cascade = cv2.CascadeClassifier('/usr/local/Cellar/opencv/3.3.0_3/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('//usr/local/Cellar/opencv/3.3.0_3/share/OpenCV/haarcascades/haarcascade_eye.xml')
# mouth_cascade = cv2.CascadeClassifier('/usr/local/Cellar/opencv/3.3.0_3/share/OpenCV/haarcascades/haarcascade_mcs_mouth.xml')
mouth_cascade = cv2.CascadeClassifier('./haarcascade_mcs_mouth.xml')
# if len(sys.argv) < 2:
# sys.exit('Wrong Usage')
# image_name = sys.argv[1]
# img = cv2.imread(image_name)
camera = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('sample.avi',fourcc, 3, (1280,720))
counter = 0
kernel = np.ones((3,3),np.uint8)
while 1:
retval, img = camera.read()
# print img.shape
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# print 'y'
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
# print 'face found'
cv2.rectangle(img, (x,y), (x+w, y+h), 0, 2)
roi_face = gray[y:y+h, x:x+w]
roi_face_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_face, 1.3, 5)
for (ex, ey, ew, eh) in eyes:
counter += 1
cv2.rectangle(roi_face_color, (ex,ey), (ex+ew, ey+eh), (0,255,0), 2)
# print "eye " + str(ex) + " " + str(ey)
# roi_eye = roi_face[int(1.2*ey):int(0.8*(ey+eh)), int(1.2*ex):int(0.8*(ex+ew))]
roi_eye = roi_face[ey:ey+eh, ex:ex+ew]
center = 0
roi_eye = cv2.GaussianBlur(roi_eye,(3,3),0)
roi_eye = cv2.addWeighted(roi_eye,1.5,roi_eye,-0.5,0)
roi_eye_canny = cv2.Canny(roi_eye,100,200)
cv2.imwrite('./data/canny' + str(counter) + '.png', roi_eye_canny)
laplacian = cv2.Laplacian(roi_eye,cv2.CV_64F)
cv2.imwrite('./data/lapla' + str(counter) + '.png', laplacian)
# res = cv2.resize(roi_eye,(int(ew/2), int(eh/2)), interpolation = cv2.INTER_AREA)
roi_eyex = cv2.Sobel(roi_eye, cv2.CV_64F, 1, 0, ksize=3)
roi_eyey = cv2.Sobel(roi_eye, cv2.CV_64F, 0, 1, ksize=3)
roi_eyex = np.absolute(roi_eyex)
roi_eyey = np.absolute(roi_eyey)
roi_eyex = np.uint8(roi_eyex)
roi_eyey = np.uint8(roi_eyey)
# sobelx64f = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)
# abs_sobel64f = np.absolute(sobelx64f)
# sobel_8u = np.uint8(abs_sobel64f)
cv2.imwrite('./data/zsobely' + str(counter) + '.png', roi_eyey)
cv2.imwrite('./data/zsobelx' + str(counter) + '.png', roi_eyex)
ret, tmp = cv2.threshold(roi_eyex, 0, 255, cv2.THRESH_OTSU)
tmp = cv2.erode(tmp, kernel, iterations=1)
cv2.imwrite('./data/zsobelxt' + str(counter) + '.png', tmp)
mag = np.hypot(roi_eyex, roi_eyey) # magnitude
mag *= 255.0 / np.max(mag) # normalize (Q&D)
roi_eye_sobel = mag.astype(np.uint8)
# roi_eye_sobel = cv2.morphologyEx(roi_eye_sobel, cv2.MORPH_OPEN, kernel)
cv2.imwrite('./data/xy' + str(counter) + '.png', roi_eye_sobel)
# roi_eye_sobel = cv2.morphologyEx(roi_eye_sobel, cv2.MORPH_OPEN, kernel)
# roi_eye_sobel = cv2.erode(roi_eye_sobel, kernel, iterations = 1)
# roi_eye_sobel = cv2.morphologyEx(roi_eye_sobel, cv2.MORPH_CLOSE, kernel)
ret, roi_eye_sobel = cv2.threshold(roi_eye_sobel, 0, 255, cv2.THRESH_OTSU)
roi_eye_sobel = cv2.erode(roi_eye_sobel, kernel, iterations=1)
cv2.imwrite('./data/tempthresh' + str(counter) + '.png', roi_eye_sobel)
roi_eye_color = roi_face_color[ey:ey+eh, ex:ex+ew]
# center = timm.findEyeCenter(roi_eye_color, (0,0))
# cv2.circle(roi_eye_color, center, 5, (255, 255, 255), 2)
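            # HoughCircles notes (the values below are the author's
            # heuristics): param1 is the upper Canny edge threshold used
            # internally, param2 is the accumulator threshold for circle
            # centres (smaller => more, possibly spurious, circles), and the
            # radius bounds scale with the detected eye width ew.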
pupils = cv2.HoughCircles(roi_eye_sobel, cv2.HOUGH_GRADIENT, 1, 100, param1 = 100, param2 = 10, minRadius=int(ew/11), maxRadius=int(ew/3))
if pupils is not None:
# print 'not none'
pupils = np.round(pupils[0,:]).astype("int")
for (x,y,r) in pupils:
print str(x) + " " + str(y) + " " + str(r) + " --- " + str(counter) + " " + str(int(ew/11)) + "-" + str(int(ew/3))
# cv2.circle(roi_eye_color, (x, y), r, (255, 165, 0), 2)
cv2.circle(roi_eye_color, (x, y), 2, (255, 165, 0), 3)
# cv2.imshow('eye' + str(x), roi_eye_color)
# print roi_eye_sobel.shape
# print roi_eye_color.shape
comb = np.zeros(shape=(roi_eye_color.shape[0], roi_eye_color.shape[1]*2, roi_eye_color.shape[2]), dtype=np.uint8)
comb[:roi_eye_color.shape[0], :roi_eye_color.shape[1]] = roi_eye_color
comb[:roi_eye_sobel.shape[0], roi_eye_sobel.shape[1]:] = roi_eye_sobel[:, :, None]
# cat = np.concatenate([roi_eye_sobel, roi_eye_color])
cv2.imwrite('./data/eye' + str(counter) + '.png', comb)
# cv2.moveWindow('eye' + str(x), 1000, 100)
# cv2.resizeWindow('eye' + str(x), eh*2, ew*2)
# mouths = mouth_cascade.detectMultiScale(roi_face, 1.7, 11)
# for (mx, my, mw, mh) in mouths:
# cv2.rectangle(roi_face_color, (mx, my), (mx+mw, my+mh), (0, 0, 0), 2)
# roi_mouth = roi_face[my:my+mh, mx:mx+mw]
# roi_mouth_color = roi_face_color[my:my+mh, mx:mx+mw]
# roi_mouth = cv2.cornerHarris(roi_mouth, 2, 3, 0.04)
# roi_mouth = cv2.dilate(roi_mouth, None)
# roi_mouth_color[roi_mouth>0.01*roi_mouth.max()]=[0,0,255]
out.write(img)
cv2.imshow('test', img)
# cv2.imshow('bhawsar', gray)
# cv2.moveWindow('bhawsar', 800,100)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
camera.release()
out.release()
# cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
muskaankularia.noreply@github.com
|
beff39c0b7dcb6bdf841867e852ac3a4d5057438
|
42dc79035b8488b59374a44ee87136d9fd56bdb3
|
/30-Day-Challange/Day-7/count_negative_sorted-2.py
|
1903e4b1dc070af2101c398bcd4be981714a4312
|
[
"Apache-2.0"
] |
permissive
|
EashanKaushik/LeetCode
|
f8e655b8a52fa01ef5def44b18b2352875bb7ab8
|
8ee2a61cefa42b332b6252fafff4a2772d25aa31
|
refs/heads/main
| 2023-06-06T17:15:54.218097
| 2021-07-06T11:46:35
| 2021-07-06T11:46:35
| 371,109,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
class Solution:
def countNegatives(self, grid):
m = len(grid)
n = len(grid[0])
current = 0
row = 0
col = n - 1
while row < m and col >= 0:
curr = grid[row][col]
if curr < 0:
current += m - row
col -= 1
else:
row += 1
return current
s = Solution()
print(s.countNegatives([[4,3,2,-1],[3,2,1,-1],[1,1,-1,-2],[-1,-1,-2,-3]]))
|
[
"EashanK16@gmail.com"
] |
EashanK16@gmail.com
|
113b1426d9036aee80c7202882206d1f33646a46
|
fa1e90dedb7f9b84cd210420215ff6a9bf7e6f2d
|
/airmozilla/suggest/forms.py
|
605254a63fff168bd1e667a2ed8a5f5f55e9866b
|
[] |
no_license
|
sara-mansouri/airmozilla
|
f7bdf6aeafa9a7a299fc69c506e186ba47be7ccb
|
8f93162be46044798df1e6d0ce80c8407fc41995
|
refs/heads/master
| 2021-01-16T18:28:35.569244
| 2014-03-28T02:59:31
| 2014-03-28T02:59:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,080
|
py
|
from django import forms
from django.conf import settings
from django.template.defaultfilters import filesizeformat
from django.utils.timesince import timesince
from django.utils.safestring import mark_safe
from django.db.models import Q
from slugify import slugify
import requests
from funfactory.urlresolvers import reverse
from airmozilla.base.forms import BaseModelForm
from airmozilla.main.models import (
SuggestedEvent,
Event,
Tag,
Channel,
SuggestedEventComment
)
from airmozilla.comments.models import SuggestedDiscussion
from airmozilla.uploads.models import Upload
from . import utils
class StartForm(BaseModelForm):
event_type = forms.ChoiceField(
label='',
choices=[
('upcoming', 'Upcoming'),
('pre-recorded', 'Pre-recorded'),
('popcorn', 'Popcorn')
],
widget=forms.widgets.RadioSelect()
)
class Meta:
model = SuggestedEvent
fields = ('title',)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(StartForm, self).__init__(*args, **kwargs)
# self.fields['upcoming'].label = ''
# self.fields['upcoming'].widget = forms.widgets.RadioSelect(
# choices=[(True, 'Upcoming'), (False, 'Pre-recorded')]
# )
def clean_title(self):
value = self.cleaned_data['title']
if Event.objects.filter(title__iexact=value):
raise forms.ValidationError("Event title already used")
if SuggestedEvent.objects.filter(title__iexact=value, user=self.user):
raise forms.ValidationError(
"You already have a suggest event with this title"
)
return value
class TitleForm(BaseModelForm):
class Meta:
model = SuggestedEvent
fields = ('title', 'slug')
def clean_slug(self):
value = self.cleaned_data['slug']
if value:
if Event.objects.filter(slug__iexact=value):
raise forms.ValidationError('Already taken')
return value
def clean_title(self):
value = self.cleaned_data['title']
if Event.objects.filter(title__iexact=value):
raise forms.ValidationError("Event title already used")
return value
def clean(self):
cleaned_data = super(TitleForm, self).clean()
if 'slug' in cleaned_data and 'title' in cleaned_data:
if not cleaned_data['slug']:
cleaned_data['slug'] = slugify(cleaned_data['title']).lower()
if Event.objects.filter(slug=cleaned_data['slug']):
raise forms.ValidationError('Slug already taken')
return cleaned_data
class ChooseFileForm(BaseModelForm):
class Meta:
model = SuggestedEvent
fields = ('upload',)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(ChooseFileForm, self).__init__(*args, **kwargs)
this_or_nothing = (
Q(suggested_event__isnull=True) |
Q(suggested_event=self.instance)
)
uploads = (
Upload.objects
.filter(user=self.user)
.filter(this_or_nothing)
.order_by('created')
)
self.fields['upload'].widget = forms.widgets.RadioSelect(
choices=[(x.pk, self.describe_upload(x)) for x in uploads]
)
@staticmethod
def describe_upload(upload):
html = (
'%s <br><span class="metadata">(%s) uploaded %s ago</span>' % (
upload.file_name,
filesizeformat(upload.size),
timesince(upload.created)
)
)
return mark_safe(html)
class PopcornForm(BaseModelForm):
class Meta:
model = SuggestedEvent
fields = ('popcorn_url',)
def __init__(self, *args, **kwargs):
super(PopcornForm, self).__init__(*args, **kwargs)
self.fields['popcorn_url'].label = 'Popcorn URL'
def clean_popcorn_url(self):
url = self.cleaned_data['popcorn_url']
if '://' not in url:
url = 'http://' + url
response = requests.get(url)
if response.status_code != 200:
raise forms.ValidationError('URL can not be found')
return url
class DescriptionForm(BaseModelForm):
class Meta:
model = SuggestedEvent
fields = ('description', 'short_description')
def __init__(self, *args, **kwargs):
super(DescriptionForm, self).__init__(*args, **kwargs)
self.fields['description'].help_text = (
"Write a description of your event that will entice viewers to "
"watch.<br>"
"An interesting description improves the chances of your "
"presentation being picked up by bloggers and other websites."
"<br>"
"Please phrase your description in the present tense. "
)
self.fields['short_description'].help_text = (
"This Short Description is used in public feeds and tweets. "
"<br>If your event is non-public be careful "
"<b>not to "
"disclose sensitive information here</b>."
"<br>If left blank the system will use the first few "
"words of the description above."
)
class DetailsForm(BaseModelForm):
tags = forms.CharField(required=False)
enable_discussion = forms.BooleanField(required=False)
class Meta:
model = SuggestedEvent
fields = (
'location',
'start_time',
'privacy',
'category',
'tags',
'channels',
'additional_links',
'remote_presenters',
)
def __init__(self, *args, **kwargs):
super(DetailsForm, self).__init__(*args, **kwargs)
self.fields['channels'].required = False
if not self.instance.upcoming:
del self.fields['location']
del self.fields['start_time']
del self.fields['remote_presenters']
else:
self.fields['location'].required = True
self.fields['start_time'].required = True
self.fields['location'].help_text = (
"Choose an Air Mozilla origination point. <br>"
"If the location of your event isn't on the list, "
"choose Live Remote. <br>"
"Note that live remote dates and times are UTC."
)
self.fields['remote_presenters'].help_text = (
"If there will be presenters who present remotely, please "
"enter email addresses, names and locations about these "
"presenters."
)
self.fields['remote_presenters'].widget.attrs['rows'] = 3
if 'instance' in kwargs:
event = kwargs['instance']
if event.pk:
tag_format = lambda objects: ','.join(map(unicode, objects))
tags_formatted = tag_format(event.tags.all())
self.initial['tags'] = tags_formatted
self.fields['tags'].help_text = (
"Enter some keywords to help viewers find the recording of your "
"event. <br>Press return between keywords"
)
self.fields['channels'].help_text = (
"Should your event appear in one or more particular "
"Air Mozilla Channels? <br>If in doubt, select Main."
)
self.fields['additional_links'].help_text = (
"If you have links to slides, the presenter's blog, or other "
"relevant links, list them here and they will appear on "
"the event page."
)
self.fields['additional_links'].widget.attrs['rows'] = 3
def clean_tags(self):
tags = self.cleaned_data['tags']
split_tags = [t.strip() for t in tags.split(',') if t.strip()]
final_tags = []
for tag_name in split_tags:
t, __ = Tag.objects.get_or_create(name=tag_name)
final_tags.append(t)
return final_tags
def clean_channels(self):
channels = self.cleaned_data['channels']
if not channels:
return Channel.objects.filter(slug=settings.DEFAULT_CHANNEL_SLUG)
return channels
class DiscussionForm(BaseModelForm):
emails = forms.CharField(required=False, label="Moderators")
class Meta:
model = SuggestedDiscussion
fields = ('enabled', 'moderate_all')
def __init__(self, *args, **kwargs):
super(DiscussionForm, self).__init__(*args, **kwargs)
event = self.instance.event
        self.fields['moderate_all'].help_text = (
            'Every comment has to be approved before being shown '
            'publicly. '
        )
self.fields['emails'].widget.attrs.update({
'data-autocomplete-url': reverse('suggest:autocomplete_emails')
})
if event.privacy != Event.PRIVACY_COMPANY:
self.fields['moderate_all'].widget.attrs.update(
{'disabled': 'disabled'}
)
self.fields['moderate_all'].help_text += (
'<br>If the event is not MoCo private you have to have '
'full moderation on '
'all the time.'
)
def clean_emails(self):
value = self.cleaned_data['emails']
emails = list(set([
x.lower().strip() for x in value.split(',') if x.strip()
]))
for email in emails:
if not utils.is_valid_email(email):
raise forms.ValidationError(
'%s is not a valid email address' % (email,)
)
return emails
class PlaceholderForm(BaseModelForm):
class Meta:
model = SuggestedEvent
fields = ('placeholder_img',)
def __init__(self, *args, **kwargs):
super(PlaceholderForm, self).__init__(*args, **kwargs)
self.fields['placeholder_img'].help_text = (
"We need a placeholder image for your event. <br>"
"A recent head-shot of the speaker is preferred. <br>"
"Placeholder images should be 200 x 200 px or larger."
)
#class ParticipantsForm(BaseModelForm):
#
# participants = forms.CharField(required=False)
#
# class Meta:
# model = SuggestedEvent
# fields = ('participants',)
#
# def clean_participants(self):
# participants = self.cleaned_data['participants']
# split_participants = [p.strip() for p in participants.split(',')
# if p.strip()]
# final_participants = []
# for participant_name in split_participants:
# p = Participant.objects.get(name=participant_name)
# final_participants.append(p)
# return final_participants
#
class SuggestedEventCommentForm(BaseModelForm):
class Meta:
model = SuggestedEventComment
fields = ('comment',)
|
[
"mail@peterbe.com"
] |
mail@peterbe.com
|
ee56c5923d4e412ecef2b0e9a6abc6e9db42e260
|
1f60222696b27d1a0f93282c73f72f6870c0c7d6
|
/alpha_transform/AlphaTransformUtility.py
|
6c4cbca2cb07bcccddf7a558df7b93567d90c79c
|
[
"MIT"
] |
permissive
|
dedale-fet/alpha-transform
|
7ff3d5859ccf4924170894a9eb030fa8ac4da099
|
41b4fb0b28b908391f9ddf17cdcde8b956d3d064
|
refs/heads/master
| 2021-01-13T14:28:57.702914
| 2020-08-04T15:40:22
| 2020-08-04T15:40:22
| 72,874,669
| 14
| 7
| null | 2020-07-19T18:47:18
| 2016-11-04T18:26:33
|
Python
|
UTF-8
|
Python
| false
| false
| 11,097
|
py
|
r"""
This module contains several utility functions which can be used e.g.
for thresholding the alpha-shearlet coefficients or for using the
alpha-shearlet transform for denoising.
Finally, it also contains the functions :func:`my_ravel` and :func:`my_unravel`
which can be used to convert the alpha-shearlet coefficients into a
1-dimensional vector and back. This is in particular convenient for the
subsampled transform, where this conversion is not entirely trivial, since the
different "coefficient images" have varying dimensions.
"""
import os.path
import math
import numpy as np
import numexpr as ne
import scipy.ndimage
def find_free_file(file_template):
r"""
This function finds the first nonexistent ("free") file obtained by
"counting upwards" using the passed template/pattern.
**Required Parameter**
:param string file_template:
This should be a string whose ``format()`` method can be called
using only an integer argument, e.g. ``'/home/test_{0:0>2d}.txt'``,
which would result in ``find_free_file`` consecutively checking
the following files for existence:
`/home/test_00.txt,`
`/home/test_01.txt, ...`
**Return value**
:return:
``file_template.format(i)`` for the first value of ``i`` for which
the corresponding file does not yet exist.
"""
i = 0
while os.path.isfile(file_template.format(i)):
i += 1
return file_template.format(i)
def threshold(coeffs, thresh_value, mode):
r"""
Given a set of coefficients, this function performs a thresholding
procedure, i.e., either soft or hard thresholding.
**Required parameters**
:param coeffs:
The coefficients to be thresholded.
Either a three-dimensional :class:`numpy.ndarray` or a generator
producing two dimensional :class:`numpy.ndarray` objects.
:param float thresh_value:
The thresholding cutoff :math:`c` for the coefficients, see also
``mode`` for more details.
:param string mode:
Either ``'hard'`` or ``'soft'``. This parameter determines whether
the hard thresholding operator
.. math::
\Lambda_cx
=\begin{cases}
x, & \text{if }|x|\geq c,\\
0, & \text{if }|x|<c,
\end{cases}
or the soft thresholding operator
.. math::
\Lambda_cx
=\begin{cases}
x\cdot \frac{|x|-c}{|x|}, & \text{if }|x|\geq c,\\
0, & \text{if }|x|<c
\end{cases}
is applied to each entry of the coefficients.
**Return value**
:return:
A generator producing the thresholded coefficients. Each
thresholded "coefficient image", i.e., each thresholded
2-dimensional array, is produced in turn.
"""
if mode == 'hard':
for coeff in coeffs:
ev_string = 'coeff * (real(abs(coeff)) >= thresh_value)'
yield ne.evaluate(ev_string)
# yield coeff * (np.abs(coeff) >= thresh_value)
elif mode == 'soft':
for coeff in coeffs:
ev_string = ('(real(abs(coeff)) - thresh_value) * '
'(real(abs(coeff)) >= thresh_value)')
large_values = ne.evaluate(ev_string)
# large_values = np.maximum(np.abs(coeff) - thresh_value, 0)
ev_str_2 = 'coeff * large_values / (large_values + thresh_value)'
yield ne.evaluate(ev_str_2)
# yield coeff * large_values / (large_values + thresh_value)
else:
raise ValueError("'mode' must be 'hard' or 'soft'")
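# --- usage sketch (illustrative only, not part of the library API) ----------
# `threshold` just needs an iterable of 2-D arrays, so it can be exercised
# with plain numpy data; the 0.5 cutoff below is an arbitrary demo value.
def _demo_threshold():
    demo = [np.array([[0.2, 0.7], [-0.9, 0.1]], dtype=complex)]
    hard = list(threshold(demo, 0.5, 'hard'))
    soft = list(threshold(demo, 0.5, 'soft'))
    # hard keeps entries with |x| >= 0.5 unchanged; soft additionally
    # shrinks the surviving entries toward zero by the cutoff.
    return hard, soft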
def scale_gen(trafo):
r"""
**Required parameter**
:param trafo:
An object of class :class:`AlphaTransform.AlphaShearletTransform`.
**Return value**
:return:
A generator producing integers. The i-th produced integer
is the *scale* (starting from -1 for the low-pass part) of the i-th
alpha-shearlet associated to ``trafo``.
Hence, if ``coeff = trafo.transform(im)``, then the following iteration
produces the associated scale to each "coefficient image"::
for scale, c in zip(scale_gen(trafo), coeff):
...
"""
indices_gen = iter(trafo.indices)
next(indices_gen)
yield -1
for index in indices_gen:
yield index[0]
def denoise(img, trafo, noise_lvl, multipliers=None):
r"""
Given a noisy image :math:`\tilde f`, this function performs a denoising
procedure based on shearlet thresholding. More precisely:
#. A scale dependent threshold parameter :math:`c=(c_j)_j` is calculated
according to :math:`c_j=m_j\cdot \lambda / \sqrt{N_1\cdot N_2}`, where
:math:`m_j` is a multiplier for the jth scale, :math:`\lambda` is the
noise level present in the image :math:`\tilde f` and
:math:`N_1\times N_2` are its dimensions.
#. The alpha-shearlet transform of :math:`\tilde f` is calculated
using ``trafo``.
#. Hard thesholding with threshold parameter (cutoff) :math:`c` is
performed on alpha-shearlet coefficients, i.e., for each scale ``j``,
each of the coefficients belonging to the jth scale is set to zero if
its absolute value is smaller than :math:`c_j` and otherwise it is
left unchanged.
#. The (pseudo)-inverse of the alpha-shearlet transform is applied to the
thresholded coefficients and this reconstruction is the return value
of the function.
**Required parameters**
:param numpy.ndarray img:
The “image” (2 dimensional array) that should be denoised.
:param trafo:
An object of class :class:`AlphaTransform.AlphaShearletTransform`.
This object is used to calculate the (inverse) alpha-shearlet
transform during the denoising procedure.
The dimension of the transform and of ``img`` need to coincide.
:param float noise_lvl:
The (presumed) noise level present in ``img``.
If ``img = img_clean + noise``, then ``noise_lvl`` should be
approximately equal to the :math:`\ell^2` norm of ``noise``.
In particular, if ``im`` is obtained by adding Gaussian noise with
standard deviation :math:`\sigma` (in each entry) to a noise free
image :math:`f`, then the noise level :math:`\lambda` is given by
:math:`\lambda= \sigma\cdot \sqrt{N_1\cdot N_2}`; see also
:func:`AdaptiveAlpha.optimize_denoising`.
**Keyword parameter**
:param list multipliers:
A list of multipliers (floats) for each scale. ``multipliers[j]``
determines the value of :math:`m_j` and thus of the cutoff
:math:`c_j = m_j \cdot \lambda / \sqrt{N_1 \cdot N_2}` for scale ``j``.
In particular, ``len(multipliers)`` needs
to be equal to the number of the scales of ``trafo``.
**Return value**
:return:
The denoised image, i.e., the result of the denoising procedure
described above.
"""
coeff_gen = trafo.transform_generator(img, do_norm=True)
if multipliers is None:
# multipliers = [1] + ([2.5] * (trafo.num_scales - 1)) + [5]
multipliers = [3] * trafo.num_scales + [4]
width = trafo.width
height = trafo.height
thresh_lvls = [multi * noise_lvl / math.sqrt(width * height)
for multi in multipliers]
thresh_coeff = (coeff * (np.abs(coeff) >= thresh_lvls[scale + 1])
for (coeff, scale) in zip(coeff_gen, scale_gen(trafo)))
recon = trafo.inverse_transform(thresh_coeff, real=True, do_norm=True)
return recon
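# Usage sketch (assumes `trafo` is an AlphaTransform.AlphaShearletTransform
# built for the image's shape and `sigma` is the Gaussian noise standard
# deviation, matching the convention in the docstring above):
#     noise_lvl = sigma * math.sqrt(img.shape[0] * img.shape[1])
#     denoised = denoise(noisy_img, trafo, noise_lvl)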
def image_load(path):
r"""
Given a '.npy' or '.png' file, this function loads the file and returns
its content as a two-dimensional :class:`numpy.ndarray` of :class:`float`
values.
For '.png' images, the pixel values are normalized to be between 0 and 1
(instead of between 0 and 255) and color images are converted to
grey-scale.
**Required parameter**
:param string path:
Path to the image to be converted, either of a '.png' or '.npy' file.
**Return value**
:return:
The loaded image as a two-dimensional :class:`numpy.ndarray`.
"""
image_extension = path[path.rfind('.'):]
if image_extension == '.npy':
return np.array(np.load(path), dtype='float64')
elif image_extension == '.png':
return np.array(scipy.ndimage.imread(path, flatten=True) / 255.0,
dtype='float64')
else:
raise ValueError("This function can only load .png or .npy files.")
def _print_listlist(listlist):
for front, back, l in zip(['['] + ([' '] * (len(listlist) - 1)),
([''] * (len(listlist) - 1)) + [']'],
listlist):
print(front + str(l) + back)
def my_ravel(coeff):
r"""
The subsampled alpha-shearlet transform returns a list of differently
sized(!) two-dimensional arrays. Likewise, the fully sampled transform
yields a three dimensional numpy array containing the coefficients.
The present function can be used (in both cases) to convert this list into
a single *one-dimensional* numpy array.
.. note::
In order to invert this conversion to a one-dimensional array,
use the associated function :func:`my_unravel`. Precisely,
:func:`my_unravel` satisfies
``my_unravel(my_trafo, my_ravel(coeff)) == coeff``,
if coeff is obtained from calling ``my_trafo.transform(im)``
for some image ``im``.
The preceding equality holds at least up to (negligible)
differences (the left-hand side is a generator while the
right-hand side could also be a list).
**Required parameter**
:param list coeff:
A list (or a generator) containing/producing two-dimensional
numpy arrays.
**Return value**
:return:
A one-dimensional :class:`numpy.ndarray` from which **coeff** can
be reconstructed.
"""
return np.concatenate([c.ravel() for c in coeff])
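# Quick shape check (my_ravel itself needs no transform object):
# my_ravel([np.zeros((2, 3)), np.ones((4, 4))]).shape == (22,)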
def my_unravel(trafo, coeff):
r"""
This method is a companion method to :func:`my_ravel`.
See the documentation of that function for more details.
**Required parameters**
:param trafo:
An object of class :class:`AlphaTransform.AlphaShearletTransform`.
:param numpy.ndarray coeff:
A one-dimensional numpy array, obtained via
``my_ravel(coeff_unrav)``, where ``coeff_unrav`` is of the same
dimensions as the output of ``trafo.transform(im)``, where
``im`` is an image.
**Return value**
:return:
A generator producing the same values as ``coeff_unrav``, i.e.,
an "unravelled" version of ``coeff``.
"""
coeff_sizes = [spec.shape for spec in trafo.spectrograms]
split_points = np.cumsum([spec.size for spec in trafo.spectrograms])
return (c.reshape(size)
for size, c in zip(coeff_sizes, np.split(coeff, split_points)))
|
[
"felix.voigtlaender@gmail.com"
] |
felix.voigtlaender@gmail.com
|
18c4b876571211b4d59ba56578c12df35106481c
|
5f08d36d8cf92bff8c778eb4fa04e0df4b5768b1
|
/Week10/CurveFitting.py
|
f1719852ab0bba0e27a75c1874c86a835d44f949
|
[] |
no_license
|
npilgram/PHYS202-S13
|
ae22e5ced93fdedfe757187c8a364a9c3cb359a9
|
8ed9162d820e61aae624f5e646b894e83ce5faca
|
refs/heads/master
| 2021-01-02T23:13:38.342579
| 2013-06-15T02:47:14
| 2013-06-15T02:47:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 924
|
py
|
import numpy as np
def LinearLeastSquaresFit(x,y):
"""Take in arrays representing (x,y) values for set of linearly varying data and perform
a linear least squares regression. Return the resulting slope and intercept parameters of
the best fit line with their uncertainties."""
x_ave = np.sum(x)/len(x)
y_ave = np.sum(y)/len(y)
xsqr_ave = np.sum((x*x))/len((x*x))
xy_ave = np.sum((x*y))/len((x*y))
m = (xy_ave - (x_ave*y_ave))/(xsqr_ave - (x_ave**2))
b = ((xsqr_ave*y_ave)-(x_ave*xy_ave))/(xsqr_ave - (x_ave**2))
uncer = np.zeros(len(x))
for i in range(len(x)):
uncer[i]=y[i]-((m*x[i])+b)
uncer_sqr_ave = np.sum((uncer*uncer))/len((uncer*uncer))
m_err = np.sqrt((1/(len(x)-2.))*(uncer_sqr_ave/(xsqr_ave -(x_ave**2))))
b_err = np.sqrt((1/(len(x)-2.))*((uncer_sqr_ave*xsqr_ave)/(xsqr_ave -(x_ave**2))))
return (m,b,m_err,b_err)
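# Usage sketch with synthetic data (the slope 2 and intercept 1 below are
# arbitrary demo values, not from any lab dataset):
if __name__ == '__main__':
    x_demo = np.arange(10.)
    y_demo = 2.*x_demo + 1. + np.random.normal(0., 0.1, 10)
    m, b, m_err, b_err = LinearLeastSquaresFit(x_demo, y_demo)
    print(m, b, m_err, b_err)  # expect m ~ 2 and b ~ 1 within the noise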
|
[
"npilgram@calpoly.edu"
] |
npilgram@calpoly.edu
|
bc48ed3a69d6249ded7e941d7d465964d67fa3dc
|
4d946b12fa07bb4375b687e74bbc785d35c5175b
|
/DIO-intro-python.py
|
1e8c1c1014ebc5344d98c14d8f69b35db3e45730
|
[] |
no_license
|
Thalitachargel/100daysofCode
|
c4994bdc48870fc6b702387fe9ec004148ac73b0
|
0333a3f0358d1309368a4f93fec6759e307d7dba
|
refs/heads/main
| 2023-07-05T02:14:00.105793
| 2021-08-18T22:28:21
| 2021-08-18T22:28:21
| 363,527,384
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,648
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # Introduction to Programming in Python 🐱💻
# ## Digital Innovation One
# ## Module 6
# 🐍 Organizing sets and subsets of elements in Python
#
# In[3]:
# What a set is:
conjunto = {1, 2, 3, 4}
print(type(conjunto))
# In[4]:
# Sets do not allow duplicates
conjunto_Duplo = {1, 2, 3, 4, 4, 2}
print(conjunto_Duplo)
# In[5]:
# adding elements to the set
conjunto.add(5)
conjunto
# In[7]:
# removing an element
conjunto.discard(2)
conjunto
# ### Set operations
# In[11]:
# Union of sets
conj1 = {1, 2, 3, 4, 5}
conj2 = {5, 6, 7, 8}
print( f'conjunto 1 = {conj1} e conjunto 2 = {conj2}')
conj_uniao = conj1.union(conj2)
conj_uniao
# In[12]:
# Intersection of sets
conj_interseccao = conj1.intersection(conj2)
conj_interseccao
# In[16]:
# Difference
conj_diferencaA = conj1.difference(conj2)
conj_diferencaB = conj2.difference(conj1)
print(f"conj1 ≠ conj2 = {conj_diferencaA} e conj2 ≠ conj1 = {conj_diferencaB}")
# In[20]:
# symmetric difference (what the two sets do not share, i.e.,
# all elements that do not appear in both sets)
conj_dif_simetrico = conj1.symmetric_difference(conj2)
conj_dif_simetrico
# ### Membership
# In[33]:
# issubset - whether one set is a subset of another
conjA = {1, 2, 3}
conjB = {1, 2, 3, 4, 5}
conj_subset = conjA.issubset(conjB)
conj_subset2 = conjB.issubset(conjA)
conj_subset2
if conj_subset == True:
print("Conjunto A é subset do Conjunto B")
else:
print("Conjunto B é subset do Conjunto A")
if conj_subset2 == True:
print("Conjunto A é subset do Conjunto B")
else:
print("Conjunto B é subset do Conjunto A")
# In[36]:
# Superset
conj_superset = conjA.issuperset(conjB)
conj_superset1 = conjB.issuperset(conjA)
if conj_superset == True:
print("Conjunto A é superconjunto do Conjunto B")
else:
print("Conjunto B é superconjunto do Conjunto A")
if conj_superset1 == True:
print("Conjunto A é superconjunto do Conjunto B")
else:
print("Conjunto B é superconjunto do Conjunto A")
# In[46]:
# converting a list into a set
lista = ['cachorro', 'gato', 'gato', 'elefante']
conj_animais = set(lista)
print(conj_animais, type(lista), type(conj_animais))
# In[51]:
# converting back to a list
#lista_animais = list(conj_animais)
#print(lista_animais, type(lista_animais))
# ## Module 7 - Building Methods, Functions and Classes in Python
# In[56]:
# IF/else conditional
a = int(input("Primeiro Valor: "))
b = int(input("Segundo valor: "))
if a > b:
print(f'O primeiro valor, {a}, é maior que o segundo valor, {b}.')
else:
print(f'O segundo valor, {b}, é maior que o primeiro valor, {a}.')
# In[58]:
# And Elif
a = int(input("Primeiro Valor: "))
b = int(input("Segundo valor: "))
c = int(input("Terceiro Valor: "))
if a > b and a > c:
print(f'O maior numero é o primerio, {a}.')
elif b > a and b > c:
print(f'O maior numero é o segundo, {b}.')
else:
print(f'O maior numero é o terceiro, {c}.')
# In[62]:
# Exercise
# check whether the typed number is even
n = int(input("Digite um número:"))
if n == 0:
print("Digite um número diferente de zero!")
elif n % 2 == 0:
print(f'O número {n} é par.')
else:
print(f'O número {n} é impar')
# In[63]:
# a function is anything that returns a value
# a method is a definition and does not return a value
def soma(a, b):
    return a + b # since it has a return value, it becomes a function
print(soma(1, 2))
print(soma(3, 4))
# In[64]:
def subtracao(a, b):
return a - b
print(subtracao(10, 2))
# In[76]:
# Class
def multiplicacao(a, b):
return a * b
def divisao(a, b):
return (a / b)
print(multiplicacao(10, 2))
print(divisao(50, 5))
# Turning it into a class
class Calculadora:
def __init__(self, num1, num2):
self.a = num1
self.b = num2
def soma(self):
return self.a + self.b
def subtracao(self):
return self.a - self.b
def multiplicacao(self):
return self.a * self.b
def divisao(self):
return self.a / self.b
calculadora = Calculadora(10, 2)
print(calculadora.a, calculadora.b)
print(calculadora.soma())
print(calculadora.subtracao())
print(calculadora.multiplicacao())
print(calculadora.divisao())
# In[79]:
# Calculator 2
class Calculadora2:
def __init__(self):
pass
def soma(self, a, b):
return a + b
def subtracao(self, a, b):
return a - b
def multiplicacao(self, a, b):
return a * b
def divisao(self, a, b):
return a / b
calculadora2 = Calculadora2()
print(calculadora2.soma(10,2))
print(calculadora2.subtracao(5,3))
print(calculadora2.multiplicacao(100,2))
print(calculadora2.divisao(10,5))
# In[95]:
# creating a television using a class
class Televisao:
def __init__(self):
self.ligada = False
self.canal = 5
def power(self):
if self.ligada:
self.ligada = False
else:
self.ligada = True
def aumenta_canal(self):
if self.ligada:
self.canal += 1
else:
print("A tv está desligada")
def diminui_canal(self):
if self.ligada:
self.canal -= 1
else:
print("A tv está desligada")
televisao = Televisao()
print(f'A televisão está ligada: {televisao.ligada}')
televisao.power()
print(f'A televisão está ligada: {televisao.ligada}')
televisao.power()
print(f'A televisão está ligada: {televisao.ligada}')
print(f'Canal {televisao.canal}')
televisao.aumenta_canal()
televisao.power()
televisao.aumenta_canal()
televisao.aumenta_canal()
print(f'Canal {televisao.canal}')
televisao.diminui_canal()
print(f'Canal {televisao.canal}')
# ### Module 8 - Dealing with modules, importing classes, methods and lambdas
# In[100]:
#modules are the .py files
#import ClasseTelevisao #the proposed exercise only works as a .py script
# In[108]:
def contador_de_letras(lista_palavras):
contador = []
for x in lista_palavras:
quantidade = len(x)
contador.append(quantidade)
return contador
if __name__ == '__main__':
lista = ['cachorro', 'gato']
print(contador_de_letras(lista))
list1=['cachorro', 'gato', 'elefante']
total_de_letras_lista = contador_de_letras(list1)
print(f'Total letras da lista {list1} é {total_de_letras_lista}')
# In[107]:
list1=['cachorro', 'gato', 'elefante']
total_de_letras_lista = contador_de_letras(list1)
print(total_de_letras_lista)
# In[110]:
#Anonymous function
# converting the counter into an anonymous function
lista_animais = ['cachorro', 'gato', 'elefante']
#contador_letras = lambda lista  # parameter : [len(x) for x in lista]  # return value
#iterate x over the list and return len(x) for each element, as a list
contador_letras = lambda lista : [len(x) for x in lista]
contador_letras(lista_animais)
# In[115]:
soma2 = lambda a, b: a + b
soma2(2, 3)
# In[127]:
#creating a dictionary of lambdas
calculadora3 ={ 'soma': lambda a, b: a + b,
'subtracao': lambda a, b : a - b,
'multiplicacao': lambda a, b: a * b,
'divisao': lambda a, b : a / b}
type(calculadora3)
# In[128]:
cal3 = calculadora3['soma']
# In[129]:
cal3(2,3)
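# Dispatch sketch: look the operation up and call it in one expression
# (the 'divisao' key and the 10/5 arguments are demo values).
print(calculadora3['divisao'](10, 5))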
# ### Module 9 - Generate, copy, move and write
# ### Module 10 - Learn to use date and time
#
# In[183]:
#Importing the library
from datetime import date, time, datetime, timedelta
# In[135]:
data_atual = date.today()
data_atual
# In[137]:
#Formatting the current date
data_atual.strftime('%d/%m/%y')
# In[138]:
data_atual.strftime('%d/%m/%Y')
# In[139]:
data_atual.strftime('%d * %m * %y')
# In[140]:
data_atual.strftime('%d ~%m~%y')
# In[143]:
data_atual_str = data_atual.strftime('%A/%B/%Y')
data_atual_str
# In[145]:
type(data_atual) #datetime.date
type (data_atual_str) #string
# In[186]:
# time
def trabalhando_com_date():
data_atual = date.today()
data_atual_str = data_atual.strftime('%A %B %Y')
dia1 = data_atual.day
print(data_atual_str, dia1)
def trabalhando_com_time():
horario = time(hour=15, minute=18, second=30)
print(horario.strftime('%H:%M:%S'))
def trabalhando_com_datetime():
data_atual = datetime.now()
dia = data_atual.strftime('%d %m %y')
hora = data_atual.strftime('%H, %M %S')
completa = data_atual.strftime('%c')
print(data_atual, dia, hora, completa)
print(data_atual.weekday())
tupla = ('Segunda', 'Terça', 'Quarta',
'Quinta', 'Sexta', 'Sábado', 'Domingo')
print(tupla[data_atual.weekday()])
data_criada = datetime(2008, 5, 25, 20, 15, 30, 20)
print(data_criada)
print(data_criada.strftime('%c'))
data_str = '21/03/1985 12:20:22'
data_con = datetime.strptime(data_str, '%d/%m/%Y %H:%M:%S')
print(data_str)
#subtracting date and time
nova_data = data_con - timedelta(days = 365, hours = 2)
print(nova_data.strftime('%d - %m - %Y'))
if __name__ == '__main__':
trabalhando_com_date()
trabalhando_com_time()
trabalhando_com_datetime()
# ### Module 11 - Managing and creating exceptions
# In[187]:
# forcing an error
divisao = 10 / 0
# In[189]:
try:
divisao = 10 / 0
except ZeroDivisionError:
print('Não é possivel dividir por zero')
# In[191]:
# force an error
lista = [1, 10]
numero = lista[3]
# In[198]:
try:
lista = [1, 2]
numero = lista[3]
except IndexError:
print("Erro ao acessar indice inesistente")
except:
print('Erro desconhecido')
# In[204]:
try:
x = alma
print(alma)
except BaseException as ex:
print(f'Erro desconhecido. Erro tipo: {ex}.')
# In[211]:
#else
arquivo = open('teste.txt', 'w')
try:
texto = arquivo.read()
print('fechar arquivo')
arquivo.close()
except ZeroDivisionError:
print("não é possivel dividr por zero")
except ArithmeticError:
print("Erro de op aritmetica")
except IndexError:
print("Erro ao acessar indice inesistente")
except BaseException as ex:
print(f'Erro desconhecido. Erro tipo: {ex}.')
else:
print('Executa quando não ocorre exceção')
# In[213]:
arquivo = open('teste.txt', 'w')
try:
texto = arquivo.read()
print('fechar arquivo')
except ZeroDivisionError:
print("não é possivel dividr por zero")
except ArithmeticError:
print("Erro de op aritmetica")
except IndexError:
print("Erro ao acessar indice inesistente")
except BaseException as ex:
print(f'Erro desconhecido. Erro tipo: {ex}.')
else:
print('Executa quando não ocorre exceção')
arquivo.close()
# In[223]:
#Exercise
while True:
try:
nota = int(input("Digite uma nota entre 0 e 10: "))
print(nota)
        if nota > 10:
break
except ValueError:
print("Valor inválido. Deve-se digitar apenas Numeros.")
# In[222]:
#creating an exception class
class Error(Exception):
pass
class InputError(Error):
def __init__(self, message):
self.message = message
# In[ ]:
while True:
try:
nota = int(input("Digite uma nota entre 0 e 10: "))
print(nota)
if nota > 10:
raise InputError('O valor não pode ser maior que 10')
elif nota < 0:
raise InputError('O valor não pode ser negativo')
break
except ValueError:
print("Valor inválido. Deve-se digitar apenas Numeros.")
except InputError as ex:
print(ex)
# ### Module 12 (final): Installing packages and requests
# In[230]:
get_ipython().system('pip list')
# In[235]:
#libraries or packages
get_ipython().system('pip install requests')
# In[236]:
get_ipython().system('pip freeze')
# In[240]:
import requests
# In[250]:
#testing requests
response = requests.get('https://viacep.com.br/ws/70165900/json/')
print(response.status_code) # success = 200
print(response.text)
print(response.json()) #as a dictionary
print(type(response.text))
# In[252]:
dado_cep = response.json()
print(dado_cep['logradouro'])
print(dado_cep['complemento'])
# In[256]:
def pokemon(nome_pokemon):
response = requests.get(f'https://pokeapi.co/api/v2/pokemon/{nome_pokemon}')
dados_pokemon = response.json()
return dados_pokemon
pokemon('ditto')
# In[257]:
#requesting ordinary websites
def retorna_response(url):
response = requests.get(url)
    return response.text
# In[262]:
print(retorna_response('https://recruit.navercorp.com/global/recruitMain'))
# In[ ]:
|
[
"noreply@github.com"
] |
Thalitachargel.noreply@github.com
|
ce9628165675f68be35472bfb365504846ce6053
|
9f078f64c86af7425d32c5e498b0af4458543a8b
|
/bin/owl
|
084c37efcdf13305ac24c9f84df33c20cb7572e6
|
[] |
no_license
|
lucassmagal/Owl
|
7252ba86f7b4a1deea1be10d1f1a715476585712
|
40272bdc03415d6073ea62da39399e5a2ace1344
|
refs/heads/master
| 2021-01-01T20:17:36.396245
| 2013-05-12T14:43:09
| 2013-05-12T14:43:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from owl.cli import run
run(sys.argv[1:])
|
[
"magal.lucas@gmail.com"
] |
magal.lucas@gmail.com
|
|
07a5041034955e430ae5f1723511345940c3a5b6
|
1f4e6b4045df3a0313880da83e69f10d44c8bab4
|
/4 Data types 3listpy.py
|
15a8f1cd8c2ab2129144c106420c4f510286c828
|
[] |
no_license
|
AhmedAliGhanem/PythonForNetowrk-Cisco
|
daee76b17cc271b56516d559a8fb4184656a15b0
|
f71b2325db69cb5eb9c6a9fe0c6f04a217468875
|
refs/heads/master
| 2022-01-12T09:26:25.338980
| 2019-06-22T16:18:23
| 2019-06-22T16:18:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
#List
years = [2000, 2001, 2002]
print(years)
Repeatable = [2000, 2001, 2000]
print(Repeatable)
mix = [2000, "yasser", 2002]
print(mix)
x = ["A", "B", "C"]
y = ["D", "E"]
z = x + y
print(z)
z = x * 3
print(z)
z = "A" in y
print(z)
fastethernet_speed=['auto', '10', '100']
print(fastethernet_speed)
print(fastethernet_speed[0])
portList = []
portList.append(21)
portList.append(80)
portList.append(443)
portList.append(25)
print(portList)
portList.sort()
print(portList)
pos = portList.index(80)
print ("[+] There are "+str(pos)+" ports to scan before 80.")
portList.remove(443)
print(portList)
test = 'CCIE CCNP CCNA and CCNT'
print(test.split())
fastethernet_duplex = 'auto half full'
fastethernet_duplex_list = fastethernet_duplex.split()
print(fastethernet_duplex_list)
fastethernet_duplex_list[0] = 'Auto'
fastethernet_duplex_list[1] = 'Half'
fastethernet_duplex_list[2] = 'Full'
print(fastethernet_duplex_list)
print(fastethernet_duplex_list[0])
del fastethernet_duplex_list[0]
print(fastethernet_duplex_list)
print('Auto' in fastethernet_duplex_list)
|
[
"noreply@github.com"
] |
AhmedAliGhanem.noreply@github.com
|
c10ea4aaf707b2472d05f5082eeeb2cded2d7235
|
e69aa8050bced4e625928e3e18e14e892ba860dc
|
/Partial_Permutations.py
|
5886fbe97a1612b6882017146363daf371f01eae
|
[
"MIT"
] |
permissive
|
Esprit-Nobel/ROSALIND
|
e65cf85f1fbe0660cda547926d91be3f109edce9
|
ec964f6d9cc5d97339106c89df865fb105251928
|
refs/heads/master
| 2021-01-12T18:16:19.768656
| 2016-11-02T14:06:50
| 2016-11-02T14:06:50
| 71,357,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 18 18:18:43 2016
@author: yannick
"""
import sys
import math
with open(sys.argv[1], "r") as fichier_lu:
CONTENU = fichier_lu.readlines()
NUM = CONTENU[0].strip("\n\r\t ").split()
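# Number of partial permutations: P(n, k) = n! / (n - k)!, reported
# modulo 1,000,000 as the ROSALIND problem statement asks.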
TOT = ( math.factorial( long(NUM[0]) ) / \
math.factorial( long(NUM[0])-long(NUM[1]) ) ) \
% 1000000
print TOT
|
[
"esprit.nobel@orange.fr"
] |
esprit.nobel@orange.fr
|
b29d6c67789222e938357e11fa5b9b8b77863402
|
6bbe91ea2ebc098b7b7ea5179761f7852875b3f6
|
/pugbot.py
|
12aea378d489f64a42a51b0e33ce7a3006854f7e
|
[] |
no_license
|
dpyro/pugbot
|
531c1c83ceac8d8ae937247af2d1221752c35f13
|
1e8f1b30de11f98be9ee53c2128104758f997799
|
refs/heads/master
| 2021-01-20T13:49:58.008215
| 2010-08-27T19:33:28
| 2010-08-27T19:33:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,453
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# vim: enc=utf-8
from __future__ import print_function
from sys import stderr
import logging
import re
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol, task
from pugdata import *
from pugserver import public_ip
def connectSSL(irc_server, irc_port, app):
f = PugBotFactory(app)
reactor.connectSSL(irc_server, irc_port, f, ssl.ClientContextFactory())
def connectTCP(irc_server, irc_port, app):
f = PugBotFactory(app)
reactor.connectTCP(irc_server, irc_port, f)
# needed for @command decorator
_commands = {}
class PugBot(irc.IRCClient):
_re_stripper = re.compile("""[\x0f\x02\x1f\x16\x1d\x11] | # formatting
\x03(?:\d{1,2}(?:,\d{1,2})?)? | # mIRC colors
\x04[0-9a-fA-F]{0,6} # rgb colors
""", re.UNICODE | re.VERBOSE)
@staticmethod
def _strip_all(str):
return PugBot._re_stripper.sub('', str)
@staticmethod
def _has_color(str):
str_strip = PugBot._strip_all(str)
return str != str_strip
MSG_INFO = 0x1
MSG_CONFIRM = 0x2
MSG_ERROR = 0x3
def __init__(self, app):
self.app = app
self.nickname = app.irc_nick
self.password = app.irc_pass
self.color = app.irc_color
self.lineRate = .75
self.versionName = 'PugBot'
self.keep_alive = task.LoopingCall(self._ping)
self.nickmodes = {}
self.users = {} # (nick, PugUser)
self.logger = logging.getLogger("PugApp.PugBot")
def _colorize(self, str, type):
color_dict = {
self.MSG_ERROR : '\x02\x035,01',
self.MSG_INFO : '\x02\x030,01',
self.MSG_CONFIRM : '\x02\x033,01'
}
color_reset = '\x0f'
if self.color:
# only automatically color if no (custom) color formatting is already present
str = color_dict.get(type, '') + str + color_reset if not self._has_color(str) else str + color_reset
else:
str = self._strip_all(str)
return str
# overrides
def msg(self, user, message, type=None):
message_stripped = self._strip_all(message)
log_message = u"{0} (msg) ← {1}".format(user, message_stripped)
self.logger.info(log_message) if user != self.app.irc_server else self.logger.debug(log_message)
if type is not None:
message = self._colorize(message, type)
nick = PugBot._get_nick(user)
irc.IRCClient.msg(self, nick, message)
def notice(self, user, message, type=None):
message_stripped = self._strip_all(message)
self.logger.info(u"{0} (notice) ← {1}".format(user, message_stripped))
if type is not None:
message = self._colorize(message, type)
nick = PugBot._get_nick(user)
irc.IRCClient.notice(self, nick, message)
def describe(self, channel, action):
self.logger.info("{0} (action) ← {1}".format(channel, action))
irc.IRCClient.describe(self, channel, action)
def whois(self, nickname, server=None):
self.logger.debug(u"Requested WHOIS {0}".format(nickname))
irc.IRCClient.whois(self, nickname, server)
# callbacks
def signedOn(self):
self.logger.info(u"Signed onto IRC network {0}:{1}".format(self.app.irc_server, self.app.irc_port))
self._nickserv_login()
self.join(self.app.irc_channel)
self.keep_alive.start(100)
def joined(self, channel):
self.app.print_irc("* joined channel {0}".format(channel))
self.logger.info(u"Joined channel {0}".format(channel))
self._who(channel)
self.whois(self.app.irc_nick)
def left(self, channel):
self.app.print_irc("* left channel {0}".format(channel))
self.logger.info(u"Left channel {0}".format(channel))
self.nickmodes.clear()
self.users.clear()
def kickedFrom(self, channel, kicker, message):
self.logger.warning(u"Kicked from {0} by {1} ({2})".format(channel, kicker, message))
self.nickmodes.clear()
self.users.clear()
task.deferLater(reactor, 5.0, self.join, self.app.irc_channel)
def nickChanged(self, nick):
self.logger.warning(u"Nick changed to: {0}".format(nick))
def privmsg(self, user, channel, msg):
msg = self._strip_all(msg)
self.logger.info(u":{0} (msg) → {1}: {2}".format(user, channel, msg))
cmd = msg.split(' ', 1)[0].lower()
nick = PugBot._get_nick(user)
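        # Dispatch: _commands maps each command string to a
        # (handler, required_access) pair; an access of None means the
        # command runs with no access check.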
if cmd in _commands:
cmd_f, cmd_access = _commands[cmd]
if cmd_access is None:
cmd_f(self, user, channel, msg)
elif nick not in self.users:
self.whois(nick)
self.notice(user, "Refreshing access list, please try again shortly.", self.MSG_ERROR)
elif self.users[nick].irc_access >= cmd_access:
cmd_f(self, user, channel, msg)
else:
self.notice(user, "You don't have access to this command!", self.MSG_ERROR)
def noticed(self, user, channel, msg):
self.logger.info(u"{0} (notice) → {1}: {2}".format(user, channel, msg))
def action(self, user, channel, data):
        self.logger.info(u"{0} (action) → {1}: {2}".format(user, channel, data))
def _purge_user(self, user, reason):
self.logger.info(u"{0}: {1}".format(user, reason))
nick = PugBot._get_nick(user)
if nick in self.users:
p_user = self.users[nick]
if p_user in self.app.players:
self.app.remove(p_user)
self.logger.debug(u"Removed user {0} from game ({1})".format(nick, reason))
                self._list_players(self.app.irc_channel)
del self.users[nick]
def userLeft(self, user, channel):
reason = u"left {0}".format(channel)
if channel.lower() == self.app.irc_channel:
self._purge_user(user, reason)
def userQuit(self, user, quitMessage):
reason = u"quit ({0})".format(quitMessage)
self._purge_user(user, reason)
def userKicked(self, kickee, channel, kicker, message):
reason = u"kicked by {0} in {1} ({2})".format(kicker, channel, message)
if channel.lower() == self.app.irc_channel:
self._purge_user(kickee, reason)
def userRenamed(self, oldname, newname):
if oldname in self.users:
p_user = self.users[oldname]
p_user.irc_name = newname
            # persist the rename (assumes the shared DB session lives on the
            # app object; PugBot itself defines no db_session)
            self.app.db_session.add(p_user)
            self.app.db_session.commit()
self.users[newname] = p_user
del self.users[oldname]
self.logger.info(u"User renamed: {0} → {1}".format(oldname, newname))
def modeChanged(self, user, channel, set, modes, args):
if channel.lower() == self.app.irc_channel:
self._who(channel)
mode_prefix = '+' if set else '-'
for mode, arg in zip(modes, args):
self.logger.debug(u"{0} → {1} mode change: {2}{3} {4}".format(
user, channel, mode_prefix, mode, arg))
def pong(self, user, secs):
self.logger.debug(u"{0} (pong) ← {1}".format(user, secs))
def irc_RPL_WHOREPLY(self, prefix, args):
me, chan, uname, host, server, nick, modes, name = args
        log_msg = u"Received WHOREPLY: chan: {0}, uname: {1}, host: {2}, server: {3}, nick: {4}, modes: {5}, name: {6}".format(
            chan, uname, host, server, nick, modes, name)
self.logger.debug(log_msg)
if chan.lower() == self.app.irc_channel:
access = PugBot._get_access(modes)
self.nickmodes[nick] = access
self.logger.debug(u"Set {0} to access level {1}".format(nick, access))
def irc_RPL_ENDOFWHO(self, prefix, args):
        self.logger.debug(u"Received WHO list: {0}".format(args))
def irc_RPL_WHOISUSER(self, prefix, args):
self.logger.debug(u"WHOIS list: {0}".format(args))
def irc_RPL_WHOISACCOUNT(self, prefix, args):
me, nick, account, msg = args
self.logger.debug(u"WHOIS account: nick: {0}, account {1}".format(nick, account))
if nick in self.users:
self.users[nick].irc_account = account
else:
p_user = PugUser(nick, account)
self.users[nick] = p_user
def irc_RPL_ENDOFWHOIS(self, prefix, args):
        self.logger.debug(u"Received WHOIS: {0}".format(args))
@staticmethod
def _get_nick(user):
return user.split('!', 1)[0]
@staticmethod
def _get_access(modes):
mode_dict = {
'@': PugUser.IRC_OP,
'+': PugUser.IRC_VOICED
}
for key, val in mode_dict.iteritems():
if key in modes:
return val
return PugUser.IRC_USER
def _who(self, channel):
msg = 'WHO {0}'.format(channel.lower())
self.logger.debug(u"Requested {0}".format(msg))
self.sendLine(msg)
def _ping(self):
self.ping(self.app.irc_server)
def _nickserv_login(self):
self.msg('NickServ@services.', 'IDENTIFY {0} {1}'.format(self.nickname, self.password))
def _authserv_login(self):
self.msg('AuthServ@services.', 'AUTH {0} {1}'.format(self.nickname, self.password))
def _list_players(self, channel):
players = self.app.players
if len(players) == 0:
self.msg(channel, "No players are currently signed up.", self.MSG_INFO)
else:
player_list = ', '.join((p.irc_nick for p in self.app.players))
suffix = 's' if len(self.app.players) != 1 else ''
self.msg(channel, "{0} player{1}: {2}".format(len(players), suffix, player_list), self.MSG_INFO)
def _teams(self, channel):
team1, team2 = self.app.teams()
team1 = ', '.join((p.irc_nick for p in team1))
team2 = ', '.join((p.irc_nick for p in team2))
self.msg(channel, "10,01BLU Team: {0}".format(team1))
self.msg(channel, "05,01RED Team: {0}".format(team2))
msg_red = "You have been assigned to RED team. Connect as soon as possible to {0}:{1}".format(
self.app.rcon_server, self.app.rcon_port)
msg_blu = "You have been assigned to BLU team. Connect as soon as possible to {0}:{1}".format(
self.app.rcon_server, self.app.rcon_port)
        [self.msg(p.irc_nick, msg_blu, self.MSG_INFO) for p in team1]
        [self.msg(p.irc_nick, msg_red, self.MSG_INFO) for p in team2]
class command(object):
def __init__(self, name, access=None):
self.name = name
self.access = access
    def __call__(self, f):
        global _commands
        def exec_cmd(*args):
            try:
                return f(*args)
            except Exception as e:
                print(e, file=stderr)
                logging.getLogger("PugApp.PugBot").exception(e)
        # register the wrapped handler so dispatch through _commands gets
        # the same error handling
        if not isinstance(self.name, str):
            for name in self.name:
                _commands[name.lower()] = (exec_cmd, self.access)
        else:
            _commands[self.name.lower()] = (exec_cmd, self.access)
        return exec_cmd
# commands
@command('!startgame', PugUser.IRC_OP)
def cmd_startgame(self, user, channel, msg):
self.app.startgame()
self.msg(channel, "Game started. Type !add to join the game.", self.MSG_INFO)
@command([ '!add', '!a' ], PugUser.IRC_USER)
def cmd_join(self, user, channel, msg):
nick = PugBot._get_nick(user)
p_user = self.users[nick]
if self.app.game is not None:
if p_user not in self.app.players:
self.app.add(p_user)
self.notice(user, "You successfully added to the game.", self.MSG_CONFIRM)
if len(self.app.players) >= 12:
self._teams(channel)
else:
self._list_players(channel)
else:
self.notice(user, "You have already signed up for the game!", self.MSG_ERROR)
else:
self.notice(user, "There is no active game to sign up for!", self.MSG_ERROR)
@command('!join')
def cmd_add(self, user, channel, msg):
self.notice(user, "Please use !add instead.", self.MSG_ERROR)
@command([ '!remove', '!r' ], PugUser.IRC_USER)
def cmd_remove(self, user, channel, msg):
nick = PugBot._get_nick(user)
p_user = self.users[nick]
if p_user in self.app.players:
self.app.remove(p_user)
self.notice(user, "You successfully removed from the game.", self.MSG_CONFIRM)
self._list_players(channel)
else:
self.notice(user, "You are not in the game!", self.MSG_ERROR)
@command(('!players', '!p'))
def cmd_list(self, user, channel, msg):
if self.app.game is None:
self.msg(channel, "There is no game running currently.", self.MSG_INFO)
else:
self._list_players(channel)
@command('!endgame', PugUser.IRC_OP)
def cmd_endgame(self, user, channel, msg):
if self.app.game is not None:
self.app.endgame()
self.msg(channel, "Game ended.", self.MSG_INFO)
else:
self.notice(user, "There is no game to be ended!", self.MSG_ERROR)
@command('!server')
def cmd_server(self, user, channel, msg):
info = self.app.serverinfo()
self.msg(channel, "connect {0}:{1};".format(self.app.rcon_server, info['port']), self.MSG_INFO)
#TODO: Why does it give key errors when using format()?
self.msg(channel, "%(map)s | %(numplayers)s / %(maxplayers)s | stv: %(specport)s" % (info), self.MSG_INFO)
@command('!mumble')
def cmd_mumble(self, user, channel, msg):
self.msg(channel, ("Mumble is the shiniest new voice server/client used by players to communicate with each other.\n"
"It's not laggy as hell like Ventrilo and has a sweet ingame overlay. Unfortunately, Europeans use it.\n"
"Mumble IP: {0} port: {1}").format(self.app.mumble_server, self.app.mumble_port), self.MSG_INFO)
@command('!version')
def cmd_version(self, user, channel, msg):
self.msg(channel, "PugBot: 3alpha", self.MSG_INFO)
@command('!bear')
def cmd_bear(self, user, channel, msg):
        self.describe(channel, "goes 4rawr!")
@command('!magnets')
    def cmd_magnets(self, user, channel, msg):
self.msg(channel, "What am I, a scientist?", self.MSG_INFO)
@command('!rtd')
def cmd_rtd(self, user, channel, msg):
nick = PugBot._get_nick(user)
self.msg(channel, "Don't be a noob, {0}.".format(nick), self.MSG_INFO)
@command('!whattimeisit')
def cmd_whattimeisit(self, user, channel, msg):
nick = PugBot._get_nick(user)
self.msg(channel, "Go back to #tf2.pug.na, {0}.".format(nick))
class PugBotFactory(protocol.ReconnectingClientFactory):
protocol = PugBot
def __init__(self, app):
self.app = app
self.logger = logging.getLogger("PugApp.PugBot")
def buildProtocol(self, addr):
self.resetDelay()
p = PugBot(self.app)
p.factory = self
return p
def clientConnectionLost(self, connector, reason):
msg = "connection lost, reconnecting: {0}".format(reason)
self.app.print_irc(msg)
self.logger.error(msg)
protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionFailed(self, connector, reason):
msg = "connection failed: {0}".format(reason)
self.app.print_irc(msg)
self.logger.error(msg)
        protocol.ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
|
[
"darkpyro@gmail.com"
] |
darkpyro@gmail.com
|
791732e7600f99bd887cc9a3ffddbe99e5bf9c14
|
fe7feafddd2b03814046039411afbaafd1cdcd43
|
/b7/b7.py
|
7b38e13986d44c3e30b1ada5028f64862afafad0
|
[] |
no_license
|
vamsikrishna2421/Scripting-Languages
|
d87e668eb8d0d9002cd32bed301a7795d4477908
|
89fc94ff6d0e77a4e75bbee3a127ac0b3e16cb51
|
refs/heads/master
| 2020-06-25T20:14:41.082900
| 2017-12-26T21:09:37
| 2017-12-26T21:09:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
fname=raw_input("Enter filename: ")
df=pd.read_csv(fname)
print "Headers of the Dataset are :-"
#print df.columns.values
print df.head(0)
print "**Dataset Description**"
df.info()
print df.describe()
df=df.drop(["Pclass","PassengerId","Parch","Name"],axis=1)
print "\n\nAfter Dropping Unwanted Columns:-\n",df.head(0)
print "\nWith Empty Column Values:-\n",df
df['Cabin']=df["Cabin"].fillna("CC55")
print "\n\nWithout Empty Column Values:-\n",df
print "\n\nNumber of Entries: ",len(df)
print "\nNumber of Columns: ",len(df.columns)
print "\n\nAttributes and Their Datatypes:-\n"
df.info()
print "\nMinimum Age: ",df['Age'].min()
print "\nMaximum Age: ",df['Age'].max()
print "\nMean Age: ",round(df['Age'].mean(),2)
gp=df['Age'].plot.hist()
gp.set_ylabel("Number of People")
gp.set_xlabel("Age")
gp.set_title("Age vs No. of People")
plt.show()
dgp=df.Age.plot()
dgp.set_ylabel("Age")
dgp.set_xlabel("Number of People")
dgp.set_title("Age vs No. of People")
plt.show()
|
[
"noreply@github.com"
] |
vamsikrishna2421.noreply@github.com
|
99aaac368b5a91c9ccdea2dd36758013b517c21f
|
03db4adc692a989a0dbc90d1175bdabaaa5341b3
|
/Lab-9-hhu14.py
|
2af4c479900e134d179c8572e04c55d197cc93ab
|
[] |
no_license
|
MeloHu/CS128
|
e70d18668238533cd0e8b42588e5d7b0e88c8766
|
e2595aea14f150efce25816b1215f35ef319b751
|
refs/heads/master
| 2021-05-05T10:06:35.993342
| 2017-09-19T04:24:44
| 2017-09-19T04:24:44
| 104,021,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,745
|
py
|
# coding: utf-8
# In[ ]:
## Honglie Hu,hhu14@earlham.edu,Saturday&8:00 at 66.5486,-18.0233,Lab9-M&M
# In[1]:
## Part A
def main():
filename = input("file_name:")
file = open(filename,"r")
text = file.read()
length = text.split()
count=len(length)
print("Count:",count)
TheNumber =[float(i) for i in length]
file.close()
## to get "mode"
mNumber = [TheNumber.count(i) for i in TheNumber]
position = max(mNumber)
positionArray = mNumber.index(position)
Mode = TheNumber[positionArray]
## --------------------------------------
print("Total:",sum(TheNumber))
print("Smallest:",min(TheNumber))
print("Largest:",max(TheNumber))
average=sum(TheNumber)/count
print("Mode:",Mode)
print("Average:",average)
main()
# In[2]:
## Part B
def main():
import math
filename = input("file_name:")
file = open(filename,"r")
text = file.read()
length = text.split()
count = len(length)
print("Count:",count)
TheNumber =[float(i) for i in length]
file.close()
## to get "mode"
mNumber = [TheNumber.count(i) for i in TheNumber]
position = max(mNumber)
positionArray = mNumber.index(position)
Mode = TheNumber[positionArray]
## ---------------------------------
print("Total:",sum(TheNumber))
print("Smallest:",min(TheNumber))
print("Largest:",max(TheNumber))
average=sum(TheNumber)/count
print("Mode:",Mode)
## to get "median"
TheNumber.sort()
medianNumber = len(TheNumber)
Median =(TheNumber[math.floor(medianNumber/2)])
print("Median:",Median)
## ----------------------------------
print("Average:",average)
main()
# In[ ]:
|
[
"noreply@github.com"
] |
MeloHu.noreply@github.com
|
d1c81821046b2be4b5a8b0c06ec25dac7c3a6841
|
cc33d6045e08be160dcae36dc9e9e24d190878d8
|
/visualization/visualizer2d.py
|
84f60383c873dd67276d11a3c1b07c0329de4ffb
|
[
"Apache-2.0"
] |
permissive
|
StanfordVL/visualization
|
66ca20a2d7d435b1fc950dfbed4e205488418e3a
|
06a179c550a608a548f3cad70f06dd0c8610fa66
|
refs/heads/master
| 2021-06-23T01:54:16.414594
| 2017-08-30T01:05:43
| 2017-08-30T01:05:43
| 105,830,187
| 0
| 0
| null | 2017-10-04T23:35:12
| 2017-10-04T23:35:12
| null |
UTF-8
|
Python
| false
| false
| 5,636
|
py
|
"""
Common 2D visualizations using pyplot
Author: Jeff Mahler
"""
import numpy as np
import IPython
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
from autolab_core import Box, Contour
from perception import BinaryImage, ColorImage, DepthImage, GrayscaleImage, RgbdImage, GdImage, SegmentationImage
class Visualizer2D:
@staticmethod
def figure(size=(8,8), *args, **kwargs):
""" Creates a figure.
Parameters
----------
size : 2-tuple
size of the view window in inches
        args : list
            positional args passed to the pyplot figure
        kwargs : dict
            keyword args passed to the pyplot figure
Returns
-------
pyplot figure
the current figure
"""
return plt.figure(figsize=size, *args, **kwargs)
@staticmethod
def show(*args, **kwargs):
""" Show the current figure """
plt.show(*args, **kwargs)
@staticmethod
def clf(*args, **kwargs):
""" Clear the current figure """
plt.clf(*args, **kwargs)
@staticmethod
def xlim(*args, **kwargs):
""" Set the x limits of the current figure """
plt.xlim(*args, **kwargs)
@staticmethod
def ylim(*args, **kwargs):
""" Set the y limits the current figure """
plt.ylim(*args, **kwargs)
@staticmethod
def savefig(*args, **kwargs):
""" Save the current figure """
plt.savefig(*args, **kwargs)
@staticmethod
def colorbar(*args, **kwargs):
""" Adds a colorbar to the current figure """
plt.colorbar(*args, **kwargs)
@staticmethod
def subplot(*args, **kwargs):
""" Creates a subplot in the current figure """
plt.subplot(*args, **kwargs)
@staticmethod
def title(*args, **kwargs):
""" Creates a title in the current figure """
plt.title(*args, **kwargs)
@staticmethod
def xlabel(*args, **kwargs):
""" Creates an x axis label in the current figure """
plt.xlabel(*args, **kwargs)
@staticmethod
def ylabel(*args, **kwargs):
""" Creates an y axis label in the current figure """
plt.ylabel(*args, **kwargs)
@staticmethod
def legend(*args, **kwargs):
""" Creates a legend for the current figure """
plt.legend(*args, **kwargs)
@staticmethod
def scatter(*args, **kwargs):
""" Scatters points """
plt.scatter(*args, **kwargs)
@staticmethod
def plot(*args, **kwargs):
""" Plots lines """
plt.plot(*args, **kwargs)
@staticmethod
def imshow(image, **kwargs):
""" Displays an image.
Parameters
----------
image : :obj:`perception.Image`
image to display
"""
if isinstance(image, BinaryImage) or isinstance(image, GrayscaleImage):
plt.imshow(image.data, cmap=plt.cm.gray, **kwargs)
elif isinstance(image, ColorImage) or isinstance(image, SegmentationImage):
plt.imshow(image.data, **kwargs)
elif isinstance(image, DepthImage):
plt.imshow(image.data, cmap=plt.cm.gray_r, **kwargs)
elif isinstance(image, RgbdImage):
# default to showing color only, for now...
plt.imshow(image.color.data, **kwargs)
elif isinstance(image, GdImage):
# default to showing gray only, for now...
plt.imshow(image.gray.data, cmap=plt.cm.gray, **kwargs)
plt.axis('off')
@staticmethod
def box(b, line_width=2, color='g', style='-'):
""" Draws a box on the current plot.
Parameters
----------
b : :obj:`autolab_core.Box`
box to draw
line_width : int
width of lines on side of box
color : :obj:`str`
color of box
style : :obj:`str`
style of lines to draw
"""
if not isinstance(b, Box):
raise ValueError('Input must be of type Box')
# get min pixels
min_i = b.min_pt[1]
min_j = b.min_pt[0]
max_i = b.max_pt[1]
max_j = b.max_pt[0]
top_left = np.array([min_i, min_j])
top_right = np.array([max_i, min_j])
bottom_left = np.array([min_i, max_j])
bottom_right = np.array([max_i, max_j])
# create lines
left = np.c_[top_left, bottom_left].T
right = np.c_[top_right, bottom_right].T
top = np.c_[top_left, top_right].T
bottom = np.c_[bottom_left, bottom_right].T
# plot lines
plt.plot(left[:,0], left[:,1], linewidth=line_width, color=color, linestyle=style)
plt.plot(right[:,0], right[:,1], linewidth=line_width, color=color, linestyle=style)
plt.plot(top[:,0], top[:,1], linewidth=line_width, color=color, linestyle=style)
plt.plot(bottom[:,0], bottom[:,1], linewidth=line_width, color=color, linestyle=style)
@staticmethod
def contour(c, subsample=1, size=10, color='g'):
""" Draws a contour on the current plot by scattering points.
Parameters
----------
c : :obj:`autolab_core.Contour`
contour to draw
subsample : int
subsample rate for boundary pixels
size : int
size of scattered points
color : :obj:`str`
color of box
"""
if not isinstance(c, Contour):
raise ValueError('Input must be of type Contour')
for i in range(c.num_pixels)[0::subsample]:
plt.scatter(c.boundary_pixels[i,1], c.boundary_pixels[i,0], s=size, c=color)
|
[
"jmahler@berkeley.edu"
] |
jmahler@berkeley.edu
|
4ceac2bc4f9946d9f2573cc41ecd8880bc8d7375
|
d200aee203cb0d384213747d5fd3934c80c9728a
|
/Python/First Exercise.py
|
5632cb72b27aebec64b0d76abd7f701832a9d8de
|
[] |
no_license
|
Dmiller2599/BFOR206
|
1ab1b43e38423926080f15f2b7d50c44612906f3
|
a8cae7cc8fe60f63175e3abc6ed4b4f7a68ac247
|
refs/heads/main
| 2023-03-20T19:17:18.063724
| 2021-03-21T03:29:53
| 2021-03-21T03:29:53
| 335,765,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,525
|
py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a block comment.
I can type whatever I desire here.
This script will demonstrate basic variables,
if statements, and for loops with Python.
"""
# this is a normal comment.
"""
The comment below defines a chunk that Spyder can
use to separate parts of code into small blocks.
This makes it easy to run just a small part of your code.
The syntax is
#%%
If you want to name a chunk
#%% chunk name
"""
#%% define vars
# To run a single line, press F9
my_str = "this is a string"
print(my_str)
my_num = 123.456789
my_int = 123
print(my_num, my_int)
# to run entire chunk
# Ctrl + Enter (Cmd + Enter on Mac)
# to run the entire chunk and go to the next chunk
# Shift + Enter
#%% if statements
a = 0
b = 1
print("the value of a is:", a)
if a > b:
# Everything indented is part of the if statement
print("a is greater than b. Wow!")
elif a < b:
print("a is less than b. Weak!")
else:
print("a and b are the same, eh?")
print("Done with if statements.")
#%% for loops
for i in range(10):
print("the number i is", i)
#%% nested statements
for i in range(5, 10):
print("i is ", i)
# indents are important!
for j in range(3):
print("j is ", j)
print("done with nested loops")
#%% lab
"""
Fix this code below to complete the lab
"""
my_list = ['Hello', 'BFOR', 206, None, 'Bye!']
for item in my_list:
if item is None:
print("Found item with value of None!")
else:
print("The item is", item)
|
[
"david.miller2599@gmail.com"
] |
david.miller2599@gmail.com
|
c2a6d24f20bb1c2478b4feea8182623aca53bac4
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_14413.py
|
5e67c83692878ae8becbb59fe8019e05781959d1
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
# changing the type of an entry in a dictionary throws an error
from datetime import datetime

d = {'today': datetime.today()}
d['today'] = d['today'].strftime('%Y-%m-%d')  # example format string; substitute your own
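# the rebound value is now a plain string, e.g. "2015-10-22"
print(type(d['today']).__name__, d['today'])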
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
1e65771ae50c232198a15764de8fa56266b68719
|
646c30e512e7ead3de33974e856f1a0ef242fec8
|
/Educational Codeforces Round 73/C. Perfect Team.py
|
b972a3b41cc05690f80a2878cd9c667420188aad
|
[] |
no_license
|
Hybrid-Skills/Codeforces
|
2194f6d78186b8f6af5389ae6eccdd45c724ee23
|
68f281ba3d14ee039aa89238f3545bf06b90bc74
|
refs/heads/master
| 2020-06-03T02:38:24.502731
| 2019-11-16T16:30:25
| 2019-11-16T16:30:25
| 191,399,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
for _ in range(int(input())):
    c, m, x = map(int, input().split())
    maxi = (c + m + x) // 3
    print(min(c, m, maxi))
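# Why min(c, m, (c + m + x) // 3) is the answer: each perfect team needs one
# coder, one mathematician, and a third member of any specialty, so the team
# count is capped by the coder supply, the mathematician supply, and by
# total_people // 3 -- the code above takes the tightest of the three caps.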
|
[
"ankurgoyal616@gmail.com"
] |
ankurgoyal616@gmail.com
|
9cff0f2a316ca7bb8e9daefe0127a1a8ef5609ea
|
90177443dddd57dc7a8ad2cfb0758b0abb2a10f2
|
/currency_converter.py
|
d786177fbbec6974fdfcfcaa260732194b7bc02b
|
[] |
no_license
|
Tengtiantian/data-analytics
|
18181d6d17d4077f503f505500f50b1fdb6efe44
|
ac63bde80a4355c2d0911f60fd8f55683ae026bf
|
refs/heads/main
| 2023-07-10T15:53:32.334067
| 2021-08-17T09:19:25
| 2021-08-17T09:19:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
"""
作者:梁斌
功能:汇率兑换
版本:1.0
日期:01/08/2017
"""
# 汇率
USD_VS_RMB = 6.77
# 人民币的输入
rmb_str_value = input('请输入人民币(CNY)金额:')
# 将字符串转换为数字
rmb_value = eval(rmb_str_value)
# 汇率计算
usd_value = rmb_value / USD_VS_RMB
# 输出结果
print('美元(USD)金额是:', usd_value)
|
[
"wangsiyuan_id@126.com"
] |
wangsiyuan_id@126.com
|
0239f50106a25909870229a9b49d17ca773a5c68
|
d125a7467b815ea3027567b0a6976c8ad730beb9
|
/src/itsmservice/itsmservice/conf/product.py
|
82ef0f132d7f11c4ceea13a01500a1df5a454f0f
|
[] |
no_license
|
sunyaxiong/itsmservice
|
06a1cb38b7314695613e2432f2e1d56c86aad815
|
e50fccae9ae536ac520337ec79b1d1c985e49aa4
|
refs/heads/master
| 2022-12-12T11:14:03.838601
| 2018-10-31T06:17:25
| 2018-10-31T06:17:25
| 137,029,391
| 0
| 0
| null | 2022-12-08T00:58:47
| 2018-06-12T06:50:01
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,765
|
py
|
# cas conf
SUCC_REDIRECT_URL = "itsm.ecscloud.com"
CAS_SERVER_URL = "http://cas.ecscloud.com/cas/"
CMP_URL = "http://cmp.ecscloud.com"
# CAS_REDIRECT_URL = "http://www.baidu.com"
# databases conf
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'itsm',
'USER': 'root',
'PASSWORD': 'Itsm@vstecs.com',
'HOST': '127.0.0.1',
'PORT': '3306',
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
'charset': 'utf8mb4',
},
},
'cas_db': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cas',
'USER': 'root',
'PASSWORD': 'Db@vstecs.com',
"HOST": "172.31.31.255",
"PORT": "3306",
},
}
# use multi-database in django
DATABASE_ROUTERS = ['itsmservice.database_router.DatabaseAppsRouter']
DATABASE_APPS_MAPPING = {
# example:
# 'app_name':'database_name',
'cas_sync': 'cas_db',
}
# fit2cloud api conf
INTERNET_HOST = "cmp.ecscloud.com"
CLOUD_HOST = "172.16.13.155"
CMDB_HOST = "172.16.13.155"
access_key = "My00ZjRkMzVkZA=="
cloud_secret_key = "228e1f50-3b39-4213-a8d8-17e8bf2aeb1e"
CMDB_CONF = {
"access_key": access_key,
"version": "v1",
"signature_method": "HmacSHA256",
"signature_version": "v1"
}
CLOUD_CONF = {
"access_key": access_key,
"version": "v1",
"signature_method": "HmacSHA256",
"signature_version": "v1",
"user": "sunyaxiong@vstecs.com",
}
secret_key = cloud_secret_key
# cloud_secret_key = '228e1f50-3b39-4213-a8d8-17e8bf2aeb1e'
# mail
EMAIL_HOST = 'smtp.163.com'
EMAIL_PORT = 25
EMAIL_HOST_USER = 'sunyaxiongnn@163.com'
EMAIL_HOST_PASSWORD = 'Sun880519'
EMAIL_SUBJECT_PREFIX = u'[vstecs.com]'
EMAIL_USE_TLS = True
|
[
"sunyaxiong"
] |
sunyaxiong
|
479f083fa79fc3fdc8c1cf6c85a8c0a00641158c
|
a62b70e3eed1bee2b2214f1f78be131d9485f370
|
/codes/app.py
|
3794515aeaaa27242abe484bc055afe523041a2a
|
[] |
no_license
|
nileshvarshney/restful_database
|
53a12a68f40d142021c30d155d9b67bc3fab99aa
|
2a16b74c2ab99804158d9eeb23fec0ada33292aa
|
refs/heads/master
| 2020-12-03T07:50:35.017918
| 2020-01-03T07:30:52
| 2020-01-03T07:30:52
| 231,248,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT
from security import authenticate, identity
from resources.user import RegisterUser
from resources.item import ItemList, Items
app = Flask(__name__)
api = Api(app)
app.secret_key='sanjose'
jwt = JWT(app, authentication_handler=authenticate, identity_handler=identity) # /auth
api.add_resource(Items,'/item/<string:name>')
api.add_resource(ItemList,'/items')
api.add_resource(RegisterUser,'/registerUser')
if __name__ == '__main__':
    app.run(port=5000, debug=True)
|
[
"nilvarshney@stubhub.com"
] |
nilvarshney@stubhub.com
|
d9b3198dc97ae3d100c2537f1a374cc313ba3383
|
0e22ce0913d3f0f7a7404a3add796533df10ffd2
|
/code_exam_student_grade.py
|
db18cc99d9d2ae1c6cf6534b6d0b6f0c95625ef4
|
[] |
no_license
|
Bhuvan1696/PythoN
|
714e98717d277c81d8a6d0a83873af0ff6f45df3
|
685eddd9cb7132867519f9dff71ed3a55502cca6
|
refs/heads/master
| 2020-12-09T18:05:57.724508
| 2020-02-22T17:51:06
| 2020-02-22T17:51:06
| 233,379,464
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
""" Student Mark Statement """
def grade(percentage, eng, sci, mat):
if ( eng >= 25 and sci >= 35 and mat >= 35):
if (percentage > 90):
print ("Grade A")
elif (percentage > 75 and percentage <= 90):
print ("Grafe B")
else:
print ("Average")
else:
print("Fail..!")
def total_marks(eng, theory, practical, mat):
if (eng <= 75 and theory <= 75 and practical <= 25 and mat <= 100):
tot_sci = theory + practical
total = eng + tot_sci + mat
percent = total/3
print ("Over all percentage :", percent)
grade(percent, eng, tot_sci, mat)
else:
print(" Out of Marks..")
def get_marks():
    eng = int(input("Enter English out of 75 :"))
    sci_theory = int(input("Enter Science_Theory out of 75 :"))
    sci_practical = int(input("Enter Science_Practical out of 25 :"))
    mat = int(input("Enter Maths out of 100 :"))
    return eng, sci_theory, sci_practical, mat
def main():
    english, theory, practical, maths = get_marks()
    total_marks(english, theory, practical, maths)
#Main starts from here
main()
|
[
"noreply@github.com"
] |
Bhuvan1696.noreply@github.com
|
eeb85c0763b4b58838c030ceccd1de9ec42a82e6
|
5cea11c9373d997430b523227ce81b61972ad1e3
|
/tests/test_client_events.py
|
bd3bc8ac4bf3a96cd62673408ee09427626646ff
|
[
"BSD-3-Clause"
] |
permissive
|
tinylambda/grpclib
|
fcc0d4f5723fe36359ceb9655764e9a37c87ebc1
|
948e32a29a4ad82ebbfdbb681f7a797f6233bff3
|
refs/heads/master
| 2023-07-15T16:19:59.776603
| 2021-08-25T19:56:10
| 2021-08-25T19:56:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,234
|
py
|
import pytest
from multidict import MultiDict
from google.rpc.error_details_pb2 import ResourceInfo
from grpclib.const import Status
from grpclib.events import listen, SendRequest, SendMessage, RecvMessage
from grpclib.events import RecvInitialMetadata, RecvTrailingMetadata
from grpclib.testing import ChannelFor
from grpclib._compat import nullcontext
from grpclib.exceptions import GRPCError
from dummy_pb2 import DummyRequest, DummyReply
from dummy_grpc import DummyServiceStub, DummyServiceBase
class DummyService(DummyServiceBase):
def __init__(self, fail=False):
self.fail = fail
async def UnaryUnary(self, stream):
await stream.recv_message()
await stream.send_initial_metadata(metadata={'initial': 'true'})
await stream.send_message(DummyReply(value='pong'))
if self.fail:
await stream.send_trailing_metadata(
status=Status.NOT_FOUND,
status_message="Everything is not OK",
status_details=[ResourceInfo()],
metadata={'trailing': 'true'},
)
else:
await stream.send_trailing_metadata(metadata={'trailing': 'true'})
async def UnaryStream(self, stream):
raise GRPCError(Status.UNIMPLEMENTED)
async def StreamUnary(self, stream):
raise GRPCError(Status.UNIMPLEMENTED)
async def StreamStream(self, stream):
raise GRPCError(Status.UNIMPLEMENTED)
async def _test(event_type, *, fail=False):
service = DummyService(fail)
events = []
async def callback(event_):
events.append(event_)
async with ChannelFor([service]) as channel:
listen(channel, event_type, callback)
stub = DummyServiceStub(channel)
ctx = pytest.raises(GRPCError) if fail else nullcontext()
with ctx:
reply = await stub.UnaryUnary(DummyRequest(value='ping'),
timeout=1,
metadata={'request': 'true'})
assert reply == DummyReply(value='pong')
event, = events
return event
@pytest.mark.asyncio
async def test_send_request():
event = await _test(SendRequest)
assert event.metadata == MultiDict({'request': 'true'})
assert event.method_name == '/dummy.DummyService/UnaryUnary'
assert event.deadline.time_remaining() > 0
assert event.content_type == 'application/grpc'
@pytest.mark.asyncio
async def test_send_message():
event = await _test(SendMessage)
assert event.message == DummyRequest(value='ping')
@pytest.mark.asyncio
async def test_recv_message():
event = await _test(RecvMessage)
assert event.message == DummyReply(value='pong')
@pytest.mark.asyncio
async def test_recv_initial_metadata():
event = await _test(RecvInitialMetadata)
assert event.metadata == MultiDict({'initial': 'true'})
@pytest.mark.asyncio
async def test_recv_trailing_metadata():
event = await _test(RecvTrailingMetadata, fail=True)
assert event.metadata == MultiDict({'trailing': 'true'})
assert event.status is Status.NOT_FOUND
assert event.status_message == "Everything is not OK"
assert isinstance(event.status_details[0], ResourceInfo)
|
[
"vladimir@magamedov.com"
] |
vladimir@magamedov.com
|
31dd5fd0705bfebccf299f10eb6ba594038b885d
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/5ejvPTQeiioTTA9xZ_0.py
|
9b5d0b04aa8e5dca2af5037100305f74b9f4c108
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
Create a function that checks if the argument is an integer or a string.
Return `"int"` if it's an integer and `"str"` if it's a string.
### Examples
int_or_string(8) ➞ "int"
int_or_string("Hello") ➞ "str"
int_or_string(9843532) ➞ "int"
### Notes
Input will either be an integer or a string.
"""
def int_or_string(var):
return var.__class__.__name__
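# A quick sanity check of the examples from the docstring:
assert int_or_string(8) == "int"
assert int_or_string("Hello") == "str"
assert int_or_string(9843532) == "int"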
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
7b288b67b9fa3473f2fb3c72085b6de7ea893109
|
6cecdc007a3aafe0c0d0160053811a1197aca519
|
/apps/receiver/management/commands/generate_submissions.py
|
ae672ba20a318c1fc46d7ecce22a17363b20c062
|
[] |
no_license
|
commtrack/temp-aquatest
|
91d678c927cc4b2dce6f709afe7faf2768b58157
|
3b10d179552b1e9d6a0e4ad5e91a92a05dba19c7
|
refs/heads/master
| 2016-08-04T18:06:47.582196
| 2010-09-29T13:20:13
| 2010-09-29T13:20:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,172
|
py
|
""" This script generates all the necessary data to
synchronize with a remote CommCareHQ server on that server.
This is only really useful if you intend to manually
scp/rsync data to your local server, which requires a
login to the remote server. So this is not the standard
synchronization workflow (but is necessary for low-connectivity
settings)
"""
import bz2
import sys
import urllib2
import httplib
import cStringIO
from urlparse import urlparse
from optparse import make_option
from django.core.management.base import LabelCommand, CommandError
from django_rest_interface import util as rest_util
from receiver.models import Submission
class Command(LabelCommand):
option_list = LabelCommand.option_list + (
make_option('-a','--all', action='store_true', dest='all', \
default=False, help='Generate all files'),
make_option('-?','--debug', action='store_true', dest='debug', \
default=False, help='Generate some files'),
make_option('-d','--download', action='store_true', dest='download', \
default=False, help='Download files.'),
)
help = "Generate synchronization files on a CommCareHQ remote server."
args = "<remote_url username password>"
label = 'IP address of the remote server (including port), username, and password'
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError('Please specify %s.' % self.label)
remote_url = args[0]
username = args[1]
password = args[2]
print "Generating synchronization data from %s" % remote_url
all = options.get('all', False)
debug = options.get('debug', False)
download = options.get('download', False)
generate_submissions(remote_url, username, password, not all, debug, download)
def __del__(self):
pass
def generate_submissions(remote_url, username, password, latest=True, debug=False, download=False, to='submissions.tar'):
""" Generate sync data from remote server
remote_url: url of remote server (ip:port)
username, password: credentials for logging in
"""
status = rest_util.login(remote_url, username, password)
if not status:
print "Sorry. Your credentials were not accepted."
sys.exit()
url = 'http://%s/api/submissions/' % remote_url
if latest:
MD5_buffer = rest_util.get_field_as_bz2(Submission, 'checksum', debug)
response = rest_util.request(url, username, password, MD5_buffer)
print "Generated latest remote submissions"
else:
response = urllib2.urlopen(url)
print "Generated all remote submissions archive"
if download:
fout = open(to, 'w+b')
fout.write(response.read())
fout.close()
print "Submissions downloaded to %s" % to
else:
        # Check for status messages
        # (bz2-compressed payloads always begin with the magic bytes "BZ")
response = response.read(255)
if response[:2] != "BZ":
print response
return response
|
[
"allen.machary@gmail.com"
] |
allen.machary@gmail.com
|
5a5a5583911ddb9db5402f6b3d6030070b115f57
|
1e50f1643376039ca988d909e79f528e01fa1371
|
/leetcode/editor/cn/292.nim-游戏.py
|
174da887a6b080c9b99b41e140bf445662a9f611
|
[] |
no_license
|
mahatmaWM/leetcode
|
482a249e56e2121f4896e34c58d9fa44d6d0034b
|
4f41dad6a38d3cac1c32bc1f157e20aa14eab9be
|
refs/heads/master
| 2022-09-04T17:53:54.832210
| 2022-08-06T07:29:46
| 2022-08-06T07:29:46
| 224,415,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 981
|
py
|
#
# @lc app=leetcode.cn id=292 lang=python3
#
# [292] Nim Game
#
# https://leetcode-cn.com/problems/nim-game/description/
#
# algorithms
# Easy (69.45%)
# Likes: 326
# Dislikes: 0
# Total Accepted: 50K
# Total Submissions: 71.9K
# Testcase Example: '4'
#
# You and your friend play the game of Nim together: there is a pile of
# stones on the table, and you take turns removing 1 - 3 stones each time.
# Whoever removes the last stone wins. You move first.
#
# You are both clever and every move is optimal. Write a function to decide
# whether you can win the game for a given number of stones.
#
# Example:
#
# Input: 4
# Output: false
# Explanation: if the pile holds 4 stones, you will never win the game;
# no matter whether you take 1, 2 or 3 stones, the last stone will always
# be taken by your friend.
#
#
#
# @lc code=start
class Solution:
def canWinNim(self, n: int) -> bool:
        return n % 4 != 0
# @lc code=end
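# A brute-force check of the n % 4 rule (a sketch added for illustration, not
# part of the LeetCode submission): a position is winning iff some move of
# 1-3 stones leaves the opponent in a losing position.
from functools import lru_cache

@lru_cache(maxsize=None)
def can_win_brute_force(n: int) -> bool:
    # with 1-3 stones you simply take them all and win
    return n <= 3 or any(not can_win_brute_force(n - k) for k in (1, 2, 3))

assert all(can_win_brute_force(n) == (n % 4 != 0) for n in range(1, 200))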
|
[
"chrismwang@tencent.com"
] |
chrismwang@tencent.com
|
22f4ffa79f304c929e6c0680c0a2228d0e15dd2b
|
dbf2d3f8eb11d04123894e398446b56ca791c9f6
|
/examples/02.py
|
c9847666ba51a1574e379280d847d651e7982b21
|
[] |
no_license
|
podhmo/nendo
|
ed8d9a62ab23f7409a8ce519f28deff7d3642942
|
841ec7a990019596c769a2f581a1190aeb8cbd56
|
refs/heads/master
| 2021-01-22T17:47:58.964323
| 2015-06-28T11:37:38
| 2015-06-28T11:37:38
| 37,828,656
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# -*- coding:utf-8 -*-
import logging
logger = logging.getLogger(__name__)
"""
-- select explicitly
SELECT open_emp_id, product_cd
FROM account
ORDER BY open_emp_id, product_cd;
"""
from nendo import Query, make_record, render
from nendo.value import List
Account = make_record("account", "account_id product_cd open_date avail_balance open_emp_id")
query = (Query()
.from_(Account)
.order_by(List([Account.open_emp_id, Account.product_cd]).desc())
.select(Account.open_emp_id, Account.product_cd))
print(render(query))
|
[
"podhmo+altair@beproud.jp"
] |
podhmo+altair@beproud.jp
|
ee39967cfee84345c3f981e0d983d21bfa8dc82f
|
dbe86e522bf7c0fa58531e13bed3dd97051e1b79
|
/cognitoLogin.py
|
4ab85e5d4802764ca7ab75cbd00e13fa51ba772e
|
[] |
no_license
|
Asteriw/CMPT473-AWSApp
|
e214281bbae59f9319efe423f55745e0a10dddb1
|
9d30543439913259a5e88fdf5b8913d3cac5acb4
|
refs/heads/master
| 2023-04-25T05:39:35.352531
| 2021-04-09T05:57:31
| 2021-04-09T05:57:31
| 369,430,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,570
|
py
|
import boto3
import botocore.exceptions
import hmac
import hashlib
import base64
import json
USER_POOL_ID = 'us-east-1_b6HnaK2eM'
CLIENT_ID = '4tnka15q9dfg2si6rd9d44mncc'
CLIENT_SECRET = '1i3e81c7nqnjqkl9dcnd48ebmn629ieivs3umi37ib3lv9907n8r'
def get_secret_hash(username):
msg = username + CLIENT_ID
dig = hmac.new(str(CLIENT_SECRET).encode('utf-8'),
msg = str(msg).encode('utf-8'), digestmod=hashlib.sha256).digest()
d2 = base64.b64encode(dig).decode()
return d2
def initiate_auth(client, username, password):
secret_hash = get_secret_hash(username)
try:
resp = client.admin_initiate_auth(
UserPoolId=USER_POOL_ID,
ClientId=CLIENT_ID,
AuthFlow='ADMIN_NO_SRP_AUTH',
AuthParameters={
'USERNAME': username,
'SECRET_HASH': secret_hash,
'PASSWORD': password,
},
ClientMetadata={
'name': username,
'password': password,
})
except client.exceptions.NotAuthorizedException:
return None, "The username or password is incorrect"
    except client.exceptions.UserNotConfirmedException:
        # `resp` is unbound here because admin_initiate_auth raised
        return None, "User is not confirmed"
except Exception as e:
return None, e.__str__()
return resp, None
def lambda_handler(event, context):
client = boto3.client('cognito-idp')
for field in ["username", "password"]:
if event.get(field) is None:
return {"error": True,
"success": False,
"message": f"{field} is required",
"data": None}
resp, msg = initiate_auth(client, event.get("username"), event.get("password"))
print(resp)
print(msg)
    if msg is not None:
return {'message': msg,
"error": True, "success": False, "data": None}
if resp.get("AuthenticationResult"):
return {'message': "success",
"error": False,
"success": True,
"data": {
"id_token": resp["AuthenticationResult"]["IdToken"],
"refresh_token": resp["AuthenticationResult"]["RefreshToken"],
"access_token": resp["AuthenticationResult"]["AccessToken"],
"expires_in": resp["AuthenticationResult"]["ExpiresIn"],
"token_type": resp["AuthenticationResult"]["TokenType"]
}}
else: #this code block is relevant only when MFA is enabled
return {"error": True,
"success": False,
"data": None, "message": None}
|
[
"zhiqi_qiao@sfu.ca"
] |
zhiqi_qiao@sfu.ca
|
d4a33c08e35fe6ddedc4fee59d98a62a0b60cb31
|
1493997bb11718d3c18c6632b6dd010535f742f5
|
/particles/particles_point_sprites.py
|
34a1996efb93c10ffe497379fab53cf8acfd7ca9
|
[] |
no_license
|
kovrov/scrap
|
cd0cf2c98a62d5af6e4206a2cab7bb8e4560b168
|
b0f38d95dd4acd89c832188265dece4d91383bbb
|
refs/heads/master
| 2021-01-20T12:21:34.742007
| 2010-01-12T19:53:23
| 2010-01-12T19:53:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,964
|
py
|
from pyglet.gl import gl_info
assert gl_info.have_extension("GL_ARB_point_sprite"), "ARB_point_sprite not available"
from pyglet.gl import *
import random
# see:
# http://www.opengl.org/registry/specs/ARB/point_sprite.txt
# http://www.opengl.org/registry/specs/ARB/point_parameters.txt
g_slowdown = 2.0
# Query for the max point size supported by the hardware
g_maxSize = c_float(0.0)
glGetFloatv(GL_POINT_SIZE_MAX_ARB, pointer(g_maxSize))
# Clamp size to 100.0f or the sprites could get a little too big on some of the
# newer graphic cards. My ATI card at home supports a max point size of 1024.0!
if (g_maxSize.value > 100.0): g_maxSize.value = 100.0
def draw_task(texture_id):
particles = [{
'life': 1.0,
'fade': random.uniform(0.1, 0.004),
#'r': 1.0, 'g': 1.0, 'b': 1.0,
'r': 0.32, 'g': 0.32, 'b': 0.32,
'x': 0.0, 'y': 0.0, 'z': 0.0,
'xi': float(random.randint(-250, 250)),
'yi': float(random.randint(-250, 250)),
'zi': float(random.randint(-250, 250)),
'xg': 0.0, 'yg': -0.8, 'zg': 0.0,
} for i in xrange(1000)]
glDisable(GL_DEPTH_TEST) # TODO: see if this integrates well with rest of render...
glEnable(GL_POINT_SPRITE_ARB) # affects global state
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE)
    # This is how our point sprite's size will be modified by
    # distance from the viewer
glPointParameterfvARB(GL_POINT_DISTANCE_ATTENUATION_ARB, (c_float*3)(1.0, 0.0, 0.01))
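    # Per the ARB_point_parameters spec, the derived size is roughly
    #   size * sqrt(1 / (a + b*d + c*d**2))
    # for eye distance d, so (1.0, 0.0, 0.01) shrinks sprites with distance.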
glPointSize(g_maxSize)
# The alpha of a point is calculated to allow the fading of points instead
# of shrinking them past a defined threshold size. The threshold is defined
# by GL_POINT_FADE_THRESHOLD_SIZE_ARB and is not clamped to the minimum and
# maximum point sizes.
# glPointParameterfARB(GL_POINT_FADE_THRESHOLD_SIZE_ARB, 60.0)
# glPointParameterfARB(GL_POINT_SIZE_MIN_ARB, 1.0)
# glPointParameterfARB(GL_POINT_SIZE_MAX_ARB, g_maxSize)
# Specify point sprite texture coordinate replacement mode for each
# texture unit (see ARB_point_sprite specs)
glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE) # per-texture unit
while True:
yield
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
glBindTexture(GL_TEXTURE_2D, texture_id)
glBegin(GL_POINTS)
for p in particles:
# draw
glColor4f(p['r'], p['g'], p['b'], p['life'])
glVertex3f(p['x'], p['y'], p['z'])
# update
p['life'] -= p['fade']
p['x'] += p['xi'] / (g_slowdown * 1000)
p['y'] += p['yi'] / (g_slowdown * 1000)
p['z'] += p['zi'] / (g_slowdown * 1000)
p['xi'] += p['xg']
p['yi'] += p['yg']
p['zi'] += p['zg']
if p['life'] < 0.0:
p['life'] = 1.0
p['fade'] = random.uniform(0.1, 0.004)
p['x'] = 0.0; p['y'] = 0.0; p['z'] = 0.0
p['xi'] = random.uniform(-32.0, 32.0)
p['yi'] = random.uniform(-32.0, 32.0)
p['zi'] = random.uniform(-32.0, 32.0)
glEnd()
glEnable(GL_BLEND)
glDisable(GL_POINT_SPRITE_ARB)
|
[
"kovrov@gmail.com"
] |
kovrov@gmail.com
|
e5ae86739f26139d2a56b19277ea7832e21d41bd
|
f74dd098c3e665d8f605af5ebe7e2874ac31dd2f
|
/aiogithubapi/namespaces/user.py
|
1d1bd8b8cab4928b70f10f1d9836568e6cc2db64
|
[
"MIT"
] |
permissive
|
ludeeus/aiogithubapi
|
ce87382698827939aaa127b378b9a11998f13c06
|
90f3fc98e5096300269763c9a5857481b2dec4d2
|
refs/heads/main
| 2023-08-20T19:30:05.309844
| 2023-08-14T20:24:21
| 2023-08-14T20:24:21
| 198,505,021
| 21
| 20
|
MIT
| 2023-09-11T06:12:10
| 2019-07-23T20:39:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,993
|
py
|
"""
Methods for the authenticated user namespace
https://docs.github.com/en/rest/reference/users#get-the-authenticated-user
"""
from __future__ import annotations
from typing import Any, Dict
from ..const import GitHubRequestKwarg
from ..models.organization import GitHubOrganizationMinimalModel
from ..models.repository import GitHubRepositoryModel
from ..models.response import GitHubResponseModel
from ..models.user import GitHubAuthenticatedUserModel
from .base import BaseNamespace
from .projects import GitHubUserProjectsNamespace
class GitHubUserNamespace(BaseNamespace):
"""Methods for the user namespace"""
def __post_init__(self) -> None:
self._projects = GitHubUserProjectsNamespace(self._client)
@property
def projects(self) -> GitHubUserProjectsNamespace:
"""Property to access the users projects namespace"""
return self._projects
async def get(
self,
**kwargs: Dict[GitHubRequestKwarg, Any],
) -> GitHubResponseModel[GitHubAuthenticatedUserModel]:
"""
Get the authenticated user
https://docs.github.com/en/rest/reference/users#get-a-user
"""
response = await self._client.async_call_api(
endpoint="/user",
**kwargs,
)
response.data = GitHubAuthenticatedUserModel(response.data)
return response
async def starred(
self,
**kwargs: Dict[GitHubRequestKwarg, Any],
) -> GitHubResponseModel[list[GitHubRepositoryModel]]:
"""
        Get the authenticated user's starred repositories
https://docs.github.com/en/rest/reference/users#get-a-user
"""
response = await self._client.async_call_api(
endpoint="/user/starred",
**kwargs,
)
response.data = [GitHubRepositoryModel(data) for data in response.data]
return response
async def repos(
self,
**kwargs: Dict[GitHubRequestKwarg, Any],
) -> GitHubResponseModel[list[GitHubRepositoryModel]]:
"""
Get the repositories for the authenticated user
https://docs.github.com/en/rest/reference/repos#list-repositories-for-a-user
"""
response = await self._client.async_call_api(
endpoint="/user/repos",
**kwargs,
)
response.data = [GitHubRepositoryModel(data) for data in response.data]
return response
async def orgs(
self,
**kwargs: Dict[GitHubRequestKwarg, Any],
) -> GitHubResponseModel[list[GitHubOrganizationMinimalModel]]:
"""
        List public organization memberships for the authenticated user.
https://docs.github.com/en/rest/reference/orgs#list-organizations-for-the-authenticated-user
"""
response = await self._client.async_call_api(endpoint="/user/orgs", **kwargs)
response.data = [GitHubOrganizationMinimalModel(data) for data in response.data or []]
return response
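# A hedged usage sketch (an assumption: it presumes this package's GitHubAPI
# client exposes GitHubUserNamespace as `client.user`; adjust to the actual
# client wiring and token handling):
#
#     import asyncio
#     from aiogithubapi import GitHubAPI
#
#     async def main():
#         client = GitHubAPI(token="<token>")
#         user = await client.user.get()
#         print(user.data.login)
#
#     asyncio.run(main())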
|
[
"noreply@github.com"
] |
ludeeus.noreply@github.com
|
adf625842636ccc75d545aa5f1e107a48d4ec5cb
|
f8d753a822047a68e417ba58d17f754789e2af93
|
/migrations/versions/ad4acce05428_.py
|
182a439446f9e08a2fd3cbe43752625d82ba13eb
|
[] |
no_license
|
daronjp/travel_blog
|
113eba826ccabcc18c51fc169e3b2ae359365b77
|
2a016ec840ebb468112a79c52605404d2ac1aa72
|
refs/heads/master
| 2023-05-10T19:39:34.713528
| 2022-09-13T03:47:57
| 2022-09-13T03:47:57
| 211,604,162
| 0
| 0
| null | 2023-05-01T22:49:07
| 2019-09-29T04:39:32
|
Python
|
UTF-8
|
Python
| false
| false
| 798
|
py
|
"""empty message
Revision ID: ad4acce05428
Revises: 07c5566941d1
Create Date: 2019-09-30 14:09:13.551393
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ad4acce05428'
down_revision = '07c5566941d1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_name', sa.String(), nullable=True),
sa.Column('password_hash', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('users')
# ### end Alembic commands ###
|
[
"daronjp@gmail.com"
] |
daronjp@gmail.com
|
d556f5c5b3363e7fc2bbc713413256455f6f53d3
|
7b6e3c5e6b963c749da9f946275661ae0e67dbd2
|
/src/model/test/yolo_v2_test.py
|
f12f453a1da9ecff535acc2209d498da9c687322
|
[] |
no_license
|
WeiZongqi/yolo-tensorflow
|
c8237295b41beb61943207d8511c80a0f33507f2
|
53eaa2ad779918ced2ded2834e09abf2e0ed7202
|
refs/heads/master
| 2021-01-25T14:26:58.371334
| 2017-12-28T08:18:59
| 2017-12-28T08:18:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,688
|
py
|
# -*- coding: utf8 -*-
# author: ronniecao
from __future__ import print_function
import sys
import os
import time
import numpy
import cv2  # used by test_get_box_pred below
import matplotlib.pyplot as plt
import tensorflow as tf
from src.data.image import ImageProcessor
from src.model.yolo_v2 import TinyYolo
class TinyYoloTestor:
def test_calculate_loss(self):
self.batch_size = 1
self.cell_size = 2
self.n_boxes = 2
self.max_objects = 3
self.n_classes = 5
coord_pred = numpy.zeros((1, 2, 2, 2, 4))
coord_pred[0,0,0,0,:] = [0.4, 0.4, 0.1, 0.1]
coord_pred[0,0,0,1,:] = [0.1, 0.1, 0.1, 0.1]
coord_pred[0,0,1,0,:] = [0.75, 0.25, 0.1, 0.1]
coord_pred[0,0,1,1,:] = [0.7, 0.2, 0.1, 0.1]
coord_pred[0,1,0,0,:] = [0.3, 0.8, 0.1, 0.1]
coord_pred[0,1,0,1,:] = [0.25, 0.75, 0.1, 0.1]
coord_pred[0,1,1,0,:] = [0.75, 0.75, 0.1, 0.1]
coord_pred[0,1,1,1,:] = [0.7, 0.8, 0.1, 0.1]
conf_pred = numpy.zeros((1, 2, 2, 2, 1))
conf_pred[0,0,0,0,0] = 1.0
conf_pred[0,0,0,1,0] = 1.0
conf_pred[0,0,1,0,0] = 1.0
conf_pred[0,0,1,1,0] = 0.2
conf_pred[0,1,0,0,0] = 0.1
conf_pred[0,1,0,1,0] = 0.9
conf_pred[0,1,1,0,0] = 1.0
class_pred = numpy.zeros((1, 2, 2, 2, 5))
class_pred[0,0,0,0,0] = 0.9
class_pred[0,0,0,0,1] = 0.1
class_pred[0,0,0,1,1] = 1.0
class_pred[0,0,1,0,4] = 0.8
class_pred[0,0,1,0,3] = 0.1
class_pred[0,0,1,0,2] = 0.1
class_pred[0,1,0,1,2] = 1.0
class_pred[0,1,1,0,3] = 0.8
class_pred[0,1,1,0,0] = 0.05
class_pred[0,1,1,0,1] = 0.05
class_pred[0,1,1,0,2] = 0.05
class_pred[0,1,1,0,4] = 0.05
coord_true = numpy.zeros((1, 2, 2, 3, 4))
coord_true[0,0,0,0,:] = [0.1, 0.1, 0.1, 0.1]
coord_true[0,0,0,1,:] = [0.4, 0.4, 0.1, 0.1]
coord_true[0,0,1,0,:] = [0.75, 0.25, 0.1, 0.1]
coord_true[0,1,0,0,:] = [0.25, 0.75, 0.1, 0.1]
coord_true[0,1,1,0,:] = [0.75, 0.75, 0.1, 0.1]
class_true = numpy.zeros((1, 2, 2, 3, 5))
class_true[0,0,0,0,1] = 1.0
class_true[0,0,0,1,0] = 1.0
class_true[0,0,1,0,4] = 1.0
class_true[0,1,0,0,2] = 1.0
class_true[0,1,1,0,3] = 1.0
object_mask = numpy.zeros((1, 2, 2, 3))
object_mask[0,0,0,0] = 1
object_mask[0,0,0,1] = 1
object_mask[0,0,1,0] = 1
object_mask[0,1,0,0] = 1
object_mask[0,1,1,0] = 1
coord_true_tf = tf.placeholder(
dtype=tf.float32, shape=[1, 2, 2, 3, 4], name='coord_true_tf')
coord_pred_tf = tf.placeholder(
dtype=tf.float32, shape=[1, 2, 2, 2, 4], name='coord_pred_tf')
conf_pred_tf = tf.placeholder(
dtype=tf.float32, shape=[1, 2, 2, 2, 1], name='conf_pred_tf')
class_true_tf = tf.placeholder(
dtype=tf.float32, shape=[1, 2, 2, 3, 5], name='class_true_tf')
class_pred_tf = tf.placeholder(
dtype=tf.float32, shape=[1, 2, 2, 2, 5], name='class_pred_tf')
object_mask_tf = tf.placeholder(
dtype=tf.float32, shape=[1, 2, 2, 3], name='object_mask_tf')
coord_pred_iter = tf.tile(
tf.reshape(coord_pred_tf, shape=[
self.batch_size, self.cell_size, self.cell_size, self.n_boxes, 1, 4]),
[1, 1, 1, 1, self.max_objects, 1])
coord_true_iter = tf.reshape(coord_true_tf, shape=[
self.batch_size, self.cell_size, self.cell_size, 1, self.max_objects, 4])
coord_true_iter = tf.tile(coord_true_iter, [1, 1, 1, self.n_boxes, 1, 1])
iou_tensor = self.calculate_iou_tf(coord_pred_iter, coord_true_iter)
iou_tensor_max = tf.reduce_max(iou_tensor, 3, keep_dims=True)
iou_tensor_mask = tf.cast(
(iou_tensor >= iou_tensor_max), dtype=tf.float32) * tf.reshape(
object_mask_tf, shape=(
self.batch_size, self.cell_size, self.cell_size, 1, self.max_objects, 1))
iou_tensor_pred_mask = tf.reduce_sum(iou_tensor_mask, axis=4)
coord_label = tf.reduce_max(iou_tensor_mask * coord_true_iter, axis=4)
coord_loss = tf.nn.l2_loss((coord_pred_tf - coord_label) * iou_tensor_pred_mask) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
iou_value = tf.reduce_sum(
tf.reduce_max(iou_tensor, axis=4) * iou_tensor_pred_mask, axis=[0,1,2,3]) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
conf_label = tf.reduce_max(iou_tensor_mask * tf.ones(shape=(
self.batch_size, self.cell_size, self.cell_size,
self.n_boxes, self.max_objects, 1)), axis=4)
object_loss = tf.nn.l2_loss(
(conf_pred_tf - conf_label) * iou_tensor_pred_mask) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
object_value = tf.reduce_sum(
conf_pred_tf * iou_tensor_pred_mask, axis=[0,1,2,3]) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
inv_iou_tensor_pred_mask = tf.ones(shape=(
self.batch_size, self.cell_size, self.cell_size,
self.n_boxes, 1)) - iou_tensor_pred_mask
noobject_loss = tf.nn.l2_loss(
(conf_pred_tf - conf_label) * inv_iou_tensor_pred_mask) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
noobject_value = tf.reduce_sum(
conf_pred_tf * inv_iou_tensor_pred_mask, axis=[0,1,2,3]) / (
tf.reduce_sum(inv_iou_tensor_pred_mask, axis=[0,1,2,3]))
class_true_iter = tf.reshape(class_true_tf, shape=[
self.batch_size, self.cell_size, self.cell_size, 1, self.max_objects, self.n_classes])
class_true_iter = tf.tile(class_true_iter, [1, 1, 1, self.n_boxes, 1, 1])
class_label = tf.reduce_max(iou_tensor_mask * class_true_iter, axis=4)
class_loss = tf.nn.l2_loss(
(class_pred_tf - class_label) * iou_tensor_pred_mask) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
class_value = tf.reduce_sum(
class_pred_tf * class_label * iou_tensor_pred_mask, axis=[0,1,2,3,4]) / (
tf.reduce_sum(object_mask_tf, axis=[0,1,2,3]))
sess = tf.Session()
[output] = sess.run(
fetches=[class_value],
feed_dict={coord_true_tf: coord_true, coord_pred_tf: coord_pred,
conf_pred_tf: conf_pred,
class_true_tf: class_true, class_pred_tf: class_pred,
object_mask_tf: object_mask})
print(output)
def calculate_iou_tf(self, box_pred, box_true):
box1 = tf.stack([
box_pred[:,:,:,:,:,0] - box_pred[:,:,:,:,:,2] / 2.0,
box_pred[:,:,:,:,:,1] - box_pred[:,:,:,:,:,3] / 2.0,
box_pred[:,:,:,:,:,0] + box_pred[:,:,:,:,:,2] / 2.0,
box_pred[:,:,:,:,:,1] + box_pred[:,:,:,:,:,3] / 2.0])
box1 = tf.transpose(box1, perm=[1, 2, 3, 4, 5, 0])
box2 = tf.stack([
box_true[:,:,:,:,:,0] - box_true[:,:,:,:,:,2] / 2.0,
box_true[:,:,:,:,:,1] - box_true[:,:,:,:,:,3] / 2.0,
box_true[:,:,:,:,:,0] + box_true[:,:,:,:,:,2] / 2.0,
box_true[:,:,:,:,:,1] + box_true[:,:,:,:,:,3] / 2.0])
box2 = tf.transpose(box2, perm=[1, 2, 3, 4, 5, 0])
left_top = tf.maximum(box1[:,:,:,:,:,0:2], box2[:,:,:,:,:,0:2])
right_bottom = tf.minimum(box1[:,:,:,:,:,2:4], box2[:,:,:,:,:,2:4])
intersection = right_bottom - left_top
inter_area = intersection[:,:,:,:,:,0] * intersection[:,:,:,:,:,1]
mask = tf.cast(intersection[:,:,:,:,:,0] > 0, tf.float32) * \
tf.cast(intersection[:,:,:,:,:,1] > 0, tf.float32)
inter_area = inter_area * mask
box1_area = (box1[:,:,:,:,:,2]-box1[:,:,:,:,:,0]) * (box1[:,:,:,:,:,3]-box1[:,:,:,:,:,1])
box2_area = (box2[:,:,:,:,:,2]-box2[:,:,:,:,:,0]) * (box2[:,:,:,:,:,3]-box2[:,:,:,:,:,1])
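        # IoU = intersection / union, where union = area1 + area2 - intersection;
        # the 1e-6 term below guards against division by zero for degenerate boxes.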
iou = inter_area / (box1_area + box2_area - inter_area + 1e-6)
return tf.reshape(iou, shape=[
self.batch_size, self.cell_size, self.cell_size, self.n_boxes, self.max_objects, 1])
def test_get_box_pred(self):
label = [[0, 0, 0, 0, 0]] * 5
label[0] = [0.5, 0.15, 0.8, 0.2, 1]
label[1] = [0.5, 0.7, 0.1, 0.2, 1]
label[2] = [0.5, 0.9, 0.6, 0.1, 1]
pred = numpy.zeros(shape=(3,3,6,5))
pred[0,1,4,:] = [-1.6, -1.73, 0.09, -0.09, 1.0]
# pred[1,0,4,:] = [0.0, 0.0, 0.0, 0.0, 1.0]
image_processor = ImageProcessor(
'Z:', image_size=96, max_objects_per_image=5, cell_size=3, n_classes=1)
class_label, class_mask, box_label, object_num = \
image_processor.process_label(label)
tiny_yolo = TinyYolo(
n_channel=3, n_classes=1, image_size=96, max_objects_per_image=5,
box_per_cell=6, object_scala=10, nobject_scala=5,
coord_scala=10, class_scala=1, batch_size=1)
box_pred = tf.placeholder(
dtype=tf.float32, shape=[3, 3, 6, 4], name='box_pred')
box_truth = tf.placeholder(
dtype=tf.float32, shape=[3, 3, 1, 4], name='box_truth')
iou_matrix = tiny_yolo.get_box_pred(box_pred)
sess = tf.Session()
[output] = sess.run(
fetches=[iou_matrix],
feed_dict={box_pred: pred[:,:,:,0:4]})
sess.close()
print(output, output.shape)
        # draw the figure
image = numpy.zeros(shape=(256, 256, 3), dtype='uint8') + 255
cv2.line(image, (0, int(256/3.0)), (256, int(256/3.0)), (100, 149, 237), 1)
cv2.line(image, (0, int(256*2.0/3.0)), (256, int(256*2.0/3.0)), (100, 149, 237), 1)
cv2.line(image, (int(256/3.0), 0), (int(256/3.0), 256), (100, 149, 237), 1)
cv2.line(image, (int(256*2.0/3.0), 0), (int(256*2.0/3.0), 256), (100, 149, 237), 1)
for center_x, center_y, w, h, prob in label:
if prob != 1.0:
continue
            # draw the center point
cv2.circle(image, (int(center_x*256), int(center_y*256)), 2, (255, 99, 71), 0)
            # draw the ground-truth box
xmin = int((center_x - w / 2.0) * 256)
xmax = int((center_x + w / 2.0) * 256)
ymin = int((center_y - h / 2.0) * 256)
ymax = int((center_y + h / 2.0) * 256)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (255, 99, 71), 0)
for x in range(3):
for y in range(3):
for n in range(2):
[center_x, center_y, w, h, prob] = pred[x, y, n, :]
                    # draw the center point
cv2.circle(image, (int(center_x*256), int(center_y*256)), 2, (238, 130, 238), 0)
                    # draw the predicted box
xmin = int((center_x - w / 2.0) * 256)
xmax = int((center_x + w / 2.0) * 256)
ymin = int((center_y - h / 2.0) * 256)
ymax = int((center_y + h / 2.0) * 256)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (238, 130, 238), 0)
plt.imshow(image)
plt.show()
|
[
"caocao7066@outlook.com"
] |
caocao7066@outlook.com
|
25758d4020776fdb429b99cd383fb2251ca42ea7
|
cbbc0c95e367932e962f8d9e6175a5150d0c6570
|
/coursera/algorithmic_toolbox/Greedy Algorithms/Maximum Salary/maximum_salary.py
|
2ebcd8633f3620ee15e42cef0dfecc8f14bbe780
|
[] |
no_license
|
chobostar/education_and_training
|
1369ab98f28b93651bb861a40c1fa0603973519e
|
fcec324a1b92916401ba8de5c61f6d7b1ee69c68
|
refs/heads/master
| 2023-08-04T04:36:16.217908
| 2023-07-22T08:09:54
| 2023-07-22T08:09:54
| 216,988,123
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
# python3
from itertools import permutations
import random
def if_greater(x: str, y: str) -> bool:
return (x + y) > (y + x)
def sort(items):
less = []
equal = []
greater = []
if len(items) > 1:
pivot = random.choice(items)
for x in items:
if not if_greater(x, pivot) and x != pivot:
less.append(x)
elif x == pivot:
equal.append(x)
elif if_greater(x, pivot):
greater.append(x)
return sort(greater)+equal+sort(less)
else:
return items
def largest_number_naive(numbers):
numbers = list(map(str, numbers))
largest = 0
for permutation in permutations(numbers):
largest = max(largest, int("".join(permutation)))
return largest
def largest_number(numbers):
sorted_numbers = sort([str(number) for number in numbers])
result = ''.join(sorted_numbers)
return int(result)
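# Why the concatenation comparator works: to order x and y we compare the two
# possible concatenations. For example, with ["5", "52"]:
#   "5" + "52" = "552"  >  "52" + "5" = "525"
# so 5 must come first, and largest_number([5, 52]) == 552.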
if __name__ == '__main__':
n = int(input())
input_numbers = input().split()
assert len(input_numbers) == n
print(largest_number(input_numbers))
|
[
"yakutskkirill@mail.ru"
] |
yakutskkirill@mail.ru
|
d4eb0b65a8e727748c9d78004d51c636bf799cf0
|
6aea393423a0f840c5d28e903726c1fc82dd0544
|
/System_class.py
|
4b9ee2d8eebe3fa193ecc2a20d2e3af9fc762a77
|
[] |
no_license
|
markrsteiner/markosim_reloaded
|
7ea4e9ed6d3403a2e560e055f89ab359c69519be
|
96ce8d534c9e59feb79ed1e80a52ef55e88a7749
|
refs/heads/master
| 2020-04-10T18:09:24.717592
| 2019-02-28T16:28:55
| 2019-02-28T16:28:55
| 161,195,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,260
|
py
|
import math
import numpy as np
import format__output
class System:
def __init__(self, user_input_file: dict, simulation_instance) -> None: # should take simulator instance or something
self.input_bus_voltage = float(user_input_file['Vcc [V]'])
self.input_ic_peak = float(user_input_file['Io [Apk]'])
self.input_mod_depth = float(user_input_file['Mod. Depth'])
self.input_output_freq = float(user_input_file['fo [Hz]'])
self.input_t_sink = float(user_input_file['Ts [\u00B0C]'])
self.input_modulation_type = simulation_instance.get__modulation_type()
self.input_freq_carrier = float(user_input_file['fc [kHz]'])
self.is_three_level = simulation_instance.get__three_level_flag()
if self.is_three_level:
self.input_bus_voltage /= 2
self.input_rg_on_inside = float(user_input_file['Inside rg on [\u03A9]'])
self.input_rg_off_inside = float(user_input_file['Inside rg off [\u03A9]'])
self.input_rg_on_outside = float(user_input_file['Outside rg on [\u03A9]'])
self.input_rg_off_outside = float(user_input_file['Outside rg off [\u03A9]'])
else:
self.input_rg_on = float(user_input_file['rg on [\u03A9]'])
self.input_rg_off = float(user_input_file['rg off [\u03A9]'])
self.rg_output_flag = True
self.input_power_factor = float(user_input_file['PF [cos(\u03D5)]'])
self.step_size = simulation_instance.get__step_size()
self.time_division = 1 / self.input_output_freq / 360.0 * self.step_size
self.switches_per_degree = self.input_freq_carrier * self.time_division
self.power_factor_phase_shift = math.acos(float(user_input_file['PF [cos(\u03D5)]']))
self.output_current = []
self.system_output_view = {}
self.cycle_angle__degree = None
self.system_output_voltage = np.arange(0)
self.duty_cycle__p = []
self.duty_cycle__n = []
self.calculate__system_output()
def calculate__system_output(self):
        self.cycle_angle__degree = np.arange(0, 360, self.step_size) * math.pi / 180
if self.input_modulation_type == "Sinusoidal":
self.calculate__sinusoidal_output()
if self.input_modulation_type == "SVPWM": # add later maybe
self.calculate__svpwm_output()
if self.input_modulation_type == 'Two Phase I':
self.calculate__two_phase1_output()
if self.is_three_level:
self.duty_cycle__p = np.clip(self.system_output_voltage / self.input_bus_voltage, 0, 1)
self.duty_cycle__n = np.clip(-self.system_output_voltage / self.input_bus_voltage, 0, 1)
else:
self.duty_cycle__p = np.clip(np.divide(self.system_output_voltage, self.input_bus_voltage), 0, 1)
self.duty_cycle__n = 1 - self.duty_cycle__p
def create__output_view(self, inside_module, outside_module=None, diode_module=None):
is_three_level = outside_module is not None and diode_module is not None
if is_three_level:
self.system_output_view = format__output.build__output_view_dict(self, inside_module, outside_module, diode_module)
self.system_output_view.update({'Modulation': self.input_modulation_type})
if not self.rg_output_flag:
self.system_output_view.update({
'Outside rg on [\u03A9]': "STOCK",
'Outside rg off [\u03A9]': "STOCK",
'Inside rg on [\u03A9]': "STOCK",
'Inside rg off [\u03A9]': "STOCK"
})
else:
self.system_output_view = format__output.build__output_view_dict(self, inside_module)
self.system_output_view.update({'Modulation': self.input_modulation_type})
if self.rg_output_flag:
self.system_output_view.update({'rg on [\u03A9]': self.input_rg_on, 'rg off [\u03A9]': self.input_rg_off})
else:
self.system_output_view.update({'rg on [\u03A9]': "STOCK", 'rg off [\u03A9]': "STOCK"})
def calculate__sinusoidal_output(self):
if self.is_three_level:
self.system_output_voltage = self.input_bus_voltage * self.input_mod_depth * np.sin(self.cycle_angle__degree)
else:
self.system_output_voltage = self.input_bus_voltage * (1 + self.input_mod_depth * np.sin(self.cycle_angle__degree)) / 2
self.output_current = self.input_ic_peak * np.sin(self.cycle_angle__degree - self.power_factor_phase_shift)
def calculate__svpwm_output(self):
sector = np.floor(self.cycle_angle__degree * 3 / math.pi)
duty_cycle = np.array([self.svpwm_helper(_sector, _degree) for _sector, _degree in zip(sector, self.cycle_angle__degree)])
self.system_output_voltage = self.input_bus_voltage * duty_cycle
self.output_current = self.input_ic_peak * np.cos(self.cycle_angle__degree - self.power_factor_phase_shift)
def svpwm_helper(self, sector, degree):
modified_input_mod_depth = self.input_mod_depth * math.sqrt(3) / 2
duty_cycle_results = {
0: modified_input_mod_depth * math.cos(degree - math.pi / 6) + (1.0 - modified_input_mod_depth * math.cos(degree - math.pi / 6)) / 2.0,
1: modified_input_mod_depth * math.sin(2 * math.pi / 3 - degree) + (1.0 - modified_input_mod_depth * math.cos(degree - math.pi / 2)) / 2.0,
2: (1.0 - modified_input_mod_depth * math.cos(degree - 5 * math.pi / 6)) / 2.0,
3: (1.0 - modified_input_mod_depth * math.cos(degree - 7 * math.pi / 6)) / 2.0,
4: modified_input_mod_depth * math.sin(degree - 4 * math.pi / 3) + (1.0 - modified_input_mod_depth * math.cos(degree - 3 * math.pi / 2)) / 2.0,
5: modified_input_mod_depth * math.cos(degree - 11 * math.pi / 6) + (1.0 - modified_input_mod_depth * math.cos(degree - 11 * math.pi / 6)) / 2.0
}
return duty_cycle_results[sector]
def calculate__two_phase1_output(self):
sector = np.floor(self.cycle_angle__degree * 3 / math.pi)
duty_cycle = np.array([self.two_phase1_helper(_sector, _degree) for _sector, _degree in zip(sector, self.cycle_angle__degree)])
self.system_output_voltage = self.input_bus_voltage * duty_cycle
self.output_current = self.input_ic_peak * np.cos(self.cycle_angle__degree - self.power_factor_phase_shift)
def two_phase1_helper(self, sector, degree):
modified_input_mod_depth = self.input_mod_depth * math.sqrt(3) / 2
duty_cycle_results = {
0: modified_input_mod_depth * math.sin(degree + math.pi / 6),
1: 1.0,
2: -modified_input_mod_depth * math.sin(degree - 7 * math.pi / 6),
3: 1.0 + modified_input_mod_depth * math.sin(degree + math.pi / 6),
4: 0.0,
5: 1.0 - modified_input_mod_depth * math.sin(degree - 7 * math.pi / 6)
}
return duty_cycle_results[sector]
def calculate__two_phase2_output(self):
        sector = np.floor(self.cycle_angle__degree * 1.5 / math.pi)  # 3 sectors, cf. the * 3 / math.pi six-sector cases above
duty_cycle = np.array([self.two_phase2_helper(_sector, _degree) for _sector, _degree in zip(sector, self.cycle_angle__degree)])
self.system_output_voltage = self.input_bus_voltage * duty_cycle
self.output_current = self.input_ic_peak * np.cos(self.cycle_angle__degree - self.power_factor_phase_shift - math.pi / 6)
def two_phase2_helper(self, sector, degree):
modified_input_mod_depth = self.input_mod_depth * math.sqrt(3) / 2
duty_cycle_results = {
0: modified_input_mod_depth * math.sin(degree),
1: modified_input_mod_depth * math.sin(degree - math.pi / 3),
2: 0
}
return duty_cycle_results[sector]
# Getters and setters
#
# def set__step_size(self, step_size):
# self.step_size = step_size
# self.time_division = 1 / self.input_output_freq / 360.0 * self.step_size
# self.switches_per_degree = self.input_freq_carrier * self.time_division
#
# def set__three_level(self, is_three_level):
# self.is_three_level = is_three_level
# self.input_bus_voltage /= 2
#
# def set__modulation(self, input__modulation_type):
# self.input_modulation_type = input__modulation_type
def set__input_current(self, input_current):
self.input_ic_peak = input_current
def set__rg_flag(self, flag):
self.rg_output_flag = flag
def get__input_current(self):
return self.input_ic_peak
def get__input_bus_voltage(self):
return self.input_bus_voltage
def get__switches_per_degree(self):
return self.switches_per_degree
def get__input_output_freq(self):
return self.input_output_freq
def get__input_mod_depth(self):
return self.input_mod_depth
def get__input_freq_carrier(self):
return self.input_freq_carrier
def get__input_power_factor(self):
return self.input_power_factor
def get__duty_cycle__p(self):
return self.duty_cycle__p
def get__duty_cycle__n(self):
return self.duty_cycle__n
def get__step_size(self):
return self.step_size
def get__time_division(self):
return self.time_division
def get__input_t_sink(self):
return self.input_t_sink
def get__system_output_current(self):
return self.output_current
def get__system_output_voltage(self):
return self.system_output_voltage
def get__system_output_view(self):
return self.system_output_view
def get__input_rg_on(self):
return self.input_rg_on
def get__input_rg_off(self):
return self.input_rg_off
def get__input_rg_on_inside(self):
return self.input_rg_on_inside
def get__input_rg_off_inside(self):
return self.input_rg_off_inside
def get__input_rg_on_outside(self):
return self.input_rg_on_outside
def get__input_rg_off_outside(self):
return self.input_rg_off_outside
def get__three_level(self):
return self.is_three_level
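# Usage sketch (hypothetical; the class name and constructor arguments below
# are assumptions and do not appear in this file):
#
#   model = PwmLossModel(...)              # hypothetical constructor
#   model.set__input_current(10.0)         # peak output current in amps
#   model.calculate__svpwm_output()        # fills the voltage/current arrays
#   voltage = model.get__system_output_voltage()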
|
[
"calarmy1"
] |
calarmy1
|
282e63fed4ef69cb10987c6e83a4b406b3ef4bf6
|
f0316e656767cf505b32c83eef4df13bb9f6b60c
|
/Kattis/cups.py
|
4789de27f116b754ca591f250e6577a087e0b6a9
|
[] |
no_license
|
AkshdeepSharma/Classroom
|
70ec46b35fab5fc4a9d2eac430659d7dafba93da
|
4e55799466c101c736de6c7e07d716ff147deb83
|
refs/heads/master
| 2022-06-13T18:14:03.236503
| 2022-05-17T20:16:28
| 2022-05-17T20:16:28
| 94,828,359
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
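# Kattis "cups": each line is either "<color> <radius>" or "<diameter> <color>"
# (interpretation assumed from the problem statement); diameters are halved so
# every cup is compared by radius before sorting.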
N = int(input())
cups = []
for i in range(N):
a, b = input().split()
try:
cups.append([int(b), a])
    except ValueError:  # second token is not an integer: "<diameter> <color>" form
cups.append([int(a) // 2, b])
cups = sorted(cups, key=lambda x: x[0])
for cup in cups:
    print(cup[1])
|
[
"akshdeep.s@live.com"
] |
akshdeep.s@live.com
|
4f7ae60a8596d2b441a4ff0da86b405f6c80aba6
|
ad5d38fce4785037c108186f17eb1c64380355ef
|
/sddsd/google-cloud-sdk.staging/lib/googlecloudsdk/calliope/arg_parsers.py
|
106bfe82ce32e1f5504ba759ff9f2da633c36cc4
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
saranraju90/multik8s
|
75864b605a139ddb7947ed4de4ae8466bdd49acb
|
428576dedef7bb9cd6516e2c1ab2714581e1137c
|
refs/heads/master
| 2023-03-03T21:56:14.383571
| 2021-02-20T14:56:42
| 2021-02-20T14:56:42
| 339,665,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53,962
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2013 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module that provides parsing utilities for argparse.
For details of how argparse argument parsers work, see:
http://docs.python.org/dev/library/argparse.html#type
Example usage:
import argparse
import arg_parsers
parser = argparse.ArgumentParser()
parser.add_argument(
'--metadata',
type=arg_parsers.ArgDict())
parser.add_argument(
'--delay',
default='5s',
      type=arg_parsers.Duration(lower_bound='1s', upper_bound='10s'))
parser.add_argument(
'--disk-size',
default='10GB',
      type=arg_parsers.BinarySize(lower_bound='1GB', upper_bound='10TB'))
res = parser.parse_args(
'--names --metadata x=y,a=b,c=d --delay 1s --disk-size 10gb'.split())
assert res.metadata == {'a': 'b', 'c': 'd', 'x': 'y'}
assert res.delay == 1
assert res.disk_size == 10737418240
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import collections
import copy
import re
from dateutil import tz
from googlecloudsdk.calliope import parser_errors
from googlecloudsdk.core import log
from googlecloudsdk.core import yaml
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import times
import six
from six.moves import zip # pylint: disable=redefined-builtin
__all__ = ['Duration', 'BinarySize']
class Error(Exception):
"""Exceptions that are defined by this module."""
class ArgumentTypeError(Error, argparse.ArgumentTypeError):
"""Exceptions for parsers that are used as argparse types."""
class ArgumentParsingError(Error, argparse.ArgumentError):
"""Raised when there is a problem with user input.
argparse.ArgumentError takes both the action and a message as constructor
parameters.
"""
def _GenerateErrorMessage(error, user_input=None, error_idx=None):
"""Constructs an error message for an exception.
Args:
error: str, The error message that should be displayed. This
message should not end with any punctuation--the full error
message is constructed by appending more information to error.
user_input: str, The user input that caused the error.
error_idx: int, The index at which the error occurred. If None,
the index will not be printed in the error message.
Returns:
str: The message to use for the exception.
"""
if user_input is None:
return error
elif not user_input: # Is input empty?
return error + '; received empty string'
elif error_idx is None:
return error + '; received: ' + user_input
return ('{error_message} at index {error_idx}: {user_input}'
.format(error_message=error, user_input=user_input,
error_idx=error_idx))
_VALUE_PATTERN = r"""
^ # Beginning of input marker.
(?P<amount>\d+) # Amount.
((?P<suffix>[-/a-zA-Z]+))? # Optional scale and type abbr.
$ # End of input marker.
"""
_RANGE_PATTERN = r'^(?P<start>[0-9]+)(-(?P<end>[0-9]+))?$'
_SECOND = 1
_MINUTE = 60 * _SECOND
_HOUR = 60 * _MINUTE
_DAY = 24 * _HOUR
# The units are adopted from sleep(1):
# http://linux.die.net/man/1/sleep
_DURATION_SCALES = {
's': _SECOND,
'm': _MINUTE,
'h': _HOUR,
'd': _DAY,
}
_BINARY_SIZE_SCALES = {
'': 1,
'K': 1 << 10,
'M': 1 << 20,
'G': 1 << 30,
'T': 1 << 40,
'P': 1 << 50,
'Ki': 1 << 10,
'Mi': 1 << 20,
'Gi': 1 << 30,
'Ti': 1 << 40,
'Pi': 1 << 50,
}
def GetMultiCompleter(individual_completer):
"""Create a completer to handle completion for comma separated lists.
Args:
individual_completer: A function that completes an individual element.
Returns:
A function that completes the last element of the list.
"""
def MultiCompleter(prefix, parsed_args, **kwargs):
start = ''
lst = prefix.rsplit(',', 1)
if len(lst) > 1:
start = lst[0] + ','
prefix = lst[1]
matches = individual_completer(prefix, parsed_args, **kwargs)
return [start + match for match in matches]
return MultiCompleter
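# Example (illustrative): if individual_completer returns ['alpha', 'apple']
# for the prefix 'a', the wrapped completer expands the comma-separated
# prefix 'x,y,a' to ['x,y,alpha', 'x,y,apple'].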
def _DeleteTypeAbbr(suffix, type_abbr='B'):
"""Returns suffix with trailing type abbreviation deleted."""
if not suffix:
return suffix
s = suffix.upper()
i = len(s)
for c in reversed(type_abbr.upper()):
if not i:
break
if s[i - 1] == c:
i -= 1
return suffix[:i]
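# Examples (illustrative): _DeleteTypeAbbr('KB') == 'K',
# _DeleteTypeAbbr('K') == 'K', and _DeleteTypeAbbr('B') == ''.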
def GetBinarySizePerUnit(suffix, type_abbr='B'):
"""Returns the binary size per unit for binary suffix string.
Args:
suffix: str, A case insensitive unit suffix string with optional type
abbreviation.
type_abbr: str, The optional case insensitive type abbreviation following
the suffix.
Raises:
ValueError for unknown units.
Returns:
The binary size per unit for a unit+type_abbr suffix.
"""
unit = _DeleteTypeAbbr(suffix.upper(), type_abbr)
return _BINARY_SIZE_SCALES.get(unit)
def _ValueParser(scales, default_unit, lower_bound=None, upper_bound=None,
strict_case=True, type_abbr='B',
suggested_binary_size_scales=None):
"""A helper that returns a function that can parse values with units.
Casing for all units matters.
Args:
scales: {str: int}, A dictionary mapping units to their magnitudes in
relation to the lowest magnitude unit in the dict.
    default_unit: str, The default unit to use if the user's input is
      missing a unit.
lower_bound: str, An inclusive lower bound.
upper_bound: str, An inclusive upper bound.
strict_case: bool, whether to be strict on case-checking
type_abbr: str, the type suffix abbreviation, e.g., B for bytes, b/s for
bits/sec.
suggested_binary_size_scales: list, A list of strings with units that will
be recommended to user.
Returns:
A function that can parse values.
"""
def UnitsByMagnitude(suggested_binary_size_scales=None):
"""Returns a list of the units in scales sorted by magnitude."""
scale_items = sorted(six.iteritems(scales),
key=lambda value: (value[1], value[0]))
if suggested_binary_size_scales is None:
return [key + type_abbr for key, _ in scale_items]
return [key + type_abbr for key, _ in scale_items
if key + type_abbr in suggested_binary_size_scales]
def Parse(value):
"""Parses value that can contain a unit and type avvreviation."""
match = re.match(_VALUE_PATTERN, value, re.VERBOSE)
if not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'given value must be of the form INTEGER[UNIT] where units '
'can be one of {0}'
.format(', '.join(UnitsByMagnitude(suggested_binary_size_scales))),
user_input=value))
amount = int(match.group('amount'))
suffix = match.group('suffix') or ''
unit = _DeleteTypeAbbr(suffix, type_abbr)
if strict_case:
unit_case = unit
default_unit_case = _DeleteTypeAbbr(default_unit, type_abbr)
scales_case = scales
else:
unit_case = unit.upper()
default_unit_case = _DeleteTypeAbbr(default_unit.upper(), type_abbr)
scales_case = dict([(k.upper(), v) for k, v in scales.items()])
if not unit and unit == suffix:
return amount * scales_case[default_unit_case]
elif unit_case in scales_case:
return amount * scales_case[unit_case]
else:
raise ArgumentTypeError(_GenerateErrorMessage(
'unit must be one of {0}'.format(', '.join(UnitsByMagnitude())),
user_input=unit))
if lower_bound is None:
parsed_lower_bound = None
else:
parsed_lower_bound = Parse(lower_bound)
if upper_bound is None:
parsed_upper_bound = None
else:
parsed_upper_bound = Parse(upper_bound)
def ParseWithBoundsChecking(value):
"""Same as Parse except bound checking is performed."""
if value is None:
return None
else:
parsed_value = Parse(value)
if parsed_lower_bound is not None and parsed_value < parsed_lower_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
elif parsed_upper_bound is not None and parsed_value > parsed_upper_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
else:
return parsed_value
return ParseWithBoundsChecking
def RegexpValidator(pattern, description):
"""Returns a function that validates a string against a regular expression.
For example:
>>> alphanumeric_type = RegexpValidator(
... r'[a-zA-Z0-9]+',
... 'must contain one or more alphanumeric characters')
>>> parser.add_argument('--foo', type=alphanumeric_type)
>>> parser.parse_args(['--foo', '?'])
>>> # SystemExit raised and the error "error: argument foo: Bad value [?]:
>>> # must contain one or more alphanumeric characters" is displayed
Args:
pattern: str, the pattern to compile into a regular expression to check
description: an error message to show if the argument doesn't match
Returns:
function: str -> str, usable as an argparse type
"""
def Parse(value):
if not re.match(pattern + '$', value):
raise ArgumentTypeError('Bad value [{0}]: {1}'.format(value, description))
return value
return Parse
def CustomFunctionValidator(fn, description, parser=None):
"""Returns a function that validates the input by running it through fn.
For example:
>>> def isEven(val):
... return val % 2 == 0
>>> even_number_parser = arg_parsers.CustomFunctionValidator(
isEven, 'This is not even!', parser=arg_parsers.BoundedInt(0))
>>> parser.add_argument('--foo', type=even_number_parser)
>>> parser.parse_args(['--foo', '3'])
>>> # SystemExit raised and the error "error: argument foo: Bad value [3]:
>>> # This is not even!" is displayed
Args:
fn: str -> boolean
description: an error message to show if boolean function returns False
    parser: an arg_parser that is applied to the value before validation. The
      value is also returned by this parser.
Returns:
function: str -> str, usable as an argparse type
"""
def Parse(value):
"""Validates and returns a custom object from an argument string value."""
try:
parsed_value = parser(value) if parser else value
except ArgumentTypeError:
pass
else:
if fn(parsed_value):
return parsed_value
encoded_value = console_attr.SafeText(value)
formatted_err = 'Bad value [{0}]: {1}'.format(encoded_value, description)
raise ArgumentTypeError(formatted_err)
return Parse
def Duration(default_unit='s',
lower_bound='0',
upper_bound=None,
parsed_unit='s'):
"""Returns a function that can parse time durations.
See times.ParseDuration() for details. If the unit is omitted, seconds is
assumed. The parsed unit is assumed to be seconds, but can be specified as
ms or us.
For example:
parser = Duration()
assert parser('10s') == 10
parser = Duration(parsed_unit='ms')
assert parser('10s') == 10000
parser = Duration(parsed_unit='us')
assert parser('10s') == 10000000
Args:
default_unit: str, The default duration unit.
lower_bound: str, An inclusive lower bound for values.
upper_bound: str, An inclusive upper bound for values.
parsed_unit: str, The unit that the result should be returned as. Can be
's', 'ms', or 'us'.
Raises:
ArgumentTypeError: If either the lower_bound or upper_bound
cannot be parsed. The returned function will also raise this
error if it cannot parse its input. This exception is also
raised if the returned function receives an out-of-bounds
input.
Returns:
A function that accepts a single time duration as input to be
parsed.
"""
def Parse(value):
"""Parses a duration from value and returns integer of the parsed_unit."""
if parsed_unit == 'ms':
multiplier = 1000
elif parsed_unit == 'us':
multiplier = 1000000
elif parsed_unit == 's':
multiplier = 1
else:
raise ArgumentTypeError(
_GenerateErrorMessage('parsed_unit must be one of s, ms, us.'))
try:
duration = times.ParseDuration(value, default_suffix=default_unit)
return int(duration.total_seconds * multiplier)
except times.Error as e:
message = six.text_type(e).rstrip('.')
      raise ArgumentTypeError(_GenerateErrorMessage(
          'Failed to parse duration: {0}'.format(message), user_input=value))
parsed_lower_bound = Parse(lower_bound)
if upper_bound is None:
parsed_upper_bound = None
else:
parsed_upper_bound = Parse(upper_bound)
def ParseWithBoundsChecking(value):
"""Same as Parse except bound checking is performed."""
if value is None:
return None
parsed_value = Parse(value)
if parsed_lower_bound is not None and parsed_value < parsed_lower_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
if parsed_upper_bound is not None and parsed_value > parsed_upper_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
return parsed_value
return ParseWithBoundsChecking
def BinarySize(lower_bound=None, upper_bound=None,
suggested_binary_size_scales=None, default_unit='G',
type_abbr='B'):
"""Returns a function that can parse binary sizes.
Binary sizes are defined as base-2 values representing number of
bytes.
Input to the parsing function must be a string of the form:
INTEGER[UNIT]
The integer must be non-negative. Valid units are "B", "KB", "MB",
"GB", "TB", "KiB", "MiB", "GiB", "TiB", "PiB". If the unit is
omitted then default_unit is assumed.
The result is parsed in bytes. For example:
parser = BinarySize()
    assert parser('10GB') == 10737418240
Args:
lower_bound: str, An inclusive lower bound for values.
upper_bound: str, An inclusive upper bound for values.
suggested_binary_size_scales: list, A list of strings with units that will
be recommended to user.
default_unit: str, unit used when user did not specify unit.
type_abbr: str, the type suffix abbreviation, e.g., B for bytes, b/s for
bits/sec.
Raises:
ArgumentTypeError: If either the lower_bound or upper_bound
cannot be parsed. The returned function will also raise this
error if it cannot parse its input. This exception is also
raised if the returned function receives an out-of-bounds
input.
Returns:
A function that accepts a single binary size as input to be
parsed.
"""
return _ValueParser(
_BINARY_SIZE_SCALES, default_unit=default_unit, lower_bound=lower_bound,
upper_bound=upper_bound, strict_case=False, type_abbr=type_abbr,
suggested_binary_size_scales=suggested_binary_size_scales)
_KV_PAIR_DELIMITER = '='
class Range(object):
"""Range of integer values."""
def __init__(self, start, end):
self.start = start
self.end = end
@staticmethod
def Parse(string_value):
"""Creates Range object out of given string value."""
match = re.match(_RANGE_PATTERN, string_value)
if not match:
raise ArgumentTypeError('Expected a non-negative integer value or a '
'range of such values instead of "{0}"'
.format(string_value))
start = int(match.group('start'))
end = match.group('end')
if end is None:
end = start
else:
end = int(end)
if end < start:
      raise ArgumentTypeError('Expected range start {0} to be less than or '
                              'equal to range end {1} in "{2}"'.format(
                                  start, end, string_value))
return Range(start, end)
def Combine(self, other):
"""Combines two overlapping or adjacent ranges, raises otherwise."""
if self.end + 1 < other.start or self.start > other.end + 1:
raise Error('Cannot combine non-overlapping or non-adjacent ranges '
'{0} and {1}'.format(self, other))
return Range(min(self.start, other.start), max(self.end, other.end))
def __eq__(self, other):
if isinstance(other, Range):
return self.start == other.start and self.end == other.end
return False
def __lt__(self, other):
if self.start == other.start:
return self.end < other.end
return self.start < other.start
def __str__(self):
if self.start == self.end:
return six.text_type(self.start)
return '{0}-{1}'.format(self.start, self.end)
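# Example (illustrative): Range.Parse('2-5') yields start=2, end=5, and
# Range.Parse('2-5').Combine(Range.Parse('6-8')) merges the adjacent ranges
# into 2-8; combining non-overlapping, non-adjacent ranges raises Error.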
class HostPort(object):
"""A class for holding host and port information."""
IPV4_OR_HOST_PATTERN = r'^(?P<address>[\w\d\.-]+)?(:|:(?P<port>[\d]+))?$'
# includes hostnames
IPV6_PATTERN = r'^(\[(?P<address>[\w\d:]+)\])(:|:(?P<port>[\d]+))?$'
def __init__(self, host, port):
self.host = host
self.port = port
@staticmethod
def Parse(s, ipv6_enabled=False):
"""Parse the given string into a HostPort object.
This can be used as an argparse type.
Args:
s: str, The string to parse. If ipv6_enabled and host is an IPv6 address,
it should be placed in square brackets: e.g.
[2001:db8:0:0:0:ff00:42:8329]
or
[2001:db8:0:0:0:ff00:42:8329]:8080
ipv6_enabled: boolean, If True then accept IPv6 addresses.
Raises:
ArgumentTypeError: If the string is not valid.
Returns:
HostPort, The parsed object.
"""
if not s:
return HostPort(None, None)
match = re.match(HostPort.IPV4_OR_HOST_PATTERN, s, re.UNICODE)
if ipv6_enabled and not match:
match = re.match(HostPort.IPV6_PATTERN, s, re.UNICODE)
if not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'Failed to parse host and port. Expected format \n\n'
' IPv4_ADDRESS_OR_HOSTNAME:PORT\n\n'
'or\n\n'
' [IPv6_ADDRESS]:PORT\n\n'
'(where :PORT is optional).',
user_input=s))
elif not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'Failed to parse host and port. Expected format \n\n'
' IPv4_ADDRESS_OR_HOSTNAME:PORT\n\n'
'(where :PORT is optional).',
user_input=s))
return HostPort(match.group('address'), match.group('port'))
class Day(object):
"""A class for parsing a datetime object for a specific day."""
@staticmethod
def Parse(s):
if not s:
return None
try:
return times.ParseDateTime(s, '%Y-%m-%d').date()
except times.Error as e:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse date: {0}'.format(six.text_type(e)),
user_input=s))
class Datetime(object):
"""A class for parsing a datetime object."""
@staticmethod
def Parse(s):
"""Parses a string value into a Datetime object in local timezone."""
if not s:
return None
try:
return times.ParseDateTime(s)
except times.Error as e:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse date/time: {0}'.format(six.text_type(e)),
user_input=s))
@staticmethod
def ParseUtcTime(s):
"""Parses a string representing a time in UTC into a Datetime object."""
if not s:
return None
try:
return times.ParseDateTime(s, tzinfo=tz.tzutc())
except times.Error as e:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse UTC time: {0}'.format(six.text_type(e)),
user_input=s))
class DayOfWeek(object):
"""A class for parsing a day of the week."""
DAYS = ['SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT']
@staticmethod
def Parse(s):
"""Validates and normalizes a string as a day of the week."""
if not s:
return None
fixed = s.upper()[:3]
if fixed not in DayOfWeek.DAYS:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse day of week. Value should be one of {0}'.format(
', '.join(DayOfWeek.DAYS)),
user_input=s))
return fixed
def _BoundedType(type_builder, type_description,
lower_bound=None, upper_bound=None, unlimited=False):
"""Returns a function that can parse given type within some bound.
Args:
type_builder: A callable for building the requested type from the value
string.
type_description: str, Description of the requested type (for verbose
messages).
lower_bound: of type compatible with type_builder,
The value must be >= lower_bound.
upper_bound: of type compatible with type_builder,
The value must be <= upper_bound.
unlimited: bool, If True then a value of 'unlimited' means no limit.
Returns:
A function that can parse given type within some bound.
"""
def Parse(value):
"""Parses value as a type constructed by type_builder.
Args:
value: str, Value to be converted to the requested type.
Raises:
ArgumentTypeError: If the provided value is out of bounds or unparsable.
Returns:
Value converted to the requested type.
"""
if unlimited and value == 'unlimited':
return None
try:
v = type_builder(value)
except ValueError:
raise ArgumentTypeError(
_GenerateErrorMessage('Value must be {0}'.format(type_description),
user_input=value))
if lower_bound is not None and v < lower_bound:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
if upper_bound is not None and upper_bound < v:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
return v
return Parse
def BoundedInt(*args, **kwargs):
return _BoundedType(int, 'an integer', *args, **kwargs)
def BoundedFloat(*args, **kwargs):
return _BoundedType(float, 'a floating point number', *args, **kwargs)
def _TokenizeQuotedList(arg_value, delim=','):
"""Tokenize an argument into a list.
Args:
arg_value: str, The raw argument.
delim: str, The delimiter on which to split the argument string.
Returns:
[str], The tokenized list.
"""
if arg_value:
if not arg_value.endswith(delim):
arg_value += delim
return arg_value.split(delim)[:-1]
return []
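# Example (illustrative): _TokenizeQuotedList('a,b') == ['a', 'b']; a trailing
# delimiter is tolerated, so _TokenizeQuotedList('a,b,') gives the same result.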
class ArgType(object):
"""Base class for arg types."""
class ArgBoolean(ArgType):
"""Interpret an argument value as a bool."""
def __init__(
self, truthy_strings=None, falsey_strings=None, case_sensitive=False):
self._case_sensitive = case_sensitive
if truthy_strings:
self._truthy_strings = truthy_strings
else:
self._truthy_strings = ['true', 'yes']
if falsey_strings:
self._falsey_strings = falsey_strings
else:
self._falsey_strings = ['false', 'no']
def __call__(self, arg_value):
if not self._case_sensitive:
normalized_arg_value = arg_value.lower()
else:
normalized_arg_value = arg_value
if normalized_arg_value in self._truthy_strings:
return True
if normalized_arg_value in self._falsey_strings:
return False
raise ArgumentTypeError(
'Invalid flag value [{0}], expected one of [{1}]'.format(
arg_value,
', '.join(self._truthy_strings + self._falsey_strings)
)
)
class ArgList(ArgType):
"""Interpret an argument value as a list.
Intended to be used as the type= for a flag argument. Splits the string on
commas or another delimiter and returns a list.
By default, splits on commas:
'a,b,c' -> ['a', 'b', 'c']
There is an available syntax for using an alternate delimiter:
'^:^a,b:c' -> ['a,b', 'c']
'^::^a:b::c' -> ['a:b', 'c']
'^,^^a^,b,c' -> ['^a^', ',b', 'c']
"""
DEFAULT_DELIM_CHAR = ','
ALT_DELIM_CHAR = '^'
def __init__(self,
element_type=None,
min_length=0,
max_length=None,
choices=None,
custom_delim_char=None,
visible_choices=None):
"""Initialize an ArgList.
Args:
element_type: (str)->str, A function to apply to each of the list items.
min_length: int, The minimum size of the list.
max_length: int, The maximum size of the list.
choices: [element_type], a list of valid possibilities for elements. If
None, then no constraints are imposed.
custom_delim_char: char, A customized delimiter character.
visible_choices: [element_type], a list of valid possibilities for
elements to be shown to the user. If None, defaults to choices.
Returns:
(str)->[str], A function to parse the list of values in the argument.
Raises:
ArgumentTypeError: If the list is malformed.
"""
self.element_type = element_type
self.choices = choices
self.visible_choices = (
visible_choices if visible_choices is not None else choices)
if self.visible_choices:
def ChoiceType(raw_value):
if element_type:
typed_value = element_type(raw_value)
else:
typed_value = raw_value
if typed_value not in choices:
raise ArgumentTypeError('{value} must be one of [{choices}]'.format(
value=typed_value,
choices=', '.join(
[six.text_type(choice) for choice in self.visible_choices])))
return typed_value
self.element_type = ChoiceType
self.min_length = min_length
self.max_length = max_length
self.custom_delim_char = custom_delim_char
def __call__(self, arg_value): # pylint:disable=missing-docstring
if isinstance(arg_value, list):
arg_list = arg_value
elif not isinstance(arg_value, six.string_types):
raise ArgumentTypeError('Invalid type [{}] for flag value [{}]'.format(
type(arg_value).__name__, arg_value))
else:
delim = self.custom_delim_char or self.DEFAULT_DELIM_CHAR
if (arg_value.startswith(self.ALT_DELIM_CHAR) and
self.ALT_DELIM_CHAR in arg_value[1:]):
delim, arg_value = arg_value[1:].split(self.ALT_DELIM_CHAR, 1)
if not delim:
raise ArgumentTypeError(
              'Invalid delimiter. Please see `gcloud topic flags-file` or '
'`gcloud topic escaping` for information on providing list or '
'dictionary flag values with special characters.')
arg_list = _TokenizeQuotedList(arg_value, delim=delim)
# TODO(b/35944028): These exceptions won't present well to the user.
if len(arg_list) < self.min_length:
raise ArgumentTypeError('not enough args')
if self.max_length is not None and len(arg_list) > self.max_length:
raise ArgumentTypeError('too many args')
if self.element_type:
arg_list = [self.element_type(arg) for arg in arg_list]
return arg_list
_MAX_METAVAR_LENGTH = 30 # arbitrary, but this is pretty long
def GetUsageMsg(self, is_custom_metavar, metavar):
"""Get a specially-formatted metavar for the ArgList to use in help.
An example is worth 1,000 words:
    >>> ArgList().GetUsageMsg(False, 'FOO')
    '[FOO,...]'
    >>> ArgList(min_length=1).GetUsageMsg(False, 'FOO')
    'FOO,[FOO,...]'
    >>> ArgList(max_length=2).GetUsageMsg(False, 'FOO')
    'FOO,[FOO]'
    >>> ArgList(max_length=3).GetUsageMsg(False, 'FOO')  # One, two, many...
    'FOO,[FOO,...]'
    >>> ArgList(min_length=2, max_length=2).GetUsageMsg(False, 'FOO')
    'FOO,FOO'
    >>> ArgList().GetUsageMsg(False, 'REALLY_VERY_QUITE_LONG_METAVAR')
    'REALLY_VERY_QUITE_LONG_METAVAR,[...]'
Args:
is_custom_metavar: unused in GetUsageMsg
metavar: string, the base metavar to turn into an ArgList metavar
Returns:
string, the ArgList usage metavar
"""
del is_custom_metavar # Unused in GetUsageMsg
delim_char = self.custom_delim_char or self.DEFAULT_DELIM_CHAR
required = delim_char.join([metavar] * self.min_length)
if self.max_length:
num_optional = self.max_length - self.min_length
else:
num_optional = None
# Use the "1, 2, many" approach to counting
if num_optional == 0:
optional = ''
elif num_optional == 1:
optional = '[{}]'.format(metavar)
elif num_optional == 2:
optional = '[{0}{1}[{0}]]'.format(metavar, delim_char)
else:
optional = '[{}{}...]'.format(metavar, delim_char)
msg = delim_char.join([x for x in [required, optional] if x])
if len(msg) < self._MAX_METAVAR_LENGTH:
return msg
# With long metavars, only put it in once.
if self.min_length == 0:
return '[{}{}...]'.format(metavar, delim_char)
if self.min_length == 1:
return '{}{}[...]'.format(metavar, delim_char)
else:
return '{0}{1}...{1}[...]'.format(metavar, delim_char)
class ArgDict(ArgList):
"""Interpret an argument value as a dict.
Intended to be used as the type= for a flag argument. Splits the string on
commas to get a list, and then splits the items on equals to get a set of
key-value pairs to get a dict.
"""
def __init__(self, key_type=None, value_type=None, spec=None, min_length=0,
max_length=None, allow_key_only=False, required_keys=None,
operators=None):
"""Initialize an ArgDict.
Args:
key_type: (str)->str, A function to apply to each of the dict keys.
value_type: (str)->str, A function to apply to each of the dict values.
spec: {str: (str)->str}, A mapping of expected keys to functions.
The functions are applied to the values. If None, an arbitrary
set of keys will be accepted. If not None, it is an error for the
user to supply a key that is not in the spec. If the function specified
is None, then accept a key only without '=value'.
min_length: int, The minimum number of keys in the dict.
max_length: int, The maximum number of keys in the dict.
allow_key_only: bool, Allow empty values.
required_keys: [str], Required keys in the dict.
operators: operator_char -> value_type, Define multiple single character
operators, each with its own value_type converter. Use value_type==None
for no conversion. The default value is {'=': value_type}
Returns:
(str)->{str:str}, A function to parse the dict in the argument.
Raises:
ArgumentTypeError: If the list is malformed.
ValueError: If both value_type and spec are provided.
"""
super(ArgDict, self).__init__(min_length=min_length, max_length=max_length)
if spec and value_type:
      raise ValueError('cannot have both spec and value_type')
self.key_type = key_type
self.spec = spec
self.allow_key_only = allow_key_only
self.required_keys = required_keys or []
if not operators:
operators = {'=': value_type}
for op in operators.keys():
if len(op) != 1:
raise ArgumentTypeError(
'Operator [{}] must be one character.'.format(op))
ops = ''.join(six.iterkeys(operators))
key_op_value_pattern = '([^{ops}]+)([{ops}]?)(.*)'.format(
ops=re.escape(ops))
self.key_op_value = re.compile(key_op_value_pattern, re.DOTALL)
self.operators = operators
def _ApplySpec(self, key, value):
if key in self.spec:
if self.spec[key] is None:
if value:
raise ArgumentTypeError('Key [{0}] does not take a value'.format(key))
return None
return self.spec[key](value)
else:
raise ArgumentTypeError(
_GenerateErrorMessage(
'valid keys are [{0}]'.format(
', '.join(sorted(self.spec.keys()))),
user_input=key))
def _ValidateKeyValue(self, key, value, op='='):
"""Converts and validates <key,value> and returns (key,value)."""
if (not op or value is None) and not self.allow_key_only:
raise ArgumentTypeError(
'Bad syntax for dict arg: [{0}]. Please see '
'`gcloud topic flags-file` or `gcloud topic escaping` for '
'information on providing list or dictionary flag values with '
'special characters.'.format(key))
if self.key_type:
try:
key = self.key_type(key)
except ValueError:
raise ArgumentTypeError('Invalid key [{0}]'.format(key))
convert_value = self.operators.get(op, None)
if convert_value:
try:
value = convert_value(value)
except ValueError:
raise ArgumentTypeError('Invalid value [{0}]'.format(value))
if self.spec:
value = self._ApplySpec(key, value)
return key, value
def __call__(self, arg_value): # pylint:disable=missing-docstring
if isinstance(arg_value, dict):
raw_dict = arg_value
arg_dict = collections.OrderedDict()
for key, value in six.iteritems(raw_dict):
key, value = self._ValidateKeyValue(key, value)
arg_dict[key] = value
elif not isinstance(arg_value, six.string_types):
raise ArgumentTypeError('Invalid type [{}] for flag value [{}]'.format(
type(arg_value).__name__, arg_value))
else:
arg_list = super(ArgDict, self).__call__(arg_value)
arg_dict = collections.OrderedDict()
for arg in arg_list:
match = self.key_op_value.match(arg)
# TODO(b/35944028): These exceptions won't present well to the user.
if not match:
raise ArgumentTypeError('Invalid flag value [{0}]'.format(arg))
key, op, value = match.group(1), match.group(2), match.group(3)
key, value = self._ValidateKeyValue(key, value, op=op)
arg_dict[key] = value
for required_key in self.required_keys:
if required_key not in arg_dict:
raise ArgumentTypeError(
'Key [{0}] required in dict arg but not provided'.format(
required_key))
return arg_dict
def GetUsageMsg(self, is_custom_metavar, metavar):
# If we're not using a spec to limit the key values or if metavar
# has been overridden, then use the normal ArgList formatting
if not self.spec or is_custom_metavar:
return super(ArgDict, self).GetUsageMsg(is_custom_metavar, metavar)
msg_list = []
spec_list = sorted(six.iteritems(self.spec))
# First put the spec keys with no value followed by those that expect a
# value
for spec_key, spec_function in spec_list:
if spec_function is None:
if not self.allow_key_only:
raise ArgumentTypeError(
'Key [{0}] specified in spec without a function but '
'allow_key_only is set to False'.format(spec_key))
msg_list.append(spec_key)
for spec_key, spec_function in spec_list:
if spec_function is not None:
msg_list.append('{0}={1}'.format(spec_key, spec_key.upper()))
msg = '[' + '],['.join(msg_list) + ']'
return msg
class UpdateAction(argparse.Action):
r"""Create a single dict value from delimited or repeated flags.
This class is intended to be a more flexible version of
argparse._AppendAction.
For example, with the following flag definition:
parser.add_argument(
'--inputs',
type=arg_parsers.ArgDict(),
action='append')
a caller can specify on the command line flags such as:
--inputs k1=v1,k2=v2
and the result will be a list of one dict:
[{ 'k1': 'v1', 'k2': 'v2' }]
Specifying two separate command line flags such as:
--inputs k1=v1 \
--inputs k2=v2
will produce a list of dicts:
[{ 'k1': 'v1'}, { 'k2': 'v2' }]
The UpdateAction class allows for both of the above user inputs to result
in the same: a single dictionary:
{ 'k1': 'v1', 'k2': 'v2' }
This gives end-users a lot more flexibility in constructing their command
lines, especially when scripting calls.
Note that this class will raise an exception if a key value is specified
more than once. To allow for a key value to be specified multiple times,
use UpdateActionWithAppend.
"""
def OnDuplicateKeyRaiseError(self, key, existing_value=None, new_value=None):
if existing_value is None:
user_input = None
else:
user_input = ', '.join([existing_value, new_value])
raise argparse.ArgumentError(self, _GenerateErrorMessage(
'"{0}" cannot be specified multiple times'.format(key),
user_input=user_input))
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None, # pylint:disable=redefined-builtin
choices=None,
required=False,
help=None, # pylint:disable=redefined-builtin
metavar=None,
onduplicatekey_handler=OnDuplicateKeyRaiseError):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != argparse.OPTIONAL:
raise ValueError('nargs must be %r to supply const' % argparse.OPTIONAL)
self.choices = choices
if isinstance(choices, dict):
choices = sorted(choices.keys())
super(UpdateAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
self.onduplicatekey_handler = onduplicatekey_handler
def _EnsureValue(self, namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
if isinstance(values, dict):
# Get the existing arg value (if any)
items = copy.copy(self._EnsureValue(
namespace, self.dest, collections.OrderedDict()))
# Merge the new key/value pair(s) in
for k, v in six.iteritems(values):
if k in items:
v = self.onduplicatekey_handler(self, k, items[k], v)
items[k] = v
else:
# Get the existing arg value (if any)
items = copy.copy(self._EnsureValue(namespace, self.dest, []))
# Merge the new key/value pair(s) in
for k in values:
if k in items:
self.onduplicatekey_handler(self, k)
else:
items.append(k)
    # Save the merged dict (or list) back onto the namespace
setattr(namespace, self.dest, items)
class UpdateActionWithAppend(UpdateAction):
"""Create a single dict value from delimited or repeated flags.
This class provides a variant of UpdateAction, which allows for users to
append, rather than reject, duplicate key values. For example, the user
can specify:
--inputs k1=v1a --inputs k1=v1b --inputs k2=v2
and the result will be:
{ 'k1': ['v1a', 'v1b'], 'k2': 'v2' }
"""
def OnDuplicateKeyAppend(self, key, existing_value=None, new_value=None):
if existing_value is None:
return key
elif isinstance(existing_value, list):
return existing_value + [new_value]
else:
return [existing_value, new_value]
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None, # pylint:disable=redefined-builtin
choices=None,
required=False,
help=None, # pylint:disable=redefined-builtin
metavar=None,
onduplicatekey_handler=OnDuplicateKeyAppend):
super(UpdateActionWithAppend, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar,
onduplicatekey_handler=onduplicatekey_handler)
class RemainderAction(argparse._StoreAction): # pylint: disable=protected-access
"""An action with a couple of helpers to better handle --.
argparse on its own does not properly handle -- implementation args.
argparse.REMAINDER greedily steals valid flags before a --, and nargs='*' will
bind to [] and not parse args after --. This Action represents arguments to
be passed through to a subcommand after --.
Primarily, this Action provides two utility parsers to help a modified
ArgumentParser parse -- properly.
There is one additional property kwarg:
example: A usage statement used to construct nice additional help.
"""
def __init__(self, *args, **kwargs):
if kwargs['nargs'] is not argparse.REMAINDER:
raise ValueError(
'The RemainderAction should only be used when '
'nargs=argparse.REMAINDER.')
# Create detailed help.
self.explanation = (
"The '--' argument must be specified between gcloud specific args on "
'the left and {metavar} on the right.'
).format(metavar=kwargs['metavar'])
if 'help' in kwargs:
kwargs['help'] += '\n+\n' + self.explanation
if 'example' in kwargs:
kwargs['help'] += ' Example:\n\n' + kwargs['example']
del kwargs['example']
super(RemainderAction, self).__init__(*args, **kwargs)
def _SplitOnDash(self, args):
split_index = args.index('--')
# Remove -- before passing through
return args[:split_index], args[split_index + 1:]
def ParseKnownArgs(self, args, namespace):
"""Binds all args after -- to the namespace."""
# Not [], so that we can distinguish between empty remainder args and
# absent remainder args.
remainder_args = None
if '--' in args:
args, remainder_args = self._SplitOnDash(args)
self(None, namespace, remainder_args)
return namespace, args
def ParseRemainingArgs(self, remaining_args, namespace, original_args):
"""Parses the unrecognized args from the end of the remaining_args.
This method identifies all unrecognized arguments after the last argument
recognized by a parser (but before --). It then either logs a warning and
binds them to the namespace or raises an error, depending on strictness.
Args:
remaining_args: A list of arguments that the parsers did not recognize.
namespace: The Namespace to bind to.
original_args: The full list of arguments given to the top parser,
Raises:
ArgumentError: If there were remaining arguments after the last recognized
argument and this action is strict.
Returns:
A tuple of the updated namespace and unrecognized arguments (before the
last recognized argument).
"""
# Only parse consecutive unknown args from the end of the original args.
# Strip out everything after '--'
if '--' in original_args:
original_args, _ = self._SplitOnDash(original_args)
# Find common suffix between remaining_args and original_args
split_index = 0
for i, (arg1, arg2) in enumerate(
zip(reversed(remaining_args), reversed(original_args))):
if arg1 != arg2:
split_index = len(remaining_args) - i
break
pass_through_args = remaining_args[split_index:]
remaining_args = remaining_args[:split_index]
if pass_through_args:
msg = ('unrecognized args: {args}\n' + self.explanation).format(
args=' '.join(pass_through_args))
raise parser_errors.UnrecognizedArgumentsError(msg)
self(None, namespace, pass_through_args)
return namespace, remaining_args
class StoreOnceAction(argparse.Action):
r"""Create a single dict value from delimited flags.
For example, with the following flag definition:
parser.add_argument(
'--inputs',
type=arg_parsers.ArgDict(),
action=StoreOnceAction)
a caller can specify on the command line flags such as:
--inputs k1=v1,k2=v2
and the result will be a list of one dict:
[{ 'k1': 'v1', 'k2': 'v2' }]
Specifying two separate command line flags such as:
--inputs k1=v1 \
--inputs k2=v2
will raise an exception.
Note that this class will raise an exception if a key value is specified
more than once. To allow for a key value to be specified multiple times,
use UpdateActionWithAppend.
"""
def OnSecondArgumentRaiseError(self):
raise argparse.ArgumentError(self, _GenerateErrorMessage(
'"{0}" argument cannot be specified multiple times'.format(self.dest)))
def __init__(self, *args, **kwargs):
self.dest_is_populated = False
super(StoreOnceAction, self).__init__(*args, **kwargs)
# pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
# Make sure no existing arg value exist
if self.dest_is_populated:
self.OnSecondArgumentRaiseError()
self.dest_is_populated = True
setattr(namespace, self.dest, values)
class _HandleNoArgAction(argparse.Action):
"""This class should not be used directly, use HandleNoArgAction instead."""
def __init__(self, none_arg, deprecation_message, **kwargs):
super(_HandleNoArgAction, self).__init__(**kwargs)
self.none_arg = none_arg
self.deprecation_message = deprecation_message
def __call__(self, parser, namespace, value, option_string=None):
if value is None:
log.warning(self.deprecation_message)
if self.none_arg:
setattr(namespace, self.none_arg, True)
setattr(namespace, self.dest, value)
def HandleNoArgAction(none_arg, deprecation_message):
"""Creates an argparse.Action that warns when called with no arguments.
This function creates an argparse action which can be used to gracefully
deprecate a flag using nargs=?. When a flag is created with this action, it
simply log.warning()s the given deprecation_message and then sets the value of
the none_arg to True.
  This means that if you use the none_arg no_foo and attach this action to foo,
  then `--foo` (with no argument) will have the same effect as `--no-foo`.
Args:
none_arg: a boolean argument to write to. For --no-foo use "no_foo"
deprecation_message: msg to tell user to stop using with no arguments.
Returns:
An argparse action.
"""
def HandleNoArgActionInit(**kwargs):
return _HandleNoArgAction(none_arg, deprecation_message, **kwargs)
return HandleNoArgActionInit
class FileContents(object):
"""Creates an argparse type that reads the contents of a file or stdin.
This is similar to argparse.FileType, but unlike FileType it does not leave
a dangling file handle open. The argument stored in the argparse Namespace
is the file's contents.
Attributes:
binary: bool, If True, the contents of the file will be returned as bytes.
Returns:
A function that accepts a filename, or "-" representing that stdin should be
used as input.
"""
def __init__(self, binary=False):
self.binary = binary
def __call__(self, name):
"""Return the contents of the file with the specified name.
If name is "-", stdin is read until EOF. Otherwise, the named file is read.
Args:
name: str, The file name, or '-' to indicate stdin.
Returns:
The contents of the file.
Raises:
ArgumentTypeError: If the file cannot be read or is too large.
"""
try:
return console_io.ReadFromFileOrStdin(name, binary=self.binary)
except files.Error as e:
raise ArgumentTypeError(e)
class YAMLFileContents(object):
"""Creates an argparse type that reads the contents of a YAML or JSON file.
This is similar to argparse.FileType, but unlike FileType it does not leave
a dangling file handle open. The argument stored in the argparse Namespace
is the file's contents parsed as a YAML object.
Attributes:
validator: function, Function that will validate the provided input
file contents.
Returns:
A function that accepts a filename that should be parsed as a YAML
or JSON file.
"""
def __init__(self, validator=None):
if validator and not callable(validator):
raise ArgumentTypeError('Validator must be callable')
self.validator = validator
def _AssertJsonLike(self, yaml_data):
if not (yaml.dict_like(yaml_data) or yaml.list_like(yaml_data)):
raise ArgumentTypeError('Invalid YAML/JSON Data [{}]'.format(yaml_data))
def _LoadSingleYamlDocument(self, name):
"""Returns the yaml data for a file or from stdin for a single document.
YAML allows multiple documents in a single file by using `---` as a
separator between documents. See https://yaml.org/spec/1.1/#id857577.
However, some YAML-generating tools generate a single document followed by
this separator before ending the file.
This method supports the case of a single document in a file that contains
superfluous document separators, but still throws if multiple documents are
actually found.
Args:
name: str, The file path to the file or "-" to read from stdin.
Returns:
The contents of the file parsed as a YAML data object.
"""
if name == '-':
stdin = console_io.ReadStdin() # Save to potentially reuse below
yaml_data = yaml.load_all(stdin)
else:
yaml_data = yaml.load_all_path(name)
yaml_data = [d for d in yaml_data if d is not None] # Remove empty docs
# Return the single document if only 1 is found.
if len(yaml_data) == 1:
return yaml_data[0]
# Multiple (or 0) documents found. Try to parse again with single-document
# loader so its error is propagated rather than creating our own.
if name == '-':
return yaml.load(stdin)
else:
return yaml.load_path(name)
def __call__(self, name):
"""Load YAML data from file path (name) or stdin.
If name is "-", stdin is read until EOF. Otherwise, the named file is read.
If self.validator is set, call it on the yaml data once it is loaded.
Args:
name: str, The file path to the file.
Returns:
The contents of the file parsed as a YAML data object.
Raises:
ArgumentTypeError: If the file cannot be read or is not a JSON/YAML like
object.
ValueError: If file content fails validation.
"""
try:
yaml_data = self._LoadSingleYamlDocument(name)
self._AssertJsonLike(yaml_data)
if self.validator:
if not self.validator(yaml_data):
raise ValueError('Invalid YAML/JSON content [{}]'.format(yaml_data))
return yaml_data
except (yaml.YAMLParseError, yaml.FileLoadError) as e:
raise ArgumentTypeError(e)
class StoreTrueFalseAction(argparse._StoreTrueAction): # pylint: disable=protected-access
"""Argparse action that acts as a combination of store_true and store_false.
Calliope already gives any bool-type arguments the standard and `--no-`
variants. In most cases we only want to document the option that does
something---if we have `default=False`, we don't want to show `--no-foo`,
since it won't do anything.
But in some cases we *do* want to show both variants: one example is when
`--foo` means "enable," `--no-foo` means "disable," and neither means "do
nothing." The obvious way to represent this is `default=None`; however, (1)
the default value of `default` is already None, so most boolean actions would
have this setting by default (not what we want), and (2) we still want an
option to have this True/False/None behavior *without* the flag documentation.
To get around this, we have an opt-in version of the same thing that documents
both the flag and its inverse.
"""
def __init__(self, *args, **kwargs):
super(StoreTrueFalseAction, self).__init__(*args, default=None, **kwargs)
def StoreFilePathAndContentsAction(binary=False):
"""Returns Action that stores both file content and file path.
Args:
binary: boolean, whether or not this is a binary file.
Returns:
An argparse action.
"""
class Action(argparse.Action):
"""Stores both file content and file path.
Stores file contents under original flag DEST and stores file path under
DEST_path.
"""
def __init__(self, *args, **kwargs):
super(Action, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, value, option_string=None):
"""Stores the contents of the file and the file name in namespace."""
try:
content = console_io.ReadFromFileOrStdin(value, binary=binary)
except files.Error as e:
raise ArgumentTypeError(e)
setattr(namespace, self.dest, content)
new_dest = '{}_path'.format(self.dest)
setattr(namespace, new_dest, value)
return Action
|
[
"saranraju90@gmail.com"
] |
saranraju90@gmail.com
|
88c38efa8ff0a8056b6fc309011e034888426fa0
|
26acc7e23024098661a42da37e2cb4ed56c21b44
|
/dgp/genera/load/loader.py
|
daf5ca8acee012f9dd328fd48ef0fb2baf85a38a
|
[
"MIT"
] |
permissive
|
dataspot/dgp
|
80536c0e296570c109511de3dae6e0297bb8b0fd
|
e86d604c8af5534985f9b788ba809facbc325152
|
refs/heads/master
| 2023-03-16T05:15:38.362702
| 2023-03-09T07:07:28
| 2023-03-09T07:07:28
| 169,378,970
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,879
|
py
|
import os
import json
import requests
from hashlib import md5
from dataflows import Flow, load, dump_to_path
from dataflows.base.schema_validator import ignore
from ...core import BaseDataGenusProcessor, Required, Validator, ConfigurableDGP
from .analyzers import FileFormatDGP, StructureDGP
from ...config.consts import CONFIG_URL, CONFIG_PUBLISH_ALLOWED, RESOURCE_NAME
from ...config.log import logger
class LoaderDGP(BaseDataGenusProcessor):
PRE_CHECKS = Validator(
Required(CONFIG_URL, 'Source data URL or path')
)
def init(self):
self.steps = self.init_classes([
FileFormatDGP,
StructureDGP,
])
def hash_key(self, *args):
data = json.dumps(args, sort_keys=True, ensure_ascii=False)
return md5(data.encode('utf8')).hexdigest()
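    # Example (illustrative): hash_key returns the md5 hex digest of the
    # JSON-serialized arguments (with sorted keys), so identical
    # source/structure/publish configurations map to the same .cache/ entry.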
def flow(self):
if len(self.errors) == 0:
config = self.config._unflatten()
source = config['source']
ref_hash = self.hash_key(source, config['structure'], config.get('publish'))
cache_path = os.path.join('.cache', ref_hash)
datapackage_path = os.path.join(cache_path, 'datapackage.json')
structure_params = self.context._structure_params()
http_session = self.context.http_session()
loader = load(source.pop('path'), validate=False,
name=RESOURCE_NAME,
**source, **structure_params,
http_session=http_session,
http_timeout=120,
infer_strategy=load.INFER_PYTHON_TYPES,
cast_strategy=load.CAST_DO_NOTHING,
limit_rows=(
None
if self.config.get(CONFIG_PUBLISH_ALLOWED)
else 5000
))
if self.config.get(CONFIG_PUBLISH_ALLOWED):
return Flow(
loader,
)
else:
if not os.path.exists(datapackage_path):
logger.info('Caching source data into %s', cache_path)
Flow(
loader,
dump_to_path(cache_path, validator_options=dict(on_error=ignore)),
# printer(),
).process()
logger.info('Using cached source data from %s', cache_path)
return Flow(
load(datapackage_path, resources=RESOURCE_NAME),
)
class PostLoaderDGP(ConfigurableDGP):
def init(self):
super().init('loading', per_taxonomy=False)
self._flows = None
class PreLoaderDGP(ConfigurableDGP):
def init(self):
super().init('preloading', per_taxonomy=False)
self._flows = None
|
[
"adam.kariv@gmail.com"
] |
adam.kariv@gmail.com
|
6dadb8446146a85cfb8ae39894b3b97d9a46708d
|
24108066b4b5b6ecd02c7fb499d970eab1877380
|
/codeforces/queueatschool.py
|
b500a8dde8579bfc2827cd50ca3f5685bf164946
|
[] |
no_license
|
vishu1994/Datastructures-And-Algorithms
|
93fc7e1d1f5fac775b6c50cb8cafd1a4f3060544
|
35bfc28edd8ebf1c1724be41402b1befd478aed4
|
refs/heads/master
| 2020-03-30T19:26:36.413735
| 2019-07-27T10:31:47
| 2019-07-27T10:31:47
| 151,542,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
n, t = map(int, input().split())
myqueue=list(input())
for i in range(t):
j=0
while j<len(myqueue)-1:
if myqueue[j]=="B" and myqueue[j+1]=="G":
myqueue[j],myqueue[j+1]="G","B"
j=j+2
else:
j=j+1
for i in myqueue:
print(i,end="")
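# Example (illustrative): for input "5 1" and "BGGBG" this prints "GBGGB";
# each second, every boy (B) standing directly in front of a girl (G) swaps
# places with her.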
|
[
"vishalpandey801@gmail.com"
] |
vishalpandey801@gmail.com
|
449a5b4d464ce12c138b35ee87635fe1817540fc
|
13d3a44447f6a7d8b0d61c2fb445fa6aa76c2f95
|
/stackdio/core/viewsets.py
|
3708da69f32348e2a5e6effb26d7be236dfe77f5
|
[
"Apache-2.0"
] |
permissive
|
stackdio/stackdio
|
6ba4ad6c2ef10a323cbd955e6d6d5bd7917c17c2
|
84be621705031d147e104369399b872d5093ef64
|
refs/heads/master
| 2021-04-09T16:36:38.220557
| 2018-08-13T18:25:29
| 2018-08-13T18:25:29
| 17,679,603
| 9
| 11
|
Apache-2.0
| 2020-03-19T17:21:45
| 2014-03-12T19:02:06
|
Python
|
UTF-8
|
Python
| false
| false
| 13,461
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.auth.models import Group
from django.http import Http404
from guardian.shortcuts import get_groups_with_perms, get_users_with_perms, remove_perm
from rest_framework import viewsets
from rest_framework.serializers import ListField, SlugRelatedField, ValidationError
from stackdio.api.users.models import get_user_queryset
from stackdio.core import fields, mixins, serializers
from stackdio.core.config import StackdioConfigException
from stackdio.core.permissions import StackdioPermissionsModelPermissions
from stackdio.core.shortcuts import get_groups_with_model_perms, get_users_with_model_perms
try:
from django_auth_ldap.backend import LDAPBackend
except ImportError:
LDAPBackend = None
logger = logging.getLogger(__name__)
def _filter_perms(available_perms, perms):
ret = []
for perm in perms:
if perm in available_perms:
ret.append(perm)
return ret
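# Example (illustrative): _filter_perms(['view', 'edit'], ['edit', 'delete'])
# returns ['edit']: only the requested perms that are actually available.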
class UserSlugRelatedField(SlugRelatedField):
def to_internal_value(self, data):
try:
return super(UserSlugRelatedField, self).to_internal_value(data)
except ValidationError:
if settings.LDAP_ENABLED:
if LDAPBackend is None:
raise StackdioConfigException('LDAP is enabled, but django_auth_ldap isn\'t '
'installed. Please install django_auth_ldap')
# Grab the ldap user and try again
user = LDAPBackend().populate_user(data)
if user is not None:
return super(UserSlugRelatedField, self).to_internal_value(data)
# Nothing worked, just re-raise the exception
raise
class StackdioBasePermissionsViewSet(mixins.BulkUpdateModelMixin, viewsets.ModelViewSet):
"""
Viewset for creating permissions endpoints
"""
user_or_group = None
model_or_object = None
lookup_value_regex = r'[\w.@+-]+'
parent_lookup_field = 'pk'
parent_lookup_url_kwarg = None
def get_model_name(self):
raise NotImplementedError('`get_model_name()` must be implemented.')
def get_app_label(self):
raise NotImplementedError('`get_app_label()` must be implemented.')
def get_serializer_class(self):
user_or_group = self.get_user_or_group()
model_or_object = self.get_model_or_object()
model_name = self.get_model_name()
app_label = self.get_app_label()
super_cls = self.switch_model_object(serializers.StackdioModelPermissionsSerializer,
serializers.StackdioObjectPermissionsSerializer)
default_parent_lookup_url_kwarg = 'parent_{}'.format(self.parent_lookup_field)
url_field_kwargs = {
'view_name': 'api:{0}:{1}-{2}-{3}-permissions-detail'.format(
app_label,
model_name,
model_or_object,
user_or_group
),
'permission_lookup_field': self.lookup_field,
'permission_lookup_url_kwarg': self.lookup_url_kwarg or self.lookup_field,
'lookup_field': self.parent_lookup_field,
'lookup_url_kwarg': self.parent_lookup_url_kwarg or default_parent_lookup_url_kwarg,
}
url_field_cls = self.switch_model_object(
fields.HyperlinkedModelPermissionsField,
fields.HyperlinkedObjectPermissionsField,
)
# Create a class
class StackdioUserPermissionsSerializer(super_cls):
user = UserSlugRelatedField(slug_field='username', queryset=get_user_queryset())
url = url_field_cls(**url_field_kwargs)
permissions = ListField()
class Meta(super_cls.Meta):
update_lookup_field = 'user'
class StackdioGroupPermissionsSerializer(super_cls):
group = SlugRelatedField(slug_field='name', queryset=Group.objects.all())
url = url_field_cls(**url_field_kwargs)
permissions = ListField()
class Meta(super_cls.Meta):
update_lookup_field = 'group'
return self.switch_user_group(StackdioUserPermissionsSerializer,
StackdioGroupPermissionsSerializer)
def get_user_or_group(self):
assert self.user_or_group in ('user', 'group'), (
"'%s' should include a `user_or_group` attribute that is one of 'user' or 'group'."
% self.__class__.__name__
)
return self.user_or_group
def switch_user_group(self, if_user, if_group):
return {
'user': if_user,
'group': if_group,
}.get(self.get_user_or_group())
def get_model_or_object(self):
assert self.model_or_object in ('model', 'object'), (
"'%s' should include a `model_or_object` attribute that is one of 'model' or 'object'."
% self.__class__.__name__
)
return self.model_or_object
def switch_model_object(self, if_model, if_object):
return {
'model': if_model,
'object': if_object,
}.get(self.get_model_or_object())
def _transform_perm(self, model_name):
        def do_transform(item):
# pylint: disable=unused-variable
perm, sep, empty = item.partition('_' + model_name)
return perm
        return do_transform
def get_object(self):
queryset = self.get_queryset()
url_kwarg = self.lookup_url_kwarg or self.lookup_field
name_attr = self.switch_user_group('username', 'name')
for obj in queryset:
auth_obj = obj[self.get_user_or_group()]
if self.kwargs[url_kwarg] == getattr(auth_obj, name_attr):
return obj
raise Http404('No permissions found for %s' % self.kwargs[url_kwarg])
class StackdioModelPermissionsViewSet(StackdioBasePermissionsViewSet):
model_cls = None
model_or_object = 'model'
permission_classes = (StackdioPermissionsModelPermissions,)
def get_model_cls(self):
assert self.model_cls, (
"'%s' should include a `model_cls` attribute or override the `get_model_cls()` method."
% self.__class__.__name__
)
return self.model_cls
def get_model_name(self):
return self.get_model_cls()._meta.model_name
def get_app_label(self):
ret = self.get_model_cls()._meta.app_label
if ret == 'auth':
# one-off thing, since users/groups are in the `users` app, not `auth`
return 'users'
return ret
def get_model_permissions(self):
return getattr(self.get_model_cls(),
'model_permissions',
getattr(self, 'model_permissions', ()))
def get_permissions(self):
"""
Instantiates and returns the list of permissions that this view requires.
"""
ret = []
for permission_cls in self.permission_classes:
permission = permission_cls()
# Inject our model_cls into the permission
if isinstance(permission, StackdioPermissionsModelPermissions) \
and permission.model_cls is None:
permission.model_cls = self.model_cls
ret.append(permission)
return ret
def get_queryset(self): # pylint: disable=method-hidden
model_cls = self.get_model_cls()
model_name = model_cls._meta.model_name
model_perms = self.get_model_permissions()
# Grab the perms for either the users or groups
perm_map_func = self.switch_user_group(
lambda: get_users_with_model_perms(model_cls, attach_perms=True,
with_group_users=False),
lambda: get_groups_with_model_perms(model_cls, attach_perms=True),
)
# Do this as a function so we don't fetch both the user AND group permissions on each
# request
perm_map = perm_map_func()
ret = []
sorted_perms = sorted(perm_map.items(), key=lambda x: getattr(x[0], self.lookup_field))
for auth_obj, perms in sorted_perms:
new_perms = [self._transform_perm(model_name)(perm) for perm in perms]
ret.append({
self.get_user_or_group(): auth_obj,
'permissions': _filter_perms(model_perms, new_perms),
})
return ret
def list(self, request, *args, **kwargs):
response = super(StackdioModelPermissionsViewSet, self).list(request, *args, **kwargs)
# add available permissions to the response
response.data['available_permissions'] = sorted(self.get_model_permissions())
return response
def perform_create(self, serializer):
serializer.save(model_cls=self.get_model_cls())
def perform_update(self, serializer):
serializer.save(model_cls=self.get_model_cls())
def perform_destroy(self, instance):
model_cls = self.get_model_cls()
app_label = model_cls._meta.app_label
model_name = model_cls._meta.model_name
for perm in instance['permissions']:
remove_perm('%s.%s_%s' % (app_label, perm, model_name),
instance[self.get_user_or_group()])
class StackdioModelUserPermissionsViewSet(StackdioModelPermissionsViewSet):
user_or_group = 'user'
lookup_field = 'username'
lookup_url_kwarg = 'username'
class StackdioModelGroupPermissionsViewSet(StackdioModelPermissionsViewSet):
user_or_group = 'group'
lookup_field = 'name'
lookup_url_kwarg = 'groupname'
class StackdioObjectPermissionsViewSet(StackdioBasePermissionsViewSet):
"""
Viewset for creating permissions endpoints
"""
model_or_object = 'object'
def get_permissioned_object(self):
raise NotImplementedError('`get_permissioned_object()` must be implemented.')
def get_model_name(self):
return self.get_permissioned_object()._meta.model_name
def get_app_label(self):
ret = self.get_permissioned_object()._meta.app_label
if ret == 'auth':
# one-off thing, since users/groups are in the `users` app, not `auth`
return 'users'
return ret
def get_object_permissions(self):
return getattr(self.get_permissioned_object(),
'object_permissions',
getattr(self, 'object_permissions', ()))
def get_queryset(self): # pylint: disable=method-hidden
obj = self.get_permissioned_object()
model_name = obj._meta.model_name
object_perms = self.get_object_permissions()
# Grab the perms for either the users or groups
perm_map_func = self.switch_user_group(
lambda: get_users_with_perms(obj, attach_perms=True,
with_superusers=False, with_group_users=False),
lambda: get_groups_with_perms(obj, attach_perms=True),
)
perm_map = perm_map_func()
ret = []
sorted_perms = sorted(perm_map.items(), key=lambda x: getattr(x[0], self.lookup_field))
for auth_obj, perms in sorted_perms:
new_perms = [self._transform_perm(model_name)(perm) for perm in perms]
ret.append({
self.get_user_or_group(): auth_obj,
'permissions': _filter_perms(object_perms, new_perms),
})
return ret
def list(self, request, *args, **kwargs):
response = super(StackdioObjectPermissionsViewSet, self).list(request, *args, **kwargs)
# add available permissions to the response
response.data['available_permissions'] = sorted(self.get_object_permissions())
return response
def perform_create(self, serializer):
serializer.save(object=self.get_permissioned_object())
def perform_update(self, serializer):
serializer.save(object=self.get_permissioned_object())
def perform_destroy(self, instance):
obj = self.get_permissioned_object()
app_label = obj._meta.app_label
model_name = obj._meta.model_name
for perm in instance['permissions']:
remove_perm('%s.%s_%s' % (app_label, perm, model_name),
instance[self.get_user_or_group()],
obj)
# pylint: disable=abstract-method
class StackdioObjectUserPermissionsViewSet(StackdioObjectPermissionsViewSet):
user_or_group = 'user'
lookup_field = 'username'
lookup_url_kwarg = 'username'
class StackdioObjectGroupPermissionsViewSet(StackdioObjectPermissionsViewSet):
user_or_group = 'group'
lookup_field = 'name'
lookup_url_kwarg = 'groupname'
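# Illustrative usage sketch (not part of the original module): wiring one of the
# model-permission viewsets into a DRF router. `MyModel` and the route names
# below are hypothetical placeholders.
# from rest_framework import routers
# class MyModelUserPermissionsViewSet(StackdioModelUserPermissionsViewSet):
#     model_cls = MyModel  # hypothetical model exposing a `model_permissions` tuple
# router = routers.SimpleRouter()
# router.register(r'permissions/users', MyModelUserPermissionsViewSet,
#                 basename='mymodel-model-user-permissions')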
|
[
"clark.perkins@digitalreasoning.com"
] |
clark.perkins@digitalreasoning.com
|
56441abdcb0c3e4c4bc9e6e51c066f53b4474751
|
d09fd96bbc931fbb8522e5c991973f064a4ded50
|
/baxter/devel/lib/python2.7/dist-packages/baxter_core_msgs/msg/_AssemblyStates.py
|
e02cc99b32b08bd3ff9a2b09c7d226c451abe8d2
|
[] |
no_license
|
rymonyu/EE4-Robotics
|
b3827ba0dff5bdfdd1e47fe07a40e955c5226f38
|
6cf9272abd7fe8a074dc74a032f6e0b35edb8548
|
refs/heads/master
| 2020-08-22T15:09:39.706809
| 2019-12-15T23:35:45
| 2019-12-15T23:35:45
| 216,420,098
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
/home/rob/baxter/devel/.private/baxter_core_msgs/lib/python2.7/dist-packages/baxter_core_msgs/msg/_AssemblyStates.py
|
[
"rymonyu@gmail.com"
] |
rymonyu@gmail.com
|
107fd2eb3a81f9b2607eb4a1121d912182da8ca1
|
a120911eb088a02694e87baeaa996ded22704aab
|
/bookstore/migrations/0003_auto_20170315_2142.py
|
8d1bd6efe973c4c4978227435b76ae79eb6bad6f
|
[] |
no_license
|
muhilvarnan/django-bookstore
|
684acb534ca68b7651bedefae1b15622e2454208
|
812913626350a27846b6c1546a202ec538d40c5f
|
refs/heads/master
| 2021-01-22T18:51:03.877940
| 2017-03-15T22:10:35
| 2017-03-15T22:10:35
| 85,125,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-15 21:42
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('bookstore', '0002_remove_books_isbn_number'),
]
operations = [
migrations.RemoveField(
model_name='books',
name='publication_year',
),
migrations.RemoveField(
model_name='orders',
name='purchase_items',
),
migrations.AddField(
model_name='orders',
name='book',
field=models.ManyToManyField(to='bookstore.Books'),
),
migrations.AlterField(
model_name='orders',
name='customer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bookstore.Customers'),
),
]
|
[
"muhilvarnan.v@gmail.com"
] |
muhilvarnan.v@gmail.com
|
16e33a5a72cc6d87ecf6a25897cdc01209c3cf9c
|
cd3e72bad360d7426b6504e2e74321f47bc23eeb
|
/levelupreports/views/users/gamesbyuser.py
|
6d2e6130d8838c693d6079219540a0198f4e0765
|
[] |
no_license
|
SLLittrell/Levelup-server
|
e2eb662bf16ee9f2f53603820b03f17cb5bde1a2
|
2b582469f5888c7dcc2a1d506b4d1c7c140b3bb8
|
refs/heads/main
| 2023-05-14T18:07:41.513042
| 2021-05-27T15:50:10
| 2021-05-27T15:50:10
| 364,028,553
| 0
| 0
| null | 2021-05-27T15:50:11
| 2021-05-03T18:46:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,316
|
py
|
"""Module for generating games by user report"""
import sqlite3
from django.shortcuts import render
from levelupapi.models import Game
from levelupreports.views import Connection
def usergame_list(request):
"""Function to build an HTML report of games by user"""
if request.method == 'GET':
# Connect to project database
with sqlite3.connect(Connection.db_path) as conn:
conn.row_factory = sqlite3.Row
db_cursor = conn.cursor()
# Query for all games, with related user info.
db_cursor.execute("""
SELECT
g.id,
g.title,
g.maker,
g.game_category_id,
g.number_of_players,
g.skill_level,
u.id user_id,
u.first_name || ' ' || u.last_name AS full_name
FROM
levelupapi_game g
JOIN
levelupapi_gamer gr ON g.gamer_id = gr.id
JOIN
auth_user u ON gr.user_id = u.id
""")
dataset = db_cursor.fetchall()
# Take the flat data from the database, and build the
# following data structure for each gamer.
#
# {
# 1: {
# "id": 1,
# "full_name": "Admina Straytor",
# "games": [
# {
# "id": 1,
# "title": "Foo",
# "maker": "Bar Games",
# "skill_level": 3,
# "number_of_players": 4,
# "gametype_id": 2
# }
# ]
# }
# }
games_by_user = {}
for row in dataset:
                # Create a Game instance and set its properties
game = Game()
game.title = row["title"]
game.maker = row["maker"]
game.skill_level = row["skill_level"]
game.number_of_players = row["number_of_players"]
game.game_category_id = row["game_category_id"]
# Store the user's id
uid = row["user_id"]
# If the user's id is already a key in the dictionary...
if uid in games_by_user:
# Add the current game to the `games` list for it
games_by_user[uid]['games'].append(game)
else:
# Otherwise, create the key and dictionary value
games_by_user[uid] = {}
games_by_user[uid]["id"] = uid
games_by_user[uid]["full_name"] = row["full_name"]
games_by_user[uid]["games"] = [game]
# Get only the values from the dictionary and create a list from them
list_of_users_with_games = games_by_user.values()
# Specify the Django template and provide data context
template = 'users/list_with_games.html'
context = {
'usergame_list': list_of_users_with_games
}
return render(request, template, context)
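# URL wiring sketch (illustrative; the project's actual urls.py is not shown here):
# from django.urls import path
# urlpatterns = [path('reports/usergames', usergame_list)]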
|
[
"stacey.littrell@gmail.com"
] |
stacey.littrell@gmail.com
|
205d88ba56174f27a86af32366448306598b91ae
|
5839955687eefea132172ef47f76963ff219e6ef
|
/DeviceManagement/migrations/0001_initial.py
|
086039bc932336985973f00381bd8ab71fb43522
|
[] |
no_license
|
echo6120/DeviceManagement
|
2724bc3d849a898f2d17e290194dfca39c737a88
|
7b67f8d567620b102914e38f7f41bf23f2adbfda
|
refs/heads/master
| 2020-12-22T20:30:04.790143
| 2020-01-29T07:17:22
| 2020-01-29T07:17:22
| 236,923,453
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
# Generated by Django 2.0 on 2020-01-26 09:14
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ArticleColumn',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('System', models.CharField(max_length=200)),
('created', models.DateField(auto_now_add=True)),
],
),
]
|
[
"jingyu@rd.netease.com"
] |
jingyu@rd.netease.com
|
504975487133379c5ad90d41e592ecc7584e58ac
|
f69af7fb96d29edc5d7bd7424acfa9078ba5047d
|
/models/networks.py
|
95fbfab2f1a59fdeb3945ea0396ea30a6b8e80dc
|
[
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
TrendingTechnology/PeeledHuman
|
ece77dcf3eef81990be720fd4a8e72db2cb5f6d9
|
ab7bff2c03b22774ecea4bc4ec3ae214da654dd5
|
refs/heads/master
| 2023-05-08T09:53:23.413879
| 2021-05-21T12:22:20
| 2021-05-21T12:22:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,561
|
py
|
import torch
import torch.nn as nn
from torch.nn import init
import functools
import copy
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = lambda x: Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
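# Quick illustration (added comment, not in the original file): the returned
# callable builds a layer for a given channel count, e.g.
#   norm_layer = get_norm_layer('instance')
#   layer = norm_layer(64)  # nn.InstanceNorm2d(64, affine=False, track_running_stats=False)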
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.niter> epochs
and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
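        # NOTE (added): convert_model is not imported in this file; it presumably
        # comes from a SyncBatchNorm helper (e.g. sync_batchnorm.convert_model).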
net = convert_model(net)
net.cuda()
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | resnet_18blocks
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'resnet_18blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=18)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70x70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
    The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
            prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
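# Usage sketch (illustrative, not part of the original file): discriminator loss
# on real and fake batches with the vanilla objective.
# criterion = GANLoss('vanilla')
# loss_real = criterion(netD(real_images), True)
# loss_fake = criterion(netD(fake_images.detach()), False)
# loss_D = 0.5 * (loss_real + loss_fake)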
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
        constant (float)            -- the constant used in formula (||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
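# Usage sketch (illustrative): adding the WGAN-GP term to a discriminator update;
# `netD`, `real`, `fake`, and `device` are assumed to exist.
# gp, _ = cal_gradient_penalty(netD, real, fake, device, type='mixed')
# loss_D = netD(fake.detach()).mean() - netD(real).mean() + gp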
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
model_depth = []
model_rgb = []
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model_depth += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model_rgb += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model_depth += [nn.ReflectionPad2d(3)]
model_depth += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model_depth += [nn.Sigmoid()]
model_rgb += [nn.ReflectionPad2d(3)]
model_rgb += [nn.Conv2d(ngf, 3*(output_nc-1), kernel_size=7, padding=0)]
# model_rgb += [nn.Conv2d(ngf, 3*output_nc, kernel_size=7, padding=0)]
model_rgb += [nn.Tanh()]
self.model = nn.Sequential(*model)
self.model_depth = nn.Sequential(*model_depth)
self.model_rgb = nn.Sequential(*model_rgb)
def forward(self, input):
"""Standard forward"""
downsample = self.model(input)
return self.model_rgb(downsample), self.model_depth(downsample)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
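# Minimal end-to-end sketch (added for illustration; sizes and options are
# arbitrary): build a generator and a discriminator on CPU and push a dummy
# batch through both.
if __name__ == '__main__':
    netG = define_G(input_nc=3, output_nc=4, ngf=64, netG='resnet_9blocks', norm='instance')
    netD = define_D(input_nc=3, ndf=64, netD='basic', norm='instance')
    dummy = torch.randn(1, 3, 256, 256)
    rgb, depth = netG(dummy)  # rgb: (1, 9, 256, 256), depth: (1, 4, 256, 256)
    print(rgb.shape, depth.shape, netD(dummy).shape)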
|
[
"rohanchacko007@gmail.com"
] |
rohanchacko007@gmail.com
|
56b0c048589ed3ef8f13303160de9e5ae6e672df
|
9701287f1cc7734d31c898708581b15a41916e36
|
/backend/app/crud/inventory.py
|
5dc66359f2914fdfc5f4b7ef88503d0e6e4b1ba9
|
[] |
no_license
|
cedric0306/fastERP
|
a40c6d3bd3f07f65d2d8c1a440d8930f20bf4aee
|
d87824e945b01bc1969c9e2fdea3f243f6240a2f
|
refs/heads/main
| 2023-07-27T07:44:58.711211
| 2021-08-27T15:14:08
| 2021-08-27T15:14:08
| 564,331,443
| 1
| 0
| null | 2022-11-10T13:44:17
| 2022-11-10T13:44:16
| null |
UTF-8
|
Python
| false
| false
| 1,833
|
py
|
from sqlalchemy.orm import Session
from fastapi import Depends
from ..models import Inventory as InventoryModel, User
from ..schemas.inventory import InventoryCreate, InventoryDelete, InventoryUpdate
from ..dependencies import get_current_user
# INVENTORY
def get_inventory(db: Session, inventory_id: int):
return db.query(InventoryModel).filter(InventoryModel.id == inventory_id).first()
def get_inventoryes(db: Session, skip: int = 0, limit: int = 100):
return db.query(InventoryModel).offset(skip).limit(limit).all()
def create_inventory(db: Session, inventory: InventoryCreate, current_user: User):
db_inventory = InventoryModel(date=inventory.date,
description=inventory.description,
created_on=inventory.created_on,
user_id=current_user.id,
status=inventory.status)
db.add(db_inventory)
db.commit()
return db_inventory
def update_inventory(db: Session, inventory: InventoryUpdate, current_user: User):
inventory_data = db.query(InventoryModel).filter(
InventoryModel.id == inventory.id).first()
inventory_data.date = inventory.date
inventory_data.description = inventory.description
    inventory_data.user_id = current_user.id
inventory_data.status = inventory.status
db.commit()
db.refresh(inventory_data)
return inventory_data
def delete_inventory(db: Session, inventory: InventoryDelete):
inventory_data = db.query(InventoryModel).filter(
InventoryModel.id == inventory.id).first()
if inventory_data is None:
return None
else:
db.delete(inventory_data)
db.commit()
return inventory_data
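# Illustrative FastAPI route sketch (not part of the original module); `router`
# and `get_db` are hypothetical names for the app's router and session dependency.
# @router.post("/inventory/")
# def create(inventory: InventoryCreate,
#            db: Session = Depends(get_db),
#            current_user: User = Depends(get_current_user)):
#     return create_inventory(db, inventory, current_user)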
|
[
"wasuaje@shorecg.com"
] |
wasuaje@shorecg.com
|
33e88efe4e627f6559fbe2ae3e666d6cd80bb96a
|
25db8b32ecda47a22a8a1ae4551e2378e4d576cf
|
/rest/serializers.py
|
48d3eeebe7c5ac11184608c3cbe7c1f91bd0730c
|
[] |
no_license
|
viperfx/ng-forum
|
4754ca69d699ad466e836b28bda68d9d84e0cd34
|
5a55692122b91876104c209a73bab05f7318c3ff
|
refs/heads/master
| 2021-01-01T19:42:18.879573
| 2013-11-08T15:27:32
| 2013-11-08T15:27:32
| 13,254,263
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from rest.models import Forum, Thread, Post
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'groups')
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ('url', 'name')
class ForumSerializer(serializers.ModelSerializer):
class Meta:
model = Forum
depth=1
fields = ('id','title', 'threads',)
class ThreadSerializer(serializers.ModelSerializer):
class Meta:
model = Thread
depth=2
fields = ('id','title', 'forum', 'body', 'creator','created', 'posts')
class PostSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = ('thread', 'body', 'creator')
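# Usage sketch (illustrative, not part of the original file): serializing a queryset.
# serializer = ForumSerializer(Forum.objects.all(), many=True)
# serializer.data  # -> [{'id': ..., 'title': ..., 'threads': [...]}, ...]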
|
[
"tharshan09@gmail.com"
] |
tharshan09@gmail.com
|
282cc009f6f0e3ea9db0caeb7c910203e582bc4d
|
13df9ce30c3b6999f38ccf46ea9a85f3fa9f44a9
|
/reports/forms.py
|
75e02c96731b1abb0eb6f31b575fa2e133c1e6a8
|
[] |
no_license
|
PavelM87/tf_idf_app
|
37b5598f7f46af308f24733a145b8308610a0797
|
6b36e516b2daefd89b52765f1244857d16a4efcd
|
refs/heads/master
| 2023-07-26T16:03:33.165133
| 2021-09-13T16:04:20
| 2021-09-13T16:04:20
| 405,765,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from django import forms
from .models import File
class FileForm(forms.ModelForm):
class Meta:
model = File
fields = 'file',
|
[
"mospv87@gmail.com"
] |
mospv87@gmail.com
|
7dbe960a1c9a8e1e356ad75deab3f1df4abc7aac
|
4a0c3f5f697ab694067f5fc59486707440593856
|
/python/20_Slots.py
|
b95795f1ef5714b1713849c16c00279e4745897a
|
[] |
no_license
|
shweb360/Python
|
4cfe5e1e12d0bad02217ccd1bded7815a1c192e9
|
dd589674ed22ebd835efb21954ed0a96430002f8
|
refs/heads/master
| 2021-06-24T17:04:22.378895
| 2019-12-01T06:06:51
| 2019-12-01T06:06:51
| 102,442,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
# 1.1 Create a class:
class Student(object):
pass
# 1.2 Bind an attribute to the instance:
s=Student()
s.name="Wushuang"
print(s.name)
# 1.3 Bind a method to the instance
def set_age(self,age):
self.age=age
from types import MethodType
# bind the method to the instance
s.set_age=MethodType(set_age,s)
# call the instance method
s.set_age(25)
print(s.age)
# 2.0 Using __slots__
# But what if we want to restrict the attributes of an instance?
# For example, only allow adding the name and age attributes to Student instances.
# To achieve this restriction, Python allows defining a special __slots__
# variable in a class definition to limit the attributes its instances can have:
class Student2(object):
__slots__=('name','age')
s2=Student2()
s2.name="Michael"
s2.age=24
print(s2)
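# Demonstration of the restriction (added for clarity): assigning an attribute
# not listed in __slots__ raises AttributeError.
try:
    s2.score = 99
except AttributeError as e:
    print("AttributeError:", e)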
|
[
"785132826@qq.com"
] |
785132826@qq.com
|
648100aedd647315dd5b9631cec11785d7b6a4b0
|
ad3da434e91d28d5062089070b2c3b6915a775ae
|
/Django-Scraping/news/migrations/0002_weather.py
|
a74bcd94da2f22d109807b57d7689578344a6043
|
[
"Apache-2.0"
] |
permissive
|
mitus1212/Django-Scraping
|
f241a97481feef69aee9b159fe833a3bff6154da
|
00d503b004cc7c4a23de154bbca55351b7f3546d
|
refs/heads/master
| 2023-01-01T23:11:49.375542
| 2020-06-10T17:57:56
| 2020-06-10T17:57:56
| 180,415,060
| 0
| 0
| null | 2022-11-22T03:39:21
| 2019-04-09T17:13:27
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 552
|
py
|
# Generated by Django 2.1.7 on 2019-04-11 14:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Weather',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('weather', models.TextField()),
('degree', models.CharField(max_length=200)),
],
),
]
|
[
"mitus11112@gmail.com"
] |
mitus11112@gmail.com
|
26435cad8b4f6e3701b33caaa53babbe68372fcd
|
122db49900adae3d25bf6a17db54699086593f94
|
/klimplot_fetch.py
|
7bbb62ddb847248ee839f7ceb18b007e5ac29816
|
[] |
no_license
|
geoenvo/klimplot
|
77a63296ad85b5e1e2a2fa391ab3904d289860ea
|
0c70b350c5dca155211f2e4089b7765f34ef7101
|
refs/heads/master
| 2021-01-10T11:19:06.675854
| 2016-02-16T03:41:42
| 2016-02-16T03:41:42
| 51,801,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,725
|
py
|
# Klimplot
# Fetch Script
# (c) 2015 Mohammad Fadli
# Geo Enviro Omega, PT
# KlimPlot digunakan untuk memvisualisasikan dan mempublikasikan data Iklim menjadi bentuk Map Services.
# Script ini digunakan untuk me
#HOWTO
# Jalankan di background: python klimplot.py &
# Taruh di cron, atau jalankan sebagai services berkala
# Struktur folder hasil akan menyesuaikan
import os.path
import wget
import subprocess
#define variable
#Sumber Data (Server PIKAM via http)
spath="http://202.90.199.147/ec_prob/results_mat/"
#sfolder="2015.01.01/"
#sfile="control.2015.02_ver_2015.01.01.csv"
#surl=spath+sfolder+sfile
#cpath=os.getcwd()+"/"
#nama folder direktori
datadir="data/"
#path di mana folder data yg difetch akan di simpan
datapath="/home/klimplot/"+datadir
#datapath=cpath+datadir
#filepath=datapath+sfile
#Check Folder data
if not os.path.exists(datapath):
os.makedirs(datapath)
else:
print "\n Directory already exist."
"""
#Check File
if not os.path.exists(filepath):
#Get File
wget.download(surl,datapath)
else:
print "\n File already exist. Download Aborted."
"""
subprocess.call("wget -r -np -nc --cut-dirs=2 -A '*.csv' --ignore-case -nH -P "+datapath+" "+spath, shell=True)
"""
-r recursive download folder
-np no parents directory, tidak mendownload isi dari parent directory
-nH would download all files to the directory d in the current directory
-P you will save to specific directory
-nc, --no-clobber: skip downloads that would download to existing files.
--cut-dirs tidak melihat struktur direktori yang ada di sub folder sebelumnya.
-A '*.csv' hanya download csv file
"""
print "\n Alhamdulillah."
|
[
"mf4dl1@gmail.com"
] |
mf4dl1@gmail.com
|
fabcbe720fa7f9586321ad3d1884bd8c89a35a95
|
fa701904e59a94510a5c4fa3e1e64a8fe4135fd6
|
/mysite/mysite/settings.py
|
887b16718fbb0754f36a99c8116988166e0bd302
|
[] |
no_license
|
pr0mila/django_practice
|
6c4c29987d94d7d838fe6f5378862f266203d97f
|
64ab8181f053d158ed1c5d47d459e8771cc4681a
|
refs/heads/master
| 2020-08-18T09:27:01.351518
| 2019-10-18T21:04:18
| 2019-10-18T21:04:18
| 215,774,432
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,112
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&x^9hen^vm3#thtq8(ijj3ld=yj^=l%)hy4tp7e4kt!v8=9-^7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
|
[
"me@promila.info"
] |
me@promila.info
|
d52cdaf5f45af5761f3c5ee1d52779036a589402
|
5ed1fa554c97c8d8b6aa3c72ccf6f107edad9969
|
/Q3 length.py
|
c6acb534975c6d447cf0a2b1c7cfec6c7b3929e5
|
[] |
no_license
|
rufinavgurukul24/Listpy
|
f0cd9119755468fdaf91d7929c3d36d4c6d01335
|
59a6ae91793fbd7bef08a19b73cd8157f02a6ea8
|
refs/heads/main
| 2023-08-11T03:22:15.004725
| 2021-10-01T05:21:04
| 2021-10-01T05:21:04
| 412,337,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
number_1= [50, 40, 23, 70, 56, 12, 5, 10, 7]
print(number_1)
count=0
for i in number_1:
count=count+1
print("lenth of the list is",count)
|
[
"noreply@github.com"
] |
rufinavgurukul24.noreply@github.com
|
6d440df45f5272e972930a42b1a331ba016a59be
|
c38ad398d5909eade726fa1c2849b0cd124ef9b7
|
/rltime/env_wrappers/switching_env_wrapper.py
|
f34a2bed13d750ddcd9154f6f00f40a58290fdd5
|
[
"Apache-2.0"
] |
permissive
|
frederikschubert/rltime
|
612318e9ff8702e6775193b6261ea6a83b1d38fd
|
d1722ffd4cf7b4599655b8d9c64abc243919afc9
|
refs/heads/master
| 2020-09-11T02:30:51.074875
| 2019-12-16T10:44:23
| 2019-12-16T10:44:23
| 221,911,500
| 0
| 0
|
Apache-2.0
| 2019-11-15T11:34:33
| 2019-11-15T11:34:33
| null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
from typing import List
import gym
import numpy as np
from gym import spaces
class SwitchingWrapper(gym.Wrapper):
def __init__(self, env: gym.Env, env_index: int):
super().__init__(env)
self.env_index = env_index
def reset(self, **kwargs):
return self.env.reset(**kwargs)
def step(self, action):
observation, reward, done, info = self.env.step(action)
return (
observation,
reward,
done,
{**info, **{"env_index": self.env_index}},
)
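# Usage sketch (illustrative, not part of the original file): tag each
# sub-environment so transitions can be traced back to their source env.
# envs = [SwitchingWrapper(gym.make("CartPole-v1"), env_index=i) for i in range(4)]
# obs = envs[0].reset()
# _, _, done, info = envs[0].step(envs[0].action_space.sample())
# info["env_index"]  # -> 0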
|
[
"frederik.schubert@mailbox.org"
] |
frederik.schubert@mailbox.org
|
21199e34f98bf0e139f0ff9d121b283d51ab5daf
|
ddd80f5cff588f6dcd72da90e90dccac3e545397
|
/projects/cam/camloop.py
|
eb953eab4d56292c11cc71f04143c27bcf81b902
|
[] |
no_license
|
HTaylor7486/projects
|
cd5ade1919f71f5e6f1e48d016505364dc919af5
|
8717c8a12237c095f6b9709242f7be27ac4132d1
|
refs/heads/master
| 2020-05-12T16:36:43.137135
| 2014-12-17T13:13:50
| 2014-12-17T13:13:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
import picamera,time
def getpic(name="null"):
try:
with picamera.PiCamera() as camera:
q = "n"
while q == "n":
camera.start_preview()
time.sleep(3)
camera.capture("{0}.jpeg".format (name))
camera.stop_preview()
q = input("is the image okay? (y/n) ")
filename = ("{0}.jepg".format (name))
print("Your file is called {0}.jpeg".format (name))
return filename
except picamera.exc.PicameraMMALError:
print("Your camera is not working please connect and restart the program")
def getchar():
name = ""
while name == "":
name = input("what is your name?")
hair = ""
while not hair in ["blonde","brown","ginger","no hair"]:
hair = input ("what hair colour do you have? (blonde/brown/ginger/no hair)")
hat = ""
while not hat in ["y","n"]:
hat = input("do you have a hat? (y/n)")
eye = ""
while not eye in ["green","brown","blue"] :
eye = input("what is your eye colour")
gender = ""
while not gender in ["m","f"] :
gender = input("what is your gender?(m/f)")
fhair = ""
while not fhair in ["y","n"]:
fhair = input("do you have facial hair?(y/n)")
glass = ""
while not glass in ["y","n"]:
glass = input("do you have glasses?(y/n)")
charprof = [name,hair,hat,eye,gender,fhair,glass]
getpic(name)
return charprof
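# Example interactive use (illustrative sketch, not part of the original script):
# profile = getchar()   # prompts for details, then captures "<name>.jpeg"
# print(profile)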
|
[
"htaylor7486@sohamcollege.org.uk"
] |
htaylor7486@sohamcollege.org.uk
|
1be87f33c8660ad3c54efa5eb9f2ada26d9a1e6b
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/_PYTHON/DATA_STRUC_PYTHON_NOTES/python-prac/mini-scripts/python_NumPy_Products_1.txt.py
|
d720b0dffc122f94e2aecfe055e2ab67998d23f1
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 77
|
py
|
import numpy as np
arr = np.array([1, 2, 3, 4])
x = np.prod(arr)
print(x)
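# Added illustration: np.prod can also reduce along an axis.
arr2 = np.array([[1, 2], [3, 4]])
print(np.prod(arr2, axis=1))  # [ 2 12]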
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
67b04f877b89acc6dfc9c7553fba773c72403679
|
5fcefc2dff07daa503c95553ee2273403a57a2e2
|
/catkin_ws/build/my_turtle_whitcomb/catkin_generated/pkg.develspace.context.pc.py
|
a59f8f82da051a204e8bbeeee1c1ea927f6fcba9
|
[] |
no_license
|
StephenWhit/TurtleBot
|
a5d5d6bb3fb082e2afff90b3fe7df005ef650bb8
|
61640d480f69b9f3656fc1cba257a073ed8ef3e5
|
refs/heads/master
| 2020-04-01T02:52:51.074586
| 2018-10-12T19:45:15
| 2018-10-12T19:45:15
| 152,799,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/viki/catkin_ws/devel/include;/home/viki/catkin_ws/src/my_turtle_whitcomb/include".split(';') if "/home/viki/catkin_ws/devel/include;/home/viki/catkin_ws/src/my_turtle_whitcomb/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;roscpp;rospy;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmy_turtle_whitcomb".split(';') if "-lmy_turtle_whitcomb" != "" else []
PROJECT_NAME = "my_turtle_whitcomb"
PROJECT_SPACE_DIR = "/home/viki/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
|
[
"stephenwhit897@gmail.com"
] |
stephenwhit897@gmail.com
|
0855f982056b6cfbc3bce6a7b99b54ca6eddc19d
|
3e367ddabaecca6b3b932c09a933f1f1c3a42190
|
/Province_data/deal_lasa_data.py
|
8244f9ad0088e2b128acd9d04dba7b2c490400b8
|
[] |
no_license
|
13033266796/Air
|
03c8c231f412888aa72c56a06ae588c675ac57f4
|
d1c1fbe4ea86280cf33d12ce07b5b387699c7a11
|
refs/heads/master
| 2022-12-15T05:28:48.027159
| 2020-05-20T03:51:25
| 2020-05-20T03:51:25
| 248,926,154
| 0
| 0
| null | 2022-12-08T04:28:45
| 2020-03-21T07:16:57
|
HTML
|
UTF-8
|
Python
| false
| false
| 854
|
py
|
import os
import re
with open(r"F:\Python\pyCollect\Air_predict\Provice_data/original/ShangHai_2_year_data.txt","r",encoding="utf-8") as f:
data = f.readline()
line = 0
text = ""
while data:
line += 1
if line == 4:
res = re.findall(r"\S+", text)
with open(r"./original/上海_2year_data.csv","a",encoding="utf-8") as t:
t.write("上海,"+",".join(res)+"\n")
# print("***", text, "***")
text = ""
line = 1
data = data.strip()
text += " " + data
data = f.readline()
# data = data.strip()
# print(data)
# data = f.readline()
# data = data.strip()
# print(data)
# data = f.readline()
# data = data.strip()
# print(data)
# print(text)
# res = re.findall(r"\S+",text)
# print(res)
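# Note (added): the input groups each record over three physical lines; the loop
# above stitches them together and flushes every third line as one CSV row.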
|
[
"jiechen@webot.co"
] |
jiechen@webot.co
|
94b7cafb4bb13b099c3d093549c75e172d5d9a29
|
6df6c9ff29fe7aed0972f935cd53337d95f6ad40
|
/ecommerce/migrations/0025_auto_20210520_1832.py
|
bf1785363566bde6081088d96b406dd367b45628
|
[] |
no_license
|
brijesh681/rentel_website
|
46acae1ceb586c2bfcc795a519061a52ffad58a2
|
ef205304c6f5f9f6ff981a8244f4237c59bc52bb
|
refs/heads/master
| 2023-06-11T21:39:24.715466
| 2021-07-05T11:44:20
| 2021-07-05T11:44:20
| 383,115,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
# Generated by Django 3.1.4 on 2021-05-20 13:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ecommerce', '0024_dealsofday'),
]
operations = [
migrations.RenameField(
model_name='offer',
old_name='today_selling_mrp',
new_name='discount_percent',
),
migrations.RemoveField(
model_name='offer',
name='today_discount_percent',
),
migrations.AlterField(
model_name='offer',
name='offer_type',
field=models.CharField(choices=[('Deals Of The Day', 'Deals Of The Day'), ('Festive Special', 'Festive Special'), ('Summer Collection', 'Summer Collection'), ('Winter Collection', 'Winter Collection'), ('As Seen Your Favourite', 'As Seen Your Favourite')], max_length=25),
),
migrations.DeleteModel(
name='DealsOfDay',
),
]
|
[
"80308508+brijesh681@users.noreply.github.com"
] |
80308508+brijesh681@users.noreply.github.com
|
e5121a53283e18ff7eca054f670b9368256d0405
|
d9684e7c80f0bab12497e3a14889ffb45e9a41e0
|
/mlshim/_version.py
|
a4f2e2fa66d2fc39b64d70bc046071977bf62b08
|
[
"MIT"
] |
permissive
|
dapperfu/mlshim
|
df9206daa592e01b50c16ddf7a6a92bd1771a802
|
4a1eea0c5ce8d973ada5609c48a4942033b2fbdc
|
refs/heads/master
| 2022-06-22T03:49:46.436570
| 2022-05-18T15:42:39
| 2022-05-18T15:42:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,580
|
py
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
from typing import Dict
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "None"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "mlshim/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, str]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(
commands, args, cwd=None, verbose=False, hide_stderr=False, env=None
):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print(f"unable to find command, tried {commands}")
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(
GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True
)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = (
"unable to parse git-describe output: '%s'" % describe_out
)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(
full_tag, tag_prefix
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(
GITS, ["rev-list", "HEAD", "--count"], cwd=root
)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(
get_keywords(), cfg.tag_prefix, verbose
)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
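# --- Editor's sketch (appended; not part of the original _version.py) ---
# Minimal usage of the public entry point, get_versions(). The package name
# "mlshim" comes from versionfile_source above; a package __init__.py would
# typically do `from ._version import get_versions`. Run directly, this just
# prints whatever version information can be computed here (falling back to
# "0+unknown" outside a git checkout).
if __name__ == "__main__":
    info = get_versions()
    # Keys: version, full-revisionid, dirty, error, date.
    print(info["version"])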
|
[
"engineer@example.org"
] |
engineer@example.org
|
a0bcf1146515c5da0c64441490de32599b91f02e
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/12/01/14.py
|
fd0cb48a6c9d059526138c98e8ba82d309f6802b
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,517
|
py
|
#!/usr/bin/python2
### Google Code Jam template
# Futures
from __future__ import division
from __future__ import with_statement
from __future__ import print_function
## Library
# @memoized
def memoized(func):
mem = {}
def wrapped(*args):
if args not in mem:
mem[args] = func(*args)
return mem[args]
return wrapped
## Setup
from os.path import basename, splitext
# Task letter
TASK=splitext(basename(__file__))[0]
print("Task {}".format(TASK))
## Input templates
# Line as int
#int(infile.readline())
# Line as many ints
#(int(s) for s in infile.readline().split())
## Precalculation
print("Precalculation...")
from string import maketrans
src = """aozq
ejp mysljylc kd kxveddknmc re jsicpdrysi
rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd
de kr kd eoya kw aej tysr re ujdr lkgc jv"""
dst = """yeqz
our language is impossible to understand
there are twenty six factorial possibilities
so it is okay if you want to just give up"""
table = maketrans(src, dst)
print("Precalculation done.")
## Calculation
print("Calculation...")
with open(TASK+".in") as infile:
with open(TASK+".out",mode="wt") as outfile:
cases = int(infile.readline())
for ncase in range(cases):
print("Case #{nc}".format(nc=ncase+1))
# Perform all necessary calculations
text = infile.readline().strip()
data = text.translate(table)
outfile.write("Case #{nc}: {data}\n".format(nc=ncase+1,data=data))
print("Calculation done.")
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
a11539354e10e7fa486f5db387c8ccfbc7df1177
|
9ae4e7db77c3437331aeb95bfb58e6ab7762b3bd
|
/reside/wsgi.py
|
a957ccf1c84796bfbddab75e8747d1f4dce9e934
|
[] |
no_license
|
ShijuKAbraham/RESIDE
|
7ff190a5110f18a18805a2a636b30d7999309624
|
b67024aab94c3f218dc9bc03f727db8fde68fa9e
|
refs/heads/master
| 2022-01-23T13:12:38.499092
| 2019-07-18T17:07:44
| 2019-07-18T17:07:44
| 197,243,928
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
WSGI config for reside project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reside.settings')
application = get_wsgi_application()
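# --- Editor's sketch (appended; not part of the original wsgi.py) ---
# A local smoke test of the WSGI callable using only the standard library.
# Assumptions: the project package is on sys.path and port 8000 is free; a
# real deployment would point gunicorn/uWSGI at `application` instead.
if __name__ == "__main__":
    from wsgiref.simple_server import make_server
    with make_server("127.0.0.1", 8000, application) as httpd:
        httpd.handle_request()  # serve a single request, then exit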
|
[
"k.ashiju10@gmail.com"
] |
k.ashiju10@gmail.com
|
9e656061375e6a5225c6de0e83fae8c1cdc6db7f
|
b3e0d941ac23992e3a80edb1df5bd53e856ce5d5
|
/docs/source/conf.py
|
46b7f0f9c95cd64808e625b4e341ff0415384b63
|
[
"MIT"
] |
permissive
|
tjryankeogh/phytophotoutils
|
6f5c33b4dda06c66f07bd45d5c27336c6304e9b0
|
48c1747bca837f1d4c73ff48d7c232840eca8352
|
refs/heads/master
| 2023-04-18T00:00:50.236233
| 2021-04-12T13:51:58
| 2021-04-12T13:51:58
| 364,512,652
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,271
|
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
#import sphinx_rtd_theme
#sys.path.insert(0, os.path.abspath('../../phyto_photo_utils/'))
sys.path.insert(0, os.path.realpath('../..'))
# -- Project information -----------------------------------------------------
project = 'PhytoPhotoUtils'
copyright = '2019, Thomas Ryan-Keogh, Charlotte Robinson'
author = 'Thomas Ryan-Keogh, Charlotte Robinson'
# The short X.Y version
version = '1.0'
# The full version, including alpha/beta/rc tags
release = '01-10-2019'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.coverage', 'sphinx.ext.napoleon', 'sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
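# --- Editor's sketch (appended; not part of the original conf.py) ---
# One way to build these docs programmatically. Left commented out because
# Sphinx executes conf.py itself during a build; the module path is an
# assumption (sphinx.cmd.build.build_main exists in Sphinx >= 1.7; run from
# the docs/ directory so "source" and "build" resolve):
#
#     from sphinx.cmd.build import build_main
#     build_main(["-b", "html", "source", "build"])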
|
[
"tjryankeogh@googlemail.com"
] |
tjryankeogh@googlemail.com
|
5efd766bb70d94a197cb80cb858d7211c005cb27
|
4de2b914e4607dd0ca7eec60b21026af6b6c4797
|
/Old_work/valdambrini_cheli_papallo_tarmati/catkin_ws/build/navigation/clear_costmap_recovery/catkin_generated/pkg.develspace.context.pc.py
|
cb8deb76dfb119ed5c90cb0df8ac2a426a6fc434
|
[] |
no_license
|
ABiondi12/project_sgn
|
5203d21f2753dcdf7c53b153324dc75bc1221549
|
570b7be0b01e7c83cb927945e532d6a2213ebf65
|
refs/heads/main
| 2023-06-18T12:59:18.337096
| 2021-07-21T10:27:08
| 2021-07-21T10:27:08
| 307,121,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/robot/catkin_ws/src/navigation/map_server/include".split(';') if "/home/robot/catkin_ws/src/navigation/map_server/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nav_msgs;tf2".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmap_server_image_loader".split(';') if "-lmap_server_image_loader" != "" else []
PROJECT_NAME = "map_server"
PROJECT_SPACE_DIR = "/home/robot/catkin_ws/devel"
PROJECT_VERSION = "1.16.2"
|
[
"petracci.francesco@gmail.com"
] |
petracci.francesco@gmail.com
|