blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0ea78325d350912216e46f960d2a74c62a66ac05 | c54d598652b1270ab134a0bc342755d0506b445a | /replace.py | 7cbd20dca4c6ee11f6b49df25f54e82a879def55 | [] | no_license | nendek/war | 0426cf3771630bb56cf8c8c3041cb83dd9daa753 | ba3ebe0c6706424eeab7576eff9f28f718c3ffac | refs/heads/master | 2020-12-06T00:19:24.122754 | 2020-01-07T09:21:23 | 2020-01-07T09:21:23 | 232,287,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,454 | py | from get_vals import \
memcpy_addr, bis_size, loader_size, main_end, main_start, offset_1, offset_2, offset_3, offset_4, offset_5, offset_6, in_pestilence, in_pestilence2, offset_rip, call_1, call_2, call_3, call_4, call_5, hook_1, hook_2, hook_3, hook_4, hook_5, \
full_size, payload_size, \
exit_1, exit_2, exit_3, exit_4, exit_5, end_ft_end, bis_end, jmpr15, key_addr, offset_pos_rdi, offset_key_loader, pos_neg_bis, addr_index, fingerprint_bis, begin_text
def open_file(name):
    """Read every line of *name*, then reopen it truncated for writing.

    Returns a tuple ``(f, content)`` where ``f`` is the open write handle
    (caller must close it) and ``content`` is the list of original lines.
    Note: the file on disk is emptied as soon as this returns.
    """
    with open(name, "r") as reader:
        content = reader.readlines()
    writer = open(name, "w")
    return writer, content
# Rewrite includes/war.h in place: the value of each known #define is replaced
# with an address/size string imported from get_vals.  The numeric slice bounds
# (e.g. [0:24], [30:-1]) are hard-coded column offsets of the macro layout in
# war.h — assumes the header's formatting never changes; TODO confirm.
f, content = open_file("includes/war.h")
for i in range(0, len(content)):
    # The first three keywords must start exactly at column 9 (find() == 9);
    # all later keywords are matched anywhere in the line (find() != -1).
    if content[i].find("FT_MEMCPY_ADDR") == 9:
        content[i] = content[i][0:24] + memcpy_addr + content[i][30:-1] + "\n"
    if content[i].find("BIS_SIZE") == 9:
        content[i] = content[i][0:18] + bis_size + content[i][23:-1] + "\n"
    if content[i].find("LOADER_SIZE") == 9:
        content[i] = content[i][0:21] + loader_size + content[i][25:-1] + "\n"
    # NOTE(review): PAYLOAD_SIZE is filled with main_end rather than the
    # imported payload_size — verify this is intentional.
    if content[i].find("PAYLOAD_SIZE") != -1:
        content[i] = content[i][0:22] + main_end + content[i][28:-1] + "\n"
    if content[i].find("MAIN_OFFSET") != -1:
        content[i] = content[i][0:21] + main_start + content[i][27:-1] + "\n"
    # MAIN_SIZE is emitted as an arithmetic expression "end - start".
    if content[i].find("MAIN_SIZE") != -1:
        content[i] = content[i][0:19] + main_end + " - " + main_start + content[i][34:-1] + "\n"
    if content[i].find("OFFSET_1") != -1:
        content[i] = content[i][0:18] + offset_1 + content[i][24:-1] + "\n"
    if content[i].find("OFFSET_2") != -1:
        content[i] = content[i][0:18] + offset_2 + content[i][24:-1] + "\n"
    if content[i].find("OFFSET_3") != -1:
        content[i] = content[i][0:18] + offset_3 + content[i][24:-1] + "\n"
    if content[i].find("OFFSET_4") != -1:
        content[i] = content[i][0:18] + offset_4 + content[i][24:-1] + "\n"
    if content[i].find("OFFSET_5") != -1:
        content[i] = content[i][0:18] + offset_5 + content[i][24:-1] + "\n"
    if content[i].find("OFFSET_6") != -1:
        content[i] = content[i][0:18] + offset_6 + content[i][24:-1] + "\n"
    # OFFSET_7 / OFFSET_8 take the two "in_pestilence" values.
    if content[i].find("OFFSET_7") != -1:
        content[i] = content[i][0:18] + in_pestilence + content[i][24:-1] + "\n"
    if content[i].find("OFFSET_8") != -1:
        content[i] = content[i][0:18] + in_pestilence2 + content[i][24:-1] + "\n"
    if content[i].find("OFFSET_RIP") != -1:
        content[i] = content[i][0:20] + offset_rip + content[i][26:-1] + "\n"
    if content[i].find("OFFSET_CALL_1") != -1:
        content[i] = content[i][0:23] + call_1 + content[i][29:-1] + "\n"
    if content[i].find("OFFSET_CALL_2") != -1:
        content[i] = content[i][0:23] + call_2 + content[i][29:-1] + "\n"
    if content[i].find("OFFSET_CALL_3") != -1:
        content[i] = content[i][0:23] + call_3 + content[i][29:-1] + "\n"
    if content[i].find("OFFSET_CALL_4") != -1:
        content[i] = content[i][0:23] + call_4 + content[i][29:-1] + "\n"
    if content[i].find("OFFSET_CALL_5") != -1:
        content[i] = content[i][0:23] + call_5 + content[i][29:-1] + "\n"
    if content[i].find("OFFSET_HOOK_1") != -1:
        content[i] = content[i][0:23] + hook_1 + content[i][29:-1] + "\n"
    if content[i].find("OFFSET_HOOK_2") != -1:
        content[i] = content[i][0:23] + hook_2 + content[i][29:-1] + "\n"
    if content[i].find("OFFSET_HOOK_3") != -1:
        content[i] = content[i][0:23] + hook_3 + content[i][29:-1] + "\n"
    if content[i].find("OFFSET_HOOK_4") != -1:
        content[i] = content[i][0:23] + hook_4 + content[i][29:-1] + "\n"
    if content[i].find("OFFSET_HOOK_5") != -1:
        content[i] = content[i][0:23] + hook_5 + content[i][29:-1] + "\n"
    # Write the (possibly rewritten) line back; open_file truncated the file.
    f.write(content[i])
f.close()
# Patch srcs_s/loader.s: each |REPLACEn| marker tags a placeholder that sits a
# fixed number of characters *before* the marker (hence the negative offsets);
# the placeholder is overwritten with a hex value, the marker itself is kept.
f, content = open_file("srcs_s/loader.s")
for i in range(0, len(content)):
    if content[i].find("|REPLACE1|") != -1:
        place = content[i].find("|REPLACE1|") - 7
        # total payload size rounded up by one page (0x1000)
        content[i] = content[i][0:place] + hex(int(full_size, 16) + 0x1000) + content[i][place + 6:]
    if content[i].find("|REPLACE2|") != -1:
        place = content[i].find("|REPLACE2|") - 6
        content[i] = content[i][0:place] + hex(int(bis_size, 16)) + content[i][place + 5:]
    if content[i].find("|REPLACE3|") != -1:
        place = content[i].find("|REPLACE3|") - 5
        content[i] = content[i][0:place] + offset_key_loader + content[i][place + 4:]
    if content[i].find("|REPLACE4|") != -1:
        place = content[i].find("|REPLACE4|") - 5
        # second half of the key sits 4 bytes after offset_key_loader
        content[i] = content[i][0:place] + hex(int(offset_key_loader, 16) + 4) + content[i][place + 4:]
    f.write(content[i])
f.close()
# Patch srcs_s/bis.s with the same before-the-marker placeholder scheme as
# loader.s above (offsets differ because the placeholders have other widths).
f, content = open_file("srcs_s/bis.s")
for i in range(0, len(content)):
    if content[i].find("|REPLACE2|") != -1:
        place = content[i].find("|REPLACE2|") - 7
        content[i] = content[i][0:place] + payload_size + content[i][place + 6:]
    if content[i].find("|REPLACE3|") != -1:
        place = content[i].find("|REPLACE3|") - 6
        content[i] = content[i][0:place] + key_addr + content[i][place + 5:]
    if content[i].find("|REPLACE4|") != -1:
        place = content[i].find("|REPLACE4|") - 6
        # second key dword: key_addr + 4
        content[i] = content[i][0:place] + hex(int(key_addr, 16) + 4) + content[i][place + 5:]
    f.write(content[i])
f.close()
# Patch srcs_c/crypto.c: values are inserted between a pair of C comment
# markers, e.g. everything between /*D*/ and /*D`*/ is replaced by the value.
f, content = open_file("srcs_c/crypto.c")
for i in range(0, len(content)):
    if content[i].find("/*D*/") != -1:
        place = content[i].find("/*D*/") + 5
        content[i] = content[i][0:place] + fingerprint_bis + content[i][content[i].find("/*D`*/"):]
    if content[i].find("/*C*/") != -1:
        place = content[i].find("/*C*/") + 5
        content[i] = content[i][0:place] + key_addr + content[i][content[i].find("/*C`*/"):]
    if content[i].find("/*B*/") != -1:
        place = content[i].find("/*B*/") + 5
        content[i] = content[i][0:place] + jmpr15 + content[i][content[i].find("/*B`*/"):]
    if content[i].find("/*G*/") != -1:
        place = content[i].find("/*G*/") + 5
        content[i] = content[i][0:place] + offset_key_loader + content[i][content[i].find("/*G`*/"):]
    if content[i].find("/*G2*/") != -1:
        place = content[i].find("/*G2*/") + 6
        # loader key, second dword
        content[i] = content[i][0:place] + hex(int(offset_key_loader, 16) + 4) + content[i][content[i].find("/*G2`*/"):]
    if content[i].find("/*H*/") != -1:
        place = content[i].find("/*H*/") + 5
        content[i] = content[i][0:place] + key_addr + content[i][content[i].find("/*H`*/"):]
    if content[i].find("/*H2*/") != -1:
        place = content[i].find("/*H2*/") + 6
        content[i] = content[i][0:place] + hex(int(key_addr, 16) + 4) + content[i][content[i].find("/*H2`*/"):]
    f.write(content[i])
f.close()
# Patch srcs_c/check_ownfile.c using the same /*X*/ ... /*X`*/ marker pairs.
f, content = open_file("srcs_c/check_ownfile.c")
for i in range(0, len(content)):
    if content[i].find("/*I*/") != -1:
        place = content[i].find("/*I*/") + 5
        content[i] = content[i][0:place] + pos_neg_bis + content[i][content[i].find("/*I`*/"):]
    if content[i].find("/*J*/") != -1:
        place = content[i].find("/*J*/") + 5
        content[i] = content[i][0:place] + addr_index + content[i][content[i].find("/*J`*/"):]
    f.write(content[i])
f.close()
# Patch srcs_c/patch.c.  Two marker styles are used:
#  * lines tagged //REPLACEn: the constant after "-=" (up to ";//") is
#    replaced with hex(bis_end - exit_n);
#  * lines tagged /*REPLACEn*/: the first "0x" literal (up to "/*") is
#    replaced with the same value;
#  * /*A*/ /*E*/ /*F*/ use the paired-comment scheme from crypto.c.
f, content = open_file("srcs_c/patch.c")
for i in range(0, len(content)):
    if content[i].find("//REPLACE1") != -1:
        place = content[i].find("-=") + 3
        content[i] = content[i][0:place] + hex(bis_end - exit_1) + content[i][content[i].find(";//"):]
    if content[i].find("//REPLACE2") != -1:
        place = content[i].find("-=") + 3
        content[i] = content[i][0:place] + hex(bis_end - exit_2) + content[i][content[i].find(";//"):]
    if content[i].find("//REPLACE3") != -1:
        place = content[i].find("-=") + 3
        content[i] = content[i][0:place] + hex(bis_end - exit_3) + content[i][content[i].find(";//"):]
    if content[i].find("//REPLACE4") != -1:
        place = content[i].find("-=") + 3
        content[i] = content[i][0:place] + hex(bis_end - exit_4) + content[i][content[i].find(";//"):]
    if content[i].find("//REPLACE5") != -1:
        place = content[i].find("-=") + 3
        content[i] = content[i][0:place] + hex(bis_end - exit_5) + content[i][content[i].find(";//"):]
    if content[i].find("/*REPLACE1*/") != -1:
        place = content[i].find("0x")
        content[i] = content[i][0:place] + hex(bis_end - exit_1) + content[i][content[i].find("/*"):]
    if content[i].find("/*REPLACE2*/") != -1:
        place = content[i].find("0x")
        content[i] = content[i][0:place] + hex(bis_end - exit_2) + content[i][content[i].find("/*"):]
    if content[i].find("/*REPLACE3*/") != -1:
        place = content[i].find("0x")
        content[i] = content[i][0:place] + hex(bis_end - exit_3) + content[i][content[i].find("/*"):]
    if content[i].find("/*REPLACE4*/") != -1:
        place = content[i].find("0x")
        content[i] = content[i][0:place] + hex(bis_end - exit_4) + content[i][content[i].find("/*"):]
    if content[i].find("/*REPLACE5*/") != -1:
        place = content[i].find("0x")
        content[i] = content[i][0:place] + hex(bis_end - exit_5) + content[i][content[i].find("/*"):]
    if content[i].find("/*A*/") != -1:
        place = content[i].find("/*A*/") + 5
        content[i] = content[i][0:place] + end_ft_end + content[i][content[i].find("/*A`*/"):]
    if content[i].find("/*E*/") != -1:
        place = content[i].find("/*E*/") + 5
        content[i] = content[i][0:place] + offset_pos_rdi + content[i][content[i].find("/*E`*/"):]
    if content[i].find("/*F*/") != -1:
        place = content[i].find("/*F*/") + 5
        content[i] = content[i][0:place] + begin_text + content[i][content[i].find("/*F`*/"):]
    f.write(content[i])
f.close()
| [
"p.nardozi@gmail.com"
] | p.nardozi@gmail.com |
8521b46d423a4699a9b0209ffcdcbbb364cbe453 | abebd304a9bc3ff1b90db09eba66c003f51a74d5 | /sug/preprocess/dump_pinyin_weight.py | fe42a90aeb11b3365e0eb4b3b7cd656d851913df | [] | no_license | jiakechong1991/search | 817715c58a1b117d177a7b49f443cb2411ee3c6f | 86c644e9d26f2eba25d4cf50821ffcc8e14e7953 | refs/heads/master | 2021-04-15T17:42:01.074581 | 2018-04-10T14:04:17 | 2018-04-10T14:04:17 | 126,668,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,860 | py | # coding: utf8
import argparse
import codecs
from collections import defaultdict
import copy
import json
import logging
from config.conf import MIN_WORD_LEN, MAX_WORD_LEN, MAX_MIX_NUM, LOGGING_FORMAT
from utils.common import get_row_num
from utils.pinyin_generator import PinyinGenerator
def dump_pinyin_weight(file_in, file_ot, args):
    """Expand each JSON row of *file_in* into pinyin-keyed suggestion rows.

    Each input line is a JSON object with at least 'input' (the word) and
    'weight'.  For every pinyin variant enabled via *args* flags, a copy of
    the row is written to *file_ot* with 'input' replaced by the pinyin
    string and 'weight' demoted by a variant-specific penalty.  Python 2
    module (uses `except Exception, e` / print statements).
    """
    wf = codecs.open(file_ot, 'w', encoding='utf8')
    row_num = get_row_num(file_in)
    with codecs.open(file_in, encoding='utf8') as f:
        for line_no, line in enumerate(f):
            # progress heartbeat every 10k lines
            if line_no % 10000 == 0:
                logging.info('finished: %s/%s', line_no, row_num)
            try:
                row = json.loads(line)
            except Exception, e:
                # dump offending line before aborting the whole run
                print e
                print line_no, line
                raise Exception(e)
            word = row['input']
            generator = PinyinGenerator(word)
            # list of (weight, [pinyin, ...]) pairs, one per enabled variant
            lst_pinyin_weight = []
            try:
                # fixed penalties: full pinyin -1, first letters -2, initials -3
                if args.FULL_PINYIN:
                    new_weight = row['weight'] - 1
                    lst_pinyin_weight.append((new_weight, [''.join(x) for x in generator.pinyins]))
                if args.FIRST_LETTER:
                    new_weight = row['weight'] - 2
                    lst_pinyin_weight.append((new_weight, [''.join(x) for x in generator.first_letters]))
                if args.INITIAL:
                    new_weight = row['weight'] - 3
                    lst_pinyin_weight.append((new_weight, [''.join(x) for x in generator.initials]))
                if args.FUZZY_PINYIN:
                    # fuzzy matches are halved (py2 integer division for ints)
                    new_weight = row['weight'] / 2
                    lst_pinyin_weight.append((new_weight, [''.join(x) for x in generator.fuzzy_pinyins]))
                all_pinyins = set()
                for weight, pinyins in lst_pinyin_weight:
                    all_pinyins |= set(pinyins)
                # mixed forms explode combinatorially — only generate them for
                # short words with few variants so far
                if len(all_pinyins) < MAX_MIX_NUM and len(word) < 6:
                    if args.MIX_PINYIN_WITH_CHINESE:
                        new_weight = row['weight'] / 2 - 100
                        lst_pinyin_weight.append(
                            (new_weight, [''.join(x) for x in generator.mix_pinyins_with_chinese]))
                    elif args.MIX_PINYIN:
                        new_weight = row['weight'] / 2 - 100
                        lst_pinyin_weight.append((new_weight, [''.join(x) for x in generator.mix_pinyins]))
            except Exception, e:
                print line_no, line
                raise Exception(e)
            # keep the highest weight seen for each distinct pinyin string
            new_input_weight = defaultdict(int)
            for weight, pinyins in lst_pinyin_weight:
                for pinyin in pinyins:
                    if weight > new_input_weight[pinyin]:
                        new_input_weight[pinyin] = weight
            for _input, weight in new_input_weight.items():
                # drop keys outside the configured length window
                if len(_input) <= MAX_WORD_LEN and len(_input) >= MIN_WORD_LEN:
                    new_row = copy.deepcopy(row)
                    new_row['input'] = _input
                    new_row['weight'] = weight
                    wf.write(json.dumps(new_row, sort_keys=True, ensure_ascii=False) + '\n')
    wf.close()
if __name__ == '__main__':
    # CLI entry point: --file_in/--file_ot are required paths; the remaining
    # store_true flags enable individual pinyin expansion variants.
    logging.basicConfig(level=logging.INFO, format=LOGGING_FORMAT)
    parser = argparse.ArgumentParser()
    parser.add_argument('--file_in', required=True)
    parser.add_argument('--file_ot', required=True)
    parser.add_argument('--FULL_PINYIN', action='store_true')
    parser.add_argument('--FIRST_LETTER', action='store_true')
    parser.add_argument('--INITIAL', action='store_true')
    parser.add_argument('--FUZZY_PINYIN', action='store_true')
    parser.add_argument('--MIX_PINYIN', action='store_true')
    parser.add_argument('--MIX_PINYIN_WITH_CHINESE', action='store_true')
    args = parser.parse_args()
    # the whole args namespace is passed through so the function can read flags
    dump_pinyin_weight(args.file_in, args.file_ot, args)
| [
"wangxiaoke@xuetangx.com"
] | wangxiaoke@xuetangx.com |
908fdaa7f393a77d34b107b9c79ab0e2f18cfe51 | d29e681adefca59c8b7f05e2f24303a96dd4825b | /src/service.py | 4efdeb060735d6287694812e9c9bc9e5862aa64b | [
"MIT"
] | permissive | MetuMobile/WeatherService | ebd77ce22e3e921651ee5cf36934a5efd0a41710 | 7be637e047d0b96fd14a6542f7bd2f4eb7cf468e | refs/heads/master | 2021-01-20T00:47:44.493038 | 2017-05-12T16:37:26 | 2017-05-12T16:37:26 | 89,190,766 | 0 | 1 | null | 2017-04-30T09:27:17 | 2017-04-24T02:45:47 | Python | UTF-8 | Python | false | false | 1,170 | py | from flask import Flask, jsonify#, request, render_template
import sys
from Config import Config
from Weather import Weather
class WeatherService:
    """Flask-based HTTP service exposing a daily weather summary.

    Instantiating the class builds the Flask app, registers the /summary
    route and immediately starts the (blocking) development server.
    """
    serviceName = "weather"

    def __init__(self):
        self._initializeService()
        # GET /summary -> daily forecast JSON
        self.app.add_url_rule('/summary', '', self.getDailyWeather)
        self._runService()

    def getDailyWeather(self):
        """Return the daily forecast wrapped in a JSON response."""
        return jsonify(DailyForecast=Weather().getDaily())

    def _addLogger(self):
        """Attach a file handler that records ERROR-and-above app messages."""
        import logging
        handler = logging.FileHandler(Config.loggerPath) # errors logged to this file
        handler.setLevel(logging.ERROR) # only log errors and above
        self.app.logger.addHandler(handler) # attach the handler to the app's logger

    def _runService(self):
        # Bug fix: announce startup *before* app.run() — run() blocks until
        # the server shuts down, so the original message only appeared on exit.
        print(str(self.serviceName) + " service is started.")
        sys.stdout.flush()
        self.app.run(debug=Config.debug, host='0.0.0.0', port=Config.services[self.serviceName]['port'], threaded=True)

    def _initializeService(self):
        """Create the Flask app, wire up logging and JSON settings."""
        sys.stdout.flush()
        self.app = Flask(__name__)
        self._addLogger()
        self.app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
if __name__ == "__main__":
    # Instantiating the class starts the blocking Flask server (see __init__).
    service = WeatherService()
| [
"borakrc@gmail.com"
] | borakrc@gmail.com |
7810385ef7e3c7c1b931680d6a30cd82e4f10a47 | d9eef8dd3489682c8db41f2311e3058d1f369780 | /.history/abel-network-files/mcmc_alg_implementation_own_two_20180704115025.py | 971d63b8aa2ad664cc1c93d9e62cd650d73a9178 | [] | no_license | McKenzie-Lamb/Gerrymandering | 93fe4a49fe39a0b307ed341e46ba8620ea1225be | b7a7c4129d6b0fcd760ba8952de51eafa701eac3 | refs/heads/master | 2021-01-25T06:06:43.824339 | 2018-10-16T14:27:01 | 2018-10-16T14:27:01 | 93,526,515 | 0 | 0 | null | 2018-07-12T19:07:35 | 2017-06-06T14:17:47 | Python | UTF-8 | Python | false | false | 10,917 | py | # Author: Abel Gonzalez
# Date: 06/26/18
#
# Description:
# This program uses the .shp file to create a network graph where each node
# represents a census tract and the edge represents adjacency between each
# tract, usign graph-tool instead of networkx
import random
import math
import numpy as np
import graph_tool.all as gt
import time
from pathlib import Path
def create_graph_views(district_total_no):
    """Build one filtered GraphView per district.

    Relies on the module-level ``graph`` and ``district_no`` vertex property
    map: vertices whose district label equals ``i`` are marked valid in view
    ``i``.  The boolean filter map is kept on each view as ``view.vp.valid``.
    """
    graph_views = list()
    for i in range(district_total_no):
        graph_view = gt.GraphView(graph)
        # boolean vertex filter: True only for vertices labelled district i
        graph_view_check = graph_view.new_vertex_property("bool")
        matched_vertices = gt.find_vertex(graph, district_no, i)
        for j in matched_vertices:
            graph_view_check[j] = True
        graph_view = gt.GraphView(graph_view, vfilt=graph_view_check)
        graph_view.vp.valid = graph_view_check
        graph_views.append(graph_view)
    return graph_views
def turn_off_edges(districts_graphs):
    """Return per-district GraphViews keeping a random ~half of the edges.

    For each district, len(edges)//2 edge indices are drawn *with
    replacement* (np.random.randint), so slightly fewer than half may be
    kept.  NOTE(review): the filter map is named ``to_delete`` but
    ``efilt=to_delete`` actually *keeps* the marked edges.
    """
    turned_off_graphs = list()
    # Iterate through districts and selects random edges
    for district in range(len(districts_graphs)):
        to_delete = districts_graphs[district].new_edge_property('bool')
        edges = districts_graphs[district].get_edges()
        selected = edges[np.random.randint(
            edges.shape[0], size=len(edges)//2), :] # Here is the prob for edge turn off
        for i in selected:
            to_delete[i] = True
        turned_off_graphs.append(gt.GraphView(
            districts_graphs[district], efilt=to_delete))
    return turned_off_graphs
def get_cp_boundaries(graph, turned_on_graphs):
    """Collect connected components that lie entirely on a district boundary.

    A component qualifies when *every* one of its vertices has a neighbour in
    a different district's view.  Side effect: for each boundary vertex the
    global maps ``graph.vp.nd`` (neighbouring district) and ``graph.vp.cd``
    (current district) are updated — propose_swap reads them later.
    """
    cp_boundary = list()
    for g in range(len(turned_on_graphs)):
        cp_label, hist = gt.label_components(turned_on_graphs[g])
        labels = set(cp_label.a)
        for l in labels:
            cp = gt.find_vertex(turned_on_graphs[g], cp_label, l)
            label_boun = 0
            for v in cp:
                vertex_bound = False
                for n in graph.vertex(v).all_neighbors():
                    for g_two in range(len(turned_on_graphs)):
                        if g == g_two:
                            continue
                        # vertex() raises ValueError when n is filtered out of
                        # that district's view, i.e. n is not in district g_two
                        try:
                            turned_on_graphs[g_two].vertex(n)
                        except ValueError:
                            continue
                        else:
                            graph.vp.nd[graph.vertex(v)] = g_two
                            graph.vp.cd[graph.vertex(v)] = g
                            vertex_bound = True
                            break
                    if vertex_bound == True:
                        label_boun += 1
                        break
            # keep the component only if all of its vertices touch a boundary
            if label_boun == len(cp):
                cp_boundary.append(cp)
    return cp_boundary
def get_non_adjacent_v(labels_in_boundaries, graph):
    """Sample boundary components and drop ones too close to each other.

    Components whose vertices are within graph distance < 3 of a vertex in an
    earlier-sampled component are marked for removal.  NOTE(review): the
    removal loop deletes by index while indices shift (``del list_to_swap[i]``
    inside a range() loop) — later marked entries may survive or the wrong
    entry may be removed; the IndexError guard papers over this. TODO confirm
    intended semantics.
    """
    list_to_swap = random.sample(
        labels_in_boundaries, random.randint(2, len(labels_in_boundaries))) # Prob for choosing from boundaries
    index_to_del = set()
    for l in range(len(list_to_swap)):
        for v in range(len(list_to_swap[l])):
            for l_two in range(len(list_to_swap)):
                if l == l_two:
                    continue
                for v_two in range(len(list_to_swap[l_two])):
                    # shortest_path()[0] is the vertex list; < 3 means the two
                    # components are adjacent or one hop apart
                    if len(gt.shortest_path(graph, graph.vertex(list_to_swap[l][v]), graph.vertex(list_to_swap[l_two][v_two]))[0]) < 3:
                        index_to_del.add(l_two)
    for i in range(len(list_to_swap)):
        if i in index_to_del:
            try:
                del list_to_swap[i]
            except IndexError:
                continue
    return list_to_swap
def gather_districts_data(districts_graphs):
    """Attach pop / dem_vote / rep_vote graph properties to each district.

    Sums the per-vertex ``graph.vp.data`` fields PERSONS, CONDEM14 and
    CONREP14 over every vertex in each district view and stores the totals
    as graph-level properties.  Returns the same list, mutated in place.
    """
    for i in range(len(districts_graphs)):
        population = districts_graphs[i].new_graph_property('int')
        districts_graphs[i].graph_properties["pop"] = population
        districts_graphs[i].graph_properties["pop"] = 0
        dem_vote = districts_graphs[i].new_graph_property('int')
        districts_graphs[i].graph_properties["dem_vote"] = dem_vote
        districts_graphs[i].graph_properties["dem_vote"] = 0
        rep_vote = districts_graphs[i].new_graph_property('int')
        districts_graphs[i].graph_properties["rep_vote"] = rep_vote
        districts_graphs[i].graph_properties["rep_vote"] = 0
        for v in districts_graphs[i].vertices():
            districts_graphs[i].graph_properties["pop"] += graph.vp.data[v]["PERSONS"]
            districts_graphs[i].graph_properties["dem_vote"] += graph.vp.data[v]["CONDEM14"]
            districts_graphs[i].graph_properties["rep_vote"] += graph.vp.data[v]["CONREP14"]
    return districts_graphs
def random_color():
    """Return a random color as a list ``[r, g, b, a]``.

    Channels are drawn uniformly from 0..255 and one randomly-chosen
    component is then forced to 0.  Bug fix: the original used
    ``random.randint(0, 256)``, whose bounds are *inclusive*, so it could
    emit 256 — outside the 8-bit channel range.
    NOTE(review): the zeroed index may be 3 (alpha), producing a fully
    transparent color — confirm that is intended.
    """
    r = random.randint(0, 255)
    g = random.randint(0, 255)
    b = random.randint(0, 255)
    a = 1
    color_to_return = [r, g, b, a]
    index_to_zero = random.randint(0, 3)
    color_to_return[index_to_zero] = 0
    return color_to_return
def adjust_color(districts_graphs, color, ring_color, niter_type = 'first', ring_colors_dict = None):
    """Fill the per-vertex color maps: blue if dem_vote > rep_vote, else red.

    ``ring_color`` carries a per-district hue.  On the first call
    (niter_type == 'first') a fresh ``ring_colors_dict`` is generated and
    returned so later iterations reuse the same district hues; on subsequent
    calls ('nonfirst') the supplied dict is read instead.
    Returns (color, ring_color) or (color, ring_color, ring_colors_dict).
    """
    if niter_type == 'nonfirst':
        for i in range(len(districts_graphs)):
            # majority party decides the fill color
            if districts_graphs[i].graph_properties["dem_vote"] > districts_graphs[i].graph_properties["rep_vote"]:
                color_ = (0, 0, 255, 1)
            else:
                color_ = (255, 0, 0, 1)
            for v in districts_graphs[i].vertices():
                color[v] = color_
                ring_color[v] = ring_colors_dict[i]
        return color, ring_color
    else:
        ring_colors_dict = dict()
        for i in range(len(districts_graphs)):
            # one random ring hue per district, copied so it is not shared
            ring_colors_dict[i] = random_color().copy()
            if districts_graphs[i].graph_properties["dem_vote"] > districts_graphs[i].graph_properties["rep_vote"]:
                color_ = (0, 0, 255, 1)
            else:
                color_ = (255, 0, 0, 1)
            for v in districts_graphs[i].vertices():
                color[v] = color_
                ring_color[v] = ring_colors_dict[i]
        return color, ring_color, ring_colors_dict
def propose_swap(districts_graphs, proposed_components, graph, labels_in_boundaries):
    """Try to move the proposed boundary components between districts.

    Computes the population / vote deltas of moving each component from its
    current district (``graph.vp.cd``) to its neighbouring district
    (``graph.vp.nd``).  The swap is accepted only if (a) district populations
    stay within 50% of each other and (b) every district remains connected;
    otherwise a fresh component sample is drawn and the swap is retried
    recursively.  Returns the (possibly updated) list of district graphs.

    Bug fixes vs. original: ``contiguos == False`` was a no-op comparison
    (the flag was never cleared, so broken swaps were accepted), and the
    recursive retry calls discarded their return value (the caller received
    None).  Both are corrected below.
    """
    changes = dict()
    vertex_to_add = dict()
    vertex_to_delete = dict()
    for i in range(len(districts_graphs)):
        # [population, republican votes, democratic votes] per district
        changes[i] = [districts_graphs[i].graph_properties["pop"],
                      districts_graphs[i].graph_properties["rep_vote"],
                      districts_graphs[i].graph_properties["dem_vote"]]
        vertex_to_add[i] = []
        vertex_to_delete[i] = []
    for c in proposed_components:
        added_pop = 0
        added_rep = 0
        added_dem = 0
        n_dindex = 0
        c_dindex = 0
        for v in range(len(c)):
            added_pop += graph.vp.data[c[v]]['PERSONS']
            added_rep += graph.vp.data[c[v]]['CONREP14']
            added_dem += graph.vp.data[c[v]]['CONDEM14']
            # destination / source districts recorded by get_cp_boundaries
            n_dindex = graph.vp.nd[c[v]]
            c_dindex = graph.vp.cd[c[v]]
            vertex_to_add[n_dindex].append(c[v])
            vertex_to_delete[c_dindex].append(c[v])
        changes[n_dindex][0] += added_pop
        changes[n_dindex][1] += added_rep
        changes[n_dindex][2] += added_dem
        changes[c_dindex][0] -= added_pop
        changes[c_dindex][1] -= added_rep
        changes[c_dindex][2] -= added_dem
    similar_pop = True
    for i in changes.keys():
        if i == 0:
            continue
        similar_pop = math.isclose(changes[i][0], changes[i-1][0], rel_tol=0.50) # Here is the population difference
    if similar_pop == True:
        contiguos = True
        for i in changes.keys():
            previous_state = districts_graphs[i].copy()
            previous_state.graph_properties["pop"] = changes[i][0]
            previous_state.graph_properties["rep_vote"] = changes[i][1]
            previous_state.graph_properties["dem_vote"] = changes[i][2]
            for j in vertex_to_add[i]:
                if len(vertex_to_add[i]) == 0:
                    break
                previous_state.vp.valid[j] = True
            for j in vertex_to_delete[i]:
                if len(vertex_to_delete[i]) == 0:
                    break
                previous_state.vp.valid[j] = False
            comp, hist = gt.label_components(previous_state)
            # a nonzero label sum means more than one component -> not contiguous
            if np.sum(comp.a) != 0:
                districts_graphs[i] = previous_state
                contiguos = False  # was `contiguos == False`: a no-op comparison
                break
        if contiguos == True:
            return districts_graphs
        else:
            selected_vertices = get_non_adjacent_v(labels_in_boundaries, graph)
            return propose_swap(districts_graphs, selected_vertices, graph, labels_in_boundaries)
    else:
        selected_vertices = get_non_adjacent_v(labels_in_boundaries, graph)
        return propose_swap(districts_graphs, selected_vertices, graph, labels_in_boundaries)
# Paths
main_folder = Path("abel-network-files/")
data_folder = Path("abel-network-files/data/")
images_folder = Path("abel-network-files/images/")
# Loading the previous created Graph and creating the prop maps
graph = gt.load_graph(str(data_folder / "tmp_graph100.gt"))
color = graph.new_vertex_property("vector<double>")
ring_color = graph.new_vertex_property("vector<double>")
cp_label = graph.new_vertex_property("int")
# nd/cd are filled by get_cp_boundaries and read by propose_swap
neighbor_district = graph.new_vertex_property('int')
current_district = graph.new_vertex_property('int')
graph.vp.nd = neighbor_district
graph.vp.cd = current_district
# Init variables
district_total_no = 2
# Separates graph into blocks
districts = gt.minimize_blockmodel_dl(
    graph, district_total_no, district_total_no)
district_no = districts.get_blocks()
# Create the different graphs
districts_graphs = create_graph_views(district_total_no)
# Initialize data and draw first image
districts_graphs = gather_districts_data(districts_graphs)
color, ring_color, ring_colors_dict = adjust_color(districts_graphs, color, ring_color)
gt.graph_draw(graph, vertex_fill_color = color, vertex_color = ring_color,
        output = str(main_folder / 'tmp.png'), bg_color=(255, 255, 255, 1), pos=graph.vp.pos)
# Actual function calling part of algorithm: 10 MCMC-style swap iterations,
# drawing one image per iteration.
for i in range(10):
    turned_on_graphs = turn_off_edges(districts_graphs)
    labels_in_boundaries = get_cp_boundaries(graph, turned_on_graphs)
    selected_vertices = get_non_adjacent_v(labels_in_boundaries, graph)
    districts_graphs = propose_swap(districts_graphs, selected_vertices, graph, labels_in_boundaries)
    # sanity print: component-label sum per district (0 means one component)
    for j in districts_graphs:
        comp, hist = gt.label_components(j)
        print(np.sum(comp.a))
    print('------------',i,'------------')
    color, ring_color = adjust_color(districts_graphs, color, ring_color, niter_type = 'nonfirst', ring_colors_dict = ring_colors_dict)
    gt.graph_draw(graph, vertex_fill_color = color, vertex_color = ring_color,
            output = str(main_folder / ('tmp'+str(i)+'.png')), bg_color=(255, 255, 255, 1), pos=graph.vp.pos)
| [
"gonzaleza@ripon.edu"
] | gonzaleza@ripon.edu |
c8904d327b1057cf99c8f56b6a9a66f8aecad6f6 | 02e26e2cbea74a97b56262de031563c8bfb3f468 | /Projects/todo/tasks/migrations/0001_initial.py | 0f8b45c9371344c24ce1c7ec6c02198aff212578 | [] | no_license | LalityaSawant/Python | f2845ecbe83b9c4dbd987c6d4e6fa5352bf54c3c | 7377499629883311029a94e14058599ac166c318 | refs/heads/master | 2022-12-12T06:17:00.311059 | 2020-08-30T06:24:12 | 2020-08-30T06:24:12 | 288,286,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | # Generated by Django 3.0.7 on 2020-06-28 07:14
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('complete', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
]
| [
"lalitya.sawant@gmail.com"
] | lalitya.sawant@gmail.com |
6098a8d7522753840f023d7974708f6d2e0e7675 | a4cb825776922fb9030b41d072165d2345538c60 | /Graph.py | 64713cff960d43b699ccc3d8a9efce829fae1253 | [] | no_license | chaiwat-boom/SCMA_247_Homework | 8b4fde7accad5857572805b7e381cb73ae198974 | 6eaa6931fabdf2f74ec2fe5e49bc9c4e94d2f066 | refs/heads/main | 2023-04-23T00:49:46.159223 | 2021-05-15T08:24:51 | 2021-05-15T08:24:51 | 367,565,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,186 | py | #!/usr/bin/env python
# coding: utf-8
# # Graph
# Chaiwat Khampamuan 6105005
# In[1]:
#Represent Graph in Adjacency matrix
class GraphAdjMatrix:
def __init__(self,n):
self.order = n;
self.adj = [[0]*n for i in range(n)];
def display(self):
print("ADJMatrix","order :",self.order,"\n");
s = "";
for v in range(len(self.adj)): s += str(v) + "\t";
print("\t",s,"\n");
for i,a in enumerate(self.adj):
s = "";
for e in a: s += str(e) + "\t";
print(i,"\t",s);
print("\n");
def __validate_input(self,v):
if(v < 0 or v > len(self.adj)):
return "Invalid vertex";
def getOrders(self): return self.order;
def addVertices(self,n=1):
if( n < 0): return self.____validate_input(n);
order = self.order; n += order;
mat = [[0]*n for i in range(n)];
for i in range(order):
for j in range(order):
mat[i][j] = self.adj[i][j];
self.order = n; self.adj = mat;
def removeVertex(self,v):
e = self.__validate_input(v);
if(e): return e;
self.order -= 1;
for i in range(len(self.adj)):
self.adj[i][v] = None;
for j in range(len(self.adj)):
self.adj[v][j] = None;
def addArc(self,v1,v2):
e = self.__validate_input(v1);
if(not e): e = self.__validate_input(v2);
if(e): return e;
if(v1 == v2): return "please use different vertex"
else: self.adj[v1][v2] = 1;
def removeArc(self,v1,v2):
e = self.__validate_input(v1);
if(not e): e = self.__validate_input(v2);
if(e): return e;
if(v1 == v2): return "please use different vertex"
else: self.adj[v1][v2] = 0;
def addEdge(self,v1,v2):
e = self.addArc(v1,v2);
if(e): return e
self.addArc(v2,v1);
def removeEdge(self,v1,v2):
e = self.removeArc(v1,v2);
if(e): return e
self.removeArc(v2,v1);
def nbhs(self,v):
e = self.__validate_input(v);
if(e): return e;
a = [];
for c in range(len(self.adj)):
if(self.adj[v][c] == 1): a.append(c);
return None if a == [] else a;
# --- demo: exercise GraphAdjMatrix on a small 4-vertex graph ---
M = GraphAdjMatrix(4);
# calling methods
M.addEdge(0, 1);
M.addEdge(0, 2);
M.addEdge(1, 2);
M.addEdge(2, 3);
# the adjacency matrix created
M.display();
# adding a vertex to the graph
M.addVertices();
# connecting that vertex to other existing vertices
M.addEdge(4, 1);
M.addEdge(4, 3);
# the adjacency matrix with a new vertex
M.display();
# removing an existing vertex in the graph (row/column become None)
M.removeVertex(1);
# the adjacency matrix after removing a vertex
M.display();
# In[2]:
#Represent Graph in Adjacency list
class GraphAdjList:
def __init__(self,n):
self.order = n;
self.adj = [None] * n;
def getOrders(self): return self.order;
def __validate_input(self,v):
if(v < 0 or v > len(self.adj)): return "Invalid vertex"
def addVertices(self,n = 1):
if(n < 0): return self.__validate_input(n);
for i in range(n): self.adj.append(None);
self.order += n;
def addArc(self,v1,v2):
err = self.__validate_input(v1);
if(not err): err = self.__validate_input(v2);
if(err): return err;
if(self.adj[v1] == None): self.adj[v1] = [];
elif(v2 in self.adj[v1]): return;
self.adj[v1].append(v2);
def removeArc(self,v1,v2):
err = self.__validate_input(v1);
if(not err): err = self.__validate_input(v2);
if(err): return err;
if(self.adj[v1] != None and v2 in self.adj[v1]):
self.adj[v1].remove(v2);
if(self.adj[v1] == []): self.adj[v1] = None;
def addEdge(self,v1,v2):
err = self.addArc(v1,v2)
if(err): return err;
self.addArc(v2,v1)
def removeEdge(self,v1,v2):
err = self.removeArc(v1,v2)
if(err): return err;
self.removeArc(v2,v1)
def removeVertex(self,v):
if(v < 0): return self.__validate_input(v);
self.adj[v] = None; self.order -= 1;
for i in range(len(self.adj)):
if(self.adj[i] != None and v in self.adj[i]):
self.adj[i].remove(v);
def nbhs(self,v):
err = self.__validate_input(v);
if(err): return err;
else : return self.adj[v];
def display(self):
print("ADJ List","order :",self.order);
for i,e in enumerate(self.adj): print(i,e);
# --- demo: exercise GraphAdjList on a small 4-vertex graph ---
G = GraphAdjList(4);
G.display();
G.addEdge(0, 1);
G.addEdge(0, 2);
G.addEdge(1, 2);
G.addEdge(2, 3);
# the adjacency matrix created
G.display();
# adding a vertex to the graph
G.addVertices();
# connecting that vertex to other existing vertices
G.addEdge(4, 1);
G.addEdge(4, 3);
# the adjacency matrix with a new vertex
G.display();
# removing an existing vertex in the graph
G.removeVertex(1);
# the adjacency matrix after removing a vertex
G.display();
# In[ ]:
| [
"noreply@github.com"
] | chaiwat-boom.noreply@github.com |
ffd2b24dc87d99684dca0f458c3970cd7cf4b8dd | 1eab574606dffb14a63195de994ee7c2355989b1 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/switchhostrangehopslearnedinfo_9zdfjhbmdlsg9wc0xlyxjuzwrjbmzv.py | a9217efc563072505ad588370bd54d1ec01eb023 | [
"MIT"
] | permissive | steiler/ixnetwork_restpy | 56b3f08726301e9938aaea26f6dcd20ebf53c806 | dd7ec0d311b74cefb1fe310d57b5c8a65d6d4ff9 | refs/heads/master | 2020-09-04T12:10:18.387184 | 2019-11-05T11:29:43 | 2019-11-05T11:29:43 | 219,728,796 | 0 | 0 | null | 2019-11-05T11:28:29 | 2019-11-05T11:28:26 | null | UTF-8 | Python | false | false | 4,428 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class SwitchHostRangeHopsLearnedInfo(Base):
"""NOT DEFINED
The SwitchHostRangeHopsLearnedInfo class encapsulates a list of switchHostRangeHopsLearnedInfo resources that is managed by the system.
A list of resources can be retrieved from the server using the SwitchHostRangeHopsLearnedInfo.find() method.
"""
__slots__ = ()
_SDM_NAME = 'switchHostRangeHopsLearnedInfo'
def __init__(self, parent):
super(SwitchHostRangeHopsLearnedInfo, self).__init__(parent)
@property
def Action(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('action')
@property
def DestinationHostMac(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('destinationHostMac')
@property
def InputPort(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('inputPort')
@property
def InputTimeInMs(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('inputTimeInMs')
@property
def OutputPort(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('outputPort')
@property
def OutputTimeInMs(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('outputTimeInMs')
@property
def SourceHostMac(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('sourceHostMac')
@property
def SwitchDataPathId(self):
"""NOT DEFINED
Returns:
number
"""
return self._get_attribute('switchDataPathId')
@property
def SwitchIp(self):
"""NOT DEFINED
Returns:
str
"""
return self._get_attribute('switchIp')
def find(self, Action=None, DestinationHostMac=None, InputPort=None, InputTimeInMs=None, OutputPort=None, OutputTimeInMs=None, SourceHostMac=None, SwitchDataPathId=None, SwitchIp=None):
"""Finds and retrieves switchHostRangeHopsLearnedInfo data from the server.
All named parameters support regex and can be used to selectively retrieve switchHostRangeHopsLearnedInfo data from the server.
By default the find method takes no parameters and will retrieve all switchHostRangeHopsLearnedInfo data from the server.
Args:
Action (str): NOT DEFINED
DestinationHostMac (str): NOT DEFINED
InputPort (number): NOT DEFINED
InputTimeInMs (number): NOT DEFINED
OutputPort (number): NOT DEFINED
OutputTimeInMs (number): NOT DEFINED
SourceHostMac (str): NOT DEFINED
SwitchDataPathId (number): NOT DEFINED
SwitchIp (str): NOT DEFINED
Returns:
self: This instance with matching switchHostRangeHopsLearnedInfo data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of switchHostRangeHopsLearnedInfo data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the switchHostRangeHopsLearnedInfo data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
| [
"srvc_cm_packages@keysight.com"
] | srvc_cm_packages@keysight.com |
32c4c4ae98dd31dfa43723e716df04dba6ec898e | e48053bc7bf064bc28ffb1949f74970005243c83 | /src/graphql_relay/utils/__init__.py | 2490b59425ba1c6d1fc11da92727d81f7003e738 | [
"MIT"
] | permissive | markedwards/graphql-relay-py | 5dd8eb775bc6d87e18bf71ca5b46d60d27dad09b | 611c0333eeca53663d9b47f7994098edda81b087 | refs/heads/master | 2023-03-08T04:59:27.118363 | 2022-07-23T13:48:14 | 2022-07-23T13:48:14 | 264,901,446 | 0 | 0 | MIT | 2020-05-18T10:10:13 | 2020-05-18T10:10:13 | null | UTF-8 | Python | false | false | 98 | py | """graphql_relay.utils"""
from .base64 import base64, unbase64
__all__ = ["base64", "unbase64"]
| [
"cito@online.de"
] | cito@online.de |
d471de3c47b8bb684ef59ddfcc1774d08abef858 | 819dbb98d33784ed595eea562ae692bf40930ef8 | /Disnaker/appium/publish/_AI_Desain.py | e8eb26e59de323675405d1690986b8df87419374 | [] | no_license | RachmadAgungP/crowling_pribadi | e20dd4ea877ad20ed49342dcd60dfaa4efbbf6f3 | 07fad09d95a160d9c55127ebdf8f78e1789dd248 | refs/heads/main | 2023-03-06T02:57:16.482675 | 2021-02-22T09:24:39 | 2021-02-22T09:24:39 | 332,108,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,975 | py | import pandas as pd
import ast
import win32com.client as win32
import os
import textwrap
import datetime
# df = pd.read_csv("data_namapekerjaan1.csv")
import shutil
import _AI_Size_otomatis
def listToString(s,skalar,kod,uk):
str1 = ""
tnpa_tnd_bca = ""
str1_over = ""
kod = kod
if kod == []:
kod = "Pilihan"
print (kod)
print (kod)
tampungan = {kod:[]}
items = {kod:[]}
w = 1
hitung_baris = 0
ukuran = uk
if skalar == False:
ukuran = uk
else:
ukuran = uk
for ele in s:
if ele == "":
pass
else:
str1 += os.linesep+"- "+" "+ ele
tnpa_tnd_bca += os.linesep+ ele
tampungan[kod].append(os.linesep+"- "+" "+ ele)
hitung_baris += 1
w += 1
# tampungan[kod].append(str1)
if len(tampungan[kod]) >=10:
if skalar == True:
items[kod] = tampungan[kod][10:len(tampungan[kod])]
tampungan[kod] = tampungan[kod][:10]
tampungan[kod].append(os.linesep+"- penjelasan ada di web Foloker.com ...")
print (items)
for i in tampungan[kod]:
str1_over += i
else:
for i in tampungan[kod]:
str1_over += i
else:
for i in tampungan[kod]:
str1_over += i
print ("kualifikasi-> ",str1_over)
return str1_over,items,str1,tnpa_tnd_bca
def _AI_Size_otomatis.contentnya(nama_layer, layer_ke, isi, ukuran_font, visible):
var = layer_ke.TextFrames(nama_layer)
var.TextRange.Contents = str(isi)
tR = var.textRange
tR.characterAttributes.size = ukuran_font
var.hidden = visible
return (var)
def save_pic(urut,nama,tt,adobe_document):
# Define the Export JPEG Options.
jpeg_export_options = win32.Dispatch("Illustrator.ExportOptionsJPEG")
# Export the document.
if nama == "story":
newpath = r'E:/Belajar/www.disnakerja.com/hasil/{}/{}'.format(tgl,"story")
else:
newpath = r'E:/Belajar/www.disnakerja.com/hasil/{}/{}'.format(tgl,str(urut)+". "+nama)
if not os.path.exists(newpath):
os.makedirs(newpath)
if nama == "story":
adobe_document.Export(
ExportFile=r"E:/Belajar/www.disnakerja.com/hasil/{}/{}/{}".format(
str(tgl),"story",str(urut)
),
ExportFormat=1,
Options=jpeg_export_options
)
else:
adobe_document.Export(
ExportFile=r"E:/Belajar/www.disnakerja.com/hasil/{}/{}/{}".format(
str(tgl),str(urut)+". "+nama,str(tt)
),
ExportFormat=1,
Options=jpeg_export_options
)
def yuy(posss,koddk,kualll):
posisiss = []
str_tolist_kodes = []
kualifikasiss = []
posisiss.append(posss)
str_tolist_kodes.append(koddk)
kualifikasiss.append(kualll)
for y in range (len(posisiss)):
o = 0
for u in range (len(str_tolist_kodes[y])):
if "General Requirements" in str_tolist_kodes[y][o] or "Persyaratan Umum" in str_tolist_kodes[y][o] or "Ketentuan" in str_tolist_kodes[y][o] or "Berkas Lamaran" in str_tolist_kodes[y][o]:
posisiss[0].append('*Tambahan Info')
str_tolist_kodes[0].append(str_tolist_kodes[y][o])
kualifikasiss[0].append(kualifikasiss[y][o])
str_tolist_kodes[y][o] = [[]]
kualifikasiss[y][o] = [[]]
o -= 1
o += 1
print ("->",posisiss)
print ("->",str_tolist_kodes)
return posisiss, str_tolist_kodes, kualifikasiss
def urai(df,pengaktifan_AI,tipe_gambar,layer1,layer2,layer3,adobe_document):
r = 0
info_keterangan = []
tambahan_feed = {}
nomer = [] #berapa banyak foto yang dihasilkan
nama_file = []
index_error = 0
for index,row in df.iterrows():
# Nama (feed 1)
try:
nomer_nama = []
tampung_semua = {}
tt = 0
no = 1
list_edit = yuy(ast.literal_eval(row['Posisi']),ast.literal_eval(row["kode_kual"]),ast.literal_eval(row['Kualifikasi']))
posisis = list_edit[0][0]
no_d = 1 + len(posisis) + 1
if '*Tambahan Info' in posisis:
posisis = list_edit[0][0][:-1]
else:
posisis = list_edit[0][0]
pos = listToString(posisis,True,"Posisi",55)
no_d -= 1
nomer_nama.append(no_d)
print ("============ " + str(row["Nama"])+ " ================")
if len(posisis) == 0 :
print ("opp",str(row["Nama"]))
info_keterangan.append("")
no -= 1
nomer.append(no)
no_d += 1
nama_file.append(0)
continue
else:
if pengaktifan_AI == True:
layer1.visible = True
if tipe_gambar == "story":
uk_nama_p = 31
uk_p = 35
else:
uk_nama_p = 28
uk_p = 31
# ============ posisi (feed 1) =============
_AI_Size_otomatis.contentnya("Nama_Perusahaan", layer1, str(row["Nama"]), uk_nama_p, False,False)
# ====> Gambar <====
gam = layer1.placedItems("Gambar")
gam.file = row['Path_Gambar']
# ====> Daftar Posisi <====
_AI_Size_otomatis.contentnya("Posisi", layer1, pos[2], uk_p, False, True)
# ====> Atribut lain <====
if tipe_gambar == "story":
pass
else:
# _AI_Size_otomatis.contentnya("tgl", layer1, str(tgl), 22, False)
_AI_Size_otomatis.contentnya("no", layer1, "0"+str(no), 147, False, False)
# ========> SAVE <=======
if tipe_gambar == "story":
save_pic(index,"story",str(no_d),adobe_document)
else:
save_pic(index,row["Nama"],str(no_d),adobe_document)
# ==========================================
layer1.visible = False
print ("aaaaa",ast.literal_eval(row['Keterangan']))
# ============ posisi (feed 2) =============
if tipe_gambar == "story":
continue
else:
for y in range (len(posisis)):
print ("posisi ->",posisis[y])
str_tolist_kode = list_edit[1][0]
kualifikasis = list_edit[2][0]
if pengaktifan_AI == True:
layer2.visible = True
# hidden layer
for i in range(5):
_AI_Size_otomatis.contentnya("kode_Prasaratan "+str(i), layer2, " ", 32, True,False)
_AI_Size_otomatis.contentnya("Prasaratnya "+str(i), layer2, " ", 32, True,True)
_AI_Size_otomatis.contentnya("Nama_Posisi", layer2, posisis[y], 35, False,False)
if len(str_tolist_kode[y]) >= 5:
print ("============ skip ==================")
pass
else:
if "Lowongan Kerja" in posisis[y] and len(str_tolist_kode[0][0]) == 0 and len(kualifikasis[0]) == 0:
print ("asadas")
pass
elif len(str_tolist_kode[y][0]) == 0 and len(kualifikasis[y][0]) == 0:
print ("asadas",str_tolist_kode[y][0],kualifikasis[y][0])
pass
else:
for oo in range (len(str_tolist_kode[y])):
tampung = []
# subjudul kualifikasi (kode_kualifikasi)
if len(str_tolist_kode[y][oo]) == 0 :
if pengaktifan_AI == True:
_AI_Size_otomatis.contentnya("kode_Prasaratan "+str(oo), layer2, "", 1, True,False)
_AI_Size_otomatis.contentnya("kode_Prasaratan "+str(4), layer2, "", 1, True,False)
elif len(str_tolist_kode[y][oo]) >= 28 :
if pengaktifan_AI == True:
_AI_Size_otomatis.contentnya("kode_Prasaratan "+str(4), layer2, str_tolist_kode[y][oo], 20, False,False)
else:
if pengaktifan_AI == True:
_AI_Size_otomatis.contentnya("kode_Prasaratan "+str(oo), layer2, str_tolist_kode[y][oo], 32, False,False)
# kualifikasi (kualifikasi)
if len(kualifikasis[y][oo]) == 0 :
if pengaktifan_AI == True:
_AI_Size_otomatis.contentnya("Prasaratnya "+str(oo), layer2, "", 1, True,False)
else:
skalar = True
ukuran_fontt = 20
if len(kualifikasis[y]) == 1:
skalar = False
ukuran_fontt = 30
elif len(kualifikasis[y]) == 2:
skalar = True
ukuran_fontt = 25
else:
skalar = True
ukuran_fontt = 20
print ("|||||||||||| Melebihi ||||||||||||||")
data = kualifikasis[y][oo]
info = listToString(data,skalar,str_tolist_kode[y][oo],ukuran_str)
# print (len(kualifikasis[i][j]))
if len(info[0]) >= 700:
ukuran_fontt = 20
if pengaktifan_AI == True:
_AI_Size_otomatis.contentnya("Prasaratnya "+str(oo), layer2, info[0], ukuran_fontt, False,True)
tampung = info[1]
tampung_semua.update(tampung)
print ("============== kelebihan =============")
print ("tampungan-> ",tampung_semua)
no += 1
no_d -= 1
nomer_nama.append(no_d)
if pengaktifan_AI == True:
# _AI_Size_otomatis.contentnya("tgl", layer2, str(tgl), 22, False)
_AI_Size_otomatis.contentnya("no", layer2, "0"+str(no), 147, False,False)
save_pic(index,row["Nama"],str(no_d),adobe_document)
if pengaktifan_AI == True:
layer2.visible = False
# ==========================================
# (feed 3)
keter = ast.literal_eval(row['Keterangan'])
print ("iniloh----",keter)
keteran = listToString(keter,False,'w',50)
kete = listToString(keter,False,'w',50000)
# if type(row["Posisi"]) == float :
# print ("pass")
# continue
# else:
# info_keterangan.append("")
info_keterangan.append(kete[3])
no += 1
no_d -= 1
nomer_nama.append(no_d)
if pengaktifan_AI == True:
layer3.visible = True
_AI_Size_otomatis.contentnya("Keterangan", layer3, keteran[3] + "\n Informasi Lebih lanjut Kunjungi Foloker.com", 33, False,False)
# _AI_Size_otomatis.contentnya("tgl", layer3, str(tgl), 22, False)
_AI_Size_otomatis.contentnya("no", layer3, "0"+str(no), 147, False,False)
save_pic(index,row["Nama"],str(no_d),adobe_document)
layer3.visible = False
except Exception as e:
if tipe_gambar == "story":
print ("ini",e)
continue
else:
if pengaktifan_AI == True:
layer2.visible = False
# newpath = r'E:/Belajar/www.disnakerja.com/hasil/{}/{}'.format(tgl,str(index)+". "+row["Nama"])
# shutil.rmtree(newpath)
index_error = index
df_er = df.loc[[index]]
df = df.drop(index = index)
print ("opo",df['Alamat'])
newpath = r'Disnaker/appium/Debug/Data_debug/{}'.format(tgl)
if not os.path.exists(newpath):
os.makedirs(newpath)
df_er.to_csv('Disnaker/appium/Debug/Data_debug/%s/data_saat_Debug.csv'%tgl)
no = no + 1
no_d = no_d + 1
info_keterangan.append("")
nomer.append(no)
# ==========================================
r+=1
nama_file.append(nomer_nama)
return info_keterangan,nomer,nama_file,index_error
def maine(tgl,tipe_gambar):
df = pd.read_csv("hasil\%s\data_namapekerjaan1.csv"%tgl)
print (df.columns)
adobe_app = win32.GetActiveObject("Illustrator.Application")
adobe_document = adobe_app.ActiveDocument
if tipe_gambar == "story":
layer1 = adobe_document.Layers("Layer1")
layer1.visible = False
rrun = urai(df,True,tipe_gambar,layer1,layer1,layer1,adobe_document)
else:
layer1 = adobe_document.Layers("Layer1")
layer2 = adobe_document.Layers("Layer2")
layer3 = adobe_document.Layers("Layer3")
layer1.visible = False
layer2.visible = False
layer3.visible = False
rrun = urai(df,True,tipe_gambar,layer1,layer2,layer3,adobe_document)
dfe = pd.Series(rrun[0])
nom = pd.Series(rrun[1])
nomer_nam = pd.Series(rrun[2])
DF_fix = df
DF_fix['info'] = dfe
DF_fix['byk_hasil'] = nom
DF_fix['nama_nomer'] = nomer_nam
print (rrun[3])
# DF_EDIT = DF_fix.drop(index=rrun[3])
if tipe_gambar == "story":
pass
else:
DF_fix.to_csv('hasil\%s\data_Fix.csv'%tgl)
def baca_file():
tgl_r = open("E:/Belajar/www.disnakerja.com/Data/tanggal.txt", "r")
tgl = tgl_r.read()
tgl_r.close()
return tgl
tgl = baca_file()
# kode_kual dan kualifikasi harus sama
# now = datetime.datetime.now()
# # tgl = str(now.strftime("%B %d, %Y"))
# # tgl = "January 6, 2021"
tipe_gambar = "feed"
# tipe_gambar = "story"
maine(tgl,tipe_gambar)
| [
"pambudi171@windowslive.com"
] | pambudi171@windowslive.com |
d42bab9b46b1ce7a269fd7d3f5a0583fc29c0f2a | 89854257a02b2b3e3cf00c7f5ddde15222286af5 | /tower_of_hanoi.py | 8ab3110e4aa1ec916b8ff23147352b7ab8a74f4d | [] | no_license | gmraju/tower-of-hanoi-solver | 8efdcd003d2e87e974d4bd810e9e3446fb5ba8fe | 22d1f4e9bc71a7da459f44de0594615ff242d323 | refs/heads/master | 2021-04-15T07:16:44.367579 | 2018-03-21T08:17:10 | 2018-03-21T08:17:10 | 126,145,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,107 | py | import copy
from copy import deepcopy
"""
Objects of class game_state represent states of the tower of hanoi game.
The objects hold the value of their parent state, representation of the game state as a list of lists, and have a class method to calculate the list of
legal moves from the current state.
"""
class game_state:
def __init__(self):
self.parent = []
self.pegs = [];
self.next_moves = [];
def __init__(self, state, parent):
self.parent = parent
self.pegs = state
self.next_moves = []
def get_all_valid_moves(self):
for this_peg in range(len(self.pegs)):
for other_pegs in range(len(self.pegs)):
if this_peg!=other_pegs and self.pegs[this_peg]:
if not self.pegs[other_pegs] or self.pegs[this_peg][-1]<self.pegs[other_pegs][-1]:
new_state = copy.deepcopy(self.pegs)
new_state[other_pegs].append(new_state[this_peg].pop())
self.next_moves.append(new_state)
def remove_duplicates(past_move_list, current_state):
"""
This method removes any previously performed moves from the pool of valid moves performable from current_state.
"""
no_dupe_next_moves = []
for move in current_state.next_moves:
flag = True
for completed_move in past_move_list:
if move == completed_move:
flag = False
break
if flag:
no_dupe_next_moves.append(move)
current_state.next_moves = no_dupe_next_moves
def get_traversal(end_state):
"""
This is a path display method. After reaching the goal state, the 'parent' value of the goal state object is used to trace the path taken
from the start state.
"""
state = end_state
traversal = []
trace = []
num = 0
traversal.append(state.pegs)
while state.parent:
traversal.append(state.parent.pegs)
state = state.parent
while traversal:
num+=1
item = traversal.pop()
trace.append(item)
print str(item)+'\n'
print 'Steps: '+str(num)
def bfs(root, end_state):
"""
This performs breadth-first-search to find the goal state.
A queue is maintained which holds all possible moves that can be made from current state. Each item in the queue is checked against the goal state.
If an item matches the goal state, it is returned and search is completed. If there is no match, all possible moves from that state are added to the
queue (barring repeated moves).This process continues until the goal state is found or the queue is empty.
"""
all_move_list = []
all_move_list += root.pegs
root.get_all_valid_moves()
possible_states = []
for move in root.next_moves:
possible_states.append(game_state(move, root))
all_move_list += root.next_moves
while possible_states:
current_state = possible_states.pop(0)
if current_state.pegs == end_state:
get_traversal(current_state)
else:
current_state.get_all_valid_moves()
remove_duplicates(all_move_list, current_state)
all_move_list += current_state.next_moves
for move in current_state.next_moves:
possible_states.append(game_state(move,current_state))
def dfs(root, end_state):
"""
Helper function for depth-first-search.
"""
all_move_list = []
all_move_list.append(root.pegs)
goal_state = depth_first_search(root, end_state, all_move_list)
get_traversal(goal_state)
def depth_first_search(root, end_state, all_move_list):
"""
This function performs depth-first-search to find the goal state.
The function is called recursively, with the first valid move from current state being passed as root for each function call till goal state is
reached or all possible states resulting from the original valid move have been explored. The process is then repeated for the next valid move and all states
stemming from there, and so on.
"""
if root.pegs == end_state:
return root
else:
root.get_all_valid_moves()
remove_duplicates(all_move_list, root)
for move in root.next_moves:
new_move = game_state(move, root)
all_move_list.append(move)
goal = depth_first_search(new_move, end_state, all_move_list)
if goal:
return goal
def rank_moves_user_defined(possible_states, end_state):
"""
Helper function to carry out shortest distance calculation between next valid moves and goal state. Priority queue is formed based on these calculations.
Heuristic works as follows:
- finds sum of values of disks on each peg of goal state
- for each state/move from list of possible moves, the values of disks on each peg are summed
- absolute difference between the peg values for said state and the goal state is calculated
- the absolute distance values of all pegs are summed to determine a 'distance' value for the move
- This is repeated for each possible move
- next_moves list is re-ordered in ascending order of 'distance'
"""
reordered_list = []
end_state_sum = []
move_weights = []
for item in end_state:
end_state_sum.append(sum(item))
for state in possible_states:
move = state.pegs
move_sum = []
abs_value = []
for item in move:
move_sum.append(sum(item))
difference = [a-b for a,b in zip(end_state_sum,move_sum)]
for item in difference:
abs_value.append(abs(item))
move_weights.append(sum(abs_value))
while possible_states:
closest = move_weights.index(min(move_weights))
move_weights.pop(closest)
reordered_list.append(possible_states.pop(closest))
return reordered_list
def bestfs(root, end_state):
"""
Performs best-first-search in order to find a path to the goal state.
Best first search functions similarly to breadth-first search, but uses a heuristic to prioritize the valid moves queue in terms of distance from goal state.
"""
all_move_list = []
all_move_list += root.pegs
root.get_all_valid_moves()
possible_states = []
for move in root.next_moves:
possible_states.append(game_state(move, root))
possible_states = rank_moves_user_defined(possible_states, end_state)
all_move_list += root.next_moves
while possible_states:
current_state = possible_states.pop(0)
if current_state.pegs == end_state:
get_traversal(current_state)
else:
current_state.get_all_valid_moves()
remove_duplicates(all_move_list, current_state)
all_move_list += current_state.next_moves
for move in current_state.next_moves:
possible_states.append(game_state(move,current_state))
possible_states = rank_moves_user_defined(possible_states, end_state)
#Enter the start and end states below.
#Each inner list represents a peg. Disks are represnted on each of these lists as numbers.
#Please place disks in proper ordering as per the rules of Tower of Hanoi (descending order)
# ex: [[],[],[]] represents 3 pegs
# [[3,2,1],[],[]] represents 3 pegs with 3 disks on the first peg.
start_state = [[3,2,1],[],[]]
end_state = [[],[],[3,2,1]]
root = game_state(start_state, [])
print 'Breadth-First-Search:\a'
bfs(root, end_state)
print '\n\n\nDepth-First-Search:'
dfs(root, end_state)
print '\n\n\nBest-First-Search:'
bestfs(root, end_state)
| [
"noreply@github.com"
] | gmraju.noreply@github.com |
3c01cb626309ed4ad616f8ba9963b0b2f8b8fd2e | 0707663f07d2a2010d44bd5f9dc74dbf289b2606 | /master_table.py | c793b9a1621eed547fa0a9ca5d95a09900af513e | [] | no_license | ejgillia/csc495_amazon | e96c327cccc541acd8709379d6ab62d4240fe599 | f1bfc55d6d6a6e802d8ee88d7da789bcfef107c4 | refs/heads/master | 2021-04-30T05:34:01.100693 | 2018-04-28T21:13:37 | 2018-04-28T21:13:37 | 121,418,784 | 0 | 1 | null | 2018-04-28T21:09:56 | 2018-02-13T18:20:07 | Python | UTF-8 | Python | false | false | 2,652 | py | #!/usr/bin/python3
import pandas as pd
filenames = ["commute_times_amazon_cities.csv", "cities_anshul.csv"]
logistics_base_filepath = ["Logistics"]
logistics = ["State-City-MSA - Airport Connectivity to Major Hubs.csv",
"State-City-MSA - Airport Travel Time.csv",
"State-City-MSA - Inrix Traffic Index.csv",
"State-City-MSA - Public Transit Score.csv"
"State-City-MSA - Statewise Highway and Road Length.csv"]
site_building_base_filepath = ["Site Building/Data"]
site_building = ["csv_cityTax_list.csv", "statewise_cnbc_ranking.csv"]
df = pd.read_csv("csv_city_list.csv", sep = "|")
for f in filenames:
print(f)
tba = pd.read_csv(f, sep = "|", encoding='latin-1')
print(tba.columns)
print(df.columns)
df = pd.merge(df, tba, how = "left", on = ["city", "state", "is_top_twenty", "MSA"])
tba = pd.read_csv("Logistics/State-City-MSA - Airport Connectivity to Major Hubs.csv", sep = "|", encoding='latin-1')
df = pd.merge(df, tba, how = "left", on = ["city", "state"])
#del df["MSA_y"]
del df["Unnamed: 7"]
del df["Unnamed: 8"]
del df["Unnamed: 9"]
del df["Unnamed: 10"]
tba = pd.read_csv("Logistics/State-City-MSA - Airport Travel Time.csv", sep = "|", encoding='latin-1')
df = pd.merge(df, tba, how = "left", on = ["city", "state"])
#del df["MSA"]
del df["Unnamed: 4"]
del df["Unnamed: 5"]
del df["Unnamed: 6"]
del df["Unnamed: 7"]
del df["Unnamed: 8"]
del df["Unnamed: 9"]
del df["Unnamed: 10"]
tba = pd.read_csv("Logistics/State-City-MSA - Inrix Traffic Index.csv", sep = "|", encoding='latin-1')
df = pd.merge(df, tba, how = "left", on = ["city", "state"])
del df["Unnamed: 9"]
del df["Unnamed: 10"]
tba = pd.read_csv("Logistics/State-City-MSA - Public Transit Score.csv", sep = "|", encoding='latin-1')
df = pd.merge(df, tba,how = "left", on = ["city", "state"])
del df["Unnamed: 4"]
del df["Unnamed: 5"]
del df["Unnamed: 6"]
del df["Unnamed: 7"]
del df["Unnamed: 8"]
del df["Unnamed: 9"]
del df["Unnamed: 10"]
tba = pd.read_csv("Logistics/State-City-MSA - Statewise Highway and Road Length.csv", sep = "|", encoding='latin-1')
df = pd.merge(df, tba, how = "left", on = ["city", "state"])
del df["Unnamed: 5"]
del df["Unnamed: 6"]
del df["Unnamed: 7"]
del df["Unnamed: 8"]
del df["Unnamed: 9"]
del df["Unnamed: 10"]
#only an overlap of 32
#tba = pd.read_csv("Site Building/Data/csv_cityTax_list.csv", sep = "|", encoding='latin-1')
#df = pd.merge(df, tba, on = ["city", "state"])
tba = pd.read_csv("Site Building/Data/statewise_cnbc_ranking.csv", sep = "|", encoding='latin-1')
df = pd.merge(df, tba, how='left', on='state')
del df["Unnamed: 0"]
del df["MSA_x"]
del df["MSA_y"]
df.to_csv("master.csv", sep='|')
| [
"edwardjgilliam@gmail.com"
] | edwardjgilliam@gmail.com |
f48d27f70ae9b6d5aaca30d1dd33762d180d6539 | 35e2aa7ecbb94a7f3eb07f5e326440251f0cb1a4 | /parameters.py | 375b0c16442f29e6830b311fcb3143639691adc0 | [] | no_license | Coutlaw/Programming-1 | 648929e09ad0329bfba7f477a5bd36ca1e21607c | e3e304cfe8f9b44ee5a1df4c20dbdafc740dab7a | refs/heads/master | 2020-06-13T03:59:59.860069 | 2019-06-30T14:39:25 | 2019-06-30T14:39:25 | 194,526,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | # input - 2 numbers through parameters
# output - sum returned
def add(op1, op2):
total = op1 + op2
# stores total as local value
return total
def main():
answer = add(4,7)
print(answer)
#creating a program that calls another program to do the math and work
def power(op1, op2):
total = op1 ** op2
return total
def main2():
num1 = eval(input("Enter the number: "))
num2 = eval(input("Enter the power: "))
answer = power(num1, num2)
print(answer)
#creating a program that will take what you enter
#run it through another program and modify it
def sent(text):
sent = text.lower()
words = sent.split()
return words
def main3():
message = input("Enter your sentence: ")
words = sent(message)
print(words)
# program that accepts a list and returns a product
def func(group):
ints = group.split()
total = 1
for i in range(len(ints)):
total *= int(ints[i])
return total
def main4():
inputs = input("Enter numbers separated by a space: ")
summation = func(inputs)
print(summation)
#concatination program
def in(
| [
"cass.d.outlaw@gmail.com"
] | cass.d.outlaw@gmail.com |
651e8d0d02919c339533bbab8a88b973a7774f3b | 68827850d67688b0e196a29057d4698859665624 | /interno/urls.py | 9104d234ec2451c758ac3caa621febcb89751cf9 | [
"Apache-2.0"
] | permissive | jeKnowledge/JekOffice | 2ba4ca741b06ec34ce58a1c9ad440d590b72463c | 010cef87840bf15280328b56d31f7ea12967ab85 | refs/heads/master | 2022-12-06T06:51:05.905496 | 2019-06-26T14:01:00 | 2019-06-26T14:01:00 | 192,950,335 | 0 | 1 | Apache-2.0 | 2022-11-22T03:53:57 | 2019-06-20T16:05:43 | Python | UTF-8 | Python | false | false | 315 | py | from django.urls import path
from . import views
urlpatterns = [
path('novo/', views.relatorio_novo_view, name='criar_relatorios'),
path('', views.relatorio_lista_view, name='relatorios'),
path('download/<int:relatorio_pk>/', views.download_relatorio_recrutamento_view,name='download_recrutamento'),
]
| [
"bernardo.prior@hotmail.com"
] | bernardo.prior@hotmail.com |
e8f51dc8787882d6db42b81be2c792dd429af2b6 | f088f19958d43dfcd2f1851347519f5deb4a2d64 | /migrations/versions/b6f70a4bc682_added_breed_column.py | f5640ce00d02807a00bd1630a7bbe1fde3f44cf2 | [] | no_license | assyifarostiana/project_day3 | 69e5eaacd0c6b1512dcc092bdd15c400ca296d27 | 172ceff7c42aeb058ed4fc2292d27cb16d7e2455 | refs/heads/master | 2022-11-27T20:10:58.020369 | 2020-08-02T19:26:32 | 2020-08-02T19:26:32 | 283,554,169 | 0 | 0 | null | 2020-07-29T16:54:00 | 2020-07-29T16:54:00 | null | UTF-8 | Python | false | false | 632 | py | """added breed column
Revision ID: b6f70a4bc682
Revises:
Create Date: 2020-08-03 00:46:58.793323
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b6f70a4bc682'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('puppies', sa.Column('breed', sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('puppies', 'breed')
# ### end Alembic commands ###
| [
"assyifarostiana@gmail.com"
] | assyifarostiana@gmail.com |
34a4face36485e6ae83f65edb28e357f514b966c | b100c09e0ea0f777a70de276c67dec3dcd9698b6 | /diverging/PiYG-r.py | 2486429842fad10b5e2758a2e412f9b0764e5961 | [
"Apache-2.0"
] | permissive | trungnnguyen/colorbrewer-colormaps-4-abaqus | cd3e8781af532a9bbde5b802fdc11f97a367c800 | 3b43d12e2ce8ea5019f2d9b9d939cd3649be0e4d | refs/heads/master | 2021-01-21T07:15:14.742357 | 2015-09-17T10:47:33 | 2015-09-17T10:47:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | # Color Schemes for Abaqus/CAE contour plot
# compatible with Abaqus >= 6.14-1
# author: Xiaojun GU
#PiYG-r
from abaqus import *
from abaqusConstants import *
session.Spectrum(name="PiYG-r", colors =(
'#276419',
'#4d9221',
'#7fbc41',
'#b8e186',
'#e6f5d0',
'#f7f7f7',
'#fde0ef',
'#f1b6da',
'#de77ae',
'#c51b7d',
'#8e0152',
))
| [
"myaccadd@outlook.com"
] | myaccadd@outlook.com |
512331448a7d867fef27e66fb94fea9861bd4feb | 93a5ee3a612a218f53541d60c70f9d26da4de35a | /scripts/fpl_cx1_0_vgg_1ss.py | 6591e43a688404a9e81c205ff98fdcac0f9e2372 | [
"BSD-3-Clause"
] | permissive | alexshevelkin/flypylib | 8a0228992c3191a28ab5d7fcb8b6da9e9edcaf10 | b52aa96aca3ae1fcd5e10c22de75e832cf0590c6 | refs/heads/master | 2022-01-25T23:58:05.356570 | 2018-11-02T21:01:41 | 2018-11-02T21:01:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,216 | py | from flypylib import fplobjdetect, fplmodels, fplnetwork, fplsynapses
import numpy as np
import matplotlib.pyplot as plt
# choose a net architecture
# possible models: baseline_model, vgg_like, resnet_like, unet_like
model = fplmodels.vgg_like;
network = fplnetwork.FplNetwork(model)
n_gpu = 4
batch_size = 64
train_shape = network.rf_size
is_mask = (model==fplmodels.unet_like)
network.make_train_parallel(n_gpu, batch_size, train_shape)
base_dir = '/groups/flyem/data/synapse_training'
train_idx = (28,)
test_idx = (54, 58, 60, 61, 65, 69, 70, 75, 95, 103)
train_dir = '%s/cx1_0_0' % base_dir
train_data = []
for ii in train_idx:
train_data.append( (
'%s/cx1_%03d_mn135_std48_image.h5' % (train_dir,ii),
'%s/cx1_%03d_ru7_ri15_mn135_std48_im_' % (train_dir,ii) ))
generator = fplobjdetect.gen_batches(
train_data, train_shape, batch_size*n_gpu, is_mask)
train_json = []
for ii in train_idx:
train_json.append('%s/cx1_%03d_synapses.json' % (train_dir,ii))
test_dir = '%s/cx1_0_1' % base_dir
test_data = []
for ii in test_idx:
test_image = '%s/cx1_%03d_mn135_std48_image.h5' % (test_dir,ii)
test_json = '%s/cx1_%03d_synapses.json' % (test_dir,ii)
test_data.append( [test_image, test_json] )
for ee in range(3):
network.train(generator, 1000, 10)
network.make_infer_parallel(4)
mm_train = []
for ii in range(len(train_data)):
pred = network.infer(train_data[ii][0])
out = fplobjdetect.voxel2obj(pred, 27, 5, None, 5)
gt = fplsynapses.load_from_json(train_json[ii])
mm_train.append(fplobjdetect.obj_pr_curve(
out, gt, 27, np.arange(0.6,0.96,0.02) ))
mm_train_agg = fplobjdetect.aggregate_pr(mm_train)
mm_test_agg, mm_test = fplobjdetect.evaluate_substacks(
network, test_data, np.arange(0.6,0.96,0.02),
obj_min_dist=27, smoothing_sigma=5,
volume_offset=None, buffer_sz=5)
plt.figure()
plt.plot(mm_train_agg.rr, mm_train_agg.pp, 'b-')
plt.plot(mm_test_agg.rr, mm_test_agg.pp, 'r-')
plt.xlabel('recall')
plt.ylabel('precision')
plt.legend(['train', 'test'], loc='lower left')
plt.show()
| [
"gbhuang@cs.umass.edu"
] | gbhuang@cs.umass.edu |
3798b22e381da18c59fb8d1affcc594e8ca661f2 | 2dd5ac139cde8317e04203b02b3692a0e4feb7b2 | /Universidad/Aplicaciones/Academico/urls.py | a2b1afa8cdcdcefd23995584a32b25c63a6527c2 | [] | no_license | Oriel-Barroso/Crud-django | d0d37cdcf9e981338639a0f2b33255bfd8cdc5f0 | 4e5355bcfa4929b89e6f91a47c52a64237741e96 | refs/heads/main | 2023-07-30T20:48:39.308382 | 2021-09-21T04:15:48 | 2021-09-21T04:15:48 | 408,684,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | from django.urls import path
from .views import home as Home, registrarCurso as Registro, eliminacionCurso as Eliminacion, edicionCurso as Edicion, editarCurso as Editar
urlpatterns = [
path('', Home),
path('registrarCurso/', Registro),
path('eliminacionCurso/<codigo>', Eliminacion),
path('edicionCurso/<codigo>', Edicion),
path('editarCurso/', Editar)
] | [
"oriel_gold97@outlook.com"
] | oriel_gold97@outlook.com |
e2c48acc861f4854a7ca006b52a39240c47de4bd | 1292e182157da89a228e006fa6194140dd1196eb | /venv/bin/pip3 | 754afe3bfa0a3b71dd34f1e7b885d9c1e115e7df | [] | no_license | kandruch/keith-blog | b9d6f16d79c962ac64cc2357527d13353a7dff1e | 212d380dfe9ef2ed442e79db5ae57418633925b7 | refs/heads/master | 2023-04-11T03:30:04.685395 | 2021-04-25T02:32:34 | 2021-04-25T02:32:34 | 361,307,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | #!/Users/kandruch/PycharmProjects/day-69-blogusers.py/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"kcandruch@gmail.com"
] | kcandruch@gmail.com | |
6c184b37d53fc6a5a548935fc3e615c2a6c233b3 | 76e1d13f91f9b69c75910f91376782b462135017 | /CryptoCurrency/phase2/Tx.py | df2a6423d40e62351e63b38ba0813639cb27c16c | [] | no_license | herentug/CS411 | a57e94f2f75f0dab5516dd6fbdd105cb7554e02e | 5c52bdc5917570999073addf7ababe4f3f7f1360 | refs/heads/master | 2020-08-12T01:20:16.003822 | 2020-01-11T22:44:46 | 2020-01-11T22:44:46 | 214,662,366 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | import random
import DS
import os
def serialNumberGen(b):
    """Return a uniformly random serial number that is exactly ``b`` bits long.

    Args:
        b: desired bit size of the serial number (b >= 1).

    Returns:
        An int in [2**(b-1), 2**b - 1], i.e. with the top bit set.
    """
    # random.randrange excludes its stop value, so the original call
    # randrange(2**(b-1), 2**b - 1) could never produce the all-ones
    # value 2**b - 1.  Use 2**b as the (exclusive) stop instead.
    return random.randrange(2 ** (b - 1), 2 ** b)
def gen_random_tx(q,p,g):
    """Build one random, signed "Bitcoin transaction" as a newline-terminated string.

    Args:
        q, p, g: DSA-style group parameters forwarded to DS.KeyGen/DS.SignGen.

    Returns:
        Seven lines of text: header, serial number, payer key, payee key,
        amount, and the (s, r) signature over the first five lines.
    """
    lines = [""]*7
    lines[0]="***Bitcoin transaction"
    lines[1]="Serial number:"+str(serialNumberGen(128))
    # Payer key pair: 'a' is the private key (used below to sign),
    # 'b' is the public key that goes into the transaction text.
    a,b=DS.KeyGen(q,p,g)
    lines[2]="Payer public key (beta): "+str(b)
    # Payee key pair: only the public part (b2) is recorded.
    a2,b2=DS.KeyGen(q,p,g)
    lines[3]="Payee public key (beta):"+str(b2)
    amount=random.randrange(0,100000)
    lines[4]="Amount:"+str(amount)
    # The signature covers the first five lines joined with '\n' plus a
    # trailing newline, signed with the payer's private key 'a'.
    sign = '\n'.join(lines[0:5])+'\n'
    s,r=DS.SignGen(sign.encode('utf-8'),q,p,g,a)
    lines[5]="Signature (s): "+str(s)
    lines[6]="Signature (r): "+str(r)
    ret=""
    for i in range(0,7):
        #print(lines[i])
        ret+=lines[i]+"\n"
    return ret
def gen_random_txblock(q, p, g, TxCnt, filename):
    """Write TxCnt random transactions to ``filename``.

    Args:
        q, p, g: group parameters forwarded to gen_random_tx.
        TxCnt: number of transactions; must be a power of two
            (the caller builds a Merkle tree over the block).
        filename: output path, truncated and rewritten.

    Exits the process with status 1 when TxCnt is not a power of two,
    matching the original error-handling behaviour.
    """
    # Validate *before* opening the file so an invalid count does not
    # create/truncate the output file (the original opened it first).
    if not ((TxCnt & (TxCnt - 1) == 0) and TxCnt != 0):
        print("TxCnt is not power of 2, handling error...")
        exit(1)
    # 'with' guarantees the file is closed even if gen_random_tx raises.
    with open(filename, "w") as f:
        for _ in range(TxCnt):
            f.write(gen_random_tx(q, p, g))
"""
Hakan Bugra Erentug - Nidanur Günay
CS411 Project Phase II
05/12/19
Erkay Savaş - Fall 2019
"""
| [
"herentug@sabanciuniv.edu"
] | herentug@sabanciuniv.edu |
75e0ae37ecf9f70341b825d25e43feb4e1676ba9 | ba4650b4d096bab85b5b8c503a7dee36330efdd7 | /car_pole.py | 5099d3a81163ffe4c7ab67e71c0022e8db9c39da | [] | no_license | ZHUDEJUN1985/policygra | e2001c7ebcd977b969a19334102ddb0ede86d621 | 99eccebb3f9b2d664b7a1e519b9fe85543d1a248 | refs/heads/master | 2021-06-27T04:37:38.804730 | 2017-09-21T09:26:32 | 2017-09-21T09:26:32 | 100,348,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | import gym
from pg import PolicyGradient
import matplotlib.pyplot as plt
DISPLAY_REWARD_THRESHOLD = -200
RENDER = False
is_train = False
env = gym.make('MountainCar-v0')
env.seed(1)
env = env.unwrapped
print(env.action_space)
print(env.observation_space)
print(env.observation_space.high)
RL = PolicyGradient(n_actions=env.action_space.n, n_features=env.observation_space.shape[0], learning_rate=0.01,
reward_decay=0.98)
if not is_train:
model_file = RL.restore_file
RL.saver.restore(RL.sess, model_file)
max_reward = -200
for i_episode in range(1000):
observation = env.reset()
running_reward = 0
i = 0
while True:
if RENDER:
env.render()
action = RL.choose_action(observation)
observation_, reward, done, info = env.step(action)
RL.store_transition(observation, action, reward)
i += 1
if i % 1000 == 0:
print("i=%d, action=%d" % (i, action))
if done:
ep_rs_sum = sum(RL.ep_rs)
if 'running_reward' not in globals():
running_reward = ep_rs_sum
else:
running_reward = running_reward * 0.99 + ep_rs_sum * 0.01
if running_reward > DISPLAY_REWARD_THRESHOLD:
RENDER = True
print("episode:", i_episode, " reward:", int(running_reward))
vt = RL.learn()
if is_train and running_reward > max_reward:
max_reward = running_reward
RL.saver.save(RL.sess, 'ckpt/car_pole/car_pole.ckpt')
if i_episode == 30:
plt.plot(vt)
plt.xlabel('episode steps')
plt.ylabel('normalized state-action value')
plt.show()
break
observation = observation_
| [
"zhudejun@levelup.ai"
] | zhudejun@levelup.ai |
2f67faca481b05d0be5e0b525efd110a623f5179 | cb8a597cb241eae62903e0354069b524be369d17 | /events/Events.py | b492193287746bc5cc8fe763982436435f2222e7 | [] | no_license | TilerTheTiller/ArdentBot | 25e252f648229d7b168b8e2aeff64df936235f73 | 9070265ab50b98b558266d93601c130b641c4bbf | refs/heads/master | 2022-11-12T20:44:43.099591 | 2020-07-10T23:10:26 | 2020-07-10T23:10:26 | 278,749,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,842 | py | import discord
import random
import datetime
import index
import json
from discord.ext import commands
class Events(commands.Cog):
    """Discord cog with join/leave announcements, command-error replies,
    and reaction-role assignment driven by reaction.json / index.roles."""
    def __init__(self, client):
        self.client = client
    # events
    @commands.Cog.listener()
    async def on_member_join(self, member):
        """Post a welcome embed in the guild's system channel.

        NOTE(review): member.guild.system_channel can be None when the
        guild has no system channel configured — confirm before relying
        on this in every guild.
        """
        welcomech = member.guild.system_channel
        embed = discord.Embed(title='User Joined',
                              description=f'{member.mention} joined {member.guild.name}, Welcome brother!',
                              color=0x166CD4)
        await welcomech.send(embed=embed)
    @commands.Cog.listener()
    async def on_member_remove(self, member):
        """Post a departure embed in the guild's system channel."""
        welcomech = member.guild.system_channel
        embed = discord.Embed(title='User Left',
                              description=f'{member.mention} left {member.guild.name}, **_Traitor_**',
                              color=0xFF4040)
        await welcomech.send(embed=embed)
    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Reply with a human-readable message for common command errors.

        Unlisted error types (including CommandNotFound, see the
        commented-out branch) are silently ignored.
        """
        if isinstance(error, commands.NoPrivateMessage):
            await ctx.send(f'Please use this command in the server')
        # if isinstance(error, commands.CommandNotFound):
        #     response = await ctx.send(f'{ctx.author.mention} That command does not exist')
        if isinstance(error, commands.MissingPermissions):
            await ctx.send(f'{ctx.author.mention} You do not have permission to use that command')
        if isinstance(error, commands.BotMissingPermissions):
            await ctx.send(f'{ctx.author.mention} I do not have permission to do this')
        if isinstance(error, commands.NotOwner):
            await ctx.send(f'{ctx.author.mention} You must be the owner of the bot to perform this action')
    @commands.Cog.listener()
    async def on_raw_reaction_add(self, payload):
        """Grant the role mapped to the reacted emoji (index.roles) when the
        reaction is on the message id stored in reaction.json.

        The role is created on the fly if it does not exist yet.  Note that
        reaction.json is re-read from disk on every reaction event.
        """
        with open('reaction.json') as fole:
            midon = json.load(fole)
        message_id = payload.message_id
        channel_id = payload.channel_id
        if message_id == midon['message']:
            guild_id = payload.guild_id
            guild = discord.utils.find(lambda g: g.id == guild_id, self.client.guilds)
            found = False
            for k, v in index.roles.items():
                if payload.emoji.name == v['name']:
                    role = discord.utils.get(guild.roles, name=k)
                    if role is None:
                        role = await guild.create_role(name=k)
                    found = True
                    break
            if not found:
                # Early return keeps 'role' from being read while unbound.
                return print('No roles to be assigned')
            if role is not None:
                member = discord.utils.find(lambda m: m.id == payload.user_id, guild.members)
                if not member.bot:
                    await member.add_roles(role)
    @commands.Cog.listener()
    async def on_raw_reaction_remove(self, payload):
        """Revoke the role mapped to the removed emoji; mirror of
        on_raw_reaction_add except that missing roles are not created."""
        with open('reaction.json') as fole:
            midon = json.load(fole)
        if payload.message_id == midon['message']:
            guild_id = payload.guild_id
            guild = discord.utils.find(lambda g: g.id == guild_id, self.client.guilds)
            found = False
            for k, v in index.roles.items():
                if payload.emoji.name == v['name']:
                    role = discord.utils.get(guild.roles, name=k)
                    found = True
                    break
            if not found:
                return print('No roles to be removed')
            if role is not None:
                member = discord.utils.find(lambda m: m.id == payload.user_id, guild.members)
                if not member.bot:
                    await member.remove_roles(role)
def setup(client):
    """Entry point used by discord.py's extension loader to register this cog."""
    cog = Events(client)
    client.add_cog(cog)
| [
"noreply@github.com"
] | TilerTheTiller.noreply@github.com |
2da20cd7cb1f6406ed6a080f65357ac1e174a6db | 403d4677aa6fc2d3028fec07424c5ef88eeeba2d | /src/spaceone/core/fastapi/__init__.py | 7384818630ef604154b893cecda9a0062bc7fac9 | [
"Apache-2.0"
] | permissive | jean1042/python-core | c3b0e03bbef3c5a9498bcd0f91090d11f58cb720 | 66613dbaad5a0c1d8917cbb6f38fd02d1b82adf6 | refs/heads/master | 2023-08-21T10:33:03.485806 | 2021-10-29T14:06:08 | 2021-10-29T14:06:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | from spaceone.core.fastapi.server import serve
__all__ = ['serve']
| [
"bluese@megazone.com"
] | bluese@megazone.com |
43f7a02ca6b0f5d64bc9776f56f202cf86a514e8 | a761a5c6fc3b56ec252e4d475d0cd1bedc457d47 | /blog/migrations/0003_post_tags.py | b1a0498afe84ce225501c41a6756f3f6afbdd532 | [] | no_license | GlebMironov/blog | 8f6e1d1f47173372b013d31cdeac492109270228 | 7e639efb570d2329561b0caefb64da4766b6dcbd | refs/heads/master | 2016-09-14T10:43:25.116656 | 2016-05-12T10:43:25 | 2016-05-12T10:43:25 | 58,632,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-08 12:17
from __future__ import unicode_literals
from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('blog', '0002_comment'),
]
operations = [
migrations.AddField(
model_name='post',
name='tags',
field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
]
| [
"glebmi69@gmail.com"
] | glebmi69@gmail.com |
44c47f9b49a9efe84572482f9094e171c3a4f866 | 83b3a3da85c228aa9592c1d09ba9bf9266e77b40 | /backpro/api/migrations/0003_auto_20190514_0153.py | 8a9b16581343442dc8522b7b105c9d7389d7fdf3 | [] | no_license | abzalmyrzash/SE-Project | 7ba65a65cead83bd11927a7b79021930656ddf0b | 51a7d184a26e8ab54d6cbf39e4059c981f49f9a3 | refs/heads/master | 2020-05-26T05:57:18.338845 | 2019-05-22T23:47:59 | 2019-05-22T23:47:59 | 188,128,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | # Generated by Django 2.2.1 on 2019-05-13 19:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20190514_0112'),
]
operations = [
migrations.AlterField(
model_name='category',
name='created_by',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"abzal.myrzash006@gmail.com"
] | abzal.myrzash006@gmail.com |
0d0a2f0c3406abe0ae8d8f37a49dd1c04e589fdb | c0e493d0590945f511316e44d14f0f5691fee2c9 | /limix_inference/lmm/_fastlmm/_fastlmm.py | 474fde42b806cedc8630a3752281c92ce811328d | [
"MIT"
] | permissive | fpcasale/limix-inference | afc6f00261c4fb7b48d43ea71d8f4afb16df70d7 | 3c2aab9792b4885cf575a8b0e8aaa3e445cda782 | refs/heads/master | 2021-01-17T14:10:41.464861 | 2017-03-06T13:43:33 | 2017-03-06T13:43:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,519 | py | from __future__ import division
from numpy import exp
from numpy import clip
from numpy import atleast_2d
from numpy_sugar import is_all_finite
from optimix import maximize_scalar
from optimix import Function
from optimix import Scalar
from ._core import FastLMMCore
class FastLMM(Function):
    r"""Fast Linear Mixed Models inference based on the covariance rank."""
    def __init__(self, y, Q0, Q1, S0, covariates=None):
        """Set up the optimisation problem.

        Args:
            y: phenotype vector; must contain only finite values.
            Q0, Q1, S0: eigendecomposition pieces of the covariance
                consumed by FastLMMCore.
            covariates: optional fixed-effect design matrix.
        """
        # 'logistic' is the single scalar being optimised; _delta() maps
        # it through a sigmoid into the (0, 1) variance-ratio delta.
        super(FastLMM, self).__init__(logistic=Scalar(0.0))
        if not is_all_finite(y):
            raise ValueError("There are non-finite values in the phenotype.")
        self._flmmc = FastLMMCore(y, covariates, Q0, Q1, S0)
        self.set_nodata()
    def get_normal_likelihood_trick(self):
        return self._flmmc.get_normal_likelihood_trick()
    @property
    def M(self):
        # Fixed-effect design matrix, delegated to the core object.
        return self._flmmc.M
    @M.setter
    def M(self, v):
        self._flmmc.M = v
    def copy(self):
        """Return an independent copy sharing no mutable state."""
        # __new__ + manual init avoids re-validating y / rebuilding the core.
        o = FastLMM.__new__(FastLMM)
        super(FastLMM, o).__init__(logistic=Scalar(self.get('logistic')))
        o._flmmc = self._flmmc.copy()
        o.set_nodata()
        return o
    def _delta(self):
        """Map the unconstrained 'logistic' scalar to delta in [1e-5, 1-1e-5]."""
        # Clip the raw value first so exp(-v) cannot overflow.
        v = clip(self.get('logistic'), -20, 20)
        x = 1 / (1 + exp(-v))
        return clip(x, 1e-5, 1 - 1e-5)
    @property
    def heritability(self):
        # Fraction of total variance explained by the genetic component.
        t = (self.fixed_effects_variance + self.genetic_variance +
             self.environmental_variance)
        return self.genetic_variance / t
    @property
    def fixed_effects_variance(self):
        return self._flmmc.m.var()
    @property
    def genetic_variance(self):
        return self._flmmc.scale * (1 - self._flmmc.delta)
    @property
    def environmental_variance(self):
        return self._flmmc.scale * self._flmmc.delta
    @property
    def beta(self):
        # Fixed-effect sizes estimated by the core.
        return self._flmmc.beta
    @property
    def m(self):
        # Mean vector M @ beta from the core.
        return self._flmmc.m
    def learn(self, progress=True):
        """Maximise the marginal likelihood over the scalar parameter."""
        maximize_scalar(self, progress=progress)
        self._flmmc.delta = self._delta()
    def value(self):
        # optimix objective: push the current delta into the core, return lml.
        self._flmmc.delta = self._delta()
        return self._flmmc.lml()
    def lml(self, fast=False):
        """Return the log marginal likelihood at the current parameters.

        NOTE(review): the 'fast' flag is accepted but never used — confirm
        whether it was meant to route through get_normal_likelihood_trick().
        """
        self._flmmc.delta = self._delta()
        return self._flmmc.lml()
    def predict(self, X, covariates, Xp, trans=None):
        """Predict phenotypes for new samples Xp given training genotypes X.

        Args:
            trans: optional transformer applied to Xp before forming the
                cross-covariances.
        """
        covariates = atleast_2d(covariates)
        Xp = atleast_2d(Xp)
        if trans is not None:
            Xp = trans.transform(Xp)
        # Cross-covariance (new vs train) and covariance of the new samples.
        Cp = Xp.dot(X.T)
        Cpp = Xp.dot(Xp.T)
        return self._flmmc.predict(covariates, Cp, Cpp)
| [
"danilo.horta@gmail.com"
] | danilo.horta@gmail.com |
d421446cf2b427f23e932808b0a6411885c968a4 | 59f64b5cf799e31c97b11828dba4787afb8f3f17 | /batch/setup.py | 5745340b5d80e1ab1635dfe1ee89c67ad12a54c7 | [
"MIT"
] | permissive | hail-is/hail | 2089e6f3b38548f13fa5c2a8ab67f5cfdd67b4f1 | 07a483ae0f46c66f3ed6fd265b48f48c06298f98 | refs/heads/main | 2023-09-01T15:03:01.450365 | 2023-09-01T02:46:35 | 2023-09-01T02:46:35 | 45,069,467 | 913 | 262 | MIT | 2023-09-14T21:53:32 | 2015-10-27T20:55:42 | Python | UTF-8 | Python | false | false | 300 | py | from setuptools import find_packages, setup
setup(
name='batch',
version='0.0.2',
url='https://github.com/hail-is/hail.git',
author='Hail Team',
author_email='hail@broadinstitute.org',
description='Job manager',
packages=find_packages(),
include_package_data=True,
)
| [
"daniel.zidan.king@gmail.com"
] | daniel.zidan.king@gmail.com |
f18362bf2c4a16f951dd16960374458c73bfda04 | d863e33c41c71fc2c33092830dd91279acde9214 | /tarefas/migrations/0002_tarefa_completado.py | 29b535b49c1bb1f7675f2711e37addcf04fef3cb | [] | no_license | Graziele-Rodrigues/AppToDo | d250cafabf808fb4ea5a8c5642c413e242654331 | 007939f97b49acb4044e5390a6e09e5991e90ca6 | refs/heads/main | 2023-06-18T16:29:28.844205 | 2021-07-15T01:23:24 | 2021-07-15T01:23:24 | 386,118,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # Generated by Django 3.0 on 2021-07-15 00:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tarefas', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tarefa',
name='completado',
field=models.BooleanField(default=False),
),
]
| [
"c.graziele.rodrigues@gmail.com"
] | c.graziele.rodrigues@gmail.com |
3bc3d4f99351278c4c04a4e008454576c15d83cb | 5d7d652a38cb3ae6eb90f20ec2413c6dca9be26f | /basic/functions.py | b6e8a570e436ad1e187ad3cd47078492399814c5 | [] | no_license | HeWangchen/LearnPython | 8cd145e9d63e96a9152b7669a3cae0ec75f84aff | 1e6de8c2b48c89498b79bac129ec51e3d86abc2e | refs/heads/master | 2021-01-22T20:44:18.718505 | 2012-09-03T08:49:15 | 2012-09-03T08:49:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,332 | py | def sqrt(x):
ans = 0
if (x >= 0):
while ans*ans < x:
ans += 1
if ans*ans != x:
print(x , 'is not a Integer square',end = '...\n')
return None
else:
return ans
else:
print(x,'must > 0 !',end = '...\n')
return None
def f(x):
    """Return x incremented by one."""
    return x + 1
def fibonacci(x):
    """Return the x-th Fibonacci number, with fibonacci(0) == fibonacci(1) == 1.

    Rewritten iteratively: the original double recursion ran in
    exponential time, making even fibonacci(35) noticeably slow; this
    version is linear in x and returns identical values for x >= 0.
    """
    prev, curr = 1, 1
    for _ in range(x - 1):
        prev, curr = curr, prev + curr
    return curr
def squareRootBi(x, epsilon):
    """Approximate sqrt(x) by bisection until |guess**2 - x| <= epsilon.

    Asserts on invalid input and on failure to converge within 100
    iterations; prints the iteration count before returning.
    """
    assert x >= 0, 'x must be non-negative ,not ' + str(x)
    assert epsilon > 0, 'epsilon must be positive, not ' + str(epsilon)
    lo, hi = 0, max(x, 1.0)
    mid = (lo + hi) / 2.0
    steps = 1
    while abs(mid ** 2 - x) > epsilon and steps <= 100:
        if mid ** 2 < x:
            lo = mid
        else:
            hi = mid
        mid = (hi + lo) / 2.0
        steps += 1
    assert steps <= 100, 'Iteration count exceeded'
    print('Bi method Num iterations: ', steps, 'answer : ', mid)
    return mid
def squareRootNR(x, epsilon):
    """Approximate sqrt(x) by Newton-Raphson until |estimate**2 - x| <= epsilon.

    Asserts on invalid input and on failure to converge within 100
    iterations; prints the iteration count before returning.
    """
    assert x >= 0, 'x must be non-negative ,not ' + str(x)
    assert epsilon > 0, 'epsilon must be positive, not ' + str(epsilon)
    x = float(x)
    estimate = x / 2.0
    #guess = 0.001
    residual = estimate ** 2 - x
    steps = 1
    while abs(residual) > epsilon and steps <= 100:
        estimate -= residual / (2.0 * estimate)
        residual = estimate ** 2 - x
        steps += 1
    assert steps <= 100, 'Iteration count exceeded'
    print('NR method Num ,iterations: ', steps, 'Answer : ', estimate)
    return estimate
#import math
##function
#def hyp(base,height):
# hyp = math.sqrt(base*base + height*height)
# print ('Base : ' , str(base) , 'height : ' , str(height) , 'hyp : ' , str(hyp))
# return hyp
## Get base
#inputOK = False
#while not inputOK:
# base = input('Input base: ')
# if type(base) == type(1.0): inputOK = True
# else: print('Error : base must be a floating number~')
## Get height
#inputOK = False
#while not inputOK:
# height = input('Input height: ')
# if type(height) == type(1.0) : inputOK = True
# else: print('Error : height must be a floating number~')
#hyp(base,height)
# Demo / smoke-test calls: exercise both square-root methods, the
# Fibonacci function, f, and the integer sqrt, printing each result.
squareRootBi(24,0.001)
squareRootNR(24,0.001)
squareRootBi(0.25,0.0001)
squareRootNR(0.25,0.0001)
z = fibonacci(12)
print(z)
x = 3
x = f(x)
print(x)
y = sqrt(x)
print(y)
| [
"hewangchen0727@gmail.com"
] | hewangchen0727@gmail.com |
d4a23ed0b13e657f4111bcccb64f7c5499707cc3 | 31cf6cb28568307ffa3551f2fbf8a006f26ff52c | /FactorGo/__init__.py | 6027c27adadca8836e107b02189a52360efdc3ee | [] | no_license | LUS8806/FactorGo | 74aa136ee0551b91b4c91bb4c598d2f106e92d05 | cd1f225b1c2fe5499476f62ceab86a7d7d20e8a6 | refs/heads/master | 2022-11-17T11:15:33.465663 | 2020-07-15T13:10:16 | 2020-07-15T13:10:16 | 278,116,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | from FactorGo.factor_base import FactorDataStruct
from FactorGo.factor_process import FactorProcess
from FactorGo.factor_process import FactorWinsorize, FactorNeutralize, FactorCodeFilter, FactorStandardize
| [
"sonne.lu@hotmail.com"
] | sonne.lu@hotmail.com |
cef7f4cf6998c60638388556429591f77e314c61 | 4bdd12fc089d3cc5a9b36030dc53b73a8d4b3064 | /src/api/views/review.py | 9d937106955b7a116229348ab025bd1996368ac4 | [] | no_license | CodingCobrasCS480/CPPCarpoolBackend | e888671bf6aa5761a1cebc6928080f304c648416 | 616c239320d015ce68a353e2c9548ceec71d4e5b | refs/heads/master | 2021-01-11T00:36:38.166441 | 2016-12-01T00:46:56 | 2016-12-01T00:46:56 | 70,534,697 | 0 | 1 | null | 2016-11-26T06:39:23 | 2016-10-10T22:39:43 | Python | UTF-8 | Python | false | false | 4,020 | py | from flask import Blueprint, request, jsonify
from flask_restful import Api, Resource
from api import db
from psycopg2.extras import RealDictCursor
from api.extensions import requires_auth
review_bp = Blueprint('review_bp', __name__)
review_api = Api(review_bp)
def get_form(key):
    """Return the posted form value for ``key``, or None when absent."""
    # MultiDict.get already yields None for missing keys, replacing the
    # explicit membership test of the original.
    return request.form.get(key)
class ReviewsResource(Resource):
    """REST resource for a user's review collection.

    GET  /<prefix>   — list reviews received by <prefix>@cpp.edu.
    DELETE /<id>     — delete one of the caller's own reviews by id.

    NOTE(review): the same URL segment is interpreted as an e-mail prefix
    on GET but as a numeric review id on DELETE — confirm this routing
    overload is intentional.
    """
    @requires_auth
    def get(self, param):
        """Return all reviews received by the user <param>@cpp.edu."""
        email = param + '@cpp.edu'
        # lookup userid for requested user
        c = db.cursor(cursor_factory=RealDictCursor)
        c.execute("SELECT id FROM users WHERE cppemail = %s", (email,))
        # check if we got a result
        row = c.fetchone()
        if row is None:
            return 'User does not exist', 404
        # get id
        userid = dict(row)['id']
        # get all reviews for user, join with users table
        c.execute("SELECT reviews.id, reviews.reviewer_userid, reviews.reviewee_userid, reviews.stars, reviews.content, users.cppemail as reviewer_email, users.fullname as reviewer_name, users.picture as reviewer_picture FROM reviews, users WHERE reviews.reviewee_userid = %s AND reviews.reviewer_userid = users.id", (userid,))
        # check if we got any rows
        rows = c.fetchall()
        if rows is None:
            return jsonify([])
        # jsonify row and return
        return jsonify(results=rows)
    @requires_auth
    def delete(self, param):
        """Delete review <param>, allowed only for the review's author.

        NOTE(review): int(param) raises ValueError (-> 500) for a
        non-numeric segment; consider validating before converting.
        """
        reviewid = int(param)
        # lookup review id and enforce reviewer_userid
        c = db.cursor(cursor_factory=RealDictCursor)
        c.execute("SELECT id FROM reviews WHERE reviewer_userid = %s and id = %s", (request.id, reviewid))
        # check if we got a result
        row = c.fetchone()
        if row is None:
            return 'Review does not exist', 404
        # get all reviews for user, join with users table
        c.execute("DELETE FROM reviews WHERE id = %s", (reviewid,))
        db.commit()
        return 'OK', 202
class ReviewResource(Resource):
    """REST resource for creating a review (POST /).

    Expects form fields: email (reviewee), stars (1-5), content.
    Enforces: no self-review, both users exist, at most one review per
    reviewer/reviewee pair.  All SQL is parameterized.
    """
    @requires_auth
    def post(self):
        """Create a review authored by the authenticated caller."""
        # make sure user is not reviewing themself
        reviewee_email = get_form('email')
        if reviewee_email == request.email:
            return 'You can not review yourself', 400
        # lookup reviewer id
        c = db.cursor(cursor_factory=RealDictCursor)
        c.execute("SELECT id FROM users WHERE cppemail = %s", (request.email,))
        # check if we got a result
        row = c.fetchone()
        if row is None:
            return 'You do not exist', 404
        # get id
        reviewer_id = dict(row)['id']
        # lookup reviewee id
        c.execute("SELECT id FROM users WHERE cppemail = %s", (reviewee_email,))
        # check if we got a result
        row = c.fetchone()
        if row is None:
            return 'User does not exist', 404
        # get id
        reviewee_id = dict(row)['id']
        # make sure user has not already reviewed this person
        c.execute("SELECT id FROM reviews WHERE reviewee_userid = %s and reviewer_userid = %s", (reviewee_id, reviewer_id))
        # check if we got a result
        row = c.fetchone()
        if row is not None:
            return 'You can not review more than once', 400
        # get # of stars
        stars_unparsed = get_form('stars')
        if stars_unparsed is None:
            return 'Missing field: stars', 400
        # NOTE(review): int() raises ValueError (-> 500) for non-numeric
        # input; consider returning 400 instead.
        stars = int(stars_unparsed)
        if stars < 1 or stars > 5:
            return 'Stars out of range', 400
        content = get_form('content')
        if content is None or len(content) == 0:
            return 'Missing field: content', 400
        # insert new record into reviews table
        c.execute("INSERT INTO reviews (reviewer_userid, reviewee_userid, stars, content) VALUES (%s, %s, %s, %s)", (reviewer_id, reviewee_id, stars, content))
        db.commit()
        return 'OK', 201
review_api.add_resource(ReviewsResource, '/<string:param>')
review_api.add_resource(ReviewResource, '/')
| [
"Christian.T.Hill@jpl.nasa.gov"
] | Christian.T.Hill@jpl.nasa.gov |
189bb8a25718fb9f0a4778453cbd0bb5d9197b31 | 537ddf1ca5d9b5f141a82aa3baf4bee782fd9a4e | /utils/dataset.py | 36456d8cd409705706a07d4b1ea360b34ca89e24 | [
"BSD-3-Clause"
] | permissive | iceshade000/ADGT | 113ef60b8edab2fb26b3901031a30c1e870ba381 | 54cf5df302b9ff4a9007856ae2596d136d40c84b | refs/heads/master | 2023-01-01T00:04:31.664843 | 2022-06-07T02:08:10 | 2022-06-07T02:08:10 | 286,201,922 | 0 | 2 | MIT | 2020-10-28T05:50:01 | 2020-08-09T08:59:54 | Python | UTF-8 | Python | false | false | 7,275 | py | from torch.utils.data import Dataset, DataLoader
import os
from PIL import Image
from torchvision.datasets.cifar import CIFAR10
from torchvision import transforms
def is_image_file(filename):
    """Return True if ``filename`` ends with a recognised image extension.

    The comparison is case sensitive: only the exact spellings listed
    below match (e.g. '.TIFF' does not).
    """
    recognised = ('.jpg', '.JPG', '.jpeg', '.JPEG',
                  '.png', '.PNG', '.ppm', '.PPM',
                  '.bmp', '.BMP', '.tiff')
    # str.endswith accepts a tuple of suffixes, replacing the any() scan.
    return filename.endswith(recognised)
def data_process(root, dataset, batch_size, device_ids, is_train=False, img_size=128):
    """Build train/test DataLoaders for the named dataset.

    Args:
        root: base data directory; the dataset name is appended to it.
        dataset: 'cifar10', 'VOC_single', or any folder-style dataset
            handled by CommonDataset.
        batch_size: per-loader batch size.
        device_ids: GPU ids; only len() is used, to size num_workers.
        is_train: when False, train_loader is returned as None.
        img_size: resize target for folder datasets (ignored for cifar10).

    Returns:
        (train_loader_or_None, test_loader, num_classes)
    """
    root = os.path.join(root, dataset)
    # train loader
    train_loader = None
    if dataset=='cifar10':
        # CIFAR-10 uses its canonical per-channel statistics; no resize.
        normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                         std=[0.2023, 0.1994, 0.2010])
        train_transform = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    else:
        # Folder datasets: resize to img_size and normalize to [-1, 1].
        train_transform = transforms.Compose([
            transforms.Resize((img_size, img_size), 0),
            #transforms.RandomCrop(img_size,4),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                 std=[0.5, 0.5, 0.5])
        ])
        # test loader
        test_transform = transforms.Compose([
            transforms.Resize((img_size, img_size), 0),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                 std=[0.5, 0.5, 0.5])
        ])
    if is_train:
        if dataset == 'cifar10':
            train_dataset = CIFAR10(root, train=True, download=True, transform=train_transform)
        elif dataset == 'VOC_single':
            train_dataset = SingleDataset(root, train=True, transform=train_transform)
        else:
            train_dataset = CommonDataset(root, train=True, transform=train_transform)
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                  num_workers=4 * len(device_ids), pin_memory=False)
    if dataset == 'cifar10':
        test_dataset = CIFAR10(root, train=False, download=True, transform=test_transform)
        num_classes = 10
    elif dataset =='VOC_single':
        test_dataset = SingleDataset(root, train=False, transform=test_transform)
        num_classes = test_dataset.num_classes
    else:
        test_dataset = CommonDataset(root, train=False, transform=test_transform)
        num_classes = test_dataset.num_classes
    # Test loader is never shuffled.
    test_loader = DataLoader(test_dataset, batch_size=batch_size,
                             num_workers=4 * len(device_ids), pin_memory=False)
    return train_loader, test_loader, num_classes
class CommonDataset(Dataset):
    """Image-folder dataset over root/{train,test}/<class_dir>/...

    Two layouts are supported: for roots whose path contains 'animal' or
    'NICO', images live one level deeper (class/property/image); otherwise
    images sit directly under each class folder.  Labels are the
    enumeration index of each class directory.
    """
    def __init__(self, root, train=True, transform=None):
        self.paths = []
        self.labels = []
        self.transform = transform
        data_dir = os.path.join(root, 'train' if train else 'test')
        self.num_classes = len(os.listdir(data_dir))
        print(data_dir)
        # BUG FIX: the original condition was ('animal' or 'NICO') in data_dir.
        # ('animal' or 'NICO') evaluates to the truthy string 'animal', so only
        # 'animal' was ever tested and NICO-style roots silently fell through
        # to the flat layout (yielding an empty dataset).
        # NOTE(review): os.listdir order is platform-dependent, so class ids
        # are not stable across runs/machines — left unchanged to preserve
        # behaviour, but consider sorted(os.listdir(...)).
        if 'animal' in data_dir or 'NICO' in data_dir:
            for class_id, dirs in enumerate(os.listdir(data_dir)):
                class_dir = os.path.join(data_dir, dirs)
                for prop in os.listdir(class_dir):
                    property_dir = os.path.join(class_dir, prop)
                    for img in os.listdir(property_dir):
                        if not is_image_file(img):
                            continue
                        self.paths.append(os.path.join(class_dir, prop, img))
                        self.labels.append(class_id)
        else:
            for class_id, dirs in enumerate(os.listdir(data_dir)):
                class_dir = os.path.join(data_dir, dirs)
                if not os.path.isdir(class_dir):
                    continue
                for basename in os.listdir(class_dir):
                    if not is_image_file(basename):
                        continue
                    self.paths.append(os.path.join(class_dir, basename))
                    self.labels.append(class_id)
    def __len__(self):
        return len(self.paths)
    def __getitem__(self, item):
        """Return (transformed RGB image, int label) for index ``item``."""
        path = self.paths[item]
        image = Image.open(path).convert('RGB')
        if self.transform:
            image = self.transform(image)
        label = self.labels[item]
        return image, label
class SingleDataset(Dataset):
    """Flat image-folder dataset: root/{train,test}/<class_dir>/image.*

    NOTE(review): this duplicates the flat-layout branch of CommonDataset
    exactly; the two could share one implementation.
    """
    def __init__(self, root, train=True, transform=None):
        self.paths = []
        self.labels = []
        self.transform = transform
        if train:
            data_dir = os.path.join(root, 'train')
        else:
            data_dir = os.path.join(root, 'test')
        # Class count = number of entries directly under the split folder.
        self.num_classes = len(os.listdir(data_dir))
        print(data_dir)
        for class_id, dirs in enumerate(os.listdir(data_dir)):
            class_dir = os.path.join(data_dir, dirs)
            if not os.path.isdir(class_dir):
                continue
            for basename in os.listdir(class_dir):
                if not is_image_file(basename):
                    continue
                self.paths.append(os.path.join(class_dir, basename))
                self.labels.append(class_id)
    def __len__(self):
        return len(self.paths)
    def __getitem__(self, item):
        """Return (transformed RGB image, int label) for index ``item``."""
        path = self.paths[item]
        image = Image.open(path).convert('RGB')
        if self.transform:
            image = self.transform(image)
        label = self.labels[item]
        return image, label
class SegDataset(Dataset):
    """Image + segmentation-mask dataset.

    Images live under root/test/<class>/, masks under
    root/segmentation/<class>/ with the same basename but a .png suffix.
    Intended for evaluation only: with train=True, seg_dir is None and
    os.path.join(None, ...) below would raise TypeError.
    """
    def __init__(self, root, train=False, transform=None):
        self.paths = []
        self.labels = []
        self.seg_path=[]
        self.transform = transform
        if train:
            data_dir = os.path.join(root, 'train')
            seg_dir=None
            print('only for test, train maybe wrong')
        else:
            data_dir = os.path.join(root, 'test')
            seg_dir=os.path.join(root, 'segmentation')
        self.num_classes = len(os.listdir(data_dir))
        print(data_dir)
        for class_id, dirs in enumerate(os.listdir(data_dir)):
            class_dir = os.path.join(data_dir, dirs)
            seg_class=os.path.join(seg_dir,dirs)
            if not os.path.isdir(class_dir):
                continue
            for basename in os.listdir(class_dir):
                if not is_image_file(basename):
                    continue
                self.paths.append(os.path.join(class_dir, basename))
                # Mask shares the image basename with a .png extension.
                basename2=os.path.splitext(basename)[0]+'.png'
                self.seg_path.append(os.path.join(seg_class, basename2))
                self.labels.append(class_id)
    def __len__(self):
        return len(self.paths)
    def __getitem__(self, item):
        """Return (image, mask, label); the same transform is applied to both."""
        path = self.paths[item]
        seg_path=self.seg_path[item]
        image = Image.open(path).convert('RGB')
        seg_img=Image.open(seg_path).convert('RGB')
        if self.transform:
            image = self.transform(image)
            seg_img=self.transform(seg_img)
        label = self.labels[item]
        return image,seg_img, label
| [
"857332641@qq.com"
] | 857332641@qq.com |
f9eddd2dfd9872cc121bf34cf56a369349327ca5 | 15a706ff72f16291622d3cb038c579a346bd85ef | /config.py | 2675040ad2f672c6b1337ba245b670328299ac5a | [] | no_license | land-pack/clothes-gallery | 418616cde9b9f475c723aa74dd018cc860193d8a | d715871bd1d7974349be5abc865ae2698645586e | refs/heads/master | 2021-01-17T13:00:16.090961 | 2016-06-14T01:59:54 | 2016-06-14T01:59:54 | 58,244,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base Flask configuration shared by all environments."""
    # Falls back to a weak literal when SECRET_KEY is unset in the environment.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    LANDPACK_MAIL_SUBJECT_PREFIX = '[Gallery]'
    LANDPACK_MAIL_SENDER = 'Landpack <landpack@sina.com>'
    ADMIN = '123@qq.com'
    LANDPACK_POSTS_PER_PAGE = 20
    LANDPACK_IMAGE_PER_PAGE = 5
    UPLOAD_FOLDER = '/var/lib/flask-tmp/cloths'
    # NOTE(review): 'git' here looks like a typo for 'gif' — confirm
    # against the upload validation code before changing.
    ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'git'])
    @staticmethod
    def init_app(app):
        # Hook for subclasses / app factory; intentionally a no-op here.
        pass
class DevelopmentConfig(Config):
    """Development settings: SMTP mail plus a local SQLite database."""
    # DEBUG = True
    MAIL_SERVER = 'smtp.sina.com'
    MAIL_PORT = 25
    MAIL_USE_TLS = True
    # SECURITY NOTE(review): real-looking mail credentials are committed
    # here as fallbacks; they should come from the environment only and
    # the exposed password should be rotated.
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME') or 'landpack@sina.com'
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') or 'landpack911!@#'
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
    """Testing settings: TESTING flag on, separate SQLite test database."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
    """Production settings: database URL taken from DATABASE_URL."""
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data.sqlite')
# Registry mapping config names (e.g. from FLASK_CONFIG) to config classes;
# 'default' selects the development configuration.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
"landpack@sina.com"
] | landpack@sina.com |
9ebab208afe1bd2f7614df1c2de549eb77d9db95 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /StudentProblem/10.21.9.55/2/1569574642.py | 90461df7fb044b3bea3b407adea39ed89bcc0d15 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,229 | py | import functools
import typing
import string
import random
import pytest
## Lösung Teil 1.
def is_palindromic(n: int) -> bool:
    """Return True if the decimal representation of ``n`` is a palindrome.

    The original returned str(n).split() — a one-element list that is
    always truthy and never a bool — so every number looked palindromic.
    """
    digits = str(n)
    return digits == digits[::-1]
######################################################################
## Lösung Teil 2. (Tests)
def test_is_palindromic():
    """Unit tests for is_palindromic (part 2 of the exercise).

    The original first assertion expected the list [5, 2, 5]; the
    exercise specifies a bool return, so all expectations are booleans.
    """
    assert is_palindromic(525) == True
    assert is_palindromic(1) == True
    assert is_palindromic(123) == False
######################################################################
## Lösung Teil 3.
## Lösung Teil 4.
######################################################################
## test code
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
    """Grader scaffold: checks the required functions exist and take
    a parameter named 'n'.

    NOTE(review): gen_palindromic and represent are referenced but not
    defined in this submission, so those checks fail with NameError.
    """
    def test_is_palindromic(self):
        assert is_palindromic
        assert 'n' in getfullargspec(is_palindromic).args
    def test_gen_palindromic(self):
        assert gen_palindromic
        assert 'n' in getfullargspec(gen_palindromic).args
    def test_represent(self):
        assert represent
        assert 'n' in getfullargspec(represent).args
class TestGrades:
    """Grader scaffold: docstring/typing/coverage checks plus oracle
    implementations used to verify the student's functions.

    NOTE(review): is_palindromic.__hints__ below is not a real function
    attribute (annotations live in __annotations__ / typing.get_type_hints),
    and self.check_divisors is never defined, so test_is_palindromic
    raises AttributeError — both look like scaffold bugs.
    """
    def test_docstring_present(self):
        assert is_palindromic.__doc__ is not None
        assert gen_palindromic.__doc__ is not None
        assert represent.__doc__ is not None
    def test_typing_present(self):
        assert is_palindromic.__hints__ == typing.get_type_hints(self.is_palindromic_oracle)
        assert typing.get_type_hints (gen_palindromic) == typing.get_type_hints (self.gen_palindromic_oracle)
        assert typing.get_type_hints (represent) == typing.get_type_hints (self.represent_oracle)
    def test_coverage(self):
        assert coverage("achieved") == coverage("required")
    def is_palindromic_oracle(self, n:int)->list:
        """Reference implementation: strip matching ends of str(n)."""
        s = str(n)
        while len (s) > 1:
            if s[0] != s[-1]:
                return False
            s = s[1:-1]
        return True
    def gen_palindromic_oracle (self, n:int):
        """Reference generator of palindromic numbers from n+1 down to 1."""
        return (j for j in range (n + 1, 0, -1) if self.is_palindromic_oracle (j))
    def represent_oracle (self, n:int) -> list:
        """Reference greedy search for <= 3 palindromic summands of n."""
        for n1 in self.gen_palindromic_oracle (n):
            if n1 == n:
                return [n1]
            for n2 in self.gen_palindromic_oracle (n - n1):
                if n2 == n - n1:
                    return [n1, n2]
                for n3 in self.gen_palindromic_oracle (n - n1 - n2):
                    if n3 == n - n1 - n2:
                        return [n1, n2, n3]
        # failed to find a representation
        return []
    def test_is_palindromic(self):
        ## fill in
        for i in range (100):
            self.check_divisors (i)
        n = random.randrange (10000)
        self.check_divisors (n)
    def test_gen_palindromic(self):
        ## fill in
        pass
    def test_represent (self):
        def check(n, r):
            """Assert that r sums to n and contains only palindromes."""
            for v in r:
                assert self.is_palindromic_oracle (v)
            assert n == sum (r)
        for n in range (1,100):
            r = represent (n)
            check (n, r)
        for i in range (100):
            n = random.randrange (10000)
            r = represent (n)
            check (n, r)
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
7e5f0852a9d3e0223cd78d222333b91a9e678a04 | 3ba76ce98cdc5578f1ce38fba41da152404224fa | /python/test/grammar_translator/testCallEnd.py | 20d874cf234ddb191d66611c5aac626a8862662e | [
"MIT"
] | permissive | DiaosiDev/gpufort | 258a4d3f8e8a8d53428f34f377ad8ff662369a53 | e60e99dfa3b17306ad65a01d56a764aac471eaba | refs/heads/main | 2023-08-14T13:15:08.266532 | 2021-10-05T09:51:23 | 2021-10-05T09:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python3
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 Advanced Micro Devices, Inc. All rights reserved.
import addtoplevelpath
import sys
import test
import translator.translator
import grammar as translator
# Sample inputs for the `call_end` grammar rule — they look like Fortran CALL
# argument-list tails (everything up to and including the closing paren).
testdata = """
1 )
a_d )
psi_d )
2 * lda, ps_d, 1, 1.D0, psi_d, 1 )
spsi_d )
a_d )
1, spsi_d )
1, 1, spsi_d )
lda, ps_d, 1, 1, spsi_d )
lda, ps_d )
lda, ps_d, 1, 1, spsi_d, 1 )
2 * lda, ps_d, 1, 1, spsi_d, 1 )
2 * lda, ps_d, 1, 1.D0, spsi_d, 1 )
""".strip("\n").strip(" ").strip("\n").splitlines()

# Parse every sample with the call_end expression; raiseException makes any
# non-matching sample abort the test run loudly.
test.run(
    expression = translator.call_end,
    testdata = testdata,
    tag = "call_end",
    raiseException = True
)
"docharri@amd.com"
] | docharri@amd.com |
32c5c444b69f867b603c9b38706e48446c3ed75b | 4e9923dca942f7bb861fcd8399d4eb28d2f990f3 | /core/get_pic.py | 44eecfa6164c12f61d8bcb36572378261074ba7b | [] | no_license | ses4j/empid | c8549a03baeb422f5add52d36e610cf97cc37fc4 | cc68e255ae6377b4fbbe3c5ec8409853c34650be | refs/heads/master | 2022-05-31T11:17:30.684792 | 2022-05-21T03:45:01 | 2022-05-21T03:45:01 | 207,694,752 | 0 | 0 | null | 2022-05-21T03:45:37 | 2019-09-11T01:21:08 | Python | UTF-8 | Python | false | false | 1,933 | py | import logging
import requests, pprint
logger = logging.getLogger(__name__)
def get_image_urls(species, taxonCode='', count=50, sort_by="rating_rank_desc", regionCode='', mr='MCUSTOM', bmo=1, emo=12, yr='YPAST10'):
    """Fetch photo records for *species* from the eBird media catalog.

    Builds a ``catalog.json`` query, performs the HTTP GET, and returns the
    list of result records found under ``results.content`` of the response.
    Raises AssertionError (carrying the server's JSON body) on a non-200 status.
    """
    query = (
        f"https://ebird.org/media/catalog.json?searchField=species&q={species}"
        f"&taxonCode={taxonCode}&&mediaType=p&regionCode={regionCode}&view=Gallery&sort={sort_by}"
        f"&mr={mr}&bmo={bmo}&emo={emo}&yr={yr}"
        f"&count={count}"
    )
    response = requests.get(query)
    # The assert message evaluates the response body only on failure.
    assert response.status_code == 200, str(response.json())
    logger.info(f"Fetched new urls for {taxonCode}...\n{query}\nreturn status={response.status_code}")
    payload = response.json()
    return payload['results']['content']
# get_image(species="Alder Flycatcher - Empidonax alnorum", taxonCode="aldfly") | [
"scott.stafford@gmail.com"
] | scott.stafford@gmail.com |
483dceab4a240864aa0361b1ab4ea50699ff9ed0 | 81f5216dcff3755d008f1202e187d7f761bc6dbf | /system_tests/system_tests_async/conftest.py | 9669099245dceb5d5eebac8d3a488a845f2bb169 | [
"Apache-2.0"
] | permissive | bojeil-google/google-auth-library-python | 46cde46cd220148d118ea98eefb2eaa26b2031f1 | a37ff00d7afd6c7aac2d0fab29e05708bbc068be | refs/heads/main | 2023-08-06T23:51:31.529892 | 2021-09-28T21:44:05 | 2021-09-28T21:44:05 | 411,832,250 | 0 | 1 | Apache-2.0 | 2021-09-29T21:18:15 | 2021-09-29T21:18:14 | null | UTF-8 | Python | false | false | 3,362 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from google.auth import _helpers
import google.auth.transport.requests
import google.auth.transport.urllib3
import pytest
import requests
import urllib3
import aiohttp
from google.auth.transport import _aiohttp_requests as aiohttp_requests
from system_tests.system_tests_sync import conftest as sync_conftest
TOKEN_INFO_URL = "https://www.googleapis.com/oauth2/v3/tokeninfo"
@pytest.fixture
def service_account_file():
    """The full path to a valid service account key file."""
    # Path is shared with the sync test suite via sync_conftest.
    yield sync_conftest.SERVICE_ACCOUNT_FILE
@pytest.fixture
def impersonated_service_account_file():
    """The full path to a valid impersonated service account key file."""
    # Path is shared with the sync test suite via sync_conftest.
    yield sync_conftest.IMPERSONATED_SERVICE_ACCOUNT_FILE
@pytest.fixture
def authorized_user_file():
    """The full path to a valid authorized user file."""
    # Path is shared with the sync test suite via sync_conftest.
    yield sync_conftest.AUTHORIZED_USER_FILE
@pytest.fixture
async def aiohttp_session():
    """Yield an aiohttp ClientSession, closed automatically after the test."""
    # auto_decompress=False disables automatic content decompression so tests
    # see raw response bytes.
    async with aiohttp.ClientSession(auto_decompress=False) as session:
        yield session
# Parametrized with a single "aiohttp" backend so the transport name shows up
# in test ids (and more backends can be added later).
@pytest.fixture(params=["aiohttp"])
async def http_request(request, aiohttp_session):
    """A transport.request object."""
    yield aiohttp_requests.Request(aiohttp_session)
@pytest.fixture
async def token_info(http_request):
    """Yield a coroutine that fetches OAuth2 token info for an access or ID token."""

    async def _token_info(access_token=None, id_token=None):
        # Exactly one token must be supplied; access_token wins if both are
        # given (same precedence as the original if/elif chain).
        if access_token is None and id_token is None:
            raise ValueError("No token specified.")
        if access_token is not None:
            query_params = {"access_token": access_token}
        else:
            query_params = {"id_token": id_token}

        url = _helpers.update_query(sync_conftest.TOKEN_INFO_URL, query_params)

        response = await http_request(url=url, method="GET")
        raw = await response.content()
        return json.loads(raw.decode("utf-8"))

    yield _token_info
@pytest.fixture
async def verify_refresh(http_request):
    """Returns a function that verifies that credentials can be refreshed."""

    async def _verify_refresh(credentials):
        # Some credential types (e.g. service accounts) must be scoped before
        # they can be refreshed.
        if credentials.requires_scopes:
            credentials = credentials.with_scopes(["email", "profile"])

        await credentials.refresh(http_request)

        # A successful refresh yields a token and valid credentials.
        assert credentials.token
        assert credentials.valid

    yield _verify_refresh
def verify_environment():
    """Checks to make sure that requisite data files are available."""
    if os.path.isdir(sync_conftest.DATA_DIR):
        return
    raise EnvironmentError(
        "In order to run system tests, test data must exist in "
        "system_tests/data. See CONTRIBUTING.rst for details."
    )
def pytest_configure(config):
    """Pytest hook that runs before Pytest collects any tests."""
    # Fail fast with one clear error if test data is missing, instead of
    # every test erroring individually.
    verify_environment()
| [
"noreply@github.com"
] | bojeil-google.noreply@github.com |
9a6fe69b312cfd1dbfdc8bf7ac6a4bb97248c01b | 232c8c85b4277e9a51b1ded461f27f761c13c2e1 | /Software/Python/grove_gps_arduino.py | 150560d81065e5462386833caf7df8051423724b | [
"MIT"
] | permissive | schrein/GrovePi | 4b3d6cc4e153fd201fc231c139bfe27843d4b903 | 75fe88c118d8c53ec92e20eca670970cd5c29acf | refs/heads/master | 2021-01-18T09:39:43.254970 | 2015-08-12T07:56:12 | 2015-08-12T07:56:12 | 40,587,212 | 0 | 0 | null | 2015-08-12T07:43:21 | 2015-08-12T07:43:21 | null | UTF-8 | Python | false | false | 1,937 | py | #!/usr/bin/env python
#
# GrovePi Example for using the Grove Temperature & Humidity Sensor Pro (http://www.seeedstudio.com/wiki/Grove_-_Temperature_and_Humidity_Sensor_Pro)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import grovepi
# Connect the Grove GPS to Serial Port
# Poll the GPS forever.  grovepi.gps_arduino() returns ten fields that unpack
# into position (lat/lng/alt) and a timestamp split into components.
# An IOError from the sensor read is deliberately swallowed so one bad read
# does not stop the loop.
while True:
    try:
        [ lat, lng, alt, year, month, day, hour, minute, second, ms ] = grovepi.gps_arduino()
        print ( lat, lng, alt, year, month, day, hour, minute, second, ms )
    except IOError:
        print ("Error")
| [
"schreiner@unistra.fr"
] | schreiner@unistra.fr |
eae719c94696afaae249e39f8bfddef165b649e6 | dae7de26f09bf1d0fcc9231082a0cddd5eb88074 | /python_src/d01_FESTIVAL_03.py | 551f2ed0e2f874b34b1235aa4456cf8da0c6a44f | [] | no_license | dubu/algospot | e9b50f27d7b3cf5866841429d736c6839234ecba | 1665321321bd99546b512ce6a285738c92a76c35 | refs/heads/master | 2016-09-11T08:50:29.747545 | 2014-11-24T15:38:20 | 2014-11-24T15:38:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | __author__ = 'dubuapt'
def main():
    """Read FESTIVAL test cases from stdin and print a minimum window-average cost."""
    import sys
    f = sys.stdin
    caseCount = f.readline()
    # print range(int(caseCount))
    result = []
    # NOTE(review): ret starts at 0 and is only ever min()-ed below, so with
    # non-negative costs it can never rise above 0; it is also never reset
    # between test cases.  Probably intended: ret = float('inf') inside the
    # per-case loop — confirm against the problem statement.
    ret = 0
    for n in range(int(caseCount)):
        # print n
        line01 = f.readline()  # header line: two integers (length, window size)
        line02 = f.readline()  # whitespace-separated cost values
        ln = str(line01).strip().split()
        # print ln[0], ln[1]
        # print line02
        cost = line02.strip().split()
        l = int(ln[0])
        n0 = int(ln[1])
        costs = []  # NOTE(review): unused
        i = 0
        while i < l :
            # 'sum' shadows the builtin; accumulates the window cost from i.
            sum = 0
            for j in range(i,n0+1 ):
                # NOTE(review): the upper bound n0+1 ignores both i and l, so
                # windows extending past index n0 are never examined and j can
                # fall behind i for large i — verify this bound.
                sum = sum + int(cost[j])
                if(j - i +1 >= n0):
                    # Track the smallest average over windows of length >= n0.
                    ret = min(ret, sum/float(j - i + 1))
            i = i+1;
        #print("%.10f" %ret)
        result.append(ret)
    print("%.10f\n" %min(result))

main()
| [
"kozazz@hanmail.net"
] | kozazz@hanmail.net |
ce05fe6a4190b4e43561937b674f94d5b8483423 | d82db03a258eafd28a758a7b7e56f991e599d798 | /docs/source/conf.py | 4dec93ff7d8aeb16c048a3a6e37d0601df186574 | [
"MIT"
] | permissive | preems/nltk-server | 3fe65e817fb8cc100c8e74dac91fc709808d796f | cf7644b6d5529bcf6bb9a74eedb46761b5b164ab | refs/heads/master | 2021-06-03T19:03:41.905014 | 2021-01-08T21:48:05 | 2021-01-08T21:48:05 | 28,204,818 | 24 | 17 | MIT | 2021-01-08T21:48:07 | 2014-12-18T22:43:50 | Python | UTF-8 | Python | false | false | 8,398 | py | # -*- coding: utf-8 -*-
#
# NLTK Server documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 23 16:54:51 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.append(os.path.abspath('sphinxcontrib'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'httpdomain'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NLTK Server'
copyright = u'2014, Preetham MS'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = []
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'NLTKServerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'NLTKServer.tex', u'NLTK Server Documentation',
u'Preetham MS', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'nltkserver', u'NLTK Server Documentation',
[u'Preetham MS'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'NLTKServer', u'NLTK Server Documentation',
u'Preetham MS', 'NLTKServer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"me@preetham.in"
] | me@preetham.in |
c71f6184c1b42c8351075c6e0810a10da0e108af | 7f52cc0952b39218b12b401c9136d0ae92bb8387 | /Text_Classifier.py | a6410a7f24e8a868ccd0f2d933ed1da8011091c8 | [] | no_license | Subhash3/Sentiment-Analysis | b1336bb48128b0db29cd2807660e9335ba4dd9ab | 159e4727e3b54f4638d5dfb591ac5cd52250a44c | refs/heads/main | 2023-04-28T20:30:01.674730 | 2021-05-21T05:22:20 | 2021-05-21T05:22:20 | 368,753,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,328 | py | import pandas as pd
import json
from helpers import customArgmax, shuffleArray, splitDataframe, splitArr
from Pre_Processor import PreProcess
import sys
from custom_exceptions import RequiredFieldsNotFoundError
class TextClassifier:
    """Naive Bayes text classifier over pre-processed token lists.

    Attributes
    ----------
    dataset : pandas.DataFrame
        Full pre-processed dataset (must expose ``tokens`` and ``category`` columns).
    summaryByClass : dict
        Per-category smoothed token probabilities, built by :meth:`train`.
    noOfSamples : int
        Number of training samples.
    DEFAULT_PROBABILITY : int
        Additive smoothing offset applied to every token probability so that
        products of many small factors never collapse to zero.
    """

    def __init__(self):
        self.dataset = pd.DataFrame()
        self.summaryByClass = dict()
        self.noOfSamples = 0
        self.DEFAULT_PROBABILITY = 1
        self.preProcessor = PreProcess()

    def loadDatasetJson(self, jsonFile):
        """
        Loads the data from a json file into a pandas DataFrame.

        Parameters
        ----------
        jsonFile: str
            Json file containing the dataset, an array of data-samples with the
            following structure:
                {
                    "sentence": "some sentence",
                    "category": "positive/negative"
                }

        Raises
        ------
        Exception
            Any error from reading or pre-processing the file is re-raised.
        """
        try:
            # BUGFIX: close the file handle instead of leaking it.
            with open(jsonFile) as fh:
                data = json.load(fh)
            data = self.preProcessor.preProcess(data)
            # 80/20 train/test split; only the training part is shuffled.
            train, test = splitArr(data, 4 / 5)
            self.testing = test
            self.training = train
            data = shuffleArray(train)
            self.dataset = pd.DataFrame(data)
            self.noOfSamples = self.dataset.shape[0]
        except Exception as e:
            raise e

    def loadDatasetCsv(self, csvFile):
        """
        Loads the data from a CSV file into a pandas DataFrame.

        Parameters
        ----------
        csvFile: str
            CSV file containing the dataset; must have ``sentence`` and
            ``category`` columns.

        Raises
        ------
        RequiredFieldsNotFoundError
            If either required column is missing.
        Exception
            Any error from reading or pre-processing the file is re-raised.
        """
        try:
            data = pd.read_csv(csvFile, encoding="ISO-8859-1")
            if ("sentence" not in data.columns) or ("category" not in data.columns):
                raise RequiredFieldsNotFoundError
            processedData = self.preProcessor.preProcess(data)
            self.dataset = processedData
            # 75/25 train/test split.
            self.training, self.testing = splitDataframe(self.dataset, 3 / 4)
            self.noOfSamples = self.training.shape[0]
        except Exception as e:
            raise e

    def _describeByClass(self, dataset: pd.DataFrame):
        """
        Separates data by class name and computes smoothed token probabilities
        for each class.

        Parameters
        ----------
        dataset: pd.DataFrame
            Dataframe with ``tokens`` (list of str) and ``category`` columns.

        Returns
        -------
        summary: Dict[str, Dict[str, float]]
            Map from category to {token: occurrences/samples + smoothing}.
        """
        categories = set(dataset["category"])
        summary = dict()
        for category in categories:
            samples = dataset[dataset["category"] == category]
            samplesCount = samples.shape[0]
            tokenProbabilities = dict()
            for tokenList in samples["tokens"]:
                for token in tokenList:
                    # BUGFIX: the first occurrence used to be recorded as 0
                    # (if-not-in set 0, else incremented), undercounting every
                    # token by one.
                    tokenProbabilities[token] = tokenProbabilities.get(token, 0) + 1
            for token in tokenProbabilities:
                tokenProbabilities[token] /= samplesCount
            summary[category] = tokenProbabilities
        # Apply additive smoothing uniformly across all categories.
        for category in categories:
            s = summary[category]
            for token in s:
                s[token] += self.DEFAULT_PROBABILITY
        return summary

    def train(self):
        """
        Builds per-class token summaries from the training split and stores
        the result in self.summaryByClass.
        """
        self.summaryByClass = self._describeByClass(self.training)
        print(f"Trained {self.training.shape[0]} samples")

    def computeProbabilities(self, tokens: list):
        """
        Computes the (unnormalized) probability that the given tokens belong
        to each class.

        Attributes
        ----------
        tokens: list
            List of processed tokens.

        Returns
        -------
        probabilities: Dict[str, float] or None
            Score for each category; None when ``tokens`` is empty.
        """
        if len(tokens) == 0:
            return None
        categories = set(self.training["category"])
        probabilities = dict()
        for category in categories:
            samples = self.training[self.training["category"] == category]
            samplesCount = samples.shape[0]
            priorProbability = samplesCount / self.noOfSamples
            likelihood = 1
            for token in tokens:
                if token in self.summaryByClass[category]:
                    p = self.summaryByClass[category][token]
                else:
                    # Unseen token: neutral factor of 1 (consistent with the
                    # DEFAULT_PROBABILITY smoothing applied at training time).
                    p = 1
                likelihood *= p
            # BUGFIX: the score used to be (last token's p) * prior; the
            # accumulated likelihood was computed but discarded.
            probabilities[category] = likelihood * priorProbability
        return probabilities

    def predict(self, sentence: str):
        """
        Predicts the category of the given sentence.

        Attributes
        ----------
        sentence: str

        Returns
        -------
        Tuple[str, Dict[str, float]] or None
            Predicted category with per-category scores, or None when
            pre-processing yields no tokens.
        """
        tokens = self.preProcessor.processString(sentence)
        probabilities = self.computeProbabilities(tokens)
        if probabilities is None:
            return None
        return customArgmax(probabilities), probabilities

    def predictByTokens(self, tokens):
        """
        Predicts the category of the given tokens of a sentence.

        Attributes
        ----------
        tokens: List[str]

        Returns
        -------
        Tuple[str, Dict[str, float]]
            Predicted category with per-category scores.
        """
        probabilities = self.computeProbabilities(tokens)
        return customArgmax(probabilities), probabilities

    def Test(self):
        """
        Tests the model against the held-out testing split and computes accuracy.

        Returns
        -------
        accuracy: float
            Percentage of correct predictions; None when the testing split is
            empty, 0.0 when no sample has tokens.
        """
        correct = 0
        total = self.testing.shape[0]
        if total <= 0:
            return
        testingProgress = 0
        testedSamples = 0
        nonEmptyTokens = 0
        tokensColumn = self.testing["tokens"]
        for i in self.testing.index.values:
            testingProgress = (testedSamples * 100) / total
            print(
                f"Testing {round(testingProgress, 3)}% done {'.-'*(int(testingProgress/5)+1)}", end='\r')
            sys.stdout.flush()
            tokens = tokensColumn[i]
            if len(tokens) == 0:
                # Samples with no tokens cannot be scored; skip them.
                continue
            prediction = self.predictByTokens(tokens)
            if prediction[0] == self.testing["category"][i]:
                correct += 1
            nonEmptyTokens += 1
            testedSamples += 1
        print()
        print(f"Tested: {total} samples.")
        # BUGFIX: guard against ZeroDivisionError when every sample was skipped.
        if nonEmptyTokens == 0:
            return 0.0
        accuracy = max(correct * 100 / total, correct * 100 / nonEmptyTokens)
        print(f"Accuracy: {accuracy}")
        return accuracy
| [
"subhashsarangi123@gmail.com"
] | subhashsarangi123@gmail.com |
c578909d8cbe9e593b253df1984ea9ee3076609c | fda71dfb60ef4be197382fbd900f6dfc28da86bd | /asifncfetv2/model_tests/fit100nm.py | 4b93628639c437ec035ab333ff82acae11f0878f | [] | no_license | jpduarte/userjp | d9bfa2a46c0d8eda5796a785f1ed6082617520ce | 3e6190681e706d954976790af2419509613056f6 | refs/heads/master | 2021-03-22T00:05:04.018130 | 2016-06-16T22:21:36 | 2016-06-16T22:21:36 | 34,271,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,090 | py | #example: run hspice Id-Vg using python
#Juan Duarte, BSIM Group
rootfolder = '/home/juan/research'
#indicate path for folders containing required classes
import sys
sys.path.insert(0, rootfolder+'/cmdp/pycsimsupport')
sys.path.insert(0, rootfolder+'/cmdp/plotscripts')
sys.path.insert(0, rootfolder+'/cmdp/fittingsupport')
#import class for python simulation
import pycsimpython
import plotgeneral
import numpy as np
import matplotlib.pyplot as plt
import fitmodel
####################################################################
#add data
##############################################################################
fit1 = fitmodel.fitmodelclass('fitexample')
fit1.updateparameter('modelpath',rootfolder+'/cmdp/compactmodels/UFCM.py')
fit1.updateparameter('modelcardpath',rootfolder+'/userjp/asifncfetv2/modelcards/modecard100nmNFIT1.txt')
#add path to folder which will contain simulation files
fit1.updateparameter('fitfolder',rootfolder+'/userjp/asifncfetv2/fitdata/')
fit1.updateparameter('alldatafile','alldatatofit100nm.txt')
fit1.updateparameter('inputfileformat','asifdata')
fit1.updateparameter('paramtoinclude',['VG', 'VD', 'IG', 'ID', 'IS', 'VS','VB','Lg'])#TODO: do this automatic in case file with data is ready
fit1.updateparameter('simulationresultsfilename','initialresult.txt')
fit1.updateparameter('vartosave',['Ids'])
fit1.updateparameter('nodes',['VD', 'VG', 'VS', 'VB'])
fit1.updateparameter('dcbiases',[[-0.05,-0.2,-0.3,-0.5,-0.7,-0.95], np.linspace(-2,0,100), [0], [0]]) #
fit1.updateparameter('deviceparameter',[])
fit1.updateparameter('deviceparametervalue',[])
fit1.runsim(fit1.modelcardpath)
#uncomment this to load data to a single file, this is done only once
fit1.resetdata()
pathallfiles = '/home/juan/research/userjp/asifncfetv2/data/100nm/'
fit1.addalldatainfolder(pathallfiles, ['VS','VB','Lg'], ['0.0','0.0','100e-9'])
#first cycle
fit1.updateparameter('biasrange', [[-0.04,-1],[-2,-0.4],[-0.1,0.1],[-0.1,0.1]])
fit1.updateparameter('deviceparameterrange', [[90e-9,110e-9]])
fit1.updateparameter('vartofitdata', ['ID'])
fit1.updateparameter('vartofitmodel', ['Ids'])
fit1.updateparameter('paramtofit', ['vsat','Rs','ul'])#,
fit1.updateparameter('modelcardpathfinal',rootfolder+'/userjp/asifncfetv2/modelcards/modecard100nmNFIT2.txt')
################################fit model 1
fit1.fitparameters()
###############################run simulation for initial and final
#update name for results, TODO: change this name, its confusing
fit1.updateparameter('simulationresultsfilename','finalresult.txt')
fit1.runsim(fit1.modelcardpathfinal)
##############################plot results
P1 = plotgeneral.plotgeneral()
#plot experimental results
P1.updateparameter('symbol','o')
P1.updateparameter('lw',3)
pathandfile = fit1.fitfolder+fit1.alldatafile
P1.plotfiledata(pathandfile,'VG','ID',1)
#plot model with initial parameters
P1.updateparameter('symbol','-')
fit1.updateparameter('simulationresultsfilename','initialresult.txt')
pathandfile = fit1.fitfolder+fit1.simulationresultsfilename
P1.plotfiledata(pathandfile,'VG','Ids',1)
#plot model with fitted parameters
P1.updateparameter('symbol','--')
fit1.updateparameter('simulationresultsfilename','finalresult.txt')
pathandfile = fit1.fitfolder+fit1.simulationresultsfilename
P1.plotfiledata(pathandfile,'VG','Ids',1)
#################################################################log scales
P1.updateparameter('ylogflag',1)
P1.updateparameter('symbol','o')
pathandfile = fit1.fitfolder+fit1.alldatafile
P1.plotfiledata(pathandfile,'VG','ID',2)
#plot model with initial parameters
P1.updateparameter('symbol','-')
fit1.updateparameter('simulationresultsfilename','initialresult.txt')
pathandfile = fit1.fitfolder+fit1.simulationresultsfilename
P1.plotfiledata(pathandfile,'VG','Ids',2)
#plot model with fitted parameters
P1.updateparameter('symbol','--')
fit1.updateparameter('simulationresultsfilename','finalresult.txt')
pathandfile = fit1.fitfolder+fit1.simulationresultsfilename
P1.plotfiledata(pathandfile,'VG','Ids',2)
###############################plot ends
plt.show()
| [
"jpduarte@berkeley.edu"
] | jpduarte@berkeley.edu |
878906624466eab280cf92986ee00507d5c002c8 | 813c2a273a6c973d358d38e6016da7f8a7e5d840 | /src/api_wrapper/api.py | d2bc0b35dee9538189da4ae495892a157401196b | [
"MIT"
] | permissive | MSDuncan82/api_wrapper | 22e49f2f54c3ef857f0a0d6d1e59aedd71bc4698 | 602d0ad901cc0c95a302404af609ca6f28d842ee | refs/heads/master | 2022-11-15T00:09:29.689197 | 2020-07-09T12:24:31 | 2020-07-09T12:24:31 | 261,279,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from base_api import API
from geo_api import MapboxAPI
from census_api import CensusBoundaries
| [
"msduncan82@gmail.com"
] | msduncan82@gmail.com |
9c5405c5f44f1cba5e09270396b0321ed198f4d0 | 01f4bf71d200039bf38508aab253a0686a9cba8e | /flink-runtime-web/web-dashboard/node_modules/fmin/test.py | f55af9abffc7cf1888d23be232aa4742da1cc9eb | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"OFL-1.1",
"ISC",
"LicenseRef-scancode-free-unknown",
"MIT-0",
"CDDL-1.0",
"CDDL-1.1",
"EPL-1.0",
"Classpath-exception-2.0",
"CC-BY-2.5",
"GCC-exception-3.1",
"AGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause-Views",
"MPL-2.0-no-copyleft-exception",
"MPL-2.0",
"LicenseRef-scancode-jdom",
"CC-PDDC",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"CC0-1.0"
] | permissive | MinJeromeXU/flink_source | 4599bf88f3eb0152d9a310e4d9a0d11b45472226 | 140c345df48b703582337ae59e465f0765c310a7 | refs/heads/master | 2022-11-06T00:08:32.307280 | 2019-08-24T12:43:39 | 2019-08-24T12:43:39 | 204,151,561 | 2 | 0 | Apache-2.0 | 2022-11-01T23:02:43 | 2019-08-24T12:02:02 | Java | UTF-8 | Python | false | false | 1,353 | py | import scipy.optimize
import math
def himmelblau(x, y):
    """Himmelblau's function, a standard multi-modal optimization test surface.

    Value is (x^2 + y - 11)^2 + (x + y^2 - 7)^2; it is 0 at each of the
    four global minima (e.g. (3, 2)).
    """
    u = x * x + y - 11
    v = x + y * y - 7
    return u * u + v * v
def beale(x, y):
    """Beale's function, an optimization test surface with its global
    minimum of 0 at (x, y) = (3, 0.5)."""
    residuals = (
        1.5 - x + x * y,
        2.25 - x + x * y * y,
        2.625 - x + x * y * y * y,
    )
    return sum(r ** 2 for r in residuals)
def main():
    """Run scipy's Nelder-Mead simplex (``fmin``) on one of several test
    surfaces and print the optimization trace.

    Only the first branch is live; the others are kept behind
    ``elif False`` as alternative experiments.
    NOTE(review): Python 2 ``print`` statements -- this script predates
    Python 3 and will not run unmodified on it.
    """
    if True:
        # Beale's function from a fixed starting point.
        initial = [-3.670609291875735,3.8585484651848674]
        solution = scipy.optimize.fmin(lambda x: beale(x[0], x[1]),
                                       initial, retall=True)
        print "loss", beale(solution[0][0], solution[0][1])
    elif False:
        # Rosenbrock's "banana" function (disabled).
        def banana(x, y):
            return (1 - x) * (1 - x) + 100 * (y - x * x) * ( y - x * x)
        initial = [-1.675793744623661,-1.945310341194272]
        solution = scipy.optimize.fmin(lambda x: banana(x[0], x[1]),
                                       initial, retall=True)
    elif False:
        # Himmelblau's function (disabled); the second assignment
        # deliberately overrides the first starting point.
        initial = [4.474377192556858, 0.22207495383918285]
        initial = [-7.185110699385405, 0.01616438291966915]
        solution = scipy.optimize.fmin(lambda x: himmelblau(x[0], x[1]),
                                       initial, retall=True)
    else:
        # Trivial 1-D quadratic with its minimum at x = 10 (disabled).
        solution = scipy.optimize.fmin(lambda x: (x[0]-10) * (x[0]-10), [0], retall=True)
    # retall=True makes fmin return (xopt, allvecs); print the optimum
    # followed by the simplex position at every iteration.
    print solution[0]
    for i, s in enumerate(solution[1]):
        print str(i) + ":", s

if __name__ == "__main__":
    main()
"xuminsong@xuminsongdeMacBook-Pro.local"
] | xuminsong@xuminsongdeMacBook-Pro.local |
f85399095b8e9d78821d2f926b73f4272a10b01d | 14fb0e7645b9d7a8672a87ae369880dba0737521 | /kmeans_tutorial/iris_plot.py | e2114556593538b0336c7eb7595279385dbb933d | [] | no_license | danielgwak/GA_homework-1 | 4a5f40de42f420194299da6437082b8147e8aa49 | 472043d5527d8bad4cb7d298576d09f2d860adfc | refs/heads/master | 2021-01-19T18:08:10.738188 | 2014-01-15T04:09:51 | 2014-01-15T04:09:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,209 | py | import matplotlib
# Pairwise scatter plots of the Iris dataset: one figure per feature pair,
# each of the three species drawn with its own marker/color, saved as PNGs.
# NOTE(review): uses ``xrange`` -- Python 2 only.
matplotlib.use("AGG")
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
import numpy as np
# We load the data with load_iris from sklearn
data = load_iris()
features = data['data']
feature_names = data['feature_names']
target = data['target']
# Figure 1: feature 0 vs feature 1.
plt.figure(1)
for t,marker,c in zip(xrange(3),">ox","rgb") :
    # We plot each class on its own to get different colored markers
    plt.scatter(features[target == t,0],
                features[target == t,1],
                marker=marker,
                c=c)
plt.xlabel(feature_names[0])
plt.ylabel(feature_names[1])
plt.savefig('iris_plot1.png')
#plt.close()
# Figure 2: feature 0 vs feature 2.
plt.figure(2)
for t,marker,c in zip(xrange(3),">ox","rgb") :
    plt.scatter(features[target == t,0],
                features[target == t,2],
                marker=marker,
                c=c)
plt.xlabel(feature_names[0])
plt.ylabel(feature_names[2])
plt.savefig('iris_plot2.png')
#plt.clf()
# Figure 3: feature 0 vs feature 3.
plt.figure(3)
for t,marker,c in zip(xrange(3),">ox","rgb") :
    plt.scatter(features[target == t,0],
                features[target == t,3],
                marker=marker,
                c=c)
plt.xlabel(feature_names[0])
plt.ylabel(feature_names[3])
plt.savefig('iris_plot3.png')
#plt.clf()
# Figure 4: feature 1 vs feature 2.
plt.figure(4)
for t,marker,c in zip(xrange(3),">ox","rgb") :
    plt.scatter(features[target == t,1],
                features[target == t,2],
                marker=marker,
                c=c)
plt.xlabel(feature_names[1])
plt.ylabel(feature_names[2])
plt.savefig('iris_plot4.png')
#plt.clf()
# Figure 5: feature 1 vs feature 3.
plt.figure(5)
for t,marker,c in zip(xrange(3),">ox","rgb") :
    plt.scatter(features[target == t,1],
                features[target == t,3],
                marker=marker,
                c=c)
plt.xlabel(feature_names[1])
plt.ylabel(feature_names[3])
plt.savefig('iris_plot5.png')
# Figure 6: feature 2 vs feature 3 (covers all 6 feature pairs).
plt.figure(6)
for t,marker,c in zip(xrange(3),">ox","rgb") :
    plt.scatter(features[target == t,2],
                features[target == t,3],
                marker=marker,
                c=c)
plt.xlabel(feature_names[2])
plt.ylabel(feature_names[3])
plt.savefig('iris_plot6.png')
"rob@ip-172-31-33-228.us-west-2.compute.internal"
] | rob@ip-172-31-33-228.us-west-2.compute.internal |
a392e98adcfc9228bc16e69bb9ab4986edb5cd45 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/tornado/2018/4/iostream_test.py | cb0967f8c09ce908838377aba724ed3bab180f85 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 51,339 | py | from __future__ import absolute_import, division, print_function
from tornado.concurrent import Future
from tornado import gen
from tornado import netutil
from tornado.iostream import IOStream, SSLIOStream, PipeIOStream, StreamClosedError, _StreamBuffer
from tornado.httputil import HTTPHeaders
from tornado.locks import Condition, Event
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket
from tornado.stack_context import NullContext
from tornado.tcpserver import TCPServer
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog, gen_test # noqa: E501
from tornado.test.util import (unittest, skipIfNonUnix, refusing_port, skipPypy3V58,
ignore_deprecation)
from tornado.web import RequestHandler, Application
import errno
import hashlib
import logging
import os
import platform
import random
import socket
import ssl
import sys
try:
from unittest import mock # type: ignore
except ImportError:
try:
import mock # type: ignore
except ImportError:
mock = None
def _server_ssl_options():
return dict(
certfile=os.path.join(os.path.dirname(__file__), 'test.crt'),
keyfile=os.path.join(os.path.dirname(__file__), 'test.key'),
)
class HelloHandler(RequestHandler):
    """Trivial handler used by the IOStream web tests: GET -> body "Hello"."""
    def get(self):
        self.write("Hello")
class TestIOStreamWebMixin(object):
def _make_client_iostream(self):
raise NotImplementedError()
def get_app(self):
return Application([('/', HelloHandler)])
def test_connection_closed(self):
# When a server sends a response and then closes the connection,
# the client must be allowed to read the data before the IOStream
# closes itself. Epoll reports closed connections with a separate
# EPOLLRDHUP event delivered at the same time as the read event,
# while kqueue reports them as a second read/write event with an EOF
# flag.
response = self.fetch("/", headers={"Connection": "close"})
response.rethrow()
@gen_test
def test_read_until_close(self):
stream = self._make_client_iostream()
yield stream.connect(('127.0.0.1', self.get_http_port()))
stream.write(b"GET / HTTP/1.0\r\n\r\n")
data = yield stream.read_until_close()
self.assertTrue(data.startswith(b"HTTP/1.1 200"))
self.assertTrue(data.endswith(b"Hello"))
@gen_test
def test_read_zero_bytes(self):
self.stream = self._make_client_iostream()
yield self.stream.connect(("127.0.0.1", self.get_http_port()))
self.stream.write(b"GET / HTTP/1.0\r\n\r\n")
# normal read
data = yield self.stream.read_bytes(9)
self.assertEqual(data, b"HTTP/1.1 ")
# zero bytes
data = yield self.stream.read_bytes(0)
self.assertEqual(data, b"")
# another normal read
data = yield self.stream.read_bytes(3)
self.assertEqual(data, b"200")
self.stream.close()
@gen_test
def test_write_while_connecting(self):
stream = self._make_client_iostream()
connected = [False]
cond = Condition()
def connected_callback():
connected[0] = True
cond.notify()
with ignore_deprecation():
stream.connect(("127.0.0.1", self.get_http_port()),
callback=connected_callback)
# unlike the previous tests, try to write before the connection
# is complete.
written = [False]
def write_callback():
written[0] = True
cond.notify()
with ignore_deprecation():
stream.write(b"GET / HTTP/1.0\r\nConnection: close\r\n\r\n",
callback=write_callback)
self.assertTrue(not connected[0])
# by the time the write has flushed, the connection callback has
# also run
try:
while not (connected[0] and written[0]):
yield cond.wait()
finally:
logging.debug((connected, written))
data = yield stream.read_until_close()
self.assertTrue(data.endswith(b"Hello"))
stream.close()
@gen_test
def test_future_interface(self):
"""Basic test of IOStream's ability to return Futures."""
stream = self._make_client_iostream()
connect_result = yield stream.connect(
("127.0.0.1", self.get_http_port()))
self.assertIs(connect_result, stream)
yield stream.write(b"GET / HTTP/1.0\r\n\r\n")
first_line = yield stream.read_until(b"\r\n")
self.assertEqual(first_line, b"HTTP/1.1 200 OK\r\n")
# callback=None is equivalent to no callback.
header_data = yield stream.read_until(b"\r\n\r\n", callback=None)
headers = HTTPHeaders.parse(header_data.decode('latin1'))
content_length = int(headers['Content-Length'])
body = yield stream.read_bytes(content_length)
self.assertEqual(body, b'Hello')
stream.close()
@gen_test
def test_future_close_while_reading(self):
stream = self._make_client_iostream()
yield stream.connect(("127.0.0.1", self.get_http_port()))
yield stream.write(b"GET / HTTP/1.0\r\n\r\n")
with self.assertRaises(StreamClosedError):
yield stream.read_bytes(1024 * 1024)
stream.close()
@gen_test
def test_future_read_until_close(self):
# Ensure that the data comes through before the StreamClosedError.
stream = self._make_client_iostream()
yield stream.connect(("127.0.0.1", self.get_http_port()))
yield stream.write(b"GET / HTTP/1.0\r\nConnection: close\r\n\r\n")
yield stream.read_until(b"\r\n\r\n")
body = yield stream.read_until_close()
self.assertEqual(body, b"Hello")
# Nothing else to read; the error comes immediately without waiting
# for yield.
with self.assertRaises(StreamClosedError):
stream.read_bytes(1)
class TestReadWriteMixin(object):
# Tests where one stream reads and the other writes.
# These should work for BaseIOStream implementations.
def make_iostream_pair(self, **kwargs):
raise NotImplementedError
@gen_test
def test_write_zero_bytes(self):
# Attempting to write zero bytes should run the callback without
# going into an infinite loop.
rs, ws = yield self.make_iostream_pair()
yield ws.write(b'')
ws.close()
rs.close()
@gen_test
def test_streaming_callback(self):
rs, ws = yield self.make_iostream_pair()
try:
chunks = []
cond = Condition()
def streaming_callback(data):
chunks.append(data)
cond.notify()
with ignore_deprecation():
fut = rs.read_bytes(6, streaming_callback=streaming_callback)
ws.write(b"1234")
while not chunks:
yield cond.wait()
ws.write(b"5678")
final_data = yield(fut)
self.assertFalse(final_data)
self.assertEqual(chunks, [b"1234", b"56"])
# the rest of the last chunk is still in the buffer
data = yield rs.read_bytes(2)
self.assertEqual(data, b"78")
finally:
rs.close()
ws.close()
@gen_test
def test_streaming_callback_with_final_callback(self):
rs, ws = yield self.make_iostream_pair()
try:
chunks = []
final_called = []
cond = Condition()
def streaming_callback(data):
chunks.append(data)
cond.notify()
def final_callback(data):
self.assertFalse(data)
final_called.append(True)
cond.notify()
with ignore_deprecation():
rs.read_bytes(6, callback=final_callback,
streaming_callback=streaming_callback)
ws.write(b"1234")
while not chunks:
yield cond.wait()
ws.write(b"5678")
while not final_called:
yield cond.wait()
self.assertEqual(chunks, [b"1234", b"56"])
# the rest of the last chunk is still in the buffer
data = yield rs.read_bytes(2)
self.assertEqual(data, b"78")
finally:
rs.close()
ws.close()
@gen_test
def test_streaming_callback_with_data_in_buffer(self):
rs, ws = yield self.make_iostream_pair()
ws.write(b"abcd\r\nefgh")
data = yield rs.read_until(b"\r\n")
self.assertEqual(data, b"abcd\r\n")
streaming_fut = Future()
with ignore_deprecation():
rs.read_until_close(streaming_callback=streaming_fut.set_result)
data = yield streaming_fut
self.assertEqual(data, b"efgh")
rs.close()
ws.close()
@gen_test
def test_streaming_until_close(self):
rs, ws = yield self.make_iostream_pair()
try:
chunks = []
closed = [False]
cond = Condition()
def streaming_callback(data):
chunks.append(data)
cond.notify()
def close_callback(data):
assert not data, data
closed[0] = True
cond.notify()
with ignore_deprecation():
rs.read_until_close(callback=close_callback,
streaming_callback=streaming_callback)
ws.write(b"1234")
while len(chunks) != 1:
yield cond.wait()
yield ws.write(b"5678")
ws.close()
while not closed[0]:
yield cond.wait()
self.assertEqual(chunks, [b"1234", b"5678"])
finally:
ws.close()
rs.close()
@gen_test
def test_streaming_until_close_future(self):
rs, ws = yield self.make_iostream_pair()
try:
chunks = []
@gen.coroutine
def rs_task():
with ignore_deprecation():
yield rs.read_until_close(streaming_callback=chunks.append)
@gen.coroutine
def ws_task():
yield ws.write(b"1234")
yield gen.sleep(0.01)
yield ws.write(b"5678")
ws.close()
yield [rs_task(), ws_task()]
self.assertEqual(chunks, [b"1234", b"5678"])
finally:
ws.close()
rs.close()
@gen_test
def test_delayed_close_callback(self):
# The scenario: Server closes the connection while there is a pending
# read that can be served out of buffered data. The client does not
# run the close_callback as soon as it detects the close, but rather
# defers it until after the buffered read has finished.
rs, ws = yield self.make_iostream_pair()
try:
event = Event()
rs.set_close_callback(event.set)
ws.write(b"12")
chunks = []
def callback1(data):
chunks.append(data)
with ignore_deprecation():
rs.read_bytes(1, callback2)
ws.close()
def callback2(data):
chunks.append(data)
with ignore_deprecation():
rs.read_bytes(1, callback1)
yield event.wait() # stopped by close_callback
self.assertEqual(chunks, [b"1", b"2"])
finally:
ws.close()
rs.close()
@gen_test
def test_future_delayed_close_callback(self):
# Same as test_delayed_close_callback, but with the future interface.
rs, ws = yield self.make_iostream_pair()
try:
ws.write(b"12")
chunks = []
chunks.append((yield rs.read_bytes(1)))
ws.close()
chunks.append((yield rs.read_bytes(1)))
self.assertEqual(chunks, [b"1", b"2"])
finally:
ws.close()
rs.close()
@gen_test
def test_close_buffered_data(self):
# Similar to the previous test, but with data stored in the OS's
# socket buffers instead of the IOStream's read buffer. Out-of-band
# close notifications must be delayed until all data has been
# drained into the IOStream buffer. (epoll used to use out-of-band
# close events with EPOLLRDHUP, but no longer)
#
# This depends on the read_chunk_size being smaller than the
# OS socket buffer, so make it small.
rs, ws = yield self.make_iostream_pair(read_chunk_size=256)
try:
ws.write(b"A" * 512)
data = yield rs.read_bytes(256)
self.assertEqual(b"A" * 256, data)
ws.close()
# Allow the close to propagate to the `rs` side of the
# connection. Using add_callback instead of add_timeout
# doesn't seem to work, even with multiple iterations
yield gen.sleep(0.01)
data = yield rs.read_bytes(256)
self.assertEqual(b"A" * 256, data)
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_close_after_close(self):
# Similar to test_delayed_close_callback, but read_until_close takes
# a separate code path so test it separately.
rs, ws = yield self.make_iostream_pair()
try:
ws.write(b"1234")
ws.close()
# Read one byte to make sure the client has received the data.
# It won't run the close callback as long as there is more buffered
# data that could satisfy a later read.
data = yield rs.read_bytes(1)
self.assertEqual(data, b"1")
data = yield rs.read_until_close()
self.assertEqual(data, b"234")
finally:
ws.close()
rs.close()
@gen_test
def test_streaming_read_until_close_after_close(self):
# Same as the preceding test but with a streaming_callback.
# All data should go through the streaming callback,
# and the final read callback just gets an empty string.
rs, ws = yield self.make_iostream_pair()
try:
ws.write(b"1234")
ws.close()
data = yield rs.read_bytes(1)
self.assertEqual(data, b"1")
streaming_data = []
final_future = Future()
with ignore_deprecation():
rs.read_until_close(final_future.set_result,
streaming_callback=streaming_data.append)
final_data = yield final_future
self.assertEqual(b'', final_data)
self.assertEqual(b''.join(streaming_data), b"234")
finally:
ws.close()
rs.close()
@gen_test
def test_large_read_until(self):
# Performance test: read_until used to have a quadratic component
# so a read_until of 4MB would take 8 seconds; now it takes 0.25
# seconds.
rs, ws = yield self.make_iostream_pair()
try:
# This test fails on pypy with ssl. I think it's because
# pypy's gc defeats moves objects, breaking the
# "frozen write buffer" assumption.
if (isinstance(rs, SSLIOStream) and
platform.python_implementation() == 'PyPy'):
raise unittest.SkipTest(
"pypy gc causes problems with openssl")
NUM_KB = 4096
for i in range(NUM_KB):
ws.write(b"A" * 1024)
ws.write(b"\r\n")
data = yield rs.read_until(b"\r\n")
self.assertEqual(len(data), NUM_KB * 1024 + 2)
finally:
ws.close()
rs.close()
@gen_test
def test_close_callback_with_pending_read(self):
# Regression test for a bug that was introduced in 2.3
# where the IOStream._close_callback would never be called
# if there were pending reads.
OK = b"OK\r\n"
rs, ws = yield self.make_iostream_pair()
event = Event()
rs.set_close_callback(event.set)
try:
ws.write(OK)
res = yield rs.read_until(b"\r\n")
self.assertEqual(res, OK)
ws.close()
with ignore_deprecation():
rs.read_until(b"\r\n", lambda x: x)
# If _close_callback (self.stop) is not called,
# an AssertionError: Async operation timed out after 5 seconds
# will be raised.
yield event.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_future_close_callback(self):
# Regression test for interaction between the Future read interfaces
# and IOStream._maybe_add_error_listener.
rs, ws = yield self.make_iostream_pair()
closed = [False]
cond = Condition()
def close_callback():
closed[0] = True
cond.notify()
rs.set_close_callback(close_callback)
try:
ws.write(b'a')
res = yield rs.read_bytes(1)
self.assertEqual(res, b'a')
self.assertFalse(closed[0])
ws.close()
yield cond.wait()
self.assertTrue(closed[0])
finally:
rs.close()
ws.close()
@gen_test
def test_write_memoryview(self):
rs, ws = yield self.make_iostream_pair()
try:
fut = rs.read_bytes(4)
ws.write(memoryview(b"hello"))
data = yield fut
self.assertEqual(data, b"hell")
finally:
ws.close()
rs.close()
@gen_test
def test_read_bytes_partial(self):
rs, ws = yield self.make_iostream_pair()
try:
# Ask for more than is available with partial=True
fut = rs.read_bytes(50, partial=True)
ws.write(b"hello")
data = yield fut
self.assertEqual(data, b"hello")
# Ask for less than what is available; num_bytes is still
# respected.
fut = rs.read_bytes(3, partial=True)
ws.write(b"world")
data = yield fut
self.assertEqual(data, b"wor")
# Partial reads won't return an empty string, but read_bytes(0)
# will.
data = yield rs.read_bytes(0, partial=True)
self.assertEqual(data, b'')
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_max_bytes(self):
rs, ws = yield self.make_iostream_pair()
closed = Event()
rs.set_close_callback(closed.set)
try:
# Extra room under the limit
fut = rs.read_until(b"def", max_bytes=50)
ws.write(b"abcdef")
data = yield fut
self.assertEqual(data, b"abcdef")
# Just enough space
fut = rs.read_until(b"def", max_bytes=6)
ws.write(b"abcdef")
data = yield fut
self.assertEqual(data, b"abcdef")
# Not enough space, but we don't know it until all we can do is
# log a warning and close the connection.
with ExpectLog(gen_log, "Unsatisfiable read"):
fut = rs.read_until(b"def", max_bytes=5)
ws.write(b"123456")
yield closed.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_max_bytes_inline(self):
rs, ws = yield self.make_iostream_pair()
closed = Event()
rs.set_close_callback(closed.set)
try:
# Similar to the error case in the previous test, but the
# ws writes first so rs reads are satisfied
# inline. For consistency with the out-of-line case, we
# do not raise the error synchronously.
ws.write(b"123456")
with ExpectLog(gen_log, "Unsatisfiable read"):
with ignore_deprecation():
rs.read_until(b"def", callback=lambda x: self.fail(), max_bytes=5)
yield closed.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_max_bytes_ignores_extra(self):
rs, ws = yield self.make_iostream_pair()
closed = Event()
rs.set_close_callback(closed.set)
try:
# Even though data that matches arrives the same packet that
# puts us over the limit, we fail the request because it was not
# found within the limit.
ws.write(b"abcdef")
with ExpectLog(gen_log, "Unsatisfiable read"):
rs.read_until(b"def", max_bytes=5)
yield closed.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_regex_max_bytes(self):
rs, ws = yield self.make_iostream_pair()
closed = Event()
rs.set_close_callback(closed.set)
try:
# Extra room under the limit
fut = rs.read_until_regex(b"def", max_bytes=50)
ws.write(b"abcdef")
data = yield fut
self.assertEqual(data, b"abcdef")
# Just enough space
fut = rs.read_until_regex(b"def", max_bytes=6)
ws.write(b"abcdef")
data = yield fut
self.assertEqual(data, b"abcdef")
# Not enough space, but we don't know it until all we can do is
# log a warning and close the connection.
with ExpectLog(gen_log, "Unsatisfiable read"):
rs.read_until_regex(b"def", max_bytes=5)
ws.write(b"123456")
yield closed.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_regex_max_bytes_inline(self):
rs, ws = yield self.make_iostream_pair()
closed = Event()
rs.set_close_callback(closed.set)
try:
# Similar to the error case in the previous test, but the
# ws writes first so rs reads are satisfied
# inline. For consistency with the out-of-line case, we
# do not raise the error synchronously.
ws.write(b"123456")
with ExpectLog(gen_log, "Unsatisfiable read"):
rs.read_until_regex(b"def", max_bytes=5)
yield closed.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_regex_max_bytes_ignores_extra(self):
rs, ws = yield self.make_iostream_pair()
closed = Event()
rs.set_close_callback(closed.set)
try:
# Even though data that matches arrives the same packet that
# puts us over the limit, we fail the request because it was not
# found within the limit.
ws.write(b"abcdef")
with ExpectLog(gen_log, "Unsatisfiable read"):
rs.read_until_regex(b"def", max_bytes=5)
yield closed.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_small_reads_from_large_buffer(self):
# 10KB buffer size, 100KB available to read.
# Read 1KB at a time and make sure that the buffer is not eagerly
# filled.
rs, ws = yield self.make_iostream_pair(max_buffer_size=10 * 1024)
try:
ws.write(b"a" * 1024 * 100)
for i in range(100):
data = yield rs.read_bytes(1024)
self.assertEqual(data, b"a" * 1024)
finally:
ws.close()
rs.close()
@gen_test
def test_small_read_untils_from_large_buffer(self):
# 10KB buffer size, 100KB available to read.
# Read 1KB at a time and make sure that the buffer is not eagerly
# filled.
rs, ws = yield self.make_iostream_pair(max_buffer_size=10 * 1024)
try:
ws.write((b"a" * 1023 + b"\n") * 100)
for i in range(100):
data = yield rs.read_until(b"\n", max_bytes=4096)
self.assertEqual(data, b"a" * 1023 + b"\n")
finally:
ws.close()
rs.close()
@gen_test
def test_flow_control(self):
MB = 1024 * 1024
rs, ws = yield self.make_iostream_pair(max_buffer_size=5 * MB)
try:
# Client writes more than the rs will accept.
ws.write(b"a" * 10 * MB)
# The rs pauses while reading.
yield rs.read_bytes(MB)
yield gen.sleep(0.1)
# The ws's writes have been blocked; the rs can
# continue to read gradually.
for i in range(9):
yield rs.read_bytes(MB)
finally:
rs.close()
ws.close()
@gen_test
def test_read_into(self):
rs, ws = yield self.make_iostream_pair()
def sleep_some():
self.io_loop.run_sync(lambda: gen.sleep(0.05))
try:
buf = bytearray(10)
fut = rs.read_into(buf)
ws.write(b"hello")
yield gen.sleep(0.05)
self.assertTrue(rs.reading())
ws.write(b"world!!")
data = yield fut
self.assertFalse(rs.reading())
self.assertEqual(data, 10)
self.assertEqual(bytes(buf), b"helloworld")
# Existing buffer is fed into user buffer
fut = rs.read_into(buf)
yield gen.sleep(0.05)
self.assertTrue(rs.reading())
ws.write(b"1234567890")
data = yield fut
self.assertFalse(rs.reading())
self.assertEqual(data, 10)
self.assertEqual(bytes(buf), b"!!12345678")
# Existing buffer can satisfy read immediately
buf = bytearray(4)
ws.write(b"abcdefghi")
data = yield rs.read_into(buf)
self.assertEqual(data, 4)
self.assertEqual(bytes(buf), b"90ab")
data = yield rs.read_bytes(7)
self.assertEqual(data, b"cdefghi")
finally:
ws.close()
rs.close()
@gen_test
def test_read_into_partial(self):
rs, ws = yield self.make_iostream_pair()
try:
# Partial read
buf = bytearray(10)
fut = rs.read_into(buf, partial=True)
ws.write(b"hello")
data = yield fut
self.assertFalse(rs.reading())
self.assertEqual(data, 5)
self.assertEqual(bytes(buf), b"hello\0\0\0\0\0")
# Full read despite partial=True
ws.write(b"world!1234567890")
data = yield rs.read_into(buf, partial=True)
self.assertEqual(data, 10)
self.assertEqual(bytes(buf), b"world!1234")
# Existing buffer can satisfy read immediately
data = yield rs.read_into(buf, partial=True)
self.assertEqual(data, 6)
self.assertEqual(bytes(buf), b"5678901234")
finally:
ws.close()
rs.close()
@gen_test
def test_read_into_zero_bytes(self):
rs, ws = yield self.make_iostream_pair()
try:
buf = bytearray()
fut = rs.read_into(buf)
self.assertEqual(fut.result(), 0)
finally:
ws.close()
rs.close()
@gen_test
def test_many_mixed_reads(self):
# Stress buffer handling when going back and forth between
# read_bytes() (using an internal buffer) and read_into()
# (using a user-allocated buffer).
r = random.Random(42)
nbytes = 1000000
rs, ws = yield self.make_iostream_pair()
produce_hash = hashlib.sha1()
consume_hash = hashlib.sha1()
@gen.coroutine
def produce():
remaining = nbytes
while remaining > 0:
size = r.randint(1, min(1000, remaining))
data = os.urandom(size)
produce_hash.update(data)
yield ws.write(data)
remaining -= size
assert remaining == 0
@gen.coroutine
def consume():
remaining = nbytes
while remaining > 0:
if r.random() > 0.5:
# read_bytes()
size = r.randint(1, min(1000, remaining))
data = yield rs.read_bytes(size)
consume_hash.update(data)
remaining -= size
else:
# read_into()
size = r.randint(1, min(1000, remaining))
buf = bytearray(size)
n = yield rs.read_into(buf)
assert n == size
consume_hash.update(buf)
remaining -= size
assert remaining == 0
try:
yield [produce(), consume()]
assert produce_hash.hexdigest() == consume_hash.hexdigest()
finally:
ws.close()
rs.close()
class TestIOStreamMixin(TestReadWriteMixin):
def _make_server_iostream(self, connection, **kwargs):
raise NotImplementedError()
def _make_client_iostream(self, connection, **kwargs):
raise NotImplementedError()
@gen.coroutine
def make_iostream_pair(self, **kwargs):
listener, port = bind_unused_port()
server_stream_fut = Future()
def accept_callback(connection, address):
server_stream_fut.set_result(self._make_server_iostream(connection, **kwargs))
netutil.add_accept_handler(listener, accept_callback)
client_stream = self._make_client_iostream(socket.socket(), **kwargs)
connect_fut = client_stream.connect(('127.0.0.1', port))
server_stream, client_stream = yield [server_stream_fut, connect_fut]
self.io_loop.remove_handler(listener.fileno())
listener.close()
raise gen.Return((server_stream, client_stream))
def test_connection_refused(self):
# When a connection is refused, the connect callback should not
# be run. (The kqueue IOLoop used to behave differently from the
# epoll IOLoop in this respect)
cleanup_func, port = refusing_port()
self.addCleanup(cleanup_func)
stream = IOStream(socket.socket())
self.connect_called = False
def connect_callback():
self.connect_called = True
self.stop()
stream.set_close_callback(self.stop)
# log messages vary by platform and ioloop implementation
with ExpectLog(gen_log, ".*", required=False):
with ignore_deprecation():
stream.connect(("127.0.0.1", port), connect_callback)
self.wait()
self.assertFalse(self.connect_called)
self.assertTrue(isinstance(stream.error, socket.error), stream.error)
if sys.platform != 'cygwin':
_ERRNO_CONNREFUSED = (errno.ECONNREFUSED,)
if hasattr(errno, "WSAECONNREFUSED"):
_ERRNO_CONNREFUSED += (errno.WSAECONNREFUSED,)
# cygwin's errnos don't match those used on native windows python
self.assertTrue(stream.error.args[0] in _ERRNO_CONNREFUSED)
@unittest.skipIf(mock is None, 'mock package not present')
def test_gaierror(self):
# Test that IOStream sets its exc_info on getaddrinfo error.
# It's difficult to reliably trigger a getaddrinfo error;
# some resolvers own't even return errors for malformed names,
# so we mock it instead. If IOStream changes to call a Resolver
# before sock.connect, the mock target will need to change too.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = IOStream(s)
stream.set_close_callback(self.stop)
with mock.patch('socket.socket.connect',
side_effect=socket.gaierror(errno.EIO, 'boom')):
with ExpectLog(gen_log, "Connect error"):
with ignore_deprecation():
stream.connect(('localhost', 80), callback=self.stop)
self.wait()
self.assertIsInstance(stream.error, socket.gaierror)
@gen_test
def test_read_callback_error(self):
# Test that IOStream sets its exc_info when a read callback throws
server, client = yield self.make_iostream_pair()
try:
closed = Event()
server.set_close_callback(closed.set)
with ExpectLog(
app_log, "(Uncaught exception|Exception in callback)"
):
# Clear ExceptionStackContext so IOStream catches error
with NullContext():
with ignore_deprecation():
server.read_bytes(1, callback=lambda data: 1 / 0)
client.write(b"1")
yield closed.wait()
self.assertTrue(isinstance(server.error, ZeroDivisionError))
finally:
server.close()
client.close()
@unittest.skipIf(mock is None, 'mock package not present')
@gen_test
def test_read_until_close_with_error(self):
server, client = yield self.make_iostream_pair()
try:
with mock.patch('tornado.iostream.BaseIOStream._try_inline_read',
side_effect=IOError('boom')):
with self.assertRaisesRegexp(IOError, 'boom'):
with ignore_deprecation():
client.read_until_close(lambda x: None)
finally:
server.close()
client.close()
@skipIfNonUnix
@skipPypy3V58
@gen_test
def test_inline_read_error(self):
# An error on an inline read is raised without logging (on the
# assumption that it will eventually be noticed or logged further
# up the stack).
#
# This test is posix-only because windows os.close() doesn't work
# on socket FDs, but we can't close the socket object normally
# because we won't get the error we want if the socket knows
# it's closed.
server, client = yield self.make_iostream_pair()
try:
os.close(server.socket.fileno())
with self.assertRaises(socket.error):
server.read_bytes(1)
finally:
server.close()
client.close()
    @skipPypy3V58
    @gen_test
    def test_async_read_error_logging(self):
        # Socket errors on asynchronous reads should be logged (but only
        # once).
        server, client = yield self.make_iostream_pair()
        closed = Event()
        server.set_close_callback(closed.set)
        try:
            # Start a read that will be fulfilled asynchronously.
            with ignore_deprecation():
                server.read_bytes(1, lambda data: None)
            client.write(b'a')
            # Stub out read_from_fd to make it fail.
            def fake_read_from_fd():
                # Close the FD first so the real read_from_fd errors out.
                os.close(server.socket.fileno())
                server.__class__.read_from_fd(server)
            server.read_from_fd = fake_read_from_fd
            # This log message is from _handle_read (not read_from_fd).
            with ExpectLog(gen_log, "error on read"):
                yield closed.wait()
        finally:
            server.close()
            client.close()
    @gen_test
    def test_future_write(self):
        """
        Test that write() Futures are never orphaned.
        """
        # Run concurrent writers that will write enough bytes so as to
        # clog the socket buffer and accumulate bytes in our write buffer.
        m, n = 10000, 1000
        nproducers = 10
        total_bytes = m * n * nproducers
        server, client = yield self.make_iostream_pair(max_buffer_size=total_bytes)
        @gen.coroutine
        def produce():
            # Each producer writes n chunks of m bytes.
            data = b'x' * m
            for i in range(n):
                yield server.write(data)
        @gen.coroutine
        def consume():
            # Drain everything the producers send.
            nread = 0
            while nread < total_bytes:
                res = yield client.read_bytes(m)
                nread += len(res)
        try:
            # Run all producers and the single consumer concurrently.
            yield [produce() for i in range(nproducers)] + [consume()]
        finally:
            server.close()
            client.close()
class TestIOStreamWebHTTP(TestIOStreamWebMixin, AsyncHTTPTestCase):
    """Run the web-mixin tests over plain (non-TLS) HTTP."""
    def _make_client_iostream(self):
        # Plain TCP IOStream client.
        return IOStream(socket.socket())
class TestIOStreamWebHTTPS(TestIOStreamWebMixin, AsyncHTTPSTestCase):
    """Run the web-mixin tests over HTTPS (certificate checks disabled)."""
    def _make_client_iostream(self):
        return SSLIOStream(socket.socket(),
                           ssl_options=dict(cert_reqs=ssl.CERT_NONE))
class TestIOStream(TestIOStreamMixin, AsyncTestCase):
    """Run the stream-mixin tests with plain IOStream on both ends."""
    def _make_server_iostream(self, connection, **kwargs):
        return IOStream(connection, **kwargs)
    def _make_client_iostream(self, connection, **kwargs):
        return IOStream(connection, **kwargs)
class TestIOStreamSSL(TestIOStreamMixin, AsyncTestCase):
    """Run the stream-mixin tests with SSLIOStream using ssl_options dicts."""
    def _make_server_iostream(self, connection, **kwargs):
        # Wrap lazily: the handshake is performed by the IOStream machinery,
        # not on connect.
        connection = ssl.wrap_socket(connection,
                                     server_side=True,
                                     do_handshake_on_connect=False,
                                     **_server_ssl_options())
        return SSLIOStream(connection, **kwargs)
    def _make_client_iostream(self, connection, **kwargs):
        # Client skips certificate verification for the test certs.
        return SSLIOStream(connection,
                           ssl_options=dict(cert_reqs=ssl.CERT_NONE),
                           **kwargs)
# This will run some tests that are basically redundant but it's the
# simplest way to make sure that it works to pass an SSLContext
# instead of an ssl_options dict to the SSLIOStream constructor.
class TestIOStreamSSLContext(TestIOStreamMixin, AsyncTestCase):
    """Same tests as TestIOStreamSSL, but passing ssl.SSLContext objects."""
    def _make_server_iostream(self, connection, **kwargs):
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        # Load the test certificate/key that live next to this file.
        context.load_cert_chain(
            os.path.join(os.path.dirname(__file__), 'test.crt'),
            os.path.join(os.path.dirname(__file__), 'test.key'))
        connection = ssl_wrap_socket(connection, context,
                                     server_side=True,
                                     do_handshake_on_connect=False)
        return SSLIOStream(connection, **kwargs)
    def _make_client_iostream(self, connection, **kwargs):
        # Default context verifies certs; tests rely on this behavior.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        return SSLIOStream(connection, ssl_options=context, **kwargs)
class TestIOStreamStartTLS(AsyncTestCase):
    """Tests for IOStream.start_tls using a miniature SMTP-style exchange."""
    def setUp(self):
        try:
            super(TestIOStreamStartTLS, self).setUp()
            self.listener, self.port = bind_unused_port()
            self.server_stream = None
            self.server_accepted = Future()
            netutil.add_accept_handler(self.listener, self.accept)
            self.client_stream = IOStream(socket.socket())
            # Connect the client, then wait for the server side to accept.
            self.io_loop.add_future(self.client_stream.connect(
                ('127.0.0.1', self.port)), self.stop)
            self.wait()
            self.io_loop.add_future(self.server_accepted, self.stop)
            self.wait()
        except Exception as e:
            # Print before re-raising: setUp failures are otherwise terse.
            print(e)
            raise
    def tearDown(self):
        if self.server_stream is not None:
            self.server_stream.close()
        if self.client_stream is not None:
            self.client_stream.close()
        self.listener.close()
        super(TestIOStreamStartTLS, self).tearDown()
    def accept(self, connection, address):
        # Each test uses exactly one connection.
        if self.server_stream is not None:
            self.fail("should only get one connection")
        self.server_stream = IOStream(connection)
        self.server_accepted.set_result(None)
    @gen.coroutine
    def client_send_line(self, line):
        """Write *line* from the client and assert the server receives it."""
        self.client_stream.write(line)
        recv_line = yield self.server_stream.read_until(b"\r\n")
        self.assertEqual(line, recv_line)
    @gen.coroutine
    def server_send_line(self, line):
        """Write *line* from the server and assert the client receives it."""
        self.server_stream.write(line)
        recv_line = yield self.client_stream.read_until(b"\r\n")
        self.assertEqual(line, recv_line)
    def client_start_tls(self, ssl_options=None, server_hostname=None):
        # Detach the stream first: start_tls takes ownership of it.
        client_stream = self.client_stream
        self.client_stream = None
        return client_stream.start_tls(False, ssl_options, server_hostname)
    def server_start_tls(self, ssl_options=None):
        server_stream = self.server_stream
        self.server_stream = None
        return server_stream.start_tls(True, ssl_options)
    @gen_test
    def test_start_tls_smtp(self):
        # This flow is simplified from RFC 3207 section 5.
        # We don't really need all of this, but it helps to make sure
        # that after realistic back-and-forth traffic the buffers end up
        # in a sane state.
        yield self.server_send_line(b"220 mail.example.com ready\r\n")
        yield self.client_send_line(b"EHLO mail.example.com\r\n")
        yield self.server_send_line(b"250-mail.example.com welcome\r\n")
        yield self.server_send_line(b"250 STARTTLS\r\n")
        yield self.client_send_line(b"STARTTLS\r\n")
        yield self.server_send_line(b"220 Go ahead\r\n")
        client_future = self.client_start_tls(dict(cert_reqs=ssl.CERT_NONE))
        server_future = self.server_start_tls(_server_ssl_options())
        self.client_stream = yield client_future
        self.server_stream = yield server_future
        # Both ends are upgraded to SSLIOStream.
        self.assertTrue(isinstance(self.client_stream, SSLIOStream))
        self.assertTrue(isinstance(self.server_stream, SSLIOStream))
        yield self.client_send_line(b"EHLO mail.example.com\r\n")
        yield self.server_send_line(b"250 mail.example.com welcome\r\n")
    @gen_test
    def test_handshake_fail(self):
        """Handshake against the untrusted test cert fails on both ends."""
        server_future = self.server_start_tls(_server_ssl_options())
        # Certificates are verified with the default configuration.
        client_future = self.client_start_tls(server_hostname="localhost")
        with ExpectLog(gen_log, "SSL Error"):
            with self.assertRaises(ssl.SSLError):
                yield client_future
        with self.assertRaises((ssl.SSLError, socket.error)):
            yield server_future
    @gen_test
    def test_check_hostname(self):
        # Test that server_hostname parameter to start_tls is being used.
        # The check_hostname functionality is only available in python 2.7 and
        # up and in python 3.4 and up.
        server_future = self.server_start_tls(_server_ssl_options())
        client_future = self.client_start_tls(
            ssl.create_default_context(),
            server_hostname='127.0.0.1')
        with ExpectLog(gen_log, "SSL Error"):
            with self.assertRaises(ssl.SSLError):
                # The client fails to connect with an SSL error.
                yield client_future
        with self.assertRaises(Exception):
            # The server fails to connect, but the exact error is unspecified.
            yield server_future
class WaitForHandshakeTest(AsyncTestCase):
    """Tests for SSLIOStream.wait_for_handshake on the server side."""
    @gen.coroutine
    def connect_to_server(self, server_cls):
        """Start *server_cls* on an unused port and connect one SSL client."""
        server = client = None
        try:
            sock, port = bind_unused_port()
            server = server_cls(ssl_options=_server_ssl_options())
            server.add_socket(sock)
            client = SSLIOStream(socket.socket(),
                                 ssl_options=dict(cert_reqs=ssl.CERT_NONE))
            yield client.connect(('127.0.0.1', port))
            # cipher() is non-None once the handshake has completed.
            self.assertIsNotNone(client.socket.cipher())
        finally:
            if server is not None:
                server.stop()
            if client is not None:
                client.close()
    @gen_test
    def test_wait_for_handshake_callback(self):
        test = self
        handshake_future = Future()
        class TestServer(TCPServer):
            def handle_stream(self, stream, address):
                # The handshake has not yet completed.
                test.assertIsNone(stream.socket.cipher())
                self.stream = stream
                with ignore_deprecation():
                    stream.wait_for_handshake(self.handshake_done)
            def handshake_done(self):
                # Now the handshake is done and ssl information is available.
                test.assertIsNotNone(self.stream.socket.cipher())
                handshake_future.set_result(None)
        yield self.connect_to_server(TestServer)
        yield handshake_future
    @gen_test
    def test_wait_for_handshake_future(self):
        test = self
        handshake_future = Future()
        class TestServer(TCPServer):
            def handle_stream(self, stream, address):
                test.assertIsNone(stream.socket.cipher())
                test.io_loop.spawn_callback(self.handle_connection, stream)
            @gen.coroutine
            def handle_connection(self, stream):
                # Future form: yield instead of passing a callback.
                yield stream.wait_for_handshake()
                handshake_future.set_result(None)
        yield self.connect_to_server(TestServer)
        yield handshake_future
    @gen_test
    def test_wait_for_handshake_already_waiting_error(self):
        test = self
        handshake_future = Future()
        class TestServer(TCPServer):
            def handle_stream(self, stream, address):
                with ignore_deprecation():
                    stream.wait_for_handshake(self.handshake_done)
                # A second concurrent wait is rejected.
                test.assertRaises(RuntimeError, stream.wait_for_handshake)
            def handshake_done(self):
                handshake_future.set_result(None)
        yield self.connect_to_server(TestServer)
        yield handshake_future
    @gen_test
    def test_wait_for_handshake_already_connected(self):
        handshake_future = Future()
        class TestServer(TCPServer):
            def handle_stream(self, stream, address):
                self.stream = stream
                with ignore_deprecation():
                    stream.wait_for_handshake(self.handshake_done)
            def handshake_done(self):
                # Waiting again after completion fires immediately.
                with ignore_deprecation():
                    self.stream.wait_for_handshake(self.handshake2_done)
            def handshake2_done(self):
                handshake_future.set_result(None)
        yield self.connect_to_server(TestServer)
        yield handshake_future
@skipIfNonUnix
class TestPipeIOStream(TestReadWriteMixin, AsyncTestCase):
    """Run the read/write mixin tests over an os.pipe()-backed stream pair."""
    @gen.coroutine
    def make_iostream_pair(self, **kwargs):
        # Returns (read_end, write_end) of a fresh pipe.
        r, w = os.pipe()
        return PipeIOStream(r, **kwargs), PipeIOStream(w, **kwargs)
    @gen_test
    def test_pipe_iostream(self):
        rs, ws = yield self.make_iostream_pair()
        ws.write(b"hel")
        ws.write(b"lo world")
        # read_until consumes through (and including) the delimiter.
        data = yield rs.read_until(b' ')
        self.assertEqual(data, b"hello ")
        data = yield rs.read_bytes(3)
        self.assertEqual(data, b"wor")
        ws.close()
        # Remaining bytes are delivered when the write end closes.
        data = yield rs.read_until_close()
        self.assertEqual(data, b"ld")
        rs.close()
    @gen_test
    def test_pipe_iostream_big_write(self):
        rs, ws = yield self.make_iostream_pair()
        NUM_BYTES = 1048576
        # Write 1MB of data, which should fill the buffer
        ws.write(b"1" * NUM_BYTES)
        data = yield rs.read_bytes(NUM_BYTES)
        self.assertEqual(data, b"1" * NUM_BYTES)
        ws.close()
        rs.close()
class TestStreamBuffer(unittest.TestCase):
    """
    Unit tests for the private _StreamBuffer class.
    """
    def setUp(self):
        # Fixed seed keeps the randomized advance() sizes reproducible.
        self.random = random.Random(42)
    def to_bytes(self, b):
        """Normalize bytes/bytearray/memoryview to a bytes object."""
        if isinstance(b, (bytes, bytearray)):
            return bytes(b)
        elif isinstance(b, memoryview):
            return b.tobytes() # For py2
        else:
            raise TypeError(b)
    def make_streambuffer(self, large_buf_threshold=10):
        # Lower the "large buffer" threshold so the tests exercise both paths.
        buf = _StreamBuffer()
        assert buf._large_buf_threshold
        buf._large_buf_threshold = large_buf_threshold
        return buf
    def check_peek(self, buf, expected):
        """peek(size) must return a non-empty prefix of *expected* of <= size."""
        size = 1
        while size < 2 * len(expected):
            got = self.to_bytes(buf.peek(size))
            self.assertTrue(got) # Not empty
            self.assertLessEqual(len(got), size)
            self.assertTrue(expected.startswith(got), (expected, got))
            # Grow size geometrically to cover several peek widths.
            size = (size * 3 + 1) // 2
    def check_append_all_then_skip_all(self, buf, objs, input_type):
        """Append all of *objs* (wrapped in input_type), then drain randomly."""
        self.assertEqual(len(buf), 0)
        expected = b''
        for o in objs:
            expected += o
            buf.append(input_type(o))
            self.assertEqual(len(buf), len(expected))
            self.check_peek(buf, expected)
        while expected:
            # Advance by a random positive amount each step.
            n = self.random.randrange(1, len(expected) + 1)
            expected = expected[n:]
            buf.advance(n)
            self.assertEqual(len(buf), len(expected))
            self.check_peek(buf, expected)
        self.assertEqual(len(buf), 0)
    def test_small(self):
        # All chunks are below the large-buffer threshold.
        objs = [b'12', b'345', b'67', b'89a', b'bcde', b'fgh', b'ijklmn']
        buf = self.make_streambuffer()
        self.check_append_all_then_skip_all(buf, objs, bytes)
        buf = self.make_streambuffer()
        self.check_append_all_then_skip_all(buf, objs, bytearray)
        buf = self.make_streambuffer()
        self.check_append_all_then_skip_all(buf, objs, memoryview)
        # Test internal algorithm
        buf = self.make_streambuffer(10)
        for i in range(9):
            buf.append(b'x')
        self.assertEqual(len(buf._buffers), 1)
        for i in range(9):
            buf.append(b'x')
        self.assertEqual(len(buf._buffers), 2)
        buf.advance(10)
        self.assertEqual(len(buf._buffers), 1)
        buf.advance(8)
        self.assertEqual(len(buf._buffers), 0)
        self.assertEqual(len(buf), 0)
    def test_large(self):
        # Several chunks exceed the (lowered) large-buffer threshold.
        objs = [b'12' * 5,
                b'345' * 2,
                b'67' * 20,
                b'89a' * 12,
                b'bcde' * 1,
                b'fgh' * 7,
                b'ijklmn' * 2]
        buf = self.make_streambuffer()
        self.check_append_all_then_skip_all(buf, objs, bytes)
        buf = self.make_streambuffer()
        self.check_append_all_then_skip_all(buf, objs, bytearray)
        buf = self.make_streambuffer()
        self.check_append_all_then_skip_all(buf, objs, memoryview)
        # Test internal algorithm
        buf = self.make_streambuffer(10)
        for i in range(3):
            buf.append(b'x' * 11)
        self.assertEqual(len(buf._buffers), 3)
        buf.append(b'y')
        self.assertEqual(len(buf._buffers), 4)
        buf.append(b'z')
        self.assertEqual(len(buf._buffers), 4)
        buf.advance(33)
        self.assertEqual(len(buf._buffers), 1)
        buf.advance(2)
        self.assertEqual(len(buf._buffers), 0)
        self.assertEqual(len(buf), 0)
| [
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
e8966e77faf7d525ddb1849377f1a78e5cbe9e59 | f98f34290455c7845a6deb31abc8b1d5cfa3a384 | /coati/web/api/auth/__init__.py | 732da7cba743a8123a85bcbdec1e04236a43ba2c | [] | no_license | gastonrobledo/coati | a8d19a88bbc49178786ad0d6e0037294c28bf8e6 | 5022d252d79a62d097608727b40295f7970175b5 | refs/heads/master | 2021-01-12T13:52:09.431004 | 2015-05-10T19:29:26 | 2015-05-10T19:29:26 | 29,596,215 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,886 | py | """
Authentication resource.
"""
from flask import request, current_app, g as flask_g
from flask.ext.restful import Resource
from coati.core.models.user import User
from coati.web.api import errors
from coati.web.api.auth import oauth, utils, decorators
from coati.web.api.auth.utils import current_user # noqa
oauth_handler = None
def get_user_from_token():
    """
    Parses an Access Token and stores either the User or an error on Flask's
    globals object.
    It's important to use this function just once, in order to validate the
    token only at the beginning of the request.
    """
    try:
        token = utils.parse_auth_header()
    except errors.BasicAPIException as ex:
        # Malformed/missing header: stash the error for later handlers.
        flask_g.auth_error = ex
    else:
        user_obj = utils.parse_auth_token(token)
        if user_obj:
            # Store the user for the current request
            utils.set_current_user(user_obj)
        # NOTE(review): when the token parses to no user, neither a user nor
        # an error is stored — confirm downstream handles that case.
class Authorized(Resource):
    """
    Social authorization resource.
    """
    def post(self):
        """
        Social authorization endpoint.

        Expects a JSON body with 'provider', 'token' and 'user_id'; validates
        the provider token, creates the user on first sight, and returns the
        application's own token pair.
        """
        request_data = request.get_json(silent=True)
        # Check required data
        if not request_data:
            raise errors.InvalidAPIUsage(errors.INVALID_JSON_BODY_MSG)
        provider_name = request_data.get('provider')
        provider = oauth_handler.get_provider(provider_name)
        if not provider:
            raise errors.InvalidAPIUsage(errors.PROVIDER_INVALID_MSG)
        access_token = request_data.get('token')
        if not access_token:
            raise errors.InvalidAPIUsage(errors.MISSING_PROVIDER_TOKEN_MSG)
        user_id = request_data.get('user_id')
        if not user_id:
            raise errors.InvalidAPIUsage(errors.MISSING_PROVIDER_USER_ID_MSG)
        # Validate the token
        error_msg = provider.validate_token(access_token, user_id)
        if error_msg:
            raise errors.UnauthorizedRequest(errors.PROVIDER_INVALID_TOKEN_MSG)
        user_data = provider.get_user_data(access_token)
        if not user_data:
            raise errors.BasicAPIException(errors.PROVIDER_INVALID_RESP_MSG)
        # On new email, register the user
        user, _ = User.get_or_create(**user_data)
        user.save()
        tokens_dict = current_app.token_handler.generate_tokens_dict(user.id)
        return dict(tokens_dict), 200
class AuthResource(Resource):
    """
    Base resource that handles authentication and permissions.
    """
    # Decorators run bottom-up: authentication first.
    # NOTE(review): the permissions decorator is commented out, so
    # subclasses currently get authentication only — confirm intended.
    decorators = [
        #decorators.require_permissions,
        decorators.require_authentication
    ]
def init_app(app):
    """
    Perform authentication initialization.
    :param app: Flask application.
    """
    # A global is used instead of a current_app attribute because the handler
    # is only required here
    global oauth_handler
    oauth_handler = oauth.get_oauth_handler(app.config)
| [
"gaston.robledo@santexgroup.com"
] | gaston.robledo@santexgroup.com |
cb948eb99e8c59e381efd73f6582454c59444afc | f52a45048f169175286b52b1f24012642a6e4c99 | /tools/giza-align/scripts/a3ToTalp.py | 610c7259bf865c81bc3c6efde7bdf9963e9a6a92 | [
"MIT"
] | permissive | bingrao/Bug-Transformer | 8efd868e128b0cc383ee588083cf0ab6f857d61b | 9e39dc553c281f6372b7a8cfc8205aa186645899 | refs/heads/master | 2023-08-15T03:25:14.735190 | 2021-09-28T13:58:03 | 2021-09-28T13:58:03 | 266,435,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,064 | py | #!/usr/bin/env python3
import fileinput
import re
# Matches one "({ ... })" alignment group as emitted in GIZA++ A3 output.
REGEX_ALIGNMENTS = re.compile(r'\({[^}]*}\)')
# Sentinel substituted for input lines that cannot be decoded as UTF-8.
FAIL_STRING = "ASDFFDS42fsdafads"
def get_id(line):
    """Extract the sentence id from a GIZA "Sentence pair (N) ..." header.

    e.g.: Sentence pair (79) source length 12 target length 15 alignment score : 5.75958e-25
    """
    open_paren = line.find("(") + 1
    close_paren = line.find(")")
    return int(line[open_paren:close_paren])
def get_talp_string(line):
    """Convert one GIZA++ A3 alignment line to a talp alignment string.

    *line* looks like ``NULL ({ }) w1 ({ 1 }) w2 ({ 2 3 })``: each
    ``({ ... })`` group lists the 1-based target positions aligned to the
    source token preceding it (group 0 is the NULL token).

    Returns 0-based "src-tgt" pairs joined by spaces, sorted by
    (src, tgt).  The original joined a ``set`` directly, so the pair
    order varied between runs; sorting makes the output deterministic
    without changing which alignments are emitted.
    """
    alignments = set()
    # The pattern is inlined (re caches compiled patterns) so the function
    # is self-contained; it matches the module-level REGEX_ALIGNMENTS.
    for src_pos, tgt_al_group in enumerate(re.finditer(r'\({[^}]*}\)', line)):
        # Group 0 belongs to the NULL token: skip alignments to NULL.
        if src_pos == 0:
            continue
        # [2:-2] removes ({ at the beginning and }) at the end of the string
        tgt_al_string = tgt_al_group.group()[2:-2]
        try:
            tgt_pos_set = {int(x) for x in tgt_al_string.split()}
        except ValueError:
            # Diagnose the malformed group, then abort (as before).
            print(line)
            print(tgt_al_group.group())
            exit(1)
        for tgt_pos in tgt_pos_set:
            # Convert both positions from 1-based to 0-based.
            alignments.add((src_pos - 1, tgt_pos - 1))
    return " ".join("{}-{}".format(s, t) for s, t in sorted(alignments))
if __name__ == "__main__":
    # sentence_id -> talp string; a later duplicate id overwrites the earlier.
    alignments = {}
    lines = []
    skipped_max = 0
    # NOTE(review): `error` is never used anywhere below.
    error = False
    for line in fileinput.input(mode='rb'):
        try:
            line = line.decode("utf-8")
            lines.append(line)
        except UnicodeDecodeError:
            # Keep the 3-line rhythm but poison the triple so it is skipped.
            lines.append(FAIL_STRING)
        # 3 lines describe one sentence
        assert len(lines) <= 3
        if len(lines) == 3:
            sentence_id = get_id(lines[0])
            if FAIL_STRING not in lines:
                talp_string = get_talp_string(lines[2])
                # mgiza produced multiple times the same sentence id
                alignments[sentence_id] = talp_string
            else:
                skipped_max = max(skipped_max, sentence_id)
            lines = []
    # Emit one talp line per sentence, in ascending sentence-id order.
    for sentence_id in sorted(alignments.keys()):
        print(alignments[sentence_id], flush=True)
    # print(skipped_max)
| [
"bing.rao@outlook.com"
] | bing.rao@outlook.com |
927cf692f6e4c73c01ee931bcd6f0ccc32a25f3a | 79aae87837d01e0ddd82e77d5b3bbf2e60e3d076 | /.history/my_django_app/tareapp/views_20210501203254.py | 45455eaceb2a112dc29990d8d6dfbaa6757ca4dd | [] | no_license | jiisamitt/Taller-de-integracion | d4c25c8692a8201f5fe5846c6214f574ecb399a6 | cedc999149a63c60b445d726d67a33b1c9cc1287 | refs/heads/main | 2023-05-06T17:55:08.812455 | 2021-05-19T01:38:56 | 2021-05-19T01:38:56 | 363,818,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | from django.shortcuts import render
from .models import Artist, Album, Track
# Create your views here.
from ninja import NinjaAPI
from django.http import JsonResponse
api = NinjaAPI()
@api.get("/hello")
def hello(request):
return JsonResponse("Hello world", safe=False)
#########GET#######
@api.get("/artists")
def read_artist(request):
artistas = Artista.objects.all()
lista= []
for artista in artistas:
lista.append(artista.diccionario())
return JsonResponse(lista, safe=False)
@api.get("/artists/{artist_id}")
def read_artist(request, artist_id: int):
a = Artist.models.get(id = artist_id)
return JsonResponse(a.diccionario())
############POST###########
@api.post("/path")
def list_artistas(request):
body = json.load(request)
if 'name' in body and 'age' in body:
name = body['name']
age = body['age']
artist_id = coder(name)
if Artista.objects.filter(id=artist_id).exists():
data = 'Ya existe este artista'
return JsonResponse(data,status = 409, safe=False)
else:
to_create = {
'id': artist_id,
'name': name,
'age': age,
'albums': base_url+'atists/'+str(artist_id)+'/albums',
'tracks': base_url+'atists/'+str(artist_id)+'/tracks',
'Self': base_url+'atists/'+str(artist_id),
}
artist = Artista.objects.create(**to_create)
return JsonResponse(artist.diccionario(), status=201)
| [
"jiisamitt@uc.cl"
] | jiisamitt@uc.cl |
6313ce3c9888ef5c1cb94158eac8d12924ff763f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02865/s890600458.py | 1a486eb538995fa9afae5b083b50858eda56c26a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34 | py | n=int(input())
print(int((n-1)/2)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c5750e26f74d1e9f2ae44c7e2ada0083a67e0fc8 | 29332b729802dd04b973167c357a7db288a8ec3e | /blog/templatetags/blog_tags.py | dd552f0e1f86ed3835671766f3ee1b394ca64104 | [
"MIT"
] | permissive | michiko5173/ano | 017912cbd7d9b29617e3e559f907127dc11de26f | 7b9e1bdf56098948fbcc7f9897cc1cd94752b007 | refs/heads/master | 2022-12-12T02:15:43.599188 | 2018-07-13T01:48:38 | 2018-07-13T01:48:38 | 140,777,273 | 0 | 0 | MIT | 2022-12-08T02:16:25 | 2018-07-13T00:27:17 | Python | UTF-8 | Python | false | false | 9,294 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: liangliangyy
@license: MIT Licence
@contact: liangliangyy@gmail.com
@site: https://www.lylinux.org/
@software: PyCharm
@file: blog_tags.py
@time: 2016/11/2 下午11:10
"""
from django import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
import random
from django.urls import reverse
from blog.models import Article, Category, Tag, Links, SideBar
from django.utils.encoding import force_text
from django.shortcuts import get_object_or_404
import hashlib
import urllib
from comments.models import Comment
from DjangoBlog.utils import cache_decorator
from django.contrib.auth import get_user_model
from oauth.models import OAuthUser
from django.contrib.sites.models import Site
import logging
logger = logging.getLogger(__name__)
register = template.Library()
@register.simple_tag
def timeformat(data):
    """Format *data* with settings.TIME_FORMAT; return '' on any failure."""
    try:
        return data.strftime(settings.TIME_FORMAT)
    except Exception as e:
        # Broad catch is deliberate: a bad value must not break rendering.
        logger.error(e)
        return ""
@register.simple_tag
def datetimeformat(data):
    """Format *data* with settings.DATE_TIME_FORMAT; return '' on failure."""
    try:
        formatted = data.strftime(settings.DATE_TIME_FORMAT)
    except Exception as exc:
        # Never let a bad value break template rendering.
        logger.error(exc)
        return ""
    return formatted
@register.filter(is_safe=True)
@stringfilter
def custom_markdown(content):
    """Render *content* as Markdown and mark the resulting HTML as safe."""
    from DjangoBlog.utils import CommonMarkdown
    return mark_safe(CommonMarkdown.get_markdown(content))
@register.filter(is_safe=True)
@stringfilter
def truncatechars_content(content):
    """
    Return the truncated summary of an article body.
    :param content: article HTML content
    :return: HTML truncated to the site's configured summary length
    """
    from django.template.defaultfilters import truncatechars_html
    from DjangoBlog.utils import get_blog_setting
    blogsetting = get_blog_setting()
    return truncatechars_html(content, blogsetting.article_sub_length)
@register.filter(is_safe=True)
@stringfilter
def truncate(content):
    """Strip HTML tags and keep at most the first 150 characters."""
    from django.utils.html import strip_tags
    plain = strip_tags(content)
    return plain[:150]
@register.inclusion_tag('blog/tags/breadcrumb.html')
def load_breadcrumb(article):
    """
    Build the breadcrumb trail for an article.
    :param article: article model instance
    :return: context with (name, url) pairs from site root to the article
    """
    names = article.get_category_tree()
    from DjangoBlog.utils import get_blog_setting
    blogsetting = get_blog_setting()
    site = Site.objects.get_current().domain
    # Append the site root, then reverse so the trail reads root -> leaf.
    names.append((blogsetting.sitename, site))
    names = names[::-1]
    return {
        'names': names,
        'title': article.title
    }
@register.inclusion_tag('blog/tags/article_tag_list.html')
def load_articletags(article):
    """Render the tag list of *article*.

    Each entry is (url, article_count, tag, bootstrap_color), with the
    color chosen at random per tag.
    """
    tags_list = [
        (tag.get_absolute_url(),
         tag.get_article_count(),
         tag,
         random.choice(settings.BOOTSTRAP_COLOR_TYPES))
        for tag in article.tags.all()
    ]
    return {
        'article_tags_list': tags_list
    }
@register.inclusion_tag('blog/tags/sidebar.html')
def load_sidebar(user):
    """
    Load the sidebar: recent/most-read articles, categories, archives,
    links, latest comments, extra widgets and the tag cloud.
    """
    logger.info('load sidebar')
    from DjangoBlog.utils import get_blog_setting
    blogsetting = get_blog_setting()
    # 'p' = published articles only.
    recent_articles = Article.objects.filter(status='p')[:blogsetting.sidebar_article_count]
    sidebar_categorys = Category.objects.all()
    extra_sidebars = SideBar.objects.filter(is_enable=True).order_by('sequence')
    most_read_articles = Article.objects.filter(status='p').order_by('-views')[:blogsetting.sidebar_article_count]
    # One entry per (year, month) with published articles, newest first.
    dates = Article.objects.datetimes('created_time', 'month', order='DESC')
    links = Links.objects.all()
    commment_list = Comment.objects.filter(is_enable=True).order_by('-id')[:blogsetting.sidebar_comment_count]
    # show_adsense = settings.SHOW_GOOGLE_ADSENSE
    # Tag cloud: compute a font size per tag.
    # Size = (count / average count) * step + base, so popular tags render larger.
    increment = 5
    tags = Tag.objects.all()
    sidebar_tags = None
    if tags and len(tags) > 0:
        s = list(map(lambda t: (t, t.get_article_count()), tags))
        count = sum(map(lambda t: t[1], s))
        # Guard against division by zero when no tag has articles.
        dd = 1 if count == 0 else count / len(tags)
        sidebar_tags = list(map(lambda x: (x[0], x[1], (x[1] / dd) * increment + 10), s))
    return {
        'recent_articles': recent_articles,
        'sidebar_categorys': sidebar_categorys,
        'most_read_articles': most_read_articles,
        'article_dates': dates,
        'sidabar_links': links,
        'sidebar_comments': commment_list,
        'user': user,
        'show_google_adsense': blogsetting.show_google_adsense,
        'google_adsense_codes': blogsetting.google_adsense_codes,
        'open_site_comment': blogsetting.open_site_comment,
        'show_gongan_code': blogsetting.show_gongan_code,
        'sidebar_tags': sidebar_tags,
        'extra_sidebars': extra_sidebars
    }
@register.inclusion_tag('blog/tags/article_meta_info.html')
def load_article_metas(article, user):
    """Render the meta-info block (author, dates, views) for *article*."""
    context = {
        'article': article,
        'user': user,
    }
    return context
@register.inclusion_tag('blog/tags/article_pagination.html')
def load_pagination_info(page_obj, page_type, tag_name):
    """
    Build previous/next page URLs for an article list.
    :param page_obj: Django Paginator page object
    :param page_type: '' (index), or one of the Chinese archive labels
        used throughout the templates (tag / author / category archives)
    :param tag_name: the tag name, author name or category name, depending
        on page_type
    """
    previous_url = ''
    next_url = ''
    # Index page: plain page-number URLs.
    if page_type == '':
        if page_obj.has_next():
            next_number = page_obj.next_page_number()
            next_url = reverse('blog:index_page', kwargs={'page': next_number})
        if page_obj.has_previous():
            previous_number = page_obj.previous_page_number()
            previous_url = reverse('blog:index_page', kwargs={'page': previous_number})
    # Tag archive ("分类标签归档"): URLs carry the tag slug.
    if page_type == '分类标签归档':
        tag = get_object_or_404(Tag, name=tag_name)
        if page_obj.has_next():
            next_number = page_obj.next_page_number()
            next_url = reverse('blog:tag_detail_page', kwargs={'page': next_number, 'tag_name': tag.slug})
        if page_obj.has_previous():
            previous_number = page_obj.previous_page_number()
            previous_url = reverse('blog:tag_detail_page', kwargs={'page': previous_number, 'tag_name': tag.slug})
    # Author archive ("作者文章归档"): URLs carry the raw author name.
    if page_type == '作者文章归档':
        if page_obj.has_next():
            next_number = page_obj.next_page_number()
            next_url = reverse('blog:author_detail_page', kwargs={'page': next_number, 'author_name': tag_name})
        if page_obj.has_previous():
            previous_number = page_obj.previous_page_number()
            previous_url = reverse('blog:author_detail_page', kwargs={'page': previous_number, 'author_name': tag_name})
    # Category archive ("分类目录归档"): URLs carry the category slug.
    if page_type == '分类目录归档':
        category = get_object_or_404(Category, name=tag_name)
        if page_obj.has_next():
            next_number = page_obj.next_page_number()
            next_url = reverse('blog:category_detail_page',
                               kwargs={'page': next_number, 'category_name': category.slug})
        if page_obj.has_previous():
            previous_number = page_obj.previous_page_number()
            previous_url = reverse('blog:category_detail_page',
                                   kwargs={'page': previous_number, 'category_name': category.slug})
    return {
        'previous_url': previous_url,
        'next_url': next_url,
        'page_obj': page_obj
    }
"""
@register.inclusion_tag('nav.html')
def load_nav_info():
category_list = Category.objects.all()
return {
'nav_category_list': category_list
}
"""
@register.inclusion_tag('blog/tags/article_info.html')
def load_article_detail(article, isindex, user):
    """
    Render an article's detail block.
    :param article: article model instance
    :param isindex: True on list pages — the template shows only the summary
    :return: template context
    """
    from DjangoBlog.utils import get_blog_setting
    blogsetting = get_blog_setting()
    return {
        'article': article,
        'isindex': isindex,
        'user': user,
        'open_site_comment': blogsetting.open_site_comment,
    }
# return only the URL of the gravatar
# TEMPLATE USE: {{ email|gravatar_url:150 }}
@register.filter
def gravatar_url(email, size=40):
    """Return an avatar URL for *email*: the OAuth profile picture when one
    exists, otherwise a Gravatar URL with a site-specific default image."""
    usermodels = OAuthUser.objects.filter(email=email)
    if usermodels:
        # Prefer the first OAuth account that actually has a picture.
        o = list(filter(lambda x: x.picture is not None, usermodels))
        if o:
            return o[0].picture
    # Gravatar hashes the lower-cased email with MD5 (per the Gravatar spec).
    email = email.encode('utf-8')
    default = "https://resource.lylinux.net/image/2017/03/26/120117.jpg".encode('utf-8')
    return "https://www.gravatar.com/avatar/%s?%s" % (
        hashlib.md5(email.lower()).hexdigest(), urllib.parse.urlencode({'d': default, 's': str(size)}))
# return an image tag with the gravatar
# TEMPLATE USE: {{ email|gravatar:150 }}
@register.filter
def gravatar(email, size=40):
    """Return a safe ``<img>`` tag showing the gravatar for *email*."""
    src = gravatar_url(email, size)
    tag = '<img src="%s" height="%d" width="%d">' % (src, size, size)
    return mark_safe(tag)
@register.simple_tag
def query(qs, **kwargs):
    """ template tag which allows queryset filtering. Usage:
          {% query books author=author as mybooks %}
          {% for book in mybooks %}
            ...
          {% endfor %}
    """
    filtered = qs.filter(**kwargs)
    return filtered
| [
"liangliangyy@gmail.com"
] | liangliangyy@gmail.com |
a13201cdc3fe10e5273d7eda84cf72171719be25 | 42044df01721592b3a8de98ade7d1b322e8cb0db | /project v2/title_state.py | 0f391af5debe5de9bb1117514610ed0cc8be1083 | [] | no_license | jingle23/2D | 4069d0ca304ac68e4858c990ba993def4a3850f6 | 5e33b96d9c782fcc712df34fd2b90660d658cecb | refs/heads/master | 2020-06-05T08:40:37.733669 | 2015-12-14T18:35:26 | 2015-12-14T18:35:26 | 42,495,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | import game_framework
import main_state
from pico2d import *
name = "TitleState"
image = None
def enter():
global image
image = load_image('Background/title.png')
def exit():
global image
del(image)
def handle_events():
events = get_events()
for event in events:
if event.type == SDL_QUIT:
game_framework.quit()
else:
if (event.type, event.key) == (SDL_KEYDOWN, SDLK_ESCAPE):
game_framework.quit()
elif (event.type, event.key) == (SDL_KEYDOWN, SDLK_SPACE):
game_framework.change_state(main_state)
def draw():
clear_canvas()
image.draw(400, 300)
update_canvas()
def update():
pass
def pause():
pass
def resume():
pass
| [
"wlsrms23@naver.com"
] | wlsrms23@naver.com |
ad6d42e85d7e43cb070f0ced7a2556eec4858ae0 | 24a9165aad8676a58a48325d21403c1a8f2d8b63 | /actionToMenu.py | bb2353266320e833ec178d9c27a417ed70438fd5 | [] | no_license | xiajun325/IDA7script | e8b4dad5f7802b5d2780fbe0726cdcf0c7c76588 | c81b5fcb613201b7f03a75e5865cfbd00c845b5c | refs/heads/master | 2023-03-30T15:06:52.472695 | 2021-04-03T13:33:43 | 2021-04-03T13:33:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,641 | py | # coding=utf-8
import idaapi
from Qing.action_utils import Action
from PyQt5 import QtWidgets
import idc
class TestAction(Action):
    """Minimal IDA action used to demonstrate menu attachment."""
    # Text shown for the action in the IDA UI.
    description = "Test"
    # No keyboard shortcut.
    hotkey = ''
    def __init__(self):
        super(TestAction, self).__init__()
    def activate(self, ctx):
        # Invoked when the menu item is clicked; non-zero signals success.
        print("testAction activate")
        return 1
    def update(self, ctx):
        # Keep the action enabled in every context.
        print("testAction update")
        return idaapi.AST_ENABLE_ALWAYS
class MenuAttach(idaapi.plugin_t):
    """IDA plugin that registers TestAction and attaches it to a menu."""
    wanted_name = "menu attach"
    wanted_hotkey = ''
    # flags = idaapi.PLUGIN_MOD
    flags = 0
    comment = ''
    help = ''
    # Menu path the action is appended to.
    menu_name = 'View/Graphs/'
    def __init__(self):
        super(MenuAttach, self).__init__()
        self.testAction = TestAction()
    def init(self):
        """Register the action and attach it; keep the plugin loaded."""
        testAction = self.testAction
        # NOTE(review): 'TestActio tip' looks like a typo for 'TestAction tip'.
        action_desc = idaapi.action_desc_t(testAction.name, testAction.description, testAction, testAction.hotkey,
                                           'TestActio tip', 199)
        idaapi.register_action(action_desc)
        idaapi.attach_action_to_menu(MenuAttach.menu_name, testAction.name, idaapi.SETMENU_APP)
        return idaapi.PLUGIN_KEEP
    def term(self):
        # Detach on unload; the action itself is not unregistered here.
        idaapi.detach_action_from_menu(MenuAttach.menu_name, self.testAction.name)
    def run(self, arg):
        """Plugin hotkey/menu entry: prompt for a hex address and echo it."""
        text, confirmed = QtWidgets.QInputDialog.getText(
            None,
            "Input Dialog",
            "Please enter an hexadecimal address:",
            text="%X" % 123,
        )
        if confirmed:
            print(text)
        # z = idc.AskStr("hello", "address or function name")
        # print(z)
def PLUGIN_ENTRY():
    """IDA plugin entry point: return the plugin instance."""
    return MenuAttach()
| [
"1109162935@qq.com"
] | 1109162935@qq.com |
16b523fe67d2ae523036aafa03ab2b9a7446c5f1 | 1f124b3e177506f9df9d28829f4db2fff995822f | /smic/smic.py | 27ef197f5843d8ad6f2863e28ef2a66c0cb6e6f9 | [
"MIT"
] | permissive | dgreyling/smart_image_classifier | 9ed8186bc66639e76843007f6880d3da6ad67615 | 7f41011974f09437f2dd604786501e124ef3759a | refs/heads/master | 2020-03-27T10:55:09.712607 | 2017-11-07T13:24:51 | 2017-11-07T13:24:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,778 | py | from keras import applications
from keras.models import Model
from keras.optimizers import SGD, RMSprop, Adam
from keras.models import Sequential
from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.metrics import categorical_accuracy
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping
from keras.utils import to_categorical, multi_gpu_model
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.resnet50 import ResNet50
from keras.applications.inception_v3 import InceptionV3
from keras.models import load_model
import pandas as pd
import numpy as np
import cv2, os, sys, ast
from tqdm import tqdm
from collections import defaultdict
class SMIC():
def __init__(self, color = 1, dim = 256, gpu=0):
self.color = color
self.dim = dim
self.gpu = gpu
self.TRAIN_PATH = 'train/'
self.TEST_PATH = 'test/'
self.model = None
self.label_map = {}
self.rev_label_map = {}
self.train_images = []
self.train_labels = []
self.num_classes = -1
self.hyperparameters = {}
self.transfer_models = {'vgg16' : VGG16, 'vgg19' : VGG19, 'resnet50' : ResNet50, 'inception_v3' : InceptionV3}
self.optimizers = {'sgd' : 'SGD', 'rmsprop' : 'RMSprop', 'adam' : 'Adam'}
self.layers = {'dense' : Dense, 'dropout' : Dropout}
def read_image(self, path):
image = cv2.imread(path, self.color)
image = cv2.resize(image, (self.dim, self.dim))
iamge = np.array(image, np.float32) / 255.
return image
def prepare_train_data(self, data_location):
try:
train = pd.read_csv(os.path.join(data_location,'trainLabels.csv'))
for image_name in tqdm(train['image_id'].values):
try:
self.train_images.append(self.read_image(os.path.join(data_location, self.TRAIN_PATH, image_name)+'.png'))
except Exception as e:
print("Error reading image: " + repr(e))
except:
print("Error: Invalid location/ File not present.")
exit()
labels = train['label'].tolist()
self.num_classes = len(set(labels))
self.label_map = {k:v for v,k in enumerate(set(labels))}
self.rev_label_map = {v:k for v,k in enumerate(set(labels))}
self.train_labels = np.asarray([self.label_map[label] for label in labels])
def prepare_test_data(self):
pass
def create_model(self, hyperparameters):
base_model = self.transfer_models[hyperparameters['transfer_model']](weights='imagenet', include_top=False, input_shape=(self.dim, self.dim, 3))
for layer in base_model.layers:
layer.trainable=False
classifier = Flatten()(base_model.output)
for layer_param in hyperparameters['top_layers']:
classifier = self.layers[layer_param[0]](layer_param[1], activation=layer_param[2])(classifier)
classifier = Dense(self.num_classes, activation='softmax')(classifier)
model = Model(base_model.input, classifier)
model.compile(loss='categorical_crossentropy', optimizer = self.optimizers[hyperparameters['optimizer']], metrics=['accuracy'])
return model
def search_optimal_hyperparameters(self, samples_per_class = 50, check_epochs = 10):
search_result = {}
sample_train_images=[]
sample_labels_catgorical=[]
class_image_dict = defaultdict(list)
for ind in range(len(self.train_labels)):
class_image_dict[self.train_labels[ind]].append(self.train_images[ind])
for class_name in class_image_dict.keys():
class_images = class_image_dict[class_name][:samples_per_class]
sample_train_images.extend(class_images)
sample_labels_catgorical.extend([class_name]*len(class_images))
sample_labels_catgorical = to_categorical(sample_labels_catgorical)
for transfer_model in self.transfer_models.keys():
for optimizer in self.optimizers.keys():
layers=[]
for layer_count in range(1,3):
layers.append(['dense', 512, 'relu'])
hyperparameters={'transfer_model' : transfer_model, 'optimizer' : optimizer, 'top_layers' : layers}
model = self.create_model(hyperparameters)
history = model.fit(np.asarray(sample_train_images), np.asarray(sample_labels_catgorical), batch_size=32, epochs = check_epochs, validation_split = 0.1)
print history.history['acc']
search_result[str(hyperparameters)]=[history.history['acc'][-1], history.history['val_acc'][-1]]
print search_result
for hyperparameters, results in search_result.items():
if results[1] > results[0]*1.05:
del search_result[hyperparameters]
search_result = sorted(search_result.items(), key = lambda x: x[1], reverse=True)
print search_result
return ast.literal_eval(search_result[0][0])
def fit(self, hyperparameters, epochs, batch_size, fine_tune = False):
labels_categorical = to_categorical(self.train_labels)
self.model = self.create_model(hyperparameters)
history = self.model.fit(np.asarray(self.train_images), np.asarray(labels_categorical), batch_size=batch_size, epochs = epochs, validation_split = 0.1)
if fine_tune:
for layer in self.model.layers:
layer.trainable = False
self.model.compile(loss='categorical_crossentropy', optimizer= SGD(lr=1e-4, momentum=0.9), metrics=['accuracy'])
history_fine = self.model.fit(np.asarray(self.train_images), np.asarray(labels_categorical), batch_size=batch_size, epochs = epochs, validation_split = 0.1)
history.extend(history_fine)
return history
def predict(self, image_path):
image = self.read_image(image_name)
prediction = self.model.predict(np.asarray([image]))
prediction = np.argmax(prediction, axis=1)
return self.rev_label_map[prediction]
def visualize(self, summary=False):
if summary:
print self.model.summary()
def save(self, path):
self.model.save(path)
def load(self, path):
self.model = load_model(path)
| [
"anuragmishracse@gmail.com"
] | anuragmishracse@gmail.com |
1c9bc6c6faafc6aedb79166907f3152dcbcdea0e | 48e26473938aecf6ce172da231d47127c650be44 | /决策树(Decision_Tree)算法/Decision_Tree_1/tree_Fish.py | 5496f00f218fe3b4958f053cb607d801421b325f | [] | no_license | JunchuangYang/Machine-Learning | fa5ce6ecea6d2a54d67032cbe9540cff2211b021 | 9609d7cd4967318efd81c2490debfbc2989bfb4a | refs/heads/master | 2020-03-29T22:17:43.845422 | 2019-05-15T04:13:41 | 2019-05-15T04:13:41 | 150,412,820 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,657 | py | # -*- coding=utf-8 -*-
# 创建数据集
import numpy as np
import pandas as pd
def createDataSet():
row_data = {'no surfacing':[1,1,1,0,0],
'flippers':[1,1,0,1,1],
'fish':['yes','yes','no','no','no']}
dataSet = pd.DataFrame(row_data)
return dataSet
'''
函数功能:计算香农熵
参数说明:
dataSet :原始数据集
返回:
end:香农熵的值
'''
def calEnt(dataSet):
n = dataSet.shape[0] #数据总行数
#print(dataSet)
iset = dataSet.iloc[:,-1].value_counts() #标签的所有类别
#print(list(iset))
p = iset / n # 每一类标签所占比
#print(list(p))
ent = (-p * np.log2(p)).sum() # 计算信息熵
return ent
'''
函数功能:根据信息增益选择出最佳数据集切分的列
参数说明:
dataSet:原始数据集
返回:
axis:数据集最佳切分列的索引
'''
# 选择最优的列进行切分
def bestSplit(dataSet):
baseEnt = calEnt(dataSet) # 计算原始熵
bestGain = 0 # 初始化信息增益
axis = -1 # 初始化最佳分列,标签列
for i in range( dataSet.shape[1] - 1): # 对特征的每一列进行循环
levels = dataSet.iloc[:,i].value_counts().index # 提取出当前列的所有值
#print(levels)
ents = 0 #初始化子节点的信息熵
for j in levels: # 对当前列的每一个取值进行循环
childSet = dataSet[dataSet.iloc[:,i]==j] # 某一个子节点的dataFrame
#print(childSet)
ent = calEnt(childSet) # 计算子节点的信息熵
ents += (childSet.shape[0]/dataSet.shape[0])*ent # 计算当前列的信息熵
infoGain = baseEnt - ents # 计算当前列的信息增益
if(infoGain > bestGain):
bestGain = infoGain# 选取最大的信息增益
axis = i # 最大信息增益列所在的索引
return axis
'''
函数功能:按照给定的列划分数据集
参数说明:
dataSet: 原始数据集
axis: 指定的列索引
value:指定的属性值
返回:
redataSet:按照指定列数索引和属性值切分后的数据集
'''
def mySplit(dataSet,axis,value):
col = dataSet.columns[axis] # col = no surfacing
#print(col)
redataSet = dataSet.loc[dataSet[col] == value,:].drop(col,axis=1)
return redataSet
'''
函数功能:基于最大信息增益切分数据集,递归构建决策树
参数说明:
dataSet: 原始数据集(最后一列是标签)
返回:
myTree:字典形式的树
'''
def createTree(dataSet):
featlist = list(dataSet.columns) # 提取出数据集所有的列
# print(featlist)--->['no surfacing', 'flippers', 'fish']
classlist = dataSet.iloc[:,-1].value_counts() # 获取最后一列类标签
# print(list(classlist))-->[3, 2]
# 判断最多标签数目是否等于数据集行数,或者数据集是否只有一列
if classlist[0] == dataSet.shape[0] or dataSet.shape[1] == 1:
return classlist.index[0] # 如果是,返回类标签
axis = bestSplit(dataSet) # 确定出当前最佳分裂的索引
bestfeat = featlist[axis] # 获取该索引列对应的特征
myTree = {bestfeat:{}} # 采用字典嵌套的方式存储树信息
del featlist[axis] # 删除当前特征
valuelist = set(dataSet.iloc[:,axis]) # 提取最佳分列所有属性值
for value in valuelist: # 对每一个属性值递归建树
myTree[bestfeat][value] = createTree(mySplit(dataSet,axis,value))
#print(myTree)
return myTree
'''
函数功能:对一个测试实例进行分类
参数说明:
inputTree:已经生成的决策树
labels:存储选择的最优特征标签
testVec:测试数据列表,顺序对应原数据
返回:
classlabel:分类结果
'''
def classify(inputTree,labels,testVec):
firstStr = next(iter(inputTree)) # 获取决策树的第一个节点
secondDict = inputTree[firstStr] # 下一个字典
featIndex = labels.index(firstStr) # 第一个节点所在列的索引
for key in secondDict.keys():
if testVec[featIndex] == key:
if type(secondDict[key]) == dict:
classLabel = classify(secondDict[key],labels,testVec)
else:
classLabel = secondDict[key]
return classLabel
'''
函数功能:对测试集进行预测,并返回预测后的结果
函数说明:
train: 训练集
test:测试集
返回:
test:预测好分类的测试集
'''
def acc_classify(train,test):
inputTree = createTree(train) # 根据训练集生成一棵树
labels = list(train.columns) # 数据集所有的列名称
result = []
for i in range(test.shape[0]): # 对测试集中每一天数据进行循环
testVec = test.iloc[i,: -1] # 测试集中的一个实例
classLabel = classify(inputTree,labels,testVec) # 预测该实例的分类
result.append(classLabel)#将预测结果追加到result列表中
test['predict'] = result #aa将预测结果追加到测试集的最后一列
acc = (test.iloc[:,-1] == test.iloc[:,-2]).mean() # 计算准确率
print('模型预测准确率为{%.2f}'%acc)
return test
# 测试函数
def main():
dataSet = createDataSet()
train = dataSet
#.iloc:根据标签的所在位置,从0开始计数,选取列
#.loc:根据DataFrame的具体标签选取列
#data.iloc[0:2,8] # ',' 前的部分标明选取的行,‘,’后的部分标明选取的列
test = dataSet.iloc[:3,:] # 0,1,2行的数据作为测试数据
print(acc_classify(train , test))
if __name__ == '__main__':
main()
| [
"554002970@qq.com"
] | 554002970@qq.com |
17944a1883dd42251678b15ef4124732d9e722fa | a8042cb7f6a4daec26b8cea6b7da2cb7cb880a84 | /496_NextGreaterElementI.py | 3962e7c60ac961dc07daa18ad0a1a1e5d8f34394 | [] | no_license | renukadeshmukh/Leetcode_Solutions | 0108edf6c5849946623a75c2dfd57cbf9bb338e4 | 1211eac167f33084f536007468ea10c1a0ceab08 | refs/heads/master | 2022-11-10T20:48:42.108834 | 2022-10-18T07:24:36 | 2022-10-18T07:24:36 | 80,702,452 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py | '''
496. Next Greater Element I
You are given two arrays (without duplicates) nums1 and nums2 where nums1’s elements are subset of nums2. Find all the
next greater numbers for nums1's elements in the corresponding places of nums2.
The Next Greater Number of a number x in nums1 is the first greater number to its right
in nums2. If it does not exist, output -1 for this number.
Example 1:
Input: nums1 = [4,1,2], nums2 = [1,3,4,2].
Output: [-1,3,-1]
Explanation:
For number 4 in the first array, you cannot find the next greater number for it in the second array, so output -1.
For number 1 in the first array, the next greater number for it in the second array is 3.
For number 2 in the first array, there is no next greater number for it in the second array, so output -1.
Example 2:
Input: nums1 = [2,4], nums2 = [1,2,3,4].
Output: [3,-1]
Explanation:
For number 2 in the first array, the next greater number for it in the second array is 3.
For number 4 in the first array, there is no next greater number for it in the second array, so output -1.
Note:
All elements in nums1 and nums2 are unique.
The length of both nums1 and nums2 would not exceed 1000.
'''
class Solution(object):
def nextGreaterElement(self, findNums, nums):
"""
:type findNums: List[int]
:type nums: List[int]
:rtype: List[int]
"""
result = []
for fnum in findNums:
flag = False
nxtGreater = -1
for n in nums:
if n == fnum:
flag = True
if flag == True and n > fnum:
nxtGreater = n
break
result.append(nxtGreater)
return result | [
"renud1988@gmail.com"
] | renud1988@gmail.com |
ac1317e1f649bb38d9f032a1159d414fce1b195e | ab4ede55a042fdbf8f4ae72943c644e3d125b48b | /app/config/views.py | ec6bf64e82359646c04ec36783ab542193d5d992 | [] | no_license | rkfflrtodn/instagram | 40f1632bdf8e5840cc696bf670b12f3c6ecee346 | 3b138cc89da88ce01e9df3005881afcb058ec9b0 | refs/heads/master | 2022-12-12T17:01:08.182474 | 2018-11-22T13:25:59 | 2018-11-22T13:25:59 | 152,712,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | # from django.shortcuts import redirect
#
#
# def index(request):
# return redirect('posts:post-list') | [
"zxcehdghks@gmail.com"
] | zxcehdghks@gmail.com |
f6e0bd5c8a3d4cb5a8bfebd68aa95541db5b50c6 | 55766b93376f35c9959e5ab9ac292de5188c1d0f | /1.py | c21ec42f4a7b776a2bfbc4cd0122c34703d1ae47 | [] | no_license | AnatolyDomrachev/test-1 | 86284a4fbee63c213e7c75b8d3486cdd1fbf7ea1 | dcede87360bc73898d6a2179323c4e1dbfc154e9 | refs/heads/master | 2022-11-29T16:43:38.709219 | 2020-08-18T15:38:22 | 2020-08-18T15:38:22 | 288,495,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35 | py | print('hello');
print('good bye');
| [
"you@example.com"
] | you@example.com |
0978c5a042390a51f314350f00cfe245b9357ea9 | 5fff2532a7dacda0c48c409fea4700b9b3f31bd4 | /src-python/day22.py | 8a70f984b5b9e74580626da0a653188e30fee1e4 | [
"MIT"
] | permissive | virtuNat/aoc-12020 | 857faccc575fc5b89a31fdfd0b304d378bc8b448 | d08879ade8aaae0eb2f3ee3f4bca818646dd6c36 | refs/heads/main | 2023-02-08T21:29:45.376704 | 2020-12-26T15:52:07 | 2020-12-26T15:52:07 | 324,528,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | #!/usr/bin/env python
from functools import cache
from itertools import accumulate
from aoc import get_input
@cache
def combat(deck1, deck2, recurse, level=0):
if level:
m1 = max(deck1); m2 = max(deck2)
if m1 > m2 and m1 > 1 + len(deck1) + len(deck2):
return deck1, deck2
deck1 = list(deck1); deck2 = list(deck2)
seen = set()
while deck1 and deck2:
state = (tuple(deck1), tuple(deck2))
if state in seen: break
seen.add(state)
c1 = deck1.pop(0); c2 = deck2.pop(0)
if recurse and c1 <= len(deck1) and c2 <= len(deck2):
if combat(tuple(deck1[:c1]), tuple(deck2[:c2]), recurse, level+1)[0]:
deck1.extend((c1, c2))
else:
deck2.extend((c2, c1))
elif c1 > c2:
deck1.extend((c1, c2))
else:
deck2.extend((c2, c1))
return deck1, deck2
def main():
with get_input(__file__) as ifile:
deck1, deck2 = (
tuple(map(int, line.split('\n')[1:]))
for line in ifile.read().split('\n\n')
)
for flag in range(2):
w1, w2 = combat(deck1, deck2, flag)
print(sum(accumulate(w1 or w2))) # 1, 2
if __name__ == '__main__':
main()
| [
"janlothan@gmail.com"
] | janlothan@gmail.com |
562cc4a952c6cfa40b2f83a026396169f3908a41 | 1e6cb730dea7fb3afb3d166fd9388d53d226d7d5 | /WSGI/venv/lib/python3.4/heapq.py | 9744325bfdb6079ec82ea572f48c9fc8b327f177 | [] | no_license | colinfrankb/py10000 | adc761f1b71eeac40090c91390d0534f0cd53543 | 126e04c99182c72908666f5aecf460725c1ae9ec | refs/heads/master | 2021-01-10T21:05:36.459624 | 2016-09-26T07:52:45 | 2016-09-26T07:52:45 | 31,061,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | /Users/Frank/.pyenv/versions/3.4.2/lib/python3.4/heapq.py | [
"colinfrankb@gmail.com"
] | colinfrankb@gmail.com |
83eb959651aa8bd7a6a586e2cd39c7c55930733b | c9a49cbd10348e8023df2ebdb067486d14c9deea | /073/name_art.py | 57d9ede07be275b3a82346fa9d979beb485e6e0d | [] | no_license | kay0/programming-projects-for-n00bz | 48d5ba40cb44bed1a984667fbff6761c951466c0 | 0786dc173814e1a0cd887d49893e55c0072f1919 | refs/heads/master | 2021-01-22T08:48:20.872295 | 2014-02-17T19:11:51 | 2014-02-17T19:11:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,361 | py | #!/usr/bin/env python3
letters = {" 0": " ",
" 1": " ",
" 2": " ",
" 3": " ",
" 4": " ",
" 5": " ",
" 6": " ",
"a0": " ",
"a1": " ",
"a2": " _____ ",
"a3": "| _ | ",
"a4": "|___ | ",
"a5": " |_| ",
"a6": " ",
"b0": " _ ",
"b1": "| | ",
"b2": "| |___ ",
"b3": "| _ | ",
"b4": "| |_| | ",
"b5": "|_____| ",
"b6": " ",
"c0": " ",
"c1": " ",
"c2": " _____ ",
"c3": "| ___| ",
"c4": "| |___ ",
"c5": "|_____| ",
"c6": " ",
"d0": " _ ",
"d1": " | | ",
"d2": " ___| | ",
"d3": "| _ | ",
"d4": "| |_| | ",
"d5": "|_____| ",
"d6": " ",
"e0": " ",
"e1": " ",
"e2": " _____ ",
"e3": "| __ | ",
"e4": "| ___| ",
"e5": "|_____| ",
"e6": " ",
"f0": " ___ ",
"f1": " | _| ",
"f2": " _| |_ ",
"f3": "|_ _| ",
"f4": " | | ",
"f5": " |_| ",
"f6": " ",
"g0": " ",
"g1": " ",
"g2": " _____ ",
"g3": "| _ | ",
"g4": "| |_| | ",
"g5": "|___ | ",
"g6": "|_____| ",
"h0": " _ ",
"h1": "| | ",
"h2": "| |___ ",
"h3": "| _ | ",
"h4": "| | | | ",
"h5": "|_| |_| ",
"h6": " ",
"i0": " _ ",
"i1": "|_| ",
"i2": " _ ",
"i3": "| | ",
"i4": "| | ",
"i5": "|_| ",
"i6": " ",
"j0": " _ ",
"j1": " |_| ",
"j2": " _ ",
"j3": " | | ",
"j4": " _ | | ",
"j5": "| |_| | ",
"j6": "|_____| ",
"k0": " _ ",
"k1": "| | _ ",
"k2": "| |/ / ",
"k3": "| _/_ ",
"k4": "| _ | ",
"k5": "|_| |_| ",
"k6": " ",
"l0": " _ ",
"l1": "| | ",
"l2": "| | ",
"l3": "| | ",
"l4": "| | ",
"l5": "|_| ",
"l6": " ",
"m0": " ",
"m1": " _ ",
"m2": "| |_______ ",
"m3": "| _ _ | ",
"m4": "| | | | | | ",
"m5": "|_| |_| |_| ",
"m6": " ",
"n0": " ",
"n1": " ",
"n2": " _____ ",
"n3": "| _ | ",
"n4": "| | | | ",
"n5": "|_| |_| ",
"n6": " ",
"o0": " ",
"o1": " ",
"o2": " _____ ",
"o3": "| _ | ",
"o4": "| |_| | ",
"o5": "|_____| ",
"o6": " ",
"p0": " ",
"p1": " ",
"p2": " _____ ",
"p3": "| _ | ",
"p4": "| |_| | ",
"p5": "| ___| ",
"p6": "|_| ",
"q0": " ",
"q1": " ",
"q2": " _____ ",
"q3": "| _ | ",
"q4": "| |_| | ",
"q5": "|___ | ",
"q6": " |_| ",
"r0": " ",
"r1": " ",
"r2": " ____ ",
"r3": "| __| ",
"r4": "| | ",
"r5": "|_| ",
"r6": " ",
"s0": " ",
"s1": " ",
"s2": " _____ ",
"s3": "| ___| ",
"s4": "|____ | ",
"s5": "|_____| ",
"s6": " ",
"t0": " _ ",
"t1": " | | ",
"t2": "|_ _| ",
"t3": " | | ",
"t4": " | | ",
"t5": " |_| ",
"t6": " ",
"u0": " ",
"u1": " ",
"u2": " _ _ ",
"u3": "| | | | ",
"u4": "| |_| | ",
"u5": "|_____| ",
"u6": " ",
"v0": " ",
"v1": " ",
"v2": " _ _ ",
"v3": "| | | | ",
"v4": "| |_| | ",
"v5": "|____/ ",
"v6": " ",
"w0": " ",
"w1": " ",
"w2": " _ _ _ ",
"w3": "| | | || | ",
"w4": "| |_| || | ",
"w5": "|____//__| ",
"w6": " ",
"x0": " ",
"x1": " ",
"x2": "__ __ ",
"x3": "\ \_/ / ",
"x4": " | _ | ",
"x5": "/_/ \_\ ",
"x6": " ",
"y0": " ",
"y1": " ",
"y2": " _ _ ",
"y3": "| | | | ",
"y4": "| |_| | ",
"y5": "|___ | ",
"y6": "|_____| ",
"z0": " ",
"z1": " ",
"z2": " _____ ",
"z3": "|___ | ",
"z4": "| ___| ",
"z5": "|_____| ",
"z6": " ",
}
def get_input():
print("Type something fun:")
string_input = input("> ")
return string_input
def generate_art(s):
for i in range(0, len(s)):
print(letters[s[i].lower() + '0'], end="")
print()
for i in range(0, len(s)):
print(letters[s[i].lower() + '1'], end="")
print()
for i in range(0, len(s)):
print(letters[s[i].lower() + '2'], end="")
print()
for i in range(0, len(s)):
print(letters[s[i].lower() + '3'], end="")
print()
for i in range(0, len(s)):
print(letters[s[i].lower() + '4'], end="")
print()
for i in range(0, len(s)):
print(letters[s[i].lower() + '5'], end="")
print()
for i in range(0, len(s)):
print(letters[s[i].lower() + '6'], end="")
def main():
s = get_input()
generate_art(s)
if __name__ == "__main__":
main()
| [
"kayoproject@gmail.com"
] | kayoproject@gmail.com |
d76f17a3e54122471383f7595237bdae17a61185 | 3a68cd19b35fd59ab5c8af285b3be143479de396 | /pysc2/pysc2/tests/versions_test.py | 32d4f6052e7725782c4db512a00d0a234726c831 | [
"Apache-2.0"
] | permissive | kiriphorito/COMP3096---MARL | 7abe7f3e93d8740394f08d03898723392597ac11 | 5e05413b0980d60f4a3f2a17123178c93bb0b763 | refs/heads/master | 2021-09-10T15:55:44.022893 | 2018-03-29T00:03:11 | 2018-03-29T00:03:11 | 119,538,854 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,923 | py | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that every version in run_configs.google actually runs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
from pysc2 import run_configs
from pysc2.run_configs.platforms import VERSIONS
def major_version(v):
return ".".join(v.split(".")[:2])
class TestVersions(parameterized.TestCase):
@parameterized.parameters(sorted(VERSIONS.items()))
def test_versions(self, game_version, version):
self.assertEqual(game_version, version.game_version)
logging.info((" starting: %s " % game_version).center(80, "-"))
with run_configs.get().start(version=game_version) as controller:
ping = controller.ping()
logging.info("expected: %s", version)
logging.info("actual: %s", ", ".join(str(ping).strip().split("\n")))
self.assertEqual(major_version(ping.game_version),
major_version(version.game_version))
self.assertEqual(version.build_version, ping.base_build)
self.assertEqual(version.data_version.lower(),
ping.data_version.lower())
logging.info((" success: %s " % game_version).center(80, "-"))
if __name__ == "__main__":
absltest.main()
| [
"now-raymond@users.noreply.github.com"
] | now-raymond@users.noreply.github.com |
27fc6dcd930ae36d00ebec839c5c3f009e5a42be | ef158af9d47fb1f0c974b49405174ba5b34e4721 | /polu/a_la_main/particule.py | 48e5982b2cd644c636af5c45672802a034cecf13 | [] | no_license | LeGrosLezard/bobo | 1227bcae22d9eb7d9e0423009cae154df5466994 | 7c50de512fb22c8bdf1a1127307fc4fd2f371152 | refs/heads/master | 2020-07-01T17:38:14.145955 | 2019-07-01T21:29:49 | 2019-07-01T21:29:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | """Here we define for database
the numbers of particle,
ranking pollute in France and industrail poles"""
import requests
import datetime
import urllib.request
from bs4 import *
from CONFIG import PATH_PARTICLE_RATE
def particule2(lieu):
"""we search particule rate from plumelabs"""
nb = []
liste = []
path = PATH_PARTICLE_RATE.format(lieu)
request = requests.get(path)
page = request.content
soup_html = BeautifulSoup(page, "html.parser")
Property = soup_html.find_all("div", {'class':'report__pi-number'})
for i in Property:
liste.append(i.get_text())
for i in liste:
for j in i:
try:
j = int(j)
if j == int(j):
nb.append(str(j))
except:
pass
nb = ''.join(nb)
nb = int(nb)
polution = nb
return polution
def france(lieu):
liste = ["lyon", "marseille","paris","roubaix"]
c = 0
for i in liste:
if lieu == liste[0]:
return 'un'
break
elif lieu == liste[1]:
return 'deux'
break
elif lieu == liste[2]:
return 'trois'
break
elif lieu == liste[3]:
return 'quattre'
break
else:
return 'non'
break
c+=1
def industrie(lieu):
if lieu == 'lyon':
return 'oui'
elif lieu == 'paris':
return 'non'
elif lieu == 'marseille':
return 'oui'
| [
"noreply@github.com"
] | LeGrosLezard.noreply@github.com |
e2b6852e10dedc2433da7e611507517cfb5a6207 | d7641647d67d110e08997767e85bbea081c2537b | /bitmovin_api_sdk/encoding/statistics/statistics_api.py | 8cb831b413480ef89454e83eec9181f76b9fcaf5 | [
"MIT"
] | permissive | aachenmax/bitmovin-api-sdk-python | d3ded77c459852cbea4927ff28c2a4ad39e6026a | 931bcd8c4695a7eb224a7f4aa5a189ba2430e639 | refs/heads/master | 2022-11-16T08:59:06.830567 | 2020-07-06T07:16:51 | 2020-07-06T07:16:51 | 267,538,689 | 0 | 1 | MIT | 2020-07-06T07:16:52 | 2020-05-28T08:44:44 | Python | UTF-8 | Python | false | false | 2,754 | py | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.models.statistics import Statistics
from bitmovin_api_sdk.encoding.statistics.daily.daily_api import DailyApi
from bitmovin_api_sdk.encoding.statistics.encodings.encodings_api import EncodingsApi
from bitmovin_api_sdk.encoding.statistics.labels.labels_api import LabelsApi
from bitmovin_api_sdk.encoding.statistics.statistics_list_query_params import StatisticsListQueryParams
class StatisticsApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(StatisticsApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
self.daily = DailyApi(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
self.encodings = EncodingsApi(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
self.labels = LabelsApi(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def get(self, **kwargs):
# type: (dict) -> Statistics
"""Show Overall Statistics
:return: Service specific result
:rtype: Statistics
"""
return self.api_client.get(
'/encoding/statistics',
type=Statistics,
**kwargs
)
def list(self, from_, to, query_params=None, **kwargs):
# type: (date, date, StatisticsListQueryParams, dict) -> Statistics
"""Show Overall Statistics Within Specific Dates
:param from_: Start date, format: yyyy-MM-dd
:type from_: date, required
:param to: End date, format: yyyy-MM-dd
:type to: date, required
:param query_params: Query parameters
:type query_params: StatisticsListQueryParams
:return: Service specific result
:rtype: Statistics
"""
return self.api_client.get(
'/encoding/statistics/{from}/{to}',
path_params={'from': from_, 'to': to},
query_params=query_params,
pagination_response=True,
type=Statistics,
**kwargs
)
| [
"openapi@bitmovin.com"
] | openapi@bitmovin.com |
3fa6277d5fed03298c5e2cbebf437ed5eb267d71 | a2c90d183ac66f39401cd8ece5207c492c811158 | /Solving_Problem/daily_222/1216/1941.py | 3fcd0ee746db59aa838e64510132779712bee202 | [] | no_license | kwoneyng/TIL | 0498cfc4dbebbb1f2c193cb7c9459aab7ebad02a | c6fbaa609b2e805f298b17b1f9504fd12cb63e8a | refs/heads/master | 2020-06-17T11:53:38.685202 | 2020-03-18T01:29:36 | 2020-03-18T01:29:36 | 195,916,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | near = [[-1,0], [0,1], [1,0], [0,-1]]
def sellect(st, ls=[], rs=0):
if rs > 3:
return
if len(ls) == 7:
perm.append(ls)
return
for i in range(st, 25):
sellect(i+1,ls+[i],rs+bd[ht[i][0]][ht[i][1]])
def check():
global rs
for i in perm:
q = [i[0]]
vis = [[0]*5 for i in range(5)]
count = 0
while q:
idx = q.pop(0)
x,y = ht[idx]
if vis[x][y] == 0:
vis[x][y] = 1
count += 1
for a,b in near:
xi, yi = a+x, b+y
if 0 <= xi < 5 and 0 <= yi < 5:
if vis[xi][yi] == 0:
nidx = rht[(xi,yi)]
if nidx in i:
q.append(nidx)
if count == 7:
rs += 1
bd = []
for i in range(5):
data = list(input())
ls = []
for j in data:
if j == 'S':
ls.append(0)
else:
ls.append(1)
bd.append(ls)
cnt = 0
ht = {}
rht = {}
rs = []
start = [-1,-1]
perm = []
rs = 0
for x in range(5):
for y in range(5):
if bd[x][y] == 0 and start == [-1,-1]:
start = (x,y)
ht[cnt] = (x,y)
rht[(x,y)] = cnt
cnt += 1
sellect(rht[start])
check()
print(rs) | [
"nan308@naver.com"
] | nan308@naver.com |
47f86cfcedff9c6e9486d4d13f24348ee2f16128 | b595a92331fd798739abb1190650258c5998cd40 | /patterns/filtering.py | 7982a540833751afdc113d1337c4afc3bacaa63f | [] | no_license | dbulthuis/Class_Problems | 526b175b2b216b8d3722daa89ebc189ef2b65e25 | 261ffe0197427af619c5627607044550b8a848e9 | refs/heads/master | 2021-01-17T09:05:10.864706 | 2016-08-03T16:22:32 | 2016-08-03T16:22:32 | 64,411,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,317 | py | #!/usr/bin/python
import sys
import csv
import re
# To run this code on the actual data, please download the additional dataset.
# You can find instructions in the course materials (wiki) and in the instructor notes.
# There are some things in this data file that are different from what you saw
# in Lesson 3. The dataset is more complicated and closer to what you might
# see in the real world. It was generated by exporting data from a SQL database.
#
# The data in at least one of the fields (the body field) can include newline
# characters, and all the fields are enclosed in double quotes. Therefore, we
# will need to process the data file in a way other than using split(","). To do this,
# we have provided sample code for using the csv module of Python. Each 'line'
# will be a list that contains each field in sequential order.
#
# In this exercise, we are interested in the field 'body' (which is the 5th field,
# line[4]). The objective is to count the number of forum nodes where 'body' either
# contains none of the three punctuation marks: period ('.'), exclamation point ('!'),
# question mark ('?'), or else 'body' contains exactly one such punctuation mark as the
# last character. There is no need to parse the HTML inside 'body'. Also, do not pay
# special attention to newline characters.
def mapper():
reader = csv.reader(sys.stdin, delimiter='\t')
writer = csv.writer(sys.stdout, delimiter='\t', quotechar='"', quoting=csv.QUOTE_ALL)
for line in reader:
# YOUR CODE HERE
if not re.findall(r'[.?!]',line[4]) or \
(len(re.findall(r'\.$|\?$|!$',line[4])) >0 and re.search(r'[.?!]',line[4]).end() == len(line[4]) ):
writer.writerow(line)
test_text = """\"\"\t\"\"\t\"\"\t\"\"\t\"This is one sentence\"\t\"\"
\"\"\t\"\"\t\"\"\t\"\"\t\"Also one sentence!\"\t\"\"
\"\"\t\"\"\t\"\"\t\"\"\t\"Hey!\nTwo sentences!\"\t\"\"
\"\"\t\"\"\t\"\"\t\"\"\t\"One. Two! Three?\"\t\"\"
\"\"\t\"\"\t\"\"\t\"\"\t\"One Period. Two Sentences\"\t\"\"
\"\"\t\"\"\t\"\"\t\"\"\t\"Three\nlines, one sentence\n\"\t\"\"
"""
# This function allows you to test the mapper with the provided test string
def main():
import StringIO
sys.stdin = StringIO.StringIO(test_text)
mapper()
sys.stdin = sys.__stdin__
if __name__ == "__main__":
main() | [
"dbulthuis@gmail.com"
] | dbulthuis@gmail.com |
7e3e9ece6ae1c2ecd56110b1f3fcb44de6fb286a | 4799cb1f6bdb40b5a2fd6ec739346da3d087fe33 | /explog/config/urls.py | 640dad04e1f2904b7678991709911a7a5cfb2955 | [] | no_license | gangbok119/Team-project | fe4527ae53bec30f4e5738d57978b5c6f71bcebe | 8418e611699143e43f5113bf5e83975e1829883a | refs/heads/master | 2021-09-03T13:16:23.109377 | 2017-11-24T01:58:34 | 2017-11-24T01:58:34 | 111,489,261 | 0 | 0 | null | 2017-11-21T02:38:18 | 2017-11-21T02:38:18 | null | UTF-8 | Python | false | false | 841 | py | """explog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.IndexView.as_view(), name='index'),
]
| [
"kaythechemist@gmail.com"
] | kaythechemist@gmail.com |
62fb4bd8b4a3c2dce98be1e19cf29ad7013b7988 | 59b74e5ffc0b592ce80a6e6d2d6257dd46c4fc8c | /http-key-value-tester.py | 8eff29492f9c1043d50ffac5ae95253becf57ee3 | [] | no_license | msb1/async-http-keyvalue-tester | f0b4dbfabcce7e100ff4be423bb58efa2509ca8c | 0e8466f359661d53d91619d352dafe26c620f043 | refs/heads/master | 2022-11-20T00:15:12.682264 | 2020-07-08T13:31:39 | 2020-07-08T13:31:39 | 278,097,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,496 | py | import json
import random
import aiohttp
import asyncio
url = 'http://localhost:8080'
iterations = 1000000
def generate_random_string(strlength):
chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
s = ""
for _ in range(strlength):
idx = random.randint(0, 61)
s += chars[idx]
return s
async def main():
async with aiohttp.ClientSession() as session:
teststore = []
for it in range(iterations):
# generate random value string
v1 = random.randint(100, 500)
value = generate_random_string(v1)
payload = {'action': 'insert', 'key': str(it), 'val': str(value)}
async with session.post(url, json=payload) as resp:
status = resp.status
if status != 200:
print("Unsuccessful insert from HTTP - code={} with payload:\n{} ".format(status, payload))
continue
msg = await resp.text()
answer = json.loads(msg)
# print("Response {} received: {}".format(resp.status, msg))
# print("action: {}, key: {}, value: {}, success: {}".format(jmsg['action'], jmsg['key'], jmsg['val'], jmsg['success']))
if not answer['success']:
print("Unsuccessful insert at Database: ", answer)
continue
teststore.append(value)
if it % 10000 == 0:
print("---> Insert Iteration: ", it)
for it in range(iterations):
payload = {'action': 'find', 'key': str(it), 'val': ''}
async with session.post(url, json=payload) as resp:
status = resp.status
if status != 200:
print("Unsuccessful retrieve from HTTP - code={} with payload:\n{} ".format(status, payload))
continue
msg = await resp.text()
answer = json.loads(msg)
if not answer['success']:
print("Unsuccessful retrieve from Database: ", answer)
continue
if answer['val'] != teststore[it]:
print("Retrieve does not match teststore: ", answer)
continue
if it % 10000 == 0:
print("---> Retrieve Iteration: ", it)
if __name__ == "__main__":
# main()
loop = asyncio.get_event_loop()
loop.run_until_complete(main()) | [
"44041613+msb1@users.noreply.github.com"
] | 44041613+msb1@users.noreply.github.com |
9784160e26ac966f5afa53feef970bd9880c33df | bcf7a2d7d3220972316a374f5cb831706d50dad1 | /src/hub_pip/api/model/MetadataView.py | 080795182d2cd5809f99a3723881e801a5a6bf39 | [
"Apache-2.0"
] | permissive | blackducksoftware/hub-pip | 64d52565e2a384d08757d23c2b536c47d289c624 | fa37ac208827461d4c5918b0d54a802dbde5ca37 | refs/heads/master | 2021-07-10T03:13:55.243296 | 2017-10-11T15:26:32 | 2017-10-11T15:26:32 | 85,363,641 | 1 | 3 | null | 2017-05-04T15:25:01 | 2017-03-17T23:47:01 | Python | UTF-8 | Python | false | false | 221 | py | class MetadataView(object):
allow = None
href = None
links = None
attribute_map = {
"allow": "allow",
"href": "href",
"links": "links"
}
def __init__(self):
pass
| [
"mathewsj2@wit.edu"
] | mathewsj2@wit.edu |
066ce5740925cc80fbe9b0006bafe587fe53762a | 0ca8e83f1eccc18d59c6f98eb1dea70e2ba1e132 | /smtp_ex.py | 751a25d82ece9fc7f62a670c8e4ee8f970be8806 | [] | no_license | isarkhan95/python_project | 0d54f69ed3db224c9e909a2bf525bf1e1b0c3902 | d7292ff4288124c130b213c3d57c9fe10d9defdd | refs/heads/master | 2022-11-16T20:26:24.181816 | 2020-07-18T14:11:56 | 2020-07-18T14:11:56 | 280,668,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,597 | py | import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import email.mime.image as img
# me == my email address
# you == recipient's email address
me = "isarkhan95@gmail.com"
you = "isarkhan95@gmail.com"
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative')
msg['Subject'] = "Link"
msg['From'] = me
msg['To'] = you
# Create the body of the message (a plain-text and an HTML version).
text = "Hi! Isaar"
html = """\
<html>
<head></head>
<body>
<p>Hi!<br>
How are you?<br>
Here is the <a href="http://www.python.org">link</a> you wanted.
</p>
<div>
<img src="https://drive.google.com/file/d/1Y84K599cR5b1cJsWak7AEwDjSVayJXKe/view?usp=sharing" alt="Smiley face" height="300" width="900">
</div>
<div>
<img src="D:/text.png" alt="Smiley face" height="300" width="900">
</div>
</body>
</html>
"""
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# img_data = open('D:/chart.png', 'rb').read()
# img1=img.MIMEImage(img_data,'png')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
msg.attach(part1)
msg.attach(part2)
# msg.attach(img1)
# Send the message via local SMTP server.
mail = smtplib.SMTP('smtp.gmail.com', 587)
mail.ehlo()
mail.starttls()
mail.login('isarkhan95@gmail.com', 'N@ureenaslam786')
mail.sendmail(me, you, msg.as_string())
mail.quit() | [
"isarkhan95@gmail.com"
] | isarkhan95@gmail.com |
2982213b633aae5378196ba8985cb4851b3f8543 | 8dce9bdf64bd16ace5580444f45867a90f324dcf | /pyspark_scripts_logfiles/Python test scripts/2018-12-31/mife-copy4.py | 816624fde2c4764cac9bffec9da6059f29a07e85 | [] | no_license | SaralaSewwandi/pyspark | 160d91ef9f09102b8945c4471cdc21879f0b8041 | 8e41aa88778b89e29798c95691422cc984151421 | refs/heads/master | 2020-05-02T03:06:39.556096 | 2019-03-26T06:04:09 | 2019-03-26T06:04:09 | 177,719,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | from pyspark import SparkContext, SparkConf
conf = SparkConf().setAppName("MIFE Application").setMaster("local")
sc = SparkContext(conf=conf)
print("=====================================")
print(sc)
print(sc.version)
print("=====================================")
lines = sc.textFile("hdfs://sandbox:9000/user/root/bl/carbon.log")
#firstLine=lines.first()
#lineLengths = lines.map(lambda s: len(s))
#totalLength = lineLengths.reduce(lambda a, b: a + b)
print("===============first line==========")
#print(totalLength)
#print(firstLine)
print("==========RID Lines=========")
linesWithRID_count = lines.filter(lambda line: "DPIUsageInfoAPI" in line).count()
error_lines = lines.filter(lambda line: "ERROR" in line)
error_line_parts=error_lines.map(lambda line:line.split(" "))
#info_rows=info_lines.map(lambda line:line.split(" "))
print("==========================")
#request ==> no of columns 9
for row in error_line_parts.take(error_line_parts.count()):print(row[9])
print("============================")
| [
"sarala.kumarage@boolean-lab.com"
] | sarala.kumarage@boolean-lab.com |
36e9e5f41b2dce0377e76d241ecf9ea929a12da6 | d06a954f54ef6d1ddf19396618d36f8ddfe4c5fb | /demo2/users/views.py | 6dbe7294258050051778137b32cede53b134ea80 | [] | no_license | land-pack/flask-example | 76be2eb370505a895aa99ed4e189046887ec88bf | 16d55d693c99a03e0f492306ecc4f72f7e7c5b45 | refs/heads/master | 2020-03-10T03:54:45.835797 | 2018-07-05T06:28:10 | 2018-07-05T06:28:10 | 129,178,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | from flask import render_template, Blueprint
# config
users_blueprint = Blueprint('users', __name__, template_folder='templates')
@users_blueprint.route('/register')
def user_register():
return render_template('register.html')
@users_blueprint.route('/login')
def user_login():
return render_template('login.html')
@users_blueprint.route('/logout')
def user_logout():
return render_template('logout.html')
| [
"xuhongtian@seeletech.net"
] | xuhongtian@seeletech.net |
08f5e185ee56706b489941cc2f8531ad1dbbf181 | 14a9d65b4599392f1f81111a8dd0a15d9d235fb4 | /word_vector/fasttextProcess.py | c0b8036f72762de28e4abc32b4fd41365751baad | [] | no_license | AshkenSC/Python-Gadgets | ca39be842f59aebe35ba4be37a83601631ba721f | 8ff86c68eb332a1441ee87434b72f43864395566 | refs/heads/master | 2021-04-05T23:47:18.166385 | 2020-08-11T16:14:39 | 2020-08-11T16:14:39 | 125,301,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,937 | py | from random import shuffle
import pandas as pd
class _MD(object):
mapper = {
str: '',
int: 0,
list: list,
dict: dict,
set: set,
bool: False,
float: .0
}
def __init__(self, obj, default=None):
self.dict = {}
assert obj in self.mapper, \
'got a error type'
self.t = obj
if default is None:
return
assert isinstance(default, obj), \
f'default ({default}) must be {obj}'
self.v = default
def __setitem__(self, key, value):
self.dict[key] = value
def __getitem__(self, item):
if item not in self.dict and hasattr(self, 'v'):
self.dict[item] = self.v
return self.v
elif item not in self.dict:
if callable(self.mapper[self.t]):
self.dict[item] = self.mapper[self.t]()
else:
self.dict[item] = self.mapper[self.t]
return self.dict[item]
return self.dict[item]
def defaultdict(obj, default=None):
return _MD(obj, default)
class TransformData(object):
def to_csv(self, handler, output, index=False):
dd = defaultdict(list)
for line in handler:
label, content = line.split(',', 1)
dd[label.strip('__label__').strip()].append(content.strip())
df = pd.DataFrame()
for key in dd.dict:
col = pd.Series(dd[key], name=key)
df = pd.concat([df, col], axis=1)
return df.to_csv(output, index=index, encoding='utf-8')
def split_train_test(source, auth_data=False):
if not auth_data:
train_proportion = 0.8
else:
train_proportion = 0.98
basename = source.rsplit('.', 1)[0]
train_file = basename + '_train.txt'
test_file = basename + '_test.txt'
handel = pd.read_csv(source, index_col=False, low_memory=False)
train_data_set = []
test_data_set = []
for head in list(handel.head()):
train_num = int(handel[head].dropna().__len__() * train_proportion)
sub_list = [f'__label__{head} , {item.strip()}\n' for item in handel[head].dropna().tolist()]
train_data_set.extend(sub_list[:train_num])
test_data_set.extend(sub_list[train_num:])
shuffle(train_data_set)
shuffle(test_data_set)
with open(train_file, 'w', encoding='utf-8') as trainf,\
open(test_file, 'w', encoding='utf-8') as testf:
for tds in train_data_set:
trainf.write(tds)
for i in test_data_set:
testf.write(i)
return train_file, test_file
# 转化成csv
td = TransformData()
handler = open('data.txt', encoding='utf-8') # 使用utf-8格式,否则会报错
td.to_csv(handler, 'data.csv')
handler.close()
# 将csv文件切割,会生成两个文件(data_train.txt和data_test.txt)
train_file, test_file = split_train_test('data.csv', auth_data=True) | [
"393940378@qq.com"
] | 393940378@qq.com |
128fd01d414ac7e06e8f5bf213ad56fd619862fe | f7827259ab40a903ac52cd77176be47c35e22125 | /anunidecode/x7d.py | 0eb4b59dba0c24cd57a72c93209cfb18d9377ca9 | [] | no_license | DevTable/anunidecode | 143fec2df17c494aa3adbc0bfad9de5c0c7011eb | d59236a822e578ba3a0e5e5abbd3855873fa7a88 | refs/heads/master | 2021-01-10T19:20:45.172892 | 2014-08-01T19:14:46 | 2014-08-01T19:14:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py | page = [
'Ji ', 'Cha ', 'Zhou ', 'Xun ', 'Yue ', 'Hong ', 'Yu ', 'He ', 'Wan ', 'Ren ', 'Wen ', 'Wen ', 'Qiu ', 'Na ', 'Zi ', 'Tou ',
'Niu ', 'Fou ', 'Jie ', 'Shu ', 'Chun ', 'Pi ', 'Yin ', 'Sha ', 'Hong ', 'Zhi ', 'Ji ', 'Fen ', 'Yun ', 'Ren ', 'Dan ', 'Jin ',
'Su ', 'Fang ', 'Suo ', 'Cui ', 'Jiu ', 'Zha ', 'Kinu ', 'Jin ', 'Fu ', 'Zhi ', 'Ci ', 'Zi ', 'Chou ', 'Hong ', 'Zha ', 'Lei ',
'Xi ', 'Fu ', 'Xie ', 'Shen ', 'Bei ', 'Zhu ', 'Qu ', 'Ling ', 'Zhu ', 'Shao ', 'Gan ', 'Yang ', 'Fu ', 'Tuo ', 'Zhen ', 'Dai ',
'Zhuo ', 'Shi ', 'Zhong ', 'Xian ', 'Zu ', 'Jiong ', 'Ban ', 'Ju ', 'Mo ', 'Shu ', 'Zui ', 'Wata ', 'Jing ', 'Ren ', 'Heng ', 'Xie ',
'Jie ', 'Zhu ', 'Chou ', 'Gua ', 'Bai ', 'Jue ', 'Kuang ', 'Hu ', 'Ci ', 'Geng ', 'Geng ', 'Tao ', 'Xie ', 'Ku ', 'Jiao ', 'Quan ',
'Gai ', 'Luo ', 'Xuan ', 'Bing ', 'Xian ', 'Fu ', 'Gei ', 'Tong ', 'Rong ', 'Tiao ', 'Yin ', 'Lei ', 'Xie ', 'Quan ', 'Xu ', 'Lun ',
'Die ', 'Tong ', 'Si ', 'Jiang ', 'Xiang ', 'Hui ', 'Jue ', 'Zhi ', 'Jian ', 'Juan ', 'Chi ', 'Mian ', 'Zhen ', 'Lu ', 'Cheng ', 'Qiu ',
'Shu ', 'Bang ', 'Tong ', 'Xiao ', 'Wan ', 'Qin ', 'Geng ', 'Xiu ', 'Ti ', 'Xiu ', 'Xie ', 'Hong ', 'Xi ', 'Fu ', 'Ting ', 'Sui ',
'Dui ', 'Kun ', 'Fu ', 'Jing ', 'Hu ', 'Zhi ', 'Yan ', 'Jiong ', 'Feng ', 'Ji ', 'Sok ', 'Kase ', 'Zong ', 'Lin ', 'Duo ', 'Li ',
'Lu ', 'Liang ', 'Chou ', 'Quan ', 'Shao ', 'Qi ', 'Qi ', 'Zhun ', 'Qi ', 'Wan ', 'Qian ', 'Xian ', 'Shou ', 'Wei ', 'Qi ', 'Tao ',
'Wan ', 'Gang ', 'Wang ', 'Beng ', 'Zhui ', 'Cai ', 'Guo ', 'Cui ', 'Lun ', 'Liu ', 'Qi ', 'Zhan ', 'Bei ', 'Chuo ', 'Ling ', 'Mian ',
'Qi ', 'Qie ', 'Tan ', 'Zong ', 'Gun ', 'Zou ', 'Yi ', 'Zi ', 'Xing ', 'Liang ', 'Jin ', 'Fei ', 'Rui ', 'Min ', 'Yu ', 'Zong ',
'Fan ', 'Lu ', 'Xu ', 'Yingl ', 'Zhang ', 'Kasuri ', 'Xu ', 'Xiang ', 'Jian ', 'Ke ', 'Xian ', 'Ruan ', 'Mian ', 'Qi ', 'Duan ', 'Zhong ',
'Di ', 'Min ', 'Miao ', 'Yuan ', 'Xie ', 'Bao ', 'Si ', 'Qiu ', 'Bian ', 'Huan ', 'Geng ', 'Cong ', 'Mian ', 'Wei ', 'Fu ', 'Wei ',
'Yu ', 'Gou ', 'Miao ', 'Xie ', 'Lian ', 'Zong ', 'Bian ', 'Yun ', 'Yin ', 'Ti ', 'Gua ', 'Zhi ', 'Yun ', 'Cheng ', 'Chan ', 'Dai ',
]
| [
"jake@devtable.com"
] | jake@devtable.com |
dec1c98e80a8749dc73e49e0d50a6e2cd8e73cc5 | 0f7f1d8737f66da2443b3fcd5a2140ed34163146 | /Tools/broadAudio.py | 1f8468be93c5e9518e75cef686aed0e7b00fea9e | [] | no_license | lxy1492/TradeHelper | 2fd43e29b17e621953bee1a018c4a9642be70fac | 6df99dd25d0ecd8ddbd5aa9dcb0733d8784ed2c3 | refs/heads/main | 2023-01-20T13:57:09.340159 | 2020-11-30T11:54:58 | 2020-11-30T11:54:58 | 317,207,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | import os
import pyttsx3
try:
import win32com.client as win
except:
win=None
def voiceBroad(text):
if win==None:
return -1
if isinstance(text, str):
speak = win.Dispatch("SAPI.SpVoice")
speak.Speak(text)
elif isinstance(text, list):
speak = win.Dispatch("SAPI.SpVoice")
if len(text) > 0:
for each in text:
if each != "":
speak.Speak(each)
return 0
if __name__ == '__main__':
voiceBroad("测试一下能发出声音吗") | [
"415997348@qq.com"
] | 415997348@qq.com |
8799d4cb98eb9ede7976808b49107eafbecb05dc | c848015268e430b10f1bc39a2fd5a6f7a8cda44d | /bin/Compare_RILs_SV/lumpy/bin/vcf2gff.py | f189d20c3f24843a8e5d8b47d801c3effcbfef89 | [] | no_license | wangpanqiao/Transposition | 36b87d2f9032170112fce993f17454b6562bb108 | e102de63df2bcd5f7b41075a447eb937ee753832 | refs/heads/master | 2020-08-27T00:07:00.923134 | 2015-12-01T22:30:45 | 2015-12-01T22:30:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,819 | py | #!/opt/Python/2.7.3/bin/python
import sys
from collections import defaultdict
import numpy as np
import re
import os
import argparse
import glob
from Bio import SeqIO
sys.path.append('/rhome/cjinfeng/BigData/software/ProgramPython/lib')
from utility import gff_parser, createdir
import gzip
def usage():
test="name"
message='''
python vcf2gff.py --input GN22.sv.vcf.gz
'''
print message
def fasta_id(fastafile):
fastaid = defaultdict(str)
for record in SeqIO.parse(fastafile,"fasta"):
fastaid[record.id] = 1
return fastaid
#1 792446 1 N <DEL> . . SVTYPE=DEL;SVLEN=-1021;END=793467;STRANDS=+-:5;IMPRECISE;CIPOS=-10,10;CIEND=-10,6;CIPOS95=-1,1;CIEND95=-1,0;SU=5;PE=2;SR=3 GT:SU:PE:SR:CN ./.:5:2:3:2.07
#Chr1 Pindel1 Deletion 1033184 1033405 . . . Size=222;
def vcf2gff(infile, outfile):
ofile = open(outfile, 'w')
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
if len(line) > 2 and not line.startswith(r'#'):
unit = re.split(r'\t',line)
data = defaultdict(lambda : str())
annos = re.split(r';', unit[7])
for anno in annos:
print anno
#feature, value = re.split(r'\=', anno)
#data[feature] = value
#if data['SVTYPE'] == 'DEL':
# start = unit[1]
# end = data['END']
# size = abs(data['SVLEN'])
# print >> ofile, 'Chr%s\tSpeedseq\tDeletion\t%s\t%s\t.\t.\t.\tSize=%s;' %(unit[0], start, end, size)
ofile.close()
#1 792446 1 N <DEL> . . SVTYPE=DEL;SVLEN=-1021;END=793467;STRANDS=+-:5;IMPRECISE;CIPOS=-10,10;CIEND=-10,6;CIPOS95=-1,1;CIEND95=-1,0;SU=5;PE=2;SR=3 GT:SU:PE:SR:CN ./.:5:2:3:2.07
#Chr1 Pindel1 Deletion 1033184 1033405 . . . Size=222;
def vcf2gff_gz(infile):
cutoff = 500
filehd = ''
outfile= ''
if os.path.splitext(infile)[1] == '.gz':
outfile = re.sub(r'.vcf.gz', r'.gff', infile)
filehd = gzip.open (infile, 'rb')
else:
outfile = re.sub(r'.vcf', r'.gff', infile)
filehd = open (infile, 'rb')
ofile = open(outfile, 'w')
for line in filehd:
line = line.rstrip()
if len(line) > 2 and not line.startswith(r'#'):
unit = re.split(r'\t',line)
data = defaultdict(lambda : str())
annos = re.split(r';', unit[7])
for anno in annos:
#print anno
try:
feature, value = re.split(r'=', anno)
data[feature] = value
except:
continue
if data['SVTYPE'] == 'DEL':
start = unit[1]
end = data['END']
size = abs(int(data['SVLEN']))
if size >= cutoff:
print >> ofile, 'Chr%s\tSpeedseq\tDeletion\t%s\t%s\t.\t.\t.\tSize=%s;' %(unit[0], start, end, size)
ofile.close()
filehd.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input')
parser.add_argument('-o', '--output')
parser.add_argument('-v', dest='verbose', action='store_true')
args = parser.parse_args()
try:
len(args.input) > 0
except:
usage()
sys.exit(2)
vcf2gff_gz(args.input)
#if os.path.splitext(args.input)[1] == '.gz':
# outfile = re.sub(r'.vcf.gz', r'.gff', args.input)
# vcf2gff_gz(args.input, outfile)
#else:
# outfile = re.sub(r'.vcf', r'.gff', args.input)
# vcf2gff(args.input, outfile)
if __name__ == '__main__':
main()
| [
"jinfeng7chen@gmail.com"
] | jinfeng7chen@gmail.com |
ffba8fa530d1a9d8530420700237e7d66a45468b | 98d5a58202ce76741c0257b8a5fe27029536aef3 | /modules/extractNet_resnet.py | 2c67812f7765bbcb8757561233f73d47af1b03ed | [] | permissive | MNRKhan/aps360-project | 885ebc2bea1b1e7b28955e9b5be4a882e8e60813 | 1d91a4262c95cd6b5610aae16e1a30f2749a4373 | refs/heads/master | 2021-07-02T12:49:10.344370 | 2020-09-20T00:41:49 | 2020-09-20T00:41:49 | 170,795,039 | 3 | 2 | MIT | 2019-03-17T17:46:11 | 2019-02-15T03:17:12 | Jupyter Notebook | UTF-8 | Python | false | false | 2,935 | py | # extractNet_resnet.py
# Contains Interconnected Autoencoder model (Encoder ResNet, Decoder Resnet-mirror)
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
encode_out_r = []
def hook_r(module, input, output):
encode_out_r.append(output)
class extractNet_resnet(nn.Module):
def __init__(self, r_size = 152):
super(extractNet_resnet, self).__init__()
if r_size == 152:
resnet = torchvision.models.resnet152(pretrained=True)
elif r_size == 101:
resnet = torchvision.models.resnet101(pretrained=True)
else:
resnet = torchvision.models.resnet50(pretrained=True)
# Maxpool output layers
self.encoder_out_layers = [resnet.conv1,
resnet.maxpool,
resnet.layer1[0].downsample[-1],
resnet.layer2[0].downsample[-1],
resnet.layer3[0].downsample[-1],
resnet.layer4[-1].relu]
self.res = nn.Sequential(*list(resnet.children())[:-2])
# Freeze weights
for param in self.res.parameters():
param.requires_grad = False
# Save intermediate output values
for layer in self.encoder_out_layers:
layer.register_forward_hook(hook_r)
self.deconv1 = nn.ConvTranspose2d(2048, 1024, 3, stride=2, padding=1, output_padding=1)
self.deconv2 = nn.ConvTranspose2d(1024 + 1024, 512, 3, stride=2, padding=1, output_padding=1)
self.deconv3 = nn.ConvTranspose2d(512 + 512, 256, 3, stride=2, padding=1, output_padding=1)
self.deconv4 = nn.ConvTranspose2d(256 + 256, 64, 3, stride=1, padding=1)
self.deconv5 = nn.ConvTranspose2d(64 + 64, 64, 3, stride=2, padding=1, output_padding=1)
self.deconv6 = nn.ConvTranspose2d(64 + 64, 3, 3, stride=2, padding=1, output_padding=1)
self.deconv7 = nn.ConvTranspose2d(3, 1, 3, stride=1, padding=1)
def forward(self, img):
global encode_out_r
encode_out_r = []
out_res = self.res(img)
out = F.relu(self.deconv1(encode_out_r[-1]))
# print(out.shape)
out = torch.cat((out, encode_out_r[-4]), 1)
out = F.relu(self.deconv2(out))
# print(out.shape)
out = torch.cat((out, encode_out_r[-5]), 1)
out = F.relu(self.deconv3(out))
# print(out.shape)
out = torch.cat((out, encode_out_r[-6]), 1)
out = F.relu(self.deconv4(out))
# print(out.shape)
out = torch.cat((out, encode_out_r[-7]), 1)
out = F.relu(self.deconv5(out))
# print(out.shape)
out = torch.cat((out, encode_out_r[-8]), 1)
out = F.relu(self.deconv6(out))
# print(out.shape)
#out = torch.cat((out, img), 1)
out = self.deconv7(out)
return out
| [
"mohsin.hasan@mail.utoronto.ca"
] | mohsin.hasan@mail.utoronto.ca |
4b4f5409ba78d98922a6b99bf6dc945f87eab890 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /iP4ixkQffELyHvHi5_20.py | 05b3289c03b50db6a166632e2631ae1c484a3db9 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | """
Given radius `r` and height `h` (in cm), calculate the **mass** of a cylinder
when it's filled with water and the cylinder itself doesn't weigh anything.
The desired output should be given in kg and rounded to two decimal places.
How to solve:
* Calculate the volume of the cylinder.
* Convert cm³ into dm³.
* 1dm³ = 1L, 1L is 1Kg.
### Examples
weight(4, 10) ➞ 0.5
weight(30, 60) ➞ 169.65
weight(15, 10) ➞ 7.07
### Notes
* I recommend importing `math`.
* If you get stuck on a challenge, find help in **Resources**.
"""
from math import *
def weight(r, h):
return round((pi*pow(r,2)*h)/1000,2)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
997d5dd9d73da798d2cd7ce51c64790f47654917 | 5a522d6950c081611316d7b3c5d7bc6a62730873 | /venv/Scripts/easy_install-3.6-script.py | da09ab1a06b19c2b0326d2f01732f0f5e5cdaee9 | [] | no_license | Tree-lcf/Api_Test_framework | 219cec640b2a7b2651aa7dbd1cb94fa895640648 | a3eea20048c4d146002174e5ade7a9894841eebc | refs/heads/master | 2020-03-18T12:25:23.337664 | 2018-05-24T01:59:26 | 2018-05-24T01:59:41 | 134,644,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | #!F:\python_work\Api\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install-3.6')()
)
| [
"lincf97@163.com"
] | lincf97@163.com |
f391943f2a15c651c6a501877029913d0b83fdf6 | 941f7fe303318c1b71e94181b4e2e266efb043db | /run.py | 1b0c7744db5e9b0d65a7bb79a7165a2f0380a9cc | [] | no_license | jshiv/cronicle-sample | 86c13a5e7a81a79467d5c34a3e2aecc445595da8 | f99ad6af7dec32830d21dc555906123474509cf8 | refs/heads/master | 2020-07-25T02:21:20.386878 | 2019-09-21T05:12:20 | 2019-09-21T05:12:20 | 208,131,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | import random
if __name__=="__main__":
x = random.random()
print("X: {}".format(x))
| [
"jason.shiverick@udemy.com"
] | jason.shiverick@udemy.com |
bcfa68567d4997a04693e4b0c4ec9db821fb7a24 | f452078b7a73161b7676922867df62d9a89f6fd5 | /config/jinja2.py | d9a9afefa6aa8698299abcebc52db498fe84a23f | [] | no_license | arthexis/django-template | 07a11b4c6fe6a9029153c95fdeaaec715315c1bc | be9fa72f804f7fb1aa0992527f74626fec664e52 | refs/heads/master | 2021-01-19T10:42:52.932445 | 2016-07-09T01:07:51 | 2016-07-09T01:07:51 | 62,763,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.urlresolvers import reverse
from jinja2 import Environment
from django.conf import settings
def environment(**options):
env = Environment(**options)
env.globals.update({
'static': staticfiles_storage.url,
'url': reverse,
'settings': settings,
})
return env
| [
"arthexis@gmail.com"
] | arthexis@gmail.com |
c727646241ae9d0f8a55917eecd853bf202125fb | 1e43fd5e134157e6f034327ffbf3e6501c67275d | /mlps/core/apeflow/interface/utils/__init__.py | b86fbf8d8db90190972c4e419cdd67cbb59838e6 | [
"Apache-2.0"
] | permissive | sone777/automl-mlps | f15780e23142e0f3f368815678959c7954966e71 | a568b272333bc22dc979ac3affc9762ac324efd8 | refs/heads/main | 2023-08-24T10:07:30.834883 | 2021-11-03T07:41:15 | 2021-11-03T07:41:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | from . import gs, pytorch, tf
| [
"wonjoon.lee@seculayer.com"
] | wonjoon.lee@seculayer.com |
3bc90fdc10f6e6da998132d2552b5caf83b7a148 | 5ae6d9dff1536e0f45d02a35024d2406de1934bd | /job_board/apis/authentication.py | 4b5eab0ce42bf4e5d2f2b0ff4ae3a2da525e5df9 | [] | no_license | Mediusware-Ltd/dajngo-job-board | 879a397a05d0ab1f31da81e3363c4d9f9bf4f6ec | ea130e2ef81ab332067fc5dbfb20752b627c2f0d | refs/heads/master | 2023-06-25T01:15:21.461024 | 2021-07-29T16:34:18 | 2021-07-29T16:34:18 | 390,732,834 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,637 | py | from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.mixins import CreateModelMixin
from rest_framework.response import Response
from rest_framework.views import APIView
from job_board.auth.CandidateAuth import CandidateAuth, CredentialsSerializer
from job_board.models import Candidate
from job_board.serializers.candidate_serializer import CandidateSerializer, CandidateUpdateSerializer
from job_board.serializers.password_reset import SendOTPSerializer, ResetPasswordSerializer
class Registration(CreateModelMixin, GenericAPIView):
"""
Candidate registration requires a form data with
"""
queryset = Candidate.objects.all()
serializer_class = CandidateSerializer
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class Login(GenericAPIView, CreateModelMixin):
"""
Candidate Login
candidate only can able to login with email & password
send a post request with a valid json format { "email" : "<your@email>", "password": "<your password>" }
"""
serializer_class = CredentialsSerializer
def post(self, request, format=None):
auth = CandidateAuth()
return auth.auth_token(request)
class User(APIView):
"""
Candidate information
TODO : update profile update will be in post method
"""
authentication_classes = [CandidateAuth]
def get(self, request, format=None):
serialize = CandidateSerializer(request.user, context={"request": request})
return Response(serialize.data)
def post(self, request, format=None):
serialize = CandidateUpdateSerializer(data=request.data)
if serialize.is_valid():
serialize.update(instance=request.user, validated_data=serialize.validated_data)
return Response(serialize.data)
return Response(serialize.errors, status=status.HTTP_400_BAD_REQUEST)
class SendOTP(GenericAPIView, CreateModelMixin):
serializer_class = SendOTPSerializer
queryset = Candidate.objects.all()
def post(self, request, *args, **kwargs):
self.create(request, *args, **kwargs)
return Response({'message': 'OTP has been sent'}, status=status.HTTP_200_OK)
class ResetPasswordView(GenericAPIView, CreateModelMixin):
serializer_class = ResetPasswordSerializer
def post(self, request, *args, **kwargs):
self.create(request, *args, **kwargs)
return Response({'message': 'Candidate password has been updated successfully'})
class ChangeCandidatePassword(GenericAPIView):
authentication_classes = [CandidateAuth]
| [
"kmrifat@gmail.com"
] | kmrifat@gmail.com |
75b8da8e95f8839860db8e1e3e2331f914bb49fe | 9e567b8241ce00e9d53843f5aba11c4a119b079f | /tags/v0_64/htdocs/examples/color_demo.py | 0acc32eb62a054c76bf5460b61b170340a7868bb | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | neilpanchal/matplotlib | 3d2a7133e858c4eefbb6c2939eb3f7a328b18118 | 7565d1f2943e0e7b4a3f11ce692dfb9b548d0b83 | refs/heads/master | 2020-06-11T09:20:43.941323 | 2011-01-21T21:50:16 | 2011-01-21T21:50:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | #!/usr/bin/env python
"""
matplotlib gives you 3 ways to specify colors,
1) as a single letter string, ala matab
2) as an html style hex string
3) as an R,G,B tuple, where R,G,B, range from 0-1
See help(colors) for more info.
"""
from matplotlib.matlab import *
subplot(111, axisbg=(0.1843, 0.3098, 0.3098))
#subplot(111, axisbg='#ababab')
t = arange(0.0, 2.0, 0.01)
s = sin(2*pi*t)
plot(t, s, 'y')
xlabel('time (s)', color='r')
ylabel('voltage (mV)', color='k')
title('About as silly as it gets, folks', color='#afeeee')
show()
| [
"(no author)@f61c4167-ca0d-0410-bb4a-bb21726e55ed"
] | (no author)@f61c4167-ca0d-0410-bb4a-bb21726e55ed |
bda4691d48a3c76ade290758ddb17e4cdf7774a4 | b55988591d370a26ac62dcc99f7033364b9ec035 | /easy/116-morse-code/morse_code.py | 635155c730ede46af91d26a5b162481542e8ed98 | [
"MIT"
] | permissive | cyr1z/codeeval-problem-statements | 979a1b5d6547ca4f0ed25e618b545858eda7a36e | dff8e46c353c8fdd14cbd0243d878e863d76ec41 | refs/heads/master | 2022-11-24T11:50:34.144785 | 2020-08-03T20:20:24 | 2020-08-03T20:20:24 | 282,584,302 | 1 | 0 | null | 2020-07-26T05:41:36 | 2020-07-26T05:41:36 | null | UTF-8 | Python | false | false | 2,266 | py | import sys
morse_code = {
"0": "-----",
"1": ".----",
"2": "..---",
"3": "...--",
"4": "....-",
"5": ".....",
"6": "-....",
"7": "--...",
"8": "---..",
"9": "----.",
"a": ".-",
"b": "-...",
"c": "-.-.",
"d": "-..",
"e": ".",
"f": "..-.",
"g": "--.",
"h": "....",
"i": "..",
"j": ".---",
"k": "-.-",
"l": ".-..",
"m": "--",
"n": "-.",
"o": "---",
"p": ".--.",
"q": "--.-",
"r": ".-.",
"s": "...",
"t": "-",
"u": "..-",
"v": "...-",
"w": ".--",
"x": "-..-",
"y": "-.--",
"z": "--..",
".": ".-.-.-",
",": "--..--",
"?": "..--..",
"!": "-.-.--",
"-": "-....-",
"/": "-..-.",
"@": ".--.-.",
"(": "-.--.",
")": "-.--.-"
}
morse_code_revert = dict((val, key.upper()) for key, val in morse_code.items())
def morse_word(x):
"""
convert an alphabet word to space separated word in morse code
:param x: str word
:return: str space separated word in morse code
"""
d = [x.lower() for x in list(x)]
return ' '.join(morse_code[i] for i in d)
def morse_string(x):
"""
convert an alphabet string to double space separated word in morse code
:param x: str string
:return: str double space separated string in morse code
"""
words_list = [x for x in x.strip().split()]
words = list(map(morse_word, words_list))
return ' '.join(words).strip()
def de_morse_word(x):
"""
convert a space separated word in morse code to alphabet word
:param x: str each letter separated by space char
:return: str word
"""
return ''.join(morse_code_revert[i] for i in x.split())
def de_morse_string(x):
"""
convert a double space separated string in morse code to alphabet string
:param x: str each word separated by 2 space chars.
:return: str text string
"""
words_list = [x for x in x.strip().split(' ')]
words = list(map(de_morse_word, words_list))
return ' '.join(words).strip()
if __name__ == '__main__':
filename = "input.txt"
if len(sys.argv) == 2:
filename = sys.argv[1]
with open(filename, "r") as read_file:
for line in read_file:
print(de_morse_string(line))
# print(morse_string(de_morse_string(line)) == line.strip())
| [
"cyr@zolotarev.pp.ua"
] | cyr@zolotarev.pp.ua |
a308a6b52879f1727b9b927f7284ace2d5ff57a2 | a95a9e8482317c89bd0d5ed4b2dac87dcb1c99d7 | /SyndromeMeninge.py | 881298919fe57cd89ea9880e2dd59ab613da3a01 | [] | no_license | edlingerpm/DetectionMvtFinal | 277cbbcd0e616a9e4947b48e4b91d85cd12b7d2b | 566ab0a54f7894271e787d67969430b5c9c6132d | refs/heads/master | 2021-03-11T16:32:00.858087 | 2020-03-25T10:43:45 | 2020-03-25T10:43:45 | 246,542,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,822 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 28 2020
@author: Pierre-Marie EDLINGER
"""
import cv2
import RutaipCommonFunctions as Rtp
from datetime import datetime
# State flags for the three neck-mobility tests (chin down, left profile,
# right profile). "Commence" flags mark that the spoken instruction for a
# test has already been played.
testMenton=False
testProfilGauche=False
testProfilDroit=False
visageFaceDecele=False
profileDecele=False
retourFace=False
testMentonCommence = False
testProfGaucheCommence = False
testProfDroitCommence = False
# Create an output directory and name the final image file.
Rtp.creationRepertoireImage()
cheminImage = './Images/images_TestYeux.jpg'
# Open the camera.
cap = Rtp.choixCamera()
#cap = cv2.VideoCapture(0)
# initialize the recognizers
face_cascade=cv2.CascadeClassifier("./Haarcascade/haarcascade_frontalface_alt2.xml")
profile_cascade=cv2.CascadeClassifier("./Haarcascade/haarcascade_profileface.xml")
now = datetime.now()
"""
trouver le visage de face
lui demander de baisser le menton --> le visage ne doit plus être décelé
merci, regardez en face de vous
lui demander de tourner la tête à droite --> le profil gauche doit être décelé
merci, regardez en face de vous
lui demander de tourner la tête à gauche --> un profil droit doit être décelé
merci, cet exercice a-t-il été douloureux pour vous?
Proposer fenêtre de réponse
donner résultat
"""
while True:
    later = datetime.now()
    difference = (later - now).total_seconds()
    # read the image from the cam
    ret, frame = cap.read()
    # converting to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect all the faces in the image
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    # detect all the profiles in the image
    profiles = profile_cascade.detectMultiScale(gray, 1.2, 5)
    #Rtp.joueSon("./Sons/OpenEyes.mp3")
    # for every face, draw a blue rectangle
    # =============================================================================
    # for x, y, width, height in faces:
    #     cv2.rectangle(frame, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
    # =============================================================================
    if len(faces)>0:
        visageFaceDecele=True
    else:
        visageFaceDecele=False
    # for every profile, draw a green rectangle
    # =============================================================================
    # for x1, y1, width1, height1 in profiles:
    #     cv2.rectangle(frame, (x1, y1), (x1 + width1, y1 + height1), color=(0, 255, 0), thickness=2)
    # =============================================================================
    if len(profiles)>0:
        profileDecele=True
    else:
        profileDecele=False
    # Test sequence:
    #   1. find the face front-on;
    #   2. ask the patient to lower the chin --> the frontal face must no
    #      longer be detected;
    #   3. ask them to look forward again, then turn the head right --> a
    #      (left) profile must be detected;
    #   4. look forward again, then turn left --> a (right) profile must be
    #      detected;
    #   5. finally ask whether the exercise was painful and report the answer.
    if (testMenton==False):
        if testMentonCommence==False:
            print("test menton")
            Rtp.joueSon("./Sons/BaisserMenton.mp3")
            testMentonCommence=True
        if visageFaceDecele==False:
            print("test menton fait")
            Rtp.joueSon("./Sons/LookForward.mp3")
            testMenton=True
            now = datetime.now()
    else : # chin test done, move on to the next tests
        if (testProfilGauche==False):
            later = datetime.now()
            difference = (later - now).total_seconds()
            # Wait 4 seconds after the previous instruction before speaking.
            if difference >=4 :
                if testProfGaucheCommence == False:
                    print("test profile gauche")
                    Rtp.joueSon("./Sons/TeteADroite.mp3")
                    testProfGaucheCommence=True
                if (profileDecele==True)&(visageFaceDecele==False):
                    print("test profil gauche fait")
                    testProfilGauche=True
                    retourFace=False
                    now = datetime.now()
                    Rtp.joueSon("./Sons/LookForward.mp3")
        else : # left-profile test done, move on to the right profile
            if (testProfilDroit==False):
                later = datetime.now()
                difference = (later - now).total_seconds()
                if difference >=4 :
                    if testProfDroitCommence == False:
                        print("test profile droit")
                        Rtp.joueSon("./Sons/TeteAGauche.mp3")
                        testProfDroitCommence=True
                    if (profileDecele==True)&(visageFaceDecele==False)&(retourFace==True):
                        print("test profil droit fait")
                        testProfilDroit=True
    # The patient has come back face-on between the two profile tests.
    if (testMenton)&(testProfilGauche)&(visageFaceDecele):
        retourFace = True
    # All three tests done: ask whether the exercise was painful.
    if (testMenton)&(testProfilGauche)&(testProfilDroit):
        #print("Demander si douleur")
        Rtp.joueSon("./Sons/Douloureux.mp3")
        #Rtp.dormir(1)
        if Rtp.poseQuestion("Questionnaire", "Ce test a-t-il été douloureux pour vous?"):
            print("Douloureux")
        else:
            print("Pas douloureux")
        break
    cv2.imshow("image", frame)
    # Quit when the "q" key is pressed.
    if cv2.waitKey(1) == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | edlingerpm.noreply@github.com |
e527bf93849739260c31b26f65301c4e9d1dcf95 | a67f7ae56ff7fcbd1fe665c77e7efc1ba54b7675 | /MazeGame/qlearning.py | 2c6b03bfd215a40e2f6b7f102d5c8844f9e6093e | [] | no_license | thdiaman/deeplearning | a013d16c59932ccfe62d87012613093d3a46e9d3 | 9ddc46b6ad257ad0885d1d5dc8eb64589d618f2b | refs/heads/master | 2021-07-02T02:08:52.778443 | 2019-05-31T23:42:14 | 2019-05-31T23:42:14 | 133,405,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,806 | py | import random
import numpy as np
from qmaze import Qmaze, show, completion_check
class Experience(object):
    """Replay buffer for Q-learning episodes plus training-target generation."""

    def __init__(self, model, max_memory=100, discount=0.95):
        self.model = model
        self.max_memory = max_memory
        self.discount = discount
        self.memory = []
        # Number of possible actions = width of the model's output layer.
        self.num_actions = model.output_shape[-1]

    def remember(self, episode):
        """Store one [envstate, action, reward, envstate_next, game_over]
        record, evicting the oldest entry once capacity is exceeded."""
        self.memory.append(episode)
        while len(self.memory) > self.max_memory:
            del self.memory[0]

    def predict(self, envstate):
        """Return the model's Q-value vector for a single environment state."""
        return self.model.predict(envstate)[0]

    def get_data(self, data_size=10):
        """Sample up to *data_size* stored episodes (without replacement) and
        build an (inputs, targets) pair for supervised training."""
        env_size = self.memory[0][0].shape[1]  # flattened 1d maze size
        mem_size = len(self.memory)
        batch = min(mem_size, data_size)
        inputs = np.zeros((batch, env_size))
        targets = np.zeros((batch, self.num_actions))
        chosen = np.random.choice(range(mem_size), batch, replace=False)
        for row, idx in enumerate(chosen):
            envstate, action, reward, envstate_next, game_over = self.memory[idx]
            inputs[row] = envstate
            # Actions not taken keep the model's own predictions as targets.
            targets[row] = self.predict(envstate)
            # Bellman backup: max_a' Q(s', a').
            best_next_q = np.max(self.predict(envstate_next))
            # Terminal transitions have no future reward to discount.
            targets[row, action] = reward if game_over else reward + self.discount * best_next_q
        return inputs, targets
# Exploration factor (epsilon-greedy); lowered once the win rate is high.
epsilon = 0.1

def qtrain(model, maze, **opt):
    """Train *model* to solve *maze* with Q-learning plus experience replay.

    Keyword options: n_epoch, max_memory, data_size, weights_file, name,
    visualize. Returns the trained model; stops early once the agent wins
    from every starting cell (completion_check).
    """
    global epsilon
    n_epoch = opt.get('n_epoch', 15000)
    max_memory = opt.get('max_memory', 1000)
    data_size = opt.get('data_size', 50)
    weights_file = opt.get('weights_file', "")
    name = opt.get('name', 'model')  # currently unused; kept for API compatibility
    visualize = opt.get('visualize', False)
    # If you want to continue training from a previous model,
    # just supply the h5 file name to weights_file option
    if weights_file:
        print("loading weights from file: %s" % (weights_file,))
        model.load_weights(weights_file)
    # Construct environment/game from numpy array: maze (see above)
    qmaze = Qmaze(maze)
    # Initialize experience replay object
    experience = Experience(model, max_memory=max_memory)
    win_history = []  # history of win/lose game
    hsize = qmaze.maze.size//2  # history window size
    win_rate = 0.0
    for epoch in range(n_epoch):
        # Each epoch is one full game from a random free starting cell.
        rat_cell = random.choice(qmaze.free_cells)  # or (0, 0)
        qmaze.reset(rat_cell)
        game_over = False
        # get initial envstate (1d flattened canvas)
        envstate = qmaze.observe()
        n_episodes = 0
        while not game_over:
            valid_actions = qmaze.valid_actions()
            if not valid_actions: break
            prev_envstate = envstate
            # Get next action: explore with probability epsilon, else greedy.
            if np.random.rand() < epsilon:
                action = random.choice(valid_actions)
            else:
                action = np.argmax(experience.predict(prev_envstate))
            # Apply action, get reward and new envstate
            envstate, reward, game_status = qmaze.act(action)
            if visualize:
                show(qmaze)
            if game_status == 'win':
                win_history.append(1)
                game_over = True
            elif game_status == 'lose':
                win_history.append(0)
                game_over = True
            else:
                game_over = False
            # Store episode (experience)
            episode = [prev_envstate, action, reward, envstate, game_over]
            experience.remember(episode)
            n_episodes += 1
            # Train neural network model on a replayed mini-batch each step.
            inputs, targets = experience.get_data(data_size=data_size)
            model.fit(inputs, targets, epochs=8, batch_size=16, verbose=0)
            loss = model.evaluate(inputs, targets, verbose=0)
        # Print stats
        print("Epoch %d/%d | Loss: %.2f | Episodes: %d | Win count: %d" %(epoch+1, n_epoch, loss, n_episodes, sum(win_history)))
        if len(win_history) > hsize:
            win_rate = sum(win_history[-hsize:]) / hsize
        # we simply check if training has exhausted all free cells and if in all
        # cases the agent won
        if win_rate > 0.9 : epsilon = 0.05
        if sum(win_history[-hsize:]) == hsize and completion_check(model, qmaze):
            print("Reached 100%% win rate at epoch: %d" % (epoch,))
            break
    return model
| [
"themisdiamantopoulos@hotmail.com"
] | themisdiamantopoulos@hotmail.com |
9b00c0d2275d1ad3ca8db698d431c061f2b019ba | bbd863498dc6104867a9148f7a0be281f7127a41 | /src/perfect_bot.py | 4dfb67d23046fe0c6d36863bb704d8d880035c2e | [
"MIT"
] | permissive | tongplw/Pretty-Perfect-Connect4 | ee37bebde384997fa4e0b9d8f16a2e86f87333b5 | 24d3d9c989ba2f3d77a824b00e0b07bf3776bad5 | refs/heads/main | 2023-01-12T17:22:45.500694 | 2020-11-11T15:12:15 | 2020-11-11T15:12:15 | 312,004,785 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | import time
import random
import requests
import numpy as np
import pandas as pd
# Remote Connect-4 perfect-play solver endpoint.
URL = 'http://connect4.ist.tugraz.at:8080/moveinfo'
# Local cache of already-solved positions: board-string key -> best column.
df = pd.read_csv('data/perfect_cache.csv')
d = df.set_index('k').to_dict()['v']

def get_perfect_move(game, cache=False):
    """Return the best column (0-6) to play for the current *game* position.

    Consults the local CSV cache first (including the mirrored board) when
    *cache* is True, otherwise queries the remote solver and picks the best
    scored move. New results are appended to the cache.
    """
    board = game.board
    if cache:
        k = ''.join(map(str, board.ravel().tolist()))
        if k in d:
            return d[k]
        # The board is horizontally symmetric: a mirrored hit maps to 6 - col.
        k2 = ''.join(map(str, board[:,::-1].ravel().tolist()))
        if k2 in d:
            return 6 - d[k2]
    # Encode the board for the solver: e = empty, a = player 1, b = player 2.
    board = np.array(board[::-1], dtype=object)
    board[board == 0] = "e"
    board[board == 1] = "a"
    board[board == 2] = "b"
    board = str(board.tolist()).replace("'", '"')
    data = {
        'board': board,
        'player': 'a' if game.turn == 1 else 'b',
        'timestamp': int(time.time() * 1000),
        'uuid': '698accdb-974a-70c6-3356-d091b66476a5',
    }
    r = requests.post(URL, data=data)
    move_info = r.json()['moveInfos']
    # NOTE(review): score encoding assumed from the checks below — 0 means
    # illegal, 200 means draw, other values encode win/loss distance; confirm
    # against the solver's API documentation.
    best_val = 999
    best_col = -1
    # check for best winning move (lower score)
    for index, value in enumerate(move_info):
        if value != 0 and value != 200 and (value % 2 == 0 or value < 0):
            if (value < best_val):
                best_val = value
                best_col = index
    # check for draw move, if no winning move was found
    if best_col == -1:
        best_val = -999
        for index, value in enumerate(move_info):
            if value == 200:
                best_val = value
                best_col = index
    # check for best losing move, if no col has been selected yet
    if best_col == -1:
        best_val = -999
        for index, value in enumerate(move_info):
            if value != 0 and value > best_val:
                best_val = value
                best_col = index
    # Collect every column tied with the best score.
    res_cols = []
    for index, value in enumerate(move_info):
        if value == best_val:
            res_cols += [index]
    # Choose a random best column
    if len(res_cols) > 0:
        best_col = random.choice(res_cols)
    if cache:
        d[k] = best_col
        pd.DataFrame([[k, best_col]]).to_csv('data/perfect_cache.csv', mode='a', header=False, index=False)
    return best_col
"bangkok.tong@gmail.com"
] | bangkok.tong@gmail.com |
201c525e8d07c192a2c57f48953459ae11439cea | 1e1a7d770eec7c384ebda97f1d366c3161a6180b | /backend/last_man_standing_24889/urls.py | 0f3905ee6f3bc9f91a99cb05ac1c145a2ee1cc19 | [] | no_license | crowdbotics-apps/last-man-standing-24889 | 71156b8fc0410aac4fa2297d695895e80385d67c | 2d86807701b0f8fc2cf6327e23ba17e7ede88879 | refs/heads/master | 2023-03-24T20:29:10.536058 | 2021-03-06T23:48:34 | 2021-03-06T23:48:34 | 345,219,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,393 | py | """last_man_standing_24889 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Project URL routing: app includes, auth endpoints, and admin.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
    path("api/v1/", include("course.api.v1.urls")),
    path("course/", include("course.urls")),
    path("home/", include("home.urls")),
]
# Admin site branding.
admin.site.site_header = "Last man standing"
admin.site.site_title = "Last man standing Admin Portal"
admin.site.index_title = "Last man standing Admin"
# swagger
api_info = openapi.Info(
    title="Last man standing API",
    default_version="v1",
    description="API documentation for Last man standing App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# Catch-all routes: serve the SPA index.html for any unmatched path.
urlpatterns += [path("", TemplateView.as_view(template_name="index.html"))]
urlpatterns += [
    re_path(r"^(?:.*)/?$", TemplateView.as_view(template_name="index.html"))
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
0affb5edcfe1c576e69b4027f3da3a0ac9436a78 | a6f149a095086ba7a75831eab0c770c7469a321e | /demo/fastlane/send-qq.py | 5d7fff4908e04753cf48c30ffc112f1059f4d5e1 | [
"MIT"
] | permissive | zmn-dc/travis-ci | f4b185e0443906d0f321292b12d2547a0170b154 | 9cd6abe973339b097883da80aa7184c3c9a8721f | refs/heads/main | 2023-06-14T07:11:23.612019 | 2021-07-09T01:55:27 | 2021-07-09T01:55:27 | 384,062,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | #!/usr/bin/env python3
# coding=utf-8
# sendEmail title content
import sys
import smtplib
from email.mime.text import MIMEText
from email.header import Header
# Sender mailbox.
sender = '965524749@qq.com;'
# Recipient mailbox.
receiver = 'daichao@xiaoma.cn;'
# SMTP mail server, configured for QQ Mail.
smtpserver = 'smtp.qq.com'
#smtpserver = 'smtp.exmail.qq.com'
# Account and SMTP authorization code used to log in.
# NOTE(review): credential is hard-coded in source — move to an environment
# variable or config file and rotate the authorization code.
username = '965524749@qq.com'
password = 'kctfxwvaaxllbdja'
# Helper that actually sends the notification e-mail (Python 2 code).
def send_mail(title, content):
    # title is the subject line, content is the plain-text body.
    # Returns True on success, False on any failure (which is printed).
    try:
        msg = MIMEText(content,'plain','utf-8')
        if not isinstance(title,unicode):
            title = unicode(title, 'utf-8')
        msg['Subject'] = title
        msg['From'] = sender
        msg['To'] = receiver
        msg["Accept-Language"]="zh-CN"
        msg["Accept-Charset"]="ISO-8859-1,utf-8"
        # Implicit-TLS SMTP on port 465.
        smtp = smtplib.SMTP_SSL(smtpserver,465)
        smtp.login(username, password)
        smtp.sendmail(sender, receiver, msg.as_string())
        smtp.quit()
        return True
    except Exception, e:
        print str(e)
        return False
# Entry point — usage: send-qq.py <title> <content>
if send_mail(sys.argv[1], sys.argv[2]):
    print "done!"
else:
    print "failed!"
| [
"daichao@xiaoma.cn"
] | daichao@xiaoma.cn |
e8b4799621687dda853e01de4ed16064e980d8b6 | 4607cff205c99c7e45850a03e4f0b737dd8c285c | /env/bin/django-admin.py | 6679e447f87d282011865091c9d9e6bc250e158d | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | k-koech/gallery_django | e81aefe21a09559886abc383c5f9dbd2e6b66af8 | 20250f354d8846bf4ac7312c29f6a7ded54d9ea7 | refs/heads/master | 2023-08-05T09:33:45.566345 | 2021-09-15T21:40:23 | 2021-09-15T21:40:23 | 402,822,297 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | #!/home/moringa/Desktop/DJANGO/ip1/env/bin/python
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # Django >= 4.0: the warning class is gone, so the script cannot work.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    # Still functional on Django 3.x, but warn users to switch to django-admin.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"kelvin.koech@student.moringaschool.com"
] | kelvin.koech@student.moringaschool.com |
2b87163c16099d48e7c7ea96de41b9ee650ad3d5 | 7b5828edda7751700ca7002b40a214e39e5f48a8 | /EA/simulation/carry/carry_sim_posture.py | ad984a857162d8fc98d41b913d4ffe33fe12bc11 | [] | no_license | daniela-venuta/Sims-4-Python-Script-Workspace | 54c33dac02f84daed66f46b7307f222fede0fa62 | f408b28fb34626b2e3b2953152343d591a328d66 | refs/heads/main | 2023-03-29T18:08:39.202803 | 2021-03-30T19:00:42 | 2021-03-30T19:00:42 | 353,111,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,730 | py | from animation.animation_utils import flush_all_animations
from animation.arb import Arb
from animation.arb_element import distribute_arb_element
from animation.posture_manifest import Hand
from carry.carry_postures import CarryingObject
from carry.carry_utils import SCRIPT_EVENT_ID_STOP_CARRY, SCRIPT_EVENT_ID_START_CARRY
from element_utils import build_critical_section, build_critical_section_with_finally
from interactions.aop import AffordanceObjectPair
from interactions.context import InteractionContext
from interactions.priority import Priority
from postures.posture import Posture, TRANSITION_POSTURE_PARAM_NAME
from postures.posture_animation_data import AnimationDataByActorAndTargetSpecies
from postures.posture_specs import PostureSpecVariable, PostureAspectBody, PostureAspectSurface
from postures.posture_state import PostureState
from sims4.tuning.tunable import Tunable
from sims4.tuning.tunable_base import GroupNames
import element_utils
import sims4.log
logger = sims4.log.Logger('Carry', default_owner='epanero')
class CarryingSim(CarryingObject):
    """Carry posture for a Sim carrying another Sim.

    Extends CarryingObject by maintaining a linked body posture on the
    *carried* Sim (e.g. a "be carried" posture) that is kept in sync with
    the carrier's transitions, idles and exits.
    """
    INSTANCE_TUNABLES = {'_animation_data': AnimationDataByActorAndTargetSpecies.TunableFactory(animation_data_options={'locked_args': {'_idle_animation': None}, 'is_two_handed_carry': Tunable(description='\n If checked, then this is a two-handed carry, and Sims will\n not be able to simultaneously run interactions requiring\n either hand while in this posture.\n ', tunable_type=bool, default=False)}, tuning_group=GroupNames.ANIMATION), 'carried_linked_posture_type': Posture.TunableReference(description='\n The posture to be linked to this carry. This is the body posture\n that is set on the carried Sim. The source interaction for this\n posture is whichever posture providing interaction can be found on\n the Sim that is doing the carrying.\n ', tuning_group=GroupNames.POSTURE)}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Linked posture on the carried Sim; created lazily in
        # set_target_linked_posture_data().
        self._carried_linked_posture = None
        self._carried_linked_previous_posture_state = self.target.posture_state
        # Spec for the carried Sim: linked body posture, no surface.
        self._carried_linked_posture_spec = self._carried_linked_previous_posture_state.spec.clone(body=PostureAspectBody((self.carried_linked_posture_type, self.sim)), surface=PostureAspectSurface((None, None, None)))
        self._carried_linked_posture_exit_transition = None

    @property
    def is_two_handed_carry(self):
        # Tuned per actor/target species on the animation data.
        animation_data = self.get_animation_data()
        return animation_data.is_two_handed_carry

    def set_carried_linked_posture_exit_transition(self, transition, next_body_posture):
        """Record the transition the carried Sim should use when this carry ends."""
        next_body_posture.previous_posture = self._carried_linked_posture
        self._carried_linked_posture_exit_transition = transition

    def _get_carried_linked_source_interaction(self):
        """Build the posture-providing interaction for the carried Sim.

        Searches the carrier's super affordances for one providing the tuned
        linked posture type for the target's species.
        """
        for super_affordance in self.sim.super_affordances():
            if super_affordance.provided_posture_type is self.carried_linked_posture_type and super_affordance._provided_posture_type_species == self.target.species:
                break
        else:
            # NOTE(review): structure reconstructed from decompiled source —
            # the raise is assumed to be the for/else "not found" branch.
            raise RuntimeError('{} does not provide an appropriate affordance to {}'.format(self.sim, self))
        context = InteractionContext(self.target, InteractionContext.SOURCE_SCRIPT, Priority.Low)
        aop = AffordanceObjectPair(super_affordance, self.sim, super_affordance, None, force_inertial=True)
        result = aop.interaction_factory(context)
        if not result:
            raise RuntimeError("Unable to execute 'Be Carried' posture providing AOP: {} ({})".format(aop, result.reason))
        return result.interaction

    def set_target_linked_posture_data(self):
        """Create and wire up the linked posture state for the carried Sim."""
        posture_state = PostureState(self.target, self._carried_linked_previous_posture_state, self._carried_linked_posture_spec, {PostureSpecVariable.HAND: (Hand.LEFT,)})
        self._carried_linked_posture = posture_state.body
        self._carried_linked_posture.previous_posture = self._carried_linked_previous_posture_state.body
        # The linked posture animates on the carrier's animation context.
        self._carried_linked_posture.rebind(self.sim, animation_context=self.animation_context)
        self._carried_linked_posture.source_interaction = self._get_carried_linked_source_interaction()
        return posture_state

    def _start_carried_linked_posture_gen(self, timeline):
        # Apply the linked posture state to the carried Sim.
        posture_state = self.set_target_linked_posture_data()
        self.target.posture_state = posture_state

    def kickstart_linked_carried_posture_gen(self, timeline):
        """Play the linked posture's begin/idle and end the previous body posture."""
        yield from element_utils.run_child(timeline, (self.target.posture.get_idle_behavior(), flush_all_animations))
        begin_element = self._carried_linked_posture.get_begin(Arb(), self.target.posture_state, self.target.routing_surface)
        yield from element_utils.run_child(timeline, begin_element)
        yield from self._carried_linked_posture.kickstart_source_interaction_gen(timeline)
        yield from element_utils.run_child(timeline, self._carried_linked_previous_posture_state.body.end())

    def _setup_asm_target_for_transition(self, *args, **kwargs):
        """Expose the carried Sim's transition posture to the ASM."""
        result = super()._setup_asm_target_for_transition(*args, **kwargs)
        if self._carried_linked_posture_exit_transition is None:
            transition_posture = self._carried_linked_previous_posture_state.body
        else:
            # Swap the previous posture's virtual actor for the exit
            # transition's destination posture.
            previous_posture = self._carried_linked_previous_posture_state.body
            (previous_target, previous_target_name) = previous_posture.get_target_and_target_name()
            if previous_target is not None and previous_target_name is not None:
                self.asm.remove_virtual_actor(previous_target_name, previous_target, previous_posture.get_part_suffix())
            transition_posture = self._carried_linked_posture_exit_transition.dest_state.body
        (transition_target, transition_target_name) = transition_posture.get_target_and_target_name()
        if transition_target is not None and transition_target_name is not None:
            self.asm.add_potentially_virtual_actor(self.get_target_name(), self.target, transition_target_name, transition_target)
        self.asm.set_actor_parameter(self.get_target_name(), self.target, TRANSITION_POSTURE_PARAM_NAME, transition_posture.name)
        return result

    def add_transition_extras(self, sequence, **kwargs):
        # Wrap the carry transition so the linked posture is started with it.
        sequence = super().add_transition_extras(sequence, **kwargs)
        sequence = build_critical_section(self._start_carried_linked_posture_gen, sequence, self.kickstart_linked_carried_posture_gen)
        return sequence

    def append_transition_to_arb(self, arb, *args, in_xevt_handler=False, **kwargs):
        """Append this carry's transition and mirror it onto the linked posture."""
        def _on_linked_posture_transition(*_, **__):
            self._carried_linked_posture.append_transition_to_arb(arb, *args, in_xevt_handler=in_xevt_handler, **kwargs)
            if in_xevt_handler:
                self._carried_linked_posture.append_idle_to_arb(arb)
        if in_xevt_handler:
            _on_linked_posture_transition()
        else:
            # Defer the linked transition until the start-carry script event.
            arb.register_event_handler(_on_linked_posture_transition, handler_id=SCRIPT_EVENT_ID_START_CARRY)
        return super().append_transition_to_arb(arb, *args, in_xevt_handler=in_xevt_handler, **kwargs)

    def append_idle_to_arb(self, arb):
        self._carried_linked_posture.append_idle_to_arb(arb)
        return super().append_idle_to_arb(arb)

    def append_exit_to_arb(self, arb, *args, exit_while_holding=False, **kwargs):
        """Append this carry's exit; exit the linked posture on the stop-carry event."""
        if self._carried_linked_posture_exit_transition is not None:
            destination_posture = self._carried_linked_posture_exit_transition.dest_state.body
        else:
            destination_posture = None
        def _on_linked_posture_exit(*_, **__):
            # The carried Sim's exit plays on its own arb, mastered by it.
            linked_arb = Arb()
            self._carried_linked_posture.append_exit_to_arb(linked_arb, *args, **kwargs)
            if destination_posture is not None:
                destination_posture.append_transition_to_arb(linked_arb, self._carried_linked_posture)
                destination_posture.append_idle_to_arb(linked_arb)
            distribute_arb_element(linked_arb, master=self.target)
        arb.register_event_handler(_on_linked_posture_exit, handler_id=SCRIPT_EVENT_ID_STOP_CARRY)
        return super().append_exit_to_arb(arb, *args, exit_while_holding=exit_while_holding, **kwargs)

    def _on_reset(self):
        super()._on_reset()
        # Drop the carried Sim at the carrier's position on reset.
        if self.target is not None:
            routing_surface = self.target.routing_surface
            self.target.move_to(parent=None, translation=self.sim.position, routing_surface=routing_surface)
| [
"44103490+daniela-venuta@users.noreply.github.com"
] | 44103490+daniela-venuta@users.noreply.github.com |
93bc94442f0b56d3348d91e0d767235eff9f2d13 | f62fd455e593a7ad203a5c268e23129473d968b6 | /tacker-0.7.0/tacker/common/config.py | ecb277d892bf8f96651169cd778b6c9f9a76f9a5 | [
"Apache-2.0"
] | permissive | MinbinGong/OpenStack-Ocata | 5d17bcd47a46d48ff9e71e2055f667836174242f | 8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3 | refs/heads/master | 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 | Python | UTF-8 | Python | false | false | 4,829 | py | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Tacker
"""
import os
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_log import log as logging
import oslo_messaging
from paste import deploy
from tacker.common import utils
from tacker import version
LOG = logging.getLogger(__name__)
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.IntOpt('bind_port', default=9890,
help=_("The port to bind to")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions")),
cfg.ListOpt('service_plugins', default=['nfvo', 'vnfm'],
help=_("The service plugins Tacker will use")),
cfg.StrOpt('policy_file', default="policy.json",
help=_("The policy file to use")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.BoolOpt('allow_pagination', default=False,
help=_("Allow the usage of the pagination")),
cfg.BoolOpt('allow_sorting', default=False,
help=_("Allow the usage of the sorting")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.StrOpt('host', default=utils.get_hostname(),
help=_("The hostname Tacker is running on")),
]
core_cli_opts = [
cfg.StrOpt('state_path',
default='/var/lib/tacker',
help=_("Where to store Tacker state files. "
"This directory must be writable by the agent.")),
]
logging.register_options(cfg.CONF)
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
def config_opts():
    """Return this module's option groups for oslo sample-config generation."""
    option_groups = [core_opts, core_cli_opts]
    return [(None, opts) for opts in option_groups]
# Ensure that the control exchange is set correctly
oslo_messaging.set_transport_defaults(control_exchange='tacker')
def set_db_defaults():
    """Apply Tacker's default oslo.db connection and pool settings to CONF."""
    # Update the default QueuePool parameters. These can be tweaked by the
    # conf variables - max_pool_size, max_overflow and pool_timeout
    db_options.set_defaults(
        cfg.CONF,
        connection='sqlite://',
        max_pool_size=10,
        max_overflow=20, pool_timeout=10)

# Applied at import time so defaults exist before CONF is parsed.
set_db_defaults()
def init(args, **kwargs):
    """Parse command-line/config-file options and initialize the RPC layer.

    :param args: command line arguments forwarded to oslo.config
    """
    cfg.CONF(args=args, project='tacker',
             version='%%prog %s' % version.version_info.release_string(),
             **kwargs)
    # FIXME(ihrachys): if import is put in global, circular import
    # failure occurs
    from tacker.common import rpc as n_rpc
    n_rpc.init(cfg.CONF)
def setup_logging(conf):
    """Set up oslo.log for the 'tacker' product.

    :param conf: a cfg.ConfOpts object holding the logging options
    """
    product_name = "tacker"
    logging.setup(conf, product_name)
    LOG.info(_("Logging enabled!"))
def load_paste_app(app_name):
    """Builds and returns a WSGI app from a paste config file.

    :param app_name: Name of the application to load
    :raises ConfigFilesNotFoundError when config file cannot be located
    :raises RuntimeError when application cannot be loaded from config file
    """
    config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config)
    if not config_path:
        raise cfg.ConfigFilesNotFoundError(
            config_files=[cfg.CONF.api_paste_config])
    config_path = os.path.abspath(config_path)
    LOG.info(_("Config paste file: %s"), config_path)
    try:
        app = deploy.loadapp("config:%s" % config_path, name=app_name)
    except (LookupError, ImportError):
        # Re-raised as RuntimeError; the original traceback is preserved in
        # the log via LOG.exception below.
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(config_path)s.") %
               {'app_name': app_name,
                'config_path': config_path})
        LOG.exception(msg)
        raise RuntimeError(msg)
    return app
| [
"gongwayne@hotmail.com"
] | gongwayne@hotmail.com |
f42db952ff91b058705876e37bd662b1f8ac5259 | d4ea7073d36c7c96b30c5e3e60af8554243ec9c2 | /S01/preprocessfile.py | 7752460f732fc5b6cf55ac2e76838be8d6f0b5c5 | [
"MIT"
] | permissive | vishwesh5/FRIENDS-RNN | 6d6679258b156e17369d1a2855fdd2a943cb1596 | 136210d426a8a533f1531871c1a0d41533ab3daa | refs/heads/master | 2020-03-31T03:42:04.170827 | 2018-10-08T17:15:45 | 2018-10-08T17:15:45 | 151,874,494 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | f = open('0101.txt')
lines = f.readlines()
file_ = open('0101_new.txt', 'w')
for line in lines:
if(len(line) != 1):
print(len(line))
file_.write(line.strip("\n"))
file_.close()
| [
"ubuntu@ip-172-31-40-180.us-east-2.compute.internal"
] | ubuntu@ip-172-31-40-180.us-east-2.compute.internal |
ffbe12694526ef1d024fb44c3213fd6d5b2276ca | 1ca9cd89f14510a0084d572df3de3063cbac1847 | /bin/concept_lattice_manager.py | 48fef4307e4223ef90896d592a0b44778ade1f96 | [
"MIT"
] | permissive | zeal4u/FCA_Faceted_Search | 379d4d5b4a840361bdc15ddd8532267e627e027c | fb43fb4c35a906fd0484e8bb422f54656772ab41 | refs/heads/master | 2021-01-24T06:57:04.902769 | 2017-06-30T13:46:37 | 2017-06-30T13:46:37 | 93,328,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,474 | py | # -*- coding: utf-8 -*-
__author__ = 'jsz'
__version__ = 0.1
import networkx as nx
import hashlib
import base64
from networkx.exception import NetworkXError
from db_stuff import DBHelper
def intersect(l, i):
    u"""
    Return the elements of *l* that are valid indices below *i*.

    :param l: iterable of int index values
    :param i: int exclusive upper bound
    :return: list of values from *l* that fall in [0, i), in original order
    """
    # A set gives O(1) membership tests instead of the original O(i) scan
    # of a list per element.
    valid = set(range(i))
    return [val for val in l if val in valid]
def difference(a, b):
    u"""
    Return the elements of *a* that do not appear in *b*.

    :param a: iterable
    :param b: iterable
    :return: list holding the set difference a - b (order unspecified)
    """
    remaining = set(a).difference(b)
    return list(remaining)
class Background(object):
    """Formal context (background) for FCA: objects, attributes, and their
    incidence relation stored sparsely as a dict keyed by i*len(m)+j."""

    def __init__(self, u, m, r=None, triple=None):
        u"""
        :param u: list of objects
        :param m: list of attributes
        :param r: list[list] object-by-attribute relation matrix (0/1)
        :param triple: dict pre-built sparse relation (alternative to r)
        """
        self.u = u
        self.m = m
        # Reverse lookups: object/attribute value -> its index.
        self.u_dict = {val:i for i, val in enumerate(self.u)}
        self.m_dict = {val:i for i, val in enumerate(self.m)}
        if r:
            self.r_triple = self._construct_triple_tuple(r)
        if triple:
            self.r_triple = triple

    def _construct_triple_tuple(self, r):
        # Flatten the dense matrix into {i*len(m)+j: True} for each 1 entry.
        res = {}
        for i in xrange(0, len(r)):
            for j in xrange(0, len(self.m)):
                if r[i][j]:
                    res[i*len(self.m) + j] = True
        return res

    ##TODO Need Much Improvement
    @staticmethod
    def build_bg_from_db(trans_table, facet_codes_table):
        u"""Build a Background from the database: one object per book, one
        attribute per facet code; a book is related to its author/publisher/
        year facet codes."""
        sql1 = u"""
        select book_id, author_id, publisher_id, year_id from %s;
        """ % trans_table
        sql2 = u"""
        select id from %s order by id;
        """ % facet_codes_table
        session = DBHelper.get_session()
        books = session.execute(sql1).fetchall()
        codes = session.execute(sql2).fetchall()
        codes_len = len(codes)
        triple = {}
        u = [book_id for book_id, author_id, publisher_id, year_id in books]
        m = [code.id for code in codes]
        bg = Background(u, m)
        for i, book in enumerate(books):
            # book[1:] are the facet-code columns; book[0] is the book id.
            for j in xrange(1, len(book)):
                mark = i * codes_len + bg.get_attr_index(book[j])
                triple[mark] = True
        bg.r_triple = triple
        return bg

    def is_in_background(self, i, j):
        """True iff object *i* carries attribute *j*."""
        return True if i*len(self.m)+j in self.r_triple else False

    def get_obj_index(self, obj):
        u"""
        :param obj: object value
        :return: index of obj in u, or None if unknown
        """
        if obj in self.u_dict:
            return self.u_dict[obj]
        else:
            return None

    def get_attr_index(self, attr):
        u"""
        :param attr: attribute value
        :return: index of attr in m, or None if unknown
        """
        if attr in self.m_dict:
            return self.m_dict[attr]
        else:
            return None

    def get_obj(self, i):
        u"""
        :param i: index of obj in u
        :return: the ith obj of u
        """
        assert 0 <= i < len(self.u), "Index out of range!"
        return self.u[i]

    def get_attr(self, i):
        u"""
        :param i: index of attr in m
        :return: the ith attr of m
        """
        assert 0 <= i < len(self.m), "Index out of range!"
        return self.m[i]
class BaseFCL(object):
    u"""Derivation operators of formal concept analysis over a Background."""

    def __init__(self, background):
        u"""
        :param background: Background instance providing the objects (u),
            attributes (m) and the incidence test is_in_background()
        """
        self.bg = background

    def g(self, attrs):
        u"""Extent operator: map an attribute set to the objects that carry
        every one of those attributes.

        Worst-case complexity O(|u| * |attrs|).

        :param attrs: list of attribute indices
        :return: list of object indices owning every attribute in *attrs*
        """
        # range() replaces the Python-2-only xrange(); all() keeps the same
        # short-circuit semantics as the original for/else loop.
        return [obj for obj in range(len(self.bg.u))
                if all(self.bg.is_in_background(obj, attr) for attr in attrs)]

    def f(self, objs):
        u"""Intent operator: map an object set to the attributes shared by
        every one of those objects.

        Worst-case complexity O(|m| * |objs|).

        :param objs: list of object indices (the original docstring
            mistakenly documented this parameter as ``attrs``)
        :return: list of attribute indices common to every object in *objs*
        """
        return [attr for attr in range(len(self.bg.m))
                if all(self.bg.is_in_background(obj, attr) for obj in objs)]
class Node(object):
    u"""A lattice node, i.e. one formal concept.

    attr_set -- intent: the attribute (index) set used for querying
    obj_set  -- extent: the object (index) set covered by the concept
    supp     -- support: the size of the extent
    ref_seq  -- refinement sequence: [extra_attr, extent of
                attr_set + extra_attr] pairs used while mining the lattice
    """

    def __init__(self, attr_set, ref_seq=None, obj_set=None, supp=None):
        self.attr_set = attr_set if attr_set else set()
        self.obj_set = obj_set if obj_set else set()
        # A falsy supp (None or 0) falls back to the extent size.
        self.supp = supp if supp else len(self.obj_set)
        self.ref_seq = ref_seq

    @staticmethod
    def stringizer(node):
        u"""Serialize *node* for GML: 'attr_set:...;obj_set:...;supp:N'."""
        def render(values):
            # Every element is followed by one space (including the last).
            return ''.join('%s ' % v for v in values)
        return 'attr_set:%s;obj_set:%s;supp:%d' % (
            render(node.attr_set), render(node.obj_set), node.supp)

    @staticmethod
    def destringizer(string):
        u"""Inverse of :meth:`stringizer`: rebuild a Node from its label."""
        def parse(chunk):
            return set(int(token) for token in chunk.split())
        parts = string.split(';')
        return Node(attr_set=parse(parts[0].split(':')[1]),
                    obj_set=parse(parts[1].split(':')[1]),
                    supp=int(parts[2].split(':')[1]))

    def __hash__(self):
        # The hash depends only on the intent, consistent with __eq__.
        code = 17
        for attr in self.attr_set:
            code = 1001 * code + attr
        return code

    def __eq__(self, other):
        # Two concepts are equal iff they share the same intent.
        return self.attr_set == other.attr_set
class ConceptLattice(object):
ROOT_NODE = 0
LEAF_NODE = 1
NORMAL_NODE = 2
BOTTOM_NODE = 3
def __init__(self, graph, root=None, leaf_nodes=None, bottom=None):
self.graph = graph
self.root = root
if leaf_nodes:
self.leaf_nodes = leaf_nodes
else:
self.leaf_nodes = []
self.bottom = bottom
def _get_root(self):
u"""
获取格的根节点
"""
if self.root:
return self.root
res = None
for node, data in self.graph.nodes_iter(data=True):
if data['type'] == ConceptLattice.ROOT_NODE:
res = node
break
self.root = res
return res
def _get_leaf(self):
u"""
获取格的叶子节点
"""
if self.leaf_nodes:
return self.leaf_nodes
res = []
for node, data in self.graph.nodes_iter(data=True):
if data['type'] == ConceptLattice.LEAF_NODE:
res.append(node)
self.leaf_nodes = res
return res
def print_lattice(self):
for node in self.graph.nodes():
print node.obj_set, ';',
print 'Predecessors:',
for nr in self.graph.predecessors(node):
print nr.obj_set,
print 'Successors:',
for nr in self.graph.successors(node):
print nr.obj_set,
print
def __getattr__(self, name):
if name == 'depth':
return self._get_depth()
def _get_depth(self):
u"""
获取格的深度
"""
if not hasattr(self, 'depth'):
self.depth = 0
def _travel(g, cur_node, cur_depth):
cur_depth += 1
if self.depth < cur_depth:
self.depth = cur_depth
for node in g.successors(cur_node):
_travel(g, node, cur_depth)
_travel(self.graph, self.root, 0)
return self.depth
@staticmethod
def parse_lattice(path):
g = nx.read_gml(path, destringizer=Node.destringizer)
cl = ConceptLattice(g)
cl._get_root()
cl._get_leaf()
return cl
def save_lattice(self, path):
nx.write_gml(self.graph, path, Node.stringizer)
def locate_by_extent(self, query_obj_set):
u"""
"""
target = self.root
node = self.root
while self.graph.successors(node):
for child_node in self.graph.successors(node):
if query_obj_set.issubset(child_node.obj_set):
flag = True
node = child_node
target = node
break
else:
#针对当前节点的父节点,没有找到属性包含query_attr_set的节点,则当前节点为一个极大节点
break
return target
def locate(self, query_attr_set):
u"""
根据一个查询属性集定位某个最匹配的格节点
:param query_attr_set:
:return:
"""
# 如果父节点目前还没有后继,则说明新生成的子节点对父节点的后继毫无影响,子节点的后继也尚未产生
if not self.leaf_nodes:
return None
target = None
for node in self.leaf_nodes:
if query_attr_set.issubset(node.attr_set):
target = node
while self.graph.predecessors(node):
for parent_node in self.graph.predecessors(node):
if query_attr_set.issubset(parent_node.attr_set):
flag = True
node = parent_node
target = node
break
else:
#针对当前节点的父节点,没有找到属性包含query_attr_set的节点,则当前节点为一个极大节点
break
break
return target
def locate_from(self, query_attr_set, start_node):
u"""
根据一个查询属性集定位某个最匹配的格节点
:param query_attr_set:
:param start_node: node 查询的起点节点
:return:
"""
target = start_node
while self.graph.predecessors(start_node):
#print start_node.__hash__()
for parent_node in self.graph.predecessors(start_node):
if query_attr_set.issubset(parent_node.attr_set):
start_node = parent_node
target = start_node
break
else:
break
return target
def locate_from_versa(self, query_attr_set, start_node):
u"""
根据一个查询属性集定位某个最匹配的格节点
:param query_attr_set:
:param start_node: node 查询的起点节点
:return:
"""
target = start_node
while self.graph.successors(start_node):
for child_node in self.graph.successors(start_node):
if query_attr_set.issubset(child_node.attr_set):
start_node = child_node
target = start_node
break
else:
break
return target
def get_average_supp(self, nodes):
u"""
分析目标节点集的平均支持度
"""
sum_supp = 0
for node in nodes:
sum_supp += node.supp
return sum_supp*1.0/len(nodes)
def get_partion_max(self, nodes):
u"""
从某一节点集合中选取极大值集合
:param nodes: 某一节点集合
"""
res = []
flags = [True for i in nodes]
nodes_len = len(nodes)
for i in xrange(0, nodes_len):
for j in xrange(i+1, nodes_len):
if flags[j]:
intersection = nodes[i].attr_set & nodes[j].attr_set
inter_len = len(intersection)
# 如果有j比i更高,则他们attr_set的交集等于j的attr_set,这说明i并非极大格节点
if inter_len == len(nodes[j].attr_set):
flags[i] = False
break
# 如果有j比i更低,则他们attr_set的交集等于i的attr_set,这说明j并非极大格节点
if inter_len == len(nodes[i].attr_set):
flags[j] = False
# 如果i和j无法比较,说明i和j都有可能为极大格节点
for i in xrange(0, nodes_len):
if flags[i]:
res.append(nodes[i])
return res
def count_attr_supp(self, cur_node):
u"""
计算以当前节点为上确界的子形式概念格中各属性的支持度
:param cur_node: 当前节点
:return: dict {attr: supp}
"""
res = {}
cur_nodes_set = set([cur_node])
while cur_nodes_set:
next_nodes_set = []
for node in cur_nodes_set:
next_nodes_set.extend(self.graph.successors(node))
for attr in node.attr_set.difference(cur_node.attr_set):
if attr not in res:
res[attr] = node.supp
cur_nodes_set = set(next_nodes_set)
return res
def _distance_match(self, cur_node, k, direction_method, limit=5):
u"""
其中 k = 1-|X1.obj_set intersect X2.obj_set|/|X1.obj_set union X2.obj_set|
:param cur_node: 当前查询节点
:param direction_method: 方向函数,向上遍历还是向下遍历
:param limit: int 个数限制
:return: nodes 符合条件的节点集
"""
def match(node):
# 找到了范围内的极远点
flag = True
# 距离还在范围内的节点
in_range_nodes = []
for parent in direction_method(node):
# 求 Jaccard距离
distance = 1 - 1.0 * len(cur_node.obj_set & parent.obj_set)/len(cur_node.obj_set | parent.obj_set)
if distance <= k:
flag = False
in_range_nodes.append(parent)
return flag, in_range_nodes
result = set()
cur_loop_nodes = direction_method(cur_node)
while cur_loop_nodes:
next_loop_nodes = []
for parent in cur_loop_nodes:
flag, in_range_nodes = match(parent)
if flag:
result.add(parent)
if len(result) == limit:
return result
else:
next_loop_nodes.extend(in_range_nodes)
cur_loop_nodes = next_loop_nodes
return result
def expand(self, cur_node, k):
u"""
泛化,返回距离节点node为k的祖先节点
其中 k = 1-|X1.obj_set intersect X2.obj_set|/|X1.obj_set union X2.obj_set|
:param cur_node: 当前查询节点
:return: nodes 符合条件的节点集
"""
return self._distance_match(cur_node, k, self.graph.predecessors)
def refine(self, cur_node, k):
u"""
细化,返回距离节点node为k的后代节点
其中 k = 1-|X1.obj_set intersect X2.obj_set|/|X1.obj_set union X2.obj_set|
:param cur_node: 当前查询节点
:return: nodes 符合条件的节点集
"""
return self._distance_match(cur_node, k, self.graph.successors)
def meet(self, nodes):
u"""
返回节点集nodes的下确界
:param nodes: 节点的集合
:return node:
"""
meet_attr_set = set()
for node in nodes:
meet_attr_set |= node.attr_set
return self.locate(meet_attr_set)
def join(self, nodes):
u"""
返回节点集nodes的上确界
:param nodes: 节点的集合
:return node:
"""
join_attr_set = set()
for node in nodes:
join_attr_set &= node.attr_set
return self.locate_from(join_attr_set, nodes[0])
class FCLMiner(BaseFCL):
    u"""Top-down lattice miner that enumerates concepts by expanding the
    refinement pairs stored in Node.ref_seq."""

    def __init__(self, background):
        # All extents generated so far, keyed by hash(str(extent)); used to
        # skip concepts that have already been created.
        self.all_extent_dict = {}
        BaseFCL.__init__(self, background)

    def gen_lattice(self):
        u"""Build the full concept lattice and return it.

        Starts from the top concept (f(g([])), g([])) and recursively
        expands children via _create_children().
        """
        leaf_nodes = []
        self.graph = nx.DiGraph()
        start_obj_set = self.g([])
        start_attr_set = self.f(start_obj_set)
        # Attributes / objects are referred to by index throughout.
        start_ref_attr_set = difference(range(0, len(self.bg.m)), start_attr_set)
        ref_seq = []
        for ref_attr in start_ref_attr_set:
            # Each pair is [extra attribute, extent of intent + extra attr].
            ref_seq.append([ref_attr, set(self.g([ref_attr] + start_attr_set))])
        # Create the root (top) concept.
        root = Node(set(start_attr_set), ref_seq, set(start_obj_set))
        # Register the root in the graph and tag it.
        self.graph.add_node(root, type=ConceptLattice.ROOT_NODE)
        self.lattice = ConceptLattice(self.graph, root, leaf_nodes)
        self._create_children(root)
        return self.lattice

    def print_value(self, node):
        u"""Debug print of one node's intent and refinement pairs (Py2)."""
        print '[',
        for attr_index in node.attr_set:
            print self.bg.get_attr(attr_index),
        print '],[',
        for ref_pair in node.ref_seq:
            print '[', self.bg.get_attr(ref_pair[0]), ',', ref_pair[1], '],',
        print ']'

    def print_tree(self, root):
        u"""Debug dump of the enumeration tree below *root*.

        NOTE(review): relies on a ``children`` attribute that Node does not
        define (any more) -- confirm before using.
        """
        if not root:
            return
        for child in root.children:
            self.print_value(child)
            self.print_tree(child)

    def _create_children(self, parent_node):
        u"""Recursive step: expand *parent_node*'s refinement pairs,
        building the category-set enumeration tree / lattice below it.

        :param parent_node: the concept whose refinement pairs are expanded
        :return: None
        """
        self.all_extent_dict[hash(str(parent_node.obj_set))] = 0
        for i in range(0, len(parent_node.ref_seq)):
            cur_ref_pair = parent_node.ref_seq[i]
            # Only expand this pair if no earlier pair's extent contains its
            # extent; otherwise the concept is generated from that pair.
            for cmp_ref_pair in parent_node.ref_seq[0:i]:
                if cur_ref_pair[1].issubset(cmp_ref_pair[1]):
                    break
            else:
                if hash(str(cur_ref_pair[1])) not in self.all_extent_dict:
                    child = self._create_child(parent_node, cur_ref_pair)
                    self.graph.add_node(child, type=ConceptLattice.NORMAL_NODE)
                    ## Edge maintenance
                    # Step 1: wire up the child's successors/predecessors.
                    targets = []
                    for c in self.graph.successors(parent_node):
                        cur_attr_set = c.attr_set | set([cur_ref_pair[0]])
                        target = self.lattice.locate(cur_attr_set)
                        if target:
                            targets.append(target)
                    for real_target in self.lattice.get_partion_max(targets):
                        # Add the edge child -> sub-concept.
                        self.graph.add_edge(child, real_target)
                    # Step 2: drop parent edges that the child now shadows.
                    delete_edge_to_nodes = []
                    for node in self.graph[parent_node]:
                        if node in self.graph[child]:
                            delete_edge_to_nodes.append(node)
                    for node in delete_edge_to_nodes:
                        self.graph.remove_edge(parent_node, node)
                    # Link parent -> child.
                    self.graph.add_edge(parent_node, child)
                    # No refinement pairs and no successors: the child is a leaf.
                    if not child.ref_seq and not self.graph.successors(child):
                        self.lattice.leaf_nodes.append(child)
                        self.graph.node[child]['type'] = ConceptLattice.LEAF_NODE
                    self._create_children(child)

    def _create_child(self, parent_node, cur_ref_pair):
        u"""Build the child concept of *parent_node* for one refinement pair.

        :param parent_node: the current parent concept
        :param cur_ref_pair: the [attribute, extent] pair being expanded
        :return: the new child Node
        """
        # Child intent.
        attr_set = parent_node.attr_set | set([cur_ref_pair[0]])
        # Child refinement pairs.
        ref_seq = []
        # Pairs after the current one whose extent contains the current
        # extent contribute their attribute to the child's intent.
        for i, ref_pair in enumerate(parent_node.ref_seq):
            if ref_pair[0] == cur_ref_pair[0]:
                for j in range(i+1, len(parent_node.ref_seq)):
                    ref_pair_j = parent_node.ref_seq[j]
                    # Accumulate the child's intent.
                    attr_set |= set([ref_pair_j[0]]) if cur_ref_pair[1].issubset(ref_pair_j[1]) \
                        else set([])
                    # Derive the child's refinement pairs.
                    intersect_set = cur_ref_pair[1] & ref_pair_j[1]
                    if 0 < len(intersect_set) < len(cur_ref_pair[1]):
                        ref_seq.append([ref_pair_j[0], intersect_set])
                break
        node = Node(attr_set, ref_seq, cur_ref_pair[1])
        return node
class ConceptLatticeFactory(object):
    u"""Incremental lattice construction (AddIntent-style algorithm)."""

    def __init__(self, objs):
        u"""
        :param objs: list of (obj_id, attr_set) pairs to insert
            (the original docstring also advertised an ``attr_set_m``
            parameter that never existed)
        """
        self.objs = objs
        # intent-hash -> Node cache of every concept created so far.
        self.node_dict = {}
        # NOTE(review): unused -- intent_hash() builds its own md5 object;
        # kept only so the instance layout stays unchanged.
        self.md5 = hashlib.md5()
        # Instrumentation: total add_intent calls vs. cache hits.
        self.total_steps = 0
        self.skip_steps = 0

    def intent_hash(self, attr_set):
        u"""Canonical md5 hex digest of an intent (order independent)."""
        attr_list = list(attr_set)
        attr_list.sort()
        md5 = hashlib.md5()
        for attr in attr_list:
            # encode() keeps this working on Python 2 *and* 3 -- hashlib on
            # Py3 rejects text strings.  Byte content is unchanged on Py2.
            md5.update(str(attr).encode('utf-8') + b',')
        return md5.hexdigest()

    def create_lattice_incrementally(self):
        u"""Insert every object in turn and return the finished lattice.

        A temporary bottom concept carrying the sentinel attribute -1
        anchors the construction and is removed before returning.
        """
        bottom = Node(attr_set=set([-1]), obj_set=set())
        self.graph = nx.DiGraph()
        self.graph.add_node(bottom, type=ConceptLattice.BOTTOM_NODE)
        self.lattice = ConceptLattice(self.graph, bottom=bottom)
        for obj in self.objs:
            # FIX: the original called bottom.attr_set.union(obj[1]) and
            # discarded the result (set.union returns a *new* set), so the
            # bottom intent never accumulated the attributes as intended.
            bottom.attr_set |= set(obj[1])
            concept = self.add_intent(obj[1], bottom)
            self.update_extent(concept, obj[0])
        self.lattice.graph.remove_node(bottom)
        # Tag the root (no predecessors) and the leaves (no successors).
        for node in self.lattice.graph.nodes():
            if not self.graph.predecessors(node):
                self.graph.node[node]['type'] = ConceptLattice.ROOT_NODE
                self.lattice.root = node
                continue
            if not self.graph.successors(node):
                self.graph.node[node]['type'] = ConceptLattice.LEAF_NODE
                self.lattice.leaf_nodes.append(node)
        return self.lattice

    def add_intent(self, attr_set, generator):
        u"""Core of the construction: insert the concept for *attr_set*.

        :param attr_set: intent of the concept being inserted
        :param generator: current generator node to start the search from
        :return: the (possibly pre-existing) concept for *attr_set*
        """
        # Track the total number of calls.
        self.total_steps += 1
        hash_code = self.intent_hash(attr_set)
        if hash_code in self.node_dict:
            # Track the cache hits.
            self.skip_steps += 1
            return self.node_dict[hash_code]
        # Find the canonical generator.  (A short-circuit
        # "if generator.attr_set == attr_set: return generator" was
        # deliberately left disabled in the original.)
        generator = self.lattice.locate_from(attr_set, generator)
        # Compute the candidate direct parents.
        generator_parents = self.graph.predecessors(generator)
        new_parents = set()
        for candidate in generator_parents:
            if not candidate.attr_set.issubset(attr_set):
                candidate = self.add_intent(candidate.attr_set & attr_set, candidate)
            new_parents.add(candidate)
        # Build the new concept and register it in the hash cache.
        new_concept = Node(attr_set=attr_set, obj_set=set(generator.obj_set))
        if hash_code not in self.node_dict:
            self.node_dict[hash_code] = new_concept
        self.graph.add_node(new_concept, type=ConceptLattice.NORMAL_NODE)
        for parent_node in new_parents:
            is_true_parent = True
            for child in self.graph.successors(parent_node):
                if child.attr_set.issubset(attr_set):
                    is_true_parent = False
                    break
            if is_true_parent:
                try:
                    self.graph.remove_edge(parent_node, generator)
                except NetworkXError:
                    pass
                self.graph.add_edge(parent_node, new_concept)
        # Establish the parent/child link to the generator.
        self.graph.add_edge(new_concept, generator)
        return new_concept

    def update_extent(self, concept, obj_mark):
        u"""Add *obj_mark* to *concept*'s extent and propagate upwards to
        every ancestor concept.

        :param concept: a concept Node
        :param obj_mark: identifier of the inserted object
        """
        concept.obj_set.add(obj_mark)
        concept.supp = len(concept.obj_set)
        for parent_node in self.graph.predecessors(concept):
            self.update_extent(parent_node, obj_mark)
if __name__ == "__main__":
    # Small hand-made formal contexts used as smoke tests.
    u = [i for i in range(1, 9)]
    m = [chr(97 + i) for i in range(9)]  # attributes 'a'..'i'
    r = [
        [True, False, False, False, True, False, True, False, True],
        [True, True, True, True, False, False, False, False, False],
        [True, False, False, True, False, True, False, False, False],
        [True, False, False, True, True, False, False, False, False],
        [True, False, False, False, True, True, True, True, False],
        [True, True, True, False, False, False, True, False, False],
        [True, False, False, True, False, True, False, False, False],
        [True, False, False, False, True, True, True, True, True]
    ]
    # Two smaller contexts (currently unused below).
    u1 = range(1, 5)
    m1 = [chr(97 + i) for i in range(5)]
    r1 = [
        [True, False, True, False, False],
        [True, True, True, False, True],
        [False, True, False, False, True],
        [False, False, True, True, True],
    ]
    u2 = range(1, 6)
    m2 = [chr(97 + i) for i in range(6)]
    r2 = [
        [True, True, False, False, True, True],
        [False, True, True, False, False, True],
        [False, False, True, True, False, True],
        [True, True, True, True, False, True],
        [False, False, True, True, False, False]
    ]
    # Built for the (disabled) FCLMiner path below; the incremental build
    # uses the index-based objs_r pairs instead.
    bg = Background(u, m, r)
    #baseFCL = BaseFCL(bg)
    #fcl_miner = FCLMiner(bg)
    #lattice = fcl_miner.gen_lattice()
    #print lattice.count_attr_supp(lattice.root)
    # (object index, attribute-index set) pairs mirroring the matrix r above.
    objs_r = [
        (0,set([0, 4, 6, 8])),
        (1,set([0, 1, 2, 3])),
        (2,set([0, 3, 5])),
        (3,set([0, 3, 4])),
        (4,set([0, 4, 5, 6, 7])),
        (5,set([0, 1, 2, 6])),
        (6,set([0, 3, 5])),
        (7,set([0, 4, 5, 6, 7, 8]))
    ]
    # Build the lattice incrementally (AddIntent) and persist it as GML.
    add_intent_algo = ConceptLatticeFactory(objs_r)
    add_intent_algo.create_lattice_incrementally().save_lattice('test.gml')
| [
"jsz1995@live.com"
] | jsz1995@live.com |
2258933f2f9a6ea29c2577e6a2dabf52d0b9e192 | 6e652507874d4f6835092e6e504525d71bc1a48d | /Python/hackerrank/compress the string.py | baf25dff7960029cfcf4faed0caebd3b8bef0e3b | [] | no_license | Geek-Tekina/Coding | be4876b2b3b9a16f32c92bb1cabb3694fb03a837 | 15ee5c830dbf02d90cc972355c5054471985ebc5 | refs/heads/main | 2023-08-07T11:17:58.362604 | 2021-10-01T08:47:25 | 2021-10-01T08:47:25 | 412,362,390 | 2 | 0 | null | 2021-10-01T06:58:49 | 2021-10-01T06:58:49 | null | UTF-8 | Python | false | false | 140 | py | from itertools import groupby
user = input()
for key, group in groupby(user):
a = len(list(group)), int(key)
print(tuple(a))
| [
"noreply@github.com"
] | Geek-Tekina.noreply@github.com |
d286a5b5fa3e6d8eedf81d2b02625c19ac94bd85 | 91b3c095f600a18d6dee1ca347eef098debcdf6a | /scoreboard.py | f04b56e14e92b00e3676b1c5f130996f3678ed44 | [] | no_license | panxogol/python-snake-game | 3aa4fa30a55790dfff5b8f0d4b890562f5e991df | ccfbbf145db9740b8d97abe707aa2329f9b747f2 | refs/heads/master | 2023-02-01T08:32:12.583095 | 2020-12-20T02:12:48 | 2020-12-20T02:12:48 | 321,801,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | # ---IMPORTS---
from turtle import Turtle
from constants import *
# ---FUNCTIONS---
def getHighestScore():
    """Read the persisted all-time high score back from data.txt as an int."""
    with open("data.txt") as score_file:
        return int(score_file.read())
# ---CLASSES---
class Scoreboard(Turtle):
    """Heads-up display turtle: shows the running score and the persisted
    all-time high score, and the game-over banner.

    The SCOREBOARD_* values come from the star-imported `constants`
    module (positions, colours, fonts -- see constants.py).
    """

    def __init__(self):
        super().__init__()
        self.speed(SCOREBOARD_SPEED)
        self.penup()
        self.hideturtle()
        # Current-session score starts at zero; the high score is loaded
        # from data.txt via the module-level helper.
        self.score = 0
        self.highest_score = getHighestScore()
        self.refresh()

    def refresh(self):
        """Redraw the score line from scratch at the scoreboard position."""
        self.clear()
        self.goto(SCOREBOARD_X_POSITION, SCOREBOARD_Y_POSITION)
        self.color(SCOREBOARD_TEXT_COLOR)
        text_arg = f"Score: {self.score} Highest Score: {self.highest_score}"
        self.write(arg=text_arg, move=True,
                   align=SCOREBOARD_TEXT_ALIGN, font=SCOREBOARD_TEXT_TUPLE)

    def endgame(self):
        """Write the game-over banner at the screen centre."""
        self.home()
        self.write(arg=SCOREBOARD_GAME_OVER_TEXT, move=True,
                   align=SCOREBOARD_TEXT_ALIGN, font=SCOREBOARD_GAME_OVER_TEXT_TUPLE)

    def resetHighestScore(self):
        """Persist a new high score if beaten, then reset for a new game."""
        # Only rewrite data.txt when the session score beats the record.
        if self.score > self.highest_score:
            with open(file="data.txt", mode="w") as file:
                file.write(str(self.score))
        # Re-read so self.highest_score reflects whatever is on disk.
        self.highest_score = getHighestScore()
        self.score = 0
        self.refresh()
| [
"juan.martinez.p@usach.cl"
] | juan.martinez.p@usach.cl |
a893e66ac629cabd0ef3a579f175bfafa8cd05ff | 5390755819caea3801cf1d448409ffd57bd02b9c | /Scripts/Figure4/Figure4_MetricComparisons.py | c16cc287164e65e59c03b2894376fef67be2ad58 | [] | no_license | PhillipPapastefanou/DroughtAnalysis | ccb43e252f8a355f1e59824e9abc2f8a16b10f17 | c73eea678709079eeaa5fe45e6f6d0061629e485 | refs/heads/master | 2023-07-17T17:20:07.607105 | 2021-08-27T11:24:26 | 2021-08-27T11:24:26 | 287,946,177 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,515 | py |
# Figure 4: nine map panels comparing dataset agreement for three drought
# metrics (rRAI, rMCWD, rscPDSI) across the 2005 / 2010 / 2016 Amazon
# droughts, plus a TSV summary of per-dataset agreement areas.
# NOTE(review): hard-coded local Windows paths and project-local modules --
# runnable only on the author's machine.
from A02_MCWD_Dataset_Analysis.Setup2016 import Setup2016
from A02_MCWD_Dataset_Analysis.Pylibs.MCWD_Analysis21 import MCWDFile
from A02_MCWD_Dataset_Analysis.Pylibs.PrecAnomalyDry21 import PrecAnomaly
from A02_MCWD_Dataset_Analysis.Pylibs.scPDSI2021 import scPDSI
import pandas
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors
#import matplotlib.pylab as plt
import matplotlib as mpl
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import cartopy.feature as cfeature
from cartopy.io.shapereader import Reader
from cartopy.feature import ShapelyFeature

# Per-gridcell area weights for the 0.5-degree Amazon-basin grid
# (presumably columns 3/4 are absolute/relative cell area -- confirm).
rawData = pandas.read_csv(r"F:\Dropbox\ClimateData\Coords\SA_Amazon_basin\Amazon_basin_05_area.txt",
                          sep = ',',
                          header=0).values
areasRelative = rawData[:,4]
areasAbs = rawData[:,3]

# Amazon basin outline drawn on every panel.
raisg_mask = r"F:\Dropbox\ClimateData\AmazonBasin\AB-SHAPE\amazon_shape.shp"
mask = ShapelyFeature(Reader(raisg_mask).geometries(),
                      ccrs.PlateCarree())

bounds = np.arange(1,9)
#cmap = plt.get_cmap('coolwarm', 7)
# NOTE(review): the first two colormaps below are immediately overwritten;
# only the final 5-colour list (binned to 7) is actually used.
cmap = matplotlib.colors.LinearSegmentedColormap.from_list("",
        ['#ffffd9' ,
         '#edf8b1',
         '#c7e9b4',
         '#7fcdbb',
         '#41b6c4',
         '#1d91c0',
         '#225ea8',
         '#0c2c84'
         ], 8)
cmap = matplotlib.colors.LinearSegmentedColormap.from_list("",
        ['#B0B8B4FF',
         '#FC766AFF',
         '#184A45FF'
         ], 7)
cmap = matplotlib.colors.LinearSegmentedColormap.from_list("",
        ['#F2A104',
         '#00743F',
         '#72A2C0',
         '#1D65A6',
         '#192E5B'
         ], 7)
#cmap.set_over('white')
cmap.set_under('white')
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)

setup = Setup2016()
files = setup.files
#files = [
#["CHR", "CHRIPS_AB_Monthly_05_mcwd.nc", "CHRIPS_AB_monthly_05-scPDSI-2016.txt", "CHRIPS_AB_monthly_05.nc"],
#["CRU", "CRU_NCEP_V8_AB_Yearly_05_MCWD.nc", "CRU_NCEP_V8_AB_Monthly_05-scPDSI-2016.txt", "CRU_NCEP_V8_AB_Monthly_05.nc"]]
MCWDFiles = []
scPDSIFiles = []
precFiels = []
# Relative deviations: [dataset, metric (0=MCWD, 1=scPDSI, 2=prec), cell].
# 1946 is the number of basin gridcells.
data2005 = np.zeros((len(files), 3, 1946))
data2010 = np.zeros((len(files), 3, 1946))
data2016 = np.zeros((len(files), 3 , 1946))
for file in files:
    MCWDFiles.append(MCWDFile(setup.MCWDrootPAth + "\\" + file[1]))
    scPDSIFiles.append(scPDSI(setup.scPDSIrootPAth + "\\" + file[2]))
    precFiels.append(PrecAnomaly(setup.PRECrootPAth + "\\" + file[3]))
i = 0
# NOTE(review): the loop variable shadows the imported MCWDFile class.
for MCWDFile in MCWDFiles:
    data = MCWDFile.ParseRelativeDeviation(2001, 2016)
    data2005[i, 0] = data[2005 - 2001]
    data2010[i, 0] = data[2010 - 2001]
    data2016[i, 0] = data[2016 - 2001]
    i += 1
i = 0
# scPDSI series starts one year earlier (2000) than the other two metrics.
for scPDSIFile in scPDSIFiles:
    data = scPDSIFile.ParseRelativeDeviation(2000, 2016)
    data2005[i, 1] = data[2005 - 2000]
    data2010[i, 1] = data[2010 - 2000]
    data2016[i, 1] = data[2016 - 2000]
    i += 1
i = 0
for precFile in precFiels:
    data = precFile.ParseRelativeDeviation(2001, 2016)
    data2005[i, 2] = data[2005 - 2001]
    data2010[i, 2] = data[2010 - 2001]
    data2016[i, 2] = data[2016 - 2001]
    i += 1

# Per cell: number of datasets agreeing the anomaly falls below l.
# Rows 0-2: rRAI (prec), rows 3-5: rMCWD, rows 6-8: rscPDSI; within each
# triple the order is 2005 / 2010 / 2016.
dataCount = np.zeros((9, 1946))
#dataCount[0] = ((-100 < dataSlice) & (dataSlice < -25)).sum(axis = 0)
#dataCount[1] = ((-150 < dataSlice) & (dataSlice < -100)).sum(axis = 0)
#dataCount[2] = (dataSlice < -150).sum(axis = 0)
l = -0.5
dataCount[0] = (data2005[:,2,:] < l).sum(axis = 0)
dataCount[1] = (data2010[:,2,:] < l).sum(axis = 0)
dataCount[2] = (data2016[:,2,:] < l).sum(axis = 0)
dataCount[3] = (data2005[:,0,:] < l).sum(axis = 0)
dataCount[4] = (data2010[:,0,:] < l).sum(axis = 0)
dataCount[5] = (data2016[:,0,:] < l).sum(axis = 0)
dataCount[6] = (data2005[:,1,:] < l).sum(axis = 0)
dataCount[7] = (data2010[:,1,:] < l).sum(axis = 0)
dataCount[8] = (data2016[:,1,:] < l).sum(axis = 0)

fig = plt.figure(figsize=(9,7))
index = 1
import string
lowerletters = string.ascii_lowercase[0:26]  # panel labels a), b), ...
# NOTE(review): df and acc set here are re-initialised below before use.
df = pandas.DataFrame()
acc = np.zeros((len(files), 6))
# One map panel per dataCount row (3 metrics x 3 drought years).
# NOTE(review): dataCount[index - 1] equals dataCount[countFile] since
# index is incremented exactly once per iteration.
for countFile in range(0, dataCount.shape[0]):
    img = MCWDFiles[0].CreateImage(dataCount[index - 1])
    img_extent = MCWDFiles[0].GeoFile.IMG_extent
    offset = [-3, 3, -3, 3]
    axGeo = fig.add_subplot(3, 3, index, projection=ccrs.PlateCarree())
    lon_formatter = LongitudeFormatter(zero_direction_label=True, number_format='g')
    lat_formatter = LatitudeFormatter()
    axGeo.xaxis.set_major_formatter(lon_formatter)
    axGeo.yaxis.set_major_formatter(lat_formatter)
    axGeo.add_feature(cfeature.BORDERS, edgecolor='tab:grey')
    axGeo.coastlines(resolution='110m', linewidth=1, color='tab:grey')
    # axGeo.set_title("Precipitation")
    axGeo.set_extent(list(np.array(img_extent) + np.array(offset)), crs=ccrs.PlateCarree())
    axGeo.add_feature(mask, edgecolor='black', linewidth=1.3, facecolor="None")
    # Panel letter in the upper-left corner.
    axGeo.text(-80.8, 4.7, lowerletters[index-1] +')',
               fontsize=12, horizontalalignment='left', verticalalignment='center',
               bbox=dict(facecolor='white', alpha=0.8, edgecolor='white'))
    # titleTxt = axGeo.set_title(vulnerabilites[i], size=16)
    # titleTxt.set_path_effects([PathEffects.withStroke(linewidth=1, foreground='black')])
    # axGeo.text(0.02, 0.93, textborder[i - 1], horizontalalignment='left', verticalalignment='center',
    # transform=axGeo.transAxes, size=14)
    axGeo.set_xticks([-80, -70, -60, -50], crs=ccrs.PlateCarree())
    axGeo.set_yticks([-20, -15, -10, -5, 0, 5], crs=ccrs.PlateCarree())
    axGeo.set_xlabel(r'Longitude')
    # Only the left-hand column gets a latitude label.
    if index % 3 == 1:
        axGeo.set_ylabel(r'Latitude' )
    imsh = axGeo.imshow(img, transform=ccrs.PlateCarree(), extent=img_extent, cmap=cmap, norm=norm)
    index += 1

# Second pass: per-dataset cell counts below the *extreme* threshold,
# exported as a tidy TSV table.
df = pandas.DataFrame()
acc = np.zeros((len(files), 9))  # NOTE(review): acc is never used
metricPlain = ""
l = -2.5
dataCountIndv = np.zeros((9, 6))
dataCountIndv[0] = (data2005[:,2,:] < l).sum(axis = 1)
dataCountIndv[1] = (data2010[:,2,:] < l).sum(axis = 1)
dataCountIndv[2] = (data2016[:,2,:] < l).sum(axis = 1)
dataCountIndv[3] = (data2005[:,0,:] < l).sum(axis = 1)
dataCountIndv[4] = (data2010[:,0,:] < l).sum(axis = 1)
dataCountIndv[5] = (data2016[:,0,:] < l).sum(axis = 1)
dataCountIndv[6] = (data2005[:,1,:] < l).sum(axis = 1)
dataCountIndv[7] = (data2010[:,1,:] < l).sum(axis = 1)
dataCountIndv[8] = (data2016[:,1,:] < l).sum(axis = 1)
for d in range(0, 9):
    for j in range(0, len(files)):
        # NOTE(review): `metric` (LaTeX label) is assigned but never used;
        # only metricPlain reaches the output table.
        if d < 3:
            metric = "$r\mathrm{RAI}"
            metricPlain = "rRAI"
        elif d < 6:
            metric = "$r\mathrm{MCWD}"
            metricPlain = "rMCWD"
        else:
            metric = "$r\mathrm{scPDSI}"
            metricPlain = "rscPDSI"
        if d % 3 == 0:
            year = 2005
        elif d % 3 == 1:
            year = 2010
        else:
            year = 2016
        # 5.94e6 km^2: total basin area used to convert the cell fraction.
        subdf = pandas.DataFrame({"metric": metricPlain,
                                  "Year": year,
                                  'Dataset': files[j][0],
                                  'Condition': d % 3,
                                  'TotalArea': dataCountIndv[d, j] / 1946.0 * 5.94*10**6 ,
                                  'RelativeArea': dataCountIndv[d, j] / 1946.0
                                  }, index=[0])
        df = df.append(subdf)
df.to_csv(r'AgreementMetricExtreme.tsv', sep= '\t', header = True)

# Figure furniture: row (metric) and column (year) headers.
plt.subplots_adjust(bottom= 0.15, top = 0.8, left = 0.18)
fig.text(0.16, 0.31, '$r\mathrm{scPDSI}$', ha='center', va='center', rotation='vertical', fontsize=12)
fig.text(0.16, 0.51, "$r\mathrm{MCWD}$", ha='center', va='center', rotation='vertical', fontsize=12)
fig.text(0.16, 0.71, "$r\mathrm{RAI}$", ha='center', va='center', rotation='vertical', fontsize=12)
fig.text(0.3, 0.82, '2005', ha='center', va='center', fontsize=12, fontweight='bold')
fig.text(0.54, 0.82, '2010', ha='center', va='center', fontsize=12, fontweight='bold')
fig.text(0.77, 0.82, '2016', ha='center', va='center', fontsize=12, fontweight='bold')
#fig.tight_layout(pad = 3)
# Shared horizontal colorbar across all nine panels.
cax = plt.axes([0.225, 0.1, 0.63, 0.03])
#bar = plt.colorbar(imsh, cax=cax, orientation="horizontal")
#cb1 = mpl.colorbar.ColorbarBase(cax, cmap=cmap,
#                                norm=norm,
#                                orientation='horizontal', boundaries= bounds, ticks = bounds, format='%1i')
cb2 = mpl.colorbar.ColorbarBase(cax,
                                cmap=cmap,
                                norm=norm,
                                boundaries=np.arange(0,8) + 0.5,
                                ticks=np.arange(1,8),
                                orientation='horizontal')
cb2.set_label('Datasets in agreement', fontsize=16)
#cb2.ax.xaxis.set_ticks_position('top')
cb2.ax.xaxis.set_label_position('top')
cb2.ax.set_xticklabels(['1 (None)', '2', '3', '4', '5', '6', '7 (All)'])
plt.subplots_adjust(bottom = 0.23 , wspace= -0.2)
#bar = plt.colorbar.ColorbarBase(cax = cax, cmap=cmap, norm=norm, spacing='proportional', format='%1i')
plt.savefig("Drought metric comparison.png", dpi=600, bbox_inches = 'tight',
            pad_inches = 0.3)
| [
"papa@tum.de"
] | papa@tum.de |
814eaee8516a6718559e22ca73e1626aeb25f1e4 | df7238046190e6245d88fc97045e40967520a6e0 | /test_rest_api.py | 32a03a0d2e3e0ba445892f9104b6ccb98c13ce09 | [] | no_license | Hsing-Wei/reqres-api-test-python | ad33ae7d22e12fff6c7be834807a806a051696b9 | 3c3818d4eadb7963295f8a9515ca86d2b09c0f76 | refs/heads/master | 2022-12-17T19:46:56.004437 | 2020-09-17T10:58:00 | 2020-09-17T10:59:26 | 296,300,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | import pytest
import requests
def register_unsuccessful_data():
    """Invalid (email, password) pairs the register endpoint must reject.

    Returns (email, password) tuples rather than the original dicts:
    pytest's ``parametrize`` unpacks each argvalue by iteration, and
    iterating a dict yields its *keys* ('email', 'password'), so every
    "unsuccessful" case silently tested those two literal strings instead
    of the intended data.  Tuples unpack into the (email, passwd) argnames
    correctly, matching the style of the successful-case parametrization.
    """
    return [
        ("eve.holt@reqres.in", ""),    # missing password
        ("", "cityslicka"),            # missing email
        ("", ""),                      # both missing
        ("email", "password"),         # malformed email
        # over-long password
        ("eve.holt@reqres.in", "cityslickacityslickacityslicka"),
    ]
@pytest.mark.parametrize('email, passwd',
                         [('eve.holt@reqres.in', 'cityslicka'),])
def test_api_register_successful(email, passwd):
    # Happy path: reqres.in's demo register endpoint accepts this
    # documented test account and must answer 200 with a token.
    data = {'email': email,
            'password': passwd}
    resp = requests.post(url="https://reqres.in/api/register", data=data)
    data = resp.json()  # rebinds `data` from payload to response body
    assert (resp.status_code == 200), "Status code is not 200. Rather found : "\
        + str(resp.status_code)
    assert data['token'] is not None, "token is None."
@pytest.mark.parametrize("email, passwd",register_unsuccessful_data())
def test_api_register_unsuccessful(email, passwd):
    # Each invalid credential pair must be rejected with HTTP 400.
    data = {'email': email,
            'password': passwd}
    resp = requests.post(url="https://reqres.in/api/register", data=data)
    data = resp.json()  # rebinds `data` from payload to response body
    assert (resp.status_code == 400), "Status code is not 400. Rather found : "\
        + str(resp.status_code)
"ijerrycen@gmail.com"
] | ijerrycen@gmail.com |
b7f829944132ef7f53e103fbcdf10515528d36d8 | 19bbbf2a0f9de41f4c58d276520981ad442f40df | /Algorithms/Sorting/CountingSort1.py | ca95cac6af527b788bd76792d1be182ceeb4e602 | [] | no_license | benjdj6/Hackerrank | b6e2f43d793eaf6774d5dcee330d578849f99c38 | d92ef462601f2a1cd9a0bc0ecc9438333f5769b5 | refs/heads/master | 2020-12-25T16:57:25.074925 | 2017-10-16T04:03:22 | 2017-10-16T04:03:22 | 41,611,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
# Counting sort phase 1 (HackerRank): tally how many times each value in
# [0, 100) occurs in the input and print all 100 tallies on one line.
# NOTE: Python 2 code (raw_input / print statement).
n = int(raw_input())  # element count; only consumed, not otherwise needed
ar = map(int, raw_input().split())
count = [0]*100  # problem guarantees every value is in [0, 100)
for num in ar:
    count[num] += 1
for val in count:
    print val,  # trailing comma: space-separated output, single line
"benjdj6@vt.edu"
] | benjdj6@vt.edu |
673f462419396445b10e6a597d0e1216a48926d0 | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/SaveSingleTaskForUpdateProhibitionLockRequest.py | 91ad7fbc5e3a6b99891b61c97dac6d191d4a8b14 | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,633 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SaveSingleTaskForUpdateProhibitionLockRequest(RpcRequest):
	"""Auto-generated RPC request for the Aliyun Domain API (version
	2018-01-29), action SaveSingleTaskForUpdateProhibitionLock.

	Getters/setters below proxy the request's query parameters; they are
	SDK-generator boilerplate and intentionally left byte-identical.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'Domain', '2018-01-29', 'SaveSingleTaskForUpdateProhibitionLock')

	# IP of the end user issuing the request.
	def get_UserClientIp(self):
		return self.get_query_params().get('UserClientIp')

	def set_UserClientIp(self,UserClientIp):
		self.add_query_param('UserClientIp',UserClientIp)

	# Domain name the prohibition lock applies to.
	def get_DomainName(self):
		return self.get_query_params().get('DomainName')

	def set_DomainName(self,DomainName):
		self.add_query_param('DomainName',DomainName)

	# Response language (e.g. 'en' / 'zh').
	def get_Lang(self):
		return self.get_query_params().get('Lang')

	def set_Lang(self,Lang):
		self.add_query_param('Lang',Lang)

	# Desired lock status.
	def get_Status(self):
		return self.get_query_params().get('Status')

	def set_Status(self,Status):
		self.add_query_param('Status',Status)
"1478458905@qq.com"
] | 1478458905@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.