hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4090bb4b6d1ad59682a210fa58e3049a7296547f | 4,103 | py | Python | castle.py | codyd51/castle | 93e7f8c18a0dacd5437b7503b7f3420d6ebc6256 | [
"MIT"
] | 2 | 2018-08-07T16:18:58.000Z | 2018-08-09T16:59:48.000Z | castle.py | codyd51/castle | 93e7f8c18a0dacd5437b7503b7f3420d6ebc6256 | [
"MIT"
] | null | null | null | castle.py | codyd51/castle | 93e7f8c18a0dacd5437b7503b7f3420d6ebc6256 | [
"MIT"
] | null | null | null | import castle
from typing import Tuple
if __name__ == '__main__':
main()
| 37.3 | 95 | 0.663417 |
40914f27511088ce3ade62cbe86245a30a969a5b | 2,603 | py | Python | pyfos/utils/configure/switch_configuration_show.py | madhavinaiduprathap/pyfosbrocade | ec100e77c441761c3e688f1d8e5d18ad38cc83f4 | [
"Apache-2.0"
] | 44 | 2017-11-17T12:03:11.000Z | 2022-02-03T20:57:56.000Z | pyfos/utils/configure/switch_configuration_show.py | madhavinaiduprathap/pyfosbrocade | ec100e77c441761c3e688f1d8e5d18ad38cc83f4 | [
"Apache-2.0"
] | 13 | 2018-10-09T15:34:15.000Z | 2022-02-24T20:03:17.000Z | pyfos/utils/configure/switch_configuration_show.py | madhavinaiduprathap/pyfosbrocade | ec100e77c441761c3e688f1d8e5d18ad38cc83f4 | [
"Apache-2.0"
] | 23 | 2017-12-14T18:08:33.000Z | 2022-02-03T15:33:40.000Z | #!/usr/bin/env python3
# Copyright 2018 Brocade Communications Systems LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`switch_configuration_show` - PyFOS util for configuring switch operation
********************************************************************************
The :mod:`switch_configuration_show` util provides for configuring switch \
operation.
This module is a stand-alone script that can be used to display switch
attributes.
* Input:
* -L=<login>: The login ID. If not provided, an interactive
prompt will request one.
* -P=<password>: The password. If not provided, an interactive
prompt will request one.
* -i=<IP address>: The IP address.
* -f=<VFID>: The VFID or -1 if VF is disabled. If unspecified,
a VFID of 128 is assumed.
* Output:
* The switch attributes in JSON format.
.. function:: show_switch_conf(session)
Example Usage of the Method::
ret = switch_configuration_show.show_switch_conf(session)
print (ret)
Details::
switch_conf_obj = switch_configuration()
result = switch_conf_obj.get(session)
return result
* Input:
:param session: The session returned by login.
* Output:
:rtype: A dictionary of return status matching the REST response.
*Use Cases*
1. Retrieve the configuration parameters of the switch.
"""
import sys
from pyfos import pyfos_auth
import pyfos.pyfos_brocade_fibrechannel_configuration as py_fc
from pyfos import pyfos_util
from pyfos.utils import brcd_util
switch = py_fc.switch_configuration
if __name__ == "__main__":
main(sys.argv[1:])
| 28.293478 | 80 | 0.683826 |
409191dc8e0d6b6abf220bc3d47860d3cfbeff3a | 31,078 | py | Python | vehicle/views.py | BernardAli/vehicle-service-mgt | 242d9797f9138a23d1c649d63660c2ee0b6cc462 | [
"MIT"
] | 105 | 2020-09-27T16:10:44.000Z | 2022-03-31T18:08:36.000Z | vehicle/views.py | BernardAli/vehicle-service-mgt | 242d9797f9138a23d1c649d63660c2ee0b6cc462 | [
"MIT"
] | 1 | 2021-07-15T21:36:09.000Z | 2021-07-15T21:36:09.000Z | vehicle/views.py | BernardAli/vehicle-service-mgt | 242d9797f9138a23d1c649d63660c2ee0b6cc462 | [
"MIT"
] | 87 | 2020-10-02T11:45:42.000Z | 2022-03-25T16:43:22.000Z | from django.shortcuts import render,redirect,reverse
from . import forms,models
from django.db.models import Sum
from django.contrib.auth.models import Group
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required,user_passes_test
from django.conf import settings
from django.db.models import Q
#for showing signup/login button for customer
#for showing signup/login button for mechanics
#for showing signup/login button for ADMIN(by sumit)
#for checking user customer, mechanic or admin(by sumit)
#============================================================================================
# ADMIN RELATED views start
#============================================================================================
#============================================================================================
# ADMIN RELATED views END
#============================================================================================
#============================================================================================
# CUSTOMER RELATED views start
#============================================================================================
#============================================================================================
# CUSTOMER RELATED views END
#============================================================================================
#============================================================================================
# MECHANIC RELATED views start
#============================================================================================
#============================================================================================
# MECHANIC RELATED views start
#============================================================================================
# for aboutus and contact
def aboutus_view(request):
return render(request,'vehicle/aboutus.html')
def contactus_view(request):
sub = forms.ContactusForm()
if request.method == 'POST':
sub = forms.ContactusForm(request.POST)
if sub.is_valid():
email = sub.cleaned_data['Email']
name=sub.cleaned_data['Name']
message = sub.cleaned_data['Message']
send_mail(str(name)+' || '+str(email),message,settings.EMAIL_HOST_USER, settings.EMAIL_RECEIVING_USER, fail_silently = False)
return render(request, 'vehicle/contactussuccess.html')
return render(request, 'vehicle/contactus.html', {'form':sub})
| 41.327128 | 150 | 0.702491 |
409204c88e09d10160109d7dfc196e9a1647012b | 28,322 | py | Python | deep_disfluency/utils/tools.py | treena908/deep_disfluency | 4e18bc17e74c356cd3a9c26fc80bf1c4a5487d59 | [
"MIT"
] | null | null | null | deep_disfluency/utils/tools.py | treena908/deep_disfluency | 4e18bc17e74c356cd3a9c26fc80bf1c4a5487d59 | [
"MIT"
] | null | null | null | deep_disfluency/utils/tools.py | treena908/deep_disfluency | 4e18bc17e74c356cd3a9c26fc80bf1c4a5487d59 | [
"MIT"
] | null | null | null | import random
import numpy as np
import itertools
import re
from collections import defaultdict
import os
def convert_to_simple_label(tag, rep="disf1_uttseg"):
"""Takes the complex tag set and gives back the simple,
smaller version with ten tags:
"""
disftag = "<f/>"
if "<rm-" in tag:
disftag = "<rm-0/>"
elif "<e" in tag:
disftag = "<e/>"
if "uttseg" in rep: # if combined task with TTO
m = re.search(r'<[ct]*/>', tag)
if m:
return disftag + m.group(0)
else:
print("WARNING NO TAG", +tag)
return ""
return disftag # if not TT0
def convert_from_full_tag_set_to_idx(tag, rep, idx_to_label):
"""Maps from the full tag set of trp repairs to the new dictionary"""
if "simple" in rep:
tag = convert_to_simple_label(tag)
for k, v in idx_to_label.items():
if v in tag: # a substring relation
return k
def add_word_continuation_tags(tags):
"""In place, add a continutation tag to each word:
<cc/> -word continues current dialogue act and the next word will also
continue it
<ct/> -word continues current dialogue act and is the last word of it
<tc/> -word starts this dialogue act tag and the next word continues it
<tt/> -word starts and ends dialogue act (single word dialogue act)
"""
tags = list(tags)
for i in range(0, len(tags)):
if i == 0:
tags[i] = tags[i] + "<t"
else:
tags[i] = tags[i] + "<c"
if i == len(tags)-1:
tags[i] = tags[i] + "t/>"
else:
tags[i] = tags[i] + "c/>"
return tags
def verify_disfluency_tags(tags, normalize_ID=False):
"""Check that the repair tags sequence is valid.
Keyword arguments:
normalize_ID -- boolean, whether to convert the repair ID
numbers to be derivable from their unique RPS position in the utterance.
"""
id_map = dict() # map between old ID and new ID
# in first pass get old and new IDs
for i in range(0, len(tags)):
rps = re.findall("<rps id\=\"[0-9]+\"\/>", tags[i])
if rps:
id_map[rps[0][rps[0].find("=")+2:-3]] = str(i)
# key: old repair ID, value, list [reparandum,interregnum,repair]
# all True when repair is all there
repairs = defaultdict(list)
for r in id_map.keys():
repairs[r] = [None, None, None] # three valued None<False<True
# print(repairs)
# second pass verify the validity of the tags
# and (optionally) modify the IDs
for i in range(0, len(tags)): # iterate over all tag strings
new_tags = []
if tags[i] == "":
assert(all([repairs[ID][2] or
repairs[ID] == [None, None, None]
for ID in repairs.keys()])),\
"Unresolved repairs at fluent tag\n\t" + str(repairs)
for tag in get_tags(tags[i]): # iterate over all tags
# print(i)
# print(tag)
if tag == "<e/>":
new_tags.append(tag)
continue
ID = tag[tag.find("=")+2:-3]
if "<rms" in tag:
assert repairs[ID][0] == None,\
"reparandum started parsed more than once " + ID
assert repairs[ID][1] == None,\
"reparandum start again during interregnum phase " + ID
assert repairs[ID][2] == None,\
"reparandum start again during repair phase " + ID
repairs[ID][0] = False # set in progress
elif "<rm " in tag:
assert repairs[ID][0] != None,\
"mid reparandum tag before reparandum start " + ID
assert repairs[ID][2] == None,\
"mid reparandum tag in a interregnum phase or beyond " + ID
assert repairs[ID][2] == None,\
"mid reparandum tag in a repair phase or beyond " + ID
elif "<i" in tag:
assert repairs[ID][0] != None,\
"interregnum start before reparandum start " + ID
assert repairs[ID][2] == None,\
"interregnum in a repair phase " + ID
if repairs[ID][1] == None: # interregnum not reached yet
repairs[ID][0] = True # reparandum completed
repairs[ID][1] = False # interregnum in progress
elif "<rps" in tag:
assert repairs[ID][0] != None,\
"repair start before reparandum start " + ID
assert repairs[ID][1] != True,\
"interregnum over before repair start " + ID
assert repairs[ID][2] == None,\
"repair start parsed twice " + ID
repairs[ID][0] = True # reparanudm complete
repairs[ID][1] = True # interregnum complete
repairs[ID][2] = False # repair in progress
elif "<rp " in tag:
assert repairs[ID][0] == True,\
"mid repair word start before reparandum end " + ID
assert repairs[ID][1] == True,\
"mid repair word start before interregnum end " + ID
assert repairs[ID][2] == False,\
"mid repair tag before repair start tag " + ID
elif "<rpn" in tag:
# make sure the rps is order in tag string is before
assert repairs[ID][0] == True,\
"repair end before reparandum end " + ID
assert repairs[ID][1] == True,\
"repair end before interregnum end " + ID
assert repairs[ID][2] == False,\
"repair end before repair start " + ID
repairs[ID][2] = True
# do the replacement of the tag's ID after checking
new_tags.append(tag.replace(ID, id_map[ID]))
if normalize_ID:
tags[i] = "".join(new_tags)
assert all([repairs[ID][2] for ID in repairs.keys()]),\
"Unresolved repairs:\n\t" + str(repairs)
def shuffle(lol, seed):
"""Shuffle inplace each list in the same order.
lol :: list of list as input
seed :: seed the shuffling
"""
for l in lol:
random.seed(seed)
random.shuffle(l)
def minibatch(l, bs):
"""Returns a list of minibatches of indexes
which size is equal to bs
border cases are treated as follow:
eg: [0,1,2,3] and bs = 3
will output:
[[0],[0,1],[0,1,2],[1,2,3]]
l :: list of word idxs
"""
out = [l[:i] for i in xrange(1, min(bs, len(l)+1))]
out += [l[i-bs:i] for i in xrange(bs, len(l)+1)]
assert len(l) == len(out)
return out
def indices_from_length(sentence_length, bs, start_index=0):
"""Return a list of indexes pairs (start/stop) for each word
max difference between start and stop equal to bs
border cases are treated as follow:
eg: sentenceLength=4 and bs = 3
will output:
[[0,0],[0,1],[0,2],[1,3]]
"""
l = map(lambda x: start_index+x, xrange(sentence_length))
out = []
for i in xrange(0, min(bs, len(l))):
out.append([l[0], l[i]])
for i in xrange(bs+1, len(l)+1):
out.append([l[i-bs], l[i-1]])
assert len(l) == sentence_length
return out
def context_win(l, win):
"""Return a list of list of indexes corresponding
to context windows surrounding each word in the sentence
given a list of indexes composing a sentence.
win :: int corresponding to the size of the window
"""
assert (win % 2) == 1
assert win >= 1
l = list(l)
lpadded = win/2 * [-1] + l + win/2 * [-1]
out = [lpadded[i:i+win] for i in range(len(l))]
assert len(out) == len(l)
return out
def context_win_backwards(l, win):
'''Same as contextwin except only backwards context
(i.e. like an n-gram model)
'''
assert win >= 1
l = list(l)
lpadded = (win-1) * [-1] + l
out = [lpadded[i: i+win] for i in range(len(l))]
assert len(out) == len(l)
return out
def corpus_to_indexed_matrix(my_array_list, win, bs, sentence=False):
"""Returns a matrix of contextwins for a list of utterances of
dimensions win * n_words_in_corpus
(i.e. total length of all arrays in my_array_list)
and corresponding matrix of indexes (of just start/stop for each one)
so 2 * n_words_in_corpus
of where to access these, using bs (backprop distance)
as the limiting history size
"""
sentences = [] # a list (of arrays, or lists?), returned as matrix
indices = [] # a list of index pairs (arrays?), returned as matrix
totalSize = 0
if sentence:
for sent in my_array_list:
mysent = np.asarray([-1] * (bs-1) + list(sent)) # padding with eos
# get list of context windows
mywords = context_win_backwards(mysent, win)
# just one per utterance for now..
cindices = [[totalSize, totalSize+len(mywords)-1]]
cwords = []
for i in range(bs, len(mywords)+1):
words = list(itertools.chain(*mywords[(i-bs):i]))
cwords.append(words) # always (bs * n) words long
# print cwords
sentences.extend(cwords)
indices.extend(cindices)
totalSize += len(cwords)
else:
for sentence in my_array_list:
# get list of context windows
cwords = context_win_backwards(sentence, win)
cindices = indices_from_length(len(cwords), bs, totalSize)
indices.extend(cindices)
sentences.extend(cwords)
totalSize += len(cwords)
for s in sentences:
if any([x is None for x in s]):
print(s)
return np.matrix(sentences, dtype='int32'), indices
def convert_from_eval_tags_to_inc_disfluency_tags(tags, words,
representation="disf1",
limit=8):
"""Conversion from disfluency tagged corpus with xml-style tags
as from STIR (https://bitbucket.org/julianhough/stir)
to the strictly left-to-right schemas as
described by Hough and Schlangen 2015 Interspeech paper,
which are used by RNN architectures at runtime.
Keyword arguments:
tags -- the STIR eval style disfluency tags
words -- the words in the utterance
representation -- the number corresponding to the type of tagging system
1=standard, 2=rm-N values where N does not count intervening edit terms
3=same as 2 but with a 'c' tag after edit terms have ended.
limit -- the limit on the distance back from the repair start
"""
repair_dict = defaultdict(list)
new_tags = []
# print("tags")
# print(tags)
# print('words')
# print(words)
for t in range(0, len(tags)):
if "uttseg" in representation:
m = re.search(r'<[ct]*/>', tags[t])
if m:
TTO_tag = m.group(0)
tags[t] = tags[t].replace(TTO_tag, "")
if "dact" in representation:
m = re.search(r'<diact type="[^\s]*"/>', tags[t])
if m:
dact_tag = m.group(0)
tags[t] = tags[t].replace(dact_tag, "")
if "laugh" in representation:
m = re.search(r'<speechLaugh/>|<laughter/>', tags[t])
if m:
laughter_tag = m.group(0)
else:
laughter_tag = "<nolaughter/>"
tags[t] = tags[t].replace(laughter_tag, "")
current_tag = ""
if "<e/>" in tags[t] or "<i" in tags[t]:
current_tag = "<e/>" # TODO may make this an interregnum
if "<rms" in tags[t]:
rms = re.findall("<rms id\=\"[0-9]+\"\/>", tags[t], re.S)
for r in rms:
repairID = r[r.find("=")+2:-3]
repair_dict[repairID] = [t, 0]
if "<rps" in tags[t]:
rps = re.findall("<rps id\=\"[0-9]+\"\/>", tags[t], re.S)
for r in rps:
repairID = r[r.find("=")+2:-3]
# print('repairID')
# print(repairID)
# print(repair_dict.get(repairID))
# print(str(repairID)+str(tags)+str(words))
assert repair_dict.get(repairID), str(repairID)+str(tags)+str(words)
repair_dict[repairID][1] = t
dist = min(t-repair_dict[repairID][0], limit)
# adjust in case the reparandum is shortened due to the limit
repair_dict[repairID][0] = t-dist
current_tag += "<rm-{}/>".format(dist) + "<rpMid/>"
if "<rpn" in tags[t]:
rpns = re.findall("<rpnrep id\=\"[0-9]+\"\/>", tags[t], re.S) +\
re.findall("<rpnsub id\=\"[0-9]+\"\/>", tags[t], re.S)
rpns_del = re.findall("<rpndel id\=\"[0-9]+\"\/>", tags[t], re.S)
# slight simplifying assumption is to take the repair with
# the longest reparandum as the end category
repair_type = ""
longestlength = 0
for r in rpns:
repairID = r[r.find("=")+2:-3]
l = repair_dict[repairID]
if l[1]-l[0] > longestlength:
longestlength = l[1]-l[0]
repair_type = "Sub"
for r in rpns_del:
repairID = r[r.find("=")+2:-3]
l = repair_dict[repairID]
if l[1]-l[0] > longestlength:
longestlength = l[1]-l[0]
repair_type = "Del"
if repair_type == "":
raise Exception("Repair not passed \
correctly."+str(words)+str(tags))
current_tag += "<rpEnd"+repair_type+"/>"
current_tag = current_tag.replace("<rpMid/>", "")
if current_tag == "":
current_tag = "<f/>"
if "uttseg" in representation:
current_tag += TTO_tag
if "dact" in representation:
current_tag += dact_tag
if "laugh" in representation:
current_tag += laughter_tag
new_tags.append(current_tag)
return new_tags
def convert_from_inc_disfluency_tags_to_eval_tags(
tags, words,
start=0,
representation="disf1_uttseg"):
"""Converts the incremental style output tags of the RNN to the standard
STIR eval output tags.
The exact inverse of convertFromEvalTagsToIncrementalDisfluencyTags.
Keyword arguments:
tags -- the RNN style disfluency tags
words -- the words in the utterance
start -- position from where to begin changing the tags from
representation -- the number corresponding to the type of tagging system,
1=standard, 2=rm-N values where N does not count intervening edit terms
3=same as 2 but with a 'c' tag after edit terms have ended.
"""
# maps from the repair ID to a list of
# [reparandumStart,repairStart,repairOver]
repair_dict = defaultdict(list)
new_tags = []
if start > 0:
# assuming the tags up to this point are already converted
new_tags = tags[:start]
if "mid" not in representation:
rps_s = re.findall("<rps id\=\"[0-9]+\"\/>", tags[start-1])
rpmid = re.findall("<rp id\=\"[0-9]+\"\/>", tags[start-1])
if rps_s:
for r in rps_s:
repairID = r[r.find("=")+2:-3]
resolved_repair = re.findall(
"<rpn[repsubdl]+ id\=\"{}\"\/>"
.format(repairID), tags[start-1])
if not resolved_repair:
if not rpmid:
rpmid = []
rpmid.append(r.replace("rps ", "rp "))
if rpmid:
newstart = start-1
for rp in rpmid:
rps = rp.replace("rp ", "rps ")
repairID = rp[rp.find("=")+2:-3]
# go back and find the repair
for b in range(newstart, -1, -1):
if rps in tags[b]:
repair_dict[repairID] = [b, b, False]
break
for t in range(start, len(tags)):
current_tag = ""
if "uttseg" in representation:
m = re.search(r'<[ct]*/>', tags[t])
if m:
TTO_tag = m.group(0)
if "<e/>" in tags[t] or "<i/>" in tags[t]:
current_tag = "<e/>"
if "<rm-" in tags[t]:
rps = re.findall("<rm-[0-9]+\/>", tags[t], re.S)
for r in rps: # should only be one
current_tag += '<rps id="{}"/>'.format(t)
# print t-dist
if "simple" in representation:
# simply tagging the rps
pass
else:
dist = int(r[r.find("-")+1:-2])
repair_dict[str(t)] = [max([0, t-dist]), t, False]
# backwards looking search if full set
# print new_tags, t, dist, t-dist, max([0, t-dist])
# print tags[:t+1]
rms_start_idx = max([0, t-dist])
new_tags[rms_start_idx] = '<rms id="{}"/>'\
.format(t) + new_tags[rms_start_idx]\
.replace("<f/>", "")
reparandum = False # interregnum if edit term
for b in range(t-1, max([0, t-dist]), -1):
if "<e" not in new_tags[b]:
reparandum = True
new_tags[b] = '<rm id="{}"/>'.format(t) +\
new_tags[b].replace("<f/>", "")
if reparandum is False and "<e" in new_tags[b]:
new_tags[b] = '<i id="{}"/>'.\
format(t) + new_tags[b]
# repair ends
if "<rpEnd" in tags[t]:
rpns = re.findall("<rpEndSub/>", tags[t], re.S)
rpns_del = re.findall("<rpEndDel/>", tags[t], re.S)
rpnAll = rpns + rpns_del
if rpnAll:
for k, v in repair_dict.items():
if t >= int(k) and v[2] is False:
repair_dict[k][2] = True
# classify the repair
if rpns_del: # a delete
current_tag += '<rpndel id="{}"/>'.format(k)
rpns_del.pop(0)
continue
reparandum = [words[i] for i in range(0, len(new_tags))
if '<rms id="{}"/>'.
format(k) in new_tags[i] or
'<rm id="{}"/>'.
format(k) in new_tags[i]]
repair = [words[i] for i in range(0, len(new_tags))
if '<rps id="{}"/>'.format(k)
in new_tags[i] or '<rp id="{}"/>'.format(k)
in new_tags[i]] + [words[t]]
if reparandum == repair:
current_tag += '<rpnrep id="{}"/>'.format(k)
else:
current_tag += '<rpnsub id="{}"/>'.format(k)
# mid repair phases still in progress
for k, v in repair_dict.items():
if t > int(k) and v[2] is False:
current_tag += '<rp id="{}"/>'.format(k)
if current_tag == "":
current_tag = "<f/>"
if "uttseg" in representation:
current_tag += TTO_tag
new_tags.append(current_tag)
return new_tags
def verify_dialogue_data_matrix(dialogue_data_matrix, word_dict=None,
pos_dict=None, tag_dict=None, n_lm=0,
n_acoustic=0):
"""Boolean check of whether dialogue data consistent
with args. Checks all idxs are valid and number of features is correct.
Standard form of each row of the matrix should be:
utt_index, word_idx, pos_idx, word_duration,
acoustic_feats.., lm_feats....,label
"""
l = 3 + n_acoustic + n_lm + 1 # row length
try:
for i, row in enumerate(dialogue_data_matrix):
assert len(row) == l,\
"row {} wrong length {}, should be {}".format(i, len(row), l)
assert word_dict[row[1]] is not None,\
"row[1][{}] {} not in word dict".format(i, row[1])
assert pos_dict[row[2]] is not None,\
"row[2][{}] {} not in POS dict".format(i, row[2])
assert tag_dict[row[-1]] is not None,\
"row[-1][{}] {} not in tag dict".format(i, row[-1])
except AssertionError as a:
print(a)
return False
return True
def verify_dialogue_data_matrices_from_folder(matrices_folder_filepath,
word_dict=None,
pos_dict=None,
tag_dict=None,
n_lm=0,
n_acoustic=0):
"""A boolean check that the dialogue matrices make sense for the
particular configuration in args and tag2idx dicts.
"""
for dialogue_file in os.listdir(matrices_folder_filepath):
v = np.load(matrices_folder_filepath + "/" + dialogue_file,allow_pickle=True)
if not verify_dialogue_data_matrix(v,
word_dict=word_dict,
pos_dict=pos_dict,
tag_dict=tag_dict,
n_lm=n_lm,
n_acoustic=n_acoustic):
# print"{} failed test".format(dialogue_file)
return False
return True
def dialogue_data_and_indices_from_matrix(d_matrix,
n_extra,
pre_seg=False,
window_size=2,
bs=9,
tag_rep="disf1_uttseg",
tag_to_idx_map=None,
in_utterances=False):
"""Transforming from input format of row:
utt_index, word_idx, pos_idx, word_duration,
acoustic_feats.., lm_feats....,label
to 5-tuple of:
word_idx, pos_idx, extra, labels, indices
where :word_idx: and :pos_idx: have the correct window context
according to @window_size
and :indices: is the start and stop points for consumption by the
net in training for each label in :labels:. :extra: is the matrix
of extra features.
"""
if len(d_matrix)==0:
return
utt_indices = d_matrix[:, 0]
words = d_matrix[:, 1]
pos = d_matrix[:, 2]
extra = None if n_extra == 0 else d_matrix[:, 3: -1]
labels = d_matrix[:, -1]
word_idx = []
pos_idx = []
current = []
indices = []
previous_idx = -1
for i, a_tuple in enumerate(zip(utt_indices, words, pos, labels)):
utt_idx, w, p, l = a_tuple
# print(w)
current.append((w, p, l))
if pre_seg:
if previous_idx != utt_idx or i == len(labels)-1:
if in_utterances:
start = 0 if indices == [] else indices[-1][1]+1
indices.append([start, start + (len(current)-1)])
else:
indices.extend(indices_from_length(len(current), bs,
start_index=len(indices)))
word_idx.extend(context_win_backwards([x[0] for x in current],
window_size))
pos_idx.extend(context_win_backwards([x[1] for x in current],
window_size))
current = []
# print('final')
# print(w)
# print(word_idx)
elif i == len(labels)-1:
# indices = indices_from_length(len(current), bs)
# currently a simple window of same size
indices = [[j, j + bs] for j in range(0, len(current))]
padding = [[-1, -1]] * (bs - window_size)
word_idx = padding + context_win_backwards([x[0] for x in current],
window_size)
pos_idx = padding + context_win_backwards([x[1] for x in current],
window_size)
previous_idx = utt_idx
# print(pos_idx)
# print(word_idx)
# print(extra)
# print(labels)
# print(indices)
# return np.asarray(word_idx, dtype=np.int32), np.asarray(pos_idx,
# dtype=np.int32),\
# labels,\
# np.asarray(indices, dtype=np.int32)
return np.asarray(word_idx, dtype=np.int32), np.asarray(pos_idx,
dtype=np.int32),\
extra,\
labels,\
np.asarray(indices, dtype=np.int32)
if __name__ == '__main__':
tags = '<f/>,<rms id="3"/>,<i id="3"/><e/>,<rps id="3"/>' +\
'<rpnsub id="3"/>,<f/>,<e/>,<f/>,' + \
'<f/>'
tags = tags.split(",")
words = "i,like,uh,love,to,uh,love,alot".split(",")
# print(tags)
# print(len(tags))
# print(len(words))
new_tags = convert_from_eval_tags_to_inc_disfluency_tags(
tags,
words,
representation="disf1")
# print(new_tags)
old_tags = convert_from_inc_disfluency_tags_to_eval_tags(
new_tags,
words,
representation="disf1")
assert old_tags == tags, "\n " + str(old_tags) + "\n" + str(tags)
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
# print(context_win_backwards(x, 2))
# print "indices", indices_from_length(11, 9)
| 40.634146 | 85 | 0.49541 |
4093599c05b72acbbffdbf768053f07822e78e65 | 124 | py | Python | library/favourite/api/pagination.py | furkan-34/library-DRF-django-api | 3634133b7c543d6d05845dd8fa1f206386c1badb | [
"MIT"
] | null | null | null | library/favourite/api/pagination.py | furkan-34/library-DRF-django-api | 3634133b7c543d6d05845dd8fa1f206386c1badb | [
"MIT"
] | null | null | null | library/favourite/api/pagination.py | furkan-34/library-DRF-django-api | 3634133b7c543d6d05845dd8fa1f206386c1badb | [
"MIT"
] | null | null | null | from rest_framework.pagination import PageNumberPagination | 31 | 58 | 0.870968 |
40946ed59b952cc97c649459f7de1a75d4265832 | 564 | py | Python | Python-Math/Python-Math/check_prime.py | rgabeflores/Scripts | c8138cb4543e576924de2107abb5a65f0b84264c | [
"MIT"
] | 2 | 2018-05-12T10:58:51.000Z | 2021-11-16T11:52:27.000Z | src/Python-Math/check_prime.py | learn-py/Posts | da394236db0a52c93ca1c0374ad121b263555272 | [
"MIT"
] | null | null | null | src/Python-Math/check_prime.py | learn-py/Posts | da394236db0a52c93ca1c0374ad121b263555272 | [
"MIT"
] | null | null | null | '''
@author Gabriel Flores
Checks the primality of an integer.
'''
def is_prime(x):
'''
Checks the primality of an integer.
'''
sqrt = int(x ** (1/2))
for i in range(2, sqrt, 1):
if x % i == 0:
return False
return True
if __name__ == "__main__":
main() | 18.8 | 66 | 0.592199 |
4095239ac8155507cd8501376f1d1a88028e9392 | 1,580 | py | Python | src/contrib/cortex-strings/scripts/plot-top.py | lastweek/source-freebsd | 0821950b0c40cbc891a27964b342e0202a3859ec | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | src/contrib/cortex-strings/scripts/plot-top.py | lastweek/source-freebsd | 0821950b0c40cbc891a27964b342e0202a3859ec | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | src/contrib/cortex-strings/scripts/plot-top.py | lastweek/source-freebsd | 0821950b0c40cbc891a27964b342e0202a3859ec | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | #!/usr/bin/env python
"""Plot the performance of different variants of the string routines
for one size.
"""
import libplot
import pylab
if __name__ == '__main__':
main()
| 25.483871 | 112 | 0.61519 |
40958b5deb96439390eb8a34bb5ed7d5f2983d33 | 3,292 | py | Python | part1.py | aspiringguru/python_sqlite_demo | 01422c69493b7301f66dee5a0c99e358aec9746b | [
"MIT"
] | null | null | null | part1.py | aspiringguru/python_sqlite_demo | 01422c69493b7301f66dee5a0c99e358aec9746b | [
"MIT"
] | null | null | null | part1.py | aspiringguru/python_sqlite_demo | 01422c69493b7301f66dee5a0c99e358aec9746b | [
"MIT"
] | null | null | null | import sqlite3
import time, datetime, random
import matplotlib
matplotlib.use("Agg")
#added due to error, possibly due to install configuration
import matplotlib.pyplot as plt
print(matplotlib.get_backend())
import matplotlib.dates as mdates
from matplotlib import style
style.use('fivethirtyeight')
conn = sqlite3.connect("part1.db")
c = conn.cursor()
def select_all_tasks(c):
"""
Query all rows in the tasks table
:param conn: the Connection object
:return:
"""
c.execute("SELECT * FROM stufftoplot")
rows = c.fetchall()
for row in rows:
print(row)
create_table()
#data_entry()
#data_insert(1111, "2016-01-02", "more keywords", 1)
#data_insert(2222, "2016-01-03", "less keywords", 2)
#dynamic_data_entry()
# time.sleep(1)
#select_all_tasks(c)
#read_from_db()
#graph_data()
create_n_rows(10)
del_and_update()
c.close()
conn.close()
| 25.92126 | 124 | 0.65401 |
40964229b92108c25937fb12522f648ac39e3e91 | 42,098 | py | Python | tests/test_oic_consumer.py | infohash/pyoidc | 62c7318e68c22b7933100d1c06ecc0c78f77f0d9 | [
"Apache-2.0"
] | null | null | null | tests/test_oic_consumer.py | infohash/pyoidc | 62c7318e68c22b7933100d1c06ecc0c78f77f0d9 | [
"Apache-2.0"
] | null | null | null | tests/test_oic_consumer.py | infohash/pyoidc | 62c7318e68c22b7933100d1c06ecc0c78f77f0d9 | [
"Apache-2.0"
] | null | null | null | import json
import os
from urllib.parse import parse_qs
from urllib.parse import urlparse
import pytest
import responses
from freezegun import freeze_time
from jwkest import BadSignature
from jwkest.jwk import SYMKey
from oic.oauth2.message import MissingSigningKey
from oic.oauth2.message import WrongSigningAlgorithm
from oic.oic import DEF_SIGN_ALG
from oic.oic import Server
from oic.oic import response_types_to_grant_types
from oic.oic.consumer import IGNORE
from oic.oic.consumer import Consumer
from oic.oic.consumer import clean_response
from oic.oic.message import AccessTokenRequest
from oic.oic.message import AccessTokenResponse
from oic.oic.message import AuthorizationResponse
from oic.oic.message import IdToken
from oic.oic.message import OpenIDSchema
from oic.oic.message import ProviderConfigurationResponse
from oic.oic.message import RegistrationResponse
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from oic.utils.keyio import KeyBundle
from oic.utils.keyio import KeyJar
from oic.utils.keyio import keybundle_from_local_file
from oic.utils.sdb import DictSessionBackend
from oic.utils.sdb import session_get
from oic.utils.time_util import utc_time_sans_frac
__author__ = "rohe0002"
KC_SYM_VS = KeyBundle({"kty": "oct", "key": "abcdefghijklmnop", "use": "ver"})
KC_SYM_S = KeyBundle({"kty": "oct", "key": "abcdefghijklmnop", "use": "sig"})
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/keys"))
KC_RSA = keybundle_from_local_file(
os.path.join(BASE_PATH, "rsa.key"), "rsa", ["ver", "sig"]
)
SRVKEYS = KeyJar()
SRVKEYS[""] = [KC_RSA]
SRVKEYS["client_1"] = [KC_SYM_VS, KC_RSA]
CLIKEYS = KeyJar()
CLIKEYS["http://localhost:8088"] = [KC_RSA]
CLIKEYS[""] = [KC_RSA, KC_SYM_VS]
CLIKEYS["https://example.com"] = [KC_RSA]
SERVER_INFO = {
"version": "3.0",
"issuer": "https://localhost:8088",
"authorization_endpoint": "http://localhost:8088/authorization",
"token_endpoint": "http://localhost:8088/token",
"userinfo_endpoint": "http://localhost:8088/userinfo",
"flows_supported": ["code", "token"],
}
CONFIG = {
"authz_page": "authz",
"scope": ["openid"],
"response_type": "code",
"request_method": "parameter",
"password": "hemligt",
"max_age": 3600,
"user_info": {"name": None},
}
| 35.585799 | 114 | 0.551071 |
409660d0cd505763586410c6b2b0e9f378f6b60a | 2,338 | py | Python | setup.py | CristianPachacama/cartoframes | 3dc4e10d175069a7d7b734db3d9526127aad9dec | [
"BSD-3-Clause"
] | 1 | 2020-11-23T23:44:32.000Z | 2020-11-23T23:44:32.000Z | setup.py | CristianPachacama/cartoframes | 3dc4e10d175069a7d7b734db3d9526127aad9dec | [
"BSD-3-Clause"
] | null | null | null | setup.py | CristianPachacama/cartoframes | 3dc4e10d175069a7d7b734db3d9526127aad9dec | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import io
from codecs import open
from setuptools import setup, find_packages
REQUIRES = [
'appdirs>=1.4.3,<2.0',
'carto>=1.6.0,<2.0',
'jinja2>=2.10.1,<3.0',
'pandas>=0.24.2<1.0',
'shapely>=1.6.4,<2.0',
'tqdm>=4.32.1,<5.0',
'unidecode>=1.1.0,<2.0',
'webcolors>=1.9.1,<2.0'
]
PACKAGE_DATA = {
'': [
'LICENSE',
'CONTRIBUTORS',
],
'cartoframes': [
'assets/*',
'assets/*.j2'
] + walk_subpkg('assets'),
}
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
about = {}
with open(os.path.join(here, 'cartoframes', '__version__.py'), 'r', 'utf-8') as f:
exec(f.read(), about)
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=long_description,
url=about['__url__'],
author=about['__author__'],
author_email=about['__email__'],
license=about['__license__'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
keywords='carto data science maps spatial pandas',
packages=find_packages(),
install_requires=REQUIRES,
python_requires=">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
include_package_data=True,
package_dir={'cartoframes': 'cartoframes'},
package_data=PACKAGE_DATA,
)
| 28.168675 | 82 | 0.597092 |
40967bb7a1cadc5b1fb7c5cf4f834747f6a13132 | 1,026 | py | Python | Tests/test_BioSQL_mysql_connector_online.py | bioinf-mcb/biopython | 1a1f4a7ee4e0efba517d3d607c56c27e72e399cc | [
"BSD-3-Clause"
] | 2 | 2019-10-25T18:20:34.000Z | 2019-10-28T15:26:40.000Z | Tests/test_BioSQL_mysql_connector_online.py | cosign070128/biopython | 2f02e34ba76306e9c27eec9e051809bec2cece9b | [
"BSD-3-Clause"
] | 9 | 2020-05-05T00:54:23.000Z | 2020-06-09T17:10:45.000Z | Tests/test_BioSQL_mysql_connector_online.py | cosign070128/biopython | 2f02e34ba76306e9c27eec9e051809bec2cece9b | [
"BSD-3-Clause"
] | 3 | 2020-06-29T13:07:46.000Z | 2021-06-14T20:11:55.000Z | #!/usr/bin/env python
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Run BioSQL tests using MySQL."""
import unittest
# Really do want "import *" to get all the test classes:
from common_BioSQL import *  # noqa: F403
from common_BioSQL_online import *  # noqa: F403
# Import these explicitly to avoid flake8 F405 below:
from common_BioSQL import load_biosql_ini, check_config
from common_BioSQL_online import share_config
import requires_internet
# Abort (skip) early when there is no network connectivity.
requires_internet.check()
# Driver/database settings for the MySQL Connector/Python backend.
DBDRIVER = "mysql.connector"
DBTYPE = "mysql"
# Host and credentials — presumably read from a biosql ini file; see
# load_biosql_ini in common_BioSQL for the exact source.
DBHOST, DBUSER, DBPASSWD, TESTDB = load_biosql_ini(DBTYPE)
# This will abort if driver not installed etc:
check_config(DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB)
# Hand the same settings to the shared online test module.
share_config(DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB)
if __name__ == "__main__":
    # Run the test cases
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
| 30.176471 | 70 | 0.770955 |
4096eff81f78a7602d75dd243df5e2e64ac51f0d | 429 | py | Python | kalachakra/saraswati/migrations/0004_ritual_people_name.py | tony-mikhailov/Kalachakra | 7a46be7e75bad0500914e5a7c44662c6740ebaa2 | [
"MIT"
] | null | null | null | kalachakra/saraswati/migrations/0004_ritual_people_name.py | tony-mikhailov/Kalachakra | 7a46be7e75bad0500914e5a7c44662c6740ebaa2 | [
"MIT"
] | 3 | 2021-03-19T01:19:04.000Z | 2021-06-04T22:44:35.000Z | kalachakra/saraswati/migrations/0004_ritual_people_name.py | tony-mikhailov/Kalachakra | 7a46be7e75bad0500914e5a7c44662c6740ebaa2 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2020-04-05 07:50
from django.db import migrations, models
| 22.578947 | 88 | 0.617716 |
40997199af5c3427ea68e5bd37b9d827653408fe | 14,709 | py | Python | src/toil/jobStores/abstractJobStore.py | adamnovak/toil | 3a81f1114ec7f347e6e7bfd861073d897a9188ec | [
"Apache-2.0"
] | null | null | null | src/toil/jobStores/abstractJobStore.py | adamnovak/toil | 3a81f1114ec7f347e6e7bfd861073d897a9188ec | [
"Apache-2.0"
] | null | null | null | src/toil/jobStores/abstractJobStore.py | adamnovak/toil | 3a81f1114ec7f347e6e7bfd861073d897a9188ec | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import re
try:
import cPickle
except ImportError:
import pickle as cPickle
##########################################
#The following methods deal with creating/loading/updating/writing/checking for the
#existence of jobs
##########################################
def jobs(self):
"""
Returns iterator on the jobs in the store.
:rtype : iterator
"""
raise NotImplementedError( )
##########################################
#The following provide an way of creating/reading/writing/updating files
#associated with a given job.
##########################################
##########################################
#The following methods deal with shared files, i.e. files not associated
#with specific jobs.
##########################################
sharedFileNameRegex = re.compile( r'^[a-zA-Z0-9._-]+$' )
# FIXME: Rename to updateSharedFileStream
## Helper methods for subclasses
def _defaultTryCount( self ):
return int( self.config.retryCount+1 )
| 37.143939 | 102 | 0.615066 |
409a342355b661973139a052737ed840078d30d8 | 9,819 | py | Python | dashboard.py | TheCrypticMusic/COVID-19 | b813d6abeb8031f1165ad2981f14bfd75853e083 | [
"MIT"
] | null | null | null | dashboard.py | TheCrypticMusic/COVID-19 | b813d6abeb8031f1165ad2981f14bfd75853e083 | [
"MIT"
] | null | null | null | dashboard.py | TheCrypticMusic/COVID-19 | b813d6abeb8031f1165ad2981f14bfd75853e083 | [
"MIT"
] | null | null | null | from datetime import date
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.express as px
from dash.dependencies import Input, Output
test_data = pd.read_csv("data/world_data.csv")
today = date.today()
external_stylesheets = [dbc.themes.BOOTSTRAP]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.title = "COVID Dashboard - UK Edition"
app.layout = html.Div([
html.Nav(className="navbar navbar-dark fixed-top bg-dark flex-md-nowrap p-0 shadow", children=[
html.A(className="navbar-brand col-sm-3 col-md-2 mr-0", children="COVID-19"),
# dcc.DatePickerRange(className="date-and-location",
# id="month-picker",
# min_date_allowed=date(2020, 1, 30),
# max_date_allowed=date(today.year, today.month, today.day),
# start_date=date(2020, 3, 1),
# end_date=date(today.year, today.month, today.day),
# style={"height": "50%"}
# ),
]),
html.Div(className="container-fluid", children=[
html.Div(className="row", children=[
html.Nav(className="col-md-2 d-none d-md-block bg-light sidebar", children=[
html.Div(className="sidebar-sticky", children=[
html.H6(className="sidebar-heading d-flex px-3 mt-4 mb-1 text-muted", children=[
html.Span("Custom Search"),
]),
html.Ul(className="nav flex-column", children=[
html.Li(className="nav-item", children=[
dcc.Link("User Search", href="/home"),
])]),
html.H6(className="sidebar-heading d-flex px-3 mt-4 mb-1 text-muted", children=[
html.Span("Preset Search"),
]),
dcc.Location(id="url", refresh=False),
html.Ul(className="nav flex-column", children=[
html.Li(className="nav-item", children=[
dcc.Link("Africa", href="/africa"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Asia", href="/asia"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Europe", href="/europe"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("North America", href="/northamerica"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("South America", href="/southamerica"),
html.Span(className="sr-only"),
]),
html.Li(className="nav-item", children=[
dcc.Link("Oceania", href="/oceania"),
html.Span(className="sr-only"),
]),
]),
html.Div(id='page-content'),
html.Ul(className="nav flex-column mb-2")
]),
]),
html.Main(role="main", className="col-md-9 ml-sm-auto col-lg-10 px-4", children=[
html.Div(className="chartjs-size-monitor", style={"position": "absolute", "left": "0px", "top": "0px", "right": "0px", "bottom": "0px", "overflow": "hidden", "pointer-events": "none", "visibility": "hidden", "z-index": "-1"}),
html.Div(className="box-shadow", children=[
]),
dbc.Row(
[
dbc.Col(children=[
html.H1(children="Deaths"),
html.Hr(className="lead"),
html.Div(id="death-stats", children="######"),
]),
dbc.Col(children=[
html.H1(children="Cases"),
html.Hr(className="lead"),
html.Div(id="cases-stats", children="######"),
]),
dbc.Col(children=[
html.H1(children="Vaccines"),
html.Hr(className="lead"),
html.Div(id="vaccines-stats", children="######"),
]),
]
),
html.Div(className="graphs", children=[
dcc.Graph(
id="cases-graph"
),
dcc.Graph(
id="deaths-graph",
),
]),
])])])])
if __name__ == "__main__":
app.run_server(debug=True, dev_tools_ui=False)
| 51.952381 | 250 | 0.440778 |
409ac3a28f63c2603ac7a86d7009827a8fa89371 | 979 | py | Python | dataset/load_data_queue.py | hezhujun/autofocus-rnn | dd21ec5cfce07990172048b74e5fc8e3d5b55229 | [
"MIT"
] | 7 | 2020-08-19T01:32:34.000Z | 2021-12-06T07:31:32.000Z | dataset/load_data_queue.py | hezhujun/autofocus-rnn | dd21ec5cfce07990172048b74e5fc8e3d5b55229 | [
"MIT"
] | 2 | 2021-01-28T07:35:45.000Z | 2021-06-20T14:19:01.000Z | dataset/load_data_queue.py | hezhujun/autofocus-rnn | dd21ec5cfce07990172048b74e5fc8e3d5b55229 | [
"MIT"
] | null | null | null | from collections import OrderedDict
import skimage.io as io
from config import get_config
config = get_config()
_cache = LRUCache(config["data_queue_len"])
| 23.309524 | 63 | 0.6476 |
409ad3c2aaa2132563a0928975965afc50081365 | 1,852 | py | Python | algs/astar.py | jakedolan443/search-algorithm-visualizer | 331c22886ef8017add16bc63a8e75df9643f4fe9 | [
"MIT"
] | null | null | null | algs/astar.py | jakedolan443/search-algorithm-visualizer | 331c22886ef8017add16bc63a8e75df9643f4fe9 | [
"MIT"
] | null | null | null | algs/astar.py | jakedolan443/search-algorithm-visualizer | 331c22886ef8017add16bc63a8e75df9643f4fe9 | [
"MIT"
] | null | null | null | import numpy
from heapq import *
import time
| 30.360656 | 100 | 0.532937 |
409bc944bcc8474410d41d3c5ed935bde146869f | 1,119 | py | Python | examples/serial_client.py | marcinbor85/qupy | 219563523c975d1d5ae2aa47bbd02862c906ab43 | [
"MIT"
] | null | null | null | examples/serial_client.py | marcinbor85/qupy | 219563523c975d1d5ae2aa47bbd02862c906ab43 | [
"MIT"
] | null | null | null | examples/serial_client.py | marcinbor85/qupy | 219563523c975d1d5ae2aa47bbd02862c906ab43 | [
"MIT"
] | null | null | null | import logging
import time
from qupy.framing.slip import Slip
from qupy.interface.serial import SerialPort
from qupy.interface.errors import InterfaceTimeoutError, InterfaceIOError, InterfaceError
from qupy.comm.client import CommClient
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
    # Wire a SLIP framer onto a serial port and drive it with a
    # request/response client.
    s = SerialPort()
    f = Slip()
    c = CommClient(s, f)
    connect = True
    while True:
        if connect:
            # (Re)open the port; on I/O failure back off one second and retry.
            try:
                s.open()
            except InterfaceIOError as e:
                time.sleep(1.0)
                continue
            c.start()
            connect = False
        try:
            print('ask...')
            data = input()
            d = c.ask(data.encode('utf-8'))
            print('data:',d)
            # A reply starting with 'p' ends the session.
            if len(d) > 0 and d[0] == ord('p'):
                break
        except InterfaceIOError as e:
            # Lost the port: tear the client down and re-enter the
            # reconnect path at the top of the loop.
            print('ask io error', str(e))
            c.stop()
            s.close()
            connect = True
        except InterfaceTimeoutError as e:
            # No reply in time; just prompt again.
            print('timeout')
    c.stop()
    s.close()
| 22.836735 | 89 | 0.513852 |
409c909153fb2318680014346b00ba060e9d1ace | 699 | py | Python | summary/abs_summarization.py | solarpark7346/sukjulyo | 52caaa7f49294898b055062d7c0b2cb5c5393c24 | [
"MIT"
] | null | null | null | summary/abs_summarization.py | solarpark7346/sukjulyo | 52caaa7f49294898b055062d7c0b2cb5c5393c24 | [
"MIT"
] | null | null | null | summary/abs_summarization.py | solarpark7346/sukjulyo | 52caaa7f49294898b055062d7c0b2cb5c5393c24 | [
"MIT"
] | 3 | 2021-10-31T08:23:44.000Z | 2022-01-13T03:59:22.000Z | import torch
from transformers import PreTrainedTokenizerFast
from transformers import BartForConditionalGeneration
abs_summary = AbsSummarization() | 38.833333 | 91 | 0.815451 |
409d329c8dc7ebfbbdbfdb66ef4f8976ba9ec528 | 12,413 | py | Python | dp_tornado/helper/io/image/__init__.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
] | 18 | 2015-04-07T14:28:39.000Z | 2020-02-08T14:03:38.000Z | dp_tornado/helper/io/image/__init__.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
] | 7 | 2016-10-05T05:14:06.000Z | 2021-05-20T02:07:22.000Z | dp_tornado/helper/io/image/__init__.py | donghak-shin/dp-tornado | 095bb293661af35cce5f917d8a2228d273489496 | [
"MIT"
] | 11 | 2015-12-15T09:49:39.000Z | 2021-09-06T18:38:21.000Z | # -*- coding: utf-8 -*-
import tempfile
from dp_tornado.engine.helper import Helper as dpHelper
| 30.573892 | 118 | 0.512688 |
409e06685c9ecbd99f82a4b27833a85d0c5a9b1e | 4,385 | py | Python | script.py | triethyl/wbut-results-parsed | 9ca8f5dd6afab1eb2b0436093b3a20e6e07f923d | [
"MIT"
] | 1 | 2019-02-28T05:32:51.000Z | 2019-02-28T05:32:51.000Z | script.py | triethyl/wbut-results-parsed | 9ca8f5dd6afab1eb2b0436093b3a20e6e07f923d | [
"MIT"
] | null | null | null | script.py | triethyl/wbut-results-parsed | 9ca8f5dd6afab1eb2b0436093b3a20e6e07f923d | [
"MIT"
] | 2 | 2019-03-15T19:40:17.000Z | 2019-05-24T17:15:59.000Z | import requests
from bs4 import BeautifulSoup
import json
import re
# Range of Roll Number - User Input
start_roll = int(input("Starting Roll Number: "))
end_roll = int(input("Ending Roll Number: "))
# Semester - User Input
sem = int(input("Which Semester[1-8]: "))
# Verbosity
verbose = int(input("Verbosity Level (1 for just data, 2 for detailed data): "))
# Roll Number Tuple
roll_tuple = tuple(range(start_roll, end_roll+1))
# Getting the Websites
result_url = 'https://makaut.ucanapply.com/smartexam/public/result-details'
get_result_details = 'https://makaut.ucanapply.com/smartexam/public//get-result-details'
# Semester Codes
semcode = ('SM01', 'SM02', 'SM03', 'SM04', 'SM05', 'SM06', 'SM07', 'SM08')
if verbose == 1:
# Disply most recent
for roll in roll_tuple:
data = get_marks_of(roll, sem)
try:
print(f"({data['name']}, {data['sgpa_odd' if sem%2!=0 else 'sgpa_even']})")
except:
pass
elif verbose == 2:
for roll in roll_tuple:
print_marks_properly(roll, sem)
else:
print("[!] Verbosity Level Wrong!")
| 35.650407 | 140 | 0.575143 |
409f7a2dc9434e9656e7bedb75a00b02b076a630 | 1,411 | py | Python | cartoonify.py | adl1995/image-processing-filters | 850e4a6e23ef0f3843cc306cf1e42569f705f07e | [
"MIT"
] | null | null | null | cartoonify.py | adl1995/image-processing-filters | 850e4a6e23ef0f3843cc306cf1e42569f705f07e | [
"MIT"
] | null | null | null | cartoonify.py | adl1995/image-processing-filters | 850e4a6e23ef0f3843cc306cf1e42569f705f07e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
__author__ = "Adeel Ahmad"
__email__ = "adeelahmad14@hotmail.com"
__status__ = "Production"
import matplotlib.pyplot as plt
import numpy as np
import skimage as ski
import Image
def cartoonify(im, display=False):
    """Exaggerate edges of *im* to produce a semi-cartoon image.

    A 3x3 Prewitt-style kernel estimates the horizontal and vertical
    gradients; their magnitude is truncated to integers and added on top
    of twice the input image (``im + (im + magnitude)``), which brightens
    edges relative to flat regions.

    Input:
        im: 2-D grayscale image array (ideally Gaussian-smoothed first,
            as noted in the original docstring)
        display: if truthy, show the result with matplotlib
    Returns:
        the cartoonified image array (same shape as ``im``)
    """
    kernel, kern_size = np.array([[-1, -1, -1], [0, 0, 0], [1, 1, 1]]), 3
    gx, gy = np.zeros_like(im, dtype=float), np.zeros_like(im, dtype=float)
    # Slide the window over every fully-contained 3x3 position; border
    # rows/columns keep their zero-initialized gradient.
    for i in range(im.shape[0] - (kern_size - 1)):
        for j in range(im.shape[1] - (kern_size - 1)):
            window = im[i:i + kern_size, j:j + kern_size]
            gx[i, j], gy[i, j] = np.sum(window * kernel.T), np.sum(window * kernel)
    magnitude = np.sqrt(gx**2 + gy**2)
    # Truncate to whole intensity steps before compositing.
    magnitude = magnitude.astype(np.int64, copy=False)
    cartoon = im + (im + magnitude)
    # Was `display == 1`: that silently ignored other truthy flags even
    # though the parameter is documented (and defaulted) as a boolean.
    if display:
        plt.imshow(cartoon, cmap='gray')
        plt.suptitle('Cartoon')
        plt.show()
    return cartoon
| 31.355556 | 255 | 0.59674 |
40a00c80a3cc741480575d8150f065c48c9b4231 | 4,341 | py | Python | keymapper/__init__.py | rburns629/KeyMapper | ba1f463bdfa7710f3b9487974874db9424632d85 | [
"MIT"
] | null | null | null | keymapper/__init__.py | rburns629/KeyMapper | ba1f463bdfa7710f3b9487974874db9424632d85 | [
"MIT"
] | null | null | null | keymapper/__init__.py | rburns629/KeyMapper | ba1f463bdfa7710f3b9487974874db9424632d85 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
import json
import re
| 33.651163 | 135 | 0.476618 |
40a02814845a829728726e29b79dfead7feb2132 | 3,401 | py | Python | PythonFiles_DataScience/demo37_pythonfordatascience.py | mahnooranjum/Programming_DataScience | f7a4215d4615b3f8460c3a1944a585628cf6930d | [
"MIT"
] | null | null | null | PythonFiles_DataScience/demo37_pythonfordatascience.py | mahnooranjum/Programming_DataScience | f7a4215d4615b3f8460c3a1944a585628cf6930d | [
"MIT"
] | null | null | null | PythonFiles_DataScience/demo37_pythonfordatascience.py | mahnooranjum/Programming_DataScience | f7a4215d4615b3f8460c3a1944a585628cf6930d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Demo37_PythonforDataScience.ipynb
# PYTHON FOR DATA SCIENCE
We will take our python programming skills a step further and process large data in it. Python is an excellent language for deployment. Hence we will be using open source data during the learning process!!
This will make sure we understand the challenges a Data Scientist can face and how to deal with them. In my experience, Data Preprocessing takes 70% of the time in any project. Hence it is crucial for any Data Scientist to know what it is and how it is done.
This may be the boring portion of the course but I assure you, you will feel accomplished by the end of this tutorial.
- Python Basics
- Object Oriented Python
- **Python for Data Science**
- NumPy
- Pandas
- Plotting
- Matplotlib
- Seaborn
Let's get coding !!
"""
#Variables can not start with a number
12var = 1
_13var = 1
name = "Mahnoor"
surname = "Anjum"
age = 21
print("I'm {} {} and I am {} years old.".format(name, surname, age))
name = "Mahnoor"
surname = "Anjum"
age = 21
print("I'm {_1} {_2} and I am {_3} years old.".format(_1 = name, _2= surname, _3 = age))
"""### INDEXING AND SLICING
One of the most important Python concept for data scientists is the slicing operator ':'
"""
str = "ONE TWO THREE FOUR FIVE"
print(str[0])
print(str[5])
print(str[len(str)-1])
str[:5]
str[5:]
str[1]="a"
nested = [1,2,3,['_1','_2','_3',['__1']]]
nested[0]
nested[3][0]
len(nested)
len(nested[3])
nested[3][3]
nested[3][3][0]
dict = {'key1':'value1', \
'key2': 'value2', \
'key3':'value3'}
dict['key1']
T = True
F = False
var = 10
for i in range(var):
print(i)
for i in range(var):
bool = (i==2)
if bool:
break
print(i)
[1,2,3,1,1,2,3,4]
(1,2,3,1,1,2,3,4)
{1,2,3,1,1,2,3,4}
new_set = set([1,2,3,1,1,2,3,4])
new_set.add(5)
new_set
for item in new_set:
print(item)
list(range(4))
my_list = list(range(5,10))
output = []
for number in my_list:
output.append(number**3)
output
output = [num**3 for num in my_list]
output
"""### FUNCTIONS"""
my_function("Jalebi (Hungry okay?)")
my_function()
num = 4
change(num)
num
num = 4
change(num)
num
num = [4]
change(num)
num
my_list
"""### LAMBDA EXPRESSIONS"""
list(map(square, my_list))
list(map(lambda x:x*x, my_list))
"""### BUILT-IN FUNCTIONS"""
s = "We have a hulk !!!"
s.lower()
s.upper()
s.split()
dict = {'key1':1,'key2':2}
dict.keys()
dict.values()
dict.items()
my_list.pop()
my_list
"""### TUPLE UNPACKING"""
list_of_tuples =[(1,2),(3,4),(5,6)]
for (a,b) in list_of_tuples:
print (a)
print (b)
"""### WELCOME TO THE END OF THE TUTORIAL
You made it!! Hope you enjoyed taking this tutorial as much as I enjoyed coding it. From the next tutorial, we will be starting our first Data Science Library called NumPy. Until then, happy coding.
---------------------------------------------------------------------------------
Copyrights 2018, All Rights Reserved.
- Author: Mahnoor Anjum.
- Course: The Complete Hands-On Machine Learning Course
- Date Created: 2018-06-27
- Date Modified: -
""" | 18.284946 | 259 | 0.643046 |
40a0b272172c34d79349dc117521f3988050cbff | 4,401 | py | Python | quantrocket/db.py | Jay-Jay-D/quantrocket-client | b70ac199382d22d56fad923ca2233ce027f3264a | [
"Apache-2.0"
] | null | null | null | quantrocket/db.py | Jay-Jay-D/quantrocket-client | b70ac199382d22d56fad923ca2233ce027f3264a | [
"Apache-2.0"
] | null | null | null | quantrocket/db.py | Jay-Jay-D/quantrocket-client | b70ac199382d22d56fad923ca2233ce027f3264a | [
"Apache-2.0"
] | 1 | 2019-06-12T11:34:27.000Z | 2019-06-12T11:34:27.000Z | # Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from quantrocket.houston import houston
from quantrocket.cli.utils.output import json_to_cli
def list_databases(service=None):
    """
    List databases.

    Parameters
    ----------
    service : str, optional
        only list databases for this service

    Returns
    -------
    list
        list of databases
    """
    # Only attach the filter when the caller actually supplied one.
    params = {"service": service} if service else {}
    resp = houston.get("/db/databases", params=params)
    houston.raise_for_status_with_json(resp)
    return resp.json()
def download_database(database, outfile):
    """
    Download a database from the db service and write to a local file.

    Parameters
    ----------
    database : str, required
        the filename of the database (as returned by the list_databases)
    outfile: str, required
        filename to write the database to

    Returns
    -------
    None
    """
    resp = houston.get("/db/databases/{0}".format(database), stream=True)
    houston.raise_for_status_with_json(resp)
    # Stream to disk in 1 KB chunks so large databases never need to fit
    # in memory; empty keep-alive chunks are skipped.
    with open(outfile, "wb") as dest:
        for chunk in resp.iter_content(chunk_size=1024):
            if not chunk:
                continue
            dest.write(chunk)
def s3_push_databases(service, codes=None):
    """
    Push database(s) to Amazon S3.

    Parameters
    ----------
    service : str, required
        only push databases for this service (specify 'all' to push all services)
    codes: list of str, optional
        only push databases identified by these codes (omit to push all databases for service)

    Returns
    -------
    json
        status message
    """
    # Omit the codes field entirely when no filter was requested.
    payload = {"codes": codes} if codes else {}
    resp = houston.put("/db/s3/{0}".format(service), data=payload)
    houston.raise_for_status_with_json(resp)
    return resp.json()
def s3_pull_databases(service, codes=None, force=False):
    """
    Pull database(s) from Amazon S3 to the db service.

    Parameters
    ----------
    service : str, required
        only pull databases for this service (specify 'all' to pull all services)
    codes: list of str, optional
        only pull databases identified by these codes (omit to pull all databases for service)
    force: bool
        overwrite existing database if one exists (default is to fail if one exists)

    Returns
    -------
    json
        status message
    """
    # Keep only the query options the caller actually set (truthy values),
    # matching the original per-option `if` checks.
    query = {key: val for key, val in (("codes", codes), ("force", force)) if val}
    resp = houston.get("/db/s3/{0}".format(service), params=query)
    houston.raise_for_status_with_json(resp)
    return resp.json()
def optimize_databases(service, codes=None):
    """
    Optimize database file(s) to improve performance.

    Parameters
    ----------
    service : str, required
        only optimize databases for this service (specify 'all' to optimize all services)
    codes: list of str, optional
        only optimize databases identified by these codes (omit to optimize all databases for service)

    Returns
    -------
    json
        status message
    """
    # Omit the codes field entirely when no filter was requested.
    payload = {"codes": codes} if codes else {}
    resp = houston.post("/db/optimizations/{0}".format(service), data=payload)
    houston.raise_for_status_with_json(resp)
    return resp.json()
| 29.536913 | 102 | 0.667803 |
40a0c02ad22b941af8159f65f284f536c99461a2 | 3,889 | py | Python | ink2canvas/GradientHelper.py | greipfrut/pdftohtml5canvas | bd4b829a5fd02b503e6b32c268b265daa92e92e5 | [
"MIT"
] | 4 | 2016-05-06T21:29:39.000Z | 2020-02-25T08:47:48.000Z | ink2canvas/GradientHelper.py | letw/pdftohtml5canvas | bd4b829a5fd02b503e6b32c268b265daa92e92e5 | [
"MIT"
] | null | null | null | ink2canvas/GradientHelper.py | letw/pdftohtml5canvas | bd4b829a5fd02b503e6b32c268b265daa92e92e5 | [
"MIT"
] | null | null | null | from ink2canvas.lib.simpletransform import parseTransform | 43.696629 | 141 | 0.609154 |
40a15dfaa12f9d16539bfd378e8a390a22b70eb7 | 529 | py | Python | project/manage.py | yosukesuzuki/let-me-notify | 39f50214403822712329c1cd953167d6e9b315d6 | [
"MIT"
] | null | null | null | project/manage.py | yosukesuzuki/let-me-notify | 39f50214403822712329c1cd953167d6e9b315d6 | [
"MIT"
] | null | null | null | project/manage.py | yosukesuzuki/let-me-notify | 39f50214403822712329c1cd953167d6e9b315d6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Kay management script.

:Copyright: (c) 2009 Accense Technology, Inc. All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import logging
# Prepend the project directory so local packages win over installed ones.
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
import kay
# The environment is configured before kay.management is imported below.
kay.setup_env(manage_py_env=True)
from werkzeug import script
from kay.management import *
import appengine_config
if __name__ == '__main__':
  # Default to showing help when invoked with no subcommand.
  if len(sys.argv) == 1:
    sys.argv.append("--help")
  script.run()
| 19.592593 | 66 | 0.701323 |
40a223cfd00d5ab5d2f6c9db56030a295e86ca65 | 1,624 | py | Python | examples/plotting/field_pole_figure.py | heprom/pymicro | 176bf3a829dbf67796a3d4471f18868a3da229a7 | [
"MIT"
] | 30 | 2017-03-02T14:43:48.000Z | 2022-02-25T13:22:22.000Z | examples/plotting/field_pole_figure.py | heprom/pymicro | 176bf3a829dbf67796a3d4471f18868a3da229a7 | [
"MIT"
] | 14 | 2019-12-29T12:41:29.000Z | 2021-12-01T21:13:20.000Z | examples/plotting/field_pole_figure.py | heprom/pymicro | 176bf3a829dbf67796a3d4471f18868a3da229a7 | [
"MIT"
] | 18 | 2017-03-21T12:43:19.000Z | 2022-03-22T14:30:06.000Z | from pymicro.crystal.microstructure import *
from pymicro.crystal.texture import *
from pymicro.examples import PYMICRO_EXAMPLES_DATA_DIR
from matplotlib import pyplot as plt, colors, colorbar, cm
import pathlib as pl
'''This example demonstrate how a field can be used to color each symbol on
the pole figure with the :py:meth:~`pymicro.crystal.texture.set_map_field`
method.
'''
#orientations = Orientation.read_euler_txt('../data/orientation_set.inp')
#for i in range(600):
# micro.grains.append(Grain(i, orientations[i + 1]))
euler_list = np.genfromtxt(PYMICRO_EXAMPLES_DATA_DIR / 'orientation_set.inp').tolist()
micro = Microstructure(name='field', autodelete=True)
micro.add_grains(euler_list)
# load strain from dat files
strain_field = np.genfromtxt(PYMICRO_EXAMPLES_DATA_DIR / 'strain_avg_per_grain.dat')[19, ::2]
# build custom pole figures
pf = PoleFigure(microstructure=micro)
pf.mksize = 40
pf.set_map_field('strain', strain_field, field_min_level=0.015, field_max_level=0.025)
fig = plt.figure()
# direct PF
ax1 = fig.add_axes([0.05, 0.05, 0.8, 0.9], aspect='equal')
pf.plot_pf(ax=ax1)
plt.title('111 pole figure, cubic elasticity')
# to add the color bar
ax2 = fig.add_axes([0.8, 0.05, 0.05, 0.9])
norm = colors.Normalize(vmin=0.015, vmax=0.025)
cb = colorbar.ColorbarBase(ax2, cmap=cm.hot, norm=norm, orientation='vertical')
cb.set_label('Average strain (mm/mm)')
image_name = os.path.splitext(__file__)[0] + '.png'
print('writing %s' % image_name)
plt.savefig('%s' % image_name, format='png')
del pf
del micro
from matplotlib import image
image.thumbnail(image_name, 'thumb_' + image_name, 0.2)
| 34.553191 | 93 | 0.75431 |
40a57bcb86583811e90d8029258fdefae6a38a7d | 11,634 | py | Python | model/img2seq_torch.py | marcoleewow/LaTeX_OCR | 0980ea719f8d3175a6bbf6af18873dd72d04b8c7 | [
"Apache-2.0"
] | 290 | 2019-04-04T01:52:32.000Z | 2022-03-30T08:07:53.000Z | model/img2seq_torch.py | w32zhong/LaTeX_OCR | 1e1f196468e678c93dfa2d8ab2ba02fbda38a3c0 | [
"Apache-2.0"
] | 23 | 2019-06-11T05:07:58.000Z | 2022-03-11T23:44:17.000Z | model/img2seq_torch.py | w32zhong/LaTeX_OCR | 1e1f196468e678c93dfa2d8ab2ba02fbda38a3c0 | [
"Apache-2.0"
] | 81 | 2019-04-06T11:40:34.000Z | 2022-02-28T15:08:05.000Z | import time
import sys
import os
import numpy as np
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
from model.base_torch import BaseModel
from model.utils.general import init_dir, get_logger
from model.utils.general import Progbar
from model.utils.general import Config
from model.utils.general import minibatches
from model.components.SimpleCNN import SimpleCNN
from model.components.ResNet import ResNet9
from model.components.DenseNet import DenseNet169
from model.components.seq2seq_torch import EncoderCNN, DecoderWithAttention, Img2Seq
from model.evaluation.text import score_files, truncate_end, write_answers
from model.utils.image import pad_batch_images_2
from model.utils.text import pad_batch_formulas
from torch.utils.data import Dataset
import h5py
import json
from model.utils.data_generator import DataGenerator
| 42.772059 | 147 | 0.577016 |
40a5a49f8963d40bc4247496570aa980197c909d | 719 | py | Python | src/third_party/dart/tools/dom/scripts/all_tests.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
] | 21 | 2021-06-04T21:08:21.000Z | 2022-03-04T14:21:34.000Z | src/third_party/dart/tools/dom/scripts/all_tests.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
] | 1 | 2021-01-21T14:45:59.000Z | 2021-01-21T14:45:59.000Z | src/third_party/dart/tools/dom/scripts/all_tests.py | rhencke/engine | 1016db292c4e73374a0a11536b18303c9522a224 | [
"BSD-3-Clause"
] | 9 | 2021-03-16T09:29:26.000Z | 2022-01-06T08:38:10.000Z | #!/usr/bin/python
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""This entry point runs all script tests."""
import logging.config
import unittest
if __name__ == '__main__':
    # Configure loggers from the adjacent config file, then collect every
    # script test module into one suite and run it with the text runner.
    logging.config.fileConfig('logging.conf')
    suite = unittest.TestLoader().loadTestsFromNames([
        'templateloader_test', 'pegparser_test', 'idlparser_test',
        'idlnode_test', 'idlrenderer_test', 'database_test',
        'databasebuilder_test', 'emitter_test', 'dartgenerator_test',
        'multiemitter_test'
    ])
    unittest.TextTestRunner().run(suite)
40a5c13d7bfe8ebdc535f6e928718db2cd73a81f | 623 | py | Python | src/11/11367.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
] | 11 | 2020-09-20T15:17:11.000Z | 2022-03-17T12:43:33.000Z | src/11/11367.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
] | 3 | 2021-10-30T07:51:36.000Z | 2022-03-09T05:19:23.000Z | src/11/11367.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
] | 13 | 2021-01-21T03:19:08.000Z | 2022-03-28T10:44:58.000Z | """
11367. Report Card Time
: xCrypt0r
: Python 3
: 29,380 KB
: 64 ms
: 2020 9 18
"""
if __name__ == '__main__':
main()
| 20.096774 | 37 | 0.499197 |
40a5dc8510f8fdf8f4c9c7d29bd318a4e6deedc1 | 49,691 | py | Python | imgaug/augmentables/bbs.py | bill0714/imgaug | 5abdc4d9d7f512ba34c78955557b174a64ad22a6 | [
"MIT"
] | 1 | 2019-10-25T17:43:20.000Z | 2019-10-25T17:43:20.000Z | imgaug/augmentables/bbs.py | RTANC/imgaug | 2a3161550a4a1895a227bb8856d525e69a7d503d | [
"MIT"
] | null | null | null | imgaug/augmentables/bbs.py | RTANC/imgaug | 2a3161550a4a1895a227bb8856d525e69a7d503d | [
"MIT"
] | null | null | null | from __future__ import print_function, division, absolute_import
import copy
import numpy as np
import skimage.draw
import skimage.measure
from .. import imgaug as ia
from .utils import normalize_shape, project_coords
# TODO functions: square(), to_aspect_ratio(), contains_point()
# TODO add tests for ndarray inputs
def project(self, from_shape, to_shape):
    """Return this box re-projected from one image shape onto another.

    Intended for plain resizes (e.g. ``x1`` at 10/100px maps to 20 on a
    200px-wide image); it cannot express padding or cropping.
    """
    corners = [(self.x1, self.y1), (self.x2, self.y2)]
    (nx1, ny1), (nx2, ny2) = project_coords(corners, from_shape, to_shape)
    return self.copy(x1=nx1, y1=ny1, x2=nx2, y2=ny2, label=self.label)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
    """Return a new box grown outwards by the given per-side amounts.

    ``all_sides`` is added to every side on top of the side-specific
    values. NOTE(review): the result carries no ``label`` — preserved
    original behavior.
    """
    grow_top = all_sides + top
    grow_right = all_sides + right
    grow_bottom = all_sides + bottom
    grow_left = all_sides + left
    return BoundingBox(
        x1=self.x1 - grow_left,
        x2=self.x2 + grow_right,
        y1=self.y1 - grow_top,
        y2=self.y2 + grow_bottom)
def intersection(self, other, default=None):
    """Return the overlap of this box with ``other``, or ``default``.

    A degenerate overlap (single point or line) still counts: the
    returned box then has zero width and/or height.
    """
    ix1 = max(self.x1, other.x1)
    iy1 = max(self.y1, other.y1)
    ix2 = min(self.x2, other.x2)
    iy2 = min(self.y2, other.y2)
    if ix1 > ix2 or iy1 > iy2:
        return default
    return BoundingBox(x1=ix1, y1=iy1, x2=ix2, y2=iy2)

def union(self, other):
    """Return the smallest box enclosing both this box and ``other``."""
    return BoundingBox(
        x1=min(self.x1, other.x1),
        y1=min(self.y1, other.y1),
        x2=max(self.x2, other.x2),
        y2=max(self.y2, other.y2),
    )
def iou(self, other):
    """Intersection-over-union of this box and ``other``.

    Returns 0.0 for disjoint boxes and whenever the union area is not
    positive.
    """
    overlap = self.intersection(other)
    if overlap is None:
        return 0.0
    denom = self.area + other.area - overlap.area
    if denom <= 0:
        return 0.0
    return overlap.area / denom
def is_fully_within_image(self, image):
    """True iff all four corners lie inside the image plane.

    ``image`` may be an ``(H,W,...)`` array or a shape tuple.
    """
    height, width = normalize_shape(image)[0:2]
    return (self.x1 >= 0 and self.y1 >= 0
            and self.x2 < width and self.y2 < height)

def is_partly_within_image(self, image):
    """True iff the box intersects the image plane at all.

    ``image`` may be an ``(H,W,...)`` array or a shape tuple.
    """
    height, width = normalize_shape(image)[0:2]
    eps = np.finfo(np.float32).eps
    # The image is modeled as a box spanning [0, size-eps] on each axis.
    image_box = BoundingBox(x1=0, y1=0, x2=width - eps, y2=height - eps)
    return self.intersection(image_box) is not None
def is_out_of_image(self, image, fully=True, partly=False):
    """Report whether the box lies outside the image area.

    ``fully``/``partly`` select which degree of "outside" should yield
    ``True``; a box completely inside the image always yields ``False``.
    """
    if self.is_fully_within_image(image):
        return False
    if self.is_partly_within_image(image):
        return partly
    return fully
def clip_out_of_image(self, image):
    """Return a copy of this box clipped to the image plane.

    Parameters
    ----------
    image : (H,W,...) ndarray or tuple of int
        Image (or bare shape tuple) defining the clipping area.

    Returns
    -------
    imgaug.augmentables.bbs.BoundingBox
        Bounding box clipped to fall within the image dimensions.
    """
    shape = normalize_shape(image)
    height, width = shape[0:2]
    # Bug fix: the assertion messages previously accessed ``image.shape``,
    # which raises AttributeError when ``image`` is passed as a shape tuple.
    assert height > 0, (
        "Expected image with height>0, got shape %s." % (shape,))
    assert width > 0, (
        "Expected image with width>0, got shape %s." % (shape,))
    # Clip to [0, size-eps] so coordinates stay strictly inside the plane.
    eps = np.finfo(np.float32).eps
    x1 = np.clip(self.x1, 0, width - eps)
    x2 = np.clip(self.x2, 0, width - eps)
    y1 = np.clip(self.y1, 0, height - eps)
    y2 = np.clip(self.y2, 0, height - eps)
    return self.copy(
        x1=x1,
        y1=y1,
        x2=x2,
        y2=y2,
        label=self.label
    )
# TODO convert this to x/y params?
def shift(self, top=None, right=None, bottom=None, left=None):
    """Return a copy of this box shifted along the x/y axes.

    Each argument is a pixel amount to shift *from* that side towards
    the opposite one; ``None`` counts as 0.
    """
    dx = (0 if left is None else left) - (0 if right is None else right)
    dy = (0 if top is None else top) - (0 if bottom is None else bottom)
    return self.copy(
        x1=self.x1 + dx,
        x2=self.x2 + dx,
        y1=self.y1 + dy,
        y2=self.y2 + dy)
# TODO add explicit test for zero-sized BBs (worked when tested by hand)
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
                  copy=True, raise_if_out_of_image=False, thickness=None):
    """Draw this bounding box onto ``image`` and return the result.

    ``size`` is the border thickness in pixels (extra pixels extend
    outwards); ``alpha`` blends the border with the underlying image.
    """
    if thickness is not None:
        ia.warn_deprecated(
            "Usage of argument 'thickness' in BoundingBox.draw_on_image() "
            "is deprecated. The argument was renamed to 'size'.")
        size = thickness
    if raise_if_out_of_image and self.is_out_of_image(image):
        raise Exception(
            "Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f "
            "on image with shape %s." % (
                self.x1, self.y1, self.x2, self.y2, image.shape))
    result = np.copy(image) if copy else image
    if isinstance(color, (tuple, list)):
        color = np.uint8(color)
    for offset in range(size):
        y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
        # Coordinates in (H-0.5, H) round up to H, which would place the
        # border just barely outside the image even for a box fully inside
        # it; clamp in that case so the border stays visible.
        if self.is_fully_within_image(image):
            y1 = np.clip(y1, 0, image.shape[0] - 1)
            y2 = np.clip(y2, 0, image.shape[0] - 1)
            x1 = np.clip(x1, 0, image.shape[1] - 1)
            x2 = np.clip(x2, 0, image.shape[1] - 1)
        ys = [y1 - offset, y1 - offset, y2 + offset, y2 + offset]
        xs = [x1 - offset, x2 + offset, x2 + offset, x1 - offset]
        rr, cc = skimage.draw.polygon_perimeter(ys, xs, shape=result.shape)
        if alpha >= 0.99:
            # Effectively opaque: write the color directly.
            result[rr, cc, :] = color
        elif ia.is_float_array(result):
            # TODO use blend_alpha here
            result[rr, cc, :] = (
                (1 - alpha) * result[rr, cc, :]
                + alpha * color)
            result = np.clip(result, 0, 255)
        else:
            # Blend in float space, then restore the input dtype.
            orig_dtype = result.dtype
            result = result.astype(np.float32)
            result[rr, cc, :] = (
                (1 - alpha) * result[rr, cc, :]
                + alpha * color)
            result = np.clip(result, 0, 255).astype(orig_dtype)
    return result
# TODO add tests for pad and pad_max
def extract_from_image(self, image, pad=True, pad_max=None,
                       prevent_zero_size=True):
    """Return the pixels of ``image`` covered by this bounding box.

    With ``pad=True`` the image is zero-padded first whenever the box
    reaches outside it (at most ``pad_max`` pixels per side, if given).
    With ``prevent_zero_size=True`` the returned crop is guaranteed to
    have height>=1 and width>=1.
    """
    pad_top = pad_right = pad_bottom = pad_left = 0
    height, width = image.shape[0], image.shape[1]
    x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
    # Coordinates in (H-0.5, H) round up to H, which would add a spurious
    # black border for a box fully inside the image; clamp in that case.
    fully_within = self.is_fully_within_image(image)
    if fully_within:
        y1, y2 = np.clip([y1, y2], 0, height - 1)
        x1, x2 = np.clip([x1, x2], 0, width - 1)
    # TODO add test
    if prevent_zero_size:
        if abs(x2 - x1) < 1:
            x2 = x1 + 1
        if abs(y2 - y1) < 1:
            y2 = y1 + 1
    if pad:
        # Zero-pad the image until the box lies inside it, then slice.
        # TODO probably more efficient to initialize an array of zeros
        # and copy only the portions of the bb into that array that
        # are natively inside the image area
        if x1 < 0:
            pad_left = abs(x1)
            x2 = x2 + pad_left
            width = width + pad_left
            x1 = 0
        if y1 < 0:
            pad_top = abs(y1)
            y2 = y2 + pad_top
            height = height + pad_top
            y1 = 0
        if x2 >= width:
            pad_right = x2 - width
        if y2 >= height:
            pad_bottom = y2 - height
        paddings = [pad_top, pad_right, pad_bottom, pad_left]
        if any(amount > 0 for amount in paddings):
            if pad_max is None:
                pad_max = max(paddings)
            image = ia.pad(
                image,
                top=min(pad_top, pad_max),
                right=min(pad_right, pad_max),
                bottom=min(pad_bottom, pad_max),
                left=min(pad_left, pad_max)
            )
        return image[y1:y2, x1:x2]
    # No padding requested: slice directly when possible, otherwise
    # return an all-zero (possibly empty) crop.
    # NOTE(review): this is a lexicographic tuple comparison, not an
    # element-wise one — preserved as-is; verify intent.
    within_image = (
        (0, 0, 0, 0)
        <= (x1, y1, x2, y2)
        < (width, height, width, height)
    )
    out_height, out_width = (y2 - y1), (x2 - x1)
    if within_image and out_height > 0 and out_width > 0:
        return image[y1:y2, x1:x2]
    if prevent_zero_size:
        out_height, out_width = 1, 1
    else:
        out_height, out_width = 0, 0
    if image.ndim == 2:
        return np.zeros((out_height, out_width), dtype=image.dtype)
    return np.zeros((out_height, out_width, image.shape[-1]),
                    dtype=image.dtype)
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
    """Return the four corners as keypoints, clockwise from top-left."""
    # Deferred import to avoid a circular dependency at module load time.
    from imgaug.augmentables.kps import Keypoint
    corners = ((self.x1, self.y1), (self.x2, self.y1),
               (self.x2, self.y2), (self.x1, self.y2))
    return [Keypoint(x=cx, y=cy) for cx, cy in corners]
def coords_almost_equals(self, other, max_distance=1e-4):
    """True iff both boxes' corner coordinates agree within tolerance.

    ``other`` may be another ``BoundingBox`` or any iterable/array
    holding the two corner (x,y) pairs, e.g. a ``(2,2)`` or ``(4,)``
    ndarray.
    """
    if ia.is_np_array(other):
        # .flat also copes with (N,2)-shaped input instead of (4,).
        flat_other = other.flat
    elif ia.is_iterable(other):
        flat_other = list(ia.flatten(other))
    else:
        assert isinstance(other, BoundingBox), (
            "Expected 'other' to be an iterable containing two "
            "(x,y)-coordinate pairs or a BoundingBox. "
            "Got type %s." % (type(other),))
        flat_other = other.coords.flat
    return np.allclose(self.coords.flat, flat_other,
                       atol=max_distance, rtol=0)

def almost_equals(self, other, max_distance=1e-4):
    """Like coords_almost_equals() but additionally compares labels."""
    return (self.label == other.label
            and self.coords_almost_equals(other, max_distance=max_distance))
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
    """Shallow copy; any non-None argument overrides the copied value."""
    return BoundingBox(
        x1=self.x1 if x1 is None else x1,
        y1=self.y1 if y1 is None else y1,
        x2=self.x2 if x2 is None else x2,
        y2=self.y2 if y2 is None else y2,
        # The label is deep-copied unless explicitly replaced.
        label=copy.deepcopy(self.label) if label is None else label)

def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
    """Deep copy; any non-None argument overrides the copied value."""
    # TODO write specific copy routine with deepcopy for label and remove
    # the deepcopy from copy()
    return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
class BoundingBoxesOnImage(object):
    """Container for the list of all bounding boxes on a single image.

    Parameters
    ----------
    bounding_boxes : list of imgaug.augmentables.bbs.BoundingBox
        List of bounding boxes on the image.

    shape : tuple of int or ndarray
        The shape of the image on which the objects are placed.
        Either an image with shape ``(H,W,[C])`` or a ``tuple`` denoting
        such an image shape.

    Examples
    --------
    >>> import numpy as np
    >>> from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
    >>>
    >>> image = np.zeros((100, 100))
    >>> bbs = [
    >>>     BoundingBox(x1=10, y1=20, x2=20, y2=30),
    >>>     BoundingBox(x1=25, y1=50, x2=30, y2=70)
    >>> ]
    >>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
    """

    # TODO remove this? here it is image height, but in BoundingBox it is
    # bounding box height
    # TODO remove this? here it is image width, but in BoundingBox it is
    # bounding box width
    # NOTE(review): the ``__init__``/property definitions these TODOs refer
    # to are not present in this fragment — presumably stripped by the dump;
    # verify against the full file.
def on(self, image):
    """Return these boxes projected onto a new image (or shape tuple)."""
    target_shape = normalize_shape(image)
    if target_shape[0:2] == self.shape[0:2]:
        # Same plane size: nothing to project.
        return self.deepcopy()
    projected = [box.project(self.shape, target_shape)
                 for box in self.bounding_boxes]
    return BoundingBoxesOnImage(projected, target_shape)
def to_xyxy_array(self, dtype=np.float32):
    """Return an ``(N,4)`` array with one ``(x1,y1,x2,y2)`` row per box.

    Inverse of :func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.
    """
    out = np.zeros((len(self.bounding_boxes), 4), dtype=np.float32)
    for row, box in zip(out, self.bounding_boxes):
        row[:] = (box.x1, box.y1, box.x2, box.y2)
    return out.astype(dtype)

def to_xy_array(self):
    """Return a ``(2*B,2)`` array of corner xy-coordinates (B boxes)."""
    return self.to_xyxy_array().reshape((-1, 2))
def fill_from_xyxy_array_(self, xyxy):
    """Overwrite all box coordinates in-place from an ``(N,4)`` array.

    One ``(x1,y1,x2,y2)`` row is expected per box in this instance;
    flipped coordinates (``x1>x2`` or ``y1>y2``) are reordered
    automatically. Returns this (modified) instance.
    """
    xyxy = np.array(xyxy, dtype=np.float32)
    # np.array([]) has shape (0,), not (0, 4) — hence the special case.
    assert xyxy.shape[0] == 0 or (xyxy.ndim == 2 and xyxy.shape[-1] == 4), (
        "Expected input array to have shape (N,4), "
        "got shape %s." % (xyxy.shape,))
    assert len(xyxy) == len(self.bounding_boxes), (
        "Expected to receive an array with as many rows there are "
        "bounding boxes in this instance. Got %d rows, expected %d." % (
            len(xyxy), len(self.bounding_boxes)))
    for box, (x1, y1, x2, y2) in zip(self.bounding_boxes, xyxy):
        box.x1 = min([x1, x2])
        box.y1 = min([y1, y2])
        box.x2 = max([x1, x2])
        box.y2 = max([y1, y2])
    return self

def fill_from_xy_array_(self, xy):
    """Overwrite all box coordinates in-place from a ``(2*B,2)`` array.

    See :func:`imgaug.augmentables.bbs.BoundingBoxesOnImage.fill_from_xyxy_array_`.
    Returns this (modified) instance.
    """
    xy = np.array(xy, dtype=np.float32)
    return self.fill_from_xyxy_array_(xy.reshape((-1, 4)))
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
                  copy=True, raise_if_out_of_image=False, thickness=None):
    """Draw every bounding box onto ``image`` and return the result.

    All drawing parameters are forwarded to
    :func:`BoundingBox.draw_on_image`; ``thickness`` is deprecated in
    favor of ``size``.
    """
    canvas = np.copy(image) if copy else image
    for box in self.bounding_boxes:
        # Draw in-place on the (possibly copied) canvas.
        canvas = box.draw_on_image(
            canvas,
            color=color,
            alpha=alpha,
            size=size,
            copy=False,
            raise_if_out_of_image=raise_if_out_of_image,
            thickness=thickness
        )
    return canvas
def remove_out_of_image(self, fully=True, partly=False):
    """Return a copy without boxes that are fully/partly outside the image."""
    kept = [box for box in self.bounding_boxes
            if not box.is_out_of_image(self.shape, fully=fully,
                                       partly=partly)]
    return BoundingBoxesOnImage(kept, shape=self.shape)

def clip_out_of_image(self):
    """Return a copy with every box clipped to the image plane.

    Boxes entirely outside the image are dropped.
    """
    clipped = [box.clip_out_of_image(self.shape)
               for box in self.bounding_boxes
               if box.is_partly_within_image(self.shape)]
    return BoundingBoxesOnImage(clipped, shape=self.shape)
def shift(self, top=None, right=None, bottom=None, left=None):
    """Return a copy with every box shifted by the given pixel amounts.

    Each argument shifts *from* that side towards the opposite one;
    ``None`` counts as 0.
    """
    moved = [box.shift(top=top, right=right, bottom=bottom, left=left)
             for box in self.bounding_boxes]
    return BoundingBoxesOnImage(moved, shape=self.shape)
def to_keypoints_on_image(self):
    """Return one ``KeypointsOnImage`` with 4 corner keypoints per box.

    Keypoint order matches the order of ``bounding_boxes``.
    """
    from .kps import KeypointsOnImage
    # Four points per box (not two): the better choice for augmentation,
    # where corners may move independently.
    coords = np.zeros((len(self.bounding_boxes), 2 * 4), dtype=np.float32)
    for row, box in zip(coords, self.bounding_boxes):
        row[:] = (box.x1, box.y1,
                  box.x2, box.y1,
                  box.x2, box.y2,
                  box.x1, box.y2)
    return KeypointsOnImage.from_xy_array(
        coords.reshape((-1, 2)),
        shape=self.shape
    )
def invert_to_keypoints_on_image_(self, kpsoi):
    """Invert ``to_keypoints_on_image()`` in-place (4 keypoints -> 1 box).

    Parameters
    ----------
    kpsoi : imgaug.augmentables.kps.KeypointsOnImages
        Keypoints to convert back to bounding boxes, i.e. the output
        of ``to_keypoints_on_image()``.

    Returns
    -------
    BoundingBoxesOnImage
        This instance with updated coordinates (modified in-place).
    """
    n_expected = len(self.bounding_boxes) * 4
    # Bug fix: the message previously reported ``len(bounding_boxes) * 2``
    # as the expected count, while the check itself requires ``* 4``.
    assert len(kpsoi.keypoints) == n_expected, (
        "Expected %d coordinates, got %d." % (
            n_expected, len(kpsoi.keypoints)))
    for i, box in enumerate(self.bounding_boxes):
        corner_kps = kpsoi.keypoints[4*i:4*i+4]
        xx = [kp.x for kp in corner_kps]
        yy = [kp.y for kp in corner_kps]
        # Re-derive the axis-aligned box from the (possibly moved) corners.
        box.x1 = min(xx)
        box.y1 = min(yy)
        box.x2 = max(xx)
        box.y2 = max(yy)
    self.shape = kpsoi.shape
    return self
def copy(self):
"""Create a shallow copy of the ``BoundingBoxesOnImage`` instance.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
    """Create a deep copy of the ``BoundingBoxesOnImage`` object.

    Returns
    -------
    imgaug.augmentables.bbs.BoundingBoxesOnImage
        Deep copy.

    """
    # Copying each box individually is far faster than running
    # ``copy.deepcopy()`` on the whole container, so do it manually.
    copied_boxes = [bb.deepcopy() for bb in self.bounding_boxes]
    return BoundingBoxesOnImage(copied_boxes, tuple(self.shape))
| 33.327297 | 127 | 0.55666 |
40a5de5724e10b92314c5b47739791ffcddafb72 | 2,891 | py | Python | scanner_relay/run.py | breakds/brokering | fa63d5ed8057a8018bcb11aaebce689c8d18e7ba | [
"MIT"
] | null | null | null | scanner_relay/run.py | breakds/brokering | fa63d5ed8057a8018bcb11aaebce689c8d18e7ba | [
"MIT"
] | null | null | null | scanner_relay/run.py | breakds/brokering | fa63d5ed8057a8018bcb11aaebce689c8d18e7ba | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from twisted.internet import endpoints
from twisted.internet import protocol
from twisted.internet import defer
from twisted.mail import imap4
from scanner_relay.pipeline import Pipeline
from scanner_relay.authentication import PassStoreFetcher, PlainPasswordFetcher
import logging
# Global configuration for the logging. Note that we set the level to
# INFO so that only DEBUG logging does not get to stdout.
FORMAT = '[%(levelname)s] (%(name)s) %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger('run')
# TODO(breakds): And a more graceful (singal handling) way to terminate the program.
def clean_up(unused):
    """Stop the Twisted reactor once all work has finished.

    Used as a deferred/callback target, hence the single ignored
    result argument.

    Parameters
    ----------
    unused
        Callback result; ignored.
    """
    from twisted.internet import reactor
    reactor.stop()
    # Bug fix: corrected typo in the completion message ("workd" -> "work").
    print('All work done!')
if __name__ == '__main__':
# FIXME: Make these configurable
hostname = 'mail.breakds.org'
username = 'bds@breakds.org'.encode('ascii')
pass_store_entry = 'mail.breakds.org/bds'
port = 143
from twisted.internet import reactor
endpoint = endpoints.HostnameEndpoint(reactor, hostname, port)
factory = ScannerRelayProtocolFactory(
username, PassStoreFetcher(pass_store_entry), clean_up)
endpoint.connect(factory)
reactor.run()
| 34.416667 | 93 | 0.71982 |
40a624029504fa50d779ccdfdaa5ed5b7ed61a95 | 1,202 | py | Python | cubes_pilingup.py | akiselev1/hackerrank-solutions | 53c2a76c71c9b3553c077ccfde5178b27594ae72 | [
"MIT"
] | null | null | null | cubes_pilingup.py | akiselev1/hackerrank-solutions | 53c2a76c71c9b3553c077ccfde5178b27594ae72 | [
"MIT"
] | null | null | null | cubes_pilingup.py | akiselev1/hackerrank-solutions | 53c2a76c71c9b3553c077ccfde5178b27594ae72 | [
"MIT"
] | null | null | null | """
Created by akiselev on 2019-06-14
There is a horizontal row of cubes. The length of each cube is given. You need to create a new vertical pile of cubes. The new pile should follow this rule: if cube_i is placed on top of cube_j, then sideLength(cube_j) >= sideLength(cube_i).
When stacking the cubes, you can only pick up either the leftmost or the rightmost cube each time. Print "Yes" if it is possible to stack the cubes. Otherwise, print "No". Do not print the quotation marks.
Input Format
The first line contains a single integer T, the number of test cases.
For each test case, there are 2 lines.
The first line of each test case contains n, the number of cubes.
The second line contains n space-separated integers, denoting the sideLengths of each cube in that order.
Constraints
Output Format
For each test case, output a single line containing either "Yes" or "No" without the quotes.
Sample Input
2
6
4 3 2 1 3 4
3
1 3 2
Sample Output
Yes
No
"""
# For each test case: greedily verify that the side lengths form a
# non-increasing prefix followed by a non-decreasing suffix, which is
# exactly the condition under which a valid pile can be built by taking
# cubes only from the two ends of the row.
for _case in range(int(input())):
    n = int(input())
    heights = list(map(int, input().split()))
    pos = 0
    # Walk the non-increasing prefix ...
    while pos < n - 1 and heights[pos] >= heights[pos + 1]:
        pos += 1
    # ... then the non-decreasing suffix.
    while pos < n - 1 and heights[pos] <= heights[pos + 1]:
        pos += 1
    print("Yes" if pos == n - 1 else "No")
| 23.115385 | 205 | 0.689684 |
40a999434640883a137547c775c396581c90f8a2 | 14,674 | py | Python | flask_web/bootstrap_web_core_py3.py | bopopescu/docker_images_a | 348d0982c5962f2ae34d10183ed9522b7a6fe286 | [
"MIT"
] | null | null | null | flask_web/bootstrap_web_core_py3.py | bopopescu/docker_images_a | 348d0982c5962f2ae34d10183ed9522b7a6fe286 | [
"MIT"
] | null | null | null | flask_web/bootstrap_web_core_py3.py | bopopescu/docker_images_a | 348d0982c5962f2ae34d10183ed9522b7a6fe286 | [
"MIT"
] | null | null | null | #
#
# File: flask_web_py3.py
#
#
#
import os
import json
import redis
import urllib
import flask
from flask import Flask
from flask import render_template,jsonify
from flask_httpauth import HTTPDigestAuth
from flask import request, session, url_for
from redis_support_py3.graph_query_support_py3 import Query_Support
from redis_support_py3.construct_data_handlers_py3 import Generate_Handlers
from web_core.load_static_pages_py3 import Load_Static_Files
from web_core.load_redis_access_py3 import Load_Redis_Access
from redis_support_py3.construct_data_handlers_py3 import Redis_RPC_Client
from bootstrap_web_system_control_py3 import PI_Web_System_Control
from bootstrap_web_monitoring_py3 import PI_Web_Monitor_Server
from bootstrap_mqtt_client_py3 import PI_MQTT_Client_Monitor
from bootstrap_eto_py3 import ETO_Management
from file_server_library.file_server_lib_py3 import Construct_RPC_Library
from bootstrap_irrigation_scheduling_py3 import Irrigation_Scheduling
from irrigation_control.load_irrigation_control_py3 import Load_Irrigation_Control
if __name__ == "__main__":
file_handle = open("/data/redis_server.json",'r')
data = file_handle.read()
file_handle.close()
redis_site_data = json.loads(data)
pi_web_server = PI_Web_Server_Core(__name__, redis_site_data )
pi_web_server.generate_menu_page()
pi_web_server.generate_site_map()
pi_web_server.generate_default_index_page()
port = pi_web_server.result["port"]
pi_web_server.port = port
debug = pi_web_server.result["debug"]
pi_web_server.debug = debug
https_flag = pi_web_server.result["https"]
if https_flag == False:
pi_web_server.run_https()
else:
pi_web_server.run_https()
| 34.855107 | 147 | 0.568284 |
40a99a0c9d4869b889926f6fe54b50b768c6cb98 | 4,160 | py | Python | git_talk/lib/changelog/main.py | cove9988/git-talk | 9f549d8565948a150834bcaa704b55ae15c094c1 | [
"MIT"
] | 5 | 2020-04-06T11:00:27.000Z | 2020-09-30T15:16:56.000Z | git_talk/lib/changelog/main.py | ggdrg/git-talk | 89ed00caa6a426ea9d5fa84cbef588d07aebc1f0 | [
"MIT"
] | 3 | 2020-09-26T02:53:30.000Z | 2020-10-09T01:46:37.000Z | git_talk/lib/changelog/main.py | ggdrg/git-talk | 89ed00caa6a426ea9d5fa84cbef588d07aebc1f0 | [
"MIT"
] | 1 | 2020-09-25T23:41:54.000Z | 2020-09-25T23:41:54.000Z |
import os
import logging
from typing import Optional
import click
from git_talk.lib.changelog import generate_changelog
from git_talk.lib.changelog.presenter import MarkdownPresenter
from git_talk.lib.changelog.repository import GitRepository
# @click.command()
# @click.option(
# "-r",
# "--repo",
# type=click.Path(exists=True),
# default=".",
# help="Path to the repository's root directory [Default: .]",
# )
# @click.option("-t", "--title", default="Changelog", help="The changelog's title [Default: Changelog]")
# @click.option("-d", "--description", help="Your project's description")
# @click.option(
# "-o",
# "--output",
# type=click.File("w"),
# default="CHANGELOG.md",
# help="The place to save the generated changelog [Default: CHANGELOG.md]",
# )
# @click.option("-r", "--remote", default="origin", help="Specify git remote to use for links")
# @click.option("-v", "--latest-version", type=str, help="use specified version as latest release")
# @click.option("-u", "--unreleased", is_flag=True, default=False, help="Include section for unreleased changes")
# @click.option("--diff-url", default=None, help="override url for compares, use {current} and {previous} for tags")
# @click.option("--issue-url", default=None, help="Override url for issues, use {id} for issue id")
# @click.option(
# "--issue-pattern",
# default=r"(#([\w-]+))",
# help="Override regex pattern for issues in commit messages. Should contain two groups, original match and ID used "
# "by issue-url.",
# )
# @click.option(
# "--tag-pattern",
# default=None,
# help="override regex pattern for release tags. "
# "By default use semver tag names semantic. "
# "tag should be contain in one group named 'version'.",
# )
# @click.option("--tag-prefix", default="", help='prefix used in version tags, default: "" ')
# @click.option("--stdout", is_flag=True)
# @click.option("--tag-pattern", default=None, help="Override regex pattern for release tags")
# @click.option("--starting-commit", help="Starting commit to use for changelog generation", default="")
# @click.option("--stopping-commit", help="Stopping commit to use for changelog generation", default="HEAD")
# @click.option(
# "--debug", is_flag=True, help="set logging level to DEBUG",
# )
if __name__ == "__main__":
main() | 33.821138 | 122 | 0.629567 |
40ab6e634c98f0a3601e54792ef5853e9f4bf06f | 6,429 | py | Python | SQED-Generator/Generators/constraint_generator.py | upscale-project/generic-sqed-demo | aa44a7563e6249c6f0641c13e8ca0b013c497df1 | [
"BSD-3-Clause"
] | 6 | 2019-09-10T02:56:47.000Z | 2021-12-18T11:17:16.000Z | SQED-Generator/Generators/constraint_generator.py | upscale-project/generic-sqed-demo | aa44a7563e6249c6f0641c13e8ca0b013c497df1 | [
"BSD-3-Clause"
] | 1 | 2019-07-19T17:05:20.000Z | 2019-07-19T17:05:20.000Z | SQED-Generator/Generators/constraint_generator.py | upscale-project/generic-sqed-demo | aa44a7563e6249c6f0641c13e8ca0b013c497df1 | [
"BSD-3-Clause"
] | 2 | 2019-10-02T00:35:14.000Z | 2019-10-23T21:13:14.000Z | # Copyright (c) Stanford University
#
# This source code is patent protected and being made available under the
# terms explained in the ../LICENSE-Academic and ../LICENSE-GOV files.
# Author: Mario J Srouji
# Email: msrouji@stanford.edu
import copy
import sys
sys.path.append("../FormatParsers/")
sys.path.append("../Interface/")
import format_parser as P
import module_interface as I
| 36.737143 | 116 | 0.586872 |
40abfa616e38a32a5059cf71c64a0d859cac8dd9 | 3,624 | py | Python | modules/losses.py | Sapperdomonik/retinaface-tf2 | af06baffb7f5bd099e5ca6fec51c94ed298a3926 | [
"MIT"
] | null | null | null | modules/losses.py | Sapperdomonik/retinaface-tf2 | af06baffb7f5bd099e5ca6fec51c94ed298a3926 | [
"MIT"
] | null | null | null | modules/losses.py | Sapperdomonik/retinaface-tf2 | af06baffb7f5bd099e5ca6fec51c94ed298a3926 | [
"MIT"
] | null | null | null | import tensorflow as tf
def MultiBoxLoss(num_class=2, neg_pos_ratio=3):
"""multi-box loss"""
return multi_box_loss
| 48.32 | 79 | 0.625828 |
40ac4ec777b7bc387be14a996d46bdf5f0da5291 | 2,416 | py | Python | tests.py | ckelly/pybingmaps | 9214e3a4c2c9e756848fac7c0d76c46dcc64b65d | [
"MIT"
] | null | null | null | tests.py | ckelly/pybingmaps | 9214e3a4c2c9e756848fac7c0d76c46dcc64b65d | [
"MIT"
] | null | null | null | tests.py | ckelly/pybingmaps | 9214e3a4c2c9e756848fac7c0d76c46dcc64b65d | [
"MIT"
] | null | null | null | import unittest
import random
from time import sleep
import os
from bingmaps import *
# TODO: enter your key for testing
api_key = ''
if __name__ == '__main__':
unittest.main() | 29.82716 | 78 | 0.598096 |
40acba1acfb883bbd0db070af8041dc100486a53 | 1,153 | py | Python | fds/config.py | dvershinin/fds | 4c4c96deb3d2bbe4d0853f601c3dc2b87801ced4 | [
"BSD-2-Clause"
] | 9 | 2020-04-29T14:25:06.000Z | 2021-12-30T03:28:05.000Z | fds/config.py | dvershinin/fds | 4c4c96deb3d2bbe4d0853f601c3dc2b87801ced4 | [
"BSD-2-Clause"
] | 18 | 2020-01-28T22:07:07.000Z | 2022-03-20T16:06:12.000Z | fds/config.py | dvershinin/fds | 4c4c96deb3d2bbe4d0853f601c3dc2b87801ced4 | [
"BSD-2-Clause"
] | null | null | null | from cds.CloudflareWrapper import suggest_set_up, cf_config_filename
from .FirewallWrapper import FirewallWrapper
import logging as log
| 36.03125 | 86 | 0.692975 |
40adb16a80ad4faf260352c08db6efc0124c7ac3 | 450 | py | Python | awardapp/migrations/0004_auto_20191024_1607.py | Elisephan/Awards-project | 269bfbe45a35338fab9c71fc7d8de48b61b1580b | [
"MIT"
] | null | null | null | awardapp/migrations/0004_auto_20191024_1607.py | Elisephan/Awards-project | 269bfbe45a35338fab9c71fc7d8de48b61b1580b | [
"MIT"
] | null | null | null | awardapp/migrations/0004_auto_20191024_1607.py | Elisephan/Awards-project | 269bfbe45a35338fab9c71fc7d8de48b61b1580b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-24 16:07
from __future__ import unicode_literals
from django.db import migrations, models
| 21.428571 | 51 | 0.617778 |
40adc41d2f17560f722bc8589f813ea290672937 | 21,395 | py | Python | hikari/events/channel_events.py | Reliku/hikari | c6e62b750ce35885a5e4124ffe8df6445ab34acd | [
"MIT"
] | null | null | null | hikari/events/channel_events.py | Reliku/hikari | c6e62b750ce35885a5e4124ffe8df6445ab34acd | [
"MIT"
] | null | null | null | hikari/events/channel_events.py | Reliku/hikari | c6e62b750ce35885a5e4124ffe8df6445ab34acd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# cython: language_level=3
# Copyright (c) 2020 Nekokatt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Events that fire when channels are modified.
This does not include message events, nor reaction events.
"""
from __future__ import annotations
__all__: typing.List[str] = [
"ChannelEvent",
"GuildChannelEvent",
"DMChannelEvent",
"ChannelCreateEvent",
"GuildChannelCreateEvent",
"ChannelUpdateEvent",
"GuildChannelUpdateEvent",
"ChannelDeleteEvent",
"GuildChannelDeleteEvent",
"PinsUpdateEvent",
"GuildPinsUpdateEvent",
"DMPinsUpdateEvent",
"InviteCreateEvent",
"InviteDeleteEvent",
"WebhookUpdateEvent",
]
import abc
import typing
import attr
from hikari import channels
from hikari import intents
from hikari import traits
from hikari.events import base_events
from hikari.events import shard_events
from hikari.internal import attr_extensions
if typing.TYPE_CHECKING:
import datetime
from hikari import guilds
from hikari import invites
from hikari import messages
from hikari import snowflakes
from hikari import webhooks
from hikari.api import shard as gateway_shard
# TODO: find out what private message intents are needed.
# TODO: This is not documented as having an intent, is this right? The guild version requires GUILDS intent.
| 33.906498 | 119 | 0.686282 |
40af0b1139a38fce3114910895fc7959fcc89bca | 8,649 | py | Python | tests/unit/test_coordinator.py | sopel39/presto-admin | 6e7aee3427bdbea6da2deb41b7f090ef6fdcadd9 | [
"Apache-2.0"
] | 34 | 2016-01-08T21:02:13.000Z | 2017-03-10T02:01:03.000Z | tests/unit/test_coordinator.py | sopel39/presto-admin | 6e7aee3427bdbea6da2deb41b7f090ef6fdcadd9 | [
"Apache-2.0"
] | 3 | 2016-01-27T19:11:14.000Z | 2016-12-02T21:29:53.000Z | tests/unit/test_coordinator.py | sopel39/presto-admin | 6e7aee3427bdbea6da2deb41b7f090ef6fdcadd9 | [
"Apache-2.0"
] | 5 | 2016-04-29T05:27:43.000Z | 2018-01-12T07:50:25.000Z | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests the coordinator module
"""
from fabric.api import env
from mock import patch
from prestoadmin import coordinator
from prestoadmin.util.exception import ConfigurationError
from tests.base_test_case import BaseTestCase
| 47.78453 | 78 | 0.492889 |
40af4a6cd5eabd173caad42e65be14761273bd46 | 3,911 | py | Python | other/string chains/strings4.py | saulc/myth-math | 5e278eb8fbaf16a01f5f021aca2142e5ce3131ec | [
"MIT"
] | null | null | null | other/string chains/strings4.py | saulc/myth-math | 5e278eb8fbaf16a01f5f021aca2142e5ce3131ec | [
"MIT"
] | null | null | null | other/string chains/strings4.py | saulc/myth-math | 5e278eb8fbaf16a01f5f021aca2142e5ce3131ec | [
"MIT"
] | null | null | null | # Saul Castro
# Hiralben Hirpara
# config file format
import random
# list to string with sep 'char'
# string to list, count
# compare words, return 0 for no match,
# 1 if end of a == start of b
# 2 if end of b == start of a
if __name__ == '__main__':
readInput()
| 25.730263 | 85 | 0.504475 |
40b182cffd8ba6689e9b3d11caa57c733d863c65 | 2,646 | py | Python | supervised_learning/analysis.py | gonzalezJohnas/SpeechCommand-recognition | d5351abe45c571a075c24bd04d328e76293f9230 | [
"MIT"
] | null | null | null | supervised_learning/analysis.py | gonzalezJohnas/SpeechCommand-recognition | d5351abe45c571a075c24bd04d328e76293f9230 | [
"MIT"
] | 2 | 2021-04-10T18:12:44.000Z | 2022-02-09T23:36:43.000Z | supervised_learning/analysis.py | gonzalezJohnas/SpeechCommand-recognition | d5351abe45c571a075c24bd04d328e76293f9230 | [
"MIT"
] | null | null | null | from global_utils import *
# target word
TARGET_WORD = 'right'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--wavfile',
help='Path to the .wav files',
required=False
)
parser.add_argument(
'--indir',
help='Absolute path to data directory containing .wav files',
required=False
)
args = parser.parse_args()
main(args)
| 29.730337 | 103 | 0.708239 |
40b1a05b02e671eeb4b12cc51ccc3740e6e21280 | 2,010 | py | Python | qnarre/base/proof.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | qnarre/base/proof.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | qnarre/base/proof.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | # Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from .claim import Claim
from .narrative import Node
from .author import Authority
| 30.454545 | 79 | 0.570149 |
40b2a0543dc5434ad9bc96313563cef627bd5d42 | 6,636 | py | Python | learningPygame/Dave/06-SpaceInvaders/space_invaders.py | Rosebotics/catapult2019 | 4f125632f4d144b97ee3ecaf00a517780d510a70 | [
"MIT"
] | null | null | null | learningPygame/Dave/06-SpaceInvaders/space_invaders.py | Rosebotics/catapult2019 | 4f125632f4d144b97ee3ecaf00a517780d510a70 | [
"MIT"
] | null | null | null | learningPygame/Dave/06-SpaceInvaders/space_invaders.py | Rosebotics/catapult2019 | 4f125632f4d144b97ee3ecaf00a517780d510a70 | [
"MIT"
] | null | null | null | import pygame, sys, random, time
from pygame.locals import *
# Create a Scoreboard class (from scratch)
# Instance variables: screen, x, y, score, and font (size 30)
# Methods: draw (and __init__)
# Create a scoreboard at location 5, 5
# Draw the scoreboard in the game loop
def main():
    """Run the Space Invaders game: set up pygame, create the sprites,
    then process input, movement and collisions in a 60 FPS loop until
    the window is closed."""
    pygame.init()
    clock = pygame.time.Clock()
    pygame.display.set_caption("SPACE INVADERS!")
    screen = pygame.display.set_mode((640, 650))
    enemy_rows = 3
    enemy = EnemyFleet(screen, enemy_rows)
    fighter = Fighter(screen, 320, 590)
    scoreboard = Scoreboard(screen)
    gameover_image = pygame.image.load("gameover.png")
    is_game_over = False
    while True:
        clock.tick(60)  # cap the loop at 60 frames per second
        # Event handling: fire on space, quit on window close.
        for event in pygame.event.get():
            pressed_keys = pygame.key.get_pressed()
            if event.type == KEYDOWN and pressed_keys[K_SPACE]:
                fighter.fire()
            if event.type == QUIT:
                sys.exit()
        screen.fill((0, 0, 0))
        # Continuous key state: move the fighter left/right within bounds.
        pressed_keys = pygame.key.get_pressed()
        if pressed_keys[K_LEFT] and fighter.x > -50:
            fighter.x = fighter.x - 5
        if pressed_keys[K_RIGHT] and fighter.x < 590:
            fighter.x = fighter.x + 5
        fighter.draw()
        enemy.move()
        enemy.draw()
        for missile in fighter.missiles:
            missile.move()
            missile.draw()
        # Collision detection: every missile against every enemy.
        for badguy in enemy.badguys:
            for missile in fighter.missiles:
                if badguy.hit_by(missile):
                    scoreboard.score = scoreboard.score + 100
                    badguy.dead = True
                    missile.exploded = True
        fighter.remove_exploded_missiles()
        enemy.remove_dead_badguys()
        # When the whole fleet is destroyed, spawn a bigger one.
        if enemy.is_defeated:
            enemy_rows = enemy_rows + 1
            enemy = EnemyFleet(screen, enemy_rows)
        scoreboard.draw()
        if not is_game_over:
            pygame.display.update()
        # Game over once any enemy reaches the bottom of the screen.
        for badguy in enemy.badguys:
            if badguy.y > 545:
                screen.blit(gameover_image, (170, 200))
                pygame.display.update()
                is_game_over = True


main()
| 32.851485 | 104 | 0.583484 |
40b2c91082ea21890c36b449104ec87a0b8d9b4b | 266 | py | Python | tests/test_app/rest_app/rest_app/controllers/config_controller.py | jadbin/guniflask | 36253a962c056abf34884263c6919b02b921ad9c | [
"MIT"
] | 12 | 2018-09-06T06:14:59.000Z | 2021-04-18T06:30:44.000Z | tests/test_app/rest_app/rest_app/controllers/config_controller.py | jadbin/guniflask | 36253a962c056abf34884263c6919b02b921ad9c | [
"MIT"
] | null | null | null | tests/test_app/rest_app/rest_app/controllers/config_controller.py | jadbin/guniflask | 36253a962c056abf34884263c6919b02b921ad9c | [
"MIT"
] | 2 | 2019-09-08T22:01:26.000Z | 2020-08-03T07:23:29.000Z | from guniflask.config import settings
from guniflask.web import blueprint, get_route
| 20.461538 | 46 | 0.703008 |
40b4eef32d47c4960807376665ec44995d7e4116 | 14,062 | py | Python | model/_UNet_trainer.py | yasahi-hpc/AMRNet | 5858d464bdfe409a5ab50889104768dda3c70508 | [
"MIT"
] | null | null | null | model/_UNet_trainer.py | yasahi-hpc/AMRNet | 5858d464bdfe409a5ab50889104768dda3c70508 | [
"MIT"
] | null | null | null | model/_UNet_trainer.py | yasahi-hpc/AMRNet | 5858d464bdfe409a5ab50889104768dda3c70508 | [
"MIT"
] | null | null | null | from ._base_trainer import _BaseTrainer, MeasureMemory
import pathlib
import torch.multiprocessing as mp
import torch
from torch import nn
import horovod.torch as hvd
import numpy as np
import xarray as xr
import itertools
from .flow_dataset import FlowDataset
from .unet import UNet
import sys
from .visualization import save_flows
from .converter import save_as_netcdf
| 39.061111 | 147 | 0.595435 |
40b61330deb7990837ff7794fbef7bc995f41538 | 5,557 | py | Python | agents/vpg_policy_translation_with_dislocation.py | pjarosik/rlus | 5804a37ed9221362c470ad4eb0e6b03e533bf1d8 | [
"Apache-2.0"
] | 3 | 2020-09-20T20:02:43.000Z | 2022-01-14T09:37:46.000Z | agents/vpg_policy_translation_with_dislocation.py | pjarosik/rlus | 5804a37ed9221362c470ad4eb0e6b03e533bf1d8 | [
"Apache-2.0"
] | null | null | null | agents/vpg_policy_translation_with_dislocation.py | pjarosik/rlus | 5804a37ed9221362c470ad4eb0e6b03e533bf1d8 | [
"Apache-2.0"
] | 1 | 2021-06-29T07:02:19.000Z | 2021-06-29T07:02:19.000Z | from spinup import vpg
import tensorflow as tf
import numpy as np
from gym.spaces import Box, Discrete
from envs.focal_point_task_us_env import FocalPointTaskUsEnv
from envs.phantom import (
ScatterersPhantom,
Ball,
Teddy
)
from envs.imaging import ImagingSystem, Probe
from envs.generator import ConstPhantomGenerator, RandomProbeGenerator
import envs.logger
import matplotlib
import argparse
N_STEPS_PER_EPISODE = 16
N_STEPS_PER_EPOCH = 64
EPOCHS = 251 # NO_EPISODES = (NSTEPS_PER_EPOCH/NSTEPS_PER_EPISODE)*EPOCHS
N_WORKERS = 4
AC_KWARGS = dict(
hidden_sizes=[16, 32],
activation=tf.nn.relu
)
# Below functions base on openai.spinup's A-C scheme implementation.
if __name__ == "__main__":
main()
| 31.936782 | 81 | 0.620659 |
40b7ac2bc7f8f3621a710ed64cc2cd9096f796f1 | 791 | py | Python | lib/losses/dice.py | zongdaoming/CMT | fc3773bb6c6b1ab091688addfffca3fb1e382ae4 | [
"MIT"
] | 3 | 2021-05-10T20:12:23.000Z | 2021-11-24T18:01:13.000Z | lib/losses/dice.py | zongdaoming/CMT | fc3773bb6c6b1ab091688addfffca3fb1e382ae4 | [
"MIT"
] | null | null | null | lib/losses/dice.py | zongdaoming/CMT | fc3773bb6c6b1ab091688addfffca3fb1e382ae4 | [
"MIT"
] | null | null | null | import sys,os
sys.path.append('/home/zongdaoming/cv/multi-organ/multi-organ-ijcai')
from lib.losses.BaseClass import _AbstractDiceLoss
from lib.losses.basic import *
| 35.954545 | 102 | 0.737042 |
40b7ae2027514475dd06028f5df19e3941be6b6d | 1,190 | py | Python | icons.py | jasantunes/alfred-golinks | f35fa87adedf07329469c0161c8808401a6925d0 | [
"MIT"
] | 312 | 2015-01-02T12:44:03.000Z | 2020-06-21T03:53:29.000Z | icons.py | jasantunes/alfred-golinks | f35fa87adedf07329469c0161c8808401a6925d0 | [
"MIT"
] | 7 | 2015-03-22T11:57:50.000Z | 2020-02-09T08:35:05.000Z | icons.py | jasantunes/alfred-golinks | f35fa87adedf07329469c0161c8808401a6925d0 | [
"MIT"
] | 35 | 2015-09-06T09:36:32.000Z | 2020-06-21T19:17:55.000Z | # encoding: utf-8
#
# Copyright (c) 2019 Dean Jackson <deanishe@deanishe.net>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2019-09-06
#
"""Overlay check mark on icons."""
from __future__ import print_function, absolute_import
from Cocoa import (
NSBitmapImageRep,
NSPNGFileType,
NSImage,
NSMakeSize,
NSCompositeCopy,
NSSizeToCGSize,
NSZeroPoint,
)
from CoreGraphics import CGRectZero
def overlay(src, overlay, dest):
    """Create image ``dest`` by putting ``overlay`` on top of ``src``.

    Args:
        src (str): Path to source (background) image.
        overlay (str): Path to overlay (foreground) image.
        dest (str): Path to save combined image to.

    """
    base = NSImage.alloc().initWithContentsOfFile_(src)
    top = NSImage.alloc().initWithContentsOfFile_(overlay)
    combined = NSImage.alloc().initWithSize_(base.size())
    combined.lockFocus()
    # Draw both images into the same full-size rectangle.
    target = (0, 0), base.size()
    base.drawInRect_(target)
    top.drawInRect_(target)
    combined.unlockFocus()
    # Convert the composited image to PNG data and write it out.
    bitmap = NSBitmapImageRep.imageRepWithData_(combined.TIFFRepresentation())
    png = bitmap.representationUsingType_properties_(NSPNGFileType, {})
    png.writeToFile_atomically_(dest, False)
| 25.869565 | 70 | 0.691597 |
40b9ac7b9f67d52c5c73796669c1ff5e0996665b | 2,491 | py | Python | project/python/Main/CTRL/tracker.py | warak/IOT-GrannyWarden | 54ad51b7bf7377ce7b87e72091c9dbf7f686050d | [
"MIT"
] | null | null | null | project/python/Main/CTRL/tracker.py | warak/IOT-GrannyWarden | 54ad51b7bf7377ce7b87e72091c9dbf7f686050d | [
"MIT"
] | null | null | null | project/python/Main/CTRL/tracker.py | warak/IOT-GrannyWarden | 54ad51b7bf7377ce7b87e72091c9dbf7f686050d | [
"MIT"
] | null | null | null | import datetime
from threading import Thread
from time import sleep
import DBC.dbcreate as dbc
| 30.753086 | 91 | 0.470092 |
40ba1c6c5aded5c9a1f75bcde2e5830a948185e5 | 39,197 | py | Python | tests/unit/test_snapshot.py | cnnradams/python-spanner | 33055e577288cbcc848aa9abf43ccd382c9907a9 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_snapshot.py | cnnradams/python-spanner | 33055e577288cbcc848aa9abf43ccd382c9907a9 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_snapshot.py | cnnradams/python-spanner | 33055e577288cbcc848aa9abf43ccd382c9907a9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import google.api_core.gapic_v1.method
import mock
TABLE_NAME = "citizens"
COLUMNS = ["email", "first_name", "last_name", "age"]
SQL_QUERY = """\
SELECT first_name, last_name, age FROM citizens ORDER BY age"""
SQL_QUERY_WITH_PARAM = """
SELECT first_name, last_name, email FROM citizens WHERE age <= @max_age"""
PARAMS = {"max_age": 30}
PARAM_TYPES = {"max_age": "INT64"}
SQL_QUERY_WITH_BYTES_PARAM = """\
SELECT image_name FROM images WHERE @bytes IN image_data"""
PARAMS_WITH_BYTES = {"bytes": b"FACEDACE"}
RESUME_TOKEN = b"DEADBEEF"
TXN_ID = b"DEAFBEAD"
SECONDS = 3
MICROS = 123456
| 37.725698 | 88 | 0.672858 |
40ba39e3f0879514163c0009fb4d3d4f6df2004d | 592 | py | Python | hashtable.py | quake0day/oj | c09333d1738f8735de0d5d825db6f4b707585670 | [
"MIT"
] | null | null | null | hashtable.py | quake0day/oj | c09333d1738f8735de0d5d825db6f4b707585670 | [
"MIT"
] | null | null | null | hashtable.py | quake0day/oj | c09333d1738f8735de0d5d825db6f4b707585670 | [
"MIT"
] | null | null | null | A = ['a','b']
B = ['c','b','a']
print compareHash(A, B)
| 19.733333 | 43 | 0.496622 |
40bab880835594679397baae0088587d6d0269a6 | 2,904 | py | Python | Manipulation of PDF Files/pandf_gui.py | clair513/DIY | 843770590a729c6aabf63367a3ab848e21ab78b9 | [
"MIT"
] | 1 | 2019-12-18T17:28:11.000Z | 2019-12-18T17:28:11.000Z | Manipulation of PDF Files/pandf_gui.py | clair513/DIY | 843770590a729c6aabf63367a3ab848e21ab78b9 | [
"MIT"
] | null | null | null | Manipulation of PDF Files/pandf_gui.py | clair513/DIY | 843770590a729c6aabf63367a3ab848e21ab78b9 | [
"MIT"
] | null | null | null | # Importing required packages:
import pandas as pd
from tkinter import *
from tkinter.ttk import *
root = Tk()
# To visualize input DataFrame:
def generate_plot(gui_root, df, x_axis, y_axis=None, plot=None, aesthetics=None):
    """
    DESCRIPTION: Reads input Pandas DataFrame and renders a Seaborn plot,
    selected by parameters, inside a Tkinter label frame.

    PARAMETERS:
    > gui_root   : [Required] Tkinter application base class (Tk) initialized
                   variable/instance.
    > df         : [Required] Pandas DataFrame to visualize.
    > x_axis     : [Required] Column name used for the x axis.
    > y_axis     : Column name used for the y axis (unused by 'distplot').
    > plot       : Dict {'type': ..., 'hue': ...}; 'type' is one of
                   'lineplot', 'regplot', 'distplot', 'barplot'.
                   Defaults to {'type': None, 'hue': None}.
    > aesthetics : Dict {'style': ..., 'palette': ..., 'size': (w, h),
                   'dpi': ...}. Defaults to {'style': 'whitegrid',
                   'palette': 'hsv', 'size': (10, 7), 'dpi': 100}.
    """
    # Bug fix: dict literals as default arguments are mutable defaults
    # shared across calls; build fresh defaults per call instead.
    if plot is None:
        plot = {'type': None, 'hue': None}
    if aesthetics is None:
        aesthetics = {'style': 'whitegrid', 'palette': 'hsv',
                      'size': (10, 7), 'dpi': 100}

    # Importing external dependencies:
    import matplotlib.pyplot as plt
    from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
    from matplotlib.figure import Figure
    import seaborn as sns
    sns.set(style=aesthetics['style'], palette=aesthetics['palette'])
    import warnings
    warnings.filterwarnings('ignore')

    # Defining Tableau colors (RGB, 0-255):
    tableau_20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
                  (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
                  (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
                  (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
                  (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
    # Scaling RGB values to the [0, 1] range (Matplotlib acceptable format):
    tableau_20 = [(r / 255., g / 255., b / 255.) for r, g, b in tableau_20]

    # Setting up Tkinter Frame:
    lf = Labelframe(gui_root)
    lf.grid(row=0, column=0, sticky='nwes', padx=3, pady=3)

    # Setting up Canvas backed by Matplotlib:
    fig = Figure(figsize=aesthetics['size'], dpi=aesthetics['dpi'])
    ax = fig.add_subplot(111)

    # Drawing the requested plot with Seaborn:
    if plot['type'] == 'lineplot':  # Lineplot
        g = sns.lineplot(x=x_axis, y=y_axis, data=df, ax=ax)
    elif plot['type'] == 'regplot':  # Regplot
        g = sns.regplot(x=x_axis, y=y_axis, data=df, color=tableau_20[16], ax=ax)
    elif plot['type'] == 'distplot':  # Distplot
        g = sns.distplot(a=df[x_axis].dropna(), color=tableau_20[7],
                         hist_kws=dict(edgecolor='k', linewidth=0.5), ax=ax)
    elif plot['type'] == 'barplot':  # Grouped Barplot
        g = sns.catplot(x=x_axis, y=y_axis, hue=plot['hue'], data=df,
                        kind="bar", palette='rocket', ax=ax)
        g.despine(left=True)
    else:
        # More plot types to be added later.
        pass

    # Displaying plot on Canvas:
    canvas = FigureCanvasTkAgg(fig, master=lf)
    canvas.draw()
    canvas.get_tk_widget().grid(row=0, column=0)
generate_plot()
root.mainloop()
| 38.72 | 107 | 0.587466 |
40bac90ddb15602100d2d54bde81a9850ed02d27 | 4,737 | py | Python | utils/get_dataset.py | gautierdag/pytorch-attentive-lm | d08ce1813a5ee575c0aac86773cd95aa174ab5e1 | [
"MIT"
] | 16 | 2019-01-28T16:39:52.000Z | 2021-12-25T11:07:55.000Z | utils/get_dataset.py | MarkWuCL/pytorch-attentive-lm | d08ce1813a5ee575c0aac86773cd95aa174ab5e1 | [
"MIT"
] | 1 | 2021-09-14T22:14:28.000Z | 2021-09-15T16:12:43.000Z | utils/get_dataset.py | MarkWuCL/pytorch-attentive-lm | d08ce1813a5ee575c0aac86773cd95aa174ab5e1 | [
"MIT"
] | 6 | 2019-01-25T00:08:33.000Z | 2022-02-15T06:47:54.000Z | import os
import torch
from torch.utils.data import DataLoader, TensorDataset
import requests
import io
import zipfile
from .data_reader import read_vocabulary, read_lm_data, lm_data_producer
from .pre_process_wikitext import pre_process
def get_dataset(dataset, batch_size, device):
    """Build train/valid/test DataLoaders and the vocabulary for a corpus.

    Args:
        dataset: Corpus name, either ``"wiki-02"`` (WikiText-2) or ``"ptb"``
            (Penn Treebank).
        batch_size: Batch size used for all three DataLoaders.
        device: Device on which the dataset tensors are created.

    Returns:
        Tuple ``(train_iter, valid_iter, test_iter, vocabulary)``.

    Raises:
        ValueError: If ``dataset`` is not a known corpus name.  (Previously
            an unknown name fell through to a NameError on ``data_files``.)
    """
    download_dataset(dataset)  # downloads and preprocess dataset if needed

    if dataset == "wiki-02":
        data_files = [".data/wikitext-2/wikitext-2/wiki.train.tokens.sents",
                      ".data/wikitext-2/wikitext-2/wiki.valid.tokens.sents",
                      ".data/wikitext-2/wikitext-2/wiki.test.tokens.sents"]
        vocab_size = 33278 + 1  # add 1 to account for PAD
    elif dataset == 'ptb':
        data_files = [".data/penn-treebank/ptb.train.txt",
                      ".data/penn-treebank/ptb.valid.txt",
                      ".data/penn-treebank/ptb.test.txt"]
        vocab_size = 10000 + 1  # add 1 to account for PAD
    else:
        raise ValueError("unknown dataset: %r" % (dataset,))

    vocabulary = read_vocabulary(data_files, vocab_size)
    train_data, valid_data, test_data = read_lm_data(data_files,
                                                     vocabulary)

    # Convert each raw split to tensors and wrap in an iterator.
    train_iter = DataLoader(_split_to_tensor_dataset(train_data, device),
                            batch_size=batch_size)
    valid_iter = DataLoader(_split_to_tensor_dataset(valid_data, device),
                            batch_size=batch_size)
    test_iter = DataLoader(_split_to_tensor_dataset(test_data, device),
                           batch_size=batch_size)
    return train_iter, valid_iter, test_iter, vocabulary


def _split_to_tensor_dataset(split, device):
    """Run a raw split through ``lm_data_producer`` and wrap it.

    The producer yields (inputs, targets, lengths); inputs/targets become
    int64 tensors and lengths a float tensor, all placed on *device*.
    This replaces three identical copies of the conversion code that
    previously lived inline in ``get_dataset``.
    """
    produced = lm_data_producer(split)
    x = torch.tensor(produced[0], dtype=torch.long, device=device)
    y = torch.tensor(produced[1], dtype=torch.long, device=device)
    lengths = torch.tensor(produced[2], dtype=torch.float, device=device)
    return TensorDataset(x, y, lengths)
# downloading/preprocessing functions
| 37.896 | 90 | 0.677222 |
40bb7183ce1df8b018466acd5e09bcd49d75d2d5 | 289 | py | Python | Conteudo das Aulas/087/calc_est.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | Conteudo das Aulas/087/calc_est.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | Conteudo das Aulas/087/calc_est.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | from tkinter import *
# Create the main application window
instancia = Tk()
# Give the window a title
# NOTE(review): the title string lost its accented characters in a past
# encoding mix-up ("Estatstica" was presumably "Estatistica"); fixing it
# would change runtime text, so it is only flagged here.
instancia.title('Calculadora para Estatstica')
# Set the window size (width x height in pixels)
instancia.geometry("800x600")
# Give the application an icon (left disabled by the original author)
#instancia.wm_iconbitmap('icone.ico')
# Start the Tk event loop; blocks until the window is closed
instancia.mainloop()
| 17 | 47 | 0.761246 |
40bb7e3f95f2a2dc9b27a2c8dd06c761ef722a37 | 6,235 | py | Python | property_scraper.py | iplaughlin/property_scraping | 739d05a272eddb5f2b48f9fc85f407904067b931 | [
"MIT"
] | null | null | null | property_scraper.py | iplaughlin/property_scraping | 739d05a272eddb5f2b48f9fc85f407904067b931 | [
"MIT"
] | null | null | null | property_scraper.py | iplaughlin/property_scraping | 739d05a272eddb5f2b48f9fc85f407904067b931 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 19 09:42:09 2022
@author: iaala
"""
import requests
import sql_configs
import datetime
import os
from bs4 import BeautifulSoup
import time
from find_tables import (
table_information_one,
table_information_two,
table_information_three,
table_information_four,
)
from create_connection import create_sql_connection
import columns
# Absolute path of the directory containing this script; lets data files
# be located relative to the script rather than the current working dir.
__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Script entry point: main() (defined elsewhere in this file) runs only
# when the file is executed directly, not when imported.
if __name__ == "__main__":
    main()
| 42.705479 | 121 | 0.559262 |
40be64921673daa0d8c5613e61a252c64c6e744e | 1,002 | py | Python | matches/tests/test_view_index.py | ToxaZ/nostradamus | adda478685d60df24da106c15d734d02f01ed339 | [
"WTFPL"
] | null | null | null | matches/tests/test_view_index.py | ToxaZ/nostradamus | adda478685d60df24da106c15d734d02f01ed339 | [
"WTFPL"
] | 4 | 2021-06-02T23:01:40.000Z | 2021-07-25T15:02:42.000Z | matches/tests/test_view_index.py | ToxaZ/nostradamus | adda478685d60df24da106c15d734d02f01ed339 | [
"WTFPL"
] | null | null | null | from django.urls import resolve, reverse
from django.test import TestCase
from matches.views import matches_index
from matches.models import Match
| 33.4 | 77 | 0.675649 |
40be6781a367fb391b4b06b4f46533ac8dd9e99d | 5,871 | py | Python | hmc/applications/banana/banana.py | JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo | 7ee1b530db0eb536666dbc872fbf8200e53dd49b | [
"MIT"
] | 1 | 2021-11-23T15:40:07.000Z | 2021-11-23T15:40:07.000Z | hmc/applications/banana/banana.py | JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo | 7ee1b530db0eb536666dbc872fbf8200e53dd49b | [
"MIT"
] | null | null | null | hmc/applications/banana/banana.py | JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo | 7ee1b530db0eb536666dbc872fbf8200e53dd49b | [
"MIT"
] | null | null | null | from typing import Callable, Tuple
import numpy as np
def posterior_factory(y: np.ndarray, sigma_y: float, sigma_theta: float) -> Tuple[Callable]:
    """The banana distribution is a distribution that exhibits a characteristic
    banana-shaped ridge that resembles the posterior that can emerge from
    models that are not identifiable. The distribution is the posterior of the
    following generative model.

        y ~ Normal(theta[0] + theta[1]**2, sigma_sq_y)
        theta[i] ~ Normal(0, sigma_sq_theta)

    Args:
        y: Observations of the banana model.
        sigma_y: Standard deviation of the observations.
        sigma_theta: Standard deviation of prior over linear coefficients.

    Returns:
        log_posterior: Function to compute the log-posterior.
        metric: Function to compute the Fisher information metric.
        log_posterior_and_metric: Function to compute the log-posterior and
            the Fisher information metric in a single call.
        euclidean_auxiliaries: Function to compute the log-posterior and its
            gradient.
        riemannian_auxiliaries: Function to compute the log-posterior, the
            gradient of the log-posterior, the Fisher information metric, and
            the derivatives of the Fisher information metric.
    """
    sigma_sq_y = np.square(sigma_y)
    sigma_sq_theta = np.square(sigma_theta)

    def log_posterior(theta: np.ndarray) -> float:
        """The banana-shaped distribution posterior.

        Args:
            theta: Linear coefficients.

        Returns:
            out: The log-posterior of the banana-shaped distribution.
        """
        p = theta[0] + np.square(theta[1])
        ll = -0.5 / sigma_sq_y * np.square(y - p).sum()
        lp = -0.5 / sigma_sq_theta * np.square(theta).sum()
        return ll + lp

    def grad_log_posterior(theta: np.ndarray) -> np.ndarray:
        """Gradient of the banana-shaped distribution with respect to the linear
        coefficients.

        Args:
            theta: Linear coefficients.

        Returns:
            out: The gradient of the log-posterior of the banana-shaped
                distribution with respect to the linear coefficients.
        """
        p = theta[0] + np.square(theta[1])
        d = np.sum(y - p)
        ga = d / sigma_sq_y - theta[0] / sigma_sq_theta
        gb = 2.0*d / sigma_sq_y * theta[1] - theta[1] / sigma_sq_theta
        return np.hstack((ga, gb))

    def metric(theta: np.ndarray) -> np.ndarray:
        """The Fisher information is the negative expected outer product of the
        gradient of the posterior.

        Args:
            theta: Linear coefficients.

        Returns:
            G: The Fisher information metric of the banana-shaped distribution.
        """
        n = y.size
        s = 2.0*n*theta[1] / sigma_sq_y
        G = np.array([[n / sigma_sq_y + 1.0 / sigma_sq_theta, s],
                      [s, 4.0*n*np.square(theta[1]) / sigma_sq_y + 1.0 / sigma_sq_theta]])
        return G

    def grad_metric(theta: np.ndarray) -> np.ndarray:
        """The gradient of the Fisher information metric with respect to the linear
        coefficients.

        Args:
            theta: Linear coefficients.

        Returns:
            dG: The gradient of the Fisher information metric with respect to the
                linear coefficients.
        """
        n = y.size
        dG = np.array([
            [[0.0, 0.0], [0.0, 2.0*n / sigma_sq_y]],
            [[0.0, 2.0*n / sigma_sq_y], [0.0, 8.0*n*theta[1] / sigma_sq_y]]
        ])
        return dG

    def log_posterior_and_metric(theta: np.ndarray) -> Tuple[np.ndarray]:
        """Compute the log-posterior and the Fisher information metric together.

        Bug fix: this function was referenced in the factory's return
        statement but never defined, so calling ``posterior_factory``
        raised a NameError.

        Args:
            theta: Linear coefficients.

        Returns:
            lp: The log-posterior of the banana-shaped distribution.
            G: The Fisher information metric of the banana-shaped distribution.
        """
        return log_posterior(theta), metric(theta)

    def euclidean_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]:
        """Function to compute the log-posterior and the gradient of the
        log-posterior.

        Args:
            theta: Linear coefficients.

        Returns:
            lp: The log-posterior of the banana-shaped distribution.
            glp: The gradient of the log-posterior of the banana-shaped
                distribution with respect to the linear coefficients.
        """
        lp = log_posterior(theta)
        glp = grad_log_posterior(theta)
        return lp, glp

    def riemannian_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]:
        """Function to compute the log-posterior, the gradient of the log-posterior,
        the Fisher information metric and the derivatives of the Fisher
        information metric.

        Args:
            theta: Linear coefficients.

        Returns:
            lp: The log-posterior of the banana-shaped distribution.
            glp: The gradient of the log-posterior of the banana-shaped
                distribution with respect to the linear coefficients.
            G: The Fisher information metric of the banana-shaped distribution.
            dG: The gradient of the Fisher information metric with respect to the
                linear coefficients.
        """
        lp = log_posterior(theta)
        glp = grad_log_posterior(theta)
        G = metric(theta)
        dG = grad_metric(theta)
        return lp, glp, G, dG

    return log_posterior, metric, log_posterior_and_metric, euclidean_auxiliaries, riemannian_auxiliaries
"""Generate data from the banana-shaped posterior distribution.
Args:
t: Free-parameter determining the thetas.
sigma_y: Noise standard deviation.
sigma_theta: Prior standard deviation over the thetas.
num_obs: Number of observations to generate.
Returns:
theta: Linear coefficients of the banana-shaped distribution.
y: Observations from the unidentifiable model.
"""
theta = np.array([t, np.sqrt(1.0 - t)])
y = theta[0] + np.square(theta[1]) + sigma_y * np.random.normal(size=(num_obs, ))
return theta, y
| 35.36747 | 106 | 0.62698 |
40c04be7b7e97b73786d758981e90307e422880f | 3,141 | py | Python | msp430.py | sprout42/binaryninja-msp430 | 9bc5a79b1c6232260c2abc3bb4334e5ca3478baf | [
"MIT"
] | null | null | null | msp430.py | sprout42/binaryninja-msp430 | 9bc5a79b1c6232260c2abc3bb4334e5ca3478baf | [
"MIT"
] | null | null | null | msp430.py | sprout42/binaryninja-msp430 | 9bc5a79b1c6232260c2abc3bb4334e5ca3478baf | [
"MIT"
] | null | null | null | from binaryninja import (
Architecture,
BranchType,
FlagRole,
InstructionInfo,
LowLevelILFlagCondition,
RegisterInfo,
)
from .instructions import TYPE3_INSTRUCTIONS, Instruction, Registers
from .lifter import Lifter
| 30.794118 | 86 | 0.617638 |
40c0c0515519976b7d3396916ff20c4b1d6edd0a | 126 | py | Python | app/domain/__init__.py | emge1/tracardi | 0a4a8a38f0f769464f50d3c1113b798107810810 | [
"MIT"
] | null | null | null | app/domain/__init__.py | emge1/tracardi | 0a4a8a38f0f769464f50d3c1113b798107810810 | [
"MIT"
] | null | null | null | app/domain/__init__.py | emge1/tracardi | 0a4a8a38f0f769464f50d3c1113b798107810810 | [
"MIT"
] | null | null | null | __all__ = [
'session',
'event',
'profile',
'consent',
'segment',
'source',
'rule',
'entity'
]
| 11.454545 | 14 | 0.444444 |
40c32fb91113902b7c534e034974797ba31567b9 | 3,868 | py | Python | metric_calculation/faster_metrics.py | imatge-upc/saliency-2018-videosalgan | 5e4bf3892d55d8b9f37490e119113c2094be3bce | [
"MIT"
] | 10 | 2018-09-06T03:56:59.000Z | 2020-07-26T11:02:50.000Z | metric_calculation/faster_metrics.py | imatge-upc/saliency-2018-videosalgan | 5e4bf3892d55d8b9f37490e119113c2094be3bce | [
"MIT"
] | null | null | null | metric_calculation/faster_metrics.py | imatge-upc/saliency-2018-videosalgan | 5e4bf3892d55d8b9f37490e119113c2094be3bce | [
"MIT"
] | 3 | 2019-01-07T19:34:12.000Z | 2019-07-03T07:41:48.000Z | from salience_metrics import auc_judd, auc_shuff, cc, nss, similarity, normalize_map
"""
DHF1K paper: "we employ five classic met-rics, namely Normalized Scanpath Saliency (NSS), Sim-ilarity Metric (SIM), Linear Correlation Coefficient (CC),AUC-Judd (AUC-J), and shuffled AUC (s-AUC).""
"""
import cv2
import os
import numpy as np
import time
import pickle
gt_directory = "/imatge/lpanagiotis/work/DHF1K_extracted/maps"
sm_directory = "/imatge/lpanagiotis/work/DHF1K_extracted/predictions"

final_metric_list = []

# Hoisted out of the loop body: importing joblib on every one of the 700
# iterations was wasted work.
# https://stackoverflow.com/questions/35663498/how-do-i-return-a-matrix-with-joblib-python
from joblib import Parallel, delayed

# The directories are named 1-1000 so it should be easy to iterate over them
for i in range(1, 701):
    gt_path = os.path.join(gt_directory, str(i))
    sm_path = os.path.join(sm_directory, str(i))

    gt_files = os.listdir(gt_path)
    sm_files = os.listdir(sm_path)

    # Now to sort based on their file number. The "key" parameter in sorted is
    # a function based on which the sorting will happen (split excludes the
    # jpg/png extension from the name).
    gt_files_sorted = sorted(gt_files, key=lambda x: int(x.split(".")[0]))
    sm_files_sorted = sorted(sm_files, key=lambda x: int(x.split(".")[0]))

    pack = zip(gt_files_sorted, sm_files_sorted)
    print("Files related to video {} sorted.".format(i))

    # time.clock() was removed in Python 3.8; perf_counter() is the
    # supported timer for measuring elapsed wall-clock intervals.  (A dead
    # duplicate `start = time.clock()` at the top of the loop was removed:
    # it was always overwritten here before being read.)
    start = time.perf_counter()
    metric_list = Parallel(n_jobs=8)(delayed(inner_worker)(n, packed, gt_path, sm_path) for n, packed in enumerate(pack))  # run 8 frames simultaneously

    aucj_mean = np.mean([x[0] for x in metric_list])
    aucs_mean = np.mean([x[1] for x in metric_list])
    nss_mean = np.mean([x[2] for x in metric_list])
    cc_mean = np.mean([x[3] for x in metric_list])
    sim_mean = np.mean([x[4] for x in metric_list])

    print("For video number {} the metrics are:".format(i))
    print("AUC-JUDD is {}".format(aucj_mean))
    print("AUC-SHUFFLED is {}".format(aucs_mean))
    print("NSS is {}".format(nss_mean))
    print("CC is {}".format(cc_mean))
    print("SIM is {}".format(sim_mean))
    print("Time elapsed: {}".format(time.perf_counter() - start))
    print("==============================")

    final_metric_list.append((aucj_mean,
                              aucs_mean,
                              nss_mean,
                              cc_mean,
                              sim_mean))

# Persist the per-video metric tuples once all videos are processed.
# NOTE(review): writing this inside the loop instead would checkpoint
# partial progress across crashes of this long-running job.
with open('metrics.txt', 'wb') as handle:
    pickle.dump(final_metric_list, handle, protocol=pickle.HIGHEST_PROTOCOL)

Aucj = np.mean([y[0] for y in final_metric_list])
Aucs = np.mean([y[1] for y in final_metric_list])
Nss = np.mean([y[2] for y in final_metric_list])
Cc = np.mean([y[3] for y in final_metric_list])
Sim = np.mean([y[4] for y in final_metric_list])

print("Final average of metrics is:")
print("AUC-JUDD is {}".format(Aucj))
print("AUC-SHUFFLED is {}".format(Aucs))
print("NSS is {}".format(Nss))
print("CC is {}".format(Cc))
print("SIM is {}".format(Sim))
40c346f9a8e289985909d8a308d6ecd6f7e032ea | 1,061 | py | Python | tests/rules/test_pacman_invalid_option.py | RogueScholar/thefuck-termux | cc33d5fa0077b2b2323b8a62f3478ff8efef3fba | [
"MIT"
] | null | null | null | tests/rules/test_pacman_invalid_option.py | RogueScholar/thefuck-termux | cc33d5fa0077b2b2323b8a62f3478ff8efef3fba | [
"MIT"
] | null | null | null | tests/rules/test_pacman_invalid_option.py | RogueScholar/thefuck-termux | cc33d5fa0077b2b2323b8a62f3478ff8efef3fba | [
"MIT"
] | null | null | null | import pytest
from thefuck.rules.pacman_invalid_option import get_new_command
from thefuck.rules.pacman_invalid_option import match
from thefuck.types import Command
good_output = """community/shared_meataxe 1.0-3
A set of programs for working with matrix representations over finite fields
"""
bad_output = "error: invalid option '-"
| 31.205882 | 80 | 0.748351 |
40c37866ffff9cefa653877b146a68fc96c42ddf | 11,329 | py | Python | dimod/reference/composites/scalecomposite.py | joseppinilla/dimod | e33ca5045e31ee2d9d58515f017fb6be5276cd8e | [
"Apache-2.0"
] | 1 | 2022-02-01T14:40:05.000Z | 2022-02-01T14:40:05.000Z | dimod/reference/composites/scalecomposite.py | xpin/dimod | 5e399317b0bfaae6ed20e22b9f2ef242f5fa5e6c | [
"Apache-2.0"
] | null | null | null | dimod/reference/composites/scalecomposite.py | xpin/dimod | 5e399317b0bfaae6ed20e22b9f2ef242f5fa5e6c | [
"Apache-2.0"
] | 1 | 2022-02-01T14:40:31.000Z | 2022-02-01T14:40:31.000Z | # Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
"""
A composite that scales problem variables as directed. if scalar is not given
calculates it based on quadratic and bias ranges.
"""
try:
import collections.abc as abc
except ImportError:
import collections as abc
from numbers import Number
import numpy as np
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.core.composite import ComposedSampler
# Explicit public API of this module: only ScaleComposite is exported.
__all__ = 'ScaleComposite',
def sample_ising(self, h, J, offset=0, scalar=None,
                 bias_range=1, quadratic_range=None,
                 ignored_variables=None, ignored_interactions=None,
                 ignore_offset=False, **parameters):
    """Scale and sample from the Ising problem given by ``h``, ``J``, ``offset``.

    If ``scalar`` is not given, the problem is scaled based on the bias and
    quadratic ranges.

    Args:
        h (dict): Linear biases.

        J (dict): Quadratic or higher order biases.

        offset (float, optional): Constant energy offset.

        scalar (number):
            Value by which to scale the energy range of the binary quadratic model.

        bias_range (number/pair):
            Value/range by which to normalize all the biases, or if
            `quadratic_range` is provided, just the linear biases.

        quadratic_range (number/pair):
            Value/range by which to normalize the quadratic biases.

        ignored_variables (iterable, optional):
            Biases associated with these variables are not scaled.

        ignored_interactions (iterable[tuple], optional):
            As an iterable of 2-tuples. Biases associated with these
            interactions are not scaled.

        ignore_offset (bool, default=False):
            If True, the offset is not scaled.

        **parameters:
            Parameters for the sampling method, specified by the child sampler.

    Returns:
        :obj:`dimod.SampleSet`
    """
    # Any interaction over more than two variables makes this a
    # higher-order (HUBO) problem, which this composite only supports via
    # the deprecated PolyScaleComposite path below.
    if any(len(inter) > 2 for inter in J):
        # handle HUBO
        import warnings
        msg = ("Support for higher order Ising models in ScaleComposite is "
               "deprecated and will be removed in dimod 0.9.0. Please use "
               "PolyScaleComposite.sample_hising instead.")
        warnings.warn(msg, DeprecationWarning)

        # NOTE(review): imported locally, presumably to avoid a circular
        # import between the composite modules -- confirm before hoisting.
        from dimod.reference.composites.higherordercomposites import PolyScaleComposite
        from dimod.higherorder.polynomial import BinaryPolynomial

        poly = BinaryPolynomial.from_hising(h, J, offset=offset)

        # PolyScaleComposite expresses everything-to-ignore as one set of
        # frozensets: variables, interactions, and (optionally) the offset,
        # which is represented by the empty frozenset.
        ignored_terms = set()
        if ignored_variables is not None:
            ignored_terms.update(frozenset(v) for v in ignored_variables)
        if ignored_interactions is not None:
            ignored_terms.update(frozenset(inter) for inter in ignored_interactions)
        if ignore_offset:
            ignored_terms.add(frozenset())
        return PolyScaleComposite(self.child).sample_poly(poly, scalar=scalar,
                                                          bias_range=bias_range,
                                                          poly_range=quadratic_range,
                                                          ignored_terms=ignored_terms,
                                                          **parameters)

    # Ordinary (quadratic) Ising problem: convert to a BQM and defer to the
    # composite's sample() method, which performs the actual scaling.
    bqm = BinaryQuadraticModel.from_ising(h, J, offset=offset)
    return self.sample(bqm, scalar=scalar,
                       bias_range=bias_range,
                       quadratic_range=quadratic_range,
                       ignored_variables=ignored_variables,
                       ignored_interactions=ignored_interactions,
                       ignore_offset=ignore_offset, **parameters)
def _scale_back_response(bqm, response, scalar, ignored_interactions,
                         ignored_variables, ignore_offset):
    """Rescale the energies of *response* back to the original problem's scale."""
    anything_ignored = (len(ignored_variables) + len(ignored_interactions)
                        + ignore_offset) != 0
    if anything_ignored:
        # Some terms were left unscaled, so undoing the scaling with a
        # simple division would be wrong; recompute the energies directly
        # from the original (unscaled) bqm instead.
        response.record.energy = bqm.energies((response.record.sample,
                                               response.variables))
    else:
        response.record.energy = np.divide(response.record.energy, scalar)
    return response
def _check_params(ignored_variables, ignored_interactions):
"""Helper for sample methods"""
if ignored_variables is None:
ignored_variables = set()
elif not isinstance(ignored_variables, abc.Container):
ignored_variables = set(ignored_variables)
if ignored_interactions is None:
ignored_interactions = set()
elif not isinstance(ignored_interactions, abc.Container):
ignored_interactions = set(ignored_interactions)
return ignored_variables, ignored_interactions
def _calc_norm_coeff(h, J, bias_range, quadratic_range, ignored_variables,
                     ignored_interactions):
    """Compute the coefficient that scales the biases into the target ranges."""
    if ignored_variables is None or ignored_interactions is None:
        raise ValueError('ignored interactions or variables cannot be None')

    # With no separate quadratic range, both kinds of bias share bias_range.
    quad_spec = bias_range if quadratic_range is None else quadratic_range
    lin_range = parse_range(bias_range)
    quad_range = parse_range(quad_spec)

    # Extremes of the biases that actually participate in scaling.
    lin_min, lin_max = min_and_max([bias for var, bias in h.items()
                                    if var not in ignored_variables])
    quad_min, quad_max = min_and_max([bias for inter, bias in J.items()
                                      if not check_isin(inter, ignored_interactions)])

    inv_scalar = max(lin_min / lin_range[0], lin_max / lin_range[1],
                     quad_min / quad_range[0], quad_max / quad_range[1])
    # An all-zero problem needs no scaling.
    return 1. / inv_scalar if inv_scalar != 0 else 1.
def _scaled_bqm(bqm, scalar, bias_range, quadratic_range,
                ignored_variables, ignored_interactions,
                ignore_offset):
    """Return a scaled copy of *bqm*, deriving the scalar from the ranges
    when one is not supplied explicitly."""
    scaled_bqm = bqm.copy()

    if scalar is None:
        scalar = _calc_norm_coeff(scaled_bqm.linear, scaled_bqm.quadratic,
                                  bias_range, quadratic_range,
                                  ignored_variables, ignored_interactions)

    scaled_bqm.scale(scalar, ignored_variables=ignored_variables,
                     ignored_interactions=ignored_interactions,
                     ignore_offset=ignore_offset)
    # Record the applied scalar so callers can scale energies back later.
    scaled_bqm.info.update({'scalar': scalar})
    return scaled_bqm
def check_isin(key, key_list):
    """Count how many entries of *key_list* contain the same variables as *key*
    (comparison is order- and multiplicity-insensitive)."""
    target = set(key)
    return sum(1 for candidate in key_list if set(candidate) == target)
| 37.637874 | 101 | 0.603231 |
40c4517b7bccc080e6b7ec11639bdde005bb213a | 739 | py | Python | tests/test_config.py | savilard/flask-ecom-api | d94ee7873b9ec80645c05422e3355e8dc045ebeb | [
"MIT"
] | 1 | 2021-04-17T15:25:36.000Z | 2021-04-17T15:25:36.000Z | tests/test_config.py | savilard/flask-ecom-api | d94ee7873b9ec80645c05422e3355e8dc045ebeb | [
"MIT"
] | null | null | null | tests/test_config.py | savilard/flask-ecom-api | d94ee7873b9ec80645c05422e3355e8dc045ebeb | [
"MIT"
] | 1 | 2021-04-18T15:47:02.000Z | 2021-04-18T15:47:02.000Z | import os
| 36.95 | 92 | 0.783491 |
40c6d377ec913783afe6edc196ecab48e0003b36 | 6,122 | py | Python | leasing/forms.py | suutari-ai/mvj | c39dbc692afcb3b26366783414c2d5a88a57b25a | [
"MIT"
] | 1 | 2021-01-12T08:14:10.000Z | 2021-01-12T08:14:10.000Z | leasing/forms.py | suutari-ai/mvj | c39dbc692afcb3b26366783414c2d5a88a57b25a | [
"MIT"
] | 249 | 2017-04-18T14:00:13.000Z | 2022-03-30T12:18:03.000Z | leasing/forms.py | suutari-ai/mvj | c39dbc692afcb3b26366783414c2d5a88a57b25a | [
"MIT"
] | 7 | 2017-04-18T08:43:54.000Z | 2021-07-28T07:29:30.000Z | from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
from leasing.enums import (
InfillDevelopmentCompensationState,
LeaseState,
TenantContactType,
)
from leasing.models import Contact, DecisionMaker, District, LeaseType, Municipality
from leasing.validators import validate_business_id
| 36.658683 | 87 | 0.679353 |
40c712bda8811c80835db84231a9e91605ae40b6 | 675 | py | Python | src/main/management/commands/create_admin_user.py | LokotamaTheMastermind/website-portfolio-django-project | 932d509428d592ee573ff82821b9490c8da9600a | [
"Apache-2.0"
] | null | null | null | src/main/management/commands/create_admin_user.py | LokotamaTheMastermind/website-portfolio-django-project | 932d509428d592ee573ff82821b9490c8da9600a | [
"Apache-2.0"
] | null | null | null | src/main/management/commands/create_admin_user.py | LokotamaTheMastermind/website-portfolio-django-project | 932d509428d592ee573ff82821b9490c8da9600a | [
"Apache-2.0"
] | null | null | null | # polls/management/commands/create_admin_user.py
import sys
import logging
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.conf import settings
| 28.125 | 65 | 0.642963 |
40c7452a82c23c82f183d4188dfd8d42aa979d41 | 1,597 | py | Python | app.py | jdanper/incredipaper | 4c2d2dc1e2280f19e01d2e8faea4c9a1ae9b5449 | [
"MIT"
] | null | null | null | app.py | jdanper/incredipaper | 4c2d2dc1e2280f19e01d2e8faea4c9a1ae9b5449 | [
"MIT"
] | null | null | null | app.py | jdanper/incredipaper | 4c2d2dc1e2280f19e01d2e8faea4c9a1ae9b5449 | [
"MIT"
] | null | null | null | import unirest
import json
import requests
import os
import subprocess
import time
import argparse
# Base URL of the Unsplash REST API.
rootUrl = "https://api.unsplash.com/"
# Default headers applied to every unirest request.
unirest.default_header("Accept", "application/json")
unirest.default_header("Accept-Version", "v1")
# NOTE(review): "<CLIENT-ID>" is a placeholder -- a real Unsplash client id
# must be substituted before this runs (prefer loading it from the
# environment rather than hard-coding a credential in source).
unirest.default_header("Authorization","<CLIENT-ID>")
while True:
parser = argparse.ArgumentParser()
parser.add_argument('integers', metavar='int', type=int, help='time between wallpaper change (in seconds)')
args = parser.parse_args()
print "waiting for %s seconds" % args.integers
time.sleep(args.integers)
downloadPic(unirest.get(rootUrl + "photos/random", params={"orientation":"landscape"}))#.body["id"]
| 40.948718 | 204 | 0.707577 |
40c7af774d2446afa75acb06651e91eb7c9447fd | 2,358 | py | Python | tests/client_asyncio_test.py | ninchat/ninchat-python | 7e5fcadb7389ca8c7722c32d69839289675d7baa | [
"BSD-2-Clause"
] | null | null | null | tests/client_asyncio_test.py | ninchat/ninchat-python | 7e5fcadb7389ca8c7722c32d69839289675d7baa | [
"BSD-2-Clause"
] | 4 | 2017-10-12T21:05:12.000Z | 2018-05-17T22:19:08.000Z | tests/client_asyncio_test.py | ninchat/ninchat-python | 7e5fcadb7389ca8c7722c32d69839289675d7baa | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2017, Somia Reality Oy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import asyncio
import logging
from functools import partial
from ninchat.client.asyncio import Session
# Module-level logger named after this test module.
log = logging.getLogger(__name__)
| 38.655738 | 133 | 0.725615 |
40c853daa30d56c941424e6324401a40fa450528 | 878 | py | Python | msgraph/base.py | jstacoder/python-msgraph | be9a93f4baa2d89c97a8454ab1d31b8e5fdda38e | [
"Apache-2.0"
] | 2 | 2020-09-23T18:26:22.000Z | 2021-03-10T14:12:49.000Z | msgraph/base.py | jstacoder/python-msgraph | be9a93f4baa2d89c97a8454ab1d31b8e5fdda38e | [
"Apache-2.0"
] | 4 | 2020-09-23T18:25:32.000Z | 2021-03-22T11:07:32.000Z | msgraph/base.py | jstacoder/python-msgraph | be9a93f4baa2d89c97a8454ab1d31b8e5fdda38e | [
"Apache-2.0"
] | 1 | 2020-09-23T18:28:14.000Z | 2020-09-23T18:28:14.000Z | from datetime import datetime
| 35.12 | 158 | 0.633257 |
40c934e19e6344f536502d3f0e951d55cb483721 | 5,641 | py | Python | nc/models.py | caktus/Traffic-Stops | 2c6eda9477f1770c5ad1208a1937c3e828fbfb28 | [
"MIT"
] | 1 | 2021-12-10T14:58:11.000Z | 2021-12-10T14:58:11.000Z | nc/models.py | caktus/Traffic-Stops | 2c6eda9477f1770c5ad1208a1937c3e828fbfb28 | [
"MIT"
] | 5 | 2020-08-12T15:20:31.000Z | 2021-06-10T13:43:02.000Z | nc/models.py | caktus/Traffic-Stops | 2c6eda9477f1770c5ad1208a1937c3e828fbfb28 | [
"MIT"
] | null | null | null | from caching.base import CachingManager, CachingMixin
from django.db import models
from tsdata.models import CensusProfile
# (value, label) tuples in the shape Django model fields use for `choices`.
# NOTE(review): the numeric codes presumably mirror the NC traffic-stop
# reporting format -- confirm against the data dictionary before reuse.

# Reason the officer recorded for initiating the stop.
PURPOSE_CHOICES = (
    (1, "Speed Limit Violation"),
    (2, "Stop Light/Sign Violation"),
    (3, "Driving While Impaired"),
    (4, "Safe Movement Violation"),
    (5, "Vehicle Equipment Violation"),
    (6, "Vehicle Regulatory Violation"),
    (7, "Seat Belt Violation"),
    (8, "Investigation"),
    (9, "Other Motor Vehicle Violation"),
    (10, "Checkpoint"),
)
# Enforcement action taken as a result of the stop.
ACTION_CHOICES = (
    (1, "Verbal Warning"),
    (2, "Written Warning"),
    (3, "Citation Issued"),
    (4, "On-View Arrest"),
    (5, "No Action Taken"),
)
# Role of the person in the stopped vehicle.
PERSON_TYPE_CHOICES = (("D", "Driver"), ("P", "Passenger"))
GENDER_CHOICES = (("M", "Male"), ("F", "Female"))
ETHNICITY_CHOICES = (("H", "Hispanic"), ("N", "Non-Hispanic"))
RACE_CHOICES = (
    ("A", "Asian"),
    ("B", "Black"),
    ("I", "Native American"),
    ("U", "Other"),
    ("W", "White"),
)
# Legal basis under which a search was conducted.
SEARCH_TYPE_CHOICES = (
    (1, "Consent"),
    (2, "Search Warrant"),
    (3, "Probable Cause"),
    (4, "Search Incident to Arrest"),
    (5, "Protective Frisk"),
)
# Officer's stated grounds for the search.
SEARCH_BASIS_CHOICES = (
    ("ER", "Erratic/Suspicious Behavior"),
    ("OB", "Observation of Suspected Contraband"),
    ("OI", "Other Official Information"),
    ("SM", "Suspicious Movement"),
    ("TIP", "Informant Tip"),
    ("WTNS", "Witness Observation"),
)
| 34.607362 | 99 | 0.710867 |
40cc65a33578c41b6882d9360507c431c3bb4a45 | 74 | py | Python | flasky/auth/forms/__init__.py | by46/fasky | c6941972b57284c2167dfacf022f981939249256 | [
"MIT"
] | null | null | null | flasky/auth/forms/__init__.py | by46/fasky | c6941972b57284c2167dfacf022f981939249256 | [
"MIT"
] | null | null | null | flasky/auth/forms/__init__.py | by46/fasky | c6941972b57284c2167dfacf022f981939249256 | [
"MIT"
] | null | null | null | from .login import LoginForm
from .registration import RegistrationForm
| 24.666667 | 43 | 0.837838 |
40ce727b047c06c9d0537e694ab36bc40c4d524b | 552 | py | Python | API_SIMIT_Mail/multapp/urls.py | profefonso/Services-SM | 98b9949a237121451a13fce5bc8f2945fa5a3cee | [
"MIT"
] | null | null | null | API_SIMIT_Mail/multapp/urls.py | profefonso/Services-SM | 98b9949a237121451a13fce5bc8f2945fa5a3cee | [
"MIT"
] | 16 | 2019-12-04T23:02:52.000Z | 2022-02-10T11:57:03.000Z | API_SIMIT_Mail/multapp/urls.py | profefonso/Services-SM | 98b9949a237121451a13fce5bc8f2945fa5a3cee | [
"MIT"
] | null | null | null | from django.urls import path
from django.contrib import admin
from rest_framework_swagger.views import get_swagger_view
from .views import notification
# Swagger view that renders a browsable schema page for the MAIL API.
schema_view = get_swagger_view(title='MAIL API')
# URL routing table for this Django app: admin site (behind an obscured
# path), the Swagger docs, and the notification endpoint.
urlpatterns = [
    path('front/betsy/irish/embargo/admin/', admin.site.urls),
    # Swagger API
    path(
        'api/',
        schema_view,
        name='api'
    ),
    # notification
    path(
        'notification/',
        notification.NotificationServicesRest.as_view(),
        name=notification.NotificationServicesRest.name
    ),
]
| 21.230769 | 62 | 0.681159 |
40d10458dee4b20d938050badf13c455b5c17307 | 1,097 | py | Python | tests/legacy_mocket.py | jepler/Adafruit_CircuitPython_Requests | 9a9527110726036bfad94b14166e62aa61c1276e | [
"MIT"
] | null | null | null | tests/legacy_mocket.py | jepler/Adafruit_CircuitPython_Requests | 9a9527110726036bfad94b14166e62aa61c1276e | [
"MIT"
] | null | null | null | tests/legacy_mocket.py | jepler/Adafruit_CircuitPython_Requests | 9a9527110726036bfad94b14166e62aa61c1276e | [
"MIT"
] | null | null | null | from unittest import mock
# Stand-in for socket.SOCK_STREAM; tests only need the attribute to exist.
SOCK_STREAM = 0
# Module-level mocks mimicking the interface of the real network layer so
# request code can be exercised without hardware or a live connection.
# NOTE(review): the exact module being impersonated is inferred from the
# repo context (CircuitPython requests tests) -- confirm.
set_interface = mock.Mock()
interface = mock.MagicMock()
getaddrinfo = mock.Mock()
socket = mock.Mock()
| 26.756098 | 61 | 0.610757 |
40d10aff20e4192696c984a95bd52419f7e1299a | 2,228 | py | Python | run.py | romeroyakovlev/ii | ae9485df2c3565871994c146001a72db511f3700 | [
"CC0-1.0"
] | 1 | 2017-09-29T09:35:04.000Z | 2017-09-29T09:35:04.000Z | run.py | romeroyakovlev/ii | ae9485df2c3565871994c146001a72db511f3700 | [
"CC0-1.0"
] | null | null | null | run.py | romeroyakovlev/ii | ae9485df2c3565871994c146001a72db511f3700 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
import api,points
from api.bottle import *
II_PATH=os.path.dirname(__file__) or '.'
TEMPLATE_PATH.insert(0,II_PATH)
def _point_msg(pauth, tmsg):
    """Authenticate a point by its auth hash and post its message.

    Returns a short status string: 'auth error!' on a bad hash,
    'msg big!' if the body exceeds the size limit, 'error:unknown' if
    saving fails, or an HTML-flavoured success line linking the echoarea.
    """
    sender, addr = points.check_hash(pauth)
    if not addr:
        return 'auth error!'
    cfg = api.load_echo(False)
    msg_obj = api.toss(sender, '%s,%s' % (cfg[0][1], addr), tmsg.strip())
    # A leading '@repto:' line names the message being replied to; strip it
    # out of the body and stash the target hash on the message object.
    if msg_obj.msg.startswith('@repto:'):
        body_lines = msg_obj.msg.splitlines()
        msg_obj.repto = body_lines[0][7:]
        msg_obj.msg = '\n'.join(body_lines[1:])
    # Reject oversized messages (limit measured in UTF-8 bytes).
    if len(msg_obj.msg.encode('utf-8')) >= 64100:
        return 'msg big!'
    saved_hash = api.point_newmsg(msg_obj)
    if not saved_hash:
        return 'error:unknown'
    return 'msg ok:%s: <a href="/%s">%s</a>' % (
        saved_hash, msg_obj.echoarea, msg_obj.echoarea)
import iitpl
iitpl.II_PATH=II_PATH
run(host='127.0.0.1',port=62220,debug=False)
| 29.706667 | 84 | 0.62702 |
40d1ab4064e3ebc59780f56e6705f5033ef843d8 | 2,080 | py | Python | learning_labs/yang/01-yang/add_loopback_ip.py | hpreston/sbx_nxos | 8952d369b80810d22e0c86485b667cde1519272d | [
"MIT"
] | 33 | 2017-05-19T19:47:33.000Z | 2021-05-16T07:33:39.000Z | learning_labs/yang/01-yang/add_loopback_ip.py | AJNOURI/sbx_nxos | 449ee3c1c88cbd831fd3f90490fc28dd7e02d448 | [
"MIT"
] | 13 | 2017-08-25T16:48:16.000Z | 2021-09-23T23:21:58.000Z | learning_labs/yang/01-yang/add_loopback_ip.py | vbohinc/sbx_nxos | 744c34fffd32b1c973ac791123afd6cc811e0d8b | [
"MIT"
] | 20 | 2017-06-19T09:32:20.000Z | 2022-02-20T05:33:14.000Z | #!/usr/bin/env python
from ncclient import manager
import sys
from lxml import etree
# Set the device variables
DEVICES = ['172.16.30.101', '172.16.30.102']
USER = 'admin'
PASS = 'admin'
PORT = 830
LOOPBACK_IP = {
'172.16.30.101': '10.99.99.1/24',
'172.16.30.102': '10.99.99.2/24'
}
DEVICE_NAMES = {'172.16.30.101': '(nx-osv9000-1)',
'172.16.30.102': '(nx-osv9000-2)' }
# create a main() method
def main():
    """Push an IPv4 address onto interface loopback99 of each spine switch.

    Iterates over the module-level DEVICES list, opens a NETCONF session to
    each switch and applies an edit-config against the running datastore,
    printing the raw <rpc-reply> for inspection.  Returns None, so the
    ``sys.exit(main())`` entry point exits with status 0.
    """
    # NETCONF payload template; the single {} placeholder receives the
    # address/prefix string for the target switch.
    config_template = """
          <config>
            <System xmlns="http://cisco.com/ns/yang/cisco-nx-os-device">
                <ipv4-items>
                    <inst-items>
                        <dom-items>
                            <Dom-list>
                                <name>default</name>
                                <if-items>
                                    <If-list>
                                        <id>lo99</id>
                                        <addr-items>
                                            <Addr-list>
                                                <addr>{}</addr>
                                            </Addr-list>
                                        </addr-items>
                                    </If-list>
                                </if-items>
                            </Dom-list>
                        </dom-items>
                    </inst-items>
                </ipv4-items>
            </System>
        </config>"""
    for switch in DEVICES:
        # Session parameters come from the module-level USER/PASS/PORT
        # constants; host key checking is disabled for the lab devices.
        with manager.connect(host=switch, port=PORT, username=USER,
                             password=PASS, hostkey_verify=False,
                             device_params={'name': 'nexus'},
                             look_for_keys=False, allow_agent=False) as session:
            # Announce which address is being pushed to which switch.
            print("\nNow adding IP address {} to device {} {}...\n".format(
                LOOPBACK_IP[switch], DEVICE_NAMES[switch], switch))
            payload = config_template.format(LOOPBACK_IP[switch])
            reply = session.edit_config(target='running', config=payload)
            # Print the NETCONF <rpc-reply> verbatim for inspection.
            print(reply)
if __name__ == '__main__':
sys.exit(main())
| 30.144928 | 117 | 0.479327 |
40d1e35fdcc4995890f4efabdc25434ef9f00eb5 | 5,704 | py | Python | Plotly_Dash/spacex_dash_app.py | AtypicalLogic/Coursera-IBM_DS-Applied_Data_Science_Capstone | 2659fcae1a8d5fec13ab632aee1492a29e9585ee | [
"MIT"
] | null | null | null | Plotly_Dash/spacex_dash_app.py | AtypicalLogic/Coursera-IBM_DS-Applied_Data_Science_Capstone | 2659fcae1a8d5fec13ab632aee1492a29e9585ee | [
"MIT"
] | null | null | null | Plotly_Dash/spacex_dash_app.py | AtypicalLogic/Coursera-IBM_DS-Applied_Data_Science_Capstone | 2659fcae1a8d5fec13ab632aee1492a29e9585ee | [
"MIT"
] | null | null | null | # To run this file, Win Start > cmd > file dir > run: python spacex_dash_app.py
# Import required libraries
import pandas as pd
import dash
from dash import html
from dash import dcc
from dash.dependencies import Input, Output
import plotly.express as px
# Read the airline data into pandas dataframe
spacex_df = pd.read_csv("spacex_launch_dash.csv")
max_payload = spacex_df['Payload Mass (kg)'].max()
min_payload = spacex_df['Payload Mass (kg)'].min()
# Dropdown list(s)
launch_site_list = []
launch_site_list.append('ALL')
for index, row in spacex_df['Launch Site'].value_counts().to_frame().iterrows():
launch_site_list.append(row.name)
# Create a dash application
app = dash.Dash(__name__)
# Create an app layout
app.layout = html.Div(children=[html.H1('SpaceX Launch Records Dashboard',
style={'textAlign': 'center', 'color': '#503D36',
'font-size': 40}),
# TASK 1: Add a dropdown list to enable Launch Site selection
# The default select value is for ALL sites
# dcc.Dropdown(id='site-dropdown',...)
dcc.Dropdown(id='site-dropdown',
options=[{'label': i, 'value': i} for i in launch_site_list],
style={'width':'100%', 'padding':'3px', 'font-size': '20px', 'text-align-last': 'left'},
value='ALL'),
html.Br(),
# TASK 2: Add a pie chart to show the total successful launches count for all sites
# If a specific launch site was selected, show the Success vs. Failed counts for the site
html.Div(dcc.Graph(id='success-pie-chart')),
html.Br(),
html.P("Payload range (Kg):"),
# TASK 3: Add a slider to select payload range
#dcc.RangeSlider(id='payload-slider',...)
dcc.RangeSlider(id='payload-slider', min=min_payload, max=max_payload, step=1000, value=[min_payload, max_payload]),
# TASK 4: Add a scatter chart to show the correlation between payload and launch success
html.Div(dcc.Graph(id='success-payload-scatter-chart')),
])
# TASK 2:
# Add a callback function for `site-dropdown` as input, `success-pie-chart` as output
# TASK 4:
# Add a callback function for `site-dropdown` and `payload-slider` as inputs, `success-payload-scatter-chart` as output
# NOTE(review): neither callback is implemented in this file, so both graphs
# in the layout stay empty at runtime -- confirm whether the callbacks were
# meant to be filled in as part of the exercise.
# Run the app
if __name__ == '__main__':
    app.run_server(debug=True)
# Finding Insights Visually
# Now with the dashboard completed, you should be able to use it to analyze SpaceX launch data, and answer the following questions:
#
# Which site has the largest successful launches?
### KSC LC-39A
# Which site has the highest launch success rate?
### KSC LC-39A
# Which payload range(s) has the highest launch success rate?
### 2000 - 4000
# Which payload range(s) has the lowest launch success rate?
### 6000 - 9000
# Which F9 Booster version (v1.0, v1.1, FT, B4, B5, etc.) has the highest launch success rate?
### B5
| 52.330275 | 223 | 0.58082 |
40d2f193783c709a9e4416360cda1c6098d93420 | 1,430 | py | Python | configs/mnist_paper_residual_cnn_gp.py | rhaps0dy/cnn-gp | 5726bd10debbddcffb3dc1f5c671f5dceedf007d | [
"BSD-2-Clause"
] | 23 | 2019-06-21T15:03:45.000Z | 2022-01-24T11:34:16.000Z | configs/mnist_paper_residual_cnn_gp.py | rhaps0dy/cnn-gp | 5726bd10debbddcffb3dc1f5c671f5dceedf007d | [
"BSD-2-Clause"
] | null | null | null | configs/mnist_paper_residual_cnn_gp.py | rhaps0dy/cnn-gp | 5726bd10debbddcffb3dc1f5c671f5dceedf007d | [
"BSD-2-Clause"
] | 10 | 2019-06-21T14:52:55.000Z | 2021-12-01T19:05:38.000Z | """
The best randomly-searched ResNet reported in the paper.
In the original paper there is a bug. This network sums together layers after
the ReLU nonlinearity, which are not Gaussian, and also do not have mean 0. As
a result, the overall network does not converge to a Gaussian process. The
defined kernel is still valid, even if it doesn't correspond to a NN.
In the interest of making the results replicable, we have replicated this bug
as well.
The correct way to use ResNets is to sum things after a Conv2d layer, see for
example the `resnet_block` in `cnn_gp/kernels.py`.
"""
import torchvision
from cnn_gp import Conv2d, ReLU, Sequential, Sum
# Index ranges partitioning the 70k combined MNIST images into splits.
train_range = range(5000, 55000)
validation_range = list(range(55000, 60000)) + list(range(0, 5000))
test_range = range(60000, 70000)
# Identifiers used when naming runs/outputs.
dataset_name = "MNIST"
model_name = "ResNet"
dataset = torchvision.datasets.MNIST
# No extra input transforms; epochs=0 because the kernel is computed in
# closed form rather than trained -- TODO confirm against the runner script.
transforms = []
epochs = 0
# MNIST is single-channel grayscale with 10 digit classes.
in_channels = 1
out_channels = 10
# Prior variances for bias and weights (randomly searched, per the module
# docstring above).
var_bias = 4.69
var_weight = 7.27
# Eight residual-style blocks (identity summed with Conv2d+ReLU -- note the
# docstring above explains this sums post-ReLU, replicating the paper's
# bug), followed by a conv/ReLU pair and a final 28x28 conv that collapses
# the spatial dimensions.  Weight variance is scaled by kernel area (4**2).
initial_model = Sequential(
    *(Sum([
        Sequential(),
        Sequential(
            Conv2d(kernel_size=4, padding="same", var_weight=var_weight * 4**2,
                   var_bias=var_bias),
            ReLU(),
        )]) for _ in range(8)),
    Conv2d(kernel_size=4, padding="same", var_weight=var_weight * 4**2,
           var_bias=var_bias),
    ReLU(),
    Conv2d(kernel_size=28, padding=0, var_weight=var_weight,
           var_bias=var_bias),
)
| 31.086957 | 79 | 0.702098 |
40d397efdfc75b4459bce3aac322fa920256a163 | 2,956 | py | Python | python/learn/PythonDataVisualizationCookbookSE_Code/Chapter 07/ch07_rec08_scatterplot.py | flyingwjw/Documentation | 567608f388ca369b864c2d75a94647801b5dfa1e | [
"Unlicense"
] | null | null | null | python/learn/PythonDataVisualizationCookbookSE_Code/Chapter 07/ch07_rec08_scatterplot.py | flyingwjw/Documentation | 567608f388ca369b864c2d75a94647801b5dfa1e | [
"Unlicense"
] | null | null | null | python/learn/PythonDataVisualizationCookbookSE_Code/Chapter 07/ch07_rec08_scatterplot.py | flyingwjw/Documentation | 567608f388ca369b864c2d75a94647801b5dfa1e | [
"Unlicense"
] | 2 | 2020-09-22T18:37:46.000Z | 2021-09-02T11:02:59.000Z | import matplotlib.pyplot as plt
import numpy as np
# daily search trend for keyword 'flowers' for a year
d = [
1.04, 1.04, 1.16, 1.22, 1.46, 2.34, 1.16, 1.12, 1.24, 1.30, 1.44, 1.22, 1.26,
1.34, 1.26, 1.40, 1.52, 2.56, 1.36, 1.30, 1.20, 1.12, 1.12, 1.12, 1.06, 1.06,
1.00, 1.02, 1.04, 1.02, 1.06, 1.02, 1.04, 0.98, 0.98, 0.98, 1.00, 1.02, 1.02,
1.00, 1.02, 0.96, 0.94, 0.94, 0.94, 0.96, 0.86, 0.92, 0.98, 1.08, 1.04, 0.74,
0.98, 1.02, 1.02, 1.12, 1.34, 2.02, 1.68, 1.12, 1.38, 1.14, 1.16, 1.22, 1.10,
1.14, 1.16, 1.28, 1.44, 2.58, 1.30, 1.20, 1.16, 1.06, 1.06, 1.08, 1.00, 1.00,
0.92, 1.00, 1.02, 1.00, 1.06, 1.10, 1.14, 1.08, 1.00, 1.04, 1.10, 1.06, 1.06,
1.06, 1.02, 1.04, 0.96, 0.96, 0.96, 0.92, 0.84, 0.88, 0.90, 1.00, 1.08, 0.80,
0.90, 0.98, 1.00, 1.10, 1.24, 1.66, 1.94, 1.02, 1.06, 1.08, 1.10, 1.30, 1.10,
1.12, 1.20, 1.16, 1.26, 1.42, 2.18, 1.26, 1.06, 1.00, 1.04, 1.00, 0.98, 0.94,
0.88, 0.98, 0.96, 0.92, 0.94, 0.96, 0.96, 0.94, 0.90, 0.92, 0.96, 0.96, 0.96,
0.98, 0.90, 0.90, 0.88, 0.88, 0.88, 0.90, 0.78, 0.84, 0.86, 0.92, 1.00, 0.68,
0.82, 0.90, 0.88, 0.98, 1.08, 1.36, 2.04, 0.98, 0.96, 1.02, 1.20, 0.98, 1.00,
1.08, 0.98, 1.02, 1.14, 1.28, 2.04, 1.16, 1.04, 0.96, 0.98, 0.92, 0.86, 0.88,
0.82, 0.92, 0.90, 0.86, 0.84, 0.86, 0.90, 0.84, 0.82, 0.82, 0.86, 0.86, 0.84,
0.84, 0.82, 0.80, 0.78, 0.78, 0.76, 0.74, 0.68, 0.74, 0.80, 0.80, 0.90, 0.60,
0.72, 0.80, 0.82, 0.86, 0.94, 1.24, 1.92, 0.92, 1.12, 0.90, 0.90, 0.94, 0.90,
0.90, 0.94, 0.98, 1.08, 1.24, 2.04, 1.04, 0.94, 0.86, 0.86, 0.86, 0.82, 0.84,
0.76, 0.80, 0.80, 0.80, 0.78, 0.80, 0.82, 0.76, 0.76, 0.76, 0.76, 0.78, 0.78,
0.76, 0.76, 0.72, 0.74, 0.70, 0.68, 0.72, 0.70, 0.64, 0.70, 0.72, 0.74, 0.64,
0.62, 0.74, 0.80, 0.82, 0.88, 1.02, 1.66, 0.94, 0.94, 0.96, 1.00, 1.16, 1.02,
1.04, 1.06, 1.02, 1.10, 1.22, 1.94, 1.18, 1.12, 1.06, 1.06, 1.04, 1.02, 0.94,
0.94, 0.98, 0.96, 0.96, 0.98, 1.00, 0.96, 0.92, 0.90, 0.86, 0.82, 0.90, 0.84,
0.84, 0.82, 0.80, 0.80, 0.76, 0.80, 0.82, 0.80, 0.72, 0.72, 0.76, 0.80, 0.76,
0.70, 0.74, 0.82, 0.84, 0.88, 0.98, 1.44, 0.96, 0.88, 0.92, 1.08, 0.90, 0.92,
0.96, 0.94, 1.04, 1.08, 1.14, 1.66, 1.08, 0.96, 0.90, 0.86, 0.84, 0.86, 0.82,
0.84, 0.82, 0.84, 0.84, 0.84, 0.84, 0.82, 0.86, 0.82, 0.82, 0.86, 0.90, 0.84,
0.82, 0.78, 0.80, 0.78, 0.74, 0.78, 0.76, 0.76, 0.70, 0.72, 0.76, 0.72, 0.70,
0.64]
# Now let's generate random data for the same period
d1 = np.random.random(365)
assert len(d) == len(d1)
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.scatter(d, d1, alpha=0.5)
ax1.set_title('No correlation')
ax1.grid(True)
ax2 = fig.add_subplot(222)
ax2.scatter(d1, d1, alpha=0.5)
ax2.set_title('Ideal positive correlation')
ax2.grid(True)
ax3 = fig.add_subplot(223)
ax3.scatter(d1, d1*-1, alpha=0.5)
ax3.set_title('Ideal negative correlation')
ax3.grid(True)
ax4 = fig.add_subplot(224)
ax4.scatter(d1, d1+d, alpha=0.5)
ax4.set_title('Non ideal positive correlation')
ax4.grid(True)
plt.tight_layout()
plt.show() | 46.1875 | 78 | 0.552436 |
40d5469fd32315fb7f4708a40672a155712e5afb | 22,427 | py | Python | src/thespian/tweaks.py | mtttech/dndpersonae | c4fa129d1e940c9f9a5e29d703e3988b45d90356 | [
"MIT"
] | 1 | 2022-03-28T16:10:15.000Z | 2022-03-28T16:10:15.000Z | src/thespian/tweaks.py | mtttech/dndpersonae | c4fa129d1e940c9f9a5e29d703e3988b45d90356 | [
"MIT"
] | null | null | null | src/thespian/tweaks.py | mtttech/dndpersonae | c4fa129d1e940c9f9a5e29d703e3988b45d90356 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
import logging
from attributes import get_ability_modifier
from sourcetree.utils import (
get_feats_list,
get_feat_perks,
get_feat_proficiencies,
get_feat_requirements,
)
from stdio import prompt
log = logging.getLogger("thespian.tweaks")
| 40.481949 | 101 | 0.514112 |
40d5ed5ea76d8603996be2780920650b434417e6 | 9,213 | py | Python | Utils/Matrix.py | valavanisleonidas/Machine_Learning_Toolkit | 4a66e1419189e279a82fa6a7ff7945153308842a | [
"MIT"
] | null | null | null | Utils/Matrix.py | valavanisleonidas/Machine_Learning_Toolkit | 4a66e1419189e279a82fa6a7ff7945153308842a | [
"MIT"
] | null | null | null | Utils/Matrix.py | valavanisleonidas/Machine_Learning_Toolkit | 4a66e1419189e279a82fa6a7ff7945153308842a | [
"MIT"
] | null | null | null | import os
import platform
import numpy
if __name__ == '__main__':
trainFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\TrainSet'
testFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\TestSet'
#
# trainFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\SampleImages - Copy'
# testFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\SampleImages - Copy - Copy'
#
# # trainFolder = '/home/leonidas/Desktop/images/train'
# # testFolder = '/home/leonidas/Desktop/images/test'
#
# [trainArray, train_labels, testArray, test_labels, validationArray, validation_labels, outputClasses] = \
# load_dataset(trainFolder, testFolder,imageSize=(3,3),convertion='L',imageChannels=1)
#
# print trainArray.shape
# print trainArray
# # print validation_labels
# # print train_labels
# # print trainArray
#
# print trainArray.shape
# print train_labels.shape
# print testArray.shape
# print test_labels.shape
# print validationArray.shape
# print validation_labels.shape
#
# trainPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\train_2x2_CIELab_512.txt'
# testPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\test_2x2_CIELab_512.txt'
# trainLabelPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\train_2x2_CIELab_512_labels.txt'
# testLabelPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\test_2x2_CIELab_512_labels.txt'
# [trainArray, train_labels, testArray, test_labels, validationArray, validation_labels,
# outputClasses] = loadFeatures(trainPath=trainPath, trainLabels=trainLabelPath, testPath=testPath,
# testLabels=testLabelPath);
i=0;
for trainArray,train_labels in Matrix().getArrayOfImagesUsingMiniBatches(folderPath=trainFolder,image_size=(100,100),batch_size=15):
print (trainArray.shape)
print (train_labels.shape)
i+=len(trainArray)
print "aaasdasdas d : ",i
# # print validation_labels
# # print train_labels
# # print trainArray
#
# print trainArray.shape
# print train_labels.shape
# print testArray.shape
# print test_labels.shape
# print validationArray.shape
# print validation_labels.shape
| 43.457547 | 151 | 0.64485 |
40d66ffe931947e9a30f4f5ac4f0646b982e924f | 7,906 | py | Python | networking_calico/plugins/ml2/drivers/calico/policy.py | manojcode/networking-calico | db709fb27b492d3be3c094fa43e8c696962369b7 | [
"Apache-2.0"
] | null | null | null | networking_calico/plugins/ml2/drivers/calico/policy.py | manojcode/networking-calico | db709fb27b492d3be3c094fa43e8c696962369b7 | [
"Apache-2.0"
] | null | null | null | networking_calico/plugins/ml2/drivers/calico/policy.py | manojcode/networking-calico | db709fb27b492d3be3c094fa43e8c696962369b7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2018 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networking_calico.common import config as calico_config
from networking_calico.compat import log
from networking_calico import datamodel_v3
from networking_calico.plugins.ml2.drivers.calico.syncer import ResourceSyncer
LOG = log.getLogger(__name__)
# Each OpenStack security group is mapped to a Calico NetworkPolicy. A VM's
# security group membership is represented by the VM having a label for each
# security group that it belongs to; thus the selector
# 'has(<security-group-label>)' represents the VMs that belong to that security
# group.
#
# The label for each security group is 'sg.projectcalico.org/openstack-'
# followed by the security group ID, and the name of the NetworkPolicy for each
# security group is 'ossg.default.' followed by the security group ID.
SG_LABEL_PREFIX = 'sg.projectcalico.org/openstack-'
SG_NAME_LABEL_PREFIX = 'sg-name.projectcalico.org/openstack-'
SG_NAME_MAX_LENGTH = (datamodel_v3.SANITIZE_LABEL_MAX_LENGTH -
len(SG_NAME_LABEL_PREFIX))
SG_NAME_PREFIX = 'ossg.default.'
def policy_spec(sgid, rules):
    """Build the Calico NetworkPolicy spec for security group ``sgid``.

    ``rules`` may contain rules belonging to several security groups; only
    those whose 'security_group_id' matches ``sgid`` are translated.  The
    selector matches every endpoint labelled as a member of this group.
    """
    inbound_rules = []
    outbound_rules = []
    for rule in rules:
        # Ignore rules that belong to some other security group.
        if rule['security_group_id'] != sgid:
            continue
        translated = _neutron_rule_to_etcd_rule(rule)
        if rule['direction'] == 'ingress':
            inbound_rules.append(translated)
        else:
            outbound_rules.append(translated)
    return {
        'ingress': inbound_rules,
        'egress': outbound_rules,
        'selector': 'has(%s)' % (SG_LABEL_PREFIX + sgid),
    }
def _neutron_rule_to_etcd_rule(rule):
    """Translate a single Neutron security-group rule dict into the Calico
    etcd rule format.

    The returned dict always contains 'action' and 'ipVersion'; 'protocol',
    'icmp', 'source' and 'destination' are added depending on the input.
    """
    ethertype = rule['ethertype']
    etcd_rule = {'action': 'Allow'}
    # Map the ethertype field from Neutron to etcd format.
    etcd_rule['ipVersion'] = {'IPv4': 4,
                              'IPv6': 6}[ethertype]
    # Map the protocol field from Neutron to etcd format.  None or -1 means
    # "match any protocol", so no 'protocol' key is emitted at all.
    if rule['protocol'] is None or rule['protocol'] == -1:
        pass
    elif rule['protocol'] == 'ipv6-icmp':
        etcd_rule['protocol'] = 'ICMPv6'
    elif rule['protocol'] == 'icmp':
        # Plain 'icmp' on an IPv6 rule still means ICMPv6.
        etcd_rule['protocol'] = {'IPv4': 'ICMP',
                                 'IPv6': 'ICMPv6'}[ethertype]
    elif isinstance(rule['protocol'], int):
        # Numeric protocols pass straight through as IP protocol numbers.
        etcd_rule['protocol'] = rule['protocol']
    else:
        etcd_rule['protocol'] = rule['protocol'].upper()
    port_spec = None
    if rule['protocol'] == 'icmp' or rule['protocol'] == 'ipv6-icmp':
        # OpenStack stashes the ICMP match criteria in
        # port_range_min/max: min is the ICMP type, max the ICMP code.
        # None / -1 in either field means "match any".
        icmp_fields = {}
        icmp_type = rule['port_range_min']
        if icmp_type is not None and icmp_type != -1:
            icmp_fields['type'] = icmp_type
        icmp_code = rule['port_range_max']
        if icmp_code is not None and icmp_code != -1:
            icmp_fields['code'] = icmp_code
        if icmp_fields:
            etcd_rule['icmp'] = icmp_fields
    else:
        # src/dst_ports is a list in which each entry can be a
        # single number, or a string describing a port range.
        if rule['port_range_min'] == -1:
            port_spec = None
        elif rule['port_range_min'] == rule['port_range_max']:
            # Single port; skipped entirely when the field is None.
            if rule['port_range_min'] is not None:
                port_spec = [rule['port_range_min']]
        else:
            port_spec = ['%s:%s' % (rule['port_range_min'],
                                    rule['port_range_max'])]
    # Build the "remote endpoint" match: a selector for a remote security
    # group and/or a CIDR for a remote IP prefix.
    entity_rule = {}
    if rule['remote_group_id'] is not None:
        entity_rule['selector'] = 'has(%s)' % (SG_LABEL_PREFIX +
                                               rule['remote_group_id'])
    if rule['remote_ip_prefix'] is not None:
        entity_rule['nets'] = [rule['remote_ip_prefix']]
    LOG.debug("=> Entity rule %s" % entity_rule)
    # Store in source or destination field of the overall rule.  For
    # ingress, the remote match is the traffic's source and any ports apply
    # to the destination; for egress the remote match (with ports folded in)
    # is the destination.
    # NOTE(review): when neither remote_group_id nor remote_ip_prefix is
    # set, entity_rule is falsy and a computed port_spec is silently
    # dropped -- confirm this is the intended behaviour.
    if entity_rule:
        if rule['direction'] == 'ingress':
            etcd_rule['source'] = entity_rule
            if port_spec is not None:
                etcd_rule['destination'] = {'ports': port_spec}
        else:
            if port_spec is not None:
                entity_rule['ports'] = port_spec
            etcd_rule['destination'] = entity_rule
    LOG.debug("=> %s Calico rule %s" % (rule['direction'], etcd_rule))
    return etcd_rule
| 39.728643 | 79 | 0.611055 |
40d757b3788e8715de2cff8adf8b1027f7b43c6d | 4,359 | py | Python | 25/main.py | gosha20777/mipt-bioinfo-2021 | ed14975e9f597e7b2427bc589f12ac08d451c509 | [
"MIT"
] | null | null | null | 25/main.py | gosha20777/mipt-bioinfo-2021 | ed14975e9f597e7b2427bc589f12ac08d451c509 | [
"MIT"
] | null | null | null | 25/main.py | gosha20777/mipt-bioinfo-2021 | ed14975e9f597e7b2427bc589f12ac08d451c509 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
with open('input.txt') as f:
seq1 = f.readline().strip()
seq2 = f.readline().strip()
with open('BLOSUM62.txt') as f1:
lines = [line.strip().split() for line in f1.readlines()]
matrix = {(i[0], i[1]): int(i[2]) for i in lines}
penalty = 5
alignment = '\n'.join(linear_space_global_alignment(seq1, seq2, matrix, penalty))
print(alignment)
| 39.990826 | 115 | 0.532003 |
40d75b3cb34c1d537273d852cc304bd850526e28 | 10,039 | py | Python | utils/visualize_tree.py | moyiming1/Retrosynthesis-pathway-ranking | 380f31189d09395d0de911759b8bcea436b559b2 | [
"MIT"
] | 10 | 2021-02-24T02:31:40.000Z | 2022-02-17T07:58:46.000Z | utils/visualize_tree.py | wangxr0526/Retrosynthesis-pathway-ranking | 380f31189d09395d0de911759b8bcea436b559b2 | [
"MIT"
] | 1 | 2022-02-14T16:13:59.000Z | 2022-02-14T16:13:59.000Z | utils/visualize_tree.py | wangxr0526/Retrosynthesis-pathway-ranking | 380f31189d09395d0de911759b8bcea436b559b2 | [
"MIT"
] | 3 | 2021-01-05T11:43:03.000Z | 2022-02-17T08:52:27.000Z | import os, sys
project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_path)
import pickle
if __name__ == "__main__":
file_name = project_path + '/data/pathway_train_example.pkl'
with open(file_name, 'rb') as f:
data = pickle.load(f)
trees_to_plot = [d['tree'] for d in data['generated_paths'][0:10]]
create_tree_html(trees_to_plot, 'plotted_trees')
| 47.131455 | 123 | 0.574858 |
40d7ebe962811bafc69c16d6ae16e6cb4f35d53d | 3,955 | py | Python | python-is-easy/assignments/snowman/main.py | eDyablo/pirple | 08910c7574203f685a0971cba61a54166d805a1c | [
"MIT"
] | null | null | null | python-is-easy/assignments/snowman/main.py | eDyablo/pirple | 08910c7574203f685a0971cba61a54166d805a1c | [
"MIT"
] | null | null | null | python-is-easy/assignments/snowman/main.py | eDyablo/pirple | 08910c7574203f685a0971cba61a54166d805a1c | [
"MIT"
] | null | null | null | '''
Homework assignment for the 'Python is easy' course by Pirple.
Written be Ed Yablonsky.
Snowman(Hangman) game.
'''
from os import (
name as os_name,
system as system_call,
)
from os.path import (
abspath,
dirname,
join as join_path,
)
'''
Screen displays game output
'''
'''
Input represents game input device
'''
'''
Art is a game art which is set of frames that get loaded from a text file.
Draws its current frame on a screen.
'''
'''
Riddle holds secret word and gets solved by guesses
'''
'''
Game is a game itself
'''
Game().play()
| 23.682635 | 81 | 0.551707 |
40d82abf6ddc30ada008f9205fa131b2828d8ba2 | 2,569 | py | Python | src/GenericTsvReader.py | getzlab/ABSOLUTE | cd443ec9370df98778d98227bb9a11c3e24c00cb | [
"BSD-3-Clause"
] | null | null | null | src/GenericTsvReader.py | getzlab/ABSOLUTE | cd443ec9370df98778d98227bb9a11c3e24c00cb | [
"BSD-3-Clause"
] | null | null | null | src/GenericTsvReader.py | getzlab/ABSOLUTE | cd443ec9370df98778d98227bb9a11c3e24c00cb | [
"BSD-3-Clause"
] | null | null | null | """
Created on Jul 5, 2012
@author: lichtens
"""
import csv
import os
| 33.802632 | 124 | 0.652005 |
40db83a1176151c4bc0bdff2477e10a8b1ab20a4 | 12,376 | py | Python | examples/applications/plot_impact_imbalanced_classes.py | cdchushig/imbalanced-learn | f02e7c7c2c021c85823cace405ca2c58ad4ff147 | [
"MIT"
] | 5,678 | 2016-07-19T10:22:35.000Z | 2022-03-31T22:46:41.000Z | examples/applications/plot_impact_imbalanced_classes.py | cdchushig/imbalanced-learn | f02e7c7c2c021c85823cace405ca2c58ad4ff147 | [
"MIT"
] | 759 | 2016-07-19T05:41:59.000Z | 2022-03-28T11:00:10.000Z | examples/applications/plot_impact_imbalanced_classes.py | cdchushig/imbalanced-learn | f02e7c7c2c021c85823cace405ca2c58ad4ff147 | [
"MIT"
] | 1,165 | 2016-07-19T22:56:42.000Z | 2022-03-31T22:46:45.000Z | """
==========================================================
Fitting model on imbalanced datasets and how to fight bias
==========================================================
This example illustrates the problem induced by learning on datasets having
imbalanced classes. Subsequently, we compare different approaches alleviating
these negative effects.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
# %% [markdown]
# Problem definition
# ------------------
#
# We are dropping the following features:
#
# - "fnlwgt": this feature was created while studying the "adult" dataset.
# Thus, we will not use this feature which is not acquired during the survey.
# - "education-num": it is encoding the same information than "education".
# Thus, we are removing one of these 2 features.
# %%
from sklearn.datasets import fetch_openml
df, y = fetch_openml("adult", version=2, as_frame=True, return_X_y=True)
df = df.drop(columns=["fnlwgt", "education-num"])
# %% [markdown]
# The "adult" dataset as a class ratio of about 3:1
# %%
classes_count = y.value_counts()
classes_count
# %% [markdown]
# This dataset is only slightly imbalanced. To better highlight the effect of
# learning from an imbalanced dataset, we will increase its ratio to 30:1
# %%
from imblearn.datasets import make_imbalance
ratio = 30
df_res, y_res = make_imbalance(
df,
y,
sampling_strategy={classes_count.idxmin(): classes_count.max() // ratio},
)
y_res.value_counts()
# %% [markdown]
# We will perform a cross-validation evaluation to get an estimate of the test
# score.
#
# As a baseline, we could use a classifier which will always predict the
# majority class independently of the features provided.
# %%
from sklearn.model_selection import cross_validate
from sklearn.dummy import DummyClassifier
dummy_clf = DummyClassifier(strategy="most_frequent")
scoring = ["accuracy", "balanced_accuracy"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
print(f"Accuracy score of a dummy classifier: {cv_result['test_accuracy'].mean():.3f}")
# %% [markdown]
# Instead of using the accuracy, we can use the balanced accuracy which will
# take into account the balancing issue.
# %%
print(
f"Balanced accuracy score of a dummy classifier: "
f"{cv_result['test_balanced_accuracy'].mean():.3f}"
)
# %% [markdown]
# Strategies to learn from an imbalanced dataset
# ----------------------------------------------
# We will use a dictionary and a list to continuously store the results of
# our experiments and show them as a pandas dataframe.
# %%
index = []
scores = {"Accuracy": [], "Balanced accuracy": []}
# %% [markdown]
# Dummy baseline
# ..............
#
# Before to train a real machine learning model, we can store the results
# obtained with our :class:`~sklearn.dummy.DummyClassifier`.
# %%
import pandas as pd
index += ["Dummy classifier"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# Linear classifier baseline
# ..........................
#
# We will create a machine learning pipeline using a
# :class:`~sklearn.linear_model.LogisticRegression` classifier. In this regard,
# we will need to one-hot encode the categorical columns and standardized the
# numerical columns before to inject the data into the
# :class:`~sklearn.linear_model.LogisticRegression` classifier.
#
# First, we define our numerical and categorical pipelines.
# %%
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import make_pipeline
num_pipe = make_pipeline(
StandardScaler(), SimpleImputer(strategy="mean", add_indicator=True)
)
cat_pipe = make_pipeline(
SimpleImputer(strategy="constant", fill_value="missing"),
OneHotEncoder(handle_unknown="ignore"),
)
# %% [markdown]
# Then, we can create a preprocessor which will dispatch the categorical
# columns to the categorical pipeline and the numerical columns to the
# numerical pipeline
# %%
from sklearn.compose import make_column_transformer
from sklearn.compose import make_column_selector as selector
preprocessor_linear = make_column_transformer(
(num_pipe, selector(dtype_include="number")),
(cat_pipe, selector(dtype_include="category")),
n_jobs=2,
)
# %% [markdown]
# Finally, we connect our preprocessor with our
# :class:`~sklearn.linear_model.LogisticRegression`. We can then evaluate our
# model.
# %%
from sklearn.linear_model import LogisticRegression
lr_clf = make_pipeline(preprocessor_linear, LogisticRegression(max_iter=1000))
# %%
index += ["Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that our linear model is learning slightly better than our dummy
# baseline. However, it is impacted by the class imbalance.
#
# We can verify that something similar is happening with a tree-based model
# such as :class:`~sklearn.ensemble.RandomForestClassifier`. With this type of
# classifier, we will not need to scale the numerical data, and we will only
# need to ordinal encode the categorical data.
# %%
from sklearn.preprocessing import OrdinalEncoder
from sklearn.ensemble import RandomForestClassifier
num_pipe = SimpleImputer(strategy="mean", add_indicator=True)
cat_pipe = make_pipeline(
SimpleImputer(strategy="constant", fill_value="missing"),
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
)
preprocessor_tree = make_column_transformer(
(num_pipe, selector(dtype_include="number")),
(cat_pipe, selector(dtype_include="category")),
n_jobs=2,
)
rf_clf = make_pipeline(
preprocessor_tree, RandomForestClassifier(random_state=42, n_jobs=2)
)
# %%
index += ["Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# The :class:`~sklearn.ensemble.RandomForestClassifier` is as well affected by
# the class imbalanced, slightly less than the linear model. Now, we will
# present different approach to improve the performance of these 2 models.
#
# Use `class_weight`
# ..................
#
# Most of the models in `scikit-learn` have a parameter `class_weight`. This
# parameter will affect the computation of the loss in linear model or the
# criterion in the tree-based model to penalize differently a false
# classification from the minority and majority class. We can set
# `class_weight="balanced"` such that the weight applied is inversely
# proportional to the class frequency. We test this parametrization in both
# linear model and tree-based model.
# %%
lr_clf.set_params(logisticregression__class_weight="balanced")
index += ["Logistic regression with balanced class weights"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %%
rf_clf.set_params(randomforestclassifier__class_weight="balanced")
index += ["Random forest with balanced class weights"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that using `class_weight` was really effective for the linear
# model, alleviating the issue of learning from imbalanced classes. However,
# the :class:`~sklearn.ensemble.RandomForestClassifier` is still biased toward
# the majority class, mainly due to the criterion which is not suited enough to
# fight the class imbalance.
#
# Resample the training set during learning
# .........................................
#
# Another way is to resample the training set by under-sampling or
# over-sampling some of the samples. `imbalanced-learn` provides some samplers
# to do such processing.
# %%
from imblearn.pipeline import make_pipeline as make_pipeline_with_sampler
from imblearn.under_sampling import RandomUnderSampler

# Insert a random under-sampler between preprocessing and the estimator so
# the classes are rebalanced on each training fold.
lr_clf = make_pipeline_with_sampler(
    preprocessor_linear,
    RandomUnderSampler(random_state=42),
    LogisticRegression(max_iter=1000),
)

# %%
index += ["Under-sampling + Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())

df_scores = pd.DataFrame(scores, index=index)
df_scores

# %%
# Same sampler, now in front of the random forest pipeline.
rf_clf = make_pipeline_with_sampler(
    preprocessor_tree,
    RandomUnderSampler(random_state=42),
    RandomForestClassifier(random_state=42, n_jobs=2),
)

# %%
index += ["Under-sampling + Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())

df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# Applying a random under-sampler before training the linear model or the
# random forest keeps them from focusing on the majority class, at the cost of
# making more mistakes on majority-class samples (i.e. decreased
# accuracy).
#
# We could apply any type of samplers and find which sampler is working best
# on the current dataset.
#
# Instead, we will present another way by using classifiers which will apply
# sampling internally.
#
# Use of specific balanced algorithms from imbalanced-learn
# .........................................................
#
# We already showed that random under-sampling can be effective on decision
# trees. However, instead of under-sampling the dataset once, one can
# under-sample the original dataset before taking each bootstrap sample. This
# is the basis of the :class:`imblearn.ensemble.BalancedRandomForestClassifier`
# and :class:`~imblearn.ensemble.BalancedBaggingClassifier`.
# %%
from imblearn.ensemble import BalancedRandomForestClassifier

# Each tree is fit on a bootstrap sample that is itself balanced by
# under-sampling, so no separate resampling step is needed in the pipeline.
rf_clf = make_pipeline(
    preprocessor_tree,
    BalancedRandomForestClassifier(random_state=42, n_jobs=2),
)

# %%
index += ["Balanced random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())

df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# The performance with the
# :class:`~imblearn.ensemble.BalancedRandomForestClassifier` is better than
# applying a single random under-sampling. We will use a gradient-boosting
# classifier within a :class:`~imblearn.ensemble.BalancedBaggingClassifier`.
from sklearn.experimental import enable_hist_gradient_boosting  # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from imblearn.ensemble import BalancedBaggingClassifier

# Bag of gradient-boosting models: each bagging member is trained on a
# balanced (under-sampled) bootstrap of the data.
bag_clf = make_pipeline(
    preprocessor_tree,
    BalancedBaggingClassifier(
        base_estimator=HistGradientBoostingClassifier(random_state=42),
        n_estimators=10,
        random_state=42,
        n_jobs=2,
    ),
)

index += ["Balanced bag of histogram gradient boosting"]
cv_result = cross_validate(bag_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())

df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# This last approach is the most effective. The different under-sampled
# bootstraps bring diversity to the individual GBDT models and keep them from
# focusing on only a portion of the majority class.
| 33.906849 | 87 | 0.747253 |
40dc4792e5546b69652c162537bffd53c76ae2d8 | 3,949 | py | Python | python/fix-page-breaks.py | utcompling/GeoAnnotate | a864106d9595e8426339f1d34432a54e04cee66a | [
"Apache-2.0"
] | 9 | 2015-11-19T06:03:08.000Z | 2021-02-16T19:14:42.000Z | python/fix-page-breaks.py | utcompling/GeoAnnotate | a864106d9595e8426339f1d34432a54e04cee66a | [
"Apache-2.0"
] | null | null | null | python/fix-page-breaks.py | utcompling/GeoAnnotate | a864106d9595e8426339f1d34432a54e04cee66a | [
"Apache-2.0"
] | 1 | 2018-10-09T23:12:34.000Z | 2018-10-09T23:12:34.000Z | #!/usr/bin/python
import argparse
import re
# Build the CLI: each positional argument is a file to fix.
# NOTE: this is Python 2 code (print statements, xrange).
parser = argparse.ArgumentParser(description='Fix page breaks in War of The Rebellion text')
parser.add_argument('files', nargs='*',
                    help='Files to process')
args = parser.parse_args()

for file in args.files:
    # Output goes next to the input, with a ".joined-pagebreak" suffix.
    outfile = open(file + ".joined-pagebreak", "w")
    text = ''.join(open(file).readlines())
    # Pages were separated upstream by literal "PAGEBREAK" marker lines.
    pages = re.split("PAGEBREAK\n", text)
    # Remove empty pages
    pages = [x for x in pages if x]
    # NOTE(review): the loop stops at len(pages) - 1 because the join decision
    # peeks at pages[i + 1]; as written the final page is never written to the
    # output file -- confirm whether that is intentional.
    for i in xrange(0, len(pages) - 1):
        # Remove extraneous blank lines
        pages[i] = re.sub("\n\n\n+", "\n\n", pages[i])
        # Undo HTML entities
        pages[i] = re.sub("&amp;", "&", pages[i])
        pages[i] = re.sub("&lt;", "<", pages[i])
        pages[i] = re.sub("&gt;", ">", pages[i])
        # Do the following a second time to handle cases of
        # &amp;amp;, which are common
        pages[i] = re.sub("&amp;", "&", pages[i])
        # Strip a leading chapter heading such as "[CHAP. XII.]".
        m = re.match(r"^( *\[*CHAP\. [A-Z]+\.\]* *\n\n?)(.*)", pages[i], re.S)
        if m:
            pages[i] = m.group(2)
            print "Removed CHAP heading on page %s:\n[%s]\n" % (i, m.group(1))
        # Strip "R R" notation lines (see the message below).
        m = re.match("(.*?)(\n?(?: *[0-9]+|S) *(?:R R(?: *[-_VY]+ *[^\n]*)?|R *-+ *[^\n]*)\n)(.*)$", pages[i], re.S)
        if m:
            pages[i] = m.group(1) + m.group(3)
            print "Removed R R notation on page %s:\n[%s]\n" % (i, m.group(2))
        # Strip a trailing footnote block delimited by dashed rules.
        m = re.match(r"(.*?\n)(\n* *------+\n( *(?:[*+#@$|^\\/&~=>!?]|[abc] |[abc][A-Z])[^\n]*\n|\n)* *-------+\n+(?:[*+#@$|^\\/&~=>!?] *[A-Z][^\n]*\n|\n)*)$", pages[i], re.S)
        if m:
            pages[i] = m.group(1)
            print "Removed footnote on page %s:\n[%s]\n" % (i, m.group(2))
        # Strip a trailing "MAP ..." caption line.
        m = re.match("(.*?\n)(\n*[*]?MAP[^\n]*\n+)$", pages[i], re.S)
        if m:
            pages[i] = m.group(1)
            print "Removed MAP notation on page %s:\n[%s]\n" % (i, m.group(2))
        # Drop trailing newlines so the join decision sees the last text line.
        while pages[i] and pages[i][-1] == "\n":
            pages[i] = pages[i][0:-1]
        # Measure the page's final line; a short last line suggests the
        # paragraph ends here rather than continuing on the next page.
        if "\n" not in pages[i]:
            lastlinelen = len(pages[i])
        else:
            m = re.match(".*\n([^\n]*)$", pages[i], re.S)
            assert m
            lastlinelen = len(m.group(1))
        shortline = lastlinelen < 60
        join = False
        hyphenjoin = False
        if not pages[i]:
            continue
        # Decide whether this page's text continues onto the next page.
        if len(pages[i]) >= 2 and pages[i][-1] == '-' and pages[i][-2].islower():
            if shortline:
                msg = "PAGEBREAK SHORT-LINE HYPHEN, NOT JOINED"
            else:
                msg = "PAGEBREAK HYPHEN-JOINED"
                hyphenjoin = True
                join = True
        elif pages[i + 1] and pages[i + 1][0].islower():
            if shortline:
                msg = "PAGEBREAK SHORT-LINE NEXT PAGE STARTS LOWERCASE, NOT JOINED"
            else:
                msg = "PAGEBREAK NEXT PAGE STARTS LOWERCASE, JOINED"
                join = True
        elif len(pages[i]) >= 3 and pages[i][-1] == '.' and pages[i][-2].isupper() and pages[i][-3] in ['.', ' ']:
            if shortline:
                msg = "PAGEBREAK SHORT-LINE ENDS WITH ABBREVIATION PERIOD, NOT JOINED"
            else:
                msg = "PAGEBREAK ENDS ABBREV-PERIOD, JOINED"
                join = True
        elif pages[i][-1] == '.':
            msg = "PAGEBREAK ENDS PERIOD, NOT JOINED"
        elif len(pages[i]) >= 2 and pages[i][-1] == '*' and pages[i][-2] == '.':
            msg = "PAGEBREAK ENDS PERIOD STAR, NOT JOINED"
        elif len(pages[i]) >= 2 and pages[i][-1] == '"' and pages[i][-2] == '.':
            msg = "PAGEBREAK ENDS PERIOD QUOTE, NOT JOINED"
        elif pages[i][-1] == ':':
            msg = "PAGEBREAK ENDS COLON, NOT JOINED"
        elif pages[i][-1] == ',':
            if shortline:
                msg = "PAGEBREAK ENDS SHORT-LINE COMMA, NOT JOINED"
            else:
                msg = "PAGEBREAK ENDS COMMA, JOINED"
                join = True
        else:
            if shortline:
                msg = "PAGEBREAK ENDS SHORT-LINE OTHER, NOT JOINED"
            else:
                msg = "PAGEBREAK ENDS OTHER, JOINED"
                join = True
        print "Page %s: %s" % (i, msg)
        if hyphenjoin:
            # Drop the trailing hyphen so the split word rejoins seamlessly.
            outfile.write(pages[i][0:-1])
        elif join:
            outfile.write(pages[i] + " ")
        else:
            # Not joined: keep a paragraph break and annotate the decision.
            # NOTE(review): indentation was lost in this copy; placement of
            # the two writes below inside the else-branch is inferred from the
            # join semantics -- confirm against the original file.
            outfile.write(pages[i])
            outfile.write("\n\n")
            outfile.write("\n%s\n" % msg)
    outfile.close()
| 36.564815 | 171 | 0.52469 |
40dd78243c51556a2be73588a2b4ac205cbb6f28 | 570 | py | Python | 2021/02/part2.py | FranciscoAT/advent-of-code | 69f20696e4c59ff6dfa010b22dd3593ea3d12208 | [
"MIT"
] | null | null | null | 2021/02/part2.py | FranciscoAT/advent-of-code | 69f20696e4c59ff6dfa010b22dd3593ea3d12208 | [
"MIT"
] | null | null | null | 2021/02/part2.py | FranciscoAT/advent-of-code | 69f20696e4c59ff6dfa010b22dd3593ea3d12208 | [
"MIT"
] | null | null | null |
if __name__ == "__main__":
    # `main` is defined earlier in this file (outside this excerpt); run it on
    # the "test" input first, then on the "puzzle" input.
    main("test")
    main("puzzle")
| 23.75 | 43 | 0.436842 |
40dd84dd99f72ec266d9a45433b99fd282b94576 | 6,068 | py | Python | tf_seal/python/tensor.py | karlhigley/tf-seal | 74a38e3ff71d29e862881d56bca84aaa23efd710 | [
"Apache-2.0"
] | 94 | 2019-08-06T16:03:27.000Z | 2022-03-24T18:19:07.000Z | tf_seal/python/tensor.py | karlhigley/tf-seal | 74a38e3ff71d29e862881d56bca84aaa23efd710 | [
"Apache-2.0"
] | 23 | 2019-08-19T16:22:12.000Z | 2022-03-31T15:09:58.000Z | tf_seal/python/tensor.py | karlhigley/tf-seal | 74a38e3ff71d29e862881d56bca84aaa23efd710 | [
"Apache-2.0"
] | 17 | 2019-08-08T22:45:46.000Z | 2022-03-22T08:05:16.000Z | import numpy as np
import tensorflow as tf
import tf_seal.python.ops.seal_ops as ops
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops as tf_ops
# def __sub__(self, other):
# other = convert_to_tensor(other)
# res = ops.big_sub(self._raw, other._raw)
# return Tensor(res)
def _fetch_function(seal_tensor):
    """Unwrap a tf_seal Tensor for tf.Session.run and rebuild the result.

    Returns the list of underlying fetchable components plus a callable that
    converts the fetched components back into a float64 numpy value.
    """
    components = [convert_from_tensor(seal_tensor, dtype=tf.float64)]

    def rewrap(components_fetched):
        return components_fetched[0].astype(np.float64)

    return components, rewrap
def _feed_function(seal_tensor, feed_value):
    # Map the wrapped raw tensor to the user-supplied value; used by
    # tf.Session.run feed-dict handling for tf_seal Tensor objects.
    return [(seal_tensor._raw, feed_value)]
def _feed_function_for_partial_run(seal_tensor):
    # For partial runs only the underlying raw tensor is needed (no value yet).
    return [seal_tensor._raw]
# NOTE(review): `Tensor` and `_tensor_conversion_function` are defined earlier
# in this file (outside this excerpt).
# this allows tf_seal.Tensor to be passed directly to tf.Session.run,
# unwrapping and converting the result as needed
tf_session.register_session_run_conversion_functions(
    tensor_type=Tensor,
    fetch_function=_fetch_function,
    feed_function=_feed_function,
    feed_function_for_partial_run=_feed_function_for_partial_run,
)

# TODO(Morten)
# this allows implicit conversion of tf_seal.Tensor to tf.Tensor,
# but since the output dtype is determined by the outer context
# we essentially have to export with the implied risk of data loss
tf_ops.register_tensor_conversion_function(Tensor, _tensor_conversion_function)

# this allows Tensor to pass the tf.is_tensor test
tf_ops.register_dense_tensor_like_type(Tensor)

# this allows tf_big.Tensor to be plumbed through Keras layers
# but seems only truly useful when used in conjunction with
# `register_tensor_conversion_function`
tf_utils.register_symbolic_tensor_type(Tensor)
| 30.492462 | 102 | 0.742914 |
40de3e3ad949140a43a1a19f490e5fc039aedb2f | 25,087 | py | Python | program/admin.py | Dumbaz/autoradio-pv | 8aae293e58b2e79a05956c535bb109f74edc89c3 | [
"BSD-3-Clause"
] | null | null | null | program/admin.py | Dumbaz/autoradio-pv | 8aae293e58b2e79a05956c535bb109f74edc89c3 | [
"BSD-3-Clause"
] | null | null | null | program/admin.py | Dumbaz/autoradio-pv | 8aae293e58b2e79a05956c535bb109f74edc89c3 | [
"BSD-3-Clause"
] | null | null | null | from django.core.exceptions import ObjectDoesNotExist
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import render
from django.conf import settings
from .models import Language, Type, MusicFocus, Category, Topic, RTRCategory, Host, Note, RRule, Schedule, Show, TimeSlot
from .forms import MusicFocusForm
from datetime import date, datetime, time, timedelta
# Register each model with its ModelAdmin. The *Admin classes are defined
# earlier in this file (outside this excerpt).
admin.site.register(Language, LanguageAdmin)
admin.site.register(Type, TypeAdmin)
admin.site.register(MusicFocus, MusicFocusAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(RTRCategory, RTRCategoryAdmin)
admin.site.register(Host, HostAdmin)
admin.site.register(Note, NoteAdmin)
# NOTE(review): Schedule registration is deliberately commented out upstream.
#admin.site.register(Schedule, ScheduleAdmin)
admin.site.register(TimeSlot, TimeSlotAdmin)
admin.site.register(Show, ShowAdmin)
40de4834e40d2116182061d040e90ff70baa0986 | 2,177 | py | Python | test/logic/test_block_features.py | Sam-prog-sudo/Sam.github.io | 75f41ec26b4b3aafcb117e467e38fca1e69f5c87 | [
"MIT"
] | 3 | 2020-07-06T21:03:03.000Z | 2020-07-18T07:02:59.000Z | test/logic/test_block_features.py | Sam-prog-sudo/Sam.github.io | 75f41ec26b4b3aafcb117e467e38fca1e69f5c87 | [
"MIT"
] | null | null | null | test/logic/test_block_features.py | Sam-prog-sudo/Sam.github.io | 75f41ec26b4b3aafcb117e467e38fca1e69f5c87 | [
"MIT"
] | 4 | 2020-07-17T11:16:05.000Z | 2020-10-01T08:57:21.000Z | import hashlib
import json
from time import time
import pytest
from app.chaine.blockchain import Blockchain
def test_block_creation(a_valid_block, proof=123, previous_hash='abc'):
bc = Blockchain()
block_a_tester = bc.new_block(proof, previous_hash)
assert block_a_tester['index'] == a_valid_block['index']
assert isinstance(
block_a_tester['timestamp'],
type(a_valid_block['timestamp'])
)
assert block_a_tester['proof'] == a_valid_block['proof']
assert block_a_tester['previous_hash'] == a_valid_block['previous_hash']
| 23.408602 | 76 | 0.622876 |
40e031fd64128f14855fedd41208af0c66f89410 | 886 | py | Python | urls.py | cartologic/cartoview_graduated_styler | f3dc6b0d48dc95bdd7e68d148a5182a4e259dbf3 | [
"BSD-2-Clause"
] | null | null | null | urls.py | cartologic/cartoview_graduated_styler | f3dc6b0d48dc95bdd7e68d148a5182a4e259dbf3 | [
"BSD-2-Clause"
] | 16 | 2017-08-06T09:49:01.000Z | 2021-09-01T08:40:58.000Z | urls.py | cartologic/cartoview_graduated_styler | f3dc6b0d48dc95bdd7e68d148a5182a4e259dbf3 | [
"BSD-2-Clause"
] | null | null | null | # from django.conf.urls import patterns, url, include
# from django.views.generic import TemplateView
# from . import views, APP_NAME
#
# urlpatterns = patterns('',
# url(r'^$', views.index, name='%s.index' % APP_NAME),
# )
from django.urls import path, re_path, include
from . import views, APP_NAME
from .api import LayerResource
from tastypie.api import Api
# Tastypie API: expose LayerResource under the "api" namespace.
Resources_api = Api(api_name="api")
Resources_api.register(LayerResource())

urlpatterns = [
    # App landing page (views.index).
    re_path(r'^$', views.index, name='%s.index' % APP_NAME),
    # Styles for a given layer (views.layer_styles).
    path('styles/<str:layername>/', views.layer_styles, name='%s.layer_styles' % APP_NAME),
    # Save a style for a layer (views.save_style).
    path('styles/save/<str:layer_name>/<str:style_name>', views.save_style, name='%s.save_style' % APP_NAME),
    # Proxy to the GeoServer REST API (views.geoserver_rest_proxy).
    re_path(r'^proxy/geoserver/rest/(?P<suburl>.*)$', views.geoserver_rest_proxy, name='%s.proxy' % APP_NAME),
    # Catch-all: hand remaining URLs to the Tastypie API.
    re_path(r'^', include(Resources_api.urls)),
]
| 34.076923 | 110 | 0.705418 |
40e07b3163c543bc0b7215aac128eae673625978 | 1,721 | py | Python | core/rest/wscdn.py | cybert79/Osmedeus | 684d853144e2f85343c3367440120142455f296b | [
"MIT"
] | 1 | 2019-06-13T09:14:11.000Z | 2019-06-13T09:14:11.000Z | core/rest/wscdn.py | KbaHaxor/Osmedeus | 0894d52ad5949e9151b0fd05d9746ecafc8057b5 | [
"MIT"
] | null | null | null | core/rest/wscdn.py | KbaHaxor/Osmedeus | 0894d52ad5949e9151b0fd05d9746ecafc8057b5 | [
"MIT"
] | 2 | 2020-01-09T17:48:23.000Z | 2020-01-09T17:48:24.000Z | import os
import glob
import json
from pathlib import Path
from flask_restful import Api, Resource, reqparse
from flask_jwt_extended import jwt_required
from flask import Flask, request, escape, make_response, send_from_directory
import utils
# incase you can't install ansi2html it's won't break the api
try:
from ansi2html import Ansi2HTMLConverter
except:
pass
current_path = os.path.dirname(os.path.realpath(__file__))
'''
render stdout content
'''
| 30.732143 | 91 | 0.654852 |
40e08ff17bc877d0938f412ad22362a39a6d45db | 13,534 | py | Python | custom_components/hahm/services.py | noxhirsch/custom_homematic | afc03c813f44d342f75477e6fcce85fc78515258 | [
"MIT"
] | null | null | null | custom_components/hahm/services.py | noxhirsch/custom_homematic | afc03c813f44d342f75477e6fcce85fc78515258 | [
"MIT"
] | null | null | null | custom_components/hahm/services.py | noxhirsch/custom_homematic | afc03c813f44d342f75477e6fcce85fc78515258 | [
"MIT"
] | null | null | null | """Module with hahomematic services."""
from __future__ import annotations
from datetime import datetime
import logging
from hahomematic.const import (
ATTR_ADDRESS,
ATTR_INTERFACE_ID,
ATTR_NAME,
ATTR_PARAMETER,
ATTR_VALUE,
HmPlatform,
)
from hahomematic.device import HmDevice
from hahomematic.entity import BaseEntity, GenericEntity
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID, ATTR_MODE, ATTR_TIME
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import comp_entity_ids
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.service import (
async_register_admin_service,
verify_domain_control,
)
from .const import (
ATTR_PARAMSET,
ATTR_PARAMSET_KEY,
ATTR_RX_MODE,
ATTR_VALUE_TYPE,
DOMAIN,
)
from .control_unit import ControlUnit, HaHub
from .helpers import get_device_address_at_interface_from_identifiers
_LOGGER = logging.getLogger(__name__)
# Extra service-call attribute names not provided by hahomematic/HA constants.
ATTR_CHANNEL = "channel"
ATTR_DEVICE_ID = "device_id"
DEFAULT_CHANNEL = 1

# Service names registered by this integration.
SERVICE_EXPORT_DEVICE_DEFINITION = "export_device_definition"
SERVICE_PUT_PARAMSET = "put_paramset"
SERVICE_SET_DEVICE_VALUE = "set_device_value"
SERVICE_SET_INSTALL_MODE = "set_install_mode"
SERVICE_SET_VARIABLE_VALUE = "set_variable_value"

HAHM_SERVICES = [
    SERVICE_EXPORT_DEVICE_DEFINITION,
    SERVICE_PUT_PARAMSET,
    SERVICE_SET_DEVICE_VALUE,
    SERVICE_SET_INSTALL_MODE,
    SERVICE_SET_VARIABLE_VALUE,
]

# Validation schema: export a device definition by HA device id.
SCHEMA_SERVICE_EXPORT_DEVICE_DEFINITION = vol.Schema(
    {
        vol.Required(ATTR_DEVICE_ID): cv.string,
    }
)

# Validation schema: set a hub/system variable to an arbitrary value.
SCHEMA_SERVICE_SET_VARIABLE_VALUE = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): comp_entity_ids,
        vol.Required(ATTR_NAME): cv.string,
        vol.Required(ATTR_VALUE): cv.match_all,
    }
)

# Validation schema: enable install (pairing) mode on an interface.
SCHEMA_SERVICE_SET_INSTALL_MODE = vol.Schema(
    {
        vol.Required(ATTR_INTERFACE_ID): cv.string,
        vol.Optional(ATTR_TIME, default=60): cv.positive_int,
        vol.Optional(ATTR_MODE, default=1): vol.All(vol.Coerce(int), vol.In([1, 2])),
        vol.Optional(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
    }
)

# Validation schema: write a single parameter value on a device channel.
SCHEMA_SERVICE_SET_DEVICE_VALUE = vol.Schema(
    {
        vol.Required(ATTR_DEVICE_ID): cv.string,
        vol.Required(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
        vol.Required(ATTR_PARAMETER): vol.All(cv.string, vol.Upper),
        vol.Required(ATTR_VALUE): cv.match_all,
        vol.Optional(ATTR_VALUE_TYPE): vol.In(
            ["boolean", "dateTime.iso8601", "double", "int", "string"]
        ),
        vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper),
    }
)

# Validation schema: write a whole paramset dict on a device channel.
SCHEMA_SERVICE_PUT_PARAMSET = vol.Schema(
    {
        vol.Required(ATTR_DEVICE_ID): cv.string,
        vol.Required(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
        vol.Required(ATTR_PARAMSET_KEY): vol.All(cv.string, vol.Upper),
        vol.Required(ATTR_PARAMSET): dict,
        vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper),
    }
)
def _get_device(hass: HomeAssistant, device_id: str) -> HmDevice | None:
    """Return the hahomematic device behind a HA device registry entry.

    Returns None when the registry entry, its address identifiers, or the
    owning control unit cannot be resolved.
    """
    registry_entry: DeviceEntry | None = dr.async_get(hass).async_get(device_id)
    if not registry_entry:
        return None
    address_data = get_device_address_at_interface_from_identifiers(
        identifiers=registry_entry.identifiers
    )
    if address_data is None:
        return None
    device_address, interface_id = address_data
    control_unit = _get_cu_by_interface_id(hass=hass, interface_id=interface_id)
    if not control_unit:
        return None
    return control_unit.central.hm_devices.get(device_address)
def _get_interface_channel_address(
    hass: HomeAssistant, device_id: str, channel: int
) -> tuple[str, str] | None:
    """Resolve (interface_id, channel_address) for a HA device and channel.

    Returns None when the device registry entry or its address identifiers
    cannot be resolved.
    """
    registry_entry: DeviceEntry | None = dr.async_get(hass).async_get(device_id)
    if not registry_entry:
        return None
    address_data = get_device_address_at_interface_from_identifiers(
        identifiers=registry_entry.identifiers
    )
    if address_data is None:
        return None
    device_address, interface_id = address_data
    return interface_id, f"{device_address}:{channel}"
def _get_entity(hass: HomeAssistant, entity_id: str) -> BaseEntity | None:
    """Return the BaseEntity registered under entity_id, searching all control units."""
    for unit in hass.data[DOMAIN].values():
        candidate = unit.async_get_hm_entity(entity_id=entity_id)
        if candidate and isinstance(candidate, BaseEntity):
            return candidate
    return None
def _get_entities_by_platform(
    hass: HomeAssistant, platform: HmPlatform
) -> list[BaseEntity]:
    """Collect the entities of the given platform from every control unit."""
    return [
        entity
        for unit in hass.data[DOMAIN].values()
        for entity in unit.async_get_hm_entities_by_platform(platform=platform)
    ]
def _get_hm_entity(
    hass: HomeAssistant, interface_id: str, channel_address: str, parameter: str
) -> GenericEntity | None:
    """Resolve a generic hahomematic entity by channel address and parameter."""
    control_unit = _get_cu_by_interface_id(hass=hass, interface_id=interface_id)
    if not control_unit:
        return None
    return control_unit.central.get_hm_entity_by_parameter(
        channel_address=channel_address, parameter=parameter
    )
def _get_cu_by_interface_id(
    hass: HomeAssistant, interface_id: str
) -> ControlUnit | None:
    """Return the ControlUnit that has a client for the given interface_id.

    Returns None when no configured control unit serves this interface.
    """
    # Iterate the stored control units directly instead of re-looking each one
    # up by its entry_id (the original did a redundant dict lookup per entry).
    control_unit: ControlUnit
    for control_unit in hass.data[DOMAIN].values():
        if control_unit and control_unit.central.clients.get(interface_id):
            return control_unit
    return None
def _get_hub_by_entity_id(hass: HomeAssistant, entity_id: str) -> HaHub | None:
    """Return the HaHub whose hub entity has the given entity_id.

    (The original docstring, "Get ControlUnit by device address", did not
    match the behavior.) Returns None when no control unit's hub matches.
    """
    # Iterate the stored control units directly instead of re-looking each one
    # up by its entry_id (the original did a redundant dict lookup per entry).
    control_unit: ControlUnit
    for control_unit in hass.data[DOMAIN].values():
        if (
            control_unit
            and control_unit.hub
            and control_unit.hub.entity_id == entity_id
        ):
            return control_unit.hub
    return None
| 32.455635 | 85 | 0.691813 |
40e0c14f05e9b921525413c2427c2d6661b5419f | 865 | py | Python | app/migrations/0001_initial.py | MariaAlice00/ifpi-tds-projeto-integrador | e2ce73279c9bf31de0b33b105723ae7a24deac54 | [
"MIT"
] | null | null | null | app/migrations/0001_initial.py | MariaAlice00/ifpi-tds-projeto-integrador | e2ce73279c9bf31de0b33b105723ae7a24deac54 | [
"MIT"
] | null | null | null | app/migrations/0001_initial.py | MariaAlice00/ifpi-tds-projeto-integrador | e2ce73279c9bf31de0b33b105723ae7a24deac54 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-06-03 00:35
from django.db import migrations, models
| 30.892857 | 117 | 0.564162 |
40e13f8b874a94920da4e07d42899e93081c3e2f | 4,284 | py | Python | graalpython/com.oracle.graal.python.parser.antlr/postprocess.py | transposit/graalpython | adadf5f211cc67a14bb3aca7c61219513d036b13 | [
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | 1 | 2019-05-28T13:04:32.000Z | 2019-05-28T13:04:32.000Z | graalpython/com.oracle.graal.python.parser.antlr/postprocess.py | transposit/graalpython | adadf5f211cc67a14bb3aca7c61219513d036b13 | [
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null | graalpython/com.oracle.graal.python.parser.antlr/postprocess.py | transposit/graalpython | adadf5f211cc67a14bb3aca7c61219513d036b13 | [
"UPL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null | # Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import re
COPYRIGHT_HEADER = """\
/*
* Copyright (c) 2017-2019, Oracle and/or its affiliates.
* Copyright (c) 2014 by Bart Kiers
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
// Checkstyle: stop
// JaCoCo Exclude
//@formatter:off
{0}
"""
# Matches Java "@SuppressWarnings..." annotation text in the generated source.
PTRN_SUPPRESS_WARNINGS = re.compile(r"@SuppressWarnings.*")

# Post-processing passes applied to the generated parser source, in order.
# The replace_* functions are defined earlier in this file (outside this
# excerpt).
TRANSFORMS = [
    replace_suppress_warnings,
    replace_rulectx,
    replace_localctx,
]

if __name__ == '__main__':
    # Rewrite the file named on the command line in place: apply all
    # transforms via postprocess() and prepend the copyright header.
    fpath = sys.argv[1]
    with open(fpath, 'r') as FILE:
        content = COPYRIGHT_HEADER.format(postprocess(FILE))
    with open(fpath, 'w+') as FILE:
        FILE.write(content)
| 37.911504 | 88 | 0.722222 |
40e2d06c8105c95bcdc7c6b4d3475a48fa240fbc | 6,284 | py | Python | scripts/ape_protocol_deploy.py | coordinape/coordinape-protocol | 8c90de1b1fbc19bab05e1c5848813d022492753a | [
"MIT"
] | 22 | 2021-10-17T23:19:38.000Z | 2022-03-24T05:13:56.000Z | scripts/ape_protocol_deploy.py | coordinape/coordinape-protocol | 8c90de1b1fbc19bab05e1c5848813d022492753a | [
"MIT"
] | 12 | 2021-09-29T16:27:03.000Z | 2022-03-30T17:54:08.000Z | scripts/ape_protocol_deploy.py | coordinape/coordinape-protocol | 8c90de1b1fbc19bab05e1c5848813d022492753a | [
"MIT"
] | 4 | 2021-10-14T19:08:36.000Z | 2022-03-29T16:42:21.000Z | from brownie import accounts, Wei, chain, ApeToken, ApeVaultFactory, ApeDistributor, ApeRegistry, ApeRouter, FeeRegistry, MockRegistry, MockVaultFactory, MockToken, MockVault
| 56.107143 | 174 | 0.732336 |
40e6bbe29a59bd4a98298179d233b2bfddb4c1e0 | 971 | py | Python | groups/views.py | MAKENTNU/web | 7a5b512bf4c087d1561cdb623d7df4b3d04811a2 | [
"MIT"
] | 10 | 2017-11-25T01:47:20.000Z | 2020-03-24T18:28:24.000Z | groups/views.py | MAKENTNU/web | 7a5b512bf4c087d1561cdb623d7df4b3d04811a2 | [
"MIT"
] | 319 | 2017-11-16T09:56:03.000Z | 2022-03-28T00:24:37.000Z | groups/views.py | MAKENTNU/web | 7a5b512bf4c087d1561cdb623d7df4b3d04811a2 | [
"MIT"
] | 6 | 2017-11-12T14:04:08.000Z | 2021-03-10T09:41:18.000Z | from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import DetailView, ListView, UpdateView
from .models import Committee
| 30.34375 | 65 | 0.77137 |