Column schema:

| Column | Type | Length / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1 |
| author_id | string | length 0-212 |
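Each record below is one row of this table, rendered as key: value fields followed by the file content. As a quick orientation, here is a minimal sketch of how rows with this schema could be consumed; it assumes the table is published as a Hugging Face dataset, and the path "org/dataset" is a placeholder rather than this dataset's actual name:

```python
# Minimal sketch (assumptions: the rows are available through the `datasets`
# library, and "org/dataset" is a placeholder path, not this dataset's name).
from datasets import load_dataset

# Stream rows so the large `content` column is never loaded all at once.
rows = load_dataset("org/dataset", split="train", streaming=True)
for row in rows:
    # Keep permissively licensed, non-generated files.
    if row["license_type"] == "permissive" and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"])
```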
blob_id: af5f6907b97177df49aeb41017c7b910c91a8543 | directory_id: 0cc472d10382adfe58e72704ab3f6e2f9d8a4f48
path: /src/common/Typehelper.py
content_id: 9458b023b5513ef714429b609b1c2ac306563a41 | detected_licenses: [] | license_type: no_license
repo_name: yueguangxuanyuan/ProgramProcessAnalysis | snapshot_id: c7c90262dd1dd3590597b8ff018a3326590daec4 | revision_id: 5bb93d8f9a7db2e82e034c7c912913a67942face | branch_name: refs/heads/master
visit_date: 2020-06-07T05:17:58.490888 | revision_date: 2019-06-20T14:20:18 | committer_date: 2019-06-20T14:20:18
github_id: 192934081 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 258 | extension: py
content:
def strToType(typeName, value):
    if typeName == 'int':
        return int(value)
    if typeName == 'float':
        return float(value)
    if typeName == 'double':
        return float(value)
    if typeName == 'String':
        return str(value)
authors: ["yueguangxuanyuan@gmail.com"] | author_id: yueguangxuanyuan@gmail.com

blob_id: 5545e02446baf063cc70fffc4e1cf9e600cc43b4 | directory_id: d554b1aa8b70fddf81da8988b4aaa43788fede88
path: /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4144/codes/1618_2923.py
content_id: d7608f21637243d5ea3e7ca02b6e9ce1976bd5c6 | detected_licenses: [] | license_type: no_license
repo_name: JosephLevinthal/Research-projects | snapshot_id: a3bc3ca3b09faad16f5cce5949a2279cf14742ba | revision_id: 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | branch_name: refs/heads/master
visit_date: 2022-07-31T06:43:02.686109 | revision_date: 2020-05-23T00:24:26 | committer_date: 2020-05-23T00:24:26
github_id: 266199309 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 104 | extension: py
content:
taxa = float(input("digite a taxa de juros: "))
final = 1500 * ((1 + taxa) ** 36)
print(round(final, 2))
authors: ["jvlo@icomp.ufam.edu.br"] | author_id: jvlo@icomp.ufam.edu.br

blob_id: 34a28069f8eaa6458a54aac14b25b064ad76de6d | directory_id: ee81efa621f8a18569d8ac00e5176aff1a736d86
path: /krizaljka.py
content_id: 4e7f8251a722a4f1e9ed16eee5ff051608abbb78 | detected_licenses: [] | license_type: no_license
repo_name: renaldyresa/Kattis | snapshot_id: c8b29f40a84f4161f49c6247abf10ec2ecc14810 | revision_id: e504f54602b054eeffaac48b43e70beb976ca94c | branch_name: refs/heads/master
visit_date: 2021-12-01T14:57:57.614911 | revision_date: 2021-11-29T07:44:43 | committer_date: 2021-11-29T07:44:43
github_id: 182920692 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 565 | extension: py
content:
kt1, kt2 = input().split()
h = 0
for i in range(len(kt1)):
    for j in range(len(kt2)):
        if kt1[i] == kt2[j]:
            h = 1
            break
    if h == 1:
        break
q = 0
w = 0
for l in range(len(kt2)):
    for k in range(len(kt1)):
        if l == j and k == i:
            print(kt1[q], end='')
            q += 1
            w += 1
        elif l == j:
            print(kt1[q], end='')
            q += 1
        elif k == i:
            print(kt2[w], end='')
            w += 1
        else:
            print('.', end='')
    print()
authors: ["noreply@github.com"] | author_id: renaldyresa.noreply@github.com

blob_id: e470b4d143e4ac234e1e9bfdacb68ec8adf5fa78 | directory_id: 89b2db0af633ae5b5be515a04d87a5db9c9b5778
path: /01-02.py
content_id: d5f279f01e5acee311af81472b21b47a306a5c5e | detected_licenses: [] | license_type: no_license
repo_name: slahmar/advent-of-code-2018 | snapshot_id: d4f6cc33997332c74758bf1a194a2db5001b5544 | revision_id: 67348b501b7119558855d6034c9178285da30203 | branch_name: refs/heads/master
visit_date: 2020-04-09T12:40:55.213364 | revision_date: 2019-02-09T18:16:46 | committer_date: 2019-02-09T18:16:46
github_id: 160359882 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 393 | extension: py
content:
from collections import Counter

with open('01.txt', 'r') as file:
    lines = file.readlines()

# Cycle through the frequency changes until one cumulative frequency repeats.
# Reading the lines once up front avoids exhausting the file handle on the
# second pass, which would otherwise leave the while loop spinning forever.
freqs = Counter()
freq = 0
while not freqs or freqs.most_common(1)[0][1] < 2:
    for line in lines:
        freq += int(line)
        freqs[freq] += 1
        if freqs[freq] == 2:
            break
print("First frequency which appeared twice {}".format(freqs.most_common(1)))
authors: ["noreply@github.com"] | author_id: slahmar.noreply@github.com

blob_id: e8f2f7e54b0957e4ae0a616395fcfea9fb79e0b5 | directory_id: a6ede1d64dc81cea56459199fbd2a1ade0603a50
path: /src/test1/canvascheckbox.py
content_id: be9d3d009845d19745db2f803492d139b88b75fd | detected_licenses: [] | license_type: no_license
repo_name: anantha1987/python_stories | snapshot_id: a9cb8192350ef9bddb028eaa7615a488b3a3c43b | revision_id: 44f05462a3fc1a9765b84b3f771a27d95aa300cf | branch_name: refs/heads/master
visit_date: 2020-12-31T23:22:25.285404 | revision_date: 2020-02-08T06:00:49 | committer_date: 2020-02-08T06:00:49
github_id: 239074213 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2020-02-08T06:36:44 | gha_created_at: 2020-02-08T05:21:32 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3990 | extension: py
content:
from ttkwidgets import CheckboxTreeview
import tkinter as tk
import tkinter.ttk as ttk
root=tk.Tk()
root.geometry('600x600')
root.title("Checkbox example")
f1=tk.Frame(root,width=595,height=595,background='blue',bd=1)
f1.grid(row=0,column=0)
checkingFrame=tk.Frame(f1,width=590,height=590,background='yellow',bd=1)
checkingFrame.grid(row=0,column=0,padx=5,pady=5)
canvas_tree=tk.Canvas(checkingFrame,bg='white')
canvas_tree.grid(row=0,column=0)
main_tree=CheckboxTreeview(canvas_tree,show='tree')
main_tree.column("#0",width=500,minwidth=600,stretch=True)
main_tree.configure(height=10)
main_tree.insert("", "end", "1", text="1"+'2')
main_tree.insert("1", "end", "11", text="11")
main_tree.insert("1", "end", "12", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("11", "end", "111",text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra' )
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra'+"Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra'+"Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
main_tree.insert("", "end", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
vsbar=tk.Scrollbar(checkingFrame,orient=tk.VERTICAL,command=main_tree.yview)
vsbar.grid(row=0,column=1,sticky=tk.NS)
hsbar=tk.Scrollbar(checkingFrame,orient=tk.HORIZONTAL,command=main_tree.xview)
hsbar.grid(row=1,column=0,sticky=tk.EW)
main_tree.config(xscroll=hsbar.set,yscroll=vsbar.set)
main_tree.grid(row=0,column=0)
canvas_tree.create_window((0,0),window=main_tree,anchor=tk.NW)
canvas_tree.configure(yscrollcommand=vsbar.set,xscrollcommand=hsbar.set)
main_tree.update_idletasks()
bbox=canvas_tree.bbox(tk.ALL)
canvas_tree.configure(scrollregion=bbox,width=400,height=400)
# can=tk.Canvas(f1,width=500)
# tree=CheckboxTreeview(can,show='tree')
# tree.column('#0',minwidth=350,stretch=True,width=300)
# tree.insert("", "end", "1", text="1"+'2')
# tree.insert("1", "end", "11", text="11")
# tree.insert("1", "end", "12", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
# tree.insert("11", "end", "111",text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra' )
# tree.insert("", "end", "2", text="Anantha"+"kumar"+'Kondra'+'Anantha Kumar Kondra')
# tree.grid()
#
# xscrol=ttk.Scrollbar(can,orient=tk.HORIZONTAL,command=tree.xview)
# xscrol.grid_anchor(anchor=tk.S)
#
# xscrol.grid( sticky='ew')
# tree.config(xscroll=xscrol.set)
#
# can.grid(row=0,column=0,sticky=tk.N+tk.S+tk.E+tk.W)
root.mainloop()
authors: ["39458121+anantha1987@users.noreply.github.com"] | author_id: 39458121+anantha1987@users.noreply.github.com

blob_id: 64ecc79136da0e7bf0ed0b47f75922078b4dad18 | directory_id: a21f8049ecd5f531f4a5ff530b3f182e75636b65
path: /delinkermap.py
content_id: 9ccebdf8e047fc591bcf363beda2a768ca54c0ce | detected_licenses: ["MIT"] | license_type: permissive
repo_name: jamesmunns/delinkermap | snapshot_id: 44e9fbdecff46793527bcc21efe551eee905ced3 | revision_id: 150d28dfa03b7007359dffb6f11c1d9333274b70 | branch_name: refs/heads/master
visit_date: 2021-01-23T08:33:42.941299 | revision_date: 2017-09-05T21:55:03 | committer_date: 2017-09-05T21:55:03
github_id: 102533293 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2452 | extension: py
content:
#!/usr/bin/env python3
import re
import sys
from subprocess import check_output as cmd
from multiprocessing import Pool
COMPONENT = r"[\.a-zA-Z0-9-_\$]*"
SPACE_OR_NEWLINE = r"[ \n]*"
HEX_NUM = r"0x[a-zA-Z0-9]*"
OBJECT = r"[/a-zA-Z0-9-_\.]*"
LINE_ITEM = re.compile("({cmp}){sp}({hn}){sp}({hn}){sp}({obj})".format(
cmp=COMPONENT,
sp=SPACE_OR_NEWLINE,
hn=HEX_NUM,
obj=OBJECT))
DEMANGLEABLE = re.compile("(_ZN{cmp}E)".format(
cmp=COMPONENT))
NODE_ITEM = re.compile("(<.*>|{cmp})".format(
cmp=COMPONENT))
with open(sys.argv[1], 'r') as ifile:
lines = ifile.read()
print("matching...")
matches = [m for m in LINE_ITEM.findall(lines)]
print("filtering removed components...")
active_items = [m for m in matches if (int(m[1], 16) != 0 and int(m[2], 16) != 0)]
print("demangling symbols...")
# p = Pool()
def demangle(i):
component, position, size, symbol = i
srch = DEMANGLEABLE.search(component)
if srch != None:
c = cmd(["c++filt", srch.group(1)]).strip().decode('ascii')
return (c, position, size, symbol)
else:
return None
demangled_or_none = [demangle(d) for d in active_items]
processed_names = [d for d in demangled_or_none if d != None]
class SizeNode(object):
def __init__(self):
self.children = {}
self.matches = []
def add(self, component, addr, size):
if len(component) == 1:
self.matches.append((addr, size, component[0]))
return
if component[0] not in self.children:
self.children[component[0]] = SizeNode()
self.children[component[0]].add(component[1:], addr, size)
total_map = SizeNode()
for p in processed_names:
components = [y for y in NODE_ITEM.findall(p[0]) if len(y) != 0]
total_map.add(components, p[1], p[2])
def recursive_print(node, space=0):
size = 0
strs = []
if len(node.children) == 0:
for m in node.matches:
strs.append("{}item:{} size:{} loc:{}".format(' ' * space, m[2], m[1], m[0]))
size += int(m[1], 16)
else:
for key in sorted(node.children.keys()):
n_size, n_strs = recursive_print(node.children[key], space+4)
strs.append("{}{} - {}".format(' ' * space, key, n_size))
strs.extend(n_strs)
size += n_size
return (size, strs)
size, strs = recursive_print(total_map)
print("Total size: {}".format(size))
for s in strs:
print(s)
authors: ["james.munns@gmail.com"] | author_id: james.munns@gmail.com

blob_id: 61b6f11111c63ea415de9b8226415f583006298a | directory_id: f5d507413ba15ee6a3aadda933d711a2104386c2
path: /djangocms_forms/models.py
content_id: 0b1a805f56dd5999e3b35d62b9772edac7336da3 | detected_licenses: [] | license_type: no_license
repo_name: samuelblattner/djangocms-forms | snapshot_id: 150db04de8de0022fecf9039627191d989ddcedd | revision_id: 0a0d1c3c7a399c524c3901551c134bf0da19fd59 | branch_name: refs/heads/master
visit_date: 2021-01-16T21:31:33.467067 | revision_date: 2015-07-31T19:05:16 | committer_date: 2015-07-31T19:05:16
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6435 | extension: py
content:
import re
from cms.models import CMSPlugin
from cms.models.fields import PageField
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
from unidecode import unidecode
from .conf import settings
from .fields import PluginReferenceField
from .managers import ActiveFormManager
@python_2_unicode_compatible
class Form(models.Model):
name = models.CharField(_('Name'), max_length=255, db_index=True, editable=False)
objects = models.Manager()
active_objects = ActiveFormManager()
class Meta:
verbose_name = _('form')
verbose_name_plural = _('forms')
def __str__(self):
return self.name
@python_2_unicode_compatible
class FormDefinition(CMSPlugin):
name = models.CharField(_('Form Name'), max_length=255)
title = models.CharField(_('Title'), max_length=150, blank=True)
description = models.TextField(_('Description'), blank=True)
submit_btn_txt = models.CharField(
_('Submit Button Text'), max_length=100, default=_('Submit'),
help_text=_('Text for the Submit Button. The default is \'Submit\''))
post_submit_msg = models.TextField(
_('Post Submit Message'), blank=True, default=_('Thank You'),
help_text=_('Display this message to users after they submit your form.'))
# 'HTTP redirect after successful submission'
success_redirect = models.BooleanField(
_('Redirect?'), default=False,
help_text=_('HTTP redirect after successful submission'))
page_redirect = PageField(
verbose_name=_('Page URL'), blank=True, null=True,
on_delete=models.SET_NULL,
help_text=_('A page has priority over an external URL'))
external_redirect = models.URLField(
_('External URL'), blank=True,
help_text=_('e.g. http://example.com/thank-you'))
# Email
email_to = models.CharField(
_('Send form data to e-mail address'), max_length=255, blank=True,
help_text=_('Separate several addresses with a comma.'))
email_from = models.EmailField(_('Sender Email Address'), max_length=255, blank=True)
email_subject = models.CharField(_('Email Subject'), max_length=255, blank=True)
email_uploaded_files = models.BooleanField(
_('Send uploaded files as email attachments'), default=True)
# Save to database
save_data = models.BooleanField(
_('Save to database'), default=True,
help_text=_('Logs all form submissions to the database.'))
spam_protection = models.SmallIntegerField(
_('Spam Protection'),
choices=settings.DJANGOCMS_FORMS_SPAM_PROTECTIONS,
default=settings.DJANGOCMS_FORMS_DEFAULT_SPAM_PROTECTION)
form_template = models.CharField(
_('Form Template'), max_length=150, blank=True,
choices=settings.DJANGOCMS_FORMS_TEMPLATES,
default=settings.DJANGOCMS_FORMS_DEFAULT_TEMPLATE,
)
plugin_reference = PluginReferenceField(Form, related_name='plugin')
class Meta:
verbose_name_plural = _('forms')
verbose_name = _('form')
def __str__(self):
return self.name
@property
def redirect_url(self):
if self.page_redirect:
return self.page_redirect.get_absolute_url()
elif self.external_redirect:
return self.external_redirect
@property
def upload_to(self):
return '%s-%s' % (
slugify(unidecode(self.name)).replace('_', '-'),
self.plugin_reference_id)
def copy_relations(self, oldinstance):
for field in oldinstance.fields.all():
field.pk = None
field.form = self
field.save()
@python_2_unicode_compatible
class FormField(models.Model):
form = models.ForeignKey(FormDefinition, related_name='fields')
field_type = models.CharField(
_('Field Type'), max_length=100,
choices=settings.DJANGOCMS_FORMS_FIELD_TYPES,
default=settings.DJANGOCMS_FORMS_DEFAULT_FIELD_TYPE)
label = models.CharField(_('name'), max_length=255)
placeholder_text = models.CharField(_('Placeholder Text'), blank=True, max_length=100)
required = models.BooleanField(_('Required'), default=True)
help_text = models.TextField(
_('Description'), blank=True,
help_text=_('A description / instructions for this field.'))
initial = models.CharField(_('Default Value'), max_length=255, blank=True)
choice_values = models.TextField(
_('Choices'), blank=True,
help_text=_('Enter options one per line. For "File Upload" '
'field type, enter allowed filetype (e.g .pdf) one per line.'))
position = models.PositiveIntegerField(_('Position'), blank=True, null=True)
class Meta:
verbose_name_plural = _('fields')
verbose_name = _('field')
ordering = ('position', )
def __str__(self):
return self.label
def field_attrs(self):
args = {
'required': self.required,
'label': self.label if self.label else '',
'initial': self.initial if self.initial else None,
'help_text': self.help_text,
}
return args
def get_choices(self):
if self.choice_values:
regex = re.compile('[\s]*\n[\s]*')
choices = regex.split(self.choice_values)
return [(str(choice), str(choice)) for choice in choices]
@python_2_unicode_compatible
class FormSubmission(models.Model):
plugin = models.ForeignKey(
Form, verbose_name=_('Form'), editable=False, related_name='submissions')
creation_date = models.DateTimeField(_('Date'), auto_now=True)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, verbose_name=_('User'), editable=False, null=True)
ip = models.GenericIPAddressField(verbose_name='IP', blank=True, null=True)
referrer = models.CharField(_('Referrer URL'), max_length=150, blank=True)
form_data = JSONField(_('Form Data'))
class Meta:
verbose_name_plural = _('form submissions')
verbose_name = _('form submission')
ordering = ('-creation_date', )
permissions = (
('export_formsubmission', 'Can export Form Submission'),
)
def __str__(self):
return u'%s' % self.plugin
authors: ["mishbah@jp74.com"] | author_id: mishbah@jp74.com

blob_id: 45ba51c83865a7fcc65e8995f904f3104af957f8 | directory_id: 9671c38ba355b2a30c285e61d5d5af4d49c69787
path: /2019/2/1202 Program Alarm.py
content_id: 807377ee5cdaaddf69bafe25e7d3f850a0b99202 | detected_licenses: [] | license_type: no_license
repo_name: JeeZeh/advent-of-code | snapshot_id: c49b2bfb12e39162c0f57f0896654672e39c0f70 | revision_id: bf8613e765ae69d189c20d5869eed42257ae5e21 | branch_name: refs/heads/master
visit_date: 2023-01-16T07:30:04.507687 | revision_date: 2022-12-25T18:12:42 | committer_date: 2022-12-25T18:12:42
github_id: 226599377 | star_events_count: 3 | fork_events_count: 2
gha_license_id: null | gha_event_created_at: 2022-12-04T00:49:36 | gha_created_at: 2019-12-08T01:38:19 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 524 | extension: py
content:
data = list(map(int, open("input.txt").read().split(",")))
data[1] = 12
data[2] = 2
for x in range(100):
for y in range(100):
mem = data.copy()
i = 0
mem[1] = x
mem[2] = y
while mem[i] != 99:
if mem[i] == 1:
mem[mem[i + 3]] = mem[mem[i + 1]] + mem[mem[i + 2]]
if mem[i] == 2:
mem[mem[i + 3]] = mem[mem[i + 1]] * mem[mem[i + 2]]
i += 4
if mem[0] == 19690720:
print(100 * mem[1] + mem[2])
authors: ["29103230+JeeZeh@users.noreply.github.com"] | author_id: 29103230+JeeZeh@users.noreply.github.com

blob_id: 2f37b5475cf5dd147b55ebb87ae26148385efdb8 | directory_id: 53fab060fa262e5d5026e0807d93c75fb81e67b9
path: /backup/user_179/ch31_2020_04_12_19_48_03_976036.py
content_id: 810efec55299cf985e0cedf6150169f8866144ed | detected_licenses: [] | license_type: no_license
repo_name: gabriellaec/desoft-analise-exercicios | snapshot_id: b77c6999424c5ce7e44086a12589a0ad43d6adca | revision_id: 01940ab0897aa6005764fc220b900e4d6161d36b | branch_name: refs/heads/main
visit_date: 2023-01-31T17:19:42.050628 | revision_date: 2020-12-16T05:21:31 | committer_date: 2020-12-16T05:21:31
github_id: 306735108 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 246 | extension: py
content:
def eh_primo(n):
    i = 3
    if n == 2:
        return True
    elif n == 0 or n == 1 or n % 2 == 0:
        return False
    else:
        while i < n:
            if n % i == 0:
                return False
            i = i + 2
        return True
authors: ["you@example.com"] | author_id: you@example.com

blob_id: 84d55e4177e6804301d24c4dfe19a3e69a81a43c | directory_id: 47c4d1d1017c06138a672905742a3d3609500288
path: /EvalBox/Attack/AdvAttack/__init__.py
content_id: b19aa1ef5d50e28be460e65b52e403a806821ba4 | detected_licenses: [] | license_type: no_license
repo_name: liuaishan/AISafety | snapshot_id: a32de018a1b9e8ae1821ab757a22edbfe7f818af | revision_id: 241e1258f5658f399f905f1db1f9ef7b68bccb1d | branch_name: refs/heads/main
visit_date: 2022-12-28T15:21:34.384449 | revision_date: 2020-10-18T11:59:35 | committer_date: 2020-10-18T11:59:35
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 605 | extension: py
content:
#!/usr/bin/env python
# coding=UTF-8
'''
@Author: linna
@LastEditors: linna
@Description:
@Date: 2019-09-03 13:38:17
@LastEditTime: 2020-09-23 13:38:22
'''
from .attack import Attack
from .fgsm import FGSM
from .rfgsm import RFGSM
from .bim import BIM
from .pgd import PGD
from .umifgsm import UMIFGSM
from .deepfool import DEEPFOOL
from .om import OM
from .cw2 import CW2
from .llc import LLC
from .jsm import JSM
from .blb import BLB
from .ead import EAD
from .uap import UAP
from .ba import BA
from .zoo import ZOO
from .ILLC import ILLC
from .RLLC import RLLC
from .spsa import SPSA
from .PA import PA
authors: ["zaozhe@buaa.edu.cn"] | author_id: zaozhe@buaa.edu.cn

blob_id: 695e7281f1f68146b70bcc02883d40d300b79020 | directory_id: a22cc323b29f50da397d8363ac2521e3542a0fd7
path: /dpaycli/conveyor.py
content_id: 360bb98f88958ebb9bee6105b62a737774382746 | detected_licenses: ["MIT"] | license_type: permissive
repo_name: dpays/dpay-cli | snapshot_id: 1a58c7dae45218e3b05b7e17ff5ce03e918d27b9 | revision_id: dfa80898e1faea2cee92ebec6fe04873381bd40f | branch_name: refs/heads/master
visit_date: 2020-04-01T09:26:43.200933 | revision_date: 2018-10-15T08:03:06 | committer_date: 2018-10-15T08:03:06
github_id: 153075154 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 12076 | extension: py
content:
# This Python file uses the following encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hashlib
import base64
import json
import random
import requests
import struct
from datetime import datetime
from binascii import hexlify
from .instance import shared_dpay_instance
from .account import Account
from dpaycligraphenebase.py23 import py23_bytes
from dpaycligraphenebase.ecdsasig import sign_message
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class Conveyor(object):
""" Class to access DSite API instances:
https://github.com/dsites/dsite-api
Description from the official documentation:
* Feature flags: "Feature flags allows our apps (condenser mainly) to
hide certain features behind flags."
* User data: "Conveyor is the central point for storing sensitive user
data (email, phone, etc). No other services should store this data
and should instead query for it here every time."
* User tags: "Tagging mechanism for other services, allows defining and
assigning tags to accounts (or other identifiers) and querying for
them."
Not contained in the documentation, but implemented and working:
* Draft handling: saving, listing and removing post drafts
consisting of a post title and a body.
The underlying RPC authentication and request signing procedure is
described here: https://github.com/dpays/rpc-auth
"""
def __init__(self, url="https://api.dsite.io",
dpay_instance=None):
""" Initialize a Conveyor instance
:param str url: (optional) URL to the Conveyor API, defaults to
https://api.dsite.io
:param dpaycli.dpay.DPay dpay_instance: DPay instance
"""
self.url = url
self.dpay = dpay_instance or shared_dpay_instance()
self.id = 0
self.ENCODING = 'utf-8'
self.TIMEFORMAT = '%Y-%m-%dT%H:%M:%S.%f'
self.K = hashlib.sha256(py23_bytes('dpay_jsonrpc_auth',
self.ENCODING)).digest()
def prehash_message(self, timestamp, account, method, params, nonce):
""" Prepare a hash for the Conveyor API request with SHA256 according
to https://github.com/dpays/rpc-auth
Hashing of `second` is then done inside `ecdsasig.sign_message()`.
:param str timestamp: valid iso8601 datetime ending in "Z"
:param str account: valid dPay blockchain account name
:param str method: Conveyor method name to be called
:param bytes param: base64 encoded request parameters
:param bytes nonce: random 8 bytes
"""
first = hashlib.sha256(py23_bytes(timestamp + account + method +
params, self.ENCODING))
return self.K + first.digest() + nonce
def _request(self, account, method, params, key):
"""Assemble the request, hash it, sign it and send it to the Conveyor
instance. Returns the server response as JSON.
:param str account: account name
:param str method: Conveyor method name to be called
:param dict params: request parameters as `dict`
:param str key: DPay posting key for signing
"""
params_bytes = py23_bytes(json.dumps(params), self.ENCODING)
params_enc = base64.b64encode(params_bytes).decode(self.ENCODING)
timestamp = datetime.utcnow().strftime(self.TIMEFORMAT)[:-3] + "Z"
nonce_int = random.getrandbits(64)
nonce_bytes = struct.pack('>Q', nonce_int) # 64bit ULL, big endian
nonce_str = "%016x" % (nonce_int)
message = self.prehash_message(timestamp, account, method,
params_enc, nonce_bytes)
signature = sign_message(message, key)
signature_hex = hexlify(signature).decode(self.ENCODING)
request = {
"jsonrpc": "2.0",
"id": self.id,
"method": method,
"params": {
"__signed": {
"account": account,
"nonce": nonce_str,
"params": params_enc,
"signatures": [signature_hex],
"timestamp": timestamp
}
}
}
r = requests.post(self.url, data=json.dumps(request))
self.id += 1
return r.json()
def _conveyor_method(self, account, signing_account, method, params):
""" Wrapper function to handle account and key lookups
:param str account: name of the addressed account
:param str signing_account: name of the account to sign the request
:param method: Conveyor method name to be called
:params dict params: request parameters as `dict`
"""
account = Account(account, dpay_instance=self.dpay)
if signing_account is None:
signer = account
else:
signer = Account(signing_account, dpay_instance=self.dpay)
if "posting" not in signer:
signer.refresh()
if "posting" not in signer:
raise AssertionError("Could not access posting permission")
for authority in signer["posting"]["key_auths"]:
posting_wif = self.dpay.wallet.getPrivateKeyForPublicKey(
authority[0])
return self._request(account['name'], method, params,
posting_wif)
def get_user_data(self, account, signing_account=None):
""" Get the account's email address and phone number. The request has to be
signed by the requested account or an admin account.
:param str account: requested account
:param str signing_account: (optional) account to sign the
request. If unset, `account` is used.
Example:
.. code-block:: python
from dpaycli import DPay
from dpaycli.conveyor import Conveyor
s = DPay(keys=["5JPOSTINGKEY"])
c = Conveyor(dpay_instance=s)
print(c.get_user_data('accountname'))
"""
account = Account(account, dpay_instance=self.dpay)
user_data = self._conveyor_method(account, signing_account,
"conveyor.get_user_data",
[account['name']])
if "result" in user_data:
return user_data["result"]
else:
return user_data
def set_user_data(self, account, params, signing_account=None):
""" Set the account's email address and phone number. The request has to be
signed by an admin account.
:param str account: requested account
:param dict param: user data to be set
:param str signing_account: (optional) account to sign the
request. If unset, `account` is used.
Example:
.. code-block:: python
from dpaycli import DPay
from dpaycli.conveyor import Conveyor
s = DPay(keys=["5JADMINPOSTINGKEY"])
c = Conveyor(dpay_instance=s)
userdata = {'email': 'foo@bar.com', 'phone':'+123456789'}
c.set_user_data('accountname', userdata, 'adminaccountname')
"""
return self._conveyor_method(account, signing_account,
"conveyor.set_user_data",
[params])
def get_feature_flags(self, account, signing_account=None):
""" Get the account's feature flags. The request has to be signed by the
requested account or an admin account.
:param str account: requested account
:param str signing_account: (optional) account to sign the
request. If unset, `account` is used.
Example:
.. code-block:: python
from dpaycli import DPay
from dpaycli.conveyor import Conveyor
s = DPay(keys=["5JPOSTINGKEY"])
c = Conveyor(dpay_instance=s)
print(c.get_feature_flags('accountname'))
"""
account = Account(account, dpay_instance=self.dpay)
feature_flags = self._conveyor_method(account, signing_account,
"conveyor.get_feature_flags",
[account['name']])
if "result" in feature_flags:
return feature_flags["result"]
else:
return feature_flags
def get_feature_flag(self, account, flag, signing_account=None):
""" Test if a specific feature flag is set for an account. The request
has to be signed by the requested account or an admin account.
:param str account: requested account
:param str flag: flag to be tested
:param str signing_account: (optional) account to sign the
request. If unset, `account` is used.
Example:
.. code-block:: python
from dpaycli import DPay
from dpaycli.conveyor import Conveyor
s = DPay(keys=["5JPOSTINGKEY"])
c = Conveyor(dpay_instance=s)
print(c.get_feature_flag('accountname', 'accepted_tos'))
"""
account = Account(account, dpay_instance=self.dpay)
return self._conveyor_method(account, signing_account,
"conveyor.get_feature_flag",
[account['name'], flag])
def save_draft(self, account, title, body):
""" Save a draft in the Conveyor database
:param str account: requested account
:param str title: draft post title
:param str body: draft post body
"""
account = Account(account, dpay_instance=self.dpay)
draft = {'title': title, 'body': body}
return self._conveyor_method(account, None,
"conveyor.save_draft",
[account['name'], draft])
def list_drafts(self, account):
""" List all saved drafts from `account`
:param str account: requested account
Sample output:
.. code-block:: js
{
'jsonrpc': '2.0', 'id': 2, 'result': [
{'title': 'draft-title', 'body': 'draft-body',
'uuid': '06497e1e-ac30-48cb-a069-27e1672924c9'}
]
}
"""
account = Account(account, dpay_instance=self.dpay)
return self._conveyor_method(account, None,
"conveyor.list_drafts",
[account['name']])
def remove_draft(self, account, uuid):
""" Remove a draft from the Conveyor database
:param str account: requested account
:param str uuid: draft identifier as returned from
`list_drafts`
"""
account = Account(account, dpay_instance=self.dpay)
return self._conveyor_method(account, None,
"conveyor.remove_draft",
[account['name'], uuid])
def healthcheck(self):
""" Get the Conveyor status
Sample output:
.. code-block:: js
{
'ok': True, 'version': '1.1.1-4d28e36-1528725174',
'date': '2018-07-21T12:12:25.502Z'
}
"""
url = urljoin(self.url, "/.well-known/healthcheck.json")
r = requests.get(url)
return r.json()
authors: ["jaredricelegal@gmail.com"] | author_id: jaredricelegal@gmail.com

blob_id: 1cfa332536abeb77d7f6d40576cbfb2992f172b3 | directory_id: 9ec58308459dc95405d1a32fcf8fae7f687a207b
path: /test/test_k_lipschitzconstant.py
content_id: 56181ba4b0528a3f5bcdea366dc5618fd4cf5849 | detected_licenses: ["MIT"] | license_type: permissive
repo_name: ivanlyon/exercises | snapshot_id: 067aed812486dbd7a3d7de6e47a692c8b9383163 | revision_id: 0792976ae2acb85187b26a52812f9ebdd119b5e8 | branch_name: refs/heads/master
visit_date: 2021-05-24T04:17:29.012329 | revision_date: 2021-05-11T17:26:50 | committer_date: 2021-05-11T17:26:50
github_id: 65584450 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2210 | extension: py
content:
import io
import unittest
from unittest.mock import patch
from kattis import k_lipschitzconstant
###############################################################################
class SampleInput(unittest.TestCase):
'''Problem statement sample inputs and outputs'''
def test_sample_input_1(self):
'''Run and assert problem statement sample 1 input and output.'''
inputs = []
inputs.append('3')
inputs.append('1 1')
inputs.append('2 2')
inputs.append('3 4')
inputs = '\n'.join(inputs) + '\n'
outputs = '2.000000000\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_lipschitzconstant.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
def test_sample_input_2(self):
'''Run and assert problem statement sample 2 input and output.'''
inputs = []
inputs.append('2')
inputs.append('1 4')
inputs.append('2 2')
inputs = '\n'.join(inputs) + '\n'
outputs = '2.000000000\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_lipschitzconstant.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
def test_sample_input_3(self):
'''Run and assert problem statement sample 3 input and output.'''
inputs = []
inputs.append('4')
inputs.append('-10 6.342')
inputs.append('-7 3')
inputs.append('46 18.1')
inputs.append('2 -34')
inputs = '\n'.join(inputs) + '\n'
outputs = '4.111111111\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_lipschitzconstant.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
###############################################################################
if __name__ == '__main__':
unittest.main()
authors: ["roblyon00@gmail.com"] | author_id: roblyon00@gmail.com

blob_id: a15780d4134153340092b276e7299f3fb54d2570 | directory_id: ac91ee2f10f428315f80134447794de370726c22
path: /python/main_test_packet_speed.py
content_id: 52e219009838a0c62cefbe7e06f495cc0eb78f65 | detected_licenses: [] | license_type: no_license
repo_name: gviejo/LMNphysio | snapshot_id: c34a49719e7094fb79b07b55980e186ac95b4fc2 | revision_id: 24c9466d6a8a1deaf6b30f38388e90212af07c1e | branch_name: refs/heads/master
visit_date: 2023-09-01T08:05:02.240143 | revision_date: 2023-08-18T19:29:01 | committer_date: 2023-08-18T19:29:01
github_id: 170920340 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1298 | extension: py
content:
import numpy as np
import pandas as pd
import neuroseries as nts
from pylab import *
from wrappers import *
from functions import *
import sys
data_directory = '/home/guillaume/LMNphysio/data/A5000/A5001-200226A'
episodes = ['sleep', 'wake']
events = ['1']
spikes, shank = loadSpikeData(data_directory)
n_channels, fs, shank_to_channel = loadXML(data_directory)
position = loadPosition(data_directory, events, episodes)
wake_ep = loadEpoch(data_directory, 'wake', episodes)
sleep_ep = loadEpoch(data_directory, 'sleep')
acceleration = loadAuxiliary(data_directory, n_probe = 2)
if 'A5002' in data_directory:
acceleration = acceleration[[0,1,2]]
else:
acceleration = acceleration[[3,4,5]]
acceleration.columns = pd.Index(np.arange(3))
sleep_ep = refineSleepFromAccel(acceleration, sleep_ep)
bins = np.arange(wake_ep.loc[0,'start'], wake_ep.loc[0,'end'], 5000)
spike_count = {}
for n in np.where(shank.flatten()==0)[0]:
spike_count[n] = pd.Series(index = bins[0:-1]+np.diff(bins)/2, data = np.histogram(spikes[n].restrict(wake_ep).index.values, bins)[0])
spike_count = pd.DataFrame.from_dict(spike_count)
tmp = spike_count.mean(1)
tmp2 = tmp.rolling(window=10, win_type='gaussian', center= True, min_periods=1).mean(std = 1.0)
authors: ["guillaue.viejo@gmail.com"] | author_id: guillaue.viejo@gmail.com

blob_id: 975d55c83d9b4f29cb206179a448076985191c60 | directory_id: bd6ae68d882cc40876b4f2906fa522ef807d1d89
path: /2/2_1/源码/75函数默认参数.py
content_id: 87ce4767c09b40196f6d34813c82d106b6624d2b | detected_licenses: [] | license_type: no_license
repo_name: bopopescu/Projects-1 | snapshot_id: 7ecc1f472f418b701bc40c8a58bab447e678c68f | revision_id: 1b7fd99bf1ed96adb7a9486daf098947a9208f77 | branch_name: refs/heads/master
visit_date: 2022-11-19T18:31:32.863170 | revision_date: 2018-02-26T02:09:09 | committer_date: 2018-02-26T02:09:09
github_id: 281792443 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2020-07-22T22:06:26 | gha_created_at: 2020-07-22T22:06:25 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 490 | extension: py
content:
#!/usr/bin/python
# _*_ coding:utf-8 _*_
KEVIM = 'Kevim Liu'
# lau.liu@9street.org
def stu_register(name, age, soures, country="CN"):
    print('---- Student registration info ----')
    print('Name:', name)
    print('age:', age)
    print('Course:', soures)
    print('Nationality:', country)

# print(name, age, soures, country)
# stu_register('Wang Shanpao', 22, 'ptyhone_devopt')
# stu_register('Zhang San', 21, 'linux')
# stu_register('Li Si', 21, 'linux')
stu_register('Wang Shanpao', soures='ptyhone_devopt', age=22)
authors: ["liujicun01@163.com"] | author_id: liujicun01@163.com

blob_id: ce92ba7d2727c429dea4e58d50bb4094beebbaf1 | directory_id: 8f9b0f099a4cee619e8d0053a58e94d019e7b559
path: /bin/tounaer/Crawl_tounaer_guonianbao.py
content_id: b6ca5aa54393587e2245f18fedad2c5d54f21e26 | detected_licenses: [] | license_type: no_license
repo_name: dongshaohui/yintuwang_offline | snapshot_id: fb7cab23824b2d6e9084be4fdd8d8b11e80f4c41 | revision_id: fda6488f93b82a8692a5f85abf85f505c692bec8 | branch_name: refs/heads/master
visit_date: 2020-04-26T22:55:59.388932 | revision_date: 2015-06-21T16:16:39 | committer_date: 2015-06-21T16:16:39
github_id: 30067127 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2104 | extension: py
content:
#!/usr/bin/python
# -*- coding=utf8 -*-
# author : Shaohui Dong
# description : crawl Yirendai wealth-management product data
from BeautifulSoup import BeautifulSoup
import sys,os,urllib2,threading
import datetime
import json
import re
import DB
g_root_link = "http://www.touna.cn/borrow.do?method=list&borrowType=102&page=0&size=100&subtime=1419907176551&_=1419907176552"
g_pro_link = "http://www.touna.cn/invest-page.html?id="
# Connect to the database
def Connent_Online_Mysql_By_DB(hostname,port,username,pwd,dbname,socket):
db = DB.DB(False,host=hostname, port=port, user=username ,passwd=pwd, db=dbname,charset='gbk', unix_socket=socket)
return db
# Write a record to the database
def write_record_db(db,list_obj,table_name):
try:
db.insert(table_name,list_obj)
db.commit()
except Exception,e:
print e
def fetch_json_data(db):
context = urllib2.urlopen(g_root_link,'r').read()
json_obj = json.loads(context)
for product in json_obj['result']['list']:
if product['status'] != 1:
continue
record = {}
record['proName'] = product['name']
record['interest'] = (str)(product['apr']) + "%"
record['amount'] = product['account']
record['invested'] = product['account_yes']
record['surplus'] = record['amount'] - record['invested']
record['duetime'] = product['time_limit_name']
record['status'] = product['status_name']
record['credit'] = product['credit_rating']
record['minamount'] = '50'
record['urllink'] = g_pro_link + (str)(product['id'])
record['datestr'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
write_record_db(db,record,'p2p_product_tounaer_guonianbao')
if __name__ == '__main__':
db = Connent_Online_Mysql_By_DB('rdsjjuvbqjjuvbqout.mysql.rds.aliyuncs.com',3306,'dongsh','5561225','financal_product','/tmp/mysql.sock')
# Clear the existing database table
script_path = os.getcwd()
script_path = script_path[:script_path.find('p2p3000')]+"p2p3000/tool/empty_db_table.sh"
os.system(script_path + ' p2p_product_tounaer_guonianbao')
#os.system('/home/dong/p2p3000/tool/empty_db_table.sh p2p_product_tounaer_guonianbao')
fetch_json_data(db)
authors: ["405989455@qq.com"] | author_id: 405989455@qq.com

blob_id: bb3219cbc8ec31dd765120e96672aa2b74086516 | directory_id: 09e57dd1374713f06b70d7b37a580130d9bbab0d
path: /benchmark/startCirq3280.py
content_id: 1543140b02ea74ccca969f2f47081af9354cf0ee | detected_licenses: ["BSD-3-Clause"] | license_type: permissive
repo_name: UCLA-SEAL/QDiff | snapshot_id: ad53650034897abb5941e74539e3aee8edb600ab | revision_id: d968cbc47fe926b7f88b4adf10490f1edd6f8819 | branch_name: refs/heads/main
visit_date: 2023-08-05T04:52:24.961998 | revision_date: 2021-09-19T02:56:16 | committer_date: 2021-09-19T02:56:16
github_id: 405159939 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3324 | extension: py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=44
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.Y.on(input_qubit[3])) # number=18
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.Y.on(input_qubit[3])) # number=12
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[1])) # number=34
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=35
c.append(cirq.H.on(input_qubit[1])) # number=36
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=31
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=38
c.append(cirq.X.on(input_qubit[1])) # number=39
c.append(cirq.H.on(input_qubit[1])) # number=41
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=42
c.append(cirq.H.on(input_qubit[1])) # number=43
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=33
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=30
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[3])) # number=37
c.append(cirq.H.on(input_qubit[3])) # number=19
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=20
c.append(cirq.H.on(input_qubit[3])) # number=21
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=23
c.append(cirq.X.on(input_qubit[3])) # number=24
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=25
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=17
c.append(cirq.rx(-0.48380526865282825).on(input_qubit[3])) # number=26
c.append(cirq.Y.on(input_qubit[2])) # number=10
c.append(cirq.X.on(input_qubit[2])) # number=22
c.append(cirq.Y.on(input_qubit[2])) # number=11
c.append(cirq.X.on(input_qubit[0])) # number=13
c.append(cirq.X.on(input_qubit[0])) # number=14
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq3280.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
authors: ["wangjiyuan123@yeah.net"] | author_id: wangjiyuan123@yeah.net

blob_id: f35c0f9b8c76e299a20fd3ee09d8b54cd391f351 | directory_id: d3b99bd7db430a1e46f4a1a59a9eac6cc82c1787
path: /apps/organization/forms.py
content_id: b2c89d1e04c00566cc71d261a3709a7c02f7d99e | detected_licenses: [] | license_type: no_license
repo_name: David20111030/Eduonline | snapshot_id: 460dcd2c530f2a309825666ec6cb394a14336c81 | revision_id: 798711f39862256ae5030d29409e90b627684e1c | branch_name: refs/heads/master
visit_date: 2021-04-15T13:03:39.202881 | revision_date: 2017-07-15T14:04:14 | committer_date: 2017-07-15T14:04:14
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 602 | extension: py
content:
# _*_ encoding: utf-8 _*_
import re
from django import forms
from operation.models import UserAsk,UserFavorite
class UserAskForm(forms.ModelForm):
class Meta:
model = UserAsk
fields =['name','mobile','course_name']
def clean_mobile(self):
# Validate that the mobile number is well-formed
mobile = self.cleaned_data['mobile']
REGEX_MOBILE = "^1[358]\d{9}$|^147\d{8}$|^176\d{8}$"
p = re.compile(REGEX_MOBILE)
if p.match(mobile):
return mobile
else:
raise forms.ValidationError(u"Invalid mobile number", code="mobile_invalid")
authors: ["salt_stack@126.com"] | author_id: salt_stack@126.com

blob_id: 4207e5baa7d013e44e18c5137edd94cf511ae5dc | directory_id: f8d957a01539658fcdcb66e11e82f68dcd648921
path: /pennylane/transforms/batch_transform.py
content_id: d5261a1ff71f330a24e84ad459ea6839c1ab1998 | detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: hosseinsadeghi/pennylane | snapshot_id: 0ccdbdd6536f5245910d959828a4d6029e41ae97 | revision_id: d3363227f29f4c58c63203da378de2e1c20a21b0 | branch_name: refs/heads/master
visit_date: 2023-07-17T07:42:42.567989 | revision_date: 2021-09-02T19:54:11 | committer_date: 2021-09-02T19:54:11
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9968 | extension: py
content:
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains tools and decorators for registering batch transforms."""
# pylint: disable=too-few-public-methods
import functools
import pennylane as qml
from pennylane.interfaces.batch import execute
class batch_transform:
r"""Class for registering a tape transform that takes a tape, and outputs
a batch of tapes to be independently executed on a quantum device.
Examples of such transforms include quantum gradient shift rules (such
as finite-differences and the parameter-shift rule) and metrics such as
the quantum Fisher information matrix.
Args:
transform_fn (function): The function to register as the batch tape transform.
It can have an arbitrary number of arguments, but the first argument
**must** be the input tape.
expand_fn (function): An expansion function (if required) to be applied to the
input tape before the transformation takes place.
differentiable (bool): Specifies whether the transform is differentiable or
not. A transform may be non-differentiable for several reasons:
- It does not use an autodiff framework for its tensor manipulations;
- It returns a non-differentiable or non-numeric quantity, such as
a boolean, string, or integer.
In such a case, setting ``differentiable=False`` instructs the decorator
to mark the output as 'constant', reducing potential overhead.
**Example**
A valid batch tape transform is a function that satisfies the following:
- The first argument must be a tape.
- Depending on the structure of this input tape, various quantum operations, functions,
and templates may be called.
- Any internal classical processing should use the ``qml.math`` module to ensure
the transform is differentiable.
- The transform should return a tuple containing:
* Multiple transformed tapes to be executed on a device.
* A classical processing function for post-processing the executed tape results.
This processing function should have the signature ``f(list[tensor_like]) → Any``.
If ``None``, no classical processing is applied to the results.
For example:
.. code-block:: python
@qml.batch_transform
def my_transform(tape, a, b):
"Generates two tapes, one with all RX replaced with RY,
and the other with all RX replaced with RZ."
tape1 = qml.tape.JacobianTape()
tape2 = qml.tape.JacobianTape()
# loop through all operations on the input tape
for op in tape.operations + tape.measurements:
if op.name == "RX":
wires = op.wires
param = op.parameters[0]
with tape1:
qml.RY(a * qml.math.abs(param), wires=wires)
with tape2:
qml.RZ(b * qml.math.abs(param), wires=wires)
else:
for t in [tape1, tape2]:
with t:
qml.apply(op)
def processing_fn(results):
return qml.math.sum(qml.math.stack(results))
return [tape1, tape2], processing_fn
We can apply this transform to a quantum tape:
>>> with qml.tape.JacobianTape() as tape:
... qml.Hadamard(wires=0)
... qml.RX(-0.5, wires=0)
... qml.expval(qml.PauliX(0))
>>> tapes, fn = my_transform(tape, 0.65, 2.5)
>>> print(tapes[0].draw())
0: ──H──RY(0.325)──┤ ⟨X⟩
>>> print(tapes[1].draw())
0: ──H──RZ(1.25)──┤ ⟨X⟩
We can execute these tapes manually:
>>> from pennylane.interfaces.batch import execute
>>> dev = qml.device("default.qubit", wires=1)
>>> res = execute(tapes, dev, interface="autograd", gradient_fn=qml.gradients.param_shift)
>>> print(res)
[tensor([0.94765073], requires_grad=True), tensor([0.31532236], requires_grad=True)]
Applying the processing function, we retrieve the end result of the transform:
>>> print(fn(res))
1.2629730888100839
Alternatively, we may also transform a QNode directly, using either
decorator syntax:
>>> @my_transform(0.65, 2.5)
... @qml.qnode(dev)
... def circuit(x):
... qml.Hadamard(wires=0)
... qml.RX(x, wires=0)
... return qml.expval(qml.PauliX(0))
>>> print(circuit(-0.5))
1.2629730888100839
or by transforming an existing QNode:
>>> @qml.qnode(dev)
... def circuit(x):
... qml.Hadamard(wires=0)
... qml.RX(x, wires=0)
... return qml.expval(qml.PauliX(0))
>>> circuit = my_transform(circuit, 0.65, 2.5)
>>> print(circuit(-0.5))
1.2629730888100839
Batch tape transforms are fully differentiable:
>>> gradient = qml.grad(circuit)(-0.5)
>>> print(gradient)
2.5800122591960153
"""
def __init__(self, transform_fn, expand_fn=None, differentiable=True):
if not callable(transform_fn):
raise ValueError(
f"The batch transform function to register, {transform_fn}, "
"does not appear to be a valid Python function or callable."
)
self.transform_fn = transform_fn
self.expand_fn = expand_fn
self.differentiable = differentiable
functools.update_wrapper(self, transform_fn)
def qnode_execution_wrapper(self, qnode, targs, tkwargs):
"""A wrapper method that takes a QNode and transform arguments,
and returns a function that 'wraps' the QNode execution.
The returned function should accept the same keyword arguments as
the QNode, and return the output of the applying the tape transform
to the QNode's constructed tape.
"""
def _wrapper(*args, **kwargs):
qnode.construct(args, kwargs)
tapes, processing_fn = self.construct(qnode.qtape, *targs, **tkwargs)
# TODO: work out what to do for backprop
interface = qnode.interface
# TODO: extract gradient_fn from QNode
gradient_fn = qnode.diff_method
if interface is None or not self.differentiable:
gradient_fn = None
elif gradient_fn in ("best", "parameter-shift"):
gradient_fn = qml.gradients.param_shift
elif gradient_fn == "finite-diff":
gradient_fn = qml.gradients.finite_diff
res = execute(
tapes,
device=qnode.device,
gradient_fn=gradient_fn,
interface=interface,
)
return processing_fn(res)
return _wrapper
def __call__(self, qnode, *targs, **tkwargs):
if isinstance(qnode, qml.tape.QuantumTape):
# Input is a quantum tape.
# tapes, fn = some_transform(tape, *transform_args)
return self.construct(qnode, *targs, **tkwargs)
if isinstance(qnode, qml.QNode):
# Input is a QNode:
# result = some_transform(qnode, *transform_args)(*qnode_args)
wrapper = self.qnode_execution_wrapper(qnode, targs, tkwargs)
wrapper = functools.wraps(qnode)(wrapper)
else:
# Input is not a QNode nor a quantum tape.
# Assume Python decorator syntax:
#
# result = some_transform(*transform_args)(qnode)(*qnode_args)
#
# or
#
# @some_transform(*transform_args)
# @qml.qnode(dev)
# def circuit(...):
# ...
# result = circuit(*qnode_args)
# Prepend the input to the transform args,
# and create a wrapper function.
targs = (qnode,) + targs
def wrapper(qnode):
_wrapper = self.qnode_execution_wrapper(qnode, targs, tkwargs)
_wrapper = functools.wraps(qnode)(_wrapper)
return _wrapper
wrapper.tape_fn = functools.partial(self.transform_fn, *targs, **tkwargs)
wrapper.expand_fn = self.expand_fn
wrapper.differentiable = self.differentiable
return wrapper
def construct(self, tape, *args, **kwargs):
"""Applies the batch tape transform to an input tape.
Args:
tape (.QuantumTape): the tape to be transformed
*args: positional arguments to pass to the tape transform
**kwargs: keyword arguments to pass to the tape transform
Returns:
tuple[list[tapes], callable]: list of transformed tapes
to execute and a post-processing function.
"""
expand = kwargs.pop("_expand", True)
if expand and self.expand_fn is not None:
tape = self.expand_fn(tape)
tapes, processing_fn = self.transform_fn(tape, *args, **kwargs)
if processing_fn is None:
processing_fn = lambda x: x
return tapes, processing_fn
authors: ["noreply@github.com"] | author_id: hosseinsadeghi.noreply@github.com

blob_id: abdff1a74b900f6955b0c6e6f11bbe3febdb8558 | directory_id: f463ea1fd226201c88abc13b567718f1ba4a9396
path: /2016/Assignment1/cs231n/classifiers/linear_classifier.py
content_id: 26caa4a36e825434243769b610cc857f036f126a | detected_licenses: [] | license_type: no_license
repo_name: RahulBaboota/CS231n-Homeworks | snapshot_id: 29bc25baeff5ea272a8ab66c13de73cfeaa5be17 | revision_id: 820615203eb03d4e479bcf3e2efe831f3da2c534 | branch_name: refs/heads/master
visit_date: 2021-03-27T08:50:23.860952 | revision_date: 2019-10-18T18:46:21 | committer_date: 2019-10-18T18:46:21
github_id: 101315598 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5968 | extension: py
content:
import numpy as np
from cs231n.classifiers.softmax import *
from cs231n.classifiers.linear_svm import *
class LinearClassifier(object):
def __init__(self):
self.W = None
def train(self, X, y, learningRate = 1e-3, reg = 1e-5, numIters = 100, batchSize = 200, verbose = False):
"""
Train this linear classifier using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) containing training data; there are N
training samples each of dimension D.
- y: A numpy array of shape (N,) containing training labels; y[i] = c
means that X[i] has label 0 <= c < C for C classes.
- learningRate: (float) learning rate for optimization.
- reg: (float) regularization strength.
- numIters: (integer) number of steps to take when optimizing
- batchSize: (integer) number of training examples to use at each step.
- verbose: (boolean) If true, print progress during optimization.
Outputs:
A list containing the value of the loss function at each training iteration.
"""
## Number of training samples and the dimensions of each training sample.
numTrain, dim = X.shape
## Total number of classes (assume y takes values 0...K-1 where K is number of classes)
numClasses = np.max(y) + 1
if self.W is None:
## Initialise W randomly.
self.W = 0.001 * np.random.randn(dim, numClasses)
# Run stochastic gradient descent to optimize W
lossHistory = []
for it in xrange(numIters):
XBatch = None
yBatch = None
#########################################################################
# TODO: #
# Sample batchSize elements from the training data and their #
# corresponding labels to use in this round of gradient descent. #
# Store the data in XBatch and their corresponding labels in #
# yBatch; after sampling XBatch should have shape (dim, batchSize) #
# and yBatch should have shape (batchSize,) #
# #
# Hint: Use np.random.choice to generate indices. Sampling with #
# replacement is faster than sampling without replacement. #
#########################################################################
## Creating an array which randomly selects images.
randomIndices = np.random.choice(np.arange(numTrain), size = batchSize)
XBatch = X[randomIndices]
yBatch = y[randomIndices]
#########################################################################
# END OF YOUR CODE #
#########################################################################
## Evaluate loss and gradient
loss, grad = self.loss(XBatch, yBatch, reg)
lossHistory.append(loss)
## Perform parameter update
#########################################################################
# TODO: #
# Update the weights using the gradient and the learning rate. #
#########################################################################
## Updating the weights using stochastic gradient descent.
self.W -= learningRate * grad
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print 'iteration %d / %d: loss %f' % (it, numIters, loss)
return lossHistory
def predict(self, X):
"""
Use the trained weights of this linear classifier to predict labels for
data points.
Inputs:
- X: D x N array of training data. Each column is a D-dimensional point.
Returns:
- yPred: Predicted labels for the data in X. yPred is a 1-dimensional
array of length N, and each element is an integer giving the predicted
class.
"""
yPred = np.zeros(X.shape[1])
###########################################################################
# TODO: #
# Implement this method. Store the predicted labels in yPred. #
###########################################################################
## Performing the forward pass to compute the raw scores.
rawScores = X.dot(self.W)
## Finding the prediction made by the classifier.
yPred = rawScores.argmax(axis = 1)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return yPred
def loss(self, XBatch, yBatch, reg):
"""
Compute the loss function and its derivative.
Subclasses will override this.
Inputs:
- XBatch: A numpy array of shape (N, D) containing a minibatch of N
data points; each point has dimension D.
- yBatch: A numpy array of shape (N,) containing labels for the minibatch.
- reg: (float) regularization strength.
Returns: A tuple containing:
- loss as a single float
- gradient with respect to self.W; an array of the same shape as W
"""
pass
class LinearSVM(LinearClassifier):
""" A subclass that uses the Multiclass SVM loss function """
def loss(self, XBatch, yBatch, reg):
return svmLossVectorized(self.W, XBatch, yBatch, reg)
class Softmax(LinearClassifier):
""" A subclass that uses the Softmax + Cross-entropy loss function """
def loss(self, XBatch, yBatch, reg):
return softmaxLossVectorized(self.W, XBatch, yBatch, reg)
authors: ["rahulbaboota08@gmail.com"] | author_id: rahulbaboota08@gmail.com

blob_id: 570f8cfad52606c0201d19107b7cbeea9163c0b6 | directory_id: 53f7dbee9628a42f1205ba18670b4ca19f650eef
path: /src/remerge/core.py
content_id: e5342857052dc300d825cee0c4939d9df76d7709 | detected_licenses: ["MIT"] | license_type: permissive
repo_name: pmbaumgartner/remerge-mwe | snapshot_id: b89f772adfa2a52b7c191afb6c373ecdd72441f5 | revision_id: 7355bf1e7123f2373ebf48111f1fae00f145bb2b | branch_name: refs/heads/main
visit_date: 2023-05-23T14:31:01.003815 | revision_date: 2022-10-12T14:16:56 | committer_date: 2022-10-12T14:16:56
github_id: 543675980 | star_events_count: 15 | fork_events_count: 2
gha_license_id: MIT | gha_event_created_at: 2022-10-12T14:14:39 | gha_created_at: 2022-09-30T15:52:03 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 17163 | extension: py
content:
import json
from collections import Counter, defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from enum import Enum
from functools import cached_property
from itertools import groupby, islice
from pathlib import Path
from typing import Callable
from typing import Counter as CounterType
from typing import (
DefaultDict,
Dict,
Final,
Iterable,
List,
Literal,
NamedTuple,
NewType,
Optional,
Set,
Sized,
Tuple,
)
import numpy as np
import numpy.typing as npt
from tqdm import tqdm, trange
_SMALL: Final[float] = 1e-10
SelectionMethod = Literal["frequency", "log_likelihood", "npmi"]
class Lexeme(NamedTuple):
word: Tuple[str, ...]
ix: int
def __repr__(self) -> str:
return f"({self.word}|{self.ix})"
LineIndex = NewType("LineIndex", int)
TokenIndex = NewType("TokenIndex", int)
@dataclass
class LexemeData:
lexemes_to_locations: DefaultDict[
Lexeme, Set[Tuple[LineIndex, TokenIndex]]
] = field(default_factory=lambda: defaultdict(set))
locations_to_lexemes: List[List[Lexeme]] = field(default_factory=list)
lexemes_to_freqs: Dict[Lexeme, int] = field(default_factory=dict)
@classmethod
def from_corpus(
cls, corpus: Iterable[Iterable[str]], progress_bar: bool = False
) -> "LexemeData":
lexeme_data = cls()
total: Optional[int] = len(corpus) if isinstance(corpus, Sized) else None
corpus_iter = enumerate(corpus)
if progress_bar:
corpus_iter = tqdm(
corpus_iter,
desc="Creating LexemeData from Corpus",
unit="line",
total=total,
)
for (line_ix, tokens) in corpus_iter:
line_lexemes = []
for (word_ix, word) in enumerate(tokens):
line_ix = LineIndex(line_ix)
word_ix = TokenIndex(word_ix)
lexeme = Lexeme(word=(word,), ix=0)
loc = (line_ix, word_ix)
lexeme_data.lexemes_to_locations[lexeme].add(loc)
line_lexemes.append(lexeme)
lexeme_data.locations_to_lexemes.append(line_lexemes)
# Using this conditional prevents double counting merged lexemes.
lexeme_data.lexemes_to_freqs = {
k: len(v) for k, v in lexeme_data.lexemes_to_locations.items() if k.ix == 0
}
return lexeme_data
@property
def corpus_length(self) -> int:
"""Returns number of lines in corpus: max(line_ix) + 1."""
return len(self.locations_to_lexemes)
def render_corpus(self) -> List[List[Lexeme]]:
return self.locations_to_lexemes
def locations_to_root_lexemes(self, line: LineIndex) -> Dict[TokenIndex, Lexeme]:
lexeme_dicts = self.locations_to_lexemes[line]
return {TokenIndex(k): v for k, v in enumerate(lexeme_dicts) if v.ix == 0}
Bigram = Tuple[Lexeme, Lexeme]
def _count_bigram_line(*args):
el1c = [b[0] for b in args]
el2c = [b[1] for b in args]
    bc = list(args)
return (el1c, el2c, bc)
@dataclass
class BigramData:
bigrams_to_freqs: CounterType[Bigram] = field(default_factory=Counter)
bigrams_to_locations: Dict[Bigram, List[Tuple[LineIndex, TokenIndex]]] = field(
default_factory=lambda: defaultdict(list)
)
left_lex_freqs: CounterType[Lexeme] = field(default_factory=Counter)
right_lex_freqs: CounterType[Lexeme] = field(default_factory=Counter)
@classmethod
def from_lexemes(
cls, lexeme_data: LexemeData, progress_bar: bool = False
) -> "BigramData":
bigram_data = cls()
corpus_iter = range(lexeme_data.corpus_length)
if progress_bar:
corpus_iter = tqdm(
corpus_iter,
desc="Creating BigramData from LexemeData",
unit="line",
                total=lexeme_data.corpus_length,
)
for line_ix in corpus_iter:
line_lexeme_data = lexeme_data.locations_to_root_lexemes(LineIndex(line_ix))
line_items = line_lexeme_data.items()
line_bigrams = []
for (left_ix, left), (_, right) in zip(
line_items, islice(line_items, 1, None)
):
bigram = (left, right)
location = (LineIndex(line_ix), TokenIndex(left_ix))
bigram_data.bigrams_to_locations[bigram].append(location)
line_bigrams.append(bigram)
bigram_data.batch_add_bigrams(line_bigrams)
return bigram_data
def batch_add_bigrams(self, bigram_locations: List[Bigram]):
el1s, el2s, bigrams = _count_bigram_line(*bigram_locations)
self.left_lex_freqs.update(el1s)
self.right_lex_freqs.update(el2s)
self.bigrams_to_freqs.update(bigrams)
@dataclass
class WinnerInfo:
bigram: Bigram
merged_lexeme: Lexeme
bigram_locations: List[Tuple[LineIndex, TokenIndex]]
@classmethod
def from_bigram_with_data(
cls, bigram: Bigram, bigram_data: BigramData
) -> "WinnerInfo":
el1_words = list(bigram[0].word)
el2_words = list(bigram[1].word)
all_words = el1_words + el2_words
new_lexeme = Lexeme(word=tuple(all_words), ix=0)
locations = sorted(bigram_data.bigrams_to_locations[bigram])
return cls(bigram=bigram, merged_lexeme=new_lexeme, bigram_locations=locations)
def clean_bigram_locations(self) -> List[Tuple[LineIndex, TokenIndex]]:
"""This is greedily selecting correct bigrams from the candidate locations of bigrams.
Why? Well, in the case of a sentence like (a, a, a), with winner = (a, a), we can only convert
the first occurrence of this bigram and not the second, since the first occurence would be transformed into the bigram,
the new bigram in the second position no longer exists - but could be a candidate for the next round if it is indeed that common
of a pattern.
A more complex example is with winner (a, b, a, b) in ((a, b), (a, b), (a, b)). Here is the same idea: once we
merge the first occurence it is no longer available, even though it occurs later.
"""
clean_locations: List[Tuple[LineIndex, TokenIndex]] = []
for line, location in groupby(self.bigram_locations, key=lambda x: x[0]):
exclude_token: Set[TokenIndex] = set()
token_ix = [i[1] for i in location]
for token in token_ix:
if token in exclude_token:
continue
excludes = [i for i in token_ix if i < token + self.n_lexemes]
exclude_token.update(excludes)
clean_locations.append((line, token))
return clean_locations
@property
def n_lexemes(self) -> int:
return len(self.merged_lexeme.word)
@property
def merge_token_count(self) -> int:
# TODO: Optimize by putting in loop so we don't have to iterate here
return len(self.clean_bigram_locations())
def merge_winner(
winner: WinnerInfo, lexeme_data: LexemeData, bigram_data: BigramData
) -> Tuple[LexemeData, BigramData]:
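    # Overview: (1) rewrite every clean winner location so its tokens become
    # the merged lexeme (ix = 0..n_lexemes-1); (2) for each affected line,
    # diff the old vs. new root-lexeme bigrams and update the frequency and
    # location tables incrementally; (3) fix up unigram frequencies for the
    # winner and its two elements and prune empty entries.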
bigram_lines = set(i[0] for i in winner.clean_bigram_locations())
old_bigrams_lookup = {
line_ix: list(lexeme_data.locations_to_root_lexemes(LineIndex(line_ix)).items())
for line_ix in bigram_lines
}
for (line_ix, word_ix) in winner.clean_bigram_locations():
for lexeme_index in range(winner.n_lexemes):
pos = TokenIndex(word_ix + lexeme_index)
old_lexeme = lexeme_data.locations_to_lexemes[line_ix][pos]
lexeme = Lexeme(word=winner.merged_lexeme.word, ix=lexeme_index)
lexeme_data.locations_to_lexemes[line_ix][pos] = lexeme
lexeme_data.lexemes_to_locations[old_lexeme].remove((line_ix, pos))
lexeme_data.lexemes_to_locations[lexeme].add((line_ix, pos))
for line_ix, lexemes in old_bigrams_lookup.items():
old_bigrams = list(
zip([l[1] for l in lexemes], islice([l[1] for l in lexemes], 1, None))
)
new_root_lexemes_items = list(
lexeme_data.locations_to_root_lexemes(LineIndex(line_ix)).items()
)
new_root_lexemes = list(lex for _, lex in new_root_lexemes_items)
new_bigrams = list(zip(new_root_lexemes, islice(new_root_lexemes, 1, None)))
bigram_data.bigrams_to_freqs.update(new_bigrams)
bigram_data.left_lex_freqs.update([b[0] for b in new_bigrams])
bigram_data.right_lex_freqs.update([b[1] for b in new_bigrams])
bigram_data.bigrams_to_freqs.subtract(old_bigrams)
bigram_data.left_lex_freqs.subtract([b[0] for b in old_bigrams])
bigram_data.right_lex_freqs.subtract([b[1] for b in old_bigrams])
for (left_ix, left), (_, right) in zip(lexemes, islice(lexemes, 1, None)):
bigram = (left, right)
location = (LineIndex(line_ix), TokenIndex(left_ix))
bigram_data.bigrams_to_locations[bigram].remove(location)
for (left_ix, left), (_, right) in zip(
new_root_lexemes_items, islice(new_root_lexemes_items, 1, None)
):
bigram = (left, right)
location = (LineIndex(line_ix), TokenIndex(left_ix))
bigram_data.bigrams_to_locations[bigram].append(location)
lexeme_data.lexemes_to_freqs[winner.merged_lexeme] = winner.merge_token_count
el1_freq = lexeme_data.lexemes_to_freqs[winner.bigram[0]]
new_el1_freq = el1_freq - winner.merge_token_count
lexeme_data.lexemes_to_freqs[winner.bigram[0]] = new_el1_freq
el2_freq = lexeme_data.lexemes_to_freqs[winner.bigram[1]]
new_el2_freq = el2_freq - winner.merge_token_count
lexeme_data.lexemes_to_freqs[winner.bigram[1]] = new_el2_freq
lexeme_data.lexemes_to_freqs = {
k: v for k, v in lexeme_data.lexemes_to_freqs.items() if v != 0
}
lexeme_data.lexemes_to_locations = defaultdict(
set, {k: v for k, v in lexeme_data.lexemes_to_locations.items() if v != set()}
)
bigram_data.bigrams_to_freqs = Counter(
{k: v for k, v in bigram_data.bigrams_to_freqs.items() if v > 0}
)
bigram_data.left_lex_freqs = Counter(
{k: v for k, v in bigram_data.left_lex_freqs.items() if v > 0}
)
bigram_data.right_lex_freqs = Counter(
{k: v for k, v in bigram_data.right_lex_freqs.items() if v > 0}
)
assert winner.bigram not in bigram_data.bigrams_to_freqs
return lexeme_data, bigram_data
# NamedTuple doesn't support cached_property
@dataclass(frozen=True)
class BigramFreqArrays:
bigram_index: List[Bigram]
bigram_freq_array: npt.NDArray[np.int_]
el1_freq_array: npt.NDArray[np.int_]
el2_freq_array: npt.NDArray[np.int_]
@cached_property
def bigram_count(self) -> np.int_:
return self.bigram_freq_array.sum()
@classmethod
def from_bigram_data(
cls, bigram_data: BigramData, min_count: int = 0
) -> "BigramFreqArrays":
length = len(
[i for i in bigram_data.bigrams_to_freqs.values() if i >= min_count]
)
bigram_freq_array = np.empty(length, dtype=np.int_)
el1_freq_array = np.empty(length, dtype=np.int_)
el2_freq_array = np.empty(length, dtype=np.int_)
bigram_index = []
i = 0
for (bigram, freq) in bigram_data.bigrams_to_freqs.items():
if freq < min_count:
continue
bigram_freq_array[i] = freq
l1 = bigram_data.left_lex_freqs[bigram[0]]
el1_freq_array[i] = l1
l2 = bigram_data.right_lex_freqs[bigram[1]]
el2_freq_array[i] = l2
bigram_index.append(bigram)
i += 1
# manually count instead of enumerate
return cls(bigram_index, bigram_freq_array, el1_freq_array, el2_freq_array)
def calculate_winner_log_likelihood(
bigram_data: BigramData, min_count: int = 0
) -> Bigram:
data = BigramFreqArrays.from_bigram_data(bigram_data, min_count=min_count)
log_likelihoods = _calculate_log_likelihood(data)
winner_ix = np.argmax(log_likelihoods)
winner: Bigram = data.bigram_index[winner_ix]
return winner
def calculate_winner_npmi(bigram_data: BigramData, min_count: int = 0) -> Bigram:
data = BigramFreqArrays.from_bigram_data(bigram_data, min_count=min_count)
npmis = _calculate_npmi(data)
winner_ix = np.argmax(npmis)
winner: Bigram = data.bigram_index[winner_ix]
return winner
def calculate_winner_frequency(bigrams: BigramData, min_count: int = 0) -> Bigram:
return bigrams.bigrams_to_freqs.most_common(1)[0][0]
def _calculate_npmi(data: BigramFreqArrays) -> npt.NDArray[np.float_]:
prob_ab = data.bigram_freq_array / data.bigram_count
prob_a = data.el1_freq_array / data.bigram_count
prob_b = data.el2_freq_array / data.bigram_count
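    # NPMI(a, b) = log(p(a, b) / (p(a) * p(b))) / -log p(a, b), bounded in
    # [-1, 1], with 1 meaning the pair always co-occurs. Note the marginals
    # are positional: p(a) uses left-slot counts and p(b) right-slot counts,
    # both normalized by the total bigram count.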
npmi = np.log(prob_ab / (prob_a * prob_b)) / -(np.log(prob_ab))
return npmi
def _calculate_log_likelihood(data: BigramFreqArrays) -> npt.NDArray[np.float_]:
# For reference, see also: nltk.collocations.BigramAssocMeasures, specifically _contingency
# http://ecologyandevolution.org/statsdocs/online-stats-manual-chapter4.html
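    # 2x2 contingency table per bigram (a, b):
    #   obsA = count(a, b)
    #   obsB = count(a, *) - obsA
    #   obsC = count(*, b) - obsA
    #   obsD = all remaining bigrams
    # expA..expD are the expected cell counts under independence; _SMALL
    # guards against log(0). The np.where below flips the sign so that pairs
    # observed less often than chance (llA <= 0) get a negative statistic.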
obsA = data.bigram_freq_array
obsB = data.el1_freq_array - obsA
obsC = data.el2_freq_array - obsA
obsD = data.bigram_count - obsA - obsB - obsC
expA = ((obsA + obsB) * (obsA + obsC)) / data.bigram_count
expB = ((obsA + obsB) * (obsB + obsD)) / data.bigram_count
expC = ((obsC + obsD) * (obsA + obsC)) / data.bigram_count
expD = ((obsC + obsD) * (obsB + obsD)) / data.bigram_count
llA = obsA * np.log((obsA / (expA + _SMALL)) + _SMALL)
llB = obsB * np.log((obsB / (expB + _SMALL)) + _SMALL)
llC = obsC * np.log((obsC / (expC + _SMALL)) + _SMALL)
llD = obsD * np.log((obsD / (expD + _SMALL)) + _SMALL)
log_likelihood = 2.0 * (llA + llB + llC + llD)
log_likelihood = np.where(llA > 0, log_likelihood, log_likelihood * -1.0)
return log_likelihood
SELECTION_METHODS: Dict[SelectionMethod, Callable[[BigramData, int], Bigram]] = {
"log_likelihood": calculate_winner_log_likelihood,
"frequency": calculate_winner_frequency,
"npmi": calculate_winner_npmi,
}
ProgressBarOptions = Literal["all", "iterations", "none"]
def run(
corpus: List[List[str]],
iterations: int,
*,
method: SelectionMethod = "log_likelihood",
min_count: int = 0,
output: Optional[Path] = None,
progress_bar: ProgressBarOptions = "iterations",
) -> List[WinnerInfo]:
"""Run the remerge algorithm.
Args:
corpus (List[List[str]]): A corpus of already tokenized texts.
iterations (int): The number of iterations to run the algorithm. Papers typically use >500.
method (SelectionMethod, optional): One of "frequency", "log_likelihood", or "npmi". Defaults to "log_likelihood".
min_count (int, optional): The minimum count required for a bigram to be included in the winner calculations.
If choosing NPMI ("npmi") as the selection method, prefer using min_count because this measure is biased towards
infrequent word pairs. Defaults to 0.
output (Optional[Path], optional): A file path to output the winning merged lexemes as JSON. Defaults to None.
progress_bar (ProgressBarOptions, optional): Verbosity of progress bar. "all" will display the lexeme and bigram
construction progress each iteration plus total iteration progress. "iterations" will display progress
on the total number of iterations. "none" has no output. Defaults to "iterations".
Returns:
List[WinnerInfo]: The winning bigram from each iteration.
"""
winners: List[WinnerInfo] = []
all_progress = progress_bar == "all"
lexemes = LexemeData.from_corpus(corpus, progress_bar=all_progress)
bigrams = BigramData.from_lexemes(lexemes, progress_bar=all_progress)
winner_selection_function = SELECTION_METHODS[method]
if output is not None:
print(f"Outputting winning merged lexemes to '{output}'")
iterations_iter = (
trange(iterations)
if progress_bar in {"all", "iterations"}
else range(iterations)
)
for _ in iterations_iter:
winning_bigram = winner_selection_function(bigrams, min_count)
winner = WinnerInfo.from_bigram_with_data(
bigram=winning_bigram, bigram_data=bigrams
)
winners.append(winner)
if output:
winner_lexemes = {i: w.merged_lexeme.word for i, w in enumerate(winners)}
output.write_text(json.dumps(winner_lexemes))
lexemes, bigrams = merge_winner(winner, lexemes, bigrams)
if isinstance(iterations_iter, tqdm):
lines = set(w[0] for w in winner.bigram_locations)
pct_bgr = len(lines) / lexemes.corpus_length
iterations_iter.set_postfix(
{
"last_winner": winner.merged_lexeme.word,
"pct_bgr": f"{pct_bgr*100:.1f}%",
}
)
return winners
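# A minimal usage sketch (hypothetical two-line corpus; "frequency" selects
# the most frequent bigram, here ("new", "york") with count 2):
#   corpus = [["new", "york", "city"], ["new", "york", "times"]]
#   winners = run(corpus, iterations=1, method="frequency", progress_bar="none")
#   winners[0].merged_lexeme.word  # -> ("new", "york")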
|
[
"5107405+pmbaumgartner@users.noreply.github.com"
] |
5107405+pmbaumgartner@users.noreply.github.com
|
7516d8abf2b927b3b25da547626b6cd3d7f9db87
|
b019c47889d911b4008f62a05053abc31b4c8ef9
|
/trax/tf_numpy/numpy/array_methods.py
|
f52240f5691dcde83037bbe637f9ec6e5270dec7
|
[
"Apache-2.0"
] |
permissive
|
pkol/trax
|
c70e014ff777e959f3ef1226ad0272970bc69afb
|
4dda0a5079b48ca8c325164884e677a87a0e3b37
|
refs/heads/master
| 2020-09-12T13:45:31.767971
| 2020-03-30T12:02:46
| 2020-03-30T12:02:46
| 222,443,710
| 0
| 0
|
Apache-2.0
| 2019-11-18T12:26:32
| 2019-11-18T12:26:31
| null |
UTF-8
|
Python
| false
| false
| 25,429
|
py
|
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common array methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import six
import tensorflow.compat.v2 as tf
from trax.tf_numpy.numpy import array_creation
from trax.tf_numpy.numpy import array_manipulation
from trax.tf_numpy.numpy import arrays
from trax.tf_numpy.numpy import dtypes
from trax.tf_numpy.numpy import utils
def all(a, axis=None, keepdims=None): # pylint: disable=redefined-builtin
"""Whether all array elements or those along an axis evaluate to true.
Casts the array to bool type if it is not already and uses `tf.reduce_all` to
compute the result.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: Optional. Could be an int or a tuple of integers. If not specified,
the reduction is performed over all array indices.
keepdims: If true, retains reduced dimensions with length 1.
Returns:
An ndarray. Note that unlike NumPy this does not return a scalar bool if
`axis` is None.
"""
a = array_creation.asarray(a, dtype=bool)
return utils.tensor_to_ndarray(
tf.reduce_all(input_tensor=a.data, axis=axis, keepdims=keepdims))
def any(a, axis=None, keepdims=None): # pylint: disable=redefined-builtin
"""Whether any element in the entire array or in an axis evaluates to true.
Casts the array to bool type if it is not already and uses `tf.reduce_any` to
compute the result.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: Optional. Could be an int or a tuple of integers. If not specified,
the reduction is performed over all array indices.
keepdims: If true, retains reduced dimensions with length 1.
Returns:
An ndarray. Note that unlike NumPy this does not return a scalar bool if
`axis` is None.
"""
a = array_creation.asarray(a, dtype=bool)
return utils.tensor_to_ndarray(
tf.reduce_any(input_tensor=a.data, axis=axis, keepdims=keepdims))
def argmax(a, axis=None):
"""Returns the indices of the maximum values along an array axis.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: Optional. The axis along which to compute argmax. If None, index of
the max element in the flattened array is returned.
Returns:
An ndarray with the same shape as `a` with `axis` removed if not None.
If `axis` is None, a scalar array is returned.
"""
a = array_creation.asarray(a)
if axis is None or utils.isscalar(a):
# When axis is None or the array is a scalar, numpy flattens the array.
a_t = tf.reshape(a.data, [-1])
else:
a_t = a.data
return utils.tensor_to_ndarray(tf.argmax(input=a_t, axis=axis))
def argmin(a, axis=None):
"""Returns the indices of the minimum values along an array axis.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: Optional. The axis along which to compute argmin. If None, index of
the min element in the flattened array is returned.
Returns:
An ndarray with the same shape as `a` with `axis` removed if not None.
If `axis` is None, a scalar array is returned.
"""
a = array_creation.asarray(a)
if axis is None or utils.isscalar(a):
# When axis is None or the array is a scalar, numpy flattens the array.
a_t = tf.reshape(a.data, [-1])
else:
a_t = a.data
return utils.tensor_to_ndarray(tf.argmin(input=a_t, axis=axis))
def clip(a, a_min=None, a_max=None):
"""Clips array values to lie within a given range.
Uses `tf.clip_by_value`.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
a_min: array_like. Must be a scalar or a shape that can be broadcast to
`a.shape`. At least one of `a_min` or `a_max` should be non-None.
a_max: array_like. Must be a scalar or a shape that can be broadcast to
`a.shape`. At least one of `a_min` or `a_max` should be non-None.
Returns:
An ndarray with trimmed values with the same shape and dtype as `a`.
Raises:
ValueError: if both a_min and a_max are None.
"""
if a_min is None and a_max is None:
raise ValueError('Both a_min and a_max cannot be None.')
a = array_creation.asarray(a)
# Unlike np.clip, tf.clip_by_value requires both min and max values to be
# specified so we set them to the smallest/largest values of the array dtype.
if a_min is None:
a_min = np.iinfo(a.dtype).min
if a_max is None:
a_max = np.iinfo(a.dtype).max
a_min = array_creation.asarray(a_min, dtype=a.dtype)
a_max = array_creation.asarray(a_max, dtype=a.dtype)
return utils.tensor_to_ndarray(
tf.clip_by_value(a.data, a_min.data, a_max.data))
def compress(condition, a, axis=None):
"""Compresses `a` by selecting values along `axis` with `condition` true.
Uses `tf.boolean_mask`.
Args:
condition: 1-d array of bools. If `condition` is shorter than the array
axis (or the flattened array if axis is None), it is padded with False.
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: Optional. Axis along which to select elements. If None, `condition` is
applied on flattened array.
Returns:
An ndarray.
Raises:
ValueError: if `condition` is not of rank 1.
"""
condition = array_creation.asarray(condition, dtype=bool)
a = array_creation.asarray(a)
if condition.ndim != 1:
raise ValueError('condition must be a 1-d array.')
# `np.compress` treats scalars as 1-d arrays.
if a.ndim == 0:
a = ravel(a)
if axis is None:
a = ravel(a)
axis = 0
if axis < 0:
axis += a.ndim
assert axis >= 0 and axis < a.ndim
# `tf.boolean_mask` requires the first dimensions of array and condition to
# match. `np.compress` pads condition with False when it is shorter.
condition_t = condition.data
a_t = a.data
if condition.shape[0] < a.shape[axis]:
padding = tf.fill([a.shape[axis] - condition.shape[0]], False)
condition_t = tf.concat([condition_t, padding], axis=0)
return utils.tensor_to_ndarray(tf.boolean_mask(tensor=a_t, mask=condition_t,
axis=axis))
def copy(a):
"""Returns a copy of the array."""
return array_creation.array(a, copy=True)
def cumprod(a, axis=None, dtype=None):
"""Returns cumulative product of `a` along an axis or the flattened array.
Uses `tf.cumprod`.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: Optional. Axis along which to compute products. If None, operation is
performed on the flattened array.
dtype: Optional. The type of the output array. If None, defaults to the
dtype of `a` unless `a` is an integer type with precision less than `int`
      in which case the output type is `int`.
Returns:
An ndarray with the same number of elements as `a`. If `axis` is None, the
output is a 1-d array, else it has the same shape as `a`.
"""
a = array_creation.asarray(a, dtype=dtype)
if dtype is None and tf.as_dtype(a.dtype).is_integer:
# If a is an integer type and its precision is less than that of `int`,
# the output type will be `int`.
output_type = np.promote_types(a.dtype, int)
if output_type != a.dtype:
a = array_creation.asarray(a, dtype=output_type)
# If axis is None, the input is flattened.
if axis is None:
a = ravel(a)
axis = 0
if axis < 0:
axis += a.ndim
assert axis >= 0 and axis < a.ndim
return utils.tensor_to_ndarray(tf.math.cumprod(a.data, axis))
def cumsum(a, axis=None, dtype=None):
"""Returns cumulative sum of `a` along an axis or the flattened array.
Uses `tf.cumsum`.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: Optional. Axis along which to compute sums. If None, operation is
performed on the flattened array.
dtype: Optional. The type of the output array. If None, defaults to the
dtype of `a` unless `a` is an integer type with precision less than `int`
      in which case the output type is `int`.
Returns:
An ndarray with the same number of elements as `a`. If `axis` is None, the
output is a 1-d array, else it has the same shape as `a`.
"""
a = array_creation.asarray(a, dtype=dtype)
if dtype is None and tf.as_dtype(a.dtype).is_integer:
# If a is an integer type and its precision is less than that of `int`,
# the output type will be `int`.
output_type = np.promote_types(a.dtype, int)
if output_type != a.dtype:
a = array_creation.asarray(a, dtype=output_type)
# If axis is None, the input is flattened.
if axis is None:
a = ravel(a)
axis = 0
if axis < 0:
axis += a.ndim
assert axis >= 0 and axis < a.ndim
return utils.tensor_to_ndarray(tf.cumsum(a.data, axis))
def imag(a):
"""Returns imaginary parts of all elements in `a`.
Uses `tf.imag`.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
Returns:
An ndarray with the same shape as `a`.
"""
a = array_creation.asarray(a)
# TODO(srbs): np.imag returns a scalar if a is a scalar, whereas we always
# return an ndarray.
return utils.tensor_to_ndarray(tf.math.imag(a.data))
_TO_INT64 = 0
_TO_FLOAT = 1
def _reduce(tf_fn, a, axis=None, dtype=None, keepdims=None,
promote_int=_TO_INT64, tf_bool_fn=None, preserve_bool=False):
"""A general reduction function.
Args:
tf_fn: the TF reduction function.
a: the array to be reduced.
axis: (optional) the axis along which to do the reduction. If None, all
dimensions are reduced.
dtype: (optional) the dtype of the result.
keepdims: (optional) whether to keep the reduced dimension(s).
promote_int: how to promote integer and bool inputs. There are three
choices: (1) _TO_INT64: always promote them to int64 or uint64; (2)
_TO_FLOAT: always promote them to a float type (determined by
dtypes.default_float_type); (3) None: don't promote.
tf_bool_fn: (optional) the TF reduction function for bool inputs. It
will only be used if `dtype` is explicitly set to `np.bool_` or if `a`'s
dtype is `np.bool_` and `preserve_bool` is True.
    preserve_bool: a flag to control whether to use `tf_bool_fn` if `a`'s dtype
      is `np.bool_` (some reductions such as np.sum convert bools to
      integers, while others such as np.max preserve bools).
Returns:
An ndarray.
"""
if dtype:
dtype = utils.result_type(dtype)
if keepdims is None:
keepdims = False
a = array_creation.asarray(a, dtype=dtype)
if ((dtype == np.bool_ or preserve_bool and a.dtype == np.bool_)
and tf_bool_fn is not None):
return utils.tensor_to_ndarray(
tf_bool_fn(input_tensor=a.data, axis=axis, keepdims=keepdims))
if dtype is None:
dtype = a.dtype
if np.issubdtype(dtype, np.integer) or dtype == np.bool_:
if promote_int == _TO_INT64:
# If a is an integer/bool type and whose bit width is less than 64,
# numpy up-casts it to 64-bit.
if dtype == np.bool_:
is_signed = True
width = 8 # We can use any number here that is less than 64
else:
is_signed = np.issubdtype(dtype, np.signedinteger)
width = np.iinfo(dtype).bits
if width < 64:
if is_signed:
dtype = np.int64
else:
dtype = np.uint64
a = a.astype(dtype)
elif promote_int == _TO_FLOAT:
a = a.astype(dtypes.default_float_type())
return utils.tensor_to_ndarray(
tf_fn(input_tensor=a.data, axis=axis, keepdims=keepdims))
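# A sketch of the promotion behavior implemented above: with the default
# promote_int=_TO_INT64 (used by sum/prod below), reducing an int32 array
# yields an int64 result, matching NumPy's up-casting; with _TO_FLOAT (used
# by mean/var/std), integer and bool inputs are first cast to
# dtypes.default_float_type().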
@utils.np_doc(np.sum)
def sum(a, axis=None, dtype=None, keepdims=None): # pylint: disable=redefined-builtin
return _reduce(tf.reduce_sum, a, axis=axis, dtype=dtype, keepdims=keepdims,
tf_bool_fn=tf.reduce_any)
@utils.np_doc(np.prod)
def prod(a, axis=None, dtype=None, keepdims=None):
return _reduce(tf.reduce_prod, a, axis=axis, dtype=dtype, keepdims=keepdims,
tf_bool_fn=tf.reduce_all)
@utils.np_doc(np.mean)
def mean(a, axis=None, dtype=None, keepdims=None):
return _reduce(tf.math.reduce_mean, a, axis=axis, dtype=dtype,
keepdims=keepdims, promote_int=_TO_FLOAT)
@utils.np_doc(np.amax)
def amax(a, axis=None, keepdims=None):
return _reduce(tf.reduce_max, a, axis=axis, dtype=None, keepdims=keepdims,
promote_int=None, tf_bool_fn=tf.reduce_any, preserve_bool=True)
@utils.np_doc(np.amin)
def amin(a, axis=None, keepdims=None):
return _reduce(tf.reduce_min, a, axis=axis, dtype=None, keepdims=keepdims,
promote_int=None, tf_bool_fn=tf.reduce_all, preserve_bool=True)
@utils.np_doc(np.var)
def var(a, axis=None, keepdims=None):
return _reduce(tf.math.reduce_variance, a, axis=axis, dtype=None,
keepdims=keepdims, promote_int=_TO_FLOAT)
@utils.np_doc(np.std)
def std(a, axis=None, keepdims=None):
return _reduce(tf.math.reduce_std, a, axis=axis, dtype=None,
keepdims=keepdims, promote_int=_TO_FLOAT)
def ravel(a):
"""Flattens `a` into a 1-d array.
If `a` is already a 1-d ndarray it is returned as is.
Uses `tf.reshape`.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
Returns:
A 1-d ndarray.
"""
a = array_creation.asarray(a)
if a.ndim == 1:
return a
return utils.tensor_to_ndarray(tf.reshape(a.data, [-1]))
def real(val):
"""Returns real parts of all elements in `a`.
Uses `tf.real`.
Args:
val: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
Returns:
An ndarray with the same shape as `a`.
"""
val = array_creation.asarray(val)
# TODO(srbs): np.real returns a scalar if val is a scalar, whereas we always
# return an ndarray.
return utils.tensor_to_ndarray(tf.math.real(val.data))
@utils.np_doc(np.repeat)
def repeat(a, repeats, axis=None):
a = array_creation.asarray(a).data
repeats = array_creation.asarray(repeats).data
return utils.tensor_to_ndarray(tf.repeat(a, repeats, axis))
def around(a, decimals=0):
"""Rounds each array element to the specified number of decimals.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
decimals: Optional, defaults to 0. The number of decimal places to round to.
Could be negative.
Returns:
An ndarray.
"""
a = array_creation.asarray(a)
factor = math.pow(10, decimals)
a_t = tf.multiply(a.data, factor)
a_t = tf.round(a_t)
a_t = tf.math.divide(a_t, factor)
return utils.tensor_to_ndarray(a_t)
def reshape(a, newshape):
"""Reshapes an array.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
newshape: 0-d or 1-d array_like.
Returns:
An ndarray with the contents and dtype of `a` and shape `newshape`.
"""
a = array_creation.asarray(a)
if isinstance(newshape, arrays.ndarray):
newshape = newshape.data
return utils.tensor_to_ndarray(tf.reshape(a.data, newshape))
def expand_dims(a, axis):
"""Expand the shape of an array.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: int. axis on which to expand the shape.
Returns:
An ndarray with the contents and dtype of `a` and shape expanded on axis.
"""
a = array_creation.asarray(a)
return utils.tensor_to_ndarray(tf.expand_dims(a.data, axis=axis))
def squeeze(a, axis=None):
"""Removes single-element axes from the array.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axis: scalar or list/tuple of ints.
    TODO(srbs): tf.squeeze throws an error when axis is a Tensor and eager
      execution is enabled, so we cannot allow axis to be array_like here. Fix.
Returns:
An ndarray.
"""
a = array_creation.asarray(a)
return utils.tensor_to_ndarray(tf.squeeze(a, axis))
def transpose(a, axes=None):
"""Permutes dimensions of the array.
Args:
a: array_like. Could be an ndarray, a Tensor or any object that can
be converted to a Tensor using `tf.convert_to_tensor`.
axes: array_like. A list of ints with length rank(a) or None specifying the
order of permutation. The i'th dimension of the output array corresponds
to axes[i]'th dimension of the `a`. If None, the axes are reversed.
Returns:
An ndarray.
"""
a = array_creation.asarray(a)
if axes is not None:
axes = array_creation.asarray(axes)
return utils.tensor_to_ndarray(tf.transpose(a=a.data, perm=axes))
def swapaxes(a, axis1, axis2):
"""Interchange two axes of an array.
Args:
a: array_like. Input array.
axis1: int. First axis.
axis2: int. Second axis.
Returns:
An ndarray.
"""
a = array_creation.asarray(a)
# TODO(wangpeng): handling partial shapes with unknown ranks
n = len(a.shape)
if not (-n <= axis1 and axis1 < n):
raise ValueError('axis1 must be in range [-%s, %s); got %s' % (n, n, axis1))
if not (-n <= axis2 and axis2 < n):
raise ValueError('axis2 must be in range [-%s, %s); got %s' % (n, n, axis2))
if axis1 < 0:
axis1 += n
if axis2 < 0:
axis2 += n
perm = list(range(n))
perm[axis1] = axis2
perm[axis2] = axis1
return transpose(a, perm)
def _setitem(arr, index, value):
"""Sets the `value` at `index` in the array `arr`.
This works by replacing the slice at `index` in the tensor with `value`.
Since tensors are immutable, this builds a new tensor using the `tf.concat`
op. Currently, only 0-d and 1-d indices are supported.
Note that this may break gradients e.g.
a = tf_np.array([1, 2, 3])
old_a_t = a.data
with tf.GradientTape(persistent=True) as g:
g.watch(a.data)
b = a * 2
a[0] = 5
g.gradient(b.data, [a.data]) # [None]
g.gradient(b.data, [old_a_t]) # [[2., 2., 2.]]
Here `d_b / d_a` is `[None]` since a.data no longer points to the same
tensor.
Args:
arr: array_like.
index: scalar or 1-d integer array.
value: value to set at index.
Returns:
ndarray
Raises:
ValueError: if `index` is not a scalar or 1-d array.
"""
# TODO(srbs): Figure out a solution to the gradient problem.
arr = array_creation.asarray(arr)
index = array_creation.asarray(index)
if index.ndim == 0:
index = ravel(index)
elif index.ndim > 1:
raise ValueError('index must be a scalar or a 1-d array.')
value = array_creation.asarray(value, dtype=arr.dtype)
if arr.shape[len(index):] != value.shape:
value = array_manipulation.broadcast_to(value, arr.shape[len(index):])
prefix_t = arr.data[:index.data[0]]
postfix_t = arr.data[index.data[0] + 1:]
if len(index) == 1:
arr._data = tf.concat( # pylint: disable=protected-access
[prefix_t, tf.expand_dims(value.data, 0), postfix_t], 0)
else:
subarray = arr[index.data[0]]
_setitem(subarray, index[1:], value)
arr._data = tf.concat( # pylint: disable=protected-access
[prefix_t, tf.expand_dims(subarray.data, 0), postfix_t], 0)
setattr(arrays.ndarray, 'transpose', transpose)
setattr(arrays.ndarray, 'reshape', reshape)
setattr(arrays.ndarray, '__setitem__', _setitem)
def pad(array, pad_width, mode, constant_values=0):
"""Pads an array.
Args:
array: array_like of rank N. Input array.
pad_width: {sequence, array_like, int}.
Number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths
for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all
axes.
mode: string. One of the following string values:
'constant'
Pads with a constant value.
'reflect'
Pads with the reflection of the vector mirrored on
the first and last values of the vector along each
axis.
'symmetric'
Pads with the reflection of the vector mirrored
along the edge of the array.
**NOTE**: The supported list of `mode` does not match that of numpy's.
constant_values: scalar with same dtype as `array`.
Used in 'constant' mode as the pad value. Default is 0.
Returns:
An ndarray padded array of rank equal to `array` with shape increased
according to `pad_width`.
Raises:
ValueError if `mode` is not supported.
"""
  if mode not in ('constant', 'reflect', 'symmetric'):
raise ValueError('Unsupported padding mode: ' + mode)
mode = mode.upper()
array = array_creation.asarray(array)
pad_width = array_creation.asarray(pad_width, dtype=tf.int32)
return utils.tensor_to_ndarray(tf.pad(
tensor=array.data, paddings=pad_width.data, mode=mode,
constant_values=constant_values))
def take(a, indices, axis=None):
"""Take elements from an array along an axis.
See https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html for
description.
Args:
a: array_like. The source array.
indices: array_like. The indices of the values to extract.
axis: int, optional. The axis over which to select values. By default, the
flattened input array is used.
Returns:
A ndarray. The returned array has the same type as `a`.
"""
a = array_creation.asarray(a)
indices = array_creation.asarray(indices)
a = a.data
if axis is None:
a = tf.reshape(a, [-1])
axis = 0
return utils.tensor_to_ndarray(tf.gather(a, indices.data, axis=axis))
def where(condition, x, y):
"""Return an array with elements from `x` or `y`, depending on condition.
Args:
condition: array_like, bool. Where True, yield `x`, otherwise yield `y`.
    x: array_like. Values from which to choose where `condition` is True.
    y: array_like. Values from which to choose where `condition` is False.
      `x`, `y` and `condition` need to be broadcastable to some shape.
Returns:
An array.
"""
condition = array_creation.asarray(condition, dtype=np.bool_)
x, y = array_creation._promote_dtype(x, y)
return utils.tensor_to_ndarray(tf.where(condition.data, x.data, y.data))
def shape(a):
"""Return the shape of an array.
Args:
a: array_like. Input array.
Returns:
Tuple of ints.
"""
a = array_creation.asarray(a)
return a.shape
def ndim(a):
a = array_creation.asarray(a)
return a.ndim
def isscalar(a):
return ndim(a) == 0
def _boundaries_to_sizes(a, boundaries, axis):
"""Converting boundaries of splits to sizes of splits.
Args:
a: the array to be split.
boundaries: the boundaries, as in np.split.
axis: the axis along which to split.
Returns:
A list of sizes of the splits, as in tf.split.
"""
if axis >= len(a.shape):
raise ValueError('axis %s is out of bound for shape %s' % (axis, a.shape))
total_size = a.shape[axis]
sizes = []
sizes_sum = 0
prev = 0
for i, b in enumerate(boundaries):
size = b - prev
if size < 0:
raise ValueError('The %s-th boundary %s is smaller than the previous '
'boundary %s' % (i, b, prev))
size = min(size, max(0, total_size - sizes_sum))
sizes.append(size)
sizes_sum += size
prev = b
sizes.append(max(0, total_size - sizes_sum))
return sizes
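# For example, boundaries [2, 5] on an axis of size 8 yield sizes [2, 3, 3],
# which `split` below forwards to tf.split.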
def split(a, indices_or_sections, axis=0):
"""Split an array into multiple sub-arrays.
See https://docs.scipy.org/doc/numpy/reference/generated/numpy.split.html for
reference.
Args:
    a: the array to be split.
indices_or_sections: int or 1-D array, representing the number of even
splits or the boundaries between splits.
axis: the axis along which to split.
Returns:
A list of sub-arrays.
"""
a = array_creation.asarray(a)
if not isinstance(indices_or_sections, six.integer_types):
indices_or_sections = _boundaries_to_sizes(a, indices_or_sections, axis)
result = tf.split(a.data, indices_or_sections, axis=axis)
return [utils.tensor_to_ndarray(a) for a in result]
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
8b745fc24590730af1bfcea38487baaebdeea983
|
9ea977520ab7dd032a12a8bc83609cce4c33f29b
|
/ass3/2015csz8044/posRun.py
|
792a385e56440ff797b74eaf139b0c04de4d8329
|
[] |
no_license
|
neelamadhav/graphical
|
7021a1c866b996945b5a4593b95b0581ccb7d170
|
4af576907c43cca80a33761fa01f5552fe29ca8d
|
refs/heads/master
| 2021-01-10T07:05:58.789877
| 2015-11-14T04:02:25
| 2015-11-14T04:02:25
| 44,488,796
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,363
|
py
|
import string
stopwords = ["after", "afterwards", "again", "against", "ago", "ah", "ahead", "ain't", "all", "allow", "allows", "almost", "alone", "along", "alongside", "already", "also", "although", "always", "am", "amid", "amidst", "among", "amongst", "amoungst", "amount", "an", "and", "announce", "another", "any", "anybody", "anyhow", "anymore", "anyone", "anything", "anyway", "anyways", "anywhere", "apart", "apparently", "appear", "appreciate", "appropriate", "approximately", "are", "aren", "arent", "aren't", "arise", "around", "as", "a's", "aside", "ask", "asking", "associated", "at", "auth", "available", "away", "awfully", "b", "back", "backward", "backwards", "be", "became", "because", "become", "becomes", "becoming", "been", "before", "beforehand", "begin", "beginning", "beginnings", "begins", "behind", "being", "believe", "below", "beside", "besides", "best", "better", "between", "beyond", "bill", "biol", "both", "bottom", "brief", "briefly", "but", "by", "c", "ca", "call", "came", "can", "cannot", "cant", "can't", "caption", "cause", "causes", "certain", "certainly", "changes", "clearly", "c'mon", "co", "co.", "com", "come", "comes", "computer", "con", "concerning", "consequently", "consider", "considering", "contain", "containing", "contains", "corresponding", "could", "couldnt", "couldn't", "course", "cry", "c's", "currently", "d", "dare", "daren't", "date", "de", "definitely", "describe", "described", "despite", "detail", "did", "didn't", "different", "directly", "do", "does", "doesn't", "doing", "done", "don't", "down", "downwards", "due", "during", "e", "each", "ed", "edu", "effect", "eg", "eight", "eighty", "either", "eleven", "else", "elsewhere", "empty", "end", "ending", "enough", "entirely", "especially", "et", "et-al", "etc", "even", "ever", "evermore", "every", "everybody", "everyone", "everything", "everywhere", "ex", "exactly", "example", "except", "f", "fairly", "far", "farther", "few", "fewer", "ff", "fifteen", "fifth", "fify", "fill", "find", "fire", "first", "five", "fix", "followed", "following", "follows", "for", "forever", "former", "formerly", "forth", "forty", "forward", "four", "from", "front", "full", "further", "furthermore", "g", "gave", "get", "gets", "getting", "give", "given", "gives", "giving", "go", "goes", "going", "gone", "got", "gotten", "greetings", "h", "had", "hadn't", "half", "happens", "hardly", "has", "hasnt", "hasn't", "have", "not have", "haven't", "having", "he", "hed", "he'd", "he'll", "hello", "help", "hence", "her", "here", "hereafter", "hereby", "herein", "heres", "here's", "hereupon", "hers", "herse", "herself", "hes", "he's", "hi", "hid", "him", "himse", "himself", "his", "hither", "home", "hopefully", "how", "howbeit", "however", "how's", "hundred", "i", "I", "id", "i'd", "ie", "if", "ignored", "i'll", "im", "i'm", "immediate", "immediately", "importance", "important", "in", "inasmuch", "inc", "inc.", "indeed", "index", "indicate", "indicated", "indicates", "information", "inner", "inside", "insofar", "instead", "interest", "into", "invention", "inward", "is", "isn't", "it", "itd", "it'd", "it'll", "its", "it's", "itse", "itself", "i've", "j", "just", "k", "keep", "keeps", "kept", "keys", "kg", "km", "know", "known", "knows", "l", "largely", "last", "lately", "later", "latter", "latterly", "least", "less", "lest", "let", "lets", "let's", "like", "liked", "likely", "likewise", "line", "little", "'ll", "look", "looking", "looks", "low", "lower", "ltd", "m", "made", "mainly", "make", "makes", "many", "may", "maybe", "mayn't", "me", "mean", "means", 
"meantime", "meanwhile", "merely", "mg", "might", "mightn't", "mill", "million", "mine", "minus", "miss", "ml", "more", "moreover", "most", "mostly", "move", "mr", "mrs", "much", "mug", "must", "mustn't", "my", "myse", "myself", "n", "na", "name", "namely", "nay", "nd", "near", "nearly", "necessarily", "necessary", "need", "needn't", "needs", "neither", "never", "neverf", "neverless", "nevertheless", "new", "next", "nine", "ninety", "no", "nobody", "non", "none", "nonetheless", "noone", "no-one", "nor", "normally", "nos", "not", "noted", "nothing", "not in", "notwithstanding", "novel", "now", "nowhere", "o", "obtain", "obtained", "obviously", "of", "off", "often", "oh", "ok", "okay", "old", "omitted", "on", "once", "one", "ones", "one's", "only", "onto", "opposite", "or", "ord", "other", "others", "otherwise", "ought", "oughtn't", "our", "ours ", "ours", "ourselves", "out", "outside", "over", "overall", "owing", "own", "p", "page", "pages", "part", "particular", "particularly", "past", "per", "perhaps", "placed", "please", "plus", "poorly", "possible", "possibly", "potentially", "pp", "predominantly", "present", "presumably", "previously", "primarily", "probably", "promptly", "proud", "provided", "provides", "put", "q", "que", "quickly", "quite", "qv", "r", "ran", "rather", "rd", "re", "readily", "really", "reasonably", "recent", "recently", "ref", "refs", "regarding", "regardless", "regards", "related", "relatively", "research", "respectively", "resulted", "resulting", "results", "right", "round", "run", "s", "said", "same", "saw", "say", "saying", "says", "sec", "second", "secondly", "section", "see", "seeing", "seem", "seemed", "seeming", "seems", "seen", "self", "selves", "sensible", "sent", "serious", "seriously", "seven", "several", "shall", "shan't", "she", "shed", "she'd", "she'll", "shes", "she's", "should", "shouldn't", "show", "showed", "shown", "showns", "shows", "side", "significant", "significantly", "similar", "similarly", "since", "sincere", "six", "sixty", "slightly", "so", "some", "somebody", "someday", "somehow", "someone", "somethan", "something", "sometime", "sometimes", "somewhat", "somewhere", "soon", "sorry", "specifically", "specified", "specify", "specifying", "state", "states", "still", "stop", "strongly", "sub", "substantially", "successfully", "such", "sufficiently", "suggest", "sup", "sure", "system", "t", "take", "taken", "taking", "tell", "ten", "tends", "th", "than", "thank", "thanks", "thanx", "that", "that'll", "thats", "that's", "that've", "the", "their", "theirs", "them", "themselves", "then", "thence", "there", "thereafter", "thereby", "thered", "there'd", "therefore", "therein", "there'll", "thereof", "therere", "there're", "theres","there's", "thereto", "thereupon", "there've", "these", "they", "theyd", "they'd", "they'll", "theyre", "they're", "they've", "thick", "thickv", "thin", "thing", "things", "think", "third", "thirty", "this", "thorough", "thoroughly", "those", "thou", "though", "thoughh", "thousand", "three", "throug", "through", "throughout", "thru", "thus", "til", "till", "tip", "to", "together", "too", "took", "top", "toward", "towards", "tried", "tries", "truly", "try", "trying", "ts", "t's", "twelve", "twenty", "twice", "two", "u", "un", "under", "underneath", "undoing", "unfortunately", "unless", "unlike", "unlikely", "until", "unto", "up", "upon", "ups", "upwards", "us", "use", "used", "useful", "usefully", "usefulness", "uses", "using", "usually", "uucp", "v", "value", "various", "'ve", "versus", "very", "via", "viz", "vol", "vols", 
"vs", "w", "want", "wants", "was", "wasn't", "way", "we", "wed", "we'd", "welcome", "well", "we'll", "went", "were", "we're", "weren't", "we've", "what", "whatever", "what'll", "whats", "what's", "what've", "when", "whence", "whenever", "when's", "where", "whereafter", "whereas", "whereby", "wherein", "wheres", "where's", "whereupon", "wherever", "whether", "which", "whichever", "while", "whilst", "whim", "whither", "who", "whod", "who'd", "whoever", "whole", "who'll", "whom", "whomever", "whos", "who's", "whose", "why", "why's", "widely", "will", "willing", "wish", "with", "within", "without", "wonder", "won't", "words", "world", "would", "wouldn't", "www", "x", "y", "yes", "yet", "you", "youd", "you'd", "you'll", "your", "youre", "you're", "yours", "yourself", "yourselves", "you've", "z", "zero"]
invalidChars = set(string.punctuation)
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
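# Feature extraction: each output line is the token followed by its feature
# tags (e.g. "The CAPITALIZED STOPWORD"), and blank lines are preserved as
# sentence separators -- the usual input shape for sequence taggers such as
# CRF++ (the exact downstream tool is an assumption).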
out = open('temp.pos.input.feature', 'w')
for line in open('temp.pos.input').readlines():
line = line.strip()
if line == '':
out.write('\n');
continue
line = line.split(' ')
word = line[0]
outline = word
capital = not word.islower() and not word.isupper()
allCapital = word.isupper()
numerical = is_number(word)
mention = word.startswith('@')
hashtag = word.startswith('#')
url = word.startswith('http')
apostrophe = False
if word.find("'") > -1 or word.find('"') > -1:
apostrophe = True
specialChar = word in invalidChars
stopword = word.strip().lower() in stopwords
if specialChar:
outline = outline + ' SPECIALCHAR'
if not numerical and not specialChar and capital:
outline = outline + ' CAPITALIZED'
if allCapital:
outline = outline + ' ALLCAPITALIZED'
if numerical:
outline = outline + ' NUMERICAL'
if mention:
outline = outline + ' MENTION'
if hashtag:
outline = outline + ' HASHTAG'
if url:
outline = outline + ' URL'
if apostrophe:
outline = outline + ' APOSTROPHE'
if stopword:
outline = outline + ' STOPWORD'
outline = outline +'\n'
out.write(outline)
out.close()
|
[
"neelamadhavg@gmail.com"
] |
neelamadhavg@gmail.com
|
0f94d084520c7500d5f40912ad474da7ef444f69
|
e54fb4602d884952935c33ea93e345bb8b9424eb
|
/easy_tries/setup.py
|
ad2a8587f9ac6600eabfff57c57056e2069fe25a
|
[] |
no_license
|
akaashhazarika/easy_tries
|
3094694d7e0cb1047658692889b3cb846360664a
|
747fba6ff6d9d88fdd559aec42a88fc506d8a99c
|
refs/heads/master
| 2020-06-20T07:47:39.578744
| 2019-07-15T18:15:16
| 2019-07-15T18:15:16
| 197,048,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
from setuptools import setup
setup(name='easy_tries',
version='0.1',
description='Python Implementation of Tries for search and Auto Complete',
url='http://github.com/akaashhazarika/easy_tries',
author='Akaash Hazarika',
author_email='akaashhazarika@gmail.com',
license='MIT',
packages=['easy_tries'],
zip_safe=False)
|
[
"akaashhazarika@gmail.com"
] |
akaashhazarika@gmail.com
|
f3bd2204373b7afe9536e2ef7ad99941600b2053
|
b6be68fd512b7cec64577ef515321f7caf311cb6
|
/game.py
|
7a8b8a541d9e2f015f585e70def52df0f8c3aa04
|
[] |
no_license
|
lukabombala/birdgame
|
c9e11a993bf8bf5a32d419228a22cfa43b9dd7c9
|
249208f71a7b953c9377d8883c2096b49483fa71
|
refs/heads/master
| 2021-06-23T19:58:31.238053
| 2019-10-23T18:40:20
| 2019-10-23T18:40:20
| 216,393,314
| 0
| 0
| null | 2021-04-20T18:47:58
| 2019-10-20T16:38:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,392
|
py
|
import sys
from collections import defaultdict
import pygame
import colors
import config as c
class Game:
def __init__(self,
caption,
width,
height,
background_image_filename,
frame_rate,):
self.background_image = pygame.image.load(background_image_filename)
self.frame_rate = frame_rate
self.game_over = False
self.objects = []
pygame.init()
pygame.font.init()
self.surface = pygame.display.set_mode((width, height))
pygame.display.set_caption(caption)
self.clock = pygame.time.Clock()
self.keydown_handlers = defaultdict(list)
self.keyup_handlers = defaultdict(list)
self.mouse_handlers = []
def update(self):
for obj in self.objects:
obj.update()
def draw(self):
for obj in self.objects:
obj.draw(self.surface)
def handle_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
for handler in self.keydown_handlers[event.key]:
handler(event.key)
elif event.type == pygame.KEYUP:
for handler in self.keyup_handlers[event.key]:
handler(event.key)
elif event.type in (pygame.MOUSEBUTTONDOWN,
pygame.MOUSEBUTTONUP,
pygame.MOUSEMOTION):
for handler in self.mouse_handlers:
handler(event.type, event.pos)
def run(self):
while not self.game_over:
self.surface.blit(self.background_image, (0, 0))
pygame.draw.rect(self.surface, colors.SKY, [0, 0, c.SCREEN_WIDTH, c.ground_level])
pygame.draw.rect(self.surface, colors.GROUND_COLOR, [0,
c.ground_level,
c.SCREEN_WIDTH,
c.SCREEN_HEIGHT - c.ground_level])
self.handle_events()
self.update()
self.draw()
pygame.display.update()
self.clock.tick(self.frame_rate)
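# A minimal usage sketch (hypothetical subclass; the caption, asset path and
# frame rate are assumptions, config values come from config.py):
#   class BirdGame(Game):
#       def __init__(self):
#           super().__init__('Bird Game', c.SCREEN_WIDTH, c.SCREEN_HEIGHT,
#                            'background.png', frame_rate=60)
#   BirdGame().run()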
|
[
"lukabombala@gmail.com"
] |
lukabombala@gmail.com
|
7daa4f77d597e2b23f4f0a8015010ae7691443b4
|
3dcfe383c36d2c2af057b7128ad86f3a01554faa
|
/label_studio/data_manager/prepare_params.py
|
c0e902f7013616059e5c7e8d9afe30f1e3b580a4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ieso/label-studio
|
7306faa87894d1a72a6a33b9a316ccdfc4e80684
|
b3b451351e1751455f60cdf718c8e25914938a8a
|
refs/heads/master
| 2023-08-21T20:08:06.199925
| 2021-10-28T14:37:07
| 2021-10-28T14:37:07
| 359,428,836
| 0
| 0
|
Apache-2.0
| 2021-04-19T11:07:15
| 2021-04-19T11:07:13
| null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
from enum import Enum
from typing import List, Optional, Union
from pydantic import BaseModel, StrictInt, StrictFloat, StrictStr, StrictBool
class FilterIn(BaseModel):
min: Union[StrictInt, StrictFloat, StrictStr]
max: Union[StrictInt, StrictFloat, StrictStr]
class Filter(BaseModel):
filter: str
operator: str
type: str
value: Union[StrictInt, StrictFloat, StrictBool, StrictStr, FilterIn]
class ConjunctionEnum(Enum):
OR = 'or'
AND = 'and'
class Filters(BaseModel):
conjunction: ConjunctionEnum
items: List[Filter]
class SelectedItems(BaseModel):
all: bool
included: List[int] = []
excluded: List[int] = []
class PrepareParams(BaseModel):
project: int
ordering: List[str] = []
selectedItems: Optional[SelectedItems] = None
filters: Optional[Filters] = None
data: Optional[dict] = None
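# A minimal construction sketch (hypothetical filter name and values):
#   params = PrepareParams(
#       project=1,
#       ordering=["-id"],
#       filters=Filters(
#           conjunction=ConjunctionEnum.AND,
#           items=[Filter(filter="filter:tasks:id", operator="equal",
#                         type="Number", value=42)],
#       ),
#   )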
|
[
"noreply@github.com"
] |
ieso.noreply@github.com
|
266a781fba696f660205890b24d0f48ff854d213
|
2a957611907cad466da49f09aa16c61baa6a009b
|
/python/attacks/RSA/lsb_client.py
|
9f0def542438d810c311ac85258f9dd5c6f2fca8
|
[] |
no_license
|
alessandroguggino/Cryptography
|
6dea1552b988c7dd4507936dd53de9678e170bf0
|
ef5fa1c0fbfc0bcd16293b56b182fe1a92b80edc
|
refs/heads/master
| 2023-07-13T17:42:35.123088
| 2021-08-19T13:08:20
| 2021-08-19T13:08:20
| 276,401,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
from Crypto.PublicKey import RSA
from pwn import *
import os
os.environ['PWNLIB_NOTERM'] = 'True' # Configuration patch to allow pwntools to be run inside of an IDE
os.environ['PWNLIB_SILENT'] = 'True'
from mysecrets import HOST,PORT
from mysecrets import lsb_n as n, lsb_e as e
from mysecrets import lsb_ciphertext as ciphertext
from mysecrets import lsb_plaintext
def to_bytes(m,l=n.bit_length()):
return int.to_bytes(m, l, byteorder='big')
def to_int(b):
return int.from_bytes(b,byteorder='big')
def print_bounds(low, up):
print("[" + str(low) + "," + str(up) + "]")
# test the connection
# server = remote(HOST, PORT)
# server.send(to_bytes(ciphertext))
# bit = server.recv(1024)
# print(bit)
# server.close()
# loop
lower_bound = 0
upper_bound = n
print_bounds(lower_bound, upper_bound)
k = pow(2, e, n) # 2^e mod n
c = ciphertext
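# Idea: RSA is multiplicatively homomorphic, so Dec(2^e * c mod n) = 2m mod n.
# Since n is odd, the LSB of 2^i * m mod n reveals whether the running double
# wrapped past n (odd => wrapped => m in the upper half), so each oracle query
# halves the interval [lower_bound, upper_bound] containing m: a binary search
# finishing in n.bit_length() queries.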
for i in range(n.bit_length()):
c = (k * c) % n # c' = 2^e * m^e = (2m)^e
# interact with the LSB Oracle
server = remote(HOST, PORT)
server.send(to_bytes(c))
bit = server.recv(1024)
server.close()
#print(bit)
if bit[0] == 1: # 2m > n --> m is in [n/2,n]
lower_bound = (upper_bound+lower_bound) // 2
else: # 2m < n --> m is in [0, n/2]
upper_bound = (upper_bound+lower_bound) // 2
print_bounds(lower_bound, upper_bound)
print(to_bytes(lower_bound, n.bit_length()).decode())
print(to_bytes(upper_bound, n.bit_length()).decode())
# correction
print(lsb_plaintext - lower_bound)
final = lower_bound ^ 32
print(to_bytes(final, n.bit_length()).decode())
|
[
"noreply@github.com"
] |
alessandroguggino.noreply@github.com
|
0dcbadd58691ccd70835c98e2c3e1ec20f19ad96
|
44a220c7e3c022d7a403415fce80bcbd39c6b734
|
/testJCW/action/system/role/__init__.py
|
3b700e6d9372e2870f17a47e62c854ba63493195
|
[] |
no_license
|
zhuypy/AutoTest_UI
|
c078695fa7401503bdef9f83c373a2842442949e
|
7eee221d141b6907627bc208088407c2925bb17b
|
refs/heads/master
| 2022-04-19T23:34:59.898972
| 2020-04-16T03:19:40
| 2020-04-16T03:19:40
| 256,086,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 103
|
py
|
# -*- coding:utf-8 -*-
'''
@File : __init__.py.py
@Author :
@Date : 2019/6/5 14:48
@Desc :
'''
|
[
"1007531447@qq.com"
] |
1007531447@qq.com
|
112a1b440ce085e0d4c6c05c5a44285efa8ab6d2
|
18a79067223932c2f7aa6ff6b81d0b3f36169db2
|
/codeforces/1512/E.py
|
e637a3e6e94ab8d1578276e88b1467c08879b483
|
[] |
no_license
|
aadiupadhyay/CodeForces
|
894b0e5faef73bfd55a28c2058fb0ca6f43c69f9
|
76dac4aa29a2ea50a89b3492387febf6515cf43e
|
refs/heads/master
| 2023-04-12T17:58:52.733861
| 2021-05-07T20:08:00
| 2021-05-11T20:07:11
| 330,149,645
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,308
|
py
|
# aadiupadhyay
import os.path
from math import gcd, floor, ceil
from collections import *
import sys
mod = 1000000007
INF = float('inf')
def st(): return list(sys.stdin.readline().strip())
def li(): return list(map(int, sys.stdin.readline().split()))
def mp(): return map(int, sys.stdin.readline().split())
def inp(): return int(sys.stdin.readline())
def pr(n): return sys.stdout.write(str(n)+"\n")
def prl(n): return sys.stdout.write(str(n)+" ")
if os.path.exists('input.txt'):
sys.stdin = open('input.txt', 'r')
sys.stdout = open('output.txt', 'w')
def solve():
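    # Build a permutation of 1..n whose segment of length ele = b-a+1 sums to
    # `total`: feasible iff ele*(ele+1)//2 <= total <= sum of the ele largest
    # values. Greedily take the largest remaining i while the other ele-1
    # segment slots can still reach the leftover sum (>= ele*(ele-1)//2).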
n, a, b, total = mp()
s = set(range(1, n+1))
ele = b-a+1
if total < ele*(ele+1)//2:
pr(-1)
return
nsum = n*(n+1)//2
left = n-ele
leftsum = left*(left+1)//2
have = nsum-leftsum
if total > have:
pr(-1)
return
cur = []
i = n
while total:
w = total - i
if w >= ele*(ele-1)//2:
cur.append(i)
total -= i
s.discard(i)
ele -= 1
i -= 1
l = []
for i in range(a-1):
p = max(s)
s.discard(p)
l.append(p)
for i in range(b+1, n+1):
p = max(s)
s.discard(p)
cur.append(p)
l += cur
print(*l)
for _ in range(inp()):
solve()
|
[
"upadhyay.aaditya2001@gmail.com"
] |
upadhyay.aaditya2001@gmail.com
|
c689e220591bd622b0744eddaaea4c26b8a2cbf0
|
fdfffa8cacb572a157ead4a9723f90b25ecfe50c
|
/modules/ducktests/tests/ignitetest/services/utils/jmx_remote/jmx_remote_params.py
|
077f7c56c4923a67e1a9b10fef4531ecdd118dcd
|
[
"Apache-2.0",
"LicenseRef-scancode-gutenberg-2020",
"CC0-1.0",
"BSD-3-Clause"
] |
permissive
|
apache/ignite
|
0bc83435a8db46d9c4df000fe05b1c70165b37d4
|
dbf1c7825d74809cd6859c85a8ac9ed9ac071e39
|
refs/heads/master
| 2023-08-31T21:31:04.618489
| 2023-08-31T19:43:09
| 2023-08-31T19:43:09
| 31,006,158
| 4,806
| 2,308
|
Apache-2.0
| 2023-09-14T18:56:33
| 2015-02-19T08:00:05
|
Java
|
UTF-8
|
Python
| false
| false
| 1,940
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from typing import NamedTuple
ENABLED = "enabled"
JMX_REMOTE_KEY_NAME = "jmx_remote"
JMX_REMOTE_PORT_KEY_NAME = "port"
JMX_REMOTE_DEFAULT_PORT = 1098
class JmxRemoteParams(NamedTuple):
"""
Params for JMX Remote.
If enabled the Ignite node exposes JMX endpoint to non-local hosts via the provided port.
Port is optional. If omitted the JMX_REMOTE_DEFAULT_PORT is used.
"""
enabled: bool
port: int = JMX_REMOTE_DEFAULT_PORT
def get_jmx_remote_params(_globals: dict):
"""
Gets JMX Remote params from Globals. Format is like below (port field is optional):
{
"jmx_remote": {
"enabled": true
"port": 1098
}
}
:param _globals: Globals parameters
:return: instance of JmxRemoteParams
"""
if JMX_REMOTE_KEY_NAME in _globals and _globals[JMX_REMOTE_KEY_NAME].get(ENABLED, False):
return JmxRemoteParams(enabled=True,
port=_globals[JMX_REMOTE_KEY_NAME].get(JMX_REMOTE_PORT_KEY_NAME,
JMX_REMOTE_DEFAULT_PORT))
else:
return JmxRemoteParams(enabled=False)
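# Usage sketch (the globals dict below is hypothetical):
#
#     params = get_jmx_remote_params({"jmx_remote": {"enabled": True}})
#     assert params.enabled and params.port == JMX_REMOTE_DEFAULT_PORT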
|
[
"noreply@github.com"
] |
apache.noreply@github.com
|
a966845dc6ef285450d71ea59e2fc297c7b494fd
|
590935ad561449559989fce5a85fb8df691b898d
|
/manage.py
|
d6c7e0e96d0785bb7292da5f3051281363df378c
|
[] |
no_license
|
yuki-katayama/Django_bordproject_udemy
|
22aa5b2e6b4c00d389502dd1b193071290b7c68d
|
56b8ca8325e48dfb580ab0b7af2872317a6608f7
|
refs/heads/master
| 2022-12-13T13:11:23.555227
| 2020-09-13T03:10:26
| 2020-09-13T03:10:26
| 295,062,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bordproject.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"katayu810@gmail.com"
] |
katayu810@gmail.com
|
7c8e6559b705deb86447c305af13ca3973998111
|
0f08ac954da42af79458685fc314e14426aa3f46
|
/djangevent/urls.py
|
8f5f01b5ca894b22b9fd033229b8240f8338fd9b
|
[] |
no_license
|
serkansokmen/djangevent
|
06fec64f8714e76e3125dc591e7ea411360f17d7
|
aec589d4685caf28898f813f444e9d9e6dd7fe56
|
refs/heads/master
| 2022-07-09T02:12:55.096261
| 2013-07-09T18:42:01
| 2013-07-09T18:42:01
| 11,156,782
| 0
| 0
| null | 2020-04-07T17:09:16
| 2013-07-03T17:19:18
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
from django.contrib import admin
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
admin.autodiscover()
# See: https://docs.djangoproject.com/en/dev/topics/http/urls/
urlpatterns = patterns(
'',
# Admin panel and documentation:
url(r'^admin/', include(admin.site.urls)),
url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
url(r'^api/', include('apps.events.urls')),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
)
if settings.DEBUG:
urlpatterns += patterns(
'',
(r'^404/$', 'django.views.defaults.page_not_found'),
(r'^500/$', 'django.views.defaults.server_error'),
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
if 'rosetta' in settings.INSTALLED_APPS:
urlpatterns += patterns(
'',
url(r'^rosetta/', include('rosetta.urls')),
)
|
[
"e.serkan.sokmen@gmail.com"
] |
e.serkan.sokmen@gmail.com
|
61aef98a7fed6c913b99e2514a19938460365174
|
c32a225b013190f047793a083006802d9e9306eb
|
/apidocs/ApiXML2Trac.py
|
95b909506088b7f30f0bfe03ecbf27603c990c98
|
[] |
no_license
|
coderbyheart/hsrm-mi-wtf
|
531312ef3c64801e86f63891a42e2a844fd3d103
|
6372aa9e5c308308198fc3cc859689fb23d5f858
|
refs/heads/master
| 2016-09-06T04:23:11.363670
| 2011-11-02T13:52:20
| 2011-11-02T13:52:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,007
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
'''Converts the interface definition (XML) into a Trac wiki page
@author Markus Tacker <m@tacker.org>'''
import re
from xml.dom.minidom import parse, parseString
from minidomutil import domGetText
def maskWikiPageNames(*strings):
ret = []
for string in strings:
        if string is None:
continue
if not re.match('^[a-z]', string):
string = re.sub('([A-Z][a-z]+[A-Z][a-z]+)', "!\\1", str(string))
ret.append(string)
return ret[0] if len(ret) == 1 else tuple(ret)
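# Example: maskWikiPageNames("CamelCase") returns "!CamelCase" (escaping
# Trac's automatic WikiPageName linking); "plain" is returned unchanged.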
class BaseType(object):
    'Base class for all types'
todo = False
class SimpleType(BaseType):
    'Defines a simple data type'
def __repr__(self):
return 'SimpleType(%s)' % self.identifier
class IntegerType(SimpleType):
    'Defines an integer'
identifier = 'Integer'
class StringType(SimpleType):
    'Defines a string'
identifier = 'String'
class BooleanType(SimpleType):
    'Defines a boolean'
identifier = 'Boolean'
class FloatType(SimpleType):
    'Defines a float'
identifier = 'Float'
class DateTimeType(StringType):
    'Defines a date'
identifier = 'DateTime'
class BinaryType(SimpleType):
    'Defines binary data'
identifier = 'binary'
class DictionaryType(SimpleType):
    'Defines dictionaries'
identifier = 'Dictionary'
class ObjectType(SimpleType):
    'Defines objects'
identifier = 'Object'
class EnumTypeException(Exception):
    'Exception for the EnumType class'
class EnumType(BaseType):
    'Defines enums'
def __init__(self, identifier):
self.identifier = identifier
self.values = {}
def addValue(self, key, description):
if key in self.values:
raise EnumTypeException("Value %s already defined" % key)
self.values[key] = description
class ComplexType(BaseType):
    'Defines a complex type'
def __init__(self, type, identifier):
if type not in ('Object', 'Dictionary'):
raise ComplexTypeException('A complex type must be either Object or Dictionary')
self.isobject = type == 'Object'
self.identifier = identifier
def __repr__(self):
return 'ComplexType(%s)' % self.identifier
@property
def properties(self):
return self._properties
@properties.setter
def properties(self, props):
if len(props) == 0:
raise ComplexTypeException('Properties must not be empty')
self._properties = props
class ComplexTypeException(Exception):
    'Exception for the ComplexType class'
class Property(object):
    'Defines a property'
islist = False
isoptional = False
isproperty = False
example = '-'
def __init__(self, identifier):
self.identifier = identifier
def __repr__(self):
return 'Property(%s)' % self.identifier
class ActionGroup(object):
    'Defines a group of actions, corresponding to a namespace'
todo = False
def __init__(self, identifier):
self.identifier = identifier
class Action(object):
    'Defines an action'
todo = False
def __init__(self, identifier, group):
self.identifier = identifier
self.group = group
class SchnittstellenXMLException(Exception):
    'Exception for SchnittstellenXML'
class SchnittstellenXML(object):
    'Represents the interface definition stored in an XML file.'
def __init__(self, xmlfile):
self.dom = parse(xmlfile)
        # Build a dictionary of type definitions
self.types = {}
for item in self.dom.getElementsByTagName('simpletype'):
self.createSimpleType(item)
for item in self.dom.getElementsByTagName('enum'):
self.createEnumType(item)
for item in self.dom.getElementsByTagName('complextype'):
self.createComplexType(item)
def createSimpleType(self, item):
        'Creates a simple data type from an XML element'
t = item.getAttribute('name')
if t == 'String':
type = StringType()
elif t == 'Float':
type = FloatType()
elif t == 'Integer':
type = IntegerType()
elif t == 'DateTime':
type = DateTimeType()
elif t == 'Boolean':
type = BooleanType()
elif t == 'binary':
type = BinaryType()
else:
raise SchnittstellenXMLException('Unknown type: %s' % t)
if type.identifier in self.types:
raise SchnittstellenXMLException('Already defined: %s' % type.identifier)
self.types[type.identifier] = type
type.description = domGetText(item.getElementsByTagName('description')[0])
type.example = domGetText(item.getElementsByTagName('example')[0])
todo = item.getElementsByTagName('todo')
if len(todo) > 0:
type.todo = domGetText(todo[0])
def createEnumType(self, item):
        'Creates an enum from an XML element'
type = EnumType(item.getAttribute('name'))
if type.identifier in self.types:
raise SchnittstellenXMLException('Already defined: %s' % type.identifier)
self.types[type.identifier] = type
type.description = domGetText(item.getElementsByTagName('description')[0])
type.example = domGetText(item.getElementsByTagName('example')[0])
todo = item.getElementsByTagName('todo')
if len(todo) > 0:
type.todo = domGetText(todo[0])
items = item.getElementsByTagName('items')[0]
for item in items.getElementsByTagName('item'):
type.addValue(item.getAttribute('value'), domGetText(item.getElementsByTagName('description')[0]))
def createComplexType(self, item):
        'Creates a complex data type from an XML element'
type = ComplexType(item.getAttribute('type'), item.getAttribute('name'))
if type.identifier in self.types:
raise SchnittstellenXMLException('Already defined: %s' % type.identifier)
self.types[type.identifier] = type
type.description = domGetText(item.getElementsByTagName('description')[0])
todo = item.getElementsByTagName('todo')
if len(todo) > 0:
type.todo = domGetText(todo[0])
type.properties = self.getProperties(item)
def getProperties(self, item):
        'Creates a dictionary of properties from an XML element'
props = {}
for p in item.getElementsByTagName('property'):
prop = self.getProperty(p)
if prop.identifier in props:
raise SchnittstellenXMLException("Property already defined: %s\n%s" % (prop.identifier, item.toxml()))
props[prop.identifier] = prop
return props
def getProperty(self, item):
        'Creates a property from an XML element'
property = Property(item.getAttribute('name'))
property.type = self.types[item.getAttribute('type')]
property.description = item.getAttribute('description')
multiple = item.getAttribute('multiple')
if multiple == "true":
property.islist = True
optional = item.getAttribute('optional')
if optional == "true":
property.isoptional = True
example = item.getElementsByTagName('example')
if len(example) > 0:
property.example = domGetText(example[0])
else:
            # fall back to the base type's default example
if not isinstance(property.type, ComplexType):
property.example = property.type.example
todo = item.getElementsByTagName('todo')
if len(todo) > 0:
property.todo = domGetText(todo[0])
return property
def getGroupedActions(self):
        'Creates a dictionary of the grouped actions'
self._groupedActions = {}
for ag in self.dom.getElementsByTagName('group'):
actionGroup = ActionGroup(ag.getAttribute('name'))
actionGroup.description = domGetText(ag.getElementsByTagName('description')[0])
todo = ag.getElementsByTagName('todo')
if len(todo) > 0:
actionGroup.todo = domGetText(todo[0])
actionGroup.actions = self.getActions(actionGroup, ag)
if actionGroup.identifier in self._groupedActions:
raise SchnittstellenXMLException("Action group already defined: %s\n%s" % (actionGroup.identifier, item.toxml()))
self._groupedActions[actionGroup.identifier] = actionGroup
return self._groupedActions
def getActions(self, group, item):
        'Creates a dictionary of the actions defined in item'
actions = {}
for actionItem in item.getElementsByTagName('action'):
action = Action(actionItem.getAttribute('name'), group)
action.description = domGetText(actionItem.getElementsByTagName('description')[0])
action.inServer = actionItem.getAttribute("inServer") == "true"
action.inClient = actionItem.getAttribute("inClient") == "true"
action.messageType = actionItem.getAttribute("messageType")
todo = actionItem.getElementsByTagName('todo')
if len(todo) > 0:
action.todo = domGetText(todo[0])
if action.identifier in actions:
raise SchnittstellenXMLException("Action already defined: %s\n%s" % (action.identifier, item.toxml()))
actions[action.identifier] = action
action.request = self.getProperties(actionItem.getElementsByTagName('request')[0])
action.response = self.getProperties(actionItem.getElementsByTagName('response')[0])
notification = actionItem.getElementsByTagName('notification')
if notification:
action.notification = self.getProperties(notification[0])
else:
action.notification = None
return actions
def writeProperties(properties, out):
out.write("||**Name**||**Typ**||**Beschreibung**||**Beispiel**||\n")
for p in sorted(properties):
prop = properties[p]
out.write("||%s{{{%s}}}||[#%s %s]%s||%s||{{{%s}}}||\n" % ("(optional) " if prop.isoptional else "", maskWikiPageNames(prop.identifier), prop.type.identifier, prop.type.identifier, maskWikiPageNames('[]' if prop.islist else ''), maskWikiPageNames(prop.description), maskWikiPageNames(prop.example)))
if __name__ == '__main__':
import io
import sys
import os
import xmlrpc.client
import configparser
ini_file = os.path.realpath(os.path.dirname(sys.argv[0]) + os.sep + 'trac.ini')
    config = configparser.ConfigParser()
config.add_section('trac')
config.set('trac', 'url', '')
if os.path.isfile(ini_file):
config.read(ini_file)
if config.get('trac', 'url') == '' and len(sys.argv) < 2:
sys.stderr.write("Missing arguments.\n")
sys.stderr.write("Usage: %s <trac url> [xml file]\n" % sys.argv[0])
sys.exit(1)
if len(sys.argv) > 1:
config.set('trac', 'url', sys.argv[1])
s = SchnittstellenXML(sys.argv[2] if len(sys.argv) > 2 else os.path.realpath(os.path.dirname(sys.argv[0]) + os.sep + '../Schnittstellen/schnittstellen.xml'))
groups = s.getGroupedActions()
out = io.StringIO()
out.write("[[PageOutline()]]\n")
    # Generate the type overview
out.write("= Datentypen =\n")
out.write("Diese Datentypen werden von den Methoden der API zum Datenaustausch verwendet.\n")
out.write("== Einfache Datentypen ==\n")
for st in s.types:
type = s.types[st]
if not isinstance(type, SimpleType):
continue
out.write("=== %s ===\n%s\n\nBeispiel: {{{%s}}}\n" % maskWikiPageNames(type.identifier, type.description, type.example))
if type.todo:
out.write("\n||[[Image(source:2011swtpro01/Project/Material/Icons/woo/warning_32.png)]]||%s||\n\n" % type.todo)
out.write("----\n\n")
out.write("== Enums ==\n")
for et in s.types:
type = s.types[et]
if not isinstance(type, EnumType):
continue
out.write("=== %s ===\n%s\n\nBeispiel: {{{%s}}}\n" % maskWikiPageNames(type.identifier, type.description, type.example))
if type.todo:
out.write("\n||[[Image(source:2011swtpro01/Project/Material/Icons/woo/warning_32.png)]]||%s||\n\n" % type.todo)
out.write("\n**Werte**\n\n")
out.write("||**Übertragener Wert**||**Beschreibung**||\n")
for value in type.values:
out.write("||{{{%s}}}||%s||\n" % (value, type.values[value]))
out.write("----\n\n")
for ct in sorted(s.types):
type = s.types[ct]
if not isinstance(type, ComplexType):
continue
out.write("== %s ==\n" % maskWikiPageNames(type.identifier))
if type.todo:
out.write("\n||[[Image(source:2011swtpro01/Project/Material/Icons/woo/warning_32.png)]]||%s||\n\n" % type.todo)
out.write("(//%s//) %s\n" % ('Object' if type.isobject else 'Dictionary', maskWikiPageNames(type.description)))
out.write("\n**%s**\n\n" % ('Attribute' if type.isobject else 'Schlüssel'))
writeProperties(type.properties, out)
out.write("----\n\n")
    # Generate the list of API methods
out.write("= API =\n")
out.write("Nachfolgend findet sich die Liste der Methoden.\n\n")
out.write("⚑ Eine schwarze Fahne vor einem Schnittstellennamen bedeutet, dass dieses Schnittstelle serverseitig implementiert wurde.\n\n")
out.write("⚐ Eine weiße Fahne vor einem Schnittstellennamen bedeutet, dass dieses Schnittstelle clientseitig implementiert wurde. Siehe #316.\n")
for ag in sorted(groups):
group = groups[ag]
out.write("== %s ==\n%s\n" % maskWikiPageNames(group.identifier, group.description))
if group.todo:
out.write("||[[Image(source:2011swtpro01/Project/Material/Icons/woo/warning_32.png)]]||%s||\n" % group.todo)
for a in sorted(group.actions):
action = group.actions[a]
out.write("=== %s%s%s.%s() ===\n%s\n" % maskWikiPageNames("⚑ " if action.inServer else "", "⚐ " if action.inClient else "", group.identifier, action.identifier, action.description))
if action.todo:
out.write("||[[Image(source:2011swtpro01/Project/Material/Icons/woo/warning_32.png)]]||%s||\n" % action.todo)
if action.messageType:
out.write("\n\nMessage-Type: {{{%s}}}\n\n" % action.messageType)
out.write("\n** Request **\n")
writeProperties(action.request, out)
            # the default answer is of type Response; merge in the properties of this action's response
out.write("\n** Response **\n")
response = s.types['Response']
writeProperties(dict(response.properties, **action.response), out)
if action.notification:
out.write("\n** Notification **\n")
writeProperties(action.notification, out)
out.write("\n** Tickets **\n")
out.write("[[TicketQuery(component=Schnittstellen&summary~=%s.%s())]]\n" % maskWikiPageNames(group.identifier, action.identifier))
# Upload
trac_url = config.get('trac', 'url')
if trac_url[-1] != "/":
trac_url += "/"
server = xmlrpc.client.ServerProxy("%slogin/xmlrpc" % trac_url)
try:
server.wiki.putPage('SchnittStellen', out.getvalue(), {'comment': 'Automatically updated by cron.'})
except xmlrpc.client.Fault as e:
if e.faultString != "'Page not modified' while executing 'wiki.putPage()'":
raise e
|
[
"m@tacker.org"
] |
m@tacker.org
|
494865951ad7578bb7e96643d525034b2c9a82cd
|
c24c6ac0346b7f7d674a5624cc92a4cc30d97a49
|
/1015_Distancia_Entre_Dois_Pontos.py
|
5cb40dc62769429c2a7de3185617d0a90f294dd7
|
[] |
no_license
|
wilmarv/uri.python
|
8b5bb311f6e4be8464d250ef6451a5f910df1c9c
|
6a6b4179d49488b24b5f238be26a0605360ea304
|
refs/heads/master
| 2022-11-14T10:33:11.402851
| 2020-07-05T05:28:31
| 2020-07-05T05:28:31
| 277,236,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
x1, y1 = map(float,input().split())
x2, y2 = map(float,input().split())
d = ((x2-x1)**2+(y2-y1)**2)**(1/2)
print('{:.4f}'.format(d))
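# Worked example: the points (0, 0) and (3, 4) give d = sqrt(3**2 + 4**2),
# printed as 5.0000.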
|
[
"wilmarfonseca@gec.inatel.br"
] |
wilmarfonseca@gec.inatel.br
|
6316fa8877f8856c44caa9be35ac489e87bad10c
|
bbb89d13318df191b83716ad28633c6dd87147a5
|
/ciandt_next_gen_2022/desafio_04.py
|
69bee09bef6895a10eded6d1f59c28037f2a5b67
|
[] |
no_license
|
matheusvictor/estudos_python
|
50745522d2801fd5e9c2c3307eb251c1f18dcdbd
|
627c01a5e89192388fb5c34f5fdccbc7a3129d9f
|
refs/heads/master
| 2022-10-28T09:00:52.972993
| 2022-10-06T17:45:28
| 2022-10-06T17:45:28
| 192,107,427
| 5
| 0
| null | 2022-10-05T18:09:22
| 2019-06-15T17:43:49
|
Python
|
UTF-8
|
Python
| false
| false
| 149
|
py
|
def retorna_tempo_arena_em_milisegundos(distancia,velocidade):
d = float(distancia) * 1000
tempo = (d / velocidade) * 1000
return round(tempo)
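# Usage sketch (assumed units: distancia in km, velocidade in m/s):
# retorna_tempo_arena_em_milisegundos(1, 5) -> (1000 m / 5 m/s) * 1000 = 200000 ms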
|
[
"matheusvictor.salles@gmail.com"
] |
matheusvictor.salles@gmail.com
|
84b1f5be8c044f324a8b97d54ad96e6373dfd092
|
ce1a37c3135fcdcf22d29c7e1951aae55fada9de
|
/data/Keyword Extractions/Extracted/subsetKeywordRemover.py
|
c8ee5a2b8ee6c9ccf7323ee5df4a0a91aed9df5f
|
[] |
no_license
|
thiranja/research-git-repo
|
d62e1cec34375ffd6c251227917c0d6740e7216d
|
9b167a34533860d5616c93183fedf6c629c8fc86
|
refs/heads/main
| 2023-03-26T14:25:59.395092
| 2021-03-25T23:47:13
| 2021-03-25T23:47:13
| 323,130,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
# keywords = ["This", "this","This is", "This is not","This is the","This is the keyword","This is the","This the"]
# newKeywords = []
def isSubphrase(subphrase, phrase):
    # bag-of-words check: True if every word of `subphrase` occurs in `phrase`
    subphraseArray = subphrase.split(" ")
    phraseArray = phrase.split(" ")
    for word in subphraseArray:
        if word not in phraseArray:
            return False
    return True
def removeSubstringKeywords(keywords, newKeywords):
    for keyword in keywords:
        # skip this keyword if an already-kept phrase covers it
        if any(isSubphrase(keyword, kept) for kept in newKeywords):
            continue
        # drop every kept phrase covered by this keyword (there can be several;
        # the original break-based loop removed at most one per keyword)
        newKeywords[:] = [kept for kept in newKeywords if not isSubphrase(kept, keyword)]
        newKeywords.append(keyword)
if __name__ == "__main__":
    keywords = ["This", "this", "This is", "This is not", "This is the",
                "This is the keyword", "This is the", "This the"]
    newKeywords = []
    removeSubstringKeywords(keywords, newKeywords)
    print(newKeywords)
|
[
"lakrandikathiranja@gmail.com"
] |
lakrandikathiranja@gmail.com
|
a89d7d3d33529ddf1bb165e3392fb2f540690c05
|
8140618782d8e1496b7f829aae4c8952612edea5
|
/31-01-2018/variables.py
|
b6cbb2fceff3d7d25d58505bb762211be0e78bde
|
[] |
no_license
|
psenderski/projektpython
|
cc177dfabb2d5e2dac06e224f6e6560fcd894008
|
ce85204a5863f4e423d26eabde25587c834559b9
|
refs/heads/master
| 2021-05-04T05:47:40.020785
| 2018-02-28T19:14:55
| 2018-02-28T19:14:55
| 120,345,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
# int
age = 26
# float
temperature = 7.4
# string
name = 'Przemek'
# bool
programmer = True
# NoneType
nothing = None
print(age)
print(type(age))
print(temperature)
print(name)
# @todo: print the remaining variables to the screen
age = 52
print(age)
print(type(age))
age = 'something'
print(age)
print(type(age))
# @todo: print the type of every variable
|
[
"psenderski@interia.pl"
] |
psenderski@interia.pl
|
5a8bc6fffdcb844844db90c55d84a0a4153e8429
|
bb3454e9011e82bc5fdc824edb73f82e40a7b7d4
|
/get_people.py
|
1dbaf570c3964eb0e2d8c6a623ddcd9910c5773d
|
[] |
no_license
|
dazhaoniel/datascience-linkedin
|
7f5e6bbb43fc897a4d94cdb168904f964f0c7363
|
c6fef3cd8d545db93d4738978e7b57f9aa9d9c26
|
refs/heads/master
| 2018-12-28T10:22:08.872729
| 2013-03-30T02:36:24
| 2013-03-30T02:36:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,436
|
py
|
# Author: Daniel Zhao
# File: get_people.py
# View database at http://localhost:5984/_utils/index.html
import sys
import time
import couchdb
import httplib, json
from couchdb.design import ViewDefinition
from time import gmtime, strftime
from login import login
# Linkedin Industry Code: https://developer.linkedin.com/documents/industry-codes
# Industry: Retail
INDUSTRY_CODE = '27'
INDUSTRY_NAME = 'retail'
MAX_RESULTS = 5000
# Establish a connection to a CouchDB database
server = couchdb.Server('http://localhost:5984')
DB = 'job-people-%s' % ( INDUSTRY_NAME, )
DB2 = 'job-people-%s-meta' % ( INDUSTRY_NAME, )
try:
db = server.create(DB)
except couchdb.http.PreconditionFailed, e:
# Already exists, so append to it, keeping in mind that duplicates could occur
db = server[DB]
try:
db2 = server.create(DB2)
except couchdb.http.PreconditionFailed, e:
# Already exists, so append to it, keeping in mind that duplicates could occur
db2 = server[DB2]
client = login()
start = 0
while start <= MAX_RESULTS:
url = "http://api.linkedin.com/v1/people-search?facet=industry,"+ INDUSTRY_CODE +"&facet=current-company,null&count=20&start="+ str(start) +"&format=json"
resp, content = client.request(url)
# print resp
# print content
db2.save( resp ) # This worked
db.save( json.loads(content) )
print strftime("%Y-%m-%d %H:%M:%S", gmtime()) + ' - collected 20 results'
start += 20
|
[
"danielantoiny@gmail.com"
] |
danielantoiny@gmail.com
|
8c5cc85f7827a115e7c88d2a50bc47445bf21922
|
e1f164addc9a286a9fb1bcfba81694a1158279d9
|
/sphinx_django/admin.py
|
752e88fe98c65d4a8ba93f7b2ac1eab39a5b1cd6
|
[] |
no_license
|
dusual/simplecomment
|
0a013b3c93a811c89cdd8bc3de6f4fa23c763d32
|
b51a695a961a6db777b7d275358726f2cfb8f559
|
refs/heads/master
| 2016-09-06T03:49:45.532776
| 2012-12-08T08:43:04
| 2012-12-08T08:43:04
| 1,317,298
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
from django.contrib import admin
from sphinx_django.sphinxcomment.models import Comment, Element
class CommentAdmin(admin.ModelAdmin):
list_display = ['element', 'submitter_name', 'comment', 'reviewed',
'hidden', 'date']
    date_hierarchy = 'date'
    list_filter = ['date', 'submitter_name']
    # merged the two duplicate search_fields definitions (the second silently
    # overrode the first)
    search_fields = ['comment', 'title', 'submitter_name', 'submitter_url']
fieldsets = (
(None, {'fields': ('submitter_name', 'element', 'comment')}),
('Review and presentation state', {'fields': ('reviewed', 'hidden')}),
('Other info', {'fields': ('submitter_url', 'ip')}),
)
# XXX: adding 'date' to the 'Other info' fieldset results in a
# ImproperlyConfigured error. :S
class ElementAdmin(admin.ModelAdmin):
search_fields = ['id', 'chapter_name']
list_filter = ['chapter_name', 'title']
admin.site.register(Comment, CommentAdmin)
admin.site.register(Element, ElementAdmin)
|
[
"amit.pureenergy@gmail.com"
] |
amit.pureenergy@gmail.com
|
9a9382a25e944f00f75d61e0510937d8257b50f3
|
bf11fa1b21d90cab8abd4b066fdc1659937df6e1
|
/remote_works/graphql/delivery/resolvers.py
|
593862c08f86b1ec3350fd994c6a0a23e0d407ad
|
[
"BSD-3-Clause"
] |
permissive
|
tetyanaloskutova/remote-works
|
818f2caec788e4f197f49f3a5dc5f418c37dc601
|
b3bb51e9c0c4c2febf4aa1e2a7d893e77c331e89
|
refs/heads/master
| 2022-02-23T00:08:40.210012
| 2022-02-14T20:53:36
| 2022-02-14T20:53:36
| 173,939,582
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
import graphene_django_optimizer as gql_optimizer
from ...delivery import models
def resolve_delivery_zones(info):
qs = models.DeliveryZone.objects.all()
return gql_optimizer.query(qs, info)
|
[
"tetyana.loskutova@gmail.com"
] |
tetyana.loskutova@gmail.com
|
e9e13bd21b0a5ebe14450be1be54f85d93b4c0ab
|
066da3596d7534b7eb2b37e826102152f97a5f67
|
/centinel/experiments/tcp_connect.py
|
21fc0e170ed8a80330a9d5453c73ef59ea2ffdbd
|
[
"MIT"
] |
permissive
|
gsathya/blocker
|
7f26ac9234cc666a93caa1190c6b77da4fcdf974
|
80723892c309ad08f51067dba431072ea01578da
|
refs/heads/master
| 2021-01-10T21:15:12.843761
| 2014-05-08T22:27:50
| 2014-05-08T22:27:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
import socket
class TCPConnectExperiment:
name = "tcp_connect"
def __init__(self, input_file):
self.input_file = input_file
self.results = []
self.host = None
self.port = None
def run(self):
for line in self.input_file:
self.host, self.port = line.strip().split(' ')
self.tcp_connect()
def tcp_connect(self):
result = {
"host" : self.host,
"port" : self.port
}
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, int(self.port)))
sock.close()
result["success"] = "true"
except Exception as err:
result["failure"] = str(err)
self.results.append(result)
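# Hedged usage sketch; the input file format ("host port" per line) is
# inferred from run() above:
#
#     with open("targets.txt") as input_file:
#         experiment = TCPConnectExperiment(input_file)
#         experiment.run()
#         print(experiment.results)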
|
[
"gsathya.ceg@gmail.com"
] |
gsathya.ceg@gmail.com
|
35ba0136a87d9efe1bdf81b27723c16b5585aba2
|
9545652800884f0e54fe6595d8634c29ea4827a2
|
/每日一题/2020_09_07_前K个高频元素.py
|
e836ac1a9de16303533f5a54052614466169f89e
|
[] |
no_license
|
challeger/leetCode
|
662d9f600a40fd8970568679656f6911a6fdfb05
|
d75c35b6f8ab33c158de7fa977ab0b16dac4fc25
|
refs/heads/master
| 2023-01-13T07:34:42.464959
| 2020-11-13T02:40:31
| 2020-11-13T02:40:31
| 286,426,790
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
"""
day: 2020-09-07
url: https://leetcode-cn.com/problems/top-k-frequent-elements/
题目名: 前k个高频元素
给定一个非空的整数数组, 返回其中出现频率前 k 高的元素
思路:
记录出现次数,排序,输出.
"""
from typing import List
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
from collections import Counter
counter = Counter(nums)
foo = sorted(counter.items(), key=lambda x: x[1], reverse=True)
res = []
for i in range(k):
res.append(foo[i][0])
return res
if __name__ == "__main__":
test = [1, 1, 1, 2, 2, 3]
s = Solution()
print(s.topKFrequent(test, 2))
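# Added note: collections.Counter already provides this ordering, so the
# method body above reduces to the equivalent one-liner:
#
#     def topKFrequent(self, nums: List[int], k: int) -> List[int]:
#         return [x for x, _ in Counter(nums).most_common(k)]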
|
[
"799613500@qq.com"
] |
799613500@qq.com
|
0575ea7082474b0773a69f03f5ee29c2dee9f02b
|
6fa6060026a6165f47aad2e83d0c0621f39b3900
|
/Turbot_data/Moments_models/2pops/fold_SC_ae_b.py
|
ce751ceeb0fdeb5ed9d45f56a04d1b848b27a73d
|
[] |
no_license
|
heroalone/Demographic-Modelling
|
f07b645a0ee9e211d2c8230f024eff664f794b89
|
809458dc011bf5515b7502eacbf3aeb6befcfa50
|
refs/heads/master
| 2023-03-17T02:12:32.935120
| 2021-03-11T09:14:31
| 2021-03-11T09:14:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,046
|
py
|
#!/usr/bin/env python
# Secondary contact model: ancestral expansion, split, bottleneck and growth in the Baltic Sea, asymmetric migration following secondary contact
# n(para): 9
import matplotlib
matplotlib.use('PDF')
import moments
import random
import pylab
import matplotlib.pyplot as plt
import numpy as np
from numpy import array
from moments import Misc,Spectrum,Numerics,Manips,Integration,Demographics1D,Demographics2D
import sys
infile=sys.argv[1]
pop_ids=[sys.argv[2],sys.argv[3]]
projections=[int(sys.argv[4]),int(sys.argv[5])]
params=[1,1,1,0.1,1,1,1,1,1]
dd = Misc.make_data_dict(infile)
data = Spectrum.from_data_dict(dd, pop_ids,projections,polarized=False)
ns=data.sample_sizes
np.set_printoptions(precision=3)
#-------------------
# split with growth and asymmetrical migration; with genomic islands
def SC_ae_b(params, ns):
"""
    nu1= pop size after ancestral expansion (this remains constant for the North Sea population after the split)
s=proportion of the North Sea pop which invaded the Baltic (i.e. original bottleneck)
nu2= final size of Baltic Sea pop
Tae= timing of ancestral population expansion
T1= time of population split
T2= time of secondary contact and start of population growth in the Baltic Sea
m12= migration rate from North Sea to Baltic
m21= migration rate from Baltic Sea to North Sea
"""
nu_ae,nu1,nu2,s,Tae,T1,T2,m12,m21 = params
nu2_0 = nu1*s
nu2_func = lambda t: nu2_0 * (nu2/nu2_0)**(t/T2)
nu_func= lambda t: [nu1,nu2_func(t)]
# calculate the spectrum
sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1])
fs = moments.Spectrum(sts)
fs.integrate([nu_ae], Tae)
fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1])
fs.integrate([nu1, nu1*s], T1, m = np.array([[0, 0], [0, 0]]))
fs.integrate(nu_func, T2, dt_fac=0.01, m=np.array([[0, m12], [m21, 0]]))
return fs
func=SC_ae_b
upper_bound = [100,100,100,0.999,10,10,10,200,200]
lower_bound = [1e-3,1e-3,1e-3,1e-3,1e-3,1e-3,1e-3,1e-5,1e-5]
params = moments.Misc.perturb_params(params, fold=int(sys.argv[6]), upper_bound=upper_bound,
lower_bound=lower_bound)
# fitting (poptg = optimal parameters):
poptg = moments.Inference.optimize_log(params, data, func,
lower_bound=lower_bound,
upper_bound=upper_bound,
verbose=False, maxiter=int(sys.argv[7]))
# extracting model predictions, likelihood and theta
model = func(poptg, ns)
ll_model = moments.Inference.ll_multinom(model, data)
theta = moments.Inference.optimal_sfs_scaling(model, data)
# random index for this replicate
ind=str(random.randint(0,999999))
# optimization number
opti=int(sys.argv[8])
# round number
round=(sys.argv[9])
# printing parameters
print "RESULT","SC_ae_b",ind,len(params),opti,round,ll_model,sys.argv[1],sys.argv[2],sys.argv[3],poptg,theta
|
[
"noreply@github.com"
] |
heroalone.noreply@github.com
|
57be481bdc9870c859d5619880cbf7226668a36b
|
c8422cb23cc0b112f279476ad25d7fc5c66863d7
|
/14_Longest_Common_Prefix.py
|
c7d61ec96874ee0789910c1c1306e9e81692f4ec
|
[] |
no_license
|
skyroam/leet-code
|
d278b8b7132517a1a405dfef9a352964d85d16fd
|
7a30c8e44f0e9bf63d25fe964646506936ade999
|
refs/heads/master
| 2022-01-11T11:50:56.363587
| 2019-09-18T12:21:48
| 2019-09-18T12:21:48
| 109,248,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
from typing import List

class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
num = len(strs)
if num == 0:
return ""
elif num == 1:
return strs[0]
else:
min_ = len(strs[0])
index = 0
for ind, item in enumerate(strs):
if len(item) < min_:
min_ = len(item)
index = ind
for i in range(min_):
for j in range(num-1):
if strs[j][i] != strs[j+1][i]:
return strs[index][:i]
return strs[index]
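# Added note: an equivalent column-wise version using zip, which stops at the
# shortest string and at the first column where the characters differ:
#
#     def longestCommonPrefix(self, strs: List[str]) -> str:
#         prefix = ""
#         for chars in zip(*strs):
#             if len(set(chars)) != 1:
#                 break
#             prefix += chars[0]
#         return prefix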
|
[
"skyroam.wyx@gmail.com"
] |
skyroam.wyx@gmail.com
|
b71e342de074106d3dcaa30452b90417df3938bf
|
780b01976dad99c7c2ed948b8473aa4e2d0404ba
|
/exobuilder/smartexo/smartexo_base.py
|
37623334d272a11f85fd51d71e6f28bd97bee53f
|
[] |
no_license
|
trendmanagement/tmqrexo_alexveden
|
a8ad699c2c3df4ce283346d287aff4364059a351
|
4d92e2ee2bc97ea2fcf075382d4a5f80ce3d72e4
|
refs/heads/master
| 2021-03-16T08:38:00.518593
| 2019-01-23T08:30:18
| 2019-01-23T08:30:18
| 56,336,692
| 1
| 1
| null | 2019-01-22T14:21:03
| 2016-04-15T17:05:53
|
Python
|
UTF-8
|
Python
| false
| false
| 6,094
|
py
|
from exobuilder.exo.exoenginebase import ExoEngineBase
from exobuilder.algorithms.rollover_helper import RolloverHelper
import logging
class SmartEXOBase(ExoEngineBase):
EXO_NAME = "SmartEXOBase"
def __init__(self, symbol, direction, date, datasource, **kwargs):
self._symbol = symbol
self.custom_values = {}
# Use ContFut EXO to process SmartEXO data
self._base_exo_name = "{0}_ContFut".format(self._symbol)
super().__init__(symbol, direction, date, datasource, **kwargs)
@staticmethod
def direction_type():
# Fixed at 2017-03-14 (return value was 0)
        # SmartEXOs have a unified direction; direction = 0 led to a double SmartEXO calculation in the smart exo script
        # Returning 1 ensures that the SmartEXO is calculated only once
return 1
@classmethod
def names_list(cls, symbol):
return ['{0}_{1}'.format(symbol, cls.EXO_NAME)]
@property
def exo_name(self):
return '{0}_{1}'.format(self._symbol, self.EXO_NAME)
def is_rollover(self):
if len(self.position) != 0:
for p in self.position.legs.values():
rh = RolloverHelper(p.instrument)
if rh.is_rollover(p):
return True
return False
def process_rollover(self):
trans_list = self.position.close_all_translist()
        self.log('Rollover occurred, new series used')
return trans_list
def get_custom_values(self):
"""
Method that return custom EXO data frame values, to store inside EXO Dataframe in the DB
:return: dictionary {'string_key': (int or float) value}
"""
return self.custom_values
def calculate_regime(self, date, exo_df):
"""
Calculates Bull/Bear/Neutral areas based on some logic
:param date: Current date time
:param exo_df: Price dataframe for underlying quotes
:return:
-1 - for bearish zone
0 - for neutral zone
+1 - for bullish zone
None - for unknown (just lead to existing position close)
"""
raise NotImplementedError("You should override this method to process SmartEXO logic")
@staticmethod
def new_position_bullish_zone(date, fut, opt_chain):
"""
Returns transaction to open new Smart EXO position for bullish zone
params date: current date
params fut: current actual future contract
params opt_chain: current actual options chain
returns: List of Transactions to open
"""
return []
@staticmethod
def new_position_bearish_zone(date, fut, opt_chain):
"""
Returns transaction to open new Smart EXO position for bearish zone
params date: current date
params fut: current actual future contract
params opt_chain: current actual options chain
returns: List of Transactions to open
"""
return []
@staticmethod
def new_position_neutral_zone(date, fut, opt_chain):
"""
Returns transaction to open new Smart EXO position for neutral zone
params date: current date
params fut: current actual future contract
params opt_chain: current actual options chain
returns: List of Transactions to open
"""
return []
def manage_opened_position(self, date, fut, opt_chain, regime, opened_position):
"""
        Returns a list of transactions to manage the opened position; can be used for delta rebalancing or dynamic delta hedging
:param fut:
:param opt_chain:
:param regime:
:param opened_position:
:return:
"""
return []
def process_day(self):
"""
Main EXO's position management method
:return: list of Transactions to process
"""
# Get cont futures price for EXO
exo_df, exo_info = self.datasource.exostorage.load_series(self._base_exo_name)
regime = self.calculate_regime(self.date, exo_df)
logging.debug("Regime {0}".format(regime))
trans_list = []
#
# Writing custom values to store inside DB
#
self.custom_values = {
'regime': regime if regime is not None else float('nan')
}
if regime is None and len(self.position) > 0:
return self.position.close_all_translist()
instr = self.datasource.get(self._symbol, self.date)
rh = RolloverHelper(instr)
fut, opt_chain = rh.get_active_chains()
if fut is None or opt_chain is None:
raise ValueError("Active option chain is not found for {0}".format(self._symbol))
if regime == 1 and 'bullish' not in self.position.legs:
# Close all
trans_list += self.position.close_all_translist()
tl = self.new_position_bullish_zone(self.date, fut, opt_chain)
if len(tl) > 0:
tl[0]._leg_name = 'bullish'
trans_list += tl
self._log_transactions(trans_list)
return trans_list
if regime == -1 and 'bearish' not in self.position.legs:
# Close all
trans_list += self.position.close_all_translist()
tl = self.new_position_bearish_zone(self.date, fut, opt_chain)
if len(tl) > 0:
tl[0]._leg_name = 'bearish'
trans_list += tl
self._log_transactions(trans_list)
return trans_list
if regime == 0 and 'neutral' not in self.position.legs:
# Close all
trans_list += self.position.close_all_translist()
tl = self.new_position_neutral_zone(self.date, fut, opt_chain)
if len(tl) > 0:
tl[0]._leg_name = 'neutral'
trans_list += tl
self._log_transactions(trans_list)
return trans_list
#
# Manage opened position
#
return self.manage_opened_position(self.date, fut, opt_chain, regime, self.position)
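# Minimal illustrative subclass (a sketch, not part of the framework): it
# classifies the regime by comparing the last ContFut close to a 20-period
# moving average. The pandas-like exo_df and its 'exo' column are assumptions.
class SmartEXOTrendSketch(SmartEXOBase):
    EXO_NAME = "SmartEXOTrendSketch"

    def calculate_regime(self, date, exo_df):
        px = exo_df['exo'][:date]          # price history up to `date` (assumed column)
        if len(px) < 20:
            return None                    # unknown regime -> close any open position
        ma = px.rolling(20).mean().iloc[-1]
        if px.iloc[-1] > ma:
            return 1                       # bullish zone
        if px.iloc[-1] < ma:
            return -1                      # bearish zone
        return 0                           # neutral zone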
|
[
"i@alexveden.com"
] |
i@alexveden.com
|
e756e0dccc7b86c84fd2275c1dea8a6dd9bcb273
|
1c48378b207e4d5b41afd2f29a5db7c8276180fc
|
/Python/Day 1/A1Q4.py
|
828463092445bffc10f46eb77486013eaaf9af06
|
[] |
no_license
|
Newcomer03/Basic-Programs
|
b3e365516ae516c6d53fb9b317def00b77dc741e
|
1fdf6190b35403ea754ca8ccec3813c926b1e289
|
refs/heads/main
| 2023-06-18T12:30:08.470642
| 2021-07-11T14:06:14
| 2021-07-11T14:06:14
| 384,951,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
st = input("Enter a String\n")
if st.startswith("Is"):
print(st)
else:
print("Is" + st)
|
[
"noreply@github.com"
] |
Newcomer03.noreply@github.com
|
d3241a4402b846dbe54bef75303020bb10e1aba1
|
f8d2beeb8cad25c6f6c4182fdd7b818d9787032c
|
/test/onnx/test_fx_dynamic_with_onnxruntime.py
|
7a38e34cfaebb8cb22f5bccb7cb8247d2a93434c
|
[
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
ddkalamk/pytorch
|
231a99de36313a2c24c6c0366da73cf7d6c3816d
|
f09347a9f11ac751023d3598ab8a04a10ce22a59
|
refs/heads/master
| 2023-04-08T10:16:33.079457
| 2023-03-26T03:35:03
| 2023-03-26T03:35:03
| 195,946,338
| 0
| 1
|
NOASSERTION
| 2020-03-18T07:56:13
| 2019-07-09T06:27:24
|
C++
|
UTF-8
|
Python
| false
| false
| 13,499
|
py
|
# Owner(s): ["module: onnx"]
from __future__ import annotations
import copy
import inspect
import io
import unittest
import warnings
from typing import Any, Callable, Optional, Sequence, Tuple, Union
import numpy as np
import onnx.reference
import onnx_test_common
import onnxruntime # type: ignore[import]
import torch
import torchvision
from torch.onnx._internal import _beartype, diagnostics, fx as fx_onnx
from torch.testing._internal import common_utils
from torch.types import Number
from torch.utils import _pytree as pytree
_NumericType = Union[Number, torch.Tensor, np.ndarray]
_ModelType = Union[torch.nn.Module, Callable]
_ONNXModelType = Union["onnx.ModelProto", bytes, str, io.BytesIO]
_InputArgsType = Union[torch.Tensor, Tuple[Any, ...]]
_OutputsType = Sequence[_NumericType]
@_beartype.beartype
def _run_ort(
onnx_model: _ONNXModelType, pytorch_inputs: _InputArgsType
) -> _OutputsType:
session = onnxruntime.InferenceSession(
onnx_model, providers=["CPUExecutionProvider"]
)
input_names = [ort_input.name for ort_input in session.get_inputs()]
return session.run(
None, {k: v.cpu().numpy() for k, v in zip(input_names, pytorch_inputs)}
)
@_beartype.beartype
def _run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
model: _ModelType,
input_args: _InputArgsType,
rtol: float = 1e-3,
atol: float = 1e-7,
opset_version: int = 18,
additional_test_inputs: Optional[Sequence[_InputArgsType]] = None,
**input_kwargs,
):
"""Compare the results of PyTorch model with exported ONNX model
Args:
model (_ModelType): PyTorch model
input_args (_InputArgsType): torch input arguments
rtol (float, optional): relative tolerance. Defaults to 1e-3.
atol (float, optional): absolute tolerance. Defaults to 1e-7.
opset_version (int, optional): ONNX opset version. Defaults to 18.
additional_test_inputs (Optional[Sequence[_InputArgsType]], optional):
Test the models with another dataset, which is designed for dynamic axes
testing. Defaults to None.
"""
@_beartype.beartype
def _try_clone_model(model: _ModelType) -> _ModelType:
"""Used for preserving original model in case forward mutates model states."""
try:
return copy.deepcopy(model)
except Exception:
warnings.warn(
"Failed to clone model. Model state might be mutated during verification."
)
return model
@_beartype.beartype
def compare_pytorch_onnx_with_ort(
onnx_model: Union["onnx.ModelProto", bytes],
model_input_args: _InputArgsType,
):
# Inspect the model's signature. It will be used
# to flatten kwargs.
if isinstance(model, torch.nn.Module):
signature = inspect.signature(model.forward)
else:
signature = inspect.signature(model)
# Bind args and kwargs to the model's signature to
# flatten kwargs into positional args since ONNX
# model cannot be called with kwargs.
bound = signature.bind(*model_input_args)
# Fill optional inputs.
bound.apply_defaults()
assert not bound.kwargs
pt_cloned_model = _try_clone_model(model)
ref_outputs, _ = pytree.tree_flatten(pt_cloned_model(*model_input_args))
ort_outputs = _run_ort(onnx_model, bound.args)
for ref_output, ort_output in zip(ref_outputs, ort_outputs):
torch.testing.assert_close(
ref_output, torch.tensor(ort_output), rtol=rtol, atol=atol
)
# Feed args and kwargs into exporter.
# Note that exporter should flatten kwargs into positional args the exported model;
# since ONNX doesn't represent kwargs.
onnx_model = fx_onnx.export_after_normalizing_args_and_kwargs(
model,
*input_args,
opset_version=opset_version,
use_binary_format=True,
enable_dynamic_axes=True, # export models with dynamic shapes
**input_kwargs,
)
compare_pytorch_onnx_with_ort(onnx_model, input_args)
    # This confirms the exported model accepts different input shapes
# when dynamic shape is enabled.
if additional_test_inputs:
for additional_input_args in additional_test_inputs:
compare_pytorch_onnx_with_ort(onnx_model, additional_input_args)
class TestFxDynamicWithOnnxRuntime(onnx_test_common._TestONNXRuntime):
def setUp(self):
super().setUp()
self.diag_ctx = diagnostics.engine.create_diagnostic_context(
"test_fx_export", version=torch.__version__
)
self.opset_version = 18
def tearDown(self):
diagnostics.engine.dump(
f"test_report_{self._testMethodName}.sarif", compress=False
)
super().tearDown()
@unittest.skip(
"_aten_convolution_onnx: _add_attribute_to_torchscript_node()"
" parameter value=[None, None] violates type hint"
"typing.Union[float, int, str, bytes, typing.Sequence[float],"
" typing.Sequence[int], torch.Tensor], as [None, None]:"
)
def test_shufflenet_v2_dynamic_axes(self):
model = torchvision.models.shufflenet_v2_x0_5(pretrained=False)
dummy_input = torch.randn(1, 3, 224, 224, requires_grad=True)
test_inputs = torch.randn(3, 3, 224, 224, requires_grad=True)
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
model,
(dummy_input,),
additional_test_inputs=[(dummy_input,), (test_inputs,)],
rtol=1e-3,
atol=1e-5,
)
def test_add(self):
class DynamicAdd(torch.nn.Module):
def forward(self, x, y):
return torch.ops.aten.add(x, y)
x = torch.randn(2, 3)
y = torch.randn(2, 3)
another_x = torch.randn(3, 4)
another_y = torch.randn(3, 4)
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
DynamicAdd(), (x, y), additional_test_inputs=[(another_x, another_y)]
)
def test_sigmoid_add(self):
class DynamicAdd(torch.nn.Module):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, x, y):
z = torch.ops.aten.add(x, y)
return self.sigmoid(z)
x = torch.randn(2, 3)
y = torch.randn(2, 3)
x = x[1:, :]
y = y[1:, :]
input_x = torch.randn(1, 4)
input_y = torch.randn(1, 4)
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
DynamicAdd(), (x, y), additional_test_inputs=[(input_x, input_y)]
)
@unittest.skip("flaky test: https://github.com/microsoft/onnx-script/issues/523")
def test_matmul(self):
class DynamicMatMul(torch.nn.Module):
def forward(self, x, y):
return torch.ops.aten.matmul(x, y)
x = torch.randn(2, 3, 6)
y = torch.randn(2, 6, 4)
input_x = torch.randn(2, 3, 4)
input_y = torch.randn(2, 4, 4)
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
DynamicMatMul(), (x, y), additional_test_inputs=[(input_x, input_y)]
)
@unittest.skip(
"fx.graph: doesn't handle scalar like normal tensor, so this is not yet "
"supported! TypeError: forward() takes 1 positional argument but 2 were given"
)
def test_scalar_tensor(self):
class test(torch.nn.Module):
def forward(self, x):
return torch.scalar_tensor(x.size(0)), torch.scalar_tensor(
x.size(1), dtype=torch.int64
)
x = torch.randn(2, 3, 4)
y = torch.randn(7, 8, 9)
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
test(),
(x,),
additional_test_inputs=[(y,)],
)
@unittest.skip(
"_aten_convolution_onnx: _add_attribute_to_torchscript_node()"
" parameter value=[None, None] violates type hint"
"typing.Union[float, int, str, bytes, typing.Sequence[float],"
" typing.Sequence[int], torch.Tensor], as [None, None]:"
)
def test_transpose_infer_shape(self):
class TransposeModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 1, 3, stride=2)
def forward(self, x):
x = self.conv(x)
return x.transpose(0, 1)
x = torch.randn(32, 3, 64, 64)
y = torch.randn(16, 3, 8, 64)
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
TransposeModule(),
(x,),
additional_test_inputs=[(y,)],
)
@unittest.skip("torch._dynamo.exc.TorchRuntimeError")
def test_squeeze_runtime_dim(self):
class Squeeze(torch.nn.Module):
def forward(self, d1, d2):
t = torch.zeros(d1[0], d2[0])
return t.squeeze(0)
d1 = torch.tensor([1])
d3 = torch.tensor([3])
d4 = torch.tensor([4])
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
Squeeze(), (d1, d4), additional_test_inputs=[(d3, d4)]
)
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
Squeeze(), (d3, d4), additional_test_inputs=[(d1, d3)]
)
@unittest.skip(
"AssertionError: The values for attribute 'shape' do not match:"
" torch.Size([5, 6, 2]) != torch.Size([4, 4, 2]). Even symbolic "
"fx.graph can't get dynamic arguments from this Module."
)
def test_slice(self):
class DynamicSliceExportMod(torch.nn.Module):
def forward(self, x):
results = []
for i in range(4):
results.append(x[: x.size(0) - i, i : x.size(2), i:3])
return tuple(results)
x = torch.rand(5, 5, 5)
y = torch.randn(6, 7, 8)
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
DynamicSliceExportMod(),
(x,),
additional_test_inputs=[(y,)],
)
@unittest.skip(
"fx.graph: doesn't handle scalar like normal tensor, so this is not yet"
"supported! TypeError: forward() takes 1 positional argument but 2 were given"
)
def test_arange(self):
class ArangeModel(torch.nn.Module):
def forward(self, input):
return (
torch.arange(input.shape[0]),
torch.arange(12),
torch.arange(start=input.shape[0], end=input.shape[0] + 5),
)
x = torch.randn(5, 3, 2)
y = torch.randn(8, 3, 2)
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
ArangeModel(),
(x,),
additional_test_inputs=[(y,)],
)
@unittest.skip(
"fx.graph: torch._subclasses.fake_tensor.DataDependentOutputException: "
"aten._local_scalar_dense.default"
)
def test_expand_as_fill_zero(self):
class Model(torch.nn.Module):
def forward(self, x):
x[:, x.size(0) :] = 0
return x
x = torch.ones(2, 5)
x2 = torch.randn(3, 4)
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
Model(),
(x,),
additional_test_inputs=[(x2,)],
)
@unittest.skip(
"ATenLib: INVALID_ARGUMENT : Failed to load model with error: "
"ONNX Schema aten_copy: failed validating the check: !(it.GetName().empty())"
)
def test_expand_as_fill_tensor(self):
class Model(torch.nn.Module):
def forward(self, x):
x[:, x.size(0) :] = torch.tensor([1, 2, 3])
return x
x = torch.ones(2, 5, 3)
x2 = torch.randn(3, 4, 3)
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
Model(),
(x,),
additional_test_inputs=[(x2,)],
)
    def test_expand_as_fill_separate_tensor(self):
class Model(torch.nn.Module):
def forward(self, x):
aa = torch.tensor([[0], [1], [2]])
return aa.expand_as(x)
x = torch.ones(3, 2)
x2 = torch.randn(3, 5)
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
Model(),
(x,),
additional_test_inputs=[(x2,)],
)
def test_view_dynamic_zero_dim(self):
class ViewModel(torch.nn.Module):
def forward(self, input):
input = input.view(-1, 2)
return input.view(1, -1)
x = torch.ones(2)
another_x = torch.empty((0,))
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
ViewModel(),
(x,),
additional_test_inputs=[(another_x,)],
)
def test_flatten_dynamic_axes(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return torch.flatten(x, start_dim=2, end_dim=3)
batch_size = 3
x = torch.randn(batch_size, 5, 4, 5)
y = torch.randn(5, 5, 4, 5)
model = MyModule()
_run_test_with_fx_to_onnx_exporter_and_onnx_runtime(
model, (x,), additional_test_inputs=[(y,)]
)
if __name__ == "__main__":
common_utils.run_tests()
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
09c0351bdcf24e3830e9a7301cefd1d1226c89be
|
4d4947181174d777196a59baa988c938613ef064
|
/DIS/signals.py
|
3b2779cbc85a70528dfd4fd2d4390d46c425a4bd
|
[] |
no_license
|
VictorImmanuvel1/Department-Information-System
|
37bea88efab5a7c7c05e3439cadd74e63a043007
|
1fd185dd0b332617b5bc5a8e6ace02be3fdae407
|
refs/heads/main
| 2023-06-11T14:50:50.931114
| 2021-04-28T07:53:40
| 2021-04-28T07:53:40
| 361,377,426
| 0
| 0
| null | 2021-04-28T07:53:41
| 2021-04-25T08:52:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,354
|
py
|
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile,Education,oe,ap,article,seminar,student,sem1,sem2,sem3,sem4,sem5
@receiver(post_save,sender=User)
def create_profile(sender,instance,created,**kwargs):
if created:
Profile.objects.create(user=instance)
Education.objects.create(user=instance)
oe.objects.create(user=instance)
ap.objects.create(user=instance)
article.objects.create(user=instance)
seminar.objects.create(user=instance)
@receiver(post_save,sender=User)
def save_profile(sender,instance,created,**kwargs):
instance.profile.save()
instance.education.save()
instance.oe.save()
instance.ap.save()
instance.article.save()
instance.seminar.save()
@receiver(post_save,sender=student)
def create(sender,instance,created,**kwargs):
if created:
sem1.objects.create(sid=instance)
sem2.objects.create(sid=instance)
sem3.objects.create(sid=instance)
sem4.objects.create(sid=instance)
sem5.objects.create(sid=instance)
@receiver(post_save,sender=student)
def save(sender,instance,created,**kwargs):
instance.sem1.save()
instance.sem2.save()
instance.sem3.save()
instance.sem4.save()
instance.sem5.save()
|
[
"victorimmanuvel@protonmail.com"
] |
victorimmanuvel@protonmail.com
|
fc0c191c4df7d151acb3246642ee5516c3f00ae1
|
ba168a8a6e93346a93238e204d44244c6abd4dab
|
/app/core/migrations/0004_auto_20200613_1300.py
|
78f6d337bea3b6878f001bbedb809dab89358b49
|
[
"MIT"
] |
permissive
|
Amirh-zahmatkesh/acc-back
|
6e8935aeac1e7f4d21b2b96fb1fe67355b4d1290
|
77e693c743977eacf6b9ed5c3bcf5b1ae918a9d5
|
refs/heads/master
| 2022-11-16T20:26:39.754198
| 2020-06-17T08:02:52
| 2020-06-17T08:02:52
| 280,595,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
# Generated by Django 3.0.6 on 2020-06-13 13:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20200613_1259'),
]
operations = [
migrations.AlterField(
model_name='certificate',
name='slug',
field=models.SlugField(),
),
]
|
[
"amirh.zahmatkesh98@gmail.com"
] |
amirh.zahmatkesh98@gmail.com
|
9623807c3351f38f27e901faf5983a24023c0ed1
|
85149cccb5dfbd804163dd67f20cdd114ad56a71
|
/examples/bahl/uq_bahl.py
|
5f08e5e684fb0608d396c8cec6355808ad1f5974
|
[] |
no_license
|
Mazzol/uncertainpy
|
9bf9f3a60b177278fdf428d0dc5f805e14c82f07
|
7e81a46200c272cfa2d5d929a597307a322b9530
|
refs/heads/master
| 2020-03-14T21:52:07.441718
| 2018-04-25T15:00:21
| 2018-04-25T15:00:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,508
|
py
|
import uncertainpy as un
import chaospy as cp
# Subclassing NeuronModel
class NeuronModelBahl(un.NeuronModel):
def __init__(self, stimulus_start=None, stimulus_end=None):
# Hardcode the path of the Bahl neuron model
super(NeuronModelBahl, self).__init__(interpolate=True,
path="bahl_neuron_model",
stimulus_start=stimulus_start,
stimulus_end=stimulus_end)
# Reimplement the set_parameters method used by run
def set_parameters(self, parameters):
for parameter in parameters:
self.h(parameter + " = " + str(parameters[parameter]))
# These commands must be added for this specific
# model to recalculate the parameters after they have been set
self.h("recalculate_passive_properties()")
self.h("recalculate_channel_densities()")
# Initialize the model with the start and end time of the stimulus
model = NeuronModelBahl(stimulus_start=100, stimulus_end=600)
# Define a parameter list and use it directly
parameters = {"e_pas": -80, cp.Uniform(-60, -85),
"apical Ra": 261, cp.Uniform(150, 300)}
# Initialize the features
features = un.SpikingFeatures()
# Perform the uncertainty quantification
UQ = un.UncertaintyQuantification(model=model,
parameters=parameters,
features=features)
data = UQ.quantify()
|
[
"simetenn@gmail.com"
] |
simetenn@gmail.com
|
6697c4a6ca304290cdf7ee90384c8bfc5c911d0b
|
697772c770d5b0c04402b819c80d420fa6d41c3e
|
/tests/beos_plugin_tests/scenarios/scenarios_basic_operations/014_[1.1]_Basic_operations_test_.py
|
e390d7a7c69d2524fd015f0eb14ed8ff596ac618
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
terradacs/beos-core
|
bf8f436d1754c4ad7741d976a12fd7a9e514bf85
|
31e19170bcad573b1d498811284e62babd478f92
|
refs/heads/beos-initial-release
| 2021-06-16T07:42:30.089362
| 2019-07-31T11:47:24
| 2019-07-31T11:47:24
| 172,755,498
| 10
| 3
|
NOASSERTION
| 2021-03-22T22:50:25
| 2019-02-26T17:13:37
|
C++
|
UTF-8
|
Python
| false
| false
| 5,122
|
py
|
#!/usr/bin/python3
# Scenario based on test : [1.1]-Basic-operations-test
import os
import sys
import time
import datetime
currentdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(os.path.dirname(currentdir)))
from beos_test_utils.beos_utils_pack import init, ActionResult, ResourceResult, VotersResult
if __name__ == "__main__":
try:
node, summary, args, log = init(__file__)
accounts = node.create_accounts(2, "5.0000 BTS")
node.run_node()
#Changeparams
#node.changeparams(["0.0000 BTS"], 40, [20,0,40,20,8000000], [20,0,40,10,5000000], 3000000)
newparams = {
"beos" : {
"starting_block" : 20,
"next_block" : 0,
"ending_block" : 40,
"block_interval" : 20,
"trustee_reward" : 8000000
},
"ram" : {
"starting_block" : 20,
"next_block" : 0,
"ending_block" : 40,
"block_interval" : 10,
"trustee_reward" : 5000000
},
"proxy_assets" : [ "0.0000 BTS"],
"ram_leftover" : 3000000,
"starting_block_for_initial_witness_election":40
}
node.changeparams(newparams)
#Actions
summary.user_block_status(node, accounts[0].name, ResourceResult(_balance="5.0000 BTS",_net_weight="0.0000 BEOS",_cpu_weight="0.0000 BEOS",_ram_bytes=5448))
summary.user_block_status(node, accounts[1].name, ResourceResult(_balance="5.0000 BTS",_net_weight="0.0000 BEOS",_cpu_weight="0.0000 BEOS",_ram_bytes=5448))
summary.action_status(node.transfer(_from=accounts[0].name,_to=accounts[1].name,_quantity="1.0000 BTS",_memo=""), ActionResult(False, "transaction net usage is too high: 128 > 0") )
node.wait_till_block(20)
summary.action_status(node.transfer(_from=accounts[0].name,_to=accounts[1].name,_quantity="5.0000 BTS",_memo="") )
summary.user_block_status(node, accounts[0].name, ResourceResult(_balance="",_net_weight="458902003.9224 BEOS",_cpu_weight="458902003.9225 BEOS",_ram_bytes=5332055448))
summary.user_block_status(node, accounts[1].name, ResourceResult(_balance="10.0000 BTS",_net_weight="458902003.9224 BEOS",_cpu_weight="458902003.9225 BEOS",_ram_bytes=5332055448))
node.wait_till_block(24)
summary.user_block_status(node, accounts[1].name, ResourceResult(_balance="10.0000 BTS",_net_weight="458902003.9224 BEOS",_cpu_weight="458902003.9225 BEOS",_ram_bytes=5332055448))
node.wait_till_block(26)
summary.user_block_status(node, accounts[0].name, ResourceResult(_balance="",_net_weight="458902003.9224 BEOS",_cpu_weight="458902003.9225 BEOS",_ram_bytes=5332055448))
node.wait_till_block(28)
summary.action_status(node.transfer(_from=accounts[0].name,_to=accounts[1].name,_quantity="1.0000 BTS",_memo=""), ActionResult(False, "no balance object found") )
summary.user_block_status(node, accounts[1].name, ResourceResult(_balance="10.0000 BTS",_net_weight="458902003.9224 BEOS",_cpu_weight="458902003.9225 BEOS",_ram_bytes=5332055448))
node.wait_till_block(30)
summary.user_block_status(node, accounts[0].name, ResourceResult(_balance="",_net_weight="458902003.9224 BEOS",_cpu_weight="458902003.9225 BEOS",_ram_bytes=5332055448))
node.wait_till_block(40)
summary.user_block_status(node, accounts[0].name, ResourceResult(_balance="",_net_weight="458902003.9224 BEOS",_cpu_weight="458902003.9225 BEOS",_ram_bytes=5332055448))
summary.user_block_status(node, accounts[1].name, ResourceResult(_balance="10.0000 BTS",_net_weight="1376706011.7673 BEOS",_cpu_weight="1376706011.7674 BEOS",_ram_bytes=26660255448))
node.wait_till_block(50)
summary.action_status(node.transfer(_from=accounts[1].name,_to=accounts[0].name,_quantity="10.0000 BTS",_memo="") )
summary.user_block_status(node, accounts[0].name, ResourceResult(_balance="10.0000 BTS",_net_weight="458902003.9224 BEOS",_cpu_weight="458902003.9225 BEOS",_ram_bytes=5332055448))
summary.user_block_status(node, accounts[1].name, ResourceResult(_balance="",_net_weight="1376706011.7673 BEOS",_cpu_weight="1376706011.7674 BEOS",_ram_bytes=26660255448))
summary.action_status(node.transfer(_from=accounts[0].name,_to=accounts[1].name,_quantity="10.0000 BTS",_memo="") )
summary.user_block_status(node, accounts[0].name, ResourceResult(_balance="",_net_weight="458902003.9224 BEOS",_cpu_weight="458902003.9225 BEOS",_ram_bytes=5332055448))
summary.user_block_status(node, accounts[1].name, ResourceResult(_balance="10.0000 BTS",_net_weight="1376706011.7673 BEOS",_cpu_weight="1376706011.7674 BEOS",_ram_bytes=26660255448))
summary.action_status(node.withdraw(_from=accounts[1].name,_bts_to="any_account",_quantity="10.0000 BTS",_memo="_memo") )
#At end
summary.user_block_status(node, accounts[0].name, ResourceResult(_balance="",_net_weight="458902003.9224 BEOS",_cpu_weight="458902003.9225 BEOS",_ram_bytes=5332055448))
summary.user_block_status(node, accounts[1].name, ResourceResult(_balance="",_net_weight="1376706011.7673 BEOS",_cpu_weight="1376706011.7674 BEOS",_ram_bytes=26660255448))
except Exception as _ex:
log.exception("Exception `{0}` occures while executing `{1}` tests.".format(str(_ex), __file__))
finally:
summary_status = summary.summarize()
node.stop_node()
exit(summary_status)
|
[
"wrona@syncad.com"
] |
wrona@syncad.com
|
0fe74dba4a6c30fea18d505b94370729aabd9a40
|
e920ca65077c792109f3f44d125b7dce5a012576
|
/model.py
|
ceb9b1c0865e61b440f038b9aaadc41bc6fbf4ac
|
[] |
no_license
|
dsp6414/person_search
|
3d6433d3b64a2a686e36935febc9c0582d3e0a24
|
cf13c8839248b471eeb57836c0e5e52b0f6f9fa0
|
refs/heads/master
| 2020-03-14T08:40:08.092777
| 2018-04-29T14:41:36
| 2018-04-29T14:41:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,788
|
py
|
# -----------------------------------------------------
# Spatial Invariant Person Search Network
#
# Author: Liangqi Li and Xinlei Chen
# Creating Date: Apr 1, 2018
# Latest rectified: Apr 10, 2018
# -----------------------------------------------------
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import yaml
from resnet import resnet
from densenet import densenet
from strpn import STRPN
from losses import oim_loss, smooth_l1_loss
class SIPN(nn.Module):
def __init__(self, net_name, pre_model=None, is_train=True):
super().__init__()
self.net_name = net_name
self.is_train = is_train
# TODO: set depending on dataset
self.num_pid = 483
self.queue_size = 500
self.lut_momentum = 0.5
self.reid_feat_dim = 256
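        # Buffers for the OIM (Online Instance Matching) loss: a lookup
        # table of features for labeled identities plus a circular queue
        # for unlabeled ones.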
self.register_buffer('lut', torch.zeros(
self.num_pid, self.reid_feat_dim).cuda())
self.register_buffer('queue', torch.zeros(
self.queue_size, self.reid_feat_dim).cuda())
if self.net_name == 'res50':
self.net = resnet(50, pre_model, self.is_train)
elif self.net_name == 'dense121':
self.net = densenet(121, pre_model, self.is_train)
elif self.net_name == 'dense161':
self.net = densenet(161, pre_model, self.is_train)
else:
raise KeyError(self.net_name)
self.fc7_channels = self.net.fc7_channels
        # SIPN consists of three main parts
self.head = self.net.head
self.strpn = STRPN(self.net.net_conv_channels, self.num_pid,
self.is_train)
self.tail = self.net.tail
self.cls_score_net = nn.Linear(self.fc7_channels, 2)
self.bbox_pred_net = nn.Linear(self.fc7_channels, 8)
self.reid_feat_net = nn.Linear(self.fc7_channels, self.reid_feat_dim)
self.init_linear_weight(False)
def forward(self, im_data, gt_boxes, im_info, mode='gallery'):
if self.is_train:
net_conv = self.head(im_data)
# returned parameters contain 3 tuples here
pooled_feat, rpn_loss, label, bbox_info = self.strpn(
net_conv, gt_boxes, im_info)
fc7 = self.tail(pooled_feat).mean(3).mean(2)
cls_score = self.cls_score_net(fc7)
bbox_pred = self.bbox_pred_net(fc7)
reid_feat = F.normalize(self.reid_feat_net(fc7))
cls_pred = torch.max(cls_score, 1)[1]
            cls_prob = F.softmax(cls_score, dim=1)
det_label, pid_label = label
det_label = det_label.view(-1)
cls_loss = F.cross_entropy(cls_score.view(-1, 2), det_label)
bbox_loss = smooth_l1_loss(bbox_pred, bbox_info)
reid_loss = oim_loss(reid_feat, pid_label, self.num_pid,
self.queue_size, self.lut,
self.queue, self.lut_momentum)
rpn_cls_loss, rpn_box_loss = rpn_loss
return rpn_cls_loss, rpn_box_loss, cls_loss, bbox_loss, reid_loss
else:
if mode == 'gallery':
net_conv = self.head(im_data)
rois, pooled_feat = self.strpn(net_conv, gt_boxes, im_info)
fc7 = self.tail(pooled_feat).mean(3).mean(2)
cls_score = self.cls_score_net(fc7)
bbox_pred = self.bbox_pred_net(fc7)
reid_feat = F.normalize(self.reid_feat_net(fc7))
cls_pred = torch.max(cls_score, 1)[1]
                cls_prob = F.softmax(cls_score, dim=1)
with open('config.yml', 'r') as f:
                    config = yaml.safe_load(f)
mean = config['train_bbox_normalize_means']
std = config['train_bbox_normalize_stds']
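                # Undo the training-time bbox-target normalization:
                # pred = pred * std + mean; the stats are repeated for the
                # two classes (8 regression values per RoI).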
means = bbox_pred.data.new(mean).repeat(2).unsqueeze(
0).expand_as(bbox_pred)
stds = bbox_pred.data.new(std).repeat(2).unsqueeze(
0).expand_as(bbox_pred)
bbox_pred = bbox_pred.mul(Variable(stds)).add(Variable(means))
cls_prob = cls_prob.data.cpu().numpy()
bbox_pred = bbox_pred.data.cpu().numpy()
rois = rois.data.cpu().numpy()
reid_feat = reid_feat.data.cpu().numpy()
return cls_prob, bbox_pred, rois, reid_feat
elif mode == 'query':
net_conv = self.head(im_data)
# TODO: move pooling layer from strpn to SIPN
pooled_feat = self.strpn(net_conv, gt_boxes, im_info, mode)
fc7 = self.tail(pooled_feat).mean(3).mean(2)
reid_feat = F.normalize(self.reid_feat_net(fc7))
return reid_feat.data.cpu().numpy()
else:
raise KeyError(mode)
def train(self, mode=True):
nn.Module.train(self, mode)
self.net.train(mode)
def init_linear_weight(self, trun):
def normal_init(m, mean, stddev, truncated=False):
"""
            weight initializer: truncated normal and random normal.
"""
# x is a parameter
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(
mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
normal_init(self.cls_score_net, 0, 0.01, trun)
normal_init(self.bbox_pred_net, 0, 0.001, trun)
# TODO: change 0.01 for reid_feat_net
normal_init(self.reid_feat_net, 0, 0.01, trun)
def load_trained_model(self, state_dict):
nn.Module.load_state_dict(
self, {k: state_dict[k] for k in list(self.state_dict())})
|
[
"noreply@github.com"
] |
dsp6414.noreply@github.com
|
d7794907c843dd270061bb2fbcc662e9a50aded5
|
b91588cda1a129f06aa9493ee6d6a70e4f996b7f
|
/Production/python/Spring15v2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8_cff.py
|
e7a99cd64aead259403e31460f446688c6ae74b7
|
[] |
no_license
|
muzamilahmad/LeptoQuarkTreeMaker
|
2371e93589dbe41b02a93a2533cbf5e955aaa0db
|
8e7eed0d03c6770a029eafb9b638e82c600a7425
|
refs/heads/master
| 2021-01-19T01:02:35.749590
| 2018-02-06T17:27:52
| 2018-02-06T17:27:52
| 65,389,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,887
|
py
|
import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/006A698D-EF6D-E511-8500-001E67E69879.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/02671D8F-EF6D-E511-9846-90B11C06EA7B.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/048C9B94-EF6D-E511-9B59-00266CF2454C.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/04ABE396-EF6D-E511-B30C-90B11C08CA45.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/069E22BC-F06D-E511-B63A-002590A83160.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/0A0E82ED-F06D-E511-9158-001E67398CA0.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/0ABC1755-F06D-E511-981A-001E67E6A166.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/10398F8D-EF6D-E511-A67B-00266CF25708.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/1404A066-F06D-E511-BF5F-001E673971C5.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/14A85892-EF6D-E511-B848-002590A371AA.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/16B32595-EF6D-E511-926C-001E67E34034.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/1A517561-F06D-E511-BD85-001E67397C33.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/1E730895-EF6D-E511-91D7-001517FB228C.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/203527A3-EF6D-E511-996F-D4856459BE56.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/24059093-EF6D-E511-B921-001E673975F8.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/2437B158-F06D-E511-B84E-001E67E71368.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/264C0193-EF6D-E511-A3FE-002590207C28.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/3AE7E9C3-EF6D-E511-A5A5-02163E00F364.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/3AF2F593-EF6D-E511-BD5B-001E67398BE7.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/3C30F95E-F06D-E511-AA0A-002590200B70.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/3E7F3096-EF6D-E511-A50C-002590200A1C.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/421A1157-F06D-E511-A0D5-001E67E6F7BA.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/46CF898A-EF6D-E511-93C1-0025905C2D98.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/46F97A8D-EF6D-E511-9938-002590A3C984.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/480D708C-EF6D-E511-B9BD-001E67E6F891.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/485429A0-EF6D-E511-8853-002590A88800.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/485B3597-EF6D-E511-BFB3-002590A81DAC.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/4CCBCB59-F06D-E511-9C2D-002590200AD8.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/4E711A91-EF6D-E511-B430-B083FED07198.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/503AA99B-EF6D-E511-8099-001E67E71E20.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/525E71C1-EF6D-E511-8941-00259073E520.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/52B99528-F06D-E511-9E92-02163E016637.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/54D0C599-EF6D-E511-8029-02163E00C79F.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/5686C856-F06D-E511-AE50-001E67E6F805.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/58AB3D60-F06D-E511-93E3-001E67E33C6A.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/5E92C365-F06D-E511-95B7-0025902009B8.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/62286FAD-EF6D-E511-9764-001E67A3FE66.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/62D93798-EF6D-E511-9F13-001E4F1C5820.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/64CAA096-EF6D-E511-95AF-002590A3A3D2.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/663ECF97-EF6D-E511-86B9-002590200AE4.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/68502191-EF6D-E511-AFCC-001E67E68677.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/6A5237A9-EF6D-E511-A67A-7845C4FC3AE5.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/6AFB448E-EF6D-E511-AC39-001E67396D10.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/6C8780C4-EF6D-E511-B03B-02163E015F46.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/6CFF9C8A-EF6D-E511-B7D2-00266CF9C0F0.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/70A3D48C-EF6D-E511-9F5E-001E67A3EF70.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/72C0EFF1-F06D-E511-AAB5-001E67E34034.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/74616698-EF6D-E511-B704-001E675A6725.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/7A04BD89-EF6D-E511-9FBE-001E67D5D8EF.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/80BB2F8C-EF6D-E511-862E-001E67A40514.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/8238588A-EF6D-E511-A6C3-00266CF9BE0C.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/86BBE7DC-EF6D-E511-B371-02163E00B778.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/8C55718B-EF6D-E511-93A3-0025905C95F8.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/9008EA8D-EF6D-E511-8C37-9C8E991A143E.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/9896F491-F06D-E511-8FA9-002590200878.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/9899136B-F06D-E511-B796-001E67E6F92C.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/9A6C916D-F06D-E511-ABB5-001E67396D51.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/A048E9CA-EF6D-E511-B0CD-02163E00C4C9.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/A0F4C672-F06D-E511-8C47-002590A831B4.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/A61810A6-EF6D-E511-A3DB-001E67397D05.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/AA9E1B50-F16D-E511-8D69-001E67E6F49F.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/AC847A64-F06D-E511-B1AF-001E67E71D03.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/ACB4B35F-F06D-E511-98A2-001E67E6F8EB.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/ACD32EA3-EF6D-E511-9A60-001E67E713C2.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/AE2BD593-EF6D-E511-933B-002590200A7C.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/AE76FF91-EF6D-E511-BC84-20CF3027A62B.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/B0AC868C-EF6D-E511-9B91-0025907FD4A0.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/BCD85297-EF6D-E511-BE00-90B11C08CA45.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/C0094AAC-EF6D-E511-B5E3-001E67397AE4.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/C0B63761-F06D-E511-9147-001E67E33ECC.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/C415A0E5-F06D-E511-AC40-001E67398BE7.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/C6118BA3-EF6D-E511-B718-02163E00C035.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/C62AD25B-F06D-E511-B8E2-001E673972F6.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/CC05FF8A-EF6D-E511-8FC0-0025905C43EA.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/CCC2A28E-EF6D-E511-93C9-20CF3027A613.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/CEB6568A-EF6D-E511-80C3-00266CF9BE0C.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/D01D11DE-F06D-E511-87FF-001E67E69879.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/D4F8F61E-F06D-E511-83EB-02163E00EA7A.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/DA889F90-EF6D-E511-BDF5-001E67E6F82D.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/E0DF2354-F06D-E511-9B72-001E67E6F490.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/E6A0B691-EF6D-E511-9398-001E67396892.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/E839C790-EF6D-E511-B2EB-001E67E6F404.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/E885458F-EF6D-E511-835D-20CF3027A5CA.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/EE3777AA-EF6D-E511-9BC3-002590A3C96E.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/F23BA45B-F06D-E511-9045-002590200840.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/F8699A5E-F06D-E511-B442-001E67E6F8B4.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/FC49C45F-F06D-E511-AD4C-002590A4FFA2.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/FC5D736F-F06D-E511-926B-001E67E6F89B.root',
'/store/mc/RunIISpring15MiniAODv2/QCD_HT1500to2000_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/74X_mcRun2_asymptotic_v2-v1/10000/FC8812F0-F06D-E511-A30C-001E67E6F82D.root' ] );
secFiles.extend( [
] )
|
[
"kpedro88@gmail.com"
] |
kpedro88@gmail.com
|
3272af571f27ef764ca94877f5ce2aa433f2fd3f
|
42bef710f7050918cd9bc8049248a988317dce3b
|
/main/migrations/0003_additionalimage_st.py
|
b47f488d6aa8cc119286915d724a4cc9cc3be2e9
|
[] |
no_license
|
wyacheslav-20/Strel_CNC
|
a5f7c3dee848a0e565519ce49ab87287a6a598c2
|
43e1bddd1af5d7bf1b9ece65ec9e7dbb9437c59b
|
refs/heads/master
| 2020-12-19T01:05:03.651417
| 2020-02-17T20:48:20
| 2020-02-17T20:48:20
| 235,573,206
| 0
| 0
| null | 2020-01-30T12:27:52
| 2020-01-22T12:54:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,527
|
py
|
# Generated by Django 3.0.2 on 2020-02-02 14:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import main.utilities
class Migration(migrations.Migration):
dependencies = [
('main', '0002_auto_20200131_1120'),
]
operations = [
migrations.CreateModel(
name='St',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=40, verbose_name='Tема')),
('content', models.TextField(verbose_name='Oпиcaниe')),
('price', models.FloatField(default=0, verbose_name='Цeнa')),
('contacts', models.TextField(verbose_name='Koнтaкты')),
('image', models.ImageField(blank=True, upload_to=main.utilities.get_timestamp_path, verbose_name='Изображение')),
('is_active', models.BooleanField(db_index=True, default=True, verbose_name='Выводить в списке?')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Опубликовано')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Aвтop объявления')),
('rubric', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='main.SubRubric', verbose_name='Pyбpикa')),
],
options={
'verbose_name': 'Объявления',
'verbose_name_plural': 'Объявления',
'ordering': ['-created_at'],
},
),
migrations.CreateModel(
name='Additionalimage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to=main.utilities.get_timestamp_path, verbose_name='Изображение')),
('st', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.St', verbose_name='Объявление')),
],
options={
'verbose_name': 'Дополнительная иллюстрация',
'verbose_name_plural': 'Дополнительные иллюстрации',
},
),
]
|
[
"noreply@github.com"
] |
wyacheslav-20.noreply@github.com
|
014b4f11a277086ca1dbd5b4f930d298b111bcaf
|
1d02157bbe8ec6856f51f2a6c3cb7b916ad0fa99
|
/my_exceptions.py
|
6cc67cbb4e6d54c8217ab09df84eda54c3374fd7
|
[] |
no_license
|
dishults/dslr
|
23c212c7427cc413627df6174004f0cd4a8ebfc2
|
656f66a1e19b9ba59c8c756a52df6bd6b563bf5e
|
refs/heads/master
| 2023-02-22T16:18:34.504537
| 2021-01-29T15:07:33
| 2021-01-29T15:07:33
| 249,403,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,017
|
py
|
import sys
class Usage(Exception):
def __init__(self, stage="train", extra=""):
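        # Print the usage string with or without "./", matching how the
        # script was invoked.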
if "./" in sys.argv[0]:
sys.exit(f"Example usage: {sys.argv[0]} datasets/dataset_{stage}.csv{extra}")
else:
sys.exit(f"Example usage: ./{sys.argv[0]} datasets/dataset_{stage}.csv{extra}")
class Header(Exception):
def __init__(self):
sys.exit(f"Header is incorrect for your dataset file '{sys.argv[1]}'")
class File(Exception):
def __init__(self):
sys.exit(f"Dataset file '{sys.argv[1]}' doesn't exist, is empty or incorrect")
class Dataset(Exception):
def __init__(self):
sys.exit("Check that your downloaded dataset is correct and hasn't been altered")
class Houses(Exception):
def __init__(self):
print("No data for Hogwarts Houses")
raise Dataset
class Weights(Exception):
def __init__(self):
sys.exit(f"Something went wrong with your '{sys.argv[2]}' file. Double check it's correct.")
|
[
"me@manhattan.lan"
] |
me@manhattan.lan
|
7e53153542cdb14806115ac36c29ba4abf21775a
|
b9c323a994a293601d5d7332dcea97dbb3e7ef42
|
/practicer_flask/topics/postgres.py
|
c70dbb277d94d880ecae04de6942034bcd2d0e80
|
[
"MIT"
] |
permissive
|
DominikPott/practicer-flask
|
60195e38daebfa7a07fd0a3203b93a5ee0bfa6db
|
c8e523095bdd5912dadb7357d16a4e76229a04da
|
refs/heads/master
| 2023-05-06T23:37:02.797637
| 2021-05-30T18:59:39
| 2021-05-30T18:59:39
| 363,600,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,086
|
py
|
import os
import psycopg2
def get_db():
url = os.environ.get('DATABASE_URL', None)
if url:
con = psycopg2.connect(url, sslmode='require')
else:
con = psycopg2.connect(host="localhost", database="statistics", user="postgres", password="test")
return con
def _create_table():
db = None
try:
db = get_db()
cursor = db.cursor()
create_table_query = '''CREATE TABLE IF NOT EXISTS topics (
DATE TEXT PRIMARY KEY NOT NULL,
TOPIC TEXT NOT NULL
);
'''
cursor.execute(create_table_query)
db.commit()
except (Exception, psycopg2.Error) as error:
print("Error while creating table.", error)
finally:
if db:
cursor.close()
db.close()
def _drop_table(name):
db = None
try:
db = get_db()
cursor = db.cursor()
create_table_query = f'DROP TABLE {name}'
cursor.execute(create_table_query)
db.commit()
except (Exception, psycopg2.Error) as error:
print("Error while dropping table", error)
finally:
if db:
cursor.close()
db.close()
def topics():
query = 'SELECT * from topics'
db = get_db()
cursor = db.cursor()
cursor.execute(query)
topics_raw = cursor.fetchall()
cursor.close()
db.close()
return list(map(map_to_dict, topics_raw))
def map_to_dict(topic):
return {'date': topic[0], 'topic': topic[1]}
def add_topic(topic):
date = topic['date']
topic = topic['topic']
query = f"INSERT INTO topics (DATE, TOPIC) VALUES ('{date}', '{topic}')"
db = get_db()
cursor = db.cursor()
    cursor.execute(query, (date, topic))
db.commit()
cursor.close()
db.close()
if __name__ == '__main__':
_drop_table(name='topics')
_create_table()
ts = [{'date': '2021.05.15', 'topic': 'Ananas'},
{'date': '2021.05.16', 'topic': 'Bagger'},
{'date': '2021.05.17', 'topic': 'Tomate'},
{'date': '2021.05.18', 'topic': 'Zebra'},
{'date': '2021.05.19', 'topic': 'Schwert'},
{'date': '2021.05.20', 'topic': 'Maulwurf'},
{'date': '2021.05.21', 'topic': 'Lampe'},
{'date': '2021.05.22', 'topic': 'Geburtstagskuchen'},
{'date': '2021.05.23', 'topic': 'Stift'},
{'date': '2021.05.24', 'topic': 'Handy'},
{'date': '2021.05.25', 'topic': 'Socke'},
{'date': '2021.05.26', 'topic': 'Zecke'},
{'date': '2021.05.27', 'topic': 'Bier'},
{'date': '2021.05.28', 'topic': 'Feilchen'},
{'date': '2021.05.29', 'topic': 'Breaking Bad'},
{'date': '2021.05.30', 'topic': 'Walnuss'},
{'date': '2021.05.31', 'topic': 'Waschmaschine'},
{'date': '2021.06.01', 'topic': 'Pfeffer'},
{'date': '2021.06.02', 'topic': 'Bett'},
{'date': '2021.06.03', 'topic': 'Neonlicht'},
{'date': '2021.06.04', 'topic': 'Brief'},
]
for t in ts:
add_topic(t)
t = topics()
print(t)
|
[
"d_tronic_p@gmx.de"
] |
d_tronic_p@gmx.de
|
c1353e4d5fd5aeedd6b659f753a2dfdaed518a60
|
e89509b453632747077bc57dbec265a7703d5c7c
|
/function/first class/firstyhu.py
|
a0d27f9e5ca1c9efafbad2efa91d9502353a9825
|
[] |
no_license
|
Madhav2108/udemy-python-as
|
a9dcfdbfdc1bb85471aa66de77957e962a7c5486
|
0bc6a501516618fb3c7ab10be6bc16c047aeec3f
|
refs/heads/master
| 2023-03-30T11:25:16.064592
| 2021-03-30T18:10:46
| 2021-03-30T18:10:46
| 286,001,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
def shout1(text):
return text.upper()
print (shout1('Hello') )
yell = shout1
print (yell('Hello'))
|
[
"noreply@github.com"
] |
Madhav2108.noreply@github.com
|
64931a97973e990a801007d7f2681ac77b22a909
|
32ad1d2e9e9505a9415cc4e165bff0bd4cd992e6
|
/Sorting/quick_sort_with_random_pivot.py
|
e760aed0c8552d5acb0b2e86d81d107f8c4064ed
|
[] |
no_license
|
shan18/Algo-Wiki
|
c26c3a56dc55256b25e71121ca7ab03f439827ad
|
7d5d1f8b2894726ff95a1a60dd676f236ab9ee93
|
refs/heads/master
| 2018-10-28T04:46:32.090787
| 2018-09-24T18:22:24
| 2018-09-24T18:22:24
| 105,189,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
"""Quick Sort
Time complexity is O(n log(n)).
It uses a Divide and Conquer Approach.
"""
from random import randint
# Setting the pivot element to its correct location
def partition(a, start, end):
pivot_index = randint(start, end)
a[start], a[pivot_index] = a[pivot_index], a[start]
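    # Lomuto-style scan with the pivot parked at a[start]: i marks the
    # boundary between elements <= pivot and the rest.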
i = start + 1
for j in range(start + 1, end + 1):
if a[j] <= a[start]:
a[j], a[i] = a[i], a[j]
i += 1
a[start], a[i - 1] = a[i - 1], a[start]
return i - 1
# Dividing the problem into smaller subproblems
def quick_sort(a, start, end):
if start < end:
q = partition(a, start, end)
quick_sort(a, start, q - 1)
quick_sort(a, q + 1, end)
if __name__ == '__main__':
filename = input("Enter the file name containing the array: ")
a = []
with open(filename) as f:
for n in f.read().split():
a.append(int(n))
quick_sort(a, 0, len(a) - 1)
print("The sorted array: ", a)
|
[
"thegeek.004@gmail.com"
] |
thegeek.004@gmail.com
|
c96827707326cba9a7f78a456f0afade56a03985
|
cd871fe2429f8c5b938f07937307c2597d9d8649
|
/tabulator/loaders/native.py
|
20531ceb64470c00c7a6ba4bb167e06189667041
|
[
"MIT"
] |
permissive
|
AleksiKnuutila/tabulator-py
|
9997ed9557bf932bc5d7aa99d9e49048c0797490
|
27ab97fbb2f6ee5f11adb3472da4454363041842
|
refs/heads/master
| 2021-01-13T08:51:32.974845
| 2016-09-23T18:57:44
| 2016-09-23T18:57:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from .. import exceptions
from . import api
# Module API
class NativeLoader(api.Loader):
"""Null loader to pass python native lists.
"""
# Public
def __init__(self, **options):
self.__options = options
def load(self, source, encoding, mode):
message = 'NativeLoader doesn\'t support load method'
raise exceptions.LoadingError(message)
|
[
"noreply@github.com"
] |
AleksiKnuutila.noreply@github.com
|
1755a22937114ff1bed1509bc3e143df4eb9d669
|
7923e7378d41e970bbd9394e20f33485a1d58886
|
/scripts/data_cleaning.py
|
3d6eaa6e88d66d28837f5ca037b3a0ae5d4ec836
|
[] |
no_license
|
pbeckdorf/capstonesproject
|
6d0fd59a61c23b1133727e3920a64e2cc1c0d081
|
59799f97cae6cc1281b80ed47208108be01fa53e
|
refs/heads/master
| 2022-12-10T09:49:51.273422
| 2020-09-03T16:50:59
| 2020-09-03T16:50:59
| 292,609,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54
|
py
|
total_cases = [100,200,300]
deaths = [10, 5, 20, 200]
|
[
"pbeckdorf@economia.cl"
] |
pbeckdorf@economia.cl
|
d4f85c103ccd79f4dbda2d277093963e5f6af2aa
|
6fe5fae54af9cee2e9de356dd149b7cfe1ca112a
|
/examples/zpt/_handler.py
|
6dff1e09ea9f4ce89dca4366776335a808f6c395
|
[] |
no_license
|
GrahamDumpleton-abandoned/vampire
|
181b03a53b62f3f53bdf83fb3d4305ef146d3526
|
bdcb2d25105ca95fcbda1afa16c4d9d681c664f6
|
refs/heads/master
| 2021-01-05T11:26:25.908716
| 2010-07-11T21:24:07
| 2010-07-11T21:24:07
| 241,008,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
from mod_python import apache
import os
import vampire
from ZopePageTemplates import PageTemplate
# Default handler for HTML.
def handler_html(req,**kwargs):
    # Check for existence of ZPT source file.
path = os.path.splitext(req.filename)[0] + ".zpt"
if os.path.exists(path):
layout_file = os.path.join(os.path.dirname(__file__),"_layout.zpt")
layout = PageTemplate()
layout.write(open(layout_file,"r").read())
config = vampire.loadConfig(req,".vampire")
settings = {}
for key,value in config.items("Settings"):
settings[key] = value
settings["request"] = req
settings["form"] = kwargs
page = PageTemplate()
page.write(open(path,"r").read())
settings["here"] = { "layout": layout }
content = page.pt_render(extra_context=settings)
req.content_type = page.content_type
req.send_http_header()
req.write(content)
return apache.OK
return apache.DECLINED
# Default handler for ZPT.
def handler_zpt(req):
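    # Hide raw template source: a request for an existing .zpt file is
    # answered with 404 rather than its contents.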
if os.path.exists(req.filename):
return apache.HTTP_NOT_FOUND
return apache.DECLINED
|
[
"devnull@localhost"
] |
devnull@localhost
|
446f123c795ecd70e833d9410489701f5c5a8e30
|
76b9f33c396d7233bc40794919f5872e93dbbf9c
|
/django/debuggingtest-master/boards/views.py
|
d927d0552129a426c701c1beb5a28030ad314bea
|
[] |
no_license
|
Lustellz/TIL-c9
|
14b6796e37cd2ea33e7e108e1f2d686eacb47ae9
|
86bb493d2e9b42a06545501cacfa1ceed6f9bc25
|
refs/heads/master
| 2021-06-16T18:06:28.325082
| 2019-05-09T06:26:47
| 2019-05-09T06:26:47
| 166,776,996
| 0
| 0
| null | 2021-06-10T21:22:41
| 2019-01-21T08:33:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,456
|
py
|
from django.shortcuts import render, get_object_or_404, redirect
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from .models import Board, Comment
from .forms import BoardForm, CommentForm
# Create your views here.
def list(request):
boards = Board.objects.order_by('-pk')
ctx = {
'boards': boards,
}
return render(request, 'boards/list.html', ctx)
def detail(request, board_pk):
board = get_object_or_404(Board, pk=board_pk)
ctx = {
'board': board,
'form': CommentForm(),
}
return render(request, 'boards/detail.html', ctx)
@login_required
def create(request):
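    # @login_required already guards this view, so the explicit
    # authentication check below is redundant defensive code.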
if not request.user.is_authenticated:
return redirect('boards:list')
if request.method == 'POST':
board_form = BoardForm(request.POST)
if board_form.is_valid():
board = board_form.save(commit=False)
board.user = request.user
board.save()
return redirect('boards:detail', board.id)
else:
board_form = BoardForm()
ctx = {
'board_form': board_form,
}
return render(request, 'boards/form.html', ctx)
@login_required
def edit(request, board_pk):
board = get_object_or_404(Board, pk=board_pk)
if request.user != board.user:
        return redirect('boards:detail', board_pk)
if request.method == 'POST':
board_update_form = BoardForm(request.POST, instance=board)
if board_update_form.is_valid():
board_update_form.save()
return redirect('boards:detail', board_pk)
else:
board_update_form = BoardForm(instance=board)
ctx = {
'form': board_update_form,
}
return render(request, 'boards/form.html', ctx)
@require_POST
def delete(request, board_pk):
board = get_object_or_404(Board, pk=board_pk)
board.delete()
return redirect('boards:list')
@login_required
@require_POST
def comment_create(request, board_pk):
board = get_object_or_404(Board, pk=board_pk)
    comment_form = CommentForm(request.POST)
    if comment_form.is_valid():
        # Attach the author and board before saving; assumes Comment has
        # user and board foreign keys (comment_delete confirms user).
        comment = comment_form.save(commit=False)
        comment.user = request.user
        comment.board = board
        comment.save()
    return redirect('boards:detail', board_pk)
@login_required
@require_POST
def comment_delete(request, board_pk, comment_pk):
comment = get_object_or_404(Comment, pk=comment_pk)
if request.user == comment.user:
comment.delete()
return redirect('boards:detail', board_pk)
|
[
"lustellz@gmail.com"
] |
lustellz@gmail.com
|
037c7a5f807faf5d8eeaa10c152f743b10d111ef
|
7c105a76577ffa87403c441962dbb12e26182398
|
/src/datasets/ds_cifar10.py
|
b9f47a205f627b5f579b4bfb157aad1a3f31b7c7
|
[] |
no_license
|
lyubonko/classification
|
f8850f5b00c3317ba4625fb2094f9ff228530ea5
|
774ee3f7398c74edc97f5983f524298f7a07ec2b
|
refs/heads/master
| 2020-07-03T18:44:39.808812
| 2019-08-12T21:22:46
| 2019-08-12T21:22:46
| 202,008,765
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
import torch
import torch.utils.data as data
import torchvision.datasets as dsets
import numpy as np
from datasets.transforms_cifar10 import *
class DataSetCifar10(object):
"""
    Class managing the CIFAR10 data-set
"""
def __init__(self,
path_data,
num_dunkeys=4,
batch_size_train=100,
batch_size_val=100,
download=False,
tiny=False,
transform_keys=None):
if transform_keys is None:
transform_keys = {'train': "init",
'val': "init"}
self.batch_sizes = {'train': batch_size_train, 'val': batch_size_val}
self.transforms = {'train': transforms_c10[transform_keys['train']],
'val': transforms_c10[transform_keys['val']]}
self.dataset = {}
self.loader = {}
for t in ['train', 'val']:
self.dataset[t] = dsets.CIFAR10(root=path_data,
train=(t == 'train'),
download=download,
transform=self.transforms[t])
self.loader[t] = torch.utils.data.DataLoader(dataset=self.dataset[t],
batch_size=self.batch_sizes[t],
shuffle=(t == 'train'),
num_workers=num_dunkeys)
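        # Tiny mode: reuse a single training batch for both splits, which
        # is handy as an overfit-one-batch sanity check.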
if tiny:
tiny_trainset = torch.utils.data.dataset.Subset(self.dataset['train'], np.arange(self.batch_sizes['train']))
tiny_loader = torch.utils.data.DataLoader(tiny_trainset, batch_size=self.batch_sizes['train'])
for t in ['train', 'val']:
self.dataset[t] = tiny_trainset
self.loader[t] = tiny_loader
self.classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
|
[
"lyubonko@gmail.com"
] |
lyubonko@gmail.com
|
c16258c80b008084c4e7b2a8e5c9968444bc523f
|
cb39c9d1ca3b63a0412eac6077b96b66b289fd4c
|
/Quiz-24.py
|
0c2e8db65d8277152295f3bb60c7c274495b5891
|
[] |
no_license
|
Reikenzan/Some-Python
|
bd64f562fc17b27bbb2a1fecd75f5d6ceff17c1c
|
84bea86486952f77b19dadb1d8dc99b133b95656
|
refs/heads/master
| 2021-05-08T06:31:16.690290
| 2018-10-16T15:33:52
| 2018-10-16T15:33:52
| 106,629,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
#quiz resolved
def getFirstLetters(myList):
newList = []
for string in myList:
firstLetter = string[0]
newList.append(firstLetter)
return newList
def main():
strList = input("Enter a list of strings:")
userList = strList.split(",")
returnedList = getFirstLetters(userList)
print("Your new list is",returnedList)
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
Reikenzan.noreply@github.com
|
e7874140cb21afaa66b0a2a651badfc70a1c7750
|
6b78bd7f62f7f407bf11d877cc4d91e7db3b62fe
|
/csc/python/wk-3-project-1/src/iterative_sorting/test_iterative.py
|
468d92d7181c18eb42b1f55d9133cb5ff1305388
|
[] |
no_license
|
PascalUlor/code-challenges
|
b85efacd4bc5999a0748d1fa1e84f503be09dc94
|
6488d0a6d2729bd50b106573f16488479fd6e264
|
refs/heads/master
| 2023-03-03T17:50:18.413127
| 2023-02-21T13:10:02
| 2023-02-21T13:10:02
| 212,979,719
| 1
| 0
| null | 2023-02-15T22:59:13
| 2019-10-05T10:14:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
import unittest
import random
from iterative_sorting import *
class IterativeSortingTest(unittest.TestCase):
def test_selection_sort(self):
arr1 = [1, 5, 8, 4, 2, 9, 6, 0, 3, 7]
arr2 = []
arr3 = [0, 1, 2, 3, 4, 5]
arr4 = random.sample(range(200), 50)
self.assertEqual(selection_sort(arr1), [0,1,2,3,4,5,6,7,8,9])
self.assertEqual(selection_sort(arr2), [])
self.assertEqual(selection_sort(arr3), [0,1,2,3,4,5])
self.assertEqual(selection_sort(arr4), sorted(arr4))
def test_bubble_sort(self):
arr1 = [1, 5, 8, 4, 2, 9, 6, 0, 3, 7]
arr2 = []
arr3 = [0, 1, 2, 3, 4, 5]
arr4 = random.sample(range(200), 50)
self.assertEqual(bubble_sort(arr1), [0,1,2,3,4,5,6,7,8,9])
self.assertEqual(bubble_sort(arr2), [])
self.assertEqual(bubble_sort(arr3), [0,1,2,3,4,5])
self.assertEqual(bubble_sort(arr4), sorted(arr4))
    # Tests the count_sort implementation, including its rejection of negative numbers
def test_counting_sort(self):
arr1 = [1, 5, 8, 4, 2, 9, 6, 0, 3, 7]
arr2 = []
arr3 = [1, 5, -2, 4, 3]
arr4 = random.sample(range(200), 50)
self.assertEqual(count_sort(arr1), [0,1,2,3,4,5,6,7,8,9])
self.assertEqual(count_sort(arr2), [])
self.assertEqual(count_sort(arr3), "Error, negative numbers not allowed in Count Sort")
self.assertEqual(count_sort(arr4), sorted(arr4))
if __name__ == '__main__':
unittest.main()
|
[
"pascalulor@yahoo.com"
] |
pascalulor@yahoo.com
|
0a49052505850de641ea0bb1dcc70e1cff5a7ca0
|
4997eb61787baea04cb00527dc79b58e481a452f
|
/09_day/flask-and-MongoDB/app/routes.py
|
6ddde1efa444e79e031bae8c46ef1821854824b2
|
[] |
no_license
|
aaronli39/fintech
|
41b4987116b0ab57f33e8dff3d4eb7e9e22d086a
|
ddca1417319cb45c4992cc8846ba511cd3717c74
|
refs/heads/master
| 2020-06-20T23:14:05.510263
| 2019-10-25T12:31:07
| 2019-10-25T12:31:07
| 197,283,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,526
|
py
|
import os
from app import app
from flask import render_template, request, redirect
events = [
{"event":"First Day of Classes", "date":"2019-08-21"},
{"event":"Winter Break", "date":"2019-12-20"},
{"event":"Finals Begin", "date":"2019-12-01"},
{"event":"Fintech Graduation", "date":"2019-08-03"},
{"event":"Date", "date":"2019-07-26"},
{"event":"Fintech Trip", "date":"2019-07-25"}
]
password = "duMJ42KGtQfTFEKh"
# from flask_pymongo import PyMongo
from flask_pymongo import PyMongo
# name of database
app.config["test"] = 'test'
# URI of database
app.config['MONGO_URI'] = "mongodb+srv://aaronli39:duMJ42KGtQfTFEKh@fintech-v2rh1.mongodb.net/test?retryWrites=true&w=majority"
mongo = PyMongo(app)
# INDEX
@app.route('/')
@app.route('/index')
def index():
eventsDB = mongo.db.events
# make a query
# empty curly braces will return all dictionaries
events = eventsDB.find({"date": "2019-10-31"})
return render_template('index.html', events = events)
# CONNECT TO DB, ADD DATA
@app.route('/add')
def add():
# connect to the database
users = mongo.db.users
# insert new data
    users.insert_one({
        "name": "sophia"
    })
print("user created")
# return a message to the user
return render_template("index.html", events=events)
@app.route("/addEvent")
def addEvent():
events = mongo.db.events
events.insert({"event": "Halloween", "date": "2019-10-31"})
print("Event added")
return redirect("/")
|
[
"aaronli39@gmail.com"
] |
aaronli39@gmail.com
|
676ada33993ed1a1add8d641635810ca9929e7c5
|
3e2e745575d0961eb561d84921777b3572053eb0
|
/level_07/wopr/cleanup-copy.py
|
b8d24a3b69ed0e63798a8c7db7ed27b63cd25a48
|
[] |
no_license
|
thelumberjhack/flareon6
|
7375040ad172dc19dc1558915d4f1ce9075c80b7
|
95d6e48ea8a8b33032a7567c1974074cde31325b
|
refs/heads/master
| 2022-02-17T18:40:23.184927
| 2019-09-09T05:44:39
| 2019-09-09T05:44:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38,559
|
py
|
"""
Once upon a midnight dreary, while I pondered, weak and weary,
Over many a quaint and curious volume of forgotten lore-
While I nodded, nearly napping, suddenly there came a tapping,
As of some one gently rapping, rapping at my chamber door-
"'Tis some visitor," I muttered, "tapping at my chamber door-
Only this and nothing more."
Ah, distinctly I remember it was in the bleak December;
And each separate dying ember wrought its ghost upon the floor.
Eagerly I wished the morrow;-vainly I had sought to borrow
From my books surcease of sorrow-sorrow for the lost Lenore-
For the rare and radiant maiden whom the angels name Lenore-
Nameless here for evermore.
And the silken, sad, uncertain rustling of each purple curtain
Thrilled me-filled me with fantastic terrors never felt before;
So that now, to still the beating of my heart, I stood repeating,
"'Tis some visitor entreating entrance at my chamber door-
Some late visitor entreating entrance at my chamber door;-
This it is and nothing more."
Presently my soul grew stronger; hesitating then no longer,
"Sir," said I, "or Madam, truly your forgiveness I implore;
But the fact is I was napping, and so gently you came rapping,
And so faintly you came tapping, tapping at my chamber door,
That I scarce was sure I heard you"-here I opened wide the door;-
Darkness there and nothing more.
Deep into that darkness peering, long I stood there wondering, fearing,
Doubting, dreaming dreams no mortal ever dared to dream before;
But the silence was unbroken, and the stillness gave no token,
And the only word there spoken was the whispered word, "Lenore?"
This I whispered, and an echo murmured back the word, "Lenore!"-
Merely this and nothing more.
Back into the chamber turning, all my soul within me burning,
Soon again I heard a tapping somewhat louder than before.
"Surely," said I, "surely that is something at my window lattice;
Let me see, then, what thereat is, and this mystery explore-
Let my heart be still a moment and this mystery explore;-
'Tis the wind and nothing more!"
Open here I flung the shutter, when, with many a flirt and flutter,
In there stepped a stately Raven of the saintly days of yore;
Not the least obeisance made he; not a minute stopped or stayed he;
But, with mien of lord or lady, perched above my chamber door-
Perched upon a bust of Pallas just above my chamber door-
Perched, and sat, and nothing more.
Then this ebony bird beguiling my sad fancy into smiling,
By the grave and stern decorum of the countenance it wore,
"Though thy crest be shorn and shaven, thou," I said, "art sure no craven,
Ghastly grim and ancient Raven wandering from the Nightly shore-
Tell me what thy lordly name is on the Night's Plutonian shore!"
Quoth the Raven "Nevermore."
Much I marvelled this ungainly fowl to hear discourse so plainly,
Though its answer little meaning-little relevancy bore;
For we cannot help agreeing that no living human being
Ever yet was blest with seeing bird above his chamber door-
Bird or beast upon the sculptured bust above his chamber door,
With such name as "Nevermore."
But the Raven, sitting lonely on the placid bust, spoke only
That one word, as if his soul in that one word he did outpour.
Nothing further then he uttered-not a feather then he fluttered-
Till I scarcely more than muttered "Other friends have flown before-
On the morrow he will leave me, as my hopes have flown before."
Then the bird said "Nevermore."
Startled at the stillness broken by reply so aptly spoken,
"Doubtless," said I, "what it utters is its only stock and store
Caught from some unhappy master whom unmerciful Disaster
Followed fast and followed faster till his songs one burden bore-
Till the dirges of his Hope that melancholy burden bore
Of 'Never-nevermore.'"
But the Raven still beguiling my sad fancy into smiling,
Straight I wheeled a cushioned seat in front of bird, and bust and door;
Then, upon the velvet sinking, I betook myself to linking
Fancy unto fancy, thinking what this ominous bird of yore-
What this grim, ungainly, ghastly, gaunt and ominous bird of yore
Meant in croaking "Nevermore."
This I sat engaged in guessing, but no syllable expressing
To the fowl whose fiery eyes now burned into my bosom's core;
This and more I sat divining, with my head at ease reclining
On the cushion's velvet lining that the lamp-light gloated o'er,
But whose velvet violet lining with the lamp-light gloating o'er,
She shall press, ah, nevermore!
Then, methought, the air grew denser, perfumed from an unseen censer
Swung by Seraphim whose foot-falls tinkled on the tufted floor.
"Wretch," I cried, "thy God hath lent thee-by these angels he hath sent thee
Respite-respite and nepenthe, from thy memories of Lenore;
Quaff, oh quaff this kind nepenthe and forget this lost Lenore!"
Quoth the Raven "Nevermore."
"Prophet!" said I, "thing of evil!-prophet still, if bird or devil!-
Whether Tempter sent, or whether tempest tossed thee here ashore,
Desolate yet all undaunted, on this desert land enchanted-
On this home by Horror haunted-tell me truly, I implore-
Is there-is there balm in Gilead?-tell me-tell me, I implore!"
Quoth the Raven "Nevermore."
"Prophet!" said I, "thing of evil-prophet still, if bird or devil!
By that Heaven that bends above us-by that God we both adore-
Tell this soul with sorrow laden if, within the distant Aidenn,
It shall clasp a sainted maiden whom the angels name Lenore-
Clasp a rare and radiant maiden whom the angels name Lenore."
Quoth the Raven "Nevermore."
"Be that word our sign in parting, bird or fiend!" I shrieked, upstarting-
"Get thee back into the tempest and the Night's Plutonian shore!
Leave no black plume as a token of that lie thy soul hath spoken!
Leave my loneliness unbroken!-quit the bust above my door!
Take thy beak from out my heart, and take thy form from off my door!"
Quoth the Raven "Nevermore."
And the Raven, never flitting, still is sitting, still is sitting
On the pallid bust of Pallas just above my chamber door;
And his eyes have all the seeming of a demon's that is dreaming,
And the lamp-light o'er him streaming throws his shadow on the floor;
And my soul from out that shadow that lies floating on the floor
Shall be lifted-nevermore! """
import hashlib, io, lzma, pkgutil, random, struct, sys, time
from ctypes import *
print('LOADING...')
BOUNCE = pkgutil.get_data('this', 'key')
def ho(h, g={}):
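    # Decodes an integer into its ASCII name (hex digits -> bytes) and
    # resolves it through g, letting names like 'exec' hide as numbers.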
k = bytes.fromhex(format(h, 'x')).decode()
return g.get(k, k)
a = 1702389091
b = 482955849332
g = ho(29516388843672123817340395359, globals()) # builtins module
aa = getattr(g, ho(a)) # exec
bb = getattr(g, ho(b)) # print
a ^= b # a = 481423330071
b ^= a # b = 1702389091
a ^= b # a = 482955849332 (= original b)
setattr(g, ho(a), aa) # g, print, exec function
setattr(g, ho(b), bb) # g, exec, print function
# Now print and exec functions are switched in the builtins module.
def eye(face):
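    # Decodes data hidden in the docstring's trailing whitespace: each
    # tab is a 0 bit and each space a 1 bit, packed LSB-first into bytes.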
leg = io.BytesIO()
for arm in face.splitlines():
arm = arm[len(arm.rstrip(b' \t')):]
leg.write(arm)
face = leg.getvalue()
bell = io.BytesIO()
x, y = (0, 0)
for chuck in face:
taxi = {9:0,
32:1}.get(chuck)
if taxi is None:
continue
x, y = x | taxi << y, y + 1
if y > 7:
bell.write(bytes([x]))
x, y = (0, 0)
return bell.getvalue()
def fire(wood, bounce):
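    # RC4-style stream cipher: key scheduling over `bounce`, then a
    # keystream XORed into `wood`.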
meaning = bytearray(wood)
bounce = bytearray(bounce)
regard = len(bounce)
manage = list(range(256))
def prospect(*financial):
return sum(financial) % 256
def blade(feel, cassette):
cassette = prospect(cassette, manage[feel])
manage[feel], manage[cassette] = manage[cassette], manage[feel]
return cassette
cassette = 0
for feel in range(256):
cassette = prospect(cassette, bounce[(feel % regard)])
cassette = blade(feel, cassette)
cassette = 0
for pigeon, _ in enumerate(meaning):
feel = prospect(pigeon, 1)
cassette = blade(feel, cassette)
meaning[pigeon] ^= manage[prospect(manage[feel], manage[cassette])]
return bytes(meaning)
for i in range(256):
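    # print and exec were swapped in builtins above, so this print(...)
    # actually exec()s the decompressed payload once the key byte is right.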
try:
print(lzma.decompress(fire(eye(__doc__.encode()), bytes([i]) + BOUNCE)))
except Exception:
pass
# okay decompiling cleanup.pyc
|
[
"yannick.formaggio@sophos.com"
] |
yannick.formaggio@sophos.com
|
f52f32bb65abd65061d45f38d9a91b13948b88aa
|
e580628ab341494342066974b53aab159815b9a5
|
/Project4/etl.py
|
b133bc757ca41d4afbcf525929a998646190ea25
|
[] |
no_license
|
vserraa/Data-Engineering-Nanodegree
|
c0363d11ebdf81d3e510711bbb8608ae415d7cee
|
8c4b74d473c1fb8a601e2f4f4b739590b0b5f7a4
|
refs/heads/master
| 2022-12-03T03:43:55.778053
| 2020-08-17T11:34:39
| 2020-08-17T11:34:39
| 280,669,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,833
|
py
|
import configparser
from datetime import datetime
import os
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format
from pyspark.sql.types import StructType, StructField, IntegerType, StringType, TimestampType, DecimalType, DateType, LongType, DoubleType
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID']=config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY']=config['AWS']['AWS_SECRET_ACCESS_KEY']
song_schema = StructType([
StructField("num_songs", IntegerType(), True),
StructField("artist_id", StringType(), True),
StructField("artist_latitude", DecimalType(), True),
StructField("artist_longitude", DecimalType(), True),
StructField("artist_location", StringType(), True),
StructField("artist_name", StringType(), True),
StructField("song_id", StringType(), True),
StructField("title", StringType(), True),
StructField("duration", DecimalType(), True),
StructField("year", IntegerType(), True)
])
log_schema = StructType([
StructField("artist", StringType(), True),
StructField("auth", StringType(), True),
StructField("firstName", StringType(), True),
StructField("gender", StringType(), True),
StructField("itemInSession", IntegerType(), True),
StructField("lastName", StringType(), True),
StructField("length", DecimalType(), True),
StructField("level", StringType(), True),
StructField("location", StringType(), True),
StructField("method", StringType(), True),
StructField("page", StringType(), True),
StructField("registration", StringType(), True),
StructField("sessionId", StringType(), True),
StructField("song", StringType(), True),
StructField("status", IntegerType(), True),
StructField("ts", LongType(), True),
StructField("userAgent", StringType(), True),
StructField("userId", StringType(), True)
])
def create_spark_session():
'''
Description: Creates a spark session
return: The spark session that was created
'''
spark = SparkSession \
.builder \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
.getOrCreate()
return spark
def read_song_data(spark, input_data, my_schema, debug = False):
'''
Parameters:
.spark -> A spark session
.input_data -> Path to the input bucket in S3
.my_schema -> The schema used to read the data from S3
        .debug -> An optional parameter to read only a subset of data for debugging purposes
    Description: Loads the data from S3 into a spark dataframe
return: The spark dataframe
'''
if debug:
path = os.path.join(input_data, "song_data/A/A/A/*.json")
else:
path = os.path.join(input_data, "song_data/*/*/*/*.json")
return spark.read.json(path, schema=my_schema)
def read_log_data(spark, input_data, my_schema, debug = False):
'''
Parameters:
.spark -> A spark session
.input_data -> Path to the input bucket in S3
.my_schema -> The schema used to read the data from S3
        .debug -> An optional parameter to read only a subset of data for debugging purposes
    Description: Loads the data from S3 into a spark dataframe
return: The spark dataframe
'''
if debug:
path = os.path.join(input_data, 'log_data/2018/11/*.json')
else:
path = os.path.join(input_data, 'log_data/*/*/*.json')
return spark.read.json(path, schema=my_schema)
def process_song_data(spark, song_data, output_data):
'''
Parameters:
.spark -> A spark session
.song_data -> A spark dataframe with data from S3 to be processed
.output_data -> Path to a output bucket in S3 where the results will be stored
Description: Processes the song_data dataframe and loads results into S3 as parquet files
'''
# extract columns to create songs table
songs_columns = ["song_id", "title", "artist_id", "year", "duration"]
songs_df = song_data.select(*songs_columns)
#partitioning songs dataframe by year and then artist as requested
songs_df = songs_df.repartition("year", "artist_id")
songs_output_path = os.path.join(output_data, "songs/")
songs_df = songs_df.dropDuplicates()
songs_df.write.parquet(songs_output_path, mode='overwrite')
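    # Note: repartition() above only controls the in-memory shuffle; to get a
    # year=/artist_id= directory layout in S3 the write would need
    # .write.partitionBy("year", "artist_id").parquet(...) instead.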
# extract columns to create artists table
artists_columns = ["artist_id", "artist_name", "artist_location", "artist_latitude", "artist_longitude"]
artists_df = song_data.select(*artists_columns)
# write artists table to parquet files
artists_output_path = os.path.join(output_data, "artists/")
artists_df = artists_df.dropDuplicates()
artists_df.write.parquet(artists_output_path, mode='overwrite')
def process_log_data(spark, song_df, log_df, output_data):
'''
Parameters:
.spark -> A spark session
.song_df -> A spark dataframe with data from S3 to be processed
.log_df -> A spark dataframe with data from S3 to be processed
.output_data -> Path to a output bucket in S3 where the results will be stored
Description: Processes the song_df and log_df dataframes and loads results into S3 as parquet files
'''
# filter by actions for song plays
log_df = log_df.filter(log_df.page == 'NextSong')
user_exprs = ["userId AS user_id", "firstName AS first_name", "lastName AS last_name",
"gender AS gender", "level AS level"]
# extract columns for users table
users_df = log_df.selectExpr(*user_exprs)
# write users table to parquet files
users_out_path = os.path.join(output_data, 'users/')
users_df = users_df.dropDuplicates()
users_df.write.parquet(users_out_path, mode='overwrite')
# create timestamp column from original timestamp column
ts_df = log_df.withColumn('timestamp', (log_df['ts']/1000).cast(TimestampType()))
# create datetime column from original timestamp column
date_df = ts_df.withColumn('datetime', ts_df['timestamp'].cast(DateType()))
# extract columns to create time table
time_df = date_df.select("timestamp",
hour("timestamp").alias("hour"),
dayofmonth("timestamp").alias("day"),
weekofyear("timestamp").alias("week"),
month("timestamp").alias("month"),
year("timestamp").alias("year"),
dayofweek("timestamp").alias("day_of_week")
)
time_df = time_df.repartition("year", "month")
time_df = time_df.dropDuplicates()
time_df_output_path = os.path.join(output_data, 'time/')
time_df.write.parquet(time_df_output_path, mode='overwrite')
cond = [date_df.artist == song_df.artist_name, date_df.song == song_df.title]
joined_df = date_df.join(song_df, cond, how='left').drop(song_df.year)
songplays_df = joined_df.selectExpr("timestamp",
"userId AS user_id",
"level",
"song_id",
"artist_id",
"sessionId AS session_id",
"location",
"userAgent AS user_agent")
songplays_df = songplays_df.withColumn("year", year("timestamp")).withColumn("month", month("timestamp"))
songplays_df = songplays_df.repartition("year", "month")
songplays_df = songplays_df.dropDuplicates()
songplays_output_path = os.path.join(output_data, 'songplays/')
songplays_df.write.parquet(songplays_output_path, mode='overwrite')
def main(argv):
input_data = "s3a://udacity-dend/"
output_data = "s3a://vssm-udacity-bucket/"
if len(argv) == 1 and argv[0] == '-debug':
debug = True
else:
debug = False
spark = create_spark_session()
song_df = read_song_data(spark, input_data, song_schema, debug)
log_df = read_log_data(spark, input_data, log_schema, debug)
process_song_data(spark, song_df, output_data)
process_log_data(spark, song_df, log_df, output_data)
if __name__ == "__main__":
main(sys.argv[1:])
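# Typical invocation (assumed, based on the -debug flag handling above):
#   spark-submit etl.py          # full run over all song/log JSON
#   spark-submit etl.py -debug   # subset run for quicker iteration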
|
[
"vssm@cin.ufpe.br"
] |
vssm@cin.ufpe.br
|
58d3c784b632f40bba208fc73fe677df7407a2e4
|
b1ddcf4bac9ca603a7a2333912eb29da8bf2cb7b
|
/CRUDUsingFunction/CRUDUsingFunction/wsgi.py
|
69b52b08d810e37cfa8ae28676e25a4e2e7f8667
|
[] |
no_license
|
sankethalake/django_practice
|
e9477ae0beee4923cd6758cc6d37517ea5979610
|
9877304f0c6415ae8979e5cc13a49559155fdd9d
|
refs/heads/main
| 2023-07-07T07:07:35.598657
| 2021-08-14T06:26:23
| 2021-08-14T06:26:23
| 389,917,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
"""
WSGI config for CRUDUsingFunction project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CRUDUsingFunction.settings')
application = get_wsgi_application()
|
[
"sankethalake@gmail.com"
] |
sankethalake@gmail.com
|
6c269e0d5dead2bd02fb033ebef3ae1399c5885d
|
977bdc0268e1428f1b8c734aa3d8bf6193294048
|
/DJAGEN/branches/mustafa_branch/djagen/collector/models.py
|
f3f561ef91188e05ebcea83c5692848f5d17df48
|
[] |
no_license
|
lkdtr/gezegen
|
b3c7ba20cbd9894aa6726444f626c79aa8670e15
|
d4972b77fbd756d9fc99cd1b96f08a8a8944978d
|
refs/heads/master
| 2020-12-25T16:47:47.407144
| 2016-03-24T11:05:01
| 2016-03-24T11:05:01
| 66,266,213
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,593
|
py
|
from django.db import models
import datetime, unicodedata, random, time
import re
# Create your models here.
ACTION_CHOICES = (
(1, u'Removed'),
(2, u'Approved'),
(3, u'Paused'),
(4, u'Readded'),
(5, u'Applied'),
(6, u'Editted')
)
class Authors (models.Model):
author_id = models.AutoField(primary_key=True, help_text="Author ID")
author_name = models.CharField(max_length=50, help_text="Author Name")
author_surname = models.CharField(max_length=50, help_text="Author Name")
    # we don't keep emails in the config.ini files; this part should be entered on the admin page
author_email = models.EmailField(null=True, blank=True, help_text="Author Email Address")
#the png file name of the author
author_face = models.CharField(max_length=30, null=True, blank=True, help_text="Author Face Name")
channel_subtitle = models.TextField(null=True, blank=True, help_text="Channel Subtitle")
channel_title = models.TextField(null=True, blank=True, help_text="Channel Title")
#URL of the feed.
channel_url = models.URLField(help_text="Channel URL")
#Link to the original format feed
channel_link = models.URLField(null=True, blank=True, help_text="Channel Link")
channel_urlstatus = models.IntegerField(null=True, blank=True, help_text="Channel URL Status")
    # use this field to check whether the author is shown on the planet or not, e.g. banned situations
current_status = models.SmallIntegerField(default=2, choices=ACTION_CHOICES, help_text="Current Status of the Author")
#whether the application to the planet is approved, the approved ones will be shown at the planet
is_approved = models.BooleanField(default=1, help_text="Approve Status of the Author")
#planets that the channel belongs to
    # in config.ini the entries should be one of the below:
#label = Personal
#label = LKD
#label = Eng
#label = Community
    label_personal = models.BooleanField(default=1, help_text="Channels at the Personal Blog Page")
label_lkd = models.BooleanField(default=0, help_text="Channels that are belong to LKD Blogs")
label_community = models.BooleanField(default=0, help_text="Channels that are belong to some community blogs")
label_eng = models.BooleanField(default=0, help_text="Channels that have English entries")
    # on the main page, let's just show personal and lkd for now; for communities, let's ask them for a special rss
def __unicode__(self):
return u'%s %s' % (self.author_name, self.author_surname)
class Meta:
#order according to the author_name, ascending
ordering = ['author_name']
# keep the history for the action that are done on the member urls
class History (models.Model):
action_type = models.SmallIntegerField(choices=ACTION_CHOICES)
action_date = models.DateTimeField()
action_explanation = models.TextField(help_text="Reason of Action", blank=True, null=True)
action_author = models.ForeignKey('Authors')
action_owner = models.CharField(max_length=20, help_text="The user who did the action")
def __unicode__(self):
return str(self.action_type)
class Meta:
#order descending, show the last actions at top
ordering = ['-action_date']
class Entries (models.Model):
id_hash = models.CharField(max_length=50, help_text="Hash of the ID", primary_key=True)
title = models.CharField(max_length=150, help_text="Entry Title")
    content_html = models.TextField(help_text="Entry Original Content")
content_text = models.TextField(help_text="Entry Pure Text Content")
summary = models.TextField(help_text="Entry Summary", null=True, blank=True)
link = models.URLField(help_text="Link to Entry")
date = models.DateTimeField(help_text="Date of the entry")
entry_id = models.ForeignKey('Authors')
def __unicode__(self):
return self.title
class Meta:
ordering = ['-date']
def sanitize(self, data):
p = re.compile(r'<[^<]*?/?>')
return p.sub('', data)
class RunTime (models.Model):
run_time = models.DateTimeField(help_text="Run time of the planet script", auto_now=True)
def __unicode__(self):
        return unicode(self.run_time)
class Meta:
ordering = ['-run_time']
def get_run_time(self):
dt = ".".join(map(lambda x: str(x), [self.run_time.day, self.run_time.month, self.run_time.year]))
hm = ":".join(map(lambda x: str(x), [self.run_time.hour, self.run_time.minute]))
rslt = " ".join([dt, hm])
return rslt
|
[
"mustafa.arici90@gmail.com"
] |
mustafa.arici90@gmail.com
|
a09a70560597d75a4cb0c3351fc714dbd2e544b6
|
3e86f5de2c6aad6c16ac25d30af7e99fa9f2a7c8
|
/support/lockdb.py
|
44dd2a43224e91b3c7a323134bf44dffa7f0e02e
|
[] |
no_license
|
greasysock/bnbLockClient
|
02d2261147dbe39d9fa66a8aad5d49a214b66aaa
|
adafc3ed9c0978f6913b50044b9f3043d07def69
|
refs/heads/master
| 2021-09-16T00:21:24.725473
| 2018-06-13T19:21:24
| 2018-06-13T19:21:24
| 94,815,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,880
|
py
|
import sqlite3, bson, base64
from support import passwordgen
tables = [("nodeinfo", "'nodeid' name, 'nodepassword' name, 'username' name, 'nodename' name"),
("deviceinfo", "'name' name, 'location' name, 'type' name, 'deviceid' name"),
("devicedata", "'deviceid' name, 'type' int, 'date' date,'dataid' name ,'data' text")]
def get_tables():
out_list = list()
for table, values in tables:
out_list.append(table)
return out_list
class database():
def __init__(self, file_name):
self.__conn = sqlite3.connect(file_name)
self.__c = self.__conn.cursor()
def integrity_check(self):
valid_db = get_tables()
test_db = list()
for table_name in self.__c.execute("SELECT name FROM sqlite_master WHERE type='table'"):
for table in table_name:
test_db.append(table)
if sorted(valid_db) == sorted(test_db):
return True
else:
return False
def save(self):
self.__conn.commit()
def close(self):
self.__conn.close()
def check_deviceid(self, deviceid):
found = False
for device in self.devices:
if device[3] == deviceid:
found = True
break
return found
def check_dataid(self, dataid):
found = False
devicedata = self.get_devicedata_all()
for data in devicedata:
if data[3] == dataid:
found = True
break
return found
def get_device(self, deviceid):
for device in self.__c.execute("SELECT * FROM deviceinfo WHERE deviceid = '{}'".format(deviceid)):
return device
def append_device(self, **kwargs):
command = "INSERT INTO deviceinfo values ('{}', '{}', '{}', '{}')".format(
kwargs['name'],
kwargs['location'],
kwargs['type'],
kwargs['deviceid'])
self.__c.execute(command)
self.save()
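    # Note: the string-formatted SQL in this class is open to SQL injection; a
    # safer sketch of the insert above would use sqlite3 parameter binding, e.g.
    #   self.__c.execute("INSERT INTO deviceinfo VALUES (?, ?, ?, ?)",
    #                    (kwargs['name'], kwargs['location'], kwargs['type'], kwargs['deviceid']))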
@property
def new_deviceid(self):
first_id = passwordgen.random_len(3, set=2)
while self.check_deviceid(first_id):
first_id = passwordgen.random_len(3, set=2)
return first_id
@property
def devices(self):
out_list = list()
for device in self.__c.execute("SELECT * FROM deviceinfo"):
out_list.append(device)
return out_list
def get_devices(self):
out_list = list()
for device in self.__c.execute("SELECT * FROM deviceinfo"):
out_list.append(device)
return out_list
@property
def new_dataid(self):
first_id = passwordgen.random_len(3, set=2)
while self.check_dataid(first_id):
first_id = passwordgen.random_len(3, set=2)
return first_id
def add_devicedata(self, **kwargs):
id = self.new_dataid
command = "INSERT INTO devicedata VALUES ('{}', '{}', '{}', '{}', '{}')".format(
kwargs['deviceid'],
kwargs['type'],
kwargs['date'],
id,
kwargs['data']
)
self.__c.execute(command)
self.save()
return id
def update_devicedata(self, deviceid, dataid, data):
update_command = '''
UPDATE devicedata
SET \"data\" = \'{}\'
WHERE deviceid = \'{}\' AND
dataid = \'{}\''''.format(data, deviceid, dataid)
self.__c.execute(update_command)
self.save()
return -1
def get_devicedata_all(self):
out_list = list()
for data in self.__c.execute("SELECT * FROM devicedata"):
out_list.append(data)
return out_list
def get_devicedata(self, deviceid):
out_list = list()
for data in self.__c.execute("SELECT * FROM devicedata WHERE deviceid = '{}'".format(deviceid)):
out_list.append(data)
return out_list
def get_devicedata_idx(self, deviceid, value, idx = 1):
devicedata = self.get_devicedata(deviceid)
out_list = list()
for data in devicedata:
if data[idx] == value:
out_list.append(data)
return out_list
def remove_devicedata(self, deviceid, dataid):
self.__c.execute("DELETE FROM devicedata WHERE dataid = '{}' AND deviceid = '{}'".format(dataid, deviceid))
self.save()
def get_deivceids(self, idx = 3):
out_list = list()
for device in self.devices:
out_list.append(device[idx])
return out_list
def set_nodeinfo(self, **kwargs):
command = "INSERT INTO nodeinfo VALUES ('{}', '{}', '{}', '{}')"
self.__c.execute(command.format(kwargs['nodeid'],
kwargs['nodepassword'],
'',
''))
self.save()
return -1
def set_noderegistration(self, **kwargs):
command = "UPDATE nodeinfo SET username = '{}', nodename = '{}'".format(kwargs['username'], kwargs['nodename'])
self.__c.execute(command)
self.save()
def get_nodeinfo(self):
nodeinfo = self.__c.execute("SELECT * FROM nodeinfo")
for nodeinf in nodeinfo:
return nodeinf
@property
def node_username(self):
return self.get_nodeinfo()[0]
@property
def node_password(self):
return self.get_nodeinfo()[1]
@property
def node_parent(self):
return self.get_nodeinfo()[2]
@property
def node_name(self):
return self.get_nodeinfo()[3]
def testdb(filename):
testdb = database(filename)
return testdb.integrity_check()
def createdb(file_name):
conn = sqlite3.connect(file_name)
c = conn.cursor()
for table, values in tables:
c.execute("CREATE TABLE '{}' ({})".format(table, values))
conn.commit()
conn.close()
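# Minimal usage sketch (assumed): create the schema once, then open and verify it.
#   createdb('node.db')
#   db = database('node.db')
#   assert db.integrity_check()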
|
[
"chris.gresock@gmail.com"
] |
chris.gresock@gmail.com
|
985e528282041e39605f7a0e1bec4cf5c6961410
|
d58db5812cc7230ae54c396f19f220f40ad30c63
|
/locallibrary/catalog/forms.py
|
7f8d51d17f089caafa57b3da26deeceb75106cc9
|
[] |
no_license
|
caseytin/django_projects
|
3e75592d67d5f5e4acd53731f385313a8d3237e0
|
dfa6a13298a087cde20c94f78b32bbf4f9196c06
|
refs/heads/master
| 2020-04-16T16:58:26.140898
| 2019-04-19T17:08:44
| 2019-04-19T17:08:44
| 165,757,607
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
import datetime
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from catalog.models import BookInstance
class RenewBookForm(forms.Form):
def clean_due_back(self):
data = self.cleaned_data['due_back']
# Check if a date is not in the past.
if data < datetime.date.today():
raise ValidationError(_('Invalid date - renewal in past'))
# Check if a date is in the allowed range (+4 weeks from today).
if data > datetime.date.today() + datetime.timedelta(weeks=4):
raise ValidationError(_('Invalid date - renewal more than 4 weeks ahead'))
# Remember to always return the cleaned data.
return data
class Meta:
model = BookInstance
fields = ['due_back']
labels = {'due_back': _('New renewal date')}
help_texts = {'due_back': _('Enter a date between now and 4 weeks (default 3).')}
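    # Note: the Meta block above is only honored by forms.ModelForm; on a plain
    # forms.Form it is ignored, so the explicit renewal_date field below is what
    # this form actually uses.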
renewal_date = forms.DateField(help_text="Enter a date between now and 4 weeks (default 3).")
def clean_renewal_date(self):
data = self.cleaned_data['renewal_date']
# Check if a date is not in the past.
if data < datetime.date.today():
raise ValidationError(_('Invalid date - renewal in past'))
# Check if a date is in the allowed range (+4 weeks from today).
if data > datetime.date.today() + datetime.timedelta(weeks=4):
raise ValidationError(_('Invalid date - renewal more than 4 weeks ahead'))
# Remember to always return the cleaned data.
return data
|
[
"ctin@umich.edu"
] |
ctin@umich.edu
|
b7477fa826edf431ec22f6e6a8967a9e84ca3fcf
|
66d8effab50d23aa809fca2ee47ebd6ff14501f7
|
/Provisioning/Jenkins/06 Python/modules/global_variables.py
|
eb106950452f395aa2545ce72887e662d71ed7d7
|
[] |
no_license
|
pneumakevin/DevOps
|
670499b728958b8b2d1f54f3f8ad6e7d1f9a1c75
|
db941e2031d1c24378e06757278058a13176ecc2
|
refs/heads/master
| 2021-03-22T03:42:29.132311
| 2017-11-27T06:21:37
| 2017-11-27T06:21:37
| 112,156,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
import time
'''==================================
Build Plan Variables
==================================
'''
'''====================================
 Folder Variables
====================================
'''
build_src_repository = r'D:\Projects\Sandbox\SequoiaWebSite'  # raw string avoids invalid escape sequences
build_folders_excluded = ('.', '.git', '.vs', '.svn','obj','bin','font', 'images', 'temp')
#folders_included = ('')
build_file_ext_included = ('.sln', '.cs', '.csproj', '.xml', '.snk', '.dll', '.user', '.ashx', '.aspx', '.html', '.htm', '.css', '.config', '.zip')
#build_file_ext_includes = ['*.sln', '*.cs', '*.csproj', '*.xml', '*.snk', '*.dll', '*.user', '*.ashx', '*.aspx', '*.html', '*.htm', '*.css', '*.config', '*.zip','*.doc', '*.odt'] # for files only
#excludes = ['/home/paulo-freitas/Documents'] # for dirs and files
primary_prj = ['Sequoia.CliqStudios', 'Sequoia.SixSquare', 'Sequoia.Admin']
shared_prj = ['Sequoia.Shared']
sql_folder_prefix = 'Sequoia.6square.Database.'
'''==================================
Dynamic Variables
==================================
'''
# Will be updated after build job successful at runtime.
build_src_last_update_date = time.strptime( '2017-10-10 00:00:00', "%Y-%m-%d %H:%M:%S")
# Value will be added from .csprojc at runtime.
primary_references = {'Sequoia.CliqStudios':'', 'Sequoia.SixSquare':'', 'Sequoia.Admin': ''}
|
[
"noreply@github.com"
] |
pneumakevin.noreply@github.com
|
218bd9a1cbc642338ac31db59fa2d81935b7663a
|
55965f592cb7e915cd68bd371ee1a6ad2a6e0247
|
/fol2/d.py
|
a2ae7e959ae2693156856e9795369cb6edb3ccf0
|
[] |
no_license
|
Upasna4/Training
|
2b5b57fc3e5229304860f153db93d912a44472bf
|
33c6eeb565c422e40ea88d50af787f58b9f0da6d
|
refs/heads/master
| 2020-08-05T03:50:36.280910
| 2019-10-02T16:36:09
| 2019-10-02T16:36:09
| 212,383,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38
|
py
|
from fol2.c import sum,mul
sum()
mul()
|
[
"upasnabhat17@gmail.com"
] |
upasnabhat17@gmail.com
|
73181bd9da39ef6747ebd8bde0b15ee54e4c5c63
|
c9666a99dc24c4931906680ddf47d8c916f35a6d
|
/axf/urls.py
|
1046af7585f1c8e8e6927d34d7f7300bc2c16981
|
[] |
no_license
|
jinhai1989/axf
|
47cadf5323b814421fc2b577f4f48935ea483729
|
961f9e13ed14b2c90e478975361461a77756eed2
|
refs/heads/master
| 2021-07-11T04:34:42.942929
| 2017-10-11T12:31:51
| 2017-10-11T12:31:51
| 106,543,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,373
|
py
|
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^home/$',views.home,name="home" ),  # home page
    url(r'^market/(\d+)/(\d+)/(\d+)/$',views.market,name="market" ),  # supermarket
    url(r'^cart/$',views.cart,name="cart" ),  # shopping cart
    url(r'^changecart/(\d+)/$',views.changecart,name="changecart" ),  # update the shopping cart
    url(r'^saveorder/$',views.saveorder,name="saveorder" ),  # submit an order
    url(r'^mine/$',views.mine,name="mine" ),  # my page
    url(r'^login/$',views.login,name="login" ),  # log in
    url(r'^register/$',views.register,name="register" ),  # register
    # check whether the user account already exists
    url(r'^checkuserid/$',views.checkuserid,name="checkuserid" ),
    url(r'^quit/$',views.quit,name="quit" ),  # log out
]
|
[
"jin@jin.com"
] |
jin@jin.com
|
ab30326e12c8e49c0db5b50227751503d35b9980
|
afcc6fa6c6a1736b89a018c24b4a6384f8e9c970
|
/b2g_util/__init__.py
|
180d33ee7de7d50e8f5ffa758b51a0aebe662c53
|
[] |
no_license
|
askeing/b2g-util-python
|
953d78c57243277a02b7edadf9d5bb65dab7d840
|
55e8e9b819871729da8ddd21c0b44804c4da0010
|
refs/heads/master
| 2023-06-18T07:04:59.630427
| 2016-03-04T07:16:57
| 2016-03-04T07:16:57
| 39,761,161
| 3
| 5
| null | 2016-01-29T06:39:53
| 2015-07-27T07:30:25
|
Python
|
UTF-8
|
Python
| false
| false
| 224
|
py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import util
import test
|
[
"askeing@gmail.com"
] |
askeing@gmail.com
|
892d774caa9c5b905ab531bbbf72cf2827316c64
|
d3913120a781d33fb130016403fac8b7075cfc1d
|
/backend/equipment/apps.py
|
c205ae5d66f754c31ee8fcbac16322ebddb31ca4
|
[
"MIT"
] |
permissive
|
Vini1979/Engenharia_Software_IF977
|
bdb227f087c9ada34aa58681393898b8d6123a29
|
dee99b7a05736bd35935d30a88b61a1f273d7633
|
refs/heads/main
| 2023-04-13T10:45:40.333964
| 2021-04-28T20:39:37
| 2021-04-28T20:39:37
| 355,015,827
| 0
| 1
|
MIT
| 2021-04-27T03:57:52
| 2021-04-06T01:02:43
|
Python
|
UTF-8
|
Python
| false
| false
| 150
|
py
|
from django.apps import AppConfig
class EquipmentConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'equipment'
|
[
"vicpantojadoamaral@gmail.com"
] |
vicpantojadoamaral@gmail.com
|
1fffbb0e94917e1591b20d062f1082d5fae467b5
|
b42302ae883f5187d03ed5deba8cfc7d03cca9e7
|
/eshop/migrations/0003_auto_20210523_1802.py
|
e8a2d1587a0b69265b5fe1036c99239121316a42
|
[] |
no_license
|
Chauhan-Mukesh/e-shop
|
86a94a440ff89a64bb97eb4db88fe8fb533ef076
|
3565aef16e727f91753888f70f3cdc4d3cdeef2a
|
refs/heads/master
| 2023-05-01T15:58:49.498165
| 2021-05-24T02:34:44
| 2021-05-24T02:34:44
| 369,959,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
# Generated by Django 2.2 on 2021-05-23 12:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('eshop', '0002_contactus'),
]
operations = [
migrations.AlterModelOptions(
name='user',
options={'ordering': ['username'], 'verbose_name': 'User'},
),
migrations.AlterField(
model_name='product',
name='prod_name',
field=models.CharField(max_length=50, verbose_name='Product Name'),
),
]
|
[
"mrchauhan490@gmail.com"
] |
mrchauhan490@gmail.com
|
629ae714aabaae51da5e87d368cd6d272aafd834
|
f301de68f64e52fc0770518248eafee6a3c25b1f
|
/threaded_comments/urls.py
|
c55fba19b3337f032d64177a3529d291e3ff8899
|
[] |
no_license
|
bedna-KU/Shakal-NG
|
1f0cf0ec8f8b2b0ab65e6ed4b954c095554df8a0
|
c504424545afbe9238d6813962d2a96f7c4889a1
|
refs/heads/master
| 2020-12-26T03:55:57.579418
| 2015-04-25T15:42:47
| 2015-04-25T15:42:47
| 34,467,383
| 0
| 0
| null | 2015-04-23T16:17:17
| 2015-04-23T16:17:17
| null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
#from django.contrib.comments.urls import urlpatterns as original_urls
from threaded_comments import feeds as threaded_comments_feeds
urlpatterns = patterns('threaded_comments.views',
url(r'^reply/(\d+)/$', 'reply_comment', name = 'comments-reply-comment'),
url(r'^post/$', 'post_comment', name = 'comments-post-comment'),
url(r'^posted/$', 'done_comment', name = 'comments-comment-done'),
url(r'^lock/(\d+)/$', 'admin', name = 'comments-admin'),
url(r'^watch/(\d+)/$', 'watch', name = 'comments-watch'),
url(r'^view/(\d+)/$', 'comment', {'single': True}, name = 'comment-single'),
url(r'^id/(\d+)/$', 'comment', {'single': False}, name = 'comment'),
url(r'^(\d+)/$', 'comments', name = 'comments'),
url(r'^feeds/latest/$', threaded_comments_feeds.CommentFeed(), name = 'comments-feed-latest'),
)
#urlpatterns += original_urls
|
[
"miroslav.bendik@gmail.com"
] |
miroslav.bendik@gmail.com
|
30671f804ed90f82e0af64193a524c8c58b5dd6c
|
e76e3aaf9670f1372f919a20667bab038d523fdf
|
/app/app/settings.py
|
acf4043705f9ae091a753b8d35e439615e913f57
|
[] |
no_license
|
Claiborne/django-recipe-api
|
fa0b4f296a86914d08fa6b32e8fbc7f0baa4d61a
|
849d8b724b8ad89cfa5dc130ec5507d50dad1ad7
|
refs/heads/master
| 2022-06-26T10:56:59.747834
| 2019-09-25T04:23:14
| 2019-09-25T04:23:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,281
|
py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&ep3#e&g)rl4j5@rv8g)8+!pbs1^9=5zx0%+i)nwnhd02p($y1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
|
[
"willclaiborne@gmail.com"
] |
willclaiborne@gmail.com
|
89efa2dc00a6220dc2d02663f179718f35cd67f8
|
6e2e5ce3a385120d431be77a5186eca63b2f74aa
|
/pipetree/executor/__init__.py
|
dfca041a6acea8dbdc10e572f413164bd5da0014
|
[
"MIT"
] |
permissive
|
pipetree/pipetree
|
a716bca7f4d9fc78c89d97de4672a61d8998504b
|
e7f1cc3247ac4290fed823a2a3a3166d736fe656
|
refs/heads/master
| 2021-01-13T14:45:15.172827
| 2017-02-13T01:08:21
| 2017-02-13T01:08:21
| 76,606,256
| 2
| 3
| null | 2017-01-18T06:53:19
| 2016-12-16T00:09:27
|
Python
|
UTF-8
|
Python
| false
| false
| 68
|
py
|
from .executor import Executor
from .local import LocalCPUExecutor
|
[
"noreply@github.com"
] |
pipetree.noreply@github.com
|
53d6254220f3ddd0561ebfbd4b317d2d3e80855f
|
4f0cd2618cd7856e5ef51d1ad177fa572ccaea6b
|
/CircuitPython_Templates/status_led_one_neopixel_rgb/code.py
|
9b4478135686bc8e33f0883aed9b180121f9a931
|
[
"MIT"
] |
permissive
|
profharris/Adafruit_Learning_System_Guides
|
ecd213d34ffb7fa227e085ef3c763c802406d30e
|
1e64c043be80451443fcae3f8952c6fd0cb1a52e
|
refs/heads/main
| 2023-07-06T22:17:02.568765
| 2021-08-06T18:44:30
| 2021-08-06T18:44:30
| 394,449,146
| 1
| 0
|
MIT
| 2021-08-09T21:54:29
| 2021-08-09T21:54:28
| null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
"""CircuitPython status NeoPixel red, green, blue example."""
import time
import board
import neopixel
pixel = neopixel.NeoPixel(board.NEOPIXEL, 1)
pixel.brightness = 0.3
while True:
pixel.fill((255, 0, 0))
time.sleep(0.5)
pixel.fill((0, 255, 0))
time.sleep(0.5)
pixel.fill((0, 0, 255))
time.sleep(0.5)
|
[
"kattni@adafruit.com"
] |
kattni@adafruit.com
|
45ac036e9d79b025bb7a0dcaddc6c7c483ac5676
|
2b0a557a168f779b11da7f898acc8a1576eb54c3
|
/server.py
|
d183581b3506c43a9297db5cd34ad8534a5b7864
|
[] |
no_license
|
ngzhian/whatsunblockshouldiuse
|
5e623481213bd2914002fa85d68ae392767ae5ba
|
1c96e7090f3c1ded19bf258b739c099e5e8d5a47
|
refs/heads/master
| 2020-03-30T19:01:31.177601
| 2015-03-09T03:14:33
| 2015-03-09T03:14:33
| 31,860,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,076
|
py
|
import re
import os
import json
from io import StringIO
from bottle import static_file, request, run, route
from bs4 import BeautifulSoup
import requests
# Read port selected by the cloud for our application
PORT = int(os.getenv('VCAP_APP_PORT', 8080))
if os.getenv("VCAP_SERVICES"):
services = json.loads(os.getenv("VCAP_SERVICES"))
    service_name = 'question_and_answer'
    if services and services[service_name]:
        svc = services[service_name][0]['credentials']
        svc_url = svc['url']
        svc_username = svc['username']
        svc_password = svc['password']
print(svc_url, svc_username, svc_password)
EPA_URL = 'http://iaspub.epa.gov/enviro/m_uv?'
EPA_URL_FORMAT = EPA_URL + 'lat={lat}&lon={lon}'
CACHE = {}
@route('/')
def index():
return static_file('index.html', root='static')
@route('/uv')
def uv():
print(CACHE)
url = EPA_URL_FORMAT.format(lat=request.query.lat, lon=request.query.lon)
if url in CACHE:
print('Cache hit')
return CACHE.get(url)
response = requests.get(url).text
index, level, desc = extract_index_level_desc(response)
results = {'index': index, 'level': level, 'desc': desc}
CACHE[url] = results
return results
@route('/ask')
def ask():
question = request.forms.get('ask') or request.query.get('ask')
if not question:
return {}
answer = ask_watson(question)
return {'answer': answer}
def extract_index_level_desc(contents):
soup = BeautifulSoup(contents)
content = soup.find(id="content")
uv_index_line = content.b
img_tag = content.findAll('img')[-1]
image_url = img_tag.attrs['src']
desc = list(list(img_tag.children)[-1].children)[0]
index, level = uv_index_level(image_url)
return index, level, desc
def uv_index_level(url):
"""Example url:
http://www.epa.gov/enviro/facebook/img/iphone/UV_Index_4_Moderate.png
"""
pattern = re.compile(r'[/._]')
components = re.split(pattern, url)
index = components[-3]
level = components[-2].lower()
return index, level
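# For the example URL in uv_index_level's docstring, re.split on [/._] yields
# [..., 'UV', 'Index', '4', 'Moderate', 'png'], so components[-3] == '4' and
# components[-2].lower() == 'moderate'.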
def get_location_text(uv_index_line):
pattern = re.compile(r'<[Ii]>[^<]+</[Ii]>')
location = pattern.search(uv_index_line).group(0)[3:-4]
return location
@route('/static/<filepath:path>')
def server_static(filepath):
return static_file(filepath, root='static')
@route('/favicon.ico')
def favicon():
return static_file('favicon.ico', root='')
def ask_watson(question):
question = question.strip()
USERNAME = 'd83e357f-9b61-4bcb-b44b-46f934606d12'
PASSWORD = 'aIpNYMrmSIup'
URL = 'https://gateway.watsonplatform.net/question-and-answer-beta/api/v1/question/healthcare'
print('Asking watson', question)
response = requests.post(
URL,
json={'question': {'questionText': question}},
headers={
'Accept': 'application/json',
            'X-SyncTimeout': '30',  # header values must be str, not int
},
auth=(USERNAME, PASSWORD))
return response.json()
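# Note: the credentials above are hardcoded; in practice they would come from
# the VCAP_SERVICES block parsed at the top of this file or from environment
# variables rather than source code.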
if __name__ == '__main__':
run(host='0.0.0.0', port=PORT, debug=True, reloader=True)
|
[
"ngzhian@gmail.com"
] |
ngzhian@gmail.com
|
35fc385076ea01e14b7f5fe05361a8e7614d1d96
|
ba480d6c617d1f06b90226b7456bdb4bf8207121
|
/теория_30.py
|
8a1c8a54e555d13a3efcf1a739ebccc34514786b
|
[] |
no_license
|
khomyakovskaya/diploma
|
cf3d50baf8a966c10d48bf3c24a3c4a493821117
|
fd2ab58803332bb3a131ffbca41564d800ef2105
|
refs/heads/main
| 2023-08-14T05:59:10.290603
| 2021-09-29T13:19:50
| 2021-09-29T13:19:50
| 411,665,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,815
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'теория_30.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Teoria30Window(object):
def setupUi(self, Teoria30Window):
Teoria30Window.setObjectName("Teoria30Window")
Teoria30Window.resize(1041, 682)
Teoria30Window.setStyleSheet("background-color: rgb(255, 255, 255);")
self.centralwidget = QtWidgets.QWidget(Teoria30Window)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser.setObjectName("textBrowser")
self.gridLayout.addWidget(self.textBrowser, 1, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(20)
self.label_3.setFont(font)
self.label_3.setStyleSheet("background-color: rgb(211, 239, 255);")
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 0, 0, 1, 1)
self.textBrowser_2 = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser_2.setObjectName("textBrowser_2")
self.gridLayout.addWidget(self.textBrowser_2, 4, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.btn_return30 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_return30.sizePolicy().hasHeightForWidth())
self.btn_return30.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(14)
font.setBold(False)
font.setWeight(50)
self.btn_return30.setFont(font)
self.btn_return30.setStyleSheet("background-color: rgb(211, 239, 255);\n"
"color: rgb(0, 0, 0);")
self.btn_return30.setObjectName("btn_return30")
self.horizontalLayout.addWidget(self.btn_return30)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.btn_oglavlenie30 = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(14)
self.btn_oglavlenie30.setFont(font)
self.btn_oglavlenie30.setStyleSheet("background-color: rgb(211, 239, 255);\n"
"color: rgb(0, 0, 0);")
self.btn_oglavlenie30.setObjectName("btn_oglavlenie30")
self.horizontalLayout.addWidget(self.btn_oglavlenie30)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem2)
self.btn_glavnaya30 = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(14)
font.setBold(False)
font.setWeight(50)
self.btn_glavnaya30.setFont(font)
self.btn_glavnaya30.setStyleSheet("background-color: rgb(211, 239, 255);\n"
"color: rgb(0, 0, 0);")
self.btn_glavnaya30.setObjectName("btn_glavnaya30")
self.horizontalLayout.addWidget(self.btn_glavnaya30)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem3)
self.btn_dalee30 = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(14)
font.setBold(False)
font.setWeight(50)
self.btn_dalee30.setFont(font)
self.btn_dalee30.setStyleSheet("background-color: rgb(211, 239, 255);\n"
"color: rgb(0, 0, 0);")
self.btn_dalee30.setObjectName("btn_dalee30")
self.horizontalLayout.addWidget(self.btn_dalee30)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem4)
self.gridLayout.addLayout(self.horizontalLayout, 6, 0, 1, 1)
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setStyleSheet("background-color: rgb(191, 216, 230);")
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.gridLayout.addWidget(self.line, 5, 0, 1, 1)
self.line_2 = QtWidgets.QFrame(self.centralwidget)
self.line_2.setStyleSheet("background-color: rgb(191, 216, 230);")
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.gridLayout.addWidget(self.line_2, 7, 0, 1, 1)
self.label_4 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(20)
self.label_4.setFont(font)
self.label_4.setStyleSheet("background-color: rgb(211, 239, 255);")
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 2, 0, 1, 1)
Teoria30Window.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(Teoria30Window)
self.statusbar.setObjectName("statusbar")
Teoria30Window.setStatusBar(self.statusbar)
self.retranslateUi(Teoria30Window)
QtCore.QMetaObject.connectSlotsByName(Teoria30Window)
def retranslateUi(self, Teoria30Window):
_translate = QtCore.QCoreApplication.translate
Teoria30Window.setWindowTitle(_translate("Teoria30Window", "АУК"))
self.textBrowser.setHtml(_translate("Teoria30Window", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"justify\" style=\" margin-top:12px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; line-height:150%;\"><span style=\" font-family:\'Times New Roman,serif\'; font-size:14pt;\">На останов котла защита по понижению давления природного газа воздействует только в положении переключателя топлива (ПТ) "Газ", а по понижению давления мазута - в положение "Мазут". Если защиты по понижению давления природного газа и мазута действуют одновременно, то производится останов котла независимо от положений переключателя топлива.</span><span style=\" font-size:8pt;\"> </span></p></body></html>"))
self.label_3.setText(_translate("Teoria30Window", "Защиты, действующие на останов котла"))
self.textBrowser_2.setHtml(_translate("Teoria30Window", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"justify\" style=\" margin-top:12px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; line-height:150%;\"><span style=\" font-family:\'Times New Roman,serif\'; font-size:14pt;\">Частичное снижение нагрузки блока (рис. 20) производится в следующих случаях:</span><span style=\" font-size:8pt;\"> </span></p>\n"
"<p align=\"justify\" style=\" margin-top:12px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; line-height:150%;\"><span style=\" font-family:\'Times New Roman,serif\'; font-size:14pt;\">3.2.1. При отключении электродвигателя одного из дутьевых вентиляторов при другом работающем.</span><span style=\" font-size:8pt;\"> </span></p>\n"
"<p align=\"justify\" style=\" margin-top:12px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; line-height:150%;\"><span style=\" font-family:\'Times New Roman,serif\'; font-size:14pt;\">3.2.2. При отключении электродвигателя одного из дымососов при другом работающем.</span><span style=\" font-size:8pt;\"> </span></p>\n"
"<p align=\"justify\" style=\" margin-top:12px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; line-height:150%;\"><span style=\" font-family:\'Times New Roman,serif\'; font-size:14pt;\">3.2.3. При останове одного из РВП при другом вращающемся.</span><span style=\" font-size:8pt;\"> </span></p></body></html>"))
self.btn_return30.setText(_translate("Teoria30Window", "Назад"))
self.btn_oglavlenie30.setText(_translate("Teoria30Window", "Оглавление"))
self.btn_glavnaya30.setText(_translate("Teoria30Window", "На главную"))
self.btn_dalee30.setText(_translate("Teoria30Window", "Далее"))
self.label_4.setText(_translate("Teoria30Window", "Защиты, действующие на снижение нагрузки блока до 50 - 60 %"))
|
[
"noreply@github.com"
] |
khomyakovskaya.noreply@github.com
|
49857cbe3ebd6c04822a55a04404bb3bf824d180
|
40a3a643326632db9fee144425e386981cec6f23
|
/api/app/model/setting.py
|
c7e369af128184872363c6f4fa4fc57ca224f910
|
[] |
no_license
|
grdaneault/spoil-it-for-me
|
edc3cea606aa52cf3dbb03352db9a7cab1fda706
|
2de4ed38558bda11cc25fc6a61104c3f0a0f9b9a
|
refs/heads/master
| 2020-05-21T21:11:00.707459
| 2019-05-12T15:30:40
| 2019-05-12T15:30:40
| 186,147,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
class Setting:
def __init__(self, name: str, image: str):
self.name = name
self.image = image
|
[
"gregdaneault@gmail.com"
] |
gregdaneault@gmail.com
|
f44b5f35ca90998324a02d598ec4db99381052bc
|
ca75f7099b93d8083d5b2e9c6db2e8821e63f83b
|
/z2/part2/batch/jm/parser_errors_2/828741753.py
|
2c4deb3f2940f12c36a84e0cf59bcac0b189fd80
|
[
"MIT"
] |
permissive
|
kozakusek/ipp-2020-testy
|
210ed201eaea3c86933266bd57ee284c9fbc1b96
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
refs/heads/master
| 2022-10-04T18:55:37.875713
| 2020-06-09T21:15:37
| 2020-06-09T21:15:37
| 262,290,632
| 0
| 0
|
MIT
| 2020-06-09T21:15:38
| 2020-05-08T10:10:47
|
C
|
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 828741753
"""
"""
random actions, total chaos
"""
board = gamma_new(3, 2, 2, 2)
assert board is not None
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_busy_fields(board, 1) == 1
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 2, 0, 1) == 1
assert gamma_move(board, 2, 1, 1) == 1
assert gamma_busy_fields(board, 2) == 2
assert gamma_free_fields(board, 2) == 3
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 1, 0) == 1
assert gamma_busy_fields(board, 1) == 2
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_busy_fields(board, 1) == 3
assert gamma_free_fields(board, 1) == 1
assert gamma_move(board, 2, 1, 1) == 0
gamma_delete(board)
|
[
"jakub@molinski.dev"
] |
jakub@molinski.dev
|
f330ec77cbf93b6997398ae141465cd8e9e7d312
|
5a7af650d21c2dbda536ad4d8b4ec8e49d7a146a
|
/project/binary_search/binary_search.py
|
5f942e0b095af78571e86ef9732cfe0dc56b6a8c
|
[] |
no_license
|
IvanPostu/simple-python-tasks
|
23348052a28419ccc3ba17cadb96603051a988b2
|
4e21799cea6bc75508109a55cc59d0d4e7b35d62
|
refs/heads/master
| 2022-08-30T06:25:49.620711
| 2020-05-29T09:28:28
| 2020-05-29T09:28:28
| 267,150,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,904
|
py
|
import random
def generate_random_list():
start = random.randint(0, 100)
return list(range(start, 200, 2))
def split_list_in_two_sublist(a_list):
"""Split list in two sublists
Args:
-
Returns:
Tuple ( first_chunk, second_chunk )
Raises:
-
"""
half = int(len(a_list) / 2) + len(a_list) % 2
return (a_list[:half], a_list[half:])
def list_patentially_contain_a_value(list_, value):
"""Check if list patentially contain a value
Args:
list of numbers,
value - number
Returns:
Boolean value
Raises:
-
"""
first_elm = list_[0]
last_elm = list_[len(list_) - 1]
conditions = [
first_elm < value and last_elm < value,
first_elm > value and last_elm > value,
]
return not any(conditions)
def binary_search(_list, search_val):
"""Do binary search
Args:
        _list - sorted list of numbers from a random start up to 200, step 2
Returns:
-
Raises:
-
"""
temp_list = _list
iteration_counter = 0
while True:
iteration_counter += 1
print(f'\nIteration {iteration_counter}\nList: {temp_list}\n')
if(len(temp_list) == 1):
break
(list_a, list_b) = split_list_in_two_sublist(temp_list)
temp_list = list_a if list_patentially_contain_a_value(
list_a, search_val) else list_b
    return temp_list[0] == search_val
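# Example (assumed): binary_search(list(range(100, 200, 2)), 150) returns True
# after roughly log2(50) ~ 6 halvings; odd values are absent from such a list
# and return False.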
def show_binary_search():
while True:
print('Enter a number between 100 and 200 : ')
search_val = int(input())
_list = generate_random_list()
if binary_search(_list, search_val):
            print(f'Your value {search_val} has been found.')
else:
print(f'Your value {search_val} not found.')
print('Try again? [y,n]: ')
if(input() != 'y'):
break
|
[
"ipostu20000127@gmail.com"
] |
ipostu20000127@gmail.com
|
24221a6241ecc5b49aa86753b96ffa796b34c50f
|
09b44c65e84db735a893873a16351394d1a670b7
|
/fiblary/client/v4/models.py
|
00bc4a32b4bc2d2b1f74cfb2c5ebda73b78accb2
|
[
"Apache-2.0"
] |
permissive
|
ikari-pl/fiblary
|
f4a1b2622216e8fff5f871034e483cdd22f07de2
|
b910e740075f7948ce375f267dbc3d9de18d54a4
|
refs/heads/master
| 2020-12-28T10:24:36.724955
| 2016-07-04T09:44:35
| 2016-07-04T09:44:35
| 238,288,470
| 0
| 0
|
Apache-2.0
| 2020-02-04T19:30:42
| 2020-02-04T19:30:42
| null |
UTF-8
|
Python
| false
| false
| 5,497
|
py
|
# Copyright 2014 Klaudiusz Staniek
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
fiblary.models
~~~~~~~~~~~~~~~~~
Home Center Model Class Implementations
"""
import functools
import jsonpatch
import logging
import six
_logger = logging.getLogger(__name__)
_type_ignore = ["HC_user", "VOIP_user", "weather", 'iOS_device', '']
def factory(controller, item):
# try as item could be anything
try:
if item['type'] in _type_ignore:
return None
except Exception:
pass
model = None
if isinstance(item, dict):
if controller.RESOURCE == 'devices':
model = DeviceModel(controller, item)
elif controller.RESOURCE == 'scenes':
model = SceneModel(controller, item)
else:
model = GenericModel(controller, item)
elif isinstance(item, list):
model = RecursiveList(item)
else:
assert 0, "Unknown model"
return model
class RecursiveList(list):
def __init__(self, value):
if value is None:
pass
elif isinstance(value, list):
for index, data in enumerate(value):
self.append(data)
self.__setitem__(index, data)
else:
            raise TypeError('Expected list')
self.__dict__['__original__'] = value
def __getitem__(self, key):
return list.__getitem__(self, key)
def __setitem__(self, key, value):
if isinstance(value, str):
value = unicode(value)
if isinstance(value, list) and not isinstance(value, RecursiveList):
value = RecursiveList(value)
if isinstance(value, dict) and not isinstance(value, RecursiveDict):
value = RecursiveDict(value)
list.__setitem__(self, key, value)
__setattr__ = __setitem__
__getattr__ = __getitem__
class RecursiveDict(dict):
def __init__(self, value=None):
if value is None:
pass
elif isinstance(value, dict):
for key in value:
self.__setitem__(key, value[key])
else:
            raise TypeError('Expected dict')
self.__dict__['__original__'] = value
def changes(self):
original = self.__dict__['__original__']
return jsonpatch.make_patch(original, dict(self)).to_string()
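    # e.g. (sketch): d = RecursiveDict({'a': 1}); d.a = 2
    # d.changes() -> roughly '[{"op": "replace", "path": "/a", "value": 2}]'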
def __setitem__(self, key, value):
if isinstance(value, str):
value = unicode(value)
if isinstance(value, dict) and not isinstance(value, RecursiveDict):
value = RecursiveDict(value)
if isinstance(value, list) and not isinstance(value, RecursiveList):
value = RecursiveList(value)
if not callable(value):
dict.__setitem__(self, key, value)
else:
# actions are callable so added only to the local dict
self.__dict__[key] = value
def __getitem__(self, key):
return dict.__getitem__(self, key)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
__setattr__ = __setitem__
__getattr__ = __getitem__
class GenericModel(RecursiveDict):
def __init__(self, controller, item):
self.__dict__['controller'] = controller
super(GenericModel, self).__init__(item)
class DeviceModel(GenericModel):
def __init__(self, controller, item):
super(DeviceModel, self).__init__(controller, item)
if 'actions' in self:
def action(action_name, argn, *args, **kwargs):
_logger.info("{0}({1})->{2}{3}".format(
self.name, self.id, action_name, args)
)
if len(args) != argn:
# hack due to http://bugzilla.fibaro.com/view.php?id=1125
if action_name != 'setTargetLevel':
raise TypeError(
"%s() takes exactly %d argument(s) (%d given)" % (
action_name, argn, len(args))
)
return self.controller.action(self.id, action_name, *args)
for action_name, argn in six.iteritems(self['actions']):
_logger.debug("{0}({1})<-{2}({3})".format(
self.name,
self.id,
action_name,
argn))
self.__dict__[str(action_name)] = functools.partial(
action, action_name, argn)
class SceneModel(GenericModel):
    """Home Center specific subclass for the Scene with actions"""
    def __init__(self, controller, item):
        super(SceneModel, self).__init__(controller, item)
def start(self):
return self.controller.start(self.id)
def stop(self):
return self.controller.stop(self.id)
def enable(self):
return self.controller.enable(self.id)
def disable(self):
return self.controller.disable(self.id)
|
[
"mariusz@lbh.pl"
] |
mariusz@lbh.pl
|
793461b3bca84e69e604fb6c24d1c59def45593f
|
b30ef73e34c12095c382c95bcc2cd3552cae4f1e
|
/count.py
|
7fe15fe547176d99b970c376ecf43bed16469be0
|
[] |
no_license
|
Cyber-Netic/Python-Counting-Program
|
0fcc0f574c34e8ea51a2260f57d2ea66c92340cb
|
47fb51c6e88bf1f3d755cde941ed97d5737dcc7a
|
refs/heads/master
| 2020-09-17T09:06:06.046027
| 2019-11-25T23:52:14
| 2019-11-25T23:52:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
while True:
    print('First, enter your name:')
    x = input()
    # re-prompt until the name contains only letters
    while not x.isalpha():
        print("Error: Contains numbers! Please use only letters.")
        x = input("Enter your name again:")
    print('Hello, ' + x + '! Welcome to Python Counting Program')
    print('Now, enter any number:')
    y = input()
    # re-prompt until the input is numeric
    while not y.isnumeric():
        print("Error: Contains letters! Please use only numbers.")
        y = input("Enter your number again:")
    print('You entered ' + y + '. Here is your score:')
    i = 0
    while i <= int(y):
        print(i)
        i += 1
# checking for alphabets/number
#print(x.isalpha())
#print(y.isnumeric())
print('by Yuliia Poperechna')
|
[
"noreply@github.com"
] |
Cyber-Netic.noreply@github.com
|
eecf0ef61fbc4fc48698da33a49fe452fb25fd0a
|
f0b60e7df4db6512189530b185df6af8a5516080
|
/test_Datapro.py
|
610719f846958b96f2138c903fb99454246788db
|
[] |
no_license
|
williamSYSU/Corner
|
27d6afb244c6266578d3483a868a7f3b1751cb1f
|
c0e6ca80fdae6f796f87c08527e9b19fce371f7c
|
refs/heads/master
| 2020-03-31T20:18:00.807861
| 2018-11-05T10:45:53
| 2018-11-05T10:45:53
| 152,534,000
| 1
| 0
| null | 2018-10-11T05:06:03
| 2018-10-11T05:06:03
| null |
UTF-8
|
Python
| false
| false
| 36,421
|
py
|
# -*- coding: utf-8 -*-
# @Author : William
# @Project : Corner-william
# @FileName : test_Datapro.py
# @Time : Created at 2018/10/12
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
from __future__ import unicode_literals, print_function, division
import time
import unicodedata
import numpy as np
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional as f
import torch.optim as optim
from io import open
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import config
class NormalAttention(nn.Module):
def __init__(self, d_input, d_target, d_hidden, dropout=0.1):
super(NormalAttention, self).__init__()
self.d_input = d_input
self.d_target = d_target
self.d_hid = d_hidden
self.attn = nn.Linear(d_input, d_hidden)
self.attn_target = nn.Linear(d_target, d_hidden)
# self.combine = nn.Linear(d_input + d_target, 1)
self.attn_target_1 = nn.Linear(d_hidden + d_hidden, d_hidden)
self.combine = nn.Linear(d_hidden, 1)
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(config.dropout)
self.layer_norm = nn.LayerNorm(d_input)
self.tanh = nn.Tanh()
def forward(self, input_seq, target_seq):
combine_input = self.attn(input_seq)
tar = self.attn_target(target_seq)
tar = tar.unsqueeze(1)
combine_tar = tar.view(len(input_seq), 1, -1)
_combine_input = torch.unsqueeze(combine_input, dim=1).expand(-1, 1, -1, -1)
_combine_tar = torch.unsqueeze(combine_tar, dim=2).expand(-1, -1, len(input_seq[0]), -1)
# _combine_input = torch.unsqueeze(input_seq, dim=1).expand(-1, 1, -1, -1)
# _combine_tar = torch.unsqueeze(tar, dim=2).expand(-1, -1, len(input_seq[0]), -1)
# _combine_tar = combine_tar.view(1, 1, 1, 50).expand(-1, -1, len(input_seq[1]), -1)
# attn_out = self.tanh(_combine_tar + _combine_input)
attn_out = self.tanh(self.attn_target_1(torch.cat((_combine_input, _combine_tar), dim=-1)))
attn_out = self.dropout(self.combine(attn_out))
attn_score = self.softmax(attn_out.squeeze(3))
# attn_out = input_seq * attn
# attn_out = attn_out.sum(dim=1)
out = torch.bmm(attn_score, input_seq)
# out = self.layer_norm(out)
return out
class Gate(nn.Module):
def __init__(self, d_part1, d_part2, d_target, d_hidden):
        super(Gate, self).__init__()
self.d_part1 = d_part1
self.d_part2 = d_part2
self.d_hid = d_target
self.p1_tar_w = nn.Linear(d_part1, d_hidden)
self.p1_tar_u = nn.Linear(d_target, d_hidden)
self.p2_tar_w = nn.Linear(d_part2, d_hidden)
self.p2_tar_u = nn.Linear(d_target, d_hidden)
self.layer_norm = nn.LayerNorm(d_hidden)
self.softmax = nn.Softmax(dim=1)
self.tanh = nn.Tanh()
def forward(self, input1_seq, input2_seq, target):
p1_1 = self.p1_tar_w(input1_seq)
p1_2 = self.p1_tar_u(target)
p2_1 = self.p2_tar_w(input2_seq)
p2_2 = self.p2_tar_u(target)
z_l = self.tanh(p1_1 + p1_2)
z_r = self.tanh(p2_1 + p2_2)
z_w = torch.cat([z_l, z_r], dim=1)
z_w = self.softmax(z_w)
z_l_w = z_w[:, 0, :].unsqueeze(1)
z_r_w = z_w[:, 1, :].unsqueeze(1)
out = z_l_w * input1_seq + z_r_w * p2_1
# out = self.layer_norm(out)
return out
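# A minimal shape sketch of Gate (a hypothetical helper, not called anywhere
# in this module); it mirrors the Gate(300, 50, 50, 300) construction used in
# WdeRnnEncoderFix below.
def _demo_gate_shapes():
    gate = Gate(300, 50, 50, 300)
    p1 = torch.randn(8, 1, 300)   # e.g. a sentence representation
    p2 = torch.randn(8, 1, 50)    # e.g. an attended context representation
    tgt = torch.randn(8, 1, 50)   # target (aspect) embedding driving the gate
    out = gate(p1, p2, tgt)       # elementwise convex mix of p1 and projected p2
    assert out.shape == (8, 1, 300)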
class DotProductAttention(nn.Module):
# Scaled-dot-product Attention layer
def __init__(self, d_query, d_key, d_value, mapping_on="query"):
# mapping_on: whether linear transformation is required, mapping query or key into a new space
# mapping_on: "query" || "key" || "both" || "none"
super(DotProductAttention, self).__init__()
self.d_query = d_query
self.d_key = d_key
self.d_value = d_value
self.mapping_on = mapping_on
if mapping_on == "query":
# mapping query to key's space
self.q_h = nn.Linear(d_query, d_key)
elif mapping_on == "key":
# mapping key to query's space
self.k_h = nn.Linear(d_key, d_query)
elif mapping_on == "both":
# mapping query and key into the same space
self.q_h = nn.Linear(d_query, d_value)
self.k_h = nn.Linear(d_key, d_value)
self.temper = np.power(d_value, 0.5)
# self.weight = nn.Parameter(torch.Tensor(d_query, d_query))
# uniform = 1. / math.sqrt(self.d_query)
# self.weight.data.uniform_(-uniform, uniform)
def forward(self, q, k, v):
# query: [s_batch, 1, d_query]
# key: [*, l_key, d_key] # usually d_key = d_query
# value: [*, l_value, d_value] # usually l_value = l_key
# if len(key.shape) == 3, then "*" must equal to s_batch
if self.mapping_on == "query":
q = self.q_h(q)
elif self.mapping_on == "key":
k = self.k_h(k)
elif self.mapping_on == "both":
q = self.q_h(q)
k = self.k_h(k)
# print("11", k[0])
# [s_b, 1, d_q] * [*, d_k, l_k] = [s_b, 1, l_k]
if len(k.shape) == 3:
# similarity = torch.matmul(q, k.permute(0, 2, 1)) / self.temper
# similarity = torch.matmul(q, k.permute(0, 2, 1))
similarity = torch.matmul(q, k.permute(0, 2, 1))
else:
# len(k.shape) == 2
similarity = torch.matmul(q, k.transpose(0, 1)) / self.temper
# print("22", similarity[0])
        attn = F.softmax(similarity, dim=-1)
# print("attn : ", attn[1])
# [s_b, 1, l_k] * [*, l_v, d_v] = [s_b, 1, d_v]
output = torch.matmul(attn, v)
# print("44", output[0])
return output, attn
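# A minimal usage sketch of DotProductAttention (a hypothetical helper, not
# called anywhere in this module): with mapping_on="key" the keys are first
# projected into the query space, then softmax(q k^T) weights the values.
def _demo_dot_product_attention():
    attn = DotProductAttention(d_query=300, d_key=300, d_value=300,
                               mapping_on="key")
    q = torch.randn(4, 1, 300)   # [s_batch, 1, d_query]
    k = torch.randn(4, 7, 300)   # [s_batch, l_key, d_key]
    v = torch.randn(4, 7, 300)   # [s_batch, l_key, d_value]
    out, weights = attn(q, k, v)
    assert out.shape == (4, 1, 300) and weights.shape == (4, 1, 7)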
class WdeRnnEncoderFix(nn.Module):
def __init__(self, hidden_size, output_size, context_dim, embed, trained_aspect, dropout=0.1):
super(WdeRnnEncoderFix, self).__init__()
self.hidden_size = hidden_size
self.blstm = nn.LSTM(hidden_size, 300, bidirectional=True, batch_first=True)
self.embedded = nn.Embedding.from_pretrained(embed)
self.aspect_embed = nn.Embedding.from_pretrained(trained_aspect)
self.tanh = nn.Tanh()
self.hidden_layer = nn.Linear(hidden_size * 2, hidden_size)
self.context_input_ = nn.Linear(600, 50)
self.embedding_layers = nn.Linear(0 + hidden_size, output_size)
# self.slf_attention = attention.MultiHeadAttention(600, 3)
# self.slf_attention = attention.MultiHeadAttentionDotProduct(3, 600, 300, 300, 0.01)
# self.Position_wise = attention.PositionwiseFeedForward(600, 600, 0.01)
self.attention = NormalAttention(600, 50, 50)
self.gate = Gate(300, 50, 50, 300)
self.min_context = nn.Linear(300, 50)
def forward(self, input, hidden):
BATCH_SIZE = len(input)
batch_len = input[:, 0]
batch_context = input[:, 1]
input_index = input[:, 2:]
input_index = input_index.long()
# seq_len = batch_len.item()
# input_index = input_index[0][0:seq_len]
# print('input_index',input_index)
# print(hidden.size())
sorted_seq_lengths, indices = torch.sort(batch_len, descending=True)
# TODO: change NO.1 -> switch order of following two lines
_, desorted_indices = torch.sort(indices, descending=False)
input_index = input_index[:, 0: sorted_seq_lengths[0]]
input_index = input_index[indices]
input_value = self.embedded(input_index)
input_value = input_value.float()
packed_inputs = nn.utils.rnn.pack_padded_sequence(input_value, sorted_seq_lengths.cpu().data.numpy()
, batch_first=True)
# print(sorted_seq_lengths, indices)
output, hidden = self.blstm(packed_inputs, hidden)
padded_res, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
desorted_output = padded_res[desorted_indices]
'''
self attention module add or not?
point wise product add or not?
'''
# desorted_output = self.slf_attention(desorted_output, context_input)
# desorted_output, _ = self.slf_attention(desorted_output, desorted_output, desorted_output)
# desorted_output = self.Position_wise(desorted_output)
'''
Normal attention module add or not?
'''
context_input = self.aspect_embed(batch_context).float()
context_input = self.min_context(context_input)
attn_target = self.attention(desorted_output, context_input)
desorted_output = F.max_pool2d(desorted_output, (desorted_output.size(1), 1))
# output.view(self.hidden_size * 2, -1)
# output = torch.max(output)
desorted_output = self.tanh(self.hidden_layer(desorted_output))
context_input = context_input.view(BATCH_SIZE, 1, 50)
_context_input = self.tanh(self.context_input_(attn_target))
gate_out = self.gate(desorted_output, _context_input, context_input)
embedding_input = torch.cat((desorted_output, _context_input), dim=2)
desorted_output = self.tanh(self.embedding_layers(gate_out))
return desorted_output
def initHidden(self, BATCH_SIZE):
return (torch.zeros(2, BATCH_SIZE, self.hidden_size, device=config.device),
torch.zeros(2, BATCH_SIZE, self.hidden_size, device=config.device))
class PreTrainABAE_fix(nn.Module):
def __init__(self, embed_dim, n_aspect, aspect_embedding, embed):
super(PreTrainABAE_fix, self).__init__()
self.embed_dim = embed_dim
self.n_aspect = n_aspect
self.embedded = nn.Embedding.from_pretrained(embed)
# query: global_content_embeding: [batch_size, embed_dim]
# key: inputs: [batch_size, doc_size, embed_dim]
# value: inputs
# mapping the input word embedding to global_content_embedding space
self.sentence_embedding_attn = DotProductAttention(
d_query=embed_dim,
d_key=embed_dim,
d_value=embed_dim,
mapping_on="key"
)
# embed_dim => n_aspect
self.aspect_linear = nn.Linear(embed_dim, n_aspect)
# initialized with the centroids of clusters resulting from running k-means on word embeddings in corpus
self.aspect_lookup_mat = nn.Parameter(data=aspect_embedding, requires_grad=True)
# self.aspect_lookup_mat = nn.Parameter(torch.Tensor(n_aspect, embed_dim).double())
# self.aspect_lookup_mat.data.uniform_(-1, 1)
def forward(self, inputs, eps=config.epsilon):
input_lengths = inputs[:, 0]
inputs = inputs[:, 2:]
input_index = inputs.long()
sorted_seq_lengths, indices = torch.sort(input_lengths, descending=True)
_, desorted_indices = torch.sort(indices, descending=False)
input_index = input_index[:, 0: sorted_seq_lengths[0]]
# input_index = input_index[indices]
inputs = self.embedded(input_index).double()
# inputs: [batch_size, doc_size, embed_dim]
# input_lengths: [batch_size]
# averaging embeddings in a document: [batch_size, 1, embed_dim]
avg_denominator = input_lengths.repeat(self.embed_dim).view(self.embed_dim, -1).transpose(0, 1).float()
global_content_embed = torch.sum(inputs.double(), dim=1).div(avg_denominator.double())
global_content_embed = global_content_embed.unsqueeze(dim=1)
# construct sentence embedding, with attention(query: global_content_embed, keys: inputs, value: inputs)
# [batch_size, embed_dim]
sentence_embedding, _ = self.sentence_embedding_attn(
global_content_embed.float(), inputs.float(), inputs.float()
)
# print("attn : ", sentence_embedding)
sentence_embedding = sentence_embedding.squeeze(dim=1)
# [batch_size, n_aspect]
aspect_weight = F.softmax(self.aspect_linear(sentence_embedding), dim=1)
_, predicted = torch.max(aspect_weight.data, 1)
return predicted
def regular(self, eps=config.epsilon):
div = eps + torch.norm(self.aspect_lookup_mat, 2, -1)
div = div.view(-1, 1)
self.aspect_lookup_mat.data = self.aspect_lookup_mat / div
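# For reference, regular() above L2-normalizes each row of the aspect matrix.
# A numpy equivalent (illustrative sketch only; the 1e-8 epsilon here is an
# assumed stand-in for config.epsilon):
def _demo_row_l2_normalize():
    mat = np.random.randn(24, 300)
    div = 1e-8 + np.linalg.norm(mat, ord=2, axis=-1, keepdims=True)
    normalized = mat / div
    # every row of `normalized` now has (approximately) unit L2 norm
    return normalized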
class MyDataset(Dataset):
def __init__(self, data):
self.data = data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
class DataPrepare:
def __init__(self):
"""load data from files"""
print("=" * 100)
print("Prepare training data...")
lines_pos1 = open('data/Weakly_labeled_data_1.1M/camera_positive.csv',
encoding='utf-8').read().strip().split('\n')
lines_pos2 = open('data/Weakly_labeled_data_1.1M/cellphone_positive.csv',
encoding='utf-8').read().strip().split('\n')
lines_pos3 = open('data/Weakly_labeled_data_1.1M/laptop_positive.csv',
encoding='utf-8').read().strip().split('\n')
lines_neg1 = open('data/Weakly_labeled_data_1.1M/camera_negative.csv',
encoding='utf-8').read().strip().split('\n')
lines_neg2 = open('data/Weakly_labeled_data_1.1M/cellphone_negative.csv',
encoding='utf-8').read().strip().split('\n')
lines_neg3 = open('data/Weakly_labeled_data_1.1M/laptop_negative.csv',
encoding='utf-8').read().strip().split('\n')
lines = open('data/Labeled_data_11754/new_11754.csv',
encoding='gbk').read().strip().split('\n')
'''merge data'''
lines_pos = lines_pos1 + lines_pos2 + lines_pos3
lines_neg = lines_neg1 + lines_neg2 + lines_neg3
lines_all = lines_pos + lines_neg
'''normalize sentences'''
self.pairs_all = [self.normalizeString(s) for s in lines_all]
self.pairs_pos = [self.normalizeString(s) for s in lines_pos]
self.pairs_neg = [self.normalizeString(s) for s in lines_neg]
self.pairs_classifier = [self.normalizeString(s) for s in lines]
self.vocab = {}
self.maxlen = 0
self.max_items = []
def word2idx(sentence, maxlen, max_items):
items = sentence.strip().split()
if len(items) > maxlen:
maxlen = len(items)
max_items = items
for word in items:
if word not in self.vocab:
self.vocab[word] = len(self.vocab)
return maxlen, max_items
        '''count maxlen and record the longest token sequence'''
for line in self.pairs_classifier:
self.maxlen, self.max_items = word2idx(line, self.maxlen, self.max_items)
for line in self.pairs_pos:
self.maxlen, self.max_items = word2idx(line, self.maxlen, self.max_items)
for line in self.pairs_neg:
self.maxlen, self.max_items = word2idx(line, self.maxlen, self.max_items)
@property
    def weakly_data(self):
        """Get the weakly-labeled training data."""
'''if pre-save vectors from Google News'''
if config.pp_data_weak:
            save_name = 'embed/embedding/word_embedding_classifier.txt'
self.saveVocab(save_name)
return self.vocab, self.pairs_pos, self.pairs_neg
@property
    def weakly_data_process(self):
        """Process the weakly-labeled training data."""
print("=" * 100)
print("Weakly data Process...")
vocab, pairs_pos, pairs_neg = self.weakly_data
final_embedding = np.array(np.load("embed/Vector_word_embedding_all.npy"))
# maxlen = 0
# bb = []
#
# def word2idx(sentence, vocab, maxlen, bb):
# items = sentence.strip().split()
# if len(items) > maxlen:
# maxlen = len(items)
# bb = items
# for word in items:
# if word not in vocab:
# vocab[word] = len(vocab)
# return maxlen, bb
#
# for line in pairs_pos:
# maxlen, bb = word2idx(line, vocab, maxlen, bb)
#
# for line in pairs_neg:
# maxlen, bb = word2idx(line, vocab, maxlen, bb)
'''initialize sentence'''
input_sen_1 = config.pad_idx + np.zeros((len(pairs_pos), config.maxlen))
input_sen_1 = input_sen_1.astype(np.int)
input_sen_2 = config.pad_idx + np.zeros((len(pairs_neg), config.maxlen))
input_sen_2 = input_sen_2.astype(np.int)
# def sentence2vec(sentence, vocab, wordindex):
# items = sentence.strip().split()
# length = len(items)
# for word in items:
# wordindex.append(vocab[word])
# return length, wordindex
#
# def cal_sentence_index():
# for line in range(len(pairs_pos)):
# wordindex = []
# length, wordindex = sentence2vec(pairs_pos[line], vocab, wordindex)
# input_sen_1[line][0] = length
# input_sen_1[line][1] = 10
# input_sen_1[line][2:length + 2] = np.array(wordindex)
#
# for line in range(len(pairs_neg)):
# wordindex = []
# length, wordindex = sentence2vec(pairs_neg[line], vocab, wordindex)
# input_sen_2[line][0] = length
# input_sen_2[line][1] = 10
# input_sen_2[line][2:length + 2] = np.array(wordindex)
# return input_sen_1, input_sen_2
'''serialize sentence and add extra info'''
input_sen_1, input_sen_2 = self.week_cal_sentence_index(
input_sen_1, input_sen_2, pairs_pos, pairs_neg)
# cal_sentence_index()
'''initialize unknown word embedding'''
add = np.zeros(config.embed_dim)
final_embedding = np.row_stack((final_embedding, add))
'''randomly choose train and test data'''
np.random.shuffle(input_sen_1)
np.random.shuffle(input_sen_2)
input_pos_train = input_sen_1[:int(len(input_sen_1) * config.weak_sr), :]
input_neg_train = input_sen_2[:int(len(input_sen_2) * config.weak_sr), :]
input_pos_test = input_sen_1[int(len(input_sen_1) * config.weak_sr):, :]
input_neg_test = input_sen_2[int(len(input_sen_2) * config.weak_sr):, :]
def random_sample(matrix, sample_size):
matrix_after = []
sample_index = np.random.randint(0, len(matrix), sample_size)
for i in sample_index:
# np.row_stack((matrix_after, matrix[i]))
matrix_after.append(matrix[i])
return np.array(matrix_after)
train_pos_1 = random_sample(input_pos_train, config.sample_size)
train_pos_2 = random_sample(input_pos_train, config.sample_size)
train_pos_neg = random_sample(input_neg_train, config.sample_size)
train_neg_1 = random_sample(input_neg_train, config.sample_size)
train_neg_2 = random_sample(input_neg_train, config.sample_size)
train_neg_pos = random_sample(input_pos_train, config.sample_size)
train_dim1 = np.vstack((train_pos_1, train_neg_1))
train_dim2 = np.vstack((train_pos_2, train_neg_2))
train_dim3 = np.vstack((train_pos_neg, train_neg_pos))
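        # Each row triple forms (anchor, same-polarity positive,
        # opposite-polarity negative), matching the TripletMarginLoss
        # applied in weakly_train below.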
all_data = MyDataset(self.read_weak_data(train_dim1, train_dim2, train_dim3))
return all_data, final_embedding, \
np.array(input_pos_test[0:config.weak_test_samples, :]), \
np.array(input_neg_test[0:config.weak_test_samples, :])
def week_cal_sentence_index(self, input_sen_1, input_sen_2, pairs_pos, pairs_neg):
"""serialize sentence and add extra info"""
for line in range(len(pairs_pos)):
length, wordindex = self.sentence2vec(pairs_pos[line])
input_sen_1[line][0] = length # real length of sentence
input_sen_1[line][1] = 10 # aspect index
            input_sen_1[line][2:length + 2] = np.array(wordindex)  # token indices
if config.need_pos is True:
input_sen_1[line][config.maxlen:length + config.maxlen] = [x for x in range(length)]
for line in range(len(pairs_neg)):
            length, wordindex = self.sentence2vec(pairs_neg[line])
input_sen_2[line][0] = length
input_sen_2[line][1] = 10
input_sen_2[line][2:length + 2] = np.array(wordindex)
if config.need_pos is True:
input_sen_2[line][config.maxlen:length + config.maxlen] = [x for x in range(length)]
return input_sen_1, input_sen_2
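    # Layout of each encoded row produced above:
    #   column 0           -> true sentence length L
    #   column 1           -> aspect index (placeholder 10 here; overwritten
    #                         by the pre-trained ABAE during training)
    #   columns 2..L+1     -> vocabulary indices of the tokens
    #   remaining columns  -> config.pad_idx padding (plus position ids in
    #                         columns maxlen..maxlen+L when config.need_pos)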
def unicodeToAscii(self, s):
"""encode sentence from Unicode to Ascii"""
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def normalizeString(self, s):
"""clean symbols and lower letters"""
s = self.unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?\(\)\"])", r"", s)
s = re.sub(r"[^0-9a-zA-Z]+", r" ", s)
return s
def normalize(self, s):
"""clean other symbols"""
# s = unicodeToAscii(s.strip())
s = re.sub(r"([\[\]\"\n])", r"", s)
return s
def saveVocab(self, filename, mode='w'):
"""save embedding vocab into local files"""
if config.pp_data_clas:
import gensim
model = gensim.models.KeyedVectors.load_word2vec_format('D:/GoogleNews-vectors-negative300.bin',
binary=True)
# spell = SpellChecker()
spell = None
i = 0
with open(filename, mode=mode, encoding='utf-8') as file:
for key in self.vocab.keys():
if key in model:
a = key + ',' + self.normalize(str(model[key])) + "\n"
file.write(a)
i += 1
else:
spell_key = spell.correction(key)
if spell_key in model:
a = key + "," + self.normalize(str(model[spell_key])) + "\n"
file.write(a)
i += 1
else:
a = key + "," + spell_key + "\n"
file.write(a)
    def random_sample(self, matrix, sample_size):
        """Randomly sample rows from matrix (with replacement)."""
matrix_after = []
sample_index = np.random.randint(0, len(matrix), sample_size)
for i in sample_index:
# np.row_stack((matrix_after, matrix[i]))
matrix_after.append(matrix[i])
return np.array(matrix_after)
    def read_weak_data(self, dim_1, dim_2, dim_3):
        """Build triplet training samples from three aligned arrays."""
all_data = []
for idx in range(len(dim_1)):
items = torch.from_numpy(dim_1[idx])
items1 = torch.from_numpy(dim_2[idx])
items2 = torch.from_numpy(dim_3[idx])
data = {
'input1': items,
'input2': items1,
'input3': items2
}
all_data.append(data)
return all_data
def sentence2vec(self, sentence):
"""serialize sentence"""
wordindex = []
items = sentence.strip().split()
length = len(items)
for word in items:
wordindex.append(self.vocab[word])
return length, wordindex
def word2idx(self, sentence, maxlen, max_items):
"""build vocab and count maxlen of sentence"""
items = sentence.strip().split()
if len(items) > maxlen:
maxlen = len(items)
max_items = items
for word in items:
if word not in self.vocab:
self.vocab[word] = len(self.vocab)
return maxlen, max_items
class CornerData:
def __init__(self):
pass
def pp_dataloader_weak(self, all_data, final_embedding):
all_data_train = all_data
embed = final_embedding
final_embedding = torch.from_numpy(embed)
train_dataloader = DataLoader(
dataset=all_data_train,
batch_size=config.batch_size,
shuffle=True,
drop_last=True,
num_workers=4
)
return final_embedding, train_dataloader
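# Note: drop_last=True above keeps every batch at exactly config.batch_size,
# which the fixed-size initHidden(...) and .view(config.batch_size, 300)
# calls in the training loops below rely on.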
def weakly_train(train_data, test_pos, test_neg, embed):
init_aspect = np.array(np.load("initAspect.npy"))
# init_aspect = init_aspect / np.linalg.norm(init_aspect, axis=-1, keepdims=True)
init_aspect = torch.from_numpy(init_aspect)
PreTrainABAE = PreTrainABAE_fix(300, 24, init_aspect, embed).to(config.device)
pre_trained_aspect = torch.load("AspectExtract/Aspect_Model.pkl")
aspect_dict = PreTrainABAE.state_dict()
pre_trained_dict = {k: v for k, v in pre_trained_aspect.items() if k in aspect_dict}
aspect_dict.update(pre_trained_dict)
PreTrainABAE.load_state_dict(aspect_dict)
PreTrainABAE = PreTrainABAE.eval()
trained_aspect = pre_trained_aspect["aspect_lookup_mat"].data
run = WdeRnnEncoderFix(300, 300, 50, embed, trained_aspect).to(config.device)
# params = []
# for param in run.parameters():
# if param.requires_grad:
# params.append(param)
# optimizer = optim.SGD(filter(lambda p: p.requires_grad, run.parameters()), lr=0.0001)
optimizer = optim.SGD(filter(lambda p: p.requires_grad, run.parameters()), lr=0.0001)
loss_func = torch.nn.TripletMarginLoss(margin=4.0, p=2)
for epoch in range(200):
run_hidden = run.initHidden(config.batch_size)
loss_last = torch.tensor([0], dtype=torch.float)
# TODO: remove zero_grad()
optimizer.zero_grad()
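        # With zero_grad() only here (outside the batch loop), gradients
        # accumulate across all batches of an epoch; moving it inside the
        # inner loop would give standard per-batch updates.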
# run.zero_grad()
for idx, sample_batch in enumerate(train_data):
# now = time.time()
run = run.train()
input1 = sample_batch['input1'].to(config.device)
input2 = sample_batch['input2'].to(config.device)
input3 = sample_batch['input3'].to(config.device)
aspect_info = PreTrainABAE(input1)
input1[:, 1] = aspect_info
aspect_info = PreTrainABAE(input2)
input2[:, 1] = aspect_info
aspect_info = PreTrainABAE(input3)
input3[:, 1] = aspect_info
out1 = run(input1, run_hidden).view(config.batch_size, 300)
out2 = run(input2, run_hidden).view(config.batch_size, 300)
out3 = run(input3, run_hidden).view(config.batch_size, 300)
loss_last = loss_func(out1, out2, out3)
loss_last.backward()
optimizer.step()
        print('epoch {} of {}: loss : {}'.format(epoch, 200, loss_last.item()))
def push():
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?\(\)\"])", r"", s)
s = re.sub(r"[^0-9a-zA-Z]+", r" ", s)
return s
def normalize(s):
# s = unicodeToAscii(s.strip())
s = re.sub(r"([\[\]\"\n])", r"", s)
return s
def unicodeToAscii(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
lines_pos1 = open(
'data/Weakly_labeled_data_1.1M/camera_positive.csv'
, encoding='utf-8').read().strip().split('\n')
lines_neg1 = open(
'data/Weakly_labeled_data_1.1M/camera_negative.csv'
, encoding='utf-8').read().strip().split('\n')
lines_pos2 = open(
'data/Weakly_labeled_data_1.1M/cellphone_positive.csv'
, encoding='utf-8').read().strip().split('\n')
lines_neg2 = open(
'data/Weakly_labeled_data_1.1M/cellphone_negative.csv'
, encoding='utf-8').read().strip().split('\n')
lines_pos3 = open(
'data/Weakly_labeled_data_1.1M/laptop_positive.csv'
, encoding='utf-8').read().strip().split('\n')
lines_neg3 = open(
'data/Weakly_labeled_data_1.1M/laptop_negative.csv'
, encoding='utf-8').read().strip().split('\n')
lines_pos = lines_pos1 + lines_pos2 + lines_pos3
lines_neg = lines_neg1 + lines_neg2 + lines_neg3
lines = open(
'data/Labeled_data_11754/new_11754.csv'
, encoding='gbk').read().strip().split('\n')
pairs_classify = [normalizeString(s) for s in lines]
pairs_pos = [normalizeString(s) for s in lines_pos]
pairs_neg = [normalizeString(s) for s in lines_neg]
vocab = {}
print("=" * 100)
print("Take Word To Vec")
final_embedding = np.array(np.load("embed/Vector_word_embedding_all.npy"))
# final_embedding = np.delete(final_embedding, 60905, 0)
# print(final_embedding[60905])
maxlen = 0
bb = []
def word2idx(sentence, vocab, maxlen, bb):
items = sentence.strip().split()
if len(items) > maxlen:
maxlen = len(items)
bb = items
for word in items:
if word not in vocab:
vocab[word] = len(vocab)
return maxlen, bb
for line in pairs_classify:
maxlen, bb = word2idx(line, vocab, maxlen, bb)
for line in pairs_pos:
maxlen, bb = word2idx(line, vocab, maxlen, bb)
for line in pairs_neg:
maxlen, bb = word2idx(line, vocab, maxlen, bb)
input_sen_1 = config.pad_idx + np.zeros((len(pairs_pos), config.maxlen))
input_sen_1 = input_sen_1.astype(np.int)
input_sen_2 = config.pad_idx + np.zeros((len(pairs_neg), config.maxlen))
input_sen_2 = input_sen_2.astype(np.int)
def sentence2vec(sentence, vocab, wordindex):
items = sentence.strip().split()
length = len(items)
for word in items:
wordindex.append(vocab[word])
return length, wordindex
def cal_sentence_index():
for line in range(len(pairs_pos)):
wordindex = []
length, wordindex = sentence2vec(pairs_pos[line], vocab, wordindex)
input_sen_1[line][0] = length
input_sen_1[line][1] = 10
input_sen_1[line][2:length + 2] = np.array(wordindex)
for line in range(len(pairs_neg)):
wordindex = []
length, wordindex = sentence2vec(pairs_neg[line], vocab, wordindex)
input_sen_2[line][0] = length
input_sen_2[line][1] = 10
input_sen_2[line][2:length + 2] = np.array(wordindex)
return input_sen_1, input_sen_2
cal_sentence_index()
# add = -1 + 2*np.random.random(300)
add = np.zeros(config.embed_dim)
final_embedding = np.row_stack((final_embedding, add))
np.random.shuffle(input_sen_1)
np.random.shuffle(input_sen_2)
input_pos_train = input_sen_1[:int(len(input_sen_1) * config.weak_sr), :]
input_neg_train = input_sen_2[:int(len(input_sen_2) * config.weak_sr), :]
input_pos_test = input_sen_1[int(len(input_sen_1) * config.weak_sr):, :]
input_neg_test = input_sen_2[int(len(input_sen_2) * config.weak_sr):, :]
def random_sample(matrix, sample_size):
matrix_after = []
sample_index = np.random.randint(0, len(matrix), sample_size)
for i in sample_index:
# np.row_stack((matrix_after, matrix[i]))
matrix_after.append(matrix[i])
return np.array(matrix_after)
train_pos_1 = random_sample(input_pos_train, config.sample_size)
train_pos_2 = random_sample(input_pos_train, config.sample_size)
train_pos_neg = random_sample(input_neg_train, config.sample_size)
train_neg_1 = random_sample(input_neg_train, config.sample_size)
train_neg_2 = random_sample(input_neg_train, config.sample_size)
train_neg_pos = random_sample(input_pos_train, config.sample_size)
train_dim1 = np.vstack((train_pos_1, train_neg_1))
train_dim2 = np.vstack((train_pos_2, train_neg_2))
train_dim3 = np.vstack((train_pos_neg, train_neg_pos))
def read_data(dim_1, dim_2, dim_3):
all_data = []
for idx in range(len(dim_1)):
items = torch.from_numpy(dim_1[idx])
items1 = torch.from_numpy(dim_2[idx])
items2 = torch.from_numpy(dim_3[idx])
data = {
'input1': items,
'input2': items1,
'input3': items2
}
all_data.append(data)
return all_data
all_data = MyDataset(read_data(train_dim1, train_dim2, train_dim3))
return all_data, final_embedding, np.array(input_pos_test[0:8000, :]), np.array(input_neg_test[0:8000, :])
def beginTrain_lstm(embedding, train_dataloader):
init_aspect = np.array(np.load("initAspect.npy"))
# init_aspect = init_aspect / np.linalg.norm(init_aspect, axis=-1, keepdims=True)
init_aspect = torch.from_numpy(init_aspect)
PreTrainABAE = PreTrainABAE_fix(300, 24, init_aspect, embedding).cuda(config.device)
pre_trained_aspect = torch.load("AspectExtract/Aspect_Model.pkl")
aspect_dict = PreTrainABAE.state_dict()
pre_trained_dict = {k: v for k, v in pre_trained_aspect.items() if k in aspect_dict}
aspect_dict.update(pre_trained_dict)
PreTrainABAE.load_state_dict(aspect_dict)
PreTrainABAE = PreTrainABAE.eval()
trained_aspect = pre_trained_aspect["aspect_lookup_mat"].data
run = WdeRnnEncoderFix(300, 300, 50, embedding, trained_aspect).cuda(config.device)
# TODO: change NO.2 -> chagne optimizer initialize
# params = []
# for param in run.parameters():
# if param.requires_grad:
# params.append(param)
# optimizer = optim.SGD(params, lr=0.0001)
optimizer = optim.SGD(filter(lambda p: p.requires_grad, run.parameters()), lr=0.0001)
loss_func = nn.TripletMarginLoss(margin=4.0, p=2)
for epoch in range(200):
run_hidden = run.initHidden(config.batch_size)
loss_last = torch.tensor([0], dtype=torch.float)
# TODO: add zero_grad()
optimizer.zero_grad()
for idx, sample_batch in enumerate(train_dataloader):
# now = time.time()
run = run.train()
input1 = sample_batch['input1'].cuda(config.device)
input2 = sample_batch['input2'].cuda(config.device)
input3 = sample_batch['input3'].cuda(config.device)
# if input1[:,0].item() < 3 or input2[:,0].item() < 3 or input3[:,0].item() < 3:
# continue
aspect_info = PreTrainABAE(input1)
input1[:, 1] = aspect_info
aspect_info = PreTrainABAE(input2)
input2[:, 1] = aspect_info
aspect_info = PreTrainABAE(input3)
input3[:, 1] = aspect_info
out1 = run(input1.cuda(config.device), run_hidden).view(config.batch_size, 300)
out2 = run(input2.cuda(config.device), run_hidden).view(config.batch_size, 300)
out3 = run(input3.cuda(config.device), run_hidden).view(config.batch_size, 300)
loss_last = loss_func(out1, out2, out3)
loss_last.backward()
optimizer.step()
# TODO: remove valid
# if epoch % 2 == 0:
# run.zero_grad()
# run = run.eval()
# valid_now = self.valid(PreTrainABAE, run)
# a = round((loss_last).item(), 5)
# b = round(valid_now, 5)
# if valid_now > 1.13:
# file_name = "pretrainmodel/" + "every2_loss_" + str(a) + "valid_" + str(
# b) + ".pkl"
# torch.save(run.state_dict(), file_name)
# valid_compare = valid_now
#
# print('epoch {} of {}: TEST : {}'.format(epoch, 200, valid_now))
print('epoch {} of {}: loss : {}'.format(epoch, 200, (loss_last).item()))
if __name__ == '__main__':
print('current time:', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
'''prepare data'''
# data_prepare = DataPrepare()
my_loader = CornerData()
# all_data, final_embedding, test_pos, test_neg = data_prepare.weakly_data_process
all_data, final_embedding, test_pos, test_neg = push()
embedding, train_dataloader = my_loader.pp_dataloader_weak(all_data, final_embedding)
'''calculate accuracy'''
weakly_train(train_dataloader, test_pos, test_neg, embedding)
# beginTrain_lstm(embedding, train_dataloader)
|
[
"278976237@qq.com"
] |
278976237@qq.com
|
cd1d47d99acf174779ae17c371c1608da97b648d
|
a0db69ddb08f229519ec89e699ffb6c575ed7bf5
|
/addpoint/catimg/views.py
|
59604eefae69f52c4c6908730313133f5b4eb650
|
[] |
no_license
|
ninsgosai/photo-project
|
db814fe8850a87d49c98f1fa3347b5e60577a3f7
|
abeb5b1f00701d4716e70e1b8b0ea2815586167a
|
refs/heads/main
| 2023-01-07T01:09:52.911010
| 2020-11-18T09:41:04
| 2020-11-18T09:41:04
| 313,877,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,939
|
py
|
from django.shortcuts import render, HttpResponseRedirect
from django.contrib import messages
from .models import Catimg
from category.models import Category
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url='login')
def addci(request):
cat = Category.objects.all()
if request.method == 'POST':
cid = request.POST['cid']
cimg = request.FILES['cimg']
cbimg = request.FILES['cbimg']
xlg = request.POST['xlg']
ylg = request.POST['ylg']
xtxt = request.POST['xtxt']
ytxt = request.POST['ytxt']
sta = request.POST['sta']
add = Catimg(cat_id_id=cid, ci_img=cimg, ci_blankimg=cbimg, x_logo=xlg, y_logo=ylg, x_txt=xtxt, y_txt=ytxt, status=sta)
add.save()
messages.success(request, 'Category Image Added Successfully', extra_tags='success')
return HttpResponseRedirect('/ci/managecategoryimage/')
# msg = {'serr':'Category Image Added Successfully'}
# return render(request, 'catimg/add_catimg.html', {'err':msg, 'cat':cat, 'name': request.user})
else:
# msg = {'ferr':'Please Fill All Field Either Category Image Do Not Add.'}
return render(request, 'catimg/add_catimg.html', {'cat':cat, 'name': request.user})
@login_required(login_url='login')
def edtci(request, eid):
ecati = Catimg.objects.get(pk=eid)
cat = Category.objects.all()
return render(request, 'catimg/edit_catimg.html', {'eci':ecati, 'cat':cat, 'name': request.user})
@login_required(login_url='login')
def updci(request, uciid):
if request.method == 'POST':
try:
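            # Accessing request.FILES['cimg'] raises a KeyError-like
            # MultiValueDictKeyError when no new file was uploaded, which is
            # what routes execution to the except branch below; the != 0
            # comparison itself is always true for an uploaded file.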
if request.FILES['cimg'] != 0 and request.FILES['cbimg'] != 0:
cid = request.POST['cid']
cimg = request.FILES['cimg']
cbimg = request.FILES['cbimg']
xlg = request.POST['xlg']
ylg = request.POST['ylg']
xtxt = request.POST['xtxt']
ytxt = request.POST['ytxt']
sta = request.POST['sta']
edt = Catimg.objects.get(ci_id = uciid)
edt.cat_id_id = cid
edt.ci_img = cimg
edt.ci_blankimg = cbimg
edt.x_logo = xlg
edt.y_logo = ylg
edt.x_txt = xtxt
edt.y_txt = ytxt
edt.status = sta
edt.save()
except:
cid = request.POST['cid']
xlg = request.POST['xlg']
ylg = request.POST['ylg']
xtxt = request.POST['xtxt']
ytxt = request.POST['ytxt']
sta = request.POST['sta']
edt = Catimg.objects.get(ci_id = uciid)
edt.cat_id_id = cid
edt.ci_img = edt.ci_img
edt.ci_blankimg = edt.ci_blankimg
edt.x_logo = xlg
edt.y_logo = ylg
edt.x_txt = xtxt
edt.y_txt = ytxt
edt.status = sta
edt.save()
messages.success(request, 'Category Image Updated Successfully', extra_tags='success')
return HttpResponseRedirect('/ci/managecategoryimage/')
messages.success(request, 'Category Image Updated Successfully', extra_tags='success')
return HttpResponseRedirect('/ci/managecategoryimage/')
@login_required(login_url='login')
def delci(request, did):
de = Catimg.objects.get(pk=did)
de.delete()
messages.success(request, 'Category Image Deleted Successfully', extra_tags='danger')
return HttpResponseRedirect('/ci/managecategoryimage/')
@login_required(login_url='login')
def manci(request):
cati = Catimg.objects.all()
return render(request, 'catimg/man_catimg.html', {'ci': cati, 'name': request.user})
|
[
"69950933+ninad-goswamy@users.noreply.github.com"
] |
69950933+ninad-goswamy@users.noreply.github.com
|
88b2d208ec0c421caa0260b3170e6bddcc6015df
|
c81fa8d1d83d92fc431be589243a51085beb5592
|
/pu/_version.py
|
e0861be54fda54ac92118a7d025d3297a6d8f964
|
[] |
no_license
|
huyx/pu
|
4accdf17a9b186be68e347cbb79e3234139ed50b
|
f91060bb83f878f0bf17a04007cd00ad38adefb1
|
refs/heads/master
| 2021-01-10T20:11:12.292747
| 2019-02-19T07:25:31
| 2019-02-19T07:25:31
| 26,038,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43
|
py
|
# -*- coding: utf-8 -*-
version = '0.20.0'
|
[
"ycyuxin@gmail.com"
] |
ycyuxin@gmail.com
|
b5088b61fa95236e1bbb03e7917ba62faf25de1b
|
f836b1b2b8914871c27cf1123e67d1956c127a3f
|
/setup.py
|
c3f0f992200fd23e7c3a0f36bf1e8ec2be432fdf
|
[] |
no_license
|
chengxinGuo/OODA-FLOW-Bioimage-Pipeline
|
62b98ada347c071b05c5443417775e85c981ca9d
|
b93fb5ffaa2c11221a09ab23e75c54b9f8710e43
|
refs/heads/master
| 2022-11-19T05:17:21.969867
| 2020-07-14T06:48:13
| 2020-07-14T06:48:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,735
|
py
|
#
# Copyright (C) 2016-2017 by Yuan Lufeng
# See license.txt for full license and copyright notice.
#
# Authors: Yuan Lufeng
#
# setup.py
#
# Created on: Dec 11th, 2016
# Author: Yuan Lufeng
#
##\brief This version can compile code that mixes CUDA, C++, Python and Cython.
#
NAME = "FCDLR-original"
DESCRIPTION = "Fast Cell Division and Lineage Reconstruction pipeline"
LONG_DESCRIPTION = ''
MAINTAINER = 'Yuan Lufeng'
MAINTAINER_EMAIL = 'yuanlufeng@ncic.ac.cn'
URL = 'https://github.com/septicmk/lambdaimage'
LICENSE = 'BSD'
DOWNLOAD_URL = 'https://github.com/septicmk/lambdaimage'
#with open('lambdaimage/__init__.py') as f:
# for line in f:
# if line.startswith('__version__'):
# VERSION = line.strip().split()[-1][1:-1]
# break
#with open('requirements.txt') as f:
# REQUIRE = [l.strip() for l in f.readlines() if l]
if __name__ == '__main__':
import os
from os.path import join as pjoin
import subprocess
from setuptools import find_packages, setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext
import numpy
def find_in_path(name, path):
"Find a file in a search path"
        #adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
nvcc = find_in_path('nvcc', os.environ['PATH'])
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
        for k, v in cudaconfig.items():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
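    # Usage sketch: running e.g. `CUDAHOME=/usr/local/cuda python setup.py
    # build_ext` makes locate_cuda() use that toolkit directly instead of
    # searching PATH for nvcc.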
CUDA = locate_cuda()
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
        the OO route, I have this. Note, it's kind of like a weird functional
        subclassing going on."""
        # tell the compiler it can process .cu files
        self.src_extensions.append('.cu')
        # save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
extensions =[
Extension("_tracking_GMM",
sources=["_tracking_GMM.pyx","TrackingGaussianMixtureModel.cpp","GaussianMixtureModel.cpp","../Utils/parseConfigFile.cpp","responsibilities.cpp","variationalInference.cpp","../external/xmlParser2/xmlParser.cpp","../external/Nathan/tictoc.c","../UtilsCUDA/knnCuda.cu","../UtilsCUDA/GMEMupdateCUDA.cu","../constants.cpp","../temporalLogicalRules/temporalLogicalRules.cpp","../UtilsCUDA/3DEllipticalHaarFeatures/gentleBoost/gentleBoost.cpp","../temporalLogicalRules/trackletCalculation.cpp","cellDivision.cpp","supportFunctionsMain.cpp","../nucleiChSvWshedPBC/hierarchicalSegmentation.cpp","backgroundDetectionInterface.cpp","kdtree.cpp","../backgroundDetection/backgroundClassifier.cpp","../temporalLogicalRules/supervoxel.cpp","../temporalLogicalRules/localGeometricDescriptor.cpp","../Utils/WishartCDF.cpp","../Utils/MultinormalCDF.cpp","../temporalLogicalRules/nuclei.cpp","../temporalLogicalRules/lineageHyperTree.cpp","../temporalLogicalRules/lineage.cpp","../temporalLogicalRules/GaussianMixtureModel_Redux.cpp","../../build/mylib/array.c","../../build/mylib/mylib.c","../../build/mylib/image.c","../../build/mylib/histogram.c","../../build/mylib/region.c","../../build/mylib/MY_TIFF/tiff.io.c","../../build/mylib/MY_TIFF/tiff.image.c","../../build/mylib/water.shed.c","../../build/mylib/connectivity.c","../../build/mylib/cdf.c","../Utils/CSparse.c","../../build/mylib/draw.c","../../build/mylib/level.set.c","../../build/mylib/linear.algebra.c","../../build/mylib/svg.c","../../build/mylib/filters.c","../../build/mylib/paths.c","../../build/mylib/swc.c","../../build/mylib/fct.min.c","../../build/mylib/utilities.c","../../build/mylib/fct.root.c","../../build/mylib/hash.c","../../build/mylib/snake.c","../temporalLogicalRules/knnCUDA/knnCuda.cu","../external/gsl/gamma.c","../external/gsl/psi.c","../external/gsl/trig.c","../external/gsl/math.c","../external/gsl/exp.c","../external/gsl/zeta.c","../external/gsl/elementary.c","../external/gsl/log.c","../external/gsl/infnan.c","../external/gsl/error.c","../external/gsl/stream.c","../temporalLogicalRules/sparseHungarianAlgorithm/sparseHungarianAlgorithm.cpp","../temporalLogicalRules/sparseHungarianAlgorithm/external/munkres-cpp-master/src/munkres.cpp","../temporalLogicalRules/lineageWindowFeatures.cpp","../external/gsl/fdiv.c"],
#sources=["_tracking_GMM.pyx","TrackingGaussianMixtureModel.cpp","GaussianMixtureModel.cpp","../Utils/parseConfigFile.cpp","responsibilities.cpp","variationalInference.cpp","../external/xmlParser2/xmlParser.cpp","../external/Nathan/tictoc.c","../UtilsCUDA/knnCuda.cu","../UtilsCUDA/GMEMupdateCUDA.cu","../constants.cpp","../temporalLogicalRules/temporalLogicalRules.cpp","../UtilsCUDA/3DEllipticalHaarFeatures/gentleBoost/gentleBoost.cpp","../temporalLogicalRules/trackletCalculation.cpp","cellDivision.cpp","supportFunctionsMain.cpp","../nucleiChSvWshedPBC/hierarchicalSegmentation.cpp","backgroundDetectionInterface.cpp","kdtree.cpp","../backgroundDetection/backgroundClassifier.cpp","../temporalLogicalRules/supervoxel.cpp","../temporalLogicalRules/localGeometricDescriptor.cpp","../Utils/WishartCDF.cpp","../Utils/MultinormalCDF.cpp","../temporalLogicalRules/nuclei.cpp","../temporalLogicalRules/lineageHyperTree.cpp","../temporalLogicalRules/lineage.cpp","../temporalLogicalRules/GaussianMixtureModel_Redux.cpp","../../build2/mylib/array.c","../../build2/mylib/mylib.c","../../build2/mylib/image.c","../../build2/mylib/histogram.c","../../build2/mylib/region.c","../../build2/mylib/MY_TIFF/tiff.io.c","../../build2/mylib/MY_TIFF/tiff.image.c","../../build2/mylib/water.shed.c","../../build2/mylib/connectivity.c","../../build2/mylib/cdf.c","../Utils/CSparse.c","../../build2/mylib/draw.c","../../build2/mylib/level.set.c","../../build2/mylib/linear.algebra.c","../../build2/mylib/svg.c","../../build2/mylib/filters.c","../../build2/mylib/paths.c","../../build2/mylib/swc.c","../../build2/mylib/fct.min.c","../../build2/mylib/utilities.c","../../build2/mylib/fct.root.c","../../build2/mylib/hash.c","../../build2/mylib/snake.c","../temporalLogicalRules/knnCUDA/knnCuda.cu","../external/gsl/gamma.c","../external/gsl/psi.c","../external/gsl/trig.c","../external/gsl/math.c","../external/gsl/exp.c","../external/gsl/zeta.c","../external/gsl/elementary.c","../external/gsl/log.c","../external/gsl/infnan.c","../external/gsl/error.c","../external/gsl/stream.c","../temporalLogicalRules/sparseHungarianAlgorithm/sparseHungarianAlgorithm.cpp","../temporalLogicalRules/sparseHungarianAlgorithm/external/munkres-cpp-master/src/munkres.cpp","../UtilsCUDA/3DEllipticalHaarFeatures/AnnotationEllipsoid.cpp"],
#sources=["_tracking_GMM.pyx","TrackingGaussianMixtureModel.cpp","GaussianMixtureModel.cpp","../Utils/parseConfigFile.cpp","responsibilities.cpp","variationalInference.cpp","../external/xmlParser2/xmlParser.cpp","../external/Nathan/tictoc.c","../UtilsCUDA/knnCuda.cu","../UtilsCUDA/GMEMupdateCUDA.cu","../constants.cpp","../temporalLogicalRules/temporalLogicalRules.cpp","../UtilsCUDA/3DEllipticalHaarFeatures/EllipticalHaarFeatures.cpp","../UtilsCUDA/3DEllipticalHaarFeatures/EllipticalHaarFeatures.cu","../UtilsCUDA/3DEllipticalHaarFeatures/gentleBoost/gentleBoost.cpp","../temporalLogicalRules/trackletCalculation.cpp","cellDivision.cpp","supportFunctionsMain.cpp","../nucleiChSvWshedPBC/hierarchicalSegmentation.cpp","backgroundDetectionInterface.cpp","kdtree.cpp","../backgroundDetection/backgroundClassifier.cpp","../temporalLogicalRules/supervoxel.cpp","../temporalLogicalRules/localGeometricDescriptor.cpp","../Utils/WishartCDF.cpp","../Utils/MultinormalCDF.cpp","../temporalLogicalRules/nuclei.cpp","../temporalLogicalRules/lineageHyperTree.cpp","../temporalLogicalRules/lineage.cpp","../temporalLogicalRules/GaussianMixtureModel_Redux.cpp","../../build2/mylib/array.c","../../build2/mylib/mylib.c","../../build2/mylib/image.c","../../build2/mylib/histogram.c","../../build2/mylib/region.c","../../build2/mylib/MY_TIFF/tiff.io.c","../../build2/mylib/MY_TIFF/tiff.image.c","../../build2/mylib/water.shed.c","../../build2/mylib/connectivity.c","../../build2/mylib/cdf.c","../Utils/CSparse.c","../../build2/mylib/draw.c","../../build2/mylib/level.set.c","../../build2/mylib/linear.algebra.c","../../build2/mylib/svg.c","../../build2/mylib/filters.c","../../build2/mylib/paths.c","../../build2/mylib/swc.c","../../build2/mylib/fct.min.c","../../build2/mylib/utilities.c","../../build2/mylib/fct.root.c","../../build2/mylib/hash.c","../../build2/mylib/snake.c","../temporalLogicalRules/knnCUDA/knnCuda.cu","../external/gsl/gamma.c","../external/gsl/psi.c","../external/gsl/trig.c","../external/gsl/math.c","../external/gsl/exp.c","../external/gsl/zeta.c","../external/gsl/elementary.c","../external/gsl/log.c","../external/gsl/infnan.c","../external/gsl/error.c","../external/gsl/stream.c","../temporalLogicalRules/sparseHungarianAlgorithm/sparseHungarianAlgorithm.cpp","../temporalLogicalRules/sparseHungarianAlgorithm/external/munkres-cpp-master/src/munkres.cpp","../UtilsCUDA/3DEllipticalHaarFeatures/AnnotationEllipsoid.cpp"],
#sources=["_tracking_GMM.pyx","TrackingGaussianMixtureModel.cpp","GaussianMixtureModel.cpp","../Utils/parseConfigFile.cpp","responsibilities.cpp","variationalInference.cpp","../external/xmlParser2/xmlParser.cpp","../external/Nathan/tictoc.c","../UtilsCUDA/knnCuda.cu","../UtilsCUDA/GMEMupdateCUDA.cu","../constants.cpp","../temporalLogicalRules/temporalLogicalRules.cpp","../UtilsCUDA/3DEllipticalHaarFeatures/EllipticalHaarFeatures.cu","../UtilsCUDA/3DEllipticalHaarFeatures/gentleBoost/gentleBoost.cpp","../temporalLogicalRules/trackletCalculation.cpp","cellDivision.cpp","supportFunctionsMain.cpp","../nucleiChSvWshedPBC/hierarchicalSegmentation.cpp","backgroundDetectionInterface.cpp","kdtree.cpp","../backgroundDetection/backgroundClassifier.cpp","../temporalLogicalRules/supervoxel.cpp","../temporalLogicalRules/localGeometricDescriptor.cpp","../Utils/WishartCDF.cpp","../Utils/MultinormalCDF.cpp","../temporalLogicalRules/nuclei.cpp","../temporalLogicalRules/lineageHyperTree.cpp","../temporalLogicalRules/GaussianMixtureModel_Redux.cpp","../../build2/mylib/array.c","../../build2/mylib/mylib.c","../../build2/mylib/image.c","../../build2/mylib/histogram.c","../../build2/mylib/region.c","../../build2/mylib/MY_TIFF/tiff.io.c","../../build2/mylib/MY_TIFF/tiff.image.c","../../build2/mylib/water.shed.c","../../build2/mylib/connectivity.c","../../build2/mylib/cdf.c","../Utils/CSparse.c","../../build2/mylib/draw.c","../../build2/mylib/level.set.c","../../build2/mylib/fft.c","../../build2/mylib/linear.algebra.c","../../build2/mylib/svg.c","../../build2/mylib/filters.c","../../build2/mylib/paths.c","../../build2/mylib/swc.c","../../build2/mylib/fct.min.c","../../build2/mylib/utilities.c","../../build2/mylib/fct.root.c","../../build2/mylib/hash.c","../../build2/mylib/snake.c"],
#sources=["_tracking_GMM.pyx","TrackingGaussianMixtureModel.cpp","GaussianMixtureModel.cpp","../Utils/parseConfigFile.cpp","responsibilities.cpp","variationalInference.cpp","../external/xmlParser2/xmlParser.cpp","../external/Nathan/tictoc.c","../UtilsCUDA/knnCuda.cu","../UtilsCUDA/GMEMupdateCUDA.cu","../constants.cpp","../temporalLogicalRules/temporalLogicalRules.cpp","../UtilsCUDA/3DEllipticalHaarFeatures/EllipticalHaarFeatures.cu","../UtilsCUDA/3DEllipticalHaarFeatures/gentleBoost/gentleBoost.cpp","../temporalLogicalRules/trackletCalculation.cpp","cellDivision.cpp","supportFunctionsMain.cpp","../nucleiChSvWshedPBC/hierarchicalSegmentation.cpp","backgroundDetectionInterface.cpp","kdtree.cpp","../backgroundDetection/backgroundClassifier.cpp","../temporalLogicalRules/supervoxel.cpp","../temporalLogicalRules/localGeometricDescriptor.cpp","../Utils/WishartCDF.cpp","../Utils/MultinormalCDF.cpp","../temporalLogicalRules/nuclei.cpp","../temporalLogicalRules/lineageHyperTree.cpp","../temporalLogicalRules/GaussianMixtureModel_Redux.cpp","../../build2/mylib/array.c","../../build2/mylib/mylib.c","../../build2/mylib/image.c","../../build2/mylib/histogram.c","../../build2/mylib/region.c","../../build2/mylib/MY_TIFF/tiff.io.c","../../build2/mylib/MY_TIFF/tiff.image.c","../../build2/mylib/water.shed.c","../../build2/mylib/connectivity.c","../../build2/mylib/cdf.c","../Utils/CSparse.c","../../build2/mylib/draw.c","../../build2/mylib/level.set.c","../../build2/mylib/fft.c","../../build2/mylib/linear.algebra.c","../../build2/mylib/svg.c","../../build2/mylib/filters.c","../../build2/mylib/paths.c","../../build2/mylib/swc.c","../../build2/mylib/fct.min.c","../../build2/mylib/utilities.c","../../build2/mylib/fct.root.c","../../build2/mylib/hash.c","../../build2/mylib/snake.c","../temporalLogicalRules/mylib/MY_FFT/fft.D.c"],
#sources=["_tracking_GMM.pyx","TrackingGaussianMixtureModel.cpp","GaussianMixtureModel.cpp","../Utils/parseConfigFile.cpp","responsibilities.cpp","variationalInference.cpp","../external/xmlParser2/xmlParser.cpp","../external/Nathan/tictoc.c","../UtilsCUDA/knnCuda.cu","../UtilsCUDA/GMEMupdateCUDA.cu","../constants.cpp","../temporalLogicalRules/temporalLogicalRules.cpp","../UtilsCUDA/3DEllipticalHaarFeatures/EllipticalHaarFeatures.cu","../UtilsCUDA/3DEllipticalHaarFeatures/gentleBoost/gentleBoost.cpp","../temporalLogicalRules/trackletCalculation.cpp","cellDivision.cpp","supportFunctionsMain.cpp","../nucleiChSvWshedPBC/hierarchicalSegmentation.cpp","backgroundDetectionInterface.cpp","kdtree.cpp","../backgroundDetection/backgroundClassifier.cpp","../temporalLogicalRules/supervoxel.cpp","../temporalLogicalRules/localGeometricDescriptor.cpp","../Utils/WishartCDF.cpp","../Utils/MultinormalCDF.cpp","../temporalLogicalRules/nuclei.cpp","../temporalLogicalRules/lineageHyperTree.cpp","../temporalLogicalRules/GaussianMixtureModel_Redux.cpp","../../build2/mylib/array.c","../../build2/mylib/mylib.c","../../build2/mylib/image.c","../../build2/mylib/histogram.c","../../build2/mylib/region.c","../../build2/mylib/MY_TIFF/tiff.io.c","../../build2/mylib/MY_TIFF/tiff.image.c","../../build2/mylib/water.shed.c","../../build2/mylib/connectivity.c","../../build2/mylib/cdf.c","../Utils/CSparse.c","../../build2/mylib/draw.c","../../build2/mylib/level.set.c","../../build2/mylib/fft.c","../../build2/mylib/linear.algebra.c","../../build2/mylib/svg.c","../../build2/mylib/filters.c","../../build2/mylib/paths.c","../../build2/mylib/swc.c","../../build2/mylib/fct.min.c","../../build2/mylib/utilities.c","../../build2/mylib/fct.root.c","../../build2/mylib/hash.c","../../build2/mylib/snake.c","../temporalLogicalRules/mylib/MY_FFT/fft.D.c","../temporalLogicalRules/mylib/MY_FFT/fft.F.c"],
#sources=["_tracking_GMM.pyx","TrackingGaussianMixtureModel.cpp","GaussianMixtureModel.cpp","../Utils/parseConfigFile.cpp","responsibilities.cpp","variationalInference.cpp","../../build2/mylib/array.c","../../build2/mylib/mylib.c","../../build2/mylib/utilities.c","../../build2/mylib/image.c","../../build2/mylib/MY_TIFF/tiff.image.c","../../build2/mylib/MY_TIFF/tiff.io.c","../../build2/mylib/linear.algebra.c","selectForeground.cpp","watershedSegmentation.cpp","../temporalLogicalRules/supervoxel.cpp","../constants.cpp","set_union.c","hierarchicalSegmentation.cpp","../temporalLogicalRules/localGeometricDescriptor.cpp","agglomerateClustering.cpp","MedianFilter2D/medianFilter2D.cpp","CUDAmedianFilter2D/medianFilter2D.cu"],
#sources=["_process_stack.pyx","ProcessStack.cpp","IO.cpp","../Utils/parseConfigFile.cpp","../temporalLogicalRules/mylib/array.p"],
#sources=["_process_stack.pyx","ProcessStack.cpp","IO.cpp","../Utils/parseConfigFile.cpp","../temporalLogicalRules/supervoxel.cpp"],
#sources=["_process_stack.pyx","ProcessStack.cpp","IO.cpp","hierarchicalSegmentation.cpp","../Utils/parseConfigFile.cpp","watershedPersistanceAgglomeration.cpp","watershedSegmentation.cpp","agglomerateClustering.cpp"],
include_dirs=[numpy.get_include(),".","..","../temporalLogicalRules/","../nucleiChSvWshedPBC","../mylib/","../backgroundDetection","../UtilsCUDA/3DEllipticalHaarFeatures","../temporalLogicalRules/mylib/MY_TIFF","../temporalLogicalRules/knnCUDA","../external/gsl/","../temporalLogicalRules/sparseHungarianAlgorithm","../temporalLogicalRules/sparseHungarianAlgorithm/external/munkres-cpp-master/src",CUDA['include']],
#include_dirs=[numpy.get_include(),".","..","../temporalLogicalRules/","../temporalLogicalRules/mylib/","../temporalLogicalRules/mylib/MY_TIFF","MedianFilter2D",CUDA['include']],
language="c++",
#library_dirs = [CUDA['lib64']],
library_dirs = ["../../build/UtilsCUDA/3DEllipticalHaarFeatures",CUDA['lib64']],
#libraries = ['cudart'],
libraries = ['cudart','cusparse','cuda','ellipticalHaarFeatures'],
#libraries = ['cudart','cusparse','cuda'],
#libraries = ['cudart','cusparse'],
#libraries = ['cudart','cusparse','myfft'],
#libraries = ['cudart','cusparse','libmyfft.a'],
runtime_library_dirs = [CUDA['lib64']],
#extra_compile_args=["-std=c++0x","-pthread"],
#extra_link_args=["-std=c++0x","-pthread"]),
extra_compile_args={'gcc':["-std=c++0x"],
'nvcc':['-arch=sm_35', '--ptxas-options=-v', '-c', '--compiler-options', "'-fPIC'"]},
extra_link_args=["-std=c++0x"]),
#Extension("lambdatgmm.nucleiSegmentation._io",
# sources=["lambdatgmm/nucleiSegmentation/_io.pyx","lambdatgmm/nucleiSegmentation/IO.cpp"],
# #sources=["lambdatgmm/nucleiSegmentation/_io.pyx","lambdatgmm/nucleiSegmentation/IO.h"],
# include_dirs=[numpy.get_include()],
# language="c++"),
#Extension("lambdaimage.udf._trans",
# sources=["lambdaimage/udf/_trans.pyx","lambdaimage/udf/_trans_c.c"],
# include_dirs=[numpy.get_include()]),
#Extension("lambdaimage.udf._update",
# sources=["lambdaimage/udf/_update.pyx", "lambdaimage/udf/_update_c.c"],
# include_dirs=[numpy.get_include()]),
#Extension("lambdaimage.udf._moment",
# sources=["lambdaimage/udf/_moment.pyx"],
# inlcude_dirs=[numpy.get_include()]),
#Extension("lambdaimage.udf._intensity",
# sources=["lambdaimage/udf/_intensity.pyx"],
# include_dirs=[numpy.get_include()]),
#Extension("lambdaimage.udf._phansalkar",
# sources=["lambdaimage/udf/_phansalkar.pyx", "lambdaimage/udf/_phansalkar_c.c"],
# include_dirs=[numpy.get_include()]),
]
from Cython.Build import cythonize
extensions = cythonize(extensions)
setup(
name = NAME,
description = DESCRIPTION,
long_description = LONG_DESCRIPTION,
maintainer = MAINTAINER,
maintainer_email = MAINTAINER_EMAIL,
url=URL,
license = LICENSE,
        download_url = DOWNLOAD_URL,
#version = VERSION,
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: C',
'Programming Language :: C++',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
],
#install_requires = REQUIRE,
packages = find_packages(),
#cmdclass = {'build_ext': build_ext},
cmdclass={'build_ext': custom_build_ext},
ext_modules = extensions
# since the package has c code, the egg cannot be zipped
#zip_safe=False
)
|
[
"noreply@github.com"
] |
chengxinGuo.noreply@github.com
|
ea41937cfdb5d0ddde38badacad944b72cfbfb7b
|
5a54c1e95578a3a073fe592c86621b69114e3a06
|
/env/lib/python3.7/types.py
|
13b39e3e356da4dafe288d71839337c8e5e2a300
|
[
"MIT"
] |
permissive
|
fepas/django-react
|
a0c66e51d92652f8e22cdbf19b5699f4d9aa00da
|
6da3fc9d33af23c96ae018df90eaff9a7b6b5f37
|
refs/heads/master
| 2020-04-29T18:20:13.954071
| 2019-03-18T17:09:12
| 2019-03-18T17:09:12
| 176,320,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 44
|
py
|
/home/fepas/anaconda3/lib/python3.7/types.py
|
[
"fepas.unb@gmail.com"
] |
fepas.unb@gmail.com
|
5b45e5fcdbafb967bebb0e2376695c8056505900
|
87b9119646ff00e180584818869bbd49ba0bad8c
|
/dashboards/urls.py
|
cc16ec193597f943435b2649df1641b8e925fd77
|
[] |
no_license
|
d40a/project1
|
f47944dcd7700a1e9df8ae42e46ce929d18d5624
|
b4c3ea1a21a6988a9ec7e10a3801e2be53d74d5f
|
refs/heads/master
| 2021-06-10T22:23:46.997810
| 2017-02-03T18:22:42
| 2017-02-03T18:22:42
| 80,071,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^get_data/', views.get_data),
url(r'^details_of_test/', views.details_of_test),
]
|
[
"dimaa@google.com"
] |
dimaa@google.com
|
6b3d429131abadcf9ca92e14ad498d833ecf9d6e
|
7df64cc23fba406829ebb04b54974dbf4dd26318
|
/simulated_pick/models/simulated_pick_product.py
|
2137b75490722ee9c85a30667aa96665bbdaca8a
|
[] |
no_license
|
sendalpegat/azi-odoo-modules
|
feb5c379b49afb07df9f6642416e2557a71040db
|
9d6eaa43465492291c5bd6b98876fe893d2fd250
|
refs/heads/master
| 2023-05-07T02:57:45.738424
| 2019-12-20T20:44:40
| 2019-12-20T20:44:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,213
|
py
|
# -*- coding: utf-8 -*-
# (c) 2014 scosist
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import models, fields, api
import odoo.addons.decimal_precision as dp
class SimulatedPickProduct(models.TransientModel):
_name = 'simulated.pick.product'
sim_prod_id = fields.Many2one(
comodel_name='product.product',
string='Simulated Product',
required=True,
ondelete="no action",
index=True)
product_id = fields.Many2one(
comodel_name='product.product',
string='Product',
required=True,
ondelete="no action",
index=True)
product_qty = fields.Float(
string="Req'd Qty",
digits=dp.get_precision('Product Unit of Measure'),
required=True)
on_hand_before = fields.Float(
string='On-Hand Before',
digits=dp.get_precision('Product Unit of Measure'),
required=True)
on_hand_after = fields.Float(
string='On-Hand After',
digits=dp.get_precision('Product Unit of Measure'),
required=True)
short = fields.Float(
string='Short',
digits=dp.get_precision('Product Unit of Measure'),
required=True)
proc_action = fields.Char(string='Action')
routing_detail = fields.Char(string="Routing Detail")
categ_id = fields.Many2one(
comodel_name='product.category',
related='product_id.categ_id',
string='Internal Category',
store=True)
product_uom = fields.Many2one(
comodel_name='product.uom',
related='product_id.uom_id',
string='UoM',
store=True)
default_supplier_id = fields.Many2one(
comodel_name='res.partner',
string='Supplier',
compute='_compute_default_supplier',
readonly=True,
index=True,
store=True)
@api.depends('product_id')
def _compute_default_supplier(self):
for line in self:
line.default_supplier_id = line.product_id.seller_ids and line.product_id.seller_ids[0].name or False
@api.multi
def action_material_analysis(self):
self.ensure_one()
return self.product_id.action_material_analysis()
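# Illustrative sketch (not part of the original module): how the supplier
# compute resolves. For a product whose seller_ids are
# [vendor_a_info, vendor_b_info] (ordered by sequence), the compute picks
# the first vendor:
#
# line.default_supplier_id == vendor_a_info.name  # a res.partner record
#
# and it falls back to False when the product has no seller_ids at all.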
|
[
"matt454357@gmail.com"
] |
matt454357@gmail.com
|
e765d4fa18aeaa069f1c020736556de7f79b88a7
|
255f0c22b05deda164f9f5e5e08722ac41844d3e
|
/src/plot_logs.py
|
a759ab631b10ef5301933f05a7af30ef0826dc95
|
[] |
no_license
|
culring/nDES
|
86ff0619c876117e87c103f1b28db407c414e9a5
|
c2f371a46356fa1f494fe42e6be67a4938e4765e
|
refs/heads/master
| 2023-08-10T15:35:26.692884
| 2020-12-09T17:07:45
| 2020-12-09T17:07:45
| 400,531,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,977
|
py
|
import numpy as np
import matplotlib.pyplot as plt
def get_plot_style(fig_size_multiplier: float = 1.5):
""" Get style parameters for :mod:`matplotlib`.
Args:
fig_size_multiplier: Figure size multiplier.
Returns:
Dictionary of style parameters which can be used to alter
:data:`matplotlib.rcParams` dictionary.
"""
# golden ratio
fig_width = 6.750 * fig_size_multiplier
fig_height = fig_width / 1.618
params = {
'axes.labelsize': 10,
'legend.fontsize': 10,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'text.usetex': False,
'figure.figsize': [fig_width, fig_height],
'savefig.bbox': 'tight',
'savefig.transparent': False
}
return params
def set_grid(axis):
"""Set a grid on the axis.
Args:
axis: Axis on which the grid will be set.
Returns:
Axis with set grid.
"""
axis.spines['top'].set_visible(False)
axis.spines['right'].set_visible(False)
axis.spines['left'].set_visible(False)
axis.spines['bottom'].set_visible(False)
axis.get_xaxis().tick_bottom()
axis.get_yaxis().tick_left()
axis.tick_params(axis='x', direction='out')
axis.tick_params(axis='y', direction='out')
# offset the spines
for spine in axis.spines.values():
spine.set_position(('outward', 5))
# put the grid behind
axis.set_axisbelow(True)
axis.grid(color="0.9", linestyle='-', linewidth=1)
return axis
plt.rcParams.update(get_plot_style(2.5))
if __name__ == "__main__":
fig, ax = plt.subplots()
for i in ['4', '8', '16']:
log = np.load(f'log_{i}k_fixed.npy')
log = np.clip(log, 0., 3.)
x = np.linspace(0, 500000, num=len(log))
ax.plot(x, log, label=(r'$\lambda$ = ' + f'{i}k'))
ax.legend()
ax.set_xlabel('Epoch')
ax.set_ylabel('Log loss')
ax = set_grid(ax)
# plt.show()
fig.savefig('logloss_lambda_des.png')
|
[
"fuine@riseup.net"
] |
fuine@riseup.net
|
d04a8bf3cfa899b41ec2c4abea2d088d9706e9cf
|
fd16ccc7c5576a2f1921bcd9a10d7a157566190e
|
/Source/server/SocketServer/TestSocket/CardsPattern/Mode_AnyOutRange.py
|
c5d7c210a9ce1bc7f5b347f6913d21ec02edc4e3
|
[] |
no_license
|
willy2358/lxqenjoy
|
5469b2b8cf615a43ae777a841156523a8bf0564b
|
8d72d76497b21996e72cf97aa4bb7a5fdf6a03be
|
refs/heads/dev
| 2021-01-02T22:40:16.346181
| 2018-10-17T14:34:28
| 2018-10-17T14:34:28
| 99,359,908
| 0
| 1
| null | 2018-10-03T13:47:34
| 2017-08-04T16:12:19
|
Python
|
UTF-8
|
Python
| false
| false
| 410
|
py
|
from CardsPattern.Mode import Mode
class Mode_AnyOutRange(Mode):
"""description of class"""
def __init__(self, start, end, **kwargs):
self.__start = start
self.__end = end
return super().__init__(**kwargs)
def is_match(self, faces):
for f in faces:
if f < self.__start or f > self.__end:
return True
return False
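# Usage sketch (illustrative, not in the original file):
# m = Mode_AnyOutRange(2, 9)
# m.is_match([3, 5, 7])   # -> False: every face lies inside [2, 9]
# m.is_match([3, 5, 11])  # -> True: 11 falls outside the range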
|
[
"willy2358@139.com"
] |
willy2358@139.com
|
53b326c9ddfedcdcf23fa90a9cb66454161f8125
|
5f65582119800ab9860d401a2ad7d494a0f1bbde
|
/Learning/Warmups/PQ_Countdown/Level1.py
|
8531a15d3cd19d94adae6f12fc025342235df7c6
|
[] |
no_license
|
CVHS-TYM/Marpaung_Story
|
dad965a80b8c563fe8a4f7cda04935cc353bd56f
|
85bf1b58ca5247303474888e7fefefba55f185f9
|
refs/heads/master
| 2020-03-28T11:03:36.416489
| 2018-09-27T14:28:47
| 2018-09-27T14:28:47
| 148,173,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36
|
py
|
for i in range(11):
print(10-i)
|
[
"timothymar.21461@redlandsschools.net"
] |
timothymar.21461@redlandsschools.net
|
271b53908f50313b75ff7927ab6fb2b4705c1c7f
|
043048ecdfd1ddb91c2364b56986aceb2f38eb2e
|
/vilmedic/networks/models/summarization/SumHugMulti.py
|
fefc60e55237dd2e6015b67ec50daaf4325d3061
|
[
"MIT"
] |
permissive
|
Ascensiony/vilmedic
|
09fa566b6ee11f57e3798945b56d2948ae0fabec
|
c1d4c1b893e65d0adab828570752f343742ad5af
|
refs/heads/main
| 2023-08-11T17:17:30.975359
| 2021-09-15T22:57:59
| 2021-09-15T22:57:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,942
|
py
|
import torch.nn as nn
import torch
from vilmedic.networks.blocks.huggingface.encoder_decoder.evaluation import evaluation
from vilmedic.networks.models.utils import get_n_params
# v4.3.2
from transformers.modeling_outputs import Seq2SeqLMOutput
from vilmedic.networks.blocks.huggingface.encoder_decoder.encoder_decoder_model import EncoderDecoderModel
class MultimodalEnc(nn.Module):
def __init__(self, encoder, cnn):
super().__init__()
self.encoder = encoder
cnn_func = cnn.pop('proto')
self.visual_projection = nn.Linear(cnn.pop("visual_embedding_dim"), self.encoder.config.hidden_size)
self.cnn = eval(cnn_func)(**cnn)
def forward(self, input_ids, images, **kwargs):
# Encoder
encoder_outputs = self.encoder(input_ids, **kwargs)
# CNN
with torch.no_grad():
visual_features = self.cnn(images.cuda())
# Add visual attribute to encoder_outputs
visual_features = self.visual_projection(visual_features)
encoder_outputs.visual_features = visual_features
return encoder_outputs
def train(self, mode: bool = True):
self.training = mode
for module in self.children():
module.train(mode)
self.cnn.train(False)
return self
class MultimodalEncDec(EncoderDecoderModel):
def __init__(self, encoder, decoder, cnn, **kwargs):
enc_dec = EncoderDecoderModel.from_encoder_decoder_pretrained(encoder.pop('proto'),
decoder.pop('proto'))
super().__init__(encoder=enc_dec.encoder, decoder=enc_dec.decoder, config=enc_dec.config)
self.encoder = MultimodalEnc(self.encoder, cnn)
# beam param
self.to_tile = ["last_hidden_state", "visual_features"]
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
kwargs_encoder = {argument: value for argument, value in kwargs.items() if
not argument.startswith("decoder_")}
# Encoder
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs_encoder,
)
visual_features = encoder_outputs.visual_features
# Concat modalities
encoder_hidden_states = torch.cat((encoder_outputs.last_hidden_state, visual_features), dim=1)
# update mask accordingly
image_mask = torch.ones(
visual_features.size(-2), device=visual_features.device
).expand(visual_features.size()[:-1]).long()
attention_mask = torch.cat((attention_mask, image_mask), dim=-1)
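# Shape sketch (illustrative numbers, not from the original file): with
# batch=2, text_len=5, n_regions=3, hidden=768, visual_features is
# (2, 3, 768); the ones over size(-2)=3 expand to (2, 3), and the
# concatenated attention_mask becomes (2, 5 + 3).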
# Decode
kwargs_decoder = {
argument[len("decoder_"):]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=attention_mask,
inputs_embeds=decoder_inputs_embeds,
labels=labels,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache,
past_key_values=past_key_values,
return_dict=return_dict,
**kwargs_decoder,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqLMOutput(
loss=decoder_outputs.loss,
logits=decoder_outputs.logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
class SumHugMulti(nn.Module):
def __init__(self, encoder, decoder, cnn, **kwargs):
super().__init__()
# build the encoder-decoder first so its hidden size is available below
self.enc_dec = EncoderDecoderModel(encoder, decoder)
self.enc = self.enc_dec.encoder
# pop visual_embedding_dim before instantiating the CNN so it is not
# forwarded to the CNN constructor
visual_embedding_dim = cnn.pop("visual_embedding_dim")
self.cnn = eval(cnn.pop('proto'))(**cnn)
self.visual_projection = nn.Linear(visual_embedding_dim, self.enc_dec.encoder.config.hidden_size)
# Evaluation
self.eval_func = evaluation
self.enc_dec.to_tile = ["last_hidden_state", "visual_features"]
self.bos_token_id = self.enc_dec.decoder.config.bos_token_id
self.eos_token_id = self.enc_dec.decoder.config.eos_token_id
self.pad_token_id = self.enc_dec.decoder.config.pad_token_id
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
return_dict = return_dict if return_dict is not None else self.enc_dec.config.use_return_dict
if encoder_outputs is None:
kwargs_encoder = {argument: value for argument, value in kwargs.items() if
not argument.startswith("decoder_")}
# Encoder
encoder_outputs = self.enc_dec.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs_encoder,
)
visual_features = encoder_outputs.visual_features
# Concat modalities
encoder_hidden_states = torch.cat((encoder_outputs.last_hidden_state, visual_features), dim=1)
# update mask accordingly
image_mask = torch.ones(
visual_features.size(-2), device=visual_features.device
).expand(visual_features.size()[:-1]).long()
attention_mask = torch.cat((attention_mask, image_mask), dim=-1)
# Decode
kwargs_decoder = {
argument[len("decoder_"):]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
decoder_outputs = self.enc_dec.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=attention_mask,
inputs_embeds=decoder_inputs_embeds,
labels=labels,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache,
past_key_values=past_key_values,
return_dict=return_dict,
**kwargs_decoder,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqLMOutput(
loss=decoder_outputs.loss,
logits=decoder_outputs.logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def __repr__(self):
# s = super().__repr__() + '\n'
s = str(type(self.enc_dec.encoder).__name__) + '(' + str(self.enc_dec.encoder.encoder.config) + ')\n'
s += str(type(self.enc_dec.decoder).__name__) + '(' + str(self.enc_dec.decoder.config) + ')\n'
s += "{}\n".format(get_n_params(self))
return s
|
[
"jeanbenoit.delbrouck@gmail.com"
] |
jeanbenoit.delbrouck@gmail.com
|
15f29bffa2032c5460e92f35750be8872586fffd
|
5504b97bd576906b08da76e95be0348ca676bacf
|
/ps5/testing.py
|
0eb640edd7a292e21432c69a3a588a87e562eac0
|
[] |
no_license
|
ducpq91/mit-ocw-6.0001-ps
|
283e486acbb9a48b8d9f7afcdae05399e61cdee2
|
500a0df8c97885fdb391f4dff10463eff2abbf9c
|
refs/heads/master
| 2020-03-17T16:36:36.084548
| 2018-10-24T05:46:22
| 2018-10-24T05:46:22
| 133,755,240
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,205
|
py
|
import feedparser
import string
import time
import threading
from project_util import translate_html
from mtTkinter import *
from datetime import datetime
import pytz
def process(url):
"""
Fetches news items from the rss url and parses them.
Returns a list of NewsStory-s.
"""
feed = feedparser.parse(url)
entries = feed.entries
ret = []
for entry in entries:
guid = entry.guid
title = translate_html(entry.title)
link = entry.link
description = translate_html(entry.description)
pubdate = translate_html(entry.published)
try:
pubdate = datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S %Z")
pubdate = pubdate.replace(tzinfo=pytz.timezone("GMT"))
# pubdate = pubdate.astimezone(pytz.timezone('EST'))
# pubdate.replace(tzinfo=None)
except ValueError:
pubdate = datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S %z")
newsStory = NewsStory(guid, title, description, link, pubdate)
ret.append(newsStory)
return ret
class NewsStory(object):
def __init__(self, guid, title, description, link, pubdate):
self.guid = guid
self.title = title
self.description = description
self.link = link
self.pubdate = pubdate
def get_guid(self):
return self.guid
def get_title(self):
return self.title
def get_description(self):
return self.description
def get_link(self):
return self.link
def get_pubdate(self):
return self.pubdate
class Trigger(object):
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
# DO NOT CHANGE THIS!
raise NotImplementedError
# PHRASE TRIGGERS
# Problem 2
# TODO: PhraseTrigger
class PhraseTrigger(Trigger):
def __init__(self, phrase):
assert len([punc for punc in string.punctuation if punc in phrase]) == 0, "Punctuation mark(s) present in " \
"phrase."
assert len([char for char in phrase.strip().split(" ") if char == ""]) == 0, "Phrase contains multiple " \
"spaces between words."
self.phrase = phrase.strip().lower()
# def is_phrase_in(self, text):
# newtext = text.lower().split(" ")
# output = []
# for word in newtext:
# if word == '':
# continue
# elif sum([punc in word for punc in string.punctuation]) > 0:
# newword = str()
# for char in word:
# if char in string.punctuation:
# continue
# else:
# newword += char
# output.append(newword)
# else:
# output.append(word)
# text1 = ' '.join(output)
#
# if self.phrase in text1:
# return True
# else:
# return False
def is_phrase_in(self, text):
newtext = str()
for char in text:
if char in string.punctuation:
newtext += " "
else:
newtext += char
newtext1 = newtext.lower().split(" ")
output = []
for word in newtext1:
if word == '':
continue
else:
output.append(word)
text1 = ' '.join(output)
if self.phrase in text1:
if self.phrase.split(" ") == [word for word in self.phrase.split(" ") if word in text1.split(" ")]:
return True
else:
return False
else:
return False
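# Worked example (illustrative; "purple cow" is the classic pset case):
# PhraseTrigger("purple cow").is_phrase_in("The purple@#$%cow is soft.")
# -> True, because every punctuation mark is replaced by a space before
#    the word-level comparison, while "The purple cows are soft." would
#    NOT match ("cow" is not among its whole words).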
# Problem 3
# TODO: TitleTrigger
class TitleTrigger(PhraseTrigger):
def __init__(self, phrase):
PhraseTrigger.__init__(self, phrase)
def evaluate(self, story):
title = story.get_title()
if self.is_phrase_in(title):
return True
else:
return False
# Problem 4
# TODO: DescriptionTrigger
class DescriptionTrigger(PhraseTrigger):
def __init__(self, phrase):
PhraseTrigger.__init__(self, phrase)
def evaluate(self, story):
desc = story.get_description()
if self.is_phrase_in(desc):
return True
else:
return False
# TIME TRIGGERS
# Problem 5
# TODO: TimeTrigger
# Constructor:
# Input: Time has to be in EST and in the format of "%d %b %Y %H:%M:%S".
# Convert time from string to a datetime before saving it as an attribute.
class TimeTrigger(Trigger):
def __init__(self, timestring):
assert isinstance(timestring, str), "Value entered was not a string."
format = "%d %b %Y %H:%M:%S"
try:
dtobj = datetime.strptime(timestring, format)
dtobj_est = dtobj.replace(tzinfo=pytz.timezone("EST"))
self.datetime = dtobj_est
except ValueError as e:
print("ValueError:", e)
# Problem 6
# TODO: BeforeTrigger and AfterTrigger
class BeforeTrigger(TimeTrigger):
def __init__(self, timestring):
TimeTrigger.__init__(self, timestring)
def evaluate(self, story):
if self.datetime > story.get_pubdate():
return True
else:
return False
class AfterTrigger(TimeTrigger):
def __init__(self, timestring):
TimeTrigger.__init__(self, timestring)
def evaluate(self, story):
if self.datetime < story.get_pubdate():
return True
else:
return False
# COMPOSITE TRIGGERS
# Problem 7
# TODO: NotTrigger
class NotTrigger(Trigger):
def __init__(self, trig):
self.trig = trig
def evaluate(self, story):
trig_eva = self.trig.evaluate(story)
return not trig_eva
# Problem 8
# TODO: AndTrigger
class AndTrigger(Trigger):
def __init__(self, trig1, trig2):
self.trig1 = trig1
self.trig2 = trig2
def evaluate(self, story):
trig_eva = self.trig1.evaluate(story) and self.trig2.evaluate(story)
return trig_eva
# Problem 9
# TODO: OrTrigger
class OrTrigger(Trigger):
def __init__(self, trig1, trig2):
self.trig1 = trig1
self.trig2 = trig2
def evaluate(self, story):
trig_eva = self.trig1.evaluate(story) or self.trig2.evaluate(story)
return trig_eva
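# Composite usage sketch (illustrative trigger values, not in the original
# file): fire on stories whose title mentions "election" and that were
# published before a cutoff, or whose description mentions "climate":
# t = OrTrigger(AndTrigger(TitleTrigger("election"),
#                          BeforeTrigger("3 Oct 2016 17:00:10")),
#               DescriptionTrigger("climate"))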
def filter_stories(stories, triggerlist):
"""
Takes in a list of NewsStory instances.
Returns: a list of only the stories for which a trigger in triggerlist fires.
"""
# TODO: Problem 10
sel_stories = []
for story in stories:
for trig in triggerlist:
if trig.evaluate(story):
sel_stories.append(story)
break
return sel_stories
def read_trigger_config(filename):
"""
filename: the name of a trigger configuration file
Returns: a list of trigger objects specified by the trigger configuration
file.
"""
# We give you the code to read in the file and eliminate blank lines and
# comments. You don't need to know how it works for now!
trigger_file = open(filename, 'r')
lines = []
for line in trigger_file:
line = line.rstrip()
if not (len(line) == 0 or line.startswith('//')):
lines.append(line)
# TODO: Problem 11
# line is the list of lines that you need to parse and for which you need
# to build triggers
print(lines) # for now, print it so you see what it contains!
# Running through lines to single out the "ADD" commands, put them into adds = [].
# Construct a trig_lib with the following structure: trig_lib = {'t1':TitleTrigger("something"), 't2':...} by
# running through lines.
# Running through the adds list to add triggers to sel_trigs = []. Return sel_trigs.
trigger_type1 = ["DESCRIPTION", "TITLE", "AFTER", "BEFORE"]
trigger_type2 = ["NOT"]
adds = []
trig_lib = {}
for block in lines:
block_unit = block.split(",")
if block_unit[0] == "ADD":
adds.append(block_unit)
else:
if block_unit[1] in trigger_type1:
trig_lib[block_unit[0]] = eval(block_unit[1].title() + "Trigger" + "('" + block_unit[2] + "')")
elif block_unit[1] in trigger_type2:
trig_lib[block_unit[0]] = eval(block_unit[1].title() + "Trigger" + "(trig_lib['" + block_unit[2] +
+ "'])")
else:
trig_lib[block_unit[0]] = eval(block_unit[1].title() + "Trigger" + "(trig_lib['" + block_unit[2] + "']"
+ "," + "trig_lib['" + block_unit[3] + "'])")
print(trig_lib)
print(adds)
sel_trigs = []
for add in adds:
for i in range(1, len(add)):
sel_trigs.append(trig_lib[add[i]])
return sel_trigs
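# Example config (illustrative sketch of the triggers.txt format this parser
# expects): one trigger definition per line as name,TYPE,args..., with an ADD
# line naming the triggers that become active:
#
# t1,TITLE,election
# t2,DESCRIPTION,clinton
# t3,AND,t1,t2
# ADD,t1,t3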
triggerlist = read_trigger_config('triggers.txt')
print(triggerlist)
SLEEPTIME = 120 # seconds -- how often we poll
def main_thread(master):
# A sample trigger list - you might need to change the phrases to correspond
# to what is currently in the news
try:
t1 = TitleTrigger("medal")
t2 = DescriptionTrigger("Trump")
t3 = DescriptionTrigger("Saudi")
t4 = AndTrigger(t2, t3)
triggerlist = [t1, t4]
# Problem 11
# TODO: After implementing read_trigger_config, uncomment this line
triggerlist = read_trigger_config('triggers.txt')
# HELPER CODE - you don't need to understand this!
# Draws the popup window that displays the filtered stories
# Retrieves and filters the stories from the RSS feeds
frame = Frame(master)
frame.pack(side=BOTTOM)
scrollbar = Scrollbar(master)
scrollbar.pack(side=RIGHT, fill=Y)
t = "Google & Yahoo Top News"
title = StringVar()
title.set(t)
ttl = Label(master, textvariable=title, font=("Helvetica", 18))
ttl.pack(side=TOP)
cont = Text(master, font=("Helvetica", 14), yscrollcommand=scrollbar.set)
cont.pack(side=BOTTOM)
cont.tag_config("title", justify='center')
button = Button(frame, text="Exit", command=root.destroy)
button.pack(side=BOTTOM)
guidShown = []
def get_cont(newstory):
if newstory.get_guid() not in guidShown:
cont.insert(END, newstory.get_title() + "\n", "title")
cont.insert(END, "\n---------------------------------------------------------------\n", "title")
cont.insert(END, newstory.get_description())
cont.insert(END, "\n*********************************************************************\n", "title")
guidShown.append(newstory.get_guid())
while True:
print("Polling . . .", end=' ')
# Get stories from Google's Top Stories RSS news feed
stories = process("http://news.google.com/news?output=rss")
# Get stories from Yahoo's Top Stories RSS news feed
stories.extend(process("http://news.yahoo.com/rss/topstories"))
stories = filter_stories(stories, triggerlist)
list(map(get_cont, stories))
scrollbar.config(command=cont.yview)
print("Sleeping...")
time.sleep(SLEEPTIME)
except Exception as e:
print(e, "something broke")
if __name__ == '__main__':
root = Tk()
root.title("Some RSS parser")
t = threading.Thread(target=main_thread, args=(root,))
t.start()
root.mainloop()
|
[
"noreply@github.com"
] |
ducpq91.noreply@github.com
|