hexsha
stringlengths 40
40
| size
int64 7
1.04M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
247
| max_stars_repo_name
stringlengths 4
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
368k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
247
| max_issues_repo_name
stringlengths 4
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
247
| max_forks_repo_name
stringlengths 4
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.04M
| avg_line_length
float64 1.77
618k
| max_line_length
int64 1
1.02M
| alphanum_fraction
float64 0
1
| original_content
stringlengths 7
1.04M
| filtered:remove_function_no_docstring
int64 -102
942k
| filtered:remove_class_no_docstring
int64 -354
977k
| filtered:remove_delete_markers
int64 0
60.1k
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4747776d908096882a8a9e84d7bd93098a2de6a8
| 1,399
|
py
|
Python
|
DailyProgrammer/20120410B.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | 2
|
2020-12-23T18:59:22.000Z
|
2021-04-14T13:16:09.000Z
|
DailyProgrammer/20120410B.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
DailyProgrammer/20120410B.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
"""
Reverse Polish Notation(RPN) is a mathematical notation where every operator follows all of its operands. For instance,
to add three and four, one would write "3 4 +" rather than "3 + 4". If there are multiple operations, the operator is
given immediately after its second operand; so the expression written "3 ? 4 + 5" would be written "3 4 ? 5 +" first
subtract 4 from 3, then add 5 to that.
Transform the algebraic expression with brackets into RPN form.
You can assume that for the test cases below only single letters will be used, brackets [ ] will not be used and each
expression has only one RPN form (no expressions like abc)
Test Input:
(a+(b*c))
((a+b)*(z+x))
((a+t)*((b+(a+c))^(c+d)))
Test Output:
abc*+
ab+zx+*
at+bac++cd+ ^ *
"""
import re
inp = '((a+t)*((b+(a+c))^(c+d)))'
print(inp)
parenth = re.compile(r"(?<=\()[^()]*(?=\))", re.DOTALL)
symbol = re.compile(r"[+\-*/^](?=\w)", re.DOTALL)
while True:
# Find expression between two parens without parens inbetween. End loop if not found
txt = parenth.search(inp)
if txt is None:
break
# find operator and its location in found expression
sym = symbol.search(txt.group())
# rearrange expression
new = txt.group()[:sym.span()[0]] + txt.group()[sym.span()[1]:] + sym.group()
# update rearranged expression
inp = inp[:txt.span()[0]-1] + new + inp[txt.span()[1]+1:]
print(inp)
| 31.088889
| 119
| 0.654753
|
"""
Reverse Polish Notation(RPN) is a mathematical notation where every operator follows all of its operands. For instance,
to add three and four, one would write "3 4 +" rather than "3 + 4". If there are multiple operations, the operator is
given immediately after its second operand; so the expression written "3 ? 4 + 5" would be written "3 4 ? 5 +" first
subtract 4 from 3, then add 5 to that.
Transform the algebraic expression with brackets into RPN form.
You can assume that for the test cases below only single letters will be used, brackets [ ] will not be used and each
expression has only one RPN form (no expressions like abc)
Test Input:
(a+(b*c))
((a+b)*(z+x))
((a+t)*((b+(a+c))^(c+d)))
Test Output:
abc*+
ab+zx+*
at+bac++cd+ ^ *
"""
import re
inp = '((a+t)*((b+(a+c))^(c+d)))'
print(inp)
parenth = re.compile(r"(?<=\()[^()]*(?=\))", re.DOTALL)
symbol = re.compile(r"[+\-*/^](?=\w)", re.DOTALL)
while True:
# Find expression between two parens without parens inbetween. End loop if not found
txt = parenth.search(inp)
if txt is None:
break
# find operator and its location in found expression
sym = symbol.search(txt.group())
# rearrange expression
new = txt.group()[:sym.span()[0]] + txt.group()[sym.span()[1]:] + sym.group()
# update rearranged expression
inp = inp[:txt.span()[0]-1] + new + inp[txt.span()[1]+1:]
print(inp)
| 0
| 0
| 0
|
ba03b875f1a3ba108e73bbc56cfa6730a8dc9704
| 589
|
py
|
Python
|
corehq/messaging/smsbackends/http/sms_sending.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | 471
|
2015-01-10T02:55:01.000Z
|
2022-03-29T18:07:18.000Z
|
corehq/messaging/smsbackends/http/sms_sending.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | 14,354
|
2015-01-01T07:38:23.000Z
|
2022-03-31T20:55:14.000Z
|
corehq/messaging/smsbackends/http/sms_sending.py
|
akashkj/commcare-hq
|
b00a62336ec26cea1477dfb8c048c548cc462831
|
[
"BSD-3-Clause"
] | 175
|
2015-01-06T07:16:47.000Z
|
2022-03-29T13:27:01.000Z
|
from corehq.util.urlvalidate.urlvalidate import (
PossibleSSRFAttempt,
validate_user_input_url,
)
from corehq.apps.sms.models import SMSBase
from corehq.util.metrics import metrics_counter
| 26.772727
| 72
| 0.689304
|
from corehq.util.urlvalidate.urlvalidate import (
PossibleSSRFAttempt,
validate_user_input_url,
)
from corehq.apps.sms.models import SMSBase
from corehq.util.metrics import metrics_counter
def verify_sms_url(url, msg, backend):
    """Check a gateway URL for SSRF before it is used to send an SMS.

    Delegates validation to validate_user_input_url. On a PossibleSSRFAttempt,
    records a metric, marks the message with a faulty-gateway system error,
    and re-raises the exception for the caller to handle.
    """
    try:
        validate_user_input_url(url)
    except PossibleSSRFAttempt as e:
        # Tag the metric with enough context to trace the offending backend.
        ssrf_tags = {
            'domain': msg.domain,
            'src': type(backend).__name__,
            'reason': e.reason
        }
        metrics_counter('commcare.sms.ssrf_attempt', tags=ssrf_tags)
        msg.set_system_error(SMSBase.ERROR_FAULTY_GATEWAY_CONFIGURATION)
        raise
| 366
| 0
| 23
|
1d4c563ca6d7fbca06cb44be85b9076ebe241a61
| 5,083
|
py
|
Python
|
origins/ontologies.py
|
cocoemily/paleocore2
|
34b9ba30358963a0a1c8ae7252ed7c5ef178a758
|
[
"MIT"
] | null | null | null |
origins/ontologies.py
|
cocoemily/paleocore2
|
34b9ba30358963a0a1c8ae7252ed7c5ef178a758
|
[
"MIT"
] | null | null | null |
origins/ontologies.py
|
cocoemily/paleocore2
|
34b9ba30358963a0a1c8ae7252ed7c5ef178a758
|
[
"MIT"
] | null | null | null |
# Origins Project Choice Lists, Vocabularies, Ontologies
# choice lists and vocabularies are defined with the following design template:
# variable_label1 = value  # variable_labels are lowercase, values can be strings or numbers or codes
# variable_label2 = value
# CHOICES = (
#     (variable_label1, 'string_representation'),
#     (variable_label2, 'string_representation'),
# )
# The design allows use of the variable_labels in code. Changes to the value applies automatically then in code and
# in what is written to database.

# Continents of the World
africa = 'Africa'
antarctica = 'Antarctica'
asia = 'Asia'
australia = 'Australia'
europe = 'Europe'
north_america = 'North America'
south_america = 'South America'
CONTINENT_CHOICES = (
    (africa, 'Africa'),
    (antarctica, 'Antarctica'),
    (asia, 'Asia'),
    (australia, 'Australia'),
    (europe, 'Europe'),
    (north_america, 'North America'),
    (south_america, 'South America')
)

# Type Specimens Choices
# Definitions copied from ICZN online http://code.iczn.org
allotype = 'allotype'  # A term, not regulated by the Code, for a designated specimen of opposite sex to the holotype
cotype = 'cotype'  # A term not recognized by the Code, formerly used for either syntype or paratype, but that should
# not now be used in zoological nomenclature
genotype = 'genotype'  # A term not recognized by the Code, formerly used for type species, but that should not
# now be used in zoological nomenclature
hapanotype = 'hapanotype'  # One or more preparations consisting of directly related individuals representing distinct
# stages in the life cycle, which together form the name-bearing type in an extant species of protistan.
# NOTE(review): ICZN spells this "hapantotype"; stored value left unchanged to avoid breaking persisted data.
holotype = 'holotype'  # The single specimen (except in the case of a hapantotype, q.v.) designated or otherwise fixed
# as the name-bearing type of a nominal species or subspecies when the nominal taxon is established.
isotype = 'isotype'  # A duplicate specimen of the holotype.
isosyntype = 'isosyntype'  # A duplicate of a syntype.
paratype = 'paratype'  # A specimen not formally designated as a type but cited along with the type collection in the
# original description of a taxon.
lectotype = 'lectotype'  # A syntype designated as the single name-bearing type specimen subsequent to the establishment
# of a nominal species or subspecies
neotype = 'neotype'  # The single specimen designated as the name-bearing type of a nominal species or subspecies
# when there is a need to define the nominal taxon objectively and no name-bearing type is believed to be extant.
# If stability and universality are threatened, because an existing name-bearing type is either taxonomically
# inadequate or not in accord with the prevailing usage of a name, the Commission may use its plenary power
# to set aside that type and designate a neotype.
paralectotype = 'paralectotype'  # Each specimen of a former syntype series remaining after the designation
# of a lectotype
syntype = 'syntype'  # Each specimen of a type series (q.v.) from which neither a holotype nor a lectotype has
# been designated. The syntypes collectively constitute the name-bearing type.
topotype = 'topotype'  # A term, not regulated by the Code, for a specimen originating from the type locality of the
# species or subspecies to which it is thought to belong, whether or not the specimen is part of the type series.

# Using a select set of terms recognized by ICZN.
TYPE_CHOICES = (
    (holotype, 'Holotype'),
    (paratype, 'Paratype'),
    (lectotype, 'Lectotype'),
    (neotype, 'Neotype'),
    (syntype, 'Syntype'),
)

# Nomenclatural Code Choices
iczn = 'ICZN'
icbn = 'ICBN'
NOMENCLATURAL_CODE_CHOICES = (
    (iczn, 'ICZN'),
    (icbn, 'ICBN')
)

# Nomenclatural Status Choices
valid = 'valid'
invalid_gh = 'invalid_gh'  # Generic homonym
invalid_ga = 'invalid_ga'  # Genus nomen nudum before 1931
invalid_gb = 'invalid_gb'  # Genus nomen nudum after 1930
invalid_sh = 'invalid_sh'  # Specific homonym
invalid_sm = 'invalid_sm'  # Specific nomen nudum before 1931
invalid_sn = 'invalid_sn'  # Specific nomen nudum after 1930
invalid_so = 'invalid_so'  # Specific nomen nudum - proposed conditionally
suppressed = 'suppressed'  # Name suppressed by ICZN decision.
NOMENCLATURAL_STATUS_CHOICES = (
    (valid, 'Valid'),
    (invalid_gh, 'Invalid GH'),
    (invalid_ga, 'Invalid GA'),
    (invalid_gb, 'Invalid GB'),
    (invalid_sh, 'Invalid SH'),  # fixed label typo 'Inavlid SH'
    (invalid_sm, 'Invalid SM'),
    (invalid_sn, 'Invalid SN'),
    (invalid_so, 'Invalid SO'),  # fixed label typo 'Inavlid SO'
    (suppressed, 'Suppressed')  # fixed label typo 'Supressed'
)

# Classification Status Choices
accepted = 'accepted'
junior_synonym = 'junior_synonym'
deprecated = 'deprecated'
# suppressed defined above for Nomenclatural status choices
CLASSIFICATION_STATUS_CHOICES = (
    (accepted, 'Accepted'),
    (junior_synonym, 'Junior Synonym'),
    (deprecated, 'Deprecated')
)
# helper functions
def choices2list(choices_tuple):
    """
    Helper function that returns a choice list tuple as a simple list of stored values
    :param choices_tuple:
    :return:
    """
    # The stored (database) value is the first element of each choice pair.
    stored_values = []
    for choice in choices_tuple:
        stored_values.append(choice[0])
    return stored_values
| 42.008264
| 119
| 0.744639
|
# Origins Project Choice Lists, Vocabularies, Ontologies
# choice lists and vocabularies are defined with the following design template:
# variable_label1 = value  # variable_labels are lowercase, values can be strings or numbers or codes
# variable_label2 = value
# CHOICES = (
#     (variable_label1, 'string_representation'),
#     (variable_label2, 'string_representation'),
# )
# The design allows use of the variable_labels in code. Changes to the value applies automatically then in code and
# in what is written to database.

# Continents of the World
africa = 'Africa'
antarctica = 'Antarctica'
asia = 'Asia'
australia = 'Australia'
europe = 'Europe'
north_america = 'North America'
south_america = 'South America'
CONTINENT_CHOICES = (
    (africa, 'Africa'),
    (antarctica, 'Antarctica'),
    (asia, 'Asia'),
    (australia, 'Australia'),
    (europe, 'Europe'),
    (north_america, 'North America'),
    (south_america, 'South America')
)

# Type Specimens Choices
# Definitions copied from ICZN online http://code.iczn.org
allotype = 'allotype'  # A term, not regulated by the Code, for a designated specimen of opposite sex to the holotype
cotype = 'cotype'  # A term not recognized by the Code, formerly used for either syntype or paratype, but that should
# not now be used in zoological nomenclature
genotype = 'genotype'  # A term not recognized by the Code, formerly used for type species, but that should not
# now be used in zoological nomenclature
hapanotype = 'hapanotype'  # One or more preparations consisting of directly related individuals representing distinct
# stages in the life cycle, which together form the name-bearing type in an extant species of protistan.
# NOTE(review): ICZN spells this "hapantotype"; stored value left unchanged to avoid breaking persisted data.
holotype = 'holotype'  # The single specimen (except in the case of a hapantotype, q.v.) designated or otherwise fixed
# as the name-bearing type of a nominal species or subspecies when the nominal taxon is established.
isotype = 'isotype'  # A duplicate specimen of the holotype.
isosyntype = 'isosyntype'  # A duplicate of a syntype.
paratype = 'paratype'  # A specimen not formally designated as a type but cited along with the type collection in the
# original description of a taxon.
lectotype = 'lectotype'  # A syntype designated as the single name-bearing type specimen subsequent to the establishment
# of a nominal species or subspecies
neotype = 'neotype'  # The single specimen designated as the name-bearing type of a nominal species or subspecies
# when there is a need to define the nominal taxon objectively and no name-bearing type is believed to be extant.
# If stability and universality are threatened, because an existing name-bearing type is either taxonomically
# inadequate or not in accord with the prevailing usage of a name, the Commission may use its plenary power
# to set aside that type and designate a neotype.
paralectotype = 'paralectotype'  # Each specimen of a former syntype series remaining after the designation
# of a lectotype
syntype = 'syntype'  # Each specimen of a type series (q.v.) from which neither a holotype nor a lectotype has
# been designated. The syntypes collectively constitute the name-bearing type.
topotype = 'topotype'  # A term, not regulated by the Code, for a specimen originating from the type locality of the
# species or subspecies to which it is thought to belong, whether or not the specimen is part of the type series.

# Using a select set of terms recognized by ICZN.
TYPE_CHOICES = (
    (holotype, 'Holotype'),
    (paratype, 'Paratype'),
    (lectotype, 'Lectotype'),
    (neotype, 'Neotype'),
    (syntype, 'Syntype'),
)

# Nomenclatural Code Choices
iczn = 'ICZN'
icbn = 'ICBN'
NOMENCLATURAL_CODE_CHOICES = (
    (iczn, 'ICZN'),
    (icbn, 'ICBN')
)

# Nomenclatural Status Choices
valid = 'valid'
invalid_gh = 'invalid_gh'  # Generic homonym
invalid_ga = 'invalid_ga'  # Genus nomen nudum before 1931
invalid_gb = 'invalid_gb'  # Genus nomen nudum after 1930
invalid_sh = 'invalid_sh'  # Specific homonym
invalid_sm = 'invalid_sm'  # Specific nomen nudum before 1931
invalid_sn = 'invalid_sn'  # Specific nomen nudum after 1930
invalid_so = 'invalid_so'  # Specific nomen nudum - proposed conditionally
suppressed = 'suppressed'  # Name suppressed by ICZN decision.
NOMENCLATURAL_STATUS_CHOICES = (
    (valid, 'Valid'),
    (invalid_gh, 'Invalid GH'),
    (invalid_ga, 'Invalid GA'),
    (invalid_gb, 'Invalid GB'),
    (invalid_sh, 'Invalid SH'),  # fixed label typo 'Inavlid SH'
    (invalid_sm, 'Invalid SM'),
    (invalid_sn, 'Invalid SN'),
    (invalid_so, 'Invalid SO'),  # fixed label typo 'Inavlid SO'
    (suppressed, 'Suppressed')  # fixed label typo 'Supressed'
)

# Classification Status Choices
accepted = 'accepted'
junior_synonym = 'junior_synonym'
deprecated = 'deprecated'
# suppressed defined above for Nomenclatural status choices
CLASSIFICATION_STATUS_CHOICES = (
    (accepted, 'Accepted'),
    (junior_synonym, 'Junior Synonym'),
    (deprecated, 'Deprecated')
)
# helper functions
def choices2list(choices_tuple):
    """
    Helper function that returns a choice list tuple as a simple list of stored values
    :param choices_tuple:
    :return:
    """
    # Index 0 of each choice pair is the value written to the database.
    return [pair[0] for pair in choices_tuple]
| 0
| 0
| 0
|
17714782ac90fa3f8a593cd5ecfbcd309fdb7f2c
| 133
|
py
|
Python
|
templates/text_handlers.py
|
Tsitko/drawyourbot
|
87ce611a6aaba0dbcd02332edecf1dfe79dcae03
|
[
"MIT"
] | 22
|
2021-04-22T08:00:08.000Z
|
2021-08-11T00:30:30.000Z
|
templates/text_handlers.py
|
Tsitko/drawyourbot
|
87ce611a6aaba0dbcd02332edecf1dfe79dcae03
|
[
"MIT"
] | null | null | null |
templates/text_handlers.py
|
Tsitko/drawyourbot
|
87ce611a6aaba0dbcd02332edecf1dfe79dcae03
|
[
"MIT"
] | 4
|
2021-08-10T08:36:12.000Z
|
2022-03-27T15:21:30.000Z
|
if last_question[bot.message.chat_id] == '%block_name%' and not got_answer:
%get_answer%
%next_blocks%
got_answer = True
| 26.6
| 79
| 0.699248
|
if last_question[bot.message.chat_id] == '%block_name%' and not got_answer:
%get_answer%
%next_blocks%
got_answer = True
| 0
| 0
| 0
|
5088bdcc2555dffb970cb2a2f2ba052473943cd3
| 11,536
|
py
|
Python
|
core/utils.py
|
CAPTools/CAPCollector
|
9d890b0f9a0d9a655e4042315ff94133621530e9
|
[
"BSD-3-Clause"
] | 11
|
2015-01-24T03:04:31.000Z
|
2022-01-12T23:33:49.000Z
|
core/utils.py
|
CAPTools/CAPCollector
|
9d890b0f9a0d9a655e4042315ff94133621530e9
|
[
"BSD-3-Clause"
] | null | null | null |
core/utils.py
|
CAPTools/CAPCollector
|
9d890b0f9a0d9a655e4042315ff94133621530e9
|
[
"BSD-3-Clause"
] | 8
|
2015-04-19T18:22:53.000Z
|
2021-12-15T11:21:02.000Z
|
# Copyright (c) 2013, Carnegie Mellon University. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Carnegie Mellon University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Helpers for core CAP Collector module."""
import copy
from datetime import datetime
import logging
import lxml
import os
import re
import uuid
from bs4 import BeautifulSoup
from core import models
from dateutil import parser
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext
import pytz
try:
import xmlsec
XMLSEC_DEFINED = True
except ImportError:
# This module is not available on AppEngine.
# https://code.google.com/p/googleappengine/issues/detail?id=1034
XMLSEC_DEFINED = False
def GetCurrentDate():
  """The current date helper.

  Returns:
    Timezone-aware datetime for "now" in UTC (pytz.utc tzinfo).
  """
  return datetime.now(pytz.utc)
def GenerateFeed(feed_type="xml"):
"""Generates XML for alert feed based on active alert files.
Args:
feed_type: (string) Either xml of html.
Returns:
String. Ready to serve XML feed content.
"""
# Build feed header.
now = timezone.now().isoformat()
time_str, tz_str = now.split("+")
feed_updated = "%s+%s" % (time_str.split(".")[0], tz_str)
feed_url = settings.SITE_URL + reverse("feed", args=[feed_type])
entries = []
# For each unexpired message, get the necessary values and add it to the feed.
for alert in models.Alert.objects.filter(
updated=False,
expires_at__gt=GetCurrentDate()).order_by("-created_at"):
entries.append(ParseAlert(alert.content, feed_type, alert.uuid))
feed_dict = {
"entries": entries,
"feed_url": feed_url,
"updated": feed_updated,
"version": settings.VERSION,
}
feed_template = "core/feed." + feed_type + ".tmpl"
return BeautifulSoup(
render_to_string(feed_template, feed_dict), feed_type).prettify()
def ParseAlert(xml_string, feed_type, alert_uuid):
  """Parses select fields from the CAP XML file at file_name.

  Primary use is intended for populating a feed <entry>.

  Note:
  - This code assumes the input alert XML has only one <info>.
  - The parsed XML does not contain all fields in the CAP specification.
  - The code accepts both complete and partial CAP messages.

  Args:
    xml_string: (string) Alert XML string.
    feed_type: (string) Alert feed representation (XML or HTML).
    alert_uuid: (string) Alert UUID.

  Returns:
    Dictionary.
    Keys/values corresponding to alert XML attributes or empty dictionary.
  """

  def GetFirstText(xml_element):
    """Returns the first text item from an XML element."""
    if xml_element and len(xml_element):
      return xml_element[0].text
    return ""

  def GetAllText(xml_element):
    """Returns an array of text items from multiple elements."""
    if xml_element and len(xml_element):
      return [item.text for item in xml_element]
    return []

  def GetNameValuePairs(xml_elements):
    """Returns a list of dictionaries for paired <valueName>/<value> elements."""
    pair_list = []
    for xml_element in xml_elements:
      name_element, value_element = xml_element.getchildren()
      pair_list.append({
          "name": name_element.text,
          "value": value_element.text})
    return pair_list

  def GetCapElement(element_name, xml_tree):
    """Extracts elements from CAP XML tree by tag name in the CAP namespace."""
    element = "//p:" + element_name
    finder = lxml.etree.XPath(element, namespaces={"p": settings.CAP_NS})
    return finder(xml_tree)

  alert_dict = {}
  try:
    xml_tree = lxml.etree.fromstring(xml_string)
    expires_str = GetFirstText(GetCapElement("expires", xml_tree))
    # Extract the other needed values from the CAP XML.
    sender = GetFirstText(GetCapElement("sender", xml_tree))
    sender_name = GetFirstText(GetCapElement("senderName", xml_tree))
    name = sender
    if sender_name:
      name = name + ": " + sender_name
    title = GetFirstText(GetCapElement("headline", xml_tree))
    if not title:
      title = ugettext("Alert Message")  # Force a default.
    link = "%s%s" % (settings.SITE_URL,
                     reverse("alert", args=[alert_uuid, feed_type]))
    expires = parser.parse(expires_str) if expires_str else None
    sent_str = GetFirstText(GetCapElement("sent", xml_tree))
    sent = parser.parse(sent_str) if sent_str else None
    alert_dict = {
        "title": title,
        "event": GetFirstText(GetCapElement("event", xml_tree)),
        "link": link,
        "web": GetFirstText(GetCapElement("web", xml_tree)),
        "name": name,
        "sender": sender,
        "sender_name": sender_name,
        "expires": expires,
        "msg_type": GetFirstText(GetCapElement("msgType", xml_tree)),
        "references": GetFirstText(GetCapElement("references", xml_tree)),
        "alert_id": GetFirstText(GetCapElement("identifier", xml_tree)),
        "category": GetFirstText(GetCapElement("category", xml_tree)),
        "response_type": GetFirstText(GetCapElement("responseType", xml_tree)),
        "sent": sent,
        "description": GetFirstText(GetCapElement("description", xml_tree)),
        "instruction": GetFirstText(GetCapElement("instruction", xml_tree)),
        "urgency": GetFirstText(GetCapElement("urgency", xml_tree)),
        "severity": GetFirstText(GetCapElement("severity", xml_tree)),
        "certainty": GetFirstText(GetCapElement("certainty", xml_tree)),
        "language": GetFirstText(GetCapElement("language", xml_tree)),
        "parameters": GetNameValuePairs(GetCapElement("parameter", xml_tree)),
        "event_codes": GetNameValuePairs(GetCapElement("eventCode", xml_tree)),
        "area_desc": GetFirstText(GetCapElement("areaDesc", xml_tree)),
        "geocodes": GetNameValuePairs(GetCapElement("geocode", xml_tree)),
        "circles": GetAllText(GetCapElement("circle", xml_tree)),
        "polys": GetAllText(GetCapElement("polygon", xml_tree)),
    }
    # Non-CAP-compliant fields used for message templates.
    expiresDurationMinutes = GetFirstText(
        GetCapElement("expiresDurationMinutes", xml_tree))
    if expiresDurationMinutes:
      alert_dict["expiresDurationMinutes"] = expiresDurationMinutes
  # We don't expect any invalid XML alerts.
  except lxml.etree.XMLSyntaxError as e:
    logging.exception(e)
  return alert_dict
def SignAlert(xml_tree, username):
  """Sign XML with user key/certificate.

  Args:
    xml_tree: (string) Alert XML tree.
    username: (string) Username of the alert author.

  Returns:
    String.
    Signed alert XML tree if the user has a key/certificate pair.
    Unchanged XML tree otherwise (including when xmlsec is unavailable).
  """
  if not XMLSEC_DEFINED:
    return xml_tree
  key_path = os.path.join(settings.CREDENTIALS_DIR, username + ".key")
  cert_path = os.path.join(settings.CREDENTIALS_DIR, username + ".cert")
  try:
    # Sign a copy so the caller's tree is untouched if signing fails midway.
    signed_xml_tree = copy.deepcopy(xml_tree)
    xmlsec.add_enveloped_signature(signed_xml_tree, pos=-1)
    xmlsec.sign(signed_xml_tree, key_path, cert_path)
    return signed_xml_tree
  except (IOError, xmlsec.exceptions.XMLSigException):
    # Missing credentials or signing failure: fall back to the unsigned tree.
    return xml_tree
def CreateAlert(xml_string, username):
  """Creates alert signed by username from provided XML string.

  Args:
    xml_string: (string) XML content.
    username: (string) Username of the alert author.

  Returns:
    A tuple of (msg_id, valid, error) where:
      msg_id: (string) Unique alert ID (UUID)
      valid: (bool) Whether alert has valid XML or not.
      error: (string) Error message in case XML is invalid.
  """
  msg_id = None
  valid = False
  try:
    # Clean up the XML format a bit.
    xml_string = re.sub("> +<", "><", xml_string)
    # Now parse into etree and validate against the CAP schema.
    xml_tree = lxml.etree.fromstring(xml_string)
    with open(os.path.join(settings.SCHEMA_DIR,
                           settings.CAP_SCHEMA_FILE), "r") as schema_file:
      schema_string = schema_file.read()
    xml_schema = lxml.etree.XMLSchema(lxml.etree.fromstring(schema_string))
    valid = xml_schema.validate(xml_tree)
    error = xml_schema.error_log.last_error
  except lxml.etree.XMLSyntaxError as e:
    error = "Malformed XML: %s" % e
  if valid:
    msg_id = str(uuid.uuid4())
    # Assign <identifier> and <sender> values.
    find_identifier = lxml.etree.XPath("//p:identifier",
                                       namespaces={"p": settings.CAP_NS})
    identifier = find_identifier(xml_tree)[0]
    identifier.text = msg_id
    # Set default <web> field if one was not filled by user.
    find_web = lxml.etree.XPath("//p:info/p:web",
                                namespaces={"p": settings.CAP_NS})
    web = find_web(xml_tree)[0]
    if web.text == "pending":
      web.text = "%s%s" % (settings.SITE_URL,
                           reverse("alert", args=[msg_id, "html"]))
    find_sender = lxml.etree.XPath("//p:sender",
                                   namespaces={"p": settings.CAP_NS})
    sender = find_sender(xml_tree)[0]
    sender.text = username + "@" + settings.SITE_DOMAIN
    find_sent = lxml.etree.XPath("//p:sent",
                                 namespaces={"p": settings.CAP_NS})
    sent = find_sent(xml_tree)[0]
    find_expires = lxml.etree.XPath("//p:expires",
                                    namespaces={"p": settings.CAP_NS})
    expires = find_expires(xml_tree)[0]
    find_references = lxml.etree.XPath("//p:references",
                                       namespaces={"p": settings.CAP_NS})
    has_references = len(find_references(xml_tree)) != 0
    # Sign the XML tree.
    xml_tree = SignAlert(xml_tree, username)
    # Re-serialize as string.
    signed_xml_string = lxml.etree.tostring(xml_tree, pretty_print=False)
    # Persist the alert with timestamps taken from the CAP payload itself.
    alert_obj = models.Alert()
    alert_obj.uuid = msg_id
    alert_obj.created_at = sent.text
    alert_obj.expires_at = expires.text
    alert_obj.content = signed_xml_string
    alert_obj.save()
    if has_references:
      # Mark every referenced (superseded) alert as updated.
      for element in find_references(xml_tree):
        updated_alert_uuid = element.text.split(",")[1]
        models.Alert.objects.filter(
            uuid=updated_alert_uuid).update(updated=True)
  return (msg_id, valid, error)
| 35.937695
| 80
| 0.692181
|
# Copyright (c) 2013, Carnegie Mellon University. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Carnegie Mellon University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Helpers for core CAP Collector module."""
import copy
from datetime import datetime
import logging
import lxml
import os
import re
import uuid
from bs4 import BeautifulSoup
from core import models
from dateutil import parser
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext
import pytz
try:
import xmlsec
XMLSEC_DEFINED = True
except ImportError:
# This module is not available on AppEngine.
# https://code.google.com/p/googleappengine/issues/detail?id=1034
XMLSEC_DEFINED = False
def GetCurrentDate():
  """The current date helper.

  Returns:
    Timezone-aware datetime for "now" in UTC (pytz.utc tzinfo).
  """
  return datetime.now(pytz.utc)
def GenerateFeed(feed_type="xml"):
"""Generates XML for alert feed based on active alert files.
Args:
feed_type: (string) Either xml of html.
Returns:
String. Ready to serve XML feed content.
"""
# Build feed header.
now = timezone.now().isoformat()
time_str, tz_str = now.split("+")
feed_updated = "%s+%s" % (time_str.split(".")[0], tz_str)
feed_url = settings.SITE_URL + reverse("feed", args=[feed_type])
entries = []
# For each unexpired message, get the necessary values and add it to the feed.
for alert in models.Alert.objects.filter(
updated=False,
expires_at__gt=GetCurrentDate()).order_by("-created_at"):
entries.append(ParseAlert(alert.content, feed_type, alert.uuid))
feed_dict = {
"entries": entries,
"feed_url": feed_url,
"updated": feed_updated,
"version": settings.VERSION,
}
feed_template = "core/feed." + feed_type + ".tmpl"
return BeautifulSoup(
render_to_string(feed_template, feed_dict), feed_type).prettify()
def ParseAlert(xml_string, feed_type, alert_uuid):
    """Parses select fields from the CAP XML file at file_name.

    Primary use is intended for populating a feed <entry>.

    Note:
    - This code assumes the input alert XML has only one <info>.
    - The parsed XML does not contain all fields in the CAP specification.
    - The code accepts both complete and partial CAP messages.

    Args:
        xml_string: (string) Alert XML string.
        feed_type: (string) Alert feed representation (XML or HTML).
        alert_uuid: (string) Alert UUID.

    Returns:
        Dictionary.
        Keys/values corresponding to alert XML attributes or empty dictionary.
    """
    def GetFirstText(xml_element):
        """Returns the first text item from an XML element."""
        if xml_element and len(xml_element):
            return xml_element[0].text
        return ""

    def GetAllText(xml_element):
        """Returns an array of text items from multiple elements."""
        if xml_element and len(xml_element):
            return [item.text for item in xml_element]
        return []

    def GetNameValuePairs(xml_elements):
        """Returns a list of dictionaries for paired elements."""
        # Each element is expected to have exactly two children: a name
        # element followed by a value element (CAP <valueName>/<value> pairs).
        pair_list = []
        for xml_element in xml_elements:
            name_element, value_element = xml_element.getchildren()
            pair_list.append({
                "name": name_element.text,
                "value": value_element.text})
        return pair_list

    def GetCapElement(element_name, xml_tree):
        """Extracts elements from CAP XML tree."""
        element = "//p:" + element_name
        finder = lxml.etree.XPath(element, namespaces={"p": settings.CAP_NS})
        return finder(xml_tree)

    alert_dict = {}
    try:
        xml_tree = lxml.etree.fromstring(xml_string)
        expires_str = GetFirstText(GetCapElement("expires", xml_tree))
        # Extract the other needed values from the CAP XML.
        sender = GetFirstText(GetCapElement("sender", xml_tree))
        sender_name = GetFirstText(GetCapElement("senderName", xml_tree))
        name = sender
        if sender_name:
            name = name + ": " + sender_name
        title = GetFirstText(GetCapElement("headline", xml_tree))
        if not title:
            title = ugettext("Alert Message")  # Force a default.
        link = "%s%s" % (settings.SITE_URL,
                         reverse("alert", args=[alert_uuid, feed_type]))
        # dateutil's parser handles the ISO-8601 timestamps CAP uses.
        expires = parser.parse(expires_str) if expires_str else None
        sent_str = GetFirstText(GetCapElement("sent", xml_tree))
        sent = parser.parse(sent_str) if sent_str else None
        alert_dict = {
            "title": title,
            "event": GetFirstText(GetCapElement("event", xml_tree)),
            "link": link,
            "web": GetFirstText(GetCapElement("web", xml_tree)),
            "name": name,
            "sender": sender,
            "sender_name": sender_name,
            "expires": expires,
            "msg_type": GetFirstText(GetCapElement("msgType", xml_tree)),
            "references": GetFirstText(GetCapElement("references", xml_tree)),
            "alert_id": GetFirstText(GetCapElement("identifier", xml_tree)),
            "category": GetFirstText(GetCapElement("category", xml_tree)),
            "response_type": GetFirstText(GetCapElement("responseType", xml_tree)),
            "sent": sent,
            "description": GetFirstText(GetCapElement("description", xml_tree)),
            "instruction": GetFirstText(GetCapElement("instruction", xml_tree)),
            "urgency": GetFirstText(GetCapElement("urgency", xml_tree)),
            "severity": GetFirstText(GetCapElement("severity", xml_tree)),
            "certainty": GetFirstText(GetCapElement("certainty", xml_tree)),
            "language": GetFirstText(GetCapElement("language", xml_tree)),
            "parameters": GetNameValuePairs(GetCapElement("parameter", xml_tree)),
            "event_codes": GetNameValuePairs(GetCapElement("eventCode", xml_tree)),
            "area_desc": GetFirstText(GetCapElement("areaDesc", xml_tree)),
            "geocodes": GetNameValuePairs(GetCapElement("geocode", xml_tree)),
            "circles": GetAllText(GetCapElement("circle", xml_tree)),
            "polys": GetAllText(GetCapElement("polygon", xml_tree)),
        }
        # Non-CAP-compliant fields used for message templates.
        expiresDurationMinutes = GetFirstText(
            GetCapElement("expiresDurationMinutes", xml_tree))
        if expiresDurationMinutes:
            alert_dict["expiresDurationMinutes"] = expiresDurationMinutes
    # We don't expect any invalid XML alerts; log and return {} if one slips in.
    except lxml.etree.XMLSyntaxError as e:
        logging.exception(e)
    return alert_dict
def SignAlert(xml_tree, username):
    """Sign an alert XML tree with the author's key/certificate pair.

    Args:
        xml_tree: Alert XML tree.
        username: (string) Username of the alert author.

    Returns:
        A signed copy of the tree when signing succeeds; the original,
        unmodified tree when xmlsec is unavailable or the user has no
        usable key/certificate pair.
    """
    if not XMLSEC_DEFINED:
        # xmlsec could not be imported (e.g. on AppEngine); signing is a no-op.
        return xml_tree
    credential_base = os.path.join(settings.CREDENTIALS_DIR, username)
    try:
        candidate = copy.deepcopy(xml_tree)
        xmlsec.add_enveloped_signature(candidate, pos=-1)
        xmlsec.sign(candidate, credential_base + ".key", credential_base + ".cert")
    except (IOError, xmlsec.exceptions.XMLSigException):
        # Missing/unreadable credentials or a signing failure: fall back to
        # returning the unsigned tree.
        return xml_tree
    return candidate
def CreateAlert(xml_string, username):
    """Creates alert signed by username from provided XML string.

    Args:
        xml_string: (string) XML content.
        username: (string) Username of the alert author.

    Returns:
        A tuple of (msg_id, valid, error) where:
            msg_id: (string) Unique alert ID (UUID)
            valid: (bool) Whether alert has valid XML or not.
            error: (string) Error message in case XML is invalid.
    """
    msg_id = None
    valid = False
    try:
        # Clean up the XML format a bit.
        xml_string = re.sub("> +<", "><", xml_string)
        # Now parse into etree and validate.
        xml_tree = lxml.etree.fromstring(xml_string)
        with open(os.path.join(settings.SCHEMA_DIR,
                               settings.CAP_SCHEMA_FILE), "r") as schema_file:
            schema_string = schema_file.read()
        xml_schema = lxml.etree.XMLSchema(lxml.etree.fromstring(schema_string))
        valid = xml_schema.validate(xml_tree)
        error = xml_schema.error_log.last_error
    except lxml.etree.XMLSyntaxError as e:
        error = "Malformed XML: %s" % e
    if valid:
        msg_id = str(uuid.uuid4())
        # Assign <identifier> and <sender> values.
        find_identifier = lxml.etree.XPath("//p:identifier",
                                           namespaces={"p": settings.CAP_NS})
        identifier = find_identifier(xml_tree)[0]
        identifier.text = msg_id
        # Set default <web> field if one was not filled by user.
        find_web = lxml.etree.XPath("//p:info/p:web",
                                    namespaces={"p": settings.CAP_NS})
        web = find_web(xml_tree)[0]
        if web.text == "pending":
            web.text = "%s%s" % (settings.SITE_URL,
                                 reverse("alert", args=[msg_id, "html"]))
        find_sender = lxml.etree.XPath("//p:sender",
                                       namespaces={"p": settings.CAP_NS})
        sender = find_sender(xml_tree)[0]
        sender.text = username + "@" + settings.SITE_DOMAIN
        # <sent> and <expires> are read back below to timestamp the DB row.
        find_sent = lxml.etree.XPath("//p:sent",
                                     namespaces={"p": settings.CAP_NS})
        sent = find_sent(xml_tree)[0]
        find_expires = lxml.etree.XPath("//p:expires",
                                        namespaces={"p": settings.CAP_NS})
        expires = find_expires(xml_tree)[0]
        find_references = lxml.etree.XPath("//p:references",
                                           namespaces={"p": settings.CAP_NS})
        has_references = len(find_references(xml_tree)) != 0
        # Sign the XML tree.
        xml_tree = SignAlert(xml_tree, username)
        # Re-serialize as string.
        signed_xml_string = lxml.etree.tostring(xml_tree, pretty_print=False)
        alert_obj = models.Alert()
        alert_obj.uuid = msg_id
        alert_obj.created_at = sent.text
        alert_obj.expires_at = expires.text
        alert_obj.content = signed_xml_string
        alert_obj.save()
        # An alert that references earlier alerts supersedes them: mark each
        # referenced alert (uuid is the middle field of "sender,uuid,sent")
        # as updated so it drops out of the active feed.
        if has_references:
            for element in find_references(xml_tree):
                updated_alert_uuid = element.text.split(",")[1]
                models.Alert.objects.filter(
                    uuid=updated_alert_uuid).update(updated=True)
    return (msg_id, valid, error)
| 0
| 0
| 0
|
0abb359fcd3fa230f84c1d94f935a7561a18b43e
| 104
|
py
|
Python
|
reward_surfaces/experiments/__init__.py
|
weepingwillowben/reward-surfaces
|
f27211faf3784df3305972b7cad65002fd57d7bf
|
[
"MIT"
] | null | null | null |
reward_surfaces/experiments/__init__.py
|
weepingwillowben/reward-surfaces
|
f27211faf3784df3305972b7cad65002fd57d7bf
|
[
"MIT"
] | null | null | null |
reward_surfaces/experiments/__init__.py
|
weepingwillowben/reward-surfaces
|
f27211faf3784df3305972b7cad65002fd57d7bf
|
[
"MIT"
] | 2
|
2021-10-03T14:51:38.000Z
|
2021-11-10T02:54:26.000Z
|
from .generate_eval_jobs import generate_eval_jobs
from .generate_plane_jobs import generate_plane_data
| 34.666667
| 52
| 0.903846
|
from .generate_eval_jobs import generate_eval_jobs
from .generate_plane_jobs import generate_plane_data
| 0
| 0
| 0
|
8e2a9218dc15d719fba899fa20f53e298214c11a
| 13,078
|
py
|
Python
|
rain_api_core/urs_util.py
|
asfadmin/rain-api-core
|
99985d4a346ab06449a42ed6b5b91f36d2bc760a
|
[
"Apache-2.0"
] | 1
|
2020-05-06T22:01:22.000Z
|
2020-05-06T22:01:22.000Z
|
rain_api_core/urs_util.py
|
asfadmin/rain-api-core
|
99985d4a346ab06449a42ed6b5b91f36d2bc760a
|
[
"Apache-2.0"
] | 87
|
2019-09-16T20:45:59.000Z
|
2022-03-31T21:18:44.000Z
|
rain_api_core/urs_util.py
|
asfadmin/rain-api-core
|
99985d4a346ab06449a42ed6b5b91f36d2bc760a
|
[
"Apache-2.0"
] | 2
|
2020-05-06T22:01:29.000Z
|
2021-03-23T18:22:52.000Z
|
import logging
import os
import urllib
from time import time
from json import loads
from rain_api_core.general_util import log_context, return_timing_object, duration
from rain_api_core.view_util import make_set_cookie_headers_jwt, get_exp_time, JWT_COOKIE_NAME
from rain_api_core.aws_util import retrieve_secret
log = logging.getLogger(__name__)
def get_urs_creds():
    """Fetch the URS application credentials from AWS Secrets Manager.

    The secret name is taken from the URS_CREDS_SECRET_NAME env var.

    :return: dict of the form
        {
            "UrsId": "stringofseeminglyrandomcharacters",
            "UrsAuth": "verymuchlongerstringofseeminglyrandomcharacters"
        }
        or an empty dict when the env var is not configured.
    :type: dict
    """
    secret_name = os.getenv('URS_CREDS_SECRET_NAME', None)
    if not secret_name:
        log.error('URS_CREDS_SECRET_NAME not set')
        return {}
    secret = retrieve_secret(secret_name)
    # Warn (but still return the secret) when the expected keys are absent.
    if 'UrsId' not in secret or 'UrsAuth' not in secret:
        log.error('AWS secret {} does not contain required keys "UrsId" and "UrsAuth"'.format(secret_name))
    return secret
# This do_login() is mainly for chalice clients.
| 39.155689
| 156
| 0.658663
|
import logging
import os
import urllib
from time import time
from json import loads
from rain_api_core.general_util import log_context, return_timing_object, duration
from rain_api_core.view_util import make_set_cookie_headers_jwt, get_exp_time, JWT_COOKIE_NAME
from rain_api_core.aws_util import retrieve_secret
log = logging.getLogger(__name__)
def get_base_url(ctxt=False):
    """Build the service base URL.

    Prefers the DOMAIN_NAME env var; otherwise falls back to the raw
    domain/stage provided by API Gateway in `ctxt`.

    Bug fix: the original passed the fallback as os.getenv()'s default
    argument, which Python evaluates eagerly -- so calling with ctxt=False
    raised TypeError even when DOMAIN_NAME was set. The fallback is now
    only evaluated when the env var is absent.

    Args:
        ctxt: request context mapping with 'domainName' and 'stage' keys,
            or False when DOMAIN_NAME is expected to be configured.

    Returns:
        (string) e.g. 'https://example.com/'.

    Raises:
        TypeError/IndexError/KeyError (logged first) when no DOMAIN_NAME is
        set and `ctxt` cannot supply domainName/stage.
    """
    domain_name = os.getenv('DOMAIN_NAME')
    if not domain_name:
        try:
            domain_name = '{}/{}'.format(ctxt['domainName'], ctxt['stage'])
        except (TypeError, IndexError, KeyError) as e:
            log.error('could not create a redirect_url, because {}'.format(e))
            raise
    return 'https://{}/'.format(domain_name)
def get_redirect_url(ctxt=False):
    """Return the URS login-callback URL: the service base URL plus 'login'."""
    return get_base_url(ctxt) + 'login'
def do_auth(code, redirect_url, aux_headers=None):
    """Exchange a URS OAuth authorization code for a token payload.

    Args:
        code: (string) authorization code returned by URS after login.
        redirect_url: (string) the redirect_uri registered with URS.
        aux_headers: optional dict of extra HTTP headers to send.

    Returns:
        dict parsed from the URS token response, or {} on failure.
    """
    aux_headers = aux_headers or {}  # A safer default
    url = os.getenv('AUTH_BASE_URL', 'https://urs.earthdata.nasa.gov') + "/oauth/token"
    # App U:P from URS Application
    auth = get_urs_creds()['UrsAuth']
    post_data = {"grant_type": "authorization_code",
                 "code": code,
                 "redirect_uri": redirect_url}
    headers = {"Authorization": "Basic " + auth}
    headers.update(aux_headers)
    post_data_encoded = urllib.parse.urlencode(post_data).encode("utf-8")
    post_request = urllib.request.Request(url, post_data_encoded, headers)
    t0 = time()
    try:
        log.debug('headers: {}'.format(headers))
        log.debug('url: {}'.format(url))
        log.debug('post_data: {}'.format(post_data))
        response = urllib.request.urlopen(post_request)  #nosec URL is *always* URS.
        t1 = time()
        packet = response.read()
        log.debug('ET to do_auth() urlopen(): {} sec'.format(t1 - t0))
        log.debug('ET to do_auth() request to URS: {} sec'.format(time() - t0))
        log.info(return_timing_object(service="EDL", endpoint=url, method="POST", duration=duration(t0)))
        return loads(packet)
    except urllib.error.URLError as e:
        # Best-effort: auth failures are reported to the caller as {}.
        log.error("Error fetching auth: {0}".format(e))
        log.debug('ET for the attempt: {}'.format(format(round(time() - t0, 4))))
        return {}
def get_urs_url(ctxt, to=False):
    """Build the URS OAuth2 authorization URL for this deployment.

    Args:
        ctxt: request context providing 'apiId'/'stage' (used for debug
            logging) and optionally ['identity']['userAgent'].
        to: optional state value propagated through URS back to /login.

    Returns:
        (string) The URS /oauth/authorize URL including client id, redirect
        URI, optional state, and app_type=401 for non-browser user agents.
    """
    base_url = os.getenv('AUTH_BASE_URL', 'https://urs.earthdata.nasa.gov') + '/oauth/authorize'
    # From URS Application
    client_id = get_urs_creds()['UrsId']
    log.debug('domain name: {0}'.format(os.getenv('DOMAIN_NAME', 'no domainname set')))
    log.debug('if no domain name set: {}.execute-api.{}.amazonaws.com/{}'.format(ctxt['apiId'], os.getenv('AWS_DEFAULT_REGION', '<region>'), ctxt['stage']))
    urs_url = '{0}?client_id={1}&response_type=code&redirect_uri={2}'.format(base_url, client_id, get_redirect_url(ctxt))
    if to:
        urs_url += "&state={0}".format(to)
    # Try to handle scripts
    try:
        download_agent = ctxt['identity']['userAgent']
    except (KeyError, IndexError, TypeError):
        # Bug fix: a plain dict context raises KeyError (and an odd/None
        # context raises TypeError); the original caught only IndexError,
        # so this fallback was unreachable for dict contexts.
        log.debug("No User Agent!")
        return urs_url
    if not download_agent.startswith('Mozilla'):
        urs_url += "&app_type=401"
    return urs_url
def get_profile(user_id, token, temptoken=False, aux_headers=None):
    """Fetch the URS user profile for `user_id`.

    Args:
        user_id: (string) URS username.
        token: (string) the user's URS access token.
        temptoken: optional short-lived token obtained by
            get_new_token_and_profile(); used once and not retained.
        aux_headers: optional dict of extra HTTP headers to send.

    Returns:
        dict of the URS profile, or {} on failure.
    """
    aux_headers = aux_headers or {}  # Safer Default
    if not user_id or not token:
        return {}
    # get_new_token_and_profile() will pass this function a temporary token with which to fetch the profile info. We
    # don't want to keep it around, just use it here, once:
    if temptoken:
        headertoken = temptoken
    else:
        headertoken = token
    url = os.getenv('AUTH_BASE_URL', 'https://urs.earthdata.nasa.gov') + "/api/users/{0}".format(user_id)
    headers = {"Authorization": "Bearer " + headertoken}
    headers.update(aux_headers)
    req = urllib.request.Request(url, None, headers)
    try:
        timer = time()
        response = urllib.request.urlopen(req)  # nosec URL is *always* URS.
        packet = response.read()
        log.info(return_timing_object(service="EDL", endpoint=url, duration=duration(timer)))
        user_profile = loads(packet)
        return user_profile
    except urllib.error.URLError as e:
        # NOTE(review): any URLError (not only a 401) lands here -- confirm
        # whether retrying with a fresh token is intended for all failures.
        log.warning("Error fetching profile: {0}".format(e))
        if not temptoken:  # This keeps get_new_token_and_profile() from calling this over and over
            log.debug('because error above, going to get_new_token_and_profile()')
            return get_new_token_and_profile(user_id, token, aux_headers)
        log.debug('We got that 401 above and we\'re using a temptoken ({}), so giving up and not getting a profile.'.format(temptoken))
        return {}
def get_new_token_and_profile(user_id, cookietoken, aux_headers=None):
    """Obtain a fresh client-credentials token and retry the profile fetch.

    Args:
        user_id: (string) URS username.
        cookietoken: (string) the user's original access token (passed back
            through to get_profile()).
        aux_headers: optional dict of extra HTTP headers to send.

    Returns:
        dict of the URS profile on success, False on token-fetch failure.
    """
    aux_headers = aux_headers or {}  # A safer default
    # get a new token
    url = os.getenv('AUTH_BASE_URL', 'https://urs.earthdata.nasa.gov') + "/oauth/token"
    # App U:P from URS Application
    auth = get_urs_creds()['UrsAuth']
    post_data = {"grant_type": "client_credentials" }
    headers = {"Authorization": "Basic " + auth}
    headers.update(aux_headers)
    # Download token
    post_data_encoded = urllib.parse.urlencode(post_data).encode("utf-8")
    post_request = urllib.request.Request(url, post_data_encoded, headers)
    t0 = time()
    try:
        log.info("Attempting to get new Token")
        response = urllib.request.urlopen(post_request)  #nosec URL is *always* URS.
        t1 = time()
        packet = response.read()
        log.info(return_timing_object(service="EDL", endpoint=url, duration=duration(t0)))
        new_token = loads(packet)['access_token']
        t2 = time()
        log.info("Retrieved new token: {0}".format(new_token))
        log.debug('ET for get_new_token_and_profile() urlopen() {} sec'.format(t1 - t0))
        log.debug('ET for get_new_token_and_profile() response.read() and loads() {} sec'.format(t2- t1))
        # Get user profile with new token
        return get_profile(user_id, cookietoken, new_token, aux_headers=aux_headers)
    except urllib.error.URLError as e:
        log.error("Error fetching auth: {0}".format(e))
        log.debug('ET for the attempt: {}'.format(format(round(time() - t0, 4))))
        return False
def user_in_group_list(private_groups, user_groups):
    """Check whether the user belongs to any of the private groups.

    Only groups registered under this application's URS client id count.

    Bug fix: the original fell off the end and implicitly returned None when
    no group matched; this returns an explicit False (backward compatible,
    both are falsy).

    Args:
        private_groups: iterable of group names that grant access.
        user_groups: list of dicts, each with 'client_id' and 'name' keys.

    Returns:
        True when a private group matches, False otherwise.
    """
    client_id = get_urs_creds()['UrsId']
    log.info("Searching for private groups {0} in {1}".format(private_groups, user_groups))
    # Collect the names of the user's groups that belong to this app.
    app_group_names = {u_g['name'] for u_g in user_groups
                       if u_g['client_id'] == client_id}
    for p_g in private_groups:
        if p_g in app_group_names:
            # Found the matching group!
            log.info("User belongs to private group {}".format(p_g))
            return True
    return False
def user_in_group_urs(private_groups, user_id, token, user_profile=None, refresh_first=False, aux_headers=None):
    """Check group membership against URS, refreshing the profile if needed.

    Args:
        private_groups: iterable of group names that grant access.
        user_id: (string) URS username.
        token: (string) the user's URS access token.
        user_profile: optional cached profile dict; fetched when absent.
        refresh_first: when True, refetch the profile before checking.
        aux_headers: optional dict of extra HTTP headers to send.

    Returns:
        Tuple of (in_group, new_profile); new_profile is non-empty only when
        a fresh profile was fetched during this call.
    """
    aux_headers = aux_headers or {}  # A safer default
    new_profile = {}
    if refresh_first or not user_profile:
        user_profile = get_profile(user_id, token, aux_headers=aux_headers)
        new_profile = user_profile
    if isinstance(user_profile, dict) and 'user_groups' in user_profile and user_in_group_list(private_groups, user_profile['user_groups']):
        log.info("User {0} belongs to private group".format(user_id))
        return True, new_profile
    # Couldn't find user in provided groups, but we may as well look at a fresh group list:
    if not refresh_first:
        # we have a maybe not so fresh user_profile and we could try again to see if someone added a group to this user:
        log.debug("Could not validate user {0} belonging to groups {1}, attempting profile refresh".format(user_id, private_groups))
        return user_in_group_urs(private_groups, user_id, {}, refresh_first=True, aux_headers=aux_headers)
    log.debug("Even after profile refresh, user {0} does not belong to groups {1}".format(user_id, private_groups))
    return False, new_profile
def user_in_group(private_groups, cookievars, refresh_first=False, aux_headers=None):
    """Check group membership using the JWT cookie, refreshing from URS as needed.

    Args:
        private_groups: iterable of group names that grant access.
        cookievars: dict of decoded cookies; must hold the JWT payload under
            JWT_COOKIE_NAME.
        refresh_first: when True, refetch the profile before checking.
        aux_headers: optional dict of extra HTTP headers to send.

    Returns:
        Tuple of (in_group, new_profile); new_profile is non-empty only when
        a fresh profile was fetched, so the caller can reissue the JWT cookie.
    """
    aux_headers = aux_headers or {}  # A safer default
    # If a new profile is fetched, it is assigned to this var, and returned so that a fresh jwt cookie can be set.
    new_profile = {}
    if not private_groups:
        return False, new_profile
    jwt_payload = cookievars.get(JWT_COOKIE_NAME)
    if not jwt_payload:
        return False, new_profile
    if refresh_first:
        new_profile = get_profile(jwt_payload['urs-user-id'], jwt_payload['urs-access-token'], aux_headers=aux_headers)
        jwt_payload['urs-groups'] = new_profile['user_groups']
    in_group = user_in_group_list(private_groups, jwt_payload['urs-groups'])
    if in_group:
        return True, new_profile
    if not in_group and not refresh_first:
        # one last ditch effort to see if they were so very recently added to group:
        jwt_payload['urs-groups'] = get_profile(jwt_payload['urs-user-id'], jwt_payload['urs-access-token'], aux_headers=aux_headers)['user_groups']
        return user_in_group(private_groups, cookievars, refresh_first=True, aux_headers=aux_headers)
    return False, new_profile
def get_urs_creds():
    """
    Fetches URS creds from secrets manager.

    The secret name comes from the URS_CREDS_SECRET_NAME env var.

    :return: looks like:
        {
            "UrsId": "stringofseeminglyrandomcharacters",
            "UrsAuth": "verymuchlongerstringofseeminglyrandomcharacters"
        }
        or {} when URS_CREDS_SECRET_NAME is not configured.
    :type: dict
    """
    secret_name = os.getenv('URS_CREDS_SECRET_NAME', None)
    if not secret_name:
        log.error('URS_CREDS_SECRET_NAME not set')
        return {}
    secret = retrieve_secret(secret_name)
    # Missing keys are logged but the (possibly incomplete) secret is still returned.
    if not ('UrsId' in secret and 'UrsAuth' in secret):
        log.error('AWS secret {} does not contain required keys "UrsId" and "UrsAuth"'.format(secret_name))
    return secret
def user_profile_2_jwt_payload(user_id, access_token, user_profile):
    """Assemble the JWT session-cookie payload from a URS user profile.

    Args:
        user_id: (string) URS username.
        access_token: (string) the user's URS access token.
        user_profile: dict as returned by the URS profile endpoint.

    Returns:
        dict ready to be encoded into the session JWT.
    """
    issued_at = int(time())
    payload = {
        # Do we want more items in here?
        'first_name': user_profile['first_name'],
        'last_name': user_profile['last_name'],
        'email': user_profile['email_address'],
        'urs-user-id': user_id,
        'urs-access-token': access_token,
        'urs-groups': user_profile['user_groups'],
        'iat': issued_at,
        'exp': get_exp_time(),
    }
    return payload
# This do_login() is mainly for chalice clients.
def do_login(args, context, cookie_domain='', aux_headers=None):
    """Complete the URS OAuth login round-trip and mint the JWT cookie.

    Args:
        args: dict of query-string parameters from the URS redirect.
        context: request context (used to rebuild the redirect/base URLs).
        cookie_domain: (string) domain attribute for the session cookie.
        aux_headers: optional dict of extra HTTP headers for URS requests.

    Returns:
        A tuple of (http_status, template_vars, headers); 301 with a
        Location + Set-Cookie on success, 400/401 with error template
        variables on failure.
    """
    aux_headers = aux_headers or {}  # A safer default
    log.debug('the query_params: {}'.format(args))
    if not args:
        template_vars = {'contentstring': 'No params', 'title': 'Could Not Login'}
        headers = {}
        return 400, template_vars, headers
    if args.get('error', False):
        contentstring = 'An error occurred while trying to log into URS. URS says: "{}". '.format(args.get('error', ''))
        template_vars = {'contentstring': contentstring, 'title': 'Could Not Login'}
        if args.get('error') == 'access_denied':
            # This happens when user doesn't agree to EULA. Maybe other times too.
            return_status = 401
            template_vars['contentstring'] = 'Be sure to agree to the EULA.'
            template_vars['error_code'] = 'EULA_failure'
        else:
            return_status = 400
        return return_status, template_vars, {}
    if 'code' not in args:
        contentstring = 'Did not get the required CODE from URS'
        template_vars = {'contentstring': contentstring, 'title': 'Could not login.'}
        headers = {}
        return 400, template_vars, headers
    log.debug('pre-do_auth() query params: {}'.format(args))
    redir_url = get_redirect_url(context)
    auth = do_auth(args.get('code', ''), redir_url, aux_headers=aux_headers)
    log.debug('auth: {}'.format(auth))
    if not auth:
        log.debug('no auth returned from do_auth()')
        template_vars = {'contentstring': 'There was a problem talking to URS Login', 'title': 'Could Not Login'}
        return 400, template_vars, {}
    # The user id is the last path segment of the endpoint URS returns.
    user_id = auth['endpoint'].split('/')[-1]
    log_context(user_id=user_id)
    # NOTE(review): aux_headers is not forwarded here ({} is passed instead)
    # -- confirm whether that is intentional.
    user_profile = get_profile(user_id, auth['access_token'], aux_headers={})
    log.debug('Got the user profile: {}'.format(user_profile))
    if user_profile:
        log.debug('urs-access-token: {}'.format(auth['access_token']))
        # 'state' carries the URL the user originally requested, if any.
        if 'state' in args:
            redirect_to = args["state"]
        else:
            redirect_to = get_base_url(context)
        if 'user_groups' not in user_profile or not user_profile['user_groups']:
            user_profile['user_groups'] = []
        jwt_cookie_payload = user_profile_2_jwt_payload(user_id, auth['access_token'], user_profile)
        headers = {'Location': redirect_to}
        headers.update(make_set_cookie_headers_jwt(jwt_cookie_payload, '', cookie_domain))
        return 301, {}, headers
    template_vars = {'contentstring': 'Could not get user profile from URS', 'title': 'Could Not Login'}
    return 400, template_vars, {}
| 11,746
| 0
| 252
|
6bc67b3877e6d543746b09d6c9a8ffaf05e2d6b6
| 979
|
py
|
Python
|
source/lambda/solution_helper/lambda_function.py
|
aws-solutions/aws-devops-monitoring-dashboard
|
ce634d51c64118ba6716b1ffa19756d2e97ad4c8
|
[
"Apache-2.0"
] | 9
|
2021-10-30T13:03:37.000Z
|
2022-03-07T19:29:30.000Z
|
source/lambda/solution_helper/lambda_function.py
|
aws-solutions/aws-devops-monitoring-dashboard
|
ce634d51c64118ba6716b1ffa19756d2e97ad4c8
|
[
"Apache-2.0"
] | 1
|
2022-01-03T20:18:32.000Z
|
2022-01-13T00:44:51.000Z
|
source/lambda/solution_helper/lambda_function.py
|
aws-solutions/aws-devops-monitoring-dashboard
|
ce634d51c64118ba6716b1ffa19756d2e97ad4c8
|
[
"Apache-2.0"
] | 5
|
2021-10-30T13:03:32.000Z
|
2022-03-16T18:36:33.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import uuid
import requests
import json
from copy import copy
from datetime import datetime
from crhelper import CfnResource
from util.solution_metrics import send_metrics
logger = logging.getLogger(__name__)
helper = CfnResource(json_logging=True, log_level="INFO")
@helper.create
@helper.update
@helper.delete
| 26.459459
| 90
| 0.720123
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import uuid
import requests
import json
from copy import copy
from datetime import datetime
from crhelper import CfnResource
from util.solution_metrics import send_metrics
logger = logging.getLogger(__name__)
helper = CfnResource(json_logging=True, log_level="INFO")
@helper.create
@helper.update
@helper.delete
def solution_helper(event, _):
    """crhelper hook for Create/Update/Delete custom-resource events.

    On Create of a Custom::CreateUUID resource, generate a fresh UUID and
    expose it to CloudFormation through helper.Data["UUID"].
    """
    logger.info(f"[solution_helper] event: {event}")
    is_uuid_resource = event["ResourceType"] == "Custom::CreateUUID"
    if is_uuid_resource and event["RequestType"] == "Create":
        fresh_uuid = str(uuid.uuid4())
        helper.Data.update({"UUID": fresh_uuid})
        logger.info(f"[solution_helper] create uuid: {fresh_uuid}")
def handler(event, context):
    """Lambda entry point: delegate the CloudFormation custom-resource event
    to the crhelper dispatcher.

    Exceptions are logged with traceback rather than re-raised.

    Args:
        event: CloudFormation custom-resource event dict.
        context: Lambda context object.
    """
    logger.info(f"[handler] event: {event}")
    try:
        helper(event, context)
    except Exception as error:
        # Bug fix: the original string lacked the f prefix, so the literal
        # text "{error}" was logged instead of the exception message.
        logger.exception(f"[handler] failed: {error}")
| 485
| 0
| 45
|
48e3d47332daa40f2b72af7bae81629a2d1002eb
| 762
|
py
|
Python
|
test/solution_tests/FIZ/test_fiz.py
|
DPNT-Sourcecode/FIZ-erqp01
|
df4fc9abf63c9597352a4bfa5d3f97ac5aada0bc
|
[
"Apache-2.0"
] | null | null | null |
test/solution_tests/FIZ/test_fiz.py
|
DPNT-Sourcecode/FIZ-erqp01
|
df4fc9abf63c9597352a4bfa5d3f97ac5aada0bc
|
[
"Apache-2.0"
] | null | null | null |
test/solution_tests/FIZ/test_fiz.py
|
DPNT-Sourcecode/FIZ-erqp01
|
df4fc9abf63c9597352a4bfa5d3f97ac5aada0bc
|
[
"Apache-2.0"
] | null | null | null |
from solutions.FIZ import fizz_buzz_solution
| 36.285714
| 74
| 0.66273
|
from solutions.FIZ import fizz_buzz_solution
class TestHlo2():
    """Regression tests for the FIZ fizz-buzz-variant solution."""
    def test_fiz(self):
        # Expected rules (inferred from the cases below -- confirm against
        # the kata spec): multiples of 3 map to 'fizz', multiples of 5 to
        # 'buzz', both to 'fizz buzz'; digit occurrences of 3/5 also appear
        # to trigger fizz/buzz, with a 'fake deluxe' suffix in some repeated
        # cases; any other number is returned unchanged.
        assert fizz_buzz_solution.fizz_buzz(3) == 'fizz fake deluxe'
        assert fizz_buzz_solution.fizz_buzz(12) == 'fizz'
        assert fizz_buzz_solution.fizz_buzz(10) == 'buzz'
        assert fizz_buzz_solution.fizz_buzz(15) == 'fizz buzz fake deluxe'
        assert fizz_buzz_solution.fizz_buzz(31) == 'fizz'
        assert fizz_buzz_solution.fizz_buzz(51) == 'fizz buzz'
        assert fizz_buzz_solution.fizz_buzz(53) == 'fizz buzz'
        assert fizz_buzz_solution.fizz_buzz(55) == 'buzz fake deluxe'
        assert fizz_buzz_solution.fizz_buzz(33) == 'fizz fake deluxe'
        assert fizz_buzz_solution.fizz_buzz(8) == 8
| 634
| -4
| 49
|
6d148d29c415b6451fce5bdddc781fcb51b3a959
| 9,755
|
py
|
Python
|
Test/Mock/Component.py
|
paul-ollis/cleversheep3
|
86e6ca76ea4e8524f16e2348d38484dcfafb07d0
|
[
"Apache-2.0"
] | null | null | null |
Test/Mock/Component.py
|
paul-ollis/cleversheep3
|
86e6ca76ea4e8524f16e2348d38484dcfafb07d0
|
[
"Apache-2.0"
] | null | null | null |
Test/Mock/Component.py
|
paul-ollis/cleversheep3
|
86e6ca76ea4e8524f16e2348d38484dcfafb07d0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Classes to mock components for use in system testing.
The main class is the `Component`, which is intended to be used as the base
class for an object that plays the part of a separately running process.
However, a component is actually executed under the control of the
`cleversheep3.Test.PollManager`. This allows its behaviour to be tightly
controlled and tests to be fairly deterministic.
Actually this has become suitably general purpose as to be put somewhere else.
But what I will probably do is keep Component as a mocking class and make it
inherit from some other class, which will contain the bulk of current
Component.
"""
from __future__ import print_function
__docformat__ = "restructuredtext"
import itertools
from cleversheep3.Test.Tester import Logging
from . import Comms
class Component:
    """The base class for a mock component in a test scenario.

    The intention for this class is that it should provide most of the
    functionality required to emulate a process. At least well enough for most
    test purposes.

    :Ivariables:
      pollManager
        The ``PollManager`` that is managing the component.
      connections
        One or more active connections managed by the component.
      listeners
        A dict of active listeners being managed by the component. The key is
        the name of the peer that is expected to connect and the value of the
        ``Listener`` managing the listen socket.
      pendingConnections
        A dict of pending outgoing connections. The key is the name of the peer
        that is expected to connect and the value of the ``Connector`` trying
        to connect to the peer.
    """
    _name = "UNKNOWN"
    # Class-wide counter used to make unique names for '%d' peer patterns
    # (see _onIncomingConnect).
    peerCounter = itertools.count(1)

    #{ Construction
    def __init__(self, pollManager, log=None):
        """Constructor:

        :Parameters:
          pollManager
            This is typically a `PollManager` instance, but could be something
            that provides the same interface.
          log
            A standard ``logging`` object. If omitted, a default logging object
            is used.
            This is likely to disappear, so it is best not to use it. Currently
            the log object is stored, but not used.
        """
        self.log = log or Logging.getLogger(self._name).info
        self.pollManager = pollManager
        self.connections = {}
        self.listeners = {}
        self.pendingConnections = {}

    #{ Connection establishment
    def listenFor(self, listenName, bindAddr):
        """Listen for connection from a peer.

        Arranges to start listening for connection from a peer.
        A SOCK_STREAM socket is created and added to a list of listening
        sockets. Each call to this method sets up a new listener.

        When a connection request occurs a socket for the connection is
        created (by accepting the request). Then the `onIncomingConnect` method
        is invoked as ``self.onIncomingConnect(s, peerName)``, where ``s`` is
        the new connection socket and ``peerName`` is the ``listenName``
        passed to this ``listenFor`` method.

        :Param listenName:
            The name by which the listener should be known, which is normally
            name of the peer that is expected to try to connect.
        :Param bindAddr:
            The address to bind to for listening. This is a tuple of
            ``(ipAddr, port)`` and the ``ipAddr`` is often an empty string,
            meaning accept connection from any address.
        """
        listenSocket = Comms.Listener(self.pollManager, listenName, bindAddr,
                                      onConnect=self._onIncomingConnect)
        self.listeners[listenName] = listenSocket
        return listenSocket

    # Alias kept for backwards compatibility.
    addListener = listenFor

    def openDgramSocket(self, peerName, bindAddr=None, peerAddr=None):
        """Open a datagram socket.

        :Param peerName:
            A symbolic name for the communicating peer.
        :Param bindAddr:
            The address to bind to for receiving packets.
        """
        p = Comms.Dgram(self.pollManager, peerName, bindAddr=bindAddr,
                        peerAddr=peerAddr, onReceive=self._onReceive)
        self.connections[peerName] = p

    def openInputPipe(self, pipePath, peerName, native=False):
        """Open a named pipe for reading from.

        :Param pipePath:
            The path name of the PIPE.
        """
        p = Comms.PipeConn(False, pipePath, peerName, self.pollManager,
                           onReceive=self._onReceive, onClose=self._onClose,
                           onError=self._onError, native=native)
        self.connections[peerName] = p

    def openOutputPipe(self, pipePath, peerName, native=False):
        """Open a named pipe for writing to.

        :Param pipePath:
            The path name of the PIPE.
        """
        p = Comms.PipeConn(True, pipePath, peerName, self.pollManager,
                           onReceive=self._onReceive, onClose=self._onClose,
                           onError=self._onError, native=native)
        self.connections[peerName] = p

    def connectTo(self, peerName, peerAddr, connectTimes=(0.0, 0.5),
                  bindAddress=None):
        """Start trying to connect to a peer.

        :Param peerName:
            The name by which the connection to the peer should be known,
            which is normally the peer's name.
        :Param peerAddr:
            The internet address, i.e. a tuple of ``(ipAddr, port)``, of the
            peer. The ``ipAddr`` may be an empty string, meaning
            ``localhost``.
        :Param connectTimes:
            A tuple of ``(firstDelay, retryDelay)``, each is floating point
            value representing seconds. The first connection attempt will
            be made after ``firstDelay`` seconds. If that fails then
            connection attempts will be made every ``retryDelay`` seconds.
        """
        c = Comms.Connecter(self.pollManager, peerName, peerAddr,
                            connectTimes, self._onOutgoingConnect,
                            bindAddress=bindAddress)
        self.pendingConnections[peerName] = c
        return c

    def sendTo(self, peerName, bytes, count=None):
        """Send bytes to a named peer.

        :Param peerName:
            The name by which the connection to the peer is known, which is
            normally the peer's name.
        :Param bytes, count:
            The string of bytes to write and how many of those bytes to send.
            The `count` is normally omitted (or ``None``), in which case the
            entire string is sent.
        """
        conn = self.connections.get(peerName)
        if not conn:
            # Unknown peer: silently drop (best-effort send).
            return
        conn.send(bytes, count=count)

    #{ The Component protocol methods.
    # These are hooks intended to be overridden by subclasses; the base
    # implementations do nothing.
    def onIncomingConnect(self, s, peerName):
        """Invoked when accepting a connection from a peer."""

    def onOutgoingConnect(self, peerName):
        """Invoked when an outgoing connection is established."""

    def onReceive(self, conn):
        """Invoked when a connection has received bytes."""

    def onClose(self, conn):
        """Invoked when a connection closes."""

    def onError(self, conn, exc):
        """Invoked when a connection has an abnormal error."""

    # TODO: Get a better name.
    def getConnName(self, listenName, peerAddr):
        """Map an incoming connection to a peer name.

        If you listen for multiple clients connecting to a single port then
        you need to over-ride this so that each new connection gets given
        a new name.
        """
        return listenName

    #{ Handling of socket activity.
    # NOTE(review): self._onReceive/_onClose/_onError are referenced below but
    # not defined in this chunk -- presumably provided elsewhere; confirm.
    def _onIncomingConnect(self, s, listenName, peerAddr, source):
        """This is invoked by a `Listener`, for a new connection."""
        peerName = self.getConnName(listenName, peerAddr)
        # A '%d' suffix in the name is expanded with the shared counter so
        # each accepted connection gets a unique name.
        if peerName.endswith('%d'):
            peerName = peerName % next(self.peerCounter)
        conn = self.connections[peerName] = Comms.SocketConn(s,
            peerName, self.pollManager,
            onReceive=self._onReceive, onClose=self._onClose,
            onError=self._onError, addr=peerAddr,
            isSSL=source.isSslConnection())
        self.onIncomingConnect(s, peerName)
        return conn

    def _onOutgoingConnect(self, peerName, source):
        """This is invoked by a `Connecter`, for a new connection."""
        # TODO: Make logging controllable.
        # logComms(self.log, self._name, peerName, "<CONNECT>")
        pending = self.pendingConnections.pop(peerName)
        conn = self.connections[peerName] = Comms.SocketConn(pending.s,
            peerName, self.pollManager,
            onReceive=self._onReceive, onClose=self._onClose,
            onError=self._onError,
            isSSL=source.isSslConnection())
        self.onOutgoingConnect(peerName)
| 37.810078
| 79
| 0.634341
|
#!/usr/bin/env python
"""Classes to mock components for use in system testing.
The main class is the `Component`, which is intended to be used as the base
class for an object that plays the part of a separately running process.
However, a component is actually executed under the control of the
`cleversheep3.Test.PollManager`. This allows its behaviour to be tightly
controlled and tests to be fairly deterministic.
Actually this has become suitably general purpose as to be put somewhere else.
But what I will probably do is keep Component as a mocking class and make it
inherit from some other class, which will contain the bulk of current
Component.
"""
from __future__ import print_function
__docformat__ = "restructuredtext"
import itertools
from cleversheep3.Test.Tester import Logging
from . import Comms
def logComms(logFunc, src, dst, text):
    """Emit a ``src -> dst text`` comms-trace line through *logFunc*."""
    fmt = "%s -> %s %s"
    logFunc(fmt, src, dst, text)
class Component:
    """The base class for a mock component in a test scenario.

    The intention for this class is that it should provide most of the
    functionality required to emulate a process. At least well enough for most
    test purposes.

    :Ivariables:
      pollManager
        The ``PollManager`` that is managing the component.
      connections
        One or more active connections managed by the component.
      listeners
        A dict of active listeners being managed by the component. The key is
        the name of the peer that is expected to connect and the value of the
        ``Listener`` managing the listen socket.
      pendingConnections
        A dict of pending outgoing connections. The key is the name of the peer
        that is expected to connect and the value of the ``Connector`` trying
        to connect to the peer.
    """
    _name = "UNKNOWN"
    # Shared counter used to make '%d'-suffixed connection names unique.
    peerCounter = itertools.count(1)
    #{ Construction
    def __init__(self, pollManager, log=None):
        """Constructor:

        :Parameters:
          pollManager
            This is typically a `PollManager` instance, but could be something
            that provides the same interface.
          log
            A standard ``logging`` object. If omitted, a default logging object
            is used.
            This is likely to disappear, so it is best not to use it. Currently
            the log object is stored, but not used.
        """
        self.log = log or Logging.getLogger(self._name).info
        # self.log = log or getLog(self._name).info
        self.pollManager = pollManager
        self.connections = {}
        self.listeners = {}
        self.pendingConnections = {}
    def shutDown(self):
        """Shut down every listener, connection and pending connection."""
        for name, conn in self.listeners.items():
            if conn is not None:
                conn.shutDown()
        for name, conn in self.connections.items():
            if conn is not None:
                conn.shutDown()
        for name, conn in self.pendingConnections.items():
            if conn is not None:
                conn.shutDown()
        # Drop all references so the component is effectively reset.
        self.connections = {}
        self.listeners = {}
        self.pendingConnections = {}
    #{ Connection establishment
    def listenFor(self, listenName, bindAddr):
        """Listen for connection from a peer.

        Arranges to start listening for connection from a peer.
        A SOCK_STREAM socket is created and added to a list of listenting
        sockets. Each call to this methods sets up a new listener.

        When a connection request occurs a socket for the connection is
        created (by accepting the request). Then the `onIncomingConnect` method
        is invoked as ``self.onIncomingConnect(s, peerName)``, where ``s`` is
        the new connection socket and ``peerName`` is the ``listenName``
        passed to this ``listenFor`` method.

        :Param listenName:
            The name by which the listener should be known, which is normally
            name of the peer that is expected to try to connect.
        :Param bindAddr:
            The address to bind to for listening. This is a tuple of
            ``(ipAddr, port)`` and the ``ipAddr`` is often an empty string,
            meaning accept connection from any address.
        """
        listenSocket = Comms.Listener(self.pollManager, listenName, bindAddr,
                onConnect=self._onIncomingConnect)
        self.listeners[listenName] = listenSocket
        return listenSocket
    # Alias for listenFor.
    addListener = listenFor
    def openDgramSocket(self, peerName, bindAddr=None, peerAddr=None):
        """Open a datagram socket.

        :Param peerName:
            A symbolic name for the communicating peer.
        :Param bindAddr:
            The address to bind to for receiving packets.
        """
        p = Comms.Dgram(self.pollManager, peerName, bindAddr=bindAddr,
                peerAddr=peerAddr, onReceive=self._onReceive)
        self.connections[peerName] = p
    def openInputPipe(self, pipePath, peerName, native=False):
        """Open a named pipe for reading from.

        :Param pipePath:
            The path name of the PIPE.
        """
        p = Comms.PipeConn(False, pipePath, peerName, self.pollManager,
                onReceive=self._onReceive, onClose=self._onClose,
                onError=self._onError, native=native)
        self.connections[peerName] = p
    def openOutputPipe(self, pipePath, peerName, native=False):
        """Open a named pipe for writing to.

        :Param pipePath:
            The path name of the PIPE.
        """
        p = Comms.PipeConn(True, pipePath, peerName, self.pollManager,
                onReceive=self._onReceive, onClose=self._onClose,
                onError=self._onError, native=native)
        self.connections[peerName] = p
    def connectTo(self, peerName, peerAddr, connectTimes=(0.0, 0.5),
            bindAddress=None):
        """Start trying to connect to a peer.

        :Param peerName:
            The name by which the connection to the peer should be known,
            which is normally the peer's name.
        :Param peerAddr:
            The internet address, i.e. a tuple of ``(ipAddr, port)``, of the
            peer. The ``ipAddr`` may be an empty string, meaning
            ``localhost``.
        :Param connectTimes:
            A tuple of ``(firstDelay, retryDelay)``, each is floating point
            value representing seconds. The first connection attempt will
            be made after ``firstDelay`` seconds. If that fails then
            connection attempts will be made every ``retryDelay`` seconds.
        """
        c = Comms.Connecter(self.pollManager, peerName, peerAddr,
                connectTimes, self._onOutgoingConnect,
                bindAddress=bindAddress)
        self.pendingConnections[peerName] = c
        return c
    def sendTo(self, peerName, bytes, count=None):
        """Send bytes to a named peer.

        :Param peerName:
            The name by which the connection to the peer is known, which is
            normally the peer's name.
        :Param bytes, count:
            The string of bytes to write and how many of those bytes to send.
            The `count` is normally omitted (or ``None``), in which case the
            entire string is sent.
        """
        conn = self.connections.get(peerName)
        if not conn:
            # Unknown peer: silently drop (best-effort semantics).
            return
        conn.send(bytes, count=count)
    #{ The Component protocol methods.
    # These are deliberately empty hooks for subclasses to override.
    def onIncomingConnect(self, s, peerName):
        """Invoked when accepting a connection from a peer."""
    def onOutgoingConnect(self, peerName):
        """Invoked when an outgoing connection is established."""
    def onReceive(self, conn):
        """Invoked when a connection has received bytes."""
    def onClose(self, conn):
        """Invoked when a connection closes."""
    def onError(self, conn, exc):
        """Invoked when a connection has an abnormal error."""
    # TODO: Get a better name.
    def getConnName(self, listenName, peerAddr):
        """Map an incoming connection to a peer name.

        If you listen for multiple clients connecting to a single port then
        you need to over-ride this so that each new connection gets given
        a new name.
        """
        return listenName
    #{ Handling of socket activity.
    def _onIncomingConnect(self, s, listenName, peerAddr, source):
        """This is invoked by a `Listener`, for a new connection."""
        peerName = self.getConnName(listenName, peerAddr)
        # A literal '%d' suffix is replaced by a fresh counter value so each
        # accepted client gets a unique connection name.
        if peerName.endswith('%d'):
            peerName = peerName % next(self.peerCounter)
        conn = self.connections[peerName] = Comms.SocketConn(s,
                peerName, self.pollManager,
                onReceive=self._onReceive, onClose=self._onClose,
                onError=self._onError, addr=peerAddr,
                isSSL=source.isSslConnection())
        self.onIncomingConnect(s, peerName)
        return conn
    def _onOutgoingConnect(self, peerName, source):
        """This is invoked by a `Connecter`, for a new connection."""
        # TODO: Make logging controllable.
        # logComms(self.log, self._name, peerName, "<CONNECT>")
        # Promote the connection from pending to active.
        pending = self.pendingConnections.pop(peerName)
        conn = self.connections[peerName] = Comms.SocketConn(pending.s,
                peerName, self.pollManager,
                onReceive=self._onReceive, onClose=self._onClose,
                onError=self._onError,
                isSSL=source.isSslConnection())
        self.onOutgoingConnect(peerName)
    # Thin trampolines so subclass overrides of the public hooks are honoured.
    def _onReceive(self, conn):
        self.onReceive(conn)
    def _onClose(self, conn):
        self.onClose(conn)
        # logComms(self.log, conn.peerName, self._name, "<CLOSE>")
    def _onError(self, conn, exc):
        self.onError(conn, exc)
| 683
| 0
| 131
|
6f7b106f7638421b8fd55db0ebf2169f0c928b89
| 2,415
|
py
|
Python
|
src/ralph/signals.py
|
DoNnMyTh/ralph
|
97b91639fa68965ad3fd9d0d2652a6545a2a5b72
|
[
"Apache-2.0"
] | 1,668
|
2015-01-01T12:51:20.000Z
|
2022-03-29T09:05:35.000Z
|
src/ralph/signals.py
|
hq-git/ralph
|
e2448caf02d6e5abfd81da2cff92aefe0a534883
|
[
"Apache-2.0"
] | 2,314
|
2015-01-02T13:26:26.000Z
|
2022-03-29T04:06:03.000Z
|
src/ralph/signals.py
|
hq-git/ralph
|
e2448caf02d6e5abfd81da2cff92aefe0a534883
|
[
"Apache-2.0"
] | 534
|
2015-01-05T12:40:28.000Z
|
2022-03-29T21:10:12.000Z
|
from django.db import connection
from django.db.models.signals import post_save
from django.dispatch import receiver
# TODO(mkurek): make this working as a decorator, example:
# @post_commit(MyModel)
# def my_handler(instance):
# ...
def post_commit(func, model, signal=post_save, single_call=True):
"""
Post commit signal for specific model.
It's better than Django's post_save, because:
* it handles transaction rollback (transaction could be rolled back
after calling post_save)
* it handles M2M relations (post_save is (usually) called when main model
is saved, before related M2M instances are saved)
Writing tests:
Remember to make your TestCase inheriting from one of:
- TransactionTestCase (Django)
- APITransactionTestCase (Django Rest Framework)
Unless `on_commit` signal won't be called.
Requirements:
* you have to use database supporting transactions (ex. MySQL)
* you have to use django-transaction-hooks
(https://github.com/carljm/django-transaction-hooks) for Django<=1.8
(it was merged into Django 1.9)
Notice that this feature will work whether or not you're using transactions
in your code. Possible scenarios are as follows:
* `ATOMIC_REQUESTS` is set to True in settings - then every request is
wrapped in transaction - at the end of processing each (saving) request,
this hook will be processed (for models which were saved)
* view is decorated using `transaction.atomic` - at the end of processing
the view, this hook will be called (if any of registered models was saved)
* if transaction is not started for current request, then this hook will
behave as post_save (will be called immediately)
"""
@receiver(signal, sender=model, weak=False)
| 41.637931
| 80
| 0.681159
|
from django.db import connection
from django.db.models.signals import post_save
from django.dispatch import receiver
# TODO(mkurek): make this working as a decorator, example:
# @post_commit(MyModel)
# def my_handler(instance):
# ...
def post_commit(func, model, signal=post_save, single_call=True):
    """
    Post commit signal for specific model.

    It's better than Django's post_save, because:
    * it handles transaction rollback (transaction could be rolled back
      after calling post_save)
    * it handles M2M relations (post_save is (usually) called when main model
      is saved, before related M2M instances are saved)

    Writing tests:
        Remember to make your TestCase inheriting from one of:
        - TransactionTestCase (Django)
        - APITransactionTestCase (Django Rest Framework)
        Unless `on_commit` signal won't be called.

    Requirements:
    * you have to use database supporting transactions (ex. MySQL)
    * you have to use django-transaction-hooks
      (https://github.com/carljm/django-transaction-hooks) for Django<=1.8
      (it was merged into Django 1.9)

    Notice that this feature will work whether or not you're using transactions
    in your code. Possible scenarios are as follows:
    * `ATOMIC_REQUESTS` is set to True in settings - then every request is
      wrapped in transaction - at the end of processing each (saving) request,
      this hook will be processed (for models which were saved)
    * view is decorated using `transaction.atomic` - at the end of processing
      the view, this hook will be called (if any of registered models was saved)
    * if transaction is not started for current request, then this hook will
      behave as post_save (will be called immediately)
    """
    # weak=False keeps a strong reference to `wrap`; a local closure would
    # otherwise be garbage-collected and the signal silently disconnected.
    @receiver(signal, sender=model, weak=False)
    def wrap(sender, instance, **kwargs):
        def wrapper():
            # prevent from calling the same func multiple times for single
            # instance
            called_already_attr = '_' + func.__name__ + '_called'
            if not (
                getattr(instance, called_already_attr, False) and
                single_call
            ):
                func(instance)
                # Flag stored on the instance itself so repeated saves within
                # the same transaction trigger `func` only once.
                setattr(instance, called_already_attr, True)
        # TODO(mkurek): replace connection by transaction after upgrading to
        # Django 1.9
        connection.on_commit(wrapper)
| 562
| 0
| 26
|
585404588be51bc49193fe8046ecb213a7dc11b7
| 3,743
|
py
|
Python
|
ai_modules/kcwu_short2.py
|
sgpritam/2048-python
|
1366db6a712b6808699d6b71166487d7cc01a88c
|
[
"BSD-3-Clause"
] | 79
|
2016-01-01T17:41:11.000Z
|
2022-02-21T18:18:13.000Z
|
ai_modules/kcwu_short2.py
|
sgpritam/2048-python
|
1366db6a712b6808699d6b71166487d7cc01a88c
|
[
"BSD-3-Clause"
] | 4
|
2017-04-08T15:14:08.000Z
|
2021-12-25T00:51:34.000Z
|
ai_modules/kcwu_short2.py
|
sgpritam/2048-python
|
1366db6a712b6808699d6b71166487d7cc01a88c
|
[
"BSD-3-Clause"
] | 63
|
2017-02-25T13:54:44.000Z
|
2022-01-17T18:04:59.000Z
|
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from multiprocessing import *
import sys
range4 = range(4)
job_table = {}
table = {}
# vim:sw=4:expandtab:softtabstop=4
| 26.174825
| 176
| 0.536468
|
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from multiprocessing import *
import sys
range4 = range(4)
job_table = {}
def rotateRight(grid):
    """Return the rotated 4x4 grid: output row c is input column 3-c, top to bottom."""
    return [[row[3 - col] for row in grid] for col in range4]
def move_row(row):
    """Slide one row to the left, merging equal adjacent tiles once each.

    Empty cells are falsy; the result is always padded with None to length 4.
    """
    tiles = [v for v in row if v]
    merged = []
    i = 0
    while i < len(tiles):
        if i + 1 < len(tiles) and tiles[i] == tiles[i + 1]:
            # Pair merges into a single doubled tile; both inputs consumed.
            merged.append(2 * tiles[i])
            i += 2
        else:
            merged.append(tiles[i])
            i += 1
    merged.extend([None] * (4 - len(merged)))
    return merged
def move(grid, rot):
    """Rotate *grid* `rot` times, then slide every row left.

    Returns ``(new_grid, moved)`` where *moved* tells whether the board
    changed (i.e. the move is legal).
    """
    for _ in range(rot):
        grid = rotateRight(grid)
    # BUG FIX: on Python 3 `map()` is lazy, so the old `out = map(...)` made
    # `out != grid` always True and the returned grid non-indexable.
    # Materialize the rows so comparison and indexing behave as intended.
    out = [move_row(r) for r in grid]
    return out, out != grid
def eval_monotone_L(grid):
    """Score left-to-right monotonicity: reward non-increasing runs, penalize breaks."""
    score = 0
    for x in range(4):
        row = grid[x]
        run = 0
        for y in range(3):
            a = row[y] or 0
            b = row[y + 1] or 0
            if a and a >= b:
                # Extend the current non-increasing run; longer runs pay
                # quadratically more.
                run += 1
                score += run ** 2 * 4
            else:
                # Run broken: penalize proportionally to the jump size.
                score -= abs(a - b) * 1.5
                run = 0
    return score
def eval_monotone_LR(grid):
    """Best monotonicity score of the grid and its 180-degree rotation."""
    flipped = rotateRight(rotateRight(grid))
    return max(eval_monotone_L(grid), eval_monotone_L(flipped))
def eval_smoothness(grid):
    """Penalize differences between each tile and its closest neighbour.

    Empty cells count as value 2; the 1e8 sentinel is only a safe default
    for the min() and is never chosen on a 4x4 board.
    """
    total = 0
    for x in range(4):
        for y in range(4):
            here = grid[x][y] or 2
            diffs = [1e8]
            for dx, dy in ((-1, 0), (0, -1), (1, 0), (0, 1)):
                nx, ny = x + dx, y + dy
                if 0 <= nx < 4 and 0 <= ny < 4:
                    diffs.append(abs(here - (grid[nx][ny] or 2)))
            total += min(diffs)
    return -total
def EVAL(grid):
    """Combined heuristic: monotonicity (both axes) + smoothness - occupancy penalty."""
    occupied = 16 - sum(r.count(None) for r in grid)
    score = eval_monotone_LR(grid)
    score += eval_monotone_LR(rotateRight(grid))
    score += eval_smoothness(grid)
    score -= occupied ** 2
    return score
def encode(grid):
    """Flatten the four rows of the grid into a hashable tuple (memo key)."""
    return tuple(grid[0]) + tuple(grid[1]) + tuple(grid[2]) + tuple(grid[3])
def search_max(grid, depth, nodep):
    """Player node: best chance-node value over all legal moves (-1e8 if stuck)."""
    best = -1e8
    for direction in range(4):
        shifted, changed = move(grid, direction)
        if changed:
            best = max(best, search_min(shifted, depth - 1, nodep))
    return best
table = {}  # memo cache for search_min: (encoded grid, depth) -> expected score
def worker(jq, rq):
    # Worker-process loop: pull (grid, depth, nodep) jobs from `jq`,
    # run the chance-node search, and push (key, score) results onto `rq`.
    while 1:
        grid, depth, nodep = jq.get()
        # Each job starts with a fresh per-process memo cache.
        table.clear()
        rq.put((
            (encode(grid), depth)
            ,search_min(grid, depth, nodep)))
def search_min(grid, depth, nodep):
    # Chance node of the expectimax search: average the player's best score
    # over every possible random tile spawn (2 with p=.9, 4 with p=.1).
    if depth == 0:
        return EVAL(grid)
    key = encode(grid), depth
    if key in table:
        return table[key]
    scores = []
    for i in range4:
        row = grid[i]
        for j in range4:
            if not row[j]:
                score = 0
                for v, p in ((2, .9), (4, .1)):
                    # Mutate the cell in place, recurse, then restore it.
                    row[j] = v
                    score += p * search_max(grid, depth, p*nodep)
                row[j] = None
                scores.append(score)
    b = sum(scores) / len(scores)
    table[key] = b
    return b
def gen_job3(grid, depth, nodep, jq):
    # Enqueue one search job per legal move, de-duplicated via job_table.
    for m in range4:
        g2, moved = move(grid, m)
        key = encode(g2), depth - 1
        if moved and key not in job_table:
            job_table[key] = 1
            jq.put((g2, depth - 1, nodep))
def gen_job2(grid, depth, nodep, jq):
    # For every empty cell, temporarily spawn a 2 (p=.9) or a 4 (p=.1) and
    # fan out per-move jobs for the resulting position.
    for i in range4:
        row = grid[i]
        for j in range4:
            if not row[j]:
                for v, p in ((2, .9), (4, .1)):
                    row[j] = v
                    gen_job3(grid, depth, p*nodep, jq)
                row[j] = None
class AI:
    """2048 player that fans the expectimax search out over worker processes."""
    def __init__(self):
        # Manager-backed queues are shareable across the worker processes.
        self.mg = Manager()
        self.jq = self.mg.Queue()
        self.rq = self.mg.Queue()
        self.pp = []
        for i in range(30):
            p = Process(target=worker, args=(self.jq, self.rq))
            self.pp.append(p)
            p.start()
    def __del__(self):
        # Putting a non-tuple makes each worker's unpack raise, ending its loop.
        for i in range(30):
            self.jq.put(0)
    def getNextMove(self, grid):
        """Return 'up'/'left'/'down'/'right' for the best depth-2 expectimax move."""
        table.clear()
        job_table.clear()
        # Seed the shared job queue with every depth-2 subproblem.
        for m in range4:
            move(grid, m)[1] and gen_job2(move(grid,m)[0], 2, 1, self.jq)
        # Collect one result per enqueued job into the local memo cache.
        for i in job_table:
            key, value = self.rq.get()
            table[key] = value
        return ['up','left','down','right'][max((search_min(move(grid,m)[0],2,1),m) for m in range4 if move(grid,m)[1])[1]]
# vim:sw=4:expandtab:softtabstop=4
| 3,073
| -12
| 398
|
51b3b5a2926120226b4c6ef9a3aa3313337a91df
| 5,181
|
py
|
Python
|
nengolib/networks/echo_state.py
|
ikajic/nengolib
|
bd30ec38ba656bedb94a267b5f86b51d1cec4954
|
[
"MIT"
] | 27
|
2016-01-21T04:11:02.000Z
|
2021-11-16T20:41:04.000Z
|
nengolib/networks/echo_state.py
|
ikajic/nengolib
|
bd30ec38ba656bedb94a267b5f86b51d1cec4954
|
[
"MIT"
] | 178
|
2016-01-21T16:04:34.000Z
|
2021-05-01T16:28:02.000Z
|
nengolib/networks/echo_state.py
|
ikajic/nengolib
|
bd30ec38ba656bedb94a267b5f86b51d1cec4954
|
[
"MIT"
] | 4
|
2019-03-19T18:22:02.000Z
|
2021-03-23T16:06:57.000Z
|
import numpy as np
from scipy.linalg import eig
from nengo.params import IntParam, NumberParam
from nengo.neurons import NeuronTypeParam
from nengo.synapses import SynapseParam
import nengo
from nengolib import Network
from nengolib.neurons import Tanh
from nengolib.networks.reservoir import Reservoir
__all__ = ['EchoState']
class EchoState(Network, Reservoir):
    """An Echo State Network (ESN) within a Nengo Reservoir.

    This creates a standard Echo State Network (ENS) as a Nengo network,
    defaulting to the standard set of assumptions of non-spiking Tanh units
    and a random recurrent weight matrix [1]_. This is based on the
    minimalist Python implementation from [2]_.

    The network takes some arbitrary time-varying vector as input, encodes it
    randomly, and filters it using nonlinear units and a random recurrent
    weight matrix normalized by its spectral radius.

    This class also inherits ``nengolib.networks.Reservoir``, and thus the
    optimal linear readout is solved for in the same way: the network is
    simulated on a test signal, and then a solver is used to optimize the
    decoding connection weights.

    References:
        [1] http://www.scholarpedia.org/article/Echo_state_network
        [2] http://minds.jacobs-university.de/mantas/code
    """
    # Nengo parameter descriptors that validate the configuration attributes.
    n_neurons = IntParam('n_neurons', default=None, low=1)
    dimensions = IntParam('dimensions', default=None, low=1)
    dt = NumberParam('dt', low=0, low_open=True)
    recurrent_synapse = SynapseParam('recurrent_synapse')
    gain = NumberParam('gain', low=0, low_open=True)
    neuron_type = NeuronTypeParam('neuron_type')
    def __init__(self, n_neurons, dimensions, recurrent_synapse=0.005,
                 readout_synapse=None, radii=1.0, gain=1.25, rng=None,
                 neuron_type=Tanh(), include_bias=True, ens_seed=None,
                 label=None, seed=None, add_to_container=None, **ens_kwargs):
        """Initializes the Echo State Network.

        Parameters
        ----------
        n_neurons : int
            The number of neurons to use in the reservoir.
        dimensions : int
            The dimensionality of the input signal.
        recurrent_synapse : nengo.synapses.Synapse (Default: ``0.005``)
            Synapse used to filter the recurrent connection.
        readout_synapse : nengo.synapses.Synapse (Default: ``None``)
            Optional synapse to filter all of the outputs before solving
            for the linear readout. This is included in the connection to the
            ``output`` Node created within the network.
        radii : scalar or array_like, optional (Default: ``1``)
            The radius of each dimension of the input signal, used to normalize
            the incoming connection weights.
        gain : scalar, optional (Default: ``1.25``)
            A scalar gain on the recurrent connection weight matrix.
        rng : ``numpy.random.RandomState``, optional (Default: ``None``)
            Random state used to initialize all weights.
        neuron_type : ``nengo.neurons.NeuronType`` optional \
                      (Default: ``Tanh()``)
            Neuron model to use within the reservoir.
        include_bias : ``bool`` (Default: ``True``)
            Whether to include a bias current to the neural nonlinearity.
            This should be ``False`` if the neuron model already has a bias,
            e.g., ``LIF`` or ``LIFRate``.
        ens_seed : int, optional (Default: ``None``)
            Seed passed to the ensemble of neurons.
        """
        Network.__init__(self, label, seed, add_to_container)
        self.n_neurons = n_neurons
        self.dimensions = dimensions
        self.recurrent_synapse = recurrent_synapse
        self.radii = radii  # TODO: make array or scalar parameter?
        self.gain = gain
        self.rng = np.random if rng is None else rng
        self.neuron_type = neuron_type
        self.include_bias = include_bias
        # Random input encoders in [-0.5, 0.5), scaled down by the input radii.
        self.W_in = (
            self.rng.rand(self.n_neurons, self.dimensions) - 0.5) / self.radii
        if self.include_bias:
            self.W_bias = self.rng.rand(self.n_neurons, 1) - 0.5
        else:
            self.W_bias = np.zeros((self.n_neurons, 1))
        # Normalize recurrent weights by their spectral radius, then scale
        # by the requested gain.
        self.W = self.rng.rand(self.n_neurons, self.n_neurons) - 0.5
        self.W *= self.gain / max(abs(eig(self.W)[0]))
        with self:
            self.ensemble = nengo.Ensemble(
                self.n_neurons, 1, neuron_type=self.neuron_type, seed=ens_seed,
                **ens_kwargs)
            self.input = nengo.Node(size_in=self.dimensions)
            pool = self.ensemble.neurons
            nengo.Connection(
                self.input, pool, transform=self.W_in, synapse=None)
            nengo.Connection(  # note the bias will be active during training
                nengo.Node(output=1, label="bias"), pool,
                transform=self.W_bias, synapse=None)
            nengo.Connection(
                self.ensemble.neurons, pool, transform=self.W,
                synapse=self.recurrent_synapse)
        Reservoir.__init__(
            self, self.input, pool, readout_synapse=readout_synapse,
            network=self)
| 43.175
| 79
| 0.647365
|
import numpy as np
from scipy.linalg import eig
from nengo.params import IntParam, NumberParam
from nengo.neurons import NeuronTypeParam
from nengo.synapses import SynapseParam
import nengo
from nengolib import Network
from nengolib.neurons import Tanh
from nengolib.networks.reservoir import Reservoir
__all__ = ['EchoState']
class EchoState(Network, Reservoir):
    """An Echo State Network (ESN) within a Nengo Reservoir.

    This creates a standard Echo State Network (ENS) as a Nengo network,
    defaulting to the standard set of assumptions of non-spiking Tanh units
    and a random recurrent weight matrix [1]_. This is based on the
    minimalist Python implementation from [2]_.

    The network takes some arbitrary time-varying vector as input, encodes it
    randomly, and filters it using nonlinear units and a random recurrent
    weight matrix normalized by its spectral radius.

    This class also inherits ``nengolib.networks.Reservoir``, and thus the
    optimal linear readout is solved for in the same way: the network is
    simulated on a test signal, and then a solver is used to optimize the
    decoding connection weights.

    References:
        [1] http://www.scholarpedia.org/article/Echo_state_network
        [2] http://minds.jacobs-university.de/mantas/code
    """
    # Nengo parameter descriptors that validate the configuration attributes.
    n_neurons = IntParam('n_neurons', default=None, low=1)
    dimensions = IntParam('dimensions', default=None, low=1)
    dt = NumberParam('dt', low=0, low_open=True)
    recurrent_synapse = SynapseParam('recurrent_synapse')
    gain = NumberParam('gain', low=0, low_open=True)
    neuron_type = NeuronTypeParam('neuron_type')
    def __init__(self, n_neurons, dimensions, recurrent_synapse=0.005,
                 readout_synapse=None, radii=1.0, gain=1.25, rng=None,
                 neuron_type=Tanh(), include_bias=True, ens_seed=None,
                 label=None, seed=None, add_to_container=None, **ens_kwargs):
        """Initializes the Echo State Network.

        Parameters
        ----------
        n_neurons : int
            The number of neurons to use in the reservoir.
        dimensions : int
            The dimensionality of the input signal.
        recurrent_synapse : nengo.synapses.Synapse (Default: ``0.005``)
            Synapse used to filter the recurrent connection.
        readout_synapse : nengo.synapses.Synapse (Default: ``None``)
            Optional synapse to filter all of the outputs before solving
            for the linear readout. This is included in the connection to the
            ``output`` Node created within the network.
        radii : scalar or array_like, optional (Default: ``1``)
            The radius of each dimension of the input signal, used to normalize
            the incoming connection weights.
        gain : scalar, optional (Default: ``1.25``)
            A scalar gain on the recurrent connection weight matrix.
        rng : ``numpy.random.RandomState``, optional (Default: ``None``)
            Random state used to initialize all weights.
        neuron_type : ``nengo.neurons.NeuronType`` optional \
                      (Default: ``Tanh()``)
            Neuron model to use within the reservoir.
        include_bias : ``bool`` (Default: ``True``)
            Whether to include a bias current to the neural nonlinearity.
            This should be ``False`` if the neuron model already has a bias,
            e.g., ``LIF`` or ``LIFRate``.
        ens_seed : int, optional (Default: ``None``)
            Seed passed to the ensemble of neurons.
        """
        Network.__init__(self, label, seed, add_to_container)
        self.n_neurons = n_neurons
        self.dimensions = dimensions
        self.recurrent_synapse = recurrent_synapse
        self.radii = radii  # TODO: make array or scalar parameter?
        self.gain = gain
        self.rng = np.random if rng is None else rng
        self.neuron_type = neuron_type
        self.include_bias = include_bias
        # Random input encoders in [-0.5, 0.5), scaled down by the input radii.
        self.W_in = (
            self.rng.rand(self.n_neurons, self.dimensions) - 0.5) / self.radii
        if self.include_bias:
            self.W_bias = self.rng.rand(self.n_neurons, 1) - 0.5
        else:
            self.W_bias = np.zeros((self.n_neurons, 1))
        # Normalize recurrent weights by their spectral radius, then scale
        # by the requested gain.
        self.W = self.rng.rand(self.n_neurons, self.n_neurons) - 0.5
        self.W *= self.gain / max(abs(eig(self.W)[0]))
        with self:
            self.ensemble = nengo.Ensemble(
                self.n_neurons, 1, neuron_type=self.neuron_type, seed=ens_seed,
                **ens_kwargs)
            self.input = nengo.Node(size_in=self.dimensions)
            pool = self.ensemble.neurons
            nengo.Connection(
                self.input, pool, transform=self.W_in, synapse=None)
            nengo.Connection(  # note the bias will be active during training
                nengo.Node(output=1, label="bias"), pool,
                transform=self.W_bias, synapse=None)
            nengo.Connection(
                self.ensemble.neurons, pool, transform=self.W,
                synapse=self.recurrent_synapse)
        Reservoir.__init__(
            self, self.input, pool, readout_synapse=readout_synapse,
            network=self)
| 0
| 0
| 0
|
29f16302d911899d097a220dbe4d2244dad9298b
| 427
|
py
|
Python
|
tests/unit/test_k8s.py
|
lslebodn/conu
|
dee6fd958471f77d1c0511b031ea136dfaf8a77a
|
[
"MIT"
] | 95
|
2018-05-19T14:35:08.000Z
|
2022-01-08T23:31:40.000Z
|
tests/unit/test_k8s.py
|
lslebodn/conu
|
dee6fd958471f77d1c0511b031ea136dfaf8a77a
|
[
"MIT"
] | 179
|
2017-09-12T11:14:30.000Z
|
2018-04-26T05:36:13.000Z
|
tests/unit/test_k8s.py
|
lslebodn/conu
|
dee6fd958471f77d1c0511b031ea136dfaf8a77a
|
[
"MIT"
] | 16
|
2018-05-09T14:15:32.000Z
|
2021-08-02T21:11:33.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright Contributors to the Conu project.
# SPDX-License-Identifier: MIT
#
"""
Tests for Kubernetes backend
"""
from conu.backend.k8s.utils import k8s_ports_to_metadata_ports, metadata_ports_to_k8s_ports
| 21.35
| 91
| 0.754098
|
# -*- coding: utf-8 -*-
#
# Copyright Contributors to the Conu project.
# SPDX-License-Identifier: MIT
#
"""
Tests for Kubernetes backend
"""
from conu.backend.k8s.utils import k8s_ports_to_metadata_ports, metadata_ports_to_k8s_ports
def test_port_conversion():
    """Converting metadata ports to k8s ports and back is lossless."""
    original = ["8080/tcp", "12345"]
    as_k8s = metadata_ports_to_k8s_ports(original)
    assert original == k8s_ports_to_metadata_ports(as_k8s)
| 167
| 0
| 23
|
d7129155bec3bfc5f1ad8ebe45c9344c21663af1
| 7,468
|
py
|
Python
|
research/rl/ppo.py
|
matwilso/boxLCD
|
7505e27f47e6694026303aa6cf12477959fc9fba
|
[
"MIT"
] | 2
|
2021-05-17T14:33:54.000Z
|
2021-09-09T07:14:03.000Z
|
research/rl/ppo.py
|
matwilso/boxLCD
|
7505e27f47e6694026303aa6cf12477959fc9fba
|
[
"MIT"
] | null | null | null |
research/rl/ppo.py
|
matwilso/boxLCD
|
7505e27f47e6694026303aa6cf12477959fc9fba
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import yaml
from datetime import datetime
from collections import defaultdict
from copy import deepcopy
import itertools
import numpy as np
import torch as th
from torch.optim import Adam
import time
import numpy as np
import scipy.signal
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
from research.rl.buffers import OGRB, ReplayBuffer, PPOBuffer
from research.rl.pponets import ActorCritic
from research.define_config import env_fn
from boxLCD import env_map
import boxLCD
from research import utils
from research import wrappers
#from research.nets.flat_everything import FlatEverything
from jax.tree_util import tree_multimap, tree_map
from ._base import RLAlgo, TN
| 38.297436
| 157
| 0.638324
|
import matplotlib.pyplot as plt
import yaml
from datetime import datetime
from collections import defaultdict
from copy import deepcopy
import itertools
import numpy as np
import torch as th
from torch.optim import Adam
import time
import numpy as np
import scipy.signal
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
from research.rl.buffers import OGRB, ReplayBuffer, PPOBuffer
from research.rl.pponets import ActorCritic
from research.define_config import env_fn
from boxLCD import env_map
import boxLCD
from research import utils
from research import wrappers
#from research.nets.flat_everything import FlatEverything
from jax.tree_util import tree_multimap, tree_map
from ._base import RLAlgo, TN
class PPO(RLAlgo):
  """Clipped-surrogate PPO trainer operating on a vectorized environment."""

  def __init__(self, G):
    super().__init__(G)
    # Create actor-critic module and target networks
    self.ac = ActorCritic(self.obs_space, self.act_space, self.goal_key, G=G).to(G.device)
    # Experience buffer
    self.buf = PPOBuffer(G, obs_space=self.obs_space, act_space=self.act_space, size=G.num_envs * G.steps_per_epoch)
    # Count variables (protip: try to get a feel for how different size networks behave!)
    var_counts = tuple(utils.count_vars(module) for module in [self.ac.pi, self.ac.v])
    print('\nNumber of parameters: \t pi: %d, \t v: %d\n' % var_counts)
    self.sum_count = sum(var_counts)
    # Set up optimizers for policy and value function
    self.pi_optimizer = Adam(self.ac.pi.parameters(), lr=G.pi_lr, betas=(0.9, 0.999), eps=1e-8)
    self.vf_optimizer = Adam(self.ac.v.parameters(), lr=G.vf_lr, betas=(0.9, 0.999), eps=1e-8)
    # Baseline evaluation before any training.
    self.test_agent(-1)
    if self.G.lenv:
      self.test_agent(-1, use_lenv=True)

  def get_av(self, o):
    """Return (action, value) for observation batch *o*."""
    return self.ac.step(o)[:2]

  def compute_loss_pi(self, data):
    """Clipped PPO policy loss plus diagnostics (approx. KL, entropy, clip fraction)."""
    obs, act, adv, logp_old = data['obs'], data['act'], data['adv'], data['logp']
    # Policy loss
    pi, logp = self.ac.pi(obs, act)
    ratio = th.exp(logp - logp_old)
    clip_adv = th.clamp(ratio, 1 - self.G.clip_ratio, 1 + self.G.clip_ratio) * adv
    loss_pi = -(th.min(ratio * adv, clip_adv)).mean()
    # Useful extra info
    approx_kl = (logp_old - logp).mean().cpu()
    ent = pi.entropy().mean().cpu()
    clipped = ratio.gt(1 + self.G.clip_ratio) | ratio.lt(1 - self.G.clip_ratio)
    clipfrac = th.as_tensor(clipped, dtype=th.float32).mean().cpu()
    pi_info = dict(kl=approx_kl, ent=ent, cf=clipfrac)
    return loss_pi, pi_info

  def compute_loss_v(self, data):
    """Mean squared error of the value head against empirical returns."""
    obs, ret = data['obs'], data['ret']
    return ((self.ac.v(obs) - ret)**2).mean()

  def update(self, data):
    """Run minibatch SGD on the policy and value losses for one epoch of data."""
    pi_l_old, pi_info_old = self.compute_loss_pi(data)
    pi_l_old = pi_l_old.cpu()
    v_l_old = self.compute_loss_v(data).cpu()
    # Train policy with multiple steps of gradient descent
    for i in range(self.G.train_pi_iters):
      idxs = np.random.randint(0, data['act'].shape[0], self.G.bs)
      self.pi_optimizer.zero_grad()
      loss_pi, pi_info = self.compute_loss_pi(tree_map(lambda x: x[idxs], data))
      kl = pi_info['kl']
      # if kl > 1.5 * self.G.target_kl:
      #  break
      loss_pi.backward()
      self.pi_optimizer.step()
    self.logger['StopIter'] += [i]
    # Value function learning
    for i in range(self.G.train_v_iters):
      idxs = np.random.randint(0, data['act'].shape[0], self.G.bs)
      self.vf_optimizer.zero_grad()
      loss_v = self.compute_loss_v(tree_map(lambda x: x[idxs], data))
      loss_v.backward()
      self.vf_optimizer.step()
    # Log changes from update
    kl, ent, cf = pi_info['kl'], pi_info_old['ent'], pi_info['cf']
    self.logger['LossPi'] += [pi_l_old.detach().cpu()]
    self.logger['LossV'] += [v_l_old.detach().cpu()]
    self.logger['KL'] += [kl.detach().cpu()]
    self.logger['Entropy'] += [ent.detach().cpu()]
    self.logger['ClipFrac'] += [cf.detach().cpu()]
    self.logger['DeltaLossPi'] += [loss_pi.detach().cpu() - pi_l_old.detach().cpu()]
    self.logger['DeltaLossV'] += [loss_v.detach().cpu() - v_l_old.detach().cpu()]

  def run_firehose(self):
    """run w/o leaving GPU"""
    pass

  def run(self):
    """Main training loop: collect experience in the venv and update/log each epoch."""
    # Prepare for interaction with environment
    epoch = -1
    epoch_time = self.start_time = time.time()
    if self.G.lenv:
      o, ep_ret, ep_len = self.env.reset(np.arange(self.G.num_envs)), th.zeros(self.G.num_envs).to(self.G.device), np.zeros(self.G.num_envs)  # .to(G.device)
      success = th.zeros(self.G.num_envs).to(self.G.device)
      time_to_succ = self.G.ep_len * th.ones(self.G.num_envs).to(self.G.device)
      pf = th
    else:
      o, ep_ret, ep_len = self.env.reset(), np.zeros(self.G.num_envs), np.zeros(self.G.num_envs)
      # BUG FIX: `np.bool` alias was removed in NumPy 1.24; use builtin bool.
      success = np.zeros(self.G.num_envs, dtype=bool)
      time_to_succ = self.G.ep_len * np.ones(self.G.num_envs)
      pf = np
    # Main loop: collect experience in venv and update/log each epoch
    for itr in range(1, self.G.total_steps + 1):
      with utils.Timer(self.logger, 'action'):
        o = {key: val for key, val in o.items()}
        a, v, logp = self.ac.step(o)
      # Step the venv
      with utils.Timer(self.logger, 'step'):
        next_o, r, d, info = self.env.step(a)  # , self.logger)
      ep_ret += r
      ep_len += 1
      # store
      trans = {'act': a, 'rew': r, 'val': v, 'logp': logp}
      for key in o:
        trans[f'o:{key}'] = o[key]
      if self.G.lenv:
        # Learned env returns tensors; buffer expects numpy arrays.
        def fx(x):
          if isinstance(x, np.ndarray):
            return x
          else:
            return x.detach().cpu().numpy()
        trans = tree_map(fx, trans)
      self.buf.store_n(trans)
      o = next_o
      if self.G.lenv:
        d = d.cpu().numpy()
        def proc(x): return x.cpu().float()
      else:
        def proc(x): return x
      timeout = ep_len == self.G.ep_len
      terminal = np.logical_or(d, timeout)
      epoch_ended = itr % self.G.steps_per_epoch == 0
      terminal_epoch = np.logical_or(terminal, epoch_ended)
      timeout_epoch = np.logical_or(timeout, epoch_ended)
      mask = ~timeout_epoch
      if self.G.learned_rew:
        #self.logger['preproc_rew'] += [info['preproc_rew'].mean()]
        self.logger['learned_rew'] += [info['learned_rew'].mean()]
        self.logger['og_rew'] += [info['og_rew'].mean()]
        self.logger['goal_delta'] += [info['goal_delta'].mean()]
        self.logger['rew_delta'] += [info['rew_delta'].mean()]
      # if trajectory didn't reach terminal state, bootstrap value target
      _, v, _ = self.ac.step(o)
      v[mask] *= 0
      self.buf.finish_paths(np.nonzero(terminal_epoch)[0], v)
      for idx in np.nonzero(terminal_epoch)[0]:
        self.logger['EpRet'] += [proc(ep_ret[idx])]
        self.logger['EpLen'] += [ep_len[idx]]
        ep_ret[idx] = 0
        ep_len[idx] = 0
      if epoch_ended:
        # Drop into a debugger when the user touches logdir/pause.marker.
        if (self.G.logdir / 'pause.marker').exists():
          import ipdb; ipdb.set_trace()
        epoch = itr // self.G.steps_per_epoch
        self.update(self.buf.get())
        with utils.Timer(self.logger, 'test_agent'):
          self.test_agent(itr)
          if self.G.lenv:
            self.test_agent(itr, use_lenv=True)
        # save it
        self.ac.save(self.G.logdir)
        self.logger['var_count'] = [self.sum_count]
        self.logger['dt'] = dt = time.time() - epoch_time
        self.logger['env_interactions'] = env_interactions = itr * self.G.num_envs
        self.logger = utils.dump_logger(self.logger, self.writer, itr, self.G)
        epoch_time = time.time()
| 6,460
| 212
| 23
|
f25c24d909d80eb63ed1e25e177320b08bbc6edc
| 1,542
|
py
|
Python
|
tests/test_economy.py
|
Erogue-Lord/ancap-bot
|
cb2424627e27225a8e2396eaa465236d4e7b24bb
|
[
"MIT"
] | 1
|
2020-08-17T17:09:05.000Z
|
2020-08-17T17:09:05.000Z
|
tests/test_economy.py
|
Erogue-Lord/ancap-bot
|
cb2424627e27225a8e2396eaa465236d4e7b24bb
|
[
"MIT"
] | null | null | null |
tests/test_economy.py
|
Erogue-Lord/ancap-bot
|
cb2424627e27225a8e2396eaa465236d4e7b24bb
|
[
"MIT"
] | null | null | null |
import asyncio
from datetime import datetime
from decimal import Decimal
from tortoise import Tortoise
import pytest
@pytest.fixture(autouse=True)
| 32.808511
| 66
| 0.702335
|
import asyncio
from datetime import datetime
from decimal import Decimal
from tortoise import Tortoise
import pytest
@pytest.fixture(autouse=True)
def event_loop(monkeypatch):
global ancap_bot
monkeypatch.delattr("dotenv.load_dotenv")
import ancap_bot.cogs.economy
loop = asyncio.get_event_loop()
loop.run_until_complete(ancap_bot.db.init())
yield loop
loop.run_until_complete(Tortoise.close_connections())
loop.close()
def test_salary(event_loop):
async def async_salary():
await ancap_bot.db.init()
economy_cog = ancap_bot.cogs.economy.Economy(None)
await ancap_bot.db.User.create(user_id=1)
await economy_cog.pay(datetime.now(), 1)
user = await ancap_bot.db.User.filter(user_id=1).first()
await user.save()
assert user.balance == 25
await Tortoise.close_connections()
event_loop.run_until_complete(async_salary())
def test_transference(event_loop):
async def async_transference():
await ancap_bot.db.User.create(user_id=1)
await ancap_bot.db.User.create(user_id=2)
user_1 = await ancap_bot.db.User.filter(user_id=1).first()
user_1.balance = 20
await user_1.save()
await ancap_bot.db.transaction(1, Decimal(10), 2)
user_1 = await ancap_bot.db.User.filter(user_id=1).first()
user_2 = await ancap_bot.db.User.filter(user_id=1).first()
assert user_1.balance == 10
assert user_2.balance == 10
event_loop.run_until_complete(async_transference())
| 1,322
| 0
| 68
|
b8bdbd0987c85f503a5658663124311935d26509
| 414
|
py
|
Python
|
ABC/abc051-abc100/abc076/b.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 2
|
2020-06-12T09:54:23.000Z
|
2021-05-04T01:34:07.000Z
|
ABC/abc051-abc100/abc076/b.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 961
|
2020-06-23T07:26:22.000Z
|
2022-03-31T21:34:52.000Z
|
ABC/abc051-abc100/abc076/b.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | null | null | null |
'''input
10
10
76
4
3
10
'''
# -*- coding: utf-8 -*-
# AtCoder Beginner Contest
# Problem B
if __name__ == '__main__':
operation_count = int(input())
incremental_value = int(input())
candidates = list()
for i in range(operation_count + 1):
result = 2 ** i + (operation_count - i) * incremental_value
candidates.append(result)
print(min(candidates))
| 16.56
| 68
| 0.586957
|
'''input
10
10
76
4
3
10
'''
# -*- coding: utf-8 -*-
# AtCoder Beginner Contest
# Problem B
if __name__ == '__main__':
operation_count = int(input())
incremental_value = int(input())
candidates = list()
for i in range(operation_count + 1):
result = 2 ** i + (operation_count - i) * incremental_value
candidates.append(result)
print(min(candidates))
| 0
| 0
| 0
|
318c4aa7d390df96ab796a0470c35e596cc5b013
| 807
|
py
|
Python
|
tests/test_parse_bytes_function.py
|
SethMMorton/natsor
|
45c042ee849710fb45df6c3a9f980cdc0d7524f4
|
[
"MIT"
] | null | null | null |
tests/test_parse_bytes_function.py
|
SethMMorton/natsor
|
45c042ee849710fb45df6c3a9f980cdc0d7524f4
|
[
"MIT"
] | null | null | null |
tests/test_parse_bytes_function.py
|
SethMMorton/natsor
|
45c042ee849710fb45df6c3a9f980cdc0d7524f4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""These test the utils.py functions."""
import pytest
from hypothesis import given
from hypothesis.strategies import binary
from natsort.ns_enum import NSType, ns
from natsort.utils import BytesTransformer, parse_bytes_factory
@pytest.mark.parametrize(
"alg, example_func",
[
(ns.DEFAULT, lambda x: (x,)),
(ns.IGNORECASE, lambda x: (x.lower(),)),
# With PATH, it becomes a tested tuple.
(ns.PATH, lambda x: ((x,),)),
(ns.PATH | ns.IGNORECASE, lambda x: ((x.lower(),),)),
],
)
@given(x=binary())
| 29.888889
| 64
| 0.677819
|
# -*- coding: utf-8 -*-
"""These test the utils.py functions."""
import pytest
from hypothesis import given
from hypothesis.strategies import binary
from natsort.ns_enum import NSType, ns
from natsort.utils import BytesTransformer, parse_bytes_factory
@pytest.mark.parametrize(
"alg, example_func",
[
(ns.DEFAULT, lambda x: (x,)),
(ns.IGNORECASE, lambda x: (x.lower(),)),
# With PATH, it becomes a tested tuple.
(ns.PATH, lambda x: ((x,),)),
(ns.PATH | ns.IGNORECASE, lambda x: ((x.lower(),),)),
],
)
@given(x=binary())
def test_parse_bytest_factory_makes_function_that_returns_tuple(
x: bytes, alg: NSType, example_func: BytesTransformer
) -> None:
parse_bytes_func = parse_bytes_factory(alg)
assert parse_bytes_func(x) == example_func(x)
| 210
| 0
| 22
|
952747f2c41b0d1614d5d77fab4a7fa08c5f3565
| 285
|
py
|
Python
|
music/distance/aural/diatonic/seventh/minor.py
|
jedhsu/music
|
dea68c4a82296cd4910e786f533b2cbf861377c3
|
[
"MIT"
] | null | null | null |
music/distance/aural/diatonic/seventh/minor.py
|
jedhsu/music
|
dea68c4a82296cd4910e786f533b2cbf861377c3
|
[
"MIT"
] | null | null | null |
music/distance/aural/diatonic/seventh/minor.py
|
jedhsu/music
|
dea68c4a82296cd4910e786f533b2cbf861377c3
|
[
"MIT"
] | null | null | null |
"""
*minor 7th*
The minor 7th diatonic interval.
"""
from dataclasses import dataclass
from fivear.musical.scale import Diatonic
from ...simple import SimpleInterval
__all__ = ["MinorSeventh"]
@dataclass
| 11.875
| 41
| 0.708772
|
"""
*minor 7th*
The minor 7th diatonic interval.
"""
from dataclasses import dataclass
from fivear.musical.scale import Diatonic
from ...simple import SimpleInterval
__all__ = ["MinorSeventh"]
@dataclass
class MinorSeventh(
SimpleInterval,
Diatonic,
):
pass
| 0
| 44
| 22
|
4c2ac7fef64884be4cb9a8c8930be3a4e673d4f8
| 198
|
py
|
Python
|
applications/baseapp/management/template_structures/application/__init__.py
|
ajitjasrotia/django-project-skeleton
|
70e3e06384dfb018f59b1af8c7c3febf2bbcd47c
|
[
"MIT"
] | 48
|
2018-01-10T11:21:35.000Z
|
2021-09-08T23:28:07.000Z
|
applications/baseapp/management/template_structures/application/__init__.py
|
ajitjasrotia/django-project-skeleton
|
70e3e06384dfb018f59b1af8c7c3febf2bbcd47c
|
[
"MIT"
] | 26
|
2018-04-20T10:46:00.000Z
|
2019-09-21T06:47:13.000Z
|
applications/baseapp/management/template_structures/application/__init__.py
|
ajitjasrotia/django-project-skeleton
|
70e3e06384dfb018f59b1af8c7c3febf2bbcd47c
|
[
"MIT"
] | 20
|
2019-03-09T19:46:10.000Z
|
2022-03-27T14:57:03.000Z
|
# isort:skip_file
# flake8: noqa
from .html import TEMPLATE_HTML
from .apps import TEMPLATE_APPS
from .urls import TEMPLATE_URLS
from .views import TEMPLATE_VIEWS
from .tests import TEMPLATE_TESTS
| 22
| 33
| 0.823232
|
# isort:skip_file
# flake8: noqa
from .html import TEMPLATE_HTML
from .apps import TEMPLATE_APPS
from .urls import TEMPLATE_URLS
from .views import TEMPLATE_VIEWS
from .tests import TEMPLATE_TESTS
| 0
| 0
| 0
|
a50d3e313cf746148e5555ff3d2afcdb2bab96ec
| 227
|
py
|
Python
|
Python/6kyu/replace_with_alphabet_position/solution.py
|
petergouvoussis/codewars_challenges
|
8800b2fcb0283838a828857f70e3b46169b7b184
|
[
"MIT"
] | null | null | null |
Python/6kyu/replace_with_alphabet_position/solution.py
|
petergouvoussis/codewars_challenges
|
8800b2fcb0283838a828857f70e3b46169b7b184
|
[
"MIT"
] | null | null | null |
Python/6kyu/replace_with_alphabet_position/solution.py
|
petergouvoussis/codewars_challenges
|
8800b2fcb0283838a828857f70e3b46169b7b184
|
[
"MIT"
] | null | null | null |
import string
| 22.7
| 52
| 0.581498
|
import string
def alphabet_position(text):
abc = '0' + string.ascii_lowercase
output = []
for i in text:
if i.isalpha():
output.append(str(abc.index(i.lower())))
return ' '.join(output)
| 191
| 0
| 22
|
112cacdef9b3bf1094e329eebb6d2a5c6fff3abe
| 997
|
py
|
Python
|
app/server/models.py
|
A-A-Tyurin/test_smart_design
|
25073debe89801cc23d7acc466263076be691733
|
[
"MIT"
] | null | null | null |
app/server/models.py
|
A-A-Tyurin/test_smart_design
|
25073debe89801cc23d7acc466263076be691733
|
[
"MIT"
] | null | null | null |
app/server/models.py
|
A-A-Tyurin/test_smart_design
|
25073debe89801cc23d7acc466263076be691733
|
[
"MIT"
] | null | null | null |
from typing import Dict, Optional
from pydantic import BaseModel, validator
| 29.323529
| 73
| 0.602808
|
from typing import Dict, Optional
from pydantic import BaseModel, validator
class Product(BaseModel):
name: str
description: str
params: Optional[Dict[str, str]]
class Config:
min_anystr_length = 1
max_anystr_length = 250
error_msg_templates = {
'value_error.any_str.min_length': 'min_length:{limit_value}',
'value_error.any_str.max_length': 'max_length:{limit_value}',
}
@validator('name', 'description')
def is_space_value(cls, value):
if value.isspace():
raise ValueError('Value must not be a space')
return value
@validator('params')
def is_space_values(cls, value_dict):
if value_dict:
for key, value in value_dict.items():
if key.isspace():
raise ValueError('Key must not be a space')
if value.isspace():
raise ValueError('Value must not be a space')
return value_dict
| 432
| 464
| 23
|
807e583cbe68257d26ee0f4d40e74fb8a9d64b95
| 1,203
|
py
|
Python
|
unity.py
|
forever7410852/esvn
|
a8c5db2c46bf1dbaa30bc62f3cd7458f826e97b3
|
[
"Apache-2.0"
] | 1
|
2017-04-16T14:19:28.000Z
|
2017-04-16T14:19:28.000Z
|
unity.py
|
forever7410852/esvn
|
a8c5db2c46bf1dbaa30bc62f3cd7458f826e97b3
|
[
"Apache-2.0"
] | null | null | null |
unity.py
|
forever7410852/esvn
|
a8c5db2c46bf1dbaa30bc62f3cd7458f826e97b3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# run in python 3.5 and after
import fire
import subprocess
import os
import re
import signal
import time
import sys
class unity(object):
"""An enhanced unity cli."""
if __name__ == '__main__':
fire.Fire(unity)
| 30.075
| 124
| 0.588529
|
#!/usr/bin/env python3
# run in python 3.5 and after
import fire
import subprocess
import os
import re
import signal
import time
import sys
class unity(object):
"""An enhanced unity cli."""
def open(self):
result = subprocess.run(
["defaults", "read", "/Users/HSH/Library/Preferences/com.unity3d.UnityEditor5.x.plist"], stdout=subprocess.PIPE)
unity = "/Applications/Unity5.3.7/Unity.app/Contents/MacOS/Unity"
# print(result)
projects = {}
for line in result.stdout.decode().split('\n'):
if "RecentlyUsedProjectPaths" in line:
number = line.split('=')[0].strip(' "').split('-')[1]
projects[number] = line.split('=')[1].strip(' ";')
print("Select the project you want to open :")
for x, y in projects.items():
print(" %s : %s" % (x, y))
r = input('number:')
if r in projects.keys():
print(projects[r])
signal.signal(signal.SIGINT, handler)
subprocess.Popen([unity, "-projectPath", projects[r]])
def handler(signum, frame):
print('You pressed Ctrl+C!')
sys.exit(0)
if __name__ == '__main__':
fire.Fire(unity)
| 908
| 0
| 50
|
0aa7901321d61c9a8020d316629398976cd486f9
| 11,827
|
py
|
Python
|
texar/modules/embedders/position_embedders.py
|
Holmeswww/Text_Infilling
|
f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3
|
[
"Apache-2.0"
] | 25
|
2019-01-03T09:15:20.000Z
|
2022-02-12T04:20:59.000Z
|
texar/modules/embedders/position_embedders.py
|
Holmeswww/Text_Infilling
|
f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3
|
[
"Apache-2.0"
] | 4
|
2019-03-28T11:02:20.000Z
|
2022-02-15T04:57:33.000Z
|
texar/modules/embedders/position_embedders.py
|
Holmeswww/Text_Infilling
|
f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3
|
[
"Apache-2.0"
] | 9
|
2019-01-03T02:20:37.000Z
|
2022-02-12T04:20:50.000Z
|
#
"""
Various position embedders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
from texar.modules.embedders.embedder_base import EmbedderBase
from texar.modules.embedders import embedder_utils
from texar.utils.mode import is_train_mode
from texar.utils.shapes import mask_sequences
# pylint: disable=arguments-differ, invalid-name
__all__ = [
"PositionEmbedder",
"SinusoidsPositionEmbedder",
]
class PositionEmbedder(EmbedderBase):
"""Simple position embedder that maps position indexes into embeddings
via lookup.
Either :attr:`init_value` or :attr:`position_size` is required. If both are
given, :attr:`init_value.shape[0]` must equal :attr:`position_size`.
Args:
init_value (optional): A `Tensor` or numpy array that contains the
initial value of embeddings. It is typically of shape
`[position_size, embedding dim]`
If `None`, embedding is initialized as specified in
:attr:`hparams["initializer"]`. Otherwise, the
:attr:`"initializer"` and :attr:`"dim"`
hyperparameters in :attr:`hparams` are ignored.
position_size (int, optional): The number of possible positions, e.g.,
the maximum sequence length. Required if :attr:`init_value` is
not given.
hparams (dict, optional): Embedder hyperparameters. If it is not
specified, the default hyperparameter setting is used. See
:attr:`default_hparams` for the sturcture and default values.
"""
@staticmethod
def default_hparams():
"""Returns a dictionary of hyperparameters with default values.
Returns:
A dictionary with the following structure and values.
.. code-block:: python
{
"name": "position_embedder",
"dim": 100,
"initializer": {
"type": "random_uniform_initializer",
"kwargs": {
"minval": -0.1,
"maxval": 0.1,
"seed": None
}
},
"regularizer": {
"type": "L1L2",
"kwargs": {
"l1": 0.,
"l2": 0.
}
},
"dropout_rate": 0,
"trainable": True,
}
See :func:`~texar.modules.default_embedding_hparams` for more
details.
"""
hparams = embedder_utils.default_embedding_hparams()
hparams["name"] = "position_embedder"
return hparams
def _build(self, positions=None, sequence_length=None, mode=None, **kwargs):
"""Embeds with look-up.
Either :attr:`position` or :attr:`sequence_length` is required:
- If both are given, :attr:`sequence_length` is used to mask out \
embeddings of those time steps beyond the respective sequence \
lengths.
- If only :attr:`sequence_length` is given, then positions \
from 0 to sequence length - 1 are embedded.
Args:
positions (optional): An integer tensor containing the position
ids to embed.
sequence_length (optional): An integer tensor of shape
`[batch_size]`. Time steps beyond
the respective sequence lengths will have zero-valued
embeddings.
mode (optional): A tensor taking value in
:tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`, including
`TRAIN`, `EVAL`, and `PREDICT`. If `None`, dropout will be
controlled by :func:`texar.context.global_mode`.
kwargs: Additional keyword arguments for
:tf_main:`tf.nn.embedding_lookup <nn/embedding_lookup>` besides
:attr:`params` and :attr:`ids`.
Returns:
A `Tensor` of shape `shape(inputs) + embedding dimension`.
"""
# Gets embedder inputs
inputs = positions
if positions is None:
if sequence_length is None:
raise ValueError(
'Either `positions` or `sequence_length` is required.')
max_length = tf.reduce_max(sequence_length)
single_inputs = tf.range(start=0, limit=max_length, dtype=tf.int32)
# Expands `single_inputs` to have shape [batch_size, max_length]
expander = tf.expand_dims(tf.ones_like(sequence_length), -1)
inputs = expander * tf.expand_dims(single_inputs, 0)
ids_rank = len(inputs.shape.dims)
embedding = self._embedding
is_training = is_train_mode(mode)
# Gets dropout strategy
st = self._hparams.dropout_strategy
if positions is None and st == 'item':
# If `inputs` is based on `sequence_length`, then dropout
# strategies 'item' and 'item_type' have the same effect, we
# use 'item_type' to avoid unknown noise_shape in the 'item'
# strategy
st = 'item_type'
# Dropouts as 'item_type' before embedding
if st == 'item_type':
dropout_layer = self._get_dropout_layer(
self._hparams, dropout_strategy=st)
if dropout_layer:
embedding = dropout_layer.apply(inputs=embedding,
training=is_training)
# Embeds
outputs = tf.nn.embedding_lookup(embedding, inputs, **kwargs)
# Dropouts as 'item' or 'elements' after embedding
if st != 'item_type':
dropout_layer = self._get_dropout_layer(
self._hparams, ids_rank=ids_rank, dropout_input=outputs,
dropout_strategy=st)
if dropout_layer:
outputs = dropout_layer.apply(inputs=outputs,
training=is_training)
# Optionally masks
if sequence_length is not None:
outputs = mask_sequences(
outputs, sequence_length,
tensor_rank=len(inputs.shape.dims) + self._dim_rank)
return outputs
@property
def embedding(self):
"""The embedding tensor.
"""
return self._embedding
@property
def dim(self):
"""The embedding dimension.
"""
return self._dim
@property
def position_size(self):
"""The position size, i.e., maximum number of positions.
"""
return self._position_size
class SinusoidsPositionEmbedder(EmbedderBase):
"""Sinusoid position embedder that maps position indexes into embeddings
via sinusoid calculation.
Each channel of the input Tensor is incremented by a sinusoid of a
different frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query
and thememory inputs to attention.
The use of relative position is possible because sin(x+y) and
cos(x+y) can be experessed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
"""
def default_hparams(self):
"""returns a dictionary of hyperparameters with default values
We use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels/2.
"""
hparams = {
'name':'sinusoid_posisiton_embedder',
'min_timescale': 1.0,
'max_timescale': 1.0e4,
'trainable': False,
}
return hparams
| 39.033003
| 87
| 0.603196
|
#
"""
Various position embedders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
from texar.modules.embedders.embedder_base import EmbedderBase
from texar.modules.embedders import embedder_utils
from texar.utils.mode import is_train_mode
from texar.utils.shapes import mask_sequences
# pylint: disable=arguments-differ, invalid-name
__all__ = [
"PositionEmbedder",
"SinusoidsPositionEmbedder",
]
class PositionEmbedder(EmbedderBase):
"""Simple position embedder that maps position indexes into embeddings
via lookup.
Either :attr:`init_value` or :attr:`position_size` is required. If both are
given, :attr:`init_value.shape[0]` must equal :attr:`position_size`.
Args:
init_value (optional): A `Tensor` or numpy array that contains the
initial value of embeddings. It is typically of shape
`[position_size, embedding dim]`
If `None`, embedding is initialized as specified in
:attr:`hparams["initializer"]`. Otherwise, the
:attr:`"initializer"` and :attr:`"dim"`
hyperparameters in :attr:`hparams` are ignored.
position_size (int, optional): The number of possible positions, e.g.,
the maximum sequence length. Required if :attr:`init_value` is
not given.
hparams (dict, optional): Embedder hyperparameters. If it is not
specified, the default hyperparameter setting is used. See
:attr:`default_hparams` for the sturcture and default values.
"""
def __init__(self, init_value=None, position_size=None, hparams=None):
EmbedderBase.__init__(self, hparams=hparams)
if init_value is None and position_size is None:
raise ValueError(
"Either `init_value` or `position_size` is required.")
self._init_parameterized_embedding(init_value, position_size,
self._hparams)
self._position_size = position_size
if position_size is None:
self._position_size = self._num_embeds
if self._position_size != self._num_embeds:
raise ValueError(
'position_size must equal to init_value.shape[0].'
'Got %d and %d' % (self._position_size, self._num_embeds))
self._built = True
@staticmethod
def default_hparams():
"""Returns a dictionary of hyperparameters with default values.
Returns:
A dictionary with the following structure and values.
.. code-block:: python
{
"name": "position_embedder",
"dim": 100,
"initializer": {
"type": "random_uniform_initializer",
"kwargs": {
"minval": -0.1,
"maxval": 0.1,
"seed": None
}
},
"regularizer": {
"type": "L1L2",
"kwargs": {
"l1": 0.,
"l2": 0.
}
},
"dropout_rate": 0,
"trainable": True,
}
See :func:`~texar.modules.default_embedding_hparams` for more
details.
"""
hparams = embedder_utils.default_embedding_hparams()
hparams["name"] = "position_embedder"
return hparams
def _build(self, positions=None, sequence_length=None, mode=None, **kwargs):
"""Embeds with look-up.
Either :attr:`position` or :attr:`sequence_length` is required:
- If both are given, :attr:`sequence_length` is used to mask out \
embeddings of those time steps beyond the respective sequence \
lengths.
- If only :attr:`sequence_length` is given, then positions \
from 0 to sequence length - 1 are embedded.
Args:
positions (optional): An integer tensor containing the position
ids to embed.
sequence_length (optional): An integer tensor of shape
`[batch_size]`. Time steps beyond
the respective sequence lengths will have zero-valued
embeddings.
mode (optional): A tensor taking value in
:tf_main:`tf.estimator.ModeKeys <estimator/ModeKeys>`, including
`TRAIN`, `EVAL`, and `PREDICT`. If `None`, dropout will be
controlled by :func:`texar.context.global_mode`.
kwargs: Additional keyword arguments for
:tf_main:`tf.nn.embedding_lookup <nn/embedding_lookup>` besides
:attr:`params` and :attr:`ids`.
Returns:
A `Tensor` of shape `shape(inputs) + embedding dimension`.
"""
# Gets embedder inputs
inputs = positions
if positions is None:
if sequence_length is None:
raise ValueError(
'Either `positions` or `sequence_length` is required.')
max_length = tf.reduce_max(sequence_length)
single_inputs = tf.range(start=0, limit=max_length, dtype=tf.int32)
# Expands `single_inputs` to have shape [batch_size, max_length]
expander = tf.expand_dims(tf.ones_like(sequence_length), -1)
inputs = expander * tf.expand_dims(single_inputs, 0)
ids_rank = len(inputs.shape.dims)
embedding = self._embedding
is_training = is_train_mode(mode)
# Gets dropout strategy
st = self._hparams.dropout_strategy
if positions is None and st == 'item':
# If `inputs` is based on `sequence_length`, then dropout
# strategies 'item' and 'item_type' have the same effect, we
# use 'item_type' to avoid unknown noise_shape in the 'item'
# strategy
st = 'item_type'
# Dropouts as 'item_type' before embedding
if st == 'item_type':
dropout_layer = self._get_dropout_layer(
self._hparams, dropout_strategy=st)
if dropout_layer:
embedding = dropout_layer.apply(inputs=embedding,
training=is_training)
# Embeds
outputs = tf.nn.embedding_lookup(embedding, inputs, **kwargs)
# Dropouts as 'item' or 'elements' after embedding
if st != 'item_type':
dropout_layer = self._get_dropout_layer(
self._hparams, ids_rank=ids_rank, dropout_input=outputs,
dropout_strategy=st)
if dropout_layer:
outputs = dropout_layer.apply(inputs=outputs,
training=is_training)
# Optionally masks
if sequence_length is not None:
outputs = mask_sequences(
outputs, sequence_length,
tensor_rank=len(inputs.shape.dims) + self._dim_rank)
return outputs
@property
def embedding(self):
"""The embedding tensor.
"""
return self._embedding
@property
def dim(self):
"""The embedding dimension.
"""
return self._dim
@property
def position_size(self):
"""The position size, i.e., maximum number of positions.
"""
return self._position_size
class SinusoidsPositionEmbedder(EmbedderBase):
"""Sinusoid position embedder that maps position indexes into embeddings
via sinusoid calculation.
Each channel of the input Tensor is incremented by a sinusoid of a
different frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query
and thememory inputs to attention.
The use of relative position is possible because sin(x+y) and
cos(x+y) can be experessed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
"""
def __init__(self, hparams=None):
EmbedderBase.__init__(self, hparams=hparams)
def default_hparams(self):
"""returns a dictionary of hyperparameters with default values
We use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels/2.
"""
hparams = {
'name':'sinusoid_posisiton_embedder',
'min_timescale': 1.0,
'max_timescale': 1.0e4,
'trainable': False,
}
return hparams
def _build(self, length, channels):
position = tf.to_float(tf.range(length))
num_timescales = channels // 2
min_timescale = self._hparams.min_timescale
max_timescale = self._hparams.max_timescale
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(tf.to_float(num_timescales) - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) \
* tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
signal = tf.reshape(signal, [1, length, channels])
return signal
class SinusoidsSegmentalPositionEmbedder(EmbedderBase):
def __init__(self, hparams=None):
EmbedderBase.__init__(self, hparams=hparams)
def default_hparams(self):
"""returns a dictionary of hyperparameters with default values
We use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels/2.
"""
hparams = {
'name': 'sinusoid_segmental_posisiton_embedder',
'min_timescale': 1.0,
'max_timescale': 1.0e4,
'trainable': False,
'base': 256,
}
return hparams
def _build(self, length, channels, segment_ids, offsets):
"""
:param length: an int
:param channels: an int
:param segment_id: [batch_size, length]
:param segment_offset: [batch_size, length]
:return: [batch_size, length, channels]
"""
# TODO(wanrong): check if segment_ids is of shape [batch_size, length]
position = tf.to_float(tf.add(tf.multiply(tf.cast(256, tf.int64), segment_ids),
offsets))
num_timescales = channels // 2
min_timescale = 1.0
max_timescale = 1.0e4
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(tf.to_float(num_timescales) - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 2) * inv_timescales
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
signal = tf.reshape(signal, shape=[-1, length, channels])
return signal
| 1,714
| 1,721
| 103
|
94f957f34ddd100883c7635dde7686fb84a58a99
| 4,691
|
py
|
Python
|
rules-conv.py
|
beorn7/promhacks
|
042fdd4e7d50589e7957220684d5f29864d95e03
|
[
"MIT"
] | 4
|
2018-12-13T14:16:44.000Z
|
2019-01-04T11:13:47.000Z
|
rules-conv.py
|
beorn7/promhacks
|
042fdd4e7d50589e7957220684d5f29864d95e03
|
[
"MIT"
] | null | null | null |
rules-conv.py
|
beorn7/promhacks
|
042fdd4e7d50589e7957220684d5f29864d95e03
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Converts Prom1.x rule format into Prom2.x while keeping formatting
# and comments. This does not work in general. Some valid Prom1 rules
# files might not get converted properly.
import glob
import re
for rules_file in glob.iglob('*.rules'):
name = re.match(r'(.*)\.rules', rules_file)[1]
with open(name + '.yml', mode='w') as yaml:
print('groups:', file=yaml)
print('- name:', name, file=yaml)
print(' rules:', file=yaml)
with open(rules_file) as rules:
convert(rules, yaml)
| 36.084615
| 87
| 0.448305
|
#!/usr/bin/python3
# Converts Prom1.x rule format into Prom2.x while keeping formatting
# and comments. This does not work in general. Some valid Prom1 rules
# files might not get converted properly.
import glob
import re
def convert(rules, yaml):
indent = 2
in_alert = False
in_record = False
in_expr = False
in_labels = False
for line in rules:
if not line: break
# Keep empty lines. Assume that this always ends a rule.
if line.strip() == "":
print(file=yaml)
if not in_labels and not in_expr:
in_alert = False
in_record = False
indent = 2
continue
# Plain comments.
if line.strip().startswith('#'):
yaml.write(indent*' ' + line)
# Assume that an unindented comment ends a rule.
if line.startswith('#') and not in_labels and not in_expr:
in_alert = False
in_record = False
indent = 2
continue
# Continue / end alert.
if in_alert:
components = line.split()
if components[0] == 'IF':
# Assume that IF is always the start of the expr block.
in_expr = True
indent = 4
print(' expr: |2', file=yaml)
if len(components) > 1:
yaml.write((indent+2)*' ' + ' '.join(components[1:]) + '\n')
continue
if components[0] == 'FOR':
# Assume that FOR is always the FOR entry.
in_expr = False
in_labels = False
indent = 4
yaml.write(' for: '+ ' '.join(components[1:]) + '\n')
continue
if components[0] == 'LABELS':
# Assume that LABELS is always the start of the labels block.
in_expr = False
in_labels = True
indent = 6
print(' labels:', file=yaml)
continue
if components[0] == 'ANNOTATIONS':
# Assume that ANNOTATIONS is always the start of the annotations block.
in_expr = False
in_labels = True
indent = 6
print(' annotations:', file=yaml)
continue
if in_expr:
if line.startswith(' ') or line.startswith(')'):
yaml.write(indent*' ' + line)
continue
if line.startswith(' '):
yaml.write((indent-2)*' ' + line)
continue
in_expr = False
indent = 4
if in_labels:
if line.strip().startswith('}'):
in_labels = False
indent = 4
continue
m = re.match(r'\s*(\w+)\s*=\s*(.*?)\s*,?\s*(#[^"'']*)?$', line)
yaml.write(indent*' ' + m[1] + ': ' + m[2])
if m[3]:
yaml.write(' ' + m[3]) # Trailing comment.
yaml.write('\n')
continue
in_alert = False
in_expr = False
in_labels = False
indent = 2
# Continue / end record.
if in_record:
if line.startswith(' ') or line.startswith(')'):
# Assumes that continuations start with blank or ')'.
yaml.write(indent*' ' + line)
continue
in_record = False
indent = 2
# Alert start.
if line.startswith('ALERT '):
in_alert = True
indent = 4
yaml.write(' - alert: '+line[6:].strip()+'\n')
# Record start.
else:
in_record = True
indent = 6
m = re.match(r'([\w:]+)\s*(?:\{(.*)\})?\s*=\s*(.*)', line.strip())
yaml.write(' - record: '+m[1].strip()+'\n')
if m[2]:
yaml.write(' labels:\n')
for lp in m[2].split(','):
parts = lp.split('=')
yaml.write(' %s: %s\n' % (parts[0].strip(), parts[1].strip()))
yaml.write(' expr: |2\n')
if m[3]:
yaml.write(indent*' ' + m[3] + '\n')
for rules_file in glob.iglob('*.rules'):
name = re.match(r'(.*)\.rules', rules_file)[1]
with open(name + '.yml', mode='w') as yaml:
print('groups:', file=yaml)
print('- name:', name, file=yaml)
print(' rules:', file=yaml)
with open(rules_file) as rules:
convert(rules, yaml)
| 4,116
| 0
| 23
|
2d626d671dcd6abe298bfd3da9ff2103e85d3d2d
| 4,182
|
py
|
Python
|
pyt.py
|
ojhavijay/VIJAY-KUMAR-OJHA
|
d89224326cf89ac65da6b18aed71bbbb25c839d0
|
[
"Unlicense"
] | null | null | null |
pyt.py
|
ojhavijay/VIJAY-KUMAR-OJHA
|
d89224326cf89ac65da6b18aed71bbbb25c839d0
|
[
"Unlicense"
] | null | null | null |
pyt.py
|
ojhavijay/VIJAY-KUMAR-OJHA
|
d89224326cf89ac65da6b18aed71bbbb25c839d0
|
[
"Unlicense"
] | null | null | null |
from tkinter import*
from tkinter import ttk
#===================FUNCTION DECLARATION==============================================================================
if __name__ == '__main__':
root=Tk()
obj=ChatBot(root)
root.mainloop()
| 34
| 365
| 0.549976
|
from tkinter import*
from tkinter import ttk
class ChatBot:
def __init__(self,root):
self.root=root
self.root.title("UniBot")
self.root.geometry("730x620+0+0")
self.root.bind('<Return>',self.enter_func)
main_frame=Frame(self.root,bd=5,bg='orange',width=615)
main_frame.pack()
Title_label=Label(main_frame,bd=3,relief=RAISED,anchor='nw',width=730,text='UNIBOT',font=('arial',30,'bold'),fg='green',bg='white')
Title_label.pack()
self.scroll_y=ttk.Scrollbar(main_frame,orient=VERTICAL)
self.text=Text(main_frame,width=65,height=20,bd=5,relief=RAISED,font=('arial',14),yscrollcommand=self.scroll_y.set)
self.scroll_y.pack(side=RIGHT,fill=Y)
self.text.pack()
btn_frame=Frame(self.root,bd=5,bg='white',width=730)
btn_frame.pack()
label_1=Label(btn_frame,text="type something",font=('arial',14,'bold'),fg='green',bg='white')
label_1.grid(row=0,column=0,padx=5,sticky=W)
self.entry=StringVar()
self.entry1=ttk.Entry(btn_frame,textvariable=self.entry,width=40,font=('arial',15,'bold'))
self.entry1.grid(row=0,column=1,padx=5,sticky=W)
self.send=Button(btn_frame,text="Send",font=('arial',15,'bold'),width=8,bg='blue',command=self.send)
self.send.grid(row=0,column=2,padx=5,sticky=W)
self.clear=Button(btn_frame,text="Clear",command=self.clear,font=('arial',15,'bold'),width=8,bg='red',fg='white')
self.clear.grid(row=1,column=0,padx=5,sticky=W)
self.msg=''
self.label_11=Label(btn_frame,text=self.msg,font=('arial',14,'bold'),fg='green',bg='white')
self.label_11.grid(row=1,column=1,padx=5,sticky=W)
#===================FUNCTION DECLARATION==============================================================================
def enter_func(self,event):
self.send.invoke()
self.entry.set('')
def clear(self):
self.text.delete('1.0',END)
self.entry.set('')
def send(self):
send='\t\t\t'+'You: '+self.entry.get()
self.text.insert(END,'\n'+send)
if (self.entry.get()==''):
self.msg='Please enter something'
self.label_11.config(text=self.msg,fg='red')
else:
self.msg=''
self.label_11.config(text=self.msg,fg='red')
if(self.entry.get()=='hello'):
self.text.insert(END,'\n\n'+'Bot: Hi, i am UniBot.')
elif (self.entry.get()=="hi"):
self.text.insert(END,'\n\n'+'Bot: Hi')
elif (self.entry.get()=="where is LPU?"):
self.text.insert(END,'\n\n'+'Bot: Phagwara.')
elif (self.entry.get()=="how is lpu?"):
self.text.insert(END,'\n\n'+'Bot: Good.')
elif (self.entry.get()=="how many student in cse?"):
self.text.insert(END,'\n\n'+'Bot: near about 1000.')
elif (self.entry.get()=="who created you?"):
self.text.insert(END,'\n\n'+'Bot:Vijay and Akshay.')
elif (self.entry.get()=="what you do?"):
self.text.insert(END,'\n\n'+'Bot:I am Unibot, i am here to give you information realted the University.')
elif (self.entry.get()=="tell me about LPU."):
self.text.insert(END,'\n\n'+'Bot: LPU ranks among Top 100 Institutions in India: Govt. of India NIRF Ranking 2020 offering diploma, undergraduate, postgraduate and doctorate (Ph.D) courses in Management (BBA/MBA), Engineering (B.Tech/M.Tech), Pharma, Science, Agriculture, Fashion, Law, Journalism, Hotel Management and Computer Application (BCA/MCA)')
elif (self.entry.get()=="bye"):
self.text.insert(END,'\n\n'+'Thank You for chatting!')
else:
self.text.insert(END,'\n\n'+'Sorry, try something else')
if __name__ == '__main__':
root=Tk()
obj=ChatBot(root)
root.mainloop()
| 3,673
| -7
| 155
|
ec052e40881fff0e487807290780c7199d0dba38
| 33,017
|
py
|
Python
|
tests/test_swiftclient.py
|
citrix-openstack-build/python-swiftclient
|
68dde8dd514e4eef89aafa6c1c93e065045c3cbd
|
[
"Apache-2.0"
] | null | null | null |
tests/test_swiftclient.py
|
citrix-openstack-build/python-swiftclient
|
68dde8dd514e4eef89aafa6c1c93e065045c3cbd
|
[
"Apache-2.0"
] | null | null | null |
tests/test_swiftclient.py
|
citrix-openstack-build/python-swiftclient
|
68dde8dd514e4eef89aafa6c1c93e065045c3cbd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: More tests
import mock
import httplib
import socket
import StringIO
import testtools
import warnings
from urlparse import urlparse
# TODO: mock http connection class with more control over headers
from .utils import fake_http_connect, fake_get_keystoneclient_2_0
from swiftclient import client as c
from swiftclient import utils as u
# TODO: following tests are placeholders, need more tests, better coverage
if __name__ == '__main__':
testtools.main()
| 37.181306
| 79
| 0.572856
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: More tests
import mock
import httplib
import socket
import StringIO
import testtools
import warnings
from urlparse import urlparse
# TODO: mock http connection class with more control over headers
from .utils import fake_http_connect, fake_get_keystoneclient_2_0
from swiftclient import client as c
from swiftclient import utils as u
class TestClientException(testtools.TestCase):
def test_is_exception(self):
self.assertTrue(issubclass(c.ClientException, Exception))
def test_format(self):
exc = c.ClientException('something failed')
self.assertTrue('something failed' in str(exc))
test_kwargs = (
'scheme',
'host',
'port',
'path',
'query',
'status',
'reason',
'device',
)
for value in test_kwargs:
kwargs = {
'http_%s' % value: value,
}
exc = c.ClientException('test', **kwargs)
self.assertTrue(value in str(exc))
class TestJsonImport(testtools.TestCase):
def tearDown(self):
try:
import json
except ImportError:
pass
else:
reload(json)
try:
import simplejson
except ImportError:
pass
else:
reload(simplejson)
super(TestJsonImport, self).tearDown()
def test_any(self):
self.assertTrue(hasattr(c, 'json_loads'))
def test_no_simplejson(self):
# break simplejson
try:
import simplejson
except ImportError:
# not installed, so we don't have to break it for these tests
pass
else:
delattr(simplejson, 'loads')
reload(c)
try:
from json import loads
except ImportError:
# this case is stested in _no_json
pass
else:
self.assertEquals(loads, c.json_loads)
class TestConfigTrueValue(testtools.TestCase):
def test_TRUE_VALUES(self):
for v in u.TRUE_VALUES:
self.assertEquals(v, v.lower())
def test_config_true_value(self):
orig_trues = u.TRUE_VALUES
try:
u.TRUE_VALUES = 'hello world'.split()
for val in 'hello world HELLO WORLD'.split():
self.assertTrue(u.config_true_value(val) is True)
self.assertTrue(u.config_true_value(True) is True)
self.assertTrue(u.config_true_value('foo') is False)
self.assertTrue(u.config_true_value(False) is False)
finally:
u.TRUE_VALUES = orig_trues
class MockHttpTest(testtools.TestCase):
def setUp(self):
super(MockHttpTest, self).setUp()
def fake_http_connection(*args, **kwargs):
_orig_http_connection = c.http_connection
return_read = kwargs.get('return_read')
query_string = kwargs.get('query_string')
storage_url = kwargs.get('storage_url')
def wrapper(url, proxy=None, ssl_compression=True):
if storage_url:
self.assertEqual(storage_url, url)
parsed, _conn = _orig_http_connection(url, proxy=proxy)
conn = fake_http_connect(*args, **kwargs)()
def request(method, url, *args, **kwargs):
if query_string:
self.assert_(url.endswith('?' + query_string))
return
conn.request = request
conn.has_been_read = False
_orig_read = conn.read
def read(*args, **kwargs):
conn.has_been_read = True
return _orig_read(*args, **kwargs)
conn.read = return_read or read
return parsed, conn
return wrapper
self.fake_http_connection = fake_http_connection
def tearDown(self):
super(MockHttpTest, self).tearDown()
reload(c)
class MockHttpResponse():
def __init__(self):
self.status = 200
self.buffer = []
def read(self):
return ""
def getheader(self, name, default):
return ""
def fake_response(self):
return MockHttpResponse()
def fake_send(self, msg):
self.buffer.append(msg)
class TestHttpHelpers(MockHttpTest):
def test_quote(self):
value = 'standard string'
self.assertEquals('standard%20string', c.quote(value))
value = u'\u0075nicode string'
self.assertEquals('unicode%20string', c.quote(value))
def test_http_connection(self):
url = 'http://www.test.com'
_junk, conn = c.http_connection(url)
self.assertTrue(isinstance(conn, c.HTTPConnection))
url = 'https://www.test.com'
_junk, conn = c.http_connection(url)
self.assertTrue(isinstance(conn, httplib.HTTPSConnection) or
isinstance(conn, c.HTTPSConnectionNoSSLComp))
url = 'ftp://www.test.com'
self.assertRaises(c.ClientException, c.http_connection, url)
def test_validate_headers(self):
headers = {'key': 'value'}
self.assertEquals(c.validate_headers(headers), None)
headers = {'key': 'value1\nvalue2'}
self.assertRaises(c.InvalidHeadersException, c.validate_headers,
headers)
headers = {'key': 'value1\rvalue2'}
self.assertRaises(c.InvalidHeadersException, c.validate_headers,
headers)
# TODO: following tests are placeholders, need more tests, better coverage
class TestGetAuth(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
url, token = c.get_auth('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(url, None)
self.assertEquals(token, None)
def test_invalid_auth(self):
c.http_connection = self.fake_http_connection(200)
self.assertRaises(c.ClientException, c.get_auth,
'http://www.tests.com', 'asdf', 'asdf',
auth_version="foo")
def test_auth_v1(self):
c.http_connection = self.fake_http_connection(200)
url, token = c.get_auth('http://www.test.com', 'asdf', 'asdf',
auth_version="1.0")
self.assertEquals(url, None)
self.assertEquals(token, None)
def test_auth_v2(self):
os_options = {'tenant_name': 'asdf'}
c.get_keystoneclient_2_0 = fake_get_keystoneclient_2_0(os_options)
url, token = c.get_auth('http://www.test.com', 'asdf', 'asdf',
os_options=os_options,
auth_version="2.0")
self.assertTrue(url.startswith("http"))
self.assertTrue(token)
def test_auth_v2_no_tenant_name(self):
c.get_keystoneclient_2_0 = fake_get_keystoneclient_2_0({})
self.assertRaises(c.ClientException, c.get_auth,
'http://www.tests.com', 'asdf', 'asdf',
os_options={},
auth_version='2.0')
def test_auth_v2_with_tenant_user_in_user(self):
tenant_option = {'tenant_name': 'foo'}
c.get_keystoneclient_2_0 = fake_get_keystoneclient_2_0(tenant_option)
url, token = c.get_auth('http://www.test.com', 'foo:bar', 'asdf',
os_options={},
auth_version="2.0")
self.assertTrue(url.startswith("http"))
self.assertTrue(token)
def test_auth_v2_tenant_name_no_os_options(self):
tenant_option = {'tenant_name': 'asdf'}
c.get_keystoneclient_2_0 = fake_get_keystoneclient_2_0(tenant_option)
url, token = c.get_auth('http://www.test.com', 'asdf', 'asdf',
tenant_name='asdf',
os_options={},
auth_version="2.0")
self.assertTrue(url.startswith("http"))
self.assertTrue(token)
def test_auth_v2_with_os_options(self):
os_options = {'service_type': 'object-store',
'endpoint_type': 'internalURL',
'tenant_name': 'asdf'}
c.get_keystoneclient_2_0 = fake_get_keystoneclient_2_0(os_options)
url, token = c.get_auth('http://www.test.com', 'asdf', 'asdf',
os_options=os_options,
auth_version="2.0")
self.assertTrue(url.startswith("http"))
self.assertTrue(token)
def test_auth_v2_with_tenant_user_in_user_no_os_options(self):
tenant_option = {'tenant_name': 'foo'}
c.get_keystoneclient_2_0 = fake_get_keystoneclient_2_0(tenant_option)
url, token = c.get_auth('http://www.test.com', 'foo:bar', 'asdf',
auth_version="2.0")
self.assertTrue(url.startswith("http"))
self.assertTrue(token)
def test_auth_v2_with_os_region_name(self):
os_options = {'region_name': 'good-region',
'tenant_name': 'asdf'}
c.get_keystoneclient_2_0 = fake_get_keystoneclient_2_0(os_options)
url, token = c.get_auth('http://www.test.com', 'asdf', 'asdf',
os_options=os_options,
auth_version="2.0")
self.assertTrue(url.startswith("http"))
self.assertTrue(token)
def test_auth_v2_no_endpoint(self):
os_options = {'region_name': 'unknown_region',
'tenant_name': 'asdf'}
c.get_keystoneclient_2_0 = fake_get_keystoneclient_2_0(
os_options, c.ClientException)
self.assertRaises(c.ClientException, c.get_auth,
'http://www.tests.com', 'asdf', 'asdf',
os_options=os_options, auth_version='2.0')
def test_auth_v2_ks_exception(self):
c.get_keystoneclient_2_0 = fake_get_keystoneclient_2_0(
{}, c.ClientException)
self.assertRaises(c.ClientException, c.get_auth,
'http://www.tests.com', 'asdf', 'asdf',
os_options={},
auth_version='2.0')
def test_auth_v2_cacert(self):
os_options = {'tenant_name': 'foo'}
c.get_keystoneclient_2_0 = fake_get_keystoneclient_2_0(
os_options, None)
auth_url_secure = 'https://www.tests.com'
auth_url_insecure = 'https://www.tests.com/self-signed-certificate'
url, token = c.get_auth(auth_url_secure, 'asdf', 'asdf',
os_options=os_options, auth_version='2.0',
insecure=False)
self.assertTrue(url.startswith("http"))
self.assertTrue(token)
url, token = c.get_auth(auth_url_insecure, 'asdf', 'asdf',
os_options=os_options, auth_version='2.0',
cacert='ca.pem', insecure=False)
self.assertTrue(url.startswith("http"))
self.assertTrue(token)
self.assertRaises(c.ClientException, c.get_auth,
auth_url_insecure, 'asdf', 'asdf',
os_options=os_options, auth_version='2.0')
self.assertRaises(c.ClientException, c.get_auth,
auth_url_insecure, 'asdf', 'asdf',
os_options=os_options, auth_version='2.0',
insecure=False)
def test_auth_v2_insecure(self):
os_options = {'tenant_name': 'foo'}
c.get_keystoneclient_2_0 = fake_get_keystoneclient_2_0(
os_options, None)
auth_url_secure = 'https://www.tests.com'
auth_url_insecure = 'https://www.tests.com/invalid-certificate'
url, token = c.get_auth(auth_url_secure, 'asdf', 'asdf',
os_options=os_options, auth_version='2.0')
self.assertTrue(url.startswith("http"))
self.assertTrue(token)
url, token = c.get_auth(auth_url_insecure, 'asdf', 'asdf',
os_options=os_options, auth_version='2.0',
insecure=True)
self.assertTrue(url.startswith("http"))
self.assertTrue(token)
self.assertRaises(c.ClientException, c.get_auth,
auth_url_insecure, 'asdf', 'asdf',
os_options=os_options, auth_version='2.0')
self.assertRaises(c.ClientException, c.get_auth,
auth_url_insecure, 'asdf', 'asdf',
os_options=os_options, auth_version='2.0',
insecure=False)
class TestGetAccount(MockHttpTest):
def test_no_content(self):
c.http_connection = self.fake_http_connection(204)
value = c.get_account('http://www.test.com', 'asdf')[1]
self.assertEquals(value, [])
def test_param_marker(self):
c.http_connection = self.fake_http_connection(
204,
query_string="format=json&marker=marker")
c.get_account('http://www.test.com', 'asdf', marker='marker')
def test_param_limit(self):
c.http_connection = self.fake_http_connection(
204,
query_string="format=json&limit=10")
c.get_account('http://www.test.com', 'asdf', limit=10)
def test_param_prefix(self):
c.http_connection = self.fake_http_connection(
204,
query_string="format=json&prefix=asdf/")
c.get_account('http://www.test.com', 'asdf', prefix='asdf/')
def test_param_end_marker(self):
c.http_connection = self.fake_http_connection(
204,
query_string="format=json&end_marker=end_marker")
c.get_account('http://www.test.com', 'asdf', end_marker='end_marker')
class TestHeadAccount(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.head_account('http://www.tests.com', 'asdf')
# TODO: Hmm. This doesn't really test too much as it uses a fake that
# always returns the same dict. I guess it "exercises" the code, so
# I'll leave it for now.
self.assertEquals(type(value), dict)
def test_server_error(self):
body = 'c' * 65
c.http_connection = self.fake_http_connection(500, body=body)
self.assertRaises(c.ClientException, c.head_account,
'http://www.tests.com', 'asdf')
try:
c.head_account('http://www.tests.com', 'asdf')
except c.ClientException as e:
new_body = "[first 60 chars of response] " + body[0:60]
self.assertEquals(e.__str__()[-89:], new_body)
class TestGetContainer(MockHttpTest):
def test_no_content(self):
c.http_connection = self.fake_http_connection(204)
value = c.get_container('http://www.test.com', 'asdf', 'asdf')[1]
self.assertEquals(value, [])
def test_param_marker(self):
c.http_connection = self.fake_http_connection(
204,
query_string="format=json&marker=marker")
c.get_container('http://www.test.com', 'asdf', 'asdf', marker='marker')
def test_param_limit(self):
c.http_connection = self.fake_http_connection(
204,
query_string="format=json&limit=10")
c.get_container('http://www.test.com', 'asdf', 'asdf', limit=10)
def test_param_prefix(self):
c.http_connection = self.fake_http_connection(
204,
query_string="format=json&prefix=asdf/")
c.get_container('http://www.test.com', 'asdf', 'asdf', prefix='asdf/')
def test_param_delimiter(self):
c.http_connection = self.fake_http_connection(
204,
query_string="format=json&delimiter=/")
c.get_container('http://www.test.com', 'asdf', 'asdf', delimiter='/')
def test_param_end_marker(self):
c.http_connection = self.fake_http_connection(
204,
query_string="format=json&end_marker=end_marker")
c.get_container('http://www.test.com', 'asdf', 'asdf',
end_marker='end_marker')
def test_param_path(self):
c.http_connection = self.fake_http_connection(
204,
query_string="format=json&path=asdf")
c.get_container('http://www.test.com', 'asdf', 'asdf',
path='asdf')
class TestHeadContainer(MockHttpTest):
def test_server_error(self):
body = 'c' * 60
c.http_connection = self.fake_http_connection(500, body=body)
self.assertRaises(c.ClientException, c.head_container,
'http://www.test.com', 'asdf', 'asdf',
)
try:
c.head_container('http://www.test.com', 'asdf', 'asdf')
except c.ClientException as e:
self.assertEquals(e.http_response_content, body)
class TestPutContainer(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.put_container('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(value, None)
def test_server_error(self):
body = 'c' * 60
c.http_connection = self.fake_http_connection(500, body=body)
self.assertRaises(c.ClientException, c.put_container,
'http://www.test.com', 'asdf', 'asdf',
)
try:
c.put_container('http://www.test.com', 'asdf', 'asdf')
except c.ClientException as e:
self.assertEquals(e.http_response_content, body)
class TestDeleteContainer(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
value = c.delete_container('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(value, None)
class TestGetObject(MockHttpTest):
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.get_object,
'http://www.test.com', 'asdf', 'asdf', 'asdf')
def test_query_string(self):
c.http_connection = self.fake_http_connection(200,
query_string="hello=20")
c.get_object('http://www.test.com', 'asdf', 'asdf', 'asdf',
query_string="hello=20")
def test_request_headers(self):
request_args = {}
def fake_request(method, url, body=None, headers=None):
request_args['method'] = method
request_args['url'] = url
request_args['body'] = body
request_args['headers'] = headers
return
conn = self.fake_http_connection(200)('http://www.test.com/')
conn[1].request = fake_request
headers = {'Range': 'bytes=1-2'}
c.get_object('url_is_irrelevant', 'TOKEN', 'container', 'object',
http_conn=conn, headers=headers)
self.assertFalse(request_args['headers'] is None,
"No headers in the request")
self.assertTrue('Range' in request_args['headers'],
"No Range header in the request")
self.assertEquals(request_args['headers']['Range'], 'bytes=1-2')
class TestHeadObject(MockHttpTest):
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.head_object,
'http://www.test.com', 'asdf', 'asdf', 'asdf')
class TestPutObject(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', 'asdf')
value = c.put_object(*args)
self.assertTrue(isinstance(value, basestring))
def test_unicode_ok(self):
conn = c.http_connection(u'http://www.test.com/')
mock_file = StringIO.StringIO(u'\u5929\u7a7a\u4e2d\u7684\u4e4c\u4e91')
args = (u'\u5929\u7a7a\u4e2d\u7684\u4e4c\u4e91',
'\u5929\u7a7a\u4e2d\u7684\u4e4c\u4e91',
u'\u5929\u7a7a\u4e2d\u7684\u4e4c\u4e91',
u'\u5929\u7a7a\u4e2d\u7684\u4e4c\u4e91',
mock_file)
headers = {'X-Header1': u'\u5929\u7a7a\u4e2d\u7684\u4e4c\u4e91',
'X-2': 1, 'X-3': {'a': 'b'}, 'a-b': '.x:yz mn:fg:lp'}
resp = MockHttpResponse()
conn[1].getresponse = resp.fake_response
conn[1].send = resp.fake_send
value = c.put_object(*args, headers=headers, http_conn=conn)
self.assertTrue(isinstance(value, basestring))
# Test for RFC-2616 encoded symbols
self.assertTrue("a-b: .x:yz mn:fg:lp" in resp.buffer[0],
"[a-b: .x:yz mn:fg:lp] header is missing")
def test_chunk_warning(self):
conn = c.http_connection('http://www.test.com/')
mock_file = StringIO.StringIO('asdf')
args = ('asdf', 'asdf', 'asdf', 'asdf', mock_file)
resp = MockHttpResponse()
conn[1].getresponse = resp.fake_response
conn[1].send = resp.fake_send
with warnings.catch_warnings(record=True) as w:
c.put_object(*args, chunk_size=20, headers={}, http_conn=conn)
self.assertEquals(len(w), 0)
body = 'c' * 60
c.http_connection = self.fake_http_connection(200, body=body)
args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', 'asdf')
with warnings.catch_warnings(record=True) as w:
c.put_object(*args, chunk_size=20)
self.assertEquals(len(w), 1)
self.assertTrue(issubclass(w[-1].category, UserWarning))
def test_server_error(self):
body = 'c' * 60
c.http_connection = self.fake_http_connection(500, body=body)
args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', 'asdf')
self.assertRaises(c.ClientException, c.put_object, *args)
try:
c.put_object(*args)
except c.ClientException as e:
self.assertEquals(e.http_response_content, body)
def test_query_string(self):
c.http_connection = self.fake_http_connection(200,
query_string="hello=20")
c.put_object('http://www.test.com', 'asdf', 'asdf', 'asdf',
query_string="hello=20")
class TestPostObject(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', {})
c.post_object(*args)
def test_unicode_ok(self):
conn = c.http_connection(u'http://www.test.com/')
args = (u'\u5929\u7a7a\u4e2d\u7684\u4e4c\u4e91',
'\u5929\u7a7a\u4e2d\u7684\u4e4c\u4e91',
u'\u5929\u7a7a\u4e2d\u7684\u4e4c\u4e91',
u'\u5929\u7a7a\u4e2d\u7684\u4e4c\u4e91')
headers = {'X-Header1': u'\u5929\u7a7a\u4e2d\u7684\u4e4c\u4e91',
'X-2': '1', 'X-3': {'a': 'b'}, 'a-b': '.x:yz mn:kl:qr'}
resp = MockHttpResponse()
conn[1].getresponse = resp.fake_response
conn[1].send = resp.fake_send
c.post_object(*args, headers=headers, http_conn=conn)
# Test for RFC-2616 encoded symbols
self.assertTrue("a-b: .x:yz mn:kl:qr" in resp.buffer[0],
"[a-b: .x:yz mn:kl:qr] header is missing")
def test_server_error(self):
body = 'c' * 60
c.http_connection = self.fake_http_connection(500, body=body)
args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', {})
self.assertRaises(c.ClientException, c.post_object, *args)
try:
c.post_object(*args)
except c.ClientException as e:
self.assertEquals(e.http_response_content, body)
class TestDeleteObject(MockHttpTest):
def test_ok(self):
c.http_connection = self.fake_http_connection(200)
c.delete_object('http://www.test.com', 'asdf', 'asdf', 'asdf')
def test_server_error(self):
c.http_connection = self.fake_http_connection(500)
self.assertRaises(c.ClientException, c.delete_object,
'http://www.test.com', 'asdf', 'asdf', 'asdf')
def test_query_string(self):
c.http_connection = self.fake_http_connection(200,
query_string="hello=20")
c.delete_object('http://www.test.com', 'asdf', 'asdf', 'asdf',
query_string="hello=20")
class TestConnection(MockHttpTest):
def test_instance(self):
conn = c.Connection('http://www.test.com', 'asdf', 'asdf')
self.assertEquals(conn.retries, 5)
def test_instance_kwargs(self):
args = {'user': 'ausername',
'key': 'secretpass',
'authurl': 'http://www.test.com',
'tenant_name': 'atenant'}
conn = c.Connection(**args)
self.assertEquals(type(conn), c.Connection)
def test_instance_kwargs_token(self):
args = {'preauthtoken': 'atoken123',
'preauthurl': 'http://www.test.com:8080/v1/AUTH_123456'}
conn = c.Connection(**args)
self.assertEquals(type(conn), c.Connection)
def test_storage_url_override(self):
static_url = 'http://overridden.storage.url'
c.http_connection = self.fake_http_connection(
200, body='[]', storage_url=static_url)
conn = c.Connection('http://auth.url/', 'some_user', 'some_key',
os_options={
'object_storage_url': static_url})
method_signatures = (
(conn.head_account, []),
(conn.get_account, []),
(conn.head_container, ('asdf',)),
(conn.get_container, ('asdf',)),
(conn.put_container, ('asdf',)),
(conn.delete_container, ('asdf',)),
(conn.head_object, ('asdf', 'asdf')),
(conn.get_object, ('asdf', 'asdf')),
(conn.put_object, ('asdf', 'asdf', 'asdf')),
(conn.post_object, ('asdf', 'asdf', {})),
(conn.delete_object, ('asdf', 'asdf')),
)
with mock.patch('swiftclient.client.get_auth_1_0') as mock_get_auth:
mock_get_auth.return_value = ('http://auth.storage.url', 'tToken')
for method, args in method_signatures:
method(*args)
def test_retry(self):
c.http_connection = self.fake_http_connection(500)
def quick_sleep(*args):
pass
c.sleep = quick_sleep
conn = c.Connection('http://www.test.com', 'asdf', 'asdf')
self.assertRaises(c.ClientException, conn.head_account)
self.assertEquals(conn.attempts, conn.retries + 1)
def test_resp_read_on_server_error(self):
c.http_connection = self.fake_http_connection(500)
conn = c.Connection('http://www.test.com', 'asdf', 'asdf', retries=0)
def get_auth(*args, **kwargs):
return 'http://www.new.com', 'new'
conn.get_auth = get_auth
self.url, self.token = conn.get_auth()
method_signatures = (
(conn.head_account, []),
(conn.get_account, []),
(conn.head_container, ('asdf',)),
(conn.get_container, ('asdf',)),
(conn.put_container, ('asdf',)),
(conn.delete_container, ('asdf',)),
(conn.head_object, ('asdf', 'asdf')),
(conn.get_object, ('asdf', 'asdf')),
(conn.put_object, ('asdf', 'asdf', 'asdf')),
(conn.post_object, ('asdf', 'asdf', {})),
(conn.delete_object, ('asdf', 'asdf')),
)
for method, args in method_signatures:
self.assertRaises(c.ClientException, method, *args)
try:
self.assertTrue(conn.http_conn[1].has_been_read)
except AssertionError:
msg = '%s did not read resp on server error' % method.__name__
self.fail(msg)
except Exception as e:
raise e.__class__("%s - %s" % (method.__name__, e))
def test_reauth(self):
c.http_connection = self.fake_http_connection(401)
def get_auth(*args, **kwargs):
return 'http://www.new.com', 'new'
def swap_sleep(*args):
self.swap_sleep_called = True
c.get_auth = get_auth
c.http_connection = self.fake_http_connection(200)
c.sleep = swap_sleep
self.swap_sleep_called = False
conn = c.Connection('http://www.test.com', 'asdf', 'asdf',
preauthurl='http://www.old.com',
preauthtoken='old',
)
self.assertEquals(conn.attempts, 0)
self.assertEquals(conn.url, 'http://www.old.com')
self.assertEquals(conn.token, 'old')
conn.head_account()
self.assertTrue(self.swap_sleep_called)
self.assertEquals(conn.attempts, 2)
self.assertEquals(conn.url, 'http://www.new.com')
self.assertEquals(conn.token, 'new')
    def test_reset_stream(self):
        """put_object must seek a re-readable stream back to its start
        position before a retry, and fail cleanly when it cannot seek."""
        class LocalContents(object):
            # Minimal file-like stub that records every seek() it receives
            # and yields its payload exactly once per "rewind".
            def __init__(self, tell_value=0):
                self.already_read = False
                self.seeks = []
                self.tell_value = tell_value
            def tell(self):
                return self.tell_value
            def seek(self, position):
                self.seeks.append(position)
                self.already_read = False
            def read(self, size=-1):
                if self.already_read:
                    return ''
                else:
                    self.already_read = True
                    return 'abcdef'
        class LocalConnection(object):
            # HTTP connection stub whose send() always blows up with a
            # socket error, forcing put_object onto its retry path.
            def __init__(self, parsed_url=None):
                self.reason = ""
                if parsed_url:
                    self.host = parsed_url.netloc
                    self.port = parsed_url.netloc
            def putrequest(self, *args, **kwargs):
                return
            def putheader(self, *args, **kwargs):
                return
            def endheaders(self, *args, **kwargs):
                return
            def send(self, *args, **kwargs):
                raise socket.error('oops')
            def request(self, *args, **kwargs):
                return
            def getresponse(self, *args, **kwargs):
                self.status = 200
                return self
            def getheader(self, *args, **kwargs):
                return 'header'
            def read(self, *args, **kwargs):
                return ''
        def local_http_connection(url, proxy=None, ssl_compression=True):
            # Factory matching c.http_connection's signature.
            parsed = urlparse(url)
            return parsed, LocalConnection()
        orig_conn = c.http_connection
        try:
            c.http_connection = local_http_connection
            conn = c.Connection('http://www.example.com', 'asdf', 'asdf',
                                retries=1, starting_backoff=.0001)
            # Stream starting at offset 0: retry must seek back to 0 and the
            # underlying socket error must eventually surface.
            contents = LocalContents()
            exc = None
            try:
                conn.put_object('c', 'o', contents)
            except socket.error as err:
                exc = err
            self.assertEquals(contents.seeks, [0])
            self.assertEquals(str(exc), 'oops')
            # Stream whose tell() reports 123: the reset must seek to 123,
            # not to 0.
            contents = LocalContents(tell_value=123)
            exc = None
            try:
                conn.put_object('c', 'o', contents)
            except socket.error as err:
                exc = err
            self.assertEquals(contents.seeks, [123])
            self.assertEquals(str(exc), 'oops')
            # Unseekable stream (tell disabled): no seeks may happen and a
            # ClientException with an explanatory message is raised instead.
            contents = LocalContents()
            contents.tell = None
            exc = None
            try:
                conn.put_object('c', 'o', contents)
            except c.ClientException as err:
                exc = err
            self.assertEquals(contents.seeks, [])
            self.assertEquals(str(exc), "put_object('c', 'o', ...) failure "
                              "and no ability to reset contents for reupload.")
        finally:
            # Always restore the real connection factory for other tests.
            c.http_connection = orig_conn
if __name__ == '__main__':
testtools.main()
| 29,259
| 299
| 2,380
|
fbf53043effec4b990be99acf5f0238e010187d1
| 1,377
|
py
|
Python
|
save_neutral_pose.py
|
johndpope/FacialRetargeting
|
5fb0c1da6af6c3d59aef264f567bfa7a244d0764
|
[
"MIT"
] | null | null | null |
save_neutral_pose.py
|
johndpope/FacialRetargeting
|
5fb0c1da6af6c3d59aef264f567bfa7a244d0764
|
[
"MIT"
] | null | null | null |
save_neutral_pose.py
|
johndpope/FacialRetargeting
|
5fb0c1da6af6c3d59aef264f567bfa7a244d0764
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
from utils.load_data import load_c3d_file
# declare variables
path = 'D:/MoCap_Data/David/NewSession_labeled/'
file = 'NeutralTrail14.c3d'
save_folder = 'data/'
save_name = 'David_neutral_pose'
neutral_frame = 900
template_labels = ['LeftBrow1', 'LeftBrow2', 'LeftBrow3', 'LeftBrow4', 'RightBrow1', 'RightBrow2', 'RightBrow3',
'RightBrow4', 'Nose1', 'Nose2', 'Nose3', 'Nose4', 'Nose5', 'Nose6', 'Nose7', 'Nose8',
'UpperMouth1', 'UpperMouth2', 'UpperMouth3', 'UpperMouth4', 'UpperMouth5', 'LowerMouth1',
'LowerMouth2', 'LowerMouth3', 'LowerMouth4', 'LeftOrbi1', 'LeftOrbi2', 'RightOrbi1', 'RightOrbi2',
'LeftCheek1', 'LeftCheek2', 'LeftCheek3', 'RightCheek1', 'RightCheek2', 'RightCheek3',
'LeftJaw1', 'LeftJaw2', 'RightJaw1', 'RightJaw2', 'LeftEye1', 'RightEye1', 'Head1', 'Head2',
'Head3', 'Head4']
# load sequence
data, labels = load_c3d_file(os.path.join(path, file),
template_labels=template_labels,
get_labels=True,
verbose=True)
print("labels", len(labels))
print(labels)
print("shape data[neutral_frame]", np.shape(data[neutral_frame]))
print(data[neutral_frame])
# save
np.save(os.path.join(save_folder, save_name), data[neutral_frame])
| 45.9
| 117
| 0.627451
|
import numpy as np
import os
from utils.load_data import load_c3d_file
# declare variables
path = 'D:/MoCap_Data/David/NewSession_labeled/'
file = 'NeutralTrail14.c3d'
save_folder = 'data/'
save_name = 'David_neutral_pose'
neutral_frame = 900
template_labels = ['LeftBrow1', 'LeftBrow2', 'LeftBrow3', 'LeftBrow4', 'RightBrow1', 'RightBrow2', 'RightBrow3',
'RightBrow4', 'Nose1', 'Nose2', 'Nose3', 'Nose4', 'Nose5', 'Nose6', 'Nose7', 'Nose8',
'UpperMouth1', 'UpperMouth2', 'UpperMouth3', 'UpperMouth4', 'UpperMouth5', 'LowerMouth1',
'LowerMouth2', 'LowerMouth3', 'LowerMouth4', 'LeftOrbi1', 'LeftOrbi2', 'RightOrbi1', 'RightOrbi2',
'LeftCheek1', 'LeftCheek2', 'LeftCheek3', 'RightCheek1', 'RightCheek2', 'RightCheek3',
'LeftJaw1', 'LeftJaw2', 'RightJaw1', 'RightJaw2', 'LeftEye1', 'RightEye1', 'Head1', 'Head2',
'Head3', 'Head4']
# load sequence
data, labels = load_c3d_file(os.path.join(path, file),
template_labels=template_labels,
get_labels=True,
verbose=True)
print("labels", len(labels))
print(labels)
print("shape data[neutral_frame]", np.shape(data[neutral_frame]))
print(data[neutral_frame])
# save
np.save(os.path.join(save_folder, save_name), data[neutral_frame])
| 0
| 0
| 0
|
9ccbe38ce3cdfc09fc680af159046a22595d593a
| 1,122
|
py
|
Python
|
test/unit/SenseHATDisplay/run.py
|
rsm31/apama_GPIO
|
06da24c5ede5bd036514aa214d8a5e914e0b988e
|
[
"Apache-2.0"
] | 2
|
2017-12-29T20:36:35.000Z
|
2018-02-07T10:31:32.000Z
|
test/unit/SenseHATDisplay/run.py
|
rsm31/apama_GPIO
|
06da24c5ede5bd036514aa214d8a5e914e0b988e
|
[
"Apache-2.0"
] | 1
|
2018-03-16T11:40:58.000Z
|
2019-03-20T12:18:05.000Z
|
test/unit/SenseHATDisplay/run.py
|
rsm31/apama_GPIO
|
06da24c5ede5bd036514aa214d8a5e914e0b988e
|
[
"Apache-2.0"
] | 2
|
2017-12-29T21:22:59.000Z
|
2021-12-16T11:53:33.000Z
|
from senseHAT.BaseTest import SenseHATBaseTest
from random import randint
| 26.714286
| 64
| 0.637255
|
from senseHAT.BaseTest import SenseHATBaseTest
from random import randint
class PySysTest(SenseHATBaseTest):
	"""Exercise the SenseHAT display monitor with random pixel data.

	Sends random RGB values for rows 0-6 of the 8x8 LED matrix via Setter
	events, then verifies both row 7 and all the random pixels.
	"""
	def execute(self):
		self.clearPixels()
		self.start()
		self.correlator.injectMonitorscript(filenames=['display.mon'])
		# Build one SetterList event covering rows 0-6 (row 7 is left to the
		# monitor itself), remembering each (x, y, rgb) for verification.
		verifyArray = []
		setterList = 'SetterList(['
		for x in range(0, 7):
			for y in range(0, 8):
				r = randint(0, 255)
				g = randint(0, 255)
				b = randint(0, 255)
				setter = 'Setter(%d,%d,%d,%d,%d)'%(x, y, r, g, b)
				setterList += setter + ','
				verifyArray.append([x, y, [r, g, b]])
		# Drop the trailing comma and close the event literal.
		setterList = setterList[:-1] + '])'
		self.correlator.sendEventStrings(setterList)
		self.waitForSignal('correlator.out', expr='Finished loading')
		# Row 7 is checked against fixed colours — presumably painted by
		# display.mon itself; TODO confirm against the monitor script.
		self.checkPixel(7, 0, [0, 0, 0])
		self.checkPixel(7, 1, [0, 255, 255])
		self.checkPixel(7, 2, [255, 0, 255])
		self.checkPixel(7, 3, [255, 255, 255])
		self.checkPixel(7, 4, [255, 255, 0])
		self.checkPixel(7, 5, [255, 0, 0])
		self.checkPixel(7, 6, [0, 255, 0])
		self.checkPixel(7, 7, [0, 0, 255])
		# Every randomly set pixel must read back exactly as sent.
		for values in verifyArray:
			self.checkPixel(values[0], values[1], values[2])
		self.clearPixels()
	def validate(self):
		# All checks happen in execute(); nothing further to validate.
		pass
| 965
| 13
| 70
|
54db8109199fa0eec2d705d8f92645b132535acb
| 1,173
|
py
|
Python
|
tests/unit/test_config.py
|
outcastofmusic/jikken
|
3d3a67b699c92790b48b84492e98662068e49374
|
[
"MIT"
] | 5
|
2017-12-05T17:39:28.000Z
|
2021-01-18T19:05:30.000Z
|
tests/unit/test_config.py
|
outcastofmusic/jikken
|
3d3a67b699c92790b48b84492e98662068e49374
|
[
"MIT"
] | 1
|
2021-03-25T21:45:41.000Z
|
2021-03-25T21:45:41.000Z
|
tests/unit/test_config.py
|
outcastofmusic/jikken
|
3d3a67b699c92790b48b84492e98662068e49374
|
[
"MIT"
] | null | null | null |
import pytest
import os
from jikken.database.config import get_config, write_default_config, JikkenConfig, read_config
@pytest.fixture()
| 30.076923
| 130
| 0.71185
|
import pytest
import os
from jikken.database.config import get_config, write_default_config, JikkenConfig, read_config
@pytest.fixture()
def home_dir(tmpdir, monkeypatch):
    """Create a throwaway home directory and point $HOME at it."""
    fake_home = tmpdir.mkdir('home')
    monkeypatch.setenv('HOME', str(fake_home))
    return fake_home
def test_default_config_created(home_dir):
    """The default config file is created at ~/.jikken/config."""
    expected_path = os.path.join(str(home_dir), ".jikken", "config")
    assert write_default_config() == expected_path
def test_read_default_config_file(home_dir):
    """Reading a freshly written default config yields the default values."""
    written_path = write_default_config()
    expected = JikkenConfig(
        db_type='tiny',
        db_path=os.path.join(str(home_dir), ".jikken", "jikken_db/"),
        db_name="jikken",
    )
    assert read_config(written_path) == expected
def test_load_local_config(home_dir, tmpdir):
    """An explicit config file path overrides the default configuration."""
    new_config_file = tmpdir.join("config")
    # ConfigParser tolerates the indented lines of this literal; do not
    # dedent it — the bytes written are part of the test's contract.
    new_config = \
        """
        [db]
        path = jikken_db/
        type = mongo
        """
    with new_config_file.open('w') as file_handle:
        file_handle.write(new_config)
    config = get_config(str(new_config_file))
    # db_name is absent from the file, so the JikkenConfig default applies.
    expected_config = JikkenConfig(db_type='mongo', db_path="jikken_db/")
    assert config == expected_config
| 940
| 0
| 91
|
727fed69bd7418960e671b30d7bed67924a69d3f
| 825
|
py
|
Python
|
schedule/migrations/0024_auto_20141116_1234.py
|
yourcelf/masterschedule
|
e585df0e9edcaff5fa4f04f77a9452e3073b5db7
|
[
"Unlicense"
] | 1
|
2015-02-11T04:08:36.000Z
|
2015-02-11T04:08:36.000Z
|
schedule/migrations/0024_auto_20141116_1234.py
|
yourcelf/masterschedule
|
e585df0e9edcaff5fa4f04f77a9452e3073b5db7
|
[
"Unlicense"
] | null | null | null |
schedule/migrations/0024_auto_20141116_1234.py
|
yourcelf/masterschedule
|
e585df0e9edcaff5fa4f04f77a9452e3073b5db7
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 27.5
| 79
| 0.603636
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes random_slug unique and
    # non-editable on the conference, person and venue models.
    dependencies = [
        ('schedule', '0023_conference_venue_random_slugs'),
    ]
    operations = [
        migrations.AlterField(
            model_name='conference',
            name='random_slug',
            field=models.CharField(unique=True, max_length=64, editable=False),
        ),
        migrations.AlterField(
            model_name='person',
            name='random_slug',
            field=models.CharField(unique=True, max_length=64, editable=False),
        ),
        migrations.AlterField(
            model_name='venue',
            name='random_slug',
            field=models.CharField(unique=True, max_length=64, editable=False),
        ),
    ]
| 0
| 695
| 23
|
1081def19677c92dd923288beb9c5df34d939976
| 928
|
py
|
Python
|
corehq/motech/dhis2/management/commands/populate_sql_dhis2_connection.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 1
|
2020-07-14T13:00:23.000Z
|
2020-07-14T13:00:23.000Z
|
corehq/motech/dhis2/management/commands/populate_sql_dhis2_connection.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 94
|
2020-12-11T06:57:31.000Z
|
2022-03-15T10:24:06.000Z
|
corehq/motech/dhis2/management/commands/populate_sql_dhis2_connection.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | null | null | null |
from corehq.apps.cleanup.management.commands.populate_sql_model_from_couch_model import PopulateSQLCommand
| 32
| 106
| 0.648707
|
from corehq.apps.cleanup.management.commands.populate_sql_model_from_couch_model import PopulateSQLCommand
class Command(PopulateSQLCommand):
    """Migrate couch ``Dhis2Connection`` documents into the SQL model."""

    @classmethod
    def couch_doc_type(cls):
        # Couch doc_type this command consumes.
        return 'Dhis2Connection'

    @classmethod
    def sql_class(cls):
        # Imported lazily to avoid circular imports at module load time.
        from corehq.motech.dhis2.models import Dhis2Connection
        return Dhis2Connection

    @classmethod
    def commit_adding_migration(cls):
        # Commit that introduced the SQL model's schema migration.
        return "d670f19bfda1ab4e842d7d47162c5691b9bef55d"

    def update_or_create_sql_object(self, doc):
        """Upsert the SQL row for one couch document, keyed by domain."""
        field_values = {
            'server_url': doc.get('server_url'),
            'username': doc.get('username'),
            'password': doc.get('password'),
            'skip_cert_verify': doc.get('skip_cert_verify') or False,
        }
        model, created = self.sql_class().objects.update_or_create(
            domain=doc['domain'],
            defaults=field_values,
        )
        return (model, created)
| 626
| 171
| 23
|
d9969e3c638019ce1c670f7295db31d83f5b7653
| 100
|
py
|
Python
|
OpenGLCffi/GL/EXT/GREMEDY/frame_terminator.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
OpenGLCffi/GL/EXT/GREMEDY/frame_terminator.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
OpenGLCffi/GL/EXT/GREMEDY/frame_terminator.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
from OpenGLCffi.GL import params
@params(api='gl', prms=[])
| 14.285714
| 32
| 0.74
|
from OpenGLCffi.GL import params
@params(api='gl', prms=[])
def glFrameTerminatorGREMEDY():
    # GL_GREMEDY_frame_terminator entry point (marks a frame boundary for
    # debuggers). The body is an intentional stub — presumably the @params
    # decorator wires up the actual cffi call; confirm against OpenGLCffi.
    pass
| 16
| 0
| 22
|
70270b2fa3b99e488be39abb00d56b72ca2b5297
| 3,039
|
py
|
Python
|
dataviz/flagstriband.py
|
Udzu/pudzu
|
5a0302830b052fc54feba891eb7bf634957a9d90
|
[
"MIT"
] | 119
|
2017-07-22T15:02:30.000Z
|
2021-08-02T10:42:59.000Z
|
dataviz/flagstriband.py
|
Udzu/pudzu
|
5a0302830b052fc54feba891eb7bf634957a9d90
|
[
"MIT"
] | null | null | null |
dataviz/flagstriband.py
|
Udzu/pudzu
|
5a0302830b052fc54feba891eb7bf634957a9d90
|
[
"MIT"
] | 28
|
2017-08-04T14:28:41.000Z
|
2019-11-27T23:46:14.000Z
|
from pudzu.charts import *
df = pd.read_csv("datasets/flagstriband.csv")
df = pd.concat([pd.DataFrame(df.colours.apply(list).tolist(), columns=list("TMB")), df], axis=1).set_index("colours")
FONT, SIZE = calibri, 24
fg, bg = "black", "#EEEEEE"
default_img = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg"
COLORS = { "W": "white", "Y": "yellow", "R": "red", "G": "green", "B": "blue", "K": "black", }
W, H = 320, 200
PAD = 100
grids = list(generate_batches([grid(c) for c in COLORS], 2))
grid = Image.from_array(grids, padding=(PAD,PAD//2), bg=bg)
title = Image.from_column([
Image.from_text_bounded("From Austria to Zanzibar".upper(), grid.size, 360, partial(FONT, bold=True), fg=fg, bg=bg, padding=(PAD,20)),
Image.from_text_bounded("a catalog of horizontal triband flags".upper(), grid.size, 240, partial(FONT, bold=True), fg=fg, bg=bg, padding=(PAD,20)),
], padding=0)
img = Image.from_column([title, grid], bg=bg, padding=(20,0)).pad(10, bg)
img.place(Image.from_text("/u/Udzu", FONT(48), fg=fg, bg=bg, padding=10).pad((2,2,0,0), fg), align=1, padding=10, copy=False)
img.save("output/flagstriband.png")
img.resize_fixed_aspect(scale=0.5).save("output/flagstriband2.png")
| 55.254545
| 173
| 0.659756
|
from pudzu.charts import *
df = pd.read_csv("datasets/flagstriband.csv")
df = pd.concat([pd.DataFrame(df.colours.apply(list).tolist(), columns=list("TMB")), df], axis=1).set_index("colours")
FONT, SIZE = calibri, 24
fg, bg = "black", "#EEEEEE"
default_img = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg"
COLORS = { "W": "white", "Y": "yellow", "R": "red", "G": "green", "B": "blue", "K": "black", }
W, H = 320, 200
def label(c, size):
    # Header tile for colour code `c` at (width, height) `size`: two blank
    # text rows (so headers line up with flag cells, which carry a name and
    # description) above a colour swatch.
    w, h = size
    label = Image.from_text_bounded(" ", (W,H), SIZE, partial(FONT, bold=True), beard_line=True)
    description = Image.from_text_bounded(" ", (W,H), SIZE, partial(FONT, italics=True), beard_line=True)
    if c == "Y":
        # 'Y' gets an orange/yellow triangle swatch instead of a flat fill —
        # presumably so one code covers both yellow and orange stripes;
        # TODO confirm intent.
        flag = Triangle(max(w,h), "orange", "yellow", p=1.0).crop_to_aspect(w,h).trim(1).pad(1, "grey")
    else:
        flag = Rectangle((w-2, h-2), RGBA(COLORS.get(c)).blend(bg, 0.1)).pad(1, "grey")
    return Image.from_column([label, description, flag], padding=2, bg=bg)
def process(d):
    # Build one grid cell from a dataset row dict (keys: name, description,
    # flag). Returns None for empty slots so grid_chart leaves them blank.
    if non(d['name']): return None
    label = Image.from_text_bounded(d['name'].replace("*","").upper(), (W,H), SIZE, partial(FONT, bold=True), beard_line=True)
    description = Image.from_text_bounded(get_non(d, 'description', " "), (W,H), SIZE, partial(FONT, italics=True), beard_line=True)
    flag = Image.from_url_with_cache(get_non(d, 'flag', default_img)).to_rgba()
    # Narrow flags keep their aspect ratio; wider ones are stretched to fit.
    flag = flag.resize_fixed_aspect(height=H-2) if flag.width / flag.height < 1.3 else flag.resize((W-2,H-2))
    flag = flag.pad(1, "grey")
    flaglabel = Image.from_column([label, description, flag], padding=2, bg=bg)
    # A '*' in the name marks the entry as faded (blended towards background).
    if "*" in d['name']: flaglabel = flaglabel.blend(Rectangle(flaglabel.size, bg), 0.3)
    return flaglabel
def grid(middle):
    # One chart per middle-stripe colour: rows index the top stripe, columns
    # the bottom stripe; the middle colour is excluded from both axes.
    ms = df[df.M == middle]
    colors = "".join(COLORS).replace(middle,"")
    # Dataset rows are keyed by the three-letter colour string b+middle+t.
    array = [[dict(ms.loc[b+middle+t][["name", "description", "flag"]]) for b in colors] for t in colors]
    data = pd.DataFrame(array, index=list(colors), columns=list(colors))
    grid = grid_chart(data, process, padding=(10,20), fg=fg, bg=bg, yalign=1,
                      row_label=lambda row: label(data.index[row], (100, H)), col_label=lambda col: label(data.columns[col], (W,100)), corner_label=label(middle, (100,100)))
    return grid
PAD = 100
grids = list(generate_batches([grid(c) for c in COLORS], 2))
grid = Image.from_array(grids, padding=(PAD,PAD//2), bg=bg)
title = Image.from_column([
Image.from_text_bounded("From Austria to Zanzibar".upper(), grid.size, 360, partial(FONT, bold=True), fg=fg, bg=bg, padding=(PAD,20)),
Image.from_text_bounded("a catalog of horizontal triband flags".upper(), grid.size, 240, partial(FONT, bold=True), fg=fg, bg=bg, padding=(PAD,20)),
], padding=0)
img = Image.from_column([title, grid], bg=bg, padding=(20,0)).pad(10, bg)
img.place(Image.from_text("/u/Udzu", FONT(48), fg=fg, bg=bg, padding=10).pad((2,2,0,0), fg), align=1, padding=10, copy=False)
img.save("output/flagstriband.png")
img.resize_fixed_aspect(scale=0.5).save("output/flagstriband2.png")
| 1,730
| 0
| 73
|
a1e0965849d574fe42bfa2715312122b0a5be353
| 1,156
|
py
|
Python
|
nightcappackages/nightcappackages/classes/helpers/tmp_files.py
|
abaker2010/NightCAP
|
c58365a0e2ff1896ce0f8fbf2977b3e83feee1e2
|
[
"MIT"
] | 2
|
2022-02-11T17:47:38.000Z
|
2022-02-11T21:13:36.000Z
|
nightcappackages/nightcappackages/classes/helpers/tmp_files.py
|
abaker2010/NightCAP
|
c58365a0e2ff1896ce0f8fbf2977b3e83feee1e2
|
[
"MIT"
] | null | null | null |
nightcappackages/nightcappackages/classes/helpers/tmp_files.py
|
abaker2010/NightCAP
|
c58365a0e2ff1896ce0f8fbf2977b3e83feee1e2
|
[
"MIT"
] | null | null | null |
# Copyright 2020 by Aaron Baker.
# All rights reserved.
# This file is part of the Nightcap Project,
# and is released under the "MIT License Agreement". Please see the LICENSE
# file that should have been included as part of this package.
# region Imports
import tempfile
import shutil
from nightcapcore import Printer
from nightcappackages import *
# endregion
| 27.52381
| 75
| 0.649654
|
# Copyright 2020 by Aaron Baker.
# All rights reserved.
# This file is part of the Nightcap Project,
# and is released under the "MIT License Agreement". Please see the LICENSE
# file that should have been included as part of this package.
# region Imports
import tempfile
import shutil
from nightcapcore import Printer
from nightcappackages import *
# endregion
class NightcapTmpFileHelper(object):
def __init__(self) -> None:
super().__init__()
self.tmp_location: str = ""
self.printer = Printer()
self._deleted = False
def __del__(self):
try:
if self._deleted:
self._rmtmp()
except Exception as e:
pass
def delete(self):
self._rmtmp()
self._deleted = True
def create(self):
self._createtmp()
self._deleted = False
def _createtmp(self):
# region Tmp dir functions
self.tmp_location = tempfile.mkdtemp()
self.printer.print_underlined_header("Preparing")
self.printer.item_1("Creating tmp dir " + self.tmp_location)
def _rmtmp(self):
shutil.rmtree(self.tmp_location)
| 595
| 15
| 184
|
89bbc81542526f853c976415f525c8b7b73f6e69
| 8,515
|
py
|
Python
|
Client/FTP-Client.py
|
Junaid-D/FTP-py
|
2b9ff7abb5e390fc91be370889a43eec9c2eb08c
|
[
"MIT"
] | null | null | null |
Client/FTP-Client.py
|
Junaid-D/FTP-py
|
2b9ff7abb5e390fc91be370889a43eec9c2eb08c
|
[
"MIT"
] | null | null | null |
Client/FTP-Client.py
|
Junaid-D/FTP-py
|
2b9ff7abb5e390fc91be370889a43eec9c2eb08c
|
[
"MIT"
] | null | null | null |
import socket
from tkinter import *
ServerIP='127.0.0.1'
port = 4500
thisClient=FTPClient()
thisClient.run()
| 32.011278
| 90
| 0.535878
|
import socket
from tkinter import *
ServerIP='127.0.0.1'
port = 4500
class FTPClient():
    """Minimal interactive FTP client speaking a subset of RFC 959.

    Keeps one control connection open and, per transfer, a data connection
    arranged in either active (PORT) or passive (PASV) mode. Transfers use
    stream mode: end of file is signalled by closing the data connection.
    """
    def __init__(self):
        self.conSoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # control connection
        self.loggedIn = False
        self.open = True          # set False once the server confirms QUIT
        self.dataSoc = None       # active-mode listener / passive data socket
        self.passiveIP = None     # server address advertised by PASV
        self.passivePort = None
        self.type = 'b'           # 'b' = binary local file mode, '' = text
    def run(self):
        """Connect, wait for the greeting, log in, then loop on commands."""
        self.conSoc.connect((ServerIP, port))
        serverResp = ''
        # The greeting may not arrive immediately; keep reading until it does.
        while serverResp == '':
            serverResp = self.conSoc.recv(1024).decode('ascii')
        print('S %s' % serverResp)
        if serverResp.startswith('220'):  # 220 = service ready
            self.login()
        if self.loggedIn == True:
            # BUG FIX: original `while 1 & self.open==True` only worked by
            # operator-precedence accident; this states the intent directly.
            while self.open:
                command = input('Input next command..')
                self.parseCommand(command)
    def login(self):
        """Prompt for USER/PASS until the server accepts the credentials."""
        while 1:
            userName = input("type username..")
            loginMessage = 'USER ' + userName + '\r\n'
            print('C %s' % loginMessage)
            self.conSoc.sendall(loginMessage.encode('ascii'))
            serverResp = self.conSoc.recv(1024).decode('ascii')
            print('S %s' % serverResp)
            if serverResp.startswith('331'):  # 331 = username ok, need password
                password = input("type password..")
                loginMessage = 'PASS ' + password + '\r\n'
                print('C %s' % loginMessage)
                self.conSoc.sendall(loginMessage.encode('ascii'))
            else:
                continue
            serverResp = self.conSoc.recv(1024).decode('ascii')
            print('S %s' % serverResp)
            if serverResp.startswith('200'):  # login accepted
                self.loggedIn = True
                print("Login success!")
                break
    def parseCommand(self, command):
        """Dispatch a user-typed command name to its handler method."""
        handlers = {
            'QUIT': self.QUIT,
            'PORT': self.PORT,
            'PASV': self.PASV,
            'TYPE': self.TYPE,
            'MODE': self.MODE,
            'STRU': self.STRU,
            'RETR': self.RETR,
            'STOR': self.STOR,
            'NOOP': self.NOOP,
        }
        handler = handlers.get(command)
        if handler is None:
            print('Invalid Command')
        else:
            handler()
    def QUIT(self):
        """Tell the server we're done; close the control connection on 221."""
        message = 'QUIT\r\n'
        print('C %s' % message)
        self.conSoc.sendall(message.encode('ascii'))
        serverResp = self.conSoc.recv(1024).decode('ascii')
        print('S %s' % serverResp)
        if serverResp.startswith('221'):
            self.open = False
            print('Connection closed by server.')
            self.conSoc.close()
        return
    def PORT(self):
        """Active mode: bind a local listener and advertise it to the server."""
        print('Requesting data port')
        ip = ''
        while ip.count('.') != 3:
            ip = input('IP use . as separator?\n')
        splitIP = ip.split('.')
        portNo = input('Port no: ?\n')
        # FTP encodes the port as two base-256 digits (p1*256 + p2).
        port1 = int(portNo) // 256
        port2 = int(portNo) % 256
        sequence = ','.join(splitIP + [str(port1), str(port2)])
        self.dataSoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.dataSoc.bind((ip, (int(portNo))))
        message = 'PORT ' + sequence + '\r\n'
        print('C %s' % message)
        self.conSoc.sendall(message.encode('ascii'))
        serverResp = self.conSoc.recv(1024).decode('ascii')
        print('S %s' % serverResp)
        if serverResp.startswith('5'):
            print('Error with parameters, retuning to menu..')
            return
    def PASV(self):
        """Passive mode: ask the server for an address and remember it."""
        message = 'PASV\r\n'
        print('C %s' % message)
        self.conSoc.sendall(message.encode('ascii'))
        serverResp = self.conSoc.recv(1024).decode('ascii')
        print('S %s' % serverResp)
        if serverResp.startswith('2'):
            splitResp = serverResp[:-2]
            print(splitResp)
            splitResp = splitResp.split()
            splitIP = splitResp[4]
            splitIP = splitIP.split(",")
            # BUG FIX: octets must be re-joined with dots; the original
            # concatenated them bare (e.g. '127001' instead of '127.0.0.1').
            self.passiveIP = '.'.join(splitIP[:4])
            self.passivePort = int(splitIP[4]) * 256 + int(splitIP[5])
        elif serverResp.startswith('5'):
            print('Error with parameters, retuning to menu..')
            return
    def RETR(self):
        """Download a file over the previously arranged data connection."""
        if self.passiveIP == None and self.dataSoc == None:
            print('No data connection was set up')
            return
        filename = input('Input filename\n')
        message = 'RETR ' + filename + '\r\n'
        print('C %s' % message)
        self.conSoc.sendall(message.encode('ascii'))
        serverResp = self.conSoc.recv(1024).decode('ascii')
        print('S %s' % serverResp)
        if self.dataSoc != None:  # active mode: accept the server's connection
            self.dataSoc.listen()
            s1, addr = self.dataSoc.accept()
            self._receive_to_file(s1, filename)
            s1.close()  # BUG FIX: accepted socket was previously leaked
            self.CloseDataSocket()
            return
        if self.passiveIP != None:  # passive mode: connect out to the server
            self.dataSoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # BUG FIX: connect() takes a single (host, port) tuple, not two
            # positional arguments.
            self.dataSoc.connect((self.passiveIP, self.passivePort))
            self._receive_to_file(self.dataSoc, filename)
            self.dataSoc.close()
            self.dataSoc = None
            return
    def _receive_to_file(self, sock, filename):
        """Read from `sock` until EOF, writing into 'new_<filename>'."""
        with open('new_' + filename, 'w' + self.type) as newFile:
            while 1:
                data = sock.recv(1024)
                if (not data): break  # orderly close = end of transfer
                if self.type == '':
                    # BUG FIX: a text-mode file cannot accept bytes; mirror
                    # the encode done on the STOR side.
                    data = data.decode('ascii')
                newFile.write(data)
        print('Transfer complete')
    def STOR(self):
        """Upload a local file over the previously arranged data connection."""
        if self.passiveIP == None and self.dataSoc == None:
            print('No data connection was set up')
            return
        filename = input('Input filename\n')
        filenameOnServer = input('Called on server?\n')
        message = 'STOR ' + filenameOnServer + '\r\n'
        print('C %s' % message)
        self.conSoc.sendall(message.encode('ascii'))
        serverResp = self.conSoc.recv(1024).decode('ascii')
        print('S %s' % serverResp)
        if self.dataSoc != None:  # active mode
            self.dataSoc.listen()
            s1, addr = self.dataSoc.accept()
            self._send_from_file(s1, filename)
            s1.shutdown(socket.SHUT_RDWR)
            s1.close()
            self.CloseDataSocket()
            return
        if self.passiveIP != None:  # passive mode
            self.dataSoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # BUG FIX: connect() takes a single (host, port) tuple.
            self.dataSoc.connect((self.passiveIP, self.passivePort))
            self._send_from_file(self.dataSoc, filename)
            self.dataSoc.close()
            self.dataSoc = None
            return
    def _send_from_file(self, sock, filename):
        """Send the local file's contents over `sock` in 1 KiB chunks."""
        with open(filename, 'r' + self.type) as f:
            toSend = f.read(1024)
            while (toSend):
                if (self.type == ''): toSend = toSend.encode('ascii')
                sock.send(toSend)
                toSend = f.read(1024)
    def TYPE(self):
        """Ask the server to switch transfer type (single-letter code).

        NOTE(review): self.type (which drives local file open modes) is not
        updated here, so local files always use the initial binary setting —
        confirm whether it should track the server-side type.
        """
        transfer_type = ''
        while len(transfer_type) != 1:
            transfer_type = input('Type?\n')
        message = 'TYPE ' + transfer_type + '\r\n'
        print('C %s' % message)
        self.conSoc.sendall(message.encode('ascii'))
        serverResp = self.conSoc.recv(1024).decode('ascii')
        print('S %s' % serverResp)
        return
    def MODE(self):
        # Not implemented: only stream mode is supported.
        return
    def STRU(self):
        # Not implemented: only file structure is supported.
        return
    def NOOP(self):
        """Send a keep-alive NOOP and print the server's reply."""
        message = 'NOOP\r\n'
        print('C %s' % message)
        self.conSoc.sendall(message.encode('ascii'))
        serverResp = self.conSoc.recv(1024).decode('ascii')
        print('S %s' % serverResp)
        return
    def CloseDataSocket(self):
        """Discard the active-mode listening socket."""
        self.dataSoc.close()
        self.dataSoc = None
        return
thisClient=FTPClient()
thisClient.run()
| 7,976
| -3
| 403
|
6a44978c2724514cf08af55c609ff36d4c533ac1
| 4,057
|
py
|
Python
|
extensions/python/src/main/resources/jet_to_python_pb2.py
|
software-is-art/hazelcast
|
7f785606f1093aa6f420147ca46dd0befe11c4b8
|
[
"ECL-2.0",
"Apache-2.0"
] | 4,283
|
2015-01-02T03:56:10.000Z
|
2022-03-29T23:07:45.000Z
|
extensions/python/src/main/resources/jet_to_python_pb2.py
|
software-is-art/hazelcast
|
7f785606f1093aa6f420147ca46dd0befe11c4b8
|
[
"ECL-2.0",
"Apache-2.0"
] | 14,014
|
2015-01-01T04:29:38.000Z
|
2022-03-31T21:47:55.000Z
|
extensions/python/src/main/resources/jet_to_python_pb2.py
|
software-is-art/hazelcast
|
7f785606f1093aa6f420147ca46dd0befe11c4b8
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,608
|
2015-01-04T09:57:08.000Z
|
2022-03-31T12:05:26.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: jet-to-python.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='jet-to-python.proto',
package='jet_to_python',
syntax='proto3',
serialized_options=_b('\n\035com.hazelcast.jet.python.grpcB\023JetToPythonTopLevelP\001'),
serialized_pb=_b('\n\x13jet-to-python.proto\x12\rjet_to_python\"\"\n\x0cInputMessage\x12\x12\n\ninputValue\x18\x01 \x03(\t\"$\n\rOutputMessage\x12\x13\n\x0boutputValue\x18\x01 \x03(\t2_\n\x0bJetToPython\x12P\n\rstreamingCall\x12\x1b.jet_to_python.InputMessage\x1a\x1c.jet_to_python.OutputMessage\"\x00(\x01\x30\x01\x42\x36\n\x1d\x63om.hazelcast.jet.python.grpcB\x13JetToPythonTopLevelP\x01\x62\x06proto3')
)
_INPUTMESSAGE = _descriptor.Descriptor(
name='InputMessage',
full_name='jet_to_python.InputMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='inputValue', full_name='jet_to_python.InputMessage.inputValue', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=38,
serialized_end=72,
)
_OUTPUTMESSAGE = _descriptor.Descriptor(
name='OutputMessage',
full_name='jet_to_python.OutputMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='outputValue', full_name='jet_to_python.OutputMessage.outputValue', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=110,
)
DESCRIPTOR.message_types_by_name['InputMessage'] = _INPUTMESSAGE
DESCRIPTOR.message_types_by_name['OutputMessage'] = _OUTPUTMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InputMessage = _reflection.GeneratedProtocolMessageType('InputMessage', (_message.Message,), {
'DESCRIPTOR' : _INPUTMESSAGE,
'__module__' : 'jet_to_python_pb2'
# @@protoc_insertion_point(class_scope:jet_to_python.InputMessage)
})
_sym_db.RegisterMessage(InputMessage)
OutputMessage = _reflection.GeneratedProtocolMessageType('OutputMessage', (_message.Message,), {
'DESCRIPTOR' : _OUTPUTMESSAGE,
'__module__' : 'jet_to_python_pb2'
# @@protoc_insertion_point(class_scope:jet_to_python.OutputMessage)
})
_sym_db.RegisterMessage(OutputMessage)
DESCRIPTOR._options = None
_JETTOPYTHON = _descriptor.ServiceDescriptor(
name='JetToPython',
full_name='jet_to_python.JetToPython',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=112,
serialized_end=207,
methods=[
_descriptor.MethodDescriptor(
name='streamingCall',
full_name='jet_to_python.JetToPython.streamingCall',
index=0,
containing_service=None,
input_type=_INPUTMESSAGE,
output_type=_OUTPUTMESSAGE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_JETTOPYTHON)
DESCRIPTOR.services_by_name['JetToPython'] = _JETTOPYTHON
# @@protoc_insertion_point(module_scope)
| 30.051852
| 407
| 0.761647
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: jet-to-python.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='jet-to-python.proto',
package='jet_to_python',
syntax='proto3',
serialized_options=_b('\n\035com.hazelcast.jet.python.grpcB\023JetToPythonTopLevelP\001'),
serialized_pb=_b('\n\x13jet-to-python.proto\x12\rjet_to_python\"\"\n\x0cInputMessage\x12\x12\n\ninputValue\x18\x01 \x03(\t\"$\n\rOutputMessage\x12\x13\n\x0boutputValue\x18\x01 \x03(\t2_\n\x0bJetToPython\x12P\n\rstreamingCall\x12\x1b.jet_to_python.InputMessage\x1a\x1c.jet_to_python.OutputMessage\"\x00(\x01\x30\x01\x42\x36\n\x1d\x63om.hazelcast.jet.python.grpcB\x13JetToPythonTopLevelP\x01\x62\x06proto3')
)
_INPUTMESSAGE = _descriptor.Descriptor(
name='InputMessage',
full_name='jet_to_python.InputMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='inputValue', full_name='jet_to_python.InputMessage.inputValue', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=38,
serialized_end=72,
)
_OUTPUTMESSAGE = _descriptor.Descriptor(
name='OutputMessage',
full_name='jet_to_python.OutputMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='outputValue', full_name='jet_to_python.OutputMessage.outputValue', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=110,
)
DESCRIPTOR.message_types_by_name['InputMessage'] = _INPUTMESSAGE
DESCRIPTOR.message_types_by_name['OutputMessage'] = _OUTPUTMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InputMessage = _reflection.GeneratedProtocolMessageType('InputMessage', (_message.Message,), {
'DESCRIPTOR' : _INPUTMESSAGE,
'__module__' : 'jet_to_python_pb2'
# @@protoc_insertion_point(class_scope:jet_to_python.InputMessage)
})
_sym_db.RegisterMessage(InputMessage)
OutputMessage = _reflection.GeneratedProtocolMessageType('OutputMessage', (_message.Message,), {
'DESCRIPTOR' : _OUTPUTMESSAGE,
'__module__' : 'jet_to_python_pb2'
# @@protoc_insertion_point(class_scope:jet_to_python.OutputMessage)
})
_sym_db.RegisterMessage(OutputMessage)
DESCRIPTOR._options = None
_JETTOPYTHON = _descriptor.ServiceDescriptor(
name='JetToPython',
full_name='jet_to_python.JetToPython',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=112,
serialized_end=207,
methods=[
_descriptor.MethodDescriptor(
name='streamingCall',
full_name='jet_to_python.JetToPython.streamingCall',
index=0,
containing_service=None,
input_type=_INPUTMESSAGE,
output_type=_OUTPUTMESSAGE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_JETTOPYTHON)
DESCRIPTOR.services_by_name['JetToPython'] = _JETTOPYTHON
# @@protoc_insertion_point(module_scope)
| 0
| 0
| 0
|
cabd551c543209c2d843f34d3e05a150bfc258e9
| 159
|
py
|
Python
|
setup.py
|
plang85/hackathon2017-11
|
717d93cbf6c0d0e71e6a2f427b2e22760eeebf5c
|
[
"Unlicense"
] | null | null | null |
setup.py
|
plang85/hackathon2017-11
|
717d93cbf6c0d0e71e6a2f427b2e22760eeebf5c
|
[
"Unlicense"
] | null | null | null |
setup.py
|
plang85/hackathon2017-11
|
717d93cbf6c0d0e71e6a2f427b2e22760eeebf5c
|
[
"Unlicense"
] | null | null | null |
from setuptools import setup
setup(name='hackathon',
install_requires=['pandas'],
extras_require={'test': ['pytest'],},
packages=['hackathon'])
| 19.875
| 43
| 0.666667
|
from setuptools import setup
setup(name='hackathon',
install_requires=['pandas'],
extras_require={'test': ['pytest'],},
packages=['hackathon'])
| 0
| 0
| 0
|
fcf128319027f0a57916c9329dcf3057b808a2af
| 2,033
|
py
|
Python
|
rectround.py
|
pgalatic/zeitgeist
|
9c74ead7fd3870f3f6d9fbafd96946ce131c8bd8
|
[
"MIT"
] | null | null | null |
rectround.py
|
pgalatic/zeitgeist
|
9c74ead7fd3870f3f6d9fbafd96946ce131c8bd8
|
[
"MIT"
] | null | null | null |
rectround.py
|
pgalatic/zeitgeist
|
9c74ead7fd3870f3f6d9fbafd96946ce131c8bd8
|
[
"MIT"
] | null | null | null |
#
# author: Paul Galatic
#
# This program is JUST for drawing a rounded rectangle.
#
import pdb
from PIL import Image, ImageDraw
from extern import *
def sub_rectangle(draw, xy, corner_radius=25, fill=(255, 255, 255)):
'''
Source: https://stackoverflow.com/questions/7787375/python-imaging-library-pil-drawing-rounded-rectangle-with-gradient
'''
upper_left_point = xy[0]
bottom_right_point = xy[1]
draw.rectangle(
[
(upper_left_point[0], upper_left_point[1] + corner_radius),
(bottom_right_point[0], bottom_right_point[1] - corner_radius)
],
fill=fill,
)
draw.rectangle(
[
(upper_left_point[0] + corner_radius, upper_left_point[1]),
(bottom_right_point[0] - corner_radius, bottom_right_point[1])
],
fill=fill,
)
draw.pieslice([upper_left_point, (upper_left_point[0] + corner_radius * 2, upper_left_point[1] + corner_radius * 2)],
180,
270,
fill=fill,
)
draw.pieslice([(bottom_right_point[0] - corner_radius * 2, bottom_right_point[1] - corner_radius * 2), bottom_right_point],
0,
90,
fill=fill,
)
draw.pieslice([(upper_left_point[0], bottom_right_point[1] - corner_radius * 2), (upper_left_point[0] + corner_radius * 2, bottom_right_point[1])],
90,
180,
fill=fill,
)
draw.pieslice([(bottom_right_point[0] - corner_radius * 2, upper_left_point[1]), (bottom_right_point[0], upper_left_point[1] + corner_radius * 2)],
270,
360,
fill=fill,
)
| 29.897059
| 151
| 0.616331
|
#
# author: Paul Galatic
#
# This program is JUST for drawing a rounded rectangle.
#
import pdb
from PIL import Image, ImageDraw
from extern import *
def sub_rectangle(draw, xy, corner_radius=25, fill=(255, 255, 255)):
    '''
    Draw a filled rounded rectangle onto *draw* (a PIL ImageDraw).

    The shape is composed of two overlapping axis-aligned rectangles
    (one inset vertically, one inset horizontally by *corner_radius*)
    plus four quarter-circle pie slices for the corners.

    :param draw: PIL ImageDraw instance to render into.
    :param xy: ((left, top), (right, bottom)) corner points.
    :param corner_radius: radius of the rounded corners, in pixels.
    :param fill: fill color tuple.

    Source: https://stackoverflow.com/questions/7787375/python-imaging-library-pil-drawing-rounded-rectangle-with-gradient
    '''
    upper_left_point = xy[0]
    bottom_right_point = xy[1]
    # Vertical slab: full width, top/bottom edges inset by the corner radius.
    draw.rectangle(
        [
            (upper_left_point[0], upper_left_point[1] + corner_radius),
            (bottom_right_point[0], bottom_right_point[1] - corner_radius)
        ],
        fill=fill,
    )
    # Horizontal slab: full height, left/right edges inset by the corner radius.
    draw.rectangle(
        [
            (upper_left_point[0] + corner_radius, upper_left_point[1]),
            (bottom_right_point[0] - corner_radius, bottom_right_point[1])
        ],
        fill=fill,
    )
    # Four corner quarter-circles; PIL pieslice angles run clockwise from
    # 3 o'clock, so 180-270 is the top-left quarter, etc.
    draw.pieslice([upper_left_point, (upper_left_point[0] + corner_radius * 2, upper_left_point[1] + corner_radius * 2)],
        180,
        270,
        fill=fill,
    )
    # Bottom-right corner.
    draw.pieslice([(bottom_right_point[0] - corner_radius * 2, bottom_right_point[1] - corner_radius * 2), bottom_right_point],
        0,
        90,
        fill=fill,
    )
    # Bottom-left corner.
    draw.pieslice([(upper_left_point[0], bottom_right_point[1] - corner_radius * 2), (upper_left_point[0] + corner_radius * 2, bottom_right_point[1])],
        90,
        180,
        fill=fill,
    )
    # Top-right corner.
    draw.pieslice([(bottom_right_point[0] - corner_radius * 2, upper_left_point[1]), (bottom_right_point[0], upper_left_point[1] + corner_radius * 2)],
        270,
        360,
        fill=fill,
    )
def rectangle(draw, size, fill=WHITE, border=None):
    """Draw a rounded rectangle of *size* onto *draw*, optionally with a border.

    NOTE(review): the returned Image is created blank and never drawn on —
    all drawing goes to *draw*.  Confirm callers actually want the empty
    transparent canvas back before relying on the return value.
    """
    width, height = size
    img = Image.new('RGBA', size, color=BLANK)
    if border:
        # Paint a full-size rounded rect in the border color, then inset the
        # fill area by BORDER pixels on every side.
        outdims = ((0, 0), (width, height))
        sub_rectangle(draw, outdims, fill=border)
        indims = ((BORDER, BORDER), (width - BORDER, height - BORDER))
    else:
        indims = ((0, 0), (width, height))
    sub_rectangle(draw, indims, fill=fill)
    return img
| 409
| 0
| 23
|
86a6dbe707d06b0e60c73117110adb209c2be7ac
| 7,823
|
py
|
Python
|
data/myutils.py
|
vkazei/deeplogs
|
4f6f853ce608a59e9d4b1a3160eb6b0035f333c0
|
[
"MIT"
] | 25
|
2019-07-17T10:25:22.000Z
|
2022-03-30T15:37:59.000Z
|
data/myutils.py
|
vkazei/deeplogs
|
4f6f853ce608a59e9d4b1a3160eb6b0035f333c0
|
[
"MIT"
] | null | null | null |
data/myutils.py
|
vkazei/deeplogs
|
4f6f853ce608a59e9d4b1a3160eb6b0035f333c0
|
[
"MIT"
] | 16
|
2019-07-17T08:44:09.000Z
|
2022-03-08T06:32:28.000Z
|
#%%
# utilities
import subprocess
import os
import matplotlib
import matplotlib.pyplot as plt
import time
import numpy as np
from numpy import linalg
import m8r as sf
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
from tensorflow.python.ops.image_ops_impl import _random_flip
from skimage.transform import resize
class _const():
"""Default settings for modeling and inversion
"""
dx = 50
dt = 0.005
T_max = 7
nt = int(T_max / dt + 1)
central_freq = 7
jgx = 2
jsx = jgx
jdt = 4
sxbeg = 5000//dx
gxbeg = 1000//dx
szbeg = 2
jlogz = 2
trmodel = "marmvel.hh"
random_state_number = 314
random_model_repeat = 100
# upsample for plotting
ups_plot = 4
# one can stretch training models horizontally
stretch_X_train = 1
const = _const()
#%%
def tf_random_flip_channels(image, seed=None):
"""
With a 1 in 2 chance, outputs the contents of `image` flipped along the
third dimension, which is `channels`. Otherwise output the image as-is.
Args:
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
seed: A Python integer. Used to create a random seed. See
`tf.set_random_seed`
for behavior.
Returns:
A tensor of the same type and shape as `image`.
Raises:
ValueError: if the shape of `image` not supported.
"""
return _random_flip(image, 2, seed, 'random_flip_channels')
def np_to_rsf(vel, model_output, d1 = const.dx, d2 = const.dx):
''' Write 2D numpy array vel to rsf file model_output '''
yy = sf.Output(model_output)
yy.put('n1',np.shape(vel)[1])
yy.put('n2',np.shape(vel)[0])
yy.put('d1',d1)
yy.put('d2',d2)
yy.put('o1',0)
yy.put('o2',0)
yy.write(vel)
yy.close()
def merge_dict(dict1, dict2):
''' Merge dictionaries with same keys'''
dict3 = dict1.copy()
for key, value in dict1.items():
dict3[key] = np.concatenate((value, dict2[key]), axis=0)
return dict3
def cmd(command):
"""Run command and pipe what you would see in terminal into the output cell
"""
process = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
while True:
output = process.stderr.readline().decode('utf-8')
if output == '' and process.poll() is not None:
# this prints the stdout in the end
output2 = process.stdout.read().decode('utf-8')
print(output2.strip())
break
if output:
print(output.strip())
rc = process.poll()
return rc
class cd:
"""Context manager for changing the current working directory"""
# to distort the model
def elastic_transform(image, alpha, sigma, random_state_number=None, v_dx=const.dx, plot_name=None):
"""Elastic deformation of images as described in [Simard2003]_.
.. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
"""
random_state = np.random.RandomState(random_state_number)
shape = image.shape
#print(shape)
# with our velocities dx is vertical shift
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), (sigma, sigma/10, 1), mode="constant", cval=0) * 4 * alpha
# with our velocities dy is horizontal
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), (sigma, sigma/10, 1), mode="constant", cval=0) * alpha
dz = np.zeros_like(dx)
x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))
distorted_image = map_coordinates(image, indices, order=1, mode='reflect', prefilter=False)
distorted_image = distorted_image.reshape(image.shape)
if plot_name != None:
plt_nb_T(v_dx * np.squeeze(dx[:,:]), fname=f"VerticalShifts_{alpha}", title="Vertical shifts (km)")
dq_x = 100
dq_z = 17
M = np.hypot(dy.squeeze()[::dq_x,::dq_z].T, dx.squeeze()[::dq_x,::dq_z].T)
M = dx.squeeze()[::dq_x,::dq_z].T
M = np.squeeze(image)[::dq_x,::dq_z].T
if 1:
fig1, ax1 = plt.subplots(figsize=(16,9))
ax1.set_title('Guiding model')
plt.imshow(1e-3*np.squeeze(image.T), extent=(0, v_dx * dx.shape[0] * 1e-3, v_dx * dx.shape[1] *1e-3, 0))
plt.axis("tight")
plt.xlabel("Distance (km)")
plt.ylabel("Depth (km)")
plt.colorbar()
Q = ax1.quiver(
1e-3*v_dx *y.squeeze()[::dq_x,::dq_z].T, 1e-3*v_dx *x.squeeze()[::dq_x,::dq_z].T,
np.abs(1e-4*v_dx*dx.squeeze()[::dq_x,::dq_z].T), 1e-3*v_dx*dx.squeeze()[::dq_x,::dq_z].T,
scale_units='xy', scale=1, pivot='tip')
plt.savefig(f"../latex/Fig/shiftsVectors", bbox_inches='tight')
plt_show_proceed()
fig1, ax1 = plt.subplots(figsize=(16,9))
ax1.set_title('Distorted model')
plt.imshow(1e-3*np.squeeze(distorted_image.T), extent=(0, v_dx * dx.shape[0] * 1e-3, v_dx * dx.shape[1] *1e-3, 0))
plt.axis("tight")
plt.xlabel("Distance (km)")
plt.ylabel("Depth (km)")
plt.colorbar()
Q = ax1.quiver(
1e-3*v_dx *y.squeeze()[::dq_x,::dq_z].T, 1e-3*v_dx *x.squeeze()[::dq_x,::dq_z].T,
np.abs(1e-4*v_dx*dx.squeeze()[::dq_x,::dq_z].T), 1e-3*v_dx*dx.squeeze()[::dq_x,::dq_z].T,
scale_units='xy', scale=1, pivot='tip')
plt.savefig(f"../latex/Fig/deformedModel{plot_name}", bbox_inches='tight')
plt_show_proceed()
return distorted_image
| 33.865801
| 122
| 0.613959
|
#%%
# utilities
import subprocess
import os
import matplotlib
import matplotlib.pyplot as plt
import time
import numpy as np
from numpy import linalg
import m8r as sf
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
from tensorflow.python.ops.image_ops_impl import _random_flip
from skimage.transform import resize
class _const():
    """Default settings for modeling and inversion
    """
    # Spatial grid step; also used as the RSF d1/d2 sampling in np_to_rsf.
    dx = 50
    # Time step (s) and total recording time (s).
    dt = 0.005
    T_max = 7
    # Number of time samples implied by T_max and dt.
    nt = int(T_max / dt + 1)
    # Source wavelet central frequency — presumably Hz; confirm with the modeler.
    central_freq = 7
    # Receiver and source decimation along the grid (jsx mirrors jgx).
    jgx = 2
    jsx = jgx
    # Temporal decimation factor.
    jdt = 4
    # First source / receiver positions and depth, in grid points.
    sxbeg = 5000//dx
    gxbeg = 1000//dx
    szbeg = 2
    # Vertical decimation — presumably for well-log extraction; confirm usage.
    jlogz = 2
    # Training velocity model file (Marmousi).
    trmodel = "marmvel.hh"
    # Seed and repeat count for random model generation.
    random_state_number = 314
    random_model_repeat = 100
    # upsample for plotting
    ups_plot = 4
    # one can stretch training models horizontally
    stretch_X_train = 1
#%%
def tf_random_flip_channels(image, seed=None):
    """
    With a 1 in 2 chance, outputs the contents of `image` flipped along the
    third dimension, which is `channels`. Otherwise output the image as-is.
    Args:
        image: 4-D Tensor of shape `[batch, height, width, channels]` or
            3-D Tensor of shape `[height, width, channels]`.
        seed: A Python integer. Used to create a random seed. See
            `tf.set_random_seed`
            for behavior.
    Returns:
        A tensor of the same type and shape as `image`.
    Raises:
        ValueError: if the shape of `image` not supported.
    """
    # Delegates to TensorFlow's private _random_flip with flip dimension 2
    # (the channels axis of a 3-D HWC tensor, per the docstring above).
    # NOTE(review): this relies on a private TF API whose signature has
    # changed across versions — confirm against the pinned TF release.
    return _random_flip(image, 2, seed, 'random_flip_channels')
def upsample(X, upscale):
    """Resample array *X* so that every dimension grows by the factor *upscale*."""
    target_shape = upscale * np.array(X.shape)
    return resize(X, target_shape)
def nrms(T_pred, T_true):
    """Normalized RMS misfit between *T_pred* and *T_true*, in percent."""
    residual = linalg.norm(T_pred - T_true)
    reference = linalg.norm(T_true)
    return 100 * residual / reference
def rsf_to_np(file_name):
    """Load the RSF file *file_name* into a numpy array via Madagascar's m8r."""
    rsf_input = sf.Input(file_name)
    return rsf_input.read()
def np_to_rsf(vel, model_output, d1 = const.dx, d2 = const.dx):
    ''' Write 2D numpy array vel to rsf file model_output '''
    out = sf.Output(model_output)
    # RSF's fast axis n1 is the second numpy axis.
    n2, n1 = np.shape(vel)
    for key, value in (('n1', n1), ('n2', n2),
                       ('d1', d1), ('d2', d2),
                       ('o1', 0), ('o2', 0)):
        out.put(key, value)
    out.write(vel)
    out.close()
def merge_dict(dict1, dict2):
    ''' Merge dictionaries with same keys'''
    # For every key of dict1, stack dict1's array on top of dict2's along
    # axis 0.  Keys present only in dict2 are ignored, matching the original.
    merged = dict1.copy()
    for key in dict1:
        merged[key] = np.concatenate((dict1[key], dict2[key]), axis=0)
    return merged
def cmd(command):
    """Run command and pipe what you would see in terminal into the output cell
    """
    # shell=True: `command` is a full shell command line, not an argv list.
    process = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
    while True:
        # Stream stderr line by line while the child runs.
        # NOTE(review): stdout is only drained after exit; a child that fills
        # the stdout pipe buffer before finishing stderr could deadlock —
        # confirm the commands used here produce modest stdout.
        output = process.stderr.readline().decode('utf-8')
        if output == '' and process.poll() is not None:
            # this prints the stdout in the end
            output2 = process.stdout.read().decode('utf-8')
            print(output2.strip())
            break
        if output:
            print(output.strip())
    # Child has exited here; poll() returns its exit code.
    rc = process.poll()
    return rc
class cd:
    """Context manager for changing the current working directory"""
    def __init__(self, newPath):
        # Expand "~" once so entering the context is a plain chdir.
        self.newPath = os.path.expanduser(newPath)
    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)
    def __exit__(self, etype, value, traceback):
        # Always return to the original directory, even after an exception.
        os.chdir(self.savedPath)
def plt_show_proceed(delay=1):
    # Show the current figure without blocking, keep it visible for *delay*
    # seconds, then close it so batch runs proceed automatically.
    plt.show(block=False)
    plt.pause(delay)
    plt.close()
def plt_nb_T(vel, fname="Velocity", title="",
             ylabel="Depth (km)", xlabel="Distance (km)",
             cbar=True,
             cbar_label = "(km/s)",
             vmin=None, vmax=None,
             split_line=False,
             dx=const.dx, dz=const.dx, no_labels=False, origin_in_middle=False,
             figsize=(16,9),
             xticks=True):
    """Plot the 2D array *vel* transposed (depth down), save it to *fname*
    and display it briefly via plt_show_proceed().

    Axes are scaled to km from the grid steps *dx*, *dz*; plotted values are
    divided by 1e3 (velocities presumably arrive in m/s — confirm callers).
    """
    plt.figure(figsize=figsize)
    # Model arrays are (x, z); transpose so depth runs down the image.
    vel_image = vel[:,:].T
    # imshow extent in km: (left, right, bottom, top); top=0 puts the surface on top.
    extent=(0, dx * vel.shape[0] * 1e-3, dz * vel.shape[1] *1e-3, 0)
    if origin_in_middle:
        # Center x=0 in the middle of the panel instead of at the left edge.
        extent = (-dx * vel.shape[0] * .5e-3, dx * vel.shape[0] * .5e-3, dz * vel.shape[1] *1e-3, 0)
    plt.imshow(vel_image * 1e-3, origin='upper', extent=extent)
    #plt.axis("equal")
    plt.axis("tight")
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    if not xticks:
        plt.xticks([])
    plt.title(title)
    plt.clim(vmin,vmax)
    if cbar==True:
        # NOTE: rebinds the boolean flag `cbar` to the colorbar object.
        cbar = plt.colorbar()
        cbar.ax.set_ylabel(cbar_label)
    if split_line:
        # Thick vertical divider down the middle of the panel.
        plt.axvline(x=extent[1]/2, color='black', linewidth=10, linestyle='-')
    if no_labels:
        plt.xlabel("")
        plt.axis('off')
    plt.savefig(fname, bbox_inches='tight')
    plt_show_proceed()
def toc(start_time):
    """Elapsed wall-clock seconds since *start_time* (a time.time() stamp)."""
    now = time.time()
    return now - start_time
def aug_flip(vel):
    """Augment model *vel* by stacking [vel, vertically-flipped vel, vel] along axis 0."""
    mirrored = np.flipud(vel)
    return np.concatenate((vel, mirrored, vel), axis=0)
# to distort the model
def elastic_transform(image, alpha, sigma, random_state_number=None, v_dx=const.dx, plot_name=None):
    """Elastic deformation of images as described in [Simard2003]_.
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.

    :param image: 3-D array to distort (indexed below as (axis0, axis1, axis2)).
    :param alpha: displacement amplitude; the first ("vertical") field gets 4*alpha.
    :param sigma: Gaussian smoothing width of the random displacement fields.
    :param random_state_number: seed for reproducible distortions.
    :param v_dx: grid step used only for plot axis scaling.
    :param plot_name: when not None, diagnostic figures are saved and shown.
    """
    random_state = np.random.RandomState(random_state_number)
    shape = image.shape
    #print(shape)
    # with our velocities dx is vertical shift
    # Smoothed uniform noise in [-1, 1); stronger smoothing along axis 0 than axis 1.
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), (sigma, sigma/10, 1), mode="constant", cval=0) * 4 * alpha
    # with our velocities dy is horizontal
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), (sigma, sigma/10, 1), mode="constant", cval=0) * alpha
    # No displacement along the last axis.
    dz = np.zeros_like(dx)
    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    # Sample the input at the displaced coordinates (linear interpolation,
    # reflected boundaries).
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))
    distorted_image = map_coordinates(image, indices, order=1, mode='reflect', prefilter=False)
    distorted_image = distorted_image.reshape(image.shape)
    if plot_name != None:
        # Diagnostic plots: shift magnitudes plus quiver overlays on the
        # guiding and distorted models.
        plt_nb_T(v_dx * np.squeeze(dx[:,:]), fname=f"VerticalShifts_{alpha}", title="Vertical shifts (km)")
        # Quiver decimation factors along the two axes.
        dq_x = 100
        dq_z = 17
        # Only the last assignment to M survives; earlier ones look like
        # leftovers from experimentation.
        M = np.hypot(dy.squeeze()[::dq_x,::dq_z].T, dx.squeeze()[::dq_x,::dq_z].T)
        M = dx.squeeze()[::dq_x,::dq_z].T
        M = np.squeeze(image)[::dq_x,::dq_z].T
        if 1:
            fig1, ax1 = plt.subplots(figsize=(16,9))
            ax1.set_title('Guiding model')
            plt.imshow(1e-3*np.squeeze(image.T), extent=(0, v_dx * dx.shape[0] * 1e-3, v_dx * dx.shape[1] *1e-3, 0))
            plt.axis("tight")
            plt.xlabel("Distance (km)")
            plt.ylabel("Depth (km)")
            plt.colorbar()
            Q = ax1.quiver(
                1e-3*v_dx *y.squeeze()[::dq_x,::dq_z].T, 1e-3*v_dx *x.squeeze()[::dq_x,::dq_z].T,
                np.abs(1e-4*v_dx*dx.squeeze()[::dq_x,::dq_z].T), 1e-3*v_dx*dx.squeeze()[::dq_x,::dq_z].T,
                scale_units='xy', scale=1, pivot='tip')
            plt.savefig(f"../latex/Fig/shiftsVectors", bbox_inches='tight')
            plt_show_proceed()
            fig1, ax1 = plt.subplots(figsize=(16,9))
            ax1.set_title('Distorted model')
            plt.imshow(1e-3*np.squeeze(distorted_image.T), extent=(0, v_dx * dx.shape[0] * 1e-3, v_dx * dx.shape[1] *1e-3, 0))
            plt.axis("tight")
            plt.xlabel("Distance (km)")
            plt.ylabel("Depth (km)")
            plt.colorbar()
            Q = ax1.quiver(
                1e-3*v_dx *y.squeeze()[::dq_x,::dq_z].T, 1e-3*v_dx *x.squeeze()[::dq_x,::dq_z].T,
                np.abs(1e-4*v_dx*dx.squeeze()[::dq_x,::dq_z].T), 1e-3*v_dx*dx.squeeze()[::dq_x,::dq_z].T,
                scale_units='xy', scale=1, pivot='tip')
            plt.savefig(f"../latex/Fig/deformedModel{plot_name}", bbox_inches='tight')
            plt_show_proceed()
    return distorted_image
| 1,722
| 0
| 241
|
157703ab428c8c027e5a117c3b1641d7f72605b0
| 2,024
|
py
|
Python
|
analog/tests/test_main.py
|
sitedata/analog
|
29d3d5f41e7a4479d99296032b278f526f0c748d
|
[
"MIT"
] | 11
|
2015-02-27T16:04:50.000Z
|
2021-08-27T23:51:11.000Z
|
analog/tests/test_main.py
|
fabianbuechler/analog
|
4ee7a045717d7e2051ebe92d06cee89701291bff
|
[
"MIT"
] | 1
|
2020-12-29T16:10:55.000Z
|
2021-01-01T17:37:25.000Z
|
analog/tests/test_main.py
|
sitedata/analog
|
29d3d5f41e7a4479d99296032b278f526f0c748d
|
[
"MIT"
] | 2
|
2016-05-22T02:54:32.000Z
|
2020-06-09T21:38:38.000Z
|
"""Test the analog.main module and CLI."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
try:
from unittest import mock
except ImportError:
import mock
import pytest
import analog
@pytest.fixture
def tmp_logfile(tmpdir):
"""Fixture creating a temporary logfile.
:returns: local tempfile object.
"""
log_name = 'logmock.log'
logfile = tmpdir.join(log_name)
logfile.write("log entry #1")
return logfile
def test_help(capsys):
"""analog --help prints help and describes arguments."""
with pytest.raises(SystemExit):
analog.main(['analog', '--help'])
out, err = capsys.readouterr()
# main docstring is used as help description
assert analog.main.__doc__ in out
# analog arguments are listed
assert '--config' in out
assert '--version' in out
assert '--format' in out
assert '--regex' in out
assert '--max-age' in out
assert '--print-stats' in out
assert '--print-path-stats' in out
def test_format_or_regex_required(capsys, tmp_logfile):
"""analog requires log --format or pattern --regex."""
with pytest.raises(SystemExit) as exit:
analog.main(['analog', str(tmp_logfile)])
assert exit.errisinstance(analog.MissingFormatError)
@mock.patch('analog.analyze', return_value=analog.Report([], []))
def test_paths(mock_analyze, capsys, tmp_logfile):
"""analog --path specifies paths to monitor."""
with pytest.raises(SystemExit):
# the --path argument can be specified multiple times, also as -p
analog.main(['analog',
'--format', 'nginx',
'--config', '/foo/bar',
str(tmp_logfile)])
mock_analyze.assert_called_once_with(
log=mock.ANY,
format='nginx',
config='/foo/bar',
max_age=10,
print_stats=False,
print_path_stats=False)
| 29.333333
| 73
| 0.617589
|
"""Test the analog.main module and CLI."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
try:
from unittest import mock
except ImportError:
import mock
import pytest
import analog
@pytest.fixture
def tmp_logfile(tmpdir):
    """Fixture creating a temporary logfile.
    :returns: local tempfile object.
    """
    # `tmpdir` is pytest's built-in per-test temporary directory fixture.
    log_name = 'logmock.log'
    logfile = tmpdir.join(log_name)
    logfile.write("log entry #1")
    return logfile
def test_help(capsys):
    """analog --help prints help and describes arguments."""
    # argparse's --help prints usage and exits, so main() raises SystemExit.
    with pytest.raises(SystemExit):
        analog.main(['analog', '--help'])
    out, err = capsys.readouterr()
    # main docstring is used as help description
    assert analog.main.__doc__ in out
    # analog arguments are listed
    assert '--config' in out
    assert '--version' in out
    assert '--format' in out
    assert '--regex' in out
    assert '--max-age' in out
    assert '--print-stats' in out
    assert '--print-path-stats' in out
def test_format_or_regex_required(capsys, tmp_logfile):
    """analog requires log --format or pattern --regex."""
    with pytest.raises(SystemExit) as exit:
        analog.main(['analog', str(tmp_logfile)])
    # Exiting via MissingFormatError shows the CLI rejected the bare call.
    assert exit.errisinstance(analog.MissingFormatError)
@mock.patch('analog.analyze', return_value=analog.Report([], []))
def test_paths(mock_analyze, capsys, tmp_logfile):
    """analog --path specifies paths to monitor."""
    # analyze() is mocked so no real log processing happens.
    with pytest.raises(SystemExit):
        # the --path argument can be specified multiple times, also as -p
        analog.main(['analog',
                     '--format', 'nginx',
                     '--config', '/foo/bar',
                     str(tmp_logfile)])
    # analyze() must receive the parsed CLI options unchanged.
    mock_analyze.assert_called_once_with(
        log=mock.ANY,
        format='nginx',
        config='/foo/bar',
        max_age=10,
        print_stats=False,
        print_path_stats=False)
| 0
| 0
| 0
|
a8ee691e6c4c3958a199f2a0524a8fb707e64970
| 9,917
|
py
|
Python
|
src/util/util.py
|
JARVIS-AI/The-Witcher-3-Mod-manager-1
|
fdc4763e29bc3cef6f7b4df51a1c4e286da0fe06
|
[
"BSD-2-Clause"
] | null | null | null |
src/util/util.py
|
JARVIS-AI/The-Witcher-3-Mod-manager-1
|
fdc4763e29bc3cef6f7b4df51a1c4e286da0fe06
|
[
"BSD-2-Clause"
] | null | null | null |
src/util/util.py
|
JARVIS-AI/The-Witcher-3-Mod-manager-1
|
fdc4763e29bc3cef6f7b4df51a1c4e286da0fe06
|
[
"BSD-2-Clause"
] | null | null | null |
'''Global Helpers'''
# pylint: disable=invalid-name,superfluous-parens,missing-docstring,wildcard-import,unused-wildcard-import
from sys import platform
import os
import sys
import re
import traceback
import webbrowser
import subprocess
from shutil import copytree, rmtree
from platform import python_version
from configparser import ConfigParser
from threading import Timer
import cchardet
from PySide2 import QtGui, QtCore, __version__
from PySide2.QtWidgets import QFileDialog, QMessageBox, QWidget
from src.globals import data
from src.globals.constants import *
from src.gui.file_dialog import FileDialog
from src.gui.alerts import MessageCouldntOpenFile, MessageNotConfigured, MessageUnsupportedOS
def copyFolder(src, dst):
'''Copy folder from src to dst'''
dst = os.path.normpath(dst)
src = os.path.normpath(src)
print(
f'copying from {src} to {dst} (exists: {os.path.isdir(os.path.normpath(dst))})')
rmtree(dst, ignore_errors=True)
while os.path.isdir(dst):
pass
copytree(src, dst)
def restartProgram():
'''Restarts the program'''
data.config.write()
python = sys.executable
os.execl(python, python, *sys.argv)
def getFile(directory="", extensions="", title="Select Files or Folders"):
'''Opens custom dialog for selecting multiple folders or files'''
return FileDialog(None, title, str(directory), str(extensions)).selectedFiles
def getSize(start_path='.'):
'''Calculates the size of the selected folder'''
total_size = 0
for dirpath, _, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
def getIcon(filename):
'''Gets icon from the res folder'''
icon = QtGui.QIcon()
icon.addFile(getProgramRootFolder() + '/res/' + filename)
return icon
def getKey(item):
'''Helper function for the mod list'''
return item[1]
def isData(name):
'''Checks if given name represents correct mod folder or not'''
return re.match(r"^(~|)mod.+$", name)
def fixUserSettingsDuplicateBrackets():
    '''Fix invalid section names in user.settings.

    Bad writes can leave sections like "[[Section]]", which ConfigParser
    reads as a section literally named "[Section]".  Strip the surplus
    brackets, move the options to the cleaned-up section and rewrite the
    file.  Failures are logged and swallowed (best effort).
    '''
    try:
        config = ConfigParser(strict=False)
        # Preserve option-name case — the game's settings are case-sensitive.
        config.optionxform = str
        config.read(data.config.settings + "/user.settings",
                    encoding=detectEncoding(data.config.settings + "/user.settings"))
        for section in config.sections():
            # Strip any surplus leading '[' and trailing ']' characters.
            newSection = section
            while newSection[:1] == "[":
                newSection = newSection[1:]
            while newSection[-1:] == "]":
                newSection = newSection[:-1]
            if newSection != section:
                # Move every option to the cleaned section, then drop the bad one.
                items = config.items(section)
                if not config.has_section(newSection):
                    config.add_section(newSection)
                for item in items:
                    config.set(newSection, item[0], item[1])
                config.remove_section(section)
        with open(data.config.settings+"/user.settings", 'w', encoding="utf-8") as userfile:
            config.write(userfile, space_around_delimiters=False)
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # propagate while keeping the best-effort behavior.
        print("fixing duplicate brackets failed")
def throttle(ms: int):
"""Decorator ensures function that can only be called once every `ms` milliseconds"""
from datetime import datetime, timedelta
return decorate
def debounce(ms: int):
"""Debounce a functions execution by {ms} milliseconds"""
return decorator
| 31.48254
| 106
| 0.609761
|
'''Global Helpers'''
# pylint: disable=invalid-name,superfluous-parens,missing-docstring,wildcard-import,unused-wildcard-import
from sys import platform
import os
import sys
import re
import traceback
import webbrowser
import subprocess
from shutil import copytree, rmtree
from platform import python_version
from configparser import ConfigParser
from threading import Timer
import cchardet
from PySide2 import QtGui, QtCore, __version__
from PySide2.QtWidgets import QFileDialog, QMessageBox, QWidget
from src.globals import data
from src.globals.constants import *
from src.gui.file_dialog import FileDialog
from src.gui.alerts import MessageCouldntOpenFile, MessageNotConfigured, MessageUnsupportedOS
def formatUserError(error: Exception) -> str:
    """Log *error* with traceback to stderr and return a user-facing message.

    In debug mode the returned string also contains the full traceback.
    """
    trace = traceback.format_exc()
    print(trace, error, file=sys.stderr)
    if not data.debug:
        return str(error)
    return trace + str(error)
def getDocumentsFolder() -> str:
    """Locate the user's "My Documents" folder, asking via a dialog as fallback."""
    path = ""
    if platform == "win32" or platform == "cygwin":
        # Ask the Windows shell directly; folder id 5 is CSIDL_PERSONAL,
        # i.e. "My Documents".
        from ctypes import create_unicode_buffer, wintypes, windll
        buf = create_unicode_buffer(wintypes.MAX_PATH)
        windll.shell32.SHGetFolderPathW(None, 5, None, 0, buf)
        path = normalizePath(buf.value)
    elif platform == "linux" or platform == "darwin":
        # try steam proton documents location path
        # (292030 is the Witcher 3 Steam app id in the compatdata prefix).
        path = normalizePath(os.path.expanduser(
            "~/.local/share/Steam/steamapps/compatdata/292030/pfx/drive_c/users/steamuser/My Documents"))
    else:
        MessageUnsupportedOS(platform)
        sys.exit(1)
    # Fall back to asking the user when detection failed or the path is gone.
    if not path or not os.path.exists(path):
        path = normalizePath(str(QFileDialog.getExistingDirectory(
            None,
            "Select \"My Documents\" directory containing the Witcher 3 config directory",
            "My Documents")))
    return path
def getConfigFolder() -> str:
    """Return the per-user configuration root for the current OS."""
    if platform in ("win32", "cygwin"):
        # Windows keeps the game configuration under "My Documents".
        return getDocumentsFolder()
    if platform in ("linux", "darwin"):
        return normalizePath(os.path.expanduser("~/.config"))
    # Anything else is unsupported: tell the user and bail out.
    MessageUnsupportedOS(platform)
    sys.exit(1)
def getConfigFolderName() -> str:
    """Name of the mod manager's own config directory (no spaces on Unix)."""
    unix_like = platform in ("linux", "darwin")
    return "TheWitcher3ModManager" if unix_like else "The Witcher 3 Mod Manager"
def getVersionString() -> str:
    """Human-readable "<title> <version>" string for titles and logs."""
    return " ".join((TITLE, VERSION))
def getProgramRootFolder() -> str:
    """Root directory of the application, both frozen (bundled) and from source."""
    if getattr(sys, 'frozen', False):
        # Frozen build: the executable itself lives in the program root.
        root = os.path.dirname(sys.executable)
    else:
        # Running from source: this module sits two levels below the root.
        root = os.path.dirname(os.path.abspath(__file__)) + "/../../"
    return normalizePath(root)
def normalizePath(path: str) -> str:
    """Collapse *path* with normpath and force forward slashes throughout."""
    normalized = os.path.normpath(str(path))
    return normalized.replace('\\', '/')
def reconfigureGamePath() -> bool:
    """Ask the user to locate witcher3.exe and store it in the config.

    Returns True on success, False when the selected file is rejected.
    """
    MessageNotConfigured()
    gamePath = str(QFileDialog.getOpenFileName(
        None,
        TRANSLATE("MainWindow", "Select witcher3.exe"),
        data.config.gameexe or "witcher3.exe",
        "*.exe")[0])
    try:
        # The config setter validates the path and raises ValueError when
        # it is not the game executable.
        data.config.game = gamePath
    except ValueError as err:
        print(str(err), file=sys.stderr)
        QMessageBox.critical(
            None,
            TRANSLATE("MainWindow", "Selected file not correct"),
            TRANSLATE("MainWindow", "'witcher3.exe' file not selected"),
            QMessageBox.StandardButton.Ok)
        return False
    return True
def reconfigureScriptMergerPath():
    """Ask the user to locate the Script Merger executable and remember it."""
    mergerPath = str(QFileDialog.getOpenFileName(
        None,
        TRANSLATE("MainWindow", "Select script merger .exe"),
        data.config.scriptmerger or '',
        "*.exe")[0])
    # An empty string means the dialog was cancelled; keep the old setting.
    if mergerPath:
        data.config.scriptmerger = mergerPath
def showAboutWindow():
    """Show the modal "About" dialog with version, authors and toolkit info."""
    QMessageBox.about(
        None,
        TRANSLATE("MainWindow", "About"),
        TRANSLATE(
            "MainWindow",
            ""+TITLE+"\n"
            "Version: "+VERSION+"\n"
            "Authors: "+(", ".join(AUTHORS))+"\n"
            "\n"
            "Written in: Python "+python_version()+"\n"
            "GUI: PySide2 "+__version__+"\n"
            "\n"
            "Thank you for using "+TITLE+"!"))
def openUrl(url: str):
    """Open *url* in the user's default web browser."""
    webbrowser.open(url)
def openFile(path: str):
    """Open *path* with the most appropriate handler for the platform.

    Executables are launched from their own directory; regular files go to
    the desktop opener (with $EDITOR / browser fallbacks); directories are
    delegated to openFolder().  Any failure is reported via a dialog.
    """
    try:
        if isExecutable(path):
            # Launch executables with their own folder as working directory.
            directory, _ = os.path.split(path)
            subprocess.Popen([path], cwd=directory)
        elif os.path.isfile(path):
            if platform == "linux" or platform == "darwin":
                try:
                    subprocess.call(["xdg-open", path])
                except OSError as e:
                    # No xdg-open: try the user's $EDITOR, else the browser.
                    editor = os.getenv('EDITOR')
                    if editor:
                        subprocess.Popen([editor, path])
                    else:
                        webbrowser.open(path, new=1)
            else:
                try:
                    # Windows: hand the file to its registered application.
                    os.startfile(path)
                except Exception as e:
                    webbrowser.open(path, new=1)
        elif os.path.isdir(path):
            openFolder(path)
        else:
            raise FileNotFoundError(path)
    except Exception as e:
        # Surface any failure to the user instead of crashing the UI.
        MessageCouldntOpenFile(path, formatUserError(e))
def openFolder(path: str):
    """Open *path* in the system file browser.

    Climbs up to the nearest existing directory when *path* is a file or
    does not exist.
    """
    while path and not os.path.isdir(path):
        path, _ = os.path.split(path)
    if platform == "linux" or platform == "darwin":
        try:
            subprocess.Popen(["xdg-open", path])
        except OSError as e:
            # No xdg-open available: fall back to the browser handler.
            webbrowser.open(path, new=1)
    else:
        # Windows: open Explorer in "explore" mode.
        os.startfile(path, "explore")
def copyFolder(src, dst):
    '''Copy folder from src to dst, replacing dst if it already exists.

    The previous implementation busy-waited forever on
    ``while os.path.isdir(dst): pass`` after ``rmtree``; on Windows the
    directory entry can linger briefly, but an unremovable directory made
    this loop hang the program. The wait is now bounded.
    '''
    import time
    dst = os.path.normpath(dst)
    src = os.path.normpath(src)
    print(
        f'copying from {src} to {dst} (exists: {os.path.isdir(dst)})')
    rmtree(dst, ignore_errors=True)
    # Give the filesystem up to 10s to actually release the directory.
    deadline = time.monotonic() + 10.0
    while os.path.isdir(dst) and time.monotonic() < deadline:
        time.sleep(0.05)
    copytree(src, dst)
def restartProgram():
    '''Restarts the program

    Persists the current configuration first, then replaces the running
    process image with a fresh interpreter via os.execl (never returns).
    '''
    data.config.write()
    python = sys.executable
    os.execl(python, python, *sys.argv)
def getFile(directory="", extensions="", title="Select Files or Folders"):
    '''Opens custom dialog for selecting multiple folders or files'''
    dialog = FileDialog(None, title, str(directory), str(extensions))
    return dialog.selectedFiles
def getSize(start_path='.'):
    '''Return the total size in bytes of all files under *start_path*.'''
    return sum(
        os.path.getsize(os.path.join(dirpath, name))
        for dirpath, _, filenames in os.walk(start_path)
        for name in filenames)
def getIcon(filename):
    '''Gets icon from the res folder'''
    iconPath = getProgramRootFolder() + '/res/' + filename
    result = QtGui.QIcon()
    result.addFile(iconPath)
    return result
def getKey(item):
    '''Sort key for mod-list entries: the element at index 1.'''
    sortValue = item[1]
    return sortValue
def isData(name):
    '''Checks if given name represents correct mod folder or not

    A valid name is "mod" followed by at least one character, optionally
    prefixed with "~" (disabled mod). Returns the match object or None.
    '''
    return re.match(r"^~?mod.+$", name)
def isExecutable(name: str) -> bool:
    '''Return True when *name* has a Windows executable extension.'''
    # Note: comparison is case-sensitive, matching the original behavior.
    return os.path.splitext(name)[1] in ('.exe', '.bat')
def translateToChosenLanguage() -> bool:
    '''Load and install the translation configured by the user.

    Returns True on success, False when the language is unset, missing
    on disk, or the translator could not be installed.
    '''
    language = data.config.language
    # Guard clause: nothing configured, or the translation file is absent.
    if not language or not os.path.exists("translations/" + language):
        print("chosen language not found:", language, file=sys.stderr)
        return False
    print("loading translation", language)
    data.translator.load("translations/" + language)
    if not data.app.installTranslator(data.translator):
        print("loading translation failed", file=sys.stderr)
        return False
    return True
def detectEncoding(path: str) -> str:
    '''Detect the text encoding of the file at *path*.

    Returns "utf-8" when the file does not exist or when cchardet cannot
    identify the encoding (its "encoding" field is None in that case,
    which would previously leak out of this function).
    '''
    if not os.path.exists(path):
        return "utf-8"
    with open(path, 'rb') as file:
        raw = file.read()
    detected = cchardet.detect(raw)
    print("detected", path, "as", detected)
    # cchardet reports None for undetectable input; fall back to utf-8 so
    # callers always receive a usable codec name.
    return detected["encoding"] or "utf-8"
def fixUserSettingsDuplicateBrackets():
    '''Fix invalid section names in user.settings

    Sections that were written as "[[Foo]]" (extra brackets) are merged
    into the correctly named "Foo" section and the broken section is
    removed. The file is rewritten as UTF-8.

    The previous bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; it is narrowed to Exception and logs the cause.
    '''
    settingsFile = data.config.settings + "/user.settings"
    try:
        config = ConfigParser(strict=False)
        config.optionxform = str  # preserve option-name case
        config.read(settingsFile, encoding=detectEncoding(settingsFile))
        for section in config.sections():
            # Strip any number of surrounding stray brackets.
            newSection = section.lstrip("[").rstrip("]")
            if newSection != section:
                items = config.items(section)
                if not config.has_section(newSection):
                    config.add_section(newSection)
                for option, value in items:
                    config.set(newSection, option, value)
                config.remove_section(section)
        with open(settingsFile, 'w', encoding="utf-8") as userfile:
            config.write(userfile, space_around_delimiters=False)
    except Exception as error:
        print("fixing duplicate brackets failed", error)
def throttle(ms: int):
    """Decorator ensures function that can only be called once every `ms` milliseconds.

    Calls arriving within the window are dropped and return None.
    The wrapper now carries functools.wraps so the decorated function
    keeps its name and docstring (the original decorator lost them).
    """
    from datetime import datetime, timedelta
    from functools import wraps

    def decorate(f):
        last_modified = None

        @wraps(f)  # preserve __name__/__doc__ of the wrapped function
        def wrapped(*args, **kwargs):
            nonlocal last_modified
            if not last_modified or datetime.now() - last_modified > timedelta(milliseconds=ms):
                result = f(*args, **kwargs)
                last_modified = datetime.now()
                return result
            return None
        return wrapped
    return decorate
def debounce(ms: int):
    """Debounce a functions execution by {ms} milliseconds"""
    delay_seconds = ms / 1000.0

    def decorator(fun):
        def debounced(*args, **kwargs):
            # Cancel any still-pending invocation before rescheduling.
            pending = getattr(debounced, "timer", None)
            if pending is not None:
                pending.cancel()
            debounced.timer = Timer(
                delay_seconds, lambda: fun(*args, **kwargs))
            debounced.timer.start()
        return debounced
    return decorator
| 5,965
| 0
| 421
|
eb3202558a1754995d72168519173bed7895d23a
| 421
|
py
|
Python
|
beagle/nodes/__init__.py
|
limkokhian/beagle
|
791e83db94e5a8ab1965b155bb79d32bb259d2b3
|
[
"MIT"
] | 1,139
|
2019-03-24T09:09:05.000Z
|
2022-03-27T14:54:38.000Z
|
beagle/nodes/__init__.py
|
limkokhian/beagle
|
791e83db94e5a8ab1965b155bb79d32bb259d2b3
|
[
"MIT"
] | 78
|
2019-03-24T16:56:06.000Z
|
2022-02-27T21:31:38.000Z
|
beagle/nodes/__init__.py
|
limkokhian/beagle
|
791e83db94e5a8ab1965b155bb79d32bb259d2b3
|
[
"MIT"
] | 149
|
2019-03-24T16:44:45.000Z
|
2022-03-11T12:20:51.000Z
|
from __future__ import absolute_import
from .alert import Alert
from .domain import URI, Domain
from .file import File, FileOf
from .ip_address import IPAddress
from .node import Node
from .process import Process, SysMonProc
from .registry import RegistryKey
__all__ = [
"Node",
"URI",
"Domain",
"File",
"FileOf",
"IPAddress",
"SysMonProc",
"Process",
"RegistryKey",
"Alert",
]
| 17.541667
| 40
| 0.679335
|
from __future__ import absolute_import
from .alert import Alert
from .domain import URI, Domain
from .file import File, FileOf
from .ip_address import IPAddress
from .node import Node
from .process import Process, SysMonProc
from .registry import RegistryKey
__all__ = [
"Node",
"URI",
"Domain",
"File",
"FileOf",
"IPAddress",
"SysMonProc",
"Process",
"RegistryKey",
"Alert",
]
| 0
| 0
| 0
|
6239dc1e86fe07389cf63df6392bc8d72a0e1825
| 111
|
py
|
Python
|
hmt/build/update_mode.py
|
dfioravanti/hmt
|
df79404076ec7acea0cfb12b636d58e3ffc83bc5
|
[
"MIT"
] | 25
|
2020-05-14T13:25:42.000Z
|
2021-11-09T10:09:27.000Z
|
hmt/build/update_mode.py
|
dfioravanti/hmt
|
df79404076ec7acea0cfb12b636d58e3ffc83bc5
|
[
"MIT"
] | 19
|
2020-05-05T19:47:41.000Z
|
2021-02-05T17:06:53.000Z
|
hmt/build/update_mode.py
|
dfioravanti/hmt
|
df79404076ec7acea0cfb12b636d58e3ffc83bc5
|
[
"MIT"
] | 6
|
2020-05-16T10:02:48.000Z
|
2021-10-04T08:03:49.000Z
|
import enum
__all__ = ["UpdateMode"]
| 10.090909
| 28
| 0.603604
|
import enum
class UpdateMode(enum.Enum):
GEN = 0
REPLAY = 1
MIXED = 2
__all__ = ["UpdateMode"]
| 0
| 48
| 23
|
23e6a1ec99527cce378215a514ba7467ba480ba4
| 720
|
py
|
Python
|
py_convert/convertOfficeOsage.py
|
sven-oly/LanguageTools
|
8c1e0bbae274232064e9796aa401c906797af452
|
[
"Apache-2.0"
] | 3
|
2021-02-02T12:11:27.000Z
|
2021-12-28T03:58:05.000Z
|
py_convert/convertOfficeOsage.py
|
sven-oly/LanguageTools
|
8c1e0bbae274232064e9796aa401c906797af452
|
[
"Apache-2.0"
] | 7
|
2020-12-11T00:44:52.000Z
|
2022-03-01T18:00:00.000Z
|
py_convert/convertOfficeOsage.py
|
sven-oly/LanguageTools
|
8c1e0bbae274232064e9796aa401c906797af452
|
[
"Apache-2.0"
] | 3
|
2019-06-08T17:46:47.000Z
|
2021-09-16T02:03:56.000Z
|
# -*- coding: utf-8 -*-
#
# Convert list of Office files (.docx, .xslx, .pptx) files from
# old text encoding to Unicode.
import os
import re
import sys
import convertOffice
import osageConversion
import convertUtil
if __name__ == "__main__":
main(sys.argv)
| 20
| 69
| 0.661111
|
# -*- coding: utf-8 -*-
#
# Convert list of Office files (.docx, .xslx, .pptx) files from
# old text encoding to Unicode.
import os
import re
import sys
import convertOffice
import osageConversion
import convertUtil
def main(argv):
args = convertUtil.parseArgs()
newUnicodeFont = "NotoSans-Regular"
print '** args = %s' % args
paths_to_doc = args.filenames
print('Args = %s'% args)
FONTS_TO_CONVERT = ['Official Osage Language', ]
for input in paths_to_doc:
convertOffice.convertOffice(input, args.output_dir,
osageConversion.oldEncodingToUnicode,
FONTS_TO_CONVERT, newUnicodeFont)
if __name__ == "__main__":
main(sys.argv)
| 431
| 0
| 23
|
963d158b9ada3a6c66832e3fa7f7b6169041484c
| 822
|
py
|
Python
|
Cankaoxiaoxi/article_spider.py
|
StevenChaoo/WebCrawler
|
74711ac15b934b2e5a0eb663a0a2b6dd35050428
|
[
"MIT"
] | 1
|
2021-04-20T13:22:17.000Z
|
2021-04-20T13:22:17.000Z
|
Cankaoxiaoxi/article_spider.py
|
StevenChaoo/WebCrawler
|
74711ac15b934b2e5a0eb663a0a2b6dd35050428
|
[
"MIT"
] | null | null | null |
Cankaoxiaoxi/article_spider.py
|
StevenChaoo/WebCrawler
|
74711ac15b934b2e5a0eb663a0a2b6dd35050428
|
[
"MIT"
] | null | null | null |
#-*- coding:utf-8 -*-
import time
from bs4 import BeautifulSoup
from user_agents import agents
import requests
import random
import re
def get_article(url):
'''
:param url: 指定日期的链接
:return content: 指定url的正文内容
'''
agent = random.choice(agents)
header = {'User-Agent': agent}
res = requests.get(url, headers=header)
time.sleep(2)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
newsArticle = soup.select('.articleText')
pattern = re.compile(r'<[^>]+>', re.S)
for item in (str(newsArticle[0]).split('<strong>')):
new_item = item.split('</strong>')
if len(new_item) > 1:
contents = pattern.sub('', str(new_item))
content_list = contents.split('\'')
content = ''.join(content_list)
return content
| 28.344828
| 56
| 0.618005
|
#-*- coding:utf-8 -*-
import time
from bs4 import BeautifulSoup
from user_agents import agents
import requests
import random
import re
def get_article(url):
'''
:param url: 指定日期的链接
:return content: 指定url的正文内容
'''
agent = random.choice(agents)
header = {'User-Agent': agent}
res = requests.get(url, headers=header)
time.sleep(2)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
newsArticle = soup.select('.articleText')
pattern = re.compile(r'<[^>]+>', re.S)
for item in (str(newsArticle[0]).split('<strong>')):
new_item = item.split('</strong>')
if len(new_item) > 1:
contents = pattern.sub('', str(new_item))
content_list = contents.split('\'')
content = ''.join(content_list)
return content
| 0
| 0
| 0
|
985b36fc0c9644840ca80d083631e6ddadc4631c
| 331
|
py
|
Python
|
aplicacion/migrations/0009_remove_producto_ruta.py
|
jffc-dev/Python-Django-Tecshop
|
c26ab6da20eca0483b900d253eacc37d2e8b1f26
|
[
"MIT"
] | null | null | null |
aplicacion/migrations/0009_remove_producto_ruta.py
|
jffc-dev/Python-Django-Tecshop
|
c26ab6da20eca0483b900d253eacc37d2e8b1f26
|
[
"MIT"
] | null | null | null |
aplicacion/migrations/0009_remove_producto_ruta.py
|
jffc-dev/Python-Django-Tecshop
|
c26ab6da20eca0483b900d253eacc37d2e8b1f26
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.4 on 2021-01-13 17:40
from django.db import migrations
| 18.388889
| 50
| 0.595166
|
# Generated by Django 3.1.4 on 2021-01-13 17:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('aplicacion', '0008_auto_20210113_1701'),
]
operations = [
migrations.RemoveField(
model_name='producto',
name='ruta',
),
]
| 0
| 225
| 23
|
bf9b5afa26e7111b3c8082256ea21298de72cc49
| 679
|
py
|
Python
|
houdini/scripts/123.py
|
sashaouellet/SDMTools
|
edb529398b07a577a5492887fe840c6cfd891551
|
[
"MIT"
] | 7
|
2017-11-27T20:51:11.000Z
|
2020-07-18T22:51:46.000Z
|
houdini/scripts/123.py
|
tws0002/SDMTools
|
edb529398b07a577a5492887fe840c6cfd891551
|
[
"MIT"
] | 7
|
2017-12-03T21:25:19.000Z
|
2018-02-12T08:03:29.000Z
|
houdini/scripts/123.py
|
tws0002/SDMTools
|
edb529398b07a577a5492887fe840c6cfd891551
|
[
"MIT"
] | 3
|
2018-04-27T02:45:28.000Z
|
2020-02-15T14:12:45.000Z
|
import os, json
import hdefereval
import sdm.houdini
from sdm.houdini.dialog import checkForUpdates
from sdm.houdini.shelves import addShelf
from sdm.houdini.node import applyDefaultShapesAndColors
hdefereval.executeDeferred(checkUpdates)
hdefereval.executeDeferred(addShelf)
hdefereval.executeDeferred(applyDefaultShapesAndColors)
| 26.115385
| 66
| 0.799705
|
import os, json
import hdefereval
import sdm.houdini
from sdm.houdini.dialog import checkForUpdates
from sdm.houdini.shelves import addShelf
from sdm.houdini.node import applyDefaultShapesAndColors
def checkUpdates():
settingsPath = os.path.join(sdm.houdini.folder, 'settings.json')
if os.path.exists(settingsPath):
with open(settingsPath) as file:
settingsJson = json.loads(file.read())
autoCheckUpdates = settingsJson.get('autoCheckUpdates', False)
if not autoCheckUpdates:
return
checkForUpdates(silent=True)
hdefereval.executeDeferred(checkUpdates)
hdefereval.executeDeferred(addShelf)
hdefereval.executeDeferred(applyDefaultShapesAndColors)
| 322
| 0
| 23
|
1412e20f7e6942a0802707fbdf32833a07c7b7bc
| 477
|
py
|
Python
|
main/main.py
|
WonderSeven/DSDA
|
88266ea5dd53d918ba3cd74c7d6bbf431a134e95
|
[
"MIT"
] | 29
|
2020-04-15T09:24:56.000Z
|
2021-09-18T04:04:55.000Z
|
main/main.py
|
WonderSeven/DSDA
|
88266ea5dd53d918ba3cd74c7d6bbf431a134e95
|
[
"MIT"
] | null | null | null |
main/main.py
|
WonderSeven/DSDA
|
88266ea5dd53d918ba3cd74c7d6bbf431a134e95
|
[
"MIT"
] | 5
|
2020-04-14T05:49:16.000Z
|
2021-05-16T05:04:12.000Z
|
'''
@ Author: Tiexin
@ email: tiexinqin@163.com
@Data: 2019-8-14
'''
from engine.configs.parser import BaseOptions
# import engine.fsl_trainer as trainer
import engine.ssl_trainer as trainer
import sys
sys.dont_write_bytecode = True
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
if __name__ == '__main__':
# Load experiment setting
opts = BaseOptions().opts
trainer = trainer.Trainer(opts)
trainer.train()
| 17.666667
| 45
| 0.719078
|
'''
@ Author: Tiexin
@ email: tiexinqin@163.com
@Data: 2019-8-14
'''
from engine.configs.parser import BaseOptions
# import engine.fsl_trainer as trainer
import engine.ssl_trainer as trainer
import sys
sys.dont_write_bytecode = True
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
if __name__ == '__main__':
# Load experiment setting
opts = BaseOptions().opts
trainer = trainer.Trainer(opts)
trainer.train()
| 0
| 0
| 0
|
86ccc60f32ffaaf7bbedc36cb5aaff8ddc66686a
| 284
|
py
|
Python
|
q057.py
|
sjf/project_euler
|
8514710e2018136ba8a087ae58cba35370700f6f
|
[
"MIT"
] | null | null | null |
q057.py
|
sjf/project_euler
|
8514710e2018136ba8a087ae58cba35370700f6f
|
[
"MIT"
] | null | null | null |
q057.py
|
sjf/project_euler
|
8514710e2018136ba8a087ae58cba35370700f6f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import lib
n=1
d=1
i=0
N=1000
count = 0
while i < N+1:
#print(i,str(n)+'/'+str(d),n/d)
if (lib.num_digits(n) >lib.num_digits(d)):
count += 1
#term' = 1 + 1/(1+term)
n+= d #1 + term
n,d=d,n #1/(1+term)
n+= d #1+1/(1+term)
i+=1
print(count)
| 14.2
| 44
| 0.53169
|
#!/usr/bin/env python3
import lib
n=1
d=1
i=0
N=1000
count = 0
while i < N+1:
#print(i,str(n)+'/'+str(d),n/d)
if (lib.num_digits(n) >lib.num_digits(d)):
count += 1
#term' = 1 + 1/(1+term)
n+= d #1 + term
n,d=d,n #1/(1+term)
n+= d #1+1/(1+term)
i+=1
print(count)
| 0
| 0
| 0
|
e9e9d7ae49fb0318befe83eb1b65309eb9166fad
| 7,052
|
py
|
Python
|
Final.py
|
art-hack/Udemy_Coupon_Scraper
|
19e5c1f3b1580524d7eef1d14fc0dfeb34a6bcb5
|
[
"MIT"
] | 4
|
2019-03-04T21:32:06.000Z
|
2020-05-23T16:43:55.000Z
|
Final.py
|
art-hack/Udemy_Coupon_Scraper
|
19e5c1f3b1580524d7eef1d14fc0dfeb34a6bcb5
|
[
"MIT"
] | 1
|
2019-03-04T21:34:52.000Z
|
2019-04-19T14:58:45.000Z
|
Final.py
|
art-hack/Udemy_Coupon_Scraper
|
19e5c1f3b1580524d7eef1d14fc0dfeb34a6bcb5
|
[
"MIT"
] | 2
|
2019-04-20T10:39:34.000Z
|
2020-11-24T19:45:32.000Z
|
import csv
from bs4 import BeautifulSoup
import requests
# function to scrape smartybro
# Code to scrape Anycouponcode.com
# function to scrape BuzzUdemy.com
# function to scrape Comidoc.com
# function to scrape coupontry.com
# function to scrape udemycoupon.learnviral
# function to scrape Udemycoupon.club
# function that sorts the function to be used
# Main driver Program
listFile2 = open('output.csv', 'w')
listFile2.close()
with open('input.txt') as openfileobject:
for line in openfileobject:
page_link = line
checker(page_link)
| 32.953271
| 115
| 0.595576
|
import csv
from bs4 import BeautifulSoup
import requests
# function to scrape smartybro
def smartybro(string):
page_response = requests.get(string, timeout=15)
if page_response.status_code == 200:
page_content = BeautifulSoup(page_response.content, "html.parser")
header = page_content.find_all('h3', class_="sing-tit")
linker = page_content.find_all('a', attrs={'class': 'fasc-button fasc-size-xlarge fasc-type-flat'})
for row in header:
header = row.text
# prices_clean = prices["href"]
for a in linker:
linker = a['href']
# prices= prices.url
u1 = requests.get(linker, timeout=5)
# linker = u1.url
# with open('output.csv', 'wb') as file:
# for line1 in header:
# file.write(line1)
# file.write(',')
# for line2 in linker:
# file.write(line2)
# file.write('\n')
# RESULT = [header, linker]
header = str(header).encode('utf-8')
linker = str(linker).encode('utf-8')
listFile2 = open('output.csv', 'a')
writer2 = csv.writer(listFile2)
writer2.writerow([header] + [linker] + [u1.url])
# print u1.url
print header
print u1.url
else:
print "Error Fetching the Data"
# Code to scrape Anycouponcode.com
def anycode(check):
page_response = requests.get(check, timeout=15)
if page_response.status_code == 200:
page_content = BeautifulSoup(page_response.content, "html.parser")
header = page_content.find('h2', class_="alt")
linker = page_content.find_all('a', attrs={'target': '_blank', 'rel': 'noopener'})
header = header.text
header = header.encode('ascii', 'ignore')
for a in linker:
linker = a['href']
# u1 = requests.get(linker, timeout=5)
header = str(header).encode('utf-8')
linker = str(linker).encode('utf-8')
listFile2 = open('output.csv', 'a')
writer2 = csv.writer(listFile2)
writer2.writerow([header] + [linker])
print header
print linker
else:
print "Error Fetching the Data"
# function to scrape BuzzUdemy.com
def bu(check):
page_response = requests.get(check, timeout=5)
if page_response.status_code == 200:
page_content = BeautifulSoup(page_response.content, "html.parser")
header = page_content.find_all('h2', class_="title front-view-title")
linker = page_content.find_all('a', class_="deal-button show-coupon-button activate-button activate-modal")
for row in header:
header = row.text
for a in linker:
linker = a['href']
header = str(header).encode('utf-8')
linker = str(linker).encode('utf-8')
listFile2 = open('output.csv', 'a')
writer2 = csv.writer(listFile2)
writer2.writerow([header] + [linker])
print header
print linker
else:
print "Error Fetching the Data"
# function to scrape Comidoc.com
def comidoc(check):
page_response = requests.get(check, timeout=5)
if page_response.status_code == 200:
page_content = BeautifulSoup(page_response.content, "html.parser")
header = page_content.find_all('h1', class_="header-post-title-class")
linker = page_content.find_all('a', class_="maxbutton-3 maxbutton maxbutton-enroll-lt")
for row in header:
header = row.text
for a in linker:
linker = a['href']
listFile2 = open('output.csv', 'a')
writer2 = csv.writer(listFile2)
writer2.writerow([header] + [linker])
print header
print linker
else:
print 'Error Fetching the Data'
# function to scrape coupontry.com
def coupontry(check):
page_response = requests.get(check, timeout=5)
if page_response.status_code == 200:
page_content = BeautifulSoup(page_response.content, "html.parser")
header = page_content.find_all('h1', class_="entry-title")
linker = page_content.find_all('a', attrs={'title': 'Click to open site'})
for row in header:
header = row.text
for a in linker:
linker = a['href']
listFile2 = open('output.csv', 'a')
writer2 = csv.writer(listFile2)
writer2.writerow([header] + [linker])
print header
print linker
else:
print "Error Fetching the Data"
# function to scrape udemycoupon.learnviral
def learnviral(check):
page_response = requests.get(check, timeout=5)
if page_response.status_code == 200:
page_content = BeautifulSoup(page_response.content, "html.parser")
header = page_content.find_all('h1', class_='entry-title')
linker = page_content.find_all('a', attrs={'title': 'Click to open site'})
for row in header:
header = row.text
for a in linker:
linker = a['href']
listFile2 = open('output.csv', 'a')
writer2 = csv.writer(listFile2)
writer2.writerow([header] + [linker])
print header
print linker
else:
print "Error Fetching the Data"
# function to scrape Udemycoupon.club
def ucc(check):
page_response = requests.get(check, timeout=5)
if page_response.status_code == 200:
page_content = BeautifulSoup(page_response.content, "html.parser")
header = page_content.find_all('h1', class_="post-title entry-title")
linker = page_content.find_all('blockquote')
linker = linker[0].find('a')
for row in header:
header = row.text
for a in linker:
linker = a
header = header[1:-1]
listFile2 = open('output.csv', 'a')
writer2 = csv.writer(listFile2)
writer2.writerow([header] + [linker])
print header
print linker
else:
print "Error Fetching the Data"
# function that sorts the function to be used
def checker(check):
if 'smartybro' in check:
try:
smartybro(check)
except Exception:
pass
elif 'anycouponcode' in check:
try:
anycode(check)
except Exception:
pass
elif 'buzzudemy' in check:
try:
bu(check)
except Exception:
pass
elif 'comidoc' in check:
try:
comidoc(check)
except Exception:
pass
elif 'coupontry' in check:
try:
coupontry(check)
except Exception:
pass
elif 'udemycoupon.learnviral' in check:
try:
learnviral(check)
except Exception:
pass
elif 'udemycoupon.club' in check:
try:
ucc(check)
except Exception:
pass
# Main driver Program
listFile2 = open('output.csv', 'w')
listFile2.close()
with open('input.txt') as openfileobject:
for line in openfileobject:
page_link = line
checker(page_link)
| 6,302
| 0
| 176
|
8869c65f358751230b27f2c8d14edbe6dee9aa3f
| 780
|
py
|
Python
|
app/hid/write.py
|
liuliu/tinypilot
|
af57e88303c4b9c8fbec0ff0102891829bbd98f1
|
[
"MIT"
] | null | null | null |
app/hid/write.py
|
liuliu/tinypilot
|
af57e88303c4b9c8fbec0ff0102891829bbd98f1
|
[
"MIT"
] | null | null | null |
app/hid/write.py
|
liuliu/tinypilot
|
af57e88303c4b9c8fbec0ff0102891829bbd98f1
|
[
"MIT"
] | null | null | null |
import threading
| 28.888889
| 79
| 0.683333
|
import threading
class Error(Exception):
pass
class WriteError(Error):
pass
def _write_to_hid_interface_immediately(hid_path, buffer):
with open(hid_path, 'wb+') as hid_handle:
hid_handle.write(bytearray(buffer))
def write_to_hid_interface(hid_path, buffer):
# Writes can time out, so attempt the write in a separate thread to avoid
# hanging.
write_thread = threading.Thread(target=_write_to_hid_interface_immediately,
args=(hid_path, buffer))
write_thread.start()
write_thread.join(timeout=0.5)
if write_thread.is_alive():
# If the thread is still alive, it means the join timed out.
raise WriteError(
'Failed to write to HID interface. Is USB cable connected?')
| 645
| 23
| 92
|
2e2ca5beb340b335f03d2f5021040e4b210ee236
| 1,068
|
py
|
Python
|
radar_class/network.py
|
dishierweidu/LCR_1.0_Reappear
|
329d4c80291c58d05fe3d6dab6dd09f41967ad08
|
[
"MIT"
] | null | null | null |
radar_class/network.py
|
dishierweidu/LCR_1.0_Reappear
|
329d4c80291c58d05fe3d6dab6dd09f41967ad08
|
[
"MIT"
] | null | null | null |
radar_class/network.py
|
dishierweidu/LCR_1.0_Reappear
|
329d4c80291c58d05fe3d6dab6dd09f41967ad08
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
default network class
给神经网络类的接口格式定义,神经网络具体需要自行添加
'''
import pickle as pkl
| 28.864865
| 131
| 0.650749
|
# -*- coding: utf-8 -*-
'''
default network class
给神经网络类的接口格式定义,神经网络具体需要自行添加
'''
import pickle as pkl
class Predictor(object):
def __init__(self,weights = ""):
'''
对于示例类,不会提供神经网络预测功能,但对于我们提供的demo,可以加载pkl来获得实际的预测结果
:param weights:pkl文件的存放地址
'''
self._weights = weights
with open(self._weights,'rb') as net:
self._predicted_data = pkl.load(net)
def infer(self,imgs,id):
'''
这个函数用来预测
:param imgs:list of input images
:return:
img_preds: 车辆预测框,a list of the prediction to each image 各元素格式为(predicted_class,conf_score,bounding box(format:x0,y0,x1,y1))
car_locations: 对于每张图片装甲板预测框(车辆定位) np.ndarray 和对应的车辆预测框(与装甲板预测框的车辆预测框序号对应)的列表
上述两个成员具体定义为:
(1)装甲板预测框格式,(N,装甲板四点+装甲板网络置信度+装甲板类型+其对应的车辆预测框序号(即其为哪个车辆预测框ROI区域预测生成的)+四点的bounding box)
其他敌方提到该格式,会写为(N,fp+conf+cls+img_no+bbox)
(2)车辆预测框格式 np.ndarray (N,x0+y0+x1+y1)
'''
img_preds,car_locations = self._predicted_data[id]
return img_preds,car_locations
| 0
| 1,368
| 23
|
070d03a595640cd657ae34d4b8d5115573a0f490
| 1,156
|
py
|
Python
|
Chapter03/process_data.py
|
PacktPublishing/Practical-Data-Wrangling
|
a24caa61a2d5513947d79d78154699901ea75c3a
|
[
"MIT"
] | 12
|
2017-11-18T19:08:29.000Z
|
2022-01-30T12:42:43.000Z
|
Chapter03/process_data.py
|
PacktPublishing/Practical-Data-Wrangling
|
a24caa61a2d5513947d79d78154699901ea75c3a
|
[
"MIT"
] | null | null | null |
Chapter03/process_data.py
|
PacktPublishing/Practical-Data-Wrangling
|
a24caa61a2d5513947d79d78154699901ea75c3a
|
[
"MIT"
] | 10
|
2018-01-10T09:33:39.000Z
|
2022-03-01T23:30:33.000Z
|
import json
import pprint
######### OPEN AND READ THE DATA FILE ###########
inFile = open("data/scf_data.json","r")
scf_data = json.load(inFile)
# print(scf_data)
inFile.close()
############ DATA EXPLORATION #############
# dataType = str(type(scf_data))
# print("type of data: " + dataType)
# print("dictionary keys: " + str(scf_data.keys()))
# issues_data_type = str(type(scf_data["issues"]))
# print("data type of the 'issues' value: " + issues_data_type )
# print("first element of 'issues' list:")
# print(scf_data["issues"][0])
## print data variables
# pp = pprint.PrettyPrinter(indent=4)
# print("first data entry:")
# pp.pprint(scf_data["issues"][0])
############ DATA MODIFICATION #############
new_scf_data = []
variables = ["address","created_at","summary","description","lng","lat","rating"]
for old_entry in scf_data["issues"]:
new_entry={}
for variable in variables:
new_entry[variable] = old_entry[variable]
# print(new_entry)
new_scf_data.append(new_entry)
### OUTPUTTING THE NEW DATA TO A NEW FILE ###
outfile = open("data/scf_output_data.json","w")
json.dump(new_scf_data, outfile, indent=4)
outfile.close()
| 30.421053
| 81
| 0.657439
|
import json
import pprint
######### OPEN AND READ THE DATA FILE ###########
inFile = open("data/scf_data.json","r")
scf_data = json.load(inFile)
# print(scf_data)
inFile.close()
############ DATA EXPLORATION #############
# dataType = str(type(scf_data))
# print("type of data: " + dataType)
# print("dictionary keys: " + str(scf_data.keys()))
# issues_data_type = str(type(scf_data["issues"]))
# print("data type of the 'issues' value: " + issues_data_type )
# print("first element of 'issues' list:")
# print(scf_data["issues"][0])
## print data variables
# pp = pprint.PrettyPrinter(indent=4)
# print("first data entry:")
# pp.pprint(scf_data["issues"][0])
############ DATA MODIFICATION #############
new_scf_data = []
variables = ["address","created_at","summary","description","lng","lat","rating"]
for old_entry in scf_data["issues"]:
new_entry={}
for variable in variables:
new_entry[variable] = old_entry[variable]
# print(new_entry)
new_scf_data.append(new_entry)
### OUTPUTTING THE NEW DATA TO A NEW FILE ###
outfile = open("data/scf_output_data.json","w")
json.dump(new_scf_data, outfile, indent=4)
outfile.close()
| 0
| 0
| 0
|
07fe7f1caba5fdc28bcf5bfffb6d3bfc5316ec2e
| 535
|
py
|
Python
|
resources/migrations/0031_can_approve_reservation_permission.py
|
suutari-ai/respa
|
a944b1c13f855eaf5f883687b5fd025ece7c8176
|
[
"MIT"
] | 1
|
2018-11-13T06:03:27.000Z
|
2018-11-13T06:03:27.000Z
|
resources/migrations/0031_can_approve_reservation_permission.py
|
suutari-ai/respa
|
a944b1c13f855eaf5f883687b5fd025ece7c8176
|
[
"MIT"
] | 10
|
2018-11-21T14:37:17.000Z
|
2021-02-02T09:19:59.000Z
|
resources/migrations/0031_can_approve_reservation_permission.py
|
suutari-ai/respa
|
a944b1c13f855eaf5f883687b5fd025ece7c8176
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-24 11:51
from __future__ import unicode_literals
from django.db import migrations, models
| 26.75
| 151
| 0.650467
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-24 11:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resources', '0030_add_reservation_extra_fields'),
]
operations = [
migrations.AlterModelOptions(
name='unit',
options={'permissions': (('can_approve_reservation', 'Can approve reservation'),), 'verbose_name': 'unit', 'verbose_name_plural': 'units'},
),
]
| 0
| 357
| 23
|
67310797224dc8686504881cd9800c05e64aaada
| 5,434
|
py
|
Python
|
tests/libs/io_peripherals/test_io.py
|
izm51/obniz-python-sdk
|
40a738b5fe2c0a415cdc09f46d28c143982bfb07
|
[
"MIT"
] | 11
|
2019-03-22T12:02:11.000Z
|
2021-01-21T04:57:18.000Z
|
tests/libs/io_peripherals/test_io.py
|
izm51/obniz-python-sdk
|
40a738b5fe2c0a415cdc09f46d28c143982bfb07
|
[
"MIT"
] | 5
|
2019-03-02T08:28:25.000Z
|
2021-02-02T22:06:37.000Z
|
tests/libs/io_peripherals/test_io.py
|
izm51/obniz-python-sdk
|
40a738b5fe2c0a415cdc09f46d28c143982bfb07
|
[
"MIT"
] | 3
|
2019-07-20T06:55:09.000Z
|
2019-12-04T05:05:00.000Z
|
from time import sleep
import pytest
from ...utils import assert_finished, assert_obniz, assert_send, receive_json
| 31.051429
| 88
| 0.505889
|
from time import sleep
import pytest
from ...utils import assert_finished, assert_obniz, assert_send, receive_json
class TestPeripheralIO:
@pytest.mark.parametrize("input,expected", [(True, True), (1, True), (0, False)])
def test_output(self, obniz, input, expected):
obniz.io0.output(input)
assert_obniz(obniz)
assert_send(obniz, [{"io0": expected}])
assert_finished(obniz)
def test_output_over_pin(self, obniz):
with pytest.raises(
AttributeError, match=r"'Obniz' object has no attribute 'io20'"
):
obniz.io20.output(True)
assert_finished(obniz)
@pytest.mark.parametrize(
"input,expected",
[("5v", "push-pull5v"), ("3v", "push-pull3v"), ("open-drain", "open-drain")],
)
def test_drive(self, obniz, input, expected):
obniz.io1.drive(input)
assert_obniz(obniz)
assert_send(obniz, [{"io1": {"output_type": expected}}])
assert_finished(obniz)
@pytest.mark.parametrize(
"input,error_message",
[
(None, "please specify drive methods in string"),
("3.3v", "unknown drive method"),
],
)
def test_drive_error(self, obniz, input, error_message):
with pytest.raises(Exception, match=error_message):
obniz.io1.drive(input)
assert_finished(obniz)
@pytest.mark.parametrize(
"input,expected",
[
("5v", "pull-up5v"),
("3v", "pull-up3v"),
("0v", "pull-down"),
(None, "float"),
],
)
def test_pull(self, obniz, input, expected):
obniz.io3.pull(input)
assert_obniz(obniz)
assert_send(obniz, [{"io3": {"pull_type": expected}}])
assert_finished(obniz)
def test_input(self, mocker, obniz):
stub = mocker.stub()
obniz.io7.input(stub)
assert_obniz(obniz)
assert_send(obniz, [{"io7": {"direction": "input", "stream": True}}])
receive_json(obniz, [{"io7": True}])
assert stub.call_count == 1
assert stub.call_args[0][0] is True
receive_json(obniz, [{"io7": False}])
assert stub.call_count == 2
assert stub.call_args[0][0] is False
assert_finished(obniz)
def test_input_wait_true(self, obniz):
# def callback(result):
# assert result is True
obniz.io8.input_wait()
assert_obniz(obniz)
assert_send(obniz, [{"io8": {"direction": "input", "stream": False}}])
assert_finished(obniz)
sleep(0.01)
receive_json(obniz, [{"io8": True}])
def test_end(self, obniz):
obniz.io0.end()
assert_obniz(obniz)
assert_send(obniz, [{"io0": None}])
assert_finished(obniz)
# TODO: 怪しい
def test_input_wait_false(self, obniz):
# def callback(result):
# pass
obniz.io9.input_wait()
assert_obniz(obniz)
assert_send(obniz, [{"io9": {"direction": "input", "stream": False}}])
assert_finished(obniz)
sleep(0.01)
receive_json(obniz, [{"io10": True}])
def test_io_animation(self, obniz):
def state1(index):
# index = 0
obniz.io0.output(False)
obniz.io1.output(True)
def state2(index):
# index = 1
obniz.io0.output(True)
obniz.io1.output(False)
obniz.io.animation(
"animation-1",
"loop",
[{"duration": 10, "state": state1}, {"duration": 10, "state": state2}],
)
assert_obniz(obniz)
assert_send(
obniz,
[
{
"io": {
"animation": {
"name": "animation-1",
"status": "loop",
"states": [
{
"duration": 10,
"state": [{"io0": False}, {"io1": True}],
},
{
"duration": 10,
"state": [{"io0": True}, {"io1": False}],
},
],
}
}
}
],
)
assert_finished(obniz)
def test_io_animation_pause(self, obniz):
obniz.io.animation("animation-1", "pause")
assert_send(
obniz, [{"io": {"animation": {"name": "animation-1", "status": "pause"}}}]
)
def test_io_animation_pause2(self, obniz):
obniz.io.animation("anim", "pause")
assert_send(obniz, [{"io": {"animation": {"name": "anim", "status": "pause"}}}])
def test_io_animation_resume(self, obniz):
obniz.io.animation("a", "resume")
assert_send(obniz, [{"io": {"animation": {"name": "a", "status": "resume"}}}])
assert_finished(obniz)
def test_input_simple(self, obniz):
obniz.send({"io1": "get"})
assert_send(obniz, [{"io1": "get"}])
assert_finished(obniz)
    def test_output_detail(self, obniz):
        """A raw send() of a detailed output request passes through unchanged."""
        obniz.send({"io0": {"direction": "output", "value": True}})
        assert_send(obniz, [{"io0": {"direction": "output", "value": True}}])
        assert_finished(obniz)
| 4,230
| 1,069
| 23
|
0a97ce568723f7a1e0016b1d3dbd189335dcdffa
| 1,535
|
py
|
Python
|
plgx-esp-ui/polylogyx/wrappers/v1/host_wrappers.py
|
eclecticiq/eiq-er-ce
|
ebb12d5c4e0ee144f8166576924b8ce8dc5dfc94
|
[
"MIT"
] | null | null | null |
plgx-esp-ui/polylogyx/wrappers/v1/host_wrappers.py
|
eclecticiq/eiq-er-ce
|
ebb12d5c4e0ee144f8166576924b8ce8dc5dfc94
|
[
"MIT"
] | null | null | null |
plgx-esp-ui/polylogyx/wrappers/v1/host_wrappers.py
|
eclecticiq/eiq-er-ce
|
ebb12d5c4e0ee144f8166576924b8ce8dc5dfc94
|
[
"MIT"
] | 2
|
2021-11-12T10:25:02.000Z
|
2022-03-30T06:33:52.000Z
|
from flask_restplus import fields
from polylogyx.blueprints.v1.external_api import api
# Node Wrappers: flask-restplus response serializers for node (host) records.

# Hardware summary nested inside a full node record.
node_info_wrapper = api.model('node_info_wrapper', {
    'computer_name': fields.String(),
    'hardware_model': fields.String(),
    'hardware_serial': fields.String(),
    'hardware_vendor': fields.String(),
    'physical_memory': fields.String(),
    'cpu_physical_cores': fields.String()
})

# Full representation of an enrolled endpoint, including the various
# last-* checkin/query timestamps tracked by the server.
nodewrapper = api.model('nodewrapper', {
    'id':fields.Integer(),
    'host_identifier': fields.String(),
    'node_key': fields.String(),
    'last_ip': fields.String(),
    'platform': fields.String(),
    'os_info': fields.Raw(),
    'node_info': fields.Nested(node_info_wrapper, default=None),
    'network_info': fields.Raw(),
    'host_details': fields.Raw(),
    'last_checkin': fields.DateTime(default = None),
    'enrolled_on': fields.DateTime(default = None),
    'last_status': fields.DateTime(default = None),
    'last_result': fields.DateTime(default = None),
    'last_config': fields.DateTime(default = None),
    'last_query_read': fields.DateTime(default = None),
    'last_query_write': fields.DateTime(default = None),
})

# Minimal identifier pair used when tagging a node.
node_tag_wrapper = api.model('node_tag_wrapper', {
    'host_identifier': fields.String(),
    'node_key': fields.String()
})

# One status-log entry reported by a node (presumably osquery status logs —
# confirm against the ingestion side).
node_status_log_wrapper = api.model('node_status_log_wrapper', {
    'line': fields.Integer(),
    'message': fields.String(),
    'severity': fields.Integer(),
    'filename': fields.String(),
    'created': fields.DateTime(),
    'version': fields.String(),
})
| 34.111111
| 64
| 0.684039
|
from flask_restplus import fields
from polylogyx.blueprints.v1.external_api import api
# Node Wrappers — flask-restplus marshalling models for host endpoints.

# Nested hardware-summary serializer.
node_info_wrapper = api.model('node_info_wrapper', {
    'computer_name': fields.String(),
    'hardware_model': fields.String(),
    'hardware_serial': fields.String(),
    'hardware_vendor': fields.String(),
    'physical_memory': fields.String(),
    'cpu_physical_cores': fields.String()
})

# Complete node record; DateTime fields default to None until the node
# has actually checked in / queried.
nodewrapper = api.model('nodewrapper', {
    'id':fields.Integer(),
    'host_identifier': fields.String(),
    'node_key': fields.String(),
    'last_ip': fields.String(),
    'platform': fields.String(),
    'os_info': fields.Raw(),
    'node_info': fields.Nested(node_info_wrapper, default=None),
    'network_info': fields.Raw(),
    'host_details': fields.Raw(),
    'last_checkin': fields.DateTime(default = None),
    'enrolled_on': fields.DateTime(default = None),
    'last_status': fields.DateTime(default = None),
    'last_result': fields.DateTime(default = None),
    'last_config': fields.DateTime(default = None),
    'last_query_read': fields.DateTime(default = None),
    'last_query_write': fields.DateTime(default = None),
})

# Identifier pair used for node-tag operations.
node_tag_wrapper = api.model('node_tag_wrapper', {
    'host_identifier': fields.String(),
    'node_key': fields.String()
})

# Serializer for a single node status-log line.
node_status_log_wrapper = api.model('node_status_log_wrapper', {
    'line': fields.Integer(),
    'message': fields.String(),
    'severity': fields.Integer(),
    'filename': fields.String(),
    'created': fields.DateTime(),
    'version': fields.String(),
})
| 0
| 0
| 0
|
bfae2fc9aaa5732f8611fcd1cb8855def7a2b193
| 317
|
py
|
Python
|
Taller_control_repeticion/Ejercicio_07.py
|
willingtonino/Algoritmos_programacion_C4G2
|
2a2c94678ae981974539a8019f17108775521e23
|
[
"MIT"
] | null | null | null |
Taller_control_repeticion/Ejercicio_07.py
|
willingtonino/Algoritmos_programacion_C4G2
|
2a2c94678ae981974539a8019f17108775521e23
|
[
"MIT"
] | null | null | null |
Taller_control_repeticion/Ejercicio_07.py
|
willingtonino/Algoritmos_programacion_C4G2
|
2a2c94678ae981974539a8019f17108775521e23
|
[
"MIT"
] | 1
|
2021-10-31T22:54:45.000Z
|
2021-10-31T22:54:45.000Z
|
"""
Entradas
(X,M)-->int-->valores
Salida
Nueva experiencia Monster-->int-->E
"""
#Caja negra
while True:
#Entrada
valores=input("")
(X,M)=valores.split(" ")
X=int(X)
M=int(M)
#Caja negra
if (X==0) and M==0:
break
else:
E=X*M
#Salida
print(E)
| 15.85
| 36
| 0.492114
|
"""
Entradas
(X,M)-->int-->valores
Salida
Nueva experiencia Monster-->int-->E
"""
#Caja negra
while True:
#Entrada
valores=input("")
(X,M)=valores.split(" ")
X=int(X)
M=int(M)
#Caja negra
if (X==0) and M==0:
break
else:
E=X*M
#Salida
print(E)
| 0
| 0
| 0
|
abbe5fe994b9cb79caff3d7066a3820c28428e35
| 1,388
|
py
|
Python
|
print_nodes.py
|
halfak/wikitax
|
acb084dc4f991d95dc08fdead19b50987ba968f4
|
[
"MIT"
] | 5
|
2019-12-09T21:46:27.000Z
|
2020-06-11T20:37:26.000Z
|
print_nodes.py
|
halfak/wikitax
|
acb084dc4f991d95dc08fdead19b50987ba968f4
|
[
"MIT"
] | 1
|
2019-12-12T21:59:15.000Z
|
2019-12-12T21:59:15.000Z
|
print_nodes.py
|
wikimedia/wikitax
|
acb084dc4f991d95dc08fdead19b50987ba968f4
|
[
"MIT"
] | null | null | null |
"""
Print out the nodes of a taxonomy
Usage:
print_nodes (-h | help)
print_nodes <taxon>... [--debug]
Options:
-h --help Prints this documentation
<taxon> A yaml file containing partial or whole taxonomy. Multiple
files will be merged.
-d --debug Print log information while running
"""
import logging
import sys
import docopt
import yamlconf
ENWIKI_HOST = 'https://en.wikipedia.org'
logger = logging.getLogger(__name__)
if __name__ == "__main__":
sys.exit(main())
| 24.785714
| 77
| 0.643372
|
"""
Print out the nodes of a taxonomy
Usage:
print_nodes (-h | help)
print_nodes <taxon>... [--debug]
Options:
-h --help Prints this documentation
<taxon> A yaml file containing partial or whole taxonomy. Multiple
files will be merged.
-d --debug Print log information while running
"""
import logging
import sys
import docopt
import yamlconf
ENWIKI_HOST = 'https://en.wikipedia.org'
logger = logging.getLogger(__name__)
def main():
    """Parse CLI arguments, load the taxonomy file(s) and print their nodes."""
    args = docopt.docopt(__doc__)
    log_level = logging.DEBUG if args['--debug'] else logging.INFO
    logging.basicConfig(
        level=log_level,
        format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
    )
    logging.getLogger("urllib3.connectionpool").setLevel(logging.WARNING)
    paths = args['<taxon>']
    logger.info("Loading taxon from {0}".format(paths))
    # yamlconf merges all given files (see the module docstring).
    taxonomy = yamlconf.load(*(open(p) for p in paths))
    return print_nodes(taxonomy)
def print_nodes(taxonomy):
    """Write every formatted taxonomy node to stdout, one per line."""
    for formatted in format_node_lines(taxonomy):
        print(formatted)
def format_node_lines(taxonomy, depth=0):
    """Yield one ' - name' line per taxon, indented two spaces per level.

    Keys are emitted in sorted order; a list value marks a leaf taxon
    (its contents are not printed), while a mapping is recursed into.
    """
    indent = "  " * depth
    for name in sorted(taxonomy.keys()):
        children = taxonomy[name]
        yield indent + " - " + str(name)
        if not isinstance(children, list):
            yield from format_node_lines(children, depth + 1)
if __name__ == "__main__":
sys.exit(main())
| 791
| 0
| 69
|
223707c4094d91f8896a9078b5135ec648a3dfab
| 7,166
|
py
|
Python
|
django_project/app/machine_learning/main.py
|
ryoma-jp/AI_Dashboard
|
840c6ea9ee1ec82e46c2d6470643031c79aaa1d4
|
[
"MIT"
] | null | null | null |
django_project/app/machine_learning/main.py
|
ryoma-jp/AI_Dashboard
|
840c6ea9ee1ec82e46c2d6470643031c79aaa1d4
|
[
"MIT"
] | null | null | null |
django_project/app/machine_learning/main.py
|
ryoma-jp/AI_Dashboard
|
840c6ea9ee1ec82e46c2d6470643031c79aaa1d4
|
[
"MIT"
] | null | null | null |
#! -*- coding: utf-8 -*-
'''DeepLearning学習処理の実装サンプル
引数に指定する設定ファイルで指定されたパラメータに従い,DeepLearningモデルの学習を実行する実装サンプル.
設定ファイルで指定するパラメータ:
* env: 環境設定
* fifo: 学習制御用のFIFOパス
* result_dir: 結果を格納するディレクトリ
* dataset: データセット関連の設定
* dataset_name: データセット名(Preset: MNIST, CIFAR-10)
* dataset_dir: データセットを格納したディレクトリ
* norm: 正規化方式(max, max-min, z-score)
* data_augmentation: DataAugmentation関連の設定
* rotation_range: 画像の回転[deg]
* width_shift_range: 水平方向の画像幅に対するシフト率[0.0-1.0]
* height_shift_range: 垂直方向の画像高さに対するシフト率[0.0-1.0]
* zoom_range: 拡大率[%]
* channel_shift_range: チャネル(RGB)のシフト率[0.0-1.0]
* horizontal_flip: 水平方向反転有無(True or False)
* model: 学習するモデル関連の設定
* model_type: モデル種別(MLP, SimpleCNN, DeepCNN, SimpleResNet, DeepResNet)
* training_parameter: ハイパーパラメータ
* optimizer: 最適化方式(momentum, adam, sgd, adam_lrs, sgd, lrs)
* batch_size: バッチサイズ
* epochs: EPOCH数
* initializer: 重みの初期化アルゴリズム
glrot_uniform: Xavierの一様分布
glrot_normal: Xavierの正規分布
he_uniform: Heの一様分布
he_normal: Heの正規分布
* droptout_rate: ドロップアウトによる欠落率[0.0-1.0]
* loss_func: 損失関数(tf.keras.lossesのメンバを指定)
'''
#---------------------------------
# モジュールのインポート
#---------------------------------
import os
import json
import argparse
import numpy as np
import pandas as pd
import pickle
from machine_learning.lib.data_loader.data_loader import DataLoaderMNIST
from machine_learning.lib.data_loader.data_loader import DataLoaderCIFAR10
from machine_learning.lib.trainer.trainer import TrainerMLP, TrainerCNN, TrainerResNet
#---------------------------------
# 定数定義
#---------------------------------
#---------------------------------
# 関数
#---------------------------------
#---------------------------------
# メイン処理
#---------------------------------
if __name__ == '__main__':
main()
| 33.643192
| 106
| 0.695646
|
#! -*- coding: utf-8 -*-
'''DeepLearning学習処理の実装サンプル
引数に指定する設定ファイルで指定されたパラメータに従い,DeepLearningモデルの学習を実行する実装サンプル.
設定ファイルで指定するパラメータ:
* env: 環境設定
* fifo: 学習制御用のFIFOパス
* result_dir: 結果を格納するディレクトリ
* dataset: データセット関連の設定
* dataset_name: データセット名(Preset: MNIST, CIFAR-10)
* dataset_dir: データセットを格納したディレクトリ
* norm: 正規化方式(max, max-min, z-score)
* data_augmentation: DataAugmentation関連の設定
* rotation_range: 画像の回転[deg]
* width_shift_range: 水平方向の画像幅に対するシフト率[0.0-1.0]
* height_shift_range: 垂直方向の画像高さに対するシフト率[0.0-1.0]
* zoom_range: 拡大率[%]
* channel_shift_range: チャネル(RGB)のシフト率[0.0-1.0]
* horizontal_flip: 水平方向反転有無(True or False)
* model: 学習するモデル関連の設定
* model_type: モデル種別(MLP, SimpleCNN, DeepCNN, SimpleResNet, DeepResNet)
* training_parameter: ハイパーパラメータ
* optimizer: 最適化方式(momentum, adam, sgd, adam_lrs, sgd, lrs)
* batch_size: バッチサイズ
* epochs: EPOCH数
* initializer: 重みの初期化アルゴリズム
glrot_uniform: Xavierの一様分布
glrot_normal: Xavierの正規分布
he_uniform: Heの一様分布
he_normal: Heの正規分布
* droptout_rate: ドロップアウトによる欠落率[0.0-1.0]
* loss_func: 損失関数(tf.keras.lossesのメンバを指定)
'''
#---------------------------------
# モジュールのインポート
#---------------------------------
import os
import json
import argparse
import numpy as np
import pandas as pd
import pickle
from machine_learning.lib.data_loader.data_loader import DataLoaderMNIST
from machine_learning.lib.data_loader.data_loader import DataLoaderCIFAR10
from machine_learning.lib.trainer.trainer import TrainerMLP, TrainerCNN, TrainerResNet
#---------------------------------
# 定数定義
#---------------------------------
#---------------------------------
# 関数
#---------------------------------
def ArgParser():
    """Parse the --mode and --config command-line options.

    Returns:
        argparse.Namespace with ``mode`` ("train" or "predict") and
        ``config`` (path to the JSON settings file); both are required.
    """
    arg_parser = argparse.ArgumentParser(description='TensorFlowの学習実装サンプル',
                                         formatter_class=argparse.RawTextHelpFormatter)
    arg_parser.add_argument('--mode', dest='mode', type=str, default=None, required=True, \
                        help='機械学習の動作モードを選択("train", "predict")')
    arg_parser.add_argument('--config', dest='config', type=str, default=None, required=True, \
                        help='設定ファイル(*.json)')
    return arg_parser.parse_args()
def _predict_and_calc_accuracy(trainer, x, y=None):
predictions = trainer.predict(x)
print('\nPredictions(shape): {}'.format(predictions.shape))
if (y is not None):
predictions_idx = np.argmax(predictions, axis=1)
y_idx = np.argmax(y, axis=1)
print('n_data : {}'.format(len(predictions_idx)))
print('n_correct : {}'.format(len(predictions_idx[predictions_idx==y_idx])))
return predictions
def main():
    """Entry point: load the JSON config, prepare the dataset and trainer,
    then either train the model or run prediction, per --mode."""
    # --- print a NumPy array's shape (None-safe helper) ---
    def print_ndarray_shape(ndarr):
        if (ndarr is not None):
            print(ndarr.shape)
        else:
            pass
        return
    # --- argument handling ---
    args = ArgParser()
    print('[INFO] Arguments')
    print(' * args.mode = {}'.format(args.mode))
    print(' * args.config = {}'.format(args.config))
    # --- load the config file ---
    with open(args.config, 'r') as f:
        config_data = json.load(f)
    # --- extract configuration parameters ---
    web_app_ctrl_fifo = config_data['env']['web_app_ctrl_fifo']['value']
    trainer_ctrl_fifo = config_data['env']['trainer_ctrl_fifo']['value']
    result_dir = config_data['env']['result_dir']['value']
    data_augmentation = {}
    for (key, value) in config_data['dataset']['data_augmentation'].items():
        data_augmentation[key] = value['value']
    # NOTE(review): data_type is read but never used below — confirm intent.
    data_type = config_data['dataset']['dataset_name']['value']
    dataset_dir = config_data['dataset']['dataset_dir']['value']
    data_norm = config_data['dataset']['norm']['value']
    model_type = config_data['model']['model_type']['value']
    loss_func = config_data['training_parameter']['loss_func']['value']
    optimizer = config_data['training_parameter']['optimizer']['value']
    initializer = config_data['training_parameter']['initializer']['value']
    dropout_rate = config_data['training_parameter']['dropout_rate']['value']
    batch_size = config_data['training_parameter']['batch_size']['value']
    epochs = config_data['training_parameter']['epochs']['value']
    # --- load the dataset (pickled by an earlier pipeline step) ---
    with open(os.path.join(dataset_dir, 'dataset.pkl'), 'rb') as f:
        dataset = pickle.load(f)
    # sparse_categorical_crossentropy needs integer labels; others one-hot
    if (loss_func == "sparse_categorical_crossentropy"):
        one_hot = False
    else:
        one_hot = True
    dataset.convert_label_encoding(one_hot=one_hot)
    print_ndarray_shape(dataset.train_images)
    print_ndarray_shape(dataset.train_labels)
    print_ndarray_shape(dataset.validation_images)
    print_ndarray_shape(dataset.validation_labels)
    print_ndarray_shape(dataset.test_images)
    print_ndarray_shape(dataset.test_labels)
    x_train, x_val, x_test = dataset.normalization(data_norm)
    y_train = dataset.train_labels
    y_val = dataset.validation_labels
    y_test = dataset.test_labels
    output_dims = dataset.output_dims
    # --- obtain the model (reload saved weights when predicting) ---
    if (args.mode == 'predict'):
        model_file = os.path.join(result_dir, 'models', 'hdf5', 'model.h5')
        if (not os.path.exists(model_file)):
            model_file = None
    else:
        model_file = None
    if (model_type == 'MLP'):
        trainer = TrainerMLP(dataset.train_images.shape[1:],
                    output_dir=result_dir, model_file=model_file,
                    optimizer=optimizer, initializer=initializer)
    elif (model_type == 'SimpleCNN'):
        trainer = TrainerCNN(dataset.train_images.shape[1:],
                    output_dir=result_dir, model_file=model_file,
                    optimizer=optimizer, loss=loss_func, initializer=initializer)
    elif (model_type == 'DeepCNN'):
        trainer = TrainerCNN(dataset.train_images.shape[1:],
                    output_dir=result_dir, model_file=model_file,
                    optimizer=optimizer, loss=loss_func, initializer=initializer, model_type='deep_model')
    elif (model_type == 'SimpleResNet'):
        trainer = TrainerResNet(dataset.train_images.shape[1:], output_dims,
                    output_dir=result_dir, model_file=model_file,
                    model_type='custom',
                    optimizer=optimizer, loss=loss_func, initializer=initializer, dropout_rate=dropout_rate)
    elif (model_type == 'DeepResNet'):
        trainer = TrainerResNet(dataset.train_images.shape[1:], output_dims,
                    output_dir=result_dir, model_file=model_file,
                    model_type='custom_deep',
                    optimizer=optimizer, loss=loss_func, initializer=initializer, dropout_rate=dropout_rate)
    else:
        print('[ERROR] Unknown model_type: {}'.format(model_type))
        quit()
    if (args.mode == 'train'):
        # --- training (the FIFOs let the web app steer/observe the run) ---
        trainer.fit(web_app_ctrl_fifo, trainer_ctrl_fifo,
                    x_train, y_train, x_val=x_val, y_val=y_val, x_test=x_test, y_test=y_test,
                    batch_size=batch_size, da_params=data_augmentation, epochs=epochs)
        trainer.save_model()
        predictions = _predict_and_calc_accuracy(trainer, x_test, y_test)
    elif (args.mode == 'predict'):
        # --- prediction: dump per-sample prediction/label pairs to JSON ---
        predictions = _predict_and_calc_accuracy(trainer, x_test, y_test)
        json_data = []
        for i, (prediction, label) in enumerate(zip(np.argmax(predictions, axis=1), np.argmax(y_test, axis=1))):
            json_data.append({
                'id': int(i),
                'prediction': int(prediction),
                'label': int(label),
            })
        with open(os.path.join(result_dir, 'prediction.json'), 'w') as f:
            json.dump(json_data, f, ensure_ascii=False, indent=4)
    else:
        print('[ERROR] Unknown mode: {}'.format(args.mode))
    return
#---------------------------------
# Main entry point
#---------------------------------
if __name__ == '__main__':
    main()
| 5,396
| 0
| 68
|
1765ce72907a6de52506d4d106fa42031b5e1b1b
| 11,580
|
py
|
Python
|
MLApplication.py
|
sakthianand7/Visualise-ML-Algorithms-results
|
6a2123c1387db0bf86bc2c9a715283383d47a6bc
|
[
"MIT"
] | null | null | null |
MLApplication.py
|
sakthianand7/Visualise-ML-Algorithms-results
|
6a2123c1387db0bf86bc2c9a715283383d47a6bc
|
[
"MIT"
] | null | null | null |
MLApplication.py
|
sakthianand7/Visualise-ML-Algorithms-results
|
6a2123c1387db0bf86bc2c9a715283383d47a6bc
|
[
"MIT"
] | null | null | null |
import streamlit as slt
from sklearn.svm import SVC,SVR
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import plot_confusion_matrix
from matplotlib.colors import ListedColormap
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics import precision_score, recall_score,mean_squared_error
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy.cluster.hierarchy as sch
if __name__ == '__main__':
main()
| 40.208333
| 123
| 0.69905
|
import streamlit as slt
from sklearn.svm import SVC,SVR
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import plot_confusion_matrix
from matplotlib.colors import ListedColormap
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics import precision_score, recall_score,mean_squared_error
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy.cluster.hierarchy as sch
def main():
    """Streamlit dashboard that visualises classification, regression and
    clustering demos on three bundled CSV datasets.

    Fixes vs. the original:
      * Regression fetch_data used ``data.iloc[:,[0:-1]]`` — a slice inside a
        list literal is a SyntaxError, so the module could not even be
        imported. Replaced with ``data.iloc[:, 0:-1]``.
      * ``'chocalate'`` is not a matplotlib colour name; corrected to
        ``'chocolate'``.
      * Unbalanced parenthesis in the dendrogram y-label.
    """
    # --- Home screen contents ---
    slt.title('Visualize Classification, Regression and Clustering')
    slt.subheader('Classifiers - Naive Bayes , Kernel SVM , Support Vector Machine')
    slt.subheader('Regression - Linear Regression , Polynomial Regression , Random Forest')
    slt.subheader('Clustering - K Means Clustering, Hierarchical Clustering')
    slt.sidebar.title("SELECT YOUR ALGORITHM")
    select=slt.sidebar.selectbox("Try Classification, Regression or Clustering",("Classification", "Regression","Clustering"))
    if select=='Classification':
        @slt.cache(persist=True)
        def fetch_data():
            # Age / EstimatedSalary features (cols 2,3), Purchased label.
            data=pd.read_csv('Social_Network_Ads.csv')
            x=data.iloc[:,[2,3]].values
            y=data.iloc[:,-1].values
            return x,y
        @slt.cache(persist=True)
        def split_data(x,y):
            # 70/30 split, then standard-scale features.
            x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3,random_state=0)
            sc=StandardScaler()
            x_train=sc.fit_transform(x_train)
            x_test=sc.transform(x_test)
            return x_train,x_test,y_train,y_test
        def plot_values(listofmetrics):
            # Render whichever plots the user ticked in the sidebar.
            if 'Confusion Matrix' in listofmetrics:
                slt.subheader('Confusion Matrix')
                plot_confusion_matrix(model,x_test,y_test,display_labels=class_names,cmap='viridis',)
                slt.pyplot()
            if 'Color Map' in listofmetrics:
                slt.subheader("Color Map - Feature Scaling has been applied")
                X_set, y_set = x_test, y_test
                X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
                plt.contourf(X1, X2, model.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
                             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
                plt.xlim(X1.min(), X1.max())
                plt.ylim(X2.min(), X2.max())
                for i, j in enumerate(np.unique(y_set)):
                    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                                c = ListedColormap(('darkred', 'green'))(i), label = j)
                plt.xlabel('Age')
                plt.ylabel('Estimated Salary')
                plt.legend()
                slt.pyplot()
        x,y=fetch_data()
        class_names=['notpurchased','purchased']
        x_train,x_test,y_train,y_test=split_data(x,y)
        classifier = slt.sidebar.selectbox("Classifier", ("Kernel SVM","Naive Bayes","Support Vector Machine"))
        if classifier == 'Support Vector Machine':
            slt.sidebar.subheader("Model Hyperparameters")
            metrics = slt.sidebar.multiselect("What metrics to plot?", ('Confusion Matrix','Color Map'))
            if slt.sidebar.button("Classify", key='classify'):
                slt.subheader("Support Vector Machine Results")
                model = SVC(kernel='linear', random_state=0)
                model.fit(x_train, y_train)
                accuracy = model.score(x_test, y_test)
                y_pred = model.predict(x_test)
                slt.write("Accuracy: ", accuracy.round(2))
                slt.write("Precision: ", precision_score(y_test, y_pred, labels=class_names).round(2))
                plot_values(metrics)
        if classifier == 'Kernel SVM':
            slt.sidebar.subheader("Model Hyperparameters")
            metrics = slt.sidebar.multiselect("What metrics to plot?", ('Confusion Matrix','Color Map'))
            kernel = slt.sidebar.radio("Kernel", ("rbf","poly","sigmoid", "linear"), key='kernel')
            if slt.sidebar.button("Classify", key='classify'):
                slt.subheader("Kernel SVM Results")
                model = SVC(kernel=kernel, random_state=0)
                model.fit(x_train, y_train)
                accuracy = model.score(x_test, y_test)
                y_pred = model.predict(x_test)
                slt.write("Accuracy: ", accuracy.round(2))
                slt.write("Precision: ", precision_score(y_test, y_pred, labels=class_names).round(2))
                plot_values(metrics)
        if classifier == 'Naive Bayes':
            slt.sidebar.subheader("Model Hyperparameters")
            metrics = slt.sidebar.multiselect("What metrics to plot?", ('Confusion Matrix','Color Map'))
            if slt.sidebar.button("Classify", key='classify'):
                slt.subheader("Naive Bayes Results")
                model = GaussianNB()
                model.fit(x_train, y_train)
                accuracy = model.score(x_test, y_test)
                y_pred = model.predict(x_test)
                slt.write("Accuracy: ", accuracy.round(2))
                slt.write("Precision: ", precision_score(y_test, y_pred, labels=class_names).round(2))
                plot_values(metrics)
        if slt.sidebar.checkbox("Show Dataset", False):
            slt.subheader("Classification Dataset ")
            slt.write("Customer Purchase Staus based on Social Media Ads")
            d=pd.read_csv('Social_Network_Ads.csv')
            slt.write(d)
    elif select=='Regression':
        @slt.cache(persist=True)
        def fetch_data():
            data=pd.read_csv('salary_data.csv')
            # BUG FIX: original read `data.iloc[:,[0:-1]]`, a SyntaxError
            # (slice not allowed inside a list literal). Select every
            # column except the label column instead.
            x=data.iloc[:, 0:-1].values
            y=data.iloc[:,-1].values
            return x,y
        @slt.cache(persist=True)
        def split_data(x,y):
            x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3,random_state=0)
            sc=StandardScaler()
            x_train=sc.fit_transform(x_train)
            x_test=sc.transform(x_test)
            return x_train,x_test,y_train,y_test
        def plot_values(listofmetrics):
            # Estimated-vs-actual scatter with the identity line for reference.
            if 'Graph - Train Predictions' in listofmetrics:
                slt.subheader('Graph - Train Predictions')
                plt.scatter(y_train,y_train_pred,color='red')
                plt.plot(y_train,y_train,color='blue')
                plt.title('Estimated vs Actual')
                plt.xlabel('Actual ')
                plt.ylabel('Estimated')
                slt.pyplot()
            if 'Graph - Test Predictions' in listofmetrics:
                slt.subheader('Graph - Test Predictions')
                plt.scatter(y_test,y_pred,color='red')
                plt.plot(y_test,y_test,color='blue')
                plt.title('Estimated vs Actual')
                plt.xlabel('Actual')
                plt.ylabel('Estimated')
                slt.pyplot()
        x,y=fetch_data()
        x_train,x_test,y_train,y_test=split_data(x,y)
        regressor = slt.sidebar.selectbox("Regressor", ("Linear Regression","Polynomial Regression","Random Forest Regression"))
        if regressor == 'Linear Regression':
            slt.sidebar.subheader("Model Hyperparameters")
            metrics = slt.sidebar.multiselect("What metrics to plot?", ('Graph - Train Predictions','Graph - Test Predictions'))
            if slt.sidebar.button("Predict", key='predict'):
                slt.subheader("Linear Regression Results")
                model=LinearRegression()
                model.fit(x_train, y_train)
                accuracy = model.score(x_test, y_test)
                y_train_pred=model.predict(x_train)
                y_pred = model.predict(x_test)
                slt.write("Accuracy: ", accuracy.round(2))
                plot_values(metrics)
        if regressor == 'Polynomial Regression':
            slt.sidebar.subheader("Model Hyperparameters")
            metrics = slt.sidebar.multiselect("What metrics to plot?", ('Graph - Predictions',))
            if slt.sidebar.button("Predict", key='predict'):
                slt.subheader("Polynomial Regression Results")
                poly_reg = PolynomialFeatures(degree = 4)
                x_poly = poly_reg.fit_transform(x)
                poly_reg.fit(x_poly, y)
                model = LinearRegression()
                model.fit(x_poly, y)
                accuracy = model.score(poly_reg.fit_transform(x), y)
                slt.write("Accuracy: ", accuracy.round(2))
                plt.scatter(x,y,color='red')
                plt.plot(x,model.predict(poly_reg.fit_transform(x)))
                plt.title('Experience Vs Salary')
                plt.xlabel('Experience')
                plt.ylabel('Salary')
                slt.pyplot()
        if regressor == 'Random Forest Regression':
            slt.sidebar.subheader("Model Hyperparameters")
            metrics = slt.sidebar.multiselect("What metrics to plot?", ('Graph - Predictions',))
            if slt.sidebar.button("Predict", key='predict'):
                slt.subheader("Random Forest Regression Results")
                model = RandomForestRegressor(n_estimators = 10, random_state = 0)
                model.fit(x_train,y_train)
                accuracy = model.score(x_test, y_test)
                slt.write("Accuracy: ", accuracy.round(2))
                X_grid = np.arange(min(x_train), max(x_train), 0.01)
                X_grid = X_grid.reshape((len(X_grid), 1))
                plt.scatter(x_train, y_train, color = 'red')
                plt.plot(X_grid, model.predict(X_grid), color = 'blue')
                plt.title('Experience Vs Salary')
                plt.xlabel('Experience')
                plt.ylabel('Salary')
                slt.pyplot()
        if slt.sidebar.checkbox("Show Dataset", False):
            slt.subheader("Regression Dataset ")
            slt.write("Experience vs Salary Dataset")
            d=pd.read_csv('salary_data.csv')
            slt.write(d)
    else:
        @slt.cache(persist=True)
        def fetch_data():
            # AnnualIncome / SpendingScore columns only.
            data=pd.read_csv('Mall_Customers.csv')
            x = data.iloc[:, [3, 4]].values
            return x
        def plot_values(listofmetrics):
            if 'Color Map' in listofmetrics:
                # n_clusters is capped at 8 by the sidebar, so at most
                # colors[0..7] are used. ('chocalate' typo fixed: it is not
                # a valid matplotlib colour name.)
                colors=['red','blue','green','cyan','magenta','sienna','lightpink','black','chocolate','violet']
                for i in range(n_clusters):
                    plt.scatter(X[y_kmeans == i, 0], X[y_kmeans == i, 1], s = 100, c = colors[i], label = 'Cluster '+str(i+1))
                if centroid=='kmeans':
                    plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 200, c = 'yellow', label = 'Centroids')
                plt.title('Clusters of customers')
                plt.xlabel('Annual Income ')
                plt.ylabel('Spending Score (1-100)')
                plt.legend()
                slt.pyplot()
        X=fetch_data()
        cluster = slt.sidebar.selectbox("Cluster", ('K Means Clustering','Hierarchical Clustering'))
        if cluster=='K Means Clustering':
            slt.sidebar.subheader("Use elbow method to find the optimal nnumber of clusters")
            if slt.sidebar.button("Elbow Method"):
                wcss=[]
                for i in range(1,11):
                    kmeans=KMeans(n_clusters=i,init='k-means++',max_iter=300,n_init=10)
                    kmeans.fit(X)
                    wcss.append(kmeans.inertia_)
                plt.plot(range(1,11),wcss)
                plt.title('The Elbow Method')
                plt.xlabel('No of Clusters')
                plt.ylabel('wcss')
                slt.pyplot()
            slt.sidebar.subheader("Model Hyperparameters")
            n_clusters = slt.sidebar.number_input("Choose the number of clusters", 1, 8, step=1, key='noofclusters')
            metrics = slt.sidebar.multiselect("What metrics to plot?", ('Color Map',))
            if slt.sidebar.button("Cluster", key='cluster'):
                slt.subheader("K means Clustering Results")
                kmeans = KMeans(n_clusters = n_clusters, init = 'k-means++', random_state = 42)
                y_kmeans = kmeans.fit_predict(X)
                centroid='kmeans'
                plot_values(metrics)
        else:
            slt.sidebar.subheader("Use Dendrogram to find the optimal number of clusters")
            if slt.sidebar.button('Dendrogram'):
                dendrogram=sch.dendrogram(sch.linkage(X,method='ward'))
                plt.title('Dendrogram')
                plt.xlabel('Customers')
                # label typo fixed: closing parenthesis was missing
                plt.ylabel('distance (euclidean)')
                slt.pyplot()
            slt.sidebar.subheader("Model Hyperparameters")
            n_clusters = slt.sidebar.number_input("Choose the number of clusters", 1, 8, step=1, key='noofclusters')
            metrics = slt.sidebar.multiselect("What metrics to plot?", ('Color Map',))
            if slt.sidebar.button("Cluster", key='cluster'):
                slt.subheader("Hierarchical Clustering Results")
                model = AgglomerativeClustering(n_clusters = n_clusters, affinity = 'euclidean', linkage='ward')
                y_kmeans = model.fit_predict(X)
                centroid='hierarchy'
                plot_values(metrics)
        if slt.sidebar.checkbox("Show Dataset", False):
            slt.subheader("Clustering Dataset ")
            slt.write("Annual vs Spending Score")
            d=pd.read_csv('Mall_Customers.csv')
            slt.write(d)
# Launch the Streamlit app when executed as a script.
if __name__ == '__main__':
    main()
| 10,817
| 0
| 23
|
6d5562898a29341be645852f4693e6ca922f6165
| 6,234
|
py
|
Python
|
pyaccords/pysrc/amazonEc2Act.py
|
MarouenMechtri/accords-platform-1
|
4f950fffd9fbbf911840cc5ad0fe5b5a331edf42
|
[
"Apache-2.0"
] | 1
|
2015-02-28T21:25:54.000Z
|
2015-02-28T21:25:54.000Z
|
pyaccords/pysrc/amazonEc2Act.py
|
MarouenMechtri/accords-platform-1
|
4f950fffd9fbbf911840cc5ad0fe5b5a331edf42
|
[
"Apache-2.0"
] | null | null | null |
pyaccords/pysrc/amazonEc2Act.py
|
MarouenMechtri/accords-platform-1
|
4f950fffd9fbbf911840cc5ad0fe5b5a331edf42
|
[
"Apache-2.0"
] | null | null | null |
##############################################################################
#copyright 2012, Hamid MEDJAHED (hmedjahed@prologue.fr) Prologue #
#Licensed under the Apache License, Version 2.0 (the "License"); #
#you may not use this file except in compliance with the License. #
#You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
#Unless required by applicable law or agreed to in writing, software #
#distributed under the License is distributed on an "AS IS" BASIS, #
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
#See the License for the specific language governing permissions and #
#limitations under the License. #
##############################################################################
#!/usr/bin/env python
# -*- coding: latin-1 -*-
import sys
import pypacksrc
srcdirectory=pypacksrc.srcpydir+"/pyaccords"
sys.path.append(srcdirectory)
from amazonEc2Action import *
from actionClass import *
| 89.057143
| 602
| 0.691209
|
##############################################################################
#copyright 2012, Hamid MEDJAHED (hmedjahed@prologue.fr) Prologue #
#Licensed under the Apache License, Version 2.0 (the "License"); #
#you may not use this file except in compliance with the License. #
#You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
#Unless required by applicable law or agreed to in writing, software #
#distributed under the License is distributed on an "AS IS" BASIS, #
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
#See the License for the specific language governing permissions and #
#limitations under the License. #
##############################################################################
#!/usr/bin/env python
# -*- coding: latin-1 -*-
import sys
import pypacksrc
srcdirectory=pypacksrc.srcpydir+"/pyaccords"
sys.path.append(srcdirectory)
from amazonEc2Action import *
from actionClass import *
def start(accesskey,secretkey,zone,keypair,categStr):
    """Start an EC2 instance described by a comma-separated category string.

    ``categStr`` carries the 27 CamazonEc2 attributes in declaration order;
    the refreshed category returned by amazonEc2_start is serialised back to
    the same comma-separated layout.
    """
    attrs = categStr.split(",")
    category = CamazonEc2(*[attrs[i] for i in range(27)])
    updated = amazonEc2_start(accesskey, secretkey, zone, keypair, category)
    field_order = ("Id", "name", "flavor", "image", "original", "profile",
                   "node", "price", "account", "number", "rootpass",
                   "reference", "network", "access", "accessip", "keypair",
                   "placementgroup", "publicaddr", "privateaddr", "firewall",
                   "group", "zone", "hostname", "workload", "agent", "when",
                   "state")
    return ",".join(str(getattr(updated, field)) for field in field_order)
def stop(accesskey,secretkey,zone,categStr):
    """Stop the EC2 instance described by a comma-separated category string.

    Delegates to amazonEc2_stop and re-serialises the returned category in
    the original 27-field comma-separated order.
    """
    attrs = categStr.split(",")
    category = CamazonEc2(*[attrs[i] for i in range(27)])
    updated = amazonEc2_stop(accesskey, secretkey, zone, category)
    field_order = ("Id", "name", "flavor", "image", "original", "profile",
                   "node", "price", "account", "number", "rootpass",
                   "reference", "network", "access", "accessip", "keypair",
                   "placementgroup", "publicaddr", "privateaddr", "firewall",
                   "group", "zone", "hostname", "workload", "agent", "when",
                   "state")
    return ",".join(str(getattr(updated, field)) for field in field_order)
def restart(accesskey,secretkey,zone,categStr):
    """Restart the EC2 instance described by *categStr* and return its updated state.

    accesskey/secretkey: AWS credentials; zone: availability zone.
    *categStr* carries the 27 comma-separated CamazonEc2 attribute values
    (Id, name, flavor, ..., state); the result of amazonEc2_restart is
    re-serialized in the same order.
    """
    # Field order must match both CamazonEc2's constructor signature and the
    # serialization format shared by the other lifecycle helpers in this module.
    fields = ("Id", "name", "flavor", "image", "original", "profile", "node",
              "price", "account", "number", "rootpass", "reference", "network",
              "access", "accessip", "keypair", "placementgroup", "publicaddr",
              "privateaddr", "firewall", "group", "zone", "hostname",
              "workload", "agent", "when", "state")
    values = categStr.split(",")
    categoryAtr = CamazonEc2(*values[:len(fields)])
    resCateg = amazonEc2_restart(accesskey, secretkey, zone, categoryAtr)
    return ",".join(str(getattr(resCateg, name)) for name in fields)
def snapshot(accesskey,secretkey,zone,imgname,categStr):
    """Snapshot the EC2 instance described by *categStr* into image *imgname*.

    accesskey/secretkey: AWS credentials; zone: availability zone;
    imgname: name for the created image.  *categStr* carries the 27
    comma-separated CamazonEc2 attribute values (Id, name, flavor, ...,
    state); the result of amazonEc2_snapshot is re-serialized in the same
    order and returned.
    """
    # Field order must match both CamazonEc2's constructor signature and the
    # serialization format shared by the other lifecycle helpers in this module.
    fields = ("Id", "name", "flavor", "image", "original", "profile", "node",
              "price", "account", "number", "rootpass", "reference", "network",
              "access", "accessip", "keypair", "placementgroup", "publicaddr",
              "privateaddr", "firewall", "group", "zone", "hostname",
              "workload", "agent", "when", "state")
    values = categStr.split(",")
    categoryAtr = CamazonEc2(*values[:len(fields)])
    resCateg = amazonEc2_snapshot(accesskey, secretkey, zone, imgname, categoryAtr)
    return ",".join(str(getattr(resCateg, name)) for name in fields)
def suspend(accesskey,secretkey,zone,categStr):
    """Suspend the EC2 instance described by *categStr* and return its updated state.

    accesskey/secretkey: AWS credentials; zone: availability zone.
    *categStr* carries the 27 comma-separated CamazonEc2 attribute values
    (Id, name, flavor, ..., state); the result of amazonEc2_suspend is
    re-serialized in the same order.
    """
    # Field order must match both CamazonEc2's constructor signature and the
    # serialization format shared by the other lifecycle helpers in this module.
    fields = ("Id", "name", "flavor", "image", "original", "profile", "node",
              "price", "account", "number", "rootpass", "reference", "network",
              "access", "accessip", "keypair", "placementgroup", "publicaddr",
              "privateaddr", "firewall", "group", "zone", "hostname",
              "workload", "agent", "when", "state")
    values = categStr.split(",")
    categoryAtr = CamazonEc2(*values[:len(fields)])
    resCateg = amazonEc2_suspend(accesskey, secretkey, zone, categoryAtr)
    return ",".join(str(getattr(resCateg, name)) for name in fields)
| 4,798
| 0
| 115
|
bc82892d2888cd197b6a33ea660137008f599f15
| 37,920
|
py
|
Python
|
bot_ls.py
|
NikitaMikhailov/bot_herobot
|
4e462f622dd0ba67854b5e778efc86abab303bec
|
[
"MIT"
] | null | null | null |
bot_ls.py
|
NikitaMikhailov/bot_herobot
|
4e462f622dd0ba67854b5e778efc86abab303bec
|
[
"MIT"
] | 6
|
2020-03-24T17:23:25.000Z
|
2021-12-13T20:04:34.000Z
|
bot_ls.py
|
NikitaMikhailov/bot_herobot_ls
|
4e462f622dd0ba67854b5e778efc86abab303bec
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env bash
#!/bin/bash
#!/bin/sh
#!/bin/sh -
from vk_api.utils import get_random_id
from vk_api import VkUpload
from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
import random, requests, vk_api, os, bs4
from google_images_download import google_images_download
from lxml import html
import urllib.parse
# Punctuation stripped from incoming messages before command matching.
# NOTE(review): this shadows the builtin `dict`; renaming would touch every use.
dict = [".", ",", "!", "?", ")", "(", ":", ";", "'", ']', '[', '"']
# Characters stripped when cleaning scraped joke text.
dictan = [")", "(", ":", ";", "'", ']', '[', '"', '\\', 'n', '&', 'q', 'u', 'o', 't']
# English month name -> month number.
dict7 = {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8,
         'September': 9, 'October': 10, 'November': 11, 'December': 12}
# Russian zodiac-sign name -> English slug used in horoscope file names.
dict8 = {'овен':'aries','телец':'taurus' ,'близнецы':'gemini' ,'рак':'cancer' ,'лев':'leo' ,'дева':'virgo' ,'весы':'libra' ,'скорпион':'scorpio' ,'стрелец':'sagittarius','козерог':'capricorn' ,'водолей':'aquarius' ,'рыбы':'pisces'}
# Miscellaneous state flags/counters used by the handlers.
kolresp = 0
attachments = []
chand = 0
flagtime = False
fltm1 = False
fltm2 = False
flaggoroscop=True
# abuse protection: the access token is kept in a file, not in the source
f=open('/root/bot_herobot_ls/token.txt','r')
token=f.read()
f.close()
# VK API session, long-poll listener and media uploader for group 178949259.
session = requests.Session()
vk_session = vk_api.VkApi(token=token)
longpoll = VkBotLongPoll(vk_session, '178949259')
vk = vk_session.get_api()
upload = VkUpload(vk_session) # for uploading images
# Keyboard with the twelve zodiac signs plus a "remove" button.
keyboardgor = VkKeyboard(one_time=False)
keyboardgor.add_button('Овен', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Телец', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Близнецы', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Рак', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_line() # move to the next keyboard row
keyboardgor.add_button('Лев', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Дева', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Весы', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Скорпион', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_line() # move to the next keyboard row
keyboardgor.add_button('Стрелец', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Козерог', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Водолей', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Рыбы', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_line() # move to the next keyboard row
keyboardgor.add_button('Убери гороскоп', color=VkKeyboardColor.NEGATIVE)
# Default keyboard with the main chat commands.
keyboardosn = VkKeyboard(one_time=False)
keyboardosn.add_button('Мысль', color=VkKeyboardColor.PRIMARY)
keyboardosn.add_button('Цитата', color=VkKeyboardColor.PRIMARY)
keyboardosn.add_button('Факт', color=VkKeyboardColor.PRIMARY)
keyboardosn.add_button('Анекдот', color=VkKeyboardColor.PRIMARY)
#keyboardosn.add_line() # Переход на вторую строку
#keyboardosn.add_button('Анекдот', color=VkKeyboardColor.PRIMARY)
'''
print(keyboardgor.get_keyboard())
vk.messages.send(
user_id=195310233,
random_id=get_random_id(),
keyboard=keyboardgor.get_keyboard(),
message="Я перезагружен!"
)
'''
# Crocodile-game keywords handled elsewhere; the dispatcher skips them.
iscl=["легкое", "сложное", "среднее", "❓ что это такое", "♻ другое слово", "!рестарт крокодил"]
# NOTE(review): this call expects mainfunc() to be defined; it is not defined
# in this copy of the file — verify against the full version.
mainfunc()
| 52.303448
| 231
| 0.390454
|
#!/usr/bin/env bash
#!/bin/bash
#!/bin/sh
#!/bin/sh -
from vk_api.utils import get_random_id
from vk_api import VkUpload
from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
import random, requests, vk_api, os, bs4
from google_images_download import google_images_download
from lxml import html
import urllib.parse
# Punctuation stripped from incoming messages before command matching.
# NOTE(review): this shadows the builtin `dict`; renaming would touch every use.
dict = [".", ",", "!", "?", ")", "(", ":", ";", "'", ']', '[', '"']
# Characters stripped when cleaning scraped joke text.
dictan = [")", "(", ":", ";", "'", ']', '[', '"', '\\', 'n', '&', 'q', 'u', 'o', 't']
# English month name -> month number.
dict7 = {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6, 'July': 7, 'August': 8,
         'September': 9, 'October': 10, 'November': 11, 'December': 12}
# Russian zodiac-sign name -> English slug used in horoscope file names.
dict8 = {'овен':'aries','телец':'taurus' ,'близнецы':'gemini' ,'рак':'cancer' ,'лев':'leo' ,'дева':'virgo' ,'весы':'libra' ,'скорпион':'scorpio' ,'стрелец':'sagittarius','козерог':'capricorn' ,'водолей':'aquarius' ,'рыбы':'pisces'}
# Miscellaneous state flags/counters used by the handlers.
kolresp = 0
attachments = []
chand = 0
flagtime = False
fltm1 = False
fltm2 = False
flaggoroscop=True
# abuse protection: the access token is kept in a file, not in the source
f=open('/root/bot_herobot_ls/token.txt','r')
token=f.read()
f.close()
# VK API session, long-poll listener and media uploader for group 178949259.
session = requests.Session()
vk_session = vk_api.VkApi(token=token)
longpoll = VkBotLongPoll(vk_session, '178949259')
vk = vk_session.get_api()
upload = VkUpload(vk_session) # for uploading images
def goroscop(bd_date):
    """Map a birth date to its western-zodiac sign name.

    bd_date is a (day, month) pair of numeric strings, e.g. ('21', '3').
    Returns the English sign slug used by the horoscope scraper, or None
    when the month is not '1'..'12'.
    """
    # signs[i] is the sign in effect at the start of month i+1; signs[i + 1]
    # takes over on the month's cutoff day.  The list wraps from December
    # back to Capricorn.
    signs = ['capricorn', 'aquarius', 'pisces', 'aries', 'taurus', 'gemini',
             'cancer', 'leo', 'virgo', 'libra', 'scorpio', 'sagittarius',
             'capricorn']
    # First day (1-based) on which the NEXT sign begins, keyed by month string.
    cutoffs = {'1': 20, '2': 19, '3': 21, '4': 21, '5': 21, '6': 22,
               '7': 23, '8': 23, '9': 23, '10': 23, '11': 22, '12': 22}
    month = bd_date[1]
    if month not in cutoffs:
        # Unknown month: the original fell through every branch and
        # returned None implicitly.
        return None
    month_index = int(month) - 1
    if int(bd_date[0]) < cutoffs[month]:
        return signs[month_index]
    return signs[month_index + 1]
# Keyboard with the twelve zodiac signs plus a "remove" button; shown while
# the horoscope mode is active.
keyboardgor = VkKeyboard(one_time=False)
keyboardgor.add_button('Овен', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Телец', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Близнецы', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Рак', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_line() # move to the next keyboard row
keyboardgor.add_button('Лев', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Дева', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Весы', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Скорпион', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_line() # move to the next keyboard row
keyboardgor.add_button('Стрелец', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Козерог', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Водолей', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_button('Рыбы', color=VkKeyboardColor.PRIMARY)
keyboardgor.add_line() # move to the next keyboard row
keyboardgor.add_button('Убери гороскоп', color=VkKeyboardColor.NEGATIVE)
# Default keyboard with the main chat commands.
keyboardosn = VkKeyboard(one_time=False)
keyboardosn.add_button('Мысль', color=VkKeyboardColor.PRIMARY)
keyboardosn.add_button('Цитата', color=VkKeyboardColor.PRIMARY)
keyboardosn.add_button('Факт', color=VkKeyboardColor.PRIMARY)
keyboardosn.add_button('Анекдот', color=VkKeyboardColor.PRIMARY)
#keyboardosn.add_line() # Переход на вторую строку
#keyboardosn.add_button('Анекдот', color=VkKeyboardColor.PRIMARY)
'''
print(keyboardgor.get_keyboard())
vk.messages.send(
user_id=195310233,
random_id=get_random_id(),
keyboard=keyboardgor.get_keyboard(),
message="Я перезагружен!"
)
'''
def goroscop1():
    """Refresh the cached daily horoscopes for all twelve signs.

    Scrapes astroscope.ru once per sign and writes the text into
    /root/bot_herobot_chat/resurses/goroskop_files/<sign>.txt, which the
    horoscope handler reads later.  Network and filesystem side effects
    only; no return value.
    """
    spisok_znakov=['aries','taurus','gemini','cancer','leo','virgo','libra','scorpio','sagittarius','capricorn','aquarius','pisces']
    for i in range (0,12):
        f = requests.get(
            "http://astroscope.ru/horoskop/ejednevniy_goroskop/" + spisok_znakov[i] + ".html") # .text,"html.parser"
        f.encoding = 'utf-8'
        # The daily text appears to sit on the third line of the page's
        # 'col-12' block — TODO confirm against the live markup.
        text_gor = (bs4.BeautifulSoup(f.text, "html.parser").find('div', 'col-12'))
        print(str(str(text_gor).split('\n')[2]).lstrip())
        filegor = open('/root/bot_herobot_chat/resurses/goroskop_files/' + spisok_znakov[i] + '.txt', 'w') # /root/bot_herobot_chat
        filegor.write(str(str(text_gor).split('\n')[2]).lstrip())
        filegor.close()
def sentLS(text,user):
    """Send *text* to VK user *user* as a private message via the global `vk` API."""
    vk.messages.send(
        user_id=user,
        random_id=get_random_id(),
        message=text
    )
# Crocodile-game keywords handled by a different bot; mainfunc() skips them.
iscl=["легкое", "сложное", "среднее", "❓ что это такое", "♻ другое слово", "!рестарт крокодил"]
def mainfunc():
    """Main long-poll loop of the private-messages bot.

    Listens for new VK messages, normalizes the text and dispatches on it:
    command handlers (help, jokes/facts/quotes, dice, horoscope, keyboard
    toggles), a file-backed "cities" word game, and a phrase-lookup chatbot
    fallback.  Any unhandled exception is reported to the admin account and
    the loop restarts itself recursively.
    """
    flaggoroscop=True
    attachments = []
    try:
        for event in longpoll.listen():
            attachments = []
            if event.type == VkBotEventType.MESSAGE_NEW and event.obj.text:
                text_osn=event.obj.text
                # normalize the message text: lower-case and strip punctuation
                # (a leading '!' is kept so commands still match)
                event.obj.text = event.obj.text.lower();
                evtxt = ''
                for i in range(0, len(event.obj.text)):
                    if not event.obj.text[i] in dict or (i == 0 and event.obj.text[i] == '!'):
                        evtxt += event.obj.text[i]
                if evtxt == '':
                    event.obj.text = event.obj.text
                else:
                    event.obj.text = evtxt
                # handle only private (user) messages that are not
                # crocodile-game keywords (iscl)
                if event.from_user and event.obj.text not in iscl:
                    # fetch the sender's name via the raw users.get endpoint
                    fio = requests.get("https://api.vk.com/method/users.get?user_ids=" + str(
                        event.obj.peer_id) + "&fields=bdate&access_token="+token+"&v=5.92")
                    first_name = fio.text[14::].split(',')[1].split(':')[1][1:-1:]
                    last_name = fio.text[14::].split(',')[2].split(':')[1][1:-1:]
                    #print(last_name, ' ', first_name, ' ', event.obj.peer_id, ' ', event.obj.text)
                    flaggorod1 = False
                    # append the message to the local log
                    s=open('logs_ls.txt','a')
                    s.write(last_name + ' *_* ' + first_name + ' *_* ' + str(event.obj.from_id) + ' *_* ' + str(event.chat_id) + ' *_* ' + text_osn + '\n')
                    s.close()
                    # flaggorod1: is this user currently in a "cities" game?
                    f = open('resurses/goroda1.txt', 'r')
                    for i in f:
                        if str(event.obj.peer_id) == i[:-1:]:
                            flaggorod1 = True
                    f.close()
                    if event.obj.text == '!города' and flaggorod1 != True:
                        # start a new "cities" game for this user
                        f = open('resurses/goroda1.txt', 'a')
                        f.write(str(event.obj.peer_id) + '\n')
                        f1 = open('resurses/goroda_files/'+str(event.obj.peer_id) + '.txt', 'w')
                        f1.close()
                        f.close()
                        vk.messages.send(
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            message='Давай сыграем, ' + first_name + ', думаю, правила ты знаешь, если захочешь закончить игру-напиши "!хватит играть"'+"."
                        )
                        vk.messages.send(
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            message='Начинай, пиши первый город, я подхвачу'+"."
                        )
                    elif event.obj.text == 'начать':
                        vk.messages.send(
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            message='Привет, меня зовут Херабот и я бот @id195310233(Никиты Михайлова) \n'
                                    'В ЛС мне доступны следующие функции:\n1) !города\n'
                                    '2) !гороскоп\n3) !кубик ..\n4) !факт\n5) !цитата\n6) !мысль\n7) !клавиатура вкл/выкл\n8) !анекдот\n9) Напомни мне\n'
                                    'Остальное время я буду просто болтать с тобой, '+first_name + ', но не обижайся, если невпопад, мой хозяин никак '
                                    'не доделает нейронку.\nА ещё я советую тебе включить клавиатуру, '
                                    'она упрощает взаимодействие со мной.'
                        )
                    elif event.obj.text == '!обнови гороскоп' and event.obj.peer_id == 195310233:
                        # admin-only: re-scrape the daily horoscopes
                        goroscop1()
                        vk.messages.send(
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            message='обновил'+"."
                        )
                    elif event.obj.text == '!хелп' or event.obj.text == '!помощь' or event.obj.text == '!help':
                        vk.messages.send(
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            message='Привет! В ЛС мне доступны следующие функции:\n1) !города\n'
                                    '2) !гороскоп\n3) !кубик ..\n4) !факт\n5) !цитата\n6) !мысль\n7) !клавиатура вкл/выкл\n8) !анекдот\n9) Напомни мне\n'
                                    'Остальное время я буду просто болтать с тобой, '+first_name + ', но не обижайся, если невпопад, мой хозяин никак '
                                    'не доделает нейронку.\nА ещё я советую тебе включить клавиатуру,'
                                    ' она упрощает взаимодействие со мной.'
                        )
                    elif event.obj.text == '!анекдот' or event.obj.text == 'анекдот':
                        # pick a random line from the jokes file
                        anes = random.randint(0, 135500)
                        for linenum, line in enumerate(open('/root/bot_herobot_chat/resurses/anec.txt', 'r')):
                            if linenum == anes:
                                anecdot = (line.strip()).replace('#', '\n')
                        keyboardanec = VkKeyboard(one_time=False, inline=True)
                        keyboardanec.add_button('Анекдот', color=VkKeyboardColor.PRIMARY)
                        vk.messages.send( # send the message
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            keyboard=keyboardanec.get_keyboard(),
                            message=anecdot
                        )
                    elif event.obj.text == '!факт' or event.obj.text == 'факт':
                        cit = random.randint(0, 764)
                        for linenum, line in enumerate(open('/root/bot_herobot_chat/resurses/facts_clear.txt', 'r')):
                            if linenum == cit:
                                messagecit = (line.strip())
                                if messagecit[-1] == ',':
                                    messagecit = messagecit[:-1:]
                        keyboardfacts = VkKeyboard(one_time=False, inline=True)
                        keyboardfacts.add_button('Факт', color=VkKeyboardColor.PRIMARY)
                        vk.messages.send( # send the message
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            keyboard=keyboardfacts.get_keyboard(),
                            message=str(messagecit)
                        )
                    elif event.obj.text == '!мысль' or event.obj.text == 'мысль':
                        cit = random.randint(0, 1355)
                        for linenum, line in enumerate(open('/root/bot_herobot_chat/resurses/quotes_clear.txt', 'r')):
                            if linenum == cit:
                                messagecit = (line.strip())
                                if messagecit[-1] == ',':
                                    messagecit = messagecit[:-1:]
                        keyboardquotes = VkKeyboard(one_time=False, inline=True)
                        keyboardquotes.add_button('Мысль', color=VkKeyboardColor.PRIMARY)
                        vk.messages.send( # send the message
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            keyboard=keyboardquotes.get_keyboard(),
                            message=str(messagecit)
                        )
                    elif event.obj.text == '!цитата' or event.obj.text == 'цитата':
                        cit = random.randint(0, 1391)
                        for linenum, line in enumerate(open('/root/bot_herobot_chat/resurses/twtrr.txt', 'r')):
                            if linenum == cit:
                                messagecit = (line.strip())
                                if messagecit[-1] == ',':
                                    messagecit = messagecit[:-1:]
                        keyboardtwtrr = VkKeyboard(one_time=False, inline=True)
                        keyboardtwtrr.add_button('Цитата', color=VkKeyboardColor.PRIMARY)
                        vk.messages.send( # send the message
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            keyboard=keyboardtwtrr.get_keyboard(),
                            message=str(messagecit)
                        )
                    elif event.obj.text.find('!кубик') != -1:
                        # dice roll: "!кубик N" -> random 1..N
                        kub = event.obj.text[7::]
                        try:
                            vk.messages.send( # send the message
                                user_id=event.obj.peer_id,
                                random_id=get_random_id(),
                                message='Выпало число ' + str(random.randint(1, int(kub)))+"."
                            )
                        except:
                            vk.messages.send( # send the message
                                user_id=event.obj.peer_id,
                                random_id=get_random_id(),
                                message='С твоим числом что-то не так'+"."
                            )
                    elif event.obj.text == '!гороскоп':
                        print(1223)
                        flaggoroscop=True
                        vk.messages.send(
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            keyboard=keyboardgor.get_keyboard(),
                            message='Воспользуйся клавиатурой'+"."
                        )
                    elif event.obj.text[:11:] == "напомни мне":
                        # reminders are handled elsewhere; ignore here
                        continue
                    elif event.obj.text in dict8 and flaggoroscop is True:
                        # send the cached daily horoscope for the chosen sign
                        zodiak = dict8[event.obj.text]
                        f=open('/root/bot_herobot_chat/resurses/goroskop_files/'+zodiak+'.txt','r')
                        goroskp=f.read()
                        f.close()
                        vk.messages.send( # send the message
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            keyboard=keyboardgor.get_keyboard(),
                            message=goroskp
                        )
                    elif (event.obj.text == '!клавиатура вкл' or event.obj.text == 'клавиатура вкл'):
                        vk.messages.send( # send the message
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            keyboard=keyboardosn.get_keyboard(),
                            message="Окей, "+first_name+"."
                        )
                    elif (event.obj.text == '!клавиатура выкл' or event.obj.text == 'клавиатура выкл' or event.obj.text == 'выключить клавиатуру'):
                        vk.messages.send( # send the message
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            keyboard=keyboardosn.get_empty_keyboard(),
                            message="Окей, "+first_name+"."
                        )
                    elif (event.obj.text == '!убери гороскоп' or event.obj.text == 'убери гороскоп') and flaggoroscop is True:
                        flaggoroscop = False
                        vk.messages.send( # send the message
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            keyboard=keyboardosn.get_keyboard(),
                            message="Включена обычная клавиатура."
                        )
                        keyboardvkl = VkKeyboard(one_time=False, inline=True)
                        keyboardvkl.add_button('Выключить клавиатуру', color=VkKeyboardColor.NEGATIVE)
                        vk.messages.send( # send the message
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            keyboard=keyboardvkl.get_keyboard(),
                            message="Хочешь выключить её?"
                        )
                    elif event.obj.text == '!города' and flaggorod1 == True:
                        vk.messages.send(
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            message='Так мы уже играем, ' + first_name+'.'
                        )
                    elif event.obj.text == '!хватит играть' and flaggorod1 == True:
                        # end the "cities" game: drop the user from the roster
                        # and delete their per-game word file
                        try:
                            os.remove(str(event.obj.peer_id) + '.txt')
                        except:
                            print('еще нет файла 1')
                        f = open('resurses/goroda1.txt', 'r')
                        r = ''
                        for line in f:
                            if line[:-1:] == str(event.obj.peer_id):
                                r = r
                            else:
                                r += line + '\n'
                        f.close()
                        r1 = r.split('\n')
                        r2 = []
                        # print(r1)
                        for i in r1:
                            if i != '':
                                r2.append(i)
                        # print(r2)
                        r = '\n'.join(r2) + '\n'
                        # print(r)
                        os.remove('resurses/goroda1.txt')
                        f = open('resurses/goroda1.txt', 'w')
                        f.write(r)
                        f.close()
                        vk.messages.send(
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            message='Как скажешь, ' + first_name+'.'
                        )
                        vk.messages.send(
                            user_id=event.obj.peer_id,
                            random_id=get_random_id(),
                            message='Если захочешь ещё поиграть-просто напиши мне "!города"'+'.'
                        )
                    elif flaggorod1 is True:
                        # in-game move of the "cities" game:
                        # flaggorod2 - word is a known city, flaggorod3 - already
                        # used in this game, flaggorod5 - first letter matches
                        flaggorod2 = False
                        flaggorod3 = False
                        flaggorod5 = False
                        f1 = open('resurses/goroda_files/'+str(event.obj.peer_id) + '.txt', 'r')
                        chet = 0
                        for i in f1:
                            chet += 1
                            if str(event.obj.text) == i[:-1:]:
                                flaggorod3 = True
                        f1.close()
                        if chet != 0:
                            for linenum, line in enumerate(open('resurses/goroda_files/'+str(event.obj.peer_id) + '.txt', 'r')):
                                if linenum == chet - 1:
                                    poslgorod = (line.strip())
                            if poslgorod[-1] == 'ь' or poslgorod[-1] == 'ы' or poslgorod[-1] == 'ъ' or poslgorod[
                                -1] == 'a':
                                if event.obj.text[0].lower() == poslgorod[-2]:
                                    # print('изменена буква')
                                    flaggorod5 = True
                            if event.obj.text[0].lower() == poslgorod[-1]:
                                flaggorod5 = True
                        else:
                            flaggorod5 = True
                        f = open('resurses/cities.txt', 'r')
                        for i in f:
                            if event.obj.text == i[:-1:].lower():
                                flaggorod2 = True
                        f.close()
                        f = open('resurses/cityman.txt', 'r')
                        for i in f:
                            if event.obj.text == i[:-1:].lower():
                                flaggorod2 = True
                        f.close()
                        if flaggorod2 is True and flaggorod3 is not True and flaggorod5 is True:
                            # accept the move and pick the bot's answer city
                            f1 = open('resurses/goroda_files/'+str(event.obj.peer_id) + '.txt', 'a')
                            f1.write(str(event.obj.text + '\n'))
                            f1.close()
                            letter = str(event.obj.text[-1])
                            if letter == 'ь' or letter == 'ы' or letter == 'ъ':
                                letter = str(event.obj.text[-2])
                            flgorod = False
                            try:
                                while flgorod is False:
                                    # flaggorod31=False
                                    randgorod = random.randint(0, 10960)
                                    for linenum, line in enumerate(open('resurses/cityman.txt', 'r')):
                                        if linenum == randgorod:
                                            gorod = (line.strip())
                                    f1 = open('resurses/goroda_files/'+str(event.obj.peer_id) + '.txt', 'r')
                                    if gorod[-1] == '\n':
                                        gorod = gorod[:-1:]
                                    for i in f1:
                                        if gorod[0].lower() == letter:
                                            if gorod.lower() == i[:-1:]:
                                                flaggorod4 = True
                                        else:
                                            flgorod = True
                                    f1.close()
                                f1 = open('resurses/goroda_files/'+str(event.obj.peer_id) + '.txt', 'a')
                                f1.write(gorod.lower() + '\n')
                                f1.close()
                                # randomly reply with a description, a photo,
                                # or just the city name
                                ranom = random.randint(0, 4)
                                if ranom == 0 or ranom == 4:
                                    print('попал в описание')
                                    try:
                                        dlina = len(gorod)
                                        if gorod[-1] == 'ь' or gorod[-1] == 'ы' or gorod[-1] == 'ъ':
                                            posllet = gorod[-2].upper()
                                        else:
                                            posllet = gorod[-1].upper()
                                        for linenum, line in enumerate(open('resurses/city2.txt', 'r')):
                                            if line[:dlina:] == gorod:
                                                link = line.split('|')
                                                if link[1].find('(') != -1 and link[1].find(')') != -1:
                                                    e1 = link[1].find('(')
                                                    e2 = link[1].find(')')
                                                    text1 = link[1][:e1:] + link[1][e2 + 1::]
                                                    text = text1.split('.')
                                                else:
                                                    text = link[1].split('.')
                                                # print(text)
                                                ranom2 = random.randint(1, len(text) - 1)
                                                if ranom2 > 3:
                                                    ranom2 = 3
                                                gorod += '\nКстати, во что я знаю про этот город\n'+'.'
                                                for r1 in range(0, ranom2):
                                                    gorod += text[r1] + '\n'
                                        sluchay = random.randint(0, 4)
                                        print("сформировал описание")
                                        if sluchay == 0:
                                            variants = ['Неплохой вариант, ' + first_name + '!', 'Окей, пойдёт.',
                                                        'Хороший город, ты молодец, ' + first_name + '!',
                                                        'Здорово, но я всё равно умнее тебя.']
                                            vk.messages.send(
                                                user_id=event.obj.peer_id,
                                                random_id=get_random_id(),
                                                message=variants[random.randint(0, 3)]
                                            )
                                        vk.messages.send(
                                            user_id=event.obj.peer_id,
                                            random_id=get_random_id(),
                                            message=gorod
                                        )
                                        vk.messages.send(
                                            user_id=event.obj.peer_id,
                                            random_id=get_random_id(),
                                            message='Тебе на букву ' + posllet+'.'
                                        )
                                        print("отправил описание")
                                    except:
                                        print("с описанием проблемы, отправил просто город")
                                        vk.messages.send(
                                            user_id=event.obj.peer_id,
                                            random_id=get_random_id(),
                                            message=gorod
                                        )
                                if ranom == 1 or ranom == 3:
                                    print("попал в картинку")
                                    try:
                                        if gorod[-1] == 'ь' or gorod[-1] == 'ы' or gorod[-1] == 'ъ':
                                            posllet = gorod[-2].upper()
                                        else:
                                            posllet = gorod[-1].upper()
                                        '''
                                        response = requests.get(
                                            'https://ru.depositphotos.com/search/город&' + gorod.lower() + '&фото.html')
                                        parsed_body = html.fromstring(response.text)
                                        # Парсим ссылки с картинками
                                        images = parsed_body.xpath('//img/@src')
                                        images = [urllib.parse.urljoin(response.url, url) for url in images]
                                        image_url = images[random.randint(0, len(images))]
                                        '''
                                        response = google_images_download.googleimagesdownload()
                                        arguments = {"keywords": 'город '+event.obj.text.lower(), "size": 'medium', "limit": random.randint(1, 10), "no_download": True,
                                                     "print_urls": True}
                                        paths = response.download(arguments)
                                        file_url=open('file_url.txt','r')
                                        #print('файл успешно открыт')
                                        gh=0
                                        for line in file_url:
                                            #print(line)
                                            if gh==0:
                                                image_url=line
                                            gh+=1
                                        #print(image_url)
                                        file_url.close()
                                        image_url = image_url
                                        image = session.get(image_url, stream=True)
                                        photo = upload.photo_messages(photos=image.raw)[0]
                                        attachments.append('photo{}_{}'.format(photo['owner_id'], photo['id'])
                                        )
                                        print("загрузил картинку")
                                        vk.messages.send(
                                            user_id=event.obj.peer_id,
                                            random_id=get_random_id(),
                                            attachment=','.join(attachments),
                                            message=gorod + '\nВот, кстати, фото города '+event.obj.text.capitalize()+', который ты предложил.'
                                        )
                                        vk.messages.send(
                                            user_id=event.obj.peer_id,
                                            random_id=get_random_id(),
                                            message='Тебе на букву ' + posllet+'.'
                                        )
                                        print("отправил картинку")
                                    except:
                                        print("с картинкой проблемы, отправил чистый город")
                                        vk.messages.send(
                                            user_id=event.obj.peer_id,
                                            random_id=get_random_id(),
                                            message=gorod
                                        )
                                if ranom == 2:
                                    print("попал просто в город")
                                    vk.messages.send(
                                        user_id=event.obj.peer_id,
                                        random_id=get_random_id(),
                                        message=gorod
                                    )
                            except:
                                # no candidate city left: concede and clean up
                                vk.messages.send(
                                    user_id=event.obj.peer_id,
                                    random_id=get_random_id(),
                                    message='ты меня победил, я больше не знаю городов.'
                                )
                                f = open('resurses/goroda1.txt', 'r')
                                r = ''
                                for line in f:
                                    if line[:-1:] == str(event.obj.peer_id):
                                        r = r
                                    else:
                                        r += line + '\n'
                                f.close()
                                f = open('resurses/goroda1.txt', 'w')
                                f.write(r)
                                f.close()
                                os.remove('resurses/goroda_files/'+str(event.obj.peer_id) + '.txt')
                        elif flaggorod2 is True and flaggorod3 is True:
                            print("попал в повторение")
                            spisok1 = ['Либо я тебя неправильно понял, либо такой город уже был.',
                                       'В нашей игре уже был такой город.', 'Ты повторяешься, ' + first_name+'.']
                            ran = random.randint(0, 2)
                            vk.messages.send(
                                user_id=event.obj.peer_id,
                                random_id=get_random_id(),
                                message=spisok1[ran]
                            )
                        elif flaggorod2 is True and flaggorod5 is False:
                            print("попал в неправильную букву")
                            vk.messages.send(
                                user_id=event.obj.peer_id,
                                random_id=get_random_id(),
                                message='У твоего города неправильная первая буква.'
                            )
                        else:
                            print("попал в отсутствие")
                            spisok2 = ["Я не нашел такого города в своей базе.", "Извини, но такого города нет.",
                                       "Может ты и прав, но я такого города не знаю."]
                            ran = random.randint(0, 2)
                            vk.messages.send(
                                user_id=event.obj.peer_id,
                                random_id=get_random_id(),
                                message=spisok2[ran]
                            )
                    else:
                        # chatbot fallback: exact lookup in the phrase base,
                        # then a word-by-word fuzzy lookup
                        anssplit=open('resurses/baza3.txt','r')
                        for line in anssplit:
                            #print(event.obj.text,line.split('\\')[0])
                            if line.split('\\')[0]==event.obj.text:
                                response=line.split('\\')[1]
                                break
                            else:
                                response=None
                        anssplit.close()
                        anssplit=open('resurses/baza3.txt','r')
                        if response==None:
                            #print(11)
                            for line in anssplit:
                                for red in range (0,len(event.obj.text.split(' '))-1):
                                    if line.split('\\')[0].find(event.obj.text.split(' ')[red])!=-1:
                                        #print(event.obj.text.split(' ')[red],line.split('\\')[0])
                                        response=line.split('\\')[1]
                                        break
                                    else:
                                        response=None
                                if response!=None:
                                    break
                        anssplit.close()
                        if response:
                            vk.messages.send(
                                user_id=event.obj.peer_id,
                                random_id=get_random_id(),
                                message=response
                            )
                        else:
                            xy=['ху','хуи','хуя']
                            t=random.randint(0,2)
                            t2=random.randint(3,4)
                            if len(event.obj.text.split(' '))==1 and random.randint(0,2)==2:
                                vk.messages.send(
                                    user_id=event.obj.peer_id,
                                    random_id=get_random_id(),
                                    message=xy[t]+event.obj.text[-(t2)::]
                                )
                            else:
                                vk.messages.send(
                                    user_id=event.obj.peer_id,
                                    random_id=get_random_id(),
                                    message='Я тупой как тапок, ' + first_name+'.'
                                )
    except Exception as err:
        # report the failure to the admin and restart the loop (recursive;
        # NOTE(review): repeated failures will deepen the call stack)
        vk.messages.send(
            user_id=195310233,
            random_id=get_random_id(),
            message='Возникла ошибка ' + str(err) + ' в главном цикле bot_herobot_ls.'
        )
        mainfunc()
# Entry point: start the long-poll loop.
mainfunc()
| 36,896
| 0
| 91
|
acd1fcf64129e702d8d0ca09af32951d757cb0c4
| 812
|
py
|
Python
|
behavior_cloning/safety_backup_C/C/environment_models/pusher.py
|
chickert/reinforcement_learning
|
473323f08b079004f27a7f0931e5e9a46bfad347
|
[
"MIT"
] | null | null | null |
behavior_cloning/safety_backup_C/C/environment_models/pusher.py
|
chickert/reinforcement_learning
|
473323f08b079004f27a7f0931e5e9a46bfad347
|
[
"MIT"
] | null | null | null |
behavior_cloning/safety_backup_C/C/environment_models/pusher.py
|
chickert/reinforcement_learning
|
473323f08b079004f27a7f0931e5e9a46bfad347
|
[
"MIT"
] | null | null | null |
from environment_models.base import BaseEnv
from airobot_utils.pusher_simulator import PusherSimulator
import numpy as np
| 25.375
| 60
| 0.665025
|
from environment_models.base import BaseEnv
from airobot_utils.pusher_simulator import PusherSimulator
import numpy as np
class PusherEnv(BaseEnv):
    """BaseEnv adapter around a non-rendering PusherSimulator.

    State space is 9-dimensional and the action space 2-dimensional;
    transitions and rewards are delegated to the underlying simulator.
    """

    def __init__(self):
        self.simulator = PusherSimulator(render=False)

        def _step(state, action):
            # Advance the simulator by one action; the new observation is
            # the next state.
            self.simulator.apply_action(action)
            return self.simulator.get_obs()

        def _reward(state, action):
            # Reward is computed by the simulator from the state alone.
            return self.simulator.compute_reward_push(state)

        BaseEnv.__init__(
            self,
            initial_state=self.simulator.get_obs(),
            transition_function=_step,
            reward_function=_reward,
            state_space_dimension=9,
            action_space_dimension=2,
        )

    def reset(self):
        """Reset the underlying simulator to its initial configuration."""
        self.simulator.reset()
| 606
| 4
| 77
|
9753e5f361d1334a73026196cb9f93a820bbcd37
| 183
|
py
|
Python
|
passenger_wsgi.py
|
kevincornish/HeckGuide
|
eb974d6b589908f5fc2308d41032a48941cc3d21
|
[
"MIT"
] | 4
|
2022-02-16T10:19:11.000Z
|
2022-03-17T03:34:26.000Z
|
passenger_wsgi.py
|
kevincornish/HeckGuide
|
eb974d6b589908f5fc2308d41032a48941cc3d21
|
[
"MIT"
] | 1
|
2022-02-17T14:02:31.000Z
|
2022-03-31T03:56:42.000Z
|
passenger_wsgi.py
|
kevincornish/HeckGuide
|
eb974d6b589908f5fc2308d41032a48941cc3d21
|
[
"MIT"
] | 3
|
2022-02-17T06:13:52.000Z
|
2022-03-23T21:37:21.000Z
|
import heckguide.wsgi
from whitenoise import WhiteNoise
# WSGI entry point: Django app wrapped in WhiteNoise so static files under
# the given root are served without a separate web-server rule.
application = heckguide.wsgi.application
application = WhiteNoise(application, root='/home/heckkciy/dev.heckguide.com/static')
| 36.6
| 85
| 0.836066
|
import heckguide.wsgi
from whitenoise import WhiteNoise
# WSGI entry point: Django app wrapped in WhiteNoise so static files under
# the given root are served without a separate web-server rule.
application = heckguide.wsgi.application
application = WhiteNoise(application, root='/home/heckkciy/dev.heckguide.com/static')
| 0
| 0
| 0
|
2443e0ae12a68ea13caba68dcda33ae496994aee
| 6,619
|
py
|
Python
|
stanford/sms-tools/software/transformations_interface/hpsTransformations_function.py
|
phunc20/dsp
|
e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886
|
[
"MIT"
] | 1
|
2021-03-12T18:32:06.000Z
|
2021-03-12T18:32:06.000Z
|
stanford/sms-tools/software/transformations_interface/hpsTransformations_function.py
|
phunc20/dsp
|
e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886
|
[
"MIT"
] | null | null | null |
stanford/sms-tools/software/transformations_interface/hpsTransformations_function.py
|
phunc20/dsp
|
e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886
|
[
"MIT"
] | null | null | null |
# function call to the transformation functions of relevance for the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF
def analysis(inputFile='../../sounds/sax-phrase-short.wav', window='blackman', M=601, N=1024, t=-100,
             minSineDur=0.1, nH=100, minf0=350, maxf0=700, f0et=5, harmDevSlope=0.01, stocf=0.1):
    """
    Analyze a sound with the harmonic plus stochastic model
    inputFile: input sound file (monophonic with sampling rate of 44100)
    window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
    M: analysis window size
    N: fft size (power of two, bigger or equal than M)
    t: magnitude threshold of spectral peaks
    minSineDur: minimum duration of sinusoidal tracks
    nH: maximum number of harmonics
    minf0: minimum fundamental frequency in sound
    maxf0: maximum fundamental frequency in sound
    f0et: maximum error accepted in f0 detection algorithm
    harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
    stocf: decimation factor used for the stochastic approximation
    returns inputFile: input file name; fs: sampling rate of input file,
    hfreq, hmag: harmonic frequencies, magnitude; mYst: stochastic residual
    """
    # size of fft used in synthesis
    Ns = 512
    # hop size (has to be 1/4 of Ns)
    H = 128
    # read input sound (UF.wavread returns sampling rate and sample array)
    (fs, x) = UF.wavread(inputFile)
    # compute analysis window
    w = get_window(window, M)
    # compute the harmonic plus stochastic model of the whole sound
    hfreq, hmag, hphase, mYst = HPS.hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf)
    # synthesize the harmonic plus stochastic model without original phases
    y, yh, yst = HPS.hpsModelSynth(hfreq, hmag, np.array([]), mYst, Ns, H, fs)
    # write output sound (strip the ".wav" suffix and append the model tag)
    outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModel.wav'
    UF.wavwrite(y,fs, outputFile)
    # create figure to plot
    plt.figure(figsize=(9, 6))
    # frequency range to plot
    maxplotfreq = 15000.0
    # plot the input sound
    plt.subplot(3,1,1)
    plt.plot(np.arange(x.size)/float(fs), x)
    plt.axis([0, x.size/float(fs), min(x), max(x)])
    plt.ylabel('amplitude')
    plt.xlabel('time (sec)')
    plt.title('input sound: x')
    # plot spectrogram of the stochastic component (up to maxplotfreq)
    plt.subplot(3,1,2)
    numFrames = int(mYst[:,0].size)
    sizeEnv = int(mYst[0,:].size)
    frmTime = H*np.arange(numFrames)/float(fs)
    binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
    plt.pcolormesh(frmTime, binFreq, np.transpose(mYst[:,:int(sizeEnv*maxplotfreq/(.5*fs))+1]))
    plt.autoscale(tight=True)
    # plot harmonic tracks on top of the stochastic spectrogram
    if (hfreq.shape[1] > 0):
        # mask harmonics above the plot range; zeros become NaN so gaps are not drawn
        harms = hfreq*np.less(hfreq,maxplotfreq)
        harms[harms==0] = np.nan
        numFrames = int(harms[:,0].size)
        frmTime = H*np.arange(numFrames)/float(fs)
        plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
        plt.xlabel('time (sec)')
        plt.ylabel('frequency (Hz)')
        plt.autoscale(tight=True)
        plt.title('harmonics + stochastic spectrogram')
    # plot the output sound
    plt.subplot(3,1,3)
    plt.plot(np.arange(y.size)/float(fs), y)
    plt.axis([0, y.size/float(fs), min(y), max(y)])
    plt.ylabel('amplitude')
    plt.xlabel('time (sec)')
    plt.title('output sound: y')
    plt.tight_layout()
    # non-blocking so the caller can continue to the transformation step
    plt.show(block=False)
    return inputFile, fs, hfreq, hmag, mYst
def transformation_synthesis(inputFile, fs, hfreq, hmag, mYst, freqScaling = np.array([0, 1.2, 2.01, 1.2, 2.679, .7, 3.146, .7]),
                             freqStretching = np.array([0, 1, 2.01, 1, 2.679, 1.5, 3.146, 1.5]), timbrePreservation = 1,
                             timeScaling = np.array([0, 0, 2.138, 2.138-1.0, 3.146, 3.146])):
    """
    transform the analysis values returned by the analysis function and synthesize the sound
    inputFile: name of input file
    fs: sampling rate of input file
    hfreq, hmag: harmonic frequencies and magnitudes
    mYst: stochastic residual
    freqScaling: frequency scaling factors, in time-value pairs (value of 1 no scaling)
    freqStretching: frequency stretching factors, in time-value pairs (value of 1 no stretching)
    timbrePreservation: 1 preserves original timbre, 0 it does not
    timeScaling: time scaling factors, in time-value pairs
    """
    # size of fft used in synthesis
    Ns = 512
    # hop size (has to be 1/4 of Ns)
    H = 128
    # frequency scaling of the harmonics
    hfreqt, hmagt = HT.harmonicFreqScaling(hfreq, hmag, freqScaling, freqStretching, timbrePreservation, fs)
    # time scaling the sound
    yhfreq, yhmag, ystocEnv = HPST.hpsTimeScale(hfreqt, hmagt, mYst, timeScaling)
    # synthesis from the transformed hps representation
    y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs)
    # write output sound (strip the ".wav" suffix and append the transformation tag)
    outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModelTransformation.wav'
    UF.wavwrite(y,fs, outputFile)
    # create figure to plot
    plt.figure(figsize=(12, 6))
    # frequency range to plot
    maxplotfreq = 15000.0
    # plot spectrogram of the transformed stochastic component
    plt.subplot(2,1,1)
    numFrames = int(ystocEnv[:,0].size)
    sizeEnv = int(ystocEnv[0,:].size)
    frmTime = H*np.arange(numFrames)/float(fs)
    binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
    plt.pcolormesh(frmTime, binFreq, np.transpose(ystocEnv[:,:int(sizeEnv*maxplotfreq/(.5*fs))+1]))
    plt.autoscale(tight=True)
    # plot transformed harmonic tracks on top of the stochastic spectrogram
    if (yhfreq.shape[1] > 0):
        # mask harmonics above the plot range; zeros become NaN so gaps are not drawn
        harms = yhfreq*np.less(yhfreq,maxplotfreq)
        harms[harms==0] = np.nan
        numFrames = int(harms[:,0].size)
        frmTime = H*np.arange(numFrames)/float(fs)
        plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
        plt.xlabel('time (sec)')
        plt.ylabel('frequency (Hz)')
        plt.autoscale(tight=True)
        plt.title('harmonics + stochastic spectrogram')
    # plot the output sound
    plt.subplot(2,1,2)
    plt.plot(np.arange(y.size)/float(fs), y)
    plt.axis([0, y.size/float(fs), min(y), max(y)])
    plt.ylabel('amplitude')
    plt.xlabel('time (sec)')
    plt.title('output sound: y')
    plt.tight_layout()
    plt.show()
if __name__ == "__main__":
    # run the default analysis, then transform/synthesize with default factors
    # analysis
    inputFile, fs, hfreq, hmag, mYst = analysis()
    # transformation and synthesis
    transformation_synthesis(inputFile, fs, hfreq, hmag, mYst)
    plt.show()
| 35.586022
| 147
| 0.706149
|
# function call to the transformation functions of relevance for the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF
def analysis(inputFile='../../sounds/sax-phrase-short.wav', window='blackman', M=601, N=1024, t=-100,
             minSineDur=0.1, nH=100, minf0=350, maxf0=700, f0et=5, harmDevSlope=0.01, stocf=0.1):
    """
    Analyze a sound with the harmonic plus stochastic model
    inputFile: input sound file (monophonic with sampling rate of 44100)
    window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
    M: analysis window size
    N: fft size (power of two, bigger or equal than M)
    t: magnitude threshold of spectral peaks
    minSineDur: minimum duration of sinusoidal tracks
    nH: maximum number of harmonics
    minf0: minimum fundamental frequency in sound
    maxf0: maximum fundamental frequency in sound
    f0et: maximum error accepted in f0 detection algorithm
    harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
    stocf: decimation factor used for the stochastic approximation
    returns inputFile: input file name; fs: sampling rate of input file,
    hfreq, hmag: harmonic frequencies, magnitude; mYst: stochastic residual
    """
    # size of fft used in synthesis
    Ns = 512
    # hop size (has to be 1/4 of Ns)
    H = 128
    # read input sound (UF.wavread returns sampling rate and sample array)
    (fs, x) = UF.wavread(inputFile)
    # compute analysis window
    w = get_window(window, M)
    # compute the harmonic plus stochastic model of the whole sound
    hfreq, hmag, hphase, mYst = HPS.hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf)
    # synthesize the harmonic plus stochastic model without original phases
    y, yh, yst = HPS.hpsModelSynth(hfreq, hmag, np.array([]), mYst, Ns, H, fs)
    # write output sound (strip the ".wav" suffix and append the model tag)
    outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModel.wav'
    UF.wavwrite(y,fs, outputFile)
    # create figure to plot
    plt.figure(figsize=(9, 6))
    # frequency range to plot
    maxplotfreq = 15000.0
    # plot the input sound
    plt.subplot(3,1,1)
    plt.plot(np.arange(x.size)/float(fs), x)
    plt.axis([0, x.size/float(fs), min(x), max(x)])
    plt.ylabel('amplitude')
    plt.xlabel('time (sec)')
    plt.title('input sound: x')
    # plot spectrogram of the stochastic component (up to maxplotfreq)
    plt.subplot(3,1,2)
    numFrames = int(mYst[:,0].size)
    sizeEnv = int(mYst[0,:].size)
    frmTime = H*np.arange(numFrames)/float(fs)
    binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
    plt.pcolormesh(frmTime, binFreq, np.transpose(mYst[:,:int(sizeEnv*maxplotfreq/(.5*fs))+1]))
    plt.autoscale(tight=True)
    # plot harmonic tracks on top of the stochastic spectrogram
    if (hfreq.shape[1] > 0):
        # mask harmonics above the plot range; zeros become NaN so gaps are not drawn
        harms = hfreq*np.less(hfreq,maxplotfreq)
        harms[harms==0] = np.nan
        numFrames = int(harms[:,0].size)
        frmTime = H*np.arange(numFrames)/float(fs)
        plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
        plt.xlabel('time (sec)')
        plt.ylabel('frequency (Hz)')
        plt.autoscale(tight=True)
        plt.title('harmonics + stochastic spectrogram')
    # plot the output sound
    plt.subplot(3,1,3)
    plt.plot(np.arange(y.size)/float(fs), y)
    plt.axis([0, y.size/float(fs), min(y), max(y)])
    plt.ylabel('amplitude')
    plt.xlabel('time (sec)')
    plt.title('output sound: y')
    plt.tight_layout()
    # non-blocking so the caller can continue to the transformation step
    plt.show(block=False)
    return inputFile, fs, hfreq, hmag, mYst
def transformation_synthesis(inputFile, fs, hfreq, hmag, mYst, freqScaling = np.array([0, 1.2, 2.01, 1.2, 2.679, .7, 3.146, .7]),
                             freqStretching = np.array([0, 1, 2.01, 1, 2.679, 1.5, 3.146, 1.5]), timbrePreservation = 1,
                             timeScaling = np.array([0, 0, 2.138, 2.138-1.0, 3.146, 3.146])):
    """
    transform the analysis values returned by the analysis function and synthesize the sound
    inputFile: name of input file
    fs: sampling rate of input file
    hfreq, hmag: harmonic frequencies and magnitudes
    mYst: stochastic residual
    freqScaling: frequency scaling factors, in time-value pairs (value of 1 no scaling)
    freqStretching: frequency stretching factors, in time-value pairs (value of 1 no stretching)
    timbrePreservation: 1 preserves original timbre, 0 it does not
    timeScaling: time scaling factors, in time-value pairs
    """
    # size of fft used in synthesis
    Ns = 512
    # hop size (has to be 1/4 of Ns)
    H = 128
    # frequency scaling of the harmonics
    hfreqt, hmagt = HT.harmonicFreqScaling(hfreq, hmag, freqScaling, freqStretching, timbrePreservation, fs)
    # time scaling the sound
    yhfreq, yhmag, ystocEnv = HPST.hpsTimeScale(hfreqt, hmagt, mYst, timeScaling)
    # synthesis from the transformed hps representation
    y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs)
    # write output sound (strip the ".wav" suffix and append the transformation tag)
    outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hpsModelTransformation.wav'
    UF.wavwrite(y,fs, outputFile)
    # create figure to plot
    plt.figure(figsize=(12, 6))
    # frequency range to plot
    maxplotfreq = 15000.0
    # plot spectrogram of the transformed stochastic component
    plt.subplot(2,1,1)
    numFrames = int(ystocEnv[:,0].size)
    sizeEnv = int(ystocEnv[0,:].size)
    frmTime = H*np.arange(numFrames)/float(fs)
    binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
    plt.pcolormesh(frmTime, binFreq, np.transpose(ystocEnv[:,:int(sizeEnv*maxplotfreq/(.5*fs))+1]))
    plt.autoscale(tight=True)
    # plot transformed harmonic tracks on top of the stochastic spectrogram
    if (yhfreq.shape[1] > 0):
        # mask harmonics above the plot range; zeros become NaN so gaps are not drawn
        harms = yhfreq*np.less(yhfreq,maxplotfreq)
        harms[harms==0] = np.nan
        numFrames = int(harms[:,0].size)
        frmTime = H*np.arange(numFrames)/float(fs)
        plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
        plt.xlabel('time (sec)')
        plt.ylabel('frequency (Hz)')
        plt.autoscale(tight=True)
        plt.title('harmonics + stochastic spectrogram')
    # plot the output sound
    plt.subplot(2,1,2)
    plt.plot(np.arange(y.size)/float(fs), y)
    plt.axis([0, y.size/float(fs), min(y), max(y)])
    plt.ylabel('amplitude')
    plt.xlabel('time (sec)')
    plt.title('output sound: y')
    plt.tight_layout()
    plt.show()
if __name__ == "__main__":
    # run the default analysis, then transform/synthesize with default factors
    # analysis
    inputFile, fs, hfreq, hmag, mYst = analysis()
    # transformation and synthesis
    transformation_synthesis(inputFile, fs, hfreq, hmag, mYst)
    plt.show()
| 0
| 0
| 0
|
1680e18896f6f4ae0a5b62b3d8827f6c5f5db509
| 153
|
py
|
Python
|
files/seeking.py
|
janbodnar/Python-Course
|
51705ab5a2adef52bcdb99a800e94c0d67144a38
|
[
"BSD-2-Clause"
] | 13
|
2017-08-22T12:26:07.000Z
|
2021-07-29T16:13:50.000Z
|
files/seeking.py
|
janbodnar/Python-Course
|
51705ab5a2adef52bcdb99a800e94c0d67144a38
|
[
"BSD-2-Clause"
] | 1
|
2021-02-08T10:24:33.000Z
|
2021-02-08T10:24:33.000Z
|
files/seeking.py
|
janbodnar/Python-Course
|
51705ab5a2adef52bcdb99a800e94c0d67144a38
|
[
"BSD-2-Clause"
] | 17
|
2018-08-13T11:10:33.000Z
|
2021-07-29T16:14:02.000Z
|
#!/usr/bin/python

# Demonstrate file seeking: read the first 22 characters, rewind to the
# beginning, and read the same 22 characters again.
with open('works.txt', 'r') as f:
    data1 = f.read(22)
    print(data1)
    # seek(offset=0, whence=0): position 0 relative to the start of the file
    f.seek(0, 0)
    data2 = f.read(22)
    print(data2)
| 12.75
| 33
| 0.542484
|
#!/usr/bin/python

# Demonstrate file seeking: read the first 22 characters, rewind to the
# beginning, and read the same 22 characters again.
with open('works.txt', 'r') as f:
    data1 = f.read(22)
    print(data1)
    # seek(offset=0, whence=0): position 0 relative to the start of the file
    f.seek(0, 0)
    data2 = f.read(22)
    print(data2)
| 0
| 0
| 0
|
5b655e24ff3b19ff2d7e65b59eebd8e045ff7d9a
| 8,016
|
py
|
Python
|
django_snowflake/operations.py
|
cedar-team/django-snowflake
|
7c5cff1299946af7b7b3c82944c9c9c5ace2a802
|
[
"MIT"
] | 14
|
2021-12-10T03:08:17.000Z
|
2022-03-12T10:18:08.000Z
|
django_snowflake/operations.py
|
cedar-team/django-snowflake
|
7c5cff1299946af7b7b3c82944c9c9c5ace2a802
|
[
"MIT"
] | 15
|
2021-10-29T23:48:22.000Z
|
2022-03-30T11:52:28.000Z
|
django_snowflake/operations.py
|
cedar-team/django-snowflake
|
7c5cff1299946af7b7b3c82944c9c9c5ace2a802
|
[
"MIT"
] | 3
|
2022-01-26T17:07:28.000Z
|
2022-03-02T08:05:16.000Z
|
import decimal
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import timezone
| 41.319588
| 110
| 0.62238
|
import decimal
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import timezone
class DatabaseOperations(BaseDatabaseOperations):
    """Snowflake-specific SQL generation and value conversion for Django's ORM."""

    cast_char_field_without_max_length = 'varchar'
    # Auto-increment fields are backed by Snowflake NUMBER columns.
    cast_data_types = {
        'AutoField': 'NUMBER',
        'BigAutoField': 'NUMBER',
        'SmallAutoField': 'NUMBER',
    }
    explain_prefix = 'EXPLAIN USING'

    def bulk_insert_sql(self, fields, placeholder_rows):
        """Return the VALUES clause for a multi-row INSERT."""
        placeholder_rows_sql = (', '.join(row) for row in placeholder_rows)
        values_sql = ', '.join('(%s)' % sql for sql in placeholder_rows_sql)
        return 'VALUES ' + values_sql

    def combine_expression(self, connector, sub_expressions):
        """Map Django's bitwise and power operators onto Snowflake functions."""
        lhs, rhs = sub_expressions
        if connector == '&':
            return 'BITAND(%s)' % ','.join(sub_expressions)
        elif connector == '|':
            return 'BITOR(%(lhs)s,%(rhs)s)' % {'lhs': lhs, 'rhs': rhs}
        elif connector == '#':
            return 'BITXOR(%(lhs)s, %(rhs)s)' % {'lhs': lhs, 'rhs': rhs}
        elif connector == '<<':
            return 'BITSHIFTLEFT(%(lhs)s, %(rhs)s)' % {'lhs': lhs, 'rhs': rhs}
        elif connector == '>>':
            return 'BITSHIFTRIGHT(%(lhs)s, %(rhs)s)' % {'lhs': lhs, 'rhs': rhs}
        elif connector == '^':
            return 'POWER(%s)' % ','.join(sub_expressions)
        return super().combine_expression(connector, sub_expressions)

    def _convert_field_to_tz(self, field_name, tzname):
        """Wrap a datetime column in CONVERT_TIMEZONE when USE_TZ is enabled."""
        if tzname and settings.USE_TZ:
            field_name = "CONVERT_TIMEZONE('%s', TO_TIMESTAMP(%s))" % (
                tzname,
                field_name,
            )
        return field_name

    def datetime_cast_date_sql(self, field_name, tzname):
        """SQL casting a (tz-adjusted) datetime column to a date."""
        field_name = self._convert_field_to_tz(field_name, tzname)
        return '(%s)::date' % field_name

    def datetime_cast_time_sql(self, field_name, tzname):
        """SQL casting a (tz-adjusted) datetime column to a time."""
        field_name = self._convert_field_to_tz(field_name, tzname)
        return '(%s)::time' % field_name

    def date_extract_sql(self, lookup_type, field_name):
        """SQL extracting a date part; maps Django lookup names to Snowflake parts."""
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        if lookup_type == 'week_day':
            # For consistency across backends, return Sunday=1, Saturday=7.
            return "EXTRACT('dow', %s) + 1" % field_name
        elif lookup_type == 'iso_week_day':
            return "EXTRACT('dow_iso', %s)" % field_name
        elif lookup_type == 'iso_year':
            return "EXTRACT('yearofweekiso', %s)" % field_name
        else:
            return "EXTRACT('%s', %s)" % (lookup_type, field_name)

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        """Like date_extract_sql, but applies the time-zone conversion first."""
        field_name = self._convert_field_to_tz(field_name, tzname)
        return self.date_extract_sql(lookup_type, field_name)

    def date_trunc_sql(self, lookup_type, field_name, tzname=None):
        """SQL truncating a date column to the given precision."""
        field_name = self._convert_field_to_tz(field_name, tzname)
        return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        """SQL truncating a datetime column to the given precision."""
        field_name = self._convert_field_to_tz(field_name, tzname)
        return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)

    def time_trunc_sql(self, lookup_type, field_name, tzname=None):
        """SQL truncating a time column to the given precision."""
        field_name = self._convert_field_to_tz(field_name, tzname)
        return "DATE_TRUNC('%s', %s)::time" % (lookup_type, field_name)

    def format_for_duration_arithmetic(self, sql):
        """Express a microsecond count as a Snowflake INTERVAL literal."""
        return "INTERVAL '%s MICROSECONDS'" % sql

    def get_db_converters(self, expression):
        """Attach result converters for datetimes (naive mode) and UUIDs."""
        converters = super().get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        if internal_type == 'DateTimeField':
            if not settings.USE_TZ:
                converters.append(self.convert_datetimefield_value)
        elif internal_type == 'UUIDField':
            converters.append(self.convert_uuidfield_value)
        return converters

    def convert_datetimefield_value(self, value, expression, connection):
        """Strip tzinfo from fetched datetimes when USE_TZ is off."""
        if value is not None:
            # Django expects naive datetimes when settings.USE_TZ is False.
            value = timezone.make_naive(value)
        return value

    def convert_durationfield_value(self, value, expression, connection):
        """Coerce Decimal microseconds to float before building a timedelta."""
        # Snowflake sometimes returns Decimal which is an unsupported type for
        # timedelta microseconds component.
        if isinstance(value, decimal.Decimal):
            value = float(value)
        return super().convert_durationfield_value(value, expression, connection)

    def convert_uuidfield_value(self, value, expression, connection):
        """Parse the string Snowflake returns into a uuid.UUID."""
        if value is not None:
            value = uuid.UUID(value)
        return value

    def adapt_datetimefield_value(self, value):
        """Pass expressions through unchanged; adapt plain datetimes normally."""
        # Work around a bug in Django: https://code.djangoproject.com/ticket/33229
        if hasattr(value, 'resolve_expression'):
            return value
        return super().adapt_datetimefield_value(value)

    def adapt_timefield_value(self, value):
        """Pass expressions through unchanged; adapt plain times normally."""
        # Work around a bug in Django: https://code.djangoproject.com/ticket/33229
        if hasattr(value, 'resolve_expression'):
            return value
        return super().adapt_timefield_value(value)

    def explain_query_prefix(self, format=None, **options):
        """Build 'EXPLAIN USING <FORMAT>'; Snowflake requires a format keyword."""
        if format is None:
            format = 'TABULAR'
        prefix = super().explain_query_prefix(format, **options)
        return prefix + ' ' + format

    def last_executed_query(self, cursor, sql, params):
        """The Snowflake cursor exposes the interpolated query directly."""
        return cursor.query

    def last_insert_id(self, cursor, table_name, pk_name):
        """Fetch MAX(pk) as the last inserted id."""
        # This is subject to race conditions.
        return cursor.execute(
            'SELECT MAX({pk_name}) FROM {table_name}'.format(
                pk_name=self.quote_name(pk_name),
                table_name=self.quote_name(table_name),
            )
        ).fetchone()[0]

    def limit_offset_sql(self, low_mark, high_mark):
        """Return the LIMIT/OFFSET clause, allowing 'LIMIT null' for no limit."""
        # This method is copied from BaseDatabaseOperations with 'LIMIT %d'
        # replaced with 'LIMIT %s' to allow "LIMIT null" for no limit.
        limit, offset = self._get_limit_offset_params(low_mark, high_mark)
        return ' '.join(sql for sql in (
            ('LIMIT %s' % limit) if limit else None,
            ('OFFSET %d' % offset) if offset else None,
        ) if sql)

    def no_limit_value(self):
        """Snowflake spells "no limit" as LIMIT null."""
        return 'null'

    def quote_name(self, name):
        """Uppercase and double-quote an identifier (Snowflake folds to upper)."""
        if name.startswith('"') and name.endswith('"'):
            return name  # Quoting once is enough.
        return '"%s"' % name.upper().replace('.', '"."')

    def regex_lookup(self, lookup_type):
        """REGEXP_LIKE with 'c' (case-sensitive) for regex, 'i' for iregex."""
        match_option = 'c' if lookup_type == 'regex' else 'i'
        return "REGEXP_LIKE(%%s, %%s, '%s')" % match_option

    def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False):
        """Return TRUNCATE statements (resetting sequences) or DELETEs otherwise."""
        if not tables:
            return []
        sql = []
        if reset_sequences:
            sql.extend(
                '%s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    style.SQL_FIELD(self.quote_name(table_name)),
                ) for table_name in tables
            )
        else:
            # DELETE to preserve sequences.
            sql.extend(
                '%s %s %s;' % (
                    style.SQL_KEYWORD('DELETE'),
                    style.SQL_KEYWORD('FROM'),
                    style.SQL_FIELD(self.quote_name(table_name)),
                ) for table_name in tables
            )
        return sql

    def subtract_temporals(self, internal_type, lhs, rhs):
        """Compute lhs - rhs in microseconds via TIMEDIFF."""
        lhs_sql, lhs_params = lhs
        rhs_sql, rhs_params = rhs
        if internal_type == 'TimeField':
            # Cast rhs_sql with TO_TIME in case it's a string.
            return f"TIMEDIFF(MICROSECOND, TO_TIME({rhs_sql}), {lhs_sql})", (*rhs_params, *lhs_params)
        return f"TIMEDIFF(MICROSECOND, {rhs_sql}, {lhs_sql})", (*rhs_params, *lhs_params)
| 6,878
| 949
| 23
|
caabfd38ff62ea17041d4c129f3b961d4bd1e247
| 37
|
py
|
Python
|
analysis/hello_world.py
|
goinvo/MAHealthIssues
|
d1f7e4cfeb1b48aa03f4cc2a8fa758afe3f6b8a5
|
[
"MIT"
] | null | null | null |
analysis/hello_world.py
|
goinvo/MAHealthIssues
|
d1f7e4cfeb1b48aa03f4cc2a8fa758afe3f6b8a5
|
[
"MIT"
] | null | null | null |
analysis/hello_world.py
|
goinvo/MAHealthIssues
|
d1f7e4cfeb1b48aa03f4cc2a8fa758afe3f6b8a5
|
[
"MIT"
] | null | null | null |
# Minimal smoke-test script: bind a greeting string, then echo it to stdout.
teststring = "hello"
print(teststring)
| 18.5
| 19
| 0.810811
|
# Minimal smoke-test script: bind a greeting string, then echo it to stdout.
teststring = "hello"
print(teststring)
| 0
| 0
| 0
|
356e1b5e5b3c4424b8872304ce6c3695e3403f0f
| 2,961
|
py
|
Python
|
insights/parsers/tests/test_nfs_exports.py
|
sagaraivale/insights-core
|
852a9669c998acf995e316bd407aeb4dbc6c485e
|
[
"Apache-2.0"
] | 1
|
2018-03-26T12:59:24.000Z
|
2018-03-26T12:59:24.000Z
|
insights/parsers/tests/test_nfs_exports.py
|
sagaraivale/insights-core
|
852a9669c998acf995e316bd407aeb4dbc6c485e
|
[
"Apache-2.0"
] | null | null | null |
insights/parsers/tests/test_nfs_exports.py
|
sagaraivale/insights-core
|
852a9669c998acf995e316bd407aeb4dbc6c485e
|
[
"Apache-2.0"
] | null | null | null |
from insights.parsers.nfs_exports import NFSExports, NFSExportsD
from insights.tests import context_wrap
# Sample /etc/exports content: export paths each followed by per-client option
# lists, one commented-out line, and a duplicate "/home/example" entry that the
# parser reports via ignored_lines (see the assertions in _do below).
EXPORTS = """
/home/utcs/shared/ro @rhtttttttttttt(ro,sync) ins1.example.com(rw,sync,no_root_squash) ins2.example.com(rw,sync,no_root_squash)
/home/insights/shared/rw @rhtttttttttttt(rw,sync) ins1.example.com(rw,sync,no_root_squash) ins2.example.com(ro,sync,no_root_squash)
/home/insights/shared/special/all/mail @rhtttttttttttt(rw,sync,no_root_squash)
/home/insights/ins/special/all/config @rhtttttttttttt(ro,sync,no_root_squash) ins1.example.com(rw,sync,no_root_squash)
#/home/insights ins1.example.com(rw,sync,no_root_squash)
/home/example @rhtttttttttttt(rw,sync,root_squash) ins1.example.com(rw,sync,no_root_squash) ins2.example.com(rw,sync,no_root_squash)
/home/example ins3.example.com(rw,sync,no_root_squash)
""".strip()
| 41.125
| 142
| 0.65282
|
from insights.parsers.nfs_exports import NFSExports, NFSExportsD
from insights.tests import context_wrap
# Sample /etc/exports content: export paths each followed by per-client option
# lists, one commented-out line, and a duplicate "/home/example" entry that the
# parser reports via ignored_lines (see the assertions in _do below).
EXPORTS = """
/home/utcs/shared/ro @rhtttttttttttt(ro,sync) ins1.example.com(rw,sync,no_root_squash) ins2.example.com(rw,sync,no_root_squash)
/home/insights/shared/rw @rhtttttttttttt(rw,sync) ins1.example.com(rw,sync,no_root_squash) ins2.example.com(ro,sync,no_root_squash)
/home/insights/shared/special/all/mail @rhtttttttttttt(rw,sync,no_root_squash)
/home/insights/ins/special/all/config @rhtttttttttttt(ro,sync,no_root_squash) ins1.example.com(rw,sync,no_root_squash)
#/home/insights ins1.example.com(rw,sync,no_root_squash)
/home/example @rhtttttttttttt(rw,sync,root_squash) ins1.example.com(rw,sync,no_root_squash) ins2.example.com(rw,sync,no_root_squash)
/home/example ins3.example.com(rw,sync,no_root_squash)
""".strip()
def test_nfs_exports():
    """Parse the EXPORTS fixture with the /etc/exports parser."""
    nfs_exports = NFSExports(context_wrap(EXPORTS))
    _do(nfs_exports)


def test_nfs_exports_d():
    """The exports.d parser must yield the same results for the same content."""
    nfs_exports = NFSExportsD(context_wrap(EXPORTS))
    _do(nfs_exports)


def test_nfs_exports_empty():
    """Empty input produces an empty parse."""
    nfs_exports = NFSExports(context_wrap(""))
    _do_empty(nfs_exports)


def test_nfs_exports_d_empty():
    """Empty input produces an empty parse for the exports.d variant too."""
    nfs_exports = NFSExportsD(context_wrap(""))
    _do_empty(nfs_exports)


def _do_empty(nfs_exports):
    """Shared assertions: empty input yields empty data, lines, and sets."""
    assert nfs_exports.data == {}
    assert nfs_exports.ignored_lines == []
    assert nfs_exports.all_options() == set()
    assert nfs_exports.export_paths() == set()


def _do(nfs_exports):
    """Shared assertions for the EXPORTS fixture: per-path client/option map,
    the duplicate "/home/example" line reported as ignored, and the aggregate
    option and path sets."""
    assert nfs_exports.data == {
        "/home/utcs/shared/ro": {
            "@rhtttttttttttt": ["ro", "sync"],
            "ins1.example.com": ["rw", "sync", "no_root_squash"],
            "ins2.example.com": ["rw", "sync", "no_root_squash"]
        }, "/home/insights/shared/rw": {
            "@rhtttttttttttt": ["rw", "sync"],
            "ins1.example.com": ["rw", "sync", "no_root_squash"],
            "ins2.example.com": ["ro", "sync", "no_root_squash"]
        }, "/home/insights/shared/special/all/mail": {
            "@rhtttttttttttt": ["rw", "sync", "no_root_squash"]
        }, "/home/insights/ins/special/all/config": {
            "@rhtttttttttttt": ["ro", "sync", "no_root_squash"],
            "ins1.example.com": ["rw", "sync", "no_root_squash"]
        }, "/home/example": {
            "@rhtttttttttttt": ["rw", "sync", "root_squash"],
            "ins1.example.com": ["rw", "sync", "no_root_squash"],
            "ins2.example.com": ["rw", "sync", "no_root_squash"]
        }
    }
    # the second "/home/example" line duplicates an existing path and is ignored
    assert nfs_exports.ignored_lines == [
        "/home/example ins3.example.com(rw,sync,no_root_squash)"
    ]
    assert nfs_exports.all_options() == set(["ro", "rw", "sync", "no_root_squash", "root_squash"])
    assert nfs_exports.export_paths() == set([
        "/home/utcs/shared/ro", "/home/insights/shared/rw",
        "/home/insights/shared/special/all/mail",
        "/home/insights/ins/special/all/config", "/home/example"
    ])
| 1,953
| 0
| 138
|
52d1d36ea29df25496029ae68d491bcbcc3e65fb
| 1,596
|
py
|
Python
|
DistributionTools/PEATDB/Linux/freeze.py
|
shambo001/peat
|
7a26e896aa9914b084a9064df09ed15df4047cf3
|
[
"MIT"
] | 3
|
2016-11-11T06:11:03.000Z
|
2021-09-12T22:13:51.000Z
|
DistributionTools/PEATDB/Linux/freeze.py
|
shambo001/peat
|
7a26e896aa9914b084a9064df09ed15df4047cf3
|
[
"MIT"
] | null | null | null |
DistributionTools/PEATDB/Linux/freeze.py
|
shambo001/peat
|
7a26e896aa9914b084a9064df09ed15df4047cf3
|
[
"MIT"
] | 2
|
2016-02-15T16:10:36.000Z
|
2018-02-27T10:33:21.000Z
|
#!/usr/bin/env python
#bbfreeze setup file for PEAT_DB distribution on Windows
#Damien Farrell, #October 2009

"""
This script can be used to create a standalone executable for
either windows or linux. It must be run on the target platform.
You will need to install bbfreeze, see http://pypi.python.org/pypi/bbfreeze/
"""

from bbfreeze import Freezer
import sys, os, shutil

# start from a clean output directory
shutil.rmtree('peatdb', ignore_errors=True)
path=os.path.abspath('../../..')
peatpath=os.path.abspath('../../../PEATDB')
version = '2.0'

# freeze the three PEAT entry-point scripts, bundling numpy, excluding wx
f = Freezer('peatdb', includes=("numpy",),excludes=("wx",))
f.addScript(os.path.join(peatpath, "PEATApp.py"))
f.addScript(os.path.join(peatpath, "Ekin/Ekin_main.py"))
f.addScript(os.path.join(peatpath, "DNAtool/DNAtool.py"))
m=f.mf
f() # runs the freezing process

'''post freeze'''
#mpl data: copy matplotlib's bundled data files next to the frozen app
import matplotlib
mpldir = matplotlib.get_data_path()
datadir = 'peatdb/mpl-data'
shutil.copytree(mpldir, datadir)

#add peat resource files
resources = ['PEATDB/DNAtool/restriction_enzymes.DAT',
             'PEATDB/data/AA_masses.txt',
             'PEATDB/App.ico',
             'PEATDB/DNAtool/DNAtool.ico',
             'Protool/AA.DAT',
             'Protool/bbdep02.May.sortlib']
for r in resources:
    shutil.copy(os.path.join(path, r), 'peatdb')
#set icon?

#make zip archive of the frozen directory tree
#NOTE(review): `f` is rebound here from the Freezer to the ZipFile
import zipfile
f = zipfile.ZipFile("peatdb-2.0.zip", "w")
for dirpath, dirnames, filenames in os.walk('peatdb'):
    for fname in filenames:
        fullname = os.path.join(dirpath, fname)
        f.write(fullname)
f.close()
| 29.555556
| 77
| 0.657268
|
#!/usr/bin/env python
#bbfreeze setup file for PEAT_DB distribution on Windows
#Damien Farrell, #October 2009

"""
This script can be used to create a standalone executable for
either windows or linux. It must be run on the target platform.
You will need to install bbfreeze, see http://pypi.python.org/pypi/bbfreeze/
"""

from bbfreeze import Freezer
import sys, os, shutil

# start from a clean output directory
shutil.rmtree('peatdb', ignore_errors=True)
path=os.path.abspath('../../..')
peatpath=os.path.abspath('../../../PEATDB')
version = '2.0'

# freeze the three PEAT entry-point scripts, bundling numpy, excluding wx
f = Freezer('peatdb', includes=("numpy",),excludes=("wx",))
f.addScript(os.path.join(peatpath, "PEATApp.py"))
f.addScript(os.path.join(peatpath, "Ekin/Ekin_main.py"))
f.addScript(os.path.join(peatpath, "DNAtool/DNAtool.py"))
m=f.mf
f() # runs the freezing process

'''post freeze'''
#mpl data: copy matplotlib's bundled data files next to the frozen app
import matplotlib
mpldir = matplotlib.get_data_path()
datadir = 'peatdb/mpl-data'
shutil.copytree(mpldir, datadir)

#add peat resource files
resources = ['PEATDB/DNAtool/restriction_enzymes.DAT',
             'PEATDB/data/AA_masses.txt',
             'PEATDB/App.ico',
             'PEATDB/DNAtool/DNAtool.ico',
             'Protool/AA.DAT',
             'Protool/bbdep02.May.sortlib']
for r in resources:
    shutil.copy(os.path.join(path, r), 'peatdb')
#set icon?

#make zip archive of the frozen directory tree
#NOTE(review): `f` is rebound here from the Freezer to the ZipFile
import zipfile
f = zipfile.ZipFile("peatdb-2.0.zip", "w")
for dirpath, dirnames, filenames in os.walk('peatdb'):
    for fname in filenames:
        fullname = os.path.join(dirpath, fname)
        f.write(fullname)
f.close()
| 0
| 0
| 0
|
38ceffbba3441d962dbd22cc8e0a2968b1cf2fc1
| 8,170
|
py
|
Python
|
common/replay_buffer.py
|
schmidtdominik/Rainbow
|
298c93d3d9322440d3a22cf24045b57af9c83fde
|
[
"MIT"
] | 28
|
2021-07-26T18:35:06.000Z
|
2022-03-28T02:42:04.000Z
|
common/replay_buffer.py
|
schmidtdominik/Rainbow
|
298c93d3d9322440d3a22cf24045b57af9c83fde
|
[
"MIT"
] | null | null | null |
common/replay_buffer.py
|
schmidtdominik/Rainbow
|
298c93d3d9322440d3a22cf24045b57af9c83fde
|
[
"MIT"
] | null | null | null |
import collections
import random
from math import sqrt
import numpy as np
import torch
from gym.wrappers import LazyFrames
from common.utils import prep_observation_for_qnet
class PrioritizedReplayBuffer:
""" based on https://nn.labml.ai/rl/dqn, supports n-step bootstrapping and parallel environments,
removed alpha hyperparameter like google/dopamine
"""
@staticmethod
def find_prefix_sum_idx(self, prefix_sum):
    """ find the largest i such that the sum of the leaves from 1 to i is <= prefix sum"""
    # NOTE(review): declared @staticmethod yet takes `self` explicitly, so callers
    # must pass the buffer instance by hand — confirm this is intentional.
    # Walk the implicit binary sum-tree (array of size 2*capacity, root at index 1)
    # down to a leaf: descend left while the left subtree's mass exceeds prefix_sum,
    # otherwise consume that mass and descend right.
    idx = 1
    while idx < self.capacity:
        if self.priority_sum[idx * 2] > prefix_sum:
            idx = 2 * idx
        else:
            prefix_sum -= self.priority_sum[idx * 2]
            idx = 2 * idx + 1
    # convert the leaf's tree index back to a buffer slot index
    return idx - self.capacity
@property
@property
| 39.660194
| 148
| 0.614688
|
import collections
import random
from math import sqrt
import numpy as np
import torch
from gym.wrappers import LazyFrames
from common.utils import prep_observation_for_qnet
class UniformReplayBuffer:
    """Uniform experience-replay ring buffer with n-step reward bootstrapping,
    fed by several parallel environments (one n-step deque per environment)."""

    def __init__(self, burnin, capacity, gamma, n_step, parallel_envs, use_amp):
        self.capacity = capacity          # max stored transitions (ring buffer size)
        self.burnin = burnin              # transitions required before training starts
        self.buffer = []                  # stored n-step transitions
        self.nextwrite = 0                # next overwrite position once full
        self.use_amp = use_amp            # mixed-precision flag forwarded to prep_observation_for_qnet
        self.gamma = gamma                # per-step discount factor
        self.n_step = n_step              # bootstrap horizon
        # one deque per parallel env, holding the last n_step+1 raw transitions
        self.n_step_buffers = [collections.deque(maxlen=self.n_step + 1) for j in range(parallel_envs)]

    def put(self, *transition, j):
        """Append a raw (state, action, reward, done) transition from env j; once the
        deque holds n_step+1 entries, fold them into one n-step transition and store it."""
        self.n_step_buffers[j].append(transition)
        if len(self.n_step_buffers[j]) == self.n_step + 1 and not self.n_step_buffers[j][0][3]:  # n-step transition can't start with terminal state
            state = self.n_step_buffers[j][0][0]
            action = self.n_step_buffers[j][0][1]
            next_state = self.n_step_buffers[j][self.n_step][0]
            done = self.n_step_buffers[j][self.n_step][3]
            reward = self.n_step_buffers[j][0][2]
            # accumulate discounted rewards, truncating at the first terminal step
            for k in range(1, self.n_step):
                reward += self.n_step_buffers[j][k][2] * self.gamma ** k
                if self.n_step_buffers[j][k][3]:
                    done = True
                    break
            action = torch.LongTensor([action]).cuda()
            reward = torch.FloatTensor([reward]).cuda()
            done = torch.FloatTensor([done]).cuda()
            # grow until capacity, then overwrite cyclically
            if len(self.buffer) < self.capacity:
                self.buffer.append((state, next_state, action, reward, done))
            else:
                self.buffer[self.nextwrite % self.capacity] = (state, next_state, action, reward, done)
                self.nextwrite += 1

    def sample(self, batch_size, beta=None):
        """ Sample a minibatch from the ER buffer (also converts the FrameStacked LazyFrames to contiguous tensors) """
        # `beta` is accepted only for API parity with the prioritized buffer; unused here.
        batch = random.sample(self.buffer, batch_size)
        state, next_state, action, reward, done = zip(*batch)
        state = list(map(lambda x: torch.from_numpy(x.__array__()), state))
        next_state = list(map(lambda x: torch.from_numpy(x.__array__()), next_state))
        state, next_state, action, reward, done = map(torch.stack, [state, next_state, action, reward, done])
        return prep_observation_for_qnet(state, self.use_amp), prep_observation_for_qnet(next_state, self.use_amp), \
               action.squeeze(), reward.squeeze(), done.squeeze()

    @property
    def burnedin(self):
        # True once enough transitions are stored to begin training
        return len(self) >= self.burnin

    def __len__(self):
        return len(self.buffer)
class PrioritizedReplayBuffer:
    """ based on https://nn.labml.ai/rl/dqn, supports n-step bootstrapping and parallel environments,
    removed alpha hyperparameter like google/dopamine

    Priorities live in two flat binary segment trees (`priority_sum`,
    `priority_min`): index 1 is the root, the leaf for slot i is at
    i + capacity, and every inner node aggregates its two children. This
    gives O(log capacity) priority updates and proportional sampling.
    """
    def __init__(self, burnin: int, capacity: int, gamma: float, n_step: int, parallel_envs: int, use_amp):
        self.burnin = burnin
        self.capacity = capacity  # must be a power of two
        self.gamma = gamma
        self.n_step = n_step
        # One deque per parallel env, holding the last n_step + 1 raw transitions.
        self.n_step_buffers = [collections.deque(maxlen=self.n_step + 1) for j in range(parallel_envs)]
        self.use_amp = use_amp
        # Segment trees over leaf priorities: sums drive sampling,
        # minima drive importance-weight normalization.
        self.priority_sum = [0 for _ in range(2 * self.capacity)]
        self.priority_min = [float('inf') for _ in range(2 * self.capacity)]
        self.max_priority = 1.0  # initial priority of new transitions
        self.data = [None for _ in range(self.capacity)]  # cyclical buffer for transitions
        self.next_idx = 0  # next write location
        self.size = 0  # number of buffer elements
    @staticmethod
    def prepare_transition(state, next_state, action: int, reward: float, done: bool):
        # Scalars become 1-element CUDA tensors; states stay as LazyFrames
        # until sampling (assumes CUDA is available — TODO confirm).
        action = torch.LongTensor([action]).cuda()
        reward = torch.FloatTensor([reward]).cuda()
        done = torch.FloatTensor([done]).cuda()
        return state, next_state, action, reward, done
    def put(self, *transition, j):
        """Record one transition for env `j`; emit an n-step transition with
        maximal priority once the per-env deque holds n_step + 1 entries."""
        self.n_step_buffers[j].append(transition)
        if len(self.n_step_buffers[j]) == self.n_step + 1 and not self.n_step_buffers[j][0][3]:  # n-step transition can't start with terminal state
            state = self.n_step_buffers[j][0][0]
            action = self.n_step_buffers[j][0][1]
            next_state = self.n_step_buffers[j][self.n_step][0]
            done = self.n_step_buffers[j][self.n_step][3]
            # Discounted n-step return, truncated at the first terminal in the window.
            reward = self.n_step_buffers[j][0][2]
            for k in range(1, self.n_step):
                reward += self.n_step_buffers[j][k][2] * self.gamma ** k
                if self.n_step_buffers[j][k][3]:
                    done = True
                    break
            assert isinstance(state, LazyFrames)
            assert isinstance(next_state, LazyFrames)
            # Cyclic write; new transitions enter with the current max priority
            # (exponent 0.5 applied via sqrt) so they are sampled at least once.
            idx = self.next_idx
            self.data[idx] = self.prepare_transition(state, next_state, action, reward, done)
            self.next_idx = (idx + 1) % self.capacity
            self.size = min(self.capacity, self.size + 1)
            self._set_priority_min(idx, sqrt(self.max_priority))
            self._set_priority_sum(idx, sqrt(self.max_priority))
    def _set_priority_min(self, idx, priority_alpha):
        # Write the leaf, then walk up to the root recomputing each parent's min.
        idx += self.capacity
        self.priority_min[idx] = priority_alpha
        while idx >= 2:
            idx //= 2
            self.priority_min[idx] = min(self.priority_min[2 * idx], self.priority_min[2 * idx + 1])
    def _set_priority_sum(self, idx, priority):
        # Write the leaf, then walk up to the root recomputing each parent's sum.
        idx += self.capacity
        self.priority_sum[idx] = priority
        while idx >= 2:
            idx //= 2
            self.priority_sum[idx] = self.priority_sum[2 * idx] + self.priority_sum[2 * idx + 1]
    def _sum(self):
        # Root of the sum tree: total priority mass.
        return self.priority_sum[1]
    def _min(self):
        # Root of the min tree: smallest leaf priority.
        return self.priority_min[1]
    def find_prefix_sum_idx(self, prefix_sum):
        """ find the largest i such that the sum of the leaves from 1 to i is <= prefix sum"""
        idx = 1
        while idx < self.capacity:
            # Descend left if the left subtree covers the prefix sum,
            # otherwise subtract its mass and descend right.
            if self.priority_sum[idx * 2] > prefix_sum:
                idx = 2 * idx
            else:
                prefix_sum -= self.priority_sum[idx * 2]
                idx = 2 * idx + 1
        return idx - self.capacity
    def sample(self, batch_size: int, beta: float) -> tuple:
        """Proportionally sample a minibatch; returns (indices, importance
        weights normalized by the max weight, prepared tensors)."""
        weights = np.zeros(shape=batch_size, dtype=np.float32)
        indices = np.zeros(shape=batch_size, dtype=np.int32)
        # Inverse-CDF sampling via the sum tree.
        for i in range(batch_size):
            p = random.random() * self._sum()
            idx = self.find_prefix_sum_idx(p)
            indices[i] = idx
        # Normalize importance weights by the largest possible weight
        # (the one belonging to the minimum-probability sample).
        prob_min = self._min() / self._sum()
        max_weight = (prob_min * self.size) ** (-beta)
        for i in range(batch_size):
            idx = indices[i]
            prob = self.priority_sum[idx + self.capacity] / self._sum()
            weight = (prob * self.size) ** (-beta)
            weights[i] = weight / max_weight
        samples = []
        for i in indices:
            samples.append(self.data[i])
        return indices, weights, self.prepare_samples(samples)
    def prepare_samples(self, batch):
        # Materialize LazyFrames into stacked contiguous tensors.
        state, next_state, action, reward, done = zip(*batch)
        state = list(map(lambda x: torch.from_numpy(x.__array__()), state))
        next_state = list(map(lambda x: torch.from_numpy(x.__array__()), next_state))
        state, next_state, action, reward, done = map(torch.stack, [state, next_state, action, reward, done])
        return prep_observation_for_qnet(state, self.use_amp), prep_observation_for_qnet(next_state, self.use_amp), \
               action.squeeze(), reward.squeeze(), done.squeeze()
    def update_priorities(self, indexes, priorities):
        """Refresh leaf priorities after a learning step (exponent 0.5 via sqrt)."""
        for idx, priority in zip(indexes, priorities):
            self.max_priority = max(self.max_priority, priority)
            priority_alpha = sqrt(priority)
            self._set_priority_min(idx, priority_alpha)
            self._set_priority_sum(idx, priority_alpha)
    @property
    def is_full(self):
        # True once the cyclic buffer has wrapped at least once.
        return self.capacity == self.size
    @property
    def burnedin(self):
        # True once enough transitions are stored to start learning.
        return len(self) >= self.burnin
    def __len__(self):
        return self.size
| 6,084
| 866
| 371
|
82160d72d0eb2c7ce7a388b05819e810a8c85d97
| 3,457
|
py
|
Python
|
controller/connectors/SCVMM.py
|
maybe-hello-world/tortilla-controller
|
4bb6b9d893eacaec397357881843cd03037549e3
|
[
"MIT"
] | null | null | null |
controller/connectors/SCVMM.py
|
maybe-hello-world/tortilla-controller
|
4bb6b9d893eacaec397357881843cd03037549e3
|
[
"MIT"
] | null | null | null |
controller/connectors/SCVMM.py
|
maybe-hello-world/tortilla-controller
|
4bb6b9d893eacaec397357881843cd03037549e3
|
[
"MIT"
] | null | null | null |
import logging
import aiohttp
from typing import Tuple
from controller.data.VM import VM
from controller.connectors.Connector import Connector
| 33.563107
| 123
| 0.563494
|
import logging
import aiohttp
from typing import Tuple
from controller.data.VM import VM
from controller.connectors.Connector import Connector
class SCVMMConnector(Connector):
    """Async connector for an SCVMM (System Center Virtual Machine Manager) REST gateway.

    All VM actions are POSTed to the gateway; checkpoint operations are not
    supported by this backend and raise NotImplementedError.
    """

    def __init__(self, url: str, timeout: int = 30):
        """
        Instantiate SCVMMConnector class
        :param url: url of a SCVMM host
        :param timeout: timeout for all requests
        """
        self.SCVMM_URL = url
        self.logger = logging.getLogger("scvmm")
        self.logger.info("SCVMM API url is set to " + self.SCVMM_URL)
        self.timeout = timeout

    async def async_open(self):
        """Create the shared aiohttp session (sock_read gets a 4x longer budget)."""
        self.session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=self.timeout, sock_read=self.timeout * 4))

    async def async_close(self):
        """Close the shared aiohttp session."""
        await self.session.close()

    # Renamed from __send_get_request: this helper issues a POST, and the old
    # name was misleading. Private (name-mangled), so no external callers break.
    async def __send_post_request(self, url: str, payload: dict) -> bool:
        """POST `payload` as JSON to `url`; True iff a 2xx status came back."""
        try:
            async with self.session.post(url=url, json=payload) as resp:
                self.logger.info(f"Request to {url}, result: {resp.status}.")
                self.logger.debug(f"Payload: {payload}")
                return 200 <= resp.status < 300
        except Exception as e:
            # Best effort: log and report failure instead of propagating.
            self.logger.exception(str(e))
            return False

    async def list_vms(self, domain: str, username: str) -> Tuple[VM, ...]:
        """Return the VMs visible to domain\\username, or () on any failure."""
        url = self.SCVMM_URL + "vm/list"
        params = {
            'domain': domain,
            'username': username
        }
        try:
            async with self.session.get(url=url, params=params) as resp:
                if not 200 <= resp.status < 300:
                    return ()
                data = await resp.json()
                # Map the gateway's JSON records onto VM objects; missing
                # fields fall back to "-" placeholders.
                vm_list = tuple(
                    VM(
                        name=x.get('Name', "-"),
                        vmid=x.get('ID', "-"),
                        status=x.get('VirtualMachineState', "-"),
                        task=x.get('MostRecentTask', "-"),
                        taskStatus=x.get('MostRecentTaskUIState', "-"),
                        vmhost=x.get('VMHost', "-"),
                        protocol="vmrdp",
                        port=2179,
                        vmprovider="scvmm"
                    )
                    for x in data
                )
                self.logger.debug(f"VM list for {domain}\\{username} returned")
                return vm_list
        except Exception as e:
            self.logger.exception(e)
            return ()

    async def start(self, vmid: str) -> bool:
        """Power on the VM; True on a 2xx response."""
        url = self.SCVMM_URL + "vm/start"
        payload = {'vmid': vmid}
        return await self.__send_post_request(url=url, payload=payload)

    async def shutdown(self, vmid: str) -> bool:
        """Gracefully shut down the VM guest OS; True on a 2xx response."""
        url = self.SCVMM_URL + "vm/shutdown"
        payload = {'vmid': vmid}
        return await self.__send_post_request(url=url, payload=payload)

    async def poweroff(self, vmid: str) -> bool:
        """Hard power-off the VM; True on a 2xx response."""
        url = self.SCVMM_URL + "vm/poweroff"
        payload = {'vmid': vmid}
        return await self.__send_post_request(url=url, payload=payload)

    async def save(self, vmid: str) -> bool:
        """Save (suspend-to-disk) the VM state; True on a 2xx response."""
        url = self.SCVMM_URL + "vm/save"
        payload = {'vmid': vmid}
        return await self.__send_post_request(url=url, payload=payload)

    async def list_checkpoints(self, *a, **kw) -> tuple:
        raise NotImplementedError

    async def create_checkpoint(self, *a, **kw) -> bool:
        raise NotImplementedError

    async def remove_checkpoint(self, *a, **kw) -> bool:
        raise NotImplementedError
| 2,594
| 695
| 23
|
5ea8fc75b71ddaea968fa99dc4a47623684bb7ac
| 706
|
py
|
Python
|
estate/forms.py
|
apwao/neighborhood
|
b71028fb0e312a57776b8485c7bf8e43b8f6c5d5
|
[
"Unlicense",
"MIT"
] | null | null | null |
estate/forms.py
|
apwao/neighborhood
|
b71028fb0e312a57776b8485c7bf8e43b8f6c5d5
|
[
"Unlicense",
"MIT"
] | 5
|
2020-06-05T22:06:38.000Z
|
2021-09-08T01:07:31.000Z
|
estate/forms.py
|
apwao/neighborhood
|
b71028fb0e312a57776b8485c7bf8e43b8f6c5d5
|
[
"Unlicense",
"MIT"
] | null | null | null |
from .models import Business,Profile
from django import forms
class BusinessForm(forms.ModelForm):
    """
    ModelForm that lets a user register a business
    with the application.
    """
class ProfileForm(forms.ModelForm):
    """
    ModelForm that lets a user create or edit their profile.
    (Docstring corrected: it was a copy-paste of BusinessForm's.)
    """
| 27.153846
| 90
| 0.637394
|
from .models import Business,Profile
from django import forms
class BusinessForm(forms.ModelForm):
    """
    ModelForm that lets a user register a business
    with the application.
    """
    class Meta:
        # Expose only the user-editable Business fields.
        model=Business
        fields=('biz_name','email_address','description','image')
class ProfileForm(forms.ModelForm):
    """
    ModelForm that lets a user create or edit their profile,
    including the neighborhood they belong to.
    """
    class Meta:
        model=Profile
        fields=('name','email_address','neighborhood','neighborhood_name','profile_pic', )
        # Fixed: the ModelForm Meta option is `widgets` (plural); the original
        # `widget` attribute was silently ignored by Django, so the
        # SelectMultiple widget was never applied.
        widgets={
            'neighborhood_name':forms.SelectMultiple(),
        }
| 0
| 283
| 52
|
33bcfb0072c2489f5ef8239ee411e6decf69324d
| 17,393
|
py
|
Python
|
code/tutorials/exp_somb/pre_tomos_seg.py
|
anmartinezs/pyseg_system
|
5bb07c7901062452a34b73f376057cabc15a13c3
|
[
"Apache-2.0"
] | 12
|
2020-01-08T01:33:02.000Z
|
2022-03-16T00:25:34.000Z
|
code/tutorials/exp_somb/pre_tomos_seg.py
|
anmartinezs/pyseg_system
|
5bb07c7901062452a34b73f376057cabc15a13c3
|
[
"Apache-2.0"
] | 8
|
2019-12-19T19:34:56.000Z
|
2022-03-10T10:11:28.000Z
|
code/tutorials/exp_somb/pre_tomos_seg.py
|
anmartinezs/pyseg_system
|
5bb07c7901062452a34b73f376057cabc15a13c3
|
[
"Apache-2.0"
] | 2
|
2022-03-30T13:12:22.000Z
|
2022-03-30T18:12:10.000Z
|
"""
Pre-processing for mb_graph_batch.py of oriented membranes from TomoSegMemTV output
Input: - STAR file with 3 columns:
+ _rlnMicrographName: tomogram original
+ _rlnImageName: TomoSegMemTV density map output
+ _psSegLabel: (optional) label for membrane segmentation
+ _psSegImage: (optional) binary mask to focus the segmentation analysis
+ _mtMtubesCsv: (optional) a .csv file with microtubule center lines
- Setting for segmenting the membranes from TomoSegMemTV density map:
+ Density threshold: (optional) required if _psSegLabel not defined
+ Size threshold: (optional) required if _psSegLabel not defined
- Sub-volume splitting settings
Output: - A STAR file with 3 columns:
+ _rlnMicrographName: tomogram original
+ _rlnImageName: sub-volumes
+ _psSegImage: Un-oriented membrane segmentations for each subvolume
+ Columns for localizing the sub-volumes within each original tomogram
"""
################# Package import
import argparse
import gc
import os
import sys
import math
import time
import pyseg as ps
import scipy as sp
import skimage as sk
import numpy as np
from pyseg.globals import signed_distance_2d
###### Global variables
__author__ = 'Antonio Martinez-Sanchez'
MB_LBL, MB_NEIGH = 1, 2
MB_NEIGH_INT, MB_NEIGH_EXT = 2, 3
########################################################################################
# PARAMETERS
########################################################################################
ROOT_PATH = '/fs/pool/pool-ruben/antonio/shiwei'
# Input STAR file
in_star = ROOT_PATH + '/pre/in/mb_seg_single_oriented.star'
# Output directory
out_dir = ROOT_PATH + '/pre/mbo_nosplit'
# Subvolume splitting settings
sp_split = None # (2, 2, 1)
sp_off_voxels = 30 # vox
# Membrane segmentation
sg_res = 0.52 # nm/voxel
sg_th = None # 8
sg_sz = None # 3e3
sg_mb_thick = 4 # nm
sg_mb_neigh = 15 # nm
# CSV file pre-processing
cv_coords_cools = (1, 2, 3)
cv_id_col = 4
# Microtubule settings
mt_rad = 30 # nm
mt_swap_xy = False
########################################################################################
# MAIN ROUTINE
########################################################################################
# Get them from the command line if they were passed through it
parser = argparse.ArgumentParser()
parser.add_argument('--inStar', default=in_star, help='Input star file.')
parser.add_argument('--outDir', default=out_dir, help='Output directory.')
parser.add_argument('--spSplit', nargs='+', type=int, default=sp_split, help='Number of splits (X, Y, Z).')
parser.add_argument('--spOffVoxels', type=int, default=sp_off_voxels, help='Offset voxels.')
parser.add_argument('--sgVoxelSize', default=sg_res, type=float, help='Voxel size (nm/voxel).')
parser.add_argument('--sgThreshold', type=int, default=sg_th, help='Density threshold.')
parser.add_argument('--sgSizeThreshold', type=int, default=sg_sz, help='Size threshold (voxels).')
parser.add_argument('--sgMembThk', default=sg_mb_thick, type=float, help='Segmented membrane thickness (nm)')
parser.add_argument('--sgMembNeigh', default=sg_mb_neigh, type=float, help='Segmented membrane neighbours (nm)')
args = parser.parse_args()
in_star = args.inStar
out_dir = args.outDir
sp_split = None if args.spSplit == [-1] else args.spSplit
sp_off_voxels = args.spOffVoxels
sg_res = args.sgVoxelSize
sg_th = None if args.sgThreshold == -1 else args.sgThreshold
sg_sz = None if args.sgSizeThreshold == -1 else args.sgSizeThreshold
sg_mb_thick = args.sgMembThk
sg_mb_neigh = args.sgMembNeigh
########## Print initial message
print('Pre-processing for SEG analysis of un-oriented membranes from TomoSegMemTV output.')
print('\tAuthor: ' + __author__)
print('\tDate: ' + time.strftime("%c") + '\n')
print('Options:')
print('\tOutput directory: ' + str(out_dir))
print('\tInput STAR file: ' + str(in_star))
print('\tData resolution: ' + str(sg_res) + ' nm/vx')
if sg_th is not None:
print('\tSegmentation settings: ')
print('\t\t-Density threshold: ' + str(sg_th))
print('\t\t-Size threshold: ' + str(sg_sz) + ' vx')
print('\tSub-volume splitting settings: ')
print('\t\t-Number of splits (X, Y, Z): ' + str(sp_split))
print('\t\t-Offset voxels: ' + str(sp_off_voxels))
print('\tMicrotubule settings:')
print('\t\t-Microtube luminal radius: ' + str(mt_rad) + ' nm')
print('\tCSV pre-processing: ')
print('\t\t-Columns for samples coordinates (X, Y, Z): ' + str(cv_coords_cools))
print('\t\t-Column for microtubule ID: ' + str(cv_id_col))
print('')
######### Process
print('Parsing input parameters...')
sp_res, mt_rad, sp_off_voxels = float(sg_res), float(mt_rad), int(sp_off_voxels)
out_stem = os.path.splitext(os.path.split(in_star)[1])[0]
conn_mask = np.ones(shape=(3,3,3))
out_seg_dir = out_dir + '/segs'
if not os.path.isdir(out_seg_dir):
os.makedirs(out_seg_dir)
print('Loading input STAR file...')
gl_star = ps.sub.Star()
try:
gl_star.load(in_star)
except ps.pexceptions.PySegInputError as e:
print('ERROR: input STAR file could not be loaded because of "' + e.get_message() + '"')
print('Terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
star = ps.sub.Star()
star.add_column(key='_rlnMicrographName')
star.add_column(key='_rlnImageName')
star.add_column(key='_psSegImage')
star.add_column(key='_psSegRot')
star.add_column(key='_psSegTilt')
star.add_column(key='_psSegPsi')
star.add_column(key='_psSegOffX')
star.add_column(key='_psSegOffY')
star.add_column(key='_psSegOffZ')
mode_oriented = False
if gl_star.has_column('_rlnOriginX') and gl_star.has_column('_rlnOriginY') and gl_star.has_column('_rlnOriginZ'):
print('\t-Segmentation origin found, oriented membrane segmentation activated!')
mode_oriented = True
print('Main Routine: tomograms loop')
tomo_id = 0
for row in range(gl_star.get_nrows()):
in_ref = gl_star.get_element('_rlnMicrographName', row)
print('\tProcessing tomogram: ' + in_ref)
out_ref_stem = os.path.splitext(os.path.split(in_ref)[1])[0]
in_mb = gl_star.get_element('_rlnImageName', row)
print('\t\t-Loading membrane segmentation: ' + in_mb)
tomo_mb = ps.disperse_io.load_tomo(in_mb)
tomo_ref = ps.disperse_io.load_tomo(in_ref, mmap=True)
off_mask_min_x, off_mask_max_x = 0, tomo_ref.shape[0]
off_mask_min_y, off_mask_max_y = 0, tomo_ref.shape[1]
off_mask_min_z, off_mask_max_z = 0, tomo_ref.shape[2]
wide_x = off_mask_max_x - off_mask_min_x
wide_y = off_mask_max_y - off_mask_min_y
wide_z = off_mask_max_z - off_mask_min_z
mt_mask = None
if gl_star.has_column('_mtMtubesCsv'):
in_csv = gl_star.get_element('_mtMtubesCsv', row)
print('\tReading input CSV file: ' + in_csv)
mt_dic = ps.globals.read_csv_mts(in_csv, cv_coords_cools, cv_id_col, swap_xy=mt_swap_xy)
mts_points = list()
for mt_id, mt_samps in zip(iter(mt_dic.keys()), iter(mt_dic.values())):
mts_points += mt_samps
mts_points = np.asarray(mts_points, dtype=np.float32) * (1./sg_res)
print('\tSegmenting the microtubules...')
mt_mask = ps.globals.points_to_mask(mts_points, tomo_mb.shape, inv=True)
mt_mask = sp.ndimage.morphology.distance_transform_edt(mt_mask, sampling=sg_res, return_indices=False)
mt_mask = mt_mask > mt_rad
mb_lbl = 0
if sg_th is None:
if gl_star.has_column('_psSegLabel'):
mb_lbl = gl_star.get_element('_psSegLabel', row)
print('\t\t\t+Segmenting membranes with label: ' + str(mb_lbl))
if mb_lbl > 0:
tomo_mb = tomo_mb == mb_lbl
else:
tomo_mb = tomo_mb > 0
else:
tomo_mb = tomo_mb > 0
else:
tomo_mb = tomo_mb >= sg_th
if gl_star.has_column('_mtMtubesCsv'):
tomo_mb *= mt_mask
del mt_mask
if gl_star.has_column('_psSegImage'):
print('\tApplying the mask...')
hold_mask = ps.disperse_io.load_tomo(gl_star.get_element('_psSegImage', row))
if mb_lbl > 0:
hold_mask = hold_mask == mb_lbl
else:
hold_mask = hold_mask > 0
tomo_mb *= hold_mask
ids_mask = np.where(hold_mask)
off_mask_min_x, off_mask_max_x = ids_mask[0].min()-sp_off_voxels, ids_mask[0].max()+sp_off_voxels
if off_mask_min_x < 0:
off_mask_min_x = 0
if off_mask_max_x > hold_mask.shape[0]:
off_mask_max_x = hold_mask.shape[0]
off_mask_min_y, off_mask_max_y = ids_mask[1].min()-sp_off_voxels, ids_mask[1].max()+sp_off_voxels
if off_mask_min_y < 0:
off_mask_min_y = 0
if off_mask_max_y > hold_mask.shape[1]:
off_mask_max_y = hold_mask.shape[1]
off_mask_min_z, off_mask_max_z = ids_mask[2].min()-sp_off_voxels, ids_mask[2].max()+sp_off_voxels
if off_mask_min_z < 0:
off_mask_min_z = 0
if off_mask_max_z > hold_mask.shape[2]:
off_mask_max_z = hold_mask.shape[2]
del hold_mask
del ids_mask
# ps.disperse_io.save_numpy(tomo_mb, out_dir + '/hold.mrc')
if sg_th is not None:
print('\tMembrane thresholding...')
tomo_sz = ps.globals.global_analysis(tomo_mb, 0.5, c=26)
tomo_mb = tomo_sz > sg_sz
del tomo_sz
seg_center = None
if mode_oriented:
seg_center = np.asarray((gl_star.get_element('_rlnOriginX', row),
gl_star.get_element('_rlnOriginY', row),
gl_star.get_element('_rlnOriginZ', row)))
seg_center[0] -= off_mask_min_x
seg_center[1] -= off_mask_min_y
seg_center[2] -= off_mask_min_z
print('\tSegmenting the membranes...')
if sp_split is None:
svol_mb = tomo_mb[off_mask_min_x:off_mask_max_x, off_mask_min_y:off_mask_max_y, off_mask_min_z:off_mask_max_z]
svol = tomo_ref[off_mask_min_x:off_mask_max_x, off_mask_min_y:off_mask_max_y, off_mask_min_z:off_mask_max_z]
svol_dst = sp.ndimage.morphology.distance_transform_edt(np.invert(svol_mb), sampling=sg_res,
return_indices=False)
svol_seg = np.zeros(shape=svol.shape, dtype=np.float32)
if not mode_oriented:
svol_seg[svol_dst < sg_mb_neigh + sg_mb_thick] = MB_NEIGH
svol_seg[svol_dst < sg_mb_thick] = MB_LBL
else:
svol_dst = signed_distance_2d(svol_mb, res=1, del_b=True, mode_2d=True, set_point=seg_center)
svol_seg[(svol_dst > 0) & (svol_dst < sg_mb_neigh + sg_mb_thick)] = MB_NEIGH_INT
svol_seg[(svol_dst < 0) & (svol_dst > -1. * (sg_mb_neigh + sg_mb_thick))] = MB_NEIGH_EXT
svol_seg[np.absolute(svol_dst) < sg_mb_thick] = MB_LBL
svol_seg[svol_dst == 0] = 0
svol_seg[svol_mb > 0] = MB_LBL
out_svol = out_seg_dir + '/' + out_ref_stem + '_tid_' + str(tomo_id) + '.mrc'
out_seg = out_seg_dir + '/' + out_ref_stem + '_tid_' + str(tomo_id) + '_seg.mrc'
ps.disperse_io.save_numpy(svol, out_svol)
ps.disperse_io.save_numpy(svol_seg, out_seg)
del svol_seg
del svol_dst
row_dic = dict()
row_dic['_rlnMicrographName'] = in_ref
row_dic['_rlnImageName'] = out_svol
row_dic['_psSegImage'] = out_seg
row_dic['_psSegRot'] = 0
row_dic['_psSegTilt'] = 0
row_dic['_psSegPsi'] = 0
row_dic['_psSegOffX'] = off_mask_min_x # 0
row_dic['_psSegOffY'] = off_mask_min_y # 0
row_dic['_psSegOffZ'] = off_mask_min_z
star.add_row(**row_dic)
else:
print('\tSplitting into subvolumes:')
if sp_split[0] > 1:
hold_wide = int(math.ceil(wide_x / sp_split[0]))
hold_pad = int(math.ceil((off_mask_max_x - off_mask_min_x) / sp_split[0]))
hold_split = int(sp_split[0] * math.ceil(float(hold_pad)/hold_wide))
offs_x = list()
pad_x = off_mask_min_x + int(math.ceil((off_mask_max_x-off_mask_min_x) / hold_split))
offs_x.append((off_mask_min_x, pad_x+sp_off_voxels))
lock = False
while not lock:
hold = offs_x[-1][1] + pad_x
if hold >= off_mask_max_x:
offs_x.append((offs_x[-1][1] - sp_off_voxels, off_mask_max_x))
lock = True
else:
offs_x.append((offs_x[-1][1]-sp_off_voxels, offs_x[-1][1]+pad_x+sp_off_voxels))
else:
offs_x = [(off_mask_min_x, off_mask_max_x),]
if sp_split[1] > 1:
hold_wide = int(math.ceil(wide_y / sp_split[1]))
hold_pad = int(math.ceil((off_mask_max_y - off_mask_min_y) / sp_split[1]))
hold_split = int(sp_split[1] * math.ceil(float(hold_pad) / hold_wide))
offs_y = list()
pad_y = off_mask_min_y + int(math.ceil((off_mask_max_y-off_mask_min_y) / hold_split))
offs_y.append((off_mask_min_x, pad_y + sp_off_voxels))
lock = False
while not lock:
hold = offs_y[-1][1] + pad_y
if hold >= off_mask_max_y:
offs_y.append((offs_y[-1][1] - sp_off_voxels, off_mask_max_y))
lock = True
else:
offs_y.append((offs_y[-1][1] - sp_off_voxels, offs_y[-1][1] + pad_y + sp_off_voxels))
else:
offs_y = [(off_mask_min_x, off_mask_max_x),]
if sp_split[2] > 1:
hold_wide = int(math.ceil(wide_z / sp_split[2]))
hold_pad = int(math.ceil((off_mask_max_z - off_mask_min_z) / sp_split[2]))
hold_split = int(sp_split[2] * math.ceil(float(hold_pad) / hold_wide))
offs_z = list()
pad_z = off_mask_min_z + int(math.ceil((off_mask_max_z-off_mask_min_z) / hold_split))
offs_z.append((off_mask_min_z, pad_z + sp_off_voxels))
lock = False
while not lock:
hold = offs_z[-1][1] + pad_z
if hold >= off_mask_max_z:
offs_z.append((offs_z[-1][1] - sp_off_voxels, off_mask_max_z))
lock = True
else:
offs_z.append((offs_z[-1][1] - sp_off_voxels, offs_z[-1][1] + pad_z + sp_off_voxels))
else:
offs_z = [(off_mask_min_z, off_mask_max_z),]
split_id = 1
for off_x in offs_x:
for off_y in offs_y:
for off_z in offs_z:
print('\t\t-Splitting subvolume: [' + str(off_x) + ', ' + str(off_y) + ', ' + str(off_z) +']')
svol_mb = tomo_mb[off_x[0]:off_x[1], off_y[0]:off_y[1], off_z[0]:off_z[1]]
svol = tomo_ref[off_x[0]:off_x[1], off_y[0]:off_y[1], off_z[0]:off_z[1]]
svol_seg = np.zeros(shape=svol.shape, dtype=np.float32)
if not mode_oriented:
svol_dst = sp.ndimage.morphology.distance_transform_edt(np.invert(svol_mb), sampling=sg_res,
return_indices=False)
svol_seg[svol_dst < sg_mb_neigh + sg_mb_thick] = MB_NEIGH
svol_seg[svol_dst < sg_mb_thick] = MB_LBL
else:
seg_off_center = seg_center - np.asarray((off_x[0], off_y[0], off_z[0]))
svol_dst = signed_distance_2d(svol_mb, res=1, del_b=True, mode_2d=True,
set_point=seg_off_center)
svol_seg[(svol_dst > 0) & (svol_dst < sg_mb_neigh + sg_mb_thick)] = MB_NEIGH_INT
svol_seg[(svol_dst < 0) & (svol_dst > -1. * (sg_mb_neigh + sg_mb_thick))] = MB_NEIGH_EXT
svol_seg[np.absolute(svol_dst) < sg_mb_thick] = MB_LBL
svol_seg[svol_dst == 0] = 0
svol_seg[svol_mb > 0] = MB_LBL
out_svol = out_seg_dir + '/' + out_ref_stem + '_id_' + str(tomo_id) + '_split_' + str(split_id) + '.mrc'
out_seg = out_seg_dir + '/' + out_ref_stem + '_id_' + str(tomo_id) + '_split_' + str(split_id) + '_mb.mrc'
ps.disperse_io.save_numpy(svol, out_svol)
ps.disperse_io.save_numpy(svol_seg, out_seg)
del svol_seg
del svol_dst
split_id += 1
row_dic = dict()
row_dic['_rlnMicrographName'] = in_ref
row_dic['_rlnImageName'] = out_svol
row_dic['_psSegImage'] = out_seg
row_dic['_psSegRot'] = 0
row_dic['_psSegTilt'] = 0
row_dic['_psSegPsi'] = 0
row_dic['_psSegOffX'] = off_x[0]
row_dic['_psSegOffY'] = off_y[0]
row_dic['_psSegOffZ'] = off_z[0]
star.add_row(**row_dic)
# Prepare next iteration
gc.collect()
tomo_id += 1
out_star = out_dir + '/' + out_stem + '_pre.star'
print('\tStoring output STAR file in: ' + out_star)
star.store(out_star)
print('Terminated. (' + time.strftime("%c") + ')')
| 44.943152
| 126
| 0.605933
|
"""
Pre-processing for mb_graph_batch.py of oriented membranes from TomoSegMemTV output
Input: - STAR file with 3 columns:
+ _rlnMicrographName: tomogram original
+ _rlnImageName: TomoSegMemTV density map output
+ _psSegLabel: (optional) label for membrane segmentation
+ _psSegImage: (optional) binary mask to focus the segmentation analysis
+ _mtMtubesCsv: (optional) a .csv file with microtubule center lines
- Setting for segmenting the membranes from TomoSegMemTV density map:
+ Density threshold: (optional) required if _psSegLabel not defined
+ Size threshold: (optional) required if _psSegLabel not defined
- Sub-volume splitting settings
Output: - A STAR file with 3 columns:
+ _rlnMicrographName: tomogram original
+ _rlnImageName: sub-volumes
+ _psSegImage: Un-oriented membrane segmentations for each subvolume
+ Columns for localizing the sub-volumes within each original tomogram
"""
################# Package import
import argparse
import gc
import os
import sys
import math
import time
import pyseg as ps
import scipy as sp
import skimage as sk
import numpy as np
from pyseg.globals import signed_distance_2d
###### Global variables
__author__ = 'Antonio Martinez-Sanchez'
MB_LBL, MB_NEIGH = 1, 2
MB_NEIGH_INT, MB_NEIGH_EXT = 2, 3
########################################################################################
# PARAMETERS
########################################################################################
ROOT_PATH = '/fs/pool/pool-ruben/antonio/shiwei'
# Input STAR file
in_star = ROOT_PATH + '/pre/in/mb_seg_single_oriented.star'
# Output directory
out_dir = ROOT_PATH + '/pre/mbo_nosplit'
# Subvolume splitting settings
sp_split = None # (2, 2, 1)
sp_off_voxels = 30 # vox
# Membrane segmentation
sg_res = 0.52 # nm/voxel
sg_th = None # 8
sg_sz = None # 3e3
sg_mb_thick = 4 # nm
sg_mb_neigh = 15 # nm
# CSV file pre-processing
cv_coords_cools = (1, 2, 3)
cv_id_col = 4
# Microtubule settings
mt_rad = 30 # nm
mt_swap_xy = False
########################################################################################
# MAIN ROUTINE
########################################################################################
# Get them from the command line if they were passed through it
parser = argparse.ArgumentParser()
parser.add_argument('--inStar', default=in_star, help='Input star file.')
parser.add_argument('--outDir', default=out_dir, help='Output directory.')
parser.add_argument('--spSplit', nargs='+', type=int, default=sp_split, help='Number of splits (X, Y, Z).')
parser.add_argument('--spOffVoxels', type=int, default=sp_off_voxels, help='Offset voxels.')
parser.add_argument('--sgVoxelSize', default=sg_res, type=float, help='Voxel size (nm/voxel).')
parser.add_argument('--sgThreshold', type=int, default=sg_th, help='Density threshold.')
parser.add_argument('--sgSizeThreshold', type=int, default=sg_sz, help='Size threshold (voxels).')
parser.add_argument('--sgMembThk', default=sg_mb_thick, type=float, help='Segmented membrane thickness (nm)')
parser.add_argument('--sgMembNeigh', default=sg_mb_neigh, type=float, help='Segmented membrane neighbours (nm)')
args = parser.parse_args()
in_star = args.inStar
out_dir = args.outDir
sp_split = None if args.spSplit == [-1] else args.spSplit
sp_off_voxels = args.spOffVoxels
sg_res = args.sgVoxelSize
sg_th = None if args.sgThreshold == -1 else args.sgThreshold
sg_sz = None if args.sgSizeThreshold == -1 else args.sgSizeThreshold
sg_mb_thick = args.sgMembThk
sg_mb_neigh = args.sgMembNeigh
########## Print initial message
print('Pre-processing for SEG analysis of un-oriented membranes from TomoSegMemTV output.')
print('\tAuthor: ' + __author__)
print('\tDate: ' + time.strftime("%c") + '\n')
print('Options:')
print('\tOutput directory: ' + str(out_dir))
print('\tInput STAR file: ' + str(in_star))
print('\tData resolution: ' + str(sg_res) + ' nm/vx')
if sg_th is not None:
print('\tSegmentation settings: ')
print('\t\t-Density threshold: ' + str(sg_th))
print('\t\t-Size threshold: ' + str(sg_sz) + ' vx')
print('\tSub-volume splitting settings: ')
print('\t\t-Number of splits (X, Y, Z): ' + str(sp_split))
print('\t\t-Offset voxels: ' + str(sp_off_voxels))
print('\tMicrotubule settings:')
print('\t\t-Microtube luminal radius: ' + str(mt_rad) + ' nm')
print('\tCSV pre-processing: ')
print('\t\t-Columns for samples coordinates (X, Y, Z): ' + str(cv_coords_cools))
print('\t\t-Column for microtubule ID: ' + str(cv_id_col))
print('')
######### Process
print('Parsing input parameters...')
sp_res, mt_rad, sp_off_voxels = float(sg_res), float(mt_rad), int(sp_off_voxels)
out_stem = os.path.splitext(os.path.split(in_star)[1])[0]
conn_mask = np.ones(shape=(3,3,3))
out_seg_dir = out_dir + '/segs'
if not os.path.isdir(out_seg_dir):
os.makedirs(out_seg_dir)
print('Loading input STAR file...')
gl_star = ps.sub.Star()
try:
gl_star.load(in_star)
except ps.pexceptions.PySegInputError as e:
print('ERROR: input STAR file could not be loaded because of "' + e.get_message() + '"')
print('Terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
star = ps.sub.Star()
star.add_column(key='_rlnMicrographName')
star.add_column(key='_rlnImageName')
star.add_column(key='_psSegImage')
star.add_column(key='_psSegRot')
star.add_column(key='_psSegTilt')
star.add_column(key='_psSegPsi')
star.add_column(key='_psSegOffX')
star.add_column(key='_psSegOffY')
star.add_column(key='_psSegOffZ')
mode_oriented = False
if gl_star.has_column('_rlnOriginX') and gl_star.has_column('_rlnOriginY') and gl_star.has_column('_rlnOriginZ'):
print('\t-Segmentation origin found, oriented membrane segmentation activated!')
mode_oriented = True
print('Main Routine: tomograms loop')
tomo_id = 0
for row in range(gl_star.get_nrows()):
in_ref = gl_star.get_element('_rlnMicrographName', row)
print('\tProcessing tomogram: ' + in_ref)
out_ref_stem = os.path.splitext(os.path.split(in_ref)[1])[0]
in_mb = gl_star.get_element('_rlnImageName', row)
print('\t\t-Loading membrane segmentation: ' + in_mb)
tomo_mb = ps.disperse_io.load_tomo(in_mb)
tomo_ref = ps.disperse_io.load_tomo(in_ref, mmap=True)
off_mask_min_x, off_mask_max_x = 0, tomo_ref.shape[0]
off_mask_min_y, off_mask_max_y = 0, tomo_ref.shape[1]
off_mask_min_z, off_mask_max_z = 0, tomo_ref.shape[2]
wide_x = off_mask_max_x - off_mask_min_x
wide_y = off_mask_max_y - off_mask_min_y
wide_z = off_mask_max_z - off_mask_min_z
mt_mask = None
if gl_star.has_column('_mtMtubesCsv'):
in_csv = gl_star.get_element('_mtMtubesCsv', row)
print('\tReading input CSV file: ' + in_csv)
mt_dic = ps.globals.read_csv_mts(in_csv, cv_coords_cools, cv_id_col, swap_xy=mt_swap_xy)
mts_points = list()
for mt_id, mt_samps in zip(iter(mt_dic.keys()), iter(mt_dic.values())):
mts_points += mt_samps
mts_points = np.asarray(mts_points, dtype=np.float32) * (1./sg_res)
print('\tSegmenting the microtubules...')
mt_mask = ps.globals.points_to_mask(mts_points, tomo_mb.shape, inv=True)
mt_mask = sp.ndimage.morphology.distance_transform_edt(mt_mask, sampling=sg_res, return_indices=False)
mt_mask = mt_mask > mt_rad
mb_lbl = 0
if sg_th is None:
if gl_star.has_column('_psSegLabel'):
mb_lbl = gl_star.get_element('_psSegLabel', row)
print('\t\t\t+Segmenting membranes with label: ' + str(mb_lbl))
if mb_lbl > 0:
tomo_mb = tomo_mb == mb_lbl
else:
tomo_mb = tomo_mb > 0
else:
tomo_mb = tomo_mb > 0
else:
tomo_mb = tomo_mb >= sg_th
if gl_star.has_column('_mtMtubesCsv'):
tomo_mb *= mt_mask
del mt_mask
if gl_star.has_column('_psSegImage'):
print('\tApplying the mask...')
hold_mask = ps.disperse_io.load_tomo(gl_star.get_element('_psSegImage', row))
if mb_lbl > 0:
hold_mask = hold_mask == mb_lbl
else:
hold_mask = hold_mask > 0
tomo_mb *= hold_mask
ids_mask = np.where(hold_mask)
off_mask_min_x, off_mask_max_x = ids_mask[0].min()-sp_off_voxels, ids_mask[0].max()+sp_off_voxels
if off_mask_min_x < 0:
off_mask_min_x = 0
if off_mask_max_x > hold_mask.shape[0]:
off_mask_max_x = hold_mask.shape[0]
off_mask_min_y, off_mask_max_y = ids_mask[1].min()-sp_off_voxels, ids_mask[1].max()+sp_off_voxels
if off_mask_min_y < 0:
off_mask_min_y = 0
if off_mask_max_y > hold_mask.shape[1]:
off_mask_max_y = hold_mask.shape[1]
off_mask_min_z, off_mask_max_z = ids_mask[2].min()-sp_off_voxels, ids_mask[2].max()+sp_off_voxels
if off_mask_min_z < 0:
off_mask_min_z = 0
if off_mask_max_z > hold_mask.shape[2]:
off_mask_max_z = hold_mask.shape[2]
del hold_mask
del ids_mask
# ps.disperse_io.save_numpy(tomo_mb, out_dir + '/hold.mrc')
if sg_th is not None:
print('\tMembrane thresholding...')
tomo_sz = ps.globals.global_analysis(tomo_mb, 0.5, c=26)
tomo_mb = tomo_sz > sg_sz
del tomo_sz
seg_center = None
if mode_oriented:
seg_center = np.asarray((gl_star.get_element('_rlnOriginX', row),
gl_star.get_element('_rlnOriginY', row),
gl_star.get_element('_rlnOriginZ', row)))
seg_center[0] -= off_mask_min_x
seg_center[1] -= off_mask_min_y
seg_center[2] -= off_mask_min_z
print('\tSegmenting the membranes...')
if sp_split is None:
svol_mb = tomo_mb[off_mask_min_x:off_mask_max_x, off_mask_min_y:off_mask_max_y, off_mask_min_z:off_mask_max_z]
svol = tomo_ref[off_mask_min_x:off_mask_max_x, off_mask_min_y:off_mask_max_y, off_mask_min_z:off_mask_max_z]
svol_dst = sp.ndimage.morphology.distance_transform_edt(np.invert(svol_mb), sampling=sg_res,
return_indices=False)
svol_seg = np.zeros(shape=svol.shape, dtype=np.float32)
if not mode_oriented:
svol_seg[svol_dst < sg_mb_neigh + sg_mb_thick] = MB_NEIGH
svol_seg[svol_dst < sg_mb_thick] = MB_LBL
else:
svol_dst = signed_distance_2d(svol_mb, res=1, del_b=True, mode_2d=True, set_point=seg_center)
svol_seg[(svol_dst > 0) & (svol_dst < sg_mb_neigh + sg_mb_thick)] = MB_NEIGH_INT
svol_seg[(svol_dst < 0) & (svol_dst > -1. * (sg_mb_neigh + sg_mb_thick))] = MB_NEIGH_EXT
svol_seg[np.absolute(svol_dst) < sg_mb_thick] = MB_LBL
svol_seg[svol_dst == 0] = 0
svol_seg[svol_mb > 0] = MB_LBL
out_svol = out_seg_dir + '/' + out_ref_stem + '_tid_' + str(tomo_id) + '.mrc'
out_seg = out_seg_dir + '/' + out_ref_stem + '_tid_' + str(tomo_id) + '_seg.mrc'
ps.disperse_io.save_numpy(svol, out_svol)
ps.disperse_io.save_numpy(svol_seg, out_seg)
del svol_seg
del svol_dst
row_dic = dict()
row_dic['_rlnMicrographName'] = in_ref
row_dic['_rlnImageName'] = out_svol
row_dic['_psSegImage'] = out_seg
row_dic['_psSegRot'] = 0
row_dic['_psSegTilt'] = 0
row_dic['_psSegPsi'] = 0
row_dic['_psSegOffX'] = off_mask_min_x # 0
row_dic['_psSegOffY'] = off_mask_min_y # 0
row_dic['_psSegOffZ'] = off_mask_min_z
star.add_row(**row_dic)
else:
print('\tSplitting into subvolumes:')
if sp_split[0] > 1:
hold_wide = int(math.ceil(wide_x / sp_split[0]))
hold_pad = int(math.ceil((off_mask_max_x - off_mask_min_x) / sp_split[0]))
hold_split = int(sp_split[0] * math.ceil(float(hold_pad)/hold_wide))
offs_x = list()
pad_x = off_mask_min_x + int(math.ceil((off_mask_max_x-off_mask_min_x) / hold_split))
offs_x.append((off_mask_min_x, pad_x+sp_off_voxels))
lock = False
while not lock:
hold = offs_x[-1][1] + pad_x
if hold >= off_mask_max_x:
offs_x.append((offs_x[-1][1] - sp_off_voxels, off_mask_max_x))
lock = True
else:
offs_x.append((offs_x[-1][1]-sp_off_voxels, offs_x[-1][1]+pad_x+sp_off_voxels))
else:
offs_x = [(off_mask_min_x, off_mask_max_x),]
if sp_split[1] > 1:
hold_wide = int(math.ceil(wide_y / sp_split[1]))
hold_pad = int(math.ceil((off_mask_max_y - off_mask_min_y) / sp_split[1]))
hold_split = int(sp_split[1] * math.ceil(float(hold_pad) / hold_wide))
offs_y = list()
pad_y = off_mask_min_y + int(math.ceil((off_mask_max_y-off_mask_min_y) / hold_split))
offs_y.append((off_mask_min_x, pad_y + sp_off_voxels))
lock = False
while not lock:
hold = offs_y[-1][1] + pad_y
if hold >= off_mask_max_y:
offs_y.append((offs_y[-1][1] - sp_off_voxels, off_mask_max_y))
lock = True
else:
offs_y.append((offs_y[-1][1] - sp_off_voxels, offs_y[-1][1] + pad_y + sp_off_voxels))
else:
offs_y = [(off_mask_min_x, off_mask_max_x),]
if sp_split[2] > 1:
hold_wide = int(math.ceil(wide_z / sp_split[2]))
hold_pad = int(math.ceil((off_mask_max_z - off_mask_min_z) / sp_split[2]))
hold_split = int(sp_split[2] * math.ceil(float(hold_pad) / hold_wide))
offs_z = list()
pad_z = off_mask_min_z + int(math.ceil((off_mask_max_z-off_mask_min_z) / hold_split))
offs_z.append((off_mask_min_z, pad_z + sp_off_voxels))
lock = False
while not lock:
hold = offs_z[-1][1] + pad_z
if hold >= off_mask_max_z:
offs_z.append((offs_z[-1][1] - sp_off_voxels, off_mask_max_z))
lock = True
else:
offs_z.append((offs_z[-1][1] - sp_off_voxels, offs_z[-1][1] + pad_z + sp_off_voxels))
else:
offs_z = [(off_mask_min_z, off_mask_max_z),]
split_id = 1
for off_x in offs_x:
for off_y in offs_y:
for off_z in offs_z:
print('\t\t-Splitting subvolume: [' + str(off_x) + ', ' + str(off_y) + ', ' + str(off_z) +']')
svol_mb = tomo_mb[off_x[0]:off_x[1], off_y[0]:off_y[1], off_z[0]:off_z[1]]
svol = tomo_ref[off_x[0]:off_x[1], off_y[0]:off_y[1], off_z[0]:off_z[1]]
svol_seg = np.zeros(shape=svol.shape, dtype=np.float32)
if not mode_oriented:
svol_dst = sp.ndimage.morphology.distance_transform_edt(np.invert(svol_mb), sampling=sg_res,
return_indices=False)
svol_seg[svol_dst < sg_mb_neigh + sg_mb_thick] = MB_NEIGH
svol_seg[svol_dst < sg_mb_thick] = MB_LBL
else:
seg_off_center = seg_center - np.asarray((off_x[0], off_y[0], off_z[0]))
svol_dst = signed_distance_2d(svol_mb, res=1, del_b=True, mode_2d=True,
set_point=seg_off_center)
svol_seg[(svol_dst > 0) & (svol_dst < sg_mb_neigh + sg_mb_thick)] = MB_NEIGH_INT
svol_seg[(svol_dst < 0) & (svol_dst > -1. * (sg_mb_neigh + sg_mb_thick))] = MB_NEIGH_EXT
svol_seg[np.absolute(svol_dst) < sg_mb_thick] = MB_LBL
svol_seg[svol_dst == 0] = 0
svol_seg[svol_mb > 0] = MB_LBL
out_svol = out_seg_dir + '/' + out_ref_stem + '_id_' + str(tomo_id) + '_split_' + str(split_id) + '.mrc'
out_seg = out_seg_dir + '/' + out_ref_stem + '_id_' + str(tomo_id) + '_split_' + str(split_id) + '_mb.mrc'
ps.disperse_io.save_numpy(svol, out_svol)
ps.disperse_io.save_numpy(svol_seg, out_seg)
del svol_seg
del svol_dst
split_id += 1
row_dic = dict()
row_dic['_rlnMicrographName'] = in_ref
row_dic['_rlnImageName'] = out_svol
row_dic['_psSegImage'] = out_seg
row_dic['_psSegRot'] = 0
row_dic['_psSegTilt'] = 0
row_dic['_psSegPsi'] = 0
row_dic['_psSegOffX'] = off_x[0]
row_dic['_psSegOffY'] = off_y[0]
row_dic['_psSegOffZ'] = off_z[0]
star.add_row(**row_dic)
# Prepare next iteration
gc.collect()
tomo_id += 1
out_star = out_dir + '/' + out_stem + '_pre.star'
print('\tStoring output STAR file in: ' + out_star)
star.store(out_star)
print('Terminated. (' + time.strftime("%c") + ')')
| 0
| 0
| 0
|
3f641e924d35cf45792a5ad1e2f2a00da473b0f4
| 4,841
|
py
|
Python
|
LIBRAY_MANAGEMENT/Search.py
|
ShriyasnhAgarwl/Hacktoberfest
|
5e8adf77a833f7b99dbddff92716e05641dac857
|
[
"MIT"
] | null | null | null |
LIBRAY_MANAGEMENT/Search.py
|
ShriyasnhAgarwl/Hacktoberfest
|
5e8adf77a833f7b99dbddff92716e05641dac857
|
[
"MIT"
] | null | null | null |
LIBRAY_MANAGEMENT/Search.py
|
ShriyasnhAgarwl/Hacktoberfest
|
5e8adf77a833f7b99dbddff92716e05641dac857
|
[
"MIT"
] | null | null | null |
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import sqlite3
from sqlite3 import Error
Sea().mainloop()
| 50.957895
| 139
| 0.54121
|
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import sqlite3
from sqlite3 import Error
class Sea(Tk):
def __init__(self):
super().__init__()
f = StringVar()
g = StringVar()
self.title("Search Book")
self.maxsize(800,500)
self.minsize(800,500)
self.canvas = Canvas(width=800, height=500, bg='black')
self.canvas.pack()
self.photo = PhotoImage(file='search.png')
self.canvas.create_image(-20, -20, image=self.photo, anchor=NW)
self.iconbitmap(r'libico.ico')
l1=Label(self,text="Search Library",font=("Algerian",20,'bold')).place(x=290,y=20)
l = Label(self, text="Search By", font=("Arial", 15, 'bold')).place(x=60, y=96)
def insert(data):
self.listTree.delete(*self.listTree.get_children())
for row in data:
self.listTree.insert("", 'end', text=row[0], values=(row[1], row[2], 'Available' if row[3] == 1 else 'Unavailable'))
def ge():
if (len(g.get())) == 0:
messagebox.showinfo('Error', 'First select a item')
elif (len(f.get())) == 0:
messagebox.showinfo('Error', 'Enter the '+g.get())
elif g.get() == 'Book Name':
try:
self.conn = sqlite3.connect('library_administration.db')
self.mycursor = self.conn.cursor()
self.mycursor.execute("Select * from books where Book_name LIKE ?",['%'+f.get()+'%'])
self.pc = self.mycursor.fetchall()
if self.pc:
insert(self.pc)
else:
messagebox.showinfo("Oop's","Either Book Name is incorrect or it is not available")
except Error:
messagebox.showerror("Error","Something goes wrong")
elif g.get() == 'Author Name':
try:
self.conn = sqlite3.connect('library_administration.db')
self.mycursor = self.conn.cursor()
self.mycursor.execute("Select * from books where Author LIKE ?", ['%'+f.get()+'%'])
self.pc = self.mycursor.fetchall()
if self.pc:
insert(self.pc)
else:
messagebox.showinfo("Oop's","Author Name not found")
except Error:
messagebox.showerror("Error","Something goes wrong")
elif g.get() == 'Book Id':
try:
self.conn = sqlite3.connect('library_administration.db')
self.mycursor = self.conn.cursor()
self.mycursor.execute("Select * from books where Book_Id LIKE ?", ['%'+f.get()+'%'])
self.pc = self.mycursor.fetchall()
if self.pc:
insert(self.pc)
else:
messagebox.showinfo("Oop's","Either Book Id is incorrect or it is not available")
except Error:
messagebox.showerror("Error","Something goes wrong")
b=Button(self,text="Find",width=15,font=("Arial",10,'bold'),command=ge).place(x=460,y=148)
c=ttk.Combobox(self,textvariable=g,values=["Book Name","Author Name","Book Id"],width=40,state="readonly").place(x = 180, y = 100)
en = Entry(self,textvariable=f,width=43).place(x=180,y=155)
la = Label(self, text="Enter", font=("Arial", 15, 'bold')).place(x=100, y=150)
def handle(event):
if self.listTree.identify_region(event.x,event.y) == "separator":
return "break"
self.listTree = ttk.Treeview(self, height=13,columns=('Book Name', 'Book Author', 'Availability'))
self.vsb = ttk.Scrollbar(self,orient="vertical",command=self.listTree.yview)
self.listTree.configure(yscrollcommand=self.vsb.set)
self.listTree.heading("#0", text='Book ID', anchor='center')
self.listTree.column("#0", width=120, anchor='center')
self.listTree.heading("Book Name", text='Book Name')
self.listTree.column("Book Name", width=200, anchor='center')
self.listTree.heading("Book Author", text='Book Author')
self.listTree.column("Book Author", width=200, anchor='center')
self.listTree.heading("Availability", text='Availability')
self.listTree.column("Availability", width=200, anchor='center')
self.listTree.bind('<Button-1>', handle)
self.listTree.place(x=40, y=200)
self.vsb.place(x=763,y=200,height=287)
ttk.Style().configure("Treeview", font=('Times new Roman', 15))
Sea().mainloop()
| 4,653
| -7
| 52
|
138d2ee0e84e4a165e40f633daacadbbb9845045
| 8,958
|
py
|
Python
|
smartsheet/types.py
|
abhijitmamarde/smartsheet-python-sdk
|
d0120f13e8681a39b1012df6088999a64d3d0dda
|
[
"Apache-2.0"
] | null | null | null |
smartsheet/types.py
|
abhijitmamarde/smartsheet-python-sdk
|
d0120f13e8681a39b1012df6088999a64d3d0dda
|
[
"Apache-2.0"
] | null | null | null |
smartsheet/types.py
|
abhijitmamarde/smartsheet-python-sdk
|
d0120f13e8681a39b1012df6088999a64d3d0dda
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=C0111,R0902,R0913
# Smartsheet Python SDK.
#
# Copyright 2016 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import importlib
import json
import logging
import six
from datetime import datetime
from dateutil.parser import parse
from enum import Enum
try:
from collections import MutableSequence
except ImportError:
from collections.abc import MutableSequence
| 28.081505
| 102
| 0.59098
|
# pylint: disable=C0111,R0902,R0913
# Smartsheet Python SDK.
#
# Copyright 2016 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import importlib
import json
import logging
import six
from datetime import datetime
from dateutil.parser import parse
from enum import Enum
try:
from collections import MutableSequence
except ImportError:
from collections.abc import MutableSequence
class TypedList(MutableSequence):
def __init__(self, item_type):
self.item_type = item_type
self.__store = []
self._log = logging.getLogger(__name__)
if isinstance(self.item_type, six.string_types):
self.item_type = getattr(
importlib.import_module(
__package__ + '.models.' + self.item_type.lower()
), self.item_type)
def __len__(self):
return len(self.__store)
def __getitem__(self, idx):
return self.__store[idx]
def __setitem__(self, idx, value):
self._log.debug('__setitem__, %s, %s', idx, value)
self.__store[idx] = self.convert(value)
def __delitem__(self, idx):
del self.__store[idx]
def insert(self, idx, value):
self.__store.insert(idx, self.convert(value))
def convert(self, item):
"""Convert the input item to the desired object type."""
try:
if isinstance(item, self.item_type):
return item
# allow explicit null to be passed through to the list
elif hasattr(item, 'is_explicit_null'):
return item
except TypeError:
raise
try:
retval = self.item_type(item)
self._log.debug('item converted to %s: %s -> %s',
self.item_type, item, retval)
return retval
except (ValueError, TypeError):
raise ValueError(
"Can't convert %s to %s in TypedList", item, self.item_type)
def purge(self):
"""Zero out the underlying list object."""
del self.__store[:]
def to_list(self):
return self.__store
def load(self, value):
if isinstance(value, list):
self.purge()
self.extend([
(item if isinstance(item, self.item_type) else self.item_type(item)) for item in value
])
elif isinstance(value, TypedList):
self.purge()
self.extend(value.to_list())
elif isinstance(value, self.item_type):
self.purge()
self.append(value)
elif hasattr(value, 'is_explicit_null'):
self.purge()
self.append(value)
else:
raise ValueError("Can't load to TypedList(%s) from '%s'", self.item_type, value)
def __repr__(self):
tmp = json.dumps(self.__store)
return "TypedList(item_type=%s, contents=%s)" % (self.item_type, tmp)
def __str__(self):
return json.dumps(self.__store)
class TypedObject(object):
def __init__(self, object_type):
self.object_type = object_type
self._value = None
self._log = logging.getLogger(__name__)
if isinstance(self.object_type, six.string_types):
self.object_type = getattr(
importlib.import_module(
__package__ + '.models.' + self.object_type.lower()
), self.object_type)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if isinstance(value, self.object_type):
self._value = value
elif isinstance(value, dict):
self._value = self.object_type(value)
elif hasattr(value, 'is_explicit_null'):
self._value = value
else:
raise ValueError("`{0}` invalid type for {1} value".format(value, self.object_type))
def __str(self):
return json.dumps(self._value)
class Number(object):
def __init__(self, initial_value=None):
self._value = None
if initial_value:
self.value = initial_value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if value is None:
self._value = None
elif isinstance(value, (six.integer_types, float)):
self._value = value
else:
raise ValueError("`{0}` invalid type for Number value".format(value))
def __str__(self):
return str(self.value)
class String(object):
def __init__(self, initial_value=None, accept=None):
self._value = None
if initial_value:
self.value = initial_value
self._accept = None
if accept:
self.accept = accept
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if value is None:
self._value = None
elif isinstance(value, six.string_types):
if self.accept and value not in self._accept:
raise ValueError(
"`{0}` is not in accept list, must be one of {1}".format(
value, self.accept))
self._value = value
else:
raise ValueError("`{0}` invalid type for String value".format(value))
@property
def accept(self):
return self._accept
@accept.setter
def accept(self, value):
if isinstance(value, list):
self._accept = value
elif isinstance(value, six.string_types):
self._accept = [value]
else:
raise ValueError("`{0}` invalid type for accept".format(value))
def __str__(self):
return self._value
class Boolean(object):
def __init__(self, initial_value=None):
self._value = None
if initial_value:
self.value = initial_value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if value is None:
self._value = None
elif isinstance(value, bool):
self._value = value
else:
raise ValueError("`{0}` invalid type for Boolean value".format(value))
def __str__(self):
return str(self._value)
class Timestamp(object):
def __init__(self, initial_value=None):
self._value = None
if initial_value:
self.value = initial_value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if value is None:
self._value = None
elif isinstance(value, datetime):
self._value = value
elif isinstance(value, six.string_types):
value = parse(value)
self._value = value
else:
raise ValueError("`{0}` invalid type for Timestamp value".format(value))
def __str__(self):
return str(self._value)
class EnumeratedValue(object):
def __init__(self, enum, value=None):
self.__enum = enum
self._value = None
if value:
self.set(value)
@property
def value(self):
return self._value
def set(self, value):
if isinstance(value, six.string_types):
try:
self._value = self.__enum[value]
except KeyError:
self._value = None
elif isinstance(value, Enum):
self._value = value;
else:
self._value = None
def __eq__(self, other):
if isinstance(other, Enum):
return self._value == other
elif isinstance(other, six.string_types):
return self._value == self.__enum[other]
NotImplemented
def __str__(self):
if self._value is not None:
return self._value.name
else:
return str(None)
class EnumeratedList(TypedList):
def __init__(self, enum):
super(EnumeratedList, self).__init__(EnumeratedValue)
self.__enum = enum
def load(self, value):
if isinstance(value, TypedList):
value = value.to_list()
if isinstance(value, list):
self.purge()
self.extend([
(EnumeratedValue(self.__enum, item)) for item in value
])
else:
self.purge()
self.append(EnumeratedValue(self.__enum, value))
| 5,714
| 2,073
| 238
|
568a5b7446a765ab1575aff69cc8f331e6747a0a
| 256
|
py
|
Python
|
kora/install/orca.py
|
wannaphong/kora
|
8a9034097d07b14094e077769c02a0b4857d179b
|
[
"MIT"
] | 91
|
2020-05-26T05:54:51.000Z
|
2022-03-09T07:33:44.000Z
|
kora/install/orca.py
|
wannaphong/kora
|
8a9034097d07b14094e077769c02a0b4857d179b
|
[
"MIT"
] | 12
|
2020-10-03T10:09:11.000Z
|
2021-03-06T23:12:21.000Z
|
kora/install/orca.py
|
wannaphong/kora
|
8a9034097d07b14094e077769c02a0b4857d179b
|
[
"MIT"
] | 16
|
2020-07-07T18:39:29.000Z
|
2021-03-06T03:46:49.000Z
|
import os
from urllib.request import urlretrieve
url = "https://github.com/plotly/orca/releases/download/v1.2.1/orca-1.2.1-x86_64.AppImage"
orca = '/usr/local/bin/orca'
urlretrieve(url, orca)
os.chmod(orca, 0o755)
os.system("apt install xvfb libgconf-2-4")
| 36.571429
| 90
| 0.761719
|
import os
from urllib.request import urlretrieve
url = "https://github.com/plotly/orca/releases/download/v1.2.1/orca-1.2.1-x86_64.AppImage"
orca = '/usr/local/bin/orca'
urlretrieve(url, orca)
os.chmod(orca, 0o755)
os.system("apt install xvfb libgconf-2-4")
| 0
| 0
| 0
|
9a2ec11c81c067688541d020aa744bc48be5df2a
| 242
|
py
|
Python
|
Graphs/topological ordering/testando.py
|
lucasEngdComp/graphs
|
da71f249c3ea0496f2a6a3695c66adeb4f3db43c
|
[
"MIT"
] | null | null | null |
Graphs/topological ordering/testando.py
|
lucasEngdComp/graphs
|
da71f249c3ea0496f2a6a3695c66adeb4f3db43c
|
[
"MIT"
] | null | null | null |
Graphs/topological ordering/testando.py
|
lucasEngdComp/graphs
|
da71f249c3ea0496f2a6a3695c66adeb4f3db43c
|
[
"MIT"
] | null | null | null |
from grafo_adj import *
g = Grafo([],[])
for i in ['9','8','7','2','11','5','3', '10']:
g.adiciona_vertice(i)
for i in ['7-11', '5-8', '3-11', '7-8', '8-9','11-10','11-2', '5-10']:
g.adiciona_aresta(i)
print(g)
print(g.dfs('7'))
| 17.285714
| 70
| 0.495868
|
from grafo_adj import *
g = Grafo([],[])
for i in ['9','8','7','2','11','5','3', '10']:
g.adiciona_vertice(i)
for i in ['7-11', '5-8', '3-11', '7-8', '8-9','11-10','11-2', '5-10']:
g.adiciona_aresta(i)
print(g)
print(g.dfs('7'))
| 0
| 0
| 0
|
826936876403864987eac829cc9201052b5fcae4
| 2,736
|
py
|
Python
|
wordcount.py
|
conkytw/text-mining-wordcount
|
a10563bff9850eb5138c8c0d795ecb1ea3f846b9
|
[
"MIT"
] | 2
|
2017-03-12T06:46:03.000Z
|
2017-03-12T06:46:06.000Z
|
wordcount.py
|
conkytw/text-mining-wordcount
|
a10563bff9850eb5138c8c0d795ecb1ea3f846b9
|
[
"MIT"
] | null | null | null |
wordcount.py
|
conkytw/text-mining-wordcount
|
a10563bff9850eb5138c8c0d795ecb1ea3f846b9
|
[
"MIT"
] | null | null | null |
from collections import Counter
import nltk
from nltk.corpus import stopwords
stopwords = set(stopwords.words('english'))
# read sentence
lines = []
for line in open('building_global_community.txt'):
# delete the blank and line feed at the begining and end
line = line.strip()
# add processed line text into list 'lines'
lines.append(line)
# do Counter,
# wordCounter : all words
# wordCounter_Noun : noun words
# wordCounter_Adj : Adj words
# wordCounter_verb : Verb words
# wordCounter_Other : other POS words
wordCounter = Counter()
wordCounter_verb =Counter()
wordCounter_Adj = Counter()
wordCounter_Noun = Counter()
wordCounter_Other = Counter()
wordCounter_adv = Counter()
word_punc_tokenizer = nltk.WordPunctTokenizer()
for sen in lines:
# split sentence into words
tokens = word_punc_tokenizer.tokenize(sen)
#tokens = [word for word in nltk.word_tokenize(sen)]
#tokens= filter(lambda word: word not in '[.,\/#!$%\^&\*;:{}-=\_`~()]', tokens)
tmp_list = list()
for token in tokens:
if (token.isdigit()==False) and (token.isalpha()==True) and (token.lower() not in stopwords) :
tmp_list.append(token.lower())
for element in tmp_list:
get_pos = nltk.pos_tag(element.split())
word,pos = get_pos[0]
if pos.startswith('NN'):
wordCounter_Noun.update(word.split())
elif pos.startswith('JJ'):
wordCounter_Adj.update(word.split())
elif pos.startswith('VB'):
wordCounter_verb.update(word.split())
elif pos.startswith('RB'):
wordCounter_adv.update(word.split())
else:
wordCounter_Other.update(word.split())
wordCounter.update(tmp_list)
# show the occurance of all words
print '## All wordcount TOP-20: '
#print wordCounter.most_common(20)
for word, count in wordCounter.most_common(20):
print('{0}: {1}'.format(word, count))
# show the occurance of Noun words
print '## Noun words TOP-10: '
#print wordCounter_Noun.most_common(10)
for word, count in wordCounter_Noun.most_common(10):
print('{0}: {1}'.format(word, count))
# show the occurance of Adj words
print '## Adj words TOP-10: '
#print wordCounter_Adj.most_common(10)
for word, count in wordCounter_Adj.most_common(10):
print('{0}: {1}'.format(word, count))
# show the occurance of Adv words
print '## Adv Words TOP-10: '
#print wordCounter_adv.most_common(10)
for word, count in wordCounter_adv.most_common(10):
print('{0}: {1}'.format(word, count))
# show the occurance of Other POS words
print '## Other POS words TOP-10: '
#print wordCounter_Other.most_common(10)
for word, count in wordCounter_Other.most_common(10):
print('{0}: {1}'.format(word, count))
| 30.4
| 102
| 0.684576
|
from collections import Counter
import nltk
from nltk.corpus import stopwords
# NOTE: the corpus module name is deliberately shadowed -- only the English
# stop-word *set* is needed from here on.
stopwords = set(stopwords.words('english'))


def print_top(title, counter, n):
    """Print the heading `title`, then the `n` most common (word, count)
    pairs of `counter`, one per line."""
    print(title)
    for word, count in counter.most_common(n):
        print('{0}: {1}'.format(word, count))


# Read the speech, one stripped line per list entry.
lines = []
for line in open('building_global_community.txt'):
    # delete the blank and line feed at the beginning and end
    lines.append(line.strip())

# Counters:
#   wordCounter       : all kept words
#   wordCounter_Noun  : nouns      (NN*)
#   wordCounter_Adj   : adjectives (JJ*)
#   wordCounter_verb  : verbs      (VB*)  -- accumulated but never reported,
#                                            kept for interface compatibility
#   wordCounter_adv   : adverbs    (RB*)
#   wordCounter_Other : every other POS tag
wordCounter = Counter()
wordCounter_verb = Counter()
wordCounter_Adj = Counter()
wordCounter_Noun = Counter()
wordCounter_Other = Counter()
wordCounter_adv = Counter()

word_punc_tokenizer = nltk.WordPunctTokenizer()
for sen in lines:
    # Split the sentence into tokens; keep only purely alphabetic,
    # non-stop-word tokens, lower-cased.
    tokens = word_punc_tokenizer.tokenize(sen)
    tmp_list = [token.lower() for token in tokens
                if token.isalpha() and token.lower() not in stopwords]
    # Tag each word individually (kept word-by-word on purpose: tagging the
    # whole token list at once would use sentence context and could assign
    # different tags than the original behaviour).
    for element in tmp_list:
        word, pos = nltk.pos_tag(element.split())[0]
        if pos.startswith('NN'):
            wordCounter_Noun.update(word.split())
        elif pos.startswith('JJ'):
            wordCounter_Adj.update(word.split())
        elif pos.startswith('VB'):
            wordCounter_verb.update(word.split())
        elif pos.startswith('RB'):
            wordCounter_adv.update(word.split())
        else:
            wordCounter_Other.update(word.split())
    wordCounter.update(tmp_list)

# Report the most frequent words overall and per part of speech.
print_top('## All wordcount TOP-20: ', wordCounter, 20)
print_top('## Noun words TOP-10: ', wordCounter_Noun, 10)
print_top('## Adj words TOP-10: ', wordCounter_Adj, 10)
print_top('## Adv Words TOP-10: ', wordCounter_adv, 10)
print_top('## Other POS words TOP-10: ', wordCounter_Other, 10)
| 0
| 0
| 0
|
130b8dfa2bd0eeb1dd43758715ab9f4b20c54970
| 4,897
|
py
|
Python
|
venv/Lib/site-packages/_TFL/Recordifier.py
|
nasir733/airbnb-clone
|
9ac746b6f3f3c8fc45f97773266e6f5f182d14b9
|
[
"MIT"
] | 6
|
2016-12-10T17:51:10.000Z
|
2021-10-11T07:51:48.000Z
|
venv/Lib/site-packages/_TFL/Recordifier.py
|
nasir733/airbnb-clone
|
9ac746b6f3f3c8fc45f97773266e6f5f182d14b9
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/_TFL/Recordifier.py
|
nasir733/airbnb-clone
|
9ac746b6f3f3c8fc45f97773266e6f5f182d14b9
|
[
"MIT"
] | 3
|
2020-03-29T07:37:03.000Z
|
2021-01-21T16:08:40.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2016 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# TFL.Recordifier
#
# Purpose
# Provide classes supporting the conversion of formatted strings to records
#
# Revision Dates
# 17-Sep-2006 (CT) Creation
# 23-Dec-2010 (CT) Use `_print` for doctest (`%s` instead of `%r` for `v`)
# 9-Oct-2016 (CT) Move to Package_Namespace `TFL`
# 9-Oct-2016 (CT) Fix Python 3 compatibility
# ««revision-date»»···
#--
from _TFL import TFL
from _TFL.pyk import pyk
from _TFL.Regexp import re
import _TFL.Caller
import _TFL.Record
import _TFL._Meta.Object
# end def _print
# end def __init__
# end def __call__
# end class _Recordifier_
class By_Regexp (_Recordifier_) :
"""Convert strings via regexp to records.
>>> br = By_Regexp (
... TFL.Regexp
... (r"(?P<dt> (?P<y> \d{4})-(?P<m> \d{2})(?:-(?P<d> \d{2}))?)"
... r" \s+ (?P<M> \d+) \s+ (?P<w> \d+\.\d*)", re.X)
... , M = int, weight = float, y = int, m = int, d = int)
>>> _print (br ("2006-06-01 6 96.4 1.20 93.5 98.1"))
(M = 6, d = 1, dt = 2006-06-01, m = 6, w = 96.4, y = 2006)
>>> _print (br ("2006-06 6 96.4 1.20 93.5 98.1"))
(M = 6, dt = 2006-06, m = 6, w = 96.4, y = 2006)
"""
field_pat = TFL.Regexp \
( r"\(\?P< (?P<name> [a-zA-Z_][a-zA-Z0-9_]*) >"
, flags = re.VERBOSE
)
# end def __init__
# end def _field_iter
# end class By_Regexp
class By_Separator (_Recordifier_) :
"""Convert strings by splitting on whitespace into records.
>>> bw = By_Separator (
... "d", ("m", int), "avg", "err", "min", "max",
... _default_converter = float, d = str)
>>> _print (bw ("2006-06-01 6 96.4 1.20 93.5 98.1"))
(avg = 96.4, d = 2006-06-01, err = 1.2, m = 6, max = 98.1, min = 93.5)
>>> _print (bw ("2006-06-01 6 96.4 1.20 93.5"))
(avg = 96.4, d = 2006-06-01, err = 1.2, m = 6, min = 93.5)
>>> _print (bw ("2006-06-01 6 96.4 1.20 93.5 98.1 42"))
(avg = 96.4, d = 2006-06-01, err = 1.2, m = 6, max = 98.1, min = 93.5)
"""
_separator = None
_default_converter = str
# end def __init__
# end def _field_iter
# end class By_Separator
if __name__ == "__main__" :
TFL._Export_Module ()
### __END__ TFL.Recordifier
| 32.430464
| 79
| 0.506228
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2016 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# TFL.Recordifier
#
# Purpose
# Provide classes supporting the conversion of formatted strings to records
#
# Revision Dates
# 17-Sep-2006 (CT) Creation
# 23-Dec-2010 (CT) Use `_print` for doctest (`%s` instead of `%r` for `v`)
# 9-Oct-2016 (CT) Move to Package_Namespace `TFL`
# 9-Oct-2016 (CT) Fix Python 3 compatibility
# ««revision-date»»···
#--
from _TFL import TFL
from _TFL.pyk import pyk
from _TFL.Regexp import re
import _TFL.Caller
import _TFL.Record
import _TFL._Meta.Object
def _print (r) :
    """Print record `r`'s keyword dict as one "(k = v, ...)" line,
       sorted by key (used by the doctests below).
    """
    pairs = sorted (pyk.iteritems (r._kw))
    body  = ", ".join ("%s = %s" % (k, v) for k, v in pairs)
    print ("(%s)" % body)
# end def _print
class _Recordifier_ (TFL.Meta.Object) :
    """Base class for string-to-record converters: `__call__` feeds each
       (field-name, raw-value) pair produced by `_field_iter` through the
       field's converter and stores it on a fresh `Result_Type` instance.
    """

    def __init__ (self, Result_Type) :
        self.Result_Type = Result_Type
    # end def __init__

    def __call__ (self, s) :
        converters = self._converters
        record     = self.Result_Type ()
        for name, raw in self._field_iter (s) :
            setattr (record, name, converters [name] (raw))
        return record
    # end def __call__

# end class _Recordifier_
class By_Regexp (_Recordifier_) :
    """Convert strings via regexp to records.
    >>> br = By_Regexp (
    ...     TFL.Regexp
    ...         (r"(?P<dt> (?P<y> \d{4})-(?P<m> \d{2})(?:-(?P<d> \d{2}))?)"
    ...          r" \s+ (?P<M> \d+) \s+ (?P<w> \d+\.\d*)", re.X)
    ...     , M = int, weight = float, y = int, m = int, d = int)
    >>> _print (br ("2006-06-01 6 96.4 1.20 93.5 98.1"))
    (M = 6, d = 1, dt = 2006-06-01, m = 6, w = 96.4, y = 2006)
    >>> _print (br ("2006-06 6 96.4 1.20 93.5 98.1"))
    (M = 6, dt = 2006-06, m = 6, w = 96.4, y = 2006)
    """

    ### Matches the named groups `(?P<name> ...)` inside a regexp pattern;
    ### used by `__init__` to discover the field names the regexp produces.
    field_pat = TFL.Regexp \
        ( r"\(\?P< (?P<name> [a-zA-Z_][a-zA-Z0-9_]*) >"
        , flags = re.VERBOSE
        )

    def __init__ (self, regexp, Result_Type = TFL.Record, ** converters) :
        ### `converters` maps group names to callables; groups without an
        ### explicit converter fall back to
        ### `converters ["default_converter"]` or `str`.
        self.__super.__init__ (Result_Type = Result_Type)
        self.regexp = rex = TFL.Regexp (regexp)
        self._converters = conv = {}
        for match in self.field_pat.search_iter (rex._pattern.pattern) :
            name = match.group ("name")
            conv [name] = \
                ( converters.get (name)
                or converters.get ("default_converter", str)
                )
    # end def __init__

    def _field_iter (self, s) :
        ### Yield (group-name, matched-text) for every group that matched;
        ### groups that did not participate (value None) are skipped, so the
        ### resulting record only carries matched fields (cf. doctest above).
        match = self.regexp.search (s)
        if match :
            for k, v in pyk.iteritems (match.groupdict ()) :
                if v is not None :
                    yield k, v
        else :
            raise ValueError \
                ("`%s` doesn't match `%s`" % (s, self.regexp._pattern.pattern))
    # end def _field_iter

# end class By_Regexp
class By_Separator (_Recordifier_) :
    """Convert strings by splitting on whitespace into records.
    >>> bw = By_Separator (
    ...     "d", ("m", int), "avg", "err", "min", "max",
    ...     _default_converter = float, d = str)
    >>> _print (bw ("2006-06-01 6 96.4 1.20 93.5 98.1"))
    (avg = 96.4, d = 2006-06-01, err = 1.2, m = 6, max = 98.1, min = 93.5)
    >>> _print (bw ("2006-06-01 6 96.4 1.20 93.5"))
    (avg = 96.4, d = 2006-06-01, err = 1.2, m = 6, min = 93.5)
    >>> _print (bw ("2006-06-01 6 96.4 1.20 93.5 98.1 42"))
    (avg = 96.4, d = 2006-06-01, err = 1.2, m = 6, max = 98.1, min = 93.5)
    """

    ### NOTE(review): `_separator` is settable via the `_separator` keyword
    ### but `_field_iter` below always calls `s.split ()` (any whitespace) --
    ### the attribute appears to be unused; confirm before relying on it.
    _separator         = None
    ### Converter applied to fields that have no explicit converter.
    _default_converter = str

    def __init__ (self, * fields, ** kw) :
        ### `fields` is an ordered mix of plain names and (name, converter)
        ### pairs; plain names may also get a converter via `kw [name]`.
        self.__super.__init__ \
            (Result_Type = kw.get ("Result_Type", TFL.Record))
        if "_separator" in kw :
            self._separator = kw ["_separator"]
        if "_default_converter" in kw :
            self._default_converter = kw ["_default_converter"]
        self._converters = conv = {}
        self._fields = []
        add = self._fields.append
        for f in fields :
            if isinstance (f, pyk.string_types) :
                name = f
                c = kw.get (name, self._default_converter)
            else :
                name, c = f
            conv [name] = c
            add (name)
    # end def __init__

    def _field_iter (self, s) :
        ### `zip` truncates to the shorter sequence: missing trailing fields
        ### are simply absent from the record, extra tokens are ignored
        ### (cf. the last two doctests above).
        for k, v in zip (self._fields, s.split ()) :
            yield k, v
    # end def _field_iter

# end class By_Separator
if __name__ == "__main__" :
TFL._Export_Module ()
### __END__ TFL.Recordifier
| 1,934
| 18
| 208
|
cddf6a78be18c0a440d59e0141c19012bfa11448
| 9,965
|
py
|
Python
|
df2onehot/utils.py
|
erdogant/df2onehot
|
0595f4f96b478d498876885fc53473f1195458a2
|
[
"MIT"
] | 2
|
2021-06-17T12:48:48.000Z
|
2022-03-13T17:39:39.000Z
|
df2onehot/utils.py
|
erdogant/df2onehot
|
0595f4f96b478d498876885fc53473f1195458a2
|
[
"MIT"
] | null | null | null |
df2onehot/utils.py
|
erdogant/df2onehot
|
0595f4f96b478d498876885fc53473f1195458a2
|
[
"MIT"
] | null | null | null |
"""Various helper functions to set the dtypes."""
# ----------------------------------------------------
# Name : df2onehot.py
# Author : E.Taskesen
# Contact : erdogant@gmail.com
# github : https://github.com/erdogant/df2onehot
# Licence : MIT
# ----------------------------------------------------
# %% Libraries
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
from tqdm import tqdm
# %% Set dtypes
def set_dtypes(df, dtypes='pandas', deep_extract=False, perc_min_num=None, num_if_decimal=True, verbose=3):
"""Set the dtypes of the dataframe.
Parameters
----------
df : pd.DataFrame()
Input dataframe for which the rows are the features, and colums are the samples.
dtypes : list of str or 'pandas', optional
Representation of the columns in the form of ['cat','num']. By default the dtype is determiend based on the pandas dataframe.
deep_extract : bool [False, True] (default : False)
True: Extract information from a vector that contains a list/array/dict.
False: converted to a string and treated as catagorical ['cat'].
perc_min_num : float [None, 0..1], optional
Force column (int or float) to be numerical if unique non-zero values are above percentage. The default is None. Alternative can be 0.8
num_if_decimal : bool [False, True], optional
Force column to be numerical if column with original dtype (int or float) show values with one or more decimals. The default is True.
verbose : int, optional
Print message to screen. The default is 3.
0: (default), 1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE
Returns
-------
tuple containing dataframe and dtypes.
"""
config = {}
config['dtypes'] = dtypes
config['deep_extract'] = deep_extract
config['perc_min_num'] = perc_min_num
config['num_if_decimal'] = num_if_decimal
config['verbose'] = verbose
# Determine dtypes for columns
config['dtypes'] = _auto_dtypes(df, config['dtypes'], deep_extract=config['deep_extract'], perc_min_num=config['perc_min_num'], num_if_decimal=config['num_if_decimal'], verbose=config['verbose'])
# Setup dtypes in columns
df = _set_types(df.copy(), config['dtypes'], verbose=config['verbose'])
# return
return(df, config['dtypes'])
# %% Setup columns in correct dtypes
# %% Setup columns in correct dtypes
# %% Set y
def set_y(y, y_min=None, numeric=False, verbose=3):
"""Group labels if required.
Parameters
----------
y : list
input labels.
y_min : int, optional
If unique y-labels are less then absolute y_min, labels are grouped into the _other_ group. The default is None.
numeric : bool [True, False], optional
Convert to numeric labels. The default is False.
verbose : int, optional
Print message to screen. The default is 3.
0: (default), 1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE
Returns
-------
list of labels.
"""
y = y.astype(str)
if not isinstance(y_min, type(None)):
if verbose>=3: print('[df2onehot] >Group [y] labels that contains less then %d occurences are grouped under one single name [_other_]' %(y_min))
[uiy, ycounts] = np.unique(y, return_counts=True)
labx = uiy[ycounts<y_min]
y = y.astype('O')
y[np.isin(y, labx)] = '_other_' # Note that this text is captured in compute_significance! Do not change or also change it over there!
y = y.astype(str)
if numeric:
y = label_encoder.fit_transform(y).astype(int)
return(y)
# %% function to remove non-ASCII
# %% Convert to pandas dataframe
def is_DataFrame(data, verbose=3):
"""Convert data into dataframe.
Parameters
----------
data : array-like
Array-like data matrix.
verbose : int, optional
Print message to screen. The default is 3.
0: (default), 1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE
Returns
-------
pd.dataframe()
"""
if isinstance(data, list):
data = pd.DataFrame(data)
elif isinstance(data, np.ndarray):
data = pd.DataFrame(data)
elif isinstance(data, pd.DataFrame):
pass
else:
if verbose>=3: print('Typing should be pd.DataFrame()!')
data=None
return(data)
| 39.701195
| 199
| 0.560863
|
"""Various helper functions to set the dtypes."""
# ----------------------------------------------------
# Name : df2onehot.py
# Author : E.Taskesen
# Contact : erdogant@gmail.com
# github : https://github.com/erdogant/df2onehot
# Licence : MIT
# ----------------------------------------------------
# %% Libraries
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
from tqdm import tqdm
# %% Set dtypes
def set_dtypes(df, dtypes='pandas', deep_extract=False, perc_min_num=None, num_if_decimal=True, verbose=3):
    """Set the dtypes of the dataframe.

    Parameters
    ----------
    df : pd.DataFrame()
        Input dataframe for which the rows are the features, and colums are the samples.
    dtypes : list of str or 'pandas', optional
        Representation of the columns in the form of ['cat','num']. By default the dtype is determiend based on the pandas dataframe.
    deep_extract : bool [False, True] (default : False)
        True: Extract information from a vector that contains a list/array/dict.
        False: converted to a string and treated as catagorical ['cat'].
    perc_min_num : float [None, 0..1], optional
        Force column (int or float) to be numerical if unique non-zero values are above percentage. The default is None. Alternative can be 0.8
    num_if_decimal : bool [False, True], optional
        Force column to be numerical if column with original dtype (int or float) show values with one or more decimals. The default is True.
    verbose : int, optional
        Print message to screen. The default is 3.
        0: (default), 1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE

    Returns
    -------
    tuple containing dataframe and dtypes.
    """
    # Detect a dtype label for every column, then coerce a copy of the
    # dataframe to those dtypes.
    dtypes = _auto_dtypes(df, dtypes, deep_extract=deep_extract, perc_min_num=perc_min_num, num_if_decimal=num_if_decimal, verbose=verbose)
    df = _set_types(df.copy(), dtypes, verbose=verbose)
    return df, dtypes
# %% Setup columns in correct dtypes
def _auto_dtypes(df, dtypes, deep_extract=False, perc_min_num=None, num_if_decimal=True, verbose=3):
    # Detect a dtype label ('num', 'cat', 'bool', 'list', 'dict') for every
    # column of `df`. Only runs when `dtypes` is a string (e.g. 'pandas');
    # a user-supplied list is returned unchanged.
    if isinstance(dtypes, str):
        if verbose>=3: print('\n[df2onehot] >Auto detecting dtypes.')
        disable = (True if (verbose==0 or verbose>3) else False)
        # Longest column name; used to align the per-column log lines.
        max_str_len = np.max(list(map(len, df.columns.values.astype(str).tolist())))
        dtypes = [''] * df.shape[1]
        logstr = '   '
        for i in tqdm(range(0, df.shape[1]), disable=disable):
            # --- Base classification from the pandas dtype string ---
            if 'float' in str(df.dtypes[i]):
                dtypes[i]='num'
                logstr = ('[float]')
            elif 'int' in str(df.dtypes[i]):
                # Integers default to categorical; the perc_min_num /
                # num_if_decimal checks below may force them back to 'num'.
                dtypes[i]='cat'
                logstr = ('[int]  ')
            elif 'str' in str(df.dtypes[i]):
                dtypes[i]='cat'
                logstr = ('[str]  ')
            elif ('object' in str(df.dtypes[i])) and not deep_extract:
                dtypes[i]='cat'
                logstr = ('[obj]  ')
            elif 'object' in str(df.dtypes[i]) and deep_extract:
                # Check whether this is a list or array by inspecting the
                # first non-NaN value of the column.
                logstr = ('[obj]  ')
                tmpdf = df.iloc[:, i]
                Iloc = ~tmpdf.isna()
                if np.any(Iloc):
                    tmpdf = tmpdf.loc[Iloc].values[0]
                else:
                    tmpdf = None
                if isinstance(list(), type(tmpdf)):
                    dtypes[i]='list'
                elif 'numpy.ndarray' in str(type(tmpdf)):
                    dtypes[i]='list'
                elif isinstance(dict(), type(tmpdf)):
                    dtypes[i]='dict'
                else:
                    dtypes[i]='cat'
            elif 'bool' in str(df.dtypes[i]):
                dtypes[i]='bool'
                logstr = ('[bool] ')
            else:
                dtypes[i]='cat'
                logstr = ('[???]  ')
            # Force numerical if unique elements are above percentage
            if (perc_min_num is not None) and (('float' in str(df.dtypes[i])) or ('int' in str(df.dtypes[i]))):
                tmpvalues = df.iloc[:,i].dropna().astype(float).copy()
                perc=0
                if len(tmpvalues)>0:
                    perc = (len(np.unique(tmpvalues)) / len(tmpvalues))
                if (perc>=perc_min_num):
                    dtypes[i]='num'
                    logstr = ('[force]')
            # Force numerical if values are found with decimals.
            # BUGFIX: the original test `astype(int) - astype(float) > 0`
            # only fired for *negative* decimals, because int-casting
            # truncates toward zero (2 - 2.5 = -0.5 but -2 - (-2.5) = 0.5).
            # Comparing for inequality detects decimals regardless of sign.
            if num_if_decimal and (('float' in str(df.dtypes[i])) or ('int' in str(df.dtypes[i]))):
                tmpvalues = df.iloc[:, i].dropna().copy()
                if np.any(tmpvalues.astype(int) != tmpvalues.astype(float)):
                    dtypes[i] = 'num'
                    logstr = ('[force]')
            # Remove the non-ascii chars from categorical values
            if dtypes[i]=='cat':
                df.iloc[:,i] = _remove_non_ascii(df.iloc[:,i])
            try:
                makespaces = ''.join([' '] * (max_str_len - len(df.columns[i])))
                if verbose>=4: print('[df2onehot] >[%s]%s > %s > [%s] [%.0d]' %(df.columns[i], makespaces, logstr, dtypes[i], len(df.iloc[:,i].dropna().unique())))
            except Exception:
                # Fall back to the non-unique count when `.unique()` fails
                # (e.g. unhashable cell values such as lists).
                if verbose>=4: print('[df2onehot] >[%s]%s > %s > [%s] [%.0d]' %(df.columns[i], makespaces, logstr, dtypes[i], len(df.iloc[:,i].dropna())))
    return(dtypes)
# %% Setup columns in correct dtypes
def _set_types(df, dtypes, verbose=3):
    # Coerce every column of `df` to its detected dtype label:
    # 'num' -> float, 'cat' -> str (missing -> None), 'bool' -> bool;
    # other labels ('list'/'dict') are left untouched for the deep-extract
    # step and only logged at TRACE level.
    assert len(dtypes)==df.shape[1], 'Number of dtypes and columns in df does not match'
    if verbose>=3: print('[df2onehot] >Set dtypes in dataframe..')
    # Longest column name (+2) for aligned log output.
    max_str_len = np.max(list(map(len, df.columns.values.astype(str).tolist()))) + 2
    # remcols=[]
    for col, dtype in zip(df.columns, dtypes):
        makespaces = ''.join([' '] * (max_str_len - len(col)))
        if verbose>=4: print('[df2onehot] >%s' %(col))
        if dtype=='num':
            df[col]=df[col].astype(float)
        elif dtype=='cat':
            Inull = df[col].isna().values
            # NOTE(review): chained assignment (`df[col].loc[...] = None`)
            # may write to a temporary copy depending on the pandas version;
            # confirm it actually mutates `df` here.
            df[col].loc[Inull] = None
            df[col] = df[col].astype(str)
            # df[col] = df[col].astype('category')
        elif dtype=='bool':
            Inull = df[col].isna().values
            df[col].loc[Inull] = None
            df[col] = df[col].astype(bool)
        else:
            if verbose>=5: print('[df2onehot] >[%s] %s > deep extract > [%s]' %(col, makespaces, dtype))
    return(df)
# %% Set y
def set_y(y, y_min=None, numeric=False, verbose=3):
    """Group labels if required.

    Parameters
    ----------
    y : list
        input labels.
    y_min : int, optional
        If unique y-labels are less then absolute y_min, labels are grouped into the _other_ group. The default is None.
    numeric : bool [True, False], optional
        Convert to numeric labels. The default is False.
    verbose : int, optional
        Print message to screen. The default is 3.
        0: (default), 1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE

    Returns
    -------
    list of labels.
    """
    y = y.astype(str)
    if y_min is not None:
        if verbose >= 3:
            print('[df2onehot] >Group [y] labels that contains less then %d occurences are grouped under one single name [_other_]' %(y_min))
        # Labels occurring fewer than `y_min` times are collapsed into the
        # reserved '_other_' bucket.
        uniq, counts = np.unique(y, return_counts=True)
        rare = uniq[counts < y_min]
        y = y.astype('O')
        y[np.isin(y, rare)] = '_other_'  # Note that this text is captured in compute_significance! Do not change or also change it over there!
        y = y.astype(str)
    if numeric:
        # Encode the string labels as consecutive integers.
        y = label_encoder.fit_transform(y).astype(int)
    return y
# %% function to remove non-ASCII
def _remove_non_ascii(dfc):
    # Strip non-ASCII characters from a pandas Series of categorical values,
    # keeping NaN/None entries as None and restoring the Series' dtype.
    # Get the current dtype
    dftype = dfc.dtype
    # Set as string
    dfc = dfc.astype('str')
    # Find the nans (string-cast NaN/None become 'nan'/'None')
    Iloc = ~( (dfc.str.lower()=='nan') | (dfc.str.lower()=='none') | dfc.isnull() )
    # Remove non-ascii chars: first drop anything outside ASCII, then escape
    # remaining unicode and drop the escape sequences too.
    dfc.loc[Iloc] = np.array(list(map(lambda x: str(x).encode('ascii','ignore').decode('ascii','ignore').strip(), dfc.loc[Iloc])))
    dfc.loc[Iloc] = np.array(list(map(lambda x: str(x).encode('unicode_escape').decode('ascii','ignore').strip(), dfc.loc[Iloc])))
    # dfc.loc[Iloc] = dfc.loc[Iloc].replace(r'\W+', ' ', regex=True)
    # NOTE(review): `Series.replace` defaults to regex=False, so the pattern
    # below only matches values that literally equal '[^\x00-\x7F]' -- it was
    # presumably intended as a regex (cf. the commented-out line above);
    # confirm before changing.
    dfc.loc[Iloc] = dfc.loc[Iloc].replace('[^\x00-\x7F]', ' ')
    # Set the None back
    dfc.loc[~Iloc] = None
    # Bring back to original dtype
    dfc = dfc.astype(dftype)
    # Return
    return dfc
# %% Convert to pandas dataframe
def is_DataFrame(data, verbose=3):
    """Convert data into dataframe.

    Parameters
    ----------
    data : array-like
        Array-like data matrix.
    verbose : int, optional
        Print message to screen. The default is 3.
        0: (default), 1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE

    Returns
    -------
    pd.dataframe()
    """
    # Lists and numpy arrays are wrapped; dataframes pass through unchanged;
    # anything else yields None (with an INFO message).
    if isinstance(data, (list, np.ndarray)):
        return pd.DataFrame(data)
    if isinstance(data, pd.DataFrame):
        return data
    if verbose >= 3:
        print('Typing should be pd.DataFrame()!')
    return None
| 5,491
| 0
| 66
|
6faaed9836f82883490ceb00ff9ecdba6a7b7435
| 4,121
|
py
|
Python
|
src/prototype/server.py
|
Ultra-Seven/newStream
|
6ae7c152d33c0a0d02b44b13a45f72b20ba8ef16
|
[
"MIT"
] | null | null | null |
src/prototype/server.py
|
Ultra-Seven/newStream
|
6ae7c152d33c0a0d02b44b13a45f72b20ba8ef16
|
[
"MIT"
] | null | null | null |
src/prototype/server.py
|
Ultra-Seven/newStream
|
6ae7c152d33c0a0d02b44b13a45f72b20ba8ef16
|
[
"MIT"
] | null | null | null |
import random
import struct
import json
import flask
import time
import numpy as np
from collections import defaultdict
from threading import Thread
from Queue import Queue
from StringIO import StringIO
from sqlalchemy import create_engine
from flask import Flask, render_template, Response, request, stream_with_context
from py.ds import *
from py.manager import *
app = Flask(__name__)
#
# Global variables
#
flask.DEBUG = False
flask.val = 0
flask.dist = []
flask.dist_update_time = None
flask.queries = {}
flask.db = create_engine("postgresql://localhost/test")
flask.manager = Manager()
@app.route("/")
@app.route("/attr/stats", methods=["post", "get"])
def table_stats():
"""
Used by client to get the domain of the x and y axis expressions/attributes
opts: {
table: <table name>
attrs: {
<attrname>: <data type> ("continuous" | "discrete")
}
}
"""
discq = "SELECT DISTINCT %s FROM %s ORDER BY %s"
contq = "SELECT min(%s), max(%s) FROM %s"
opts = json.loads(request.data)
table = opts['table']
contattrs = []
ret = {}
for attr, typ in opts.get("attrs", {}).items():
if typ == "discrete":
q = discq % (attr, table, attr)
ret[attr] = zip(*flask.db.execute(q).fetchall())[0]
else:
q = contq % (attr, attr, table)
ret[attr] = list(flask.db.execute(q).fetchone())
return Response(json.dumps(ret))
@app.route("/register/querytemplate", methods=["post"])
def register_qtemplate():
"""
Registers a query template. Uses the query template name to instantiate (if possible)
the corresponding data structure based on those in ds_klasses
"""
template = json.loads(request.data)
flask.queries[template["tid"]] = template
tid = template['tid']
if flask.manager.has_data_structure(tid):
return Response("ok", mimetype="application/wu")
for ds_klass in ds_klasses:
if ds_klass.can_answer(template):
try:
ds = ds_klass(None, template)
ds.id = tid
flask.manager.add_data_structure(ds)
except Exception as e:
print e
continue
return Response("ok", mimetype="application/wu")
@app.route("/distribution/set", methods=["post"])
def dist_set():
"""
Set the current query distribution
A distribution is currently defined as a list of [query, probability]
where query is a dictionary: {
template: <output of js template's .toWire()>
data: { paramname: val }
}
The corresponding client files are in js/dist.js
"""
flask.dist = json.loads(request.data)
flask.dist_update_time = time.time()
if flask.DEBUG:
print "got query distribution"
return Response("ok", mimetype="application/wu")
@app.route("/data")
def data():
"""
This API opens the data stream and starts sending data via the Manager object.
The current implementation doesn't take advantage of the streaming nature and simply implements:
1. waits for a new query distribution,
2. picks the highest non-zero probability query
3. sends the cached data to the client
In effect, this implements a basic request-response model of interaction.
Details:
The data stream has a simple encoding:
[length of payload (32 bits)][encoding id (32 bits)][payload (a byte array)]
The payload is encoded based on the particular data structure
"""
return Response(flask.manager(), mimetype="test/event-stream")
@app.route("/fakedata")
if __name__ == '__main__':
import psycopg2
DEC2FLOAT = psycopg2.extensions.new_type(
psycopg2.extensions.DECIMAL.values,
'DEC2FLOAT',
lambda value, curs: float(value) if value is not None else None)
psycopg2.extensions.register_type(DEC2FLOAT)
app.run(host="localhost", port=5000, debug=0, threaded=1)#
| 26.248408
| 98
| 0.680175
|
import random
import struct
import json
import flask
import time
import numpy as np
from collections import defaultdict
from threading import Thread
from Queue import Queue
from StringIO import StringIO
from sqlalchemy import create_engine
from flask import Flask, render_template, Response, request, stream_with_context
from py.ds import *
from py.manager import *
app = Flask(__name__)
#
# Global variables
#
flask.DEBUG = False
flask.val = 0
flask.dist = []
flask.dist_update_time = None
flask.queries = {}
flask.db = create_engine("postgresql://localhost/test")
flask.manager = Manager()
@app.route("/")
def index():
return render_template("index.html")
@app.route("/attr/stats", methods=["post", "get"])
def table_stats():
"""
Used by client to get the domain of the x and y axis expressions/attributes
opts: {
table: <table name>
attrs: {
<attrname>: <data type> ("continuous" | "discrete")
}
}
"""
discq = "SELECT DISTINCT %s FROM %s ORDER BY %s"
contq = "SELECT min(%s), max(%s) FROM %s"
opts = json.loads(request.data)
table = opts['table']
contattrs = []
ret = {}
for attr, typ in opts.get("attrs", {}).items():
if typ == "discrete":
q = discq % (attr, table, attr)
ret[attr] = zip(*flask.db.execute(q).fetchall())[0]
else:
q = contq % (attr, attr, table)
ret[attr] = list(flask.db.execute(q).fetchone())
return Response(json.dumps(ret))
@app.route("/register/querytemplate", methods=["post"])
def register_qtemplate():
"""
Registers a query template. Uses the query template name to instantiate (if possible)
the corresponding data structure based on those in ds_klasses
"""
template = json.loads(request.data)
flask.queries[template["tid"]] = template
tid = template['tid']
if flask.manager.has_data_structure(tid):
return Response("ok", mimetype="application/wu")
for ds_klass in ds_klasses:
if ds_klass.can_answer(template):
try:
ds = ds_klass(None, template)
ds.id = tid
flask.manager.add_data_structure(ds)
except Exception as e:
print e
continue
return Response("ok", mimetype="application/wu")
@app.route("/distribution/set", methods=["post"])
def dist_set():
"""
Set the current query distribution
A distribution is currently defined as a list of [query, probability]
where query is a dictionary: {
template: <output of js template's .toWire()>
data: { paramname: val }
}
The corresponding client files are in js/dist.js
"""
flask.dist = json.loads(request.data)
flask.dist_update_time = time.time()
if flask.DEBUG:
print "got query distribution"
return Response("ok", mimetype="application/wu")
@app.route("/data")
def data():
"""
This API opens the data stream and starts sending data via the Manager object.
The current implementation doesn't take advantage of the streaming nature and simply implements:
1. waits for a new query distribution,
2. picks the highest non-zero probability query
3. sends the cached data to the client
In effect, this implements a basic request-response model of interaction.
Details:
The data stream has a simple encoding:
[length of payload (32 bits)][encoding id (32 bits)][payload (a byte array)]
The payload is encoded based on the particular data structure
"""
return Response(flask.manager(), mimetype="test/event-stream")
@app.route("/fakedata")
def fake_data():
s = encode_table(["a", "b"], zip(range(10), range(10)))
header = struct.pack("2I", len(s), 0)
def f():
while 1:
for j in xrange(random.randint(1, 10)):
yield header
yield s
time.sleep(0.001)
break
return Response(f(),
mimetype="text/event-stream")
if __name__ == '__main__':
import psycopg2
DEC2FLOAT = psycopg2.extensions.new_type(
psycopg2.extensions.DECIMAL.values,
'DEC2FLOAT',
lambda value, curs: float(value) if value is not None else None)
psycopg2.extensions.register_type(DEC2FLOAT)
app.run(host="localhost", port=5000, debug=0, threaded=1)#
| 338
| 0
| 44
|
b0a76137ebb58a7a46f4264eb9d1190c8162f333
| 1,110
|
py
|
Python
|
scripts/azAttrVisalizer.py
|
ejekt/rigging-system
|
dedf09cc832f56b310587b818deadfd4f8ca7b3b
|
[
"MIT"
] | 3
|
2019-12-12T03:46:41.000Z
|
2021-01-16T06:29:45.000Z
|
scripts/azAttrVisalizer.py
|
ejekt/rigging-system
|
dedf09cc832f56b310587b818deadfd4f8ca7b3b
|
[
"MIT"
] | null | null | null |
scripts/azAttrVisalizer.py
|
ejekt/rigging-system
|
dedf09cc832f56b310587b818deadfd4f8ca7b3b
|
[
"MIT"
] | null | null | null |
import maya.cmds as mc
createVisualizerNodes()
| 34.6875
| 97
| 0.679279
|
import maya.cmds as mc
def getAttrToGraph():
    """Return the 'object.attribute' path of the single attribute selected
    in the channel box, or None when zero or several are selected."""
    selected = mc.channelBox('mainChannelBox', q=1, selectedMainAttributes=1)
    if not selected or len(selected) != 1:
        return None
    obj = mc.ls(sl=1)[-1]
    return '{}.{}'.format(obj, selected[0])
def createVisualizerNodes():
    # create node tree: a locator ('loc_pointOnGraph') with a child group
    # ('grp_graphKeys') that carries keyframes marking each second.
    sGrpFollow = mc.group(n='grp_graphKeys', em=1)
    sLocGraphThis = mc.spaceLocator(n='loc_pointOnGraph')[0]
    # NOTE(review): addAttr with no node argument operates on the current
    # selection -- presumably the locator just created; confirm.
    mc.addAttr(ln='time')
    mc.addAttr(ln='offsetGraph')
    mc.parent(sGrpFollow, sLocGraphThis)
    iStartTime = int(mc.playbackOptions(q=1, min=1))
    iEndTime = int(mc.playbackOptions(q=1, max=1))
    # adding keys to mark second marks (one key every 24 frames, i.e. one
    # per second assuming a 24 fps scene)
    for t in range(iStartTime, iEndTime, 24):
        mc.setKeyframe(sGrpFollow, at='translateX', time=[t])
    # setting up the X offset: an expression drives offsetGraph from the
    # locator's time attribute, relative to the playback range start.
    sExprCmd = '{0}.offsetGraph = {0}.time - `playbackOptions -q -min` + 1'.format(sLocGraphThis)
    mc.expression(s=sExprCmd, n='expr_visualizerOffset', ae=1)
    return sLocGraphThis, sGrpFollow
createVisualizerNodes()
| 1,013
| 0
| 46
|
ab546a4162bb56f06ce55dba8449286793acaa45
| 10,530
|
py
|
Python
|
main.py
|
jeremysuh/Wikipedia-Article-Comparator
|
c37d289d5063761f713d42d3db0c8c2073252170
|
[
"MIT"
] | null | null | null |
main.py
|
jeremysuh/Wikipedia-Article-Comparator
|
c37d289d5063761f713d42d3db0c8c2073252170
|
[
"MIT"
] | null | null | null |
main.py
|
jeremysuh/Wikipedia-Article-Comparator
|
c37d289d5063761f713d42d3db0c8c2073252170
|
[
"MIT"
] | null | null | null |
import kivy
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.image import Image, AsyncImage
from kivy.uix.textinput import TextInput
from kivy.config import Config
from kivy.loader import Loader
from math import sin
import wikipedia
import matplotlib
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg,\
NavigationToolbar2Kivy
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
import numpy as np
import numpy as np2
import matplotlib.pyplot as plt
matplotlib.rcParams.update({'font.size': 8})
app = WikipediaComparatorApp()
app.run()
| 30.345821
| 129
| 0.596771
|
import kivy
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.image import Image, AsyncImage
from kivy.uix.textinput import TextInput
from kivy.config import Config
from kivy.loader import Loader
from math import sin
import wikipedia
import matplotlib
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg,\
NavigationToolbar2Kivy
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
import numpy as np
import numpy as np2
import matplotlib.pyplot as plt
matplotlib.rcParams.update({'font.size': 8})
class MainLayout(FloatLayout):
current_page = 2;
first_input_confirm = False
second_input_confirm = False
article_one = ""
article_two = ""
article_one_dictionary = []
article_two_dictionary = []
article_one_freq_words = []
article_two_freq_words = []
article_one_performance = []
article_two_performance = []
def hide_show_graphs(self):
if self.current_page is 1:
self.current_page = 2
self.ids.green_bar.pos_hint = {"x":0.075,"y":0.075}
self.ids.red_bar.pos_hint = {"x":0.075,"y":0.075}
self.ids.destination.size_hint_y = 0.45
else:
self.current_page = 1
self.ids.green_bar.pos_hint = {"x":0.075,"y":-1}
self.ids.red_bar.pos_hint = {"x":0.075,"y":-1}
self.ids.destination.size_hint_y = 0
pass
def __init__(self, **kwargs):
super(FloatLayout, self).__init__(**kwargs)
fig = plt.figure()
f, ((ax1, ax2)) = plt.subplots(1, 2, sharex=False, sharey=False)
x = [0, 1, 2, 3, 4]
ax2.set_xticks(np.arange(min(x), max(x) + 1, 1.0))
ax1.set_xticks(np.arange(min(x), max(x) + 1, 1.0))
objects = ('Python', 'C++', 'Java', 'Perl', 'Scala')
y_pos = np.arange(len(objects))
performance = [5,5,5,5,5]
ax1.bar(y_pos, performance, align='center', alpha=0.5)
ax1.set_xticklabels(('-','-','-','-','-'))
ax2.set_xticklabels(('-','-','-','-','-'))
plt.suptitle('Most frequent words')
y_pos = np.arange(len(objects))
performance = [5,5,5,5,5]
ax2.bar(y_pos, performance, align='center', alpha=0.5)
self.ids.destination.add_widget(FigureCanvasKivyAgg(plt.gcf()))
self.hide_show_graphs()
def on_click_check(self, num):
if num is 1:
try:
wikipedia.search(self.ids.search_one.text)
article = wikipedia.page(self.ids.search_one.text)
self.article_one = self.ids.search_one.text
self.first_input_confirm = True
self.ids.detect_one.background_normal = 'green_bar2.png'
if self.first_input_confirm & self.second_input_confirm:
self.ids.compare_button.disabled = False
self.ids.detect_one.background_normal = 'green_bar2.png'
except wikipedia.WikipediaException:
self.ids.compare_button.disabled = True
self.ids.detect_one.background_normal = 'red_bar2.png'
else:
try:
article = wikipedia.page(self.ids.search_two.text)
self.article_two = self.ids.search_two.text
self.second_input_confirm = True
self.ids.detect_two.background_normal = 'green_bar2.png'
if self.first_input_confirm & self.second_input_confirm:
self.ids.compare_button.disabled = False
self.ids.detect_two.background_normal = 'green_bar2.png'
except wikipedia.WikipediaException:
self.ids.compare_button.disabled = True
self.ids.detect_two.background_normal = 'red_bar2.png'
pass
def analyze(self, article, num, title, image, links, section, reference, word_count, unique_words):
print "analyze"
wikiarticle = wikipedia.page(article)
title.text = "Article Title: " + article
image.text = "Image Count: " + str(self.count_images(wikiarticle, num))
links.text = "Link Count: " + str(self.count_links(wikiarticle))
section.text = "Categories Count: " + str(self.count_section(wikiarticle))
reference.text = "Reference Count: " + str(self.count_reference(wikiarticle))
word_count.text = "Word Count: " + str(self.count_word(wikiarticle))
self.analyze_words(wikiarticle, unique_words, num)
def analyze_words(self, article, unique_words, num):
dictionary = {}
text = article.content
textsplit = text.split()
for word in textsplit:
if word in dictionary:
dictionary[word] += 1
else:
dictionary[word] = 1
unique_words.text = "Unique Words: " + str(len(dictionary))
if num is 1:
self.article_one_dictionary = dictionary
else:
self.article_two_dictionary = dictionary
pass
def get_frequent_words(self, dictionary, num):
print num
objects = []
performance = []
for i in range(0, 5):
largestCount = 0
frequentWord = ""
for key in dictionary:
if dictionary[key] > largestCount and self.is_article(key) == False and key != "=" \
and key != "=="and key != "===" and key != "-" and key != "--" and key != "_":
frequentWord = key
largestCount = dictionary[key]
objects.append(frequentWord)
performance.append(largestCount)
del dictionary[frequentWord]
if num is 1:
print("FIRST")
self.article_one_freq_words = objects
self.article_one_performance = performance
else:
print("SECOND")
self.article_two_freq_words = objects
self.article_two_performance = performance
pass
def update_graphs(self):
self.ids.destination.clear_widgets()
fig = plt.figure()
f, ((ax1, ax2)) = plt.subplots(1, 2, sharex=False, sharey=False)
x = [0, 1, 2, 3, 4]
ax2.set_xticks(np.arange(min(x), max(x) + 1, 1.0))
ax1.set_xticks(np.arange(min(x), max(x) + 1, 1.0))
objects = ('Python', 'C++', 'Javap', 'Perlp', 'Scala')
y_pos = np.arange(len(objects))
performance = self.article_one_performance
ax1.bar(y_pos, performance, align='center', alpha=0.5)
ax1.set_xticklabels(self.article_one_freq_words)
ax2.set_xticklabels(self.article_two_freq_words)
plt.suptitle('Most frequent words')
y_pos = np.arange(len(objects))
performance = self.article_two_performance
ax2.bar(y_pos, performance, align='center', alpha=0.5)
self.ids.destination.add_widget(FigureCanvasKivyAgg(plt.gcf()))
pass
def count_word(self, wikiarticle):
text = wikiarticle.content
return len(map(len, text.split()))
def count_images(self, wikiarticle, num):
if num is 1:
if len(wikiarticle.images) != 0:
self.ids.wiki_image_one.source = wikiarticle.images[0]
else:
if len(wikiarticle.images) != 0:
self.ids.wiki_image_two.source = wikiarticle.images[0]
return len(wikiarticle.images)
def count_links(self, wikiarticle):
return len(wikiarticle.links)
def count_section(self, wikiarticle):
return len(wikiarticle.categories)
def count_reference(self, wikiarticle):
return len(wikiarticle.references)
def on_click_arrow(self):
self.hide_show_graphs()
pass
def is_article(self, word):
return False
return word == "the" or word == "a" or word == "an"
def open_link(self, num):
pass
def on_click_compare(self):
print "compare"
print self.article_one
self.analyze(self.article_one, 1,
self.ids.article_title_one,
self.ids.image_count_one,
self.ids.link_count_one,
self.ids.section_count_one,
self.ids.reference_count_one,
self.ids.word_count_one,
self.ids.unique_words_one
)
self.analyze(self.article_two, 2,
self.ids.article_title_two,
self.ids.image_count_two,
self.ids.link_count_two,
self.ids.section_count_two,
self.ids.reference_count_two,
self.ids.word_count_two,
self.ids.unique_words_two
)
self.ids.green_bar.size_hint_x = self.compare_dictionary(self.article_one_dictionary, self.article_two_dictionary) * 0.85
self.get_frequent_words(self.article_one_dictionary, 1)
self.get_frequent_words(self.article_two_dictionary, 2)
self.update_graphs()
pass
def on_click_random(self, num):
random_text = wikipedia.random()
while (len(random_text) >= 17):
random_text = wikipedia.random()
if num is 1:
self.ids.search_one.text = random_text
self.ids.compare_button.disabled = True
self.ids.detect_one.background_normal = 'red_bar2.png'
else:
self.ids.search_two.text = random_text
self.ids.compare_button.disabled = True
self.ids.detect_two.background_normal = 'red_bar2.png'
pass
def compare_dictionary(self, d1, d2):
similar_words = 0
for key in d1:
if key in d2:
similar_words+=1
return float(similar_words)/(similar_words + (len(d1)-similar_words) + (len(d2)-similar_words))
def change_text(self):
pass
pass
class WikipediaComparatorApp(App):
def build(self):
Loader.loading_image = 'tenory.gif'
Loader.error_image = 'wikilogo.png'
Config.set('graphics', 'width', '1500')
Config.set('graphics', 'height', '640')
Config.set('graphics', 'resizable', '0')
return MainLayout()
app = WikipediaComparatorApp()
app.run()
| 8,785
| 874
| 72
|
7f14c7b9add693eca4ea3f8926c5b41bca78b3f7
| 22,022
|
py
|
Python
|
tests/test_internals.py
|
ccaruceru/slack-multireact
|
08b9018c25802d440876516d3469ddd3ad42e260
|
[
"MIT"
] | null | null | null |
tests/test_internals.py
|
ccaruceru/slack-multireact
|
08b9018c25802d440876516d3469ddd3ad42e260
|
[
"MIT"
] | null | null | null |
tests/test_internals.py
|
ccaruceru/slack-multireact
|
08b9018c25802d440876516d3469ddd3ad42e260
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Tests for internals.py"""
from asyncio.tasks import Task
import os
import json
import sys
import unittest
import asyncio
import logging
from io import StringIO
from importlib import reload
from unittest.mock import AsyncMock, Mock, call, patch
from aiohttp.client_exceptions import ClientConnectorError
from google.cloud.storage.blob import Blob
from google.cloud.storage.bucket import Bucket
from slack_bolt.app.async_app import AsyncApp
from slack_sdk.errors import SlackApiError
from slack_sdk.web.async_client import AsyncWebClient
from slack_sdk.web.async_slack_response import AsyncSlackResponse
from multi_reaction_add.internals import check_env, setup_logger, build_home_tab_view, user_data_key,\
delete_users_data, EmojiOperator
# pylint: disable=attribute-defined-outside-init
class TestCheckEnv(unittest.TestCase):
"""Test env vars checker"""
def setUp(self):
"""Setup tests"""
self.env_keys = ["SLACK_CLIENT_ID", "SLACK_CLIENT_SECRET", "SLACK_SIGNING_SECRET",
"SLACK_INSTALLATION_GOOGLE_BUCKET_NAME", "SLACK_STATE_GOOGLE_BUCKET_NAME", "USER_DATA_BUCKET_NAME"]
def test_checkenv_ok(self):
"""Test checkenv success"""
for key in self.env_keys:
os.environ[key] = ""
check_env()
for key in self.env_keys:
del os.environ[key]
@unittest.expectedFailure
def test_checkenv_missing(self):
"""Test checkenv throws error"""
# pylint: disable=no-self-use
check_env()
class TestCloudLogging(unittest.TestCase):
"""Test logger class"""
def tearDown(self):
"""Cleanup tests"""
logging.shutdown()
reload(logging)
def test_log_format(self):
"""Test logger has correct format"""
with StringIO() as stream:
logger = setup_logger(stream=stream)
logger.info("a message")
self.assertRegex(stream.getvalue(), r'{"timestamp": "\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}", '
'"severity": "INFO", "funcName": "test_log_format", '
'"component": "root", "message": "a message"}')
def test_log_level_set(self):
"""Test log level can be set from env"""
os.environ["LOG_LEVEL"] = "WARNING"
with StringIO() as stream:
logger = setup_logger(stream=stream)
del os.environ["LOG_LEVEL"]
logger.info("a message")
logger.warning("some message")
logger.error("another message")
output = stream.getvalue()
self.assertTrue(all([
"a message" not in output,
"some message" in output,
"another message" in output
]), msg="Cannot set log level")
class TestInternals(unittest.TestCase):
"""Test light methods"""
def test_build_home_tab(self):
"""Test build_home_tab method"""
# check home tab with no urls
home_tab_dict = build_home_tab_view()
home_tab_json = json.dumps(home_tab_dict, separators=(",", ":"))
self.assertEqual(home_tab_json, '{"type":"home","blocks":[{"type":"header","text":{"type":"plain_text","text":'
'"Setting emojis :floppy_disk:","emoji":true}},{"type":"section","text":{"type"'
':"mrkdwn","text":"Type `/multireact <list of emojis>` in any chat to set a'
' list of emojis for later usage."}},{"type":"section","text":{"type":"mrkdwn",'
'"text":"You can view what you saved any moment by typing `/multireact` in'
' any chat."}},{"type":"divider"},{"type":"header","text":{"type":"plain_text",'
'"text":"Adding Reactions :star-struck:","emoji":true}},{"type":"section",'
'"text":{"type":"mrkdwn","text":"Go to a message, click `More Actions`, then'
' click on `Multireact` to react with the saved emojis to the message.\\n\\nIf'
' you can\'t see `Multireact`, click `More message shortcuts...`'
' to find it."}}]}')
# check home tab with urls
home_tab_dict = build_home_tab_view(app_url="localhost")
home_tab_json = json.dumps(home_tab_dict, separators=(",", ":"))
self.assertEqual(home_tab_json, '{"type":"home","blocks":[{"type":"header","text":{"type":"plain_text","text":'
'"Setting emojis :floppy_disk:","emoji":true}},{"type":"section","text":{"type"'
':"mrkdwn","text":"Type `/multireact <list of emojis>` in any chat to set a'
' list of emojis for later usage."}},{"type":"image","image_url":'
'"localhost/img/reaction-write-emojis.png?w=1024&ssl=1","alt_text":'
'"write emojis"},{"type":"image","image_url":'
'"localhost/img/reaction-save.png?w=1024&ssl=1","alt_text":'
'"saved emojis"},{"type":"section","text":{"type":"mrkdwn","text":'
'"You can view what you saved any moment by typing `/multireact` in any'
' chat."}},{"type":"image","image_url":'
'"localhost/img/reaction-write-nothing.png?w=1024&ssl=1","alt_text":'
'"view emojis"},{"type":"image","image_url":'
'"localhost/img/reaction-view.png?w=1024&ssl=1","alt_text":"view emojis"},'
'{"type":"divider"},{"type":"header","text":{"type":"plain_text","text":'
'"Adding Reactions :star-struck:","emoji":true}},{"type":"section","text":'
'{"type":"mrkdwn","text":"Go to a message, click `More Actions`, then click on'
' `Multireact` to react with the saved emojis to the message.\\n\\nIf you'
' can\'t see `Multireact`, click `More message shortcuts...` to find it."}},'
'{"type":"image","image_url":'
'"localhost/img/reaction-none.png?w=1024&ssl=1","alt_text":"message with no'
' reactions"},{"type":"image","image_url":'
'"localhost/img/reaction-menu.png?w=1024&ssl=1","alt_text":"message menu"},'
'{"type":"image","image_url":'
'"localhost/img/reaction-add.png?w=1024&ssl=1","alt_text":'
'"message with reactions"}]}')
def test_user_data_key(self):
"""Test user_data_key method"""
self.assertEqual(
user_data_key("client_id", "enter_id", "team_id", "user_id"),
"client_id/enter_id-team_id/user_id")
self.assertEqual(
user_data_key("client_id", None, "team_id", "user_id"),
"client_id/none-team_id/user_id")
class TestDeleteUserData(unittest.IsolatedAsyncioTestCase):
"""Test user data deletion"""
async def asyncSetUp(self):
"""Setup tests"""
self.bucket = Mock(spec=Bucket)
self.blob = Blob(name="name", bucket=self.bucket)
self.blob.delete = Mock()
self.bucket.blob = Mock(return_value=self.blob)
@classmethod
def setUpClass(cls):
"""Setup tests once"""
if sys.platform.startswith("win"):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
@patch("multi_reaction_add.internals.user_data_key")
async def test_delete_users_data(self, mock_user_data_key: Mock):
"""Test delete_users_data method"""
# test user data exists
self.blob.exists = Mock(return_value=True)
await delete_users_data(self.bucket, "client_id", "enter_id", "team_id", ["user_id"])
self.blob.exists.assert_called_once()
self.blob.delete.assert_called_once()
self.blob.delete.reset_mock()
# test user data doesn't exist
self.blob.exists = Mock(return_value=False)
await delete_users_data(self.bucket, "client_id", "enter_id", "team_id", ["user_id"])
self.blob.exists.assert_called_once()
self.blob.delete.assert_not_called()
# test multiple user data
await delete_users_data(self.bucket, "client_id", "enter_id", "team_id", ["user_id1", "user_id2"])
mock_user_data_key.assert_has_calls([call(slack_client_id="client_id",
enterprise_id="enter_id",
team_id="team_id",
user_id="user_id1"),
call(slack_client_id="client_id",
enterprise_id="enter_id",
team_id="team_id",
user_id="user_id2")])
class TestEmojiOperator(unittest.IsolatedAsyncioTestCase):
"""Test EmojiOperator class"""
# pylint: disable=protected-access
async def asyncSetUp(self):
"""Setup tests"""
self.client = AsyncMock(AsyncWebClient)
self.client.token = None
self.http_args = {"client": self.client, "http_verb": "POST", "api_url": "some-api", "req_args": {},
"headers": {}, "status_code": 200}
self.app = AsyncMock(AsyncApp)
self.app.client = self.client
self.logger = logging.getLogger()
self.logger.handlers = []
@classmethod
def setUpClass(cls):
"""Setup tests once"""
if sys.platform.startswith("win"):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
async def test_get_user_reactions(self):
"""Test get_user_reactions method"""
# check no reactions
response = AsyncSlackResponse(**{**self.http_args, **{"data": {"type": "message", "message": {}}} })
self.client.reactions_get.return_value = response
emojis = await EmojiOperator.get_user_reactions(client=self.client,
channel_id="channel_id",
message_ts="message_ts",
user_id="user_id")
self.assertEqual(emojis, [])
# sample response: https://api.slack.com/methods/reactions.get
# check reactions on message
response = AsyncSlackResponse(**{**self.http_args, **{"data": {"type": "message", "message": {
"reactions": [{
"name": "smile",
"users": [ "user_id1", "user_id2" ]
}, {
"name": "wink",
"users": [ "user_id2", "user_id3" ]
}]
}}}})
self.client.reactions_get.return_value = response
emojis = await EmojiOperator.get_user_reactions(client=self.client,
channel_id="channel_id",
message_ts="message_ts",
user_id="user_id2")
self.assertEqual(emojis, ["smile", "wink"])
# check reactions on file
response = AsyncSlackResponse(**{**self.http_args, **{"data": {"type": "file", "file": {
"reactions": [{
"name": "laugh",
"users": [ "user_id1", "user_id2" ]
}]
}}}})
self.client.reactions_get.return_value = response
emojis = await EmojiOperator.get_user_reactions(client=self.client,
channel_id="channel_id",
message_ts="message_ts",
user_id="user_id1")
self.assertEqual(emojis, ["laugh"])
# check reactions on file_comment
response = AsyncSlackResponse(**{**self.http_args, **{"data": {"type": "file_comment", "comment": {
"reactions": [{
"name": "heart",
"users": [ "user_id1", "user_id2" ]
}]
}}}})
self.client.reactions_get.return_value = response
emojis = await EmojiOperator.get_user_reactions(client=self.client,
channel_id="channel_id",
message_ts="message_ts",
user_id="user_id2")
self.assertEqual(emojis, ["heart"])
@patch("aiohttp.ClientSession.get")
async def test_get_reactions_in_team(self, get: AsyncMock):
"""Test get_reactions_in_team method"""
mock_context_manager: AsyncMock = get.return_value.__aenter__.return_value
mock_context_manager.status = 200
mock_context_manager.text.return_value = \
'[{"base":"anguished"}, {"base":"sad_face"}, {"base":"clap"}]'
# sample response: https://api.slack.com/methods/emoji.list
slack_response = AsyncSlackResponse(**{**self.http_args, **{"data": {
"emoji": {
"longcat": "some url",
"doge": "alias",
"partyparrot": "some url",
},
"categories": [ {
"name": "faces",
"emoji_names": ["smile", "wink"]
}, {
"name": "flags",
"emoji_names": ["flag1", "flag2", "flag3"]
}
]
}}})
self.client.emoji_list.return_value = slack_response
# test standard emojis response ok
emojis = await EmojiOperator._get_reactions_in_team(client=self.client, logger=self.logger)
self.client.emoji_list.assert_awaited_once_with(include_categories=True)
# session.get.assert_called_once_with("https://www.emojidex.com/api/v1/utf_emoji")
mock_context_manager.text.assert_awaited_once_with(encoding="utf-8")
self.assertEqual(set(emojis),
set(["longcat", "doge", "partyparrot", "smile", "wink", "flag1", "flag2", "flag3",
"anguished", "sad_face", "clap"]),
msg="Could not parse all emojis")
mock_context_manager.reset_mock()
get.reset_mock()
# test standard emojis response not ok
get.return_value.__aenter__.return_value.status = 500
emojis = await EmojiOperator._get_reactions_in_team(client=self.client, logger=self.logger)
mock_context_manager.text.assert_not_awaited()
self.assertEqual(set(emojis),
set(["longcat", "doge", "partyparrot", "smile", "wink", "flag1", "flag2", "flag3"]),
msg="Should not return standard emojis when invalid http request")
mock_context_manager.reset_mock()
get.reset_mock()
# test standard emojis response exception
get.return_value.__aenter__.side_effect = ClientConnectorError(None, Mock())
emojis = await EmojiOperator._get_reactions_in_team(client=self.client, logger=self.logger)
mock_context_manager.text.assert_not_awaited()
self.assertEqual(set(emojis),
set(["longcat", "doge", "partyparrot", "smile", "wink", "flag1", "flag2", "flag3"]),
msg="Should not return standard emojis when connection error")
@patch("multi_reaction_add.internals.EmojiOperator._get_reactions_in_team")
async def test_update_emoji_list(self, get_reactions: AsyncMock):
"""Test update_emoji_list method"""
get_reactions.return_value = ["some", "emojis"]
emoji_operator = EmojiOperator()
self.client.token = "old token"
# test normal execution
try:
await asyncio.wait_for(
emoji_operator._update_emoji_list(
app=self.app,
token="new token",
logger=self.logger,
sleep=1),
timeout=1.5)
except asyncio.TimeoutError:
pass
get_reactions.assert_awaited_once_with(self.client, self.logger)
self.assertEqual(emoji_operator._all_emojis, ["some", "emojis"])
self.assertEqual(self.client.token, "old token")
# test all_emojis left unchanged on slack api error
get_reactions.side_effect = SlackApiError(None, None)
try:
await asyncio.wait_for(
emoji_operator._update_emoji_list(
app=self.app,
token="new token",
logger=self.logger,
sleep=1),
timeout=1.5)
except asyncio.TimeoutError:
pass
self.assertEqual(emoji_operator._all_emojis, ["some", "emojis"])
self.assertEqual(self.client.token, "old token")
# test all_emojis unset on slack api exception
emoji_operator._all_emojis = None
get_reactions.side_effect = SlackApiError(None, None)
try:
await asyncio.wait_for(
emoji_operator._update_emoji_list(
app=self.app,
token="new token",
logger=self.logger,
sleep=1),
timeout=1.5)
except asyncio.TimeoutError:
pass
self.assertEqual(emoji_operator._all_emojis, None)
self.assertEqual(self.client.token, "old token")
async def test_stop_emoji_thread(self):
"""Test stop_emoji_thread method"""
emoji_operator = EmojiOperator()
emoji_operator._emoji_task = asyncio.create_task(some_method())
await emoji_operator.stop_emoji_update()
await asyncio.sleep(0.1) # task will be canceled when it will be scheduled in the event loop
self.assertTrue(emoji_operator._emoji_task.done())
@patch("multi_reaction_add.internals.EmojiOperator._get_reactions_in_team")
async def test_get_valid_reactions(self, get_reactions: AsyncMock):
"""Test get_valid_reactions method"""
emoji_operator = EmojiOperator()
emoji_operator._emoji_task = Mock(spec=Task)
emoji_operator._emoji_task.done.return_value = False
emoji_operator._update_emoji_list = AsyncMock()
emoji_operator._all_emojis = ["smile", "wink", "face", "laugh", "some-emoji", "-emj-", "_emj_", "some_emoji",
"+one", "'quote'", "54"]
# check empty input
emojis = await emoji_operator.get_valid_reactions(text="",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, [])
# check no emojis in input
emojis = await emoji_operator.get_valid_reactions(text="some text",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, [])
# check no valid emojis
emojis = await emoji_operator.get_valid_reactions(text="::::",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, [])
# check valid input
emojis = await emoji_operator.get_valid_reactions(text=":smile: :wink:",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["smile", "wink"])
# check emojis special characters
emojis = await emoji_operator.get_valid_reactions(
text=":some-emoji: :-emj-: :_emj_: :some_emoji: :+one: :'quote': :54:",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["some-emoji", "-emj-", "_emj_", "some_emoji", "+one", "'quote'", "54"])
# check remove duplicates
emojis = await emoji_operator.get_valid_reactions(text=":smile: :wink: :smile:",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["smile", "wink"])
# check emoji with modifier
emojis = await emoji_operator.get_valid_reactions(text=":face::skin-tone-2:",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["face::skin-tone-2"])
# check no space in input
emojis = await emoji_operator.get_valid_reactions(
text=":smile::wink::face::skin-tone-2::face::skin-tone-3::laugh:",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["smile", "wink", "face::skin-tone-2", "face::skin-tone-3", "laugh"])
# check text and emojis
emojis = await emoji_operator.get_valid_reactions(
text="sometext:smile:anothertext:wink:moretext:laugh:endoftext",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["smile", "wink", "laugh"])
# check invalid emoji
emojis = await emoji_operator.get_valid_reactions(text=":smile: :invalid:",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["smile"])
# check emoji_task is started when finished
get_reactions.return_value = ["joy"]
emoji_operator._emoji_task.done.return_value = True
emojis = await emoji_operator.get_valid_reactions(text=":joy:",
client=self.client,
app=self.app,
logger=self.logger)
get_reactions.assert_awaited_once_with(self.client, self.logger)
self.assertEqual(emojis, ["joy"])
| 45.219713
| 120
| 0.563164
|
# -*- coding: utf-8 -*-
"""Tests for internals.py"""
from asyncio.tasks import Task
import os
import json
import sys
import unittest
import asyncio
import logging
from io import StringIO
from importlib import reload
from unittest.mock import AsyncMock, Mock, call, patch
from aiohttp.client_exceptions import ClientConnectorError
from google.cloud.storage.blob import Blob
from google.cloud.storage.bucket import Bucket
from slack_bolt.app.async_app import AsyncApp
from slack_sdk.errors import SlackApiError
from slack_sdk.web.async_client import AsyncWebClient
from slack_sdk.web.async_slack_response import AsyncSlackResponse
from multi_reaction_add.internals import check_env, setup_logger, build_home_tab_view, user_data_key,\
delete_users_data, EmojiOperator
# pylint: disable=attribute-defined-outside-init
class TestCheckEnv(unittest.TestCase):
"""Test env vars checker"""
def setUp(self):
"""Setup tests"""
self.env_keys = ["SLACK_CLIENT_ID", "SLACK_CLIENT_SECRET", "SLACK_SIGNING_SECRET",
"SLACK_INSTALLATION_GOOGLE_BUCKET_NAME", "SLACK_STATE_GOOGLE_BUCKET_NAME", "USER_DATA_BUCKET_NAME"]
def test_checkenv_ok(self):
"""Test checkenv success"""
for key in self.env_keys:
os.environ[key] = ""
check_env()
for key in self.env_keys:
del os.environ[key]
@unittest.expectedFailure
def test_checkenv_missing(self):
"""Test checkenv throws error"""
# pylint: disable=no-self-use
check_env()
class TestCloudLogging(unittest.TestCase):
"""Test logger class"""
def tearDown(self):
"""Cleanup tests"""
logging.shutdown()
reload(logging)
def test_log_format(self):
"""Test logger has correct format"""
with StringIO() as stream:
logger = setup_logger(stream=stream)
logger.info("a message")
self.assertRegex(stream.getvalue(), r'{"timestamp": "\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}", '
'"severity": "INFO", "funcName": "test_log_format", '
'"component": "root", "message": "a message"}')
def test_log_level_set(self):
"""Test log level can be set from env"""
os.environ["LOG_LEVEL"] = "WARNING"
with StringIO() as stream:
logger = setup_logger(stream=stream)
del os.environ["LOG_LEVEL"]
logger.info("a message")
logger.warning("some message")
logger.error("another message")
output = stream.getvalue()
self.assertTrue(all([
"a message" not in output,
"some message" in output,
"another message" in output
]), msg="Cannot set log level")
class TestInternals(unittest.TestCase):
"""Test light methods"""
def test_build_home_tab(self):
"""Test build_home_tab method"""
# check home tab with no urls
home_tab_dict = build_home_tab_view()
home_tab_json = json.dumps(home_tab_dict, separators=(",", ":"))
self.assertEqual(home_tab_json, '{"type":"home","blocks":[{"type":"header","text":{"type":"plain_text","text":'
'"Setting emojis :floppy_disk:","emoji":true}},{"type":"section","text":{"type"'
':"mrkdwn","text":"Type `/multireact <list of emojis>` in any chat to set a'
' list of emojis for later usage."}},{"type":"section","text":{"type":"mrkdwn",'
'"text":"You can view what you saved any moment by typing `/multireact` in'
' any chat."}},{"type":"divider"},{"type":"header","text":{"type":"plain_text",'
'"text":"Adding Reactions :star-struck:","emoji":true}},{"type":"section",'
'"text":{"type":"mrkdwn","text":"Go to a message, click `More Actions`, then'
' click on `Multireact` to react with the saved emojis to the message.\\n\\nIf'
' you can\'t see `Multireact`, click `More message shortcuts...`'
' to find it."}}]}')
# check home tab with urls
home_tab_dict = build_home_tab_view(app_url="localhost")
home_tab_json = json.dumps(home_tab_dict, separators=(",", ":"))
self.assertEqual(home_tab_json, '{"type":"home","blocks":[{"type":"header","text":{"type":"plain_text","text":'
'"Setting emojis :floppy_disk:","emoji":true}},{"type":"section","text":{"type"'
':"mrkdwn","text":"Type `/multireact <list of emojis>` in any chat to set a'
' list of emojis for later usage."}},{"type":"image","image_url":'
'"localhost/img/reaction-write-emojis.png?w=1024&ssl=1","alt_text":'
'"write emojis"},{"type":"image","image_url":'
'"localhost/img/reaction-save.png?w=1024&ssl=1","alt_text":'
'"saved emojis"},{"type":"section","text":{"type":"mrkdwn","text":'
'"You can view what you saved any moment by typing `/multireact` in any'
' chat."}},{"type":"image","image_url":'
'"localhost/img/reaction-write-nothing.png?w=1024&ssl=1","alt_text":'
'"view emojis"},{"type":"image","image_url":'
'"localhost/img/reaction-view.png?w=1024&ssl=1","alt_text":"view emojis"},'
'{"type":"divider"},{"type":"header","text":{"type":"plain_text","text":'
'"Adding Reactions :star-struck:","emoji":true}},{"type":"section","text":'
'{"type":"mrkdwn","text":"Go to a message, click `More Actions`, then click on'
' `Multireact` to react with the saved emojis to the message.\\n\\nIf you'
' can\'t see `Multireact`, click `More message shortcuts...` to find it."}},'
'{"type":"image","image_url":'
'"localhost/img/reaction-none.png?w=1024&ssl=1","alt_text":"message with no'
' reactions"},{"type":"image","image_url":'
'"localhost/img/reaction-menu.png?w=1024&ssl=1","alt_text":"message menu"},'
'{"type":"image","image_url":'
'"localhost/img/reaction-add.png?w=1024&ssl=1","alt_text":'
'"message with reactions"}]}')
def test_user_data_key(self):
"""Test user_data_key method"""
self.assertEqual(
user_data_key("client_id", "enter_id", "team_id", "user_id"),
"client_id/enter_id-team_id/user_id")
self.assertEqual(
user_data_key("client_id", None, "team_id", "user_id"),
"client_id/none-team_id/user_id")
class TestDeleteUserData(unittest.IsolatedAsyncioTestCase):
"""Test user data deletion"""
async def asyncSetUp(self):
"""Setup tests"""
self.bucket = Mock(spec=Bucket)
self.blob = Blob(name="name", bucket=self.bucket)
self.blob.delete = Mock()
self.bucket.blob = Mock(return_value=self.blob)
@classmethod
def setUpClass(cls):
"""Setup tests once"""
if sys.platform.startswith("win"):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
@patch("multi_reaction_add.internals.user_data_key")
async def test_delete_users_data(self, mock_user_data_key: Mock):
"""Test delete_users_data method"""
# test user data exists
self.blob.exists = Mock(return_value=True)
await delete_users_data(self.bucket, "client_id", "enter_id", "team_id", ["user_id"])
self.blob.exists.assert_called_once()
self.blob.delete.assert_called_once()
self.blob.delete.reset_mock()
# test user data doesn't exist
self.blob.exists = Mock(return_value=False)
await delete_users_data(self.bucket, "client_id", "enter_id", "team_id", ["user_id"])
self.blob.exists.assert_called_once()
self.blob.delete.assert_not_called()
# test multiple user data
await delete_users_data(self.bucket, "client_id", "enter_id", "team_id", ["user_id1", "user_id2"])
mock_user_data_key.assert_has_calls([call(slack_client_id="client_id",
enterprise_id="enter_id",
team_id="team_id",
user_id="user_id1"),
call(slack_client_id="client_id",
enterprise_id="enter_id",
team_id="team_id",
user_id="user_id2")])
class TestEmojiOperator(unittest.IsolatedAsyncioTestCase):
"""Test EmojiOperator class"""
# pylint: disable=protected-access
async def asyncSetUp(self):
"""Setup tests"""
self.client = AsyncMock(AsyncWebClient)
self.client.token = None
self.http_args = {"client": self.client, "http_verb": "POST", "api_url": "some-api", "req_args": {},
"headers": {}, "status_code": 200}
self.app = AsyncMock(AsyncApp)
self.app.client = self.client
self.logger = logging.getLogger()
self.logger.handlers = []
@classmethod
def setUpClass(cls):
"""Setup tests once"""
if sys.platform.startswith("win"):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
async def test_get_user_reactions(self):
"""Test get_user_reactions method"""
# check no reactions
response = AsyncSlackResponse(**{**self.http_args, **{"data": {"type": "message", "message": {}}} })
self.client.reactions_get.return_value = response
emojis = await EmojiOperator.get_user_reactions(client=self.client,
channel_id="channel_id",
message_ts="message_ts",
user_id="user_id")
self.assertEqual(emojis, [])
# sample response: https://api.slack.com/methods/reactions.get
# check reactions on message
response = AsyncSlackResponse(**{**self.http_args, **{"data": {"type": "message", "message": {
"reactions": [{
"name": "smile",
"users": [ "user_id1", "user_id2" ]
}, {
"name": "wink",
"users": [ "user_id2", "user_id3" ]
}]
}}}})
self.client.reactions_get.return_value = response
emojis = await EmojiOperator.get_user_reactions(client=self.client,
channel_id="channel_id",
message_ts="message_ts",
user_id="user_id2")
self.assertEqual(emojis, ["smile", "wink"])
# check reactions on file
response = AsyncSlackResponse(**{**self.http_args, **{"data": {"type": "file", "file": {
"reactions": [{
"name": "laugh",
"users": [ "user_id1", "user_id2" ]
}]
}}}})
self.client.reactions_get.return_value = response
emojis = await EmojiOperator.get_user_reactions(client=self.client,
channel_id="channel_id",
message_ts="message_ts",
user_id="user_id1")
self.assertEqual(emojis, ["laugh"])
# check reactions on file_comment
response = AsyncSlackResponse(**{**self.http_args, **{"data": {"type": "file_comment", "comment": {
"reactions": [{
"name": "heart",
"users": [ "user_id1", "user_id2" ]
}]
}}}})
self.client.reactions_get.return_value = response
emojis = await EmojiOperator.get_user_reactions(client=self.client,
channel_id="channel_id",
message_ts="message_ts",
user_id="user_id2")
self.assertEqual(emojis, ["heart"])
@patch("aiohttp.ClientSession.get")
async def test_get_reactions_in_team(self, get: AsyncMock):
"""Test get_reactions_in_team method"""
mock_context_manager: AsyncMock = get.return_value.__aenter__.return_value
mock_context_manager.status = 200
mock_context_manager.text.return_value = \
'[{"base":"anguished"}, {"base":"sad_face"}, {"base":"clap"}]'
# sample response: https://api.slack.com/methods/emoji.list
slack_response = AsyncSlackResponse(**{**self.http_args, **{"data": {
"emoji": {
"longcat": "some url",
"doge": "alias",
"partyparrot": "some url",
},
"categories": [ {
"name": "faces",
"emoji_names": ["smile", "wink"]
}, {
"name": "flags",
"emoji_names": ["flag1", "flag2", "flag3"]
}
]
}}})
self.client.emoji_list.return_value = slack_response
# test standard emojis response ok
emojis = await EmojiOperator._get_reactions_in_team(client=self.client, logger=self.logger)
self.client.emoji_list.assert_awaited_once_with(include_categories=True)
# session.get.assert_called_once_with("https://www.emojidex.com/api/v1/utf_emoji")
mock_context_manager.text.assert_awaited_once_with(encoding="utf-8")
self.assertEqual(set(emojis),
set(["longcat", "doge", "partyparrot", "smile", "wink", "flag1", "flag2", "flag3",
"anguished", "sad_face", "clap"]),
msg="Could not parse all emojis")
mock_context_manager.reset_mock()
get.reset_mock()
# test standard emojis response not ok
get.return_value.__aenter__.return_value.status = 500
emojis = await EmojiOperator._get_reactions_in_team(client=self.client, logger=self.logger)
mock_context_manager.text.assert_not_awaited()
self.assertEqual(set(emojis),
set(["longcat", "doge", "partyparrot", "smile", "wink", "flag1", "flag2", "flag3"]),
msg="Should not return standard emojis when invalid http request")
mock_context_manager.reset_mock()
get.reset_mock()
# test standard emojis response exception
get.return_value.__aenter__.side_effect = ClientConnectorError(None, Mock())
emojis = await EmojiOperator._get_reactions_in_team(client=self.client, logger=self.logger)
mock_context_manager.text.assert_not_awaited()
self.assertEqual(set(emojis),
set(["longcat", "doge", "partyparrot", "smile", "wink", "flag1", "flag2", "flag3"]),
msg="Should not return standard emojis when connection error")
@patch("multi_reaction_add.internals.EmojiOperator._get_reactions_in_team")
async def test_update_emoji_list(self, get_reactions: AsyncMock):
"""Test update_emoji_list method"""
get_reactions.return_value = ["some", "emojis"]
emoji_operator = EmojiOperator()
self.client.token = "old token"
# test normal execution
try:
await asyncio.wait_for(
emoji_operator._update_emoji_list(
app=self.app,
token="new token",
logger=self.logger,
sleep=1),
timeout=1.5)
except asyncio.TimeoutError:
pass
get_reactions.assert_awaited_once_with(self.client, self.logger)
self.assertEqual(emoji_operator._all_emojis, ["some", "emojis"])
self.assertEqual(self.client.token, "old token")
# test all_emojis left unchanged on slack api error
get_reactions.side_effect = SlackApiError(None, None)
try:
await asyncio.wait_for(
emoji_operator._update_emoji_list(
app=self.app,
token="new token",
logger=self.logger,
sleep=1),
timeout=1.5)
except asyncio.TimeoutError:
pass
self.assertEqual(emoji_operator._all_emojis, ["some", "emojis"])
self.assertEqual(self.client.token, "old token")
# test all_emojis unset on slack api exception
emoji_operator._all_emojis = None
get_reactions.side_effect = SlackApiError(None, None)
try:
await asyncio.wait_for(
emoji_operator._update_emoji_list(
app=self.app,
token="new token",
logger=self.logger,
sleep=1),
timeout=1.5)
except asyncio.TimeoutError:
pass
self.assertEqual(emoji_operator._all_emojis, None)
self.assertEqual(self.client.token, "old token")
async def test_stop_emoji_thread(self):
"""Test stop_emoji_thread method"""
emoji_operator = EmojiOperator()
async def some_method():
pass
emoji_operator._emoji_task = asyncio.create_task(some_method())
await emoji_operator.stop_emoji_update()
await asyncio.sleep(0.1) # task will be canceled when it will be scheduled in the event loop
self.assertTrue(emoji_operator._emoji_task.done())
@patch("multi_reaction_add.internals.EmojiOperator._get_reactions_in_team")
async def test_get_valid_reactions(self, get_reactions: AsyncMock):
"""Test get_valid_reactions method"""
emoji_operator = EmojiOperator()
emoji_operator._emoji_task = Mock(spec=Task)
emoji_operator._emoji_task.done.return_value = False
emoji_operator._update_emoji_list = AsyncMock()
emoji_operator._all_emojis = ["smile", "wink", "face", "laugh", "some-emoji", "-emj-", "_emj_", "some_emoji",
"+one", "'quote'", "54"]
# check empty input
emojis = await emoji_operator.get_valid_reactions(text="",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, [])
# check no emojis in input
emojis = await emoji_operator.get_valid_reactions(text="some text",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, [])
# check no valid emojis
emojis = await emoji_operator.get_valid_reactions(text="::::",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, [])
# check valid input
emojis = await emoji_operator.get_valid_reactions(text=":smile: :wink:",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["smile", "wink"])
# check emojis special characters
emojis = await emoji_operator.get_valid_reactions(
text=":some-emoji: :-emj-: :_emj_: :some_emoji: :+one: :'quote': :54:",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["some-emoji", "-emj-", "_emj_", "some_emoji", "+one", "'quote'", "54"])
# check remove duplicates
emojis = await emoji_operator.get_valid_reactions(text=":smile: :wink: :smile:",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["smile", "wink"])
# check emoji with modifier
emojis = await emoji_operator.get_valid_reactions(text=":face::skin-tone-2:",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["face::skin-tone-2"])
# check no space in input
emojis = await emoji_operator.get_valid_reactions(
text=":smile::wink::face::skin-tone-2::face::skin-tone-3::laugh:",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["smile", "wink", "face::skin-tone-2", "face::skin-tone-3", "laugh"])
# check text and emojis
emojis = await emoji_operator.get_valid_reactions(
text="sometext:smile:anothertext:wink:moretext:laugh:endoftext",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["smile", "wink", "laugh"])
# check invalid emoji
emojis = await emoji_operator.get_valid_reactions(text=":smile: :invalid:",
client=self.client,
app=self.app,
logger=self.logger)
self.assertEqual(emojis, ["smile"])
# check emoji_task is started when finished
get_reactions.return_value = ["joy"]
emoji_operator._emoji_task.done.return_value = True
emojis = await emoji_operator.get_valid_reactions(text=":joy:",
client=self.client,
app=self.app,
logger=self.logger)
get_reactions.assert_awaited_once_with(self.client, self.logger)
self.assertEqual(emojis, ["joy"])
| 20
| 0
| 30
|
2c7b34da71ec90cd5fdbc145e15b19d24623d2c5
| 812
|
py
|
Python
|
api/api.py
|
idrissneumann/imalive
|
a5c4c9f34c9d2e2b24095a6558bcaca022297f26
|
[
"Apache-2.0"
] | null | null | null |
api/api.py
|
idrissneumann/imalive
|
a5c4c9f34c9d2e2b24095a6558bcaca022297f26
|
[
"Apache-2.0"
] | null | null | null |
api/api.py
|
idrissneumann/imalive
|
a5c4c9f34c9d2e2b24095a6558bcaca022297f26
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask
from flask_restful import Api
from multiprocessing import Process
from heartbit import heartbit
from api_health import HealthEndPoint
from api_manifest import ManifestEndPoint
from api_metrics import MetricsEndPoint
app = Flask(__name__)
api = Api(app)
async_process = Process(
target=heartbit,
daemon=True
)
async_process.start()
health_check_routes = ['/', '/health', '/health/', '/v1', '/v1/', '/v1/health', '/v1/health/']
manifest_routes = ['/manifest', '/manifest/', '/v1/manifest', '/v1/manifest/']
disk_routes = ['/metrics', '/metrics/', '/v1/metrics', '/v1/metrics/']
api.add_resource(HealthEndPoint, *health_check_routes)
api.add_resource(ManifestEndPoint, *manifest_routes)
api.add_resource(MetricsEndPoint, *disk_routes)
if __name__ == '__main__':
app.run()
| 28
| 94
| 0.742611
|
from flask import Flask
from flask_restful import Api
from multiprocessing import Process
from heartbit import heartbit
from api_health import HealthEndPoint
from api_manifest import ManifestEndPoint
from api_metrics import MetricsEndPoint
app = Flask(__name__)
api = Api(app)
async_process = Process(
target=heartbit,
daemon=True
)
async_process.start()
health_check_routes = ['/', '/health', '/health/', '/v1', '/v1/', '/v1/health', '/v1/health/']
manifest_routes = ['/manifest', '/manifest/', '/v1/manifest', '/v1/manifest/']
disk_routes = ['/metrics', '/metrics/', '/v1/metrics', '/v1/metrics/']
api.add_resource(HealthEndPoint, *health_check_routes)
api.add_resource(ManifestEndPoint, *manifest_routes)
api.add_resource(MetricsEndPoint, *disk_routes)
if __name__ == '__main__':
app.run()
| 0
| 0
| 0
|
a5ca672ff6b58ca59e41f14bb06794c17d777769
| 1,531
|
py
|
Python
|
Field_D_main.py
|
McJones/Text2SATB
|
adfefb05daacf5ecde2be39890dfcdca9f034c15
|
[
"MIT"
] | null | null | null |
Field_D_main.py
|
McJones/Text2SATB
|
adfefb05daacf5ecde2be39890dfcdca9f034c15
|
[
"MIT"
] | 2
|
2018-08-20T09:15:49.000Z
|
2018-08-20T09:18:26.000Z
|
Field_D_main.py
|
McJones/Text2SATB
|
adfefb05daacf5ecde2be39890dfcdca9f034c15
|
[
"MIT"
] | 1
|
2018-12-11T23:53:50.000Z
|
2018-12-11T23:53:50.000Z
|
from Field_D_SupportingClasses import *
ProgramID = "DF Word Score Sonifier v1.0"
WorkTitle = "Untitled Sonification"
Lyricist = ""
Input = DF_TextInput()
WorkTitle = Input.provideTitle()
Lyricist = Input.provideLyricist()
verses = Input.provideVerses()
positions = Input.providePositions()
scores = Input.provideScrabbleScores()
Planner = DF_SongPlanner(verses, positions, scores)
verseKeys = Planner.getVerseKeys()
Planner.getBassPart(Planner.homeKey)
Planner.getTenorPart(Planner.homeKey)
Planner.getAltoPart(Planner.homeKey)
Planner.getSopPart(Planner.homeKey)
X = DF_MusicXML(WorkTitle, ProgramID, Lyricist)
basNotes = Planner.bassNotes
basDurations = Planner.bassRhythms
basLyric = Planner.bassWords
basPos = Planner.bassPositions
basTies = Planner.bassTies
tenNotes = Planner.tenNotes
tenDurations = Planner.tenRhythms
tenLyric = Planner.tenWords
tenPos = Planner.tenPositions
tenTies = Planner.tenTies
altoNotes = Planner.altoNotes
altoDurations = Planner.altoRhythms
altoLyric = Planner.altoWords
altoPos = Planner.altoPositions
altoTies = Planner.altoTies
sopNotes = Planner.sopNotes
sopDurations = Planner.sopRhythms
sopLyric = Planner.sopWords
sopPos = Planner.sopPositions
sopTies = Planner.sopTies
X.writeSop(sopNotes, sopDurations, sopLyric, sopPos, sopTies)
X.writeAlto(altoNotes, altoDurations, altoLyric, altoPos, altoTies)
X.writeTenor(tenNotes, tenDurations, tenLyric, tenPos, tenTies)
X.writeBass(basNotes, basDurations, basLyric, basPos, basTies)
X.endXMLFile()
| 34.022222
| 68
| 0.793599
|
from Field_D_SupportingClasses import *
ProgramID = "DF Word Score Sonifier v1.0"
WorkTitle = "Untitled Sonification"
Lyricist = ""
Input = DF_TextInput()
WorkTitle = Input.provideTitle()
Lyricist = Input.provideLyricist()
verses = Input.provideVerses()
positions = Input.providePositions()
scores = Input.provideScrabbleScores()
Planner = DF_SongPlanner(verses, positions, scores)
verseKeys = Planner.getVerseKeys()
Planner.getBassPart(Planner.homeKey)
Planner.getTenorPart(Planner.homeKey)
Planner.getAltoPart(Planner.homeKey)
Planner.getSopPart(Planner.homeKey)
X = DF_MusicXML(WorkTitle, ProgramID, Lyricist)
basNotes = Planner.bassNotes
basDurations = Planner.bassRhythms
basLyric = Planner.bassWords
basPos = Planner.bassPositions
basTies = Planner.bassTies
tenNotes = Planner.tenNotes
tenDurations = Planner.tenRhythms
tenLyric = Planner.tenWords
tenPos = Planner.tenPositions
tenTies = Planner.tenTies
altoNotes = Planner.altoNotes
altoDurations = Planner.altoRhythms
altoLyric = Planner.altoWords
altoPos = Planner.altoPositions
altoTies = Planner.altoTies
sopNotes = Planner.sopNotes
sopDurations = Planner.sopRhythms
sopLyric = Planner.sopWords
sopPos = Planner.sopPositions
sopTies = Planner.sopTies
X.writeSop(sopNotes, sopDurations, sopLyric, sopPos, sopTies)
X.writeAlto(altoNotes, altoDurations, altoLyric, altoPos, altoTies)
X.writeTenor(tenNotes, tenDurations, tenLyric, tenPos, tenTies)
X.writeBass(basNotes, basDurations, basLyric, basPos, basTies)
X.endXMLFile()
| 0
| 0
| 0
|
6320d6eba1e101f8a2417333f0da0649a54cd36f
| 7,055
|
py
|
Python
|
broker/libs/git.py
|
ebloc/eBlocBroker
|
52d507835a0fe3c930df2e2c816724d26a3484a7
|
[
"MIT"
] | 7
|
2018-02-10T22:57:28.000Z
|
2020-11-20T14:46:18.000Z
|
broker/libs/git.py
|
ebloc/eBlocBroker
|
52d507835a0fe3c930df2e2c816724d26a3484a7
|
[
"MIT"
] | 5
|
2020-10-30T18:43:27.000Z
|
2021-02-04T12:39:30.000Z
|
broker/libs/git.py
|
ebloc/eBlocBroker
|
52d507835a0fe3c930df2e2c816724d26a3484a7
|
[
"MIT"
] | 5
|
2017-07-06T14:14:13.000Z
|
2019-02-22T14:40:16.000Z
|
#!/usr/bin/env python3
import gzip
import io
import os
import time
import git
from broker.config import env, logging
from broker.libs.ipfs import decrypt_using_gpg
from broker.utils import cd, is_gzip_file_empty, log, path_leaf, run
# from subprocess import CalledProcessError
def initialize_check(path):
""".git/ folder should exist within the target folder"""
with cd(path):
if not is_initialized(path):
try:
run(["git", "init", "--initial-branch=master"])
add_all()
except Exception as error:
logging.error(f"E: {error}")
return False
return True
def diff_patch(path, source_code_hash, index, target_path):
"""
* "git diff HEAD" for detecting all the changes:
* Shows all the changes between the working directory and HEAD (which includes changes in the index).
* This shows all the changes since the last commit, whether or not they have been staged for commit
* or not.
"""
sep = "*" # separator in between the string infos
is_file_empty = False
with cd(path):
log(f"==> Navigate to {path}")
"""TODO
if not is_initialized(path):
upload everything, changed files!
"""
repo = git.Repo(".", search_parent_directories=True)
try:
repo.git.config("core.fileMode", "false") # git config core.fileMode false
# first ignore deleted files not to be added into git
run(["bash", f"{env.EBLOCPATH}/broker/bash_scripts/git_ignore_deleted.sh"])
head_commit_id = repo.rev_parse("HEAD")
patch_name = f"patch{sep}{head_commit_id}{sep}{source_code_hash}{sep}{index}.diff"
except:
return False
patch_upload_name = f"{patch_name}.gz" # file to be uploaded as zip
patch_file = f"{target_path}/{patch_upload_name}"
logging.info(f"patch_path={patch_upload_name}")
try:
repo.git.add(A=True)
diff_and_gzip(patch_file)
except:
return False
time.sleep(0.25)
if is_gzip_file_empty(patch_file):
log("==> Created patch file is empty, nothing to upload")
os.remove(patch_file)
is_file_empty = True
return patch_upload_name, patch_file, is_file_empty
def apply_patch(git_folder, patch_file, is_gpg=False):
"""Apply git patch.
https://stackoverflow.com/a/15375869/2402577
"""
if is_gpg:
decrypt_using_gpg(patch_file)
with cd(git_folder):
base_name = path_leaf(patch_file)
log(f"==> {base_name}")
# folder_name = base_name_split[2]
try:
# base_name_split = base_name.split("_")
# git_hash = base_name_split[1]
# run(["git", "checkout", git_hash])
# run(["git", "reset", "--hard"])
# run(["git", "clean", "-f"])
# echo "\n" >> patch_file.txt seems like fixing it
with open(patch_file, "a") as myfile:
myfile.write("\n")
# output = repo.git.apply("--reject", "--whitespace=fix", patch_file)
run(["git", "apply", "--reject", "--whitespace=fix", "--verbose", patch_file])
return True
except:
return False
def generate_git_repo(folders):
"""Create git repositories in the given folders if it does not exist."""
if isinstance(folders, list):
for folder in folders:
_generate_git_repo(folder)
else: # if string given "/home/user/folder" retreive string instead of "/" with for above
_generate_git_repo(folders)
# def extract_gzip():
# pass
| 32.662037
| 105
| 0.591212
|
#!/usr/bin/env python3
import gzip
import io
import os
import time
import git
from broker.config import env, logging
from broker.libs.ipfs import decrypt_using_gpg
from broker.utils import cd, is_gzip_file_empty, log, path_leaf, run
# from subprocess import CalledProcessError
def initialize_check(path):
""".git/ folder should exist within the target folder"""
with cd(path):
if not is_initialized(path):
try:
run(["git", "init", "--initial-branch=master"])
add_all()
except Exception as error:
logging.error(f"E: {error}")
return False
return True
def is_initialized(path) -> bool:
with cd(path):
try:
repo = git.Repo(".", search_parent_directories=True)
working_tree_dir = repo.working_tree_dir
except:
return False
return path == working_tree_dir
def diff_and_gzip(filename):
repo = git.Repo(".", search_parent_directories=True)
with gzip.open(filename, "wb") as output:
# We cannot directly write Python objects like strings!
# We must first convert them into a bytes format using io.BytesIO() and then write it
with io.TextIOWrapper(output, encoding="utf-8") as encode:
encode.write(repo.git.diff("--binary", "HEAD", "--minimal", "--ignore-submodules=dirty"))
def decompress_gzip(filename):
if not is_gzip_file_empty(filename):
with gzip.open(filename, "rb") as ip:
with io.TextIOWrapper(ip, encoding="utf-8") as decoder:
# Let's read the content using read()
content = decoder.read()
print(content)
def diff_patch(path, source_code_hash, index, target_path):
"""
* "git diff HEAD" for detecting all the changes:
* Shows all the changes between the working directory and HEAD (which includes changes in the index).
* This shows all the changes since the last commit, whether or not they have been staged for commit
* or not.
"""
sep = "*" # separator in between the string infos
is_file_empty = False
with cd(path):
log(f"==> Navigate to {path}")
"""TODO
if not is_initialized(path):
upload everything, changed files!
"""
repo = git.Repo(".", search_parent_directories=True)
try:
repo.git.config("core.fileMode", "false") # git config core.fileMode false
# first ignore deleted files not to be added into git
run(["bash", f"{env.EBLOCPATH}/broker/bash_scripts/git_ignore_deleted.sh"])
head_commit_id = repo.rev_parse("HEAD")
patch_name = f"patch{sep}{head_commit_id}{sep}{source_code_hash}{sep}{index}.diff"
except:
return False
patch_upload_name = f"{patch_name}.gz" # file to be uploaded as zip
patch_file = f"{target_path}/{patch_upload_name}"
logging.info(f"patch_path={patch_upload_name}")
try:
repo.git.add(A=True)
diff_and_gzip(patch_file)
except:
return False
time.sleep(0.25)
if is_gzip_file_empty(patch_file):
log("==> Created patch file is empty, nothing to upload")
os.remove(patch_file)
is_file_empty = True
return patch_upload_name, patch_file, is_file_empty
def add_all(repo=None):
if not repo:
repo = git.Repo(".", search_parent_directories=True)
try:
# subprocess.run(["chmod", "-R", "755", "."])
# subprocess.run(["chmod", "-R", "775", ".git"]) # https://stackoverflow.com/a/28159309/2402577
# required for files to be access on the cluster side due to permission issues
run(["sudo", "chmod", "-R", "775", "."]) # changes folder's hash
except:
pass
try:
repo.git.add(A=True) # git add -A .
try:
changed_file_len = len(repo.index.diff("HEAD")) # git diff HEAD --name-only | wc -l
except:
# if it is the first commit HEAD might not exist
changed_file_len = len(repo.git.diff("--cached", "--name-only").split("\n"))
if changed_file_len > 0:
repo.git.commit("-m", "update") # git commit -m update
return True
except:
return False
def commit_changes(path) -> bool:
with cd(path):
repo = git.Repo(".", search_parent_directories=True)
try:
output = run(["ls", "-l", ".git/refs/heads"])
except Exception as e:
raise Exception("E: Problem on git.commit_changes()") from e
if output == "total 0":
logging.warning("There is no first commit")
else:
changed_files = [item.a_path for item in repo.index.diff(None)]
if len(changed_files) > 0:
logging.info(f"Adding changed files:\{changed_files}")
repo.git.add(A=True)
if len(repo.index.diff("HEAD")) == 0:
log(f"==> {path} is committed with the given changes using git")
return True
try:
add_all(repo)
except Exception as e:
logging.error(f"E: {e}")
return False
return True
def apply_patch(git_folder, patch_file, is_gpg=False):
"""Apply git patch.
https://stackoverflow.com/a/15375869/2402577
"""
if is_gpg:
decrypt_using_gpg(patch_file)
with cd(git_folder):
base_name = path_leaf(patch_file)
log(f"==> {base_name}")
# folder_name = base_name_split[2]
try:
# base_name_split = base_name.split("_")
# git_hash = base_name_split[1]
# run(["git", "checkout", git_hash])
# run(["git", "reset", "--hard"])
# run(["git", "clean", "-f"])
# echo "\n" >> patch_file.txt seems like fixing it
with open(patch_file, "a") as myfile:
myfile.write("\n")
# output = repo.git.apply("--reject", "--whitespace=fix", patch_file)
run(["git", "apply", "--reject", "--whitespace=fix", "--verbose", patch_file])
return True
except:
return False
def is_repo(folders):
for folder in folders:
with cd(folder):
if not is_initialized(folder):
logging.warning(f".git does not exits in {folder}. Applying: `git init`")
run(["git", "init", "--initial-branch=master"])
def _generate_git_repo(folder):
log(folder, "green")
try:
initialize_check(folder)
commit_changes(folder)
except Exception as e:
raise e
def generate_git_repo(folders):
"""Create git repositories in the given folders if it does not exist."""
if isinstance(folders, list):
for folder in folders:
_generate_git_repo(folder)
else: # if string given "/home/user/folder" retreive string instead of "/" with for above
_generate_git_repo(folders)
# def extract_gzip():
# pass
| 3,198
| 0
| 161
|
fa106b624dcaed91c2eb981434a403060cd1c1fc
| 1,763
|
py
|
Python
|
cognite/v05/tagmatching.py
|
boyeah/cognite-sdk-python
|
39abf5c98d758c59609cb33f5f3e2c009712005d
|
[
"Apache-2.0"
] | null | null | null |
cognite/v05/tagmatching.py
|
boyeah/cognite-sdk-python
|
39abf5c98d758c59609cb33f5f3e2c009712005d
|
[
"Apache-2.0"
] | null | null | null |
cognite/v05/tagmatching.py
|
boyeah/cognite-sdk-python
|
39abf5c98d758c59609cb33f5f3e2c009712005d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Tag Matching Module
This module mirrors the Tag Matching API. It allows the user to search for tag id matches.
https://doc.cognitedata.com/0.5/#Cognite-API-Tag-Matching
"""
import cognite._utils as _utils
import cognite.config as config
from cognite.v05.dto import TagMatchingResponse
def tag_matching(tag_ids, fuzzy_threshold=0, platform=None, **kwargs):
"""Returns a TagMatchingObject containing a list of matched tags for the given query.
This method takes an arbitrary string as argument and performs fuzzy matching with a user defined threshold
toward tag ids in the system.
Args:
tag_ids (list): The tag_ids to retrieve matches for.
fuzzy_threshold (int): The threshold to use when searching for matches. A fuzzy threshold of 0 means you only
want to accept perfect matches. Must be >= 0.
platform (str): The platform to search on.
Keyword Args:
api_key (str): Your api-key.
project (str): Project name.
Returns:
v05.dto.TagMatchingResponse: A data object containing the requested data with several getter methods with different
output formats.
"""
api_key, project = config.get_config_variables(kwargs.get("api_key"), kwargs.get("project"))
url = config.get_base_url(api_version=0.5) + "/projects/{}/tagmatching".format(project)
body = {"tagIds": tag_ids, "metadata": {"fuzzyThreshold": fuzzy_threshold, "platform": platform}}
headers = {"api-key": api_key, "content-type": "*/*", "accept": "application/json"}
res = _utils.post_request(url=url, body=body, headers=headers, cookies=config.get_cookies())
return TagMatchingResponse(res.json())
| 41.97619
| 123
| 0.688032
|
# -*- coding: utf-8 -*-
"""Tag Matching Module
This module mirrors the Tag Matching API. It allows the user to search for tag id matches.
https://doc.cognitedata.com/0.5/#Cognite-API-Tag-Matching
"""
import cognite._utils as _utils
import cognite.config as config
from cognite.v05.dto import TagMatchingResponse
def tag_matching(tag_ids, fuzzy_threshold=0, platform=None, **kwargs):
"""Returns a TagMatchingObject containing a list of matched tags for the given query.
This method takes an arbitrary string as argument and performs fuzzy matching with a user defined threshold
toward tag ids in the system.
Args:
tag_ids (list): The tag_ids to retrieve matches for.
fuzzy_threshold (int): The threshold to use when searching for matches. A fuzzy threshold of 0 means you only
want to accept perfect matches. Must be >= 0.
platform (str): The platform to search on.
Keyword Args:
api_key (str): Your api-key.
project (str): Project name.
Returns:
v05.dto.TagMatchingResponse: A data object containing the requested data with several getter methods with different
output formats.
"""
api_key, project = config.get_config_variables(kwargs.get("api_key"), kwargs.get("project"))
url = config.get_base_url(api_version=0.5) + "/projects/{}/tagmatching".format(project)
body = {"tagIds": tag_ids, "metadata": {"fuzzyThreshold": fuzzy_threshold, "platform": platform}}
headers = {"api-key": api_key, "content-type": "*/*", "accept": "application/json"}
res = _utils.post_request(url=url, body=body, headers=headers, cookies=config.get_cookies())
return TagMatchingResponse(res.json())
| 0
| 0
| 0
|
96e189f608858537ed90f75689036cf2feaa0c16
| 5,464
|
py
|
Python
|
main.py
|
rmccaffr/IrishRailBot
|
bd346d157a41680d0fb13dd78f280bb8df34aa9a
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
rmccaffr/IrishRailBot
|
bd346d157a41680d0fb13dd78f280bb8df34aa9a
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
rmccaffr/IrishRailBot
|
bd346d157a41680d0fb13dd78f280bb8df34aa9a
|
[
"Apache-2.0"
] | null | null | null |
import StringIO
import json
import logging
import random
import urllib
import urllib2
from xml.dom import minidom
# for sending images
from PIL import Image
import multipart
# standard app engine imports
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
TOKEN = '119152358:AAFvnvYU_5sxfTInk0LNQ55a_U5FMY3pyUo'
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
# ================================
# ================================
# ================================
app = webapp2.WSGIApplication([
('/me', MeHandler),
('/updates', GetUpdatesHandler),
('/set_webhook', SetWebhookHandler),
('/webhook', WebhookHandler),
], debug=True)
| 32.52381
| 157
| 0.600842
|
import StringIO
import json
import logging
import random
import urllib
import urllib2
from xml.dom import minidom
# for sending images
from PIL import Image
import multipart
# standard app engine imports
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
TOKEN = '119152358:AAFvnvYU_5sxfTInk0LNQ55a_U5FMY3pyUo'
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
# ================================
class EnableStatus(ndb.Model):
# key name: str(chat_id)
enabled = ndb.BooleanProperty(indexed=False, default=False)
# ================================
def setEnabled(chat_id, yes):
es = EnableStatus.get_or_insert(str(chat_id))
es.enabled = yes
es.put()
def getEnabled(chat_id):
es = EnableStatus.get_by_id(str(chat_id))
if es:
return es.enabled
return False
# ================================
class MeHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
class GetUpdatesHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
class SetWebhookHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
url = self.request.get('url')
if url:
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
class WebhookHandler(webapp2.RequestHandler):
def post(self):
urlfetch.set_default_fetch_deadline(60)
body = json.loads(self.request.body)
logging.info('request body:')
logging.info(body)
self.response.write(json.dumps(body))
update_id = body['update_id']
message = body['message']
message_id = message.get('message_id')
date = message.get('date')
text = message.get('text')
fr = message.get('from')
chat = message['chat']
chat_id = chat['id']
if not text:
logging.info('no text')
return
def reply(msg=None, img=None):
if msg:
resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode({
'chat_id': str(chat_id),
'text': msg.encode('utf-8'),
'disable_web_page_preview': 'true',
'reply_to_message_id': str(message_id),
})).read()
elif img:
resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [
('chat_id', str(chat_id)),
('reply_to_message_id', str(message_id)),
], [
('photo', 'image.jpg', img),
])
else:
logging.error('no msg or img specified')
resp = None
logging.info('send response:')
logging.info(resp)
if text.startswith('/'):
if text == '/start':
reply('Bot enabled')
setEnabled(chat_id, True)
elif text == '/stop':
reply('Bot disabled')
setEnabled(chat_id, False)
elif text == '/image':
img = Image.new('RGB', (512, 512))
base = random.randint(0, 16777216)
pixels = [base+i*j for i in range(512) for j in range(512)] # generate sample image
img.putdata(pixels)
output = StringIO.StringIO()
img.save(output, 'JPEG')
reply(img=output.getvalue())
elif text in ('/Pearse','/pearse') :
input_url='http://api.irishrail.ie/realtime/realtime.asmx/getStationDataByNameXML?StationDesc=Dublin%20Pearse'
response = urllib2.urlopen(input_url)
xmldoc = minidom.parse(response)
root= xmldoc.getElementsByTagName('objStationData')
output=''
for child in root:
Destination=child.getElementsByTagName('Destination')
Duein=child.getElementsByTagName('Duein')
output+=Destination[0].firstChild.data+" due in " + Duein[0].firstChild.data + " mins\n"
if output=='' :
reply('Invalid station or no trains are currently available.')
else:
reply(output)
elif text == '/info':
reply(' Type / followed by your station name for live departure times.\nResults:"Destination" due in "X" mins.\nCreated by Robert McCaffrey')
else:
station=text[1:]
input_url='http://api.irishrail.ie/realtime/realtime.asmx/getStationDataByNameXML?StationDesc='
input_url=input_url+station
response = urllib2.urlopen(input_url)
xmldoc = minidom.parse(response)
root= xmldoc.getElementsByTagName('objStationData')
output=''
for child in root:
Destination=child.getElementsByTagName('Destination')
Duein=child.getElementsByTagName('Duein')
output+=Destination[0].firstChild.data+" due in " + Duein[0].firstChild.data + " mins\n"
if output=='' :
reply('Invalid station or no trains are currently available.')
else:
reply(output)
app = webapp2.WSGIApplication([
('/me', MeHandler),
('/updates', GetUpdatesHandler),
('/set_webhook', SetWebhookHandler),
('/webhook', WebhookHandler),
], debug=True)
| 4,265
| 199
| 265
|
579e371e955563dc8f4d6412367c0fd0a22213bb
| 167
|
py
|
Python
|
Exercicios Python/exlista01.py
|
oswaldo-spadari/Python-Exec
|
3c3a237ed7c30af43f23a3619f6c6b92f6fcb12e
|
[
"MIT"
] | null | null | null |
Exercicios Python/exlista01.py
|
oswaldo-spadari/Python-Exec
|
3c3a237ed7c30af43f23a3619f6c6b92f6fcb12e
|
[
"MIT"
] | null | null | null |
Exercicios Python/exlista01.py
|
oswaldo-spadari/Python-Exec
|
3c3a237ed7c30af43f23a3619f6c6b92f6fcb12e
|
[
"MIT"
] | null | null | null |
#Faça um Programa que leia um vetor de 5 números inteiros e mostre-os.
lista=[]
for i in range(1, 6):
lista.append(int(input('Digite um número: ')))
print(lista)
| 23.857143
| 70
| 0.694611
|
#Faça um Programa que leia um vetor de 5 números inteiros e mostre-os.
lista=[]
for i in range(1, 6):
lista.append(int(input('Digite um número: ')))
print(lista)
| 0
| 0
| 0
|
49ee0242e9e0863870fce27ed6fe2b52fdc6ebac
| 1,662
|
py
|
Python
|
corehq/apps/app_manager/views/cli.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/app_manager/views/cli.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T01:03:25.000Z
|
2022-03-12T01:03:25.000Z
|
corehq/apps/app_manager/views/cli.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
from django.utils.text import slugify
from soil import DownloadBase
from corehq.apps.hqmedia.tasks import build_application_zip
from corehq.util.view_utils import absolute_reverse, json_error
from corehq.apps.domain.models import Domain
from dimagi.utils.web import json_response
from corehq.apps.domain.decorators import (
login_or_digest_or_basic,
)
from corehq.apps.app_manager.dbaccessors import get_app
@json_error
@login_or_digest_or_basic()
@json_error
| 33.918367
| 78
| 0.662455
|
from django.utils.text import slugify
from soil import DownloadBase
from corehq.apps.hqmedia.tasks import build_application_zip
from corehq.util.view_utils import absolute_reverse, json_error
from corehq.apps.domain.models import Domain
from dimagi.utils.web import json_response
from corehq.apps.domain.decorators import (
login_or_digest_or_basic,
)
from corehq.apps.app_manager.dbaccessors import get_app
@json_error
@login_or_digest_or_basic()
def list_apps(request, domain):
def app_to_json(app):
return {
'name': app.name,
'version': app.version,
'app_id': app.get_id,
'download_url': absolute_reverse('direct_ccz', args=[domain],
params={'app_id': app.get_id})
}
applications = Domain.get_by_name(domain).applications()
return json_response({
'status': 'success',
'applications': map(app_to_json, applications),
})
@json_error
def direct_ccz(request, domain):
if 'app_id' in request.GET:
app = get_app(domain, request.GET['app_id'])
app.set_media_versions(None)
download = DownloadBase()
build_application_zip(
include_multimedia_files=False,
include_index_files=True,
app=app,
download_id=download.download_id,
compress_zip=True,
filename='{}.ccz'.format(slugify(app.name)),
)
return DownloadBase.get(download.download_id).toHttpResponse()
msg = "You must specify `app_id` in your GET parameters"
return json_response({'status': 'error', 'message': msg}, status_code=400)
| 1,149
| 0
| 44
|
11667cf487b0c5ac83e5590cc9e8485871c9addb
| 6,026
|
py
|
Python
|
ingestion/datasources/coinmarketcap.py
|
JesseCorrington/CryptoHypeTrader
|
33f79251f5327d459818ab6d07f104b89596d3b3
|
[
"MIT"
] | null | null | null |
ingestion/datasources/coinmarketcap.py
|
JesseCorrington/CryptoHypeTrader
|
33f79251f5327d459818ab6d07f104b89596d3b3
|
[
"MIT"
] | null | null | null |
ingestion/datasources/coinmarketcap.py
|
JesseCorrington/CryptoHypeTrader
|
33f79251f5327d459818ab6d07f104b89596d3b3
|
[
"MIT"
] | null | null | null |
import re
import datetime
from ingestion import datasource as ds
# Provides access to coinmarketcap.com data, using the API when available,
# or web scraping when there is no public API
class CoinList(ds.DataSource):
"""Used to get a list of all the coins on coinmarketcap"""
class Ticker(CoinList):
"""Used to get current price/marketcap/volume data for all coins"""
class CoinLinks(ds.DataSource):
"""Used to get social media links for a coin (subreddit, twitter, btctalk)"""
class HistoricalPrices(ds.DataSource):
"""Used to get historical price data for a coin
This requires scraping the site, because there is no API for this data
This is only used for the initial data import, and after that we can just periodically get the ticker
"""
| 33.853933
| 124
| 0.559409
|
import re
import datetime
from ingestion import datasource as ds
# Provides access to coinmarketcap.com data, using the API when available,
# or web scraping when there is no public API
class CoinList(ds.DataSource):
"""Used to get a list of all the coins on coinmarketcap"""
def __init__(self):
# limit defaults to 100, but coinmarketcap doesn't have a max for the limit,
# so just set it super high to make sure we get all the coins
# this may eventually fail if they put a max for limit, so we'll check for that
# error after the request
super().__init__(
"https://api.coinmarketcap.com/v1/ticker",
{"limit": 10000}
)
def parse(self, all_coins):
# Note that cryptocurrency symbols are not guaranteed to be unique so, we
# use the unique id as the index, rather than the symbol
ret = []
for coin in all_coins:
ret.append({
"cmc_id": coin["id"],
"symbol": coin["symbol"],
"name": coin["name"]
})
# make sure limit is working as expected
# 1200 is a sanity check, roughly the number of coins as of 10/2017
if len(ret) < 1200 or len(ret) == self.params["limit"]:
raise Exception("cmc limit not working as expected, this likely means they changed the API to have a limit max")
return ret
class Ticker(CoinList):
"""Used to get current price/marketcap/volume data for all coins"""
def parse(self, all_coins):
ret = []
for coin in all_coins:
# This might not be the exact time cmc updated the ticker, but it's close enough
# and prevents any potential issues with time zone issues screwing up our dates in the db
today = datetime.datetime.utcnow()
def to_float(s):
return float(s) if s else None
ret.append({
"cmc_id": coin["id"],
"date": today,
"price": to_float(coin["price_usd"]),
"price_btc": to_float(coin["price_btc"]),
"volume": to_float(coin["24h_volume_usd"]),
"market_cap": to_float(coin["market_cap_usd"]),
"supply_avail": to_float(coin["available_supply"]),
"supply_total": to_float(coin["total_supply"]),
"supply_max": to_float(coin["max_supply"])
})
return ret
class CoinLinks(ds.DataSource):
"""Used to get social media links for a coin (subreddit, twitter, btctalk)"""
def __init__(self, coin):
url = "https://coinmarketcap.com/currencies/{}".format(coin["cmc_id"])
super().__init__(url, response_format="text")
def parse(self, html):
# We have to scrape for the reddit url, because there is no api to get it
# a simple regex does the trick
links = {}
def find_link(pattern):
match = re.search(pattern, html)
if match is not None:
return match.group(1)
return None
# Find and save all the links we're looking for
subreddit = find_link("reddit\\.com\\/r\\/([^/.]*)\\.")
if subreddit:
links["subreddit"] = subreddit
twitter = find_link('class="twitter-timeline" href="https://twitter.com/([^"]*)')
if twitter:
links["twitter"] = twitter
ann = find_link('href="https:\\/\\/bitcointalk\\.org\\/index\\.php\\?topic=([^"]*)')
if ann:
links["btctalk_ann"] = ann
icon = find_link('src="(https:\\/\\/s2.coinmarketcap.com\\/static\\/img\\/coins\\/[0-9]*x[0-9]*\\/[0-9]*.png)"')
if icon:
links["icon"] = icon
return links
class HistoricalPrices(ds.DataSource):
"""Used to get historical price data for a coin
This requires scraping the site, because there is no API for this data
This is only used for the initial data import, and after that we can just periodically get the ticker
"""
def __init__(self, coin, start=datetime.datetime(2011, 1, 1), end=datetime.datetime.utcnow()):
date_format = "%Y%m%d"
params = {
"start": start.strftime(date_format),
"end": end.strftime(date_format)
}
url = "https://coinmarketcap.com/currencies/{}/historical-data".format(coin["cmc_id"])
super().__init__(url, params, "soup")
def parse(self, soup):
# There's no API to get historical price data, but we can scrape it from a table
# on the /historical-data page
div = soup.find("div", attrs={"class": "table-responsive"})
table = div.find('table', attrs={'class': 'table'})
table_body = table.find('tbody')
rows = table_body.find_all('tr')
historical_data = []
def to_float(text):
if text is None:
return None
text = text.strip()
text = text.replace(",", "")
if text == "-":
# Some of the old volume data is missing on coin market cap
return None
return float(text)
for row in rows:
cols = row.find_all('td')
if len(cols) < 7:
return None
date = cols[0].text.strip()
date = datetime.datetime.strptime(date, "%b %d, %Y")
open = to_float(cols[1].text)
high = to_float(cols[2].text)
low = to_float(cols[3].text)
close = to_float(cols[4].text)
volume = to_float(cols[5].text)
market_cap = to_float(cols[6].text)
daily_ticker = {
"date": date,
"open": open,
"high": high,
"low": low,
"close": close,
"volume": volume,
"market_cap": market_cap
}
historical_data.append(daily_ticker)
return historical_data
| 5,057
| 0
| 189
|
2e11e28c5f1185cc9d68c8d87fd77f68ee0d1717
| 199
|
py
|
Python
|
SCRIPTS/script14.py
|
oasys-kit/ShadowOui-Tutorial
|
50e9416efdd57ffad11cb3c866aa143a9254bd33
|
[
"MIT"
] | 4
|
2018-11-01T14:24:06.000Z
|
2021-02-16T18:25:16.000Z
|
SCRIPTS/script14.py
|
oasys-kit/ShadowOui-Tutorial
|
50e9416efdd57ffad11cb3c866aa143a9254bd33
|
[
"MIT"
] | 1
|
2019-05-30T20:29:30.000Z
|
2019-05-30T20:29:30.000Z
|
SCRIPTS/script14.py
|
oasys-kit/ShadowOui-Tutorial
|
50e9416efdd57ffad11cb3c866aa143a9254bd33
|
[
"MIT"
] | 5
|
2019-06-13T03:42:28.000Z
|
2021-12-04T17:04:32.000Z
|
#create file myaperture.dat needed for source optimization
f = open("myaperture.dat",'w')
f.write(" 50.0 -0.002 0.002 -0.002 0.002")
f.close()
print("File written to disk: myaperture.dat")
| 33.166667
| 58
| 0.678392
|
#create file myaperture.dat needed for source optimization
f = open("myaperture.dat",'w')
f.write(" 50.0 -0.002 0.002 -0.002 0.002")
f.close()
print("File written to disk: myaperture.dat")
| 0
| 0
| 0
|
f94d644e8fe326f1ac3d7e60411ecb5a9f795961
| 2,435
|
py
|
Python
|
sql_app/schemas/schemas_invitation.py
|
l-vincent-l/fastapi-boilerplate
|
d9530e7f1d7fe4d79e11c08e0b86da6e62592f32
|
[
"MIT"
] | 3
|
2021-04-02T14:35:17.000Z
|
2022-03-04T14:40:26.000Z
|
sql_app/schemas/schemas_invitation.py
|
l-vincent-l/fastapi-boilerplate
|
d9530e7f1d7fe4d79e11c08e0b86da6e62592f32
|
[
"MIT"
] | 1
|
2021-09-20T09:23:57.000Z
|
2021-09-20T09:25:40.000Z
|
sql_app/schemas/schemas_invitation.py
|
l-vincent-l/fastapi-boilerplate
|
d9530e7f1d7fe4d79e11c08e0b86da6e62592f32
|
[
"MIT"
] | 1
|
2022-01-21T16:27:14.000Z
|
2022-01-21T16:27:14.000Z
|
print(">>>>>> import schemas_invitation.py > Invitation ...")
from typing import List, Optional, Any
import datetime
from pydantic import BaseModel, EmailStr
# from uuid import UUID
from .schemas_choices import ItemType, InvitationStatus, InviteeType, InvitationStatusAction
from .schemas_auths import AuthsInfosBasics
from .schemas_user import User, UserInDBBaseLight
# print("=== SCH-schemas_invitation > InvitationBase : ", InvitationBase)
# class InvitationList(Invitation):
# pass
| 22.971698
| 92
| 0.765092
|
print(">>>>>> import schemas_invitation.py > Invitation ...")
from typing import List, Optional, Any
import datetime
from pydantic import BaseModel, EmailStr
# from uuid import UUID
from .schemas_choices import ItemType, InvitationStatus, InviteeType, InvitationStatusAction
from .schemas_auths import AuthsInfosBasics
from .schemas_user import User, UserInDBBaseLight
class Invitee(BaseModel):
invitee_type: InviteeType = InviteeType.user
invitee_email: Optional[EmailStr]
invitee_id: Optional[int]
class InvitationBasics(BaseModel):
### basic infos
title: Optional[str] = "My invitation title"
message: Optional[str] = "My invitation message"
### linked data
# invitor_id: int
invitation_to_item_id: int
invitees: Optional[List[Invitee]] = []
# auth levels
auths: Optional[AuthsInfosBasics]
class InvitationToGroup(InvitationBasics):
invitation_to_item_type: ItemType = ItemType.group
class InvitationToWorkspace(InvitationBasics):
invitation_to_item_type: ItemType = ItemType.workspace
class InvitationToDataset(InvitationBasics):
invitation_to_item_type: ItemType = ItemType.dataset
class InvitationToTablemeta(InvitationBasics):
invitation_to_item_type: ItemType = ItemType.table
class InvitationResponse(BaseModel):
### basic infos
invitation_id: int
action: InvitationStatusAction
class InvitationBase(BaseModel):
### basic infos
title: str = "My invitation"
# message_title: Optional[str] = "My invitation title"
message: Optional[str] = "My invitation message"
### linked data
invitation_status: InvitationStatus = InvitationStatus.pending
invitation_to_item_type: ItemType = ItemType.workspace
invitation_to_item_id: int
invitee: EmailStr
invitee_type: Optional[str]
invitee_id: Optional[int]
# auth levels
auths: Optional[AuthsInfosBasics]
# print("=== SCH-schemas_invitation > InvitationBase : ", InvitationBase)
class InvitationCreate(InvitationBase):
pass
class InvitationUpdate(InvitationBase):
pass
class Invitation(InvitationBase):
### meta
item_type: str = "invitation"
id: int
created_date: Optional[datetime.datetime]
is_active: bool = True
### owner
owner_id: int
owner: UserInDBBaseLight
### invitation item
# invitation_item = Any
class Config:
orm_mode = True
# class InvitationList(Invitation):
# pass
class InvitationsList(BaseModel):
# pass
__root__: List[Invitation] = []
| 0
| 1,656
| 276
|
b468c6206281ac95b1aee98e564195c11c66b966
| 958
|
py
|
Python
|
tests/test_vessel_class_filter.py
|
lkattis-signal/SignalSDK
|
f085b9cae0495f4e016b9982df271efc6fd0a8f5
|
[
"Apache-2.0"
] | 10
|
2020-09-29T06:36:45.000Z
|
2022-03-14T18:15:50.000Z
|
tests/test_vessel_class_filter.py
|
lkattis-signal/SignalSDK
|
f085b9cae0495f4e016b9982df271efc6fd0a8f5
|
[
"Apache-2.0"
] | 53
|
2020-10-08T10:05:00.000Z
|
2022-03-29T14:21:18.000Z
|
tests/test_vessel_class_filter.py
|
lkattis-signal/SignalSDK
|
f085b9cae0495f4e016b9982df271efc6fd0a8f5
|
[
"Apache-2.0"
] | 5
|
2020-09-25T07:48:04.000Z
|
2021-11-23T07:08:56.000Z
|
import pytest
from signal_ocean import VesselClassFilter
from .builders import create_vessel_class
@pytest.mark.parametrize(
'name_like',
[
'matching name', 'matching', 'name', 'mat', 'me', 'ing na',
'MATCHING NAME', 'MATCHING', 'NAME', 'MAT', 'ME', 'ING NA',
'mAtchiNG NamE', 'Matching', 'nAME', 'MaT', 'mE', 'INg nA',
' '
]
)
| 29.030303
| 67
| 0.696242
|
import pytest
from signal_ocean import VesselClassFilter
from .builders import create_vessel_class
def test_does_not_filter_anything_by_default():
vessel_class_filter = VesselClassFilter()
unfiltered = [create_vessel_class(1), create_vessel_class(2)]
filtered = vessel_class_filter._apply(unfiltered)
assert list(filtered) == unfiltered
@pytest.mark.parametrize(
'name_like',
[
'matching name', 'matching', 'name', 'mat', 'me', 'ing na',
'MATCHING NAME', 'MATCHING', 'NAME', 'MAT', 'ME', 'ING NA',
'mAtchiNG NamE', 'Matching', 'nAME', 'MaT', 'mE', 'INg nA',
' '
]
)
def test_filters_vessel_classes_by_name(name_like: str):
vessel_class_filter = VesselClassFilter(name_like=name_like)
unmatched = create_vessel_class(1, 'x')
matched = create_vessel_class(3, 'matching name')
filtered = vessel_class_filter._apply([unmatched, matched])
assert list(filtered) == [matched]
| 537
| 0
| 45
|
1c456142bbc95af7e87173cb0cb84afd5f28b013
| 929
|
py
|
Python
|
interprete/src/models/gpt/example.py
|
serjtroshin/PLBART
|
58e5de3041a2fc8b98e54648c6489fb3c23db9cb
|
[
"MIT"
] | null | null | null |
interprete/src/models/gpt/example.py
|
serjtroshin/PLBART
|
58e5de3041a2fc8b98e54648c6489fb3c23db9cb
|
[
"MIT"
] | null | null | null |
interprete/src/models/gpt/example.py
|
serjtroshin/PLBART
|
58e5de3041a2fc8b98e54648c6489fb3c23db9cb
|
[
"MIT"
] | null | null | null |
# from transformers import pipeline
# generator = pipeline('text-generation', model='EleutherAI/gpt-neo-2.7B')
# generator("EleutherAI has", do_sample=True, min_length=50)
# [{'generated_text': 'EleutherAI has made a commitment to create new software packages for each of its major clients and has'}]
from transformers import GPT2Tokenizer, GPT2Model
model_name = "microsoft/CodeGPT-small-java-adaptedGPT2"
# model_name = "./CodeGPT-small-java-adaptedGPT2"
tokenizer = GPT2Tokenizer.from_pretrained(model_name) # CodeGPT-small-java-adaptedGPT2
model = GPT2Model.from_pretrained(model_name)
# tokenizer.save_pretrained(f"./{model_name}")
# model.save_pretrained(f"./{model_name}")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
print(model)
output = model(**encoded_input, output_hidden_states=True)
print(len(output["hidden_states"]))
print(output["hidden_states"][0].shape)
| 42.227273
| 128
| 0.779333
|
# from transformers import pipeline
# generator = pipeline('text-generation', model='EleutherAI/gpt-neo-2.7B')
# generator("EleutherAI has", do_sample=True, min_length=50)
# [{'generated_text': 'EleutherAI has made a commitment to create new software packages for each of its major clients and has'}]
from transformers import GPT2Tokenizer, GPT2Model
model_name = "microsoft/CodeGPT-small-java-adaptedGPT2"
# model_name = "./CodeGPT-small-java-adaptedGPT2"
tokenizer = GPT2Tokenizer.from_pretrained(model_name) # CodeGPT-small-java-adaptedGPT2
model = GPT2Model.from_pretrained(model_name)
# tokenizer.save_pretrained(f"./{model_name}")
# model.save_pretrained(f"./{model_name}")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
print(model)
output = model(**encoded_input, output_hidden_states=True)
print(len(output["hidden_states"]))
print(output["hidden_states"][0].shape)
| 0
| 0
| 0
|
f06f432978e16d74cb920eb585b1eabcd866ddb6
| 946
|
py
|
Python
|
sheet.py
|
albertuscrs/atlink
|
5ee1482871d5337214fa37ffc168766caaf01dba
|
[
"MIT"
] | null | null | null |
sheet.py
|
albertuscrs/atlink
|
5ee1482871d5337214fa37ffc168766caaf01dba
|
[
"MIT"
] | null | null | null |
sheet.py
|
albertuscrs/atlink
|
5ee1482871d5337214fa37ffc168766caaf01dba
|
[
"MIT"
] | null | null | null |
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from datetime import datetime
from pprint import pprint
import pytz
import locale
import sys
import process
sys.path.insert(0,'./process.py')
#set locale
locale.setlocale(locale.LC_TIME, 'id_ID.UTF-8')
#Set up credentials
scope = ["https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name("creds.json", scope)
client = gspread.authorize(creds)
#Open gsheet
sa = client.open("Copy of Absensi CBP 2022")
now = datetime.now()
localtz = pytz.timezone('Asia/Jakarta')
date_jkt = int(localtz.localize(now).strftime("%d"))
month_jkt = localtz.localize(now).strftime("%B")
wks = sa.worksheet(month_jkt)
#Get all data
values=wks.get_all_values()
absen=values[2:]
hasil = process.yang_masuk(absen)
| 30.516129
| 181
| 0.77907
|
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from datetime import datetime
from pprint import pprint
import pytz
import locale
import sys
import process
sys.path.insert(0,'./process.py')
#set locale
locale.setlocale(locale.LC_TIME, 'id_ID.UTF-8')
#Set up credentials
scope = ["https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name("creds.json", scope)
client = gspread.authorize(creds)
#Open gsheet
sa = client.open("Copy of Absensi CBP 2022")
now = datetime.now()
localtz = pytz.timezone('Asia/Jakarta')
date_jkt = int(localtz.localize(now).strftime("%d"))
month_jkt = localtz.localize(now).strftime("%B")
wks = sa.worksheet(month_jkt)
#Get all data
values=wks.get_all_values()
absen=values[2:]
hasil = process.yang_masuk(absen)
| 0
| 0
| 0
|
a255e4ef851b5ce35cf88229286c05b98240f3b3
| 4,433
|
py
|
Python
|
tests/halfvec_cudatest.py
|
fthaler/dace
|
ba2b703f142c6b6d37c7ca3f20c268bc50c6c7a8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/halfvec_cudatest.py
|
fthaler/dace
|
ba2b703f142c6b6d37c7ca3f20c268bc50c6c7a8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/halfvec_cudatest.py
|
fthaler/dace
|
ba2b703f142c6b6d37c7ca3f20c268bc50c6c7a8
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
""" Tests for half-precision syntax quirks. """
import dace
import math
import numpy as np
from dace.transformation.dataflow import MapFusion, Vectorization
from dace.transformation.optimizer import Optimizer
N = dace.symbol('N')
def _test_half(veclen):
""" Tests a set of elementwise operations on a vector half type. """
@dace.program
A = np.random.rand(24).astype(np.float16)
B = np.random.rand(24).astype(np.float16)
sdfg = halftest.to_sdfg()
sdfg.apply_strict_transformations()
sdfg.apply_gpu_transformations()
# Apply vectorization on each map and count applied
applied = 0
for xform in Optimizer(sdfg).get_pattern_matches(patterns=[Vectorization]):
xform.vector_len = veclen
xform.postamble = False
xform.apply(sdfg)
applied += 1
assert applied == 2
out = sdfg(A=A, B=B, N=24)
assert np.allclose(out, A * B + A)
def test_half4():
""" Tests a set of elementwise operations on half with vector length 4. """
_test_half(4)
def test_half8():
""" Tests a set of elementwise operations on half with vector length 8. """
_test_half(8)
def test_exp_vec():
""" Tests an exp operator on a vector half type. """
@dace.program
A = np.random.rand(24).astype(np.float16)
sdfg = halftest.to_sdfg()
sdfg.apply_gpu_transformations()
assert sdfg.apply_transformations(Vectorization, dict(vector_len=8)) == 1
out = sdfg(A=A, N=24)
assert np.allclose(out, np.exp(A))
def test_relu_vec():
""" Tests a ReLU operator on a vector half type. """
@dace.program
A = np.random.rand(24).astype(np.float16)
sdfg = halftest.to_sdfg()
sdfg.apply_gpu_transformations()
assert sdfg.apply_transformations(Vectorization, dict(vector_len=8)) == 1
out = sdfg(A=A, N=24)
assert np.allclose(out, np.maximum(A, 0))
def test_dropout_vec():
""" Tests a dropout operator on a vector half type. """
@dace.program
A = np.random.rand(24).astype(np.float16)
mask = np.random.randint(0, 2, size=[24]).astype(np.float16)
sdfg: dace.SDFG = halftest.to_sdfg()
sdfg.apply_gpu_transformations()
assert sdfg.apply_transformations(Vectorization, dict(vector_len=8)) == 1
out = sdfg(A=A, mask=mask, N=24)
assert np.allclose(out, A * mask)
def test_gelu_vec():
""" Tests a GELU operator on a vector half type. """
s2pi = math.sqrt(2.0 / math.pi)
@dace.program
A = np.random.rand(24).astype(np.float16)
sdfg = halftest.to_sdfg()
sdfg.apply_gpu_transformations()
assert sdfg.apply_transformations(Vectorization, dict(vector_len=4)) == 1
out = sdfg(A=A, N=24)
expected = 0.5 * A * (
1 + np.tanh(math.sqrt(2.0 / math.pi) * (A + 0.044715 * (A**3))))
assert np.allclose(out, expected, rtol=1e-2, atol=1e-4)
if __name__ == '__main__':
# Prerequisite for test: CUDA compute capability >= 6.0
dace.Config.set('compiler', 'cuda', 'cuda_arch', value='60')
test_half4()
test_half8()
test_exp_vec()
test_relu_vec()
test_dropout_vec()
test_gelu_vec()
| 30.572414
| 80
| 0.600947
|
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
""" Tests for half-precision syntax quirks. """
import dace
import math
import numpy as np
from dace.transformation.dataflow import MapFusion, Vectorization
from dace.transformation.optimizer import Optimizer
N = dace.symbol('N')
def _test_half(veclen):
""" Tests a set of elementwise operations on a vector half type. """
@dace.program
def halftest(A: dace.float16[N], B: dace.float16[N]):
return A * B + A
A = np.random.rand(24).astype(np.float16)
B = np.random.rand(24).astype(np.float16)
sdfg = halftest.to_sdfg()
sdfg.apply_strict_transformations()
sdfg.apply_gpu_transformations()
# Apply vectorization on each map and count applied
applied = 0
for xform in Optimizer(sdfg).get_pattern_matches(patterns=[Vectorization]):
xform.vector_len = veclen
xform.postamble = False
xform.apply(sdfg)
applied += 1
assert applied == 2
out = sdfg(A=A, B=B, N=24)
assert np.allclose(out, A * B + A)
def test_half4():
""" Tests a set of elementwise operations on half with vector length 4. """
_test_half(4)
def test_half8():
""" Tests a set of elementwise operations on half with vector length 8. """
_test_half(8)
def test_exp_vec():
""" Tests an exp operator on a vector half type. """
@dace.program
def halftest(A: dace.float16[N]):
out = np.ndarray([N], dace.float16)
for i in dace.map[0:N]:
with dace.tasklet:
a << A[i]
o >> out[i]
o = math.exp(a)
return out
A = np.random.rand(24).astype(np.float16)
sdfg = halftest.to_sdfg()
sdfg.apply_gpu_transformations()
assert sdfg.apply_transformations(Vectorization, dict(vector_len=8)) == 1
out = sdfg(A=A, N=24)
assert np.allclose(out, np.exp(A))
def test_relu_vec():
""" Tests a ReLU operator on a vector half type. """
@dace.program
def halftest(A: dace.float16[N]):
out = np.ndarray([N], dace.float16)
for i in dace.map[0:N]:
with dace.tasklet:
a << A[i]
o >> out[i]
o = max(a, dace.float16(0))
return out
A = np.random.rand(24).astype(np.float16)
sdfg = halftest.to_sdfg()
sdfg.apply_gpu_transformations()
assert sdfg.apply_transformations(Vectorization, dict(vector_len=8)) == 1
out = sdfg(A=A, N=24)
assert np.allclose(out, np.maximum(A, 0))
def test_dropout_vec():
""" Tests a dropout operator on a vector half type. """
@dace.program
def halftest(A: dace.float16[N], mask: dace.float16[N]):
out = np.ndarray([N], dace.float16)
for i in dace.map[0:N]:
with dace.tasklet:
a << A[i]
d << mask[i]
o >> out[i]
o = a * d
return out
A = np.random.rand(24).astype(np.float16)
mask = np.random.randint(0, 2, size=[24]).astype(np.float16)
sdfg: dace.SDFG = halftest.to_sdfg()
sdfg.apply_gpu_transformations()
assert sdfg.apply_transformations(Vectorization, dict(vector_len=8)) == 1
out = sdfg(A=A, mask=mask, N=24)
assert np.allclose(out, A * mask)
def test_gelu_vec():
""" Tests a GELU operator on a vector half type. """
s2pi = math.sqrt(2.0 / math.pi)
@dace.program
def halftest(A: dace.float16[N]):
out = np.ndarray([N], dace.float16)
for i in dace.map[0:N]:
with dace.tasklet:
a << A[i]
o >> out[i]
o = dace.float16(0.5) * a * (dace.float16(1) + math.tanh(
dace.float16(s2pi) * (a + dace.float16(0.044715) * (a**3))))
return out
A = np.random.rand(24).astype(np.float16)
sdfg = halftest.to_sdfg()
sdfg.apply_gpu_transformations()
assert sdfg.apply_transformations(Vectorization, dict(vector_len=4)) == 1
out = sdfg(A=A, N=24)
expected = 0.5 * A * (
1 + np.tanh(math.sqrt(2.0 / math.pi) * (A + 0.044715 * (A**3))))
assert np.allclose(out, expected, rtol=1e-2, atol=1e-4)
if __name__ == '__main__':
# Prerequisite for test: CUDA compute capability >= 6.0
dace.Config.set('compiler', 'cuda', 'cuda_arch', value='60')
test_half4()
test_half8()
test_exp_vec()
test_relu_vec()
test_dropout_vec()
test_gelu_vec()
| 1,134
| 0
| 130
|