blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cd4c7e6854eef7d83740ba16570aec3b7664d4ef | 0606f6af48d8e20c5231f3205d692f6a3943807f | /plot_hist.py | dd3b3b0b569537b8560a0ec250e48b02964f8d9f | [] | no_license | pahbs/HRSI | 9c95833ff89c2d4a72f8cf063781425892875fff | f83b790a04ad448589576f54bc5251781962831d | refs/heads/master | 2021-12-11T10:11:26.684339 | 2021-11-29T23:17:53 | 2021-11-29T23:17:53 | 75,756,652 | 4 | 1 | null | 2017-02-23T20:21:00 | 2016-12-06T17:51:32 | Python | UTF-8 | Python | false | false | 2,325 | py | #!/usr/bin/env python
#
# Utility to plot a histogram of a raster
import sys
import os
import argparse
import numpy as np
from pygeotools.lib import iolib
from pygeotools.lib import malib
from pygeotools.lib import geolib
from pygeotools.lib import filtlib
from pygeotools.lib import warplib
from dem_control import sample_ma
import matplotlib
##https://stackoverflow.com/questions/37604289/tkinter-tclerror-no-display-name-and-no-display-environment-variable
matplotlib.use('Agg')
import matplotlib.pyplot, matplotlib.mlab, math
import scipy.stats
def getparser():
    """Build the command-line argument parser for the histogram utility."""
    parser = argparse.ArgumentParser(
        description="Utility to get histogram from a raster")
    parser.add_argument('ras_fn', type=str, help='Raster filename')
    # Optional flags: (flag, type, default, help text)
    optional_args = [
        ('-min_val', float, None, 'Min value that will be included'),
        ('-max_val', float, None, 'Max value that will be included'),
        ('-sample_step', int, 50, 'Sampling step value'),
        ('-axis_lab_x', str, "X", 'X-axis label'),
    ]
    for flag, arg_type, default, help_text in optional_args:
        parser.add_argument(flag, type=arg_type, default=default, help=help_text)
    return parser
def main():
    """Mask, subsample and plot a histogram of the input raster.

    Returns the histogram figure filename, or "" when the masked array
    is empty/None.
    """
    parser = getparser()
    args = parser.parse_args()
    ras_fn = args.ras_fn
    min_val = args.min_val
    max_val = args.max_val
    sample_step = args.sample_step
    # Load raster as a masked array
    ma = iolib.fn_getma(ras_fn)
    # Mask values outside the requested range, then subsample
    if min_val is not None:
        ma = np.ma.masked_less(ma, min_val)
    if max_val is not None:
        ma = np.ma.masked_greater(ma, max_val)
    ma = sample_ma(ma, sample_step)
    if ma is None:
        print("No histogram. Array is None.")
        fig_name = ""
    else:
        matplotlib.pyplot.hist(ma.compressed(), 300, normed=True,
                               color='gray', alpha=0.5)
        # Only set explicit ticks when both bounds were given;
        # np.arange(None, None, 1.0) raises a TypeError
        if min_val is not None and max_val is not None:
            matplotlib.pyplot.xticks(np.arange(min_val, max_val, 1.0))
        matplotlib.pyplot.xlabel(args.axis_lab_x, fontsize=12)
        # Write histogram next to the input raster.
        # Fixed: previously used str.strip('.tif'), which strips the
        # *character set* {.,t,i,f} and mangles names like "forest.tif"
        # -> use splitext to drop the extension instead.
        base = os.path.splitext(os.path.basename(ras_fn))[0]
        fig_name = base + '_hist.png'
        out_path = os.path.join(os.path.dirname(ras_fn), fig_name)
        matplotlib.pyplot.savefig(out_path)
        matplotlib.pyplot.clf()
        print("Saved histogram fig:")
        print(out_path)
    return fig_name
if __name__ == "__main__":
main() | [
"paul.montesano@gmail.com"
] | paul.montesano@gmail.com |
08b41d7233c27bcbd3ff46bae419f401ad35f9e4 | a67263ccde6de5d18de409a8924b64c38c8a71c1 | /djangosample/main/middlewares.py | 99578eeab540c0ba43bedc6888aeeac7c53cecb0 | [] | no_license | annshress/Demo | 8613437e7d626f7df4786e4d8b3297edb5e75286 | a73d064d9092d2764e873a208f61ceec1e93f62a | refs/heads/master | 2022-12-07T08:33:09.621364 | 2020-07-02T09:49:26 | 2020-07-02T09:49:26 | 239,692,452 | 0 | 0 | null | 2022-11-22T03:17:32 | 2020-02-11T06:28:28 | JavaScript | UTF-8 | Python | false | false | 3,316 | py | import jwt
from channels.auth import AuthMiddlewareStack
from django.contrib.auth import get_user_model
from django.contrib.auth.middleware import get_user
from django.contrib.auth.models import AnonymousUser
from django.db import close_old_connections
from django.utils.functional import SimpleLazyObject
from rest_framework import exceptions
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.request import Request
from rest_framework_jwt.settings import api_settings
jwt_decode_handler = api_settings.JWT_DECODE_HANDLER
jwt_get_username_from_payload = api_settings.JWT_PAYLOAD_GET_USERNAME_HANDLER
def get_user_jwt(request):
    """Return the session-authenticated user, falling back to JWT auth.

    If session authentication yields an anonymous user, try to authenticate
    the request with a JSON web token. Any JWT failure falls back to the
    (anonymous) session user.
    """
    user = get_user(request)
    if user.is_authenticated:
        return user
    try:
        user_jwt = JSONWebTokenAuthentication().authenticate(Request(request))
        if user_jwt is not None:
            # authenticate() returns a (user, token) tuple
            return user_jwt[0]
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # not swallowed; JWT failures still fall through to the session user.
        pass
    return user
class JWTTokenAuthenticationMiddleware:
    """
    Middleware for setting user from jwt in the `request` object.

    `request.user` is populated lazily: `get_user_jwt` (session auth with a
    JWT fallback) only runs when the user is first accessed.
    """
    def __init__(self, get_response):
        # Standard Django middleware protocol: store the next callable.
        self.get_response = get_response
    def __call__(self, request):
        # Session middleware must have run first, as get_user() reads it.
        assert hasattr(request, 'session'), "The Django authentication middleware requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
        # Lazy wrapper defers authentication until request.user is touched.
        request.user = SimpleLazyObject(lambda: get_user_jwt(request))
        response = self.get_response(request)
        return response
class JWTTokenAuthMiddleware:
    """
    JWT Token authorization middleware for Django Channels 2.

    Reads the JWT from the `sec-websocket-protocol` header of the WebSocket
    handshake and stores the resolved user (or AnonymousUser) in the scope.

    inspiration: https://gist.github.com/rluts/22e05ed8f53f97bdd02eafdf38f3d60a
    """
    def __init__(self, inner):
        # Next ASGI application in the middleware chain.
        self.inner = inner
    def __call__(self, scope):
        headers = dict(scope['headers'])
        if b'sec-websocket-protocol' in headers:
            # The raw JWT is transported as the websocket subprotocol value.
            jwt_value = headers[b'sec-websocket-protocol'].decode()
            try:
                payload = jwt_decode_handler(jwt_value)
                user = self.authenticate_credentials(payload)
                # Channels long-lived connections: drop stale DB connections.
                close_old_connections()
                scope['user'] = user
            except exceptions.AuthenticationFailed:
                scope['user'] = AnonymousUser()
            except (jwt.ExpiredSignature, jwt.DecodeError, jwt.InvalidTokenError):
                # Any malformed/expired token downgrades to anonymous.
                scope['user'] = AnonymousUser()
        return self.inner(scope)
    def authenticate_credentials(self, payload):
        """
        Returns an active user that matches the payload's user id and email.

        Raises AuthenticationFailed when the payload has no username, the
        user does not exist, or the account is disabled.
        """
        user_model = get_user_model()
        username = jwt_get_username_from_payload(payload)
        if not username:
            # NOTE(review): the "User not found."/"Invalid signature." texts
            # look swapped relative to the conditions they report -- confirm
            # against the DRF-JWT reference implementation before changing.
            msg = "User not found."
            raise exceptions.AuthenticationFailed(msg)
        try:
            user = user_model.objects.get_by_natural_key(username)
        except user_model.DoesNotExist:
            msg = "Invalid signature."
            raise exceptions.AuthenticationFailed(msg)
        if not user.is_active:
            msg = "User account is disabled."
            raise exceptions.AuthenticationFailed(msg)
        return user
JWTTokenAuthMiddlewareStack = lambda inner: JWTTokenAuthMiddleware(AuthMiddlewareStack(inner))
| [
"ann.shress@gmail.com"
] | ann.shress@gmail.com |
927ca15778a1bf9acbf23fb2734e6a222231aeb2 | 7404a76d7c9820c77db6f73e6b37f81f6d7e40e4 | /preprocess_dreambox_ts_file.py | 23d9df49d12f5c69a4e6112aceb8d7374b09e998 | [] | no_license | svenXY/video_stuff | 2638eeb7d97890ec701b7c47cfdc8d7fb4931765 | 711d1f9a150b288c8cb5b5dfa6b2bf4c7a6ef9a7 | refs/heads/master | 2021-01-13T01:36:17.323282 | 2013-09-27T14:08:35 | 2013-09-27T14:08:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,554 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
# Name : preprocess_dreambox_ts_file.py
# Description : rename a ts and ts.meta file according to it's metadata
and place them in a properly named subdirectory for
further processing
# Author : Sven Hergenhahn
'''
import os
import sys
import argparse
class ExistsError(Exception):
    """Raised when a destination file already exists.

    str(exc) yields "File exists already: <path>".
    """
    def __init__(self, file):
        self.msg = "File exists already: %s" % file
        # Pass the message to Exception so args/repr behave normally
        # (the original left Exception uninitialized and had a stray
        # class-level `pass`).
        super(ExistsError, self).__init__(self.msg)
    def __str__(self):
        return self.msg
def get_movie_name(meta_file):
    """Derive a filesystem-safe movie name from a dreambox .ts.meta file.

    Lines 2 and 3 of the meta file hold title and description; they are
    joined with '-' when they differ. Spaces become underscores and '/'
    becomes '-' so the name is safe as a directory/file name.

    NOTE(review): raises ValueError if the meta file has fewer than 3 lines
    -- assumed to always hold for dreambox recordings; confirm.
    """
    try:
        with open(meta_file) as meta:
            # Slice [1:3] == file lines 2 and 3 (title, description)
            (name, description) = meta.readlines()[1:3]
    except IOError as e:
        # Modernized from Py2-only `except IOError, e` / print statements;
        # this form works on Python 2.6+ and Python 3.
        print("Problem with meta file: %s" % e)
        sys.exit(1)
    if name != description:
        name = name.rstrip() + '-' + description.rstrip()
    else:
        name = name.rstrip()
    name = name.replace(' ', '_')
    name = name.replace('/', '-')
    print("Movie name taken from meta file: %s" % name)
    return name
def restructure(tsfile, meta_file, movie_name, force=False):
    """Move tsfile and meta_file into a subdirectory named after the movie.

    Files are renamed to <movie_name>/<movie_name>.ts(.meta). If either
    destination already exists the move is skipped unless force is True.
    """
    try:
        os.mkdir(movie_name)
    except OSError as e:
        # errno 17 == EEXIST: reuse the existing directory
        if e.errno == 17:
            print("Directory %s exists, continuing anyway" % movie_name)
        else:
            # Fixed: other OSErrors (e.g. permission denied) were silently
            # swallowed before -- surface them instead.
            raise
    try:
        for ext in ['.ts', '.ts.meta']:
            file = os.path.join(movie_name, movie_name + ext)
            if os.path.exists(file):
                raise ExistsError(file)
    except ExistsError as e:
        if force:
            print("--force is set, overwriting files")
        else:
            print(e)
            return
    os.rename(tsfile, os.path.join(movie_name, movie_name + '.ts'))
    os.rename(meta_file, os.path.join(movie_name, movie_name + '.ts.meta'))
if __name__ == '__main__':
    # CLI entry point: validate arguments and process each TS recording.
    parser = argparse.ArgumentParser(usage='%(prog)s [options] <movie.ts> [<movie2.ts>, ...]')
    parser.add_argument("--force", '-f', help="Overwrite files in destination directory", action="store_true")
    parser.add_argument("tsfile", help='one or more TS files', nargs=argparse.REMAINDER)
    args = parser.parse_args()
    if not args.tsfile:
        parser.error('Missing TS file(s)')
    for tsfile in args.tsfile:
        # Skip anything that is not a .ts recording
        if not tsfile.endswith('.ts'):
            parser.print_usage()
            print('File %s is not a TS file. Skipping.' % tsfile)
            continue
        print("## Processing %s" % tsfile)
        meta_file = tsfile + '.meta'
        movie_name = get_movie_name(meta_file)
        restructure(tsfile, meta_file, movie_name, force=args.force)
| [
"sven@hergenhahn-web.de"
] | sven@hergenhahn-web.de |
023f8af1473e4b26b26918a3f4baac47e3c12473 | 7de9d5c8ca5bd65cc5322a96537c88d0389bc5c9 | /apps/main_app/views.py | 0219dfc2c8e6166c15a9fe498bc9f55a6f0b9c8d | [] | no_license | SeattleAmy/deploy | f3cb6f89bc4e0cdfe234d8b80acdef109d90072f | bf549dffb468913c6daba896acefb69dad03b835 | refs/heads/master | 2021-01-12T08:12:05.262309 | 2016-12-15T00:13:45 | 2016-12-15T00:13:45 | 76,500,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | from django.shortcuts import render, redirect
from .models import Email
from django.contrib import messages
# Create your views here.
def index(request):
    """Render the index page template."""
    return render(request, 'main_app/index.html')
def create(request):
    """Register the posted email address.

    On validation failure, flash the error message and return to the index;
    on success redirect to the success page.

    NOTE(review): non-POST requests fall through and return None (an invalid
    Django response) -- confirm this view is only routed for POST.
    """
    if request.method == "POST":
        # register() returns (ok, message-or-object)
        result = Email.objects.register(request.POST['email'])
        if not result[0]:
            messages.error(request, result[1])
            return redirect('/')
        return redirect('/success')
def success(request):
    """Render the success page listing all stored emails."""
    return render(request, 'main_app/success.html', {"emails": Email.objects.all()})
def destroy(request, id):
    """Delete the email row with the given primary key and go back home."""
    email = Email.objects.get(id=id)
    email.delete()
    return redirect('/')
| [
"Amy@Amys-Air.hsd1.wa.comcast.net"
] | Amy@Amys-Air.hsd1.wa.comcast.net |
7d083c582e9faa973f538fd894def8a7c80648f6 | 29754007ad2fe1d8b5f029bc5fa1f9462170bb1a | /Day1to10/Day10/dayten.py | b240bb54fdef107bba341677f2d42b52ddf161c4 | [] | no_license | ivymorenomt/100daysofPython | 97a934a8789fcd9209cc0e2a9a3283ab66b99ced | 93517ff10bbac50441c9c18ed09e39da2157da02 | refs/heads/master | 2023-03-20T11:03:35.604992 | 2021-03-01T03:50:21 | 2021-03-01T03:50:21 | 339,278,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | ##### For Loop
# Doubling a list the verbose way: explicit loop and append.
numbers = [1, 2, 3, 4]
doubled_numbers = []
for value in numbers:
    doubled_numbers.append(value * 2)
print(doubled_numbers)  # [2, 4, 6, 8]

##### List Comprehension
# Same result in a single expression.
numbers = [1, 2, 3, 4]
doubled_numbers = [value * 2 for value in numbers]
print(doubled_numbers)  # [2, 4, 6, 8]

name = 'colt'
new_list = [letter.upper() for letter in name]
print(new_list)

friends = ['ashley', 'matt', 'michael']
new_friend = [friend.capitalize() for friend in friends]
print(new_friend)

print([10 * n for n in range(1, 6)])
print([bool(val) for val in [1, [], '']])  # truthiness of each value

numbers = [1, 2, 3, 4, 5]
string_list = list(map(str, numbers))
print(string_list)

with_vowels = 'This is so much fun!'
print(''.join(char for char in with_vowels if char not in 'aeiou'))

# Using list comprehensions:
answer = [person[0] for person in ["Elie", "Tim", "Matt"]]
answer2 = [val for val in [1, 2, 3, 4, 5, 6] if val % 2 == 0]
print(answer)
print(answer2)

# Using good old manual loops:
answer = []
for person in ["Elie", "Tim", "Matt"]:
    answer.append(person[0])
answer2 = []
for num in [1, 2, 3, 4, 5, 6]:
    if num % 2 == 0:
        answer2.append(num)
| [
"morenomt27@gmail.com"
] | morenomt27@gmail.com |
2ce930a77f53d08bd7633bac3cdee86c6e5cdd88 | f7327136419a3b895fb185bdc0af7a08256f8aed | /python/paddle/nn/layer/fused_transformer.py | 0084f7ff339df3e185dbe727d4632f758e7e9255 | [
"Apache-2.0"
] | permissive | paddlelaw/Paddle | 45a7598535d6a4b9dd0cfb9bbc61540ff9c1c21e | 12865234fe1e28fe5df50a43901845ceaea42c2d | refs/heads/develop | 2023-08-28T01:19:16.786973 | 2021-10-09T14:39:35 | 2021-10-09T14:39:35 | 331,300,511 | 0 | 0 | Apache-2.0 | 2021-10-09T14:39:36 | 2021-01-20T12:29:27 | Python | UTF-8 | Python | false | false | 19,928 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FusedMultiHeadAttention(Layer):
    """
    Attention maps queries and a set of key-value pairs to outputs, and
    Multi-Head Attention performs multiple parallel attention to jointly
    attending to information from different representation subspaces.
    Please refer to `Attention Is All You Need <https://arxiv.org/pdf/1706.03762.pdf>`_
    for more details.

    NOTE(review): this class is currently an unimplemented stub -- both
    ``__init__`` and ``forward`` raise ``NotImplementedError``.  The example
    below instantiates ``paddle.nn.MultiHeadAttention`` (the non-fused
    layer); update it once this class is implemented.

    Parameters:
        embed_dim (int): The expected feature size in the input and output.
        num_heads (int): The number of heads in multi-head attention.
        dropout (float, optional): The dropout probability used on attention
            weights to drop some attention targets. 0 for no dropout. Default 0
        kdim (int, optional): The feature size in key. If None, assumed equal to
            `embed_dim`. Default None.
        vdim (int, optional): The feature size in value. If None, assumed equal to
            `embed_dim`. Default None.
        need_weights (bool, optional): Indicate whether to return the attention
            weights. Default False.
        weight_attr(ParamAttr, optional): To specify the weight parameter property.
            Default: None, which means the default weight parameter property is used.
            See usage for details in :code:`ParamAttr` .
        bias_attr (ParamAttr|bool, optional): To specify the bias parameter property.
            Default: None, which means the default bias parameter property is used.
            If it is set to False, this layer will not have trainable bias parameter.
            See usage for details in :code:`ParamAttr` .

    Examples:
        .. code-block:: python

            import paddle
            # encoder input: [batch_size, sequence_length, d_model]
            query = paddle.rand((2, 4, 128))
            # self attention mask: [batch_size, num_heads, query_len, query_len]
            attn_mask = paddle.rand((2, 2, 4, 4))
            multi_head_attn = paddle.nn.MultiHeadAttention(128, 2)
            output = multi_head_attn(query, None, None, attn_mask=attn_mask) # [2, 4, 128]
    """
    # Cache containers described in `forward`: `Cache` grows with previous
    # positions (decoder self-attention); `StaticCache` holds fixed k/v
    # projections (decoder-encoder cross-attention).
    Cache = collections.namedtuple("Cache", ["k", "v"])
    StaticCache = collections.namedtuple("StaticCache", ["k", "v"])
    def __init__(self,
                 embed_dim,
                 num_heads,
                 dropout=0.,
                 kdim=None,
                 vdim=None,
                 need_weights=False,
                 weight_attr=None,
                 bias_attr=None):
        super(FusedMultiHeadAttention, self).__init__()
        # Fused implementation not available yet.
        raise NotImplementedError()
    def forward(self, query, key=None, value=None, attn_mask=None, cache=None):
        """
        Applies multi-head attention to map queries and a set of key-value pairs
        to outputs.

        Parameters:
            query (Tensor): The queries for multi-head attention. It is a
                tensor with shape `[batch_size, query_length, embed_dim]`. The
                data type should be float32 or float64.
            key (Tensor, optional): The keys for multi-head attention. It is
                a tensor with shape `[batch_size, key_length, kdim]`. The
                data type should be float32 or float64. If None, use `query` as
                `key`. Default None.
            value (Tensor, optional): The values for multi-head attention. It
                is a tensor with shape `[batch_size, value_length, vdim]`.
                The data type should be float32 or float64. If None, use `query` as
                `value`. Default None.
            attn_mask (Tensor, optional): A tensor used in multi-head attention
                to prevents attention to some unwanted positions, usually the
                paddings or the subsequent positions. It is a tensor with shape
                broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
                When the data type is bool, the unwanted positions have `False`
                values and the others have `True` values. When the data type is
                int, the unwanted positions have 0 values and the others have 1
                values. When the data type is float, the unwanted positions have
                `-INF` values and the others have 0 values. It can be None when
                nothing wanted or needed to be prevented attention to. Default None.
            cache (MultiHeadAttention.Cache|MultiHeadAttention.StaticCache, optional):
                It is a namedtuple with `k` and `v` as fields, and stores tensors
                shaped `[batch_size, num_heads, length, embed_dim]` which are results
                of linear projection, reshape and transpose calculations in
                MultiHeadAttention. If it is an instance of `Cache`, `k` and `v`
                fields reserve intermediate results of previous positions, which
                mostly used for decoder self attention. If it is an instance of
                `StaticCache`, `key` and `value` args would be ignored, `k` and
                `v` fields would be used as calculated results on `key` and
                `value`, which mostly used for decoder-encoder cross attention.
                It is only used for inference and should be None for training.
                Default None.

        Returns:
            Tensor|tuple: It is a tensor that has the same shape and data type \
                as `query`, representing attention output. Or a tuple if \
                `need_weights` is True or `cache` is not None. If `need_weights` \
                is True, except for attention output, the tuple also includes \
                the attention weights tensor shaped `[batch_size, num_heads, query_length, key_length]`. \
                If `cache` is not None, the tuple then includes the new cache \
                having the same type as `cache`, and if it is `StaticCache`, it \
                is same as the input `cache`, if it is `Cache`, the new cache \
                reserves tensors concatanating raw tensors with intermediate \
                results of current query.
        """
        raise NotImplementedError()
class FusedFeedForward(Layer):
    """Fused position-wise feed-forward sub-layer of a Transformer block.

    Placeholder: construction and the forward pass are not implemented yet
    and raise ``NotImplementedError``.
    """

    def __init__(self,
                 d_model,
                 dim_feedforward,
                 dropout=0.1,
                 activation="relu",
                 act_dropout=None,
                 normalize_before=False,
                 weight_attr=None,
                 bias_attr=None):
        super(FusedFeedForward, self).__init__()
        # Fused implementation pending.
        raise NotImplementedError()

    def forward(self, src, cache=None):
        raise NotImplementedError()
class FusedTransformerEncoderLayer(Layer):
    """
    TransformerEncoderLayer is composed of two sub-layers which are self (multi-head)
    attention and feedforward network. Before and after each sub-layer, pre-process
    and post-precess would be applied on the input and output accordingly. If
    `normalize_before` is True, pre-process is layer normalization and post-precess
    includes dropout, residual connection. Otherwise, no pre-process and post-precess
    includes dropout, residual connection, layer normalization.

    NOTE(review): this class is currently an unimplemented stub -- ``__init__``
    and ``forward`` raise ``NotImplementedError``.  The example below
    instantiates ``TransformerEncoderLayer`` (the non-fused layer); update it
    once this class is implemented.

    Parameters:
        d_model (int): The expected feature size in the input and output.
        nhead (int): The number of heads in multi-head attention(MHA).
        dim_feedforward (int): The hidden layer size in the feedforward network(FFN).
        dropout (float, optional): The dropout probability used in pre-process
            and post-precess of MHA and FFN sub-layer. Default 0.1
        activation (str, optional): The activation function in the feedforward
            network. Default relu.
        attn_dropout (float, optional): The dropout probability used
            in MHA to drop some attention target. If None, use the value of
            `dropout`. Default None
        act_dropout (float, optional): The dropout probability used after FFN
            activition. If None, use the value of `dropout`. Default None
        normalize_before (bool, optional): Indicate whether to put layer normalization
            into preprocessing of MHA and FFN sub-layers. If True, pre-process is layer
            normalization and post-precess includes dropout, residual connection.
            Otherwise, no pre-process and post-precess includes dropout, residual
            connection, layer normalization. Default False
        weight_attr(ParamAttr|list|tuple, optional): To specify the weight parameter property.
            If it is a list/tuple, `weight_attr[0]` would be used as `weight_attr` for
            MHA, and `weight_attr[1]` would be used as `weight_attr` for linear in FFN.
            Otherwise, MHA and FFN both use it as `weight_attr` to create parameters.
            Default: None, which means the default weight parameter property is used.
            See usage for details in :code:`ParamAttr` .
        bias_attr (ParamAttr|list|tuple|bool, optional): To specify the bias parameter property.
            If it is a list/tuple, `bias_attr[0]` would be used as `bias_attr` for
            MHA, and `bias_attr[1]` would be used as `bias_attr` for linear in FFN.
            Otherwise, MHA and FFN both use it as `bias_attr` to create parameters.
            The `False` value means the corresponding layer would not have trainable
            bias parameter. See usage for details in :code:`ParamAttr` . Default: None,
            which means the default bias parameter property is used.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.nn import TransformerEncoderLayer
            # encoder input: [batch_size, src_len, d_model]
            enc_input = paddle.rand((2, 4, 128))
            # self attention mask: [batch_size, n_head, src_len, src_len]
            attn_mask = paddle.rand((2, 2, 4, 4))
            encoder_layer = TransformerEncoderLayer(128, 2, 512)
            enc_output = encoder_layer(enc_input, attn_mask) # [2, 4, 128]
    """
    def __init__(self,
                 d_model,
                 nhead,
                 dim_feedforward,
                 dropout=0.1,
                 activation="relu",
                 attn_dropout=None,
                 act_dropout=None,
                 normalize_before=False,
                 weight_attr=None,
                 bias_attr=None):
        # Snapshot the constructor arguments, then drop the non-argument
        # entries that locals() also captures.
        self._config = locals()
        self._config.pop("self")
        self._config.pop("__class__", None)  # py3
        super(FusedTransformerEncoderLayer, self).__init__()
        # Fused implementation not available yet.
        raise NotImplementedError()
    def forward(self, src, src_mask=None, cache=None):
        """
        Applies a Transformer encoder layer on the input.

        Parameters:
            src (Tensor): The input of Transformer encoder layer. It is
                a tensor with shape `[batch_size, sequence_length, d_model]`.
                The data type should be float32 or float64.
            src_mask (Tensor, optional): A tensor used in multi-head attention
                to prevents attention to some unwanted positions, usually the
                paddings or the subsequent positions. It is a tensor with shape
                broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
                When the data type is bool, the unwanted positions have `False`
                values and the others have `True` values. When the data type is
                int, the unwanted positions have 0 values and the others have 1
                values. When the data type is float, the unwanted positions have
                `-INF` values and the others have 0 values. It can be None when
                nothing wanted or needed to be prevented attention to. Default None.
            cache (Tensor, optional): It is an instance of `MultiHeadAttention.Cache`.
                See `TransformerEncoderLayer.gen_cache` for more details. It is
                only used for inference and should be None for training. Default
                None.

        Returns:
            Tensor|tuple: It is a tensor that has the same shape and data type \
                as `enc_input`, representing the output of Transformer encoder \
                layer. Or a tuple if `cache` is not None, except for encoder \
                layer output, the tuple includes the new cache which is same \
                as input `cache` argument but `incremental_cache` has an \
                incremental length. See `MultiHeadAttention.gen_cache` and \
                `MultiHeadAttention.forward` for more details.
        """
        raise NotImplementedError()
class FusedTransformer(Layer):
    """
    A Transformer model composed of an instance of `TransformerEncoder` and an
    instance of `TransformerDecoder`. While the embedding layer and output layer
    are not included.

    Please refer to `Attention is all you need <http://papers.nips.cc/paper/7181-attention-is-all-you-need.pdf>`_ ,
    and see `TransformerEncoder` and `TransformerDecoder` for more details.

    Users can configurate the model architecture with corresponding parameters.
    Note the usage of `normalize_before` representing where to apply layer
    normalization (in pre-process or post-precess of multi-head attention or FFN),
    and some transformer like models are different on this, such as
    `BERT <https://arxiv.org/abs/1810.04805>`_ and `GPT2 <https://d4mucfpksywv.cloudfront.net/better-language-models/language-models.pdf>`_ .
    The default architecture here places layer normalization in post-process and
    applies another layer normalization on the output of last encoder/decoder layer.

    NOTE(review): this class is currently an unimplemented stub -- ``__init__``
    and ``forward`` raise ``NotImplementedError``.  The example below
    instantiates ``Transformer`` (the non-fused model).

    Parameters:
        d_model (int, optional): The expected feature size in the encoder/decoder input
            and output. Default 512
        nhead (int, optional): The number of heads in multi-head attention(MHA). Default 8
        num_encoder_layers (int, optional): The number of layers in encoder. Default 6
        num_decoder_layers (int, optional): The number of layers in decoder. Default 6
        dim_feedforward (int, optional): The hidden layer size in the feedforward network(FFN). Default 2048
        dropout (float, optional): The dropout probability used in pre-process
            and post-precess of MHA and FFN sub-layer. Default 0.1
        activation (str, optional): The activation function in the feedforward
            network. Default relu.
        attn_dropout (float, optional): The dropout probability used
            in MHA to drop some attention target. If None, use the value of
            `dropout`. Default None
        act_dropout (float, optional): The dropout probability used after FFN
            activition. If None, use the value of `dropout`. Default None
        normalize_before (bool, optional): Indicate whether to put layer normalization
            into preprocessing of MHA and FFN sub-layers. If True, pre-process is layer
            normalization and post-precess includes dropout, residual connection.
            Otherwise, no pre-process and post-precess includes dropout, residual
            connection, layer normalization. Default False
        weight_attr(ParamAttr|list|tuple, optional): To specify the weight parameter property.
            If it is a list/tuple, the length of `weight_attr` could be 1, 2 or 3. If it is 3,
            `weight_attr[0]` would be used as `weight_attr` for self attention, `weight_attr[1]`
            would be used as `weight_attr` for cross attention of `TransformerDecoder`,
            and `weight_attr[2]` would be used as `weight_attr` for linear in FFN.
            If it is 2, `weight_attr[0]` would be used as `weight_attr` both for self attention
            and cross attntion and `weight_attr[1]` would be used as `weight_attr` for
            linear in FFN. If it is 1, `weight_attr[0]` would be used as `weight_attr`
            for self attention, cross attention and linear in FFN. Otherwise,
            the three sub-layers all uses it as `weight_attr` to create parameters.
            Default: None, which means the default weight parameter property is used.
            See usage for details
            in :code:`ParamAttr` .
        bias_attr (ParamAttr|list|tuple|bool, optional): To specify the bias parameter property.
            If it is a list/tuple, the length of `bias_attr` could be 1, 2 or 3. If it is 3,
            `bias_attr[0]` would be used as `bias_attr` for self attention, `bias_attr[1]`
            would be used as `bias_attr` for cross attention of `TransformerDecoder`,
            and `bias_attr[2]` would be used as `bias_attr` for linear in FFN.
            If it is 2, `bias_attr[0]` would be used as `bias_attr` both for self attention
            and cross attntion and `bias_attr[1]` would be used as `bias_attr` for
            linear in FFN. If it is 1, `bias_attr[0]` would be used as `bias_attr`
            for self attention, cross attention and linear in FFN. Otherwise,
            the three sub-layers all uses it as `bias_attr` to create parameters.
            The `False` value means the corresponding layer would not have trainable
            bias parameter. See usage for details in :code:`ParamAttr` .
            Default: None,which means the default bias parameter property is used.
        custom_encoder (Layer, optional): If custom encoder is provided, use it as the encoder.
            Default None
        custom_decoder (Layer, optional): If custom decoder is provided, use it as the decoder.
            Default None

    Examples:
        .. code-block:: python

            import paddle
            from paddle.nn import Transformer
            # src: [batch_size, tgt_len, d_model]
            enc_input = paddle.rand((2, 4, 128))
            # tgt: [batch_size, src_len, d_model]
            dec_input = paddle.rand((2, 6, 128))
            # src_mask: [batch_size, n_head, src_len, src_len]
            enc_self_attn_mask = paddle.rand((2, 2, 4, 4))
            # tgt_mask: [batch_size, n_head, tgt_len, tgt_len]
            dec_self_attn_mask = paddle.rand((2, 2, 6, 6))
            # memory_mask: [batch_size, n_head, tgt_len, src_len]
            cross_attn_mask = paddle.rand((2, 2, 6, 4))
            transformer = Transformer(128, 2, 4, 4, 512)
            output = transformer(enc_input,
                                 dec_input,
                                 enc_self_attn_mask,
                                 dec_self_attn_mask,
                                 cross_attn_mask) # [2, 6, 128]
    """
    def __init__(self,
                 d_model=512,
                 nhead=8,
                 num_encoder_layers=6,
                 num_decoder_layers=6,
                 dim_feedforward=2048,
                 dropout=0.1,
                 activation="relu",
                 attn_dropout=None,
                 act_dropout=None,
                 normalize_before=False,
                 weight_attr=None,
                 bias_attr=None,
                 custom_encoder=None,
                 custom_decoder=None):
        # Fixed: was `super(fusedTransformer, self).__init__()` -- lowercase
        # 'f' referenced an undefined name and raised NameError on
        # construction instead of NotImplementedError.
        super(FusedTransformer, self).__init__()
        raise NotImplementedError()
    def forward(self, src, tgt, src_mask=None, tgt_mask=None, memory_mask=None):
        raise NotImplementedError()
| [
"noreply@github.com"
] | noreply@github.com |
f5792eceda5ffb1389886679e52cd15a6aa98aec | 8161e5820ff5ce6d780ff58b428b760559c68838 | /py_bing_search/httputil.py | 5bf4f083276f517a2481001f038230da07b7524b | [
"MIT"
] | permissive | trusty/py-bing-search | 02f8e4760e72d75ae33985a140344986e6e25efc | 9e65adb1dfb24969e3ba90bfb259fa07c85fce5a | refs/heads/master | 2021-01-15T13:05:57.868091 | 2016-06-30T07:58:12 | 2016-06-30T07:58:12 | 49,337,988 | 0 | 1 | null | 2016-06-30T07:58:12 | 2016-01-09T19:23:28 | Python | UTF-8 | Python | false | false | 373 | py | import requests
import threading
def get_requests_session():
    """Returns a new (or existing) requests.Session object for current thread"""
    # One Session per thread, lazily created and cached on the thread-local
    # storage attached to this function.
    tld = get_requests_session.tld
    session = getattr(tld, "requests_session", None)
    if session is None:
        session = tld.requests_session = requests.Session()
    return session

# Thread-local storage shared by all calls; each thread sees its own
# `requests_session` attribute.
get_requests_session.tld = threading.local()
"asj@vulcantechsoftware.com"
] | asj@vulcantechsoftware.com |
c421acb76a5856f072bae09257a60d9442f0928d | b7d435bbd9780eaaeb559c3969eb010506a10bdf | /dhis2_core/src/dhis2/core/metadata/models/system_info.py | f3d03bcca46fcd5203cafe4e403e1346bc5e650f | [
"BSD-3-Clause"
] | permissive | dhis2/dhis2-python | 29e3377e5bef52789b937a2337a69aa4619dd1d8 | d5ec976a5c04e6897756e3be14924ec74a4456fd | refs/heads/main | 2021-12-16T09:15:35.727187 | 2021-11-29T06:27:52 | 2021-11-29T06:27:52 | 308,135,669 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | from typing import Optional
from pydantic import BaseModel
class DatabaseInfo(BaseModel):
    """Database connection details embedded in a DHIS2 system-info payload.

    Field names mirror the camelCase keys of the source JSON; all fields
    are optional.
    """
    name: Optional[str]
    user: Optional[str]
    url: Optional[str]
    databaseVersion: Optional[str]
    spatialSupport: Optional[bool]
class SystemInfo(BaseModel):
    """DHIS2 system information model.

    Field names mirror the camelCase keys of the source JSON payload
    (presumably the DHIS2 system-info endpoint -- confirm against the API).
    Fixed: removed duplicate declarations of ``environmentVariable`` and
    ``isMetadataVersionEnabled`` -- repeating an identical annotation is
    redundant and has no effect.
    """
    contextPath: str
    userAgent: str
    calendar: str
    dateFormat: str
    serverDate: str
    lastAnalyticsTableSuccess: str
    intervalSinceLastAnalyticsTableSuccess: str
    lastAnalyticsTableRuntime: str
    lastSystemMonitoringSuccess: str
    version: str
    revision: str
    buildTime: str
    jasperReportsVersion: str
    environmentVariable: str
    readOnlyMode: Optional[str]
    nodeId: Optional[str]
    javaVersion: Optional[str]
    javaVendor: Optional[str]
    javaOpts: Optional[str]
    osName: Optional[str]
    osArchitecture: Optional[str]
    osVersion: Optional[str]
    externalDirectory: Optional[str]
    databaseInfo: Optional[DatabaseInfo]
    readReplicaCount: Optional[int]
    memoryInfo: Optional[str]
    cpuCores: Optional[int]
    encryption: bool
    emailConfigured: bool
    redisEnabled: bool
    systemId: str
    systemName: str
    instanceBaseUrl: str
    clusterHostname: str
    isMetadataVersionEnabled: bool
| [
"mortenoh@gmail.com"
] | mortenoh@gmail.com |
6b90cca26ffb65e7c70dea2a4bb236f579e6791d | 948ce56cc061db1cd67a9ce6aee1619604dce1fb | /Code_Kinder/elias_02.py | 2198dda1fe82d2056b19e6e552b3972ad221e481 | [] | no_license | tomobones/hort | 3bde6e61e9b47b65b8e06be903470d2d348878ee | 3c5ffe0f861221de738334f0919424be4f1d1e38 | refs/heads/master | 2023-05-29T18:07:33.967243 | 2021-06-21T14:26:28 | 2021-06-21T14:26:28 | 316,201,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | import turtle
# Draw a colorful spiral-like pattern on a navy background.
pen = turtle.Pen()
turtle.bgcolor('navy')

for step in range(999):
    # Leg that grows with the iteration counter (drawn in the previous
    # iteration's color).
    pen.forward(step)
    pen.left(200)
    # Recolor before each following leg: (color, distance, turn angle).
    for color, distance, angle in (('green', 100, 200),
                                   ('white', 100, 200),
                                   ('purple', 100, 200),
                                   ('cyan', step, 30),
                                   ('red', step, 2)):
        pen.pencolor(color)
        pen.forward(distance)
        pen.left(angle)
    pen.circle(100)
    # Hop forward without drawing.
    pen.up()
    pen.forward(123)
    pen.down()
    pen.pencolor('brown')
    pen.forward(220)
    pen.left(201)

# Keep the window open until it is clicked.
turtle.Screen().exitonclick()
| [
"thomas.vogg@posteo.de"
] | thomas.vogg@posteo.de |
31f85f215a9f769b8f6cf5f1c88dce4b0be8c037 | 4f0f411d8d9abe3d520a962d30da67959e6bff2f | /tests/sequence/test_phylo.py | d40dbd398c6fe4c8b0188a102157cb630725e1f8 | [
"BSD-3-Clause"
] | permissive | ajshedivy/biotite | 685f196416cc7be74d299a13f23947f85eb5825e | 15fe39de165aba6e8bd6376fa8f8ddf069718fb5 | refs/heads/master | 2023-08-24T14:45:25.239920 | 2021-10-06T14:32:40 | 2021-10-06T14:32:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,165 | py | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
from os.path import join
import numpy as np
import pytest
import biotite
import biotite.sequence.phylo as phylo
from ..util import data_dir
@pytest.fixture
def distances():
# Distances are based on the example
# "Dendrogram of the BLOSUM62 matrix"
# with the small modification M[i,j] += i+j
# to reduce ambiguity in the tree construction.
return np.loadtxt(join(data_dir("sequence"), "distances.txt"), dtype=int)
@pytest.fixture
def upgma_newick():
# Newick notation of the tree created from 'distances.txt',
# created via DendroUPGMA
with open(join(data_dir("sequence"), "newick_upgma.txt"), "r") as file:
newick = file.read().strip()
return newick
@pytest.fixture
def tree(distances):
return phylo.upgma(distances)
def test_upgma(tree, upgma_newick):
"""
Compare the results of `upgma()` with DendroUPGMA.
"""
ref_tree = phylo.Tree.from_newick(upgma_newick)
# Cannot apply direct tree equality assertion because the distance
# might not be exactly equal due to floating point rounding errors
for i in range(len(tree)):
for j in range(len(tree)):
# Check for equal distances and equal topologies
assert tree.get_distance(i,j) \
== pytest.approx(ref_tree.get_distance(i,j), abs=1e-3)
assert tree.get_distance(i,j, topological=True) \
== ref_tree.get_distance(i,j, topological=True)
def test_neighbor_joining():
"""
Compare the results of `neighbor_join()` with a known tree.
"""
dist = np.array([
[ 0, 5, 4, 7, 6, 8],
[ 5, 0, 7, 10, 9, 11],
[ 4, 7, 0, 7, 6, 8],
[ 7, 10, 7, 0, 5, 9],
[ 6, 9, 6, 5, 0, 8],
[ 8, 11, 8, 9, 8, 0],
])
ref_tree = phylo.Tree(phylo.TreeNode(
[
phylo.TreeNode(
[
phylo.TreeNode(
[
phylo.TreeNode(index=0),
phylo.TreeNode(index=1),
],
[1,4]
),
phylo.TreeNode(index=2),
],
[1, 2]
),
phylo.TreeNode(
[
phylo.TreeNode(index=3),
phylo.TreeNode(index=4),
],
[3,2]
),
phylo.TreeNode(index=5),
],
[1,1,5]
))
test_tree = phylo.neighbor_joining(dist)
assert test_tree == ref_tree
def test_node_distance(tree):
"""
Test whether the `distance_to()` and `lowest_common_ancestor()` work
correctly.
"""
# Tree is created via UPGMA
# -> The distances to root should be equal for all leaf nodes
dist = tree.root.distance_to(tree.leaves[0])
for leaf in tree.leaves:
assert leaf.distance_to(tree.root) == dist
# Example topological distances
assert tree.get_distance(0, 19, True) == 9
assert tree.get_distance(4, 2, True) == 10
# All pairwise leaf node distances should be sufficient
# to reconstruct the same tree via UPGMA
ref_dist_mat = np.zeros((len(tree), len(tree)))
for i in range(len(tree)):
for j in range(len(tree)):
ref_dist_mat[i,j] = tree.get_distance(i,j)
assert np.allclose(ref_dist_mat, ref_dist_mat.T)
new_tree = phylo.upgma(ref_dist_mat)
test_dist_mat = np.zeros((len(tree), len(tree)))
for i in range(len(tree)):
for j in range(len(tree)):
test_dist_mat[i,j] = new_tree.get_distance(i,j)
assert np.allclose(test_dist_mat, ref_dist_mat)
def test_leaf_list(tree):
for i, leaf in enumerate(tree.leaves):
assert i == leaf.index
def test_distances(tree):
# Tree is created via UPGMA
# -> The distances to root should be equal for all leaf nodes
dist = tree.root.distance_to(tree.leaves[0])
for leaf in tree.leaves:
assert leaf.distance_to(tree.root) == dist
# Example topological distances
assert tree.get_distance(0, 19, True) == 9
assert tree.get_distance(4, 2, True) == 10
def test_get_leaves(tree):
# Manual example cases
node = tree.leaves[6]
assert set(tree.leaves[6].parent.get_indices()) == set(
[6,11,2,3,13,8,14,5,0,15,16]
)
assert set(tree.leaves[10].get_indices()) == set([10])
assert tree.root.get_leaf_count() == 20
def test_copy(tree):
assert tree is not tree.copy()
assert tree == tree.copy()
def test_immutability():
node = phylo.TreeNode(index=0)
# Attributes are not writable
with pytest.raises(AttributeError):
node.children = None
with pytest.raises(AttributeError):
node.parent = None
with pytest.raises(AttributeError):
node.index = None
# A root node cannot be child
node1 = phylo.TreeNode(index=0)
node2 = phylo.TreeNode(index=1)
node1.as_root()
with pytest.raises(phylo.TreeError):
phylo.TreeNode([node1, node2], [0, 0])
# A child node cannot be root
node1 = phylo.TreeNode(index=0)
node2 = phylo.TreeNode(index=1)
phylo.TreeNode([node1, node2], [0, 0])
with pytest.raises(phylo.TreeError):
node1.as_root()
# A node cannot be child of a two nodes
node1 = phylo.TreeNode(index=0)
node2 = phylo.TreeNode(index=1)
phylo.TreeNode([node1, node2], [0, 0])
with pytest.raises(phylo.TreeError):
phylo.TreeNode([node1, node2], [0, 0])
# Tree cannot be constructed from child nodes
node1 = phylo.TreeNode(index=0)
node2 = phylo.TreeNode(index=0)
# node1 and node2 have now a parent
phylo.TreeNode([node1, node2], [0, 0])
with pytest.raises(phylo.TreeError):
phylo.Tree(node1)
@pytest.mark.parametrize("newick, labels, error", [
# Reference index out of range
("((1,0),4),2);", None, biotite.InvalidFileError),
# Empty string
("", None, biotite.InvalidFileError),
# Empty node
("();", None, biotite.InvalidFileError),
# Missing brackets
("((0,1,(2,3));", None, biotite.InvalidFileError),
# A node with three leaves
("((0,1),(2,3),(4,5));", None, None),
# A node with one leaf
("((0,1),(2,3),(4));", None, None),
# Named intermediate nodes
("((0,1,3)A,2)B;", None, None),
# Named intermediate nodes and distances
("((0:1.0,1:3.0,3:5.0)A:2.0,2:5.0)B;", None, None),
# Nodes with labels
("((((A,B),(C,D)),E),F);", ["A","B","C","D","E","F"], None),
# Nodes with labels and distances
("((((A:1,B:2),(C:3,D:4)),E:5),F:6);", ["A","B","C","D","E","F"], None),
# Newick with spaces
(" ( 0 : 1.0 , 1 : 3.0 ) A ; ", None, None),
])
def test_newick_simple(newick, labels, error):
# Read, write and read again a Newick notation and expect
# the same reult from both reads
if error is None:
tree1 = phylo.Tree.from_newick(newick, labels)
newick = tree1.to_newick(labels, include_distance=True)
tree2 = phylo.Tree.from_newick(newick, labels)
assert tree1 == tree2
else:
with pytest.raises(error):
tree1 = phylo.Tree.from_newick(newick, labels)
@pytest.mark.parametrize("use_labels", [False, True])
def test_newick_complex(upgma_newick, use_labels):
# Same as above with more complex string
if use_labels:
labels = [str(i) for i in range(20)]
else:
labels = None
tree1 = phylo.Tree.from_newick(upgma_newick, labels)
newick = tree1.to_newick(labels, include_distance=True)
tree2 = phylo.Tree.from_newick(newick, labels)
assert tree1 == tree2
@pytest.mark.parametrize("newick_in, exp_newick_out", [
("(0:1.0, 1:2.0);", "(0:1.0,1:2.0):0.0;" ),
("(0:1.0, 1:2.0, 2:3.0);", "((0:1.0,1:2.0):0.0,2:3.0):0.0;" ),
("(((0:1.0, 1:2.0):10.0):5.0, 2:8.0);", "((0:1.0,1:2.0):15.0,2:8.0):0.0;"),
("((0:1.0, 1:2.0):10.0):5.0;", "(0:1.0,1:2.0):0.0;" ),
])
def test_as_binary_cases(newick_in, exp_newick_out):
"""
Test the `as_binary()` function based on known cases.
"""
tree = phylo.Tree.from_newick(newick_in)
bin_tree = phylo.as_binary(tree)
assert bin_tree.to_newick() == exp_newick_out
def test_as_binary_distances():
"""
Test the preservation of all pairwise leaf distances after calling
`as_binary()`.
"""
# Some random newick
newick = "((((0:5, 1:1, 2:13, 5:9):4, (4:2, 6:9):7):18), 3:12);"
tree = phylo.Tree.from_newick(newick)
ref_dist_mat = np.zeros((len(tree), len(tree)))
for i in range(len(tree)):
for j in range(len(tree)):
ref_dist_mat[i,j] = tree.get_distance(i,j)
bin_tree = phylo.as_binary(tree)
test_dist_mat = np.zeros((len(tree), len(tree)))
for i in range(len(tree)):
for j in range(len(tree)):
test_dist_mat[i,j] = bin_tree.get_distance(i,j)
assert np.allclose(test_dist_mat, ref_dist_mat)
def test_equality(tree):
"""
Assert that equal trees equal each other, and non-equal trees do not
equal each other.
"""
assert tree == tree.copy()
# Order of children is not important
assert tree == phylo.Tree(phylo.TreeNode(
[tree.root.children[1].copy(), tree.root.children[0].copy()],
[tree.root.children[1].distance, tree.root.children[0].distance]
))
# Different distance -> Unequal tree
assert tree != phylo.Tree(phylo.TreeNode(
[tree.root.children[0].copy(), tree.root.children[1].copy()],
[tree.root.children[0].distance, 42]
))
# Additional node -> Unequal tree
assert tree != phylo.Tree(phylo.TreeNode(
[
tree.root.children[0].copy(),
tree.root.children[1].copy(),
phylo.TreeNode(index=len(tree))
],
[
tree.root.children[0].distance,
tree.root.children[1].distance,
42
]
))
| [
"patrick.kunzm@gmail.com"
] | patrick.kunzm@gmail.com |
9ff2f22cb931ef1b4b6f3de6cb5ba468dace744c | ae613a880eecf783ba23e7ca871f9e165ec2ce6e | /calculate_root.py | f6e918aef989a07665376a59101b386c993edc8e | [
"MIT"
] | permissive | h-mayorquin/capacity_code | 52d7e81026cd804677d5a5a6312b434bdff6ed32 | f885f0e409d3f9c54b8e20c902f7ef28584ca8a2 | refs/heads/master | 2020-08-28T00:30:14.760936 | 2020-01-31T17:26:29 | 2020-01-31T17:26:29 | 217,534,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | import warnings
import pickle
import pandas as pd
import numpy as np
import random
from math import ceil, floor
from copy import deepcopy
from functions import *
warnings.filterwarnings('ignore')
minicolumns = 10
hypercolumns = 5
sequence_length = 2
number_of_sequences = 20
pattern_seed = np.random.randint(0, 20)
desired_root = 0.9
verbose = True
n_patterns = 100
pairs = produce_pairs_with_constant_number_of_patterns(n_patterns)[3:-3]
# Format is hypercolumns, minicolumns, extra
pairs = [(3, 66, 0)]
# Do the calculations
for pair in pairs:
hypercolumns, minicolumns, extra = pair
print('hypercolumns', hypercolumns)
print('minicolumns', minicolumns)
print('extra', extra)
pattern_seed = np.random.randint(0, 20)
aux = find_root_empirical(desired_root, hypercolumns, minicolumns, sequence_length, pattern_seed, tolerance=0.01, verbose=verbose)
capacity, p_root, trials = aux
# Read
data_frame = pd.read_csv('../storage_capacity_data.csv', index_col=0)
# Write
data_frame = data_frame.append({'hypercolumns':hypercolumns, 'minicolumns':minicolumns, 'sequence_length':sequence_length,
'capacity':capacity, 'p_critical':p_root, 'trials':trials }, ignore_index=True)
# Store the data base
data_frame.to_csv('../storage_capacity_data.csv')
print('Stored')
print('================')
| [
"h.mayorquin@gmail.com"
] | h.mayorquin@gmail.com |
c1337933143e4be73f495569475dbf98d651bfac | f0b52a3ae5115b9a839d6bd3e765de83ecb21a28 | /Payload_Type/Apollo/mythic/agent_functions/net_localgroup_member.py | 6b2fad53fcf068ef12c142ebdcfed4c9d96d878c | [
"BSD-3-Clause",
"MIT"
] | permissive | bopin2020/Apollo | ad98f1cb872bd2134509df55ee67a79c51e6d316 | 7660439cbc8d4f18af2b564a5b7a0ac4f8f3765a | refs/heads/master | 2023-01-12T23:50:01.266984 | 2020-11-12T07:03:13 | 2020-11-12T07:03:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,699 | py | from CommandBase import *
import json
class NetLocalgroupMemberArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {
"computer": CommandParameter(name="computer", required=False, type=ParameterType.String, description="Computer to enumerate."),
"group": CommandParameter(name="group", type=ParameterType.String, description="Group to enumerate.")
}
def split_commandline(self):
if self.command_line[0] == "{":
raise Exception("split_commandline expected string, but got JSON object: " + self.command_line)
inQuotes = False
curCommand = ""
cmds = []
for x in range(len(self.command_line)):
c = self.command_line[x]
if c == '"' or c == "'":
inQuotes = not inQuotes
if (not inQuotes and c == ' '):
cmds.append(curCommand)
curCommand = ""
else:
curCommand += c
if curCommand != "":
cmds.append(curCommand)
for x in range(len(cmds)):
if cmds[x][0] == '"' and cmds[x][-1] == '"':
cmds[x] = cmds[x][1:-1]
elif cmds[x][0] == "'" and cmds[x][-1] == "'":
cmds[x] = cmds[x][1:-1]
return cmds
async def parse_arguments(self):
if self.command_line[0] == "{":
self.load_args_from_json_string(self.command_line)
else:
cmds = self.split_commandline()
if len(cmds) == 1:
self.add_arg("group", cmds[0])
elif len(cmds) == 2:
self.add_arg("computer", cmds[0])
self.add_arg("group", cmds[1])
else:
raise Exception("Expected one or two arguments, but got: {}".format(cmds))
class NetLocalgroupMemberCommand(CommandBase):
cmd = "net_localgroup_member"
needs_admin = False
help_cmd = "net_localgroup_member [computer] [group]"
description = "Retrieve local group membership of the group specified by [group]. If [computer] is omitted, defaults to localhost."
version = 1
is_exit = False
is_file_browse = False
is_process_list = False
is_download_file = False
is_upload_file = False
is_remove_file = False
author = "@djhohnstein"
argument_class = NetLocalgroupMemberArguments
attackmapping = []
browser_script = BrowserScript(script_name="net_localgroup_member", author="@djhohnstein")
async def create_tasking(self, task: MythicTask) -> MythicTask:
return task
async def process_response(self, response: AgentResponse):
pass | [
"djhohnstein@gmail.com"
] | djhohnstein@gmail.com |
1f354f1fd00dbff2a1beb175a30347ca857e0aa0 | 1f944a64873109fdc56627f5d82e8ac179f7ca83 | /inti_experiment.py | a1eaef52f47377bc58d162432c32edeb26cb7fe4 | [] | no_license | cqu-bdsc/probability-based-best-response-algorithm | 420fe8375f2be1fb84b79d5add625e150e33a318 | 82884e8f1af2c89c09ba25b6d823b64d97e47e02 | refs/heads/master | 2022-12-04T12:07:45.928258 | 2020-07-29T13:11:09 | 2020-07-29T13:11:09 | 285,524,488 | 0 | 1 | null | 2020-08-06T09:06:27 | 2020-08-06T09:06:26 | null | UTF-8 | Python | false | false | 5,880 | py | #!./venv python
# -*- encoding: utf-8 -*-
"""
@File : inti_experiment.py
@Contact : neard.ws@gmail.com
@Github : neardws
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2020/7/29 下午4:04 neardws 1.0 None
"""
from config import settings
import math
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plot
def get_fog_node(zone_length, communication_range):
"""
Args:
zone_length:
communication_range:
Returns:
"""
number = math.floor(zone_length / (2 * communication_range))
id = 0
fog_node = []
for i in range(number):
x = communication_range + i * 2 * communication_range
for j in range(number):
id += 1
y = communication_range + j * 2 * communication_range
fog_node.append({"id": id, "x": x, "y": y})
return fog_node
def get_vehicle_number_under_fog(fog_node, data_frame, communication_range):
"""
Args:
fog_node:
data_frame:
communication_range:
Returns:
"""
vehicle_number_under_fog = []
for node in fog_node:
vehicle_number = 0
node_id = node["id"]
node_x = node["x"]
node_y = node["y"]
x = data_frame["x"].tolist()
y = data_frame["y"].tolist()
vehicle_under_fog = []
for i in range(len(x)):
if np.sqrt(np.square(x[i] - node_x) + np.square(y[i] - node_y)) <= communication_range:
vehicle_number += 1
vehicle_under_fog.append({"x": x[i], "y": y[i]})
vehicle_number_under_fog.append({"node_id": node_id,
"vehicle_number": vehicle_number,
"vehicle_under_fog": vehicle_under_fog})
return vehicle_number_under_fog
def get_tasks_in_time_slots(fog_node, csv_file, time_slot, time_length, vehicle_task_number):
"""
Args:
fog_node:
csv_file:
time_slot:
time_length:
vehicle_task_number:
"""
df = pd.read_csv(csv_file)
time = []
fog_id = []
v_number = []
vehicles_under_fog = []
for i in range(1, time_length, time_slot):
df_second = df[df['time'] == i]
vehicle_number = get_vehicle_number_under_fog(fog_node,
df_second,
settings.communication_range)
for number in vehicle_number:
time.append(i)
fog_id.append(number["node_id"])
v_number.append(number["vehicle_number"])
vehicles_under_fog.append(number["vehicle_under_fog"])
init_df = pd.DataFrame({"time": time, "fog_id": fog_id, "vehicle_number": v_number, "vehicles": vehicles_under_fog})
init_df.to_csv(settings.init_csv_name, index=False)
task_fog_id = []
task_time = []
required_rate = []
required_sinr = []
task_x = []
task_y = []
for j in range(1, len(fog_node) + 1):
init_df_id = init_df[init_df["fog_id"] == j]
time = init_df_id["time"].tolist()
num = init_df_id["vehicle_number"].tolist()
vehicles = init_df_id["vehicles"]
for k in range(len(time)):
now_time = time[k]
now_vehicles = vehicles.tolist()[k]
for l in range(num[k]):
for n in range(vehicle_task_number):
task_required_rate = random.randint(settings.task_request_rate_min,
settings.task_request_rate_max)
task_required_sinr = random.randint(settings.task_request_SINR_min,
settings.task_request_SINR_max)
task_fog_id.append(j)
task_time.append(now_time)
required_rate.append(task_required_rate)
required_sinr.append(task_required_sinr)
vehicle = now_vehicles[l]
task_x.append(vehicle["x"])
task_y.append(vehicle["y"])
task_df = pd.DataFrame(
{"fog_id": task_fog_id, "time": task_time, "required_rate": required_rate, "required_sinr": required_sinr, "x": task_x, "y": task_y})
task_df.to_csv(settings.task_csv_name, index=False)
def draw_round(round_x, round_y, radius, width):
theta = np.arange(0, 2 * np.pi, 0.01)
x = round_x + radius * np.cos(theta)
y = round_y + radius * np.sin(theta)
plot.plot(x, y, color="gray", linestyle="--", linewidth=width)
def draw_fog_task_in_the_map(fog_node, time, zone_length, communication_range):
plot.xlim(0, zone_length)
plot.ylim(0, zone_length)
for node in fog_node:
node_x = node["x"]
node_y = node["y"]
plot.plot(int(node_x), int(node_y), color="black", marker="^", markersize=10, label="fog node")
draw_round(node_x, node_y, communication_range, 1)
df = pd.read_csv(settings.task_csv_name)
df = df[df["time"] == time]
task_x = df["x"].tolist()
task_y = df["y"].tolist()
for i in range(len(task_x)):
plot.plot(int(task_x[i]), int(task_y[i]), color="darkred", marker="o", label="task", markersize=3)
plot.show()
if __name__ == '__main__':
fog_node = get_fog_node(settings.zone_length, settings.communication_range)
# # print(node)
# get_tasks_in_time_slots(fog_node,
# settings.fill_xy_csv_name,
# settings.time_slot,
# settings.time_length,
# settings.vehicle_task_number)
draw_fog_task_in_the_map(fog_node=fog_node, time=1, zone_length=settings.zone_length, communication_range=settings.communication_range)
| [
"singleangel@vip.qq.com"
] | singleangel@vip.qq.com |
b06eb71cc9b0e6f69810dbb952c651d089030934 | 0983849ea9c59263bdf500764473f766bdafd5e4 | /auxiliary_functions.py | 0778df0b613f9e33df055cbaa6d7d4652762e0e6 | [] | no_license | YuenyongPhookrongnak/01_TradingSeries | 34e61c0aab368c35d1a0b383ec8a397bd9c29acd | a29d01b30e5906ff40cea0036bb34ea11d56128f | refs/heads/main | 2023-09-03T18:19:44.198538 | 2021-11-08T05:48:12 | 2021-11-08T05:48:12 | 428,158,778 | 1 | 0 | null | 2021-11-15T07:11:34 | 2021-11-15T07:11:34 | null | UTF-8 | Python | false | false | 3,313 | py | import pandas as pd
from binance.client import Client
from binance_keys import api_key, secret_key
from datetime import datetime, timedelta
import time
import math
from binance.exceptions import *
client = Client(api_key, secret_key,tld='us')
def truncate(number, decimals=0):
"""
Returns a value truncated to a specific number of decimal places.
https://stackoverflow.com/questions/783897/how-to-truncate-float-values
credit: nullstellensatz
"""
if not isinstance(decimals, int):
raise TypeError("decimal places must be an integer.")
elif decimals < 0:
raise ValueError("decimal places has to be 0 or more.")
elif decimals == 0:
return math.trunc(number)
factor = 10.0 ** decimals
return math.trunc(number * factor) / factor
def sma(data, window):
return(data.rolling(window = window).mean())
def bollinger_band(data, sma, window, nstd):
std = data.rolling(window = window).std()
upper_band = sma + std * nstd
lower_band = sma - std * nstd
return upper_band, lower_band
def gather_data(symbols,start_n_hours_ago):
merge = False
for symbol in symbols:
klines = client.get_historical_klines(symbol=f'{symbol}USDT',
interval=client.KLINE_INTERVAL_1HOUR,
start_str=str(datetime.now()-timedelta(hours=start_n_hours_ago)))
cols = ['OpenTime',
f'{symbol}-USD_Open',
f'{symbol}-USD_High',
f'{symbol}-USD_Low',
f'{symbol}-USD_Close',
f'{symbol}-USD_volume',
'CloseTime',
f'{symbol}-QuoteAssetVolume',
f'{symbol}-NumberOfTrades',
f'{symbol}-TBBAV',
f'{symbol}-TBQAV',
f'{symbol}-ignore']
df = pd.DataFrame(klines,columns=cols)
if merge == True:
dfs = pd.merge(df,dfs,how='inner',on=['OpenTime','CloseTime'])
else:
dfs = df
merge = True
dfs['OpenTime'] = [datetime.fromtimestamp(ts / 1000) for ts in dfs['OpenTime']]
dfs['CloseTime'] = [datetime.fromtimestamp(ts / 1000) for ts in dfs['CloseTime']]
for col in dfs.columns:
if not 'Time' in col:
dfs[col] = dfs[col].astype(float)
for symbol in symbols:
dfs[f'{symbol}_sma'] = sma(dfs[f'{symbol}-USD_Close'],window=20)
dfs[f'{symbol}_upper_band'], dfs[f'{symbol}_lower_band'] = bollinger_band(data=dfs[f'{symbol}-USD_Close'],
sma=dfs[f'{symbol}_sma'],
window=20,
nstd=3)
dfs.dropna(inplace=True)
return dfs
def get_states(df, symbols):
states = {}
for symbol in symbols:
if df[f'{symbol}-USD_Close'].iloc[-1] < df[f'{symbol}_lower_band'].iloc[-1]:
states[symbol] = 'below'
elif df[f'{symbol}-USD_Close'].iloc[-1] > df[f'{symbol}_upper_band'].iloc[-1]:
states[symbol] = 'above'
else:
states[symbol] = 'inside'
return states | [
"leeschmalz@gmail.com"
] | leeschmalz@gmail.com |
138d463273707cc372256d3eec3ca9c5d5fe1e11 | 590ca008f00869276f29ac6d3e1da6d82778584b | /api/migrations/0029_auto_20200914_0235.py | 4a0c74b45fc6b6bf5a3ddb60b84a2c1120fb68ac | [] | no_license | sali73/Donuts_App | 92b1226cf97968481c7be15588204c3aee6151e3 | 75911b8523cbe0792c7550466f1cedeba7ae01a6 | refs/heads/master | 2023-01-13T07:43:25.681619 | 2020-11-26T01:51:39 | 2020-11-26T01:51:39 | 293,178,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | # Generated by Django 3.0.7 on 2020-09-14 02:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0028_auto_20200914_0231'),
]
operations = [
migrations.AlterField(
model_name='product',
name='qty',
field=models.IntegerField(default='0'),
),
]
| [
"sally.elgendy@hotmail.com"
] | sally.elgendy@hotmail.com |
7c12f5d4d292eebf97006f379c6face0c50cdb10 | 6bf00ba9925e4c846f4f3bdb1bd39f02461d4bcc | /SISR_bachelor_project/SRCNN/test.py | fc4e756f4b11d2d9ce0214a6044c47fa495d7fb5 | [] | no_license | teouw/SISR_bachelor_project | b898172683db706dcf9d139ebe7d1e3ab7e43cb1 | 47aed6300d4c52b0574c552b21f0efcfd29d6f2c | refs/heads/master | 2023-07-12T12:43:48.530067 | 2021-08-27T18:06:16 | 2021-08-27T18:06:16 | 398,080,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py | import os
from model import SRCNN
from lib import *
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
scale = 3
c_dim = 3
dirname = './output/'
image_size = None
if __name__ == '__main__':
#create the srcnn model
srcnn = SRCNN(
image_size=image_size,
c_dim=c_dim,
is_training=False)
#load the testing data
X_pre_test, X_test, Y_test = load_test(scale=scale, dim=c_dim)
predicted_list = []
for cnt, img in enumerate(X_test):
start_time = time.time()
if c_dim == 3:
predicted = srcnn.process(img.reshape(1,img.shape[0],img.shape[1],3))
predicted_reshaped = predicted.reshape(predicted.shape[1],predicted.shape[2],3)
else:
predicted = srcnn.process(img.reshape(1,img.shape[0],img.shape[1],1))
predicted_reshaped = predicted.reshape(predicted.shape[1],predicted.shape[2],1)
name = 'image{:02}'.format(cnt)
cv2.imwrite(os.path.join(dirname,name+'_input_image.bmp'), X_test[cnt])
cv2.imwrite(os.path.join(dirname,name+'_original_image.bmp'), Y_test[cnt])
cv2.imwrite(os.path.join(dirname,name+'_predicted_image.bmp'), predicted_reshaped)
| [
"teo.kaltrachian@etu.hesge.ch"
] | teo.kaltrachian@etu.hesge.ch |
d24bb4698a8a5f9fed82640a66867981f43abc05 | 037b8c568181045fc33ba0922d2b3831ed5736d5 | /scripts/rdf/github.py | db2c1f842a000c66976098132037e48ba507d3e4 | [
"Apache-2.0"
] | permissive | avullo/ensembl-production | b20aaec17f66e997f37500062021a0be46448bc0 | 4c1b898ecdb7803916a5b79a406c3707669f3400 | refs/heads/master | 2020-04-05T00:11:45.591933 | 2018-11-01T10:56:53 | 2018-11-01T10:56:53 | 156,387,224 | 0 | 3 | Apache-2.0 | 2018-12-24T14:04:02 | 2018-11-06T13:23:41 | Perl | UTF-8 | Python | false | false | 2,304 | py | #!/usr/bin/env python
"""Module providing a function which allows to push a project (i.e. Ensembl, EnsemblGenomes)
VOID file to the respective branch in the EBI SPOT GitHub repository."""
import requests
import base64
import json
import datetime
def push_to_repo_branch(gitHubFileName, fileName, branch, user, token):
message = "Automated update " + str(datetime.datetime.now())
path = "https://api.github.com/repos/EBISPOT/RDF-platform/branches/%s" % branch
r = requests.get(path, auth=(user,token))
if not r.ok:
print("Error when retrieving branch info from %s" % path)
print("Reason: %s [%d]" % (r.text, r.status_code))
raise
rjson = r.json()
treeurl = rjson['commit']['commit']['tree']['url']
r2 = requests.get(treeurl, auth=(user,token))
if not r2.ok:
print("Error when retrieving commit tree from %s" % treeurl)
print("Reason: %s [%d]" % (r2.text, r2.status_code))
raise
r2json = r2.json()
sha = None
for file in r2json['tree']:
# Found file, get the sha code
if file['path'] == gitHubFileName:
sha = file['sha']
# if sha is None after the for loop, we did not find the file name!
if sha is None:
print "Could not find " + gitHubFileName + " in repos 'tree' "
raise Exception
with open(fileName) as data:
content = base64.b64encode(data.read())
# gathered all the data, now let's push
inputdata = {}
inputdata["path"] = gitHubFileName
inputdata["branch"] = branch
inputdata["message"] = message
inputdata["content"] = content
if sha:
inputdata["sha"] = str(sha)
updateURL = "https://api.github.com/repos/EBISPOT/RDF-platform/contents/" + gitHubFileName
try:
rPut = requests.put(updateURL, auth=(user,token), data = json.dumps(inputdata))
if not rPut.ok:
print("Error when pushing to %s" % updateURL)
print("Reason: %s [%d]" % (rPut.text, rPut.status_code))
raise Exception
except requests.exceptions.RequestException as e:
print 'Something went wrong! I will print all the information that is available so you can figure out what happend!'
print rPut
print rPut.headers
print rPut.text
print e
| [
"avullo@ebi.ac.uk"
] | avullo@ebi.ac.uk |
4cecb1fdea73ca9f39f2bdf440f6840a5f57c2f2 | 7aebfaec6957ad67523f1d8851856af88fb997a6 | /catkin_ws/build/robotiq/robotiq_3f_gripper_control/catkin_generated/pkg.develspace.context.pc.py | 472d6f1b90073d4764f3841493bb27694a2f8bfa | [] | no_license | k-makihara/ROS | 918e79e521999085ab628b6bf27ec28a51a8ab87 | 45b60e0488a5ff1e3d8f1ca09bfd191dbf8c0508 | refs/heads/master | 2023-01-28T06:00:55.943392 | 2020-11-26T05:27:16 | 2020-11-26T05:27:16 | 316,127,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/mslab/catkin_ws/devel/include".split(';') if "/home/mslab/catkin_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "controller_manager;diagnostic_updater;dynamic_reconfigure;hardware_interface;robotiq_ethercat;roscpp;rospy;socketcan_interface;std_srvs;robotiq_3f_gripper_articulated_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lrobotiq_3f_gripper_control".split(';') if "-lrobotiq_3f_gripper_control" != "" else []
PROJECT_NAME = "robotiq_3f_gripper_control"
PROJECT_SPACE_DIR = "/home/mslab/catkin_ws/devel"
PROJECT_VERSION = "1.0.0"
| [
"makihara@ms.esys.tsukuba.ac.jp"
] | makihara@ms.esys.tsukuba.ac.jp |
9a6cfcf00b74e17e98d8ab6cc2bd547aeb55651e | 73d67a4fa7991ba429506c2da9e5712e1bc80963 | /Curso-em-video-Python3-mundo3/ex095.py | 2f40d7f2efe3244df50bd1c52d9cfdd58e7c4c18 | [
"MIT"
] | permissive | bernardombraga/Solucoes-exercicios-cursos-gratuitos | 454f9926c42989b2f513f877b1454b38fe621faf | 0347a8325443fce84e0a753c96f523a22858537b | refs/heads/main | 2023-01-19T07:23:10.258846 | 2020-12-01T01:13:27 | 2020-12-01T01:13:27 | 308,421,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | time = []
jogador = {}
while True:
jogador['nome'] = str(input('Nome: ')).strip().capitalize()
nPartidas = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
jogador['gols'] = []
for i in range(0, nPartidas):
nGols = int(input(f' Quantos gols na partida {i+1}? '))
jogador['gols'].append(nGols)
jogador['total'] = sum(jogador['gols'])
time.append(jogador.copy())
jogador.clear()
while True:
resp = str(input('Quer continuar? [S/N] ')).strip().upper()[0]
if resp in 'SN':
break
print('ERRO! Responda apenas S ou N.')
if resp == 'N':
break
print('-=' * 30)
print('-' * 40)
print('{:<3} {:<13} {:<15} {:<5}'.format("cod", "nome", "gols", "total"))
print('-' * 40)
for k, j in enumerate(time):
print('{:>3} {:<13} {:<15} {:<5}'.format(k, j['nome'], str(j['gols']), j['total']))
print('-' * 40)
while True:
cod = int(input('Mostrar dados de qual jogador? (999 para parar) '))
if cod == 999:
break
if cod > len(time) - 1:
print('ERRO! Não existe jogador com o código {}!'.format(cod))
else:
print(' -- LEVANTAMENTO DO JOGADOR {}'.format(time[cod]['nome']))
for k, v in enumerate(time[cod]['gols']):
print(f' No jogo {k+1} fez {v} gols.')
print('-' * 40)
print('<< VOLTE SEMPRE >>')
| [
"73315353+bernardombraga@users.noreply.github.com"
] | 73315353+bernardombraga@users.noreply.github.com |
db3a22f33075fc3cfdd60234dff8c37f23410c54 | 6026aa86f6854f8ed1c81687408a78a07279b696 | /script/PCA.py | 16cf373a3208780d25cf81a3a1a6701346d0605c | [] | no_license | Zchristian955/Telecommunication_Challenge | db3adeeb1404963fd50c3f0b24ad9c3de856dd35 | 5e99188e53d1e1e6faccebdff8657c122ca500a3 | refs/heads/main | 2023-08-16T10:39:04.293660 | 2021-09-16T23:19:52 | 2021-09-16T23:19:52 | 386,347,978 | 0 | 0 | null | 2021-09-16T21:44:02 | 2021-07-15T15:59:55 | Jupyter Notebook | UTF-8 | Python | false | false | 1,759 | py |
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
pd.set_option('max_column', None)
df = pd.read_csv("df", na_values=['?', None])
from sklearn.preprocessing import scale #data scaling
x = scale(df)
#PCA
from sklearn import decomposition #PCA
pca= decomposition.PCA(n_components=3)
pca.fit(x)
#import pandas as pd
scores= pca.transform(x)
scores_df =pd.DataFrame(scores,columns=['PC1','PC2','PC3'])
scores_df
#Retrieve loading value
loadings=pca.components_.T
df_loadings=pd.DataFrame(loadings,columns=['PC1','PC2','PC3'],index=df.columns)
df_loadings
#Culmulative variance
explained_variance =pca.explained_variance_ratio_
explained_variance
explained_variance =np.insert(explained_variance,0,0)
cumulative_variance = np.cumsum(np.round(explained_variance,decimals=3))
pc_df=pd.DataFrame(['','PC1','PC2','PC3'],columns=['PC'])
explained_variance_df=pd.DataFrame(explained_variance,columns=['Explained Variance'])
cumulative_variance_df=pd.DataFrame(cumulative_variance,columns=['Cumulative Variance'])
df_explained_variance= pd.concat([pc_df,explained_variance_df,cumulative_variance_df],axis=1)
df_explained_variance
#Make plot
def plot_bar(df:pd.DataFrame, x_col:str, y_col:str, title:str, xlabel:str, ylabel:str)->None:
plt.figure(figsize=(12, 7))
sns.barplot(data = df, x=x_col, y=y_col)
plt.title(title, size=20)
plt.xticks(rotation=75, fontsize=14)
plt.yticks( fontsize=14)
plt.xlabel(xlabel, fontsize=16)
plt.ylabel(ylabel, fontsize=16)
plt.show()
plot_bar(df_explained_variance,'PC','Explained Variance','Explained Variance','PC','Explained Variance
#3D plot
import plotly.express as px
fig=px.scatter_3d(scores_df,x='PC1',y='PC2',z='PC3')
fig.show() | [
"zchristian955@gmail.com"
] | zchristian955@gmail.com |
27889d659fe45c001298a48e6f24daf50af2c223 | 66871d8baaabcb3bebbf32f5c4207e6eadef9d81 | /instance/config.py | dfb67f53cb5ae44213709b851a2d9b257b29363c | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Roney-juma/News_app | 2f19c1b5458de703bf1ca2cbd4b0bf2656fac8e5 | 5d0e1a6de2d1cb50759b4ab2f51a397e640a9b35 | refs/heads/master | 2023-05-29T15:56:26.165348 | 2021-06-09T11:52:47 | 2021-06-09T11:52:47 | 374,165,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | NEWS_API_KEY = '384e999ffb31459da519c72d9c8f6ba2'
SECRET_KEY = '384e999ffb31459da519c72d9c8f6ba2' | [
"ochirone10@gmail.com"
] | ochirone10@gmail.com |
b1b669b81b3b3af232520b452ae2ec8d589ea9bc | 2104153bfc138b7e677aba0da9da376c38b7dcd0 | /apps/extensions/db.py | cfe285977011e170da1d102d9769b0ee4dfad351 | [] | no_license | lucassimon/flask-api-users | 992c75491cf577f74649097e49a04fbc74ea50e1 | 510443d481486f09a09398a5225ace82320477f3 | refs/heads/master | 2023-06-09T16:17:39.873516 | 2023-06-05T04:24:56 | 2023-06-05T04:24:56 | 137,128,845 | 22 | 16 | null | 2018-11-20T18:00:53 | 2018-06-12T21:18:41 | Python | UTF-8 | Python | false | false | 71 | py | # Third
from flask_mongoengine import MongoEngine
db = MongoEngine()
| [
"lucassrod@gmail.com"
] | lucassrod@gmail.com |
0b132d3ff7fa0c5fde1128588d8259239264b31d | 50575fd3465ac749d520036cdfecf1cebdfcd414 | /metaflow/metaflow/plugins/conda/__init__.py | 2535ea25ad99d2c8fa6605f679a5425d04f97807 | [
"Apache-2.0"
] | permissive | UesleiJf/Netflix-Metaflow | 5fb442ed2645ee626bd4c27a320587fac8ab31e7 | 2c164afbf875b5d63c030f6d4cab15c54bcf646b | refs/heads/master | 2023-01-04T16:58:09.372588 | 2020-01-19T19:16:51 | 2020-01-19T19:16:51 | 227,640,463 | 2 | 1 | Apache-2.0 | 2022-12-21T02:41:29 | 2019-12-12T15:42:04 | Python | UTF-8 | Python | false | false | 1,210 | py | import errno
import os
import json
import fcntl
# Name of the per-flow conda dependency manifest file.
CONDA_MAGIC_FILE = 'conda.dependencies'
def get_conda_manifest_path(ds_root, flow_name):
    """Return the path of the conda manifest for *flow_name* under *ds_root*."""
    parts = (ds_root, flow_name, CONDA_MAGIC_FILE)
    return os.path.join(*parts)
def read_conda_manifest(ds_root, flow_name):
    """Load the JSON conda manifest for *flow_name*; return {} if absent or empty."""
    manifest = get_conda_manifest_path(ds_root, flow_name)
    if not (os.path.exists(manifest) and os.path.getsize(manifest) > 0):
        return {}
    with open(manifest) as handle:
        return json.load(handle)
def write_to_conda_manifest(ds_root, flow_name, key, value):
    """Record key -> value in the flow's JSON conda manifest.

    An exclusive flock serializes concurrent writers so parallel tasks do not
    clobber each other's entries.
    """
    path = get_conda_manifest_path(ds_root, flow_name)
    try:
        os.makedirs(os.path.dirname(path))
    except OSError as x:
        # Directory already exists; any other makedirs failure is fatal.
        if x.errno != errno.EEXIST:
            raise
    # O_RDWR|O_CREAT guarantees the file exists before locking; 'r+' keeps contents.
    with os.fdopen(os.open(path, os.O_RDWR | os.O_CREAT), 'r+') as f:
        try:
            fcntl.flock(f, fcntl.LOCK_EX)
            data = {}
            if os.path.getsize(path) > 0:
                f.seek(0)
                data = json.load(f)
            data[key] = value
            # Rewrite in place: seek to start, dump, then drop any stale tail.
            f.seek(0)
            json.dump(data, f)
            f.truncate()
        except IOError as e:
            if e.errno != errno.EAGAIN:
                raise
        finally:
            fcntl.flock(f, fcntl.LOCK_UN)
"uesleijf@gmail.com"
] | uesleijf@gmail.com |
109e032b250691b3bf5f5ea34a9982e509cbd868 | 3d9825900eb1546de8ad5d13cae893eb0d6a9b14 | /AutoWorkup/SEMTools/setup.py | 94c4428d8e6622b3e676c76f782d87775210107d | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | rtkarcher/BRAINSTools | 20d69f96e6d5ca92adaeb06aa4fe6556b5e7b268 | 961135366450400409cece431423ed480855d34c | refs/heads/master | 2021-01-15T08:53:48.961607 | 2013-06-26T19:09:34 | 2013-06-26T19:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the SEMTools package."""
    from numpy.distutils.misc_util import Configuration
    config = Configuration('SEMTools', parent_package, top_path)
    # Each subpackage ships its data files alongside the modules.
    for data_dir in ('diffusion', 'segmentation', 'filtering', 'brains',
                     'utilities', 'registration', 'utility'):
        config.add_data_dir(data_dir)
    return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| [
"hans-johnson@uiowa.edu"
] | hans-johnson@uiowa.edu |
30bcffde3d4ff50bd5b5f4ea7c1cec217c2fc340 | 7b73c2ea792f4b00add3391040cd67cd4a11cc4c | /admin_lalo/clients/forms.py | e1ab8ae6e34eddb3463b2969c128d99a10435ac6 | [] | no_license | LaloAR/Administrador-con-DJANGO | bffd18d77875c7d378bac7bf074c571148f2018d | 3940d232b5aaf3f523338ed47677606acf9f54c2 | refs/heads/master | 2020-03-19T23:38:29.866643 | 2018-06-11T16:20:20 | 2018-06-11T16:20:20 | 136,948,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,940 | py | # -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth.models import User
from .models import Client
from .models import SocialNetwork
'''
Constants
'''
# Validation messages (Spanish UI copy) shared by the user forms below.
ERROR_MESSAGE_USER = {'required' : 'El username es requerido', 'unique' : 'El username ya se encuentra registrado', 'invalid' : 'Ingresa un username válido'}
ERROR_MESSAGE_PASSWORD = {'required' : 'El password es requerido'}
ERROR_MESSAGE_EMAIL = {'required' : 'El email es requerido', 'invalid' : 'Ingresa un correo válido'}
'''
Functions
'''
def must_be_gt(value_password):
    """Field validator: reject passwords shorter than 5 characters."""
    if len(value_password) >= 5:
        return
    raise forms.ValidationError('El password debe contener al menos 5 caracteres')
'''
Class
'''
# Django forms are declared as classes.
class LoginUserForm(forms.Form):
    """Plain login form: username plus a masked password field."""
    username = forms.CharField(max_length=20)
    password = forms.CharField(max_length=20, widget=forms.PasswordInput())
    def __init__(self, *args, **kwargs):
        super(LoginUserForm, self).__init__(*args, **kwargs)
        # Tag the rendered widgets with the ids/classes the login CSS expects.
        self.fields['username'].widget.attrs.update( {'id': 'username_login', 'class': 'input_login' } )
        self.fields['password'].widget.attrs.update( {'id': 'password_login', 'class': 'input_login' } )
# Model-backed (ModelForm) sign-up form.
class CreateUserForm(forms.ModelForm):
    """Registration form backed by Django's User model."""
    username = forms.CharField(max_length=20, error_messages = ERROR_MESSAGE_USER)
    password = forms.CharField(max_length=20, widget=forms.PasswordInput(), error_messages = ERROR_MESSAGE_PASSWORD)
    email = forms.CharField(error_messages = ERROR_MESSAGE_EMAIL)
    def __init__(self, *args, **kwargs):
        super(CreateUserForm, self).__init__(*args, **kwargs)
        # Ids used by the registration template/CSS.
        self.fields['username'].widget.attrs.update( {'id': 'username_create' } )
        self.fields['password'].widget.attrs.update( {'id': 'password_create' } )
        self.fields['email'].widget.attrs.update( {'id': 'email_create' } )
    # Email validation: reject addresses already registered.
    def clean_email(self):
        email = self.cleaned_data.get('email')
        if User.objects.filter(email = email).count():
            raise forms.ValidationError('El email ya existe')
        return email
    class Meta:
        model = User
        fields = ('username', 'password', 'email')
class EditUserForm(forms.ModelForm):
    """Account-details form; email uniqueness excludes the user being edited."""
    username = forms.CharField(max_length=20, error_messages = ERROR_MESSAGE_USER)
    email = forms.CharField(error_messages = ERROR_MESSAGE_EMAIL)
    first_name = forms.CharField(label = 'Nombre(s)',required=False)
    last_name = forms.CharField(label='Apellidos',required=False)
    class Meta:
        model = User
        fields = ('username', 'email', 'first_name', 'last_name')
    # Email validation: the address must be unique among *other* users.
    def clean_email(self):
        email = self.cleaned_data.get('email')
        if User.objects.filter(email = email).exclude(pk=self.instance.id).count():
            raise forms.ValidationError('El email ya existe')
        return email
class EditPasswordForm(forms.Form):
    """Password-change form: current password plus the new password entered twice.

    Per-field length validation is delegated to the must_be_gt validator;
    clean() performs the cross-field match check.
    """
    password = forms.CharField(max_length=20, widget=forms.PasswordInput())
    new_password = forms.CharField(max_length=20, widget=forms.PasswordInput(), validators=[must_be_gt])
    repeat_password = forms.CharField(max_length=20, widget=forms.PasswordInput(), validators=[must_be_gt])
    def clean(self):
        """Cross-field check: both new-password entries must match."""
        clean_data = super(EditPasswordForm,self).clean()
        password1 = clean_data.get('new_password')
        password2 = clean_data.get('repeat_password')
        if password1 != password2:
            raise forms.ValidationError('La contraseña no coincide')
        # Django's documented contract is for clean() to return the cleaned data.
        return clean_data
class EditClientForm(forms.ModelForm):
    """Profile form for the extra Client fields (current job, biography)."""
    job = forms.CharField(label = "Trabajo actual", required=False)
    bio = forms.CharField(label = "Biografía", widget=forms.Textarea, required=False)
    class Meta:
        model = Client
        # The linked auth user is managed elsewhere; expose every other field.
        exclude = ['user']
class EditClientSocial(forms.ModelForm):
    """Form for a client's social-network links; the owning user is set elsewhere."""
    class Meta:
        model = SocialNetwork
        exclude = ['user']
"eduardo.alcantara.rios@outlook.com"
] | eduardo.alcantara.rios@outlook.com |
766788054d2e78f8f56b6d0304c365f753c5d19a | 1dc93ee1beead5930e8c857fc9edede92854e589 | /pa6/ga.py | 1864dec99ac615ee02d2bfc75eebc76d890b3d3b | [] | no_license | hanette/CMPM-146 | fd78674efdb59207fe46fdef83cf76f7cb2a13c2 | c972c0e330d3776c46c428ceb665cfebdccddcfe | refs/heads/main | 2022-12-21T01:21:26.438757 | 2020-10-07T05:53:34 | 2020-10-07T05:53:34 | 301,936,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,331 | py | import copy
import heapq
import metrics
import multiprocessing.pool as mpool
import os
import random
import shutil
import time
import math
# Level dimensions, in tiles.
width = 200
height = 16
# Tile alphabet the genome may generate; the entries commented out below are
# placed by hand (Mario start, flag, flagpole) and must never be generated.
options = [
    "-", # an empty space
    "X", # a solid wall
    "?", # a question mark block with a coin
    "M", # a question mark block with a mushroom
    "B", # a breakable block
    "o", # a coin
    "|", # a pipe segment
    "T", # a pipe top
    "E", # an enemy
    #"f", # a flag, do not generate
    #"v", # a flagpole, do not generate
    #"m" # mario's start position, do not generate
]
# The level as a grid of tiles
class Individual_Grid(object):
    """GA individual whose genome is the full height x width grid of tiles."""
    __slots__ = ["genome", "_fitness"]
    def __init__(self, genome):
        # Deep-copy so later in-place mutation never aliases another individual.
        self.genome = copy.deepcopy(genome)
        self._fitness = None
    # Update this individual's estimate of its fitness.
    # This can be expensive so we do it once and then cache the result.
    def calculate_fitness(self):
        measurements = metrics.metrics(self.to_level())
        # Print out the possible measurements or look at the implementation of metrics.py for other keys:
        # print(measurements.keys())
        # Weighted sum of selected level metrics; solvability dominates.
        # STUDENT Modify this, and possibly add more metrics. You can replace this with whatever code you like.
        coefficients = dict(
            meaningfulJumpVariance=.5,
            jumps=.5,
            leniency=.5,
            decorationPercentage=1,
            solvability=5,
        )
        self._fitness = sum(map(lambda m: coefficients[m] * measurements[m],
                                coefficients))
        return self
    # Return the cached fitness value or calculate it as needed.
    def fitness(self):
        if self._fitness is None:
            self.calculate_fitness()
        return self._fitness
    # Mutate a genome into a new genome. Note that this is a _genome_, not an individual!
    def mutate(self, genome):
        """Walk the grid bottom-up and probabilistically rewrite tiles in place:
        holes in the floor row, hills/pipes/enemies above ground, floating
        block runs mid-air, and stray coins. Returns the mutated genome."""
        # STUDENT also consider weighting the different tile types so it's not uniformly random
        # STUDENT consider putting more constraints on this to prevent pipes in the air, etc
        left = 1
        right = width - 1
        mutation_rate = 0.4
        for y in reversed(range(height)):
            hole = random.randint(2,3) # Gets 2 or 3
            pipe = random.randint(1,3) # Gets 1-3
            block = random.randint(3,4) # Get 3-4
            hole_check = False
            block_check = False
            for x in range(left, right):
                choice = random.random() # Gets a number between 0-1
                if x >= width-3: # ignore last column
                    pass
                elif y == height-1: # Floors or Holes ===
                    if hole_check:
                        if hole > 0:
                            genome[y][x] = "-"
                            hole -= 1
                        else:
                            hole_check = False
                            hole = random.randint(2,3) #Gets 2 or 3
                    elif choice < 0.05 and random.random() < mutation_rate: # Hole time: 5%
                        genome[y][x] = "-"
                        hole -= 1
                        hole_check = True
                else:
                    if genome[y+1][x] == "X" and random.random() < mutation_rate: # If it's above ground title
                        if genome [y+1][x-1] == "X": # If below on the left is ground
                            if choice < 0.3: # Hill: 30%
                                genome[y][x] = "X"
                            else:
                                if choice < .35: # Pipe Starter: 5%
                                    if genome[y+1][x+1] == "X": # Make sure pipe is on both floors
                                        genome[y][x] = "|"
                                elif choice < .45: # Enemy Spawner: 10%
                                    genome[y][x] = "E"
                                elif choice < .55: # Ground Spawner: 10%
                                    genome[y][x] = "X"
                        else:
                            if choice < .05: # Pipe Starter: 5%
                                genome[y][x] = "|"
                            elif choice < .25: # Enemy Spawner: 20%
                                genome[y][x] = "E"
                            elif choice > .8: # Ground Spawner: 20%
                                genome[y][x] = "X"
                    elif genome[y+1][x] == "|": # If below is pipe
                        if pipe > 0:
                            pipe -= 1
                            if choice < 0.5: # Close/Continue Pipe: 50%
                                genome[y][x] = "|"
                            else:
                                genome[y][x] = "T"
                                pipe = random.randint(1,3) #Gets 1-3
                        else:
                            genome[y][x] = "T"
                            pipe = random.randint(1,3) #Gets 1-3
                    elif y <= height-5 and y >= height-9 and genome[y+1][x] == "-" and genome[y+2][x] == "-" and genome[y+3][x] == "-": # If 3 below is air
                        if genome[y-1][x-1] == "-" and genome[y+1][x-1] == "-" and genome[y+2][x-1] == "-" and genome[y+3][x-1] == "-": # If 3 left-below is air
                            if block_check:
                                if block > 0:
                                    # NOTE(review): the first test below is a bare `if`,
                                    # not `elif`, so choice < .33 writes "?" and is then
                                    # immediately overwritten by the .46 branch — confirm
                                    # whether `elif` was intended.
                                    if choice < .33:
                                        genome[y][x] = "?"
                                    if choice < .46:
                                        genome[y][x] = "M"
                                    else:
                                        genome[y][x] = "B"
                                    block -= 1
                                else:
                                    block_check = False
                                    block = random.randint(3,4) # Gets 3 or 4
                            elif choice < 0.03 and random.random() < mutation_rate: # Block run start: 3%
                                # NOTE(review): same bare-`if` chaining as above.
                                if choice < .01:
                                    genome[y][x] = "M"
                                if choice < .02:
                                    genome[y][x] = "?"
                                else:
                                    genome[y][x] = "B"
                                block_check = True
                        else:
                            if choice < .05 and random.random() < mutation_rate:
                                genome[y][x] = "o"
        return genome
    # Create zero or more children from self and other
    def generate_children(self, other):
        """Three-point column crossover with *other*, then mutate.

        NOTE(review): new_genome2 (the complementary child) is built but never
        returned, and despite the comment near the return only a single
        Individual — not a tuple — is produced; confirm whether two children
        were intended.
        """
        new_genome = copy.deepcopy(self.genome)
        new_genome2 = copy.deepcopy(self.genome)
        left = 1
        right = width - 1
        # Leaving first and last columns alone...
        # do crossover with other
        # multi point crossover - 3 points
        # list containing all the points
        points = []
        # first point
        point1 = random.randint(left, right)
        points.append(point1)
        # second point
        point2 = random.randint(left, right)
        points.append(point2)
        # makes sure we don't grab the same point
        while point1 == point2:
            points.remove(point2)
            point2 = random.randint(left, right)
            points.append(point2)
        # third point
        point3 = random.randint(left, right)
        points.append(point3)
        # makes sure we don't grab the same point
        while point1 == point3 or point2 == point3:
            points.remove(point3)
            point3 = random.randint(left, right)
            points.append(point3)
        points.sort()
        for y in range(height):
            for x in range(left, right):
                # takes self because genome is copied from self
                # from left to point1, new_genome stays the same and new_genome2 takes other
                if x in range(left, points[0]):
                    new_genome2[y][x] = other.genome[y][x]
                # from point1 to point2, new_genome takes other and new_genome2 stays the same
                elif x in range(points[0], points[1]):
                    new_genome[y][x] = other.genome[y][x]
                # from point2 to point3, new_genome stays the same and new_genome2 takes other
                elif x in range(points[1], points[2]):
                    new_genome2[y][x] = other.genome[y][x]
                # from point3 to right, new_genome takes other and new_genome2 stays the same
                elif x in range(points[2], right):
                    new_genome[y][x] = other.genome[y][x]
        # in the end, new_genome should go self, other, self, other
        # new_genome2 goes other, self, other, self
        # STUDENT consider putting more constraints on this to prevent pipes in the air, etc
        # do mutation; note we're returning a one-element tuple here
        # figure out how to return more than one child
        return (Individual_Grid(self.mutate(new_genome)))
    # Turn the genome into a level string (easy for this genome)
    def to_level(self):
        return self.genome
    # These both start with every floor tile filled with Xs
    # STUDENT Feel free to change these
    @classmethod
    def empty_individual(cls):
        # Flat floor, Mario start at the left, flag + pole at the right edge.
        g = [["-" for col in range(width)] for row in range(height)]
        g[15][:] = ["X"] * width
        g[14][0] = "m"
        g[7][-1] = "v"
        for col in range(8, 14):
            g[col][-1] = "f"
        for col in range(14, 16):
            g[col][-1] = "X"
        return cls(g)
    @classmethod
    def random_individual(cls):
        # STUDENT consider putting more constraints on this to prevent pipes in the air, etc
        # STUDENT also consider weighting the different tile types so it's not uniformly random
        g = [random.choices(options, k=width) for row in range(height)]
        g[15][:] = ["X"] * width
        g[14][0] = "m"
        g[7][-1] = "v"
        # NOTE(review): g[8:14] and g[14:16] are *copies* of the row-list
        # references, so the two assignments below never modify g — the
        # flag/ground column is not actually written here (compare the loops
        # in empty_individual). Confirm intended behavior.
        g[8:14][-1] = ["f"] * 6
        g[14:16][-1] = ["X", "X"]
        return cls(g)
def offset_by_upto(val, variance, min=None, max=None):
    """Gaussian-perturb *val* (stddev = sqrt(variance)), clamp to the optional
    [min, max] bounds, and truncate to an int.

    Note: min/max intentionally keep the original keyword names used by
    callers, even though they shadow the builtins.
    """
    perturbed = val + random.normalvariate(0, variance**0.5)
    if min is not None and perturbed < min:
        perturbed = min
    if max is not None and perturbed > max:
        perturbed = max
    return int(perturbed)
def clip(lo, val, hi):
    """Clamp *val* into the inclusive range [lo, hi]."""
    if val < lo:
        return lo
    return hi if val > hi else val
# Inspired by https://www.researchgate.net/profile/Philippe_Pasquier/publication/220867545_Towards_a_Generic_Framework_for_Automated_Video_Game_Level_Creation/links/0912f510ac2bed57d1000000.pdf
class Individual_DE(object):
    """GA individual whose genome is a heap of design elements (DEs); each DE
    is a tuple (x, type, ...params) stamped onto an empty level on demand."""
    # Calculating the level isn't cheap either so we cache it too.
    __slots__ = ["genome", "_fitness", "_level"]
    # Genome is a heapq of design elements sorted by X, then type, then other parameters
    def __init__(self, genome):
        self.genome = list(genome)
        heapq.heapify(self.genome)
        self._fitness = None
        self._level = None
    # Calculate and cache fitness
    def calculate_fitness(self):
        measurements = metrics.metrics(self.to_level())
        # Print out the possible measurements or look at the implementation of metrics.py for other keys:
        # print(measurements.keys())
        # Weighted sum of selected level metrics; solvability dominates.
        # STUDENT Modify this, and possibly add more metrics. You can replace this with whatever code you like.
        coefficients = dict(
            meaningfulJumpVariance=.5,
            jumps=.5,
            leniency=.5,
            decorationPercentage=1,
            solvability=5,
        )
        self._fitness = sum(map(lambda m: coefficients[m] * measurements[m],
                                coefficients))
        return self
    def fitness(self):
        # Return the cached fitness, computing it on first use.
        if self._fitness is None:
            self.calculate_fitness()
        return self._fitness
    def mutate(self, new_genome):
        """With 10% probability, pick one DE and perturb one of its parameters
        (position, size, or a boolean/material flag), then re-push it onto the
        heap. Returns the (possibly unchanged) genome list."""
        # STUDENT consider putting more constraints on this, to prevent generating weird things
        if random.random() < 0.1 and len(new_genome) > 0:
            to_change = random.randint(0, len(new_genome) - 1)
            de = new_genome[to_change]
            new_de = de
            x = de[0]
            de_type = de[1]
            choice = random.random()
            if de_type == "4_block":
                y = de[2]
                breakable = de[3]
                if choice < 0.33:
                    x = offset_by_upto(x, width / 8, min=1, max=width - 2)
                elif choice < 0.66:
                    y = offset_by_upto(y, height / 2, min=0, max=height - 1)
                else:
                    breakable = not de[3]
                new_de = (x, de_type, y, breakable)
            elif de_type == "5_qblock":
                y = de[2]
                has_powerup = de[3]  # boolean
                if choice < 0.33:
                    x = offset_by_upto(x, width / 8, min=1, max=width - 2)
                elif choice < 0.66:
                    y = offset_by_upto(y, height / 2, min=0, max=height - 1)
                else:
                    has_powerup = not de[3]
                new_de = (x, de_type, y, has_powerup)
            elif de_type == "3_coin":
                y = de[2]
                if choice < 0.5:
                    x = offset_by_upto(x, width / 8, min=1, max=width - 2)
                else:
                    y = offset_by_upto(y, height / 2, min=0, max=height - 1)
                new_de = (x, de_type, y)
            elif de_type == "7_pipe":
                h = de[2]
                if choice < 0.25: # lower x change
                    x = offset_by_upto(x, width / 8, min=1, max=width - 2)
                else:
                    h = random.randint(height-3, height-1) # 1-4 tall
                new_de = (x, de_type, h)
            elif de_type == "0_hole":
                w = de[2]
                if choice < 0.5:
                    x = offset_by_upto(x, width / 8, min=1, max=width - 2)
                else:
                    w = offset_by_upto(w, 4, min=1, max=width - 2)
                new_de = (x, de_type, w)
            elif de_type == "6_stairs":
                h = de[2]
                dx = de[3] # -1 or 1
                if choice < 0.33:
                    # NOTE(review): other branches clamp x with max=width - 2;
                    # this one caps x at 2 — confirm this is not a typo.
                    x = offset_by_upto(x, width / 8, min=1, max= 2)
                elif choice < 0.66:
                    if dx == -1:
                        h = random.randint(h, height - 2) # 1-3 tall if reverse
                    else:
                        h = offset_by_upto(h, 8, min=1, max=height - 4)
                else:
                    dx = -dx
                new_de = (x, de_type, h, dx)
            elif de_type == "1_platform":
                w = de[2]
                y = de[3]
                madeof = de[4] # from "?", "X", "B"
                if choice < 0.25:
                    x = offset_by_upto(x, width / 8, min=1, max=width - 2)
                elif choice < 0.5:
                    w = offset_by_upto(w, 8, min=1, max=width - 2)
                elif choice < 0.75:
                    y = offset_by_upto(y, height, min=0, max=height - 1)
                else:
                    madeof = random.choice(["?", "X", "B"])
                new_de = (x, de_type, w, y, madeof)
            elif de_type == "2_enemy":
                pass
            # NOTE(review): mutations on DEs within 3 columns of the right
            # edge are silently discarded (pop/push are skipped) — confirm.
            if x < width-3:
                new_genome.pop(to_change)
                heapq.heappush(new_genome, new_de)
        return new_genome
    def generate_children(self, other):
        """Single-point crossover on the DE lists, then mutate.

        NOTE(review): gb (the complementary child genome) is assembled but
        never used — only one mutated child is returned; confirm intent.
        """
        pa = random.randint(0, len(self.genome) - 1)
        pb = random.randint(0, len(other.genome) - 1)
        a_part = self.genome[:pa] if len(self.genome) > 0 else []
        b_part = other.genome[pb:] if len(other.genome) > 0 else []
        ga = a_part + b_part
        b_part = other.genome[:pb] if len(other.genome) > 0 else []
        a_part = self.genome[pa:] if len(self.genome) > 0 else []
        gb = b_part + a_part
        # do mutation
        return Individual_DE(self.mutate(ga))
    # Apply the DEs to a base level.
    def to_level(self):
        if self._level is None:
            base = Individual_Grid.empty_individual().to_level()
            for de in sorted(self.genome, key=lambda de: (de[1], de[0], de)):
                # de: x, type, ...
                x = de[0]
                de_type = de[1]
                if de_type == "4_block":
                    y = de[2]
                    breakable = de[3]
                    base[y][x] = "B" if breakable else "X"
                elif de_type == "5_qblock":
                    y = de[2]
                    has_powerup = de[3]  # boolean
                    base[y][x] = "M" if has_powerup else "?"
                elif de_type == "3_coin":
                    y = de[2]
                    base[y][x] = "o"
                elif de_type == "7_pipe":
                    h = de[2]
                    base[height - h - 1][x] = "T"
                    for y in range(height - h, height):
                        base[y][x] = "|"
                elif de_type == "0_hole":
                    w = de[2]
                    for x2 in range(w):
                        base[height - 1][clip(1, x + x2, width - 2)] = "-"
                elif de_type == "6_stairs":
                    h = de[2]
                    dx = de[3] # -1 or 1
                    for x2 in range(1, h + 1):
                        for y in range(x2 if dx == 1 else h - x2):
                            base[clip(0, height - y - 1, height - 1)][clip(1, x + x2, width - 2)] = "X"
                elif de_type == "1_platform":
                    w = de[2]
                    h = de[3]
                    madeof = de[4] # from "?", "X", "B"
                    for x2 in range(w):
                        base[clip(0, height - h - 1, height - 1)][clip(1, x + x2, width - 2)] = madeof
                elif de_type == "2_enemy":
                    base[height - 2][x] = "E"
            self._level = base
        return self._level
    @classmethod
    def empty_individual(_cls):
        # STUDENT Maybe enhance this
        g = []
        return Individual_DE(g)
    @classmethod
    def random_individual(_cls):
        # STUDENT Maybe enhance this
        elt_count = random.randint(8, 128)
        g = [random.choice([
            (random.randint(1, width - 2), "0_hole", random.randint(1, 8)),
            (random.randint(1, width - 2), "1_platform", random.randint(1, 8), random.randint(0, height - 1), random.choice(["?", "X", "B"])),
            (random.randint(1, width - 2), "2_enemy"),
            (random.randint(1, width - 2), "3_coin", random.randint(0, height - 1)),
            (random.randint(1, width - 2), "4_block", random.randint(0, height - 1), random.choice([True, False])),
            (random.randint(1, width - 2), "5_qblock", random.randint(0, height - 1), random.choice([True, False])),
            (random.randint(1, width - 2), "6_stairs", random.randint(1, height - 4), random.choice([-1, 1])),
            (random.randint(1, width - 2), "7_pipe", random.randint(2, height - 4))
        ]) for i in range(elt_count)]
        return Individual_DE(g)
# Select which genome encoding the GA evolves (grid of tiles vs. design elements).
# Individual = Individual_DE
Individual = Individual_Grid
def generate_successors(population):
    """Produce the next generation: cull the two weakest individuals, then add
    children chosen by tournament selection and by rank selection.

    NOTE(review): `results = population` aliases the argument, so the removals
    below mutate the caller's list in place — confirm that is acceptable.
    """
    # remove the lowest 2
    results = population
    order2 = []
    for i in population:
        order2.append((i,i.fitness()))
    order2.sort(key=fit_sort)
    results.remove(order2[0][0])
    results.remove(order2[1][0])
    # NOTE(review): the child produced here is discarded — the return value is
    # never appended to results; probably meant results.append(...). Confirm.
    if len(order2[len(order2)-1][0].genome) != 0 and len(order2[len(order2)-2][0].genome) != 0:
        order2[len(order2)-1][0].generate_children(order2[len(order2)-2][0])
    # STUDENT Design and implement this
    # Hint: Call generate_children() on some individuals and fill up results.
    order = []
    rank_selection = []
    # tournament selection ==============================
    # takes x elements chosen at random and then compares them - the best is allowed to reproduce
    size_of_pool = len(population) - 1
    selected_parent = []
    # runs tournament select twice, choosing two parents
    while (len(selected_parent) != 2):
        counter = 0
        tournament_parent = []
        # grabs 24 potential parents from the population
        while counter < 24:
            random_int = random.randint(0, size_of_pool)
            if population[random_int] not in tournament_parent:
                tournament_parent.append(population[random_int])
                counter += 1
        best_parent = None
        # compares the 24 parents and returns the best one
        for parent in tournament_parent:
            # if there is no best - set the first one as the best
            if best_parent == None:
                best_parent = parent
            # compares the current best to the current parent
            if parent.fitness() > best_parent.fitness():
                best_parent = parent
        # adds the best one to selected_parent
        if best_parent not in selected_parent:
            selected_parent.append(best_parent)
    # generates children from selected_parent and adds them to results
    if len(selected_parent[0].genome) != 0 and len(selected_parent[1].genome) != 0:
        results.append(selected_parent[0].generate_children(selected_parent[1]))
    # RANK SELECTION ===================
    # Ordering the population
    for i in population:
        if len(i.genome) != 0:
            order.append((i,i.fitness()))
    order.sort(key=fit_sort)
    # List with rank selection: individual at rank r appears r times, so
    # fitter individuals are proportionally more likely to be drawn.
    count = 1
    for i in order:
        counter = 0
        while counter < count:
            rank_selection.append(i[0])
            counter += 1
        count += 1
    # Pick 2 from list
    ran1 = rank_selection[random.randint(0, len(rank_selection)-1)]
    ran2 = rank_selection[random.randint(0, len(rank_selection)-1)]
    while ran2 == ran1:
        ran2 = rank_selection[random.randint(0, len(rank_selection)-1)]
    if len(ran1.genome) != 0 and len(ran2.genome) != 0:
        results.append(ran1.generate_children(ran2))
    # ==================================
    return results
def fit_sort(i):
    """Sort key for (individual, fitness) pairs: order by the fitness value."""
    _individual, fitness_value = i
    return fitness_value
def ga():
    """Run the GA until Ctrl-C; fitness is evaluated in a process pool.
    Returns the final population (with cached fitness values)."""
    # STUDENT Feel free to play with this parameter
    pop_limit = 480
    # Code to parallelize some computations
    batches = os.cpu_count()
    if pop_limit % batches != 0:
        print("It's ideal if pop_limit divides evenly into " + str(batches) + " batches.")
    batch_size = int(math.ceil(pop_limit / batches))
    with mpool.Pool(processes=os.cpu_count()) as pool:
        init_time = time.time()
        # STUDENT (Optional) change population initialization
        # 90% random individuals, 10% empty (flat, trivially solvable) levels.
        population = [Individual.random_individual() if random.random() < 0.9
                      else Individual.empty_individual()
                      for _g in range(pop_limit)]
        # But leave this line alone; we have to reassign to population because we get a new population that has more cached stuff in it.
        population = pool.map(Individual.calculate_fitness,
                              population,
                              batch_size)
        init_done = time.time()
        print("Created and calculated initial population statistics in:", init_done - init_time, "seconds")
        generation = 0
        start = time.time()
        now = start
        print("Use ctrl-c to terminate this loop manually.")
        try:
            while True:
                now = time.time()
                # Print out statistics
                if generation > 0:
                    best = max(population, key=Individual.fitness)
                    print("Generation:", str(generation))
                    print("Max fitness:", str(best.fitness()))
                    print("Average generation time:", (now - start) / generation)
                    print("Net time:", now - start)
                    # Keep the current best level on disk in case of a crash.
                    with open("levels/last.txt", 'w') as f:
                        for row in best.to_level():
                            f.write("".join(row) + "\n")
                generation += 1
                # STUDENT Determine stopping condition
                stop_condition = False
                if stop_condition:
                    break
                # STUDENT Also consider using FI-2POP as in the Sorenson & Pasquier paper
                gentime = time.time()
                next_population = generate_successors(population)
                gendone = time.time()
                print("Generated successors in:", gendone - gentime, "seconds")
                # Calculate fitness in batches in parallel
                if Individual != None:
                    next_population = pool.map(Individual.calculate_fitness,
                                               next_population,
                                               batch_size)
                    popdone = time.time()
                    print("Calculated fitnesses in:", popdone - gendone, "seconds")
                    population = next_population
        except KeyboardInterrupt:
            pass
    return population
if __name__ == "__main__":
    # Evolve until interrupted, then dump the ten best levels to timestamped files.
    final_gen = sorted(ga(), key=Individual.fitness, reverse=True)
    best = final_gen[0]
    print("Best fitness: " + str(best.fitness()))
    now = time.strftime("%m_%d_%H_%M_%S")
    # STUDENT You can change this if you want to blast out the whole generation, or ten random samples, or...
    for k in range(0, 10):
        with open("levels/" + now + "_" + str(k) + ".txt", 'w') as f:
            for row in final_gen[k].to_level():
                f.write("".join(row) + "\n")
| [
"noreply@github.com"
] | noreply@github.com |
df173b09e52bcebac847d81f36209822a42c1363 | ee84ead460e1af5711114ea07ad6cbe70a8107e2 | /project/project/urls.py | ef57ff8a25b379a2839b4b0e6cc5a12f1ea6e1dd | [] | no_license | TrellixVulnTeam/senproj_93I4 | a576d08572561e980ec96091a39c21b33eb95ad3 | 4c1102461d8d2c67abfdc7a80ba3eb6ff9048d8c | refs/heads/master | 2023-03-16T14:35:24.657546 | 2018-05-01T19:31:46 | 2018-05-01T19:31:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | """project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^class/', include('class.urls')),
    # Catch-all: every other path also resolves through the class app's URLconf.
    url(r'^', include('class.urls')),
]
| [
"hk@LAPTOP-ERRNQLE4.localdomain"
] | hk@LAPTOP-ERRNQLE4.localdomain |
d86001addfb479c6d7b2af5d233c1d029b88d3ee | 78f33b74e5794362b3beed16887d27afc7f37b3d | /app/auth/views.py | 74f54ded9071e47ad791f47939366c6bea6671a1 | [
"MIT"
] | permissive | JamesMusyoka/minutepitch | 1e68bac76ccfa161efb3b7ff6a856e54baf103ac | 60b0a47a97cfe7a3e7d48917243cca48cbd63f05 | refs/heads/master | 2020-04-21T23:19:20.080383 | 2018-09-14T18:58:10 | 2018-09-14T18:58:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,472 | py | from flask import render_template,request,flash,redirect,url_for
from flask_login import current_user, login_user, logout_user, login_required
from app.models import User
from werkzeug.urls import url_parse
from . import auth
from .forms import LoginForm,EditProfileForm, RegistrationForm,ResetPasswordRequestForm
from app import db
from datetime import datetime
from app.email import *
from ..email import mail_message
##################Registration route section#############
@auth.route('/register', methods = ['GET','POST'])
def register():
    """Create a new account, send a welcome email, then redirect to login.

    Already-authenticated visitors are bounced straight to the index.
    """
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(username=form.username.data, email=form.email.data)
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        # Fix: user-facing message was misspelled ("successfull").
        flash('Registration successful!')
        mail_message("Welcome to watchlist","email/welcome_user",user.email,user=user)
        return redirect(url_for('auth.login'))
    return render_template('auth/register.html', title='Register', form=form)
'''
first I ensure that the user that invokes this route is
not logged in. Logic inside if validate_on_submit() creates a
new user with the username, email and password provide, writes it to the db
and then redirects to the login prompt so that the user can ogin
'''
##################End Registration route section#############
############Log in section##############
'''
The user log in is facilitated by Flask-Login's login_user() function, the value
of the next query string argument is obtained. Flask provides a request variable that
contains all the info that the client sent with the request.
request.args attribute exposes the contents of the query string in a friendly dictionary format
'''
@auth.route('/login', methods=['GET','POST'])
def login():
    """Authenticate a user and honor the ?next= redirect target safely."""
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if user is None or not user.check_password(form.password.data):
            flash('invalid username or password')
            return redirect(url_for('auth.login'))
        login_user(user, remember = form.remember_me.data)
        next_page = request.args.get('next')
        # Only follow relative next targets; an absolute URL could redirect off-site.
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('main.index')
        return redirect(next_page)
    # Fix: page-title typo ("Sig In" -> "Sign In").
    return render_template('auth/login.html', title='Sign In', form = form)
'''
First step is to load the user from the db,then query
the db with the log in username to find the user.
the result of filetr_by is a query that only
includes the objects that have a matching username
since there is only going to be one or zero user results,
I use first() which will return the user object if it exists,
or None if it does not.
first() method is another commonly used way to
execute a query, when you only need to have
one result
Also I call the check_password() method to determine if the password entered in the form matches the hash or not
'''
############End Log in section##############
@auth.route('/logout')
def logout():
    """End the current session and return to the landing page."""
    logout_user()
    return redirect(url_for('main.index'))
'''
offers users the option to log out of the application
'''
###############Log out route end##############
###############User_profile route end##############
@auth.route('/user/<username>')
@login_required
def user_profile(username):
    """Render a user's profile page; unknown usernames produce a 404."""
    user = User.query.filter_by(username=username).first_or_404()
    # Placeholder content until real posts are wired in.
    posts = [
        {
            'author':user, 'body':'test Post#1'
        }
    ]
    return render_template('profile/user_profile.html',posts=posts, user=user)
'''
i have used a variant of first() called fist_or_404()
which works exactly like first() when there are results, and in case there
are no results it auto sends a 404 error back
'''
@auth.route('/edit_profile', methods=['GET','POST'])
@login_required
def edit_profile():
    """Let the logged-in user edit their username and about-me text.

    POST: copy the validated form data onto ``current_user`` and commit.
    GET: pre-populate the form from the user's stored values.
    """
    form = EditProfileForm()
    if form.validate_on_submit():
        current_user.username = form.username.data
        current_user.about_me = form.about_me.data
        db.session.commit()
        flash('Your changes have been saved')
        return redirect(url_for('main.index'))
    elif request.method == 'GET':
        form.username.data = current_user.username
        # NOTE(review): the POST branch writes ``current_user.about_me`` but
        # this reads ``current_user.bio`` -- one of the two attribute names
        # is likely wrong; confirm against the User model.
        form.about_me.data = current_user.bio
    return render_template('profile/edit_profile.html', title='Edit Profile', form=form)
'''
If validate_on_submit() returns True, the data is copied from the form into the user object, which is then written to the database.
'''
###############End user profile route##############
# @auth.route('/reset_password_request', methods=['GET', 'POST'])
# def reset_password_request():
# if current_user.is_authenticated:
# return redirect(url_for('main.index'))
# form = ResetPasswordRequestForm()
# if form.validate_on_submit():
# user = User.query.filter_by(email=form.email.data).first()
# if user:
# send_password_reset_email(user)
# flash('Check your email for the instructions to reset your password')
# return redirect(url_for('auth.login'))
# return render_template('email/reset_password_request.html',title='Reset Password', form=form)
# '''
# first, i make sure the user is not logged in,when the form is submitted and valid, i look up the user email provided in the form
# ,if the user is found, a password reset email will be sent using
# send_password_reset_email()
# '''
#########Rsetting password######
# @auth.route('/reset_password/<token>', methods=['GET', 'POST'])
# def reset_password(token):
# if current_user.is_authenticated:
# return redirect(url_for('main.index'))
# user = User.verify_reset_password_token(token)
# if not user:
# return redirect(url_for('main.index'))
# form = ResetPasswordForm()
# if form.validate_on_submit():
# user.set_password(form.password.data)
# db.session.commit()
# flash('Your password has been reset.')
# return redirect(url_for('auth.login'))
# return render_template('email/reset_paword.html', form=form) | [
"kibetedgar@gmail.com"
] | kibetedgar@gmail.com |
9a5f5a9b2ceb8570c4c33c2875e302fa1ed9964d | f9a45d303cbf490aa3a0d6f846ef230280104184 | /AeModel.py | 2200f4bbeae7a84c1a5d0d2af093ece83f859de9 | [
"MIT"
] | permissive | LilMarc0/DL_Colorisation | fc23aaaa0ce7922a80b6631aad448c85a1f21f66 | c8ef878020cc2654d3c023e4542ff64e90c8b498 | refs/heads/master | 2022-12-09T18:13:40.732782 | 2020-09-17T14:23:12 | 2020-09-17T14:23:12 | 296,348,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,158 | py | import tensorflow as tf
import tensorflow.keras.layers as layers
from tensorflow.keras.models import load_model
import os
# import SGD and Adam optimizers
from keras.optimizers import Adam
from DataSet import *
import numpy as np
import cv2 as cv
from keras.callbacks import ReduceLROnPlateau
from ResNext import *
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error
class AeModel:
    """Autoencoder-based colorization model.

    Wraps model definition, training with checkpoints, and evaluation
    that writes side-by-side comparison images (input / prediction /
    ground truth) for a colorization task in Lab color space.
    """

    def __init__(self, data_set: DataSet):
        # Training/evaluation data provider.
        self.data_set = data_set
        self.num_epochs = 50
        self.batch_size = 2
        self.learning_rate = 10 ** -5
        self.model = None
        self.checkpoint_dir = './checkpoints/'
        # Validation split is filled in by train_the_model().
        self.X_val = []
        self.Y_val = []

    def define_my_model(self):
        """Instantiate the residual autoencoder architecture."""
        # NOTE(review): ``mc`` is not defined in this file's visible imports;
        # it presumably comes from ``from ResNext import *`` -- confirm.
        self.model = resAE(mc)

    def compile_the_model(self):
        # Compile the model.
        # Define the optimizer.
        # NOTE(review): the hard-coded 1e-6 here ignores self.learning_rate
        # (1e-5) set in __init__ -- confirm which value is intended.
        optimizer = Adam(lr=10**-6)
        # Call 'compile' with the appropriate parameters (MSE regression
        # loss on the predicted ab channels).
        self.model.compile(optimizer=optimizer, loss='mse')

    def train_the_model(self):
        """Train with an 80/20 train/validation split, per-epoch checkpoints
        and learning-rate reduction on validation-loss plateaus."""
        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                      patience=2, min_lr=10**-8)
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)
        # Define the checkpoint callback (one saved model per epoch).
        checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=self.checkpoint_dir + '/model.{epoch:05d}.hdf5')
        train, self.X_val, y_train, self.Y_val = train_test_split(self.data_set.input_training_images,
                                                                  self.data_set.ground_truth_training_images,
                                                                  test_size=0.2,
                                                                  random_state=1)
        self.model.fit(train,
                       y_train,
                       epochs=self.num_epochs,
                       callbacks=[checkpoint_callback, reduce_lr],
                       batch_size=self.batch_size,
                       validation_data=(self.X_val, self.Y_val))

    def evaluate_the_model(self):
        """Colorize the test set with the last checkpoint and write
        input/prediction/ground-truth comparison images to disk."""
        best_epoch = self.num_epochs  # you can also try another epoch, e.g. the first one
        best_model = load_model(os.path.join(self.checkpoint_dir, 'model.%05d.hdf5') % best_epoch)
        for i in range(len(self.data_set.input_test_images)):
            # Predict the ab channels from input_test_images[i].
            pred_ab = best_model.predict(np.expand_dims(self.data_set.input_test_images[i], axis=0))[0]
            a, b = cv.split(pred_ab)
            # Rebuild the Lab representation (ab channels are rescaled by 128).
            Lab_image = cv.merge((self.data_set.input_test_images[i], a*128, b*128))
            # Convert from Lab to BGR.
            pred_image = cv.cvtColor(Lab_image, cv.COLOR_LAB2BGR) * 255
            # Convert the input image from the L channel to grayscale.
            input_image = np.uint8(self.data_set.input_test_images[i] / 100 * 255)
            # Ground-truth image in BGR format.
            gt_image = np.uint8(self.data_set.ground_truth_bgr_test_images[i])
            # pred_image is the predicted image in BGR format.
            concat_images = self.concat_images(input_image, pred_image, gt_image)
            cv.imwrite(os.path.join(self.data_set.dir_output_images, '%d.png' % i), concat_images)

    def concat_images(self, input_image, pred, ground_truth):
        """
        :param input_image: the grayscale image (the L channel of the Lab representation).
        :param pred: the predicted image.
        :param ground_truth: the ground-truth image.
        :return: the images concatenated horizontally on a white background.
        """
        h, w, _ = input_image.shape
        space_btw_images = int(0.2 * h)
        image = np.ones((h, w * 3 + 2 * space_btw_images, 3)) * 255
        # add input_image
        image[:, :w] = input_image
        # add predicted
        offset = w + space_btw_images
        image[:, offset: offset + w] = pred
        # add ground truth
        offset = 2 * (w + space_btw_images)
        image[:, offset: offset + w] = ground_truth
        return np.uint8(image)
"Trypodificaton@gmail.com"
] | Trypodificaton@gmail.com |
ebe07b6c084e6824573cbad59b09aeeccd77287e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03130/s063741533.py | 8e8ac571e89e7cbb1f076333c2fcb83f461a3bff | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | def examA():
N, K = LI()
if (N+1)//2>=K:
print("YES")
else:
print("NO")
return
def examB():
    """Read three undirected edges over 4 vertices (1-based) from stdin and
    print YES when every vertex ends up with degree 1 or 2 -- i.e. the
    three edges form a single path through all four vertices -- else NO.
    """
    d = [0]*4  # degree counter per vertex
    for _ in range(3):
        a, b = LI()
        a -=1; b -=1
        d[a] +=1
        d[b] +=1
    for i in d:
        # a vertex of degree >= 3 or an untouched vertex rules out a path
        if i>=3 or i==0:
            print("NO")
            return
    print("YES")
    return
# Placeholder stubs for the remaining (unsolved) problems C-F.
def examC():
    ans = 0
    print(ans)
    return

def examD():
    ans = 0
    print(ans)
    return

def examE():
    ans = 0
    print(ans)
    return

def examF():
    ans = 0
    print(ans)
    return

import sys,copy,bisect,itertools,heapq,math
from heapq import heappop,heappush,heapify
from collections import Counter,defaultdict,deque
# Competitive-programming stdin helpers: one value / int list / float list /
# string list / raw token list / stripped line.
def I(): return int(sys.stdin.readline())
def LI(): return list(map(int,sys.stdin.readline().split()))
def LFI(): return list(map(float,sys.stdin.readline().split()))
def LSI(): return list(map(str,sys.stdin.readline().split()))
def LS(): return sys.stdin.readline().split()
def SI(): return sys.stdin.readline().strip()
# ``global`` at module level is a no-op; these are module globals anyway.
global mod,mod2,inf,alphabet
mod = 10**9 + 7
mod2 = 998244353
inf = 10**18
alphabet = [chr(ord('a') + i) for i in range(26)]
if __name__ == '__main__':
    examB()
"""
"""
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2b9cbaa72278d4ee2b17c60d3fc5d1436d85591a | 5df21c11a43f0ecf4347ff880742a2135ad48e53 | /scripts/dataslicer/Slicer.py | 14787afcb2d2af8c65daec11730a9d682535815d | [] | no_license | DrGilion/DataVisualization | 2c372b7b213e35a0b06b23d2158b0393c91fadba | 0d0ce8c9956f89585e22d225af1247f404a9f857 | refs/heads/master | 2020-03-11T02:44:32.150980 | 2018-06-11T11:16:36 | 2018-06-11T11:16:36 | 129,727,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,331 | py | import json, collections, csv, os
class Slicer:
    """Slices a trade-data JSON dump into many small JSON files for the
    web frontend: per-country/year/product partner lists, per-country
    import/export aggregates, and country/category index files.
    """

    def __init__(self, dump_file, output_dir='data/'):
        # Path of the JSON dump to slice and directory for the output files.
        self.dump_file = dump_file
        self.output_dir = output_dir
        self.data = None  # lazily loaded by init()

    def init(self):
        """Lazily create the output directory and load the dump once."""
        if not self.data:
            if not os.path.exists(self.output_dir):
                os.makedirs(self.output_dir)
            with open(self.dump_file, 'r') as f:
                self.data = json.load(f)

    def get_valid_categories(self):
        """Return the allowed product categories (lower-cased), one per
        line in ressources/valid_categories."""
        with open('ressources/valid_categories') as f:
            content = f.read().splitlines()
        return [cat.strip().lower() for cat in content]

    def get_ccs(self):
        """Parse ressources/cc.txt ("CODE Name[, suffix]" per line) into a
        {code: display_name} dict.  A single-comma name such as
        "Korea, Republic of" is rewritten to "Korea (Republic of Korea)".
        """
        with open('ressources/cc.txt') as f:
            content = f.read().splitlines()
        ccs = {}
        ccl = [c.split(' ', 1) for c in content]
        for cco in ccl:
            ccs[cco[0]] = cco[1]
            if cco[1].count(",") == 1:
                parts = cco[1].split(',', 1)
                complete = "%(pre)s (%(post)s %(pre)s)" % {
                    "pre" : parts[0].strip(),
                    "post" : parts[1].strip()
                }
                ccs[cco[0]] = complete
        return ccs

    def set_json_file(self, name, data):
        """Serialize ``data`` to <output_dir><name>.json."""
        file = '%s%s.json' % (self.output_dir, name)
        with open(file, 'w') as f:
            f.write(json.dumps(data))

    def gen_cats_file(self):
        """Write categories.json: {ProductGroup: description} for every
        product group listed as valid."""
        cats = {}
        valid_categories = self.get_valid_categories()
        with open('ressources/products.csv') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                if row['ProductGroup'].lower() in valid_categories:
                    cats[row['ProductGroup']] = row['ProductGroupDescription'].strip()
        self.set_json_file("categories", cats)

    def gen_cc_file(self):
        """Write countries.json: for each country its display name and,
        per product, the sorted list of years with any import/export data."""
        self.init()
        ccs = self.get_ccs()
        result = {}
        for c in self.data:
            cc = c["cc"]
            all_products = set(c["data"]["import"].keys())
            all_products.update(c["data"]["export"].keys())
            result[cc] = {
                "full_name": ccs[cc],
                "years":{}
            }
            if len(all_products) == 0:
                continue
            for product in all_products:
                imports = c["data"]["import"].get(product, {})
                exports = c["data"]["export"].get(product, {})
                all_years = set(imports.keys())
                all_years.update(exports.keys())
                result[cc]["years"][product] = sorted(list(all_years))
        self.set_json_file("countries", result)

    def slice_country_products_partners(self):
        """Write one partners_<cc>_<year>_<product>.json file per
        country/year/product combination, listing trade partners and
        amounts (restricted to known country codes)."""
        self.init()
        valid_ccs = self.get_ccs().keys()
        for c in self.data:
            cc = c["cc"]
            all_products = set(c["data"]["import"].keys())
            all_products.update(c["data"]["export"].keys())
            for product in all_products:
                imports = c["data"]["import"].get(product, {})
                exports = c["data"]["export"].get(product, {})
                all_years = set(imports.keys())
                all_years.update(exports.keys())
                for year in all_years:
                    y_imports = imports.get(year, {})
                    y_exports = exports.get(year, {})
                    file = "partners_%(country)s_%(year)s_%(product)s" % {
                        "country":cc.lower(),
                        "year":year,
                        "product":product.lower()
                    }
                    data = {
                        "imports": [
                            {"cc":cc, "amount":y_imports[cc]}
                            for cc in y_imports.keys()
                            if cc in valid_ccs
                        ],
                        "exports": [
                            {"cc": cc, "amount": y_exports[cc]}
                            for cc in y_exports.keys()
                            if cc in valid_ccs
                        ]
                    }
                    self.set_json_file(file, data)

    def slice_country_aggregates(self):
        """Write aggregate_<cc>.json per country: for every year, each
        valid product's share (in percent) of total imports and exports.

        Totals exclude the aggregate "wld" (world) partner entry and the
        "total" pseudo-product when normalizing to percentages.
        """
        self.init()
        valid_categories = self.get_valid_categories()
        for c in self.data:
            years = collections.defaultdict(lambda: {"import": {}, "export": {}})
            cc = c["cc"]
            # NOTE: loop variable ``type`` shadows the builtin of that name.
            for type in c["data"].keys():
                y_products = collections.defaultdict(lambda : {})
                for product in c["data"][type].keys():
                    #if product.lower() == "total":
                    #    continue
                    if product.lower() not in valid_categories:
                        continue
                    for year in c["data"][type][product].keys():
                        stats = c["data"][type][product][year]
                        total_usd = sum([stats[cc] for cc in stats.keys() if cc.lower() != "wld"])
                        y_products[year][product] = total_usd
                # convert to relative values (percent of the yearly total)
                for year in y_products.keys():
                    total_usd = sum([y_products[year][k] for k in y_products[year].keys() if k.lower() != "total"])
                    for prod in y_products[year].keys():
                        v = y_products[year][prod]
                        y_products[year][prod] = (v/total_usd)*100
                    years[year][type] = y_products[year]
            # convert to the output format
            op_data = {
                "years": []
            }
            for year in years.keys():
                op_data["years"].append({
                    "year": year,
                    "imports":[
                        {"id":product_id, "percent": round(percent,3)}
                        for product_id, percent in years[year]["import"].items()
                    ],
                    "exports":[
                        {"id": product_id, "percent": round(percent,3)}
                        for product_id, percent in years[year]["export"].items()
                    ]
                })
            self.set_json_file("aggregate_%s" % cc.lower(), op_data)

    def generate_data(self):
        """Run all slicing steps and regenerate every output file."""
        self.slice_country_products_partners()
        self.slice_country_aggregates()
        self.gen_cc_file()
        self.gen_cats_file()
"philip.matesanz@driggle.com"
] | philip.matesanz@driggle.com |
ced2d21a6a7709401c7081f172dd9ad5055230a8 | aa18020074665dd2b70e2e09df49eed9d453cfe2 | /server.py | 4ba46b7886c01b5d3d191c4e6d5b957cfe057fe5 | [] | no_license | alfredgg/personal-webpage | 95f1d928f41437a81f61155602efe9bfee77d53d | c6ebc530684478d25b7feed368d71d14c696b3da | refs/heads/master | 2020-12-29T02:21:44.940667 | 2017-03-20T11:48:02 | 2017-03-20T11:48:02 | 39,331,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from webpage import app
from flask_debugtoolbar import DebugToolbarExtension
# Development entry point: run the Flask app with the debugger enabled.
if __name__ == '__main__':
    app.debug = True  # NOTE(review): debug mode must not be enabled in production
    app.config['SECRET_KEY'] = 'dt'  # NOTE(review): hard-coded secret key -- replace for real deployments
    # DebugToolbarExtension(app)
    app.run()
| [
"alfredgg@yahoo.es"
] | alfredgg@yahoo.es |
b836ab3184c86a5580190a268a1f5b5241677048 | ef54d37f8a3303013ca7469871a320d303957ed7 | /robo4.2/fusion/tests/wpst_crm/feature_tests/TBIRD/F110_Port_Monitoring/port_monitor_support_module.py | a6deffdae867986fcbebc06b5c2da63aca16ddb9 | [] | no_license | richa92/Jenkin_Regression_Testing | d18badfcf16bda682dfe7bcbbd66f54a9a27a58d | 24a74926170cbdfafa47e972644e2fe5b627d8ff | refs/heads/master | 2020-07-12T10:01:59.099137 | 2019-08-27T12:14:53 | 2019-08-27T12:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | '''
This module is used to include some helper function for Port Monitoring
'''
def set_networkuri_lig(data_variable, get_output):
    '''
    Replace each network name in the LIG body's uplink sets with the
    matching network URI taken from ``get_output['members']``.

    The substitution happens in place on ``data_variable``, which is
    also returned for convenience.
    '''
    members = get_output['members']
    for uplink_set in data_variable['uplinkSets']:
        uris = uplink_set['networkUris']
        for idx in range(len(uris)):
            # Compare against every member; a name match is swapped for its URI.
            for member in members:
                if uris[idx] == member['name']:
                    uris[idx] = member['uri']
    return data_variable
"akul@SAC0MKUVCQ.asiapacific.hpqcorp.net"
] | akul@SAC0MKUVCQ.asiapacific.hpqcorp.net |
a38b2d22735f142d5aef497ce84ffd8739a0983a | b887177e3adb181fd1bc3b83c75814e9e3e4f535 | /env/bin/qr | 920b4680a1c4625ff8e74475601cd3bd9fcab659 | [] | no_license | vinitparak/QRCode_userdata | 748a0a02368a2e827a05ec6efa2a33d76c60b954 | 340bc28519cb496656340094160a2dcfcb3469e2 | refs/heads/master | 2020-09-08T17:48:24.985555 | 2019-11-12T11:19:37 | 2019-11-12T11:19:37 | 221,200,103 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | #!/home/vinit/Documents/qrcode/env/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'qrcode==5.1','console_scripts','qr'
__requires__ = 'qrcode==5.1'
import re
import sys
from pkg_resources import load_entry_point
# Setuptools-generated console-script shim for the ``qr`` command.
if __name__ == '__main__':
    # Strip a "-script.py"/".exe" suffix so argv[0] shows the bare command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('qrcode==5.1', 'console_scripts', 'qr')()
    )
| [
"vinitparakh12@gmail.com"
] | vinitparakh12@gmail.com | |
ba6d0088df5d4e1c6183d23d78a84e55493c3225 | 79acb3dc0d4cbb8fb61c5b0955c596fdf3509570 | /make_random_metadata.py | b48553959a1093cb40f2d8f2c80adb366b40ca91 | [] | no_license | Atehortuajf/DCS-1020-Final-Project | 44053a0ecbe1079117ba399e7888f2f5e2156010 | 2a62a08228f7eb991bd2701fc728d6b5efd3c852 | refs/heads/main | 2023-02-01T03:07:56.397073 | 2020-12-20T22:47:01 | 2020-12-20T22:47:01 | 322,955,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | import csv, random
sourcefolder = '/Users/tunder/Dropbox/fiction/meta/scifimeta.csv'

allrows = list()
# newline='' is required when a file object is handed to the csv module
# so csv's own newline handling is used; without it, extra blank rows
# appear in the output on Windows (per the csv module docs).
with open(sourcefolder, encoding='utf-8', newline='') as f:
    reader = csv.DictReader(f)
    fieldnames = reader.fieldnames
    for row in reader:
        # Randomly assign each volume to one of two teams and append the
        # tag to its existing genre tags.
        tagtoadd = random.choice(['teamred', 'teamblue'])
        row['genretags'] = row['genretags'] + ' | ' + tagtoadd
        allrows.append(row)

outfolder = '/Users/tunder/Dropbox/fiction/meta/scifimeta2.csv'

with open(outfolder, mode='w', encoding='utf-8', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerows(allrows)
| [
"67188088+Atehortuajf@users.noreply.github.com"
] | 67188088+Atehortuajf@users.noreply.github.com |
5b22e465d6bfea324c4799ed5375f66ac7654f7f | 4283c553bea803fc9ca7544ee43654c180e4bfe1 | /app.py | 3797b799b9c27726443620e95a099aa873cdf9db | [] | no_license | Sarellerchy/wuhan2020_coronavirus_NCP_dashboard | 8b0f84d1e556bce4b268c4cb2d74aa92cba3f756 | 8f4aad774349ca235c6b879a68d087f1f003791a | refs/heads/master | 2021-01-01T11:15:20.379390 | 2020-02-15T10:32:21 | 2020-02-15T10:32:21 | 239,254,628 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | from flask import Flask,render_template
from spider import data_total,news_data
import json
app = Flask(__name__)
# Single-page dashboard route: renders index.html with the latest
# scraped totals and news items from the ``spider`` module.
@app.route('/')
def hello_world():
    """Render the dashboard: thousands-separated case totals, the three
    most recent news items (time/title/link) and the per-day history
    series re-parsed to a list of dicts for charting.

    NOTE(review): totals/news come from module-level objects populated
    when ``spider`` is imported -- data freshness depends on how that
    module refreshes them.
    """
    return render_template("index.html",confirmtotal = '{:,}'.format(data_total.gntotal),suspecttotal = '{:,}'.format(data_total.sustotal),
                           healtotal = '{:,}'.format(data_total.curetotal),deadtotal = '{:,}'.format(data_total.deathtotal),
                           news_01_time = news_data.news_list[0][3]+' '+news_data.news_list[0][2],news_01_title = news_data.news_list[0][0],
                           news_02_time = news_data.news_list[1][3]+' '+news_data.news_list[1][2],news_02_title = news_data.news_list[1][0],
                           news_03_time = news_data.news_list[2][3]+' '+news_data.news_list[2][2],news_03_title = news_data.news_list[2][0],
                           news_01_link = news_data.news_list[0][1],news_02_link = news_data.news_list[1][1],news_03_link = news_data.news_list[2][1],
                           line_y_json = json.loads(data_total.df_data_total_history.sort_values(by='date')[['date','confirm','dead','heal','suspect']].
                                                    to_json(orient = 'records', force_ascii = False)))
# Start Flask's built-in development server when run directly.
if __name__ == '__main__':
    app.run()
| [
"mandybear319@163.com"
] | mandybear319@163.com |
145f60532ab8170abd0c6a4876d81608c2693f9c | fa1b4f613f0cd08d839510b906b4bd18293a0e46 | /model/locations.py | 532e35a690150e3a26ef39862afc6966f5694b2f | [] | no_license | JoseRoberts87/python-webapp-template | 041f9eb8771e4166909744d6510172381bc560b4 | a1e27cb9886899c3e90ef8191f621412640d4315 | refs/heads/master | 2020-03-27T05:02:32.319095 | 2018-08-24T12:45:30 | 2018-08-24T12:45:30 | 145,990,175 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | from app import db, ma
from common import utils
class Locations(db.Model):
    """SQLAlchemy model for the ``locations`` table."""
    __tablename__ = 'locations'
    # String primary key identifying the location.
    location_id = db.Column(db.String(45), primary_key=True)
    name = db.Column(db.String(45))
    # NOTE: ``type`` shadows the builtin name as a column attribute.
    type = db.Column(db.String(45))
    level = db.Column(db.Integer)
    status = db.Column(db.String(45))
class LocationsSchema(ma.Schema):
    """Marshmallow schema exposing every attribute of the Locations model."""
    class Meta(object):
        # Serialize all model attributes discovered via the utils helper.
        fields = utils.class_attributes(Locations)

# Reusable schema instances: single object vs. list serialization.
LOCATION_SCHEMA = LocationsSchema()
LOCATIONS_SCHEMA = LocationsSchema(many=True)
| [
"robenjos@amazon.com"
] | robenjos@amazon.com |
1ea84b735f256c9f7ba20bcbe7400cfcd0602006 | 7a7f71b81b610744122b241bfd2ea6560fb9b64d | /sgd.py | 8c945ccfa418c3858e16e1d286ef0dfb8324561f | [] | no_license | rhhc/compress-modern-CNNs | cc408cee97816d30c32fedd0bf64ea7757e08c81 | 8ec767042419fdd92a185aae31a9150745ed97c6 | refs/heads/master | 2022-04-25T11:28:52.961941 | 2020-04-27T13:20:10 | 2020-04-27T13:20:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,098 | py | import torch
import torch.optim as optim
import math
class SGD(optim.SGD):
    """SGD variant for pruned networks: the final update is masked so that
    parameters which are exactly zero receive no update and stay zero.
    """

    def step(self, closure=None):
        """Perform a single optimization step.

        Identical to ``torch.optim.SGD.step`` (weight decay, momentum,
        dampening, Nesterov) except for the pruning mask applied just
        before the parameter update.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for idx, p in enumerate(group['params']):
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    # legacy alpha-first overload: d_p += weight_decay * p
                    d_p.add_(weight_decay, p.data)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        # fresh buffer: no dampening on the first accumulation
                        buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
                        buf.mul_(momentum).add_(d_p)
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf
                # keep pruned (exactly-zero) weights frozen at zero
                mask = torch.ne(p.data, 0).to(torch.float32)
                d_p = d_p * mask
                p.data.add_(-group['lr'], d_p)
        return loss
class SGD_caffe(optim.SGD):
    """SGD mimicking Caffe's update rule: the learning rate is folded into
    the momentum buffer (buf = lr * grad + momentum * buf) rather than
    being applied after momentum accumulation.
    """

    def step(self, closure=None):
        """Apply one optimization step; returns closure() result if given.

        NOTE(review): ``param_state`` is read below outside the
        ``momentum != 0`` guard, so this optimizer is only usable with a
        non-zero momentum (momentum == 0 raises a NameError on the first
        parameter, or reuses a stale state dict afterwards).
        """
        loss = closure() if closure is not None else None
        for group in self.param_groups:
            decay = group['weight_decay']
            mom = group['momentum']
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if decay != 0:
                    # legacy alpha-first overload: grad += decay * p
                    grad.add_(decay, p.data)
                if mom != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        param_state['momentum_buffer'] = torch.zeros_like(p.data)
                diff = group['lr'] * grad + mom * param_state['momentum_buffer']
                p.data.add_(-diff)
                param_state['momentum_buffer'] = diff
        return loss
class CaffeSGD(torch.optim.SGD):
    """Pruning-aware SGD following Caffe conventions:

    * the learning rate is multiplied into the gradient before the
      momentum update;
    * 1-D (bias-like) parameters get twice the learning rate and are
      exempt from weight decay;
    * parameters that are exactly zero receive no update, so pruned
      weights stay at zero.
    """

    def __init__(self, *args, **kwargs):
        super(CaffeSGD, self).__init__(*args, **kwargs)

    def step(self, closure=None):
        """Apply a single update step; returns closure() result if given."""
        loss = closure() if closure is not None else None
        for group in self.param_groups:
            lr = group['lr']
            decay = group['weight_decay']
            mom = group['momentum']
            damp = group['dampening']
            use_nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                update = p.grad.data
                bias_like = (update.dim() == 1)
                # weight decay is skipped for 1-D (bias-like) tensors
                if decay != 0 and not bias_like:
                    update.add_(decay, p.data)  # legacy alpha-first overload
                # Caffe convention: biases learn at twice the base rate
                update.mul_(lr * 2 if bias_like else lr)
                if mom != 0:
                    state = self.state[p]
                    if 'momentum_buffer' in state:
                        buf = state['momentum_buffer']
                        buf.mul_(mom).add_(1 - damp, update)
                    else:
                        # fresh buffer: no dampening on the first step
                        buf = state['momentum_buffer'] = torch.zeros_like(p.data)
                        buf.mul_(mom).add_(update)
                    update = update.add(mom, buf) if use_nesterov else buf
                # zero-out updates on already-zero (pruned) weights
                keep = torch.ne(p.data, 0).to(torch.float32)
                p.data.sub_(update * keep)
        return loss
"noreply@github.com"
] | noreply@github.com |
c1f7a9a1be4dab922be13ef7a1acc032b4a8ac3b | c53adaefd26bda80cd7ce22ea0cae877e364c2c5 | /lib/network.py | 52ca885512d164a52547c7b33578191591958ef4 | [
"MIT"
] | permissive | Morningstarpayments/electrum | a59ac886b23ef5abcc940a5d51ea1ebef78908b7 | a035bac8577d53a805dc111eb3ba89f48e96fe34 | refs/heads/master | 2021-07-06T08:59:44.926421 | 2017-10-03T00:21:10 | 2017-10-03T00:21:10 | 105,598,670 | 0 | 0 | null | 2017-10-03T00:19:17 | 2017-10-03T00:19:17 | null | UTF-8 | Python | false | false | 33,849 | py | # Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import Queue
import os
import errno
import sys
import random
import select
import traceback
from collections import defaultdict, deque
from threading import Lock
import socks
import socket
import json
import util
import bitcoin
from bitcoin import *
from interface import Connection, Interface
from blockchain import Blockchain
from version import ELECTRUM_VERSION, PROTOCOL_VERSION
DEFAULT_PORTS = {'t':'50001', 's':'50002'}
#There is a schedule to move the default list to e-x (electrumx) by Jan 2018
#Schedule is as follows:
#move ~3/4 to e-x by 1.4.17
#then gradually switch remaining nodes to e-x nodes
DEFAULT_SERVERS = {
'192.241.172.143': DEFAULT_PORTS,
}
def set_testnet():
    """Switch the module-level defaults to testnet: testnet port numbers
    and an (empty) testnet server list."""
    global DEFAULT_PORTS, DEFAULT_SERVERS
    DEFAULT_SERVERS = {}
    DEFAULT_PORTS = {'t': '51001', 's': '51002'}
def set_nolnet():
    """Switch the module-level defaults to the 'nolnet' network: its port
    numbers and an (empty) server list."""
    global DEFAULT_PORTS, DEFAULT_SERVERS
    DEFAULT_SERVERS = {}
    DEFAULT_PORTS = {'t': '52001', 's': '52002'}
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
    """Parse a peers/IRC server list into {host: {protocol: port, ...}}.

    Each ``item`` looks like (ip, host, [features...]) where a feature is
    "s<port>"/"t<port>" (SSL/TCP protocol, port optional), "v<version>"
    (protocol version) or "p<level>" (pruning level).  Hosts advertising
    an outdated protocol version end up with an empty feature dict.
    """
    from version import PROTOCOL_VERSION
    servers = {}
    for item in result:
        host = item[1]
        out = {}
        version = None
        pruning_level = '-'
        if len(item) > 2:
            for v in item[2]:
                # "s"/"t" plus an optional port; empty port means default
                if re.match("[st]\d*", v):
                    protocol, port = v[0], v[1:]
                    if port == '': port = DEFAULT_PORTS[protocol]
                    out[protocol] = port
                elif re.match("v(.?)+", v):
                    version = v[1:]
                elif re.match("p\d*", v):
                    pruning_level = v[1:]
                    if pruning_level == '': pruning_level = '0'
            try:
                # Python 2 cmp(); any failure (missing/unparsable version)
                # marks the server as not recent.
                is_recent = cmp(util.normalize_version(version), util.normalize_version(PROTOCOL_VERSION)) >= 0
            except Exception:
                is_recent = False
            if out and is_recent:
                out['pruning'] = pruning_level
        servers[host] = out
    return servers
def filter_protocol(hostmap, protocol = 's'):
    '''Return the servers in ``hostmap`` that implement ``protocol``,
    each serialized as a "host:port:protocol" string.'''
    return [serialize_server(host, portmap[protocol], protocol)
            for host, portmap in hostmap.items()
            if portmap.get(protocol)]
def pick_random_server(hostmap = None, protocol = 's', exclude_set = set()):
    '''Choose a random server implementing ``protocol`` from ``hostmap``
    (DEFAULT_SERVERS when None), skipping entries in ``exclude_set``.
    Returns None when nothing qualifies.'''
    servers = DEFAULT_SERVERS if hostmap is None else hostmap
    candidates = set(filter_protocol(servers, protocol)) - exclude_set
    if not candidates:
        return None
    return random.choice(list(candidates))
from simple_config import SimpleConfig
proxy_modes = ['socks4', 'socks5', 'http']
def serialize_proxy(p):
    '''Flatten a proxy dict into "mode:host:port:user:password".
    Returns None for anything that is not a plain dict.'''
    if type(p) != dict:
        return None
    keys = ('mode', 'host', 'port', 'user', 'password')
    return ':'.join(p.get(k) for k in keys)
def deserialize_proxy(s):
    """Parse a "mode:host:port:user:password" string (fields optional,
    consumed left to right) into a proxy dict.  Returns None for
    non-strings and for the literal string "none" (case-insensitive).
    """
    # NOTE: ``unicode`` is the Python 2 text type; this module is Py2-only.
    if type(s) not in [str, unicode]:
        return None
    if s.lower() == 'none':
        return None
    proxy = { "mode":"socks5", "host":"localhost" }
    args = s.split(':')
    n = 0
    # The leading field is the mode only when it names a known proxy
    # mode; otherwise it is treated as the host.
    if proxy_modes.count(args[n]) == 1:
        proxy["mode"] = args[n]
        n += 1
    if len(args) > n:
        proxy["host"] = args[n]
        n += 1
    if len(args) > n:
        proxy["port"] = args[n]
        n += 1
    else:
        # default port depends on the mode
        proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
    if len(args) > n:
        proxy["user"] = args[n]
        n += 1
    if len(args) > n:
        proxy["password"] = args[n]
    return proxy
def deserialize_server(server_str):
    '''Split a "host:port:protocol" string into its three components.

    Raises ValueError when the string does not have exactly three
    colon-separated fields or the port is not numeric, and
    AssertionError for a protocol other than "s" or "t".
    '''
    fields = str(server_str).split(':')
    host, port, protocol = fields
    assert protocol in 'st'
    int(port)  # validation only -- the port is returned as a string
    return host, port, protocol
def serialize_server(host, port, protocol):
    '''Join server parts into the canonical "host:port:protocol" string.'''
    return str(':'.join((host, port, protocol)))
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
def __init__(self, config=None):
if config is None:
config = {} # Do not use mutables as default values!
util.DaemonThread.__init__(self)
self.config = SimpleConfig(config) if type(config) == type({}) else config
self.num_server = 8 if not self.config.get('oneserver') else 0
self.blockchain = Blockchain(self.config, self)
# A deque of interface header requests, processed left-to-right
self.bc_requests = deque()
# Server for addresses and transactions
self.default_server = self.config.get('server')
# Sanitize default server
try:
deserialize_server(self.default_server)
except:
self.default_server = None
if not self.default_server:
self.default_server = pick_random_server()
self.lock = Lock()
self.pending_sends = []
self.message_id = 0
self.debug = False
self.irc_servers = {} # returned by interface (list from irc)
self.recent_servers = self.read_recent_servers()
self.banner = ''
self.donation_address = ''
self.relay_fee = None
self.heights = {}
self.merkle_roots = {}
self.utxo_roots = {}
# callbacks passed with subscriptions
self.subscriptions = defaultdict(list)
self.sub_cache = {}
# callbacks set by the GUI
self.callbacks = defaultdict(list)
dir_path = os.path.join( self.config.path, 'certs')
if not os.path.exists(dir_path):
os.mkdir(dir_path)
# subscriptions and requests
self.subscribed_addresses = set()
# Requests from client we've not seen a response to
self.unanswered_requests = {}
# retry times
self.server_retry_time = time.time()
self.nodes_retry_time = time.time()
# kick off the network. interface is the main server we are currently
# communicating with. interfaces is the set of servers we are connecting
# to or have an ongoing connection with
self.interface = None
self.interfaces = {}
self.auto_connect = self.config.get('auto_connect', True)
self.connecting = set()
self.socket_queue = Queue.Queue()
self.start_network(deserialize_server(self.default_server)[2],
deserialize_proxy(self.config.get('proxy')))
def register_callback(self, callback, events):
with self.lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
with self.lock:
callbacks = self.callbacks[event][:]
[callback(event, *args) for callback in callbacks]
def read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r") as f:
data = f.read()
return json.loads(data)
except:
return []
def save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w") as f:
f.write(s)
except:
pass
def get_server_height(self):
return self.heights.get(self.default_server, 0)
def server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.print_error('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
return result
def set_status(self, status):
self.connection_status = status
self.notify('status')
def is_connected(self):
return self.interface is not None
def is_connecting(self):
return self.connection_status == 'connecting'
def is_up_to_date(self):
return self.unanswered_requests == {}
def queue_request(self, method, params, interface=None):
# If you want to queue a request on any interface it must go
# through this function so message ids are properly tracked
if interface is None:
interface = self.interface
message_id = self.message_id
self.message_id += 1
if self.debug:
self.print_error(interface.host, "-->", method, params, message_id)
interface.queue_request(method, params, message_id)
return message_id
def send_subscriptions(self):
self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
self.sub_cache.clear()
# Resend unanswered requests
requests = self.unanswered_requests.values()
self.unanswered_requests = {}
for request in requests:
message_id = self.queue_request(request[0], request[1])
self.unanswered_requests[message_id] = request
self.queue_request('server.banner', [])
self.queue_request('server.donation_address', [])
self.queue_request('server.peers.subscribe', [])
for i in bitcoin.FEE_TARGETS:
self.queue_request('blockchain.estimatefee', [i])
self.queue_request('blockchain.relayfee', [])
for addr in self.subscribed_addresses:
self.queue_request('blockchain.address.subscribe', [addr])
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'fee':
value = self.config.fee_estimates
elif key == 'updated':
value = (self.get_local_height(), self.get_server_height())
elif key == 'servers':
value = self.get_servers()
elif key == 'interfaces':
value = self.get_interfaces()
return value
def notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self):
host, port, protocol = deserialize_server(self.default_server)
return host, port, protocol, self.proxy, self.auto_connect
def get_donation_address(self):
if self.is_connected():
return self.donation_address
def get_interfaces(self):
'''The interfaces that are in connected state'''
return self.interfaces.keys()
def get_servers(self):
if self.irc_servers:
out = self.irc_servers.copy()
out.update(DEFAULT_SERVERS)
else:
out = DEFAULT_SERVERS
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = { protocol:port }
return out
def start_interface(self, server):
if (not server in self.interfaces and not server in self.connecting):
if server == self.default_server:
self.print_error("connecting to %s as new interface" % server)
self.set_status('connecting')
self.connecting.add(server)
c = Connection(server, self.socket_queue, self.config.path)
def start_random_interface(self):
exclude_set = self.disconnected_servers.union(set(self.interfaces))
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self.start_interface(server)
def start_interfaces(self):
self.start_interface(self.default_server)
for i in range(self.num_server - 1):
self.start_random_interface()
def set_proxy(self, proxy):
self.proxy = proxy
if proxy:
self.print_error('setting proxy', proxy)
proxy_mode = proxy_modes.index(proxy["mode"]) + 1
socks.setdefaultproxy(proxy_mode,
proxy["host"],
int(proxy["port"]),
# socks.py seems to want either None or a non-empty string
username=(proxy.get("user", "") or None),
password=(proxy.get("password", "") or None))
socket.socket = socks.socksocket
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
socket.socket = socket._socketobject
socket.getaddrinfo = socket._socket.getaddrinfo
def start_network(self, protocol, proxy):
assert not self.interface and not self.interfaces
assert not self.connecting and self.socket_queue.empty()
self.print_error('starting network')
self.disconnected_servers = set([])
self.protocol = protocol
self.set_proxy(proxy)
self.start_interfaces()
def stop_network(self):
self.print_error("stopping network")
for interface in self.interfaces.values():
self.close_interface(interface)
if self.interface:
self.close_interface(self.interface)
assert self.interface is None
assert not self.interfaces
self.connecting = set()
# Get a new queue - no old pending connections thanks!
self.socket_queue = Queue.Queue()
def set_parameters(self, host, port, protocol, proxy, auto_connect):
proxy_str = serialize_proxy(proxy)
server = serialize_server(host, port, protocol)
self.config.set_key('auto_connect', auto_connect, False)
self.config.set_key("proxy", proxy_str, False)
self.config.set_key("server", server, True)
# abort if changes were not allowed by config
if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
return
self.auto_connect = auto_connect
if self.proxy != proxy or self.protocol != protocol:
# Restart the network defaulting to the given server
self.stop_network()
self.default_server = server
self.start_network(protocol, proxy)
elif self.default_server != server:
self.switch_to_interface(server)
else:
self.switch_lagging_interface()
def switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
self.switch_to_interface(random.choice(servers))
def switch_lagging_interface(self, suggestion = None):
'''If auto_connect and lagging, switch interface'''
if self.server_is_lagging() and self.auto_connect:
if suggestion and self.protocol == deserialize_server(suggestion)[2]:
self.switch_to_interface(suggestion)
else:
self.switch_to_random_interface()
def switch_to_interface(self, server):
'''Switch to server as our interface. If no connection exists nor
being opened, start a thread to connect. The actual switch will
happen on receipt of the connection notification. Do nothing
if server already is our interface.'''
self.default_server = server
if server not in self.interfaces:
self.interface = None
self.start_interface(server)
return
i = self.interfaces[server]
if self.interface != i:
self.print_error("switching to", server)
# stop any current interface in order to terminate subscriptions
self.close_interface(self.interface)
self.interface = i
self.send_subscriptions()
self.set_status('connected')
self.notify('updated')
def close_interface(self, interface):
if interface:
if interface.server in self.interfaces:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
interface.close()
def add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[0:20]
self.save_recent_servers()
def process_response(self, interface, response, callbacks):
if self.debug:
self.print_error("<--", response)
error = response.get('error')
result = response.get('result')
method = response.get('method')
params = response.get('params')
# We handle some responses; return the rest to the client.
if method == 'server.version':
interface.server_version = result
elif method == 'blockchain.headers.subscribe':
if error is None:
self.on_header(interface, result)
elif method == 'server.peers.subscribe':
if error is None:
self.irc_servers = parse_servers(result)
self.notify('servers')
elif method == 'server.banner':
if error is None:
self.banner = result
self.notify('banner')
elif method == 'server.donation_address':
if error is None:
self.donation_address = result
elif method == 'blockchain.estimatefee':
if error is None and result > 0:
i = params[0]
fee = int(result*COIN)
self.config.fee_estimates[i] = fee
self.print_error("fee_estimates[%d]" % i, fee)
self.notify('fee')
elif method == 'blockchain.relayfee':
if error is None:
self.relay_fee = int(result * COIN)
self.print_error("relayfee", self.relay_fee)
elif method == 'blockchain.block.get_chunk':
self.on_get_chunk(interface, response)
elif method == 'blockchain.block.get_header':
self.on_get_header(interface, response)
for callback in callbacks:
callback(response)
def get_index(self, method, params):
""" hashable index for subscriptions and cache"""
return str(method) + (':' + str(params[0]) if params else '')
def process_responses(self, interface):
responses = interface.get_responses()
for request, response in responses:
if request:
method, params, message_id = request
k = self.get_index(method, params)
# client requests go through self.send() with a
# callback, are only sent to the current interface,
# and are placed in the unanswered_requests dictionary
client_req = self.unanswered_requests.pop(message_id, None)
if client_req:
assert interface == self.interface
callbacks = [client_req[2]]
else:
callbacks = []
# Copy the request method and params to the response
response['method'] = method
response['params'] = params
# Only once we've received a response to an addr subscription
# add it to the list; avoids double-sends on reconnection
if method == 'blockchain.address.subscribe':
self.subscribed_addresses.add(params[0])
else:
if not response: # Closed remotely / misbehaving
self.connection_down(interface.server)
break
# Rewrite response shape to match subscription request response
method = response.get('method')
params = response.get('params')
k = self.get_index(method, params)
if method == 'blockchain.headers.subscribe':
response['result'] = params[0]
response['params'] = []
elif method == 'blockchain.address.subscribe':
response['params'] = [params[0]] # addr
response['result'] = params[1]
callbacks = self.subscriptions.get(k, [])
# update cache if it's a subscription
if method.endswith('.subscribe'):
self.sub_cache[k] = response
# Response is now in canonical form
self.process_response(interface, response, callbacks)
def send(self, messages, callback):
'''Messages is a list of (method, params) tuples'''
with self.lock:
self.pending_sends.append((messages, callback))
def process_pending_sends(self):
# Requests needs connectivity. If we don't have an interface,
# we cannot process them.
if not self.interface:
return
with self.lock:
sends = self.pending_sends
self.pending_sends = []
for messages, callback in sends:
for method, params in messages:
r = None
if method.endswith('.subscribe'):
k = self.get_index(method, params)
# add callback to list
l = self.subscriptions.get(k, [])
if callback not in l:
l.append(callback)
self.subscriptions[k] = l
# check cached response for subscriptions
r = self.sub_cache.get(k)
if r is not None:
util.print_error("cache hit", k)
callback(r)
else:
message_id = self.queue_request(method, params)
self.unanswered_requests[message_id] = method, params, callback
def unsubscribe(self, callback):
'''Unsubscribe a callback to free object references to enable GC.'''
# Note: we can't unsubscribe from the server, so if we receive
# subsequent notifications process_response() will emit a harmless
# "received unexpected notification" warning
with self.lock:
for v in self.subscriptions.values():
if callback in v:
v.remove(callback)
def connection_down(self, server):
'''A connection to server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
self.disconnected_servers.add(server)
if server == self.default_server:
self.set_status('disconnected')
if server in self.interfaces:
self.close_interface(self.interfaces[server])
self.heights.pop(server, None)
self.notify('interfaces')
def new_interface(self, server, socket):
self.add_recent_server(server)
self.interfaces[server] = interface = Interface(server, socket)
self.queue_request('blockchain.headers.subscribe', [], interface)
if server == self.default_server:
self.switch_to_interface(server)
self.notify('interfaces')
def maintain_sockets(self):
'''Socket maintenance.'''
# Responses to connection attempts?
while not self.socket_queue.empty():
server, socket = self.socket_queue.get()
if server in self.connecting:
self.connecting.remove(server)
if socket:
self.new_interface(server, socket)
else:
self.connection_down(server)
# Send pings and shut down stale interfaces
for interface in self.interfaces.values():
if interface.has_timed_out():
self.connection_down(interface.server)
elif interface.ping_required():
params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
self.queue_request('server.version', params, interface)
now = time.time()
# nodes
if len(self.interfaces) + len(self.connecting) < self.num_server:
self.start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.print_error('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
# main interface
if not self.is_connected():
if self.auto_connect:
if not self.is_connecting():
self.switch_to_random_interface()
else:
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
self.switch_to_interface(self.default_server)
def request_chunk(self, interface, data, idx):
interface.print_error("requesting chunk %d" % idx)
self.queue_request('blockchain.block.get_chunk', [idx], interface)
data['chunk_idx'] = idx
data['req_time'] = time.time()
def on_get_chunk(self, interface, response):
'''Handle receiving a chunk of block headers'''
if self.bc_requests:
req_if, data = self.bc_requests[0]
req_idx = data.get('chunk_idx')
# Ignore unsolicited chunks
if req_if == interface and req_idx == response['params'][0]:
idx = self.blockchain.connect_chunk(req_idx, response['result'])
# If not finished, get the next chunk
if idx < 0 or self.get_local_height() >= data['if_height']:
self.bc_requests.popleft()
self.notify('updated')
else:
self.request_chunk(interface, data, idx)
def request_header(self, interface, data, height):
interface.print_error("requesting header %d" % height)
self.queue_request('blockchain.block.get_header', [height], interface)
data['header_height'] = height
data['req_time'] = time.time()
if not 'chain' in data:
data['chain'] = []
def on_get_header(self, interface, response):
'''Handle receiving a single block header'''
if self.blockchain.downloading_headers:
return
if self.bc_requests:
req_if, data = self.bc_requests[0]
req_height = data.get('header_height', -1)
# Ignore unsolicited headers
if req_if == interface and req_height == response['params'][0]:
next_height = self.blockchain.connect_header(data['chain'], response['result'])
# If not finished, get the next header
if next_height in [True, False]:
self.bc_requests.popleft()
if next_height:
self.switch_lagging_interface(interface.server)
self.notify('updated')
else:
interface.print_error("header didn't connect, dismissing interface")
interface.close()
else:
self.request_header(interface, data, next_height)
def bc_request_headers(self, interface, data):
'''Send a request for the next header, or a chunk of them,
if necessary.
'''
if self.blockchain.downloading_headers:
return False
local_height, if_height = self.get_local_height(), data['if_height']
if if_height <= local_height:
return False
elif if_height > local_height + 50:
self.request_chunk(interface, data, (local_height + 1) / 2016)
else:
self.request_header(interface, data, if_height)
return True
def handle_bc_requests(self):
'''Work through each interface that has notified us of a new header.
Send it requests if it is ahead of our blockchain object.
'''
while self.bc_requests:
interface, data = self.bc_requests.popleft()
# If the connection was lost move on
if not interface in self.interfaces.values():
continue
req_time = data.get('req_time')
if not req_time:
# No requests sent yet. This interface has a new height.
# Request headers if it is ahead of our blockchain
if not self.bc_request_headers(interface, data):
continue
elif time.time() - req_time > 20:
interface.print_error("blockchain request timed out")
self.connection_down(interface.server)
continue
# Put updated request state back at head of deque
self.bc_requests.appendleft((interface, data))
break
def wait_on_sockets(self):
# Python docs say Windows doesn't like empty selects.
# Sleep to prevent busy looping
if not self.interfaces:
time.sleep(0.1)
return
rin = [i for i in self.interfaces.values()]
win = [i for i in self.interfaces.values() if i.num_requests()]
try:
rout, wout, xout = select.select(rin, win, [], 0.1)
except socket.error as (code, msg):
if code == errno.EINTR:
return
raise
assert not xout
for interface in wout:
interface.send_requests()
for interface in rout:
self.process_responses(interface)
def run(self):
self.blockchain.init()
while self.is_running():
self.maintain_sockets()
self.wait_on_sockets()
self.handle_bc_requests()
self.run_jobs() # Synchronizer and Verifier
self.process_pending_sends()
self.stop_network()
self.on_stop()
def on_header(self, i, header):
height = header.get('block_height')
if not height:
return
self.heights[i.server] = height
self.merkle_roots[i.server] = header.get('merkle_root')
self.utxo_roots[i.server] = header.get('utxo_root')
# Queue this interface's height for asynchronous catch-up
self.bc_requests.append((i, {'if_height': height}))
if i == self.interface:
self.switch_lagging_interface()
self.notify('updated')
def get_header(self, tx_height):
return self.blockchain.read_header(tx_height)
def get_local_height(self):
return self.blockchain.height()
def synchronous_get(self, request, timeout=30):
queue = Queue.Queue()
self.send([request], queue.put)
try:
r = queue.get(True, timeout)
except Queue.Empty:
raise BaseException('Server did not answer')
if r.get('error'):
raise BaseException(r.get('error'))
return r.get('result')
def broadcast(self, tx, timeout=30):
tx_hash = tx.txid()
try:
out = self.synchronous_get(('blockchain.transaction.broadcast', [str(tx)]), timeout)
except BaseException as e:
return False, "error: " + str(e)
if out != tx_hash:
return False, "error: " + out
return True, out
| [
"you@example.com"
] | you@example.com |
2b2602042f1ed0d95c722a129a06ec21856cab22 | cc90d98a64693ca4542c999b5d2241b60eb33aac | /Problem62-3.py | e12955f5179fe407cd38b8141c64e187aee5cac8 | [] | no_license | Nan-Do/eulerproject | 1f63b23a4d4e344c8525238b2333920e733b03c9 | d33033d6af10d1aca8f7db9bcf187ef8f6005040 | refs/heads/master | 2021-01-10T15:51:39.594159 | 2016-04-14T05:41:16 | 2016-04-14T05:41:16 | 48,170,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | from itertools import count, permutations
from math import ceil
def gen_cubes():
for n in count(start=1):
yield n ** 3
def is_cube(n):
v = ceil(pow(n, (1/3.)))
return (v ** 3) == n
def check_cube(number, limit):
n_str = str(number)
count = 0
repeated = set()
for n in set(permutations(n_str)):
if n[0] = '0' or n in repeated:
continue
repeated.add(n)
if is_cube(int(p_str)):
count += 1
if count == limit:
return True
return False
for n in gen_cubes():
if check_cube(n, 4):
print n
break
| [
"icemanf@gmail.com"
] | icemanf@gmail.com |
18122f8ba0ea425a2f59eac84a1b4f2b379a77d3 | 5204b7b60f1780e2af1bd785beed4145f8d38d83 | /python/Gohan/core/__init__.py | 123b3426cb27438160bffca3d7edf46567d62acd | [] | no_license | sdss/mangadesign | e5001c30db25e6efe9a439359e0e67fd9b5266e4 | 6852432aeb682b19d46eff22f8cf57bbac272b7e | refs/heads/master | 2021-06-08T16:42:33.819938 | 2020-08-24T14:58:13 | 2020-08-24T14:58:13 | 89,097,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | from .configuration import get_config
| [
"gallegoj@uw.edu"
] | gallegoj@uw.edu |
ef7f5dea2b7f0cf33e446cecaaa9707330e93d6b | e3365cc72f7bfbc7d95066a1b84e332544f41803 | /Http_network_crawling.py | 4c50279a5ae078c3d3a4119a86b0fd11b2333834 | [] | no_license | webshell520/My_Python_Tools | b419a11bff15f965a6b99b165ee5bbdce0c18e40 | 1a04a6f1e4181f2cc588c8745b69c09620fd24c3 | refs/heads/master | 2020-03-14T06:36:12.842354 | 2017-10-20T09:04:01 | 2017-10-20T09:04:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,582 | py | #!/usr/bin/env python
#-*-coding=utf-8-*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import re
import sys
import Queue
import threading
import argparse
import requests
from IPy import IP
printLock = threading.Semaphore(1) #lock Screen print
TimeOut = 5 #request timeout
#User-Agent
header = {'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36','Connection':'close'}
class crawl(object):
def __init__(self,ip,filename,port,threadnum,writename='_result.txt'):
self.queue=Queue.Queue()
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0',
'Accept': '*/*',
'Referer': 'http://www.google.com',
'Cookie': 'whoami=21232f297a57a5a743894a0e4a801fc3'
}
self.ips=ip
self.ports=port
self.threadnum=threadnum
self.writename=writename
self.filename=filename
self.print_lock=threading.Lock()
def _request(self,url):
try:
return requests.get(url, timeout=10,headers=self.headers)
except requests.exceptions.ConnectTimeout:
return False
except requests.exceptions.ConnectionError:
return False
except requests.exceptions.Timeout:
return False
def load_queue(self):
if self.ips !='':
self.ips= list(IP(self.ips))
for ip in self.ips:
for port in self.ports:
self.queue.put('http://'+str(ip)+':'+str(port))
if self.filename !='':
with open(self.filename) as file:
domains=file.readlines()
for domain in domains:
for port in self.ports:
domain=domain.strip()
if 'http' not in domain:
self.queue.put('http://'+domain+':'+str(port))
else:
self.queue.put(domain+ ':' + str(port))
def _write_file(self,content):
with open(self.writename,'a+') as file:
file.write(content+'\n')
def _message(self,header,_text):
if header:
try:
Server = header['Server']
except Exception:
Server = 'Unknow'
try:
Powered = header['X-Powered-By']
except:
Powered = 'Unknow'
print Server, Powered
if _text:
title=re.search('<title>(.*?)</title>',_text)
if title:
print title.group(1)[:30]
def scan(self):
while True:
if self.queue.empty():
break
target=self.queue.get()
start=True
with self.print_lock:
#print target
pass
html=self._request(target)
try:#如果返回bool就不进行扫描了
html.status_code
except TypeError:
start=False
except AttributeError:
start=False
with self.print_lock:
if start:
try:
banner=html.headers['server']
except KeyError:
banner='Unknow'
try:
Powered=html.headers['X-Powered-By']
except KeyError:
Powered='Unknow'
title=re.search(r'<title>(.*)</title>',html.text)
if title:
title= title.group(1)
else:
title='None'
_write=u'【'+target+u'】\t【'+Powered+u'】\t【'+banner+u'】\t【'+title+u'】'
self._write_file(_write)
message= "|%-26s|%-8s|%-10s|%-20s|" % (target[:26], Powered[:8], banner[:10], title[:20])
print message
print '+'+'+'.rjust(27,'-')+'+'.rjust(10,'-')+'+'.rjust(11,'-')+'+'.rjust(21,'-')
def run(self):
self.load_queue()
print '+'+'URL'.center(26,'-')+'+'+'Powered'.center(10,'-')+'+'+'BANNER'.center(11,'-')+'+'+'TITLE'.center(20,'-')+'+'
threads=[]
for i in range(int(self.threadnum)):
t=threading.Thread(target=self.scan)
threads.append(t)
t.start()
for i in threads:
i.join()
parse = argparse.ArgumentParser()
parse.add_argument("--ip", dest="ip",default='',metavar='\t --ip 123.45.25.0/24')
parse.add_argument("--file", dest="filename", default='',metavar='\t--file scan.txt')
parse.add_argument("--ports", dest="ports", default='80',metavar='\t--ports 80,8080(default 80)')
parse.add_argument("--threadnum", dest="threadnum", default=30, type=int,metavar='\t --threadnum 30 (default 30)')
parse.add_argument("--save", dest="save",default='scan_result.txt',metavar='\t --save res.txt')
args = parse.parse_args()
def main(args):
ip=args.ip
file=args.filename
port=args.ports.split(',')
threadnum=args.threadnum
save=args.save
r = crawl(ip=ip, filename=file, port=port,threadnum=threadnum,writename=save)
r.run()
if __name__ == "__main__":
if len(sys.argv)>1:
main(args)
else:
parse.print_help()
| [
"noreply@github.com"
] | noreply@github.com |
175cd537ba734aea16b54646d227d7f043eae53f | 3027a838581e2b0778bd6ae40f9a6c72017b3b0d | /loss.py | 84c014beac4352f30db99c0f34f0a9b4f0f3262b | [] | no_license | arthur-qiu/robust | 2617adf3be8ea24592990e66b35123d02b0db045 | 3f40b45a740a1d3f2ba81a18e2cb510fe613d616 | refs/heads/master | 2020-12-04T12:08:52.665675 | 2020-02-26T10:37:34 | 2020-02-26T10:37:34 | 231,758,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,164 | py | import numpy as np
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
def Entropy(input_):
bs = input_.size(0)
epsilon = 1e-5
entropy = -input_ * torch.log(input_ + epsilon)
entropy = torch.sum(entropy, dim=1)
return entropy
def grl_hook(coeff):
def fun1(grad):
return -coeff*grad.clone()
return fun1
def CDAN(input_list, ad_net, entropy=None, coeff=None, random_layer=None):
softmax_output = input_list[1].detach()
feature = input_list[0]
if random_layer is None:
op_out = torch.bmm(softmax_output.unsqueeze(2), feature.unsqueeze(1))
ad_out = ad_net(op_out.view(-1, softmax_output.size(1) * feature.size(1)))
else:
random_out = random_layer.forward([feature, softmax_output])
ad_out = ad_net(random_out.view(-1, random_out.size(1)))
batch_size = softmax_output.size(0) // 2
dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda()
# if ad_out.shape[0] != 128 or dc_target.shape[0] != 128:
# print(ad_out.shape)
# print(dc_target.shape)
# print(softmax_output.shape)
# print(feature.shape)
if entropy is not None:
entropy.register_hook(grl_hook(coeff))
entropy = 1.0+torch.exp(-entropy)
source_mask = torch.ones_like(entropy)
source_mask[feature.size(0)//2:] = 0
source_weight = entropy*source_mask
target_mask = torch.ones_like(entropy)
target_mask[0:feature.size(0)//2] = 0
target_weight = entropy*target_mask
weight = source_weight / torch.sum(source_weight).detach().item() + \
target_weight / torch.sum(target_weight).detach().item()
return torch.sum(weight.view(-1, 1) * nn.BCELoss(reduction='none')(ad_out, dc_target)) / torch.sum(weight).detach().item()
else:
return nn.BCELoss()(ad_out, dc_target)
def DANN(features, ad_net):
ad_out = ad_net(features)
batch_size = ad_out.size(0) // 2
dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda()
return nn.BCELoss()(ad_out, dc_target)
| [
"Arthur"
] | Arthur |
8ecc7ec6faf9146994bb71be5fc8aaf8d245c4bc | 0b7309cc322def6bac3abb76ec1db25a0a7bee98 | /jd_qjd.py | 57e6d938295d582509a024a573939082c6c69ba8 | [] | no_license | yaotootmd/linmudaye | 50f372db9ec6087e4961ac9f68d24df329adfbe6 | 724b4a463a510b4b6fd3c33e0b5ec726e39aca99 | refs/heads/main | 2023-07-02T07:15:21.134247 | 2021-08-05T01:49:15 | 2021-08-05T01:49:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,040 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*
# cron 5 0,8 * * * jd_qjd.py
#全民抢京豆(7.22-7.31)
'''
项目名称: JD-Script / jd_qjd
Author: Curtin
功能:全民抢京豆(7.22-7.31):https://h5.m.jd.com/rn/3MQXMdRUTeat9xqBSZDSCCAE9Eqz/index.html?has_native=0
满160豆需要20人助力,每个用户目前只能助力2次不同的用户。
Date: 2021/7/3 上午10:02
TG交流 https://t.me/topstyle996
TG频道 https://t.me/TopStyle2021
update: 2021.7.24 14:21
* 修复了助力活动不存在、增加了随机UA(如果未定义ua则启用随机UA)
* 新增推送
* 修复0点不能开团
* 兼容pin为中文转码编码
'''
# print("全民抢京豆(7.2-7.15)--活动已结束\nTG交流 https://t.me/topstyle996\nTG频道 https://t.me/TopStyle2021")
# exit(0)
# Cookie lookup order: the [JDCookies.txt] file first, then the ENV variable JD_COOKIE='ck1&ck2', and finally the in-script value below.
cookies = ''
qjd_zlzh = ['Your JD_User', '买买买']
# Environment variables for the notification services (all optional):
# export BARK=''                 # Bark push service (search the Apple App Store);
# export SCKEY=''                # ServerChan SCKEY;
# export TG_BOT_TOKEN=''         # Telegram bot token;
# export TG_USER_ID=''           # Telegram user id;
# export TG_API_HOST=''          # Telegram proxy API host;
# export TG_PROXY_IP=''          # Telegram proxy IP;
# export TG_PROXY_PORT=''        # Telegram proxy port;
# export DD_BOT_ACCESS_TOKEN=''  # DingTalk bot access token;
# export DD_BOT_SECRET=''        # DingTalk bot secret;
# export QQ_SKEY=''              # QQ bot SKEY;
# export QQ_MODE=''              # QQ bot mode;
# export QYWX_AM=''              # WeCom (enterprise WeChat); http://note.youdao.com/s/HMiudGkb
# export PUSH_PLUS_TOKEN=''      # WeChat PushPlus token;
#####
# Tunable parameters below.
# User-Agent may be customized; expected format: jdapp;iPhone;10.0.4;13.1.1;93b4243eeb1af72d142991d85cba75c66873dca5;network/wifi;ADID/8679C062-A41A-4A25-88F1-50A7A3EEF34A;model/iPhone13,1;addressid/3723896896;appBuild/167707;jdSupportDarkMode/0
UserAgent = ''
# Throttle between requests (seconds).
sleepNum = 0.1
import os, re, sys
import random, string
try:
import requests
except Exception as e:
print(e, "\n缺少requests 模块,请执行命令安装:python3 -m pip install requests")
exit(3)
from urllib.parse import unquote
import json
import time
requests.packages.urllib3.disable_warnings()  # silence InsecureRequestWarning from verify=False requests
pwd = os.path.dirname(os.path.abspath(__file__)) + os.sep  # directory of this script, with trailing separator
t = time.time()  # script start timestamp
aNum = 0  # global counter — used later in the script; usage not visible in this chunk
beanCount = 0  # global counter — presumably total beans collected; verify against later code
userCount = {}  # global mapping — presumably per-user stats; verify against later code
## 获取通知服务
class msg(object):
    """Print a line and accumulate it into the global ``msg_info`` buffer;
    also responsible for installing the external ``sendNotify`` helper.

    Typical use in this script: ``msg(text)`` prints *text* and appends it
    to the notification buffer; ``msg("").main()`` binds the global ``send``
    function (downloading sendNotify.py first when needed).
    """
    def __init__(self, m):
        # Store the message and immediately print/accumulate it.
        self.str_msg = m
        self.message()
    def message(self):
        global msg_info
        print(self.str_msg)
        try:
            # Append to the existing buffer...
            msg_info = "{}\n{}".format(msg_info, self.str_msg)
        except:
            # ...or create it on the very first call, when the global
            # msg_info does not exist yet (NameError path).
            msg_info = "{}".format(self.str_msg)
        sys.stdout.flush()
    def getsendNotify(self, a=0):
        """Download sendNotify.py from Gitee, retrying on failure.

        ``a`` is the attempt counter; both network errors and an unexpected
        payload recurse with an incremented counter and give up silently
        once ``a`` reaches 5.
        """
        if a == 0:
            a += 1
        try:
            url = 'https://gitee.com/curtinlv/Public/raw/master/sendNotify.py'
            response = requests.get(url)
            # Sanity-check the payload before overwriting the local helper.
            if 'curtinlv' in response.text:
                with open('sendNotify.py', "w+", encoding="utf-8") as f:
                    f.write(response.text)
            else:
                if a < 5:
                    a += 1
                    return self.getsendNotify(a)
                else:
                    pass
        except:
            if a < 5:
                a += 1
                return self.getsendNotify(a)
            else:
                pass
    def main(self):
        """Bind the global ``send`` function from sendNotify.py, downloading
        the helper first when the file is missing or fails to import."""
        global send
        cur_path = os.path.abspath(os.path.dirname(__file__))
        sys.path.append(cur_path)
        if os.path.exists(cur_path + "/sendNotify.py"):
            try:
                from sendNotify import send
            except:
                # Local copy is broken — re-download and retry once.
                self.getsendNotify()
                try:
                    from sendNotify import send
                except:
                    print("加载通知服务失败~")
        else:
            self.getsendNotify()
            try:
                from sendNotify import send
            except:
                print("加载通知服务失败~")
###################
msg("").main()  # load the notification service (binds the global `send`)
##############
def getEnvs(label):
    """Coerce an environment-variable string into a native Python value.

    Conversion rules, checked in order (mirroring how the rest of the
    script consumes its config values):
      * 'True' / 'true' / 'yes' / 'Yes'  -> True
      * 'False' / 'false' / 'no' / 'No'  -> False
      * contains '.'                     -> float(label)
      * contains '&'                     -> label.split('&')
      * contains '@'                     -> label.split('@')
      * otherwise                        -> int(label)

    Anything that fails to convert (e.g. '1.2.3', 'hello', non-string
    input) is returned unchanged.
    """
    # Boolean keywords first — plain membership tests, nothing can raise here.
    if label in ('True', 'yes', 'true', 'Yes'):
        return True
    if label in ('False', 'no', 'false', 'No'):
        return False
    try:
        if '.' in label:
            return float(label)
        if '&' in label:
            return label.split('&')
        if '@' in label:
            return label.split('@')
        return int(label)
    except (ValueError, TypeError):
        # Not numeric (or not a string at all) — hand the value back as-is.
        return label
class getJDCookie(object):
# 适配各种平台环境ck
def getckfile(self):
global v4f
curf = pwd + 'JDCookies.txt'
v4f = '/jd/config/config.sh'
ql_new = '/ql/config/env.sh'
ql_old = '/ql/config/cookie.sh'
if os.path.exists(curf):
with open(curf, "r", encoding="utf-8") as f:
cks = f.read()
f.close()
r = re.compile(r"pt_key=.*?pt_pin=.*?;", re.M | re.S | re.I)
cks = r.findall(cks)
if len(cks) > 0:
return curf
else:
pass
if os.path.exists(ql_new):
print("当前环境青龙面板新版")
return ql_new
elif os.path.exists(ql_old):
print("当前环境青龙面板旧版")
return ql_old
elif os.path.exists(v4f):
print("当前环境V4")
return v4f
return curf
# 获取cookie
def getCookie(self):
global cookies
ckfile = self.getckfile()
try:
if os.path.exists(ckfile):
with open(ckfile, "r", encoding="utf-8") as f:
cks = f.read()
f.close()
if 'pt_key=' in cks and 'pt_pin=' in cks:
r = re.compile(r"pt_key=.*?pt_pin=.*?;", re.M | re.S | re.I)
cks = r.findall(cks)
if len(cks) > 0:
if 'JDCookies.txt' in ckfile:
print("当前获取使用 JDCookies.txt 的cookie")
cookies = ''
for i in cks:
if 'pt_key=xxxx' in i:
pass
else:
cookies += i
return
else:
with open(pwd + 'JDCookies.txt', "w", encoding="utf-8") as f:
cks = "#多账号换行,以下示例:(通过正则获取此文件的ck,理论上可以自定义名字标记ck,也可以随意摆放ck)\n账号1【Curtinlv】cookie1;\n账号2【TopStyle】cookie2;"
f.write(cks)
f.close()
if "JD_COOKIE" in os.environ:
if len(os.environ["JD_COOKIE"]) > 10:
cookies = os.environ["JD_COOKIE"]
print("已获取并使用Env环境 Cookie")
except Exception as e:
print(f"【getCookie Error】{e}")
# 检测cookie格式是否正确
def getUserInfo(self, ck, pinName, userNum):
url = 'https://me-api.jd.com/user_new/info/GetJDUserInfoUnion?orgFlag=JD_PinGou_New&callSource=mainorder&channel=4&isHomewhite=0&sceneval=2&sceneval=2&callback=GetJDUserInfoUnion'
headers = {
'Cookie': ck,
'Accept': '*/*',
'Connection': 'close',
'Referer': 'https://home.m.jd.com/myJd/home.action',
'Accept-Encoding': 'gzip, deflate, br',
'Host': 'me-api.jd.com',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Mobile/15E148 Safari/604.1',
'Accept-Language': 'zh-cn'
}
try:
resp = requests.get(url=url, verify=False, headers=headers, timeout=60).text
r = re.compile(r'GetJDUserInfoUnion.*?\((.*?)\)')
result = r.findall(resp)
userInfo = json.loads(result[0])
nickname = userInfo['data']['userInfo']['baseInfo']['nickname']
return ck, nickname
except Exception:
context = f"账号{userNum}【{pinName}】Cookie 已失效!请重新获取。"
print(context)
return ck, False
    def iscookie(self):
        """Validate every configured cookie against JD.

        :return: ``(cookiesList, userNameList, pinNameList)`` for the cookies
            that are still alive.  Exits the process when the cookie string is
            malformed (exit code 4) or when no cookie works at all (exit
            code 3).
        """
        cookiesList = []
        userNameList = []
        pinNameList = []
        if 'pt_key=' in cookies and 'pt_pin=' in cookies:
            # Split the combined cookie string into individual cookies.
            r = re.compile(r"pt_key=.*?pt_pin=.*?;", re.M | re.S | re.I)
            result = r.findall(cookies)
            if len(result) >= 1:
                print("您已配置{}个账号".format(len(result)))
                u = 1
                for i in result:
                    r = re.compile(r"pt_pin=(.*?);")
                    pinName = r.findall(i)
                    pinName = unquote(pinName[0])
                    # Fetch the account name (this also validates the cookie).
                    ck, nickname = self.getUserInfo(i, pinName, u)
                    if nickname != False:
                        cookiesList.append(ck)
                        userNameList.append(nickname)
                        pinNameList.append(pinName)
                    else:
                        # Dead cookie: skip it but keep the account counter right.
                        u += 1
                        continue
                    u += 1
                if len(cookiesList) > 0 and len(userNameList) > 0:
                    return cookiesList, userNameList, pinNameList
                else:
                    print("没有可用Cookie,已退出")
                    exit(3)
            else:
                print("cookie 格式错误!...本次操作已退出")
                exit(4)
        else:
            print("cookie 格式错误!...本次操作已退出")
            exit(4)
# Load cookies once at import time.
getCk = getJDCookie()
getCk.getCookie()
# Special handling for the V4 environment: parse ``export NAME=value`` lines
# from the V4 config file into variables of this module.
if os.path.exists(v4f):
    try:
        with open(v4f, 'r', encoding='utf-8') as f:
            curenv = locals()
            for i in f.readlines():
                r = re.compile(r'^export\s(.*?)=[\'\"]?([\w\.\-@#!&=_,\[\]\{\}\(\)]{1,})+[\'\"]{0,1}$', re.M | re.S | re.I)
                r = r.findall(i)
                if len(r) > 0:
                    for i in r:
                        if i[0] != 'JD_COOKIE':
                            # NOTE(review): writing through ``locals()`` only
                            # works reliably at module scope — confirm this
                            # runs at the top level of the script.
                            curenv[i[0]] = getEnvs(i[1])
    except:
        pass
# Accounts to boost, taken from the ``qjd_zlzh`` env var; the value is a
# list-style string (e.g. "[name1, name2]") that is stripped and split here.
if "qjd_zlzh" in os.environ:
    if len(os.environ["qjd_zlzh"]) > 1:
        qjd_zlzh = os.environ["qjd_zlzh"]
        qjd_zlzh = qjd_zlzh.replace('[', '').replace(']', '').replace('\'', '').replace(' ', '').split(',')
        print("已获取并使用Env环境 qjd_zlzh:", qjd_zlzh)
def userAgent():
    """Return the User-Agent string to use for JD requests.

    A configured global ``UserAgent`` wins; otherwise a plausible iPhone
    ``jdapp`` UA is fabricated with randomised device fields.
    """
    if UserAgent:
        return UserAgent
    device_id = ''.join(random.sample('123456789abcdef123456789abcdef123456789abcdef123456789abcdef', 40))
    ios_version = ''.join(random.sample(["14.5.1", "14.4", "14.3", "14.2", "14.1", "14.0.1", "13.7", "13.1.2", "13.1.1"], 1))
    iphone_model = ''.join(random.sample(["8", "9", "10", "11", "12", "13"], 1))
    return f'jdapp;iPhone;10.0.4;{ios_version};{device_id};network/wifi;ADID/8679C062-A41A-4A25-88F1-50A7A3EEF34A;model/iPhone{iphone_model},1;addressid/3723896896;appBuild/167707;jdSupportDarkMode/0'
def getShareCode(ck):
    """Fetch the sign-in group data for the account behind *ck*.

    Triggers the bean group sign-in and parses the JSONP response.  When the
    payload lacks the expected fields the call retries itself up to five
    times, tracked through the module-global counter ``aNum``.

    :param ck: JD cookie string of the account.
    :returns: ``(groupCode, shareCode, sumBeanNumStr, activityId)``.  All
        four are ``0`` when the data could not be obtained, so callers can
        test ``groupCode == 0``.
    """
    global aNum
    try:
        v_num1 = ''.join(random.sample(["1", "2", "3", "4", "5", "6", "7", "8", "9"], 1)) + ''.join(random.sample(string.digits, 4))
        url1 = f'https://api.m.jd.com/client.action?functionId=signGroupHit&body=%7B%22activeType%22%3A2%7D&appid=ld&client=apple&clientVersion=10.0.6&networkType=wifi&osVersion=14.3&uuid=&jsonp=jsonp_' + str(int(round(t * 1000))) + '_' + v_num1
        url = 'https://api.m.jd.com/client.action?functionId=signBeanGroupStageIndex&body=%7B%22monitor_refer%22%3A%22%22%2C%22rnVersion%22%3A%223.9%22%2C%22fp%22%3A%22-1%22%2C%22shshshfp%22%3A%22-1%22%2C%22shshshfpa%22%3A%22-1%22%2C%22referUrl%22%3A%22-1%22%2C%22userAgent%22%3A%22-1%22%2C%22jda%22%3A%22-1%22%2C%22monitor_source%22%3A%22bean_m_bean_index%22%7D&appid=ld&client=apple&clientVersion=&networkType=&osVersion=&uuid=&jsonp=jsonp_' + str(int(round(t * 1000))) + '_' + v_num1
        head = {
            'Cookie': ck,
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Referer': 'https://h5.m.jd.com/rn/3MQXMdRUTeat9xqBSZDSCCAE9Eqz/index.html?has_native=0',
            'Accept-Encoding': 'gzip, deflate, br',
            'Host': 'api.m.jd.com',
            'User-Agent': userAgent(),
            'Accept-Language': 'zh-cn'
        }
        # Warm-up hit first, then the request whose JSONP body we parse.
        requests.get(url1, headers=head, verify=False, timeout=30)
        resp = requests.get(url=url, headers=head, verify=False, timeout=30).text
        r = re.compile(r'jsonp_.*?\((.*?)\)\;', re.M | re.S | re.I)
        result = r.findall(resp)
        jsonp = json.loads(result[0])
        try:
            groupCode = jsonp['data']['groupCode']
            shareCode = jsonp['data']['shareCode']
            activityId = jsonp['data']['activityMsg']['activityId']
            sumBeanNumStr = int(jsonp['data']['sumBeanNumStr'])
        except Exception:
            if aNum < 5:
                aNum += 1
                return getShareCode(ck)
            # Retries exhausted: report sentinel values.
            groupCode = 0
            shareCode = 0
            sumBeanNumStr = 0
            activityId = 0
        aNum = 0
        return groupCode, shareCode, sumBeanNumStr, activityId
    except Exception as e:
        print("getShareCode Error", e)
        # BUG FIX: this path previously fell through and returned None,
        # which made the caller's 4-tuple unpacking raise TypeError.
        # Return the same sentinel tuple the retry path uses instead.
        return 0, 0, 0, 0
def helpCode(ck, groupCode, shareCode, u, unum, user, activityId):
    """Use the account behind *ck* to boost (助力) *user*'s sign-in group.

    :param ck: cookie of the helping account.
    :param groupCode: group code from :func:`getShareCode`.
    :param shareCode: share code from :func:`getShareCode`.
    :param u: display name of the helping account (log output only).
    :param unum: 1-based index of the helping account (log output only).
    :param user: display name of the account being helped.
    :param activityId: activity id from :func:`getShareCode`.
    :returns: ``True`` when the target group is already full (the caller can
        stop boosting this account), ``False`` otherwise; ``None`` when the
        request itself failed.
    """
    try:
        v_num1 = ''.join(random.sample(["1", "2", "3", "4", "5", "6", "7", "8", "9"], 1)) + ''.join(random.sample(string.digits, 4))
        headers = {
            'Cookie': ck,
            'Accept': '*/*',
            'Connection': 'keep-alive',
            'Referer': f'https://h5.m.jd.com/rn/42yjy8na6pFsq1cx9MJQ5aTgu3kX/index.html?jklActivityId=115&source=SignSuccess&jklGroupCode={groupCode}&ad_od=1&jklShareCode={shareCode}',
            'Accept-Encoding': 'gzip, deflate, br',
            'Host': 'api.m.jd.com',
            'User-Agent': userAgent(),
            'Accept-Language': 'zh-cn'
        }
        url = 'https://api.m.jd.com/client.action?functionId=signGroupHelp&body=%7B%22activeType%22%3A2%2C%22groupCode%22%3A%22' + str(groupCode) + '%22%2C%22shareCode%22%3A%22' + shareCode + f'%22%2C%22activeId%22%3A%22{activityId}%22%2C%22source%22%3A%22guest%22%7D&appid=ld&client=apple&clientVersion=10.0.4&networkType=wifi&osVersion=13.7&uuid=&openudid=&jsonp=jsonp_{int(round(t * 1000))}_{v_num1}'
        resp = requests.get(url=url, headers=headers, verify=False, timeout=30).text
        # The endpoint answers with JSONP; extract the JSON payload.
        r = re.compile(r'jsonp_.*?\((.*?)\)\;', re.M | re.S | re.I)
        result = r.findall(resp)
        jsonp = json.loads(result[0])
        helpToast = jsonp['data']['helpToast']
        pageFlag = jsonp['data']['pageFlag']
        if pageFlag == 0:
            # Boost rejected; the toast message explains why.
            print(f"账号{unum}【{u}】助力失败! 原因:{helpToast}")
            if '满' in helpToast:  # "group is full" — stop boosting this target
                print(f"## 恭喜账号【{user}】团已满,今日累计获得160豆")
                return True
            return False
        else:
            if '火' in helpToast:  # "too busy" style soft failure
                print(f"账号{unum}【{u}】助力失败! 原因:{helpToast}")
            else:
                print(f"账号{unum}【{u}】{helpToast} , 您也获得1豆哦~")
            return False
    except Exception as e:
        print(f"helpCode Error ", e)
def start():
    """Entry point: validate all cookies, then boost each target account."""
    scriptName='### 全民抢京豆-助力 ###'
    print(scriptName)
    global cookiesList, userNameList, pinNameList, ckNum, beanCount, userCount
    cookiesList, userNameList, pinNameList = getCk.iscookie()
    for ckname in qjd_zlzh:
        # Resolve the target account either by nickname or by pt_pin.
        try:
            ckNum = userNameList.index(ckname)
        except Exception as e:
            try:
                ckNum = pinNameList.index(unquote(ckname))
            except:
                print(f"请检查被助力账号【{ckname}】名称是否正确?提示:助力名字可填pt_pin的值、也可以填账号名。")
                continue
        print(f"### 开始助力账号【{userNameList[int(ckNum)]}】###")
        groupCode, shareCode, sumBeanNumStr, activityId = getShareCode(cookiesList[ckNum])
        if groupCode == 0:
            msg(f"## {userNameList[int(ckNum)]} 获取互助码失败。请手动分享后再试~ 或建议早上再跑。")
            continue
        u = 0
        for i in cookiesList:
            if i == cookiesList[ckNum]:
                # Don't let the target account boost itself.
                u += 1
                continue
            result = helpCode(i, groupCode, shareCode, userNameList[u], u+1, userNameList[int(ckNum)], activityId)
            time.sleep(sleepNum)
            if result:
                break  # group is full — move on to the next target
            u += 1
        # Re-read the share data to pick up the bean total after boosting.
        groupCode, shareCode, sumBeanNumStr, activityId = getShareCode(cookiesList[ckNum])
        userCount[f'{userNameList[ckNum]}'] = sumBeanNumStr
        beanCount += sumBeanNumStr
    print("\n-------------------------")
    for i in userCount.keys():
        msg(f"账号【{i}】已抢京豆: {userCount[i]}")
    msg(f"## 今日累计获得 {beanCount} 京豆")
    # Push the accumulated summary through the notification channel.
    try:
        send(scriptName, msg_info)
    except:
        pass


if __name__ == '__main__':
    start()
| [
"noreply@github.com"
] | noreply@github.com |
bfa64414e10648e405e89258f858138cfe2bcc91 | f4e21b9a042577400689e83a7ae11c0eee13cecf | /gneiss/regression/tests/test_transformer.py | 3f7aa1cd6eebc62d528cecdf3407afee1faff1f6 | [] | no_license | ebolyen/gneiss | 8facaaffe9904c8641f418fdd1461c1ae447e593 | bb47be8805bf887afcc40b72365b062aa74ff823 | refs/heads/master | 2022-12-21T21:08:09.162341 | 2017-04-21T01:30:10 | 2017-04-21T01:30:10 | 88,930,099 | 0 | 0 | null | 2017-04-21T02:20:16 | 2017-04-21T02:20:16 | null | UTF-8 | Python | false | false | 2,346 | py | # ----------------------------------------------------------------------------
# Copyright (c) 2016--, gneiss development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
from gneiss.regression._format import (LinearRegressionFormat_g,
LinearMixedEffectsFormat_g)
from qiime2.plugin.testing import TestPluginBase
from gneiss.regression._ols import OLSModel
from gneiss.regression._mixedlm import LMEModel
import pandas.util.testing as pdt
class TestLinearRegressionTransformers(TestPluginBase):
    """Round-trip transformer tests for the OLS regression format."""

    package = "gneiss.regression.tests"

    def test_ols_model_to_regression_format(self):
        # Model -> on-disk format -> model must preserve the p-values.
        pickled = self.get_data_path('ols.pickle')
        to_format = self.get_transformer(OLSModel, LinearRegressionFormat_g)
        model = OLSModel.read_pickle(pickled)
        round_tripped = OLSModel.read_pickle(str(to_format(model)))
        pdt.assert_frame_equal(model.pvalues, round_tripped.pvalues)

    def test_regression_format_to_ols_model(self):
        # Format -> model must preserve the p-values as well.
        source, observed = self.transform_format(LinearRegressionFormat_g,
                                                 OLSModel, 'ols.pickle')
        expected = OLSModel.read_pickle(str(source))
        pdt.assert_frame_equal(expected.pvalues, observed.pvalues)
class TestLinearMixedEffectsTransformers(TestPluginBase):
    """Round-trip transformer tests for the linear mixed-effects format."""

    package = "gneiss.regression.tests"

    def test_lme_model_to_regression_format(self):
        # Model -> on-disk format -> model must preserve the p-values.
        pickled = self.get_data_path('lme.pickle')
        to_format = self.get_transformer(LMEModel,
                                         LinearMixedEffectsFormat_g)
        model = LMEModel.read_pickle(pickled)
        round_tripped = LMEModel.read_pickle(str(to_format(model)))
        pdt.assert_frame_equal(model.pvalues, round_tripped.pvalues)

    def test_regression_format_to_lme_model(self):
        # Format -> model must preserve the p-values as well.
        source, observed = self.transform_format(LinearMixedEffectsFormat_g,
                                                 LMEModel, 'lme.pickle')
        expected = LMEModel.read_pickle(str(source))
        pdt.assert_frame_equal(expected.pvalues, observed.pvalues)


if __name__ == '__main__':
    unittest.main()
| [
"jamietmorton@gmail.com"
] | jamietmorton@gmail.com |
3f3d24122a5ecb807bc70e05d545127bfeb290d9 | 5bf6ab98cc26a6f04389b11bb87a6e90385f50c2 | /app/user/test/test_user_api.py | fc57fd3fb7a2f3ec480b7879d61b62248cdc5869 | [
"MIT"
] | permissive | Rahatcseru2014/recipe-app-api | 04874ab93777001bcab164d4e7184893b0a0dfca | e4ab491361273025c9d4be6d5b753b4ff7ce66e6 | refs/heads/master | 2022-06-28T15:20:56.783604 | 2020-05-14T17:13:40 | 2020-05-14T17:13:40 | 256,877,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,337 | py | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')


def create_user(**params):
    """Create and return a user through the project's active user model."""
    user_model = get_user_model()
    return user_model.objects.create_user(**params)
class PublicUserAPITests(TestCase):
    """Tests for the public (unauthenticated) user API."""

    def setUp(self):
        # BUG FIX: this hook was previously named ``setup`` (lowercase), so
        # unittest never called it and ``APIClient`` was never installed.
        self.client = APIClient()

    def test_create_valid_user_success(self):
        """Creating a user with a valid payload succeeds."""
        payload = {
            'email': 'testuser01@gmail.com',
            'password': 'randompass123',
            'name': 'John Doe'
        }
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        user = get_user_model().objects.get(**res.data)
        self.assertTrue(user.check_password(payload['password']))
        # The password must never be echoed back in the response body.
        self.assertNotIn('password', res.data)

    def test_user_exists(self):
        """Creating a user that already exists fails."""
        payload = {
            'email': 'testuser01@gmail.com',
            'password': 'randompass123',
            'name': 'Test',
        }
        create_user(**payload)
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_password_too_short(self):
        """A password shorter than five characters is rejected."""
        payload = {
            'email': 'testuser01@gmail.com',
            'password': 'pwd',
            'name': 'Test',
        }
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        # The rejected request must not have created the user.
        user_exists = get_user_model().objects.filter(
            email=payload['email']
        ).exists()
        self.assertFalse(user_exists)

    def test_create_token_for_user(self):
        """A token is issued for valid credentials."""
        payload = {
            'email': 'testuser01@gmail.com',
            'password': 'testpass123',
            'name': 'Test',
        }
        create_user(**payload)
        res = self.client.post(TOKEN_URL, payload)
        self.assertIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_create_token_invalid_credentials(self):
        """No token is issued when the password is wrong."""
        create_user(
            email='testuser01@gmail.com',
            password='testpass123'
        )
        payload = {
            'email': 'testuser01@gmail.com',
            'password': 'testpss'
        }
        res = self.client.post(TOKEN_URL, payload)
        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_token_no_user(self):
        """No token is issued when the user does not exist."""
        payload = {
            'email': 'testuser01@gmail.com',
            'password': 'testpss'
        }
        res = self.client.post(TOKEN_URL, payload)
        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_token_missing_field(self):
        """Both email and password are required to obtain a token."""
        payload = {
            'email': 'testuser',
            'password': ''
        }
        res = self.client.post(TOKEN_URL, payload)
        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_user_unauthorized(self):
        """Authentication is required to read the profile endpoint."""
        res = self.client.get(ME_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserAPITests(TestCase):
    """Tests for user API endpoints that require authentication."""

    def setUp(self):
        # Every test runs as this freshly created, force-authenticated user.
        self.user = create_user(
            email='testuser@gmail.com',
            password='testpass',
            name='name'
        )
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)

    def test_retrieve_profile_success(self):
        """The logged-in user's own profile is returned."""
        response = self.client.get(ME_URL)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        expected = {
            'name': self.user.name,
            'email': self.user.email
        }
        self.assertEqual(response.data, expected)

    def test_post_me_not_allowed(self):
        """POST is rejected on the profile endpoint."""
        response = self.client.post(ME_URL, {})
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_update_user_profile(self):
        """PATCH updates name and password for the authenticated user."""
        changes = {
            'name': 'new name',
            'password': 'newpassword123',
        }
        response = self.client.patch(ME_URL, changes)
        self.user.refresh_from_db()
        self.assertEqual(self.user.name, changes['name'])
        self.assertTrue(self.user.check_password(changes['password']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| [
"rahat.javafx@gmail.com"
] | rahat.javafx@gmail.com |
271813ce9df854023fe3b6d50c40601bd44a2d32 | b80059648afab4474e567ec1035d63d060d9b3a6 | /src/analyze.py | e18911d2f1160107000f7ce93c5532bf18c7c900 | [
"MIT"
] | permissive | SteemData/classify.steemdata.com | 8b34d7ae9e666b9dfe9930c82dc347650356fb94 | 507d2d537a502701dd6e28c9581c132942084b7a | refs/heads/master | 2021-03-19T05:57:34.360839 | 2017-11-09T22:30:59 | 2017-11-09T22:30:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | import boto3
from easydict import EasyDict as ed
# AWS Rekognition settings used by the helpers below.
config = ed(
    region_name = 'us-west-2',
    # NOTE(review): the bucket name is not referenced in this chunk —
    # presumably used elsewhere; confirm before removing.
    s3_bucket_name = 'steem-hackaton-input'
)
# Shared Rekognition client, created once at import time.
rkg = boto3.client('rekognition', region_name=config.region_name)
def nsfw(img: bytes):
    """Run AWS Rekognition content moderation on *img* (raw image bytes)."""
    result = rkg.detect_moderation_labels(Image={'Bytes': img})
    return result['ModerationLabels']
def labels(img: bytes):
    """Detect up to 100 object labels (min. 80% confidence) in *img*."""
    result = rkg.detect_labels(
        Image={'Bytes': img},
        MaxLabels=100,
        MinConfidence=80)
    return result['Labels']
| [
"_@furion.me"
] | _@furion.me |
0d37df26911f7aa45fd992907792f711b760b1d3 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/classification/Gluon_ResNet50_v1d_for_PyTorch/timm/models/layers/involution.py | 97e83500b1f997b67fbd369776d069d277ac3bdb | [
"Apache-2.0",
"MIT",
"CC-BY-NC-4.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,501 | py | # Copyright [yyyy] [name of copyright owner]
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" PyTorch Involution Layer
Official impl: https://github.com/d-li14/involution/blob/main/cls/mmcls/models/utils/involution_naive.py
Paper: `Involution: Inverting the Inherence of Convolution for Visual Recognition` - https://arxiv.org/abs/2103.06255
"""
import torch.nn as nn
from .conv_bn_act import ConvBnAct
from .create_conv2d import create_conv2d
class Involution(nn.Module):
    """Involution layer (`Involution: Inverting the Inherence of Convolution
    for Visual Recognition`, https://arxiv.org/abs/2103.06255).

    Generates a per-position, per-group spatial kernel from the input itself
    and applies it over an unfolded neighbourhood, instead of using a static
    convolution weight.
    """

    def __init__(
            self,
            channels,
            kernel_size=3,
            stride=1,
            group_size=16,
            rd_ratio=4,
            norm_layer=nn.BatchNorm2d,
            act_layer=nn.ReLU,
    ):
        super(Involution, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.channels = channels
        self.group_size = group_size
        # Channels are processed in groups that share one generated kernel.
        # NOTE(review): assumes ``channels`` is divisible by ``group_size``
        # — confirm with callers.
        self.groups = self.channels // self.group_size
        # Bottleneck conv reducing channels by ``rd_ratio`` before kernel
        # generation.
        self.conv1 = ConvBnAct(
            in_channels=channels,
            out_channels=channels // rd_ratio,
            kernel_size=1,
            norm_layer=norm_layer,
            act_layer=act_layer)
        # 1x1 conv emitting kernel_size**2 weights per group and position.
        # (Also aliased as ``self.conv`` — presumably for state-dict
        # compatibility; confirm before removing the alias.)
        self.conv2 = self.conv = create_conv2d(
            in_channels=channels // rd_ratio,
            out_channels=kernel_size**2 * self.groups,
            kernel_size=1,
            stride=1)
        # Downsample the kernel-generation path only when stride == 2.
        self.avgpool = nn.AvgPool2d(stride, stride) if stride == 2 else nn.Identity()
        self.unfold = nn.Unfold(kernel_size, 1, (kernel_size-1)//2, stride)

    def forward(self, x):
        # Dynamic kernel from the (possibly pooled) input:
        # shape (B, groups * k*k, H, W).
        weight = self.conv2(self.conv1(self.avgpool(x)))
        B, C, H, W = weight.shape
        KK = int(self.kernel_size ** 2)
        # (B, groups, 1, k*k, H, W): broadcasts over the group_size axis.
        weight = weight.view(B, self.groups, KK, H, W).unsqueeze(2)
        # Unfolded input patches: (B, groups, group_size, k*k, H, W).
        out = self.unfold(x).view(B, self.groups, self.group_size, KK, H, W)
        # Weighted sum over the k*k neighbourhood, then back to (B, C, H, W).
        out = (weight * out).sum(dim=3).view(B, self.channels, H, W)
        return out
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
92b30df416917ad8948f5c9e2c0d9b33201ab768 | a99dabb67ca2a6a95f13820cd708595bef88a88a | /tests/test_temporal_k_max_pooling.py | 3eca31e3ea4d9bd66a54774e6ccc977cd4985d0f | [] | no_license | svoss/vdcnn-chainer | 2c93aad28324de1f894637ed295e3cfba9de8806 | 47fdf434d9773d5d27fce5492d0c0efbf64aeac3 | refs/heads/master | 2021-05-15T12:03:22.981262 | 2017-12-03T12:45:51 | 2017-12-03T12:45:51 | 108,402,829 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,714 | py | import unittest
import sys
import numpy as np
sys.path.append(".")
from chainer import testing
from chainer import gradient_check
from vdccnn.temporal_k_max_pooling import TemporalKMaxPooling, temporal_k_max_pooling
import chainer
from chainer import cuda
from six.moves import xrange
from chainer.testing import attr
@testing.parameterize(*testing.product({
    'dtype': [np.float32, np.float64],
}))
# sadly np.float16 seems not to be supported by cupy.sort
class TestTemporalKMaxPooling(unittest.TestCase):
    """CPU/GPU forward and backward tests for temporal k-max pooling."""

    def setUp(self):
        # Forward fixture: plant known maxima so the expected top-5 values
        # per row are unambiguous.
        x = np.random.randint(0, 9, size=(3, 100))
        x[0, [0, 5, 6, 35, 65]] = 11
        x[1] = [0 if i < 96 else 1 for i in xrange(100)]
        x[2, [1, 44, 55, 98]] = 10
        x[2, [4, 22, 99]] = 11
        x = np.array(x, dtype=self.dtype)
        self.forward_x = x  # some specific edge cases for forward call
        # Backward fixture: a shuffled permutation so every gradient entry
        # maps to a unique input position.
        self.backward_x = np.arange(3 * 25, dtype=self.dtype)
        np.random.shuffle(self.backward_x)
        self.backward_x = self.backward_x.reshape(3, 25)
        if self.dtype == np.int32:
            self.y_grad = np.random.randint(0, 1, size=(3, 5)).astype(self.dtype)
        else:
            self.y_grad = np.random.rand(3, 5).astype(self.dtype)
        # float16 would need looser tolerances (unused while cupy.sort
        # doesn't support np.float16).
        if self.dtype == np.float16:
            self.check_backward_options = {
                'atol': 1e-3, 'rtol': 1e-2}
            self.check_double_backward_options = {
                'atol': 1e-3, 'rtol': 1e-2}
        else:
            self.check_backward_options = {
                'atol': 1e-4, 'rtol': 1e-3}
            self.check_double_backward_options = {
                'atol': 1e-4, 'rtol': 1e-3}

    def check_forward(self, x_data, use_cudnn='always'):
        x = chainer.Variable(x_data)
        with chainer.using_config('use_cudnn', use_cudnn):
            y = temporal_k_max_pooling(x, 5)
        y_data = y.data
        # FIX: ``assertEquals`` is a deprecated alias of ``assertEqual``.
        self.assertEqual(self.dtype, y_data.dtype)
        expect = np.array([
            [11.0, 11.0, 11.0, 11.0, 11.0],
            [0.0, 1.0, 1.0, 1.0, 1.0],
            [10.0, 11.0, 11.0, 10.0, 11.0]
        ], dtype=self.dtype)
        testing.assert_allclose(expect, y_data)

    def test_forward_cpu(self):
        self.check_forward(self.forward_x)

    def check_backward(self, x_data, y_grad, use_cudnn='always'):
        def f(x):
            return temporal_k_max_pooling(x, 5)
        with chainer.using_config('use_cudnn', use_cudnn):
            gradient_check.check_backward(f, x_data, y_grad, dtype='d', **self.check_backward_options)

    def test_backward_cpu(self):
        # BUG FIX: the AssertionError from check_backward was previously
        # caught and printed, so this test could never fail.  Let it
        # propagate to the test runner.
        self.check_backward(self.backward_x, self.y_grad)

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.forward_x))

    @attr.gpu
    def test_forward_gpu_non_contiguous(self):
        self.check_forward(cuda.cupy.asfortranarray(cuda.to_gpu(self.forward_x)))

    @attr.gpu
    def test_forward_gpu_no_cudnn(self):
        self.check_forward(cuda.to_gpu(self.forward_x), 'never')

    @attr.gpu
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.backward_x), cuda.to_gpu(self.y_grad))

    @attr.gpu
    def test_backward_gpu_non_contiguous(self):
        self.check_backward(
            cuda.cupy.asfortranarray(cuda.to_gpu(self.backward_x)),
            cuda.cupy.asfortranarray(cuda.to_gpu(self.y_grad)))

    @attr.gpu
    def test_backward_gpu_no_cudnn(self):
        self.check_backward(cuda.to_gpu(self.backward_x), cuda.to_gpu(self.y_grad), 'never')


testing.run_module(__name__, __file__)
| [
"svoss@i-sti.nl"
] | svoss@i-sti.nl |
class node:
    """A single singly-linked-list node holding one value."""

    def __init__(self, data):
        self.data = data
        self.next = None


class linkedlist:
    """Minimal singly linked list with append/push/insert/delete helpers."""

    def __init__(self):
        self.head = None

    def printlinkedlist(self):
        """Print every value in order, separated by ' -> '."""
        # BUG FIX: a leftover debug ``print(temp)`` used to dump the raw
        # head node object (or None) before the values; removed.
        temp = self.head
        while temp:
            print(temp.data, end=" -> ")
            temp = temp.next

    def append(self, new_data):
        """Add a node holding *new_data* at the tail of the list."""
        new_node = node(new_data)
        if self.head is None:
            self.head = new_node
            return
        last = self.head
        while last.next:
            last = last.next
        last.next = new_node

    def push(self, new_data):
        """Insert a node holding *new_data* at the head of the list."""
        new_node = node(new_data)
        new_node.next = self.head
        self.head = new_node

    def insertAfter(self, prev_node, new_data):
        """Insert *new_data* immediately after *prev_node* (a node object)."""
        if prev_node is None:
            print("The given previous node not inLinkedList.")
            return
        new_node = node(new_data)
        new_node.next = prev_node.next
        prev_node.next = new_node

    def deletenode(self, key):
        """Remove the first node whose data equals *key* (no-op if absent)."""
        temp = self.head
        if temp is not None:
            if key == temp.data:
                self.head = temp.next
                temp = None
        # Walk until the key is found, remembering the previous node.
        while temp is not None:
            if temp.data == key:
                break
            prev = temp
            temp = temp.next
        if temp is None:
            return
        prev.next = temp.next
        temp = None

    def deletelist(self):
        """Drop every node; afterwards the list is empty.

        Simply unlinking the head is enough — Python's garbage collector
        reclaims the now-unreachable nodes.
        """
        self.head = None

    def deletenodeposition(self, position):
        """Remove the node at 0-based *position* (no-op when out of range)."""
        temp = self.head
        if self.head is None:
            return
        if position == 0:
            self.head = temp.next
            return
        # Advance to the node just before the requested position.
        for _ in range(position - 1):
            temp = temp.next
            if temp is None:
                break
        if temp is None or temp.next is None:
            return
        temp.next = temp.next.next
if __name__ == "__main__":
    # Interactive driver: a simple text menu over the linked list above.
    llist = linkedlist()
    while True:
        print()
        print("------ NOTES ------")
        print()
        print("1. Append Value")
        print()
        print("2. Push Value")
        print()
        print("3. Insert After")
        print()
        print("4. Display Node")
        print()
        print("5. Delete Node by data")
        print()
        print("6. Delete Node by Position")
        print()
        print("7. Delete Linked list")
        print()
        print("8. Exit")
        i = int(input("Enter the Number: "))
        if i == 1:
            k = int(input("enter value to append : "))
            llist.append(k)
            print()
            print(k, " Appended Successfully")
        elif i == 2:
            k = int(input("enter value to push : "))
            llist.push(k)
        elif i == 3:
            # Always inserts after the second node; assumes the list already
            # has at least two nodes — TODO confirm intended behaviour.
            k = int(input("enter value to add after : "))
            llist.insertAfter(llist.head.next, k)
        elif i == 4:
            llist.printlinkedlist()
        elif i == 5:
            k = int(input("enter value to deletenode : "))
            llist.deletenode(k)
        elif i == 6:
            k = int(input("enter position to Delete : "))
            llist.deletenodeposition(k)
        elif i == 7:
            llist.deletelist()
        elif i == 8:
            break
        else:
            print("Enter Valid Number")
| [
"noreply@github.com"
] | noreply@github.com |
16d7ca675bcd4ddd0c915762b8825ad9ea47b65e | bdb44645b8be29657682d5cf9dd7e47d1d8e8d92 | /08(8lab)/Diana_8lab_STP.py | 72cd3a74e1ce99718d514ea362061a9968c97d4c | [] | no_license | bluriiiface/Python | 19ef10480cf8148f79b8ce4a04d7680b59db7c44 | cdefbf4c1448896e4010263e25771be634d909b9 | refs/heads/main | 2023-05-14T04:34:01.532960 | 2021-05-31T20:25:58 | 2021-05-31T20:25:58 | 353,459,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | from math import sqrt
def calculate(a, f, b):
    """Apply binary operator *f* ('+', '-', '*' or '/') to floats *a*, *b*.

    :returns: the numeric result, or ``None`` for an unknown operator.
    :raises ZeroDivisionError: when dividing by zero (caller handles it).
    """
    if f == '/':
        return a / b
    if f == '*':
        return a * b
    if f == '+':
        return a + b
    if f == '-':
        return a - b
    return None


def main():
    """Prompt for "<number> <op> <number>" until a valid expression is given."""
    print('-----------Программа №8(8)-----------')
    while 1:
        print('Введите 2 числа и знак через пробел')
        # BUG FIX: malformed input (wrong field count, non-numeric values)
        # and division by zero used to crash the program; now they just
        # re-prompt with the same "invalid input" message.
        try:
            a, f, b = input().split(' ')
            result = calculate(float(a), f, float(b))
        except (ValueError, ZeroDivisionError):
            print('Некорректный ввод')
            continue
        if result is None:
            print('Некорректный ввод')
            continue
        print('Ответ: ', result)
        break


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
3260cc71bf51ca6c5472658a1ca8bdfa0d073a18 | 7ae00ae1659ae99e4fc91e3fea2a253db01abceb | /venv/lib/python3.7/site-packages/werkzeug/__init__.py | 02ba61089f1afec895c949e50b76b8f2c4186dd4 | [] | no_license | fredpan/PiPiXia-Server-intro-cloud-computing | 9d6e6db0ba4571c72d595a127137dac97fa3ec4c | 91bec4d18bd23177b5144845c938672f60f49c08 | refs/heads/master | 2020-08-07T13:28:26.244172 | 2019-10-10T05:50:31 | 2019-10-10T05:50:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,080 | py | """
werkzeug
~~~~~~~~
Werkzeug is the Swiss Army knife of Python web development.
It provides useful classes and functions for any WSGI application to
make the life of a Python web developer much easier. All of the provided
classes are independent from each other so you can mix it with any other
library.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from types import ModuleType
# Package version, exposed as ``werkzeug.__version__``.
__version__ = "0.16.0"

# Names documented as the public API of the package root.
__all__ = ["run_simple", "Client", "Request", "Response", "__version__"]
class _DeprecatedImportModule(ModuleType):
    """Module proxy that keeps deprecated top-level imports working.

    Replaces the real module in ``sys.modules`` and lazily resolves the
    attributes listed in *available*, emitting a ``DeprecationWarning`` that
    points at the canonical import location and the *removed_in* release.
    """

    def __init__(self, name, available, removed_in):
        import sys

        super(_DeprecatedImportModule, self).__init__(name)  # noqa F821
        # Keep a handle on the real module, then shadow it in sys.modules so
        # attribute access goes through __getattr__ below.
        self._real_module = sys.modules[name]  # noqa F821
        sys.modules[name] = self
        self._removed_in = removed_in
        # Map each deprecated attribute name to the submodule it lives in.
        self._origin = {item: mod for mod, items in available.items() for item in items}
        mod_all = getattr(self._real_module, "__all__", dir(self._real_module))
        self.__all__ = sorted(mod_all + list(self._origin))

    def __getattr__(self, item):
        # Don't export internal variables.
        if item in {"_real_module", "_origin", "_removed_in"}:
            raise AttributeError(item)

        if item in self._origin:
            from importlib import import_module

            origin = self._origin[item]

            if origin == ".":
                # No warning for the "submodule as attribute" case, it's way too messy
                # and unreliable to try to distinguish 'from werkzueug import
                # exceptions' and 'import werkzeug; werkzeug.exceptions'.
                value = import_module(origin + item, self.__name__)
            else:
                from warnings import warn

                # Import the module, get the attribute, and show a warning about where
                # to correctly import it from.
                mod = import_module(origin, self.__name__.rsplit(".")[0])
                value = getattr(mod, item)
                warn(
                    "The import '{name}.{item}' is deprecated and will be removed in"
                    " {removed_in}. Use 'from {name}{origin} import {item}'"
                    " instead.".format(
                        name=self.__name__,
                        item=item,
                        removed_in=self._removed_in,
                        origin=origin,
                    ),
                    DeprecationWarning,
                    stacklevel=2,
                )
        else:
            value = getattr(self._real_module, item)

        # Cache the value so it won't go through this process on subsequent accesses.
        setattr(self, item, value)
        return value

    def __dir__(self):
        return sorted(dir(self._real_module) + list(self._origin))
del ModuleType

# Install the deprecation shim: every attribute listed below keeps working
# as a top-level ``werkzeug.<name>`` import until Werkzeug 1.0, but emits a
# DeprecationWarning pointing at the canonical module on the left.
_DeprecatedImportModule(
    __name__,
    {
        ".": ["exceptions", "routing"],
        "._internal": ["_easteregg"],
        ".datastructures": [
            "Accept",
            "Authorization",
            "CallbackDict",
            "CharsetAccept",
            "CombinedMultiDict",
            "EnvironHeaders",
            "ETags",
            "FileMultiDict",
            "FileStorage",
            "Headers",
            "HeaderSet",
            "ImmutableDict",
            "ImmutableList",
            "ImmutableMultiDict",
            "ImmutableOrderedMultiDict",
            "ImmutableTypeConversionDict",
            "LanguageAccept",
            "MIMEAccept",
            "MultiDict",
            "OrderedMultiDict",
            "RequestCacheControl",
            "ResponseCacheControl",
            "TypeConversionDict",
            "WWWAuthenticate",
        ],
        ".debug": ["DebuggedApplication"],
        ".exceptions": ["abort", "Aborter"],
        ".formparser": ["parse_form_data"],
        ".http": [
            "cookie_date",
            "dump_cookie",
            "dump_header",
            "dump_options_header",
            "generate_etag",
            "http_date",
            "HTTP_STATUS_CODES",
            "is_entity_header",
            "is_hop_by_hop_header",
            "is_resource_modified",
            "parse_accept_header",
            "parse_authorization_header",
            "parse_cache_control_header",
            "parse_cookie",
            "parse_date",
            "parse_dict_header",
            "parse_etags",
            "parse_list_header",
            "parse_options_header",
            "parse_set_header",
            "parse_www_authenticate_header",
            "quote_etag",
            "quote_header_value",
            "remove_entity_headers",
            "remove_hop_by_hop_headers",
            "unquote_etag",
            "unquote_header_value",
        ],
        ".local": [
            "Local",
            "LocalManager",
            "LocalProxy",
            "LocalStack",
            "release_local",
        ],
        ".middleware.dispatcher": ["DispatcherMiddleware"],
        ".middleware.shared_data": ["SharedDataMiddleware"],
        ".security": ["check_password_hash", "generate_password_hash"],
        ".test": ["create_environ", "EnvironBuilder", "run_wsgi_app"],
        ".testapp": ["test_app"],
        ".urls": [
            "Href",
            "iri_to_uri",
            "uri_to_iri",
            "url_decode",
            "url_encode",
            "url_fix",
            "url_quote",
            "url_quote_plus",
            "url_unquote",
            "url_unquote_plus",
        ],
        ".useragents": ["UserAgent"],
        ".utils": [
            "append_slash_redirect",
            "ArgumentValidationError",
            "bind_arguments",
            "cached_property",
            "environ_property",
            "escape",
            "find_modules",
            "format_string",
            "header_property",
            "html",
            "HTMLBuilder",
            "import_string",
            "redirect",
            "secure_filename",
            "unescape",
            "validate_arguments",
            "xhtml",
        ],
        ".wrappers.accept": ["AcceptMixin"],
        ".wrappers.auth": ["AuthorizationMixin", "WWWAuthenticateMixin"],
        ".wrappers.base_request": ["BaseRequest"],
        ".wrappers.base_response": ["BaseResponse"],
        ".wrappers.common_descriptors": [
            "CommonRequestDescriptorsMixin",
            "CommonResponseDescriptorsMixin",
        ],
        ".wrappers.etag": ["ETagRequestMixin", "ETagResponseMixin"],
        ".wrappers.response": ["ResponseStreamMixin"],
        ".wrappers.user_agent": ["UserAgentMixin"],
        ".wsgi": [
            "ClosingIterator",
            "extract_path_info",
            "FileWrapper",
            "get_current_url",
            "get_host",
            "LimitedStream",
            "make_line_iter",
            "peek_path_info",
            "pop_path_info",
            "responder",
            "wrap_file",
        ],
    },
    "Werkzeug 1.0",
)
from .serving import run_simple
from .test import Client
| [
"fredpan0821@gmaill.com"
] | fredpan0821@gmaill.com |
36656772b5db15a7080ef5290f4c8710dfa7092e | 9b3fe56255fec1766990ae310883ad44b1505520 | /Week 10/hw10_2.gyp | 165df3dbff3ca1eb31a24e0a2e103327edf8c7d6 | [] | no_license | 0zancan/AlgorithmBasics | b45fb2c579c81e6969d7ff4e58205700f323252b | 8a7336a1b7092e31cf54e3fe4ebe6135337a12f7 | refs/heads/master | 2022-04-27T12:37:24.762389 | 2020-05-03T20:45:08 | 2020-05-03T20:45:08 | 261,009,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | gyp | # Dizinin ortalamasına en yakın dizi elemanını
# bulan program
a = [4, 8, -4, 18, 9, 21, 20, 5, -17,-1]
# a = [4, 8, -4, 18, 9, 21, -200, 5,5,10,23,95,63,-86 -17,-1]
n = len(a)
sum = 0
for i in range(n):
sum += a[i] #63
average = sum / n #6.3
print(sum)
print(average)
temp = 1000000000
for i in range(n):
distance = abs(average - a[i])
if temp > distance:
temp = distance
miniDistance = a[i]
print(miniDistance)
| [
"ozancan@OCB-Mac.local"
] | ozancan@OCB-Mac.local |
a9c80f162f30fa9a320e3d70485ed145f2167f0e | 64505998dead5afec41bf3f5e7b8ee0990056878 | /teamlib/version.py | f77ea9f249daf2b00651ccb9c05f8c5efcd687d4 | [
"MIT"
] | permissive | malenkiki/team | 8f0e57ac4fdeef98f9dd0c3d35ba2cd4c8972c20 | 11b17066b72988bdf222174cae01a56f750962b0 | refs/heads/master | 2016-09-07T02:04:55.445385 | 2014-02-02T21:50:03 | 2014-02-02T21:50:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | # -*- encoding: utf-8 -*-
APP_NAME = "team"
APP_VERSION = '0.1'
| [
"petit.michel@gmail.com"
] | petit.michel@gmail.com |
cd2a46af326af67c393d564938c53923c702d089 | a36ed24b899f002946a4ef9c401872ef452711b1 | /mundo-01/ex026.py | f9b4474d9528e52ee8a12e3c55fb6dcf9f5b7bfe | [] | no_license | erickgust/python-exercises | 91d2176b140909c49b66a328d13c10470746db87 | 2cdd3cfaba7ef0124fcb198a17368e7704764ad5 | refs/heads/main | 2023-06-20T13:45:27.847192 | 2021-07-01T17:12:10 | 2021-07-01T17:12:10 | 382,102,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | frase = input('Digite uma frase qualquer: ').upper()
fraseL = frase.split()
print('A letra "A" aparece {} vezes!'.format(' '.join(fraseL).count('A')))
print('Ela aparece pela primeira vez na posição {}'.format(' '.join(fraseL).find('A')+1))
print('Ela aparece pela última vez na posição {}'.format(' '.join(fraseL).rfind('A')+1))
| [
"erick.stazs@gmail.com"
] | erick.stazs@gmail.com |
61f3f8be12d8ae177af4e7bd99f32c9284f701fc | b6f7203ff761362c5c992ed137f2b60873624193 | /tests/test_trained_embeddings.py | 766666d4e06e389fefb9c2143f9213cc6aa95ee1 | [
"Apache-2.0"
] | permissive | thaingo/adaptnlp | 98b2ac5f4a0d5ce41d37a6d674b7cc39d5e2a876 | 784ecabd2505acb8c595b6089841772a6f9b8bb5 | refs/heads/master | 2023-03-07T06:54:34.673649 | 2021-02-16T04:00:03 | 2021-02-16T04:00:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | from adaptnlp import EasyWordEmbeddings, EasyStackedEmbeddings, EasyDocumentEmbeddings
def test_easy_word_embeddings():
embeddings = EasyWordEmbeddings()
embeddings.embed_text(text="Test", model_name_or_path="bert-base-cased")
def test_easy_stacked_embeddings():
embeddings = EasyStackedEmbeddings("bert-base-cased", "distilbert-base-cased")
embeddings.embed_text(text="Test")
def test_easy_document_embeddings():
embeddings = EasyDocumentEmbeddings("bert-base-cased", "distilbert-base-cased")
embeddings.embed_pool(text="Test")
| [
"aychang995@gmail.com"
] | aychang995@gmail.com |
5e56cc78a121e1d1b486e6bc4a3fc7a7cd46762b | 2b54b1fb1540ab73d6c83cae3acd5fdd58bdead5 | /Platinum_clusters_Project/final_images/Pt13_O2_DFTsorted/Pt7_3O2_TiO2_101surface_zorderimageplotbasedondepth1.py | 4f1b59995eb2ca8b9bc48aa8fecadced15bc2251 | [] | no_license | sivachiriki/GOFEE_Pt_V_supported | 5787d44294262870075f35f2d31c096021b7ce20 | 6bd700dac1f3e7c58394b758d75246ac6e07eade | refs/heads/master | 2022-04-08T11:38:13.038455 | 2020-03-09T10:48:31 | 2020-03-09T10:48:31 | 226,359,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,837 | py | from __future__ import division
import matplotlib
#matplotlib.use('Agg') # Can also use 'tkagg' or 'webagg'
#from plot_neb_tio2 import *
from matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from ase.io import read, write
from ase.visualize import view
import matplotlib.patches as mpatches
from ase.data.colors import jmol_colors
from pylab import *
from ase.data import covalent_radii as aradii
from matplotlib.patches import Circle
from math import atan2,pi
import matplotlib.gridspec as gridspec
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
matplotlib.rc('font',**{'family':'sans-serif',
'sans-serif':['Helvetica'],
'size':14})
matplotlib.rc('text',usetex=True)
matplotlib.rcParams['text.latex.unicode']=True
#matplotlib.rcParams['text.latex.preamble']=['\usepackage{bm}']
#matplotlib.rcParams['text.latex.preamble']=['\usepackage{xfrac}']
matplotlib.rcParams['mathtext.default'] = 'regular'
matplotlib.rcParams['ps.usedistiller'] = 'xpdf'
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
def plot_atoms(ax, atoms, xyz, acols, alp, z):
ecols = [[0, 0, 0] for col in atoms]
indices = range(len(atoms))
for ia in indices:
acol = acols[ia]
ecol = ecols[ia]
# if atoms[ia].symbol == 'Ti':
# arad = aradii[atoms[ia].number] #* 0.9 * 0.5
# else:
arad = aradii[atoms[ia].number] #* 0.9
apos = atoms[ia].position
eps = arad
circ = Circle([apos[xyz[0]], apos[xyz[1]]],
fc = acol,
ec = ecol,
radius = arad,
lw = 0.5,
alpha = alp[ia],
zorder = 1 - apos[1]/1000
)
ax.add_patch(circ)
def plot_conf(ax, atoms, rot=False):
colors = np.array([jmol_colors[atom.number] for atom in atoms])
for i, atom in enumerate(atoms):
if (atom.number ==78):
colors[i] =[0.1, 0.6, 0.6]
if (atom.number ==8 and i >= 135 and i <=149 ):
colors[i] =[0.1, 0.2, 0.5]
alp = [None] * colors.shape[0]
for i,a in enumerate(atoms):
if a.symbol == 'Al' or a.symbol == 'O':
if a.position[2] < 9.7:
alp[i] = 0.3
if rot:
atoms.rotate('x',pi/2)
plot_atoms(ax, atoms, [0,2,1], colors, alp, z=-1)
data=read('Pt13_O2_Al2O3_KRRfund9l_DFTrelaxedsorted.traj@:')
#for j in range(len(data)):
image = data[8] #* (2,2,1)
for i,a in enumerate(image):
# if a.position[1] >11.100:
# image.positions[i,1] =0.000
if i ==48 or i==3 :
image.positions[i,1] =image.positions[i,1]-12.429
image.positions[i,0] =image.positions[i,0]+7.176
# if i==148:
# image.positions[i,0] =image.positions[i,0]-14.352
#write('newimage.traj',image)
plt.figure(figsize=(6.0,7.0))
gs = gridspec.GridSpec(2, 1,
height_ratios=[7.77,9.090])
cell = image.get_cell()
# 0 0
ax = plt.subplot(gs[0, 0])
img = image.copy()
plot_conf(ax, img)
ax.set_xlim([-2.8, 11.85])
ax.set_ylim([10.0, 20.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
# 0 1
ax = plt.subplot(gs[1, 0])
image = image * (2,2,1)
write('newimage.traj',image)
cell = image.get_cell()
img = image.copy()
plot_conf(ax, img, rot=True)
ax.set_xlim([-2.8, 11.85])
ax.set_ylim([0.80, 12.50])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
gs.update(wspace=0.00,hspace=0.00)
plt.tight_layout()
name ='Pt13_O2_Al2O3_0001_DFTopt_g{}'.format(8)
savefig(name,bbox_inches='tight')
show()
| [
"sivachiriki@phys.au.dk"
] | sivachiriki@phys.au.dk |
6849d2ec9790e047a0e8c225fd2ba62a5fdcdd56 | 3dcfa2980db0770af9b4355b0d5a5e5ef2313c50 | /corpus/exceptions.py | 173d06acf3cf5bd7493cf25b0c6f41cbc47cf052 | [
"CC-BY-NC-2.0",
"CC-BY-NC-4.0",
"Apache-2.0"
] | permissive | looselycoupled/partisan-discourse | 5f4638d984fb54a5add870d4cb59445811c412a1 | 8579924094c92e25e21ce59a26232269cf6b34bc | refs/heads/master | 2020-03-27T06:35:49.627350 | 2018-08-25T18:05:44 | 2018-08-25T18:05:44 | 146,118,079 | 0 | 0 | Apache-2.0 | 2018-08-25T18:02:38 | 2018-08-25T18:02:38 | null | UTF-8 | Python | false | false | 977 | py | # corpus.exceptions
# Custom exceptions for corpus handling.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Mon Jul 18 09:57:26 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: exceptions.py [63935bc] benjamin@bengfort.com $
"""
Custom exceptions for corpus handling.
"""
##########################################################################
## Corpus Exceptions
##########################################################################
class CorpusException(Exception):
"""
Something went wrong in the corpus app.
"""
pass
class BitlyAPIError(CorpusException):
"""
Something went wrong trying to shorten a url.
"""
pass
class FetchError(CorpusException):
"""
Something went wrong trying to fetch a url using requests.
"""
pass
class NLTKError(CorpusException):
"""
Something went wrong when using NLTK.
"""
pass
| [
"benjamin@bengfort.com"
] | benjamin@bengfort.com |
c7d84d8e4ce7c1640e416676ff78f03d60b5cb43 | 4597d219ad8849b4e80cfbda6e75600449db2de9 | /pythonProject/window8.py | d83b7d589dcdc65988563de0a6f6e39f958992d1 | [] | no_license | pass0585/Python | 660b88375bf2a50f19520ba3460542bcd4af7f29 | 917e5c1b1050a97a24069a1157489618aa45c6e1 | refs/heads/main | 2023-06-24T01:45:25.225249 | 2021-07-28T07:35:05 | 2021-07-28T07:35:05 | 382,979,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | from tkinter import *
from time import *
##전역 변수
btnList = [None]*9
fnameList = ["jeju1.gif", "jeju2.gif","jeju3.gif","jeju4.gif",
"jeju5.gif","jeju6.gif","jeju7.gif","jeju8.gif","jeju9.gif"]
photoList = [None]*9
num = 0
#함수
def clickNext():
global num
num+=1
if num>8:
num=0
photo = PhotoImage(file = "C:\GitRepository\Python&iot\Python\Image\GIF/" + fnameList[num])
name = fnameList[num]
pLabel.configure(image = photo)
nameLabel.configure(text = fnameList[num])
pLabel.image = photo
def clickPrev():
global num
num-= 1
if num < 0 :
num = 8
photo = PhotoImage(file = "C:\GitRepository\Python&iot\Python\Image\GIF/" + fnameList[num])
pLabel.configure(image = photo)
pLabel.image = photo
nameLabel.configure(text = fnameList[num])
##메인코드
window = Tk()
window.geometry("700x500")
window.title("사진 앨범 보기기")
btnPrev = Button(window, text = "<<이전", command = clickPrev)
btnNext = Button(window, text = "다음>>", command = clickNext)
photo = PhotoImage(file = "C:\GitRepository\Python&iot\Python\Image\GIF/" + fnameList[0])
pLabel = Label(window, image = photo)
nameLabel = Label(window, text = fnameList[0])
btnPrev.place(x = 250, y = 10)
btnNext.place(x = 400, y = 10)
pLabel.place(x = 15, y = 50)
nameLabel.place(x = 325, y = 10)
window.mainloop()
| [
"pass0585@gmail.com"
] | pass0585@gmail.com |
a5ae575a5d08b866c988d7daff8b8357e695454b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03148/s977826358.py | 4dcf39217ab9acca14bc5415bf0a46880be55e2c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,165 | py | import sys
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)
import heapq
class PriorityQueue:
class Reverse:
def __init__(self, val):
self.val = val
def __lt__(self, other):
return self.val > other.val
def __repr__(self):
return repr(self.val)
def __init__(self, x=None, desc=False):
if not x:
x = []
if desc:
for i in range(len(x)):
x[i] = self.Reverse(x[i])
self._desc = desc
self._container = x
heapq.heapify(self._container)
@property
def is_empty(self):
return not self._container
def pop(self):
if self._desc:
return heapq.heappop(self._container).val
else:
return heapq.heappop(self._container)
def push(self, item):
if self._desc:
heapq.heappush(self._container, self.Reverse(item))
else:
heapq.heappush(self._container, item)
def top(self):
if self._desc:
return self._container[0].val
else:
return self._container[0]
def sum(self):
return sum(self._container)
def __len__(self):
return len(self._container)
def main():
from operator import itemgetter
n, k = list(map(int, readline().split()))
sushis_original = [list(map(int, readline().split())) for _ in range(n)]
sushis_original.sort(key=itemgetter(1))
sushis_original.sort(key=itemgetter(0))
new_type = 0
prev = -1
for i in range(n):
cur = sushis_original[i][0]
if prev != cur:
new_type += 1
if cur > new_type:
sushis_original[i][0] = new_type
prev = cur
type_num = sushis_original[-1][0]
sushis = {i: [] for i in range(1, type_num + 1)}
for sushi_type, val in sushis_original:
sushis[sushi_type].append(val)
eat_sushis = PriorityQueue()
rem_sushis = PriorityQueue(desc=True)
rem = k
if rem >= type_num:
for i in range(1, type_num + 1):
eat_sushis.push(sushis[i].pop())
rem -= type_num
for vals in sushis.values():
for val in vals:
rem_sushis.push(val)
for _ in range(rem):
eat_sushis.push(rem_sushis.pop())
else:
for i in range(1, type_num + 1):
eat_sushis.push(sushis[i].pop())
discard_num = type_num - k
for _ in range(discard_num):
eat_sushis.pop()
for vals in sushis.values():
for val in vals:
rem_sushis.push(val)
cur_type = min(k, type_num)
sub_next = 2 * cur_type - 1
while rem_sushis:
cur_val = eat_sushis.top()
new_val = rem_sushis.top()
diff = new_val - cur_val
if diff >= sub_next:
eat_sushis.pop()
eat_sushis.push(rem_sushis.pop())
cur_type -= 1
sub_next = 2 * cur_type - 1
else:
break
ans = cur_type ** 2 + eat_sushis.sum()
print(ans)
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
64c8644b126a25c7e0bc1a294f63066cde7a5f27 | 1f6b2c9978b1bfaed48560db811075025f93261f | /jobsAdmin/urls.py | 75f62f9b25edb8563025713c5695e6a80ab84150 | [
"MIT"
] | permissive | JaviMiot/Job_manager_backend | b999cb23cb7dd875ec7dd8403471c44ccee49034 | da91e1e2f64c1d6f1fd04fa262239abd0accece2 | refs/heads/master | 2023-08-17T18:39:55.851883 | 2021-10-11T20:39:49 | 2021-10-11T20:39:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | """jobsAdmin URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include((('public_jobs.urls', 'public_jobs')), namespace='public_jobs'))
]+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"51manobandajavier@gmail.com"
] | 51manobandajavier@gmail.com |
3d6b7fb65d3fd36dfe2b7e314f717ceb5d5edcc7 | 9f0aea4ee3f6516dbc7112fc20c4498196c5721a | /RL/main.py | 0e2d8d5a0468d348f65e84ca6d4a26a9f24b7b8f | [] | no_license | gobind452/Miscellaneous-Projects | 1286d7d6dcb822eb0ca9f40ac284caa0f273dd3d | d9035fcc5951537ddf2fe065438ff5eb74e1a4dc | refs/heads/master | 2020-08-07T16:34:48.970562 | 2019-12-09T13:49:20 | 2019-12-09T13:49:20 | 213,526,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,164 | py | import os
import sys
import pygame
import random
from pygame import *
pygame.init()
scr_size = (width,height) = (600,300) # Screen Size
FPS = 60 # Frames Per Second
gravity = 0.6 # Gravity for jumps
black = (0,0,0)
white = (255,255,255)
background_col = (235,235,235) # Colours
high_score = 0 # Initial High Score
screen = pygame.display.set_mode(scr_size) # Init screen
clock = pygame.time.Clock() # Clock
pygame.display.set_caption("T-Rex Rush") # Caption
jump_sound = pygame.mixer.Sound('sprites/jump.wav') # Load Sounds
die_sound = pygame.mixer.Sound('sprites/die.wav')
checkPoint_sound = pygame.mixer.Sound('sprites/checkPoint.wav')
def load_image(name,sizex=-1,sizey=-1,colorkey=None,): # Load Images
fullname = os.path.join('sprites', name)
image = pygame.image.load(fullname)
image = image.convert()
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0, 0))
image.set_colorkey(colorkey, RLEACCEL)
if sizex != -1 or sizey != -1:
image = pygame.transform.scale(image, (sizex, sizey))
return (image, image.get_rect())
def load_sprite_sheet(sheetname,nx,ny,scalex = -1,scaley = -1,colorkey = None,):
fullname = os.path.join('sprites',sheetname)
sheet = pygame.image.load(fullname)
sheet = sheet.convert()
sheet_rect = sheet.get_rect()
sprites = []
sizex = sheet_rect.width/nx
sizey = sheet_rect.height/ny
for i in range(0,ny):
for j in range(0,nx):
rect = pygame.Rect((j*sizex,i*sizey,sizex,sizey))
image = pygame.Surface(rect.size)
image = image.convert()
image.blit(sheet,(0,0),rect)
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey,RLEACCEL)
if scalex != -1 or scaley != -1:
image = pygame.transform.scale(image,(scalex,scaley))
sprites.append(image)
sprite_rect = sprites[0].get_rect()
return sprites,sprite_rect
def disp_gameOver_msg(retbutton_image,gameover_image):
retbutton_rect = retbutton_image.get_rect()
retbutton_rect.centerx = width / 2
retbutton_rect.top = height*0.52
gameover_rect = gameover_image.get_rect()
gameover_rect.centerx = width / 2
gameover_rect.centery = height*0.35
screen.blit(retbutton_image, retbutton_rect)
screen.blit(gameover_image, gameover_rect)
def extractDigits(number):
if number > -1:
digits = []
i = 0
while(number/10 != 0):
digits.append(number%10)
number = int(number/10)
digits.append(number%10)
for i in range(len(digits),5):
digits.append(0)
digits.reverse()
return digits
class Dino():
def __init__(self,sizex=-1,sizey=-1):
self.images,self.rect = load_sprite_sheet('dino.png',5,1,sizex,sizey,-1)
self.images1,self.rect1 = load_sprite_sheet('dino_ducking.png',2,1,59,sizey,-1)
self.rect.bottom = int(0.98*height)
self.rect.left = width/15
self.image = self.images[0]
self.index = 0
self.counter = 0
self.score = 0
self.isJumping = False
self.isDead = False
self.isDucking = False
self.isBlinking = False
self.movement = [0,0] # Distance,Velocity ? Right Velocity,Up Velocity
self.jumpSpeed = 11.5
self.stand_pos_width = self.rect.width
self.duck_pos_width = self.rect1.width
def draw(self):
screen.blit(self.image,self.rect)
def checkbounds(self):
if self.rect.bottom > int(0.98*height): # Check Bounds (Drop the dino)
self.rect.bottom = int(0.98*height)
self.isJumping = False
def update(self): # Updates the game
if self.isJumping:
self.movement[1] = self.movement[1] + gravity # Gravity Timestep
self.index = 0
elif self.isBlinking:
if self.index == 0:
if self.counter % 400 == 399:
self.index = (self.index + 1)%2
else:
if self.counter % 20 == 19:
self.index = (self.index + 1)%2
elif self.isDucking:
if self.counter % 5 == 0:
self.index = (self.index + 1)%2
else:
if self.counter % 5 == 0:
self.index = (self.index + 1)%2 + 2
if self.isDead:
self.index = 4
if not self.isDucking:
self.image = self.images[self.index]
self.rect.width = self.stand_pos_width
else:
self.image = self.images1[(self.index)%2]
self.rect.width = self.duck_pos_width
self.rect = self.rect.move(self.movement)
self.checkbounds()
if not self.isDead and self.counter % 7 == 6 and self.isBlinking == False:
self.score += 1
self.counter = (self.counter + 1)
class Cactus(pygame.sprite.Sprite):
def __init__(self,speed=5,sizex=-1,sizey=-1):
pygame.sprite.Sprite.__init__(self,self.containers)
self.images,self.rect = load_sprite_sheet('cacti-small.png',3,1,sizex,sizey,-1)
self.rect.bottom = int(0.98*height)
self.rect.left = width + self.rect.width
self.image = self.images[random.randrange(0,3)]
self.movement = [-1*speed,0]
def draw(self):
screen.blit(self.image,self.rect)
def update(self):
self.rect = self.rect.move(self.movement)
if self.rect.right < 0:
self.kill()
class Ptera(pygame.sprite.Sprite):
def __init__(self,speed=5,sizex=-1,sizey=-1):
pygame.sprite.Sprite.__init__(self,self.containers)
self.images,self.rect = load_sprite_sheet('ptera.png',2,1,sizex,sizey,-1)
self.ptera_height = [height*0.82,height*0.75,height*0.60]
self.rect.centery = self.ptera_height[random.randrange(0,3)]
self.rect.left = width + self.rect.width
self.image = self.images[0]
self.movement = [-1*speed,0]
self.index = 0
self.counter = 0
def draw(self):
screen.blit(self.image,self.rect)
def update(self):
if self.counter % 10 == 0:
self.index = (self.index+1)%2
self.image = self.images[self.index]
self.rect = self.rect.move(self.movement)
self.counter = (self.counter + 1)
if self.rect.right < 0:
self.kill()
class Scoreboard():
def __init__(self,x=-1,y=-1):
self.score = 0
self.tempimages,self.temprect = load_sprite_sheet('numbers.png',12,1,11,int(11*6/5),-1)
self.image = pygame.Surface((55,int(11*6/5)))
self.rect = self.image.get_rect()
if x == -1:
self.rect.left = width*0.89
else:
self.rect.left = x
if y == -1:
self.rect.top = height*0.1
else:
self.rect.top = y
def draw(self):
screen.blit(self.image,self.rect)
def update(self,score):
score_digits = extractDigits(score)
self.image.fill(background_col)
for s in score_digits:
self.image.blit(self.tempimages[s],self.temprect)
self.temprect.left += self.temprect.width
self.temprect.left = 0
def introscreen():
temp_dino = Dino(44,47)
temp_dino.isBlinking = True
gameStart = False
callout,callout_rect = load_image('call_out.png',196,45,-1)
callout_rect.left = width*0.05
callout_rect.top = height*0.4
logo,logo_rect = load_image('logo.png',240,40,-1)
logo_rect.centerx = width*0.6
logo_rect.centery = height*0.6
while not gameStart:
if pygame.display.get_surface() == None:
print("Couldn't load display surface")
return True
else:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE or event.key == pygame.K_UP:
temp_dino.isJumping = True
temp_dino.isBlinking = False
temp_dino.movement[1] = -1*temp_dino.jumpSpeed
temp_dino.update()
if pygame.display.get_surface() != None:
screen.fill(background_col)
if temp_dino.isBlinking:
screen.blit(logo,logo_rect)
screen.blit(callout,callout_rect)
temp_dino.draw()
pygame.display.update()
clock.tick(FPS)
if temp_dino.isJumping == False and temp_dino.isBlinking == False:
gameStart = True
def gameplay():
global high_score
gamespeed = 4
startMenu = False
gameOver = False
gameQuit = False
playerDino = Dino(44,47)
scb = Scoreboard()
highsc = Scoreboard(width*0.78)
counter = 0
cacti = pygame.sprite.Group()
pteras = pygame.sprite.Group()
last_obstacle = pygame.sprite.Group()
Cactus.containers = cacti
Ptera.containers = pteras
retbutton_image,retbutton_rect = load_image('replay_button.png',35,31,-1)
gameover_image,gameover_rect = load_image('game_over.png',190,11,-1)
temp_images,temp_rect = load_sprite_sheet('numbers.png',12,1,11,int(11*6/5),-1)
HI_image = pygame.Surface((22,int(11*6/5)))
HI_rect = HI_image.get_rect()
HI_image.fill(background_col)
HI_image.blit(temp_images[10],temp_rect)
temp_rect.left += temp_rect.width
HI_image.blit(temp_images[11],temp_rect)
HI_rect.top = height*0.1
HI_rect.left = width*0.73
while not gameQuit:
while not gameOver:
if pygame.display.get_surface() == None:
print("Couldn't load display surface")
gameQuit = True
gameOver = True
else:
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit = True
gameOver = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
if playerDino.rect.bottom == int(0.98*height):
playerDino.isJumping = True
if pygame.mixer.get_init() != None:
jump_sound.play()
playerDino.movement[1] = -1*playerDino.jumpSpeed
if event.key == pygame.K_DOWN:
if playerDino.isJumping and not playerDino.isDead:
playerDino.movement[1]+= 5*gravity
if not (playerDino.isJumping and playerDino.isDead):
playerDino.isDucking = True
if event.type == pygame.KEYUP:
if event.key == pygame.K_DOWN:
playerDino.isDucking = False
for c in cacti:
c.movement[0] = -1*gamespeed
if pygame.sprite.collide_mask(playerDino,c):
playerDino.isDead = True
if pygame.mixer.get_init() != None:
die_sound.play()
for p in pteras:
p.movement[0] = -1*gamespeed
if pygame.sprite.collide_mask(playerDino,p):
playerDino.isDead = True
if pygame.mixer.get_init() != None:
die_sound.play()
if len(cacti) < 2:
if len(cacti) == 0:
last_obstacle.empty()
last_obstacle.add(Cactus(gamespeed,40,40))
else:
for l in last_obstacle:
if l.rect.right < width*0.7 and random.randrange(0,50) == 10:
last_obstacle.empty()
last_obstacle.add(Cactus(gamespeed, 40, 40))
if len(pteras) == 0 and random.randrange(0,200) == 10 and counter > 500:
for l in last_obstacle:
if l.rect.right < width*0.8:
last_obstacle.empty()
last_obstacle.add(Ptera(gamespeed, 46, 40))
playerDino.update()
cacti.update()
pteras.update()
scb.update(playerDino.score)
highsc.update(high_score)
if pygame.display.get_surface() != None:
screen.fill(background_col)
scb.draw()
if high_score != 0:
highsc.draw()
screen.blit(HI_image,HI_rect)
cacti.draw(screen)
pteras.draw(screen)
playerDino.draw()
pygame.display.update()
clock.tick(FPS)
if playerDino.isDead:
gameOver = True
if playerDino.score > high_score:
high_score = playerDino.score
if counter%700 == 699:
gamespeed += 1
counter = (counter + 1)
if gameQuit:
break
while gameOver:
if pygame.display.get_surface() == None:
print("Couldn't load display surface")
gameQuit = True
gameOver = False
else:
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit = True
gameOver = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
gameQuit = True
gameOver = False
if event.key == pygame.K_RETURN or event.key == pygame.K_SPACE:
gameOver = False
gameplay()
highsc.update(high_score)
if pygame.display.get_surface() != None:
disp_gameOver_msg(retbutton_image,gameover_image)
if high_score != 0:
highsc.draw()
screen.blit(HI_image,HI_rect)
pygame.display.update()
clock.tick(FPS)
pygame.quit()
quit()
def main():
isGameQuit = introscreen()
if not isGameQuit:
gameplay()
main() | [
"noreply@github.com"
] | noreply@github.com |
35bb8f52dd1d329bd18ccf91dc8b94a834d6c0e9 | 58110bd765daad7ad8d41175da001000e63d7256 | /baselines/scripts_python/python_packages/pwNBCBk/citmi_simple.py | 558ac5b782473964acd246c5aa6398071bfd9f98 | [] | no_license | mivanovitch/causal_discovery_for_time_series | c7c9c9081167bd7b53eadc6bffa2f39b84ec084b | 970c4d00c3f8f52923802ed030d4014e24723187 | refs/heads/master | 2023-08-30T20:46:50.498706 | 2021-11-04T16:35:46 | 2021-11-04T16:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173,762 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Discovery of summary causal graphs for time series: script implementing
the PCTMI and FCITMI methods.
Parallelization is done across variables for the skeleton construction step and for the rule origin of causality.
Date: Dec 2019
Author: Karim Assaad, karimassaad3@gmail.com, karim.assaad@univ.grenoble.alpes.fr, karim.assaad@coservit.com
paper: soon
"""
import numpy as np
import pandas as pd
import itertools
from joblib import Parallel, delayed
from baselines.scripts_python.python_packages.CITMI.ctmi import window_representation, get_sampling_rate, get_alpha, align_pair
from baselines.scripts_python.python_packages.CITMI.ctmi_new import i_ctmi, ctmi, align_matrix, tmi, get_index_of_aligned_dict, align_xy, aligned_dict_to_df
# from gctmi import gctmi
from baselines.scripts_python.python_packages.CITMI.ctmi_new import indep_test
# todo: make ctmi use graph by adding None to cell in gamma matrix which is associated to two independent time series
# todo: make ranking based on statistics (not pvalue)
class Graph:
    """
    Adjacency structure over d time series used during skeleton construction.

    Edge encoding:
        0: no edge
        1: a tail -
        2: arrow head ->
    """
    def __init__(self, d):
        """
        :param d: number of nodes (time series)
        """
        self.d = d
        # Start from a fully connected graph (self-loops included);
        # edges are pruned as independencies are found.
        self.edges = np.ones([d, d])
        # sep[p, q, r] == 1 marks r as a member of the separation set of (p, q).
        self.sep = np.zeros([d, d, d])

    def del_edge(self, p, q):
        """
        Remove the (undirected) edge between nodes p and q.
        :param p: index of a time series
        :param q: index of a time series
        """
        self.edges[q, p] = 0
        self.edges[p, q] = 0

    def add_sep(self, p, q, r):
        """
        Record r as part of the separation set of the pair (p, q).
        :param p: index of a time series
        :param q: index of a time series
        :param r: index of seperation set
        """
        self.sep[q, p, r] = 1
        self.sep[p, q, r] = 1

    def search_adj(self, p):
        """
        Collect the neighbours of p: nodes linked to p in both directions.
        :param p: index of a time series
        :return: list of adjacencies of time series p and the number of adjacencies
        """
        out_neigh = np.argwhere(self.edges[p, :] != 0)
        in_neigh = np.argwhere(self.edges[:, p] != 0)
        neighbours = np.intersect1d(out_neigh, in_neigh)
        if self.edges[p, p] == 1:
            # Drop the self-loop from the neighbourhood.
            neighbours = neighbours[neighbours != p]
        return neighbours, len(neighbours)

    def search_adj_all(self):
        """
        Compute the neighbourhood of every node.
        :return: list of adjacencies of all time series and the number of adjacencies per time series
        """
        all_adj = []
        all_counts = []
        for node in range(self.d):
            neighbours, count = self.search_adj(node)
            all_adj.append(neighbours.tolist())
            all_counts.append(count)
        return all_adj, all_counts
class RankingList:
    """
    Sortable container of candidate edges: parallel arrays of node pairs,
    their dependence scores, and the conditioning sets used.
    """
    def __init__(self):
        self.val = np.array([])
        self.elem_p = np.array([], dtype='int')
        self.elem_q = np.array([], dtype='int')
        self.elem_r = []

    def add(self, p, q, val, r):
        """
        Append one candidate entry.
        :param p: index of a time series
        :param q: index of a time series
        :param val: value of mutual information
        :param r: index of set of conditionals
        """
        self.elem_p = np.append(self.elem_p, p)
        self.elem_q = np.append(self.elem_q, q)
        self.val = np.append(self.val, val)
        self.elem_r.append(r)

    def sort(self, descending=True):
        """
        Sort all parallel containers in place by score.
        :param descending: (bool) sort ascending vs. descending. By default True
        """
        order = np.argsort(self.val)
        if descending:
            order = np.flip(order)
        self.val = np.take_along_axis(self.val, order, axis=0)
        self.elem_p = np.take_along_axis(self.elem_p, order, axis=0)
        self.elem_q = np.take_along_axis(self.elem_q, order, axis=0)
        # elem_r is a plain list; reorder it with the same permutation.
        self.elem_r = [self.elem_r[i] for i in order]
class CITMI:
    def __init__(self, series, sig_lev=0.05, lag_max=5, p_value=True, rank_using_p_value=False, verbose=True, num_processor=-1,
                 graphical_optimization=True):
        """
        Causal inference (Wrapper) using TMI and CTMI (contain functions for skeleton construction)
        :param series: d-time series (with possibility of different sampling rate)
        :param sig_lev: significance level. By default 0.05
        :param lag_max: maximum lag for window-size selection (currently unused: the window_size call below is commented out and a fixed window of 1 is used)
        :param p_value: Use p_value for decision making. By default True
        :param rank_using_p_value: rank candidate edges by p-value; only valid when p_value is True. By default False
        :param verbose: Print results. By default: True
        :param num_processor: number of processors for parallelization. By default -1 (all)
        :param graphical_optimization: flag stored on the instance; presumably consumed by the (C)TMI routines downstream — TODO confirm
        """
        self.series = series
        self.graph = Graph(series.shape[1])
        # n = number of time points, d = number of time series (columns)
        self.n = series.shape[0]
        self.d = series.shape[1]
        self.names = self.series.columns
        self.num_processor = num_processor
        self.p_value = p_value
        self.graphical_optimization = graphical_optimization
        # The first two branches both accept the requested value; only the
        # invalid combination (p_value=False, rank_using_p_value=True) is
        # rejected and forced to False with a warning.
        if self.p_value == rank_using_p_value:
            self.rank_using_p_value = rank_using_p_value
        elif not rank_using_p_value:
            self.rank_using_p_value = rank_using_p_value
        else:
            print("Warning: rank_using_p_value can be True iff p_value is True. Using rank_using_p_value=False")
            self.rank_using_p_value = False
        self.verbose = verbose
        # Per-series window representations and instantaneous-relation flags.
        self.data_dict = dict()
        self.instantaneous_dict = dict()
        # Per-series window sizes and estimated sampling rates.
        self.lags = []
        self.sampling_rate = dict()
        for col in range(series.shape[1]):
            _, s_r = get_sampling_rate(self.series[self.names[col]])
            self.sampling_rate[self.names[col]] = s_r
        self.sig_lev = sig_lev
        # alpha: data-driven threshold used when decisions are not p-value based.
        self.alpha = get_alpha(series)
        for col in range(series.shape[1]):
            # Window-size estimation is disabled; every series uses a window of 1.
            # self.lags.append(window_size(series[series.columns[col]], alpha=self.alpha, lag_max=lag_max))
            self.lags.append(1)
            self.data_dict[self.names[col]] = window_representation(self.series[self.names[col]],
                                                                    windows_size=self.lags[col])
            self.instantaneous_dict[self.names[col]] = True
        # gamma matrix: pairwise temporal alignments (lags) between series.
        self.gamma_matrix = align_matrix(self.data_dict, series.columns, self.sampling_rate)
        self.cap_gamma_df = pd.DataFrame(columns=["p", "q", "r", "Grp", "Grq"])
        # Pairwise (conditional) mutual information estimates, initialized to 1.
        self.mi_array = np.ones([self.graph.d, self.graph.d])
        self.cmi_array = np.ones([self.graph.d, self.graph.d])
        if self.verbose:
            print("n: "+str(self.n))
            print("d: "+str(self.d))
            print("names: "+str(self.names))
            print("sampling_rate: "+str(self.sampling_rate))
            print("significance level:"+str(self.sig_lev))
            print("alpha:"+str(self.alpha))
            print("window size:"+str(self.lags))
            print("gamma matrix:"+str(self.gamma_matrix))
            print("instantaneous dict :"+str(self.instantaneous_dict))
def _mi_pq(self, p, q):
"""
estimate tmi between two time series
:param p: time series with index p
:param q: time series with index q
:return: p, q and the estimated value of tmi(p,q)
"""
x = self.data_dict[self.names[p]]
y = self.data_dict[self.names[q]]
mi_pval, mi_val = tmi(x, y, sampling_rate_tuple=(self.sampling_rate[self.names[p]],
self.sampling_rate[self.names[q]]),
gamma=self.gamma_matrix[self.names[q]].loc[self.names[p]], p_value=self.p_value)
# mi_pval, mi_val = ctmi(x, y, None, self.names[p], self.names[q], self.sampling_rate,
# gamma_matrix=self.gamma_matrix, p_value=self.rank_using_p_value)
return p, q, mi_pval
def skeleton_initialize(self):
"""
initialize graph, remove all unconditional independencies and rank neighbors
"""
if self.verbose:
print("######################################")
print("Skeletion Initialization")
print("######################################")
# p_list, q_list = np.where(np.triu(self.graph.edges) > 0)
p_list, q_list = np.where((np.triu(self.graph.edges)-np.diag(np.diag(self.graph.edges))) > 0)
res = Parallel(n_jobs=self.num_processor)(delayed(self._mi_pq)(p, q) for p, q in zip(p_list, q_list))
for pq in range(len(res)):
p, q, mi = res[pq][0], res[pq][1], res[pq][2]
self.mi_array[p, q] = mi
self.mi_array[q, p] = mi
if self.verbose:
print("p=" + str(p) + "; q=" + str(q) + "; I(p,q)=" + "{: 0.5f}".format(self.mi_array[p, q]), end=" ")
if self.p_value:
test = self.mi_array[p, q] > self.sig_lev
else:
test = self.mi_array[p, q] < self.alpha
if test:
if self.verbose:
print("=> Remove link between "+str(p)+" and "+str(q))
self.graph.edges[p, q] = 0
self.graph.edges[q, p] = 0
else:
if self.verbose:
print()
    def align_xyz(self, name_x, name_y, v_x, v_y, idx_x, idx_y, z, sampling_rate_dict, k, gamma_matrix, instantaneous_dict,
                  graph):
        """
        Iteratively align the already x/y-aligned pair (v_x, v_y) with every
        conditioning series in z: after each alignment, x, y and all previously
        aligned z-series are shrunk to the surviving common timestamps.

        :param name_x: name of series x
        :param name_y: name of series y
        :param v_x: values of x, already aligned with y
        :param v_y: values of y, already aligned with x
        :param idx_x: timestamps of v_x
        :param idx_y: timestamps of v_y
        :param z: dict mapping conditioning-series names to their values
        :param sampling_rate_dict: sampling rate per series name
        :param k: not used in this method body -- TODO confirm (kept for interface symmetry)
        :param gamma_matrix: matrix of pairwise alignment lags
        :param instantaneous_dict: not used in this method body -- TODO confirm
        :param graph: not used in this method body -- TODO confirm
        :return: aligned v_x, v_y and dict of aligned conditioning series
        """
        sampling_rate_tuple = (sampling_rate_dict[name_x], sampling_rate_dict[name_y])
        # v_x, v_y, idx_x, idx_y = align_xy(xy[name_x], xy[name_y], g_xy, sampling_rate_tuple)
        idx_x2 = idx_x.copy()
        names_z = [*z.keys()]
        v_z = dict()  # conditioning series aligned so far
        nz_visted = []  # names of the z-series already processed
        # k = 0
        for nz in names_z:
            # lag aligning the current conditioning series nz with x
            g_xz = gamma_matrix[nz].loc[name_x]
            idx_xy = idx_x
            idx_xy2 = idx_x2
            g = g_xz
            nz_processed = z[nz]
            xyz_dict = {name_x: v_x, nz: nz_processed}
            xyz_dict[name_x].index = idx_xy
            sampling_rate_tuple = (sampling_rate_dict[name_x], sampling_rate_dict[nz])
            # boolean mask over x's current timestamps: True where x survives alignment with nz
            bool_idx = pd.DataFrame([False] * len(idx_xy), columns=['bool'])
            bool_idx.index = idx_xy
            # NOTE(review): bool_idx2 is built but never read afterwards -- confirm it can be dropped
            bool_idx2 = pd.DataFrame([False] * len(idx_xy2), columns=['bool'])
            bool_idx2.index = idx_xy2
            v_x, z_processed, idx_x, _ = align_xy(xyz_dict[name_x], xyz_dict[nz], g, sampling_rate_tuple)
            bool_idx.loc[idx_x] = True
            bool_idx = bool_idx['bool'].values
            # shrink y and every previously aligned z-series to the surviving rows
            v_y = v_y[bool_idx]
            idx_y = idx_y[bool_idx]
            for nz_v in nz_visted:
                v_z[nz_v] = v_z[nz_v][bool_idx]
            v_z[nz] = z_processed
            nz_visted.append(nz)
        names_z = [*v_z.keys()]
        # NOTE(review): index_zx is computed but never used -- confirm it can be dropped
        index_zx = get_index_of_aligned_dict(v_z, names_z)
        v_x = v_x.reset_index(drop=True)
        v_y = v_y.reset_index(drop=True)
        for nz_v in nz_visted:
            v_z[nz_v] = v_z[nz_v].reset_index(drop=True)
        return v_x, v_y, v_z
def ctmi(self, x, y, z, name_x, name_y, sampling_rate_dict, gamma_matrix, instantaneous_dict, graph=None, p_value=False,
k=10, sig_samples=10000):
sampling_rate_tuple = (sampling_rate_dict[name_x], sampling_rate_dict[name_y])
g_xy = gamma_matrix[name_y].loc[name_x]
v_x, v_y, idx_x, idx_y = align_xy(x, y, g_xy, sampling_rate_tuple)
if z:
v_x, v_y, v_z = self.align_xyz(name_x, name_y, v_x, v_y, idx_x, idx_y, z, sampling_rate_dict, k, gamma_matrix,
instantaneous_dict, graph)
names_z = [*z.keys()]
if len(names_z) > 0:
v_z = aligned_dict_to_df(v_z)
else:
v_z = None
else:
v_z = None
if len(v_x.shape) == 1:
v_x = v_x.to_frame()
if len(v_y.shape) == 1:
v_y = v_y.to_frame()
cmi_pval, cmi_val = indep_test(v_x, v_y, z=v_z, sig_samples=sig_samples, p_value=p_value, measure="cmiknn", k=k)
return cmi_pval, cmi_val
def _cmi_sep_set_pq(self, p, q, set_size):
"""
estimate ctmi between two time series conditioned on each set of neighbors with cardinality equal to set_size
:param p: time series with index p
:param q: time series with index q
:param set_size: cardinality of the set of neighbors
:return: p, q, list if estimated value of ctmi(p,q,r_set), and list of all r_sets
"""
v_list = []
r_list = [r for r in range(self.graph.d) if (r != p) and (r != q) and ((
self.graph.edges[p, r] != 0)) and (
(self.gamma_matrix[self.names[p]].loc[self.names[r]] >= 0))]
r_list = [list(r) for r in itertools.combinations(r_list, set_size)]
r_list_temp = r_list.copy()
# if set_size == 1:
for rs in r_list_temp:
print(rs)
print(all(elem >= self.d for elem in rs))
if all(elem >= self.d for elem in rs):
r_list.remove(rs)
del r_list_temp
x = self.data_dict[self.names[p]]
y = self.data_dict[self.names[q]]
for rs in r_list:
z = dict()
for r in rs:
z[self.names[r]] = self.data_dict[self.names[r]]
cmi_pval, cmi_val = self.ctmi(x, y, z, self.names[p], self.names[q], self.sampling_rate,
gamma_matrix=self.gamma_matrix, p_value=self.rank_using_p_value,
instantaneous_dict=self.instantaneous_dict)
print("p=" + str(p) + "; q=" + str(q) + "; r=" + str(r_list) + "; I(p,q|r)=" + "{: 0.5f}".format(
cmi_val), end=" ")
for r in rs:
print("gamma :"+str(self.gamma_matrix[self.names[p]].loc[self.names[r]]))
if self.rank_using_p_value:
v_list.append(cmi_pval)
else:
v_list.append(cmi_val)
if v_list:
return p, q, v_list, r_list
def rank_cmi_sep_set_parallel(self, set_size):
"""
rank pairs of time series based on the estimation of ctmi between each pair of connected time series
:param set_size: cardinality of the set of neighbors
:return: ranking of each pair of connected time series based ctmi
"""
list_adj, list_num_adj = self.graph.search_adj_all()
p_list = [p for p in range(len(list_num_adj)) if list_num_adj[p] > set_size]
q_list = [list_adj[p] for p in p_list]
p_list = [p_list[p] for p in range(len(p_list)) for _ in q_list[p]]
q_list = [q for sublist in q_list for q in sublist]
pq_list = [(p, q) for p, q in zip(p_list, q_list)]
print(pq_list)
# temp_pq = pq_list.copy()
# temp_p = p_list.copy()
# temp_q = q_list.copy()
# for pq in range(len(temp_pq)):
# if (temp_pq[pq][1], temp_pq[pq][0]) in pq_list:
# pq_list.remove((temp_pq[pq][0], temp_pq[pq][1]))
# p_list.remove(temp_p[pq])
# q_list.remove(temp_q[pq])
# del temp_pq, temp_p, temp_q
# res = Parallel(n_jobs=self.num_processor)(delayed(self._cmi_sep_set_pq)(p, q, set_size) for p, q in
# zip(p_list, q_list))
res = []
for p, q in zip(p_list, q_list):
res.append(self._cmi_sep_set_pq(p, q, set_size))
ranks = RankingList()
for pq in range(len(res)):
if res[pq] is not None:
if isinstance(res[pq][2], list):
for r in range(len(res[pq][2])):
ranks.add(res[pq][0], res[pq][1], res[pq][2][r], res[pq][3][r])
else:
ranks.add(res[pq][0], res[pq][1], res[pq][2], res[pq][3])
if self.rank_using_p_value:
ranks.sort(descending=True)
else:
ranks.sort(descending=False)
return ranks
    def find_sep_set(self):
        """
        Find the most contributing separation set (if one exists) between each
        pair of connected time series; pairs that become conditionally
        independent lose their link and the conditioning nodes are recorded as
        their separation set.
        """
        if self.verbose:
            print("######################################")
            print("Skeletion Speperation")
            print("######################################")
            print("max set size = " + str(self.graph.d-1))
        # grow the conditioning-set cardinality one step at a time
        for set_size in range(1, self.graph.d-1):
            ranks = self.rank_cmi_sep_set_parallel(set_size)
            if self.verbose:
                print("Ranking:")
                print("p: "+str(ranks.elem_p))
                print("p: " + str(ranks.elem_q))
                print("p: " + str(ranks.elem_r))
                print("p: " + str(ranks.val))
            for p, q, r_set, cmi in zip(ranks.elem_p, ranks.elem_q, ranks.elem_r, ranks.val):
                # the pair must still be linked, and each conditioning node must
                # still be adjacent to at least one endpoint
                test = (self.graph.edges[p, q] != 0)
                for r in r_set:
                    if not test:
                        break
                    test = test and ((self.graph.edges[q, r] != 0) or (self.graph.edges[p, r] != 0))
                    # test = test and ((self.graph.sep[p, r, q] == 0) and (self.graph.sep[q, r, p] == 0))
                if test:
                    mi = self.mi_array[p, q]
                    # if ranking and decision use different measures, re-estimate
                    # the ctmi with the decision measure before testing
                    if self.p_value != self.rank_using_p_value:
                        x = self.data_dict[self.names[p]]
                        y = self.data_dict[self.names[q]]
                        z = dict()
                        for r in r_set:
                            z[self.names[r]] = self.data_dict[self.names[r]]
                        cmi, _ = self.ctmi(x, y, z, self.names[p], self.names[q], self.sampling_rate,
                                           gamma_matrix=self.gamma_matrix,
                                           p_value=self.p_value,
                                           instantaneous_dict=self.instantaneous_dict)
                    if self.verbose:
                        print("p=" + str(p) + "; q=" + str(q) + "; r=" + str(r_set) + "; I(p,q|r)=" + "{: 0.5f}".format(
                            cmi) + "; I(p,q)=" + "{: 0.5f}".format(mi), end=" ")
                    if self.p_value:
                        # dependent marginally (low MI p-value) but independent given r_set
                        test = mi < self.sig_lev < cmi
                    else:
                        test = cmi < self.alpha
                    if test:
                        self.cmi_array[p, q] = cmi
                        self.cmi_array[q, p] = cmi
                        if self.verbose:
                            print("=> remove link between " + str(p) + " and " + str(q))
                        self.graph.edges[p, q] = 0
                        self.graph.edges[q, p] = 0
                        for r in r_set:
                            self.graph.add_sep(q, p, r)
                    else:
                        if self.verbose:
                            print()
        # self._exclude_past()
class PCTMI(CITMI):
def __init__(self, series, sig_lev=0.05, lag_max=5, p_value=True, rank_using_p_value=False, verbose=True, num_processor=-1,
graphical_optimization=False):
"""
PC for time series using TMI and CTMI
:param series: d-time series (with possibility of different sampling rate)
:param sig_lev: significance level. By default 0.05
:param p_value: Use p_value for decision making. By default True
:param verbose: Print results. By default: True
:param num_processor: number of processors for parallelization. By default -1 (all)
"""
CITMI.__init__(self, series, sig_lev, lag_max, p_value, rank_using_p_value, verbose, num_processor,
graphical_optimization)
def _find_shortest_directed_paths_util(self, p, q, visited, path, all_path):
"""
sub function of _find_shortest_directed_paths
:param p: index of time series
:param q: index of time series
:param visited: list of visited nodes
:param path: current path
:param all_path: list of all discovered paths
"""
# Mark the current node as visited and store in path
visited[p] = True
path.append(p)
# If current vertex is same as destination, then print
# current path[]
if p == q:
if len(path) > 2:
all_path.append(path.copy()[1:-1])
return path
else:
# If current vertex is not destination
# Recur for all the vertices child of this vertex
child_p = np.where(self.graph.edges[p, :] == 2)[0]
for k in child_p:
if not visited[k]:
self._find_shortest_directed_paths_util(k, q, visited, path, all_path)
# Remove current vertex from path[] and mark it as unvisited
path.pop()
visited[p] = False
def _find_shortest_directed_paths(self, p, q):
"""
find shortest directed path between time series of index p and time series of index q
:param p: index of time series
:param q: index of time series
:return: all directed paths from p to q
"""
# Mark all the vertices as not visited
visited = [False] * self.d
# Create an array to store paths
path = []
all_path = []
# Call the recursive helper function to print all paths
self._find_shortest_directed_paths_util(p, q, visited, path, all_path)
return all_path
def _oc_pq(self, p, q):
"""
estimate ctmi between two time series conditioned of their sep set + each non oriented connected neighbor
:param p: index of time series
:param q: index of time series
:return: p, q, the most contributing node and the MI conditioned on sep set and the most contributing node
"""
v_list = []
k_list = [k for k in range(self.d) if (k != p) and (k != q)
and ((self.graph.edges[q, k] == 1) and (self.graph.edges[p, k] == 1)
and (self.graph.edges[k, q] == 1) and (self.graph.edges[k, p] == 1)
and (self.graph.sep[p, q, k] == 0))]
if len(k_list) > 0:
x = self.data_dict[self.names[p]]
y = self.data_dict[self.names[q]]
sep = np.where(self.graph.sep[p, q, :] == 1)[0]
for k in k_list:
if k not in sep:
# sep_k = sep.tolist() + [k]
# z = dict()
# for name_k in self.names[sep_k]:
# z[name_k] = self.data_dict[name_k]
# cmi_pval, cmi_val = ctmi(x, y, z, self.names[p], self.names[q], self.sampling_rate,
# gamma_matrix=self.gamma_matrix, p_value=self.p_value, mission="ictmi")
z = self.data_dict[self.names[k]]
print(z.shape)
cmi_pval, cmi_val = i_ctmi(x, y, z, self.names[p], self.names[q], self.names[k], self.sampling_rate,
p_value=self.p_value)
v_list.append(cmi_pval)
if v_list:
if self.p_value:
idx = int(np.argmax(v_list))
else:
idx = int(np.argmin(v_list))
return p, q, v_list[idx], k_list[idx]
def rank_oc_parallel(self):
"""
rank unsheilded triples based on the estimation of ctmi
:return: ranking of each unsheilded triple based ctmi
"""
p_list = []
q_list = []
for p in range(self.d):
for q in range(p+1, self.d):
if (self.graph.edges[p, q] == 0) and (self.graph.edges[q, p] == 0):
p_list.append(p)
q_list.append(q)
res = Parallel(n_jobs=self.num_processor)(delayed(self._oc_pq)(p, q) for p, q in zip(p_list, q_list))
ranks = RankingList()
for pq in range(len(res)):
if res[pq] is not None:
ranks.add(res[pq][0], res[pq][1], res[pq][2], res[pq][3])
if self.p_value:
ranks.sort(descending=False)
else:
ranks.sort(descending=True)
return ranks
def rule_origin_causality(self):
"""
rule 0 (origin of causality) from PC
"""
if self.verbose:
print("######################################")
print("Rule Origin of Causality")
print("######################################")
ranks = self.rank_oc_parallel()
for p, q, k, cmi in zip(ranks.elem_p, ranks.elem_q, ranks.elem_r, ranks.val):
if (self.graph.edges[q, k] == 1) and (self.graph.edges[p, k] == 1) \
and (self.graph.edges[k, q] == 1) and (self.graph.edges[k, p] == 1):
sep = np.where(self.graph.sep[p, q, :] == 1)[0]
print("sep = " + str(sep))
# if len(sep) > 0:
# mi = self.cmi_array[p, q]
# else:
mi = self.mi_array[p, q]
if k not in sep:
if self.verbose:
print("p=" + str(p) + "; q=" + str(q) + "; r=" + str(
k) + "; s=" + str(
sep) + "; I(p,q|r,s)=" + "{: 0.5f}".format(
cmi) + "; I(p,q|s)=" + "{: 0.5f}".format(mi), end=" ")
if self.p_value:
test = cmi < mi
# test = cmi < self.alpha
else:
test = mi - cmi < 0
if test:
if self.verbose:
print("=> orient " + str(p) + " -> " + str(k) + " <- " + str(q))
self.graph.edges[p, k] = 2
self.graph.edges[q, k] = 2
else:
if self.verbose:
print()
def rule_propagation_causality(self):
"""
rule 1 from PC
:return: (bool) True if the rule made a change in the graph and False otherwise
"""
if self.verbose:
print("######################################")
print("Rule Propagation of Causality")
print("######################################")
test_find_orientation = False
for i in range(self.graph.d):
for j in range(i + 1, self.graph.d):
if (self.graph.edges[i, j] == 0) and (self.graph.edges[j, i] == 0):
k_list = [k for k in range(self.graph.d) if (k != i) and (k != j) and
(((self.graph.edges[j, k] == 2) and (self.graph.edges[k, i] == 1) and
(self.graph.edges[i, k] == 1)) or ((self.graph.edges[i, k] == 2) and
(self.graph.edges[k, j] == 1) and
(self.graph.edges[j, k] == 1)))]
if len(k_list) > 0:
test_find_orientation = True
for k in k_list:
if self.graph.edges[i, k] == 2:
if self.verbose:
print(str(i) + "->" + str(k) + "-" + str(j), end=" ")
print("=> orient " + str(i) + "-> " + str(k) + " -> " + str(j))
self.graph.edges[k, j] = 2
else:
if self.verbose:
print(str(j) + "->" + str(k) + "-" + str(i), end=" ")
print("=> orient " + str(j) + "-> " + str(k) + " -> " + str(i))
self.graph.edges[k, i] = 2
return test_find_orientation
def rule_2(self):
"""
rule 2 from PC
:return: (bool) True if the rule made a change in the graph and False otherwise
"""
if self.verbose:
print("######################################")
print("Rule 3")
print("######################################")
test_find_orientation = False
for i in range(self.graph.d):
j_list = np.where(self.graph.edges[i, :] == 1)[0].tolist()
if i in j_list:
j_list.remove(i)
for j in j_list:
if self.graph.edges[j, i] == 1:
shortest_directed_path = self._find_shortest_directed_paths(i, j)
if len(shortest_directed_path) > 0:
self.graph.edges[i, j] = 2
test_find_orientation = True
if self.verbose:
print_path = '->'.join(map(str, shortest_directed_path[0]))
print(str(i)+"-"+str(j)+" and "+str(i) + "->" + print_path + "->" + str(j), end=" ")
print("=> orient " + str(i) + "->" + str(j))
return test_find_orientation
def rule_3(self):
"""
rule 3 from PC
:return: (bool) True if the rule made a change in the graph and False otherwise
"""
if self.verbose:
print("######################################")
print("Rule 4")
print("######################################")
test_find_orientation = False
for i in range(self.graph.d):
for j in range(i + 1, self.graph.d):
if (self.graph.edges[i, j] == 0) and (self.graph.edges[j, i] == 0):
colliders = [k for k in range(self.graph.d) if (k != i) and (k != j) and (
(self.graph.edges[j, k] == 2) and (self.graph.edges[i, k] == 2))]
k_list = [k for k in range(self.graph.d) if (k != i) and (k != j) and (
(self.graph.edges[j, k] == 1) and (self.graph.edges[i, k] == 1))
and (self.graph.edges[k, j] == 1) and (self.graph.edges[k, i] == 1)]
if len(colliders) > 0 and len(k_list) > 0:
for c in colliders:
for k in k_list:
if (self.graph.edges[c, k] == 1) and (self.graph.edges[k, c] == 1):
test_find_orientation = True
self.graph.edges[k, c] = 2
if self.verbose:
print(str(i) + "->" + str(c) + "<-" + str(j) + " and " + str(i) + "-" +
str(k) + "-" + str(j) + " and " + str(k) + "-" + str(c),
end=" ")
print("=> orient " + str(k) + "->" + str(c))
return test_find_orientation
    def rule_commun_confounder_and_causal_chain(self):
        """
        New rules from the paper: common confounder (rule 4) and causal chain
        (rule 5). For each non-adjacent pair (i, j) sharing a neighbor k, the
        signs of the alignment lags gamma(k, i) and gamma(k, j) decide whether
        k acts as a lagged common cause (i <- k -> j) or a link in a causal
        chain; edges are only oriented into nodes that are not already effects.
        """
        if self.verbose:
            print("######################################")
            print("Rule commun confounder and causal chain")
            print("######################################")
        for i in range(self.graph.d):
            for j in range(i + 1, self.graph.d):
                if (self.graph.edges[i, j] == 0) and (self.graph.edges[j, i] == 0):
                    # k_list = [k for k in range(self.graph.d) if (k != i) and (k != j) and (
                    #         ((self.graph.edges[j, k] == 1) and (self.graph.edges[k, j] == 1) and
                    #          (self.graph.edges[i, k] == 1) and (self.graph.edges[k, i] == 1)))]
                    # common neighbors k linked to both i and j with at most one
                    # of the four half-edges already oriented
                    k_list = [k for k in range(self.graph.d) if (k != i) and (k != j) and (
                            ((self.graph.edges[j, k] == 1) and (self.graph.edges[k, j] == 1) and
                             (self.graph.edges[i, k] == 1) and (self.graph.edges[k, i] == 1)) or (
                                ((self.graph.edges[j, k] == 2) and (self.graph.edges[k, j] == 1) and
                                 (self.graph.edges[i, k] == 1) and (self.graph.edges[k, i] == 1))) or (
                                ((self.graph.edges[j, k] == 1) and (self.graph.edges[k, j] == 2) and
                                 (self.graph.edges[i, k] == 1) and (self.graph.edges[k, i] == 1))) or (
                                ((self.graph.edges[j, k] == 1) and (self.graph.edges[k, j] == 1) and
                                 (self.graph.edges[i, k] == 2) and (self.graph.edges[k, i] == 1))) or (
                                ((self.graph.edges[j, k] == 1) and (self.graph.edges[k, j] == 1) and
                                 (self.graph.edges[i, k] == 1) and (self.graph.edges[k, i] == 2))))]
                    if len(k_list) > 0:
                        for k in k_list:
                            # alignment lags of k w.r.t. i and j
                            gki = self.gamma_matrix[self.names[i]].loc[self.names[k]]
                            gkj = self.gamma_matrix[self.names[j]].loc[self.names[k]]
                            # a node is "not an effect" if no edge points into it yet
                            i_is_not_effet = (sum(self.graph.edges[:, i] == 2) == 0)
                            j_is_not_effet = (sum(self.graph.edges[:, j] == 2) == 0)
                            k_is_not_effet = (sum(self.graph.edges[:, k] == 2) == 0)
                            # Lagged common cause: k precedes both i and j
                            if (gki > 0) and (gkj > 0):
                                if i_is_not_effet and j_is_not_effet:
                                    if self.verbose:
                                        print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)>0",
                                              end=" ")
                                        print("=> orient " + str(i) + "<- " + str(k) + " -> " + str(j))
                                    self.graph.edges[k, i] = 2
                                    self.graph.edges[k, j] = 2
                            # Lagged instantaneous confounder: k precedes i, simultaneous with j
                            elif (gki > 0) and (gkj == 0):
                                if i_is_not_effet:
                                    if j_is_not_effet and k_is_not_effet:
                                        if self.verbose:
                                            print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)==0",
                                                  end=" ")
                                            print("=> orient " + str(i) + "<- " + str(k) + " - " + str(j))
                                        self.graph.edges[k, i] = 2
                                    elif j_is_not_effet:
                                        if self.verbose:
                                            print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)==0",
                                                  end=" ")
                                            print("=> orient " + str(i) + "<- " + str(k) + " -> " + str(j))
                                        self.graph.edges[k, i] = 2
                                        self.graph.edges[k, j] = 2
                                    elif k_is_not_effet:
                                        if self.verbose:
                                            print(
                                                str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)==0",
                                                end=" ")
                                            print("=> orient " + str(i) + "<- " + str(k) + " <- " + str(j))
                                        self.graph.edges[k, i] = 2
                                        self.graph.edges[j, k] = 2
                            # symmetric case: k precedes j, simultaneous with i
                            elif (gki == 0) and (gkj > 0):
                                if j_is_not_effet:
                                    if i_is_not_effet and k_is_not_effet:
                                        if self.verbose:
                                            print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)==0 and gamma(k,j)>0",
                                                  end=" ")
                                            print("=> orient " + str(i) + "- " + str(k) + " -> " + str(j))
                                        self.graph.edges[k, j] = 2
                                    elif i_is_not_effet:
                                        if self.verbose:
                                            print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)==0",
                                                  end=" ")
                                            print("=> orient " + str(i) + "<- " + str(k) + " -> " + str(j))
                                        self.graph.edges[k, i] = 2
                                        self.graph.edges[k, j] = 2
                                    elif k_is_not_effet:
                                        if self.verbose:
                                            print(
                                                str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,j)>0 and gamma(k,i)==0",
                                                end=" ")
                                            print("=> orient " + str(i) + "-> " + str(k) + " -> " + str(j))
                                        self.graph.edges[k, j] = 2
                                        self.graph.edges[i, k] = 2
                            # Lagged instantaneous causal chain: j -> k -> i
                            elif (gki >= 0) and (gkj < 0):
                                if j_is_not_effet and k_is_not_effet:
                                    if self.verbose:
                                        print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)>0",
                                              end=" ")
                                        print("=> orient " + str(i) + "<- " + str(k) + " <- " + str(j))
                                    self.graph.edges[k, i] = 2
                                    self.graph.edges[j, k] = 2
                            # causal chain in the other direction: i -> k -> j
                            elif (gki < 0) and (gkj >= 0):
                                if i_is_not_effet and k_is_not_effet:
                                    if self.verbose:
                                        print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)>0",
                                              end=" ")
                                        print("=> orient " + str(i) + "-> " + str(k) + " -> " + str(j))
                                    self.graph.edges[i, k] = 2
                                    self.graph.edges[k, j] = 2
    def rule_mediator(self):
        """
        New rule from the paper: mediator (rule 6). For each still-connected
        pair (i, j) with a common undirected neighbor k, the signs of the three
        pairwise alignment lags (gij, gik, gjk) select one of six mediator
        orderings (i->j->k, i->k->j, j->i->k, j->k->i, k->i->j, k->j->i) or a
        partially instantaneous variant; the triangle is then oriented
        accordingly, but only into nodes that are not already effects.
        """
        if self.verbose:
            print("######################################")
            print("Rule mediator")
            print("######################################")
        for i in range(self.graph.d):
            for j in range(i + 1, self.graph.d):
                if (self.graph.edges[i, j] != 0) and (self.graph.edges[j, i] != 0):
                    # common neighbors k fully undirectedly linked to both i and j
                    k_list = [k for k in range(self.graph.d) if (k != i) and (k != j) and (
                            ((self.graph.edges[j, k] == 1) and (self.graph.edges[k, j] == 1) and
                             (self.graph.edges[i, k] == 1) and (self.graph.edges[k, i] == 1)))]
                    if len(k_list) > 0:
                        for k in k_list:
                            # pairwise alignment lags within the triangle
                            gij = self.gamma_matrix[self.names[j]].loc[self.names[i]]
                            gik = self.gamma_matrix[self.names[k]].loc[self.names[i]]
                            gjk = self.gamma_matrix[self.names[k]].loc[self.names[j]]
                            # g_list = [(gij, gik, gjk), (gij, gik, -gjk), (-gij, gjk, gik), (-gij, gjk, -gik),
                            #           (-gik, -gjk, gij), (-gik, -gjk, -gij)]
                            # i->j->k, i->k->j, j->i->k, j->k->->i, k->i->j, k->j->i
                            g_list = [(gij, gjk, gik), (gik, -gjk, gij), (-gij, gik, gjk), (gjk, -gik, -gij),
                                      (-gik, gij, -gjk), (-gjk, -gij, -gik)]
                            g_list_common = [(gij, gjk, gik), (gik, -gjk, gij), (gjk, -gik, -gij)]
                            # msk[s] is True when ordering s is consistent with all lag signs
                            msk = [(x[0] > 0) and (x[1] > 0) and (x[2] >= 0) for x in g_list]
                            # msk_common covers the variants with one instantaneous link
                            msk_common = [(x[0] == 0) and (x[1] > 0) and (x[2] > 0) for x in g_list_common]
                            if any(msk):
                                # NOTE(review): the next two prints are ungated
                                # debug output (not guarded by self.verbose)
                                print(g_list)
                                print(msk)
                                s = int(np.argwhere(msk)[0])
                                # g1, g2, g3 = g_list[s]
                                # s == 0: mediator ordering i -> j -> k
                                if s == 0:
                                    if (sum(self.graph.edges[:, j] == 2) == 0) and (
                                            sum(self.graph.edges[:, k] == 2) == 0):
                                        if (self.graph.edges[j, i] == 1) and (self.graph.edges[k, i] == 1) \
                                                and (self.graph.edges[k, j] == 1):
                                            if self.verbose:
                                                print(str(i) + "-" + str(j) + "-" + str(k) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "-> " + str(j) + " -> " + str(k) + " <- "
                                                      + str(i))
                                            self.graph.edges[i, j] = 2
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[j, k] = 2
                                # s == 1: mediator ordering i -> k -> j
                                elif s == 1:
                                    if (sum(self.graph.edges[:, j] == 2) == 0) and (
                                            sum(self.graph.edges[:, k] == 2) == 0):
                                        if (self.graph.edges[j, i] == 1) and (self.graph.edges[k, i] == 1) \
                                                and (self.graph.edges[k, j] == 1):
                                            if self.verbose:
                                                print(str(i) + "-" + str(k) + "-" + str(j) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "-> " + str(k) + "-> " + str(j) + " <- "
                                                      + str(i))
                                            self.graph.edges[i, j] = 2
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[k, j] = 2
                                # s == 2: mediator ordering j -> i -> k
                                elif s == 2:
                                    if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                            sum(self.graph.edges[:, k] == 2) == 0):
                                        if (self.graph.edges[i, j] == 1) and (self.graph.edges[k, i] == 1) \
                                                and (self.graph.edges[k, j] == 1):
                                            if self.verbose:
                                                print(str(j) + "-" + str(i) + "-" + str(k) + "-" + str(j), end=" ")
                                                print("=> orient " + str(j) + "-> " + str(i) + " -> " + str(k) + " <- "
                                                      + str(j))
                                            self.graph.edges[j, i] = 2
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[j, k] = 2
                                # s == 3: mediator ordering j -> k -> i
                                # NOTE(review): this is `if` while its neighbors are
                                # `elif`; harmless since the s values are exclusive,
                                # but confirm it is intentional
                                if s == 3:
                                    if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                            sum(self.graph.edges[:, k] == 2) == 0):
                                        if (self.graph.edges[i, j] == 1) and (self.graph.edges[i, k] == 1) \
                                                and (self.graph.edges[k, j] == 1):
                                            if self.verbose:
                                                print(str(j) + "-" + str(k) + "-" + str(i) + "-" + str(j), end=" ")
                                                print("=> orient " + str(j) + "-> " + str(k) + "-> " + str(i) + " <- "
                                                      + str(j))
                                            self.graph.edges[j, i] = 2
                                            self.graph.edges[k, i] = 2
                                            self.graph.edges[j, k] = 2
                                # s == 4: mediator ordering k -> i -> j
                                elif s == 4:
                                    if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                            sum(self.graph.edges[:, j] == 2) == 0):
                                        if (self.graph.edges[j, i] == 1) and (self.graph.edges[i, k] == 1) \
                                                and (self.graph.edges[j, k] == 1):
                                            if self.verbose:
                                                print(str(k) + "-" + str(i) + "-" + str(j) + "-" + str(k), end=" ")
                                                print("=> orient " + str(k) + "-> " + str(i) + "-> " + str(j) + " <- "
                                                      + str(k))
                                            self.graph.edges[i, j] = 2
                                            self.graph.edges[k, i] = 2
                                            self.graph.edges[k, j] = 2
                                # s == 5: mediator ordering k -> j -> i
                                elif s == 5:
                                    if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                            sum(self.graph.edges[:, j] == 2) == 0):
                                        if (self.graph.edges[i, j] == 1) and (self.graph.edges[i, k] == 1) \
                                                and (self.graph.edges[j, k] == 1):
                                            if self.verbose:
                                                print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                      + str(k))
                                            self.graph.edges[j, i] = 2
                                            self.graph.edges[k, i] = 2
                                            self.graph.edges[k, j] = 2
                            # variants where one link is instantaneous (first lag == 0)
                            elif any(msk_common):
                                s = int(np.argwhere(msk_common)[0])
                                if s == 0:
                                    if (self.graph.edges[j, i] == 1) and (self.graph.edges[k, i] == 1) \
                                            and (self.graph.edges[k, j] == 1):
                                        if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                sum(self.graph.edges[:, j] == 2) == 0) and (
                                                sum(self.graph.edges[:, k] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(j) + "-" + str(k) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "- " + str(j) + " -> " + str(k) + " <- "
                                                      + str(i))
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[j, k] = 2
                                        elif (sum(self.graph.edges[:, j] == 2) == 0) and (
                                                sum(self.graph.edges[:, k] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(j) + "-" + str(k) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "-> " + str(j) + " -> " + str(k) + " <- "
                                                      + str(i))
                                            self.graph.edges[i, j] = 2
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[j, k] = 2
                                        elif (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                sum(self.graph.edges[:, k] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(j) + "-" + str(k) + "-" + str(i), end=" ")
                                                print("=> orient " + str(j) + "-> " + str(i) + " -> " + str(k) + " <- "
                                                      + str(j))
                                            self.graph.edges[j, i] = 2
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[j, k] = 2
                                elif s == 1:
                                    if (self.graph.edges[j, i] == 1) and (self.graph.edges[k, i] == 1) \
                                            and (self.graph.edges[k, j] == 1):
                                        if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                sum(self.graph.edges[:, j] == 2) == 0) and (
                                                sum(self.graph.edges[:, k] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(k) + "-" + str(j) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "- " + str(k) + " -> " + str(j) + " <- "
                                                      + str(i))
                                            self.graph.edges[i, j] = 2
                                            self.graph.edges[k, j] = 2
                                        elif (sum(self.graph.edges[:, k] == 2) == 0) and (
                                                sum(self.graph.edges[:, j] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(j) + "-" + str(k) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "-> " + str(j) + " -> " + str(k) + " <- "
                                                      + str(i))
                                            self.graph.edges[i, j] = 2
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[k, j] = 2
                                        elif (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                sum(self.graph.edges[:, j] == 2) == 0):
                                            if self.verbose:
                                                print(str(k) + "-" + str(i) + "-" + str(j) + "-" + str(k), end=" ")
                                                print("=> orient " + str(k) + "-> " + str(i) + " -> " + str(j) + " <- "
                                                      + str(k))
                                            self.graph.edges[k, i] = 2
                                            self.graph.edges[k, j] = 2
                                            self.graph.edges[i, j] = 2
                                elif s == 2:
                                    if (self.graph.edges[j, i] == 1) and (self.graph.edges[k, i] == 1) \
                                            and (self.graph.edges[k, j] == 1):
                                        if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                sum(self.graph.edges[:, j] == 2) == 0) and (
                                                sum(self.graph.edges[:, k] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(k) + "-" + str(j) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "- " + str(k) + " -> " + str(j) + " <- "
                                                      + str(i))
                                            self.graph.edges[j, i] = 2
                                            self.graph.edges[k, i] = 2
                                        elif (sum(self.graph.edges[:, k] == 2) == 0) and (
                                                sum(self.graph.edges[:, i] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(j) + "-" + str(k) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "-> " + str(j) + " -> " + str(k) + " <- "
                                                      + str(i))
                                            self.graph.edges[j, k] = 2
                                            self.graph.edges[j, i] = 2
                                            self.graph.edges[k, i] = 2
                                        elif (sum(self.graph.edges[:, j] == 2) == 0) and (
                                                sum(self.graph.edges[:, i] == 2) == 0):
                                            if self.verbose:
                                                print(str(k) + "-" + str(i) + "-" + str(j) + "-" + str(k), end=" ")
                                                print("=> orient " + str(k) + "-> " + str(i) + " -> " + str(j) + " <- "
                                                      + str(k))
                                            self.graph.edges[k, j] = 2
                                            self.graph.edges[k, i] = 2
                                            self.graph.edges[j, i] = 2
                            # remaining cases: exactly one non-zero lag in the triangle
                            else:
                                if (self.graph.edges[i, j] == 1) and (self.graph.edges[i, k] == 1) \
                                        and (self.graph.edges[k, j] == 1):
                                    if (gij!=0) and (gik==0) and (gjk==0):
                                        if gij>0:
                                            if (sum(self.graph.edges[:, j] == 2) == 0) and (
                                                    sum(self.graph.edges[:, k] == 2) == 0):
                                                if self.verbose:
                                                    print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                    print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                          + str(k))
                                                self.graph.edges[i, j] = 2
                                                self.graph.edges[i, k] = 2
                                                self.graph.edges[j, k] = 2
                                        elif gij<0:
                                            if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                    sum(self.graph.edges[:, k] == 2) == 0):
                                                if self.verbose:
                                                    print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                    print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                          + str(k))
                                                self.graph.edges[j, i] = 2
                                                self.graph.edges[i, k] = 2
                                                self.graph.edges[j, k] = 2
                                    elif (gij==0) and (gik!=0) and (gjk==0):
                                        if gik>0:
                                            if (sum(self.graph.edges[:, k] == 2) == 0) and (
                                                    sum(self.graph.edges[:, j] == 2) == 0):
                                                if self.verbose:
                                                    print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                    print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                          + str(k))
                                                self.graph.edges[i, k] = 2
                                                self.graph.edges[i, j] = 2
                                                self.graph.edges[k, j] = 2
                                        # NOTE(review): `if` rather than `elif` here; gik>0 and
                                        # gik<0 are exclusive, so behavior is unaffected
                                        if gik<0:
                                            if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                    sum(self.graph.edges[:, j] == 2) == 0):
                                                if self.verbose:
                                                    print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                    print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                          + str(k))
                                                self.graph.edges[k, i] = 2
                                                self.graph.edges[i, j] = 2
                                                self.graph.edges[k, j] = 2
                                    elif (gij == 0) and (gik == 0) and (gjk != 0):
                                        if gjk>0:
                                            if (sum(self.graph.edges[:, k] == 2) == 0) and (
                                                    sum(self.graph.edges[:, i] == 2) == 0):
                                                if self.verbose:
                                                    print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                    print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                          + str(k))
                                                self.graph.edges[j, k] = 2
                                                self.graph.edges[j, i] = 2
                                                self.graph.edges[k, i] = 2
                                        if gjk<0:
                                            if (sum(self.graph.edges[:, j] == 2) == 0) and (
                                                    sum(self.graph.edges[:, i] == 2) == 0):
                                                if self.verbose:
                                                    print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                    print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                          + str(k))
                                                self.graph.edges[k, j] = 2
                                                self.graph.edges[j, i] = 2
                                                self.graph.edges[k, i] = 2
def rule_proba_raising_principle(self):
"""
new rules (rule prob raising principle from paper)
"""
if self.verbose:
print("######################################")
print("Rule prob raising principle")
print("######################################")
for i in range(self.graph.d):
for j in range(i + 1, self.graph.d):
if (self.graph.edges[i, j] == 1) and (self.graph.edges[j, i] == 1):
adjacent_i_is_1 = (sum(np.delete(self.graph.edges[:, i], i) != 0) == 1)
adjacent_j_is_1 = (sum(np.delete(self.graph.edges[:, j], j) != 0) == 1)
if adjacent_i_is_1 and adjacent_j_is_1:
gij = self.gamma_matrix[self.names[j]].loc[self.names[i]]
if gij > 0:
if self.verbose:
print(str(i) + "-" + str(j) + "g(i,j)>0", end=" ")
print("=> orient " + str(i) + "-> " + str(j))
self.graph.edges[i, j] = 2
elif gij < 0:
if self.verbose:
print(str(i) + "-" + str(j) + "g(i,j)<0", end=" ")
print("=> orient " + str(i) + "<- " + str(j))
self.graph.edges[j, i] = 2
def check_past(self):
    """
    For every node with an incoming arrow, test it against its own past:
    temporarily add "<name>_past" pseudo-variables, estimate (c)tmi for each
    parent, then remove the pseudo-variables again.

    NOTE(review): this method looks unfinished/broken as written:
    - `x_past` is assigned the same data as `x` (presumably it should read
      `self.data_dict[self.names[p] + "_past"]` — TODO confirm);
    - `self.names[x_past]` indexes `names` with a data object, not an index,
      which would raise at runtime;
    - `r_list` holds plain ints, so `for r in rs` iterates over an int and
      would raise TypeError (a combinations() step seems to be missing);
    - the computed `cmi_pval`/`cmi_val` are never used, so the method has no
      effect beyond the include/exclude bookkeeping.
    """
    if self.verbose:
        print("#######################################")
        print("########### Check past ###########")
        print("#######################################")
    # augment the model with *_past pseudo-variables
    self._include_past_check_past()
    for p in range(self.d):
        # parents of p (arrow head at p)
        r_list = [r for r in range(self.graph.d) if (r != p) and ((self.graph.edges[r, p] == 2))]
        x = self.data_dict[self.names[p]]
        x_past = self.data_dict[self.names[p]]
        for rs in r_list:
            z = dict()
            for r in rs:
                z[self.names[r]] = self.data_dict[self.names[r]]
            if self.graphical_optimization:
                cmi_pval, cmi_val = self.ctmi(x, x_past, z, self.names[p], self.names[x_past], self.sampling_rate,
                                              gamma_matrix=self.gamma_matrix, graph=self.graph.edges,
                                              p_value=self.rank_using_p_value, instantaneous_dict=self.instantaneous_dict)
            else:
                cmi_pval, cmi_val = ctmi(x, x_past, z, self.names[p], self.names[x_past], self.sampling_rate,
                                         gamma_matrix=self.gamma_matrix, p_value=self.rank_using_p_value,
                                         instantaneous_dict=self.instantaneous_dict)
    # undo the *_past augmentation
    self._exclude_past()
def _include_past_check_past(self):
    """
    Temporarily augment the model with one "<name>_past" pseudo-variable per
    self-dependent series (gamma(i, i) > 0): extend names, gamma_matrix,
    graph edges, data_dict and sampling_rate so check_past can test each
    series against its own past.

    NOTE(review): relies on DataFrame.append, which was removed in
    pandas >= 2.0 — consistent with the rest of the file (cf. check_cycles),
    so kept; migrate to pd.concat when upgrading pandas.
    """
    counter = 0
    for i in range(self.d):
        self.instantaneous_dict[self.names[i] + "_past"] = False
        if self.gamma_matrix[self.names[i]].loc[self.names[i]] > 0:
            # register the pseudo-variable name after the original d names
            self.names = self.names.insert(self.d + counter, self.names[i] + "_past")
            counter = counter + 1
            # extend gamma_matrix by one column and one row of zeros ...
            self.gamma_matrix[self.names[i] + "_past"] = 0
            self.gamma_matrix = self.gamma_matrix.append(pd.Series([0] * (self.d + counter),
                                                                   name=self.names[i] + "_past",
                                                                   index=self.gamma_matrix.columns))
            # ... then copy the self-gamma in both directions (with sign flip)
            self.gamma_matrix[self.names[i]].loc[self.names[i] + "_past"] = \
                self.gamma_matrix[self.names[i]].loc[self.names[i]]
            self.gamma_matrix[self.names[i] + "_past"].loc[self.names[i]] = - \
                self.gamma_matrix[self.names[i]].loc[self.names[i]]
            # the past of a series always points into its present
            self.graph.edges[self.d + i, i] = 2
            self.graph.edges[i, self.d + i] = 1
            # bug fix: `series` was an undefined (free) name and raised
            # NameError; the raw input series live on the instance
            # (cf. self.series usage in _ts_init)
            self.data_dict[self.names[i] + "_past"] = self.series[self.names[i]]
            self.sampling_rate[self.names[i] + "_past"] = self.sampling_rate[self.names[i]]
def fit(self):
    """
    Execute the full PCTMI pipeline on the stored time series.

    Phases: (1) skeleton construction and separation-set search,
    (2) gap-based and origin-of-causality orientation, (3) repeated
    application of the propagation rules until none of them fires.
    :return: graph (CPDAG)
    """
    if self.verbose:
        print("#######################################")
        print("########### Starting PCTMI simple ###########")
        print("#######################################")
    # Phase 1: skeleton and separating sets.
    self.skeleton_initialize()
    self.find_sep_set()
    # Phase 2: initial orientations.
    self.rule_gap_orientation()
    self.rule_origin_causality()
    # Phase 3: propagate orientations to a fixed point.
    progressed = True
    while progressed:
        applied = (self.rule_propagation_causality(), self.rule_2(), self.rule_3())
        progressed = any(applied)
    if self.verbose:
        print("######################################")
        print("Final Results (PCTMI)")
        print("######################################")
        print("Summary Graph:")
        print(self.graph.edges)
    return self.graph.edges
def rule_gap_orientation(self):
    """
    Gamma heuristic rule from the paper: orient every remaining undirected
    edge i - j using the sign of the estimated lag gamma(i, j).
    """
    if self.verbose:
        print("######################################")
        print("Rule gap orientation")
        print("######################################")
    for i in range(self.graph.d):
        for j in range(i + 1, self.graph.d):
            if (self.graph.edges[i, j] == 1) and (self.graph.edges[j, i] == 1):
                gap = self.gamma_matrix[self.names[j]].loc[self.names[i]]
                if gap > 0:
                    # positive gamma: i precedes j
                    if self.verbose:
                        print(str(i) + "-" + str(j) + "g(i,j)>0", end=" ")
                        print("=> orient " + str(i) + "-> " + str(j))
                    self.graph.edges[i, j] = 2
                elif gap < 0:
                    # negative gamma: j precedes i
                    if self.verbose:
                        print(str(i) + "-" + str(j) + "g(i,j)<0", end=" ")
                        print("=> orient " + str(i) + "<- " + str(j))
                    self.graph.edges[j, i] = 2
def fit_gap_orientation(self):
    """
    Apply only the gamma-gap orientation rule to the current graph
    (requirements: PCTMI must have been run first).
    :return: graph (CPDAG)
    """
    self.rule_gap_orientation()
    oriented = self.graph.edges
    return oriented
class TPCTMI(PCTMI):
def __init__(self, series, sig_lev=0.05, lag_max=5, p_value=True, rank_using_p_value=False, verbose=True, num_processor=-1,
             graphical_optimization=False):
    """
    Temporal PC for time series using TMI and CTMI.
    :param series: d-time series (with possibility of different sampling rate)
    :param sig_lev: significance level. By default 0.05
    :param p_value: Use p_value for decision making. By default True
    :param verbose: Print results. By default: True
    :param num_processor: number of processors for parallelization. By default -1 (all)
    """
    super().__init__(series, sig_lev, lag_max, p_value, rank_using_p_value, verbose, num_processor,
                     graphical_optimization)
    # containers for the window-based (temporal) representation,
    # populated later by _ts_init
    self.ts_data_dict = dict()
    self.ts_names = []
    self.ts_names_dict = dict()
    self.ts_data_df = pd.DataFrame()
    # one (initially empty) lagged-parent list per series
    self.tgraph_dict = {name: [] for name in self.names}
def check_cycles(self):
    """
    For every half-oriented pair p -> q (head at q, tail mark at p), test
    whether q also influences the *future* of p: build a temporary gamma
    matrix with a "<p>_future" pseudo-variable, estimate ctmi(q, p_future |
    parents(q)), and if the (in)dependence measure exceeds alpha, add the
    reverse head q -> p, turning the pair into a 2-cycle.

    NOTE(review): uses DataFrame.append (removed in pandas >= 2.0) — the
    file targets an older pandas.
    """
    if self.verbose:
        print("######################################")
        print("Check Cycles")
        print("######################################")
        print(self.graph.edges)
    for p in range(self.graph.d):
        for q in range(self.graph.d):
            if (self.graph.edges[p, q] == 2) and (self.graph.edges[q, p] == 1):
                # extend a copy of gamma_matrix with a p_future column + row
                temp_matrix = self.gamma_matrix.copy()
                temp_matrix[self.names[p] + "_future"] = None
                temp_matrix = temp_matrix.append(pd.Series([None]*(self.graph.d+1), name=self.names[p] + "_future",
                                                           index=temp_matrix.columns))
                for i in range(self.graph.d):
                    if i != p:
                        x = self.data_dict[self.names[i]]
                        y = self.data_dict[self.names[p]]
                        # todo lag max
                        # re-align each other series against p to get its gamma
                        g = align_pair(x, y, (self.sampling_rate[self.names[i]], self.sampling_rate[self.names[p]]),
                                       max_gamma=5, set_numbers="N")
                        temp_matrix[self.names[p] + "_future"].loc[self.names[i]] = g
                        temp_matrix[self.names[i]].loc[self.names[p] + "_future"] = -g
                    else:
                        # p_future inherits p's self-gamma in every direction
                        temp_matrix[self.names[p] + "_future"].loc[self.names[p] + "_future"] = self.gamma_matrix[self.names[p]].loc[self.names[p]]
                        temp_matrix[self.names[p]].loc[self.names[p] + "_future"] = self.gamma_matrix[self.names[p]].loc[self.names[p]]
                        temp_matrix[self.names[p] + "_future"].loc[self.names[p]] = self.gamma_matrix[self.names[p]].loc[self.names[p]]
                # condition on the current parents of q
                par_q = np.where(self.graph.edges[:, q] == 2)[0]
                x = self.data_dict[self.names[q]]
                y = self.data_dict[self.names[p]]
                z = dict()
                for name_z in self.names[par_q]:
                    z[name_z] = self.data_dict[name_z]
                temp_sampling_rate = self.sampling_rate.copy()
                temp_sampling_rate[self.names[p] + "_future"] = self.sampling_rate[self.names[p]]
                cmi, _ = ctmi(x, y, z, self.names[q], self.names[p] + "_future", temp_sampling_rate,
                              gamma_matrix=temp_matrix, p_value=self.p_value)
                if self.verbose:
                    print("p=" + str(p) + "; q=" + str(q) + "; r=" + str(par_q) + "; I(p,q|r)=" + "{: 0.5f}".format(
                        cmi), end=" ")
                if cmi > self.alpha:
                    if self.verbose:
                        print(str(p) + "->" + str(q), end=" ")
                        print("=> orient " + str(p) + "->" + str(q) + " and " + str(q) + "->" + str(p))
                    # dependence on p's future found: add the reverse head
                    self.graph.edges[q, p] = 2
def _ts_init(self):
    """
    Build the temporal (windowed) representation of the problem: determine
    the window size from the gammas and lags of all connected pairs, expand
    every series into lagged columns, and allocate the temporal graph and
    its MI/CMI arrays.

    NOTE(review): `max(lag_gamma)` raises ValueError when the summary graph
    has no edges at all — TODO confirm that case cannot occur here.
    """
    # collect |gamma| + lag for every oriented or undirected pair
    lag_gamma = []
    for i in range(self.d):
        for j in range(self.d):
            if (self.graph.edges[i, j] == 2) and (self.graph.edges[j, i] == 1):
                lag_gamma.append(abs(self.gamma_matrix[self.names[j]].loc[self.names[i]]) + self.lags[j])
            elif (self.graph.edges[i, j] == 1) and (self.graph.edges[j, i] == 2):
                lag_gamma.append(abs(self.gamma_matrix[self.names[i]].loc[self.names[j]]) + self.lags[i])
            elif (self.graph.edges[i, j] == 1) and (self.graph.edges[j, i] == 1):
                lag_gamma.append(abs(self.gamma_matrix[self.names[j]].loc[self.names[i]]) + self.lags[j])
                lag_gamma.append(abs(self.gamma_matrix[self.names[i]].loc[self.names[j]]) + self.lags[i])
    self.ts_window_size = max(lag_gamma)
    # expand each series into ts_window_size lagged columns
    for col in range(self.graph.d):
        self.ts_data_dict[self.names[col]] = window_representation(self.series[self.names[col]],
                                                                   windows_size=self.ts_window_size)
    for col in range(self.d):
        self.ts_names_dict[self.names[col]] = self.ts_data_dict[self.names[col]].columns
        self.ts_names.extend(self.ts_data_dict[self.names[col]].columns.values)
        self.ts_data_df[self.ts_data_dict[self.names[col]].columns] = self.ts_data_dict[self.names[col]]
    # reverse map: lagged column name -> original series name
    self.ts_names_dict_inv = {v: k for k, v_list in self.ts_names_dict.items() for v in v_list}
    # replicate each series' sampling rate over its window columns
    # (assumes every series contributes exactly ts_window_size columns —
    # TODO confirm window_representation guarantees this)
    temporal_to_unit_idx = []
    ts_sampling_rate_temp = []
    j = 0
    for i in range(len(self.sampling_rate)):
        ts_sampling_rate_temp = ts_sampling_rate_temp + \
                                [self.sampling_rate[self.names[i]]] * self.ts_window_size
        temporal_to_unit_idx = temporal_to_unit_idx + [j] * self.ts_window_size
        j = j + 1
    ts_sampling_rate_temp = np.array(ts_sampling_rate_temp)
    self.ts_sampling_rate = dict()
    for i in range(len(self.ts_names)):
        self.ts_sampling_rate[self.ts_names[i]] = ts_sampling_rate_temp[i]
    # per-column time offsets (lag 0 unchanged, others shifted by rate - 1)
    temp = []
    j = 0
    for i in range(self.d):
        for lag in range(self.ts_window_size):
            if lag == 0:
                temp.append(lag)
            else:
                temp.append(lag + self.ts_sampling_rate[self.ts_names[j]] - 1)
        j = j + 1
    self.time_table = pd.DataFrame([temp], columns=self.ts_names)
    # self.ts_mi_array = np.ones([self.graph.d, self.graph.d, self.ts_window_size])
    self.ts_mi_array = np.ones([self.ts_window_size * self.d, self.ts_window_size * self.d])
    # self.ts_cmi_array = np.ones([self.graph.d, self.graph.d, self.ts_window_size])
    self.ts_cmi_array = np.ones([self.ts_window_size * self.d, self.ts_window_size * self.d])
    self.tgraph = Graph(self.d * self.ts_window_size)
    if self.verbose:
        print("ts_names: " + str(self.ts_names))
        print("ts_sampling_rate: " + str(self.ts_sampling_rate))
        print("time_table: " + str(self.time_table))
        print("temporal window size:" + str(self.ts_window_size))
def summary_to_temporal_array(self):
    """
    transfer knowledge from summary graph to temporal graph

    After building the windowed representation (_ts_init), every edge mark
    of the summary graph is replicated over the corresponding lagged cells
    of self.tgraph, filtered by the time offsets in self.time_table and the
    gamma values; separation sets are lifted to the temporal level as well.
    """
    self._ts_init()
    time_table = self.time_table.values.reshape(-1)
    counter = 0
    for i in range(len(self.names)):
        # lag of each time series i
        lag = len(self.ts_names_dict[self.names[i]])
        # use temporal priority property to add arrows from past to future in the same time series
        if self.graph.edges[i, i] == 1:
            a = self.temporal_priority(self.tgraph.edges[counter:counter + lag, counter:counter + lag])
            self.tgraph.edges[counter:counter + lag, counter:counter + lag] = a
        # iterate on each time step in time series i
        for ti in range(counter, counter + lag):
            counter_j = counter + lag
            for j in range(i + 1, len(self.names)):
                sep = np.where(self.graph.sep[i, j, :] == 1)[0]
                # lag of each time series j
                lag_j = len(self.ts_names_dict[self.names[j]])
                # iterate on each time step in time series j
                for tj in range(counter_j, counter_j + lag_j):
                    # in case of independency between two time series, all sub variables will be independent
                    if self.graph.edges[i, j] == 0:
                        self.tgraph.edges[ti, tj] = 0
                        self.tgraph.edges[tj, ti] = 0
                        # lift the summary separation sets to past temporal cells
                        for s in sep:
                            counter_s = self.ts_window_size*s
                            # todo: take into account only gamma
                            # NOTE(review): g_is / g_js are computed but never
                            # used below — presumably related to the todo above
                            g_is = self.gamma_matrix[self.names[i]].loc[self.names[s]]
                            g_js = self.gamma_matrix[self.names[j]].loc[self.names[s]]
                            for ts in range(counter_s, counter_s + self.ts_window_size):
                                if (ts < ti) or (ts < tj):
                                    self.tgraph.sep[ti, tj, ts] = 1
                    elif self.graph.edges[i, j] == 2:
                        # i -> j: keep only lags compatible with gamma(i, j)
                        if time_table[tj]-time_table[ti] >= self.gamma_matrix[self.names[j]].loc[self.names[i]]:
                            self.tgraph.edges[ti, tj] = 2
                            self.tgraph.edges[tj, ti] = 1
                        else:
                            self.tgraph.edges[ti, tj] = 0
                            self.tgraph.edges[tj, ti] = 0
                    elif self.graph.edges[j, i] == 2:
                        # j -> i: symmetric case
                        if time_table[ti]-time_table[tj] >= self.gamma_matrix[self.names[i]].loc[self.names[j]]:
                            self.tgraph.edges[tj, ti] = 2
                            self.tgraph.edges[ti, tj] = 1
                        else:
                            self.tgraph.edges[ti, tj] = 0
                            self.tgraph.edges[tj, ti] = 0
                    else:
                        # in case of dependency between two time series, transform arrows to circles or dots
                        if time_table[ti] < time_table[tj]:
                            if time_table[tj] - time_table[ti] >= self.gamma_matrix[self.names[j]].loc[self.names[i]]:
                                self.tgraph.edges[ti, tj] = 2
                                self.tgraph.edges[tj, ti] = 1
                            else:
                                self.tgraph.edges[ti, tj] = 0
                                self.tgraph.edges[tj, ti] = 0
                        elif time_table[ti] > time_table[tj]:
                            if time_table[ti] - time_table[tj] >= self.gamma_matrix[self.names[i]].loc[self.names[j]]:
                                self.tgraph.edges[tj, ti] = 2
                                self.tgraph.edges[ti, tj] = 1
                            else:
                                self.tgraph.edges[ti, tj] = 0
                                self.tgraph.edges[tj, ti] = 0
                        else:
                            # simultaneous cells stay mutually undirected
                            self.tgraph.edges[ti, tj] = 1
                            self.tgraph.edges[tj, ti] = 1
                counter_j = counter_j + lag_j
        counter = counter + lag
@staticmethod
def temporal_priority(a):
"""
:param a: square matrix that represent relations between sub variables in the same time series
:param n_markov: if false consider one markov in the same time series
:return: square matrix that takes into account temporal priority
"""
for i in range(a.shape[0]-1):
a[i, i] = 0
for j in range(i+1, a.shape[0]):
a[i, j] = 2
a[j, i] = 1
return a
def temporal_to_temporal_dict(self):
    """
    Expand the summary graph into a lagged-parent dictionary and mirror the
    oriented edges into the temporal adjacency matrix.

    For each oriented edge i -> j, entries (names[i], -lag) are appended to
    tgraph_dict[names[j]] for every admissible lag, and the corresponding
    lagged cells of self.tgraph.edges are marked with an arrow head.

    NOTE(review): assumes the gamma values in gamma_matrix are integers,
    since they are used as range() bounds — confirm upstream.
    """
    if self.verbose:
        print("######################################")
        print("Summary to temporal graph")
        print("######################################")
    for i in range(self.graph.d):
        for j in range(i, self.graph.d):
            if i == j:
                # self edge: lags of a series on itself
                for lag in range(self.gamma_matrix[self.names[j]].loc[self.names[i]],
                                 self.gamma_matrix[self.names[j]].loc[self.names[i]] + self.lags[j]):
                    self.tgraph_dict[self.names[j]].append((self.names[i], -lag))
            else:
                if self.graph.edges[i, j] == 2:
                    for lag in range(self.gamma_matrix[self.names[j]].loc[self.names[i]], self.gamma_matrix[self.names[j]].loc[self.names[i]] + self.lags[j]):
                        self.tgraph_dict[self.names[j]].append((self.names[i], -lag))
                        for l in range(self.ts_window_size - lag):
                            # bug fix: write into the adjacency array — self.tgraph
                            # is a Graph object (cf. every other method), so the
                            # original `self.tgraph[...] = 2` indexed the wrong object
                            self.tgraph.edges[self.d*l + i, self.d*(l + lag) + j] = 2
                elif self.graph.edges[j, i] == 2:
                    for lag in range(self.gamma_matrix[self.names[i]].loc[self.names[j]], self.gamma_matrix[self.names[i]].loc[self.names[j]] + self.lags[i]):
                        self.tgraph_dict[self.names[i]].append((self.names[j], -lag))
                        for l in range(self.ts_window_size - lag):
                            self.tgraph.edges[self.d*l + j, self.d*(l + lag) + i] = 2
                elif (self.graph.edges[i, j] == 1) and (self.graph.edges[j, i] == 1):
                    # undirected edge: keep both directions for every lag
                    for lag in range(self.ts_window_size):
                        self.tgraph_dict[self.names[j]].append((self.names[i], -lag))
                        self.tgraph_dict[self.names[i]].append((self.names[j], -lag))
def _ts_cmi_sep_set_pq(self, p, q, set_size):
    """
    estimate ctmi between two time series conditioned on each set of neighbors with cardinality equal to set_size
    :param p: time series with index p
    :param q: time series with index q
    :return: p, q, list of estimated values of cmi(p,q|r_set), and list of all r_sets
        (implicitly None when no candidate set exists)
    :param set_size: cardinality of the set of neighbors
    """
    # removed: an unused `zeros_matrix` DataFrame was built here on every call
    v_list = []
    # candidates: parents of p or q, plus nodes still undirectedly linked to p or q
    r_list = [r for r in range(self.tgraph.d) if (r != p) and (r != q) and ((
            self.tgraph.edges[p, r] == 2) or (self.tgraph.edges[q, r] == 2) or
            ((self.tgraph.edges[p, r] == 1)
             and (self.tgraph.edges[r, p] == 1))
            or ((self.tgraph.edges[q, r] == 1) and
                (self.tgraph.edges[r, q] == 1)))]
    r_list = [list(r) for r in itertools.combinations(r_list, set_size)]
    x = self.ts_data_df[self.ts_names[p]]
    y = self.ts_data_df[self.ts_names[q]]
    for rs in r_list:
        z = dict()
        for r in rs:
            z[self.ts_names[r]] = self.ts_data_df[self.ts_names[r]]
        cmi_pval, cmi_val = self.cmi_dsr(x, y, z, p_value=self.rank_using_p_value)
        if self.rank_using_p_value:
            v_list.append(cmi_pval)
        else:
            v_list.append(cmi_val)
    if v_list:
        return p, q, v_list, r_list
def ts_rank_cmi_sep_set_parallel(self, set_size):
    """
    rank pairs of time series based on the estimation of ctmi between each pair of connected time series
    :param set_size: cardinality of the set of neighbors
    :return: ranking of each pair of connected time series based ctmi
    """
    list_adj, list_num_adj = self.tgraph.search_adj_all()
    # keep only nodes with more than set_size neighbors, then flatten to pairs
    p_list = [p for p in range(len(list_num_adj)) if list_num_adj[p] > set_size]
    q_list = [list_adj[p] for p in p_list]
    p_list = [p_list[p] for p in range(len(p_list)) for _ in q_list[p]]
    q_list = [q for sublist in q_list for q in sublist]
    pq_list = [(p, q) for p, q in zip(p_list, q_list)]
    # drop one direction of each symmetric pair (p,q)/(q,p)
    # NOTE(review): p_list.remove/q_list.remove delete by *value*, i.e. the
    # first occurrence, which may not be the occurrence belonging to the
    # removed pair when indices repeat — fragile, confirm against callers.
    temp_pq = pq_list.copy()
    temp_p = p_list.copy()
    temp_q = q_list.copy()
    for pq in range(len(temp_pq)):
        if (temp_pq[pq][1], temp_pq[pq][0]) in pq_list:
            pq_list.remove((temp_pq[pq][0], temp_pq[pq][1]))
            p_list.remove(temp_p[pq])
            q_list.remove(temp_q[pq])
    del temp_pq, temp_p, temp_q
    # evaluate all candidate separating sets for every remaining pair
    res = Parallel(n_jobs=self.num_processor)(delayed(self._ts_cmi_sep_set_pq)(p, q, set_size) for p, q in
                                              zip(p_list, q_list))
    ranks = RankingList()
    for pq in range(len(res)):
        if res[pq] is not None:
            if isinstance(res[pq][2], list):
                for r in range(len(res[pq][2])):
                    ranks.add(res[pq][0], res[pq][1], res[pq][2][r], res[pq][3][r])
            else:
                ranks.add(res[pq][0], res[pq][1], res[pq][2], res[pq][3])
    # p-values rank best-first descending; raw cmi values ascending
    if self.rank_using_p_value:
        ranks.sort(descending=True)
    else:
        ranks.sort(descending=False)
    return ranks
def ts_find_sep_set(self):
    """
    find the most contributing separation set (if it exists) between each pair of time series

    For growing conditioning-set sizes, pairs are ranked by conditional
    (t)mi; when the conditional measure indicates independence, the edge and
    all its homologous (time-shifted) copies are removed and the separating
    set is recorded on the temporal graph.
    """
    if self.verbose:
        print("######################################")
        print("Temporal Skeletion Speperation")
        print("######################################")
    for set_size in range(1, self.tgraph.d-1):
        ranks = self.ts_rank_cmi_sep_set_parallel(set_size)
        if self.verbose:
            print("Ranking:")
            print("p: "+str(ranks.elem_p))
            print("q: " + str(ranks.elem_q))
            print("r: " + str(ranks.elem_r))
            print("val: " + str(ranks.val))
        for p, q, r_set, cmi in zip(ranks.elem_p, ranks.elem_q, ranks.elem_r, ranks.val):
            # the edge and its links to the conditioning set must still exist
            test = (self.tgraph.edges[p, q] != 0)
            for r in r_set:
                if not test:
                    break
                test = test and ((self.tgraph.edges[q, r] != 0) or (self.tgraph.edges[p, r] != 0))
            if test:
                mi = self.ts_mi_array[p, q]
                # re-estimate with the decision criterion when ranking used the other one
                if self.p_value != self.rank_using_p_value:
                    x = self.ts_data_df[self.ts_names[p]]
                    y = self.ts_data_df[self.ts_names[q]]
                    z = dict()
                    for r in r_set:
                        z[self.ts_names[r]] = self.ts_data_df[self.ts_names[r]]
                    cmi, _ = self.cmi_dsr(x, y, z, p_value=self.p_value)
                if self.verbose:
                    print("p=" + self.ts_names[p] + "; q=" + self.ts_names[q] + "; r=" + str(r_set) + "; I(p,q|r)=" + "{: 0.5f}".format(
                        cmi) + "; I(p,q)=" + "{: 0.5f}".format(mi), end=" ")
                if self.p_value:
                    test = self.sig_lev < cmi
                else:
                    test = cmi < self.alpha
                if test:
                    self.ts_cmi_array[p, q] = cmi
                    self.ts_cmi_array[q, p] = cmi
                    # delete the edge and all its homologous time-shifted copies
                    self.ts_remove_hom_edges(p, q, self.ts_names_dict_inv[self.ts_names[p]],
                                             self.ts_names_dict_inv[self.ts_names[q]])
                    # self.tgraph.edges[p, q] = 0
                    # self.tgraph.edges[q, p] = 0
                    for r in r_set:
                        self.tgraph.add_sep(q, p, r)
                else:
                    if self.verbose:
                        print()
def ts_remove_hom_edges(self, p, q, p_name, q_name):
    """
    Remove the edge (p, q) and every later homologous edge: advance both
    indices in lockstep and clear edges for as long as both columns still
    belong to the same original series p_name / q_name.
    :param p: temporal index of the first endpoint
    :param q: temporal index of the second endpoint
    :param p_name: original series name that p must belong to
    :param q_name: original series name that q must belong to
    """
    # iterative form of the original tail recursion
    limit = self.tgraph.d
    while (p < limit and q < limit
           and self.ts_names_dict_inv[self.ts_names[p]] == p_name
           and self.ts_names_dict_inv[self.ts_names[q]] == q_name):
        if self.verbose:
            print("=> remove link between " + str(p) + " and " + str(q))
        self.tgraph.edges[p, q] = 0
        self.tgraph.edges[q, p] = 0
        p += 1
        q += 1
# def is_parent_of(self, p, q, lag):
# parent_list_of_q = self.tgraph_dict[self.names[q]]
# parent_list_of_p = self.tgraph_dict[self.names[p]]
# backward = True
# for l in range(self.ts_window_size):
# backward = backward and ((self.names[q], -l) not in parent_list_of_p)
# return ((self.names[p], -lag) in parent_list_of_q) and backward
#
# def is_neighbor_of(self, p, q, lag):
# parent_list_of_q = self.tgraph_dict[self.names[q]]
# parent_list_of_p = self.tgraph_dict[self.names[p]]
# return (p in parent_list_of_q) or (q in parent_list_of_p)
def cmi_dsr(self, x, y, z_dict=None, p_value=False, k=10, sig_samples=10000):
    """
    Run a (conditional) independence test between x and y, optionally
    conditioned on the variables in z_dict, using the cmiknn measure.
    :param x: first variable (Series or DataFrame)
    :param y: second variable (Series or DataFrame)
    :param z_dict: optional mapping name -> Series/DataFrame of conditioning
        variables. Defaults to no conditioning.
    :param p_value: return a p-value instead of the raw statistic. By default False
    :param k: number of nearest neighbors for cmiknn. By default 10
    :param sig_samples: number of permutation samples. By default 10000
    :return: result of indep_test on (x, y | z)
    """
    # todo adapt to different sampling rate
    if isinstance(x, pd.Series):
        x = x.to_frame()
    if isinstance(y, pd.Series):
        y = y.to_frame()
    # bug fix: the default used to be a shared mutable dict (`z_dict=dict()`)
    # and the caller's dict was mutated in place by the to_frame conversion;
    # now the default is None and the caller's mapping is left untouched
    if z_dict:
        z = pd.DataFrame()
        for name, z_var in z_dict.items():
            if isinstance(z_var, pd.Series):
                z_var = z_var.to_frame()
            z[z_var.columns] = z_var.reset_index(drop=True)
    else:
        z = None
    return indep_test(x, y, z, sig_samples=sig_samples, p_value=p_value, measure="cmiknn", k=k)
# todo
def ts_dataframe_to_dict(self):
    """
    Convert the temporal adjacency matrix into a dictionary that maps each
    series name to the list of its lagged parents (parent_name, -(ty - tx)).
    :return: dict of lagged parents per series
    """
    names = self.names.tolist()
    # previously an unconditional leftover debug print; now gated on verbosity
    if self.verbose:
        print(names)
    nlags = self.ts_window_size
    df = pd.DataFrame(self.tgraph.edges, columns=self.ts_names, index=self.ts_names)
    g_dict = dict()
    for name_y in names:
        g_dict[name_y] = []
    # scan every (lagged source, lagged target) cell for arrow heads (== 2)
    for ty in range(nlags):
        for name_y in names:
            t_name_y = df.columns[names.index(name_y) * self.ts_window_size + ty]
            for tx in range(nlags):
                for name_x in names:
                    t_name_x = df.columns[names.index(name_x) * self.ts_window_size + tx]
                    if df[t_name_y].loc[t_name_x] == 2:
                        if (name_x, tx - ty) not in g_dict[name_y]:
                            g_dict[name_y].append((name_x, tx - ty))
    return g_dict
# def _ts_cmi_sep_set_pq(self, p, q, set_size):
# """
# estimate mi between two time series conditioned on each set of neighbors with cardinality equal to set_size
# :param p: time series with index p
# :param q: time series with index q
# :param set_size: cardinality of the set of neighbors
# :return: p, q, list if estimated value of ctmi(p,q,r_set), and list of all r_sets
# """
# v_list = []
# r_list = [r for r in range(self.d) if (self.is_parent_of(r, q)) or (self.is_neighbor_of(q, r) and
# not self.is_parent_of(r, q))]
# r_list = [list(r) for r in itertools.combinations(r_list, set_size)]
#
# x = self.ts_data_dict[self.names[p]]
# y = self.ts_data_dict[self.names[q]]
#
# print(x)
# for rs in r_list:
# z = dict()
# for r in rs:
# z[self.names[r]] = self.data_dict[self.names[r]]
#
# cmi_pval, cmi_val = self.cmi(x, y, z)
# if self.rank_using_p_value:
# v_list.append(cmi_pval)
# else:
# v_list.append(cmi_val)
# if v_list:
# return p, q, v_list, r_list
#
# def ts_rank_cmi_sep_set_parallel(self, set_size):
# """
# rank pairs of time series based on the estimation of ctmi between each pair of connected time series
# :param set_size: cardinality of the set of neighbors
# :return: ranking of each pair of connected time series based ctmi
# """
# pq_list = [(p, q) for p in range(self.d) for q in range(p, self.d) if self.is_neighbor_of(p, q)]
# print(pq_list)
# res = Parallel(n_jobs=self.num_processor)(delayed(self._cmi_sep_set_pq)(p, q, set_size) for p, q in
# pq_list)
# ranks = RankingList()
# for pq in range(len(res)):
# if res[pq] is not None:
# if isinstance(res[pq][2], list):
# for r in range(len(res[pq][2])):
# ranks.add(res[pq][0], res[pq][1], res[pq][2][r], res[pq][3][r])
# else:
# ranks.add(res[pq][0], res[pq][1], res[pq][2], res[pq][3])
# if self.rank_using_p_value:
# ranks.sort(descending=True)
# else:
# ranks.sort(descending=False)
# return ranks
#
# def ts_find_sep_set(self):
# """
# find the most contributing separation set (if it exists) between each pair of time series
# """
# if self.verbose:
# print("######################################")
# print("Ts Skeletion Speperation")
# print("######################################")
#
# for set_size in range(1, (self.graph.d*self.ts_window_size)-1):
# ranks = self.rank_cmi_sep_set_parallel(set_size)
# if self.verbose:
# print("Ranking:")
# print("p: "+str(ranks.elem_p))
# print("p: " + str(ranks.elem_q))
# print("p: " + str(ranks.elem_r))
# print("p: " + str(ranks.val))
# for p, q, r_set, cmi in zip(ranks.elem_p, ranks.elem_q, ranks.elem_r, ranks.val):
# test = self.is_neighbor_of(p, q)
# for r in r_set:
# if not test:
# break
# test = test and (self.is_neighbor_of(q, r) or self.is_neighbor_of(p, r))
# if test:
# mi = self.ts_mi_array[p, q, lag]
#
# if self.p_value != self.rank_using_p_value:
# x = self.data_dict[self.names[p]]
# y = self.data_dict[self.names[q]]
# z = dict()
# for r in r_set:
# z[self.names[r]] = self.data_dict[self.names[r]]
# cmi, _ = self.cmi(x, y, z)
# if self.verbose:
# print("p=" + str(p) + "; q=" + str(q) + "; r=" + str(r_set) + "; I(p,q|r)=" + "{: 0.5f}".format(
# cmi) + "; I(p,q)=" + "{: 0.5f}".format(mi), end=" ")
#
# if self.p_value:
# test = mi < self.sig_lev < cmi
# else:
# test = cmi < self.alpha
# if test:
# self.cmi_array[p, q] = cmi
# self.cmi_array[q, p] = cmi
# if self.verbose:
# print("=> remove link between " + str(p) + " and " + str(q))
# if self.is_parent_of(p, q):
# 1
# elif self.is_parent_of(q, p):
# 1
# elif self.is_neighbor_of(p, q):
# self.graph.edges[p, q] = 0
# self.graph.edges[q, p] = 0
#
# for r in r_set:
# self.graph.add_sep(q, p, r)
# else:
# if self.verbose:
# print()
def noise_based_hidden_counfounders(self):
    """
    Exploratory check for hidden confounders: for every oriented temporal
    edge p -> q, fit a Gaussian-process regression of q on p and print its
    R^2 score.

    NOTE(review): looks like work-in-progress — results are only printed,
    nothing is stored or returned, and the method name carries a typo
    ("counfounders") that cannot be fixed without breaking callers.
    Imports sklearn lazily so the dependency is only needed when called.
    """
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
    for p in range(self.tgraph.d):
        for q in range(self.tgraph.d):
            if self.tgraph.edges[p, q] == 2:
                print(p,q)
                X = self.ts_data_df[self.ts_names[p]].values.reshape(-1, 1)
                y = self.ts_data_df[self.ts_names[q]].values.reshape(-1, 1)
                kernel = DotProduct() + WhiteKernel()
                gpr = GaussianProcessRegressor(kernel=kernel, random_state = 0).fit(X, y)
                print(gpr.score(X, y))
def fit(self):
    """
    Run the temporal PCTMI pipeline: summary-graph discovery (skeleton,
    orientation, propagation and heuristic rules), cycle check, expansion
    into the temporal window graph, temporal separation search, and finally
    conversion to a lagged-parent dictionary.
    :return: dict mapping each series name to its lagged parents
    """
    if self.verbose:
        print("#######################################")
        print("########### Starting TPCTMI ###########")
        print("#######################################")
    # --- summary-graph phase ---
    self.skeleton_initialize()
    self.find_sep_set()
    self.rule_origin_causality()
    progressed = True
    while progressed:
        progressed = any((self.rule_propagation_causality(), self.rule_2(), self.rule_3()))
    self.rule_commun_confounder_and_causal_chain()
    self.rule_mediator()
    self.rule_proba_raising_principle()
    if self.verbose:
        print("######################################")
        print("Final Results (PCTMI)")
        print("######################################")
        print("Summary Graph:")
        print(self.graph.edges)
    # --- temporal phase ---
    self.check_cycles()
    self.summary_to_temporal_array()
    # self.summary_to_temporal()
    self.ts_find_sep_set()
    self.tgraph_dict = self.ts_dataframe_to_dict()
    if self.verbose:
        print("######################################")
        print("Final Results (TPCTMI)")
        print("######################################")
        print("Temporal Graph:")
        print(self.tgraph.edges)
    return self.tgraph_dict
class FCITMI(CITMI):
# def __init__(self, series, sig_lev=0.05, p_value=True, rank_using_p_value=False, verbose=True, num_processor=-1,
# graphical_optimization=True):
def __init__(self, series, sig_lev=0.05, lag_max=5, p_value=True, rank_using_p_value=False, verbose=True,
             num_processor=-1, graphical_optimization=False):
    """
    FCI for time series using TMI and CTMI
    :param series: d-time series (with possibility of different sampling rate)
    :param sig_lev: significance level. By default 0.05
    :param p_value: Use p_value for decision making. By default True
    :param verbose: Print results. By default: True
    :param num_processor: number of processors for parallelization. By default -1 (all)
    """
    # delegate all state construction to the CITMI base class
    super().__init__(series, sig_lev, lag_max, p_value, rank_using_p_value, verbose, num_processor,
                     graphical_optimization)
def dag_to_pag(self):
    """
    Transform the DAG into a PAG by turning every tail mark into a circle
    (undetermined).
    Graph structure
    0: no edge
    1: a tail -
    2: arrow head ->
    3: Undetermined -o
    """
    tail_mask = (self.graph.edges == 1)
    self.graph.edges[tail_mask] = 3
def _find_shortest_directed_paths_util(self, i, j, visited, path, all_path):
    """
    Recursive DFS helper of _find_shortest_directed_paths.
    :param i: index of the current time series
    :param j: index of the destination time series
    :param visited: list of visited flags
    :param path: current path (mutated in place)
    :param all_path: accumulator for the interior node lists of found paths
    """
    visited[i] = True
    path.append(i)
    if i == j:
        # destination reached: keep only the interior nodes of the path;
        # note the early return leaves i marked visited on this branch,
        # matching the original behavior
        if len(path) > 2:
            all_path.append(path[1:-1])
        return path
    # recurse into every unvisited child (arrow head out of i)
    for child in np.where(self.graph.edges[i, :] == 2)[0]:
        if not visited[child]:
            self._find_shortest_directed_paths_util(child, j, visited, path, all_path)
    # backtrack: unmark i and drop it from the path
    path.pop()
    visited[i] = False
def _find_shortest_directed_paths(self, i, j):
    """
    Find the directed paths between time series i and time series j.
    :param i: index of the source time series
    :param j: index of the destination time series
    :return: list of interior-node sequences of all directed paths from i to j
    """
    visited = [False] * self.graph.d
    collected = []
    self._find_shortest_directed_paths_util(i, j, visited, [], collected)
    return collected
# todo
def _find_discriminating_paths_util(self, i, j, visited, path, all_path):
    """
    Recursive helper of _find_discriminating_paths: extend the current path
    through nodes that are both child and parent of i and adjacent to j,
    recording each candidate path (always terminated by j) in all_path.
    :param i: index of time series
    :param j: index of time series
    :param visited: list of visited nodes
    :param path: current path
    :param all_path: list of all discovered paths

    NOTE(review): the branch below delegates to
    _find_shortest_directed_paths_util, which records *interior* nodes of
    directed paths — possibly a copy-paste from the shortest-path search;
    confirm a discriminating-path-specific recursion was not intended.
    """
    # Mark the current node as visited and store in path
    visited[i] = True
    path.append(i)
    # record the path extended by the endpoint j
    path.append(j)
    all_path.append(path.copy())
    path.pop()
    # candidates: nodes in a 2-cycle with i (both child and parent) that
    # are also adjacent to j in both directions
    i_child = (self.graph.edges[i, :] == 2)
    i_parent = (self.graph.edges[:, i] == 2)
    j_adj1 = (self.graph.edges[:, j] != 0)
    j_adj2 = (self.graph.edges[j, :] != 0)
    next_i = np.where([a and b and c and d for a, b, c, d in zip(i_child, i_parent, j_adj1, j_adj2)])[0]
    for k in next_i:
        if not visited[k]:
            if (self.graph.edges[k, j] == 2) and (self.graph.edges[j, k] == 1):
                self._find_shortest_directed_paths_util(k, j, visited, path, all_path)
            else:
                visited[k] = True
                path.append(k)
                path.append(j)
                all_path.append(path.copy())
                path.pop()
    # Remove current vertex from path[] and mark it as unvisited
    path.pop()
    visited[i] = False
def _find_discriminating_paths(self, i, j):
    """
    find discriminating path between time series of index i and time series of index j
    :param i: index of time series
    :param j: index of time series
    :return: all discriminating paths from i to j
    """
    # Mark all the vertices as not visited
    visited = [False] * self.graph.d
    # Create an array to store paths
    path = [i]
    all_path = []
    # starting candidates: children of i that are not parents of i and are
    # adjacent to j in both directions
    i_child = (self.graph.edges[i, :] == 2)
    i_non_parent = (self.graph.edges[:, i] != 2)
    j_adj1 = (self.graph.edges[:, j] != 0)
    j_adj2 = (self.graph.edges[j, :] != 0)
    first_next_i = np.where([a and b and c and d for a, b, c, d in zip(i_child, i_non_parent, j_adj1, j_adj2)])[0]
    # previously an unconditional leftover debug print; now gated on verbosity
    if self.verbose:
        print(first_next_i)
    for f in first_next_i:
        # Call the recursive helper function to print all paths
        self._find_discriminating_paths_util(f, j, visited, path, all_path)
    return all_path
def _find_ancestors_util(self, i, visited, path, all_path):
    """
    Recursive DFS helper of _find_ancestors: climb parent links from i and
    record the ancestor chain whenever a root (parent-less node) is reached.
    :param i: index of the current time series
    :param visited: list of visited flags
    :param path: current path (mutated in place)
    :param all_path: accumulator for discovered ancestor chains
    """
    visited[i] = True
    path.append(i)
    parents = np.where(self.graph.edges[:, i] == 2)[0]
    if len(parents) == 0:
        # root reached: record every node above the starting one
        if len(path) > 1:
            all_path.append(path[1:])
    else:
        for parent in parents:
            if not visited[parent]:
                self._find_ancestors_util(parent, visited, path, all_path)
    # backtrack
    path.pop()
    visited[i] = False
def _find_ancestors(self, i):
    """
    Find the ancestors of the time series with index i.
    :param i: index of the time series
    :return: list of ancestor indices (deduplicated)
    """
    visited = [False] * self.graph.d
    chains = []
    self._find_ancestors_util(i, visited, [], chains)
    # flatten all recorded chains and deduplicate
    unique_ancestors = set()
    for chain in chains:
        unique_ancestors.update(chain)
    return list(unique_ancestors)
def _find_possible_d_sep_ij_util(self, i, j, v, before_v, anc_i, anc_j, visited, path, possible_d_sep):
    """
    Recursive helper of _find_possible_d_sep_ij: walk the graph from v and
    collect candidate possible-d-sep nodes.
    :param i: index of the first endpoint time series
    :param j: index of the second endpoint time series
    :param v: current node
    :param before_v: node visited just before v
    :param anc_i: ancestors of i (NOTE(review): currently unused here)
    :param anc_j: ancestors of j (NOTE(review): currently unused here)
    :param visited: list of visited flags
    :param path: current path
    :param possible_d_sep: accumulator for the possible-d-sep nodes
    """
    # previously this method contained four unconditional debug prints
    # (raw state dumps and "one"/"two" markers); they have been removed
    visited[v] = True
    path.append(v)
    if (before_v != v) and (self.graph.edges[before_v, v] != 2):
        if len(path) > 1:
            possible_d_sep.append(v)
    else:
        # NOTE(review): despite the name, this selects *parents* of v
        # (edges[:, v] == 2), not all adjacent nodes — confirm intent
        adj_v = np.where(self.graph.edges[:, v] == 2)[0]
        adj_v = [k for k in adj_v if not visited[k]]
        for k in adj_v:
            if not visited[k]:
                if (v != i) and (v != j):
                    possible_d_sep.append(v)
                self._find_possible_d_sep_ij_util(i, j, k, v, anc_i, anc_j, visited, path, possible_d_sep)
def _find_possible_d_sep_ij(self, i, j):
"""
:param i: index of time series
:param j: index of time series
:return: all possible d-sep if i and j
"""
anc_i = self._find_ancestors(i)
anc_j = self._find_ancestors(j)
# Mark all the vertices as not visited
visited = [False] * self.graph.d
# Create an array to store paths
path = []
possible_d_sep = []
# Call the recursive helper function to print all paths
self._find_possible_d_sep_ij_util(i, j, i, i, anc_i, anc_j, visited, path, possible_d_sep)
return possible_d_sep
    def _cmi_possible_d_sep_ij(self, p, q, set_size):
        """
        estimate ctmi between two time series conditioned on each possible-d-set with cardinality equal to set_size
        :param p: index of the first time series
        :param q: index of the second time series
        :param set_size: cardinality of the conditioning sets drawn from possible-d-sep(p, q)
        :return: p, q, list of estimated values of ctmi(p,q,possible-d-set), and list of all possible-d-sets;
            implicitly returns None when there is no conditioning set of the requested size
        """
        v_list = []
        k_list = self._find_possible_d_sep_ij(p, q)
        # Enumerate every subset of the possible-d-sep with the requested size.
        k_list = [list(k) for k in itertools.combinations(k_list, set_size)]
        print(p, q, k_list)  # NOTE(review): debug print left in place
        x = self.data_dict[self.names[p]]
        y = self.data_dict[self.names[q]]
        for ks in k_list:
            # Build the conditioning dictionary {series name: series data}.
            z = dict()
            for k in ks:
                z[self.names[k]] = self.data_dict[self.names[k]]
            # With graphical optimization, ctmi also receives the current graph.
            if self.graphical_optimization:
                cmi_pval, cmi_val = ctmi(x, y, z, self.names[p], self.names[q], self.sampling_rate,
                                         gamma_matrix=self.gamma_matrix, graph=self.graph.edges,
                                         p_value=self.p_value)
            else:
                cmi_pval, cmi_val = ctmi(x, y, z, self.names[p], self.names[q], self.sampling_rate,
                                         gamma_matrix=self.gamma_matrix, p_value=self.p_value)
            v_list.append(cmi_pval)
        if v_list:
            return p, q, v_list, k_list
    def rank_possible_d_sep_parallel(self, set_size):
        """
        rank pairs of connected time series conditioned on their possible-d-sep based on the estimation of ctmi
        :param set_size: cardinality of the possible-d-sep subsets
        :return: RankingList of pairs sorted by p-value (descending) or ctmi value (ascending)
        """
        # Build the flat list of ordered (i, j) pairs where j is adjacent to i.
        list_adj, list_num_adj = self.graph.search_adj_all()
        i_list = [[i]*list_num_adj[i] for i in range(len(list_num_adj)) if list_num_adj[i] > 0]
        i_list = [i for sublist in i_list for i in sublist]
        j_list = [list_adj[j] for j in range(len(list_num_adj)) if list_num_adj[j] > 0]
        j_list = [j for sublist in j_list for j in sublist]
        # Estimate the conditional measure of every pair in parallel.
        res = Parallel(n_jobs=self.num_processor)(delayed(self._cmi_possible_d_sep_ij)(i, j, set_size) for i, j in
                                                  zip(i_list, j_list))
        ranks = RankingList()
        for ij in range(len(res)):
            # _cmi_possible_d_sep_ij may return None when no set was available.
            if res[ij] is not None:
                if isinstance(res[ij][2], list):
                    # One ranking entry per conditioning set.
                    for k in range(len(res[ij][2])):
                        ranks.add(res[ij][0], res[ij][1], res[ij][2][k], res[ij][3][k])
                else:
                    ranks.add(res[ij][0], res[ij][1], res[ij][2], res[ij][3])
        # With p-values a larger value means "more independent", so sort descending.
        if self.p_value:
            ranks.sort(descending=True)
        else:
            ranks.sort(descending=False)
        return ranks
    def find_d_sep(self):
        """
        find the most contributing d sep (if it exists) between each pair of time series,
        removing the corresponding edge and recording the separating nodes
        :return: (bool) True if at least one link was removed and False otherwise
        """
        if self.verbose:
            print("######################################")
            print("d-seperation")
            print("######################################")
        test_remove_links = False
        # Grow the conditioning-set cardinality from 1 up to d-2.
        for set_size in range(1, self.graph.d-1):
            ranks = self.rank_possible_d_sep_parallel(set_size)
            for i, j, ks, cmi in zip(ranks.elem_p, ranks.elem_q, ranks.elem_r, ranks.val):
                # The pair must still be linked and every conditioning node must
                # still be adjacent to at least one of the two endpoints.
                test = (self.graph.edges[i, j] != 0)
                for k in ks:
                    if not test:
                        break
                    test = test and ((self.graph.edges[j, k] != 0) or (self.graph.edges[i, k] != 0))
                if test:
                    mi = self.mi_array[i, j]
                    if self.verbose:
                        print("i=" + str(i) + "; j=" + str(j) + "; z=" + str(ks) + "; I(i,j|z)=" + "{: 0.5f}".format(
                            cmi) + "; I(i,j)=" + "{: 0.5f}".format(mi), end=" ")
                    # Remove the edge when mi < sig_lev < cmi (p-value mode)
                    # or when the ctmi value falls below alpha.
                    if self.p_value:
                        test = mi < self.sig_lev < cmi
                    else:
                        test = cmi < self.alpha
                    if test:
                        test_remove_links = True
                        self.cmi_array[i, j] = cmi
                        self.cmi_array[j, i] = cmi
                        if self.verbose:
                            print("=> remove link between " + str(i) + " and " + str(j))
                        self.graph.edges[i, j] = 0
                        self.graph.edges[j, i] = 0
                        # Remember each conditioning node as a separator of (i, j).
                        for k in ks:
                            self.graph.add_sep(j, i, k)
                    else:
                        if self.verbose:
                            print()
        return test_remove_links
def remove_orientation(self):
"""
turn all vertex into undetermined vertex
"""
if self.verbose:
print("######################################")
print("Remove orientation")
print("######################################")
for i in range(self.graph.d):
for j in range(self.graph.d):
if i != j:
if self.graph.edges[i, j] != 0:
self.graph.edges[i, j] = 3
    def _oc_pq(self, p, q):
        """
        estimate ctmi between two time series conditioned on their sep set + each non oriented connected neighbor
        :param p: index of time series
        :param q: index of time series
        :return: p, q, the most contributing node and the MI conditioned on sep set and the most contributing node;
            implicitly returns None when there is no candidate neighbor
        """
        v_list = []
        # Candidate nodes: fully circle-connected (mark 3 both ways) to both p
        # and q, and not already recorded as a separator of (p, q).
        k_list = [k for k in range(self.graph.d) if (k != p) and (k != q)
                  and ((self.graph.edges[q, k] == 3) and (self.graph.edges[p, k] == 3)
                       and (self.graph.edges[k, q] == 3) and (self.graph.edges[k, p] == 3)
                       and (self.graph.sep[p, q, k] == 0))]
        if len(k_list) > 0:
            x = self.data_dict[self.names[p]]
            y = self.data_dict[self.names[q]]
            sep = np.where(self.graph.sep[p, q, :] == 1)[0]
            for k in k_list:
                if k not in sep:
                    z = self.data_dict[self.names[k]]
                    print(z.shape)  # NOTE(review): debug print left in place
                    cmi_pval, cmi_val = i_ctmi(x, y, z, self.names[p], self.names[q], self.names[k], self.sampling_rate,
                                               p_value=self.p_value)
                    v_list.append(cmi_pval)
        if v_list:
            # Keep the most contributing candidate: max p-value / min ctmi value.
            if self.p_value:
                idx = int(np.argmax(v_list))
            else:
                idx = int(np.argmin(v_list))
            return p, q, v_list[idx], k_list[idx]
    def rank_oc_parallel(self):
        """
        rank unshielded triples based on the estimation of ctmi
        :return: RankingList of unshielded triples ranked by ctmi
        """
        p_list = []
        q_list = []
        # Collect every non-adjacent pair (p, q): candidate triple endpoints.
        # NOTE(review): iterates over self.d while sibling methods use
        # self.graph.d — presumably the same value; confirm.
        for p in range(self.d):
            for q in range(p+1, self.d):
                if (self.graph.edges[p, q] == 0) and (self.graph.edges[q, p] == 0):
                    p_list.append(p)
                    q_list.append(q)
        res = Parallel(n_jobs=self.num_processor)(delayed(self._oc_pq)(p, q) for p, q in zip(p_list, q_list))
        ranks = RankingList()
        for pq in range(len(res)):
            # _oc_pq may return None when no candidate neighbor exists.
            if res[pq] is not None:
                ranks.add(res[pq][0], res[pq][1], res[pq][2], res[pq][3])
        # For collider detection, small p-values / large ctmi values come first.
        if self.p_value:
            ranks.sort(descending=False)
        else:
            ranks.sort(descending=True)
        return ranks
    def rule_origin_causality(self):
        """
        rule 0 (origin of causality) from FCI: orient v-structures p *-> k <-* q
        over unshielded triples ranked by rank_oc_parallel
        """
        if self.verbose:
            print("######################################")
            print("Rule Origin of Causality")
            print("######################################")
        ranks = self.rank_oc_parallel()
        for p, q, k, cmi in zip(ranks.elem_p, ranks.elem_q, ranks.elem_r, ranks.val):
            # Orient only if the triple is still fully circle-connected.
            if (self.graph.edges[q, k] == 3) and (self.graph.edges[p, k] == 3) \
                    and (self.graph.edges[k, q] == 3) and (self.graph.edges[k, p] == 3):
                sep = np.where(self.graph.sep[p, q, :] == 1)[0]
                print("sep = " + str(sep))  # NOTE(review): debug print left in place
                # if len(sep) > 0:
                #     mi = self.cmi_array[p, q]
                # else:
                mi = self.mi_array[p, q]
                if k not in sep:
                    if self.verbose:
                        print("p=" + str(p) + "; q=" + str(q) + "; r=" + str(
                            k) + "; s=" + str(
                            sep) + "; I(p,q|r,s)=" + "{: 0.5f}".format(
                            cmi) + "; I(p,q|s)=" + "{: 0.5f}".format(mi), end=" ")
                    # Conditioning on k lowers the measure => k acts as a collider.
                    if self.p_value:
                        test = cmi < mi
                    else:
                        test = mi - cmi < 0
                    if test:
                        if self.verbose:
                            print("=> orient " + str(p) + " -> " + str(k) + " <- " + str(q))
                        self.graph.edges[p, k] = 2
                        self.graph.edges[q, k] = 2
                    else:
                        if self.verbose:
                            print()
# def rule_origin_causality(self):
# """
# rule 0 (origin of causality) from FCI
# """
# # todo parallelization
# if self.verbose:
# print("######################################")
# print("Rule Origin of Causality")
# print("######################################")
#
# for i in range(self.graph.d):
# for j in range(i + 1, self.graph.d):
# if (self.graph.edges[i, j] == 0) and (self.graph.edges[j, i] == 0):
# k_list = [k for k in range(self.graph.d) if (k != i) and (k != j)
# and ((self.graph.edges[j, k] == 3) and (self.graph.edges[i, k] == 3))]
# if len(k_list) > 0:
# x = self.data_dict[self.names[i]]
# y = self.data_dict[self.names[j]]
# sep = np.where(self.graph.sep[i, j, :] == 1)[0]
# print(str(i), str(j) + "sep = " + str(sep))
# if len(sep) > 0:
# mi = self.cmi_array[i, j]
# else:
# mi = self.mi_array[i, j]
# for k in k_list:
# if k not in sep:
# sep_k = sep.tolist() + [k]
# z = dict()
# for name_k in self.names[sep_k]:
# z[name_k] = self.data_dict[name_k]
# # cmi_pval, cmi_val = ctmi(x, y, z, self.names[i], self.names[j], self.sampling_rate,
# # gamma_matrix=self.gamma_matrix, p_value=self.p_value, mission="ictmi")
# cmi_pval, cmi_val = i_ctmi(x, y, z, self.names[i], self.names[j], self.names[k],
# self.sampling_rate,
# p_value=self.p_value)
# if self.verbose:
# print("i=" + str(i) + "; j=" + str(j) + "; z=" + str(
# k) + "; u=" + str(
# sep) + "; I(i,j|u,z)=" + "{: 0.5f}".format(
# cmi_pval) + "; I(i,j|u)=" + "{: 0.5f}".format(mi), end=" ")
# if self.p_value:
# test = cmi_pval < mi
# else:
# test = mi - cmi_pval < 0
# if test:
# if self.verbose:
# print("=> orient " + str(i) + " -> " + str(k) + " <- " + str(j))
# self.graph.edges[i, k] = 2
# self.graph.edges[j, k] = 2
# else:
# if self.verbose:
# print()
def rule_temporal_priority_within_time_series(self):
for i in range(self.graph.d):
if self.graph.edges[i, i] == 3:
self.graph.edges[i, i] = 1
    def rule_propagation_causality(self):
        """
        rule 1 from FCI: for non-adjacent i, j with i *-> k o-* j,
        orient k -> j (and symmetrically for j *-> k o-* i)
        :return: (bool) True if the rule made a change in the graph and False otherwise
        """
        if self.verbose:
            print("######################################")
            print("Rule Propagation of Causality")
            print("######################################")
        test_find_orientation = False
        for i in range(self.graph.d):
            for j in range(i + 1, self.graph.d):
                if (self.graph.edges[i, j] == 0) and (self.graph.edges[j, i] == 0):
                    # Middle nodes k with an arrowhead from one endpoint and an
                    # undetermined (circle) mark from the other.
                    k_list = [k for k in range(self.graph.d) if (k != i) and (k != j) and
                              (((self.graph.edges[j, k] == 2) and
                                (self.graph.edges[k, j] != 0) and (self.graph.edges[k, i] != 0) and
                                (self.graph.edges[i, k] == 3)) or ((self.graph.edges[i, k] == 2) and
                                                                   (self.graph.edges[k, i] != 0) and
                                                                   (self.graph.edges[k, j] != 0) and
                                                                   (self.graph.edges[j, k] == 3)))]
                    if len(k_list) > 0:
                        test_find_orientation = True
                        for k in k_list:
                            if self.graph.edges[i, k] == 2:
                                if self.verbose:
                                    print(str(i) + "*->" + str(k) + "°-*" + str(j), end=" ")
                                    print("=> orient " + str(i) + "*-> " + str(k) + " -> " + str(j))
                                self.graph.edges[k, j] = 2
                                self.graph.edges[j, k] = 1
                            else:
                                if self.verbose:
                                    print(str(j) + "*->" + str(k) + "°-*" + str(i), end=" ")
                                    print("=> orient " + str(j) + "*-> " + str(k) + " -> " + str(i))
                                self.graph.edges[k, i] = 2
                                self.graph.edges[i, k] = 1
        return test_find_orientation
def rule_2(self):
"""
rule 2 from FCI
:return: (bool) True if the rule made a change in the graph and False otherwise
"""
if self.verbose:
print("######################################")
print("Rule 3")
print("######################################")
test_find_orientation = False
for i in range(self.graph.d):
j_list = np.where(self.graph.edges[i, :] == 3)[0].tolist()
if i in j_list:
j_list.remove(i)
for j in j_list:
shortest_directed_path = self._find_shortest_directed_paths(i, j)
if len(shortest_directed_path) > 0:
self.graph.edges[i, j] = 2
test_find_orientation = True
if self.verbose:
print_path = '*->'.join(map(str, shortest_directed_path[0]))
print(str(i)+"*-0"+str(j)+" and "+str(i) + "*->" + print_path + "*->" + str(j), end=" ")
print("=> orient " + str(i) + "*->" + str(j))
return test_find_orientation
    def rule_3(self):
        """
        rule 3 from FCI: if i *-> c <-* j, i *-o k o-* j for non-adjacent i, j
        and k o-* c, then orient k *-> c
        :return: (bool) True if the rule made a change in the graph and False otherwise
        """
        if self.verbose:
            print("######################################")
            print("Rule 3")
            print("######################################")
        test_find_orientation = False
        for i in range(self.graph.d):
            for j in range(i + 1, self.graph.d):
                if (self.graph.edges[i, j] == 0) and (self.graph.edges[j, i] == 0):
                    # Nodes receiving arrowheads from both endpoints.
                    colliders = [k for k in range(self.graph.d) if (k != i) and (k != j) and (
                            (self.graph.edges[j, k] == 2) and (self.graph.edges[i, k] == 2))]
                    # Nodes circle-connected to both endpoints.
                    k_list = [k for k in range(self.graph.d) if (k != i) and (k != j) and (
                            (self.graph.edges[j, k] == 3) and (self.graph.edges[i, k] == 3))]
                    if len(colliders) > 0 and len(k_list) > 0:
                        for c in colliders:
                            for k in k_list:
                                if self.graph.edges[k, c] == 3:
                                    test_find_orientation = True
                                    self.graph.edges[k, c] = 2
                                    if self.verbose:
                                        print(str(i) + "*->" + str(c) + "<-*" + str(j) + " and " + str(i) + "*-0" +
                                              str(k) + "0-*" + str(j) + " and " + str(k) + "*-0" + str(c),
                                              end=" ")
                                        print("=> orient " + str(k) + "*->" + str(c))
        return test_find_orientation
def rule_4(self):
"""
rule 4 from FCI
:return: (bool) True if the rule made a change in the graph and False otherwise
"""
if self.verbose:
print("######################################")
print("Rule 4")
print("######################################")
test_find_orientation = False
for i in range(self.graph.d):
for j in range(self.graph.d):
if (i != j and self.graph.edges[i, j] == 0) and (self.graph.edges[j, i] == 0):
discriminating_paths = self._find_discriminating_paths(i, j)
for dp in discriminating_paths:
k = dp[-2]
if self.graph.edges[j, k] == 3:
self.graph.edges[j, k] = 1
self.graph.edges[k, j] = 2
else:
self.graph.edges[j, k] = 2
self.graph.edges[k, j] = 2
s = dp[-3]
self.graph.edges[s, k] = 2
self.graph.edges[k, s] = 2
return test_find_orientation
    def _find_uncovered_path_util(self, i, j, i_2, i_1, visited, path, all_path):
        """
        sub function of _find_uncovered_path: depth-first search recording
        paths whose consecutive triples have non-adjacent endpoints.
        :param i: index of time series (current node)
        :param j: index of time series (destination)
        :param i_2: index of time series at before the previous iteration
        :param i_1: index of time series at the previous iteration
        :param visited: list of visited nodes
        :param path: current path
        :param all_path: list of all discovered paths
        :return:
        """
        # Mark the current node as visited and store in path
        visited[i] = True
        path.append(i)
        # If the current vertex is the destination, record the path.
        if i == j:
            if len(path) > 2:
                if len(path) == 3:
                    print(i, i_2)  # NOTE(review): debug prints left in place
                    print(path)
                    print(self.graph.edges[i, i_2])
                    # Length-3 path: uncovered only if its endpoints are non-adjacent.
                    if self.graph.edges[i, i_2] == 0:
                        all_path.append(path.copy())
                else:
                    all_path.append(path.copy())
        else:
            # Recurse over every neighbor of i, enforcing the uncovered
            # condition (triple endpoints non-adjacent) once the path is long enough.
            child_i = np.where(self.graph.edges[i, :] != 0)[0]
            for k in child_i:
                if not visited[k]:
                    if len(path) > 2:
                        if self.graph.edges[i, i_2] == 0:
                            self._find_uncovered_path_util(k, j, i_1, i, visited, path, all_path)
                    elif len(path) == 2:
                        self._find_uncovered_path_util(k, j, i_2, i, visited, path, all_path)
                    else:
                        self._find_uncovered_path_util(k, j, i_2, i_1, visited, path, all_path)
        # Backtrack: remove current vertex from path and mark it unvisited.
        path.pop()
        visited[i] = False
def _find_uncovered_path(self, i, j):
"""
find uncovered path between time series of index i and time series of index j
:param i: index of time series
:param j: index of time series
:return: all uncovered paths from i to j
"""
# Mark all the vertices as not visited
visited = [False] * self.graph.d
# Create an array to store paths
path = []
all_uncovered_path = []
# Call the recursive helper function to print all paths
self._find_uncovered_path_util(i, j, i, i, visited, path, all_uncovered_path)
return all_uncovered_path
def _is_circle_path(self, path):
"""
check if path is a circle path
:param path: any path in the graph
:return: bool
"""
test_list = []
for p in range(len(path)-1):
test_list.append((self.graph.edges[path[p], path[p+1]] == 3) and
(self.graph.edges[path[p+1], path[p]] == 3))
return all(test_list)
    def rule_5(self):
        """
        rule 5 from FCI: if i o-o j and there is an uncovered circle path
        between them whose interior is not adjacent to the opposite endpoint,
        make the whole cycle undirected (tail-tail)
        :return: (bool) True if the rule made a change in the graph and False otherwise
        """
        if self.verbose:
            print("######################################")
            print("Rule 5")
            print("######################################")
        test_find_orientation = False
        for i in range(self.graph.d):
            for j in range(i + 1, self.graph.d):
                if (self.graph.edges[i, j] == 3) and (self.graph.edges[j, i] == 3):
                    uncovered_path_list = self._find_uncovered_path(i, j)
                    if len(uncovered_path_list) > 0:
                        for ucp in uncovered_path_list:
                            # Interior of the path must be all circle-circle edges.
                            if self._is_circle_path(ucp[1:-1]):
                                # Endpoints must not be adjacent to the interior's far ends.
                                if (self.graph.edges[ucp[0], ucp[-2]] == 0) and \
                                        (self.graph.edges[ucp[-1], ucp[1]] == 0):
                                    test_find_orientation = True
                                    if self.verbose:
                                        print(str(i) + "0-0" + str(j) + " and found an uncovered path", end=" ")
                                        print("=> orient " + str(i) + "- " + str(j))
                                    self.graph.edges[i, j] = 1
                                    self.graph.edges[j, i] = 1
                                    # Make every edge along the path undirected too.
                                    for p in range(len(ucp)-1):
                                        if self.verbose:
                                            print(str(ucp[p]) + "0-0" + str(ucp[p+1]), end=" ")
                                            print("=> orient " + str(ucp[p]) + "- " + str(ucp[p+1]))
                                        self.graph.edges[ucp[p], ucp[p + 1]] = 1
                                        self.graph.edges[ucp[p + 1], ucp[p]] = 1
        return test_find_orientation
    def rule_6(self):
        """
        rule 6 from FCI: if i - k o-* j (with i - k undirected), orient the
        circle mark at k into a tail, giving k -* j
        :return: (bool) True if the rule made a change in the graph and False otherwise
        """
        if self.verbose:
            print("######################################")
            print("Rule 6")
            print("######################################")
        test_find_orientation = False
        for i in range(self.graph.d):
            for j in range(i + 1, self.graph.d):
                # Middle nodes k with an undirected (tail-tail) edge to one
                # endpoint and a circle mark toward the other.
                k_list = [k for k in range(self.graph.d) if (k != i) and (k != j) and
                          (((self.graph.edges[j, k] == 3) and (self.graph.edges[k, j] != 0) and
                            (self.graph.edges[k, i] == 1) and (self.graph.edges[i, k] == 1)) or
                           ((self.graph.edges[i, k] == 3) and (self.graph.edges[k, i] != 0) and
                            (self.graph.edges[k, j] == 1) and (self.graph.edges[j, k] == 1)))]
                if len(k_list) > 0:
                    test_find_orientation = True
                    for k in k_list:
                        if self.graph.edges[j, k] == 3:
                            if self.verbose:
                                print(str(i) + "-" + str(k) + "0-*" + str(j), end=" ")
                                print("=> orient " + str(i) + "- " + str(k) + " -* " + str(j))
                            self.graph.edges[j, k] = 1
                        else:
                            if self.verbose:
                                print(str(j) + "-" + str(k) + "0-*" + str(i), end=" ")
                                print("=> orient " + str(j) + "- " + str(k) + " -* " + str(i))
                            self.graph.edges[i, k] = 1
        return test_find_orientation
    def rule_7(self):
        """
        rule 7 from FCI: for non-adjacent i, j with i -o k o-* j, orient the
        circle mark at k into a tail
        :return: (bool) True if the rule made a change in the graph and False otherwise
        """
        if self.verbose:
            print("######################################")
            print("Rule 7")
            print("######################################")
        test_find_orientation = False
        for i in range(self.graph.d):
            for j in range(i + 1, self.graph.d):
                if (self.graph.edges[i, j] == 0) and (self.graph.edges[j, i] == 0):
                    # Middle nodes k seen as i -o k o-* j (or the mirrored form).
                    k_list = [k for k in range(self.graph.d) if (k != i) and (k != j) and
                              (((self.graph.edges[j, k] == 3) and (self.graph.edges[k, j] != 0) and
                                (self.graph.edges[k, i] == 1) and (self.graph.edges[i, k] == 3)) or
                               ((self.graph.edges[i, k] == 3) and (self.graph.edges[k, i] != 0) and
                                (self.graph.edges[k, j] == 1) and (self.graph.edges[j, k] == 3)))]
                    if len(k_list) > 0:
                        test_find_orientation = True
                        for k in k_list:
                            if self.graph.edges[k, i] == 1:
                                if self.verbose:
                                    print(str(i) + "-0" + str(k) + "0-*" + str(j), end=" ")
                                    print("=> orient " + str(i) + "-0 " + str(k) + " -* " + str(j))
                                self.graph.edges[j, k] = 1
                            else:
                                if self.verbose:
                                    print(str(j) + "-0" + str(k) + "0-*" + str(i), end=" ")
                                    print("=> orient " + str(j) + "-0 " + str(k) + " -* " + str(i))
                                self.graph.edges[i, k] = 1
        return test_find_orientation
def _is_potentially_directed(self, path):
"""
check if path is a potentially directed path
:param path: any path in the graph
:return: bool
"""
test_list1 = []
for p in range(len(path)-1):
test_list1.append((self.graph.edges[path[p+1], path[p]] != 2))
return all(test_list1)
    def rule_8(self):
        """
        rule 8 from FCI: if i -> k -> j (or i -o k -> j) and i o-> j,
        replace the circle at i with a tail, giving i -> j
        :return: (bool) True if the rule made a change in the graph and False otherwise
        """
        if self.verbose:
            print("######################################")
            print("Rule 8")
            print("######################################")
        test_find_orientation = False
        for i in range(self.graph.d):
            for j in range(self.graph.d):
                if (self.graph.edges[i, j] == 2) and (self.graph.edges[j, i] == 3):
                    # Middle nodes forming i -> k -> j or i -o k -> j.
                    k_list = [k for k in range(self.graph.d) if (k != i) and (k != j) and
                              (((self.graph.edges[i, k] == 2) and (self.graph.edges[k, i] == 1) and
                                (self.graph.edges[k, j] == 2) and (self.graph.edges[j, k] == 1)) or
                               ((self.graph.edges[i, k] == 3) and (self.graph.edges[k, i] == 1) and
                                (self.graph.edges[k, j] == 2) and (self.graph.edges[j, k] == 1)))]
                    if len(k_list) > 0:
                        test_find_orientation = True
                        for k in k_list:
                            if self.verbose:
                                if self.graph.edges[i, k] == 3:
                                    print(str(i) + "-0" + str(k) + "->" + str(j) + " and "+str(i) + "0->" + str(j),
                                          end=" ")
                                    print("=> orient " + str(i) + " -> " + str(j))
                                else:
                                    print(str(i) + "->" + str(k) + "->" + str(j) + " and "+str(i) + "0->" + str(j),
                                          end=" ")
                                    print("=> orient " + str(i) + " -> " + str(j))
                            self.graph.edges[j, i] = 1
        return test_find_orientation
    def rule_9(self):
        """
        rule 9 from FCI: if i o-> j and there is an uncovered potentially
        directed path from i to j avoiding adjacency between j and the second
        node, replace the circle at i with a tail (i -> j)
        :return: (bool) True if the rule made a change in the graph and False otherwise
        """
        if self.verbose:
            print("######################################")
            print("Rule 9")
            print("######################################")
        test_find_orientation = False
        for i in range(self.graph.d):
            for j in range(self.graph.d):
                if (self.graph.edges[i, j] == 2) and (self.graph.edges[j, i] == 3):
                    uncovered_path_list = self._find_uncovered_path(i, j)
                    if len(uncovered_path_list) > 0:
                        for p_d in uncovered_path_list:
                            if self._is_potentially_directed(p_d):
                                # j must not be adjacent to the path's second node.
                                if self.graph.edges[p_d[-1], p_d[1]] == 0:
                                    test_find_orientation = True
                                    if self.verbose:
                                        print(str(i) + "0->" + str(j) + " and found a potential directed path", end=" ")
                                        print("=> orient " + str(i) + "->" + str(j))
                                    self.graph.edges[j, i] = 1
        return test_find_orientation
    def rule_10(self):
        """
        rule 10 from FCI: if i o-> j, with k -> j <- t and two uncovered
        potentially directed paths from i to k and from i to t whose first
        steps differ and are non-adjacent, orient i -> j
        :return: (bool) True if the rule made a change in the graph and False otherwise
        """
        if self.verbose:
            print("######################################")
            print("Rule 10")
            print("######################################")
        test_find_orientation = False
        for i in range(self.graph.d):
            for j in range(self.graph.d):
                if (self.graph.edges[i, j] == 2) and (self.graph.edges[j, i] == 3):
                    # Pairs of nodes pointing into j with a tail at j.
                    colliders_tails = [k for k in range(self.graph.d) if (k != j) and (
                            (self.graph.edges[k, j] == 2) and (self.graph.edges[j, k] == 1))]
                    colliders_tails = [list(k) for k in itertools.combinations(colliders_tails, 2)]
                    for ks in colliders_tails:
                        beta = ks[0]
                        theta = ks[1]
                        uncovered_path_list1 = self._find_uncovered_path(i, beta)
                        uncovered_path_list2 = self._find_uncovered_path(i, theta)
                        if (len(uncovered_path_list1) > 0) and (len(uncovered_path_list2) > 0):
                            for p1 in uncovered_path_list1:
                                if self._is_potentially_directed(p1):
                                    for p2 in uncovered_path_list2:
                                        if self._is_potentially_directed(p2):
                                            # First steps of the two paths must
                                            # differ and be non-adjacent.
                                            mu = p1[1]
                                            w = p2[1]
                                            if (mu != w) and (self.graph.edges[mu, w] == 0):
                                                test_find_orientation = True
                                                if self.verbose:
                                                    print(str(i) + "0->" + str(j) + " and ...", end=" ")
                                                    print("=> orient " + str(i) + "->" + str(j))
                                                self.graph.edges[j, i] = 1
        return test_find_orientation
    def rule_commun_confounder_and_causal_chain(self):
        """
        new rules (rule commun confounder (4) and rule causal_chain (5) from paper):
        for non-adjacent i, j both circle-connected to k, use the signs of the
        gamma (lag) coefficients gamma(k, i) and gamma(k, j) to orient k as a
        common confounder or as a link in a causal chain
        """
        if self.verbose:
            print("######################################")
            print("Rule commun confounder and causal chain")
            print("######################################")
        for i in range(self.graph.d):
            for j in range(i + 1, self.graph.d):
                if (self.graph.edges[i, j] == 0) and (self.graph.edges[j, i] == 0):
                    # Candidate k: fully circle-connected to both endpoints.
                    k_list = [k for k in range(self.graph.d) if (k != i) and (k != j) and (
                            ((self.graph.edges[j, k] == 3) and (self.graph.edges[k, j] == 3) and
                             (self.graph.edges[i, k] == 3) and (self.graph.edges[k, i] == 3)))]
                    if len(k_list) > 0:
                        for k in k_list:
                            gki = self.gamma_matrix[self.names[i]].loc[self.names[k]]
                            gkj = self.gamma_matrix[self.names[j]].loc[self.names[k]]
                            # A node is "not an effect" when no arrowhead points into it.
                            i_is_not_effet = (sum(self.graph.edges[:, i] == 2) == 0)
                            j_is_not_effet = (sum(self.graph.edges[:, j] == 2) == 0)
                            k_is_not_effet = (sum(self.graph.edges[:, k] == 2) == 0)
                            #Lagged common cause
                            if (gki > 0) and (gkj > 0):
                                if i_is_not_effet and j_is_not_effet:
                                    if self.verbose:
                                        print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)>0",
                                              end=" ")
                                        print("=> orient " + str(i) + "<- " + str(k) + " -> " + str(j))
                                    self.graph.edges[k, i] = 2
                                    self.graph.edges[k, j] = 2
                            #Lagged instantaneous confounder
                            elif (gki > 0) and (gkj == 0):
                                if i_is_not_effet:
                                    if j_is_not_effet and k_is_not_effet:
                                        if self.verbose:
                                            print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)==0",
                                                  end=" ")
                                            print("=> orient " + str(i) + "<- " + str(k) + " - " + str(j))
                                        self.graph.edges[k, i] = 2
                                    elif j_is_not_effet:
                                        if self.verbose:
                                            print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)==0",
                                                  end=" ")
                                            print("=> orient " + str(i) + "<- " + str(k) + " -> " + str(j))
                                        self.graph.edges[k, i] = 2
                                        self.graph.edges[k, j] = 2
                                    elif k_is_not_effet:
                                        if self.verbose:
                                            print(
                                                str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)==0",
                                                end=" ")
                                            print("=> orient " + str(i) + "<- " + str(k) + " <- " + str(j))
                                        self.graph.edges[k, i] = 2
                                        self.graph.edges[j, k] = 2
                            # Symmetric case: gamma(k, j) positive instead.
                            elif (gki == 0) and (gkj > 0):
                                if j_is_not_effet:
                                    if i_is_not_effet and k_is_not_effet:
                                        if self.verbose:
                                            print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)==0 and gamma(k,j)>0",
                                                  end=" ")
                                            print("=> orient " + str(i) + "- " + str(k) + " -> " + str(j))
                                        self.graph.edges[k, j] = 2
                                    elif i_is_not_effet:
                                        if self.verbose:
                                            print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)==0",
                                                  end=" ")
                                            print("=> orient " + str(i) + "<- " + str(k) + " -> " + str(j))
                                        self.graph.edges[k, i] = 2
                                        self.graph.edges[k, j] = 2
                                    elif k_is_not_effet:
                                        if self.verbose:
                                            print(
                                                str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,j)>0 and gamma(k,i)==0",
                                                end=" ")
                                            print("=> orient " + str(i) + "-> " + str(k) + " -> " + str(j))
                                        self.graph.edges[k, j] = 2
                                        self.graph.edges[i, k] = 2
                            # lagged instanteneous causal chain
                            elif (gki >= 0) and (gkj < 0):
                                if j_is_not_effet and k_is_not_effet:
                                    if self.verbose:
                                        print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)>0",
                                              end=" ")
                                        print("=> orient " + str(i) + "<- " + str(k) + " <- " + str(j))
                                    self.graph.edges[k, i] = 2
                                    self.graph.edges[j, k] = 2
                            elif (gki < 0) and (gkj >= 0):
                                if i_is_not_effet and k_is_not_effet:
                                    if self.verbose:
                                        print(str(i) + "-" + str(k) + "-" + str(j) + "and gamma(k,i)>0 and gamma(k,j)>0",
                                              end=" ")
                                        print("=> orient " + str(i) + "-> " + str(k) + " -> " + str(j))
                                    self.graph.edges[i, k] = 2
                                    self.graph.edges[k, j] = 2
    def rule_mediator(self):
        """
        new rules (rule mediator (6) from paper): for adjacent i, j both
        circle-connected to k, use the signs of the pairwise gamma (lag)
        coefficients to decide which of the three nodes mediates the others
        and orient the triangle accordingly
        """
        if self.verbose:
            print("######################################")
            print("Rule mediator")
            print("######################################")
        for i in range(self.graph.d):
            for j in range(i + 1, self.graph.d):
                if (self.graph.edges[i, j] != 0) and (self.graph.edges[j, i] != 0):
                    # Candidate k: fully circle-connected to both i and j.
                    k_list = [k for k in range(self.graph.d) if (k != i) and (k != j) and (
                            ((self.graph.edges[j, k] == 3) and (self.graph.edges[k, j] == 3) and
                             (self.graph.edges[i, k] == 3) and (self.graph.edges[k, i] == 3)))]
                    if len(k_list) > 0:
                        for k in k_list:
                            gij = self.gamma_matrix[self.names[j]].loc[self.names[i]]
                            gik = self.gamma_matrix[self.names[k]].loc[self.names[i]]
                            gjk = self.gamma_matrix[self.names[k]].loc[self.names[j]]
                            # g_list = [(gij, gik, gjk), (gij, gik, -gjk), (-gij, gjk, gik), (-gij, gjk, -gik),
                            #           (-gik, -gjk, gij), (-gik, -gjk, -gij)]
                            # Orderings tested, in index order of g_list:
                            # i->j->k, i->k->j, j->i->k, j->k->->i, k->i->j, k->j->i
                            g_list = [(gij, gjk, gik), (gik, -gjk, gij), (-gij, gik, gjk), (gjk, -gik, -gij),
                                      (-gik, gij, -gjk), (-gjk, -gij, -gik)]
                            g_list_common = [(gij, gjk, gik), (gik, -gjk, gij), (gjk, -gik, -gij)]
                            # msk: strict chain patterns; msk_common: first gamma
                            # zero means the corresponding pair is instantaneous.
                            msk = [(x[0] > 0) and (x[1] > 0) and (x[2] >= 0) for x in g_list]
                            msk_common = [(x[0] == 0) and (x[1] > 0) and (x[2] > 0) for x in g_list_common]
                            if any(msk):
                                print(g_list)  # NOTE(review): debug prints left in place
                                print(msk)
                                # s selects which of the six orderings matched first.
                                s = int(np.argwhere(msk)[0])
                                # g1, g2, g3 = g_list[s]
                                if s == 0:
                                    if (sum(self.graph.edges[:, j] == 2) == 0) and (
                                            sum(self.graph.edges[:, k] == 2) == 0):
                                        if (self.graph.edges[j, i] == 3) and (self.graph.edges[k, i] == 3) \
                                                and (self.graph.edges[k, j] == 3):
                                            if self.verbose:
                                                print(str(i) + "-" + str(j) + "-" + str(k) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "-> " + str(j) + " -> " + str(k) + " <- "
                                                      + str(i))
                                            self.graph.edges[i, j] = 2
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[j, k] = 2
                                elif s == 1:
                                    if (sum(self.graph.edges[:, j] == 2) == 0) and (
                                            sum(self.graph.edges[:, k] == 2) == 0):
                                        if (self.graph.edges[j, i] == 3) and (self.graph.edges[k, i] == 3) \
                                                and (self.graph.edges[k, j] == 3):
                                            if self.verbose:
                                                print(str(i) + "-" + str(k) + "-" + str(j) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "-> " + str(k) + "-> " + str(j) + " <- "
                                                      + str(i))
                                            self.graph.edges[i, j] = 2
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[k, j] = 2
                                elif s == 2:
                                    if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                            sum(self.graph.edges[:, k] == 2) == 0):
                                        if (self.graph.edges[i, j] == 3) and (self.graph.edges[k, i] == 3) \
                                                and (self.graph.edges[k, j] == 3):
                                            if self.verbose:
                                                print(str(j) + "-" + str(i) + "-" + str(k) + "-" + str(j), end=" ")
                                                print("=> orient " + str(j) + "-> " + str(i) + " -> " + str(k) + " <- "
                                                      + str(j))
                                            self.graph.edges[j, i] = 2
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[j, k] = 2
                                # NOTE(review): plain `if` here (not elif) — s == 3
                                # is unreachable after an earlier match anyway.
                                if s == 3:
                                    if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                            sum(self.graph.edges[:, k] == 2) == 0):
                                        if (self.graph.edges[i, j] == 3) and (self.graph.edges[i, k] == 3) \
                                                and (self.graph.edges[k, j] == 3):
                                            if self.verbose:
                                                print(str(j) + "-" + str(k) + "-" + str(i) + "-" + str(j), end=" ")
                                                print("=> orient " + str(j) + "-> " + str(k) + "-> " + str(i) + " <- "
                                                      + str(j))
                                            self.graph.edges[j, i] = 2
                                            self.graph.edges[k, i] = 2
                                            self.graph.edges[j, k] = 2
                                elif s == 4:
                                    if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                            sum(self.graph.edges[:, j] == 2) == 0):
                                        if (self.graph.edges[j, i] == 3) and (self.graph.edges[i, k] == 3) \
                                                and (self.graph.edges[j, k] == 3):
                                            if self.verbose:
                                                print(str(k) + "-" + str(i) + "-" + str(j) + "-" + str(k), end=" ")
                                                print("=> orient " + str(k) + "-> " + str(i) + "-> " + str(j) + " <- "
                                                      + str(k))
                                            self.graph.edges[i, j] = 2
                                            self.graph.edges[k, i] = 2
                                            self.graph.edges[k, j] = 2
                                elif s == 5:
                                    if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                            sum(self.graph.edges[:, j] == 2) == 0):
                                        if (self.graph.edges[i, j] == 3) and (self.graph.edges[i, k] == 3) \
                                                and (self.graph.edges[j, k] == 3):
                                            if self.verbose:
                                                print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                      + str(k))
                                            self.graph.edges[j, i] = 2
                                            self.graph.edges[k, i] = 2
                                            self.graph.edges[k, j] = 2
                            # One of the pairs is instantaneous (gamma == 0).
                            elif any(msk_common):
                                s = int(np.argwhere(msk_common)[0])
                                if s == 0:
                                    if (self.graph.edges[j, i] == 3) and (self.graph.edges[k, i] == 3) \
                                            and (self.graph.edges[k, j] == 3):
                                        if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                sum(self.graph.edges[:, j] == 2) == 0) and (
                                                sum(self.graph.edges[:, k] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(j) + "-" + str(k) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "- " + str(j) + " -> " + str(k) + " <- "
                                                      + str(i))
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[j, k] = 2
                                        elif (sum(self.graph.edges[:, j] == 2) == 0) and (
                                                sum(self.graph.edges[:, k] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(j) + "-" + str(k) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "-> " + str(j) + " -> " + str(k) + " <- "
                                                      + str(i))
                                            self.graph.edges[i, j] = 2
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[j, k] = 2
                                        elif (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                sum(self.graph.edges[:, k] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(j) + "-" + str(k) + "-" + str(i), end=" ")
                                                print("=> orient " + str(j) + "-> " + str(i) + " -> " + str(k) + " <- "
                                                      + str(j))
                                            self.graph.edges[j, i] = 2
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[j, k] = 2
                                elif s == 1:
                                    if (self.graph.edges[j, i] == 3) and (self.graph.edges[k, i] == 3) \
                                            and (self.graph.edges[k, j] == 3):
                                        if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                sum(self.graph.edges[:, j] == 2) == 0) and (
                                                sum(self.graph.edges[:, k] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(k) + "-" + str(j) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "- " + str(k) + " -> " + str(j) + " <- "
                                                      + str(i))
                                            self.graph.edges[i, j] = 2
                                            self.graph.edges[k, j] = 2
                                        elif (sum(self.graph.edges[:, k] == 2) == 0) and (
                                                sum(self.graph.edges[:, j] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(j) + "-" + str(k) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "-> " + str(j) + " -> " + str(k) + " <- "
                                                      + str(i))
                                            self.graph.edges[i, j] = 2
                                            self.graph.edges[i, k] = 2
                                            self.graph.edges[k, j] = 2
                                        elif (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                sum(self.graph.edges[:, j] == 2) == 0):
                                            if self.verbose:
                                                print(str(k) + "-" + str(i) + "-" + str(j) + "-" + str(k), end=" ")
                                                print("=> orient " + str(k) + "-> " + str(i) + " -> " + str(j) + " <- "
                                                      + str(k))
                                            self.graph.edges[k, i] = 2
                                            self.graph.edges[k, j] = 2
                                            self.graph.edges[i, j] = 2
                                elif s == 2:
                                    if (self.graph.edges[j, i] == 3) and (self.graph.edges[k, i] == 3) \
                                            and (self.graph.edges[k, j] == 3):
                                        if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                sum(self.graph.edges[:, j] == 2) == 0) and (
                                                sum(self.graph.edges[:, k] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(k) + "-" + str(j) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "- " + str(k) + " -> " + str(j) + " <- "
                                                      + str(i))
                                            self.graph.edges[j, i] = 2
                                            self.graph.edges[k, i] = 2
                                        elif (sum(self.graph.edges[:, k] == 2) == 0) and (
                                                sum(self.graph.edges[:, i] == 2) == 0):
                                            if self.verbose:
                                                print(str(i) + "-" + str(j) + "-" + str(k) + "-" + str(i), end=" ")
                                                print("=> orient " + str(i) + "-> " + str(j) + " -> " + str(k) + " <- "
                                                      + str(i))
                                            self.graph.edges[j, k] = 2
                                            self.graph.edges[j, i] = 2
                                            self.graph.edges[k, i] = 2
                                        elif (sum(self.graph.edges[:, j] == 2) == 0) and (
                                                sum(self.graph.edges[:, i] == 2) == 0):
                                            if self.verbose:
                                                print(str(k) + "-" + str(i) + "-" + str(j) + "-" + str(k), end=" ")
                                                print("=> orient " + str(k) + "-> " + str(i) + " -> " + str(j) + " <- "
                                                      + str(k))
                                            self.graph.edges[k, j] = 2
                                            self.graph.edges[k, i] = 2
                                            self.graph.edges[j, i] = 2
                            # Fallback: exactly one pairwise gamma is non-zero.
                            else:
                                if (self.graph.edges[i, j] == 3) and (self.graph.edges[i, k] == 3) \
                                        and (self.graph.edges[k, j] == 3):
                                    if (gij!=0) and (gik==0) and (gjk==0):
                                        if gij>0:
                                            if (sum(self.graph.edges[:, j] == 2) == 0) and (
                                                    sum(self.graph.edges[:, k] == 2) == 0):
                                                if self.verbose:
                                                    print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                    print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                          + str(k))
                                                self.graph.edges[i, j] = 2
                                                self.graph.edges[i, k] = 2
                                                self.graph.edges[j, k] = 2
                                        elif gij<0:
                                            if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                    sum(self.graph.edges[:, k] == 2) == 0):
                                                if self.verbose:
                                                    print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                    print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                          + str(k))
                                                self.graph.edges[j, i] = 2
                                                self.graph.edges[i, k] = 2
                                                self.graph.edges[j, k] = 2
                                    elif (gij==0) and (gik!=0) and (gjk==0):
                                        if gik>0:
                                            if (sum(self.graph.edges[:, k] == 2) == 0) and (
                                                    sum(self.graph.edges[:, j] == 2) == 0):
                                                if self.verbose:
                                                    print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                    print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                          + str(k))
                                                self.graph.edges[i, k] = 2
                                                self.graph.edges[i, j] = 2
                                                self.graph.edges[k, j] = 2
                                        if gik<0:
                                            if (sum(self.graph.edges[:, i] == 2) == 0) and (
                                                    sum(self.graph.edges[:, j] == 2) == 0):
                                                if self.verbose:
                                                    print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                    print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                          + str(k))
                                                self.graph.edges[k, i] = 2
                                                self.graph.edges[i, j] = 2
                                                self.graph.edges[k, j] = 2
                                    elif (gij == 0) and (gik == 0) and (gjk != 0):
                                        if gjk>0:
                                            if (sum(self.graph.edges[:, k] == 2) == 0) and (
                                                    sum(self.graph.edges[:, i] == 2) == 0):
                                                if self.verbose:
                                                    print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                    print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                          + str(k))
                                                self.graph.edges[j, k] = 2
                                                self.graph.edges[j, i] = 2
                                                self.graph.edges[k, i] = 2
                                        if gjk<0:
                                            if (sum(self.graph.edges[:, j] == 2) == 0) and (
                                                    sum(self.graph.edges[:, i] == 2) == 0):
                                                if self.verbose:
                                                    print(str(k) + "-" + str(j) + "-" + str(i) + "-" + str(k), end=" ")
                                                    print("=> orient " + str(k) + "-> " + str(j) + " -> " + str(i) + " <- "
                                                          + str(k))
                                                self.graph.edges[k, j] = 2
                                                self.graph.edges[j, i] = 2
                                                self.graph.edges[k, i] = 2
    def rule_proba_raising_principle(self):
        """
        Probability-raising rule (the "new rules" from the paper): for a still
        undirected edge i o-o j (edges[i, j] == edges[j, i] == 3) where each
        endpoint has exactly one adjacent vertex, orient the edge from the sign
        of the gamma statistic: gamma(i, j) > 0 gives i -> j, < 0 gives i <- j.
        """
        if self.verbose:
            print("######################################")
            print("Rule prob raising principle")
            print("######################################")
        for i in range(self.graph.d):
            for j in range(i + 1, self.graph.d):
                if (self.graph.edges[i, j] == 3) and (self.graph.edges[j, i] == 3):
                    # "leaf" test: i and j each touch exactly one other vertex
                    # (self-column entry removed before counting non-zeros).
                    adjacent_i_is_1 = (sum(np.delete(self.graph.edges[:, i], i) != 0) == 1)
                    adjacent_j_is_1 = (sum(np.delete(self.graph.edges[:, j], j) != 0) == 1)
                    if adjacent_i_is_1 and adjacent_j_is_1:
                        gij = self.gamma_matrix[self.names[j]].loc[self.names[i]]
                        if gij > 0:
                            if self.verbose:
                                print(str(i) + "-" + str(j) + "g(i,j)>0", end=" ")
                                print("=> orient " + str(i) + "-> " + str(j))
                            self.graph.edges[i, j] = 2
                        elif gij < 0:
                            if self.verbose:
                                print(str(i) + "-" + str(j) + "g(i,j)<0", end=" ")
                                print("=> orient " + str(i) + "<- " + str(j))
                            self.graph.edges[j, i] = 2
    def rule_gap_orientation(self):
        """
        Gamma heuristic rule from the paper: orient every remaining o-o edge
        (edges[i, j] == edges[j, i] == 3) purely from the sign of the gamma
        statistic — positive gives i -> j, negative gives i <- j. Unlike the
        probability-raising rule, no adjacency ("leaf") condition is required.
        """
        if self.verbose:
            print("######################################")
            print("Rule gap orientation")
            print("######################################")
        for i in range(self.graph.d):
            for j in range(i + 1, self.graph.d):
                if (self.graph.edges[i, j] == 3) and (self.graph.edges[j, i] == 3):
                    if self.gamma_matrix[self.names[j]].loc[self.names[i]] > 0:
                        if self.verbose:
                            print(str(i) + "-" + str(j) + "g(i,j)>0", end=" ")
                            print("=> orient " + str(i) + "-> " + str(j))
                        self.graph.edges[i, j] = 2
                    if self.gamma_matrix[self.names[j]].loc[self.names[i]] < 0:
                        if self.verbose:
                            print(str(i) + "-" + str(j) + "g(i,j)<0", end=" ")
                            print("=> orient " + str(i) + "<- " + str(j))
                        self.graph.edges[j, i] = 2
    def fit(self):
        """
        Run the full FCITMI procedure: build the skeleton, compute separating
        sets, lift the DAG to a PAG, then apply the orientation rules until
        each rule group reaches a fixed point.
        :return: graph (PAG) adjacency matrix (self.graph.edges)
        """
        if self.verbose:
            print("#######################################")
            print("########### Starting FCITMI ###########")
            print("#######################################")
        # initialize skeleton
        self.skeleton_initialize()
        # get separation sets
        self.find_sep_set()
        # include circle in the skeleton
        self.dag_to_pag()
        # orientation
        self.rule_temporal_priority_within_time_series()
        self.rule_origin_causality()
        # find possible d sep
        test_remove_links = self.find_d_sep()
        # remove orientation
        if test_remove_links:
            self.remove_orientation()
            # orientation
            self.rule_origin_causality()
        # Iterate the propagation rules until none of them changes the graph
        # (each rule_* returns whether it fired).
        test_rp = True
        test_r2 = True
        test_r3 = True
        test_r4 = True
        while test_rp or test_r2 or test_r3 or test_r4:
            test_rp = self.rule_propagation_causality()
            test_r2 = self.rule_2()
            test_r3 = self.rule_3()
            test_r4 = self.rule_4()
        self.rule_commun_confounder_and_causal_chain()
        self.rule_mediator()
        self.rule_proba_raising_principle()
        # Second fixed-point loop over the FCI completion rules 8-10.
        test_r8 = True
        test_r9 = True
        test_r10 = True
        while test_r8 or test_r9 or test_r10:
            test_r8 = self.rule_8()
            test_r9 = self.rule_9()
            test_r10 = self.rule_10()
        if self.verbose:
            print("######################################")
            print("Final Results (FCITMI)")
            print("######################################")
            print("Summary Graph:")
            print(self.graph.edges)
        return self.graph.edges
    def fit_gap_orientation(self):
        """
        Run the gamma-based gap orientation on top of an already fitted graph
        (FCITMI-gamma). Requirement: ``fit()`` must have been called first.
        :return: graph (PAG) adjacency matrix
        """
        self.rule_gap_orientation()
        return self.graph.edges
# Manual smoke test: load one simulated time series, inspect the gamma matrix,
# run a single CTMI query, then fit the full graph and the noise-based
# hidden-confounder step.
if __name__ == "__main__":
    import pandas as pd
    path = "../../../../data/simulated_ts_data/v_structure/data_"+str(0)+".csv"
    data = pd.read_csv(path, delimiter=',', index_col=0)
    data = data.loc[:1000]
    print(data)
    import random
    random.seed(2)
    ci = TPCTMI(data, verbose=True, p_value=False, num_processor=1)
    gamma_matrix = ci.gamma_matrix
    print(gamma_matrix)
    print(data.shape)
    # Build a one-step-lagged copy of V1 as the conditioning set for CTMI.
    x = data[data.columns[0]].loc[1:].reset_index(drop=True)
    y = data[data.columns[2]].loc[1:].reset_index(drop=True)
    z = data[data.columns[0]].loc[:999].reset_index(drop=True)
    z.name = "V1_past"
    z = {'V1_past': z}
    print(x.shape, y.shape)
    sampling_rate = {'V1': 1, 'V2': 1, 'V3': 1, 'V1_past': 1, 'V2_past':1, 'V3_past':1}
    cmi, _ = ctmi(x, y, z, data.columns[0], data.columns[2], sampling_rate,
                  gamma_matrix=gamma_matrix, p_value=True)
    print(cmi)
    print("done")
    # ci = FCITMI(data, verbose=True, p_value=True, num_processor=1)
    ci = TPCTMI(data, verbose=True, p_value=False, num_processor=1)
    ci.fit()
    print(ci.tgraph_dict)
    ci.noise_based_hidden_counfounders()
    # ar = np.zeros([4, 4])
    # ar[0,1] = 2
    # ar[1,0] = 1
    #
    # ar[1,2] = 2
    # ar[2,1] = 2
    # ar[1,3] = 2
    # ar[3,1] = 1
    #
    # ar[2,3] = 2
    # ar[3,2] = 2
    # ci.graph.edges = ar
    # print(ar)
# print(ci._find_discriminating_paths(0, 3)) | [
"karim.assaad@univ-grenoble-alpes.fr"
] | karim.assaad@univ-grenoble-alpes.fr |
3dc1ada00afa0327b9f7befb7328a8b103da9b07 | d7719b7b537a1484228d377d265ebeea8f76748a | /Robot Operating System (ROS)/ROS/Starter Code and Resources/ROS_Minimal_Projects/rospy_minimal_packages/modular_pub_sub/setup.py | f8270eaaf22d1aa84841ec98b72a858c7271b601 | [
"BSD-2-Clause"
] | permissive | OpenSUTD/coding-notes | 9724ac9d35f585ff3140a43c8a10fcdcbaedfc79 | f9b8c778f8494d0bf47bd816cfd77b88e78a5a1f | refs/heads/master | 2022-07-16T22:17:21.930385 | 2019-07-03T10:11:30 | 2019-07-03T10:11:30 | 166,292,417 | 7 | 5 | BSD-2-Clause | 2019-07-03T11:20:16 | 2019-01-17T20:30:47 | C++ | UTF-8 | Python | false | false | 275 | py | #!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup

# ROS/catkin setup script: generate_distutils_setup pulls the package
# metadata from package.xml; we only declare where the Python code lives.
setup_args = generate_distutils_setup(
    packages = ['modular_pub_sub'],
    package_dir = {'': 'src'},
    # NOTE(review): a single empty string looks unintentional — either list
    # real dependencies here or drop the argument. TODO confirm.
    install_requires = ['']
)
setup(**setup_args)
| [
"methylDragon@gmail.com"
] | methylDragon@gmail.com |
f562201b3685766803426c0adbb30f5e6e1eb296 | 12e6ce3fb8628230a3b4bed6f4a41a0449f04135 | /assignment2/cs231n/layers.py | 2060da31b0b0be241b02f22b46082d7bf87738cf | [] | no_license | axydes/cs231n | bb833bab6dfe49a8e784d1cf5cea3e7d48978bb8 | c7016368b4f4235a652afb0539d18a759841739e | refs/heads/main | 2021-07-12T23:51:48.234182 | 2021-03-21T22:29:29 | 2021-03-21T22:29:29 | 45,052,817 | 0 | 0 | null | 2021-03-21T22:29:30 | 2015-10-27T16:06:21 | Jupyter Notebook | UTF-8 | Python | false | false | 11,424 | py | import numpy as np
from scipy import ndimage
from cs231n.im2col import *
def affine_forward(x, w, b):
  """
  Forward pass for a fully-connected (affine) layer.

  Each input x[i] of shape (d_1, ..., d_k) is flattened to a row vector of
  length D = prod(d_i) and mapped through the affine transform x.w + b.

  Inputs:
  - x: input data, of shape (N, d_1, ..., d_k)
  - w: weights, of shape (D, M)
  - b: biases, of shape (M,)

  Returns a tuple of:
  - out: layer output, of shape (N, M)
  - cache: (x, w, b), saved for the backward pass
  """
  # Flatten every example to one row, then apply the affine map in one shot.
  num_examples = x.shape[0]
  flattened = x.reshape(num_examples, -1)
  out = flattened.dot(w) + b
  cache = (x, w, b)
  return out, cache
def affine_backward(dout, cache):
  """
  Backward pass for a fully-connected (affine) layer.

  Inputs:
  - dout: upstream derivative, of shape (N, M)
  - cache: tuple (x, w, b) stored by affine_forward

  Returns a tuple of:
  - dx: gradient with respect to x, of shape (N, d1, ..., d_k)
  - dw: gradient with respect to w, of shape (D, M)
  - db: gradient with respect to b, of shape (M,)
  """
  x, w, b = cache
  num_examples = x.shape[0]
  flattened = x.reshape(num_examples, -1)
  # Gradient w.r.t. the input flows back through w; restore the original
  # (unflattened) input shape afterwards.
  dx = dout.dot(w.T).reshape(x.shape)
  # Gradient w.r.t. the weights accumulates input/upstream outer products.
  dw = flattened.T.dot(dout)
  # The bias is broadcast to every example, so its gradient is a column sum.
  db = dout.sum(axis=0)
  return dx, dw, db
def relu_forward(x):
  """
  Forward pass for a layer of rectified linear units (ReLUs).

  Input:
  - x: inputs, of any shape

  Returns a tuple of:
  - out: elementwise max(0, x), same shape as x
  - cache: x, saved for the backward pass
  """
  # Clip everything below zero; positive values pass through untouched.
  out = np.clip(x, 0, None)
  cache = x
  return out, cache
def relu_backward(dout, cache):
  """
  Backward pass for a layer of rectified linear units (ReLUs).

  Input:
  - dout: upstream derivatives, of any shape
  - cache: the forward-pass input x, same shape as dout

  Returns:
  - dx: gradient with respect to x
  """
  x = cache
  # Copy before masking: the previous code aliased `dx = dout` and then
  # zeroed entries of the caller's upstream gradient in place, silently
  # corrupting dout for any other consumer of that array.
  dx = dout.copy()
  # The ReLU gate only passes gradient where the forward input was positive.
  dx[x <= 0] = 0
  return dx
def conv_forward_naive(x, w, b, conv_param):
  """
  A naive implementation of the forward pass for a convolutional layer,
  computed as an im2col reshape followed by a single matrix multiply.

  The input consists of N data points, each with C channels, height H and
  width W. We convolve each input with F different filters, where each filter
  spans all C channels and has height HH and width WW.

  Input:
  - x: Input data of shape (N, C, H, W)
  - w: Filter weights of shape (F, C, HH, WW)
  - b: Biases, of shape (F,)
  - conv_param: A dictionary with the following keys:
    - 'stride': The number of pixels between adjacent receptive fields in the
      horizontal and vertical directions.
    - 'pad': The number of pixels that will be used to zero-pad the input.

  Returns a tuple of:
  - out: Output data, of shape (N, F, H', W') where H' and W' are given by
    H' = 1 + (H + 2 * pad - HH) // stride
    W' = 1 + (W + 2 * pad - WW) // stride
  - cache: (x, X_col, w, b, conv_param); X_col is reused by the backward pass.
  """
  stride = conv_param['stride']
  pad = conv_param['pad']
  N, C, H, W = x.shape
  F, C, HH, WW = w.shape
  # Floor division keeps the spatial dims integral on Python 3 as well (the
  # old `/` produced floats there); for ints on Python 2, `//` is identical.
  H_prime = 1 + (H + 2 * pad - HH) // stride
  W_prime = 1 + (W + 2 * pad - WW) // stride
  # Lay out every receptive field as a column, flatten the filters to rows,
  # and do the whole convolution as one matmul plus a broadcast bias.
  X_col = im2col_indices(x, HH, WW, pad, stride)
  W_row = w.reshape(F, -1)
  dt = np.dot(W_row, X_col) + b.reshape(-1, 1)
  # Columns come out ordered (H', W', N); move the example axis to the front.
  dt = np.reshape(dt, (F, H_prime, W_prime, N))
  out = dt.transpose(3, 0, 1, 2)
  cache = (x, X_col, w, b, conv_param)
  return out, cache
def conv_backward_naive(dout, cache):
  """
  A naive implementation of the backward pass for a convolutional layer.

  Inputs:
  - dout: Upstream derivatives, of shape (N, F, H', W')
  - cache: A tuple of (x, X_col, w, b, conv_param) as produced by
    conv_forward_naive; X_col is the im2col matrix saved on the way forward.

  Returns a tuple of:
  - dx: Gradient with respect to x
  - dw: Gradient with respect to w
  - db: Gradient with respect to b
  """
  dx, dw, db = None, None, None
  #############################################################################
  # TODO: Implement the convolutional backward pass.                          #
  #############################################################################
  x, X_col, w, b, conv_param = cache
  stride = conv_param['stride']
  pad = conv_param['pad']
  N, C, H, W = x.shape
  F, C, HH, WW = w.shape
  # Bring the filter axis to the front and flatten the rest so the upstream
  # gradient lines up with the (F, C*HH*WW) filter matrix.
  dout_new = dout.transpose(1,2,3,0).reshape(F,-1)
  # Backprop through the matmul into column space, then fold the columns back
  # into image coordinates (col2im is the adjoint of im2col).
  dX_col = np.dot(w.reshape(F,-1).T, dout_new)
  dx = col2im_indices(dX_col, (N, C, H, W), HH, WW, pad, stride)
  dw = np.dot(dout_new, X_col.T).reshape(w.shape)
  # Each bias is broadcast over every example and spatial position.
  db = np.sum(dout, axis=(0,2,3))
  #############################################################################
  #                             END OF YOUR CODE                              #
  #############################################################################
  return dx, dw, db
def max_pool_forward_naive(x, pool_param):
  """
  A naive implementation of the forward pass for a max pooling layer, using
  im2col to expose each pooling window as one column.

  Inputs:
  - x: Input data, of shape (N, C, H, W)
  - pool_param: dictionary with the following keys:
    - 'pool_height': The height of each pooling region
    - 'pool_width': The width of each pooling region
    - 'stride': The distance between adjacent pooling regions

  Returns a tuple of:
  - out: Output data, of shape (N, C, H2, W2) with
    H2 = (H - pool_height) // stride + 1, W2 = (W - pool_width) // stride + 1
  - cache: (x, X_col, idxs, pool_param); idxs records the argmax position of
    every window so the backward pass can route gradients.
  """
  ph, pw, stride = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']
  N, C, H, W = x.shape
  # Floor division keeps the output dims integral on Python 3 as well (the
  # old `/` produced floats there); for ints on Python 2, `//` is identical.
  H2 = (H - ph) // stride + 1
  W2 = (W - pw) // stride + 1
  # Treat every channel as its own single-channel image so each column of
  # X_col is exactly one pooling window.
  x_res = x.reshape(N*C, 1, H, W)
  X_col = im2col_indices(x_res, ph, pw, 0, stride)
  idxs = np.argmax(X_col, axis=0)
  X_col_max = X_col[idxs, np.arange(X_col.shape[1])]
  # Columns come out ordered (H2, W2, N, C); move examples/channels in front.
  dt = np.reshape(X_col_max, (H2, W2, N, C))
  out = dt.transpose(2, 3, 0, 1)
  cache = (x, X_col, idxs, pool_param)
  return out, cache
def max_pool_backward_naive(dout, cache):
  """
  A naive implementation of the backward pass for a max pooling layer.

  Inputs:
  - dout: Upstream derivatives, of shape (N, C, H2, W2)
  - cache: A tuple of (x, X_col, idxs, pool_param) as stored by the forward
    pass; idxs holds the argmax of each pooling window.

  Returns:
  - dx: Gradient with respect to x
  """
  dx = None
  #############################################################################
  # TODO: Implement the max pooling backward pass                             #
  #############################################################################
  x, X_col, idxs, pool_param = cache
  ph, pw, stride = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']
  N, C, H, W = x.shape
  # Flatten dout into the same (H2, W2, N, C) column order used forward.
  dout_flt = dout.transpose(2, 3, 0, 1).flatten()
  # Only the argmax entry of each window receives gradient; all others stay 0.
  dX_col = np.zeros_like(X_col)
  dX_col[idxs, np.arange(dX_col.shape[1])] = dout_flt
  # Fold the columns back into the (N*C, 1, H, W) layout, then restore x's shape.
  dx = col2im_indices(dX_col, (N*C, 1, H, W), ph, pw, 0, stride)
  dx = dx.reshape(x.shape)
  #############################################################################
  #                             END OF YOUR CODE                              #
  #############################################################################
  return dx
def svm_loss(x, y):
  """
  Multiclass SVM (hinge) loss and its gradient.

  Inputs:
  - x: scores, of shape (N, C); x[i, j] is the score of class j for input i
  - y: labels, of shape (N,), with 0 <= y[i] < C

  Returns a tuple of:
  - loss: scalar hinge loss, averaged over the batch
  - dx: gradient of the loss with respect to x
  """
  num = x.shape[0]
  rows = np.arange(num)
  # Hinge margins with a margin of 1; the correct class contributes nothing.
  correct = x[rows, y][:, np.newaxis]
  margins = np.maximum(0, x - correct + 1.0)
  margins[rows, y] = 0
  loss = margins.sum() / num
  # Each violated margin pushes its class score up by 1/N, and the correct
  # class down by the number of violations for that example.
  dx = (margins > 0).astype(x.dtype)
  dx[rows, y] -= (margins > 0).sum(axis=1)
  dx /= num
  return loss, dx
def softmax_loss(x, y):
  """
  Softmax (cross-entropy) loss and its gradient.

  Inputs:
  - x: scores, of shape (N, C); x[i, j] is the score of class j for input i
  - y: labels, of shape (N,), with 0 <= y[i] < C

  Returns a tuple of:
  - loss: scalar cross-entropy loss, averaged over the batch
  - dx: gradient of the loss with respect to x
  """
  num = x.shape[0]
  rows = np.arange(num)
  # Shift by the row max for numerical stability before exponentiating.
  shifted = x - x.max(axis=1, keepdims=True)
  exp_scores = np.exp(shifted)
  probs = exp_scores / exp_scores.sum(axis=1, keepdims=True)
  # Average negative log-likelihood of the correct classes.
  loss = -np.log(probs[rows, y]).sum() / num
  # d(loss)/d(scores) = (softmax - one_hot) / N.
  dx = probs.copy()
  dx[rows, y] -= 1
  dx /= num
  return loss, dx
| [
"alexander.xydes@navy.mil"
] | alexander.xydes@navy.mil |
376f35618f0977feb1eee220ccd2117317c0936f | ade41423a943e1fcb8fef7b869af1f0e81bdfa3d | /call_stack.py | 7e3e192fd58afe19067e268d0603aec2e6003e9a | [] | no_license | aheinous/waffle-compiler | 955d411db46435f94bd8b5d606250ca75f29e481 | d5e5816a4d4d8ea38f0d2a3623694a2bb66c2b96 | refs/heads/master | 2023-07-08T05:34:57.104140 | 2021-08-09T03:08:23 | 2021-08-09T03:08:23 | 394,131,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | class _FuncPush:
    def __init__(self, call_stack):
        # Keep a handle to the owning CallStack so __exit__ can pop it.
        self._call_stack = call_stack
    def __enter__(self):
        # Nothing to do on entry: the frame was already pushed by
        # CallStack.push(), which created this context manager.
        pass
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always pop the frame, even when an exception is unwinding.
        self._call_stack._pop()
class CallStack:
    """Simple LIFO stack of in-flight functions.

    ``push`` returns a context manager (_FuncPush) that pops the frame when
    its ``with`` block exits, so callers write ``with stack.push(func): ...``.
    """
    def __init__(self):
        self._stack = []
    def push(self, func):
        # Push now; the returned _FuncPush pops on context-manager exit.
        self._stack.append(func)
        return _FuncPush(self)
    def _pop(self):
        self._stack.pop()
    def peek(self):
        # Innermost (current) function.
        return self._stack[-1]
    def checkRtnTypeOkay(self, rtnVal, pos):
        # Validate a return value against the current function's declared
        # return type. rtnVal/pos are project types; presumably
        # checkCanAssignTo raises on mismatch — TODO confirm.
        rtnVal.checkCanAssignTo(self.peek().rtn_type, pos)
| [
"aheinous@gmail.com"
] | aheinous@gmail.com |
7c8d333ddbf6eb99adc0a90adae46b0f37287b85 | d6c07c3128ebf5586e6640b72fc065d8bc54ad03 | /rose/bin/python-config | fe0e5df737913facfbebf0383dcbc7aed853aa8c | [] | no_license | newserg/my-first-blog | f647ea12f9cd0a7c77945c2bb660acbb81116ba1 | 9565c9951f97d9de8a79987f881a569ea28385d9 | refs/heads/master | 2021-01-09T09:38:19.376567 | 2017-02-08T11:43:55 | 2017-02-08T11:43:55 | 81,197,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,345 | #!/home/serg/newproject1/rose/bin/python
# python-config: prints compiler/linker flags for embedding or extending this
# Python installation. Output format is consumed by build scripts — keep it
# byte-for-byte stable.
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
# Some options only exist on newer interpreters.
if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    # Print usage to stderr and terminate with the given exit code.
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Emit one line of output per requested flag, in the order given.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        # 'SO' is the legacy spelling of 'EXT_SUFFIX' on older interpreters.
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"morrcv@ukr.net"
] | morrcv@ukr.net | |
ad2cf41e2835382146d98e659562119b73b04000 | 10425fd2f058afb9dd823929314bfede0a4eb513 | /flaskaiohttp_websocket/app.py | 67a5c4b45adbbf95106696a4225673ec018d7153 | [] | no_license | gaozhidf/flask_websocket | 60883571a469a7c283e3da9a8fbf81d752f82f71 | 41653f71b7fd6d07d3592a22a11f29e795ba45d8 | refs/heads/master | 2022-11-29T04:31:08.953294 | 2017-08-12T08:53:24 | 2017-08-12T08:53:24 | 49,828,952 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,060 | py | import json
import asyncio
import aiohttp
from flask import Flask, current_app
from flask_aiohttp import AioHTTP
from flask_aiohttp.helper import async, websocket
app = Flask(__name__)
aio = AioHTTP(app)
@app.route('/echo')
@websocket
def echo():
    """WebSocket echo endpoint: send every received text frame back verbatim."""
    while True:
        msg = yield from aio.ws.receive_msg()
        if msg.tp == aiohttp.MsgType.text:
            aio.ws.send_str(msg.data)
        elif msg.tp == aiohttp.MsgType.close:
            print('websocket connection closed')
            break
        elif msg.tp == aiohttp.MsgType.error:
            print('ws connection closed with exception %s',
                  aio.ws.exception())
            break
@app.route('/api')
@async
def api():
    """Proxy endpoint: fetch a public Facebook Graph profile and relay the raw body."""
    response = yield from aiohttp.request(
        'GET', 'https://graph.facebook.com/zuck')
    data = yield from response.read()
    return data
@app.route('/param/<arg>')
@websocket
def param(arg):
    """WebSocket endpoint: reply to every text frame with the URL path argument."""
    while True:
        msg = yield from aio.ws.receive_msg()
        if msg.tp == aiohttp.MsgType.text:
            aio.ws.send_str(arg)
        elif msg.tp == aiohttp.MsgType.close:
            print('websocket connection closed')
            break
        elif msg.tp == aiohttp.MsgType.error:
            print('ws connection closed with exception %s',
                  aio.ws.exception())
            break
@app.route('/late')
@async
def late():
    """Respond with plain 'done' after a 3-second asynchronous delay."""
    yield from asyncio.sleep(3)
    data = {
        'data': 'done'
    }
    data = json.dumps(data)
    # NOTE(review): this response object is built but never returned or
    # stored, so the JSON payload and 201 status are discarded and the plain
    # 'done' below is what clients actually get — looks unintentional.
    current_app.response_class(data, headers={
        'Content-Type': 'application/json',
    }, status=201)
    return 'done'
@app.route('/plain')
def plain():
    """Plain synchronous endpoint (no async/websocket wrapper)."""
    return 'Hello, World!'
@app.route('/stream')
def stream():
    """Stream the response body in two chunks via a generator."""
    def f():
        yield 'Hello, '
        yield 'World!'
    return app.response_class(f())
@app.route('/async-stream')
@async
def async_stream():
    """Stream two chunks, then sleep 1s asynchronously before closing the body."""
    def f():
        yield 'I\'m '
        yield 'sorry!'
        yield from asyncio.sleep(1)
    return app.response_class(f())
def main():
    """Run the Flask app under the aiohttp-backed server with debug enabled."""
    aio.run(app, debug=True)
# Script entry point.
if __name__ == '__main__':
    main()
"gaozhidf@gmail.com"
] | gaozhidf@gmail.com |
bf88e5c883b3f3d08cb7ed1988fbaa74e23cf3f7 | b5e10ed8500a39f999b7a664915ceb1dc7f371bd | /py_services/populateMed1000.py | f5b20654e59ff15a68701e594654b228b31c97b1 | [] | no_license | juanpablocontreras/storage-experiment | bb42b4e5ca44774c1ac31ffc464f5ef3d674d3d8 | 1c59f55373144b976a7a2fe789b81721d9543950 | refs/heads/master | 2022-11-29T18:33:30.674944 | 2020-07-30T00:02:14 | 2020-07-30T00:02:14 | 278,735,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | #populates the medium size items database
import mysql.connector

# Populate the medium-size (1000-char) items table with 100 rows.
cnx_orig = mysql.connector.connect(
    user='juan',
    password='LapinCoquin13',  # NOTE(review): hard-coded credentials; load from env/config instead
    host='127.0.0.1',
    database='EXP_ORIG')
orig_cursor = cnx_orig.cursor()

# 1000-character payload (the original built this one character at a time).
item = "a" * 1000

# Parameterized statement: lets the driver handle quoting/escaping instead of
# interpolating values into the SQL string with an f-string.
insert_row = "INSERT INTO Med1000 (id, data_item) VALUES (%s, %s);"
for i in range(100):
    orig_cursor.execute(insert_row, (i, item))

cnx_orig.commit()
orig_cursor.close()
cnx_orig.close()
| [
"juanpablocontreras@cmail.carleton.ca"
] | juanpablocontreras@cmail.carleton.ca |
e6abd68ee8891fd33558e2c79ba7b61eeb2bd4b5 | b35aea9f4411f5dc7942392d78dc31bb76c7ec73 | /djangoProject/services/migrations/0004_alter_sub_head.py | 142021ffe6400739d7b16a3cef8ad1787f49ebfd | [] | no_license | ashkanusefi/rondshow | 1079b81704fff55a1d54fa8dee2712ab61e92f4a | 7e5a80fcc6e326b8b1737a54fb53becc4195e475 | refs/heads/master | 2023-09-01T18:45:33.170465 | 2021-09-18T11:24:52 | 2021-09-18T11:24:52 | 407,820,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # Generated by Django 3.2.5 on 2021-07-13 10:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: repoint Sub.head at Service with on_delete=PROTECT."""
    dependencies = [
        ('services', '0003_alter_sub_head'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sub',
            name='head',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='services.service', verbose_name='نام سردسته'),
        ),
    ]
| [
"yousefi.ashkan96@gmail.com"
] | yousefi.ashkan96@gmail.com |
17461c469a3504f56403dc6907b5fbb8c118437e | 75b2c2d56597ad7a96a7be761acdd0beb41324e9 | /api/models/product.py | 05e451685f7d920056fa163f8a52c8c1e6431bfa | [] | no_license | mauodias/bahzar | 46e94ceb313c8b3681bedf7e4783fb1702d8403a | 4edfc69f688bee4400581777abefe3cbc9003f24 | refs/heads/master | 2020-04-26T07:19:37.441730 | 2019-04-26T23:14:19 | 2019-04-26T23:14:19 | 173,390,908 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | from .db import get_connection
from .model import Model
from bson import ObjectId
class Product(Model):
    """A marketplace product persisted in the Mongo ``products`` collection."""
    collection = get_connection()['products']
    def __init__(self, owner, name, price, tags, description, available=True, images=None):
        """Create a product; ``images`` defaults to a fresh empty list."""
        self.owner = owner
        self.name = name
        self.price = price
        self.tags = tags
        self.description = description
        self.available = available
        # Avoid the shared mutable-default bug: the old `images=[]` default
        # was a single list shared by every instance, so insert_images()
        # leaked images across products created without an explicit list.
        self.images = [] if images is None else images
    def sell(self):
        """Mark the product as sold (no longer available)."""
        self.available = False
    def insert_images(self, images):
        """Append every image reference in ``images`` to this product."""
        self.images.extend(images)
    @staticmethod
    def init_with_document(document):
        """Rehydrate a Product from a raw Mongo document, preserving ``_id``."""
        result = Product(
            owner=document.get('owner'),
            name=document.get('name'),
            price=document.get('price'),
            tags=document.get('tags'),
            description=document.get('description'),
            available=document.get('available', True),
            images=document.get('images', []),
        )
        result._id = document.get('_id')
        return result
| [
"mau.dias@gmail.com"
] | mau.dias@gmail.com |
47a76feedcb553cbdfed82f9a1800c56ebd293fe | a79519c0032ce5630710bef726886e226369ad1a | /bikeshed/refs/__init__.py | 23d416226c7293aba42df293012d75de99a3883b | [
"CC0-1.0"
] | permissive | ylafon/bikeshed | 9598aee18ac5a9f5fc3094bc847f30f768d4ee07 | 5471ba1f1c21f5b90546909fa30891f0148a750c | refs/heads/main | 2022-07-31T23:42:22.592066 | 2022-07-27T09:17:23 | 2022-07-27T09:17:28 | 311,297,114 | 0 | 0 | CC0-1.0 | 2020-11-09T10:08:03 | 2020-11-09T10:08:02 | null | UTF-8 | Python | false | false | 100 | py | from .manager import ReferenceManager
from .source import RefSource
from .wrapper import RefWrapper
| [
"jackalmage@gmail.com"
] | jackalmage@gmail.com |
8cd569f757c981fe1f87ca5c21072e8f3c7c65ec | e2ef942079699b5642eefd0ec9bab5c62db322cd | /Python/51.py | 500aba8759ffb3d0b2b380b1e38e1accf639f21d | [] | no_license | ShiinaMashiro1314/Project-Euler | b016e54b065efec1227da8d8272bd00a202d7124 | 7ff16a0fe86b77360e3e0f830f9390700e6876dc | refs/heads/master | 2020-04-02T08:06:18.040557 | 2016-07-12T18:16:52 | 2016-07-12T18:16:52 | 63,181,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | i = 100000
def same(a, b):
    """Return True if a and b contain exactly the same multiset of decimal digits.

    Works on both Python 2 and 3: the original used `a /= 10`, which is float
    ("true") division on Python 3 and breaks the digit extraction; `//=` is
    floor division and behaves identically for non-negative ints on both.
    """
    def digit_counts(n):
        # Map digit -> occurrence count (empty dict for n == 0).
        counts = {}
        while n > 0:
            d = n % 10
            counts[d] = counts.get(d, 0) + 1
            n //= 10
        return counts
    return digit_counts(a) == digit_counts(b)
def test(i):
    """Project Euler 52 check: return True if 2i..6i all permute the digits of i.

    Uses range() (valid on Python 2 and 3); the original xrange() is a
    NameError under Python 3.
    """
    for multiplier in range(2, 7):
        if not same(i, i * multiplier):
            return False
    return True
# Linear scan upward from the starting value for the smallest qualifying i.
while(not test(i)):
    i += 1
print i | [
"thomaspanzhao@gmail.com"
] | thomaspanzhao@gmail.com |
b3cc9b33bc9311bcaa289ae601c3203b02d393e9 | 66edaf6303b9a66f23f96d1b05ba21efe07279ef | /extractIdioms.py | 2d7c7c1c8f162c15f8e22fc60c809cd2d5e1bbae | [] | no_license | agoconcept/shertonenglish | b10504100eec0030443a3100bff9c26a0795861d | e35c439e0015e27a7cfaf0091d61f6a3dc5f3ed8 | refs/heads/master | 2016-09-06T14:22:51.716405 | 2014-03-02T12:24:45 | 2014-03-02T12:24:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | #!/usr/bin/env python
import argparse
import mechanize
import lxml.html
def process_table(table_elem):
    """Print each table row as 'cell1 - cell2' (phrase and its translation)."""
    rows = table_elem.getchildren()
    for row in rows:
        # Row text arrives newline-separated; index 0 is presumably an
        # empty/leading cell — TODO confirm against the live page layout.
        line = row.text_content().split('\n')
        print "%s - %s" % (line[1].strip(), line[2].strip())
def main():
    """Fetch the Sherton English page given on the CLI and dump its idiom tables."""
    # Handle argument
    parser = argparse.ArgumentParser()
    parser.add_argument('url', help='Sherton URL to parse')
    args = parser.parse_args()
    # Open URL
    response = mechanize.urlopen(args.url).read()
    # Create DOM
    dom = lxml.html.document_fromstring(response)
    # Find tags to process: only direct <table> children of #mainLeft
    tags = dom.get_element_by_id('mainLeft').getchildren()
    for tag in tags:
        if tag.tag == 'table':
            process_table(tag)
#######################
# Script entry point.
if __name__ == '__main__':
    main()
| [
"santi@spotify.com"
] | santi@spotify.com |
2d746288f264a39910102e51657a5ada3af029f0 | 78d15452bb580a8146610ff182947cdaeb717219 | /credentials/g_translate.py | 1aa3ab7a7aa5c7ade9592de9d62eef0b0bacd86c | [] | no_license | jowrjowr/tri_common | 2d62ec393ad39b931265a78c3052abc3f33e45ed | d78eedb6a351df79f7db2134f9bf6ea06bad1b2f | refs/heads/master | 2020-04-04T13:08:39.750280 | 2019-01-14T20:25:10 | 2019-01-14T20:25:10 | 155,950,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | translate_api_key = 'AIzaSyBnWXRMQjbsJafFLpMcgFKqXYR-40yj_Jw'
| [
"jowr.pi@gmail.com"
] | jowr.pi@gmail.com |
4f86503e9967ceaa9cb417c55dc2f4ceb6706b4e | a8595670862f9475050abf73399afe34faaa083b | /wb_api/urls.py | f2d8bb64d1f13e51f8b884542b8f6d173580934d | [] | no_license | skiboorg/wb_api | 14392df2da8569212c0ba05e527b46fcd9c30338 | c45d8c340a45958bc6d380c2a431d13d0f1ebf37 | refs/heads/master | 2022-12-06T03:52:58.621255 | 2020-08-26T19:25:24 | 2020-08-26T19:25:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
# Project-level URL routing.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api-auth/', include('rest_framework.urls')),  # DRF browsable-API login
    path('api/v1/', include('api.urls')),
    path('ckeditor/', include('ckeditor_uploader.urls')),
    # Serve user-uploaded media via Django (presumably dev-only; static()
    # behavior in production should be confirmed).
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"11@11.11"
] | 11@11.11 |
0c088c841c94aab3aea8a44a61b33cfa24ea29bc | 963097c2b25b5db8428000c1ab5d9e1a31f856fe | /24.py | 4befeb7960ea49b7830e4f4fab55edd0ec89bacf | [] | no_license | AkkarinForest/ProjectEuler | a9dd02015b1423e4760590c87885a8a7866add2e | 0489be7803578debcd69bd2a65a395157ffb9222 | refs/heads/master | 2021-05-12T07:18:03.609878 | 2018-01-24T19:02:40 | 2018-01-24T19:02:40 | 117,238,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | from itertools import permutations as perm
# Project Euler 24: the millionth lexicographic permutation of the digits 0-9.
# itertools.permutations yields tuples in lexicographic order for sorted
# input, so advance the iterator lazily instead of materialising all 10!
# (3,628,800) tuples in a list just to index one of them.
_perms = perm(range(10))
for _ in range(999999):
    next(_perms)
print(next(_perms))
| [
"akkarinforest@gmail.com"
] | akkarinforest@gmail.com |
74dea5ec4fa37052d4958c0450ee7c5508e1b29a | bb6bc84759af0961b283c79590dfcac1ddc15178 | /study/skitLearnTest.py | 50438b2f8a6f48e96345d965f0395e5e6c8594c9 | [] | no_license | AnnieGaoXiaoQing/machine | cef9607cb9f78e0cdbd06331ca5d2827c927e80f | 8bdb83091195c99bacf716d8153e24822393624c | refs/heads/master | 2021-10-10T01:49:12.798767 | 2019-01-06T02:25:40 | 2019-01-06T02:25:40 | 115,302,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | #encoding=utf-8
import os;
def main():
    """Train/evaluate a decision tree on the iris dataset and export it as Graphviz."""
    # Data preprocessing
    from sklearn.datasets import load_iris
    iris = load_iris();
    print(iris)
    print(len(iris["data"]))
    from sklearn.model_selection import train_test_split # cross-validation split helper
    # Hold out 20% as the validation set; random_state=1 fixes the random
    # selection (here, 30 test samples).
    train_data,test_data,train_target,test_target = \
        train_test_split(iris.data,iris.target,test_size=0.2,random_state=1)
    # Modeling
    from sklearn import tree
    clf = tree.DecisionTreeClassifier(criterion="entropy") # criterion: information entropy
    clf.fit(train_data,train_target)# fit the model
    y_pred = clf.predict(test_data)# predict
    # Validation (accuracy and confusion matrix)
    from sklearn import metrics
    print(metrics.accuracy_score(y_true=test_target,y_pred=y_pred)) # accuracy (y_true: ground truth, y_pred: predictions)
    print(metrics.confusion_matrix(y_true=test_target,y_pred=y_pred)) # confusion matrix: rows = actual, columns = predicted
    # Ideally a diagonal matrix (values only on the diagonal). The 1 below
    # marks a mis-prediction: true value 1, predicted value 3.
    #[[11 0 0]
    #[0 12 1]
    #[0 0 6]]
    # Export the decision tree straight to a Graphviz .dot file
    with open(os.path.abspath(os.path.dirname(os.getcwd())) + "/data/tree.dot","w") as fw:
        tree.export_graphviz(clf,out_file=fw)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"gxq741718618@163.com"
] | gxq741718618@163.com |
48217e37537211a9bfd2b671886a356efa6a7a8d | b047a32da65cc0fafe249160f57765ddbe80176e | /apps/support/templatetags/forum.py | 20f193d4a3ba8af04bb65ae72f774e72f6431c3b | [
"MIT"
] | permissive | fengjinqi/website | 0568c679e7964bdbb637831a4f1dec7c5e8d767c | 340eecec49ce0d66cd6a491d0ae9ad23ec9f841b | refs/heads/master | 2023-02-18T10:12:52.158471 | 2023-02-16T08:40:13 | 2023-02-16T08:40:13 | 160,755,540 | 367 | 114 | MIT | 2022-12-08T01:42:40 | 2018-12-07T01:44:34 | Python | UTF-8 | Python | false | false | 1,226 | py | from datetime import datetime
from django.contrib.sessions.models import Session
from django.core.cache import cache
#from apps.forum.views import get_online_count
from apps.support.models import QQ
from apps.forum.models import Forum
from django import template
from django.utils.timezone import now, timedelta
from apps.user.models import User
register = template.Library()
@register.inclusion_tag('pc/aside/forum_side.html')
def get_fourm():
qq = QQ.objects.all()
fourm = Forum.objects.filter(hidden=False,category__name='求职招聘')[:10]
sessions = Session.objects.filter(expire_date__gte=datetime.now()).count()
#print(get_online_count())
user = User.objects.count()
cur_date = now().date() + timedelta(days=0)
days = Forum.objects.filter(hidden=False,add_time__gte=cur_date).count()
count = Forum.objects.filter(hidden=False).count()
Hottest = Forum.objects.filter(hidden=False).order_by('-click_nums')[:10]
return {'fourm':fourm,'qq':qq,'user':user,'sessions':sessions,'days':days,'count':count,'Hottest':Hottest}
@register.filter
def get_count(x):
return x.filter(hidden=False).count()
@register.filter
def get_counts(x):
return x.filter(is_show=True).count() | [
"tarena_feng@126.com"
] | tarena_feng@126.com |
4a98ed5c35bc602fa3cf5522d5f85ab078bbcb92 | 009f9761767f93a2986f8b5a2ba61bac6f33dc59 | /examples/intro/8/example.py | 0fc03bff85a41b5054ceffe6fd6a14aa7ee9e136 | [
"MIT"
] | permissive | crasiak/ginkgo | 8798d28d16732cc1c5b18f8e5df0d17f8866f999 | 2592de2c8acfe6e62f33e7ac1f79cc5613567908 | refs/heads/master | 2021-01-16T20:07:45.269511 | 2012-04-06T07:26:46 | 2012-04-06T07:26:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | import gevent
from gevent.pywsgi import WSGIServer
from gevent.server import StreamServer
from gevent.socket import create_connection
from ginkgo.core import Service
class TcpClient(Service):
def __init__(self, address, handler):
self.address = address
self.handler = handler
def do_start(self):
self.spawn(self.handler, self.address)
class MyApplication(Service):
def __init__(self, config):
self.add_service(WSGIServer(('127.0.0.1', config['http_port']), self.handle_http))
self.add_service(StreamServer(('127.0.0.1', config['tcp_port']), self.handle_tcp))
self.add_service(TcpClient(config['connect_address'], self.client_connect))
def client_connect(self, address):
sockfile = create_connection(address).makefile()
while True:
line = sockfile.readline() # returns None on EOF
if line is not None:
print "<<<", line,
else:
break
def handle_tcp(self, socket, address):
print 'new tcp connection!'
while True:
socket.send('hello\n')
gevent.sleep(1)
def handle_http(self, env, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
print 'new http request!'
return ["hello world"]
| [
"progrium@gmail.com"
] | progrium@gmail.com |
ac6a434988cb3b6fd6ab6b0f68895d30c101c406 | d9f14357a7f09dcb09c6a24a9e9cdb45254ecfe7 | /friskby/__init__.py | a0b3a233e4b5716532c4a2c2091d7a249c10c586 | [] | no_license | pgdr/python-friskby | 727213a8163b4405724d9b2c183c5ed31f76f950 | 33962db0ad8336baa71e24dbff7da748dd5d9f10 | refs/heads/master | 2021-01-23T03:43:35.588479 | 2017-04-21T22:15:31 | 2017-04-21T22:15:31 | 86,116,215 | 0 | 0 | null | 2017-03-24T22:40:47 | 2017-03-24T22:40:46 | null | UTF-8 | Python | false | false | 2,553 | py | """The friskby module.
The _friskby_ module is a Python module for reading from external sensors
(currently, only the `SDS011` sensor is supported), store measurements to a
temporary database and post the measurements to a webserver.
In the future we will add other sensors for temperature, relative humidity,
barometric pressure, noise level detection, luminosity, etc, and will post data
for such sensors to a webserver.
# Overview of functionality
The module consists of three primary features:
1. sampling
2. submitting
3. updating
# FriskbySampler
The FriskbySampler is a module that connects to the sensor and samples
information about the weather, air, climate or surrounding environment, and
stores the data to the *FriskbyDao*. The *FriskbyDao* is a data access object
that currently persists measured data to an SQLITE file using the `sqlite3`
Python module.
The underlying database _scheme_ is simple:
```sql
CREATE TABLE samples (
`id` INTEGER PRIMARY KEY,
`value` FLOAT NOT NULL,
`sensor` TEXT NOT NULL,
`timestamp` TEXT NOT NULL,
`uploaded` BOOL DEFAULT 0
);
```
The `sensor` describes contains the sensor ID and describes what type of
measurement has been done. The value is its value (in an assumed known (SI)
unit). The `uploaded` flag signifies whether the measurement has been uploaded,
which is dealt with by the
# FriskbySubmitter
The FriskbySubmitter reads non-uploaded measurements from the FriskbyDao and
submits these measurements to the prescribed webserver. The URL and API key for
the webserver is defined in a config file provided by the caller. The submitter
uses the `requests` module in Python (it is recommended that one has
`requests>=2.13.0`, which is the latest one).
# FriskbyManager
The job of the FriskbyManager is to read the aforementioned config file, contact
the webserver and ask for today's news. They are rare, but may contain
reconfiguration of API keys, URL's and occasionally requests that the client
updates itself.
"""
from __future__ import absolute_import
from serial import SerialException
from .device_config import DeviceConfig
from .friskby_dao import FriskbyDao
from .friskby_sampler import FriskbySampler
from .friskby_manager import FriskbyManager
from .friskby_submitter import FriskbySubmitter
from .os_release import sys_info
from .ts import TS
from .sensors import SDS011
VERSION = '0.72.0'
__all__ = ['FriskbyDao', 'FriskbySampler', 'FriskbySubmitter', 'FriskbyManager',
'TS', 'DeviceConfig', 'sys_info']
__version__ = VERSION
| [
"PGDR@statoil.com"
] | PGDR@statoil.com |
a817a1eca57d1473579117860e8367eba37af7f3 | a98ff9449ed38f7d5b265f14793a28b8139d3112 | /src/network.py | 2fdaa5568d6cb86355153be94fbbf4c570c5cc8b | [] | no_license | ducbm95/Neural-Network | 38f0a2babb6803e6eeb447c9239b7852c3d5234a | 337c9463cf17b26ee8c8ebaae1e9a636d1cd13f0 | refs/heads/master | 2020-06-11T09:38:13.763989 | 2016-12-22T13:13:58 | 2016-12-22T13:13:58 | 75,690,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,336 | py |
"""
network.py
~~~~~~~~~~
A module to implement the stochastic gradient descent learning
algorithm for a feedforward neural network. Gradients are calculated
using backpropagation. Note that I have focused on making the code
simple, easily readable, and easily modifiable. It is not optimized,
and omits many desirable features.
"""
#### Libraries
# Standard library
import random
# Third-party libraries
import numpy as np
class Network(object):
def __init__(self, sizes):
"""The list ``sizes`` contains the number of neurons in the
respective layers of the network. For example, if the list
was [2, 3, 1] then it would be a three-layer network, with the
first layer containing 2 neurons, the second layer 3 neurons,
and the third layer 1 neuron. The biases and weights for the
network are initialized randomly, using a Gaussian
distribution with mean 0, and variance 1. Note that the first
layer is assumed to be an input layer, and by convention we
won't set any biases for those neurons, since biases are only
ever used in computing the outputs from later layers."""
self.num_layers = len(sizes)
self.sizes = sizes
self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
self.weights = [np.random.randn(y, x)
for x, y in zip(sizes[:-1], sizes[1:])]
def feedforward(self, a):
"""Return the output of the network if ``a`` is input."""
for b, w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a) + b)
return a
def SGD(self, training_data, epochs, mini_batch_size, eta,
test_data=None):
"""Train the neural network using mini-batch stochastic
gradient descent. The ``training_data`` is a list of tuples
``(x, y)`` representing the training inputs and the desired
outputs. The other non-optional parameters are
self-explanatory. If ``test_data`` is provided then the
network will be evaluated against the test data after each
epoch, and partial progress printed out. This is useful for
tracking progress, but slows things down substantially."""
if test_data: n_test = len(test_data)
n = len(training_data)
for j in xrange(epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k + mini_batch_size]
for k in xrange(0, n, mini_batch_size)]
for mini_batch in mini_batches:
self.update_mini_batch(mini_batch, eta)
if test_data:
print "Epoch {0}: {1} / {2}".format(
j, self.evaluate(test_data), n_test)
else:
print "Epoch {0} complete".format(j)
def update_mini_batch(self, mini_batch, eta):
"""Update the network's weights and biases by applying
gradient descent using backpropagation to a single mini batch.
The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
is the learning rate."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [w - (eta / len(mini_batch)) * nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b - (eta / len(mini_batch)) * nb
for b, nb in zip(self.biases, nabla_b)]
def backprop(self, x, y):
"""Return a tuple ``(nabla_b, nabla_w)`` representing the
gradient for the cost function C_x. ``nabla_b`` and
``nabla_w`` are layer-by-layer lists of numpy arrays, similar
to ``self.biases`` and ``self.weights``."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation) + b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
# Note that the variable l in the loop below is used a little
# differently to the notation in Chapter 2 of the book. Here,
# l = 1 means the last layer of neurons, l = 2 is the
# second-last layer, and so on. It's a renumbering of the
# scheme in the book, used here to take advantage of the fact
# that Python can use negative indices in lists.
for l in xrange(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
return (nabla_b, nabla_w)
def evaluate(self, test_data):
"""Return the number of test inputs for which the neural
network outputs the correct result. Note that the neural
network's output is assumed to be the index of whichever
neuron in the final layer has the highest activation."""
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
def cost_derivative(self, output_activations, y):
"""Return the vector of partial derivatives \partial C_x /
\partial a for the output activations."""
return (output_activations - y)
#### Miscellaneous functions
def sigmoid(z):
"""The sigmoid function."""
return 1.0 / (1.0 + np.exp(-z))
def sigmoid_prime(z):
"""Derivative of the sigmoid function."""
return sigmoid(z) * (1 - sigmoid(z))
| [
"ishy159@gmail.com"
] | ishy159@gmail.com |
c37ee59446bb0ce436a571312628fce8121b88a8 | 1905e5cece92e6cdc68dac3ebb0ee1d05bef35c8 | /fuzzinator/tracker/base.py | 84de8815bb65c1977f3e473d326d6eed14335b6c | [
"BSD-3-Clause"
] | permissive | darrynza/fuzzinator | e876131d18c5f0a17ae8bdc2fb10f18d8b0084fb | e1642f75ba8c1b555f7e2557b52f43df4d17b89f | refs/heads/master | 2020-04-29T04:39:36.453300 | 2018-12-06T17:15:35 | 2019-01-08T23:36:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | # Copyright (c) 2016-2018 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
class BaseTracker(object):
@property
def logged_in(self):
return True
def find_issue(self, issue):
pass
def report_issue(self, **kwargs):
pass
def issue_url(self, issue):
return ''
| [
"reni@inf.u-szeged.hu"
] | reni@inf.u-szeged.hu |
f13e72e03cd34502c6484c674639f20496aad914 | c5702c50820680fee40bd156b0cb401733abada3 | /e4/average_ratings.py | 18ccaa523cdec3710cb9d023eac920463274c566 | [] | no_license | erwinbai/Data-Science-353 | d03f868de2b1ddc16b91dbf3c8a14a667f6fae58 | ac0e9be50ecc179a846517c1e9a7aebedbd59002 | refs/heads/master | 2021-03-19T23:27:09.883455 | 2020-03-13T20:26:18 | 2020-03-13T20:26:18 | 247,153,190 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | #!/usr/bin/env python
# coding: utf-8
# In[12]:
import numpy as np
import pandas as pd
import sys
import difflib
# In[13]:
def remove(element):
#rstrip() idea is from https://www.programiz.com/python-programming/methods/string/rstrip
return element.rstrip() #helps with removing '|n' behind moive title
vec_remove = np.vectorize(remove)
#input1 = 'movie_list.txt'
#input2 = 'movie_ratings.csv'
input1 = sys.argv[1]
input2 = sys.argv[2]
# In[14]:
data_list = open(input1).readlines()
data = pd.DataFrame(vec_remove(data_list), columns = ['title']) # applying removing for '\n'
#data
# In[15]:
def good_enough(rating):
#Reference from given site on instructions for
#https://docs.python.org/3/library/difflib.html #difflib.get_close_matches
tmp = difflib.get_close_matches(rating,data['title'])
# Create a list with close enough matches and return the first one tmp[0] as our title
if len(tmp)==0:
return None # cant match with anything
else:
return tmp[0]
close_match = np.vectorize(good_enough)
# In[16]:
data_rating = pd.read_csv(input2)
data_rating
# In[17]:
data_rating['title'] = close_match(data_rating['title'])
data_rating
# In[22]:
output = data_rating.groupby(['title']).mean()
# output1 = np.mean(output) # this did not work since numpy operation are not valid with grouby.
#system suggested using .groupby(...).mean() instead
#How to round using website belowin format of numpy.around(a,decimals=0)
#https://docs.scipy.org/doc/numpy/reference/generated/numpy.around.html
final_output = np.around(output, decimals=2)
final_output
# In[21]:
final_output.to_csv(sys.argv[3], header=False, index=True)
# In[ ]:
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
b24b1a2dfa931fcb6ad5d2d59be1cf6b6ea70c87 | e8bab29369a41ec180e4ea60821f6ac970e57ddb | /V.py | dfce98404d1a166a0514c5fe44ab73de09b233d6 | [] | no_license | ParthJindal32/Alphabets-Patterns | 9cec7444cd0be8543ef97430907370126c32e5b0 | ceba69d26df1a821e3205047102a80e46b2e4bf7 | refs/heads/master | 2022-11-19T03:40:23.150170 | 2019-08-10T10:48:38 | 2019-08-10T10:48:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | for row in range(7):
for col in range(5):
if ((col==0 or col==4) and row!=5 and row!=6) or (row==5 and col==1) or (row==5 and col==3) or (row==6 and col==2):
print("*",end=" ")
else:
print(end=" ")
print()
| [
"noreply@github.com"
] | noreply@github.com |
cefb14c0b85be76145a015d5a4ae758860595ddc | 21da8e3490769f0062fad806abc119ed0334b7cc | /CodeChef/MAY17/CHEFROUT.py | 90b2672f87242f8bdddc4bdedfbcdcb58dc1aa7f | [] | no_license | nikhilhassija/CompetitiveProgramming | d2e0606c1badd3209452d670a99234a3ef0d5d6c | 794deb2ef26d03471e78c6fb77f3a72d810d19f1 | refs/heads/master | 2020-04-05T23:06:20.363385 | 2017-12-22T21:19:16 | 2017-12-22T21:19:16 | 42,791,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | t = int(input())
for _ in range(t):
A = input()
if(A == ''.join(sorted(A))):
print("yes")
else:
print("no") | [
"nikhil.hassija@gmail.com"
] | nikhil.hassija@gmail.com |
df8f34cabb73d62adf171eeae4b7788cbdfdf467 | 6db8aba817161dc573f16cde185f4a1c02c753e0 | /XOR.py | 0c07e2db55b43284b1baa73054a707acc0f131b8 | [] | no_license | Prakashchater/Leetcode-array-easy-questions | 456153a13397c895acae6550dad8f1b1851ff854 | 7c5d40f9d68dbf61f4a61a33d9b54f769473b057 | refs/heads/main | 2023-06-19T14:01:52.483440 | 2021-07-22T19:44:40 | 2021-07-22T19:44:40 | 354,926,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | xor = 0
out = []
arr = [10,11,1,2,3]
n = arr[len(arr)-1]
for i in range(len(arr)-1):
out.append(arr[i]^arr[i+1])
out.append(arr[len(arr)-1])
print(out) | [
"prakashchater@gmail.com"
] | prakashchater@gmail.com |
ac7f9a29b1083ff198275d312b01fecad5ed4fc3 | 039446516b188899e2fd21a41087ad20f06d666b | /src/server_code/game_logic.py | 9934157d88fc8ff515d2b4ff8f39a8e5a2c028ab | [] | no_license | Tyorat/TicTacToe | 62ebbeee168568a0c590a5923127a3ac529ba134 | 0281f3c7b293256e2c73ac1530786308cea385af | refs/heads/main | 2023-06-24T22:37:18.744669 | 2021-07-12T23:11:30 | 2021-07-12T23:11:30 | 377,521,858 | 0 | 0 | null | 2021-07-12T23:11:31 | 2021-06-16T14:20:21 | Python | UTF-8 | Python | false | false | 2,001 | py | import secrets
WIN_COMBO = ((1, 2, 3),
(4, 5, 6),
(7, 8, 9),
(1, 4, 7),
(2, 5, 8),
(3, 6, 9),
(1, 5, 9),
(7, 5, 3)
)
SWITCH_TURN = {"x": "o", "o": "x"}
class WrongMove(Exception): pass
class Game:
def __init__(self, player_one, player_two):
self.__field = list(range(1, 10))
self.switch_turn = {player_one: player_two, player_two: player_one}
self.__turn = None
self.choose_random_player()
turn = property()
@turn.getter
def turn(self):
return self.__turn
def check_end_game(self):
self.show_field()
for combo in WIN_COMBO:
if self.__field[combo[0] - 1] == self.__field[combo[1] - 1] == self.__field[combo[2] - 1]:
return{"endgame": True, "message": f"win {self.__field[combo[0] - 1]}"}
if not any(list(map(lambda x: str(x).isdigit(), self.__field))):
return {"endgame": True, "message": "draw"}
else:
return {"endgame": False, "message": "wait for opponent"}
def check_turn(self, index, who):
if self.__field[index - 1] != index:
raise WrongMove("The cell is already occupied")
elif who not in self.switch_turn.keys():
raise WrongMove("Wrong player")
elif who != self.__turn:
raise WrongMove("Not your turn")
self.__field[index - 1] = who
res = self.check_end_game()
self.__turn = self.switch_turn[self.__turn]
return res
def choose_random_player(self):
print(self.switch_turn.keys())
self.__turn = secrets.choice(list(self.switch_turn.keys()))
def show_field(self):
print("************")
print("|" + "|".join(map(str, self.__field[:3])) + "|")
print("|" + "|".join(map(str, self.__field[3:6])) + "|")
print("|" + "|".join(map(str, self.__field[6:])) + "|")
print("************")
| [
"you@example.com"
] | you@example.com |
2adc3290b0636bfc307eb640585ba7eb808a7f36 | 05d076fb95e048082732e0b16b2047ae1b394e6a | /church/migrations/0006_comment_responses.py | 4b718c4daa122321e80255309c1bf47a7d200dff | [] | no_license | wanderemah/lizzy | 86f46080847042dcdb0069eff195c63f85a30bd6 | f40f3213e902aea2dd3376e8e1a0cb64ed16662d | refs/heads/master | 2020-04-09T05:25:02.856731 | 2018-12-02T16:51:57 | 2018-12-02T16:51:57 | 160,063,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | # Generated by Django 2.1 on 2018-08-11 04:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('church', '0005_video'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('query', models.TextField(max_length=255, primary_key=True, serialize=False)),
('person', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='church.signUp')),
('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='church.Event')),
],
),
migrations.CreateModel(
name='Responses',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer', models.TextField(blank=True)),
('person', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='church.signUp')),
('query', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='church.Comment')),
],
),
]
| [
"wanderemah@gmail.com"
] | wanderemah@gmail.com |
4f91db0c179ce5761c01cb598130a254ba26e16f | 1e8d9a251b36f2e80a851d541321522ce4e812fa | /igmspec/scripts/plot_igmspec.py | c3e68e4c64d8f0b820e6ba8daf9c1882d20f2c30 | [] | no_license | Waelthus/igmspec | c81f31360e9528bd150a991ad96b8b4ca94962d0 | 8fdbb622360ca5263711f75d0f7571ed955f6e28 | refs/heads/master | 2020-12-25T21:55:50.001007 | 2016-07-17T21:17:08 | 2016-07-17T21:17:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | #!/usr/bin/env python
""" Loads and plots a requested spectrum
"""
import pdb
def parser(options=None):
import argparse
parser = argparse.ArgumentParser(description='plot_igmspec script v0.2')
parser.add_argument("coord", type=str, help="Coordinates, e.g. J081240+320808")
parser.add_argument("--tol", default=5., type=float, help="Maximum offset in arcsec [default=5.]")
parser.add_argument("--meta", default=True, help="Show meta data? [default: True]", action="store_true")
parser.add_argument("-s", "--survey", help="Name of Survey to use")
parser.add_argument("--select", default=0, type=int, help="Index of spectrum to plot (when multiple exist)")
parser.add_argument("--mplot", default=False, help="Use simple matplotlib plot [default: False]")
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def main(args, unit_test=False, **kwargs):
""" Run
"""
from astropy import units as u
from igmspec.igmspec import IgmSpec
from igmspec import cat_utils as icu
# init
igmsp = IgmSpec(**kwargs)
# Grab
all_spec, all_meta = igmsp.spec_from_coord(args.coord, tol=args.tol*u.arcsec, isurvey=args.survey)
# Outcome
if len(all_meta) == 0:
print("No source found, try another location or a larger tolerance.")
return
elif len(all_meta) == 1: # One survey hit
spec = all_spec[0]
meta = all_spec[0]
else: # More than 1 survey
idx = 0
spec = all_spec[idx]
meta = all_meta[idx]
surveys = [meta.meta['survey'] for meta in all_meta]
print("Source located in more than one survey")
print("Using survey {:s}. You can choose from this list {}".format(surveys[idx], surveys))
#print("Choose another survey from this list (as you wish): {}".format(surveys))
if args.meta:
igmsp.idb.show_meta()
# Load spectra
spec.select = args.select
if unit_test:
return
# Show [may transition to xspec]
if args.mplot:
spec.plot()
else:
spec.plot(xspec=True)
| [
"xavier@ucolick.org"
] | xavier@ucolick.org |
e8bb5f9bf30cda2cbd8824240840e0681ff4e02d | db68b542951ec427378f86d9616d68d7f51a1447 | /CJComparison/tests.py | d9e50ec81b870877be3f9ea091fb6c1fdaebcfb8 | [] | no_license | alittleTom/treeshow | 8a4debaf27d8d4528ecebaa38096e621a78a27ec | 3941e5b9080d29da2f45aad1cef01ac174b20e55 | refs/heads/master | 2020-06-14T17:45:54.658954 | 2016-12-02T01:42:38 | 2016-12-02T01:42:38 | 75,349,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from django.test import TestCase
from CJComparison import views
# Create your tests here.
class T(TestCase):
def testname(self):
print("test")
views.executeSql() | [
"2659221136@qq.com"
] | 2659221136@qq.com |
c8df91551a44a334be5a4cd94d26220e4cc54a07 | 84e661d5d293ec0c544fedab7727767f01e7ddcf | /target/migrations/0011_auto_20201101_1147.py | 9ca50acaa24915c420cbf92d31354e33dd6cdc7f | [
"BSD-3-Clause"
] | permissive | groundupnews/gu | ea6734fcb9509efc407061e35724dfe8ba056044 | 4c036e79fd735dcb1e5a4f15322cdf87dc015a42 | refs/heads/master | 2023-08-31T13:13:47.178119 | 2023-08-18T11:42:58 | 2023-08-18T11:42:58 | 48,944,009 | 21 | 23 | BSD-3-Clause | 2023-09-14T13:06:42 | 2016-01-03T11:56:48 | JavaScript | UTF-8 | Python | false | false | 882 | py | # Generated by Django 3.0.10 on 2020-11-01 09:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('target', '0010_auto_20201101_1127'),
]
operations = [
migrations.AddField(
model_name='target',
name='tweet_notified_published',
field=models.BooleanField(default=False, editable=False),
),
migrations.AddField(
model_name='target',
name='tweet_notified_solution',
field=models.BooleanField(default=False, editable=False),
),
migrations.AlterField(
model_name='target',
name='publish_solution_after',
field=models.SmallIntegerField(default=24, help_text='Make solution available after this many hours', null=True, verbose_name='solution time'),
),
]
| [
"nathangeffen@gmail.com"
] | nathangeffen@gmail.com |
141d834f0dac790c181757c76b4db31a2ce4202e | f063768db23504e214eb0fdc73156f24d2b7abc4 | /sorting/count-sort/count_sort.py | 9c16de551f15378a850be48f5fc66b1472679781 | [] | no_license | raghavsaboo/algorithms | 616b8074ffe5ad00a3e2f06e9e666e517bf16184 | 5e9a5010c52d8c0c94ac9b6e06e9ab8a41a8e84a | refs/heads/master | 2023-02-21T15:48:11.619848 | 2021-01-20T10:49:36 | 2021-01-20T10:49:36 | 274,221,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | def count_sort(arr):
k = max(arr)
output = [0 for _ in range(0,len(arr))]
count_array = [0 for _ in range(0,k+1)]
for i in range(0, len(arr)):
count_array[arr[i]] += 1
for i in range(1, k + 1):
count_array[i] += count_array[i-1]
for i in reversed(range(0, len(arr))):
output[count_array[arr[i]] - 1] = arr[i]
count_array[arr[i]] -= 1
return output
if __name__ == "__main__":
arr = [4, 2, 2, 5, 3, 3, 1]
output = count_sort(arr)
print(output)
| [
"saboo.raghav@gmail.com"
] | saboo.raghav@gmail.com |
be1ff21d8d3789702fd02aa6333e49ef6cfe5047 | cddfa750235344aa5e04244ce5c36871d3c6465b | /mayan/apps/document_states/links.py | f2d41f7e5898b86968e418d4195fb20c1560a36b | [
"Apache-2.0"
] | permissive | Lomascolo/mayan-edms | 76e0fdcad98605838df6737d109c95d67d9ebba5 | f7f0d27a059b1e010b9bbcdf371b9867f6fcfa45 | refs/heads/master | 2021-01-24T08:30:07.480929 | 2017-05-30T06:01:31 | 2017-05-30T06:02:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,176 | py | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from navigation import Link
from .permissions import (
permission_workflow_create, permission_workflow_delete,
permission_workflow_edit, permission_workflow_tools,
permission_workflow_view,
)
link_document_workflow_instance_list = Link(
icon='fa fa-sitemap', permissions=(permission_workflow_view,),
text=_('Workflows'),
view='document_states:document_workflow_instance_list',
args='resolved_object.pk'
)
link_setup_workflow_create = Link(
permissions=(permission_workflow_create,), text=_('Create workflow'),
view='document_states:setup_workflow_create'
)
link_setup_workflow_delete = Link(
permissions=(permission_workflow_delete,), tags='dangerous',
text=_('Delete'), view='document_states:setup_workflow_delete',
args='object.pk'
)
link_setup_workflow_document_types = Link(
permissions=(permission_workflow_edit,), text=_('Document types'),
view='document_states:setup_workflow_document_types', args='object.pk'
)
link_setup_workflow_edit = Link(
permissions=(permission_workflow_edit,), text=_('Edit'),
view='document_states:setup_workflow_edit', args='object.pk'
)
link_setup_workflow_list = Link(
permissions=(permission_workflow_view,), icon='fa fa-sitemap',
text=_('Workflows'), view='document_states:setup_workflow_list'
)
link_setup_workflow_state_create = Link(
permissions=(permission_workflow_edit,), text=_('Create state'),
view='document_states:setup_workflow_state_create', args='object.pk'
)
link_setup_workflow_state_delete = Link(
permissions=(permission_workflow_edit,), tags='dangerous',
text=_('Delete'), view='document_states:setup_workflow_state_delete',
args='object.pk'
)
link_setup_workflow_state_edit = Link(
permissions=(permission_workflow_edit,), text=_('Edit'),
view='document_states:setup_workflow_state_edit', args='object.pk'
)
link_setup_workflow_states = Link(
permissions=(permission_workflow_view,), text=_('States'),
view='document_states:setup_workflow_states', args='object.pk'
)
link_setup_workflow_transition_create = Link(
permissions=(permission_workflow_edit,), text=_('Create transition'),
view='document_states:setup_workflow_transition_create', args='object.pk'
)
link_setup_workflow_transition_delete = Link(
permissions=(permission_workflow_edit,), tags='dangerous',
text=_('Delete'), view='document_states:setup_workflow_transition_delete',
args='object.pk'
)
link_setup_workflow_transition_edit = Link(
permissions=(permission_workflow_edit,), text=_('Edit'),
view='document_states:setup_workflow_transition_edit', args='object.pk'
)
link_setup_workflow_transitions = Link(
permissions=(permission_workflow_view,), text=_('Transitions'),
view='document_states:setup_workflow_transitions', args='object.pk'
)
link_tool_launch_all_workflows = Link(
icon='fa fa-sitemap',
permissions=(permission_workflow_tools,),
text=_('Launch all workflows'),
view='document_states:tool_launch_all_workflows'
)
link_workflow_instance_detail = Link(
permissions=(permission_workflow_view,), text=_('Detail'),
view='document_states:workflow_instance_detail', args='resolved_object.pk'
)
link_workflow_instance_transition = Link(
text=_('Transition'),
view='document_states:workflow_instance_transition',
args='resolved_object.pk'
)
link_workflow_document_list = Link(
permissions=(permission_workflow_view,), text=_('Workflow documents'),
view='document_states:workflow_document_list', args='resolved_object.pk'
)
link_workflow_list = Link(
permissions=(permission_workflow_view,), icon='fa fa-sitemap',
text=_('Workflows'), view='document_states:workflow_list'
)
link_workflow_state_document_list = Link(
permissions=(permission_workflow_view,),
text=_('State documents'), view='document_states:workflow_state_document_list',
args='resolved_object.pk'
)
link_workflow_state_list = Link(
permissions=(permission_workflow_view,),
text=_('States'), view='document_states:workflow_state_list',
args='resolved_object.pk'
)
| [
"roberto.rosario.gonzalez@gmail.com"
] | roberto.rosario.gonzalez@gmail.com |
76410cb5b1f1e1be3f5cc6c8e1e19127295abdaa | e5f9471907c45c17ae8dd785daed328984683a63 | /SecureVision_LPR/lpr_backend/lpr_backend/asgi.py | 1366431453f851eff42cd4357ae8feb498e0f51d | [] | no_license | aleclawlor/SecureVision | c74d4850dc6655bde193529f035ec8282fb74bac | 8b9cda9152f540d037380b47d0dfe9da737e77c0 | refs/heads/master | 2022-12-01T13:59:11.987216 | 2020-08-23T23:53:14 | 2020-08-23T23:53:14 | 288,057,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
ASGI config for lpr_backend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lpr_backend.settings')
application = get_asgi_application()
| [
"lawlorab@bc.edu"
] | lawlorab@bc.edu |
007ee5b8228f8de322122564f9f44722684aa6cf | 87a9706379670da62739b3c1fbbdd75edb5107b8 | /Django/django_celery_test/django_celery_test/celeryconfig.py | cda60c880a754a04fa66f089f2be5f3d6b7e1eed | [] | no_license | zxbzxb180/python_work | ba21ab74f842e0d560a8bb192bb8a874d356b9e1 | 6406024e011aa06d1bda78d97cfecc47f7f2058c | refs/heads/master | 2022-12-12T23:53:36.887963 | 2020-03-04T07:20:29 | 2020-03-04T07:20:29 | 194,494,744 | 0 | 0 | null | 2022-11-22T03:54:47 | 2019-06-30T08:48:44 | Python | UTF-8 | Python | false | false | 854 | py | import djcelery
djcelery.setup_loader()
BROKER_BACKEND = 'redis'
BROKER_URL = 'redis://:6222580@localhost:6379/1'
CELERY_RESULT_BACKEND = 'redis://:6222580@localhost:6379/2'
CELERY_QUEUES = {
'beat_tasks': {
'exchange': 'beat_tasks',
'exchange_type': 'direct',
'binding_key': 'beat_tasks'
},
'work_queue': {
'exchange': 'work_queue',
'exchange_type': 'direct',
'binding_key': 'work_queue'
}
}
CELERY_DEFAULT_QUEUE = 'work_queue'
CELERY_IMPORTS = (
'course.tasks',
)
#有些情况防止死锁
CELERYD_FORCE_EXECV = True
#设置并发的worker数量
CELERYD_CONCURRENCY = 4
#允许重试
CELERY_ACKS_LATE = True
#每个worker最多执行100个任务,可以防止内存泄漏
CELERYD_MAX_TASKS_PER_CHILD = 100
#单个任务最大执行时间
CELERYD_TASK_TIME_LIMIT = 12 * 30
| [
"616529325@qq.com"
] | 616529325@qq.com |
404653f6a2fb686bb9b4064178804bae80845a0b | d201c70e0b094ee9e1c035b089f6d2f4cc003f4b | /algorithm_simulation/01-plot-length-optimal-bit.py | 966f027d3980f2b5e26741a3a94f7b935ea12e79 | [] | no_license | dangsj123456/simulations | f40cacd355ad13668b276861fbd7927be8d015de | f071fe399a269c17aee28c1d68e736466863f374 | refs/heads/master | 2022-04-03T11:48:41.220431 | 2020-02-12T14:41:44 | 2020-02-12T14:41:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,292 | py | # Copyright (c) 2015, Malte Schwarzkopf
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of qjump-nsdi15-plotting nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
### Marco, Roshan ###
'''
Adapted from the following original code:
https://github.com/camsas/qjump-nsdi15-plotting/blob/master/figure1b_3b/plot_memcached_latency_cdfs.py
'''
import os, sys, re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import pylab
from utils import *
from matplotlib.ticker import FuncFormatter
#x-axis: number-of sequences (fix size_of_sequences to 16)
#y-axis: average(scs-length) over the 6 seeds
paper_mode = True
subset_mode = False
outname = "01-optimal-seq-7-bitcost"
fnames = []
for i in range(0, len(sys.argv) - 1, 1):
#mode = sys.argv[2 + i]
fnames.append(sys.argv[1 + i])
if paper_mode:
fig = plt.figure(figsize=(3.33,1.2))
set_paper_rcs()
else:
fig = plt.figure()
set_rcs()
colours = ['b', 'g', 'r', 'c', 'm', 'y', 'v']
dpscs = []
greedy = []
hierarchical = []
fastgreedy = []
dp = []
gr = []
hier = []
fast = []
dpscsmap = {}
greedymap = {}
hierarchicalmap = {}
fastgreedymap = {}
fname = "results_aggregation-renamed-final-4.txt"
filenames = []
filenames.append(fname)
dict_list = {"dpscs": dpscsmap, "greedy": greedymap, "hierarchical": hierarchicalmap, "fastgreedy": fastgreedymap}
for f in filenames:
print "Analyzing file %s: " % (f)
for line in open(f).readlines()[1:]:
fields = [x.strip() for x in line.split()]
algo = fields[0] # algo
generator = fields[1] # random generator
seed = fields[2] # random seed
nof_sq = int(fields[3]) # no of sequences
seq_size = int(fields[4]) # seq size
cost = fields[5] # bitcost/entrycost
scs_length = int(fields[6]) # scs length
timems = float(fields[7]) # time in millisecond
bitcost = (scs_length + seq_size) * scs_length
if algo == "dpscs" and seq_size == 7 and generator == "random":
dpscsmap.setdefault(nof_sq, [])
dpscsmap[nof_sq].append(bitcost)
elif algo == "hierarchical" and seq_size == 7 and generator == "random":
hierarchicalmap.setdefault(nof_sq, [])
hierarchicalmap[nof_sq].append(bitcost)
elif algo == "greedy" and seq_size == 7 and generator == "random":
greedymap.setdefault(nof_sq, [])
greedymap[nof_sq].append(bitcost)
elif algo == "fastgreedy" and seq_size == 7 and generator == "random":
fastgreedymap.setdefault(nof_sq, [])
fastgreedymap[nof_sq].append(bitcost)
for item in dict_list:
dict_ = dict_list[item]
for key in sorted(dict_.keys()):
if len(dict_[key]) < 3:
continue
row = np.average(dict_[key])
if item == "dpscs":
dpscs.append(row)
dp.append(key)
elif (item == "greedy"):
greedy.append(row)
gr.append(key)
elif (item == "hierarchical"):
hierarchical.append(row)
hier.append(key)
elif (item == "fastgreedy"):
fastgreedy.append(row)
fast.append(key)
plt.xlabel("number of sequences", fontsize=10)
plt.ylabel("Memory cost [bit]")
#plt.xscale('log')
#plt.title('sizeofsequence=16')
#plt.ylim(0,2000)
plt.xlim(2, 8)
miny=0
plt.ylim(miny, 1100)
plt.yticks(range(miny, 1101, 200), [str(x) for x in range(miny, 1101, 200)])
plt.plot(dp, dpscs, label="dpscs",color='magenta', lw=1.0, linestyle='-',marker= 'x', mfc='none', mec='magenta', ms=3)
plt.plot(gr, greedy, label="greedy",color='red', lw=1.0, linestyle='-',marker= '>', mfc='none', mec='red', ms=3)
plt.plot(hier, hierarchical, label="hierarchical",color='blue', lw=1.0, linestyle='-',marker= 's', mfc='none', mec='blue', ms=3)
plt.plot(fast, fastgreedy, label="fastgreedy",color='green', lw=1.0, linestyle='-',marker= 'v', mfc='none', mec='green', ms=3)
plt.legend(loc='upper left', frameon=False, ncol=2)
plt.savefig("%s.pdf" % outname, format="pdf", bbox_inches='tight', pad_inches=0.05)
| [
"haorany@kth.se"
] | haorany@kth.se |
735034f13fef965422c11192f4e7e6f58e6ca739 | d11d1dcf1e70e816edecac6745f5563e2cea8cfd | /mysite/venv/Scripts/pip-script.py | 0cf0153d6237ab15a1a2c03dc8c1bac6bd2b90a2 | [] | no_license | Saipraneeth1001/mydjango | 8372fdc18748d6191a3d661d0bb77ee049e4e7c6 | 5fd7ff3bcc2229f816befc9cf29dcfa320098ff8 | refs/heads/master | 2020-03-20T08:32:17.231440 | 2018-06-21T07:12:39 | 2018-06-21T07:12:39 | 137,311,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | #!C:\Users\saipr\Desktop\mysite\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"saipraneeth0448@gmail.com"
] | saipraneeth0448@gmail.com |
c67e108295a0391eecf4709031367b101c0ffd35 | 416b166fefb63ca50f1895cfb0abac3b773727f9 | /scripts/discharge | a8fc249fbbfaa615d8c64e7b5c9a5a8fd78a43b5 | [
"BSD-2-Clause"
] | permissive | RichardOfWard/discharge | 8f88ea847e437d8b5f244c4e1b00018988e248e4 | 5e3e541a6afcee51e32e3bc39176ca08d6d3b25c | refs/heads/master | 2021-01-01T05:47:54.145334 | 2013-11-26T00:04:13 | 2013-11-26T00:04:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | #!/usr/bin/env python
from discharge.cmd import do_cmd
if __name__ == '__main__':
do_cmd()
| [
"richard@richard.ward.name"
] | richard@richard.ward.name | |
96b3c170c94a416fb457bfeb482e5368eab6df09 | 9f7c59e822c3b54dfd413b1f30a0a36693fdd1d7 | /BlogStan/post/models.py | f2702b6a7bfafb31c13446da90a77283cdea5188 | [] | no_license | kunalp-gohire/django-social-media-blog-app | 8f3d4733258a872d46398dfed581e470f9d9fe90 | 1fa8c3b97db1584b483d7066939cca73057bfeaf | refs/heads/master | 2022-11-22T23:57:27.561992 | 2020-07-23T05:32:43 | 2020-07-23T05:32:43 | 281,737,540 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,804 | py | from django.db import models
from django.conf import settings
from django.urls import reverse
import misaka
from django.contrib.auth import get_user_model
User = get_user_model()
# Create your models here.
class Post(models.Model):
user = models.ForeignKey(
User,
related_name='posts',
on_delete=models.CASCADE,
)
created_at = models.DateTimeField(auto_now=True)
title = models.CharField(max_length=256)
message = models.TextField()
message_html = models.TextField(editable=False)
def __str__(self):
return self.message
def save(self, *args, **kwargs):
self.message_html = misaka.html(self.message)
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse('posts:single',
kwargs={
'username': self.user.username,
'pk': self.pk
})
class Meta:
ordering = ['-created_at']
unique_together = ['user', 'message']
class Comment(models.Model):
user = models.ForeignKey(
User,
related_name='comments',
on_delete=models.CASCADE,
null=True,
)
post = models.ForeignKey(
Post,
related_name='comments',
on_delete=models.CASCADE,
)
text = models.TextField()
# text_html = models.TextField(editable=False,null=True)
created_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.text
def get_absolute_url(self):
return reverse('posts:single',
kwargs={
'username': self.user.username,
'pk': self.pk
})
class Meta:
ordering = ['-created_at']
| [
"kunalp.gohire@gmail.com"
] | kunalp.gohire@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.