Commit 7bab5dd0 authored by BALDIT Adrien

ab: add analysis and inout

parent 148ca84c
Showing with 6166 additions and 0 deletions
analysis only
\ No newline at end of file
# -*- coding: utf-8 -*-
__all__=['image_analysis',\
         'mechanical',\
         'vtk',\
         ]
# -*- coding: utf-8 -*-
__all__=[]
analysis/image_analysis/illustrations_image_analysis/instruction_point_tracking.png (157 KiB)
analysis/image_analysis/illustrations_image_analysis/sample_1_face1.jpg (32.1 KiB)
analysis/image_analysis/illustrations_image_analysis/sample_1_face2.jpg (32.5 KiB)
analysis/image_analysis/illustrations_image_analysis/sample_1_face3.jpg (32.8 KiB)

# -*- coding: utf-8 -*-
__all__=[]
#!/usr/bin/python
# -*- coding:utf8 -*-
import pylab as pl
import numpy as np
from scipy import interpolate as inte  # used by interp_data
from matplotlib.patches import Ellipse


class Utilities :
    """Tools Class"""
    def get_cov_ellipse(self,cov, centre, nstd, **kwargs):
        """
        Return a matplotlib Ellipse patch representing the covariance matrix
        cov centred at centre and scaled by the factor nstd.
        """
        # Find and sort eigenvalues and eigenvectors into descending order
        eigvals, eigvecs = pl.linalg.eigh(cov)
        order = eigvals.argsort()[::-1]
        eigvals, eigvecs = eigvals[order], eigvecs[:, order]
        # The anti-clockwise angle to rotate our ellipse by
        vx, vy = eigvecs[:,0][0], eigvecs[:,0][1]
        theta = pl.arctan2(vy, vx)
        # Width and height of ellipse to draw
        width, height = 2 * nstd * pl.sqrt(eigvals)
        return Ellipse(xy=centre, width=width, height=height,
                       angle=pl.degrees(theta), **kwargs)
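    # Usage sketch (assumes an (N, 2) array `data` of x/y samples): drawing the
    # 2-sigma covariance ellipse of a point cloud could look roughly like this:
    #   cov = np.cov(data[:, 0], data[:, 1])
    #   centre = (data[:, 0].mean(), data[:, 1].mean())
    #   ellipse = Utilities().get_cov_ellipse(cov, centre, nstd=2, fc='none', ec='r')
    #   ax = pl.gca(); ax.add_patch(ellipse)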
    def exponential_1(self,max_y,D_y,tau,x_list,exp_type="increasing"):
        """
        Return an exponential function with a single characteristic constant tau:
        Args:
            max_y (float): asymptotic value of the exponential
            D_y (float): delta or amplitude of the exponential, defined as the absolute value of the first term minus the last one over increasing x_list items
            tau (float): characteristic time, length or any element related to x_list
            x_list (list): variable list
            exp_type (str): type of exponential, i.e. "increasing" or "decreasing"
        Return:
            y_list (list): list of exponential values.
        """
        if exp_type=="increasing":
            y_arr = max_y-D_y*(pl.exp(-pl.array(x_list)/tau))
        if exp_type=="decreasing":
            y_arr = max_y-D_y*(1.-pl.exp(-pl.array(x_list)/tau))
        y_list = list(y_arr)
        return y_list
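    # Example sketch (assumed time axis): an increasing exponential rising from
    # max_y - D_y towards max_y with characteristic time tau,
    # y(x) = max_y - D_y * exp(-x / tau):
    #   t = list(pl.linspace(0., 10., 100))
    #   y = Utilities().exponential_1(max_y=1.0, D_y=0.8, tau=2.0, x_list=t)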
    def exponential_n(self,max_y,list_D_y,list_tau,x_list,exp_type="increasing"):
        """
        Return an exponential function with n characteristic constants tau (n = len(list_tau)):
        Args:
            max_y (float): asymptotic value of the exponential
            list_D_y (list): list of deltas or amplitudes of the exponential, each defined as the absolute value of the first term minus the last one over increasing x_list items
            list_tau (list): list of characteristic times, lengths or any elements related to x_list
            x_list (list): variable list
            exp_type (str): type of exponential, i.e. "increasing" or "decreasing"
        Return:
            y_list (list): list of exponential values.
        """
        # check data consistency
        if len(list_D_y)!=len(list_tau):
            print("the number of amplitudes and the number of characteristic elements have to be the same")
            exit()
        # exponential initialisation
        y_arr = max_y*pl.ones(len(x_list))
        # loop over the elements
        for inc,tau in enumerate(list_tau) :
            if exp_type=="increasing":
                y_arr -= list_D_y[inc]*(pl.exp(-pl.array(x_list)/tau))
            if exp_type=="decreasing":
                y_arr -= list_D_y[inc]*(1.-pl.exp(-pl.array(x_list)/tau))
        y_list = list(y_arr)
        return y_list
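    # Example sketch: a double-exponential rise,
    # y(x) = max_y - D1*exp(-x/tau1) - D2*exp(-x/tau2):
    #   t = list(pl.linspace(0., 10., 100))
    #   y = Utilities().exponential_n(max_y=1.0, list_D_y=[0.5, 0.3], list_tau=[0.5, 5.0], x_list=t)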
    def interp_data(self, x = [], y = [], ech = [], type_interp = "linear") :
        """Interpolation of the data (x, y) evaluated at the sampling points ech"""
        # interpolation of the function
        f = inte.interp1d(x, y, kind = type_interp)
        # sampling of the function
        nbr_pts_ech = len(ech)
        print("Number of sampling points: " + str(nbr_pts_ech) + '\n')
        ech = pl.array(ech)
        y_ech = f(ech)
        return y_ech
    def linear_fit(self,x_data,y_data,fitting_order=1) :
        """Compute a polynomial fit (linear by default) of an almost linear curve:
        x_data : array of x data
        y_data : array of y data
        fitting_order : order of the fitting polynomial (default 1)
        """
        # polynomial fitting of the requested order
        param = np.polyfit(x_data,y_data,fitting_order)
        return param
    def search_value_in_list(self,liste = [], valeur = 0., tolerance = 0.01) :
        """Search for -valeur- in -liste- within the given -tolerance-;
        return the index of the first match, or None if no value matches."""
        i = 0
        while i < len(liste) :
            if abs(liste[i] - valeur) <= tolerance :
                return i
            else :
                i += 1
    def clean_list_static_value(self,master_list,slave_list) :
        """Remove the leading repeated (static) values of the master list and the corresponding entries of the slave list"""
        # offset defined as not reached
        offset = 0
        # while offset not reached
        while offset == 0 :
            # two consecutive terms of the master list are equal
            if master_list[1] == master_list[0] :
                # delete the corresponding slave and master list terms
                slave_list=pl.delete(slave_list, [0])
                master_list=pl.delete(master_list, [0])
            # else offset reached
            else :
                offset = 1
        # return the corrected master and slave lists
        return master_list,slave_list
    def clean_list_to_threshold(self,master_list,slave_lists,threshold) :
        """Remove the initial values of the slave lists based on a threshold defined on the master list
        Args:
            master_list (list): list containing the data in which the threshold has to be found
            slave_lists (list): list of slave lists
            threshold (float): threshold value
        Return:
            master_list, slave_lists
        """
        # offset variable defined as not reached
        offset = 0
        # while offset not reached
        while offset == 0 :
            # if the threshold is not reached in the master list
            if master_list[0] < threshold :
                # loop over all slave lists
                for i in range(len(slave_lists)) :
                    # delete the value under threshold in the slave list
                    slave_lists[i]=pl.delete(slave_lists[i], [0])
                # delete the value under threshold in the master list
                master_list=pl.delete(master_list, [0])
            # else offset is reached
            else :
                # exit the while loop
                offset = 1
        # return the corrected master and slave lists
        return master_list,slave_lists
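    # Example sketch (assumed force/time lists): drop the leading points until the
    # force reaches a 0.5 threshold, trimming the time list accordingly:
    #   force, slaves = Utilities().clean_list_to_threshold(force, [time], 0.5)
    #   time = slaves[0]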
    def Get_Integrate(self,x_data, y_data) :
        """REPLACED BY get_trapezoidal_integrate \n
        Integrate regular curves where y_data = fct(x_data)"""
        # initial value
        integration = 0.0
        # trapezoidal rule
        for i in range(len(x_data)-1):
            integration += (x_data[i+1]-x_data[i])*(y_data[i+1]+y_data[i])*0.5
        return integration
    def get_trapezoidal_integrate(self,x_data, y_data) :
        """Trapezoidal integration of regular curves where y_data = fct(x_data):
        Args:
            x_data (list): x values
            y_data (list): y values related to the x ones.
        Comments:
            - pay attention to the data order: increasing x will lead to a positive value and decreasing x to a negative one.
        Return:
            integration (scalar): trapezoidal integration value"""
        # initial value
        integration = 0.0
        # trapezoidal rule
        for i in range(len(x_data)-1):
            integration += (x_data[i+1]-x_data[i])*(y_data[i+1]+y_data[i])*0.5
        return integration
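    # Example sketch: integrating y = x over [0, 1] with the trapezoidal rule,
    #   x = list(pl.linspace(0., 1., 11)); y = list(x)
    #   Utilities().get_trapezoidal_integrate(x, y)  # -> 0.5 (exact for a straight line)
    # Reversing the x order would give -0.5, as noted in the docstring.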
    def savitzky_golay_smoothing(self, y, window_size, order, deriv=0):
        r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
        The Savitzky-Golay filter removes high frequency noise from data.
        It has the advantage of preserving the original shape and
        features of the signal better than other types of filtering
        approaches, such as moving average techniques.
        Parameters
        ----------
        y : array_like, shape (N,)
            the values of the time history of the signal.
        window_size : int
            the length of the window. Must be an odd integer number.
        order : int
            the order of the polynomial used in the filtering.
            Must be less than `window_size` - 1.
        deriv: int
            the order of the derivative to compute (default = 0 means only smoothing)
        Returns
        -------
        ys : ndarray, shape (N)
            the smoothed signal (or its n-th derivative).
        Notes
        -----
        The Savitzky-Golay filter is a type of low-pass filter, particularly
        suited for smoothing noisy data. The main idea behind this
        approach is to make for each point a least-square fit with a
        polynomial of high order over an odd-sized window centred at
        the point.
        Examples
        --------
        t = np.linspace(-4, 4, 500)
        y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
        ysg = Utilities().savitzky_golay_smoothing(y, window_size=31, order=4)
        import matplotlib.pyplot as plt
        plt.plot(t, y, label='Noisy signal')
        plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
        plt.plot(t, ysg, 'r', label='Filtered signal')
        plt.legend()
        plt.show()
        References
        ----------
        .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
           Data by Simplified Least Squares Procedures. Analytical
           Chemistry, 1964, 36 (8), pp 1627-1639.
        .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
           W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
           Cambridge University Press ISBN-13: 9780521880688
        """
        try:
            window_size = np.abs(int(window_size))
            order = np.abs(int(order))
        except ValueError:
            raise ValueError("window_size and order have to be of type int")
        if window_size % 2 != 1 or window_size < 1:
            raise TypeError("window_size must be a positive odd number")
        if window_size < order + 2:
            raise TypeError("window_size is too small for the polynomial order")
        order_range = range(order+1)
        half_window = (window_size -1) // 2
        # precompute coefficients
        b = np.array([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
        m = np.linalg.pinv(b)[deriv]
        # pad the signal at the extremes with
        # values taken from the signal itself
        firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
        lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
        y = np.concatenate((firstvals, y, lastvals))
        return np.convolve( m, y, mode='valid')
    def scalar_error(self, data1 = [], data2 = [], visu = "on") :
        """Compute the sum of squared differences between data2 and data1, each normalised by the maximum of data1"""
        Erreur = (np.array(data2) - np.array(data1))/np.max(np.array(data1))
        Sum_Err2 = np.sum(Erreur**2)
        out = "-----------------------------------------------------\n"
        out += "\tError = " + str(Sum_Err2)+ "\n"
        out += "-----------------------------------------------------\n"
        if visu == "on" :
            print(out)
        return Sum_Err2
    def scalar_error2(self, data1 = [], data2 = [], visu = "on") :
        """Compute the raw sum of squared differences between data2 and data1"""
        Erreur = (pl.array(data2) - pl.array(data1))
        Sum_Err2 = np.sum(Erreur**2)
        out = "-----------------------------------------------------\n"
        out += "\tError = " + str(Sum_Err2)+ "\n"
        out += "-----------------------------------------------------\n"
        if visu == "on" :
            print(out)
        return Sum_Err2
    def mean_squared_error(self, data1, data2, visu = "on"):
        """Calculate the mean squared error (MSE) of two data sets.
        Args:
            data1 (list): List with first data set
            data2 (list): List with second data set
        Return:
            mean squared error between data1 and data2
        """
        difference = (pl.array(data2) - pl.array(data1))
        mse = pl.sum(difference**2)/len(data1)
        out = "-----------------------------------------------------\n"
        out += "\tError = " + str(mse)+ "\n"
        out += "-----------------------------------------------------\n"
        if visu == "on" :
            print(out)
        return mse
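
# Minimal usage sketch, assuming synthetic example data (a noisy exponential
# generated here only for illustration):
if __name__ == "__main__":
    utils = Utilities()
    # synthetic data: increasing exponential plus Gaussian noise
    t = list(pl.linspace(0., 10., 201))
    y_ref = utils.exponential_1(max_y=1.0, D_y=0.8, tau=2.0, x_list=t)
    y_noisy = pl.array(y_ref) + np.random.normal(0., 0.02, len(t))
    # smooth the noisy signal and compare it with the reference
    y_smooth = utils.savitzky_golay_smoothing(y_noisy, window_size=21, order=3)
    utils.mean_squared_error(y_ref, y_smooth)
    # area under the reference curve
    print(utils.get_trapezoidal_integrate(t, y_ref))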
input output scripts only
\ No newline at end of file
# -*- coding: utf-8 -*-
__all__=[]