Added Schichtenverbund evaluation

Markus Clauß 2022-08-22 16:18:58 +02:00
parent 26744a8a9e
commit 1fbd58b68d
9 changed files with 575 additions and 38 deletions

View File

@@ -0,0 +1,17 @@
# main __init__.py
from .analysis import *
from .io import *
from .versuche import *
__all__ = [
# IO
"read_geosys",
# Versuche
"TestSchichtenverbundV2GeoSys",
"TestSchichtenverbundV2GeoSysExtractedEMPA",
    # Analysis
"fit_cos_eval",
"fit_cos_simple",
"fit_cos",
]
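
With these re-exports, the public API is importable straight from the package root; a minimal sketch (assuming the package is installed as pytestpavement):

from pytestpavement import read_geosys, fit_cos, fit_cos_eval
from pytestpavement import TestSchichtenverbundV2GeoSysExtractedEMPA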

View File

@@ -0,0 +1,9 @@
from .regression import *
__all__ = [
    # regression models
    "fit_cos_simple",
    "fit_cos",
    # helper functions
"fit_cos_eval",
]

View File

@@ -0,0 +1,161 @@
import lmfit as lm
import numpy as np
import scipy.special as sf
from scipy.optimize import curve_fit
def cosfunc(t, A, w, p, c, e):
"""
definition of the sin function
"""
return A * np.cos(2 * np.pi * w * t + p) + e * t + c
def fit_cos_eval(x, par):
"""
par: dict
fitting results
"""
ys = cosfunc(x, par['amp'], par['freq'], par['phase'], par['offset'],
par['slope'])
return ys
def regression_sine_fft():
    """
    FFT-based frequency estimation for sine data
    (placeholder, not yet implemented).
    """
    return []
def fit_cos_simple(x, y, freq=10.0):
"""
simple sine regression
x: vector or list
y: vector or list
freq: float
initial frequency of regression analysis
"""
tt = np.array(x)
yy = np.array(y)
guess_offset = np.mean(yy)
offset_b = 0.4 * abs(guess_offset)
guess_amp = abs(np.max(yy) - np.min(yy)) / 2.0
param_bounds = ([
0.3 * guess_amp, 0.99 * freq, -np.inf, guess_offset - offset_b, -np.inf
], [1.3 * guess_amp, 1.01 * freq, np.inf, guess_offset + offset_b, np.inf])
popt, pcov = curve_fit(cosfunc, tt, yy, bounds=param_bounds)
A, w, p, c, e = popt
return {
"amp": A,
"freq": w,
"phase": p,
"offset": c,
"slope": e,
}
def fit_cos(x, y, freq=10.0):
"""
sine regression
x: vector or list
y: vector or list
freq: float
initial frequency of regression analysis
"""
    # step 1: rough bounded fit to get start values
    res_step1 = fit_cos_simple(x, y, freq=freq)
    # step 2: refine the fit with lmfit using several solvers
res = {}
mod = lm.models.Model(cosfunc)
mod.set_param_hint(
'A',
value=res_step1['amp'],
#min=res_step1['amp'] - 0.5 * abs(res_step1['amp']),
#max=res_step1['amp'] + 0.5 * abs(res_step1['amp'])
)
mod.set_param_hint(
'w',
value=freq,
vary=True,
#min=res_step1['freq'] - 0.3 * abs(res_step1['freq']),
#max=res_step1['freq'] + 0.3 * abs(res_step1['freq'])
)
mod.set_param_hint('p', value=res_step1['phase'], vary=True)
mod.set_param_hint('c', value=res_step1['offset'],
vary=True) #, min = -0.5, max = 0.5)
mod.set_param_hint('e', value=res_step1['slope'], vary=True)
parms_fit = [
mod.param_hints['A']['value'], mod.param_hints['w']['value'],
mod.param_hints['p']['value'], mod.param_hints['c']['value'],
mod.param_hints['e']['value']
]
    abweichung = []  # goodness-of-fit p-values from the chi² distribution
chis = []
chis_red = []
results = []
r2 = []
methods = ['leastsq', 'powell']
dof = len(y) - len(parms_fit)
for method in methods:
result = mod.fit(y, t=x, method=method, verbose=False)
r2temp = 1 - result.residual.var() / np.var(y)
# r2temp = result.redchi / np.var(yfit, ddof=2)
if r2temp < 0.:
r2temp = 0
r2.append(r2temp)
chi = result.chisqr
chis_red.append(result.redchi)
abweichung.append(sf.gammaincc(dof / 2., chi / 2))
chis.append(chi)
results.append(result)
    best = np.nanargmax(r2)
    res['amp'] = results[best].best_values['A']
    res['freq'] = results[best].best_values['w']
    res['phase'] = results[best].best_values['p']
    res['offset'] = results[best].best_values['c']
    res['slope'] = results[best].best_values['e']
    res['r2'] = r2[best]
return res
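
A minimal usage sketch of the two-step fit on synthetic data (sampling rate, noise level, and parameter values below are illustrative assumptions):

import numpy as np
from pytestpavement.analysis import fit_cos, fit_cos_eval

t = np.linspace(0.0, 1.0, 1000)  # 1 s of data
y = 2.0 * np.cos(2 * np.pi * 10.0 * t + 0.3) + 0.05 * t + 1.0
y = y + np.random.normal(scale=0.1, size=t.size)  # measurement noise

par = fit_cos(t, y, freq=10.0)  # initial frequency guess: 10 Hz
print(par['amp'], par['freq'], par['r2'])
y_fit = fit_cos_eval(t, par)  # evaluate the fitted model on t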

View File

@@ -1 +1,3 @@
from .geosys import read_geosys

__all__ = ["read_geosys"]

View File

@@ -1,18 +1,19 @@
import csv
import os
from sys import getsizeof

from numpy import array
from pandas import DataFrame, to_datetime

from versuche.helper import normalice_header


def detect_tabnum(filename, tabstr, encoding='utf-8'):
    filename = os.path.normpath(filename)
    tabstr = tabstr.lower()

    # read the file
    with open(filename, 'r', encoding=encoding) as inFile:
        reader = csv.reader(inFile, delimiter='\t')
        counter = 0
        for row in reader:
@@ -24,25 +25,24 @@ def detect_tabnum(filename, tabstr,encoding='utf-8'):
            counter += 1
            if counter > 100:
                return False


def str2float(str):
    try:
        str = str.replace(',', '.')
        return float(str)
    except:
        return None


def read_geosys(filename,
                table,
                pkdata='001',
                encoding='utf-8',
                to_si=False,
                debug=False):
    '''
    :param filename: File-Name
@@ -66,12 +66,11 @@ def read_data(filename,
    # read and convert the data
    #---------------------------------------------------------------------
    data = []
    zuordnung = []

    # read the file
    with open(filename, 'r', encoding=encoding) as inFile:
        reader = csv.reader(inFile, delimiter='\t')
        for row in reader:
            if len(row) > 2:
@@ -82,7 +81,6 @@ def read_data(filename,
    # clean up
    ## create the data structure
    data_clean = {}
    data_clean['head'] = []
    data_clean['data'] = []
@@ -98,7 +96,7 @@ def read_data(filename,
    # clean up
    data = data_clean
    del data_clean

    if debug:
        print('data_clean fin')
@@ -112,11 +110,14 @@ def read_data(filename,
            id_name = None
            for idx_name, name in enumerate(row):
                if name in [
                        r'Probekörberdurchmesser', r'Diameter of specimen',
                        'PK-Durchmesser', 'Probekörper-Durchmesser'
                ]:
                    id_durchmesser = idx_name
                elif name in [r'Probekörperbezeichnung']:
                    id_name = idx_name
                elif name in ['Probekörperhöhe', 'Gap length', 'PK-Höhe']:
                    id_hoehe = idx_name

            if debug:
@@ -143,11 +144,13 @@ def read_data(filename,
            except:
                pass

            header = {
                'd': durchmesser,
                'h': hoehe,
                'name': name,
                'unit_h': unit_hoehe,
                'unit_d': unit_durch
            }

            if debug:
                print('header\n', header)
@@ -183,7 +186,7 @@ def read_data(filename,
            data = array(temp)

            if debug:
                print(data_head, data_units)

            ## normalize the data column names
            data_head = normalice_header(data_head)
@@ -209,7 +212,6 @@ def read_data(filename,
            return header, data
    except:
        print('Error while reading')
        raise
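
A minimal call sketch for the renamed reader (the file name and table number below are placeholders):

from pytestpavement.io import read_geosys

# returns the specimen header (diameter, height, name, units) and the data table
header, data = read_geosys('measurement.txt', table='038')
print(header['d'], header['h'])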

View File

@@ -0,0 +1,9 @@
# versuche
from .schichtenverbund import *
__all__ = [
"fit_single_data",
"TestSchichtenverbundV2GeoSys",
"TestSchichtenverbundV2GeoSysExtractedEMPA",
]

View File

@@ -0,0 +1,335 @@
import os
from multiprocessing import Pool, cpu_count

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

from pytestpavement.analysis import fit_cos, fit_cos_eval
def fit_single_data(g):
"""
iterate over data and fit
"""
try:
i, d = g
d = d.loc[i]
#d = d.reset_index()
        # use the load cycles near the end of the test (steady state),
        # skipping the very last cycle
        Ns = d['N'].unique()
        e = d[(d['N'] > Ns[-7]) & (d['N'] <= Ns[-2])].copy()
if e.empty:
return
e.index = e.index - e.index[0]
e = e.reset_index()
res_par = {}
res_par['T_set'] = i[0]
res_par['sigma_set'] = i[1]
res_par['f_set'] = float(i[2])
res_par['ext_set'] = i[3]
r2 = []
for col in ['F', 's1', 's2']:
x = e['time'].values
y = e[col].values
res_step = fit_cos(x, y, freq=res_par['f_set'])
r2.append(res_step['r2'])
for key in res_step.keys():
res_par[key + f'_{col}'] = res_step[key]
    except Exception:
        # fitting failed for this group (e.g. too few cycles); skip it
        return None

    return res_par
class TestSchichtenverbundV2GeoSys:
    """
    Read and process a test of type Schichtenverbund
    (configuration created for TU Dresden).

    Attributes
    ----------
    filename : str
        filename to read
    tablenum : str
        table number of the geosys file
    """
def __init__(self,
filename: str,
diameter: float,
spalt: float = 1.0,
tablenum: str = '038',
debug: bool = False,
plot_fit: bool = False,
plot_fit_error: bool = True):
self.file = filename
self.diameter = diameter
self.spalt = spalt
self._tablenum = tablenum
self._plot = plot_fit
self._plot_on_error = plot_fit_error
self._debug = debug
self.data = None
self._check_file_exists()
self._run()
def _run(self):
if self._debug:
print('debug mode')
self._read()
self._normalize_data()
self._set_units()
self._check_data()
self._transform_data()
self._fit_data()
self._calc_Es()
def __str__(self):
return f"filename: {self.file}, table number: {self._tablenum}"
def _check_file_exists(self):
assert os.path.exists(self.file)
    def _read(self):
        # placeholder; subclasses implement the actual file reader
        self.data = []

    def _normalize_data(self):
        # placeholder; subclasses map raw column names to canonical ones
        return

    def _set_units(self):
        return
def _check_data(self):
must_have_values = [
'T_set',
'sigma_set',
'f_set',
'ext_set',
'time',
'F',
's1',
's2',
'N',
]
        check = [item in self.data.columns for item in must_have_values]
        assert all(check)
def _transform_data(self):
self.data = self.data.set_index(
['T_set', 'sigma_set', 'f_set', 'ext_set', 'time']).sort_index()
    def _fit_data(self):
        # fit each (T_set, sigma_set, f_set, ext_set) group; use a process
        # pool normally, but run serially in debug mode for easier tracebacks
        if not self._debug:
            with Pool(cpu_count()) as pool:
ret_list = pool.map(
fit_single_data,
[(i, d) for i, d in self.data.groupby(level=[0, 1, 2, 3])])
else:
ret_list = []
for i, d in self.data.groupby(level=[0, 1, 2, 3]):
ret_list.append(fit_single_data((i, d)))
self.res = pd.DataFrame.from_dict(
[r for r in ret_list if isinstance(r, dict)])
self.res = self.res.set_index(
['T_set', 'sigma_set', 'f_set', 'ext_set']).sort_index()
#self.res.sort_index(axis=0, inplace=True)
#self.res.sort_index(axis=1, inplace=True)
def _plot_single_data(self, i):
        ylabels = {
            'F': 'Force in N',
            's1': r'Displacement $s_1$ in $\mu m$',
            's2': r'Displacement $s_2$ in $\mu m$'
        }
par = self.res.loc[i].to_dict()
df = self.data.loc[i]
Ns = df['N'].unique()
e = df[(df['N'] > Ns[-7]) & (df['N'] <= Ns[-2])].copy()
e.index = e.index - e.index[0]
if e.empty:
return
fig, axs = plt.subplots(3, 1, sharex=True)
fig.set_figheight(1.5 * fig.get_figheight())
        for iax, col in enumerate(['F', 's1', 's2']):
            ax = axs[iax]
x, y = e.index, e[col]
ax.plot(x, y, c='k', label='data')
par_sel = [key for key in par.keys() if col in key]
par_sel = dict((k.split('_')[0], par[k]) for k in par_sel)
ys = fit_cos_eval(x, par_sel)
r2_sel = np.round(par_sel['r2'], 3)
            ax.plot(x, ys, c='C1', label=f'fit (R² = {r2_sel})')
ax.legend(loc=0)
ax.set_ylabel(ylabels[col])
ax.set_xlabel('Time in s')
plt.tight_layout()
plt.show()
    def plot_fitted_data(self, num: int = 7):
        """
        plot the fit for the first num groups
        """
        counter = 0
        for i, r in self.res.groupby(level=[0, 1, 2, 3]):
            self._plot_single_data(i)
            counter += 1
            if num is not None and counter >= num:
                break
    def plot_fitted_data_error(self, rmin: float = 0.9):
        """
        plot the fit for all groups with an r2_F below rmin
        """
        sel_res = self.res[self.res['r2_F'] <= rmin]
if sel_res.empty:
print('no errors')
return
for i, r in sel_res.groupby(level=[0, 1, 2, 3]):
self._plot_single_data(i)
    def _calc_Es(self):
        # shear stiffness: shear stress amplitude divided by shear angle
        area = np.pi * self.diameter**2 / 4
        ampF = self.res['amp_F']
        ampS = self.res[['amp_s1', 'amp_s2']].mean(axis=1)
        tau = ampF.div(area)
        # displacement amplitude (µm -> mm) relative to the gap width
        gamma = ampS.div(1000.0).div(self.spalt)
        self.res['Es'] = tau / gamma
class TestSchichtenverbundV2GeoSysExtractedEMPA(TestSchichtenverbundV2GeoSys):
def _read(self):
if self._debug:
nrows = 15000
else:
nrows = None
        self.data = pd.read_csv(self.file, sep='\t', decimal='.', nrows=nrows)
        # drop the first row (assumed to hold units, not measurements)
        self.data.drop(index=[0], inplace=True)
        self.data.dropna(axis=0, inplace=True)
        for col in self.data.columns:
            self.data[col] = pd.to_numeric(self.data[col])
def _normalize_data(self):
col = list(self.data.columns)
for i, d in enumerate(col):
if 'soll temperature' in d:
col[i] = 'T_set'
elif 'soll sigma' in d:
col[i] = 'sigma_set'
elif 'soll frequency' in d:
col[i] = 'f_set'
elif 'soll extension' in d:
col[i] = 'ext_set'
elif 'vertical load from hydraulic pressure' in d:
col[i] = 'F'
elif 'Vertical position from LVDT 1' in d:
col[i] = 's1'
elif 'Vertical position from LVDT 2' in d:
col[i] = 's2'
elif 'Zyklenzähler' in d:
col[i] = 'N'
self.data.columns = col

View File

@@ -1,15 +1,17 @@
from setuptools import setup

setup(
    name='PyTestPavement',
    version='0.1.0',
    author='Markus Clauß',
    author_email='markus.clauss@tu-dresden.de',
    packages=[
        'pytestpavement',
    ],
    #scripts=['bin/script1','bin/script2'],
    #url='http://pypi.python.org/pypi/PackageName/',
    #license='LICENSE.txt',
    description='',
    #long_description=open('README.txt').read(),
    install_requires=['lmfit', 'pandas', 'numpy', 'scipy'],
)