@incollection{KMV01a, author = {A. Kukush and I. Markovsky and S. {Van Huffel}}, title = {On consistent estimators in linear and bilinear multivariate errors-in-variables models}, editor = {S. {Van Huffel} and P. Lemmerling}, booktitle = {Total Least Squares and Errors-in-Variables Modeling: Analysis, Algorithms and Applications}, publisher = {Kluwer}, year = {2002}, pages = {155--164}, doi = {10.1007/978-94-017-3552-0_14}, url = {ftp://ftp.esat.kuleuven.be/pub/SISTA/markovsky/reports/01-82.ps.gz}, abstract = {We consider three multivariate regression models related to the TLS problem. The errors are allowed to have unequal variances. For the model $AX = B$, the elementwise-weighted TLS estimator is considered. The matrix $[A\ B]$ is observed with errors and has independent rows, but the errors in a row are correlated. In addition, the corresponding error covariance matrices may differ from row to row and some of the columns are allowed to be error-free. We give mild conditions for weak consistency of the estimator, when the number of rows in $A$ increases. We derive the objective function for the estimator and propose an iterative procedure to compute the solution. In a bilinear model $AXB=C$, where the data $A,B,C$ are perturbed by errors, an adjusted least squares estimator is considered, which is consistent, i.e. converges to $X$, as the number $m$ of rows in $A$ and the number $q$ of columns in $B$ increase. A similar approach is applied in a related model, arising in motion analysis. The model is $v^TFu=0$, where the vectors $u$ and $v$ are homogeneous coordinates of the projections of the same rigid object point in two images, and $F$ is a rank deficient matrix. Each pair $(u,v)$ is observed with measurement errors. We construct a consistent estimator of $F$ in three steps: a) estimate the measurement error variance, b) construct a preliminary matrix estimate, and c) project that estimate on the subspace of singular matrices. A simulation study illustrates the theoretical results.} }
@incollection{KMV02b, author = {A. Kukush and I. Markovsky and S. {Van Huffel}}, title = {Consistent estimation of an ellipsoid with known center}, booktitle = {Comput. Stat. (COMPSTAT)}, pages = {1369--1376}, year = {2004}, editor = {J. Antoch}, publisher = {Physica-Verlag}, url = {ftp://ftp.esat.kuleuven.ac.be/pub/SISTA/markovsky/reports/02-119a.ps.gz}, isbn = {3-7908-1554-3}, doi = {10.1007/s00211-004-0526-9} }
@incollection{MV05b, author = {I. Markovsky and S. {Van Huffel}}, title = {On weighted structured total least squares}, booktitle = {Large-Scale Scientific Computing}, pages = {695--702}, year = {2006}, editor = {I. Lirkov and S. Margenov and J. Wa{\'s}niewski}, volume = {3743}, series = {Lecture Notes in Computer Science}, publisher = {Springer-Verlag}, doi = {10.1007/11666806_80}, pdf = {http://eprints.soton.ac.uk/263491/1/llncs_proof.pdf} }
@incollection{MAV09, author = {I. Markovsky and A. Amann and S. {Van Huffel}}, editor = {L. Wang and H. Garnier and T. Jakeman}, booktitle = {System Identification, Environmental Modelling, and Control System Design}, title = {Application of filtering methods for removal of resuscitation artifacts from human {ECG} signals}, publisher = {Springer}, year = {2009}, doi = {10.1007/978-0-85729-974-1_14}, pdf = {http://eprints.soton.ac.uk/265957/1/ecg_embs.pdf}, software = {ftp://ftp.esat.kuleuven.be/pub/SISTA/markovsky/reports/06-212b.tar} }
@incollection{M09c, author = {I. Markovsky}, editor = {A. Iske and others}, title = {Algorithms and literate programs for weighted low-rank approximation with missing data}, booktitle = {Approximation Algorithms for Complex Systems}, publisher = {Springer}, year = {2011}, volume = {3}, chapter = {12}, series = {Springer Proc. in Mathematics}, pages = {255--273}, pdf = {http://eprints.soton.ac.uk/268296/1/missing-data-2x1.pdf}, software = {http://eprints.soton.ac.uk/268296/2/missing-data.tar}, doi = {10.1007/978-3-642-16876-5_12} }
@incollection{dist-chapter, author = {I. Markovsky}, title = {Rank constrained optimization problems in computer vision}, booktitle = {Regularization, Optimization, Kernels, and Support Vector Machines}, publisher = {Chapman \& Hall/CRC}, year = {2014}, editor = {J. Suykens and M. Signoretto and A. Argyriou}, series = {Machine Learning \& Pattern Recognition}, chapter = {13}, pages = {293--312}, pdf = {https://imarkovs.github.io/publications/dist-chapter.pdf}, isbn = {9781482241396}, doi = {10.1201/b17558-16} }
@incollection{kpca, author = {I. Markovsky and K. Usevich}, title = {Nonlinearly structured low-rank approximation}, booktitle = {Low-Rank and Sparse Modeling for Visual Analysis}, publisher = {Springer}, year = {2014}, editor = {Yun Raymond Fu}, pages = {1--22}, pdf = {https://imarkovs.github.io/publications/kpca.pdf}, doi = {10.1007/978-3-319-12000-3_1}, abstract = {Polynomially structured low-rank approximation problems occur in algebraic curve fitting, e.g., conic section fitting, subspace clustering (generalized principal component analysis), and nonlinear and parameter-varying system identification. The maximum likelihood estimation principle applied to these nonlinear models leads to nonconvex optimization problems and yields inconsistent estimators in the errors-in-variables (measurement errors) setting. We propose a computationally cheap and statistically consistent estimator based on a bias correction procedure, called adjusted least-squares estimation. The method has been used successfully for conic section fitting and was recently generalized to algebraic curve fitting. The contribution of this chapter is the application of the polynomially structured low-rank approximation problem and, in particular, of the adjusted least-squares method to subspace clustering and to nonlinear and parameter-varying system identification. The input-output notion of a dynamical model, classical in system identification, is replaced by the behavioral definition of a model as a set, represented by implicit nonlinear difference equations.}, keywords = {structured low-rank approximation, conic section fitting, subspace clustering, nonlinear system identification} }
@incollection{ident-prague, author = {I. Markovsky}, title = {System identification in the behavioral setting: A structured low-rank approximation approach}, booktitle = {Latent Variable Analysis and Signal Separation}, publisher = {Springer}, volume = {9237}, series = {Lecture Notes in Computer Science}, editor = {E. Vincent and others}, isbn = {978-3-319-22481-7}, doi = {10.1007/978-3-319-22482-4_27}, year = {2015}, pages = {235--242}, pdf = {https://imarkovs.github.io/publications/ident-prague-2x1.pdf}, abstract = {System identification is a fast-growing research area that encompasses a broad range of problems and solution methods. It is desirable to have a unifying setting and a few common principles that are sufficient to understand the currently existing identification methods. The behavioral approach to systems and control, put forward in the mid 1980s, is such a unifying setting. Until recently, however, the behavioral approach lacked supporting numerical solution methods. In the last 10 years, the structured low-rank approximation setting has been used to fill this gap. In this paper, we summarize recent progress on methods for system identification in the behavioral setting and pose some open problems. First, we show that errors-in-variables and output error system identification problems are equivalent to Hankel structured low-rank approximation. Then, we outline three generic solution approaches: 1) methods based on local optimization, 2) methods based on convex relaxations, and 3) subspace methods. A specific example of a subspace identification method---data-driven impulse response computation---is presented in full detail. In order to achieve the desired unification, the classical ARMAX identification problem should also be formulated as a structured low-rank approximation problem. This is an outstanding open problem.}, keywords = {system identification, errors-in-variables modeling, behavioral approach, Hankel matrix, low-rank approximation, impulse response estimation, ARMAX identification} }
@incollection{ica18a, author = {I. Markovsky and A. Fazzi and N. Guglielmi}, title = {Applications of polynomial common factor computation in signal processing}, booktitle = {Latent Variable Analysis and Signal Separation}, publisher = {Springer}, optvolume = {10891}, series = {Lecture Notes in Computer Science}, opteditor = {Y. Deville et al.}, pages = {99--106}, year = {2018}, pdf = {https://imarkovs.github.io/publications/ica18a.pdf}, abstract = {We consider the problem of computing the greatest common divisor of a set of univariate polynomials and present applications of this problem in system theory and signal processing. One application is blind system identification: given the responses of a system to unknown inputs, find the system. Assuming that the unknown system has a finite impulse response and that at least two experiments are performed with inputs that have finite support and whose Z-transforms have no common factors, the impulse response of the system can be computed up to a scaling factor as the greatest common divisor of the Z-transforms of the outputs. Other applications of the greatest common divisor problem in system theory and signal processing are finding the distance of a system to the set of uncontrollable systems and estimating the common dynamics in a multi-channel sum-of-exponentials model.}, keywords = {blind system identification; sum-of-exponentials modeling; distance to uncontrollability; approximate common factor; low-rank approximation}, doi = {10.1007/978-3-319-93764-9_10} }
@incollection{ica18b, author = {I. Markovsky and P.-L. Dragotti}, title = {Using structured low-rank approximation for sparse signal recovery}, booktitle = {Latent Variable Analysis and Signal Separation}, publisher = {Springer}, optvolume = {10891}, series = {Lecture Notes in Computer Science}, opteditor = {Y. Deville et al.}, pages = {479--487}, year = {2018}, pdf = {https://imarkovs.github.io/publications/ica18b.pdf}, abstract = {Structured low-rank approximation is used in model reduction, system identification, and signal processing to find low-complexity models from data. The rank constraint imposes the condition that the approximation has bounded complexity, and the optimization criterion aims to find the best match between the data---a trajectory of the system---and the approximation. In some applications, however, the data is sub-sampled from a trajectory, which poses the problem of sparse approximation using the low-rank prior. This paper considers a modified structured low-rank approximation problem where the observed data is a linear transformation of a system's trajectory with reduced dimension. We reformulate this problem as a structured low-rank approximation with missing data and propose a solution method based on the variable projection principle. We compare the structured low-rank approximation approach with the classical sparsity-inducing method of $\ell_1$-norm regularization. The $\ell_1$-norm regularization method is effective for sum-of-exponentials modeling with a large number of samples; however, it is not suitable for identification of systems with damping.}, keywords = {structured low-rank approximation, sparse approximation, missing data estimation, sum-of-exponentials modeling, $\ell_1$-norm regularization}, software = {https://imarkovs.github.io/software/ica18.tgz}, doi = {10.1007/978-3-319-93764-9_44} }
@incollection{sensor-ml, author = {I. Markovsky}, title = {Dynamic measurement}, booktitle = {Data-driven filtering and control design: Methods and applications}, chapter = {6}, pages = {97--108}, publisher = {IET}, year = {2019}, doi = {10.1049/PBCE123E_ch6}, editor = {C. Novara and S. Formentin}, pdf = {https://imarkovs.github.io/publications/sensor-ml.pdf}, abstract = {In metrology, a given measurement technique has fundamental speed and accuracy limitations imposed by physical laws. Data processing allows us to overcome these limitations by using prior knowledge about the sensor dynamics. The prior knowledge considered in this paper is a model class to which the sensor dynamics belongs. We present methods that are applicable to linear time-invariant processes and are suitable for real-time implementation on a digital signal processor.}, keywords = {system identification, subspace methods, real-time estimation, Kalman filtering, metrology} }