@article{LHM99, author = {M. Lemmon and K. He and I. Markovsky}, title = {Supervisory Hybrid Systems}, journal = {IEEE Control Systems Magazine}, month = {August}, volume = {19}, number = {4}, pages = {42--55}, year = {1999}, doi = {10.1109/37.777788}, abstract = {Supervisory hybrid systems are systems generating a mixture of continuous-valued and discrete-valued signals. This systems paradigm is particularly useful in modeling applications where high-level decision making is used to supervise process behavior. Hybrid system methodologies are also applicable to switched systems where the system switches between various setpoints or operational modes to extend its effective operating range. Hybrid systems, therefore, embrace a diverse set of applications. There has been considerable activity in the area of hybrid systems theory, and this article provides an introduction to some of the basic concepts and trends in this emergent field. The term hybrid refers to a mixing of two fundamentally different types of objects or methods. This paper deals with supervisory hybrid systems, i.e., systems that combine discrete event and continuous-valued dynamics. The article is organized as follows. We first provide an example of a hybrid system, to be used throughout as a pedagogical tool illustrating various concepts in hybrid systems theory. We then discuss modeling frameworks for hybrid systems, paying specific attention to the hybrid automaton. Not only may the system have a hybrid character, but the specifications on desired system behaviors may also be hybrid. The article also discusses specification logics that express system requirements on both the discrete and continuous states of the system. The article continues with a survey of current methods and concepts used to verify or validate desired system behaviors, and concludes with a survey of current methods for hybrid control system synthesis.} }
@article{KMV01b, author = {A. Kukush and I. Markovsky and S. {Van Huffel}}, title = {Consistent fundamental matrix estimation in a quadratic measurement error model arising in motion analysis}, journal = {Comput. Statist. Data Anal.}, year = {2002}, volume = {41}, number = {1}, pages = {3--18}, abstract = {A bilinear multivariate errors-in-variables model is considered. It corresponds to an overdetermined set of linear equations $AXB=C$, where $A$ is $m\times n$, $B$ is $p\times q$, and $A$, $B$, $C$ are perturbed by errors. The total least squares estimator is inconsistent in this case. An adjusted least squares estimator is constructed, which converges to the true value of $X$, as $m$ and $q$ go to infinity. A small sample modification of the estimator is presented, which is more stable for small $m$ and $q$ and is asymptotically equivalent to the adjusted least squares estimator. The theoretical results are confirmed by a simulation study.}, keywords = {bilinear multivariate measurement error models, errors-in-variables models, adjusted least squares, consistency, asymptotic normality, small sample modification}, doi = {10.1016/S0167-9473(02)00068-3}, pdf = {http://eprints.soton.ac.uk/263294/1/ellest_stat_published.pdf} }
@article{KMV01c, author = {A. Kukush and I. Markovsky and S. {Van Huffel}}, title = {Consistent estimation in the bilinear multivariate errors-in-variables model}, journal = {Metrika}, year = {2003}, volume = {57}, number = {3}, pages = {253--285}, abstract = {A multivariate measurement error model $AX = B $ is considered. The errors in $[A \ B]$ are rowwise independent, but within each row the errors may be correlated. Some of the columns are observed without errors, and in addition the error covariance matrices may differ from row to row. The total covariance structure of the errors is supposed to be known up to a scalar factor. The fully weighted total least squares estimator of $X$ is studied, which in the case of normal errors coincides with the maximum likelihood estimator. We give mild conditions for weak and strong consistency of the estimator, when the number of rows in $A$ increases. The results generalize the conditions of Gallo given for a univariate homoscedastic model (where $B$ is a vector), and extend the conditions of Gleser given for the multivariate homoscedastic model. We derive the objective function for the estimator and propose an iteratively reweighted numerical procedure.}, keywords = {linear errors-in-variables model, elementwise-weighted total least squares, consistency, iteratively reweighted procedure}, doi = {10.1007/s001840200217}, pdf = {http://eprints.soton.ac.uk/263292/1/axb_published.pdf} }
@article{MRPKV02, author = {I. Markovsky and M. Rastello and A. Premoli and A. Kukush and S. {Van Huffel}}, title = {The element-wise weighted total least squares problem}, journal = {Comput. Statist. Data Anal.}, pages = {181--209}, volume = {50}, number = {1}, year = {2005}, abstract = {We consider a new technique for parameter estimation in a linear measurement error model $AX = B$, $A = A_0 + E_A$, $B = B_0 + E_B$, $A_0X_0=B_0$ with row-wise independent and non-identically distributed measurement errors $E_A$, $E_B$. Here $A_0$ and~$B_0$ are the true values of the measurements $A$ and~$B$, and~$X_0$ is the true value of the parameter~$X$. The total least squares method yields an inconsistent estimate of the parameter in this case. We formulate a modified total least squares problem, called element-wise weighted total least squares, that provides a consistent estimator, i.e., the estimate~$\hat X$ converges to the true value~$X_0$ as the number of measurements increases. The new estimator is a solution of an optimization problem with the parameter estimate~$\hat X$ and the correction $\Delta D = [\Delta A \ \Delta B]$, applied to the measured data~$D=[A \ B]$, as decision variables. We derive an equivalent unconstrained problem by minimizing analytically over the correction~$\Delta D$ and propose an iterative algorithm for its solution, based on the first order optimality condition. The algorithm is locally convergent with linear convergence rate. For large sample size the convergence rate tends to quadratic.}, keywords = {Total least squares; Multivariate errors-in-variables model; Unequally sized errors; Non-convex optimization; Re-weighted least squares}, doi = {10.1016/j.csda.2004.07.014}, pdf = {http://eprints.ecs.soton.ac.uk/13293/1/ewtls_published.pdf}, software = {https://imarkovs.github.io/software/wtls.tgz} }
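% Annotation: the element-wise weighted total least squares problem described in the abstract above can be sketched as follows. This is a schematic restatement for orientation only; the row-wise weight matrices $W_i$ (inverses of the row error covariances) and the sign convention for the correction are assumptions of this sketch, not taken from the entry.
% \[ \min_{\hat X,\ \Delta D = [\Delta A\ \ \Delta B]}\ \sum_{i=1}^{m} \Delta d_i^\top W_i\, \Delta d_i \quad\text{subject to}\quad (A - \Delta A)\hat X = B - \Delta B, \]
% where $\Delta d_i^\top$ denotes the $i$-th row of the correction $\Delta D$.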
@article{KMV02c, author = {A. Kukush and I. Markovsky and S. {Van Huffel}}, title = {Consistent estimation in an implicit quadratic measurement error model}, journal = {Comput. Statist. Data Anal.}, year = {2004}, volume = {47}, number = {1}, pages = {123--147}, abstract = {An adjusted least squares estimator is derived that yields a consistent estimate of the parameters of an implicit quadratic measurement error model. In addition, a consistent estimator for the measurement error noise variance is proposed. Important assumptions are: (1) all errors are uncorrelated and identically distributed and (2) the error distribution is normal. The estimators for the quadratic measurement error model are used to consistently estimate conic sections and ellipsoids. Simulation examples, comparing the adjusted least squares estimator with the ordinary least squares method and the orthogonal regression method, are shown for the ellipsoid fitting problem.}, keywords = {Adjusted least squares; Conic fitting; Consistent estimator; Ellipsoid fitting; Quadratic measurement error model}, doi = {10.1016/j.csda.2003.10.022}, pdf = {http://eprints.soton.ac.uk/263294/1/ellest_stat_published.pdf}, software = {ftp://ftp.esat.kuleuven.ac.be/pub/SISTA/markovsky/reports/02-116b.m} }
@article{MKV02d, author = {I. Markovsky and A. Kukush and S. {Van Huffel}}, title = {Consistent least squares fitting of ellipsoids}, journal = {Numerische Mathematik}, year = {2004}, volume = {98}, number = {1}, pages = {177--194}, abstract = {A parameter estimation problem for ellipsoid fitting in the presence of measurement errors is considered. The ordinary least squares estimator is inconsistent, and due to the nonlinearity of the model, the orthogonal regression estimator is inconsistent as well, i.e., these estimators do not converge to the true value of the parameters, as the sample size tends to infinity. A consistent estimator is proposed, based on a proper correction of the ordinary least squares estimator. The correction is explicitly given in terms of the true value of the noise variance.}, keywords = {adjusted least squares; consistent estimator; ellipsoid fitting; orthogonal regression; quadratic measurement error model; total least squares}, doi = {10.1007/s00211-004-0526-9}, pdf = {http://eprints.soton.ac.uk/263295/1/ellest_comp_published.pdf}, software = {ftp://ftp.esat.kuleuven.ac.be/pub/SISTA/markovsky/reports/02-116b.m} }
@article{KMV02e, author = {A. Kukush and I. Markovsky and S. {Van Huffel}}, title = {Consistency of the structured total least squares estimator in a multivariate errors-in-variables model}, journal = {J. Statist. Plann. Inference}, pages = {315--358}, volume = {133}, number = {2}, year = {2005}, abstract = {The structured total least squares estimator, defined via a constrained optimization problem, is a generalization of the total least squares estimator when the data matrix and the applied correction satisfy given structural constraints. In the paper, an affine structure with additional assumptions is considered. In particular, Toeplitz and Hankel structured, noise free and unstructured blocks are allowed simultaneously in the augmented data matrix. An equivalent optimization problem is derived that has as decision variables only the estimated parameters. The cost function of the equivalent problem is used to prove consistency of the structured total least squares estimator. The results for the general affine structured multivariate model are illustrated by examples of special models. Modification of the results for block-Hankel/Toeplitz structures is also given. As a by-product of the analysis of the cost function, an iterative algorithm for the computation of the structured total least squares estimator is proposed.}, keywords = {Block-Hankel/Toeplitz structure; Consistency; Dynamic errors-in-variables model; Iterative algorithm; Structured total least squares; Total least squares.}, doi = {10.1016/j.jspi.2003.12.020}, pdf = {http://eprints.soton.ac.uk/263296/1/stls_published.pdf} }
@article{MVK02, author = {I. Markovsky and S. {Van Huffel} and A. Kukush}, title = {On the computation of the structured total least squares estimator}, journal = {Numer. Linear Algebra Appl.}, year = {2004}, volume = {11}, pages = {591--608}, abstract = {A multivariate structured total least squares problem is considered, in which the extended data matrix is partitioned into blocks and each of the blocks is Toeplitz/Hankel structured, unstructured, or noise free. Two types of numerical solution methods for this problem are proposed: i) standard local optimization methods in combination with efficient evaluation of the cost function and its first derivative, and ii) an iterative procedure proposed originally for the element-wise weighted total least squares problem. The computational efficiency of the proposed methods is compared with that of alternative methods.}, keywords = {parameter estimation; total least squares; structured total least squares; system identification.}, doi = {10.1002/nla.361}, pdf = {http://eprints.soton.ac.uk/263297/1/stls_comp_published.pdf}, software = {http://slra.github.io/software-slra.html} }
@article{MV03a, author = {I. Markovsky and S. {Van Huffel} and R. Pintelon}, title = {Block-{T}oeplitz/{H}ankel structured total least squares}, journal = {SIAM J. Matrix Anal. Appl.}, pages = {1083--1099}, volume = {26}, number = {4}, year = {2005}, abstract = {A structured total least squares problem is considered, in which the extended data matrix is partitioned into blocks and each of the blocks is block-Toeplitz/Hankel structured, unstructured, or exact. An equivalent optimization problem is derived and its properties are established. The special structure of the equivalent problem makes it possible to improve the computational efficiency of the numerical solution methods. By exploiting the structure, the computational complexity of the algorithms (local optimization methods) per iteration is linear in the sample size. Application of the method for system identification and for model reduction is illustrated by simulation examples.}, keywords = {Parameter estimation, Total least squares, Structured total least squares, System identification, Model reduction.}, doi = {10.1137/S0895479803434902}, pdf = {http://eprints.ecs.soton.ac.uk/13298/1/stls_block_published.pdf}, software = {http://slra.github.io/software-slra.html} }
@article{MDM03, author = {I. Markovsky and B. {De Moor}}, title = {Linear dynamic filtering with noisy input and output}, journal = {Automatica}, volume = {41}, number = {1}, pages = {167--171}, year = {2005}, abstract = {State estimation problems for linear time-invariant systems with noisy inputs and outputs are considered. An efficient recursive algorithm for the smoothing problem is presented. The equivalence between the optimal filter and an appropriately modified Kalman filter is established. The optimal estimate of the input signal is derived from the optimal state estimate. The result shows that the noisy input/output filtering problem is not fundamentally different from the classical Kalman filtering problem.}, keywords = {errors-in-variables, Kalman filtering, optimal smoothing, misfit, latency.}, doi = {10.1016/j.automatica.2004.08.014}, opturl = {http://eprints.soton.ac.uk/263299/} }
@article{MWRDM04, author = {I. Markovsky and J. C. Willems and P. Rapisarda and B. {De Moor}}, title = {Algorithms for deterministic balanced subspace identification}, journal = {Automatica}, pages = {755--766}, volume = {41}, number = {5}, year = {2005}, abstract = {New algorithms for identification of a balanced state space representation are proposed. They are based on a procedure for estimation of the impulse response and sequential zero input responses directly from data. The proposed algorithms are more efficient than the existing alternatives that compute the whole Hankel matrix of Markov parameters. It is shown that the computations can be performed on Hankel matrices of the input-output data of various dimensions. By choosing wider matrices, we need persistency of excitation of smaller order. Moreover, this leads to computational savings and improved statistical accuracy when the data is noisy. Using a finite amount of input-output data, the existing algorithms compute a finite time balanced representation and the identified models have a lower bound on the distance to an exact balanced representation. The proposed algorithm can approximate arbitrarily closely an exact balanced representation. Moreover, the finite time balancing parameter can be selected automatically by monitoring the decay of the impulse response. We show which partition of the data into ``past'' and ``future'' is optimal in terms of the minimal identifiability condition.}, keywords = {Exact deterministic subspace identification; Balanced model reduction; Approximate system identification; MPUM.}, doi = {10.1016/j.automatica.2004.10.007}, opturl = {http://eprints.soton.ac.uk/262202/}, pdf = {http://eprints.soton.ac.uk/262202/1/AutomaticaSubspaceID.pdf}, software = {ftp://ftp.esat.kuleuven.be/pub/SISTA/markovsky/reports/05-122.tar.gz} }
@article{MPV04a, author = {I. Markovsky and J. C. Willems and S. {Van Huffel} and B. {De Moor} and R. Pintelon}, title = {Application of structured total least squares for system identification and model reduction}, volume = {50}, number = {10}, pages = {1490--1500}, journal = {IEEE Trans. Automat. Contr.}, year = {2005}, abstract = {The following identification problem is considered: minimize the $l_2$ norm of the difference between a given time series and an approximating one under the constraint that the approximating time series is a trajectory of a linear time invariant system of a fixed complexity. The complexity is measured by the input dimension and the maximum lag. The question leads to a problem that is known as the global total least squares problem and alternatively can be viewed as maximum likelihood identification in the errors-in-variables setup. Multiple time series and latent variables can be considered in the same setting. Special cases of the problem are autonomous system identification, approximate realization, and finite time optimal $l_2$ model reduction. The identification problem is related to the structured total least squares problem. The paper presents an efficient software package that implements the theory. The proposed method and software are tested on data sets from the database for the identification of systems DAISY.}, keywords = {Errors-in-variables, system identification, model reduction, structured total least squares, numerical software, DAISY, MPUM.}, doi = {10.1109/TAC.2005.856643}, pdf = {http://eprints.ecs.soton.ac.uk/13300/1/stls_appl_published.pdf}, software = {http://slra.github.io/software-ident.html} }
@article{WRMDM04, author = {J. C. Willems and P. Rapisarda and I. Markovsky and B. {De Moor}}, title = {A note on persistency of excitation}, journal = {Systems \& Control Lett.}, volume = {54}, number = {4}, pages = {325--329}, year = {2005}, abstract = {We prove that if a component of the response signal of a controllable linear time-invariant system is persistently exciting of sufficiently high order, then the windows of the signal span the full system behavior. This is then applied to obtain conditions under which the state trajectory of a state representation spans the whole state space. The related question of when the rows of the matrix formed from a state sequence are linearly independent of the rows of the matrix formed from an input sequence and a finite number of its shifts is of central importance in subspace system identification.}, keywords = {Behavioral systems, persistency of excitation, lags, annihilators, system identification.}, doi = {10.1016/j.sysconle.2004.09.003}, pdf = {http://eprints.ecs.soton.ac.uk/12195/1/PersistencyExcitation.pdf} }
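% Annotation: the result summarized in the abstract above is often quoted in the following form; this LaTeX sketch is given for orientation only and the precise assumptions are those stated in the paper. If $w = (u, y)$ is a trajectory of a controllable linear time-invariant system $\mathcal{B}$ with $n(\mathcal{B})$ states and the input component $u$ is persistently exciting of order $L + n(\mathcal{B})$ (i.e., the Hankel matrix of depth $L + n(\mathcal{B})$ built from $u$ has full row rank), then
% \[ \operatorname{col\,span} \mathcal{H}_L(w) = \mathcal{B}|_L, \]
% i.e., the length-$L$ windows of $w$ span the set of all length-$L$ trajectories of $\mathcal{B}$.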
@article{MV04, author = {I. Markovsky and S. {Van Huffel}}, title = {High-performance numerical algorithms and software for structured total least squares}, journal = {J. Comput. Appl. Math.}, pages = {311--331}, volume = {180}, number = {2}, year = {2005}, abstract = {We present a software package for structured total least squares approximation problems. The allowed structures in the data matrix are block-Toeplitz, block-Hankel, unstructured, and exact. Combination of blocks with these structures can be specified. The computational complexity of the algorithms is~$O(m)$, where~$m$ is the sample size. We show simulation examples with different approximation problems. Application of the method for multivariable system identification is illustrated on examples from the database for identification of systems DAISY. }, keywords = {Parameter estimation; Structured total least squares; Deconvolution; System identification; Numerical linear algebra.}, software = {http://slra.github.io/software-slra.html}, doi = {10.1016/j.cam.2004.11.003}, pdf = {http://eprints.soton.ac.uk/263301/1/stls_pack_published.pdf} }
@article{SMWV04, author = {M. Schuermans and I. Markovsky and P. Wentzell and S. {Van Huffel}}, title = {On the equivalence between total least squares and maximum likelihood {PCA}}, journal = {Analytica Chimica Acta}, pages = {254--267}, number = {1--2}, volume = {544}, year = {2005}, abstract = {The maximum likelihood PCA (MLPCA) method has been devised in chemometrics as a generalization of the well-known PCA method in order to derive consistent estimators in the presence of errors with known error distribution. For similar reasons, the total least squares (TLS) method has been generalized in the field of computational mathematics and engineering to maintain consistency of the parameter estimates in linear models with measurement errors of known distribution. The basic motivation for TLS is the following. Let a set of multidimensional data points (vectors) be given. How can one obtain a linear model that explains these data? The idea is to modify all data points in such a way that some norm of the modification is minimized subject to the constraint that the modified vectors satisfy a linear relation. Although the name “total least squares” appeared in the literature only 25 years ago, this method of fitting is certainly not new and has a long history in the statistical literature, where the method is known as “orthogonal regression”, “errors-in-variables regression” or “measurement error modeling”. The purpose of this paper is to explore the tight equivalences between MLPCA and element-wise weighted TLS (EW-TLS). Despite their seemingly different problem formulation, it is shown that both methods can be reduced to the same mathematical kernel problem, i.e. finding the closest (in a certain sense) weighted low rank matrix approximation where the weight is derived from the distribution of the errors in the data. Different solution approaches, as used in MLPCA and EW-TLS, are discussed. In particular, we will discuss the weighted low rank approximation (WLRA), the MLPCA, the EW-TLS and the generalized TLS (GTLS) problems. These four approaches tackle an equivalent weighted low rank approximation problem, but different algorithms are used to come up with the best approximation matrix. We will compare their computation times on chemical data and discuss their convergence behavior.}, keywords = {TLS; MLPCA; Rank reduction; Measurement errors}, doi = {10.1016/j.aca.2004.12.059}, pdf = {http://eprints.soton.ac.uk/263302/1/mieke.pdf} }
@article{MV05, author = {I. Markovsky and S. {Van Huffel}}, title = {Overview of total least squares methods}, journal = {Signal Processing}, year = {2007}, volume = {87}, pages = {2283--2302}, abstract = {We review the development and extensions of the classical total least squares method and describe algorithms for its generalization to weighted and structured approximation problems. In the generic case, the classical total least squares problem has a unique solution, which is given in analytic form in terms of the singular value decomposition of the data matrix. The weighted and structured total least squares problems have no such analytic solution and are currently solved numerically by local optimization methods. We explain how special structure of the weight matrix and the data matrix can be exploited for efficient cost function and first derivative computation. This allows computationally efficient solution methods to be obtained. The total least squares family of methods has a wide range of applications in system theory, signal processing, and computer algebra. We describe the applications for deconvolution, linear prediction, and errors-in-variables system identification.}, keywords = {Total least squares; Orthogonal regression; Errors-in-variables model; Deconvolution; Linear prediction; System identification.}, doi = {10.1016/j.sigpro.2007.04.004}, pdf = {http://eprints.soton.ac.uk/263855/1/tls_overview.pdf} }
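% Annotation: for orientation, the classical total least squares problem reviewed in this paper admits the standard formulation below; the dimensions $A \in \mathbb{R}^{m \times n}$, $B \in \mathbb{R}^{m \times d}$ are assumptions of this sketch.
% \[ \min_{\Delta A,\ \Delta B,\ X}\ \big\| [\,\Delta A \ \ \Delta B\,] \big\|_F \quad\text{subject to}\quad (A + \Delta A) X = B + \Delta B. \]
% In the generic case, as recalled in the abstract, the solution follows from the singular value decomposition of the data matrix $[\,A \ \ B\,]$.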
@article{SKMV05, author = {S. Shklyar and A. Kukush and I. Markovsky and S. {Van Huffel}}, title = {On the conic section fitting problem}, journal = {Journal of Multivariate Analysis}, year = {2007}, volume = {98}, pages = {588--624}, abstract = {For the conic section fitting problem, adjusted least squares (ALS) estimators are considered. Consistency of the translation invariant version of the ALS estimator is proved. The similarity invariance of the ALS2 estimator is shown. The conditions for consistency of the ALS2 estimator are relaxed compared with those in Kukush et al. (2004), Comput. Statist. Data Anal., Vol. 47, No. 1, 123--147.}, keywords = {Adjusted least squares; Conic fitting; Consistent estimator; Ellipsoid fitting; Quadratic measurement error model}, doi = {10.1016/j.jmva.2005.12.003}, pdf = {http://eprints.soton.ac.uk/263303/1/shklyar_published.pdf} }
@article{MV05c, author = {I. Markovsky and S. {Van Huffel}}, title = {Left vs right representations for solving weighted low rank approximation problems}, journal = {Linear Algebra Appl.}, volume = {422}, pages = {540--552}, year = {2007}, abstract = {The weighted low-rank approximation problem in general has no analytical solution in terms of the singular value decomposition and is solved numerically using optimization methods. Four representations of the rank constraint that turn the abstract problem formulation into parameter optimization problems are presented. The parameter optimization problem is partially solved analytically, which results in an equivalent quadratically constrained problem. A commonly used re-parameterization avoids the quadratic constraint and makes the equivalent problem a nonlinear least squares problem; however, it might be necessary to change this re-parameterization during the iteration process. It is shown how the cost function can be computed efficiently in two special cases: row-wise and column-wise weighting.}, keywords = {Weighted low-rank approximation, total least squares, parameter optimization.}, doi = {10.1016/j.laa.2006.11.012}, pdf = {http://eprints.soton.ac.uk/263421/1/wtls_note_proof.pdf}, software = {https://imarkovs.github.io/software/wtls.tgz} }
@article{SMV06, author = {M. Schuermans and I. Markovsky and S. {Van Huffel}}, title = {An adapted version of the element-wise weighted {TLS} method for applications in chemometrics}, journal = {Chemometrics and Intelligent Laboratory Systems}, year = {2007}, volume = {85}, number = {1}, pages = {40--46}, abstract = {The Maximum Likelihood PCA (MLPCA) method has been devised in chemometrics as a generalization of the well-known PCA method in order to derive consistent estimators in the presence of errors with known error distribution. For similar reasons, the Total Least Squares (TLS) method has been generalized in the field of computational mathematics and engineering to maintain consistency of the parameter estimates in linear models with measurement errors of known distribution. In a previous paper [M. Schuermans, I. Markovsky, P.D. Wentzell, S. Van Huffel, On the equivalence between total least squares and maximum likelihood PCA, Anal. Chim. Acta, 544 (2005), 254–267], the tight equivalences between MLPCA and Element-wise Weighted TLS (EW-TLS) have been explored. The purpose of this paper is to adapt the EW-TLS method in order to make it useful for problems in chemometrics. We will present a computationally efficient algorithm and compare this algorithm with the standard EW-TLS algorithm and the MLPCA algorithm in computation time and convergence behaviour on chemical data.}, keywords = {EW-TLS; MLPCA; Rank reduction; Measurement errors}, doi = {10.1016/j.chemolab.2006.04.003}, pdf = {http://eprints.soton.ac.uk/263422/1/mieke2.pdf} }
@article{MR07, author = {I. Markovsky and P. Rapisarda}, title = {Data-driven simulation and control}, journal = {Int. J. Contr.}, volume = {81}, number = {12}, pages = {1946--1959}, year = {2008}, abstract = {Classical linear time-invariant system simulation methods are based on a transfer function, impulse response, or input/state/output representation. We present a method for computing the response of a system to a given input and initial conditions directly from a trajectory of the system, without explicitly identifying the system from the data. Similarly to the classical approach for simulation, the classical approach for control is model-based: first a model representation is derived from given data of the plant and then a control law is synthesised using the model and the control specifications. We present an approach for computing a linear quadratic tracking control signal that circumvents the identification step. The results are derived assuming exact data and the simulated response or control input is constructed off-line.}, keywords = {simulation, data-driven control, output matching, linear quadratic tracking, system identification.}, doi = {10.1080/00207170801942170}, opturl = {https://eprints.soton.ac.uk/263423}, pdf = {http://eprints.soton.ac.uk/263423/1/ddctr.pdf} }
@article{M07, author = {I. Markovsky}, title = {Structured low-rank approximation and its applications}, journal = {Automatica}, year = {2008}, volume = {44}, number = {4}, pages = {891--909}, abstract = {Fitting data by a bounded complexity linear model is equivalent to low-rank approximation of a matrix constructed from the data. The data matrix being Hankel structured is equivalent to the existence of a linear time-invariant system that fits the data and the rank constraint is related to a bound on the model complexity. In the special case of fitting by a static model, the data matrix and its low-rank approximation are unstructured. We outline applications in system theory (approximate realization, model reduction, output error and errors-in-variables identification), signal processing (harmonic retrieval, sum-of-damped exponentials and finite impulse response modeling), and computer algebra (approximate common divisor). Algorithms based on heuristics and local optimization methods are presented. Generalizations of the low-rank approximation problem result from different approximation criteria (e.g., weighted norm) and constraints on the data matrix (e.g., nonnegativity). Related problems are rank minimization and structured pseudospectra.}, keywords = {Low-rank approximation, total least squares, system identification, errors-in-variables modeling, behaviors.}, software = {http://slra.github.io/}, doi = {10.1016/j.automatica.2007.09.011}, pdf = {http://eprints.soton.ac.uk/263379/2/slra_published.pdf} }
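% Annotation: a minimal sketch of the (Hankel) structured low-rank approximation problem underlying this paper, with generic norm $\|\cdot\|$ and rank bound $r$ (the specific choices are discussed in the paper):
% \[ \min_{\hat p}\ \| p - \hat p \| \quad\text{subject to}\quad \operatorname{rank} \mathcal{H}(\hat p) \le r, \]
% where $p$ is the given data, $\hat p$ its approximation, and $\mathcal{H}(\hat p)$ a Hankel matrix built from $\hat p$; the rank bound corresponds to the bound on the model complexity.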
@article{KMV07, author = {A. Kukush and I. Markovsky and S. {Van Huffel}}, title = {Estimation in a linear multivariate measurement error model with a change point in the data}, journal = {Comput. Statist. Data Anal.}, year = {2007}, volume = {52}, number = {2}, pages = {1167--1182}, abstract = {A linear multivariate measurement error model $AX=B$ is considered. The errors in $[ A \ B ]$ are row-wise finite dependent, and within each row, the errors may be correlated. Some of the columns may be observed without errors, and in addition the error covariance matrix may differ from row to row. The columns of the error matrix are united into two uncorrelated blocks, and in each block, the total covariance structure is supposed to be known up to a corresponding scalar factor. Moreover the row data are clustered into two groups, according to the behavior of the rows of true $A$ matrix. The change point is unknown and estimated in the paper. After that, based on the method of corrected objective function, strongly consistent estimators of the scalar factors and~$X$ are constructed, as the numbers of rows in the clusters tend to infinity. Since Toeplitz/Hankel structure is allowed, the results are applicable to system identification, with a change point in the input data.}, keywords = {Linear errors-in-variables model; Corrected objective function; Clustering; Dynamic errors-in-variables model; Consistent estimator.}, doi = {10.1016/j.csda.2007.06.010}, pdf = {http://eprints.soton.ac.uk/263885/1/noisevar_stat_new_final.pdf} }
@article{MN08, author = {I. Markovsky and M. Niranjan}, title = {Approximate low-rank factorization with structured factors}, journal = {Comput. Statist. Data Anal.}, volume = {54}, pages = {3411--3420}, year = {2008}, abstract = {An approximate rank revealing factorization problem with structure constraints on the normalized factors is considered. Examples of structure, motivated by an application in microarray data analysis, are sparsity, nonnegativity, periodicity, and smoothness. In general, the approximate rank revealing factorization problem is nonconvex. An alternating projections algorithm is developed, which is globally convergent to a locally optimal solution. Although the algorithm is developed for a specific application in microarray data analysis, the approach is applicable to other types of structure.}, keywords = {rank revealing factorization; numerical rank; low-rank approximation; maximum likelihood PCA; total least squares; errors-in-variables; microarray data.}, software = {http://eprints.soton.ac.uk/267440/2/factorize.tar}, doi = {10.1016/j.csda.2009.06.003}, url = {http://eprints.ecs.soton.ac.uk/17440/}, pdf = {http://eprints.soton.ac.uk/267440/1/factorize_rev.pdf} }
@article{MM08, author = {I. Markovsky and S. Mahmoodi}, title = {Least-squares contour alignment}, journal = {IEEE Signal Proc. Lett.}, volume = {16}, number = {1}, pages = {41--44}, year = {2009}, abstract = {The contour alignment problem, considered in this paper, is to compute the minimal distance, in a least squares sense, between two explicitly represented contours, specified by corresponding points, after arbitrary rotation, scaling, and translation of one of the contours. This is a constrained nonlinear optimization problem with respect to the translation, rotation and scaling parameters; however, it is transformed into an equivalent linear least squares problem by a nonlinear change of variables. Therefore, a global solution of the contour alignment problem can be computed efficiently. It is shown that a normalized minimum value of the cost function is invariant to ordering and affine transformation of the contours and can be used as a measure for the distance between the contours. A solution is proposed to the problem of finding a point correspondence between the contours.}, keywords = {Contour alignment, image registration, translation, rotation, scaling, affine invariance, least squares.}, software = {http://eprints.soton.ac.uk/266829/1/dist.tgz}, doi = {10.1109/LSP.2008.2008588}, pdf = {http://eprints.soton.ac.uk/266829/2/dist.pdf} }
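% Annotation: the linearizing change of variables mentioned in the abstract can be sketched in complex notation (a reconstruction; the paper's notation may differ). Identifying the planar contour points with complex numbers $p_j, q_j \in \mathbb{C}$, rotation by $\theta$, scaling by $s$, and translation by $b$ act as $p_j \mapsto a p_j + b$ with $a = s e^{i\theta}$, so that
% \[ \min_{s, \theta, b}\ \sum_j \big| s e^{i\theta} p_j + b - q_j \big|^2 = \min_{a, b \in \mathbb{C}}\ \sum_j \big| a p_j + b - q_j \big|^2, \]
% which is an ordinary linear least squares problem in the new variables $(a, b)$.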
@article{M08b, author = {I. Markovsky}, title = {Closed-loop data-driven simulation}, journal = {Int. J. Contr.}, year = {2010}, volume = {83}, number = {10}, pages = {2134--2139}, abstract = {Closed-loop data-driven simulation refers to the problem of finding the set of all responses of a closed-loop system to a given reference signal directly from an input/output trajectory of the plant and a representation of the controller. Conditions under which the problem has a solution are given and an algorithm for computing the solution is presented. The problem formulation and its solution are in the spirit of the deterministic subspace identification algorithms, i.e., in the theoretical analysis of the method, the data is assumed exact (noise free). The results have applications in data-driven control, e.g., testing a controller's performance directly from closed-loop data of the plant in feedback with a possibly different controller.}, keywords = {System identification; Subspace methods; Persistency of excitation; Data-driven simulation and control.}, software = {http://eprints.soton.ac.uk/266868/5/test_cdds.m}, doi = {10.1080/00207179.2010.508093}, url = {http://eprints.soton.ac.uk/266868/}, pdf = {http://eprints.soton.ac.uk/266868/1/unfalsified_control_rev3.pdf} }
@article{MSV09, author = {I. Markovsky and D. Sima and S. {Van Huffel}}, title = {Total least squares methods}, journal = {Wiley Interdisciplinary Reviews: Comput. Stat.}, year = {2010}, volume = {2}, number = {2}, pages = {212--217}, abstract = {Recent advances in total least squares approaches for solving various errors-in-variables modeling problems are reviewed, with emphasis on the following generalizations: 1) the use of weighted norms as a measure of the data perturbation size, capturing prior knowledge about uncertainty in the data; 2) the addition of constraints on the perturbation to preserve the structure of the data matrix, motivated by structured data matrices occurring in signal and image processing, systems and control, and computer algebra; 3) the use of regularization in the problem formulation, aiming at stabilizing the solution by decreasing the effect due to intrinsic ill-conditioning of certain problems.}, doi = {10.1002/wics.65}, pdf = {http://eprints.soton.ac.uk/267223/1/tls-review-published.pdf} }
@article{LMFR08, author = {F. Le and I. Markovsky and C. Freeman and E. Rogers}, title = {Identification of electrically stimulated muscle models of stroke patients}, journal = {Control Eng. Practice}, volume = {18}, number = {4}, pages = {396--407}, year = {2010}, abstract = {Despite significant recent interest in the identification of electrically stimulated muscle models, current methods are based on underlying models and identification techniques that make them unsuitable for use with subjects with incomplete paralysis. One consequence of this is that very few model-based controllers have been used in clinical trials. Motivated by one case where a model-based controller has been applied to the upper limb of stroke patients, and the modeling limitations that were encountered, this paper first undertakes a review of existing modeling techniques with particular emphasis on their limitations. A Hammerstein structure, already known in this area, is then selected, and a suitable identification procedure and set of excitation inputs are developed to address these shortcomings. The technique that is proposed to obtain the model parameters from measured data is a combination of two iterative schemes: the first of these has rapid convergence and is based on alternating least squares, and the second is a more complex method to further improve accuracy. Finally, experimental results are used to assess the efficacy of the overall approach.}, keywords = {System identification; Hammerstein system; Muscle models; Alternating least squares; Functional electrical stimulation}, doi = {10.1016/j.conengprac.2009.12.007}, pdf = {http://eprints.soton.ac.uk/268388/1/cepzoe.pdf} }
@article{M10, author = {I. Markovsky}, title = {Bibliography on total least squares and related methods}, journal = {Statistics and Its Interface}, year = {2010}, volume = {3}, pages = {329--334}, abstract = {The class of total least squares methods has been growing since the basic total least squares method was proposed by Golub and Van Loan in the 70's. Efficient and robust computational algorithms were developed and properties of the resulting estimators were established in the errors-in-variables setting. At the same time the developed methods were applied in diverse areas, leading to broad literature on the subject. This paper collects the main references and guides the reader in finding details about the total least squares methods and their applications. In addition, the paper comments on similarities and differences between the total least squares and the singular spectrum analysis methods.}, keywords = {total least squares, errors-in-variables, singular spectrum analysis}, pdf = {http://eprints.soton.ac.uk/271040/1/tls-bib.pdf} }
@article{complex-ls, author = {I. Markovsky}, title = {On the complex least squares problem with constrained phase}, journal = {SIAM J. Matrix Anal. Appl.}, year = {2011}, volume = {32}, number = {3}, pages = {987--992}, abstract = {The problem of solving approximately in the least squares sense an overdetermined linear system of equations with complex valued coefficients is considered, where the elements of the solution vector are constrained to have the same phase. A direct solution to this problem is given in [Linear Algebra and Its Applications, Vol.\ 433, pp.~1719--1721]. An alternative direct solution that reduces the problem to a generalized eigenvalue problem is derived in this paper. The new solution is related to generalized low-rank matrix approximation and makes it possible to use existing robust and efficient algorithms.}, keywords = {Linear system of equations, Phase constraint, Low-rank approximation, Total least squares.}, software = {http://eprints.soton.ac.uk/272534/5/complex-ls-code.tar}, doi = {10.1137/110826497}, pdf = {http://eprints.soton.ac.uk/272534/1/complex-ls.pdf} }
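% Annotation: a sketch of the problem statement, reconstructed from the abstract (the dimensions are assumptions of this sketch): given $A \in \mathbb{C}^{m \times n}$ and $b \in \mathbb{C}^{m}$, find a solution vector whose elements share a common phase $\phi$, i.e.,
% \[ \min_{x \in \mathbb{R}^n,\ \phi \in [0, 2\pi)}\ \big\| A\, e^{i\phi} x - b \big\|_2. \]
% The paper shows how this problem reduces to a generalized eigenvalue problem.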
@article{LMFR12, author = {F. Le and I. Markovsky and C. Freeman and E. Rogers}, title = {Recursive identification of {H}ammerstein systems with application to electrically stimulated muscle}, journal = {Control Eng. Practice}, volume = {20}, number = {4}, pages = {386--396}, year = {2012}, abstract = {Two methods for recursive identification of Hammerstein systems are presented. In the first method, a recursive least squares algorithm is applied to an overparameterized representation of the Hammerstein model and a rank-1 approximation is used to recover the linear and nonlinear parameters from the estimated overparameterized representation. In the second method, the linear and nonlinear parameters are recursively estimated in an alternate manner. Numerical examples with simulated data and experimental data from human muscles show the superiority of the second method.}, keywords = {recursive identification; Hammerstein system; muscle model; functional electrical stimulation.}, doi = {10.1016/j.conengprac.2011.08.001}, pdf = {http://eprints.soton.ac.uk/271583/1/zoe2-published.pdf} }
@article{slra-ext, author = {I. Markovsky and K. Usevich}, title = {Structured low-rank approximation with missing data}, year = {2013}, journal = {SIAM J. Matrix Anal. Appl.}, volume = {34}, number = {2}, pages = {814--830}, abstract = {We consider low-rank approximation of affinely structured matrices with missing elements. The method proposed is based on reformulation of the problem as inner and outer optimization. The inner minimization is a singular linear least-norm problem and admits an analytic solution. The outer problem is a nonlinear least squares problem and is solved by local optimization methods: minimization subject to quadratic equality constraints and unconstrained minimization with regularized cost function. The method is generalized to weighted low-rank approximation with missing values and is illustrated on approximate low-rank matrix completion, system identification, and data-driven simulation problems. An extended version of the paper is a literate program, implementing the method and reproducing the presented results.}, keywords = {low-rank approximation, missing data, variable projection, system identification, approximate matrix completion.}, doi = {10.1137/120883050}, software = {http://slra.github.io/software-slra.html}, pdf = {https://imarkovs.github.io/publications/slra-ext.pdf} }
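% Annotation: the inner and outer optimization mentioned in the abstract can be sketched as follows, using a kernel parameter $R$ and omitting the weights and the treatment of missing elements (both handled in the paper):
% \[ \min_{R \text{ full row rank}}\ f(R), \qquad f(R) := \min_{\hat p}\ \| p - \hat p \|^2 \ \text{ subject to } \ R\, \mathcal{S}(\hat p) = 0, \]
% where $\mathcal{S}(\hat p)$ is the affinely structured matrix; the inner minimization is a (singular) linear least-norm problem with an analytic solution, and the outer minimization over $R$ is a nonlinear least squares problem.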
@article{ident, author = {I. Markovsky}, title = {A software package for system identification in the behavioral setting}, year = {2013}, journal = {Control Eng. Practice}, volume = {21}, optnumber = {10}, pages = {1422--1436}, abstract = {An identification problem with no a priori separation of the variables into inputs and outputs and representation invariant approximation criterion is considered. The model class consists of linear time-invariant systems of bounded complexity and the approximation criterion is the minimum of a weighted 2-norm distance between the given time series and a time series that is consistent with the model. The problem is equivalent to and is solved as a mosaic-Hankel structured low-rank approximation problem. Software implementing the approach is developed and tested on benchmark problems. Additional nonstandard features of the software are specification of exact and missing variables and identification from multiple experiments.}, keywords = {system identification; model reduction; behavioral approach; missing data; low-rank approximation; total least squares; reproducible research; DAISY.}, doi = {10.1016/j.conengprac.2013.06.010}, software = {http://slra.github.io/software-ident.html}, pdf = {https://imarkovs.github.io/publications/ident.pdf} }
@article{slra-efficient, author = {K. Usevich and I. Markovsky}, title = {Variable projection for affinely structured low-rank approximation in weighted 2-norms}, journal = {J. Comput. Appl. Math.}, year = {2014}, volume = {272}, pages = {430--448}, doi = {10.1016/j.cam.2013.04.034}, abstract = {The structured low-rank approximation problem for general affine structures, weighted 2-norms and fixed elements is considered. The variable projection principle is used to reduce the dimensionality of the optimization problem. Algorithms for evaluation of the cost function, the gradient and an approximation of the Hessian are developed. For $m\times n$ mosaic Hankel matrices the algorithms have complexity $O(m^2n)$.}, keywords = {Structured low-rank approximation; Variable projection; Mosaic Hankel matrices; Weighted 2-norm; Fixed elements; Computational complexity}, pdf = {http://arxiv.org/pdf/1211.3938v2}, software = {http://slra.github.io/software-slra.html} }
@article{slra-software, author = {I. Markovsky and K. Usevich}, title = {Software for weighted structured low-rank approximation}, volume = {256}, pages = {278--292}, year = {2014}, journal = {J. Comput. Appl. Math.}, abstract = {A software package is presented that computes locally optimal solutions to low-rank approximation problems with the following features: \begin{itemize} \item {\em mosaic Hankel structure\/} constraint on the approximating matrix, \item {\em weighted 2-norm\/} approximation criterion, \item {\em fixed elements\/} in the approximating matrix, \item {\em missing elements\/} in the data matrix, and \item {\em linear constraints\/} on an approximating matrix's left kernel basis. \end{itemize} It implements a variable projection type algorithm and allows the user to choose standard local optimization methods for the solution of the parameter optimization problem. For an $m\times n$ data matrix, with $n>m$, the computational complexity of the cost function and derivative evaluation is~$O(m^2n)$. The package is suitable for applications with $n\gg m$. In statistical estimation and data modeling---the main application areas of the package---$n\gg m$ corresponds to modeling of large amount of data by a low-complexity model. Performance results on benchmark system identification problems from the database DAISY and approximate common divisor problems are presented.}, doi = {10.1016/j.cam.2013.07.048}, pdf = {https://imarkovs.github.io/publications/slra.pdf}, software = {http://slra.github.io/software-slra.html} }
@article{overview, author = {I. Markovsky}, title = {Recent progress on variable projection methods for structured low-rank approximation}, volume = {96PB}, pages = {406--419}, year = {2014}, journal = {Signal Processing}, doi = {10.1016/j.sigpro.2013.09.021}, abstract = {Rank deficiency of a data matrix is equivalent to the existence of an exact linear model for the data. For the purpose of linear static modeling, the matrix is unstructured and the corresponding modeling problem is an approximation of the matrix by another matrix of a lower rank. In the context of linear time-invariant dynamic models, the appropriate data matrix is Hankel and the corresponding modeling problem becomes structured low-rank approximation. Low-rank approximation has applications in system identification, signal processing, machine learning, and computer algebra, where different types of structure and constraints occur. This paper gives an overview of recent progress in efficient local optimization algorithms for solving weighted mosaic-Hankel structured low-rank approximation problems. In addition, the data matrix may have missing elements and elements may be specified as exact. The described algorithms are implemented in a publicly available software package. Their application to system identification, approximate common divisor, and data-driven simulation problems is described in this paper and is illustrated by reproducible simulation examples. As a data modeling paradigm the low-rank approximation setting is closely related to the behavioral approach in systems and control, total least squares, errors-in-variables modeling, principal component analysis, and rank minimization.}, software = {http://slra.github.io/software-slra.html}, pdf = {https://imarkovs.github.io/publications/overview.pdf} }
@article{UM12-aut, author = {K. Usevich and I. Markovsky}, title = {Optimization on a {G}rassmann manifold with application to system identification}, volume = {50}, pages = {1656--1662}, year = {2014}, journal = {Automatica}, doi = {10.1016/j.automatica.2014.04.010}, abstract = {In this paper, we consider the problem of optimization of a cost function on a Grassmann manifold. This problem appears in system identification in the behavioral setting, which is a structured low-rank approximation problem. We develop a new method for local optimization on the Grassmann manifold with switching coordinate charts. This method reduces the optimization problem on the manifold to an optimization problem in a bounded domain of a Euclidean space. Our experiments show that this method is competitive with state-of-the-art retraction-based methods. Compared to retraction-based methods, the proposed method makes it easy to incorporate an arbitrary optimization method for solving the optimization subproblem in the Euclidean space.}, keywords = {system identification, over-parameterized models, Grassmann manifold, coordinate charts, structured low-rank approximation, optimization}, software = {http://slra.github.io/software-slra.html}, pdf = {http://www.gipsa-lab.grenoble-inp.fr/~konstantin.usevich/_preprints/Usevich.Markovsky14A-Optimization.pdf} }
@article{pltv, author = {I. Markovsky and J. Goos and K. Usevich and R. Pintelon}, title = {Realization and identification of autonomous linear periodically time-varying systems}, volume = {50}, pages = {1632--1640}, doi = {10.1016/j.automatica.2014.04.003}, year = {2014}, journal = {Automatica}, abstract = {Subsampling of a linear periodically time-varying system results in a collection of linear time-invariant systems with common poles. This key fact, known as ``lifting'', is used in a two step realization method. The first step is the realization of the time-invariant dynamics (the lifted system). Computationally, this step is a rank-revealing factorization of a block-Hankel matrix. The second step derives a state space representation of the periodic time-varying system. It is shown that no extra computations are required in the second step. The computational complexity of the overall method is therefore equal to the complexity for the realization of the lifted system. A modification of the realization method is proposed, which makes the complexity independent of the parameter variation period. Replacing the rank-revealing factorization in the realization algorithm by structured low-rank approximation yields a maximum likelihood identification method. Existing methods for structured low-rank approximation are used to efficiently identify linear periodically time-varying systems. These methods can deal with missing data.}, keywords = {linear periodically time-varying systems, lifting, realization, Kung's algorithm, Hankel low-rank approximation, maximum likelihood estimation.}, software = {https://imarkovs.github.io/software/pltv-code.tgz}, pdf = {https://imarkovs.github.io/publications/pltv-rev3.pdf} }
@article{rslra, author = {M. Ishteva and K. Usevich and I. Markovsky}, title = {Factorization approach to structured low-rank approximation with applications}, year = {2014}, journal = {SIAM J. Matrix Anal. Appl.}, volume = {35}, number = {3}, pages = {1180--1204}, doi = {10.1137/130931655}, keywords = {low-rank approximation, affine structure, regularization, system identification, approximate greatest common divisor}, abstract = {We consider the problem of approximating an affinely structured matrix, for example a Hankel matrix, by a low-rank matrix with the same structure. This problem occurs in system identification, signal processing and computer algebra, among others. We impose the low-rank by modeling the approximation as a product of two factors with reduced dimension. The structure of the low-rank model is enforced by introducing a regularization term in the objective function. The proposed local optimization algorithm is able to solve the weighted structured low-rank approximation problem, as well as to deal with the cases of missing or fixed elements. In contrast to approaches based on kernel representations (in linear algebraic sense), the proposed algorithm is designed to address the case of small targeted rank. We compare it to existing approaches on numerical examples of system identification, approximate greatest common divisor problem, and symmetric tensor decomposition and demonstrate its consistently good performance.}, software = {http://slra.github.io/software-slra.html}, pdf = {https://imarkovs.github.io/publications/rslra.pdf} }
@article{rgtls, author = {S. Rhode and K. Usevich and I. Markovsky and F. Gauterin}, title = {A recursive restricted total least-squares algorithm}, journal = {IEEE Trans. Signal Process.}, volume = {62}, number = {21}, pages = {5652--5662}, doi = {10.1109/TSP.2014.2350959}, keywords = {total least squares, generalized total least squares, restricted total least squares (RTLS), recursive estimation, subspace tracking, system identification}, abstract = {We show that the generalized total least squares problem with a singular noise covariance matrix is equivalent to the restricted total least squares problem and propose a recursive method for its numerical solution. The method is based on the generalized inverse iteration. The estimation error covariance matrix and the estimated augmented correction are also characterized and computed recursively. The algorithm is cheap to compute and is suitable for online implementation. Simulation results in least squares, data least squares, total least squares, and restricted total least squares noise scenarios show fast convergence of the parameter estimates to their optimal values obtained by corresponding batch algorithms.}, year = {2014}, pdf = {https://imarkovs.github.io/publications/rgtls.pdf}, software = {http://ieeexplore.ieee.org/xpl/abstractMultimedia.jsp?arnumber=6882213} }
@article{sensor-ieee, author = {I. Markovsky}, title = {Comparison of adaptive and model-free methods for dynamic measurement}, journal = {IEEE Signal Proc. Lett.}, year = {2015}, volume = {22}, number = {8}, pages = {1094--1097}, doi = {10.1109/LSP.2014.2388369}, abstract = {Dynamic measurement aims to improve the speed and accuracy characteristics of measurement devices by signal processing. State-of-the-art dynamic measurement methods are model-based adaptive methods, i.e., 1) they estimate model parameters in real-time and 2) based on the identified model perform model-based signal processing. The proposed model-free method belongs to the class of the subspace identification methods. It computes directly the quantity of interest without an explicit parameter estimation. This allows efficient computation as well as applicability to general high order multivariable processes.}, keywords = {subspace methods, total least squares, adaptive filtering, model-free signal processing.}, software = {https://imarkovs.github.io/software/sensor-ieee-code.tar}, pdf = {https://imarkovs.github.io/publications/sensor-ieee.pdf} }
@article{slra-consistency, author = {I. Markovsky and R. Pintelon}, title = {Identification of linear time-invariant systems from multiple experiments}, journal = {IEEE Trans. Signal Process.}, volume = {63}, number = {13}, pages = {3549--3554}, doi = {10.1109/TSP.2015.2428218}, year = {2015}, abstract = {A standard assumption for consistent estimation in the errors-in-variables setting is persistency of excitation of the noise free input signal. We relax this assumption by considering data from multiple experiments. Consistency is obtained asymptotically as the number of experiments tends to infinity. The main theoretical and algorithmic difficulties are related to the growing number of to-be-estimated initial conditions. The method proposed in the paper is based on analytic elimination of the initial conditions and optimization over the remaining parameters. The resulting estimator is consistent; however, achieving asymptotic efficiency remains an open problem.}, keywords = {maximum likelihood system identification, sum-of-damped exponentials modeling, consistency, structured low-rank approximation}, pdf = {https://imarkovs.github.io/papers/slra-consistency.pdf} }
@article{als-kdu, author = {K. Usevich and I. Markovsky}, title = {Adjusted least squares fitting of algebraic hypersurfaces}, journal = {Linear Algebra Appl.}, volume = {502}, pages = {243--274}, doi = {10.1016/j.laa.2015.07.023}, year = {2016}, abstract = {We consider the problem of fitting a set of points in Euclidean space by an algebraic hypersurface. We assume that points on a true hypersurface, described by a polynomial equation, are corrupted by zero mean independent Gaussian noise, and we estimate the coefficients of the true polynomial equation. The adjusted least squares estimator accounts for the bias present in the ordinary least squares estimator. The adjusted least squares estimator is based on constructing a quasi-Hankel matrix, which is a bias-corrected matrix of moments. For the case of unknown noise variance, the estimator is defined as a solution of a polynomial eigenvalue problem. In this paper, we present new results on invariance properties of the adjusted least squares estimator and an improved algorithm for computing the estimator for an arbitrary set of monomials in the polynomial equation.}, keywords = {hypersurface fitting; curve fitting; statistical estimation, errors-in-variables; Quasi-Hankel matrix; Hermite polynomials; affine invariance; subspace clustering}, pdf = {http://arxiv.org/abs/1412.2291} }
@article{sensor-cep, author = {I. Markovsky}, title = {An application of system identification in metrology}, journal = {Control Eng. Practice}, volume = {43}, pages = {85--93}, year = {2015}, doi = {10.1016/j.conengprac.2015.07.001}, abstract = {Dynamic measurement refers to problems in metrology aiming at modification of characteristics of measurement devices by signal processing. Prototypical dynamic measurement problems, used in the paper as illustrative examples, are speeding up thermometers and weight scales. The paper presents a system theoretic formalization of the dynamic measurement problem as an input estimation problem for a system with step input. If the process dynamics is a priori known, the dynamic measurement problem is equivalent to a state estimation problem and can be solved efficiently in the linear time-invariant case by the Kalman filter. In the case of unknown process dynamics, the dynamic measurement problem can be solved by adaptive filtering methods. A topic of current research, called data-driven dynamic measurement, is development of methods that compute the measurement quantity without explicitly identifying the process dynamics.}, software = {https://imarkovs.github.io/software/sensor-cep-experiments.html}, keywords = {system identification, behavioral approach, Kalman filtering, metrology, reproducible research}, pdf = {https://imarkovs.github.io/publications/sensor-cep.pdf} }
@article{slra-agcd, author = {K. Usevich and I. Markovsky}, title = {Variable projection methods for approximate (greatest) common divisor computations}, year = {2017}, pages = {176--198}, volume = {681}, doi = {10.1016/j.tcs.2017.03.028}, abstract = {We consider the problem of finding for a given $N$-tuple of polynomials the closest $N$-tuple that has a common divisor of degree at least $d$. Extended weighted Euclidean semi-norm of coefficients is used as a measure of closeness. Two equivalent formulations of the problem are considered: (i) direct optimization over common divisors and cofactors, and (ii) Sylvester low-rank approximation. We use the duality between least-squares and least-norm problems to show that (i) and (ii) are closely related to mosaic Hankel low-rank approximation. This allows us to apply recent results on complexity and accuracy of computations for mosaic Hankel low-rank approximation. We develop optimization methods based on the variable projection principle. These methods have linear complexity in the degrees of the polynomials if either $d$ is small or $d$ is of the same order as the degrees of the polynomials. We provide a software implementation that is based on a software package for structured low-rank approximation.}, journal = {Theoretical Computer Science}, pdf = {http://arxiv.org/pdf/1304.6962v1} }
@article{jan, author = {I. Markovsky}, title = {On the most powerful unfalsified model for data with missing values}, year = {2016}, volume = {95}, pages = {53--61}, journal = {Systems \& Control Lett.}, abstract = {The notion of the most powerful unfalsified model plays a key role in system identification. Since its introduction in the mid 80's, many methods have been developed for its numerical computation. All currently existing methods, however, assume that the given data is a \emph{complete} trajectory of the system. Motivated by the practical issues of data corruption due to failing sensors, transmission lines, or storage devices, we study the problem of computing the most powerful unfalsified model from data with missing values. We do not make assumptions about the nature or pattern of the missing values apart from the basic one that they are a part of a trajectory of a linear time-invariant system. The identification problem with missing data is equivalent to a Hankel structured low-rank matrix completion problem. The method proposed constructs rank deficient complete submatrices of the incomplete Hankel matrix. Under specified conditions the kernels of the submatrices form a nonminimal kernel representation of the data generating system. The final step of the algorithm is reduction of the nonminimal kernel representation to a minimal one. Apart from its practical relevance in identification, missing data is a useful concept in systems and control. Classic problems, such as simulation, filtering, and tracking control can be viewed as missing data estimation problems for a given system. The corresponding identification problems with missing data are ``data-driven'' equivalents of the classical simulation, filtering, and tracking control problems.}, keywords = {most powerful unfalsified model, exact system identification, subspace methods, missing data, low-rank matrix completion.}, doi = {10.1016/j.sysconle.2015.12.012}, software = {https://imarkovs.github.io/software/mpum-md.tar}, pdf = {https://imarkovs.github.io/publications/jan.pdf} }
@article{ddsp, author = {I. Markovsky}, title = {A missing data approach to data-driven filtering and control}, year = {2017}, journal = {IEEE Trans. Automat. Contr.}, pages = {1972--1978}, volume = {62}, issue = {4}, issn = {1558-2523}, abstract = {In signal processing as well as in control and other mathematical engineering areas, it is common to use a model-based approach, which splits the problem into two steps: 1) model identification and 2) model-based design. Despite its success, the model-based approach has the shortcoming that the design objective is not taken into account at the identification step, i.e., the model is not optimized for its intended use. In this paper, we propose an approach for data processing, called data-driven signal processing, that combines the identification and the model-based design into one joint problem. The to-be-computed signal is modeled as a missing part of a trajectory of the (unknown) data generating system. Subsequently, the missing data estimation problem is reformulated as a mosaic-Hankel structured matrix low-rank approximation and completion problem. A local optimization method, based on the variable projection principle, is used for the numerical solution of the matrix completion/approximation problem. The missing data estimation approach for data-driven signal processing and the local optimization method for its implementation in practice are illustrated on examples of state estimation, filtering/smoothing, and prediction. Development of fast algorithms with provable properties in the presence of measurement noise and disturbances will make the matrix completion approach for data-driven signal processing a practically feasible alternative to the model-based methods.}, keywords = {data-driven, Kalman filtering, structured low-rank approximation, missing data, matrix completion.}, software = {https://imarkovs.github.io/software/ddsp.tar}, pdf = {https://imarkovs.github.io/publications/ddsp.pdf}, doi = {10.1109/TAC.2016.2591178} }
@article{pk, author = {I. Markovsky and G. Mercère}, title = {Subspace identification with constraints on the impulse response}, year = {2017}, volume = {90}, issue = {8}, pages = {1728--1735}, journal = {Int. J. Contr.}, abstract = {Subspace identification methods may produce unreliable model estimates when a small number of noisy measurements are available. In such cases, the accuracy of the estimated parameters can be improved by using prior knowledge about the system. The prior knowledge considered in this paper is constraints on the impulse response. It is motivated by availability of information about the steady-state gain, overshoot, and rise time of the system, which in turn can be expressed as constraints on the impulse response. The method proposed has two steps: 1) estimation of the impulse response with linear equality and inequality constraints, and 2) realization of the estimated impulse response. The problem in step 1 is shown to be a convex quadratic programming problem. In the case of prior knowledge expressed as equality constraints, the problem in step 1 admits a closed-form solution. In the general case of equality and inequality constraints, the solution is computed by standard numerical optimization methods. We illustrate the performance of the method on a mass-spring-damper system.}, keywords = {system identification, subspace methods, prior knowledge, behavioral approach.}, software = {https://codeocean.com/2017/10/12/system-identification-with-prior-knowledge-on-the-impulse-response/code}, pdf = {https://imarkovs.github.io/publications/ijc-rev.pdf}, doi = {10.1080/00207179.2016.1219922} }
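The closed-form solution mentioned in the abstract above (step 1 with equality constraints only) is an instance of equality-constrained least squares. The following minimal sketch of that generic building block uses hypothetical matrices A, b (impulse-response fitting) and C, d (prior-knowledge constraints); it is not the paper's specific construction, which also handles inequality constraints by quadratic programming.

    import numpy as np

    def ls_with_equality_constraints(A, b, C, d):
        # minimize ||A x - b||_2^2 subject to C x = d, via the KKT system
        # [A'A  C'; C  0] [x; mu] = [A'b; d]  (mu is a scaled Lagrange multiplier)
        n, p = A.shape[1], C.shape[0]
        K = np.block([[A.T @ A, C.T], [C, np.zeros((p, p))]])
        rhs = np.concatenate([A.T @ b, d])
        return np.linalg.solve(K, rhs)[:n]   # constrained estimate of x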
@article{dist-uncontr, author = {N. Guglielmi and I. Markovsky}, title = {An {ODE} based method for computing the distance of co-prime polynomials to common divisibility}, volume = {55}, issue = {3}, pages = {1456--1482}, year = {2017}, doi = {10.1137/15M1018265}, journal = {SIAM Journal on Numerical Analysis}, abstract = {The problem of computing the distance of two real coprime polynomials to the set of polynomials with a nontrivial greatest common divisor (GCD) appears in computer algebra, signal processing, and control theory. It has been studied in the literature under the names approximate common divisor, $\varepsilon$-GCD, and distance to uncontrollability. Existing solution methods use different types of local optimization methods and require a user-defined initial approximation. In this paper, we propose a new method that allows us to include constraints on the coefficients of the polynomials. Moreover, the method proposed in the paper is more robust to the initial approximation than Newton-type optimization methods available in the literature. Our approach consists of two steps: 1) reformulate the problem as the problem of determining the structured distance to singularity of an associated Sylvester matrix, and 2) integrate a system of ordinary differential equations, which describes the gradient associated with the functional to be minimized.}, keywords = {GCD, Sylvester matrix, structured pseudospectrum, structured low-rank approximation, ODEs on matrix manifolds, structured distance to singularity.}, pdf = {https://imarkovs.github.io/publications/dist-unctr.pdf}, software = {https://imarkovs.github.io/software/dist_unctr.tar} }
@article{agcd-ode, author = {A. Fazzi and N. Guglielmi and I. Markovsky}, title = {An {ODE} based method for computing the Approximate Greatest Common Divisor of polynomials}, volume = {81}, issue = {2}, pages = {719--740}, year = {2018}, journal = {Numerical Algorithms}, abstract = {Computing the greatest common divisor of a set of polynomials is a problem which plays an important role in different fields, such as linear systems, control, and network theory. In practice, the polynomials are obtained through measurements and computations, so that their coefficients are inexact. This poses the problem of computing an approximate common factor. We propose an improvement and a generalization of the method recently proposed in Guglielmi, N., Markovsky, I.: An ODE based method for computing the distance of coprime polynomials. SIAM J. Numer. Anal. 55, 1456–1482 (2017), which restates the problem as a (structured) distance to singularity of the Sylvester matrix. We generalize the algorithm in order to work with more than two polynomials and to compute an Approximate GCD (Greatest Common Divisor) of degree $k > 0$; moreover, we show that the algorithm becomes faster by replacing the eigenvalues by the singular values.}, keywords = {Sylvester matrix, iterative methods, Approximate GCD, polynomials}, pdf = {https://imarkovs.github.io/publications/agcd-ode.pdf}, doi = {10.1007/s11075-018-0569-0} }
@article{miaomiao, author = {M. Zhang and I. Markovsky and C. Schretter and J. D'hooge}, title = {Compressed Ultrasound Signal Reconstruction using a Low-rank and Joint-sparse Representation Model}, volume = {66}, issue = {7}, pages = {1232--1245}, year = {2019}, doi = {10.1109/TUFFC.2019.2915096}, journal = {IEEE Transactions on Ultrasonics, Ferroelectrics, and Frequency Control}, abstract = {With the introduction of very dense sensor arrays in ultrasound imaging, data transfer rate and data storage can become a bottleneck in ultrasound system design. To reduce the amount of sampled channel data, we propose a new approach based on the low-rank and joint-sparse model that allows us to explore the correlations between different ultrasound channels and transmissions. With this method, the minimum number of measurements at each channel can be lower than the sparsity in compressive sensing theory. The accuracy of reconstruction is less dependent on the sparse basis. An optimization algorithm, based on simultaneous direction method of multipliers, is proposed to efficiently solve the resulting optimization problem. Results on different datasets with different experimental settings show that the proposed method is better adapted to the ultrasound signals and can recover the image with fewer samples (e.g. 10\% of the samples), while maintaining adequate image quality.}, keywords = {compressive sensing, matrix completion, low-rank and joint-sparse model, ultrasound imaging}, pdf = {https://imarkovs.github.io/publications/miaomiao.pdf} }
@article{aut-wiener, author = {I. Markovsky}, title = {On the behavior of autonomous {W}iener systems}, year = {2019}, volume = {110}, optissue = {}, pages = {108601}, journal = {Automatica}, abstract = {Wiener systems are nonlinear dynamical systems, consisting of a linear dynamical system and a static nonlinear system in a series connection. Existing results for analysis and identification of Wiener systems assume zero initial conditions. In this paper, we consider the response of a Wiener system to initial conditions only, i.e., we consider autonomous Wiener systems. Our main result is a proof that an autonomous Wiener system with a polynomial nonlinearity is equivalent to a finite-dimensional linear system. The order of the equivalent linear system is $(n + d)$-choose-$d$ --- the number of combinations with repetitions of $d$ elements out of $n$ elements --- where $n$ is the order of the linear subsystem and $d$ is the degree of the nonlinearity. The relation between the eigenvalues of the equivalent linear system and the linear subsystem is given by a rank-1 factorization of a symmetric $d$-way tensor. As an application of the result, we outline a procedure for identification of autonomous Wiener systems.}, keywords = {Nonlinear system identification, Block-oriented models, Wiener system}, pdf = {https://imarkovs.github.io/publications/aut-wiener.pdf}, doi = {10.1016/j.automatica.2019.108601} }
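A small numerical check of the order formula quoted in the abstract above, with a hypothetical linear-subsystem order n and nonlinearity degree d:

    from math import comb

    n, d = 3, 2              # hypothetical: third-order linear part, quadratic nonlinearity
    print(comb(n + d, d))    # order of the equivalent linear system: (3+2)-choose-2 = 10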
@article{gustavo-tim, author = {G. Q. Carapia and I. Markovsky and R. Pintelon and P. Csurcsia and D. Verbeke}, title = {Experimental validation of a data-driven step input estimation method for dynamic measurements}, year = {2020}, volume = {69}, issue = {7}, pages = {4843--4851}, journal = {IEEE Transactions on Instrumentation and Measurement}, abstract = {Simultaneous fast and accurate measurement is still a challenging and active problem in metrology. A sensor is a dynamic system that produces a transient response. For fast measurements, the unknown input needs to be estimated using the sensor transient response. When a model of the sensor exists, standard compensation filter methods can be used to estimate the input. If a model is not available, either an adaptive filter is used or a sensor model is identified before the input estimation. Recently, a signal processing method was proposed to avoid the identification stage and estimate directly the value of a step input from the sensor response. This data-driven step input estimation method requires only the order of the sensor dynamics and the sensor static gain. To validate the data-driven step input estimation method, in this paper, the uncertainty of the input estimate is studied and illustrated on simulation and real-life weighing measurements. It was found that the predicted mean-squared error of the input estimate is close to an approximate Cramér-Rao lower bound for biased estimators.}, keywords = {Metrology, Dynamic measurement, Cramér-Rao lower bound, Data-driven signal processing}, doi = {10.1109/TIM.2019.2951865}, pdf = {https://imarkovs.github.io/publications/gustavo-tim.pdf} }
@article{gustavo-csda, author = {G. Q. Carapia and I. Markovsky and R. Pintelon and P. Csurcsia and D. Verbeke}, title = {Bias and covariance of the least squares estimate in a structured errors-in-variables problem}, year = {2020}, volume = {144}, optissue = {}, pages = {106893}, journal = {Comput. Statist. Data Anal.}, abstract = {A structured errors-in-variables (EIV) problem arising in metrology is studied. The observations of a sensor response are subject to perturbation. The input estimation from the transient response leads to a structured EIV problem. Total least squares (TLS) is a typical estimation method to solve EIV problems. The TLS estimator of an EIV problem is consistent, and can be computed efficiently when the perturbations have zero mean, and are independently and identically distributed (i.i.d.). If the perturbation is additionally Gaussian, the TLS solution coincides with maximum-likelihood (ML). However, the computational complexity of structured TLS and total ML prevents their real-time implementation. The least-squares (LS) estimator offers a suboptimal but simple recursive solution to structured EIV problems with correlation, but the statistical properties of the LS estimator are unknown. To know the LS estimate uncertainty in EIV problems, either structured or not, to provide confidence bounds for the estimation uncertainty, and to find the difference from the optimal solutions, the bias and variance of the LS estimates should be quantified. Expressions to predict the bias and variance of LS estimators applied to unstructured and structured EIV problems are derived. The predicted bias and variance quantify the statistical properties of the LS estimate and give an approximation of the uncertainty and the mean squared error for comparison to the Cramér-Rao lower bound of the structured EIV problem.}, keywords = {structured errors-in-variables problems, least-squares estimation, Cramér-Rao lower bound, statistical analysis, uncertainty assessment, Monte Carlo simulation}, doi = {10.1016/j.csda.2019.106893}, pdf = {https://imarkovs.github.io/publications/gustavo-csda.pdf} }
@article{gustavo-measurement, author = {G. Q. Carapia and I. Markovsky}, title = {Input parameters estimation from time-varying measurements}, year = {2020}, volume = {153}, optissue = {}, pages = {107418}, journal = {Measurement}, abstract = {A measurement is a dynamical process that aims to estimate the true value of a measurand. The measurand is the input that excites a sensor, and, as a consequence, the sensor output is a transient response. The main approach to estimate the input is applying the sensor transient response to another dynamical system. This dynamical system is designed by deconvolution to invert the sensor dynamics and compensate the sensor response. Digital signal processors enable an alternative approach to estimate the unknown input. There exists a data-driven subspace-based signal processing method that estimates a measurand, assuming it is constant during the measurement. To estimate the parameters of a measurand that varies at a constant rate, we extended the data-driven input estimation method to make it adaptive to the affine input. In this paper, we describe the proposed subspace signal processing method for the measurement of an affine measurand and compare its performance to a maximum-likelihood input estimation method and to an existing time-varying compensation filter. The subspace method is recursive and allows real-time implementations since it directly estimates the input without identifying a sensor model. The maximum-likelihood method is model-based and requires very high computational effort. In this form, the maximum-likelihood method cannot be implemented in real time; however, we used it as a reference to evaluate the subspace method and the time-varying compensation filter results. The effectiveness of the subspace method is validated in a simulation study with a time-varying sensor. The results show that the subspace method estimation has relative errors that are one order of magnitude smaller and converges two times faster than the compensation filter.}, keywords = {Affine input estimation, Subspace estimation method, Maximum-likelihood estimation method, Recursive least squares, Dynamic weighing}, doi = {10.1016/j.measurement.2019.107418}, pdf = {https://imarkovs.github.io/publications/gustavo-measurement.pdf} }
@article{cd-est, author = {I. Markovsky and T. Liu and A. Takeda}, title = {Data-driven structured noise filtering via common dynamics estimation}, year = {2020}, volume = {68}, issue = {1}, pages = {3064--3073}, journal = {IEEE Trans. Signal Process.}, abstract = {Classical signal from noise separation problems assume that the signal is a trajectory of a low-complexity linear time-invariant system and that the noise is a random process. In this paper, we generalize this classical setup to what we call data-driven structured noise filtering. In the new setup, the noise has two components: structured noise, which is also a trajectory of a low-complexity linear time-invariant system, and unstructured noise, which is a zero-mean white Gaussian process. The key assumption that makes the separation problem in the new setup well posed is that among several experiments, the signal's dynamics remains the same while the structured noise's dynamics varies. The data-driven structured noise filtering problem then becomes a problem of estimation of common linear time-invariant dynamics among several observed signals. We show that this latter problem is a structured low-rank approximation problem with multiple rank constraints and use a subspace identification approach for solving it. The resulting methods allow computationally efficient and numerically robust implementation and have the system theoretic interpretation of finding the intersection of autonomous linear time-invariant behaviors. Statistical analysis of the methods providing confidence bounds is a topic for future research.}, keywords = {Hankel structured low-rank approximation, Subspace system identification, Behavioral approach}, doi = {10.1109/TSP.2020.2993676}, pdf = {https://imarkovs.github.io/publications/cd-est.pdf}, software = {https://imarkovs.github.io/software/common-dynamics.tar} }
@article{ddctr-test, author = {V. Mishra and I. Markovsky and B. Grossmann}, title = {Data-Driven Tests for Controllability}, year = {2020}, volume = {5}, issue = {2}, pages = {517--522}, journal = {IEEE Control Systems Letters}, abstract = {The fundamental lemma due to Willems et al. plays an important role in system identification and data-driven control. One of the assumptions for the fundamental lemma is that the underlying linear time-invariant system is controllable. In this paper, the fundamental lemma is extended to address system identification for uncontrollable systems. Then, a data-driven algebraic test is derived to check whether the underlying system is controllable or not. An algorithm based on the singular value decomposition of a Hankel matrix constructed from the data is provided to implement the developed test. The algorithm has cubic computational cost. Examples are given to illustrate the theoretical results.}, doi = {10.1109/LCSYS.2020.3003770}, pdf = {https://imarkovs.github.io/publications/ddctr-test.pdf} }
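The test described in the abstract above is implemented via the singular value decomposition of a Hankel matrix built from the data. The sketch below shows only the generic building blocks (block-Hankel construction and a numerical-rank test based on the SVD) with hypothetical data w and depth L; the specific rank condition certifying controllability is derived in the paper and is not reproduced here.

    import numpy as np

    def block_hankel(w, L):
        # w: T-by-q data matrix (rows are time samples); returns the qL-by-(T-L+1) block-Hankel matrix
        T, q = w.shape
        return np.vstack([w[i:T - L + 1 + i, :].T for i in range(L)])

    def numerical_rank(M, tol=1e-8):
        s = np.linalg.svd(M, compute_uv=False)
        return int(np.sum(s > tol * s[0]))

    # hypothetical usage: w is a T-by-q trajectory of the system, L a user-chosen depth
    # r = numerical_rank(block_hankel(w, L))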
@article{cd-est-simax, author = {T. Liu and I. Markovsky and T.-K. Pong and A. Takeda}, title = {A hybrid penalty method for a class of optimization problems with multiple rank constraints}, year = {2020}, volume = {41}, issue = {3}, pages = {1260--1283}, journal = {SIAM J. Matrix Anal. Appl.}, abstract = {In this paper, we consider the problem of minimizing a smooth objective over multiple rank constraints on Hankel-structured matrices. This kind of problem arises in system identification, system theory and signal processing, where the rank constraints are typically ``hard constraints''. To solve these problems, we propose a hybrid penalty method that combines a penalty method with a post-processing scheme. Specifically, we solve the penalty subproblems until the penalty parameter reaches a given threshold, and then switch to a local alternating ``pseudo-projection'' method to further reduce constraint violation. Pseudo-projection is a generalization of the concept of projection. We show that a pseudo-projection onto a single low-rank Hankel-structured matrix constraint can be computed efficiently by existing software such as SLRA (Markovsky and Usevich, 2014), under mild assumptions. We also demonstrate how the penalty subproblems in the hybrid penalty method can be solved by pseudo-projection-based optimization methods, and then present some convergence results for our hybrid penalty method. Finally, the efficiency of our method is illustrated by numerical examples.}, keywords = {Hankel-structure, system identification, hybrid penalty method, pseudo-projection.}, pdf = {https://arxiv.org/pdf/1906.10396}, doi = {10.1137/19M1269919} }
@article{hankel-ode, author = {A. Fazzi and N. Guglielmi and I. Markovsky}, title = {A gradient system approach for {H}ankel structured low-rank approximation}, year = {2021}, volume = {623}, pages = {236--257}, journal = {Linear Algebra Appl.}, abstract = {Rank deficient Hankel matrices are at the core of several applications. However, in practice, the coefficients of these matrices are noisy due to, e.g., measurement errors and computational errors, so generically the involved matrices are full rank. This motivates the problem of Hankel structured low-rank approximation. Structured low-rank approximation problems, in general, do not have a global and efficient solution technique. In this paper we propose a local optimization approach based on a two-level iteration. Experimental results show that the proposed algorithm usually achieves good accuracy and shows a higher robustness with respect to the initial approximation, compared to alternative approaches.}, keywords = {Hankel matrix, low-rank approximation, gradient system, structured matrix perturbation}, doi = {10.1016/j.laa.2020.11.016}, pdf = {https://arxiv.org/abs/2002.06621v3} }
@article{set-exact-models, author = {V. Mishra and I. Markovsky}, title = {The Set of Linear Time-Invariant Unfalsified Models with Bounded Complexity is Affine}, year = {2021}, volume = {66}, issue = {9}, pages = {4432--4435}, journal = {IEEE Trans. Automat. Contr.}, abstract = {We consider exact system identification in the behavioral setting: given an exact (noise-free) finite time series, find the set of bounded complexity linear time-invariant systems that fit the data exactly. First, we modify the notion of the most powerful unfalsified model for the case of finite data by fixing the number of inputs and minimizing the order. Then, we give necessary and sufficient conditions for the existence and uniqueness of the most powerful unfalsified model. In this case, the true data generating system is identifiable and coincides with the most powerful unfalsified model. Finally, when the identifiability conditions are not satisfied, there are infinitely many exact models. We show that in this case the set of bounded complexity exact models is affine and every exact model is a sum of the most powerful unfalsified model and an autonomous model with bounded complexity.}, keywords = {Behaviors, exact system identification, Hankel matrix, most powerful unfalsified model, persistency of excitation.}, doi = {10.1109/TAC.2020.3046235}, pdf = {https://imarkovs.github.io/publications/set-exact-models.pdf} }
@article{agcd-matrix, author = {A. Fazzi and N. Guglielmi and I. Markovsky}, title = {Generalized algorithms for the approximate matrix polynomial {GCD} of reducing data uncertainties with application to {MIMO} system and control}, journal = {J. Comput. Appl. Math.}, volume = {393}, pages = {113499}, year = {2021}, abstract = {Computation of (approximate) polynomial common factors is an important problem in several fields of science, such as control theory and signal processing. While the problem has been widely studied for scalar polynomials, the scientific literature in the framework of matrix polynomials seems to be limited to the problem of exact greatest common divisor computation. In this paper, we generalize two algorithms from scalar to matrix polynomials. The first one is fast and simple. The second one is more accurate but computationally more expensive. We test the performances of the two algorithms and observe similar behavior to the one in the scalar case. Finally we describe an application to multi-input multi-output linear time-invariant dynamical systems.}, doi = {10.1016/j.cam.2021.113499}, keywords = {matrix polynomials, approximate GCD, subspace method, matrix ODE}, pdf = {https://arxiv.org/abs/1907.13101} }
@article{ddint, author = {I. Markovsky and F. Dörfler}, title = {Data-driven dynamic interpolation and approximation}, year = {2022}, journal = {Automatica}, volume = {135}, pages = {110008}, pdf = {https://imarkovs.github.io/publications/ddint.pdf}, abstract = {The behavioral system theory and in particular a result that became known as the ``fundamental lemma'' give the theoretical foundation for nonparametric representations of linear time-invariant systems based on Hankel matrices constructed from data. These ``data-driven'' representations led in turn to new system identification, signal processing, and control methods. This paper shows how the approach can be used further on for solving interpolation, extrapolation, and smoothing problems. The solution proposed and the resulting method are general---can deal simultaneously with missing, exact, and noisy data of multivariable systems---and simple---require only the solution of a linear system of equations. In the case of exact data, we provide conditions for existence and uniqueness of solution. In the case of noisy data, we propose an approximation procedure based on $\ell_1$-norm regularization and validate its performance on real-life datasets. The method proposed outperforms model-based approaches. Procedures used in subspace identification, such as computation of a set of free responses and zero-initial conditions responses, are recovered as special cases of the data-driven interpolation method in the paper. The results have application in missing data estimation and trajectory planning. They open a practical computational way of doing system theory and signal processing directly from data without identification of a transfer function or state-space system representation.}, keywords = {behavioral approach, system identification, data-driven methods, dynamic interpolation, missing data estimation, smoothing}, doi = {10.1016/j.automatica.2021.110008} }
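The claim in the abstract above that the method requires only the solution of a linear system of equations can be illustrated by a minimal least-squares sketch: the columns of a depth-L block-Hankel matrix of exact data span the length-L trajectories of the system, so a partially specified trajectory is completed by matching its specified samples and reading off the remaining ones. The helper block_hankel, the variable names, and the plain least-squares step are assumptions made for illustration; for noisy data the paper instead uses $\ell_1$-norm regularization.

    import numpy as np

    def block_hankel(w, L):
        # w: T-by-q data matrix; returns the qL-by-(T-L+1) block-Hankel matrix of depth L
        T, q = w.shape
        return np.vstack([w[i:T - L + 1 + i, :].T for i in range(L)])

    def dd_interpolate(wd, w_partial, known, L):
        # wd: T-by-q exact data of the unknown system; w_partial: L-by-q trajectory with
        # arbitrary values at the missing samples; known: boolean L-by-q mask of given samples
        H = block_hankel(wd, L)
        mask = known.reshape(-1)                          # same row ordering as H
        g, *_ = np.linalg.lstsq(H[mask, :], w_partial.reshape(-1)[mask], rcond=None)
        return (H @ g).reshape(L, wd.shape[1])            # completed length-L trajectory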
@article{bias-correction-vandermonde-matrix, author = {A. Fazzi and A. Kukush and I. Markovsky}, title = {Bias correction for {V}andermonde low-rank approximation}, journal = {Econometrics and Statistics}, volume = {31}, optissue = {}, pages = {38--48}, year = {2024}, abstract = {The low-rank approximation problem, that is, the problem of approximating a given matrix with a matrix of lower rank, appears in many scientific fields. In some applications the given matrix is structured and the approximation is required to have the same structure. Examples of linear structures are Hankel, Toeplitz, and Sylvester. Currently, there are only a few results for nonlinearly structured low-rank approximation problems. The problem of Vandermonde structured low-rank approximation is considered. The high condition number of the Vandermonde matrix, in combination with the noise in the data, makes the problem challenging. A numerical method based on a bias correction procedure is proposed and its properties are shown. The performance of the method is illustrated by numerical results.}, doi = {10.1016/j.ecosta.2021.09.001}, keywords = {Vandermonde matrix, bias removal, adjusted least squares, structured matrix perturbation}, optpdf = {} }
@article{overview-ddctr, author = {I. Markovsky and F. Dörfler}, title = {Behavioral systems theory in data-driven analysis, signal processing, and control}, journal = {Annual Reviews in Control}, volume = {52}, pages = {42--64}, year = {2021}, pdf = {https://imarkovs.github.io/publications/overview-ddctr.pdf}, opteprint = {https://imarkovs.github.io/publications/overview-ddctr.pdf}, abstract = {The behavioral approach to systems theory, put forward 40 years ago by Jan C. Willems, remained until recently an esoteric niche of research. The renewed interest in recent years is due to its unique suitability for the newly emerged data-driven paradigm, specifically the representation-free perspective on dynamical systems as sets of trajectories. A result derived in the behavioral setting that became known as the fundamental lemma started a new class of subspace-type data-driven methods. The fundamental lemma gives conditions for a non-parametric representation of a linear time-invariant system by the image of a Hankel matrix constructed from raw time series data. This paper reviews the fundamental lemma, its generalizations, and related data-driven analysis, signal processing, and control methods. A prototypical signal processing problem, reviewed in the paper, is missing data estimation. It includes simulation, state estimation, and output tracking control as special cases. The direct data-driven control methods using the fundamental lemma and the non-parametric representation are loosely classified as implicit and explicit approaches. Representative examples are data-enabled predictive control (an implicit method) and data-driven linear quadratic regulation (an explicit method). These methods are equally amenable to certainty-equivalence as well as to robust control. Emphasis is put on the robustness of the methods under noise. The methods allow for theoretical certification, are computationally tractable, require a small amount of data in comparison with machine learning methods, and are robustly implementable in real time on complex physical systems.}, keywords = {Behavioral systems theory, data-driven control, missing data estimation, system identification}, doi = {10.1016/j.arcontrol.2021.09.005} }
@article{known-degrees, author = {A. Fazzi and B. Grossmann and G. Merc\`ere and I. Markovsky}, title = {MIMO System Identification Using Common Denominator and Numerators with Known Degrees}, journal = {International Journal of Adaptive Control and Signal Processing}, volume = {36}, number = {4}, pages = {870--881}, year = {2022}, abstract = {In system identification, prior knowledge about the model structure may be available. However, imposing this structure on the identified model may be nontrivial. A new discrete-time linear time-invariant identification method is presented in the paper that imposes prior knowledge of the degree of the common denominator of the system's transfer function matrix and the degrees of the numerators. First, a method is outlined for the solution in case of exact data. Then, this method is extended for noisy data in the output error setting. An initial estimate obtained by a subspace method is improved by a structured low-rank approximation method. The performance of the method imposing the structure is compared on simulated data with the performance of classical identification methods that do not impose the structure.}, doi = {10.1002/acs.3380}, keywords = {System Identification, Structural Prior, Discrete-time systems, Subspace Methods, Structured Low-Rank Approximation}, url = {https://github.com/fazziant/Computational-methods/blob/master/MIMO%20system%20identification.pdf} }
@article{bridging, author = {F. Dörfler and J. Coulson and I. Markovsky}, title = {Bridging direct \& indirect data-driven control formulations via regularizations and relaxations}, journal = {IEEE Trans. Automat. Contr.}, year = {2023}, volume = {68}, issue = {2}, pages = {883--897}, doi = {10.1109/TAC.2022.3148374}, pdf = {https://arxiv.org/abs/2101.01273}, abstract = {We discuss connections between sequential system identification and control for linear time-invariant systems, which we term indirect data-driven control, as well as a direct data-driven control approach seeking an optimal decision compatible with recorded data assembled in a Hankel matrix and robustified through suitable regularizations. We formulate these two problems in the language of behavioral systems theory and parametric mathematical programs, and we bridge them through a multi-criteria formulation trading off system identification and control objectives. We illustrate our results with two methods from subspace identification and control: namely, subspace predictive control and low-rank approximation, which constrain trajectories to be consistent with a non-parametric predictor derived from (respectively, the column span of) a data Hankel matrix. In both cases we conclude that direct and regularized data-driven control can be derived as a convex relaxation of the indirect approach, and the regularizations account for an implicit identification step. Our analysis further reveals a novel regularizer and sheds light on the remarkable performance of direct methods on nonlinear systems.}, keywords = {multi-objective optimization, system identification, data-driven control, DeePC, behavioral system theory} }
@article{ddsim-narx, author = {I. Markovsky}, title = {Data-driven simulation of generalized bilinear systems via linear time-invariant embedding}, journal = {IEEE Trans. Automat. Contr.}, volume = {68}, issue = {2}, pages = {1101--1106}, year = {2023}, doi = {10.1109/TAC.2022.3146726}, pdf = {https://imarkovs.github.io/publications/ddsim-narx.pdf}, software = {https://imarkovs.github.io/software/ddsim.tar}, abstract = {Nonparametric representations of linear time-invariant systems that use Hankel matrices constructed from data are the basis for data-driven signal processing and control methods of linear time-invariant systems. This paper extends the approach to data-driven simulation of a class of nonlinear systems. The key step of the generalization is an embedding result that is also of independent interest. The behavior of a polynomial time-invariant system is included in the behavior of a linear time-invariant system. A subclass of polynomial time-invariant systems that leads to a computationally tractable data-driven simulation problem is generalized bilinear. It includes Hammerstein, finite-lag Volterra, and bilinear systems. The method proposed is illustrated and compared with the classical model-based method on simulation examples in the errors-in-variables setup as well as benchmark real-life data sets.}, keywords = {behavioral approach, system identification, data-driven methods, nonlinear systems.} }
@article{fl, author = {I. Markovsky and E. Prieto-Araujo and F. Dörfler}, title = {On the persistency of excitation}, journal = {Automatica}, year = {2023}, doi = {10.1016/j.automatica.2022.110657}, volume = {147}, pages = {110657}, pdf = {https://imarkovs.github.io/publications/fl-rev.pdf}, abstract = {The result of J.C. Willems et al. ``A note on persistency of excitation'', Systems \& Control Letters, 2005, gives identifiability conditions for system identification as well as data-driven representations for data-driven control. The existing proofs, however, are proofs by contradiction and do not give insight into the assumptions of controllability and persistency of excitation of the input. Moreover, the existing proofs do not clarify how conservative the assumptions are. We provide an alternative constructive proof that reduces the required order of persistency of excitation to the time horizon of the data-driven representation plus the controllability index of the system. The nongeneric case in which persistency of excitation of order more than the time horizon is needed corresponds to special initial conditions. It is explicitly characterized in terms of the solution of a Sylvester equation. Another contribution of the paper is a representation of a persistently exciting input of a specified order as an output of an autonomous linear time-invariant system. The result can be used for input design with constraints on the input.}, keywords = {behavioral approach; exact identification; identifiability; persistency of excitation; input design} }
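The order of persistency of excitation discussed in the abstract above can be checked directly from the data: an input is persistently exciting of order L exactly when its depth-L block-Hankel matrix has full row rank. A minimal numerical sketch with hypothetical input data u and tolerance follows; the input-design construction proposed in the paper is not reproduced here.

    import numpy as np

    def is_persistently_exciting(u, L, tol=1e-8):
        # u: T-by-m input data; PE of order L <=> the mL-by-(T-L+1) block-Hankel
        # matrix of depth L built from u has full row rank mL
        T, m = u.shape
        H = np.vstack([u[i:T - L + 1 + i, :].T for i in range(L)])
        s = np.linalg.svd(H, compute_uv=False)
        return bool(np.sum(s > tol * s[0]) == m * L)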
@article{identifiability, author = {I. Markovsky and F. Dörfler}, title = {Identifiability in the behavioral setting}, journal = {IEEE Trans. Automat. Contr.}, volume = {68}, issue = {3}, pages = {1667--1677}, year = {2023}, pdf = {https://imarkovs.github.io/publications/identifiability.pdf}, abstract = {The behavioral approach to system identification starts from a given time series without a priori separation of the variables into inputs and outputs. The available identifiability conditions however require persistency of excitation of an input component of the time series which implicitly assumes that an input/output partitioning of the variables is given. In addition, a standard identifiability assumption is that the true data generating system is controllable. The conditions of controllability and persistency of excitation are sufficient but not necessary. Motivated by the need to infer linear time-invariant models from rank deficient Hankel matrices and to use such matrices as data-driven predictors in signal processing and control, we derive necessary and sufficient identifiability conditions that do not require a priori given input/output partitioning of the variables nor controllability of the true system. The prior knowledge needed for identifiability is the number of inputs, lag, and order of the true system. The results are based on a modification of the notion of a most powerful unfalsified model for finite data and a novel algorithm for its computation. The results in the paper are derived assuming exact data; however, low-rank approximation allows their application in the case of noisy data. We compare empirically low-rank approximation of the Hankel, Page, and trajectory matrices in the errors-in-variables setting. Although the Page and trajectory matrices are unstructured, the parameter estimates obtained from them are less accurate than those obtained from the Hankel matrix.}, keywords = {Behavioral system theory, System identification, Most powerful unfalsified model, Hankel matrix.}, software = {https://imarkovs.github.io/software/identifiability-code.pdf}, optnote = {Available at \url{https://imarkovs.github.io/publications/identifiability.pdf}}, doi = {10.1109/TAC.2022.3209954} }
@article{tutorial, author = {I. Markovsky and L. Huang and F. Dörfler}, title = {Data-driven control based on the behavioral approach: From theory to applications in power systems}, journal = {IEEE Control Systems Magazine}, year = {2023}, volume = {43}, issue = {5}, pages = {28--68}, pdf = {https://imarkovs.github.io/tutorial.pdf}, abstract = {Behavioral systems theory decouples the behavior of a system from its representation. A key result is that, under a persistency of excitation condition, the image of a Hankel matrix constructed from the data equals the set of finite-length trajectories of a linear time-invariant system. The result is the cornerstone of a recently emerged approach to direct data-driven control. This self-contained tutorial reviews its foundations and shows how they can be leveraged for data-driven control. We present a generic data-driven interpolation/approximation formulation encompassing many well-known problem instances, among others finite-horizon data-driven control. We embed this problem formulation into a predictive control setting, robustify it to inexact data by means of regularizations, and apply the resulting methods in the context of power electronics dominated power systems.}, keywords = {Data-driven control, Behavioral system theory, System identification, Power electronics.}, software = {https://imarkovs.github.io/tutorial}, optnote = {Available at \url{https://imarkovs.github.io/tutorial.pdf}}, doi = {10.1109/MCS.2023.3291638} }
@article{beh-dist-ejc, author = {A. Fazzi and I. Markovsky}, title = {Distance problems in the behavioral setting}, journal = {European Journal of Control}, volume = {74}, pages = {100832}, year = {2023}, abstract = {Motivated by the distance to uncontrollability problem, we define a distance between finite-length linear time-invariant systems. The method proposed in this paper for computing the distance exploits the principal angles associated with structured matrices representing the systems.}, doi = {10.1016/j.ejcon.2023.100832} }
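The abstract above states that the distance is computed from the principal angles associated with structured matrices representing the systems. The sketch below illustrates only the generic ingredient, principal angles between the column spans of two matrices, using scipy.linalg.subspace_angles; the choice of structured matrices (e.g., block-Hankel matrices built from finite-length trajectories) and the way the angles enter the distance defined in the paper are not reproduced here, and the example data are hypothetical.

    import numpy as np
    from scipy.linalg import subspace_angles

    rng = np.random.default_rng(0)
    M1 = rng.standard_normal((20, 3))      # hypothetical structured matrix representing system 1
    M2 = rng.standard_normal((20, 3))      # hypothetical structured matrix representing system 2

    theta = subspace_angles(M1, M2)        # principal angles in radians, largest first
    print(theta, np.sin(theta).max())      # sin of the largest angle is the standard gap between subspaces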
@article{frest, author = {I. Markovsky and H. Ossareh}, title = {Finite-data nonparametric frequency response evaluation without leakage}, journal = {Automatica}, volume = {159}, pages = {111351}, year = {2024}, doi = {10.1016/j.automatica.2023.111351}, abstract = {The existing nonparametric frequency response estimation methods suffer from leakage and have limited frequency resolution. Due to leakage and interpolation errors, these methods do not yield the correct result in the case of exact data of a linear time-invariant system. Our main contribution is a nonparametric direct data-driven frequency response estimation method that, in the case of exact data satisfying a standard persistency of excitation condition, eliminates the leakage and has infinite frequency resolution. The method is derived in the behavioral setting. It requires solving a system of linear equations and has no hyper-parameters. In the case of noisy data, a modification of the method with low-rank approximation results in an effective frequency response estimator.}, keywords = {direct data-driven methods, frequency response estimation, behavioral approach, leakage elimination.}, software = {https://imarkovs.github.io/frest}, pdf = {https://imarkovs.github.io/publications/frest.pdf}, optnote = {Available at \url{https://imarkovs.github.io/publications/frest.pdf}} }
@article{addition-and-intersection, author = {A. Fazzi and I. Markovsky}, title = {Addition and intersection of linear time-invariant behaviors}, journal = {IFAC Journal of Systems and Control}, volume = {26}, pages = {100233}, year = {2023}, abstract = {We define and analyze the operations of addition and intersection of linear time-invariant systems in the behavioral setting, where systems are viewed as sets of trajectories rather than input-output maps. The classical definition of addition of input-output systems is addition of the outputs with the inputs being equal. In the behavioral setting, addition of systems is defined as addition of all variables. Intersection of linear time-invariant systems was considered before only for the autonomous case in the context of ``common dynamics'' estimation. We generalize the notion of common dynamics to open systems (systems with inputs) as intersection of behaviors. This is done by proposing a trajectory-based definition of these two basic operations. The main results of the paper are 1) characterization of the link between the complexities (number of inputs and order) of the sum and intersection systems, 2) algorithms for computing their kernel and image representations, and 3) a duality property of the two operations. Despite these operations not being new to the literature, we highlight that the proposed approach is different and based on polynomial computations and linear algebra.}, doi = {10.1016/j.ifacsc.2023.100233} }
@article{jan-new, author = {M. Alsalti and I. Markovsky and V. G. Lopez and M. A. Müller}, title = {Data-based system representations from irregularly measured data}, journal = {IEEE Trans. Automat. Contr.}, optvolume = {??}, optpages = {??}, year = {2024}, pdf = {https://arxiv.org/pdf/2307.11589}, doi = {10.1109/TAC.2024.3423053} }
@article{mpum-md, author = {I. Markovsky and M. Alsalti and V. G. Lopez and M. A. Müller}, title = {Identification from data with periodically missing output samples}, journal = {Automatica}, volume = {169}, pages = {111869}, year = {2024}, doi = {10.1016/j.automatica.2024.111869}, abstract = {The identification problem in the case of data with missing values is challenging and currently not fully understood. For example, there are no general nonconservative identifiability results, nor provably correct data-efficient methods. In this paper, we consider a special case of periodically missing output samples, where all but one output sample per period may be missing. The novel idea is to use a lifting operation that converts the original problem with missing data into an equivalent standard identification problem. The key step is the inverse transformation from the lifted to the original system, which requires computation of a matrix root. The well-posedness of the inverse transformation depends on the eigenvalues of the system. Under an assumption on the eigenvalues, which is not verifiable from the data, and a persistency of excitation-type assumption on the data, the method based on lifting recovers the data-generating system.}, keywords = {System identification, Missing data, Behavioral approach, Lifting}, pdf = {https://imarkovs.github.io/publications/mpum-periodic-md.pdf} }
@article{jia-csl, author = {J. Wang and L. Hemelhof and I. Markovsky and P. Patrinos}, title = {A trust-region method for data-driven iterative learning control of nonlinear systems}, journal = {IEEE Control Systems Letters}, year = {2024}, volume = {8}, pages = {1847--1852}, pdf = {https://imarkovs.github.io/publications/jia-csl.pdf}, doi = {10.1109/LCSYS.2024.3417805} }
@article{ode-improved, author = {A. Fazzi and K. Usevich and I. Markovsky}, title = {Implementation improvements and extensions of an {ODE}-based algorithm for structured low-rank approximation}, journal = {Calcolo}, volume = {60}, number = {2}, optpages = {??}, year = {2024}, doi = {10.1007/s10092-024-00623-y} }
@article{VMVS07, author = {S. {Van Huffel} and I. Markovsky and R. J. Vaccaro and T. {S\"oderstr\"om}}, title = {Guest editorial: {T}otal least squares and errors-in-variables modeling}, journal = {Signal Processing}, year = {2007}, volume = {87}, number = {10}, pages = {2281--2282}, month = {October} }