% Shared journal name (both entries below appeared with the same journal
% duplicated into a redundant `series` field; defined once here instead).
@string{explgeophys = {Exploration Geophysics: The Bulletin of the Australian Society of Exploration Geophysicists}}

@article{PaascheEberle2011,
  author    = {Paasche, Hendrik and Eberle, Detlef},
  % Colon restored from the published title (DOI 10.1071/EG11014);
  % {Gustafson-Kessel} braced so style recasing cannot lowercase it.
  title     = {Automated compilation of pseudo-lithology maps from geophysical data sets: a comparison of {Gustafson-Kessel} and fuzzy c-means cluster algorithms},
  journal   = explgeophys,
  volume    = {42},
  number    = {4},
  pages     = {275--285},
  year      = {2011},
  publisher = {CSIRO},
  address   = {Collingwood},
  issn      = {0812-3985},
  doi       = {10.1071/EG11014},
  abstract  = {The fuzzy partitioning Gustafson-Kessel cluster algorithm is employed for rapid and objective integration of multi-parameter Earth-science related databases. We begin by evaluating the Gustafson-Kessel algorithm using the example of a synthetic study and compare the results to those obtained from the more widely employed fuzzy c-means algorithm. Since the Gustafson-Kessel algorithm goes beyond the potential of the fuzzy c-means algorithm by adapting the shape of the clusters to be detected and enabling a manual control of the cluster volume, we believe the results obtained from Gustafson-Kessel algorithm to be superior. Accordingly, a field database comprising airborne and ground-based geophysical data sets is analysed, which has previously been classified by means of the fuzzy c-means algorithm. This database is integrated using the Gustafson-Kessel algorithm thus minimising the amount of empirical data processing required before and after fuzzy c-means clustering. The resultant zonal geophysical map is more evenly clustered matching regional geology information available from the survey area. Even additional information about linear structures, e.g. as typically caused by the presence of dolerite dykes or faults, is visible in the zonal map obtained from Gustafson-Kessel cluster analysis.},
  language  = {en},
}

@article{LeyCooperViezzoliGuillemoteauetal.2015,
  author    = {Ley-Cooper, Alan Yusen and Viezzoli, Andrea and Guillemoteau, Julien and Vignoli, Giulio and Macnae, James and Cox, Leif and Munday, Tim},
  title     = {Airborne electromagnetic modelling options and their consequences in target definition},
  journal   = explgeophys,
  volume    = {46},
  number    = {1},
  pages     = {74--84},
  year      = {2015},
  publisher = {CSIRO},
  address   = {Clayton},
  issn      = {0812-3985},
  doi       = {10.1071/EG14045},
  % OCR errors in the source abstract repaired: "ID" -> "1D", "perse" -> "per se",
  % stray comma after "undergoes" removed.
  abstract  = {Given the range of geological conditions under which airborne EM surveys are conducted, there is an expectation that the 2D and 3D methods used to extract models that are geologically meaningful would be favoured over 1D inversion and transforms. We do after all deal with an Earth that constantly undergoes faulting, intrusions, and erosive processes that yield a subsurface morphology, which is, for most parts, dissimilar to a horizontal layered earth. We analyse data from a survey collected in the Musgrave province, South Australia. It is of particular interest since it has been used for mineral prospecting and for a regional hydro-geological assessment. The survey comprises abrupt lateral variations, more-subtle lateral continuous sedimentary sequences and filled palaeovalleys. As consequence, we deal with several geophysical targets of contrasting conductivities, varying geometries and at different depths. We invert the observations by using several algorithms characterised by the different dimensionality of the forward operator. Inversion of airborne EM data is known to be an ill-posed problem. We can generate a variety of models that numerically adequately fit the measured data, which makes the solution non-unique. The application of different deterministic inversion codes or transforms to the same dataset can give dissimilar results, as shown in this paper. This ambiguity suggests the choice of processes and algorithms used to interpret AEM data cannot be resolved as a matter of personal choice and preference. The degree to which models generated by a 1D algorithm replicate/or not measured data, can be an indicator of the data's dimensionality, which per se does not imply that data that can be fitted with a 1D model cannot be multidimensional. On the other hand, it is crucial that codes that can generate 2D and 3D models do reproduce the measured data in order for them to be considered as a plausible solution. In the absence of ancillary information, it could be argued that the simplest model with the simplest physics might be preferred.},
  language  = {en},
}