<?xml version="1.0" encoding="UTF-8"?>
<lom:lom xmlns:lom="https://oer-repo.uibk.ac.at/lom" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://oer-repo.uibk.ac.at/lom https://w3id.org/oerbase/profiles/lomuibk/latest/schemas/lom-uibk.xsd">
  
<lom:general>
  
<lom:identifier>
  
<lom:catalog>phaidra.ustp.at</lom:catalog>

  
<lom:entry>
  
<lom:langstring xml:lang="x-none">o:5474</lom:langstring>

  
</lom:entry>

  
</lom:identifier>

  
<lom:identifier>
  
<lom:catalog>DOI</lom:catalog>

  
<lom:entry>
  
<lom:langstring xml:lang="x-none">10.1007/s00779-023-01720-5</lom:langstring>

  
</lom:entry>

  
</lom:identifier>

  
<lom:title>
  
<lom:langstring xml:lang="en">Towards a unified terminology for sonification and visualization</lom:langstring>

  
</lom:title>

  
<lom:description>
  
<lom:langstring xml:lang="en">Both sonification and visualization convey information about data by effectively using our human perceptual system, but their ways to transform the data differ. Over the past 30 years, the sonification community has demanded a holistic perspective on data representation, including audio-visual analysis, several times. A design theory of audio-visual analysis would be a relevant step in this direction. An indispensable foundation for this endeavor is a terminology describing the combined design space. To build a bridge between the domains, we adopt three of the established theoretical constructs from visualization theory for the field of sonification. The three constructs are the spatial substrate, the visual mark, and the visual channel. In our model, we choose time to be the temporal substrate of sonification. Auditory marks are then positioned in time, such as visual marks are positioned in space. Auditory channels are encoded into auditory marks to convey information. The proposed definitions allow discussing visualization and sonification designs as well as multi-modal designs based on a common terminology. While the identified terminology can support audio-visual analytics research, it also provides a new perspective on sonification theory itself.</lom:langstring>

  
</lom:description>

  
<lom:language>eng</lom:language>

  
<lom:keyword>
  
<lom:langstring xml:lang="en">audio-visual analytics</lom:langstring>

  
</lom:keyword>

  
<lom:keyword>
  
<lom:langstring xml:lang="en">sonification</lom:langstring>

  
</lom:keyword>

  
<lom:keyword>
  
<lom:langstring xml:lang="en">sonification theory</lom:langstring>

  
</lom:keyword>

  
<lom:keyword>
  
<lom:langstring xml:lang="en">visualization theory</lom:langstring>

  
</lom:keyword>

  
<lom:keyword>
  
<lom:langstring xml:lang="en">audio-visual data analysis</lom:langstring>

  
</lom:keyword>

  
</lom:general>

  
<lom:lifecycle>
  
<lom:datetime>2023-10-22T14:34:25.762Z</lom:datetime>

  
<lom:contribute>
  
<lom:role>
  
<lom:source>
  
<lom:langstring xml:lang="x-none">LOMv1.0</lom:langstring>

  
</lom:source>

  
<lom:value>
  
<lom:langstring xml:lang="x-none">Author</lom:langstring>

  
</lom:value>

  
</lom:role>

  
<lom:centity>
  
<lom:vcard>BEGIN:VCARD
VERSION:3.0
N:Enge;Kajetan;
FN:Kajetan Enge
X-ORCID:https://orcid.org/0000-0002-5456-1140
END:VCARD</lom:vcard>

  
</lom:centity>

  
<lom:centity>
  
<lom:vcard>BEGIN:VCARD
VERSION:3.0
N:Rind;Alexander;
FN:Alexander Rind
X-ORCID:https://orcid.org/0000-0001-8788-4600
END:VCARD</lom:vcard>

  
</lom:centity>

  
<lom:centity>
  
<lom:vcard>BEGIN:VCARD
VERSION:3.0
N:Iber;Michael;
FN:Michael Iber
X-ORCID:https://orcid.org/0000-0002-5929-8716
END:VCARD</lom:vcard>

  
</lom:centity>

  
<lom:centity>
  
<lom:vcard>BEGIN:VCARD
VERSION:3.0
N:Höldrich;Robert;
FN:Robert Höldrich
X-ORCID:https://orcid.org/0000-0002-6887-6637
END:VCARD</lom:vcard>

  
</lom:centity>

  
<lom:centity>
  
<lom:vcard>BEGIN:VCARD
VERSION:3.0
N:Aigner;Wolfgang;
FN:Wolfgang Aigner
X-ORCID:https://orcid.org/0000-0001-5762-1869
END:VCARD</lom:vcard>

  
</lom:centity>

  
</lom:contribute>

  
</lom:lifecycle>

  
<lom:educational>
  
<lom:learningresourcetype>
  
<lom:source>
  
<lom:langstring xml:lang="x-none">https://w3id.org/kim/hcrt/scheme</lom:langstring>

  
</lom:source>

  
<lom:id>https://w3id.org/kim/hcrt/text</lom:id>

  
<lom:entry>
  
<lom:langstring xml:lang="de">Textdokument</lom:langstring>

  
<lom:langstring xml:lang="en">Text</lom:langstring>

  
</lom:entry>

  
</lom:learningresourcetype>

  
</lom:educational>

  
<lom:rights>
  
<lom:copyrightandotherrestrictions>
  
<lom:source>
  
<lom:langstring xml:lang="x-none">LOMv1.0</lom:langstring>

  
</lom:source>

  
<lom:value>
  
<lom:langstring xml:lang="x-none">yes</lom:langstring>

  
</lom:value>

  
</lom:copyrightandotherrestrictions>

  
<lom:description>
  
<lom:langstring xml:lang="x-t-cc-url">https://creativecommons.org/licenses/by/4.0</lom:langstring>

  
</lom:description>

  
</lom:rights>

  
<lom:technical>
  
<lom:format>application/pdf</lom:format>

  
<lom:size>1208037</lom:size>

  
<lom:location>https://phaidra.ustp.at/o:5474</lom:location>

  
<lom:thumbnail>
  
<lom:url>https://phaidra.ustp.at/api/object/o:5474/thumbnail</lom:url>

  
</lom:thumbnail>

  
</lom:technical>

  
<lom:classification>
  
<lom:purpose>
  
<lom:source>
  
<lom:langstring xml:lang="x-none">LOMv1.0</lom:langstring>

  
</lom:source>

  
<lom:value>
  
<lom:langstring xml:lang="x-none">discipline</lom:langstring>

  
</lom:value>

  
</lom:purpose>

  
</lom:classification>

  
</lom:lom>


