#!/bin/bash
# generate PICA+
# - enrich PPNs and cluster items
# - export as PICA+
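# Usage: tasks/03-ba-sachsen.sh <input-dir> [<second-input-dir>]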
# =============================== ENVIRONMENT ================================ #
# source the main script
source "${BASH_SOURCE%/*}/../bash-refine.sh" || exit 1
# read input
if [[ $1 ]]; then
inputdir1="$(readlink -e "$1")"
else
echo 1>&2 "Please provide path to directory with input file(s)"; exit 1
fi
if [[ $2 ]]; then
inputdir2="$(readlink -e "$2")"
fi
# check requirements, set trap, create workdir and tee to logfile
init
# ================================= STARTUP ================================== #
checkpoint "Startup"; echo
# start OpenRefine server
refine_start; echo
# ================================== IMPORT ================================== #
checkpoint "Import"; echo
# TODO: merge with Alephino
zip -j "${workdir}/ba-sachsen.zip" "${inputdir1}"/*.csv
projects["ba-sachsen"]="${workdir}/ba-sachsen.zip"
# create a new project from the zip archive
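# create-project-from-upload answers with a redirect whose URL carries the
# new project ID; --write-out "%{redirect_url}" captures it into ${p}.id,
# from which refine_store reads the ID into the projects array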
p="ba-sachsen"
echo "import file" "${projects[$p]}" "..."
if curl -fs --write-out "%{redirect_url}\n" \
--form project-file="@${projects[$p]}" \
--form project-name="${p}" \
--form format="text/line-based/*sv" \
--form options='{
"encoding": "UTF-8",
"includeFileSources": "false",
"separator": ","
}' \
"${endpoint}/command/core/create-project-from-upload$(refine_csrf)" \
> "${workdir}/${p}.id"
then
log "imported ${projects[$p]} as ${p}"
else
error "import of ${projects[$p]} failed!"
fi
refine_store "${p}" "${workdir}/${p}.id" || error "import of ${p} failed!"
echo
# ================================ TRANSFORM ================================= #
checkpoint "Transform"; echo
# -------------------------- 01 Enrich PPN via ISBN -------------------------- #
# TODO: enrichment for 0110
# spec_Z_04
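# The first operation converts ISBN-10 values in field 2000 to ISBN-13:
# strip hyphens, prefix "978", drop the old check digit and append a new one
# computed with alternating weights 1 and 3 (e.g. 0306406152 -> 9780306406157).
# The results are split into columns "tmp 1" and "tmp 2", which cross() then
# matches against the other rows of the project to copy an existing PPN (0100).
# Minimal bash sketch of the same check-digit rule (reference only, not called
# anywhere; the argument is the first 12 digits of the ISBN-13):
isbn13_check() {
  local z=$1 sum=0 i
  for ((i = 0; i < 12; i++)); do
    # weight 1 on even positions, weight 3 on odd positions
    sum=$((sum + ${z:i:1} * (1 + i % 2 * 2)))
  done
  echo $(((10 - sum % 10) % 10))  # e.g. isbn13_check 978030640615 -> 7
}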
echo "PPN anreichern über ISBN..."
if curl -fs \
--data project="${projects[$p]}" \
--data-urlencode "operations@-" \
"${endpoint}/command/core/apply-operations$(refine_csrf)" > /dev/null \
<< "JSON"
[
{
"op": "core/column-addition",
"engineConfig": {
"facets": [],
"mode": "row-based"
},
"baseColumnName": "2000",
"expression": "grel:with(value.replace('-',''),x,forEach(x.split('␟'),v,if(v.length()==10,with('978'+x[0,9],z,z+((10-(sum(forRange(0,12,1,i,toNumber(z[i])*(1+(i%2*2)) )) %10)) %10).toString()[0] ),v))).uniques().join('␟')",
"onError": "set-to-blank",
"newColumnName": "tmp",
"columnInsertIndex": 3
},
{
"op": "core/column-split",
"engineConfig": {
"facets": [],
"mode": "row-based"
},
"columnName": "tmp",
"guessCellType": false,
"removeOriginalColumn": true,
"mode": "separator",
"separator": "␟",
"regex": false,
"maxColumns": 0
},
{
"op": "core/column-addition",
"engineConfig": {
"facets": [
{
"type": "list",
"name": "2199",
"expression": "isBlank(value)",
"columnName": "2199",
"invert": false,
"omitBlank": false,
"omitError": false,
"selection": [
{
"v": {
"v": false,
"l": "false"
}
}
],
"selectBlank": false,
"selectError": false
},
{
"type": "list",
"name": "0100",
"expression": "isBlank(value)",
"columnName": "0100",
"invert": false,
"omitBlank": false,
"omitError": false,
"selection": [
{
"v": {
"v": true,
"l": "true"
}
}
],
"selectBlank": false,
"selectError": false
}
],
"mode": "row-based"
},
"baseColumnName": "tmp 1",
"expression": "grel:with(forEach(value.cross('ba-sachsen','tmp 1'),r,forNonBlank(r.cells['0100'].value,v,v,null)).join('␟') + '␟' + forEach(value.cross('ba-sachsen','tmp 2'),r,forNonBlank(r.cells['0100'].value,v,v,null)).join('␟'),x,x.split('␟')[0])",
"onError": "set-to-blank",
"newColumnName": "tmp 1_0100",
"columnInsertIndex": 4
},
{
"op": "core/column-addition",
"engineConfig": {
"facets": [
{
"type": "list",
"name": "2199",
"expression": "isBlank(value)",
"columnName": "2199",
"invert": false,
"omitBlank": false,
"omitError": false,
"selection": [
{
"v": {
"v": false,
"l": "false"
}
}
],
"selectBlank": false,
"selectError": false
},
{
"type": "list",
"name": "0100",
"expression": "isBlank(value)",
"columnName": "0100",
"invert": false,
"omitBlank": false,
"omitError": false,
"selection": [
{
"v": {
"v": true,
"l": "true"
}
}
],
"selectBlank": false,
"selectError": false
}
],
"mode": "row-based"
},
"baseColumnName": "tmp 2",
"expression": "grel:with(forEach(value.cross('ba-sachsen','tmp 1'),r,forNonBlank(r.cells['0100'].value,v,v,null)).join('␟') + forEach(value.cross('ba-sachsen','tmp 2'),r,forNonBlank(r.cells['0100'].value,v,v,null)).join('␟'),x,x.split('␟')[0])",
"onError": "set-to-blank",
"newColumnName": "tmp 2_0100",
"columnInsertIndex": 6
},
{
"op": "core/text-transform",
"engineConfig": {
"facets": [
{
"type": "list",
"name": "2199",
"expression": "isBlank(value)",
"columnName": "2199",
"invert": false,
"omitBlank": false,
"omitError": false,
"selection": [
{
"v": {
"v": false,
"l": "false"
}
}
],
"selectBlank": false,
"selectError": false
},
{
"type": "list",
"name": "0100",
"expression": "isBlank(value)",
"columnName": "0100",
"invert": false,
"omitBlank": false,
"omitError": false,
"selection": [
{
"v": {
"v": true,
"l": "true"
}
}
],
"selectBlank": false,
"selectError": false
}
],
"mode": "row-based"
},
"columnName": "0100",
"expression": "grel:forNonBlank(cells['tmp 1_0100'].value,v,v,forNonBlank(cells['tmp 2_0100'].value,v,v,''))",
"onError": "keep-original",
"repeat": false,
"repeatCount": 10
},
{
"op": "core/column-removal",
"columnName": "tmp 2_0100"
},
{
"op": "core/column-removal",
"columnName": "tmp 1_0100"
},
{
"op": "core/column-removal",
"columnName": "tmp 2"
},
{
"op": "core/column-removal",
"columnName": "tmp 1"
}
]
JSON
then
log "transformed ${p} (${projects[$p]})"
else
error "transform ${p} (${projects[$p]}) failed!"
fi
echo
# ----------------------------- 02 Cluster items ----------------------------- #
# TODO: take 0110 into account
# spec_Z_05
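# clustering approach: normalize blank 0100 cells to real nulls, fill each
# remaining blank 0100 with the first 0100 value of its record, re-sort
# records by 0100, then derive a cluster key "id" (0100, falling back to 2199)
# and blank it down so consecutive rows with the same key form one record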
echo "Exemplare clustern..."
if curl -fs \
--data project="${projects[$p]}" \
--data-urlencode "operations@-" \
"${endpoint}/command/core/apply-operations$(refine_csrf)" > /dev/null \
<< "JSON"
[
{
"op": "core/text-transform",
"engineConfig": {
"facets": [
{
"type": "list",
"name": "0100",
"expression": "isBlank(value)",
"columnName": "0100",
"invert": false,
"omitBlank": false,
"omitError": false,
"selection": [
{
"v": {
"v": true,
"l": "true"
}
}
],
"selectBlank": false,
"selectError": false
}
],
"mode": "row-based"
},
"columnName": "0100",
"expression": "null",
"onError": "keep-original",
"repeat": false,
"repeatCount": 10
},
{
"op": "core/text-transform",
"engineConfig": {
"facets": [
{
"type": "list",
"name": "0100",
"expression": "isBlank(value)",
"columnName": "0100",
"invert": false,
"omitBlank": false,
"omitError": false,
"selection": [
{
"v": {
"v": true,
"l": "true"
}
}
],
"selectBlank": false,
"selectError": false
}
],
"mode": "row-based"
},
"columnName": "0100",
"expression": "grel:row.record.cells[columnName].value[0]",
"onError": "keep-original",
"repeat": false,
"repeatCount": 10
},
{
"op": "core/row-reorder",
"mode": "record-based",
"sorting": {
"criteria": [
{
"valueType": "string",
"column": "0100",
"blankPosition": 2,
"errorPosition": 1,
"reverse": false,
"caseSensitive": false
}
]
}
},
{
"op": "core/column-addition",
"engineConfig": {
"facets": [],
"mode": "row-based"
},
"baseColumnName": "0100",
"expression": "grel:forNonBlank(cells['0100'].value,v,v,forNonBlank(cells['2199'].value,v,v,''))",
"onError": "set-to-blank",
"newColumnName": "id",
"columnInsertIndex": 0
},
{
"op": "core/blank-down",
"engineConfig": {
"facets": [],
"mode": "row-based"
},
"columnName": "id"
}
]
JSON
then
log "transformed ${p} (${projects[$p]})"
else
error "transform ${p} (${projects[$p]}) failed!"
fi
echo
# ================================== EXPORT ================================== #
checkpoint "Export"; echo
# export as PICA+
format="pic"
echo "export ${p} to pica+ file using template..."
IFS= read -r -d '' template << "TEMPLATE"
{{
if(row.index - row.record.fromRowIndex == 0,
'' + '\n'
+ forNonBlank(cells['0100'].value, v, '003@' + ' 0' + v + '\n', '')
+ forNonBlank(cells['2000'].value, v, forEach(v.split('␟'),x,'004A' + ' 0' + x + '\n').join(''), '')
+ forNonBlank(cells['2199'].value, v, '006Y' + ' 0' + v + '\n', '')
,'')
}}{{
if(isNonBlank(cells['7100f'].value),
with(with(rowIndex - row.record.fromRowIndex + 1, i, '00'[0,2-i.length()] + i),exnr,
'208@/' + exnr + ' a' + cells['E0XX'].value + 'bn' + cells['E0XXb'].value + '\n'
+ '209A/' + exnr + ' B' + cells['7100B'].value + 'f' + cells['7100f'].value + forNonBlank(cells['7100a'].value, v, 'a' + v, '') + 'x00' + '\n'
), '')
}}
TEMPLATE
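# head -c -2 strips the two trailing newlines (one from the here-document,
# one added by echo) so that the template does not end with a blank line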
if echo "${template}" | head -c -2 | curl -fs \
--data project="${projects[$p]}" \
--data format="template" \
--data prefix="" \
--data suffix="" \
--data separator="" \
--data engine='{"facets":[],"mode":"row-based"}' \
--data-urlencode template@- \
"${endpoint}/command/core/export-rows" \
> "${workdir}/${p}.${format}"
then
log "exported ${p} (${projects[$p]}) to ${workdir}/${p}.${format}"
else
error "export of ${p} (${projects[$p]}) failed!"
fi
echo
# ================================== FINISH ================================== #
checkpoint "Finish"; echo
# stop OpenRefine server
refine_stop; echo
# calculate run time based on checkpoints
checkpoint_stats; echo
# word count on all files in workdir
count_output