#!/bin/sh

prefix=@prefix@

# Include utilities. Use the installed version if available and
# fall back to the build-tree version if it isn't.
if [ -e "@datarootdir@/@PACKAGE_NAME@/scripts/admin-utils.sh" ]; then
    . "@datarootdir@/@PACKAGE_NAME@/scripts/admin-utils.sh"
else
    . "@abs_top_builddir@/src/bin/admin/admin-utils.sh"
fi

12
# Need a path for temporary files created during upgrade data migration.
# Use the state directory in the install path if it exists, otherwise
# use the build tree.
if [ -e "@localstatedir@/lib/@PACKAGE_NAME@" ]; then
    temp_file_dir="@localstatedir@/lib/@PACKAGE_NAME@"
else
    temp_file_dir="@abs_top_builddir@/src/share/database/scripts/cql"
fi

21
22
# Capture any command-line arguments so they can be forwarded to cqlsh.
cqlargs="$*"

23
# Ensures the current schema version is 2.0. If not it exits.
check_version() {
    # $cqlargs is deliberately unquoted so multiple arguments split.
    version=$(cql_version $cqlargs)

    # Nothing to do when the schema is already at the expected level.
    if [ "${version}" = "2.0" ]; then
        return 0
    fi

    printf "This script upgrades 2.0 to 3.0. Reported version is %s. Skipping upgrade.\n" "${version}"
    exit 0
}

33
# Performs the schema changes from 2.0 to 3.0
update_schema() {
    cqlsh $cqlargs <<EOF
-- This line starts database upgrade to version 3.0

-- Add a column holding leases for user context.
ALTER TABLE lease4 ADD user_context VARCHAR;
ALTER TABLE lease6 ADD user_context VARCHAR;

-- -----------------------------------------------------
-- Table logs (it is used by forensic logging hook library)
-- -----------------------------------------------------
CREATE TABLE IF NOT EXISTS logs (
    timeuuid TIMEUUID,  -- creation timeuuid, use dateOf() to get timestamp
    address VARCHAR,    -- address or prefix
    log VARCHAR,        -- the log itself
    PRIMARY KEY ((timeuuid))
);

-- Create search index for logs table
CREATE INDEX IF NOT EXISTS logsindex ON logs (address);

-- Add auth_key for storing keys for DHCPV6 reconfigure.
ALTER TABLE host_reservations ADD auth_key VARCHAR;

-- Cql requires primary keys in the WHERE here.
DELETE FROM schema_version WHERE version=2;
INSERT INTO schema_version (version, minor) VALUES(3, 0);

-- This line concludes database upgrade to version 3.0
EOF

    if [ "$?" -ne 0 ]
    then
        # Fixed typo ("udpate") and quoted the message.
        echo "Schema update FAILED!"
        exit 1
    fi
}

72
# Function to delete temporary migration files
clean_up() {
    # Remove the exported reservation data, if present.
    # Paths are quoted in case $temp_file_dir contains spaces.
    if [ -e "$export_file" ]
    then
        rm "$export_file"
    fi

    # Remove the generated update statements, if present.
    if [ -e "$update_file" ]
    then
        rm "$update_file"
    fi
}

# Function to clean up and exit the script gracefully
#
# Called by migrate_host_data()
#
# Parameters:
# status - integer value to pass to sh:exit
# explanation - "quoted" text message to emit to stdout
exit_now() {
    status=$1;shift
    explanation=$1

    # Always delete the temporary migration files before exiting.
    clean_up
    if [ "$status" -eq 0 ]
    then
        echo "Data Migration SUCCESS! $explanation"
    else
        echo "Data Migration FAILURE! $explanation"
    fi

    exit "$status"
}

# Function adds a column to the global, $update_cols if needed
#
# Called by migrate_host_data() to determine if the given column
# value needs to be updated, and if so appends CQL update text
# to a global string accumulator, $update_cols.
#
# The accumulator text is of the form:
#
#  "<column_name>=<column value>{,<column_name>=<column_value>,..}"
#
# Parameters:
# val - current numerical value of the subnet ID column in question
# col - column name of the column in question
#
check_column() {
    # NOTE: 'local' is not strictly POSIX but is supported by the
    # common /bin/sh implementations (dash, ash, bash).
    local val="$1";shift
    local col="$1"
    local old_id="0"
    local new_id="-1"
    local comma=""

    # If the current value equals the value to be replaced
    # add it to the accumulator
    if [ "$val" = "$old_id" ]
    then
        # If the accumulator isn't empty, we need a comma
        if [ -n "$update_cols" ]
        then
            comma=","
        fi

        update_cols="$update_cols$comma $col = $new_id"
    fi
}

# This function converts subnet ID columns of existing host_reservations
# from 0 to 0xFFFFFFFF (SUBNET_ID_UNUSED). To do this it first creates an
# excerpt from host_reservations containing one row for each reservation
# with the following columns:
#
#  id (primary key)
#  host_ipv4_subnet_id
#  host_ipv6_subnet_id
#  option_subnet_id
#
# The excerpt is captured in a temporary file, the "export" file.
#
# Next, it iterates over the export file emitting a CQL update statement
# for each row that needs at least one of the columns updated. In other
# words, if at least one of the subnet ID columns in a row is 0, an
# update statement for that row will be emitted.  The update statements
# are captured in a second temporary file, the "update" file.
#
# After exhausting the export file, the update file is submitted to
# cqlsh for execution.
#
# No parameters.
migrate_host_data() {
    export_file="$temp_file_dir/cql_export.csv"
    update_file="$temp_file_dir/cql_update.cql"

    # Start from a clean slate in case a prior run left files behind.
    clean_up

    # Fetch host_reservation data so we have primary key, plus subnet id values
    echo "Exporting host_reservation data to $export_file ..."
    query="COPY host_reservations \
        (id, host_ipv4_subnet_id, host_ipv6_subnet_id, option_subnet_id)\
        TO '$export_file'"

    cqlsh $cqlargs -e "$query"
    if [ "$?" -ne 0 ]
    then
        exit_now 1 "Cassandra export failed! Could not migrate data!"
    fi

    # Strip the carriage returns that CQL insists on adding.
    if [ -e "$export_file" ]
    then
        tr -d '\015' < "$export_file" > "$export_file.2"
        mv "$export_file.2" "$export_file"
    else
        # Shouldn't happen but then again we're talking about CQL here
        exit_now 1 "Cassandra export file $export_file is missing?"
    fi

    # Iterate through the exported data, accumulating update statements,
    # one for each reservation that needs updating.  We should have one
    # host per line.
    line_cnt=0
    update_cnt=0

    while read -r line
    do
        line_cnt=$((line_cnt + 1))
        update_cols=""
        xIFS="$IFS"
        # Split the CSV row on commas.  Note IFS=$',' is a bashism and
        # under a strict POSIX sh sets IFS to the literal "$,"; plain
        # ',' is the portable equivalent.
        IFS=','

        i=1
        # Parse the column values
        for val in $line
        do
            case $i in
            1)
                host_id="$val"
                ;;
            2)
                check_column "$val" host_ipv4_subnet_id
                ;;
            3)
                check_column "$val" host_ipv6_subnet_id
                ;;
            4)
                check_column "$val" option_subnet_id
                ;;
            *)
                # We're going to assume that since any error is fatal
                exit_now 1 "Line# $line_cnt, too many values, wrong or corrupt file"
                ;;
            esac
            i=$((i + 1))
        done

        if [ "$i" -ne 5 ]
        then
            # We're going to assume that since any error is fatal
            exit_now 1 "Line# $line_cnt, too few values, wrong or corrupt file"
        fi

        # If any of the current host's columns need to be replaced, append an update for it
        if [ -n "$update_cols" ]
        then
            echo "update host_reservations set $update_cols where id = $host_id;" >> "$update_file"
            update_cnt=$((update_cnt + 1))
        fi

        IFS="$xIFS"
    done < "$export_file"

    # If we didn't record any updates, then hey, we're good to go!
    if [ "$update_cnt" -eq 0 ]
    then
        exit_now 0 "Completed successfully: No updates were needed"
    fi

    # We have at least one update in the update file, so submit it to cqlsh.
    echo "$update_cnt update statements written to $update_file"
    echo "Running the updates..."
    cqlsh $cqlargs -f "$update_file"
    if [ "$?" -ne 0 ]
    then
        exit_now 1 "Cassandra updates failed"
    fi

    exit_now 0 "Updated $update_cnt of $line_cnt records"
}

# Main flow: verify the starting schema version is 2.0, apply the 3.0
# DDL changes, then migrate existing host_reservation subnet ID values.
check_version
update_schema
migrate_host_data