diff --git a/configs/stage3_ubuntu/etc/systemd/system/write-metadata.service b/configs/stage3_ubuntu/etc/systemd/system/write-metadata.service index 8b46c8b..4452977 100644 --- a/configs/stage3_ubuntu/etc/systemd/system/write-metadata.service +++ b/configs/stage3_ubuntu/etc/systemd/system/write-metadata.service @@ -2,6 +2,11 @@ Description=Writes metadata to a known location Before=setup-after-boot.service +# This unit writes data to the disk which gets mounted in various containers. +# Ensure that this unit runs before the kubelet so that no k8s +# workloads start running before this unit has completed. +Before=kubelet.service + # generate-eth0-config.service sets the hostname of the machine to the expected # M-Lab DNS name for the machine. Since this write-metadata unit writes the # hostname value to the metadata directory, then be sure this unit runs later, diff --git a/configs/virtual_ubuntu/opt/mlab/bin/write-metadata.sh b/configs/virtual_ubuntu/opt/mlab/bin/write-metadata.sh index 92aacc2..4820d54 100755 --- a/configs/virtual_ubuntu/opt/mlab/bin/write-metadata.sh +++ b/configs/virtual_ubuntu/opt/mlab/bin/write-metadata.sh @@ -32,6 +32,20 @@ is_mig=$( ) if [[ $is_mig == "200" ]]; then + # It was discovered that there is some sort of race condition between this + # script and GCP fully populating VM metadata, specifically the + # "forwarded-ip[v6]s" values, requests for which were occasionally returning a + # 404 and other times not. This loop just makes sure that one of those values + # exists before trying to read the value.
+ metadata_status="" + until [[ $metadata_status == "200" ]]; do + sleep 5 + metadata_status=$( + curl "${CURL_FLAGS[@]}" --output /dev/null --write-out "%{http_code}" \ + "${METADATA_URL}/network-interfaces/0/forwarded-ips/0" \ + || true + ) + done echo -n "true" > $METADATA_DIR/loadbalanced external_ip=$(curl "${CURL_FLAGS[@]}" "${METADATA_URL}/network-interfaces/0/forwarded-ips/0") external_ipv6=$(curl "${CURL_FLAGS[@]}" "${METADATA_URL}/network-interfaces/0/forwarded-ipv6s/0")