Refactor golden image handling in backup upload process</message>
<message>Update the _set_golden_from_path function to improve the handling of existing golden image files. Replace the existing unlink logic with a more robust method that safely removes files or broken symlinks using the missing_ok parameter. This change enhances the reliability of the backup upload process by ensuring that stale references are properly cleared before setting a new golden image path.
This commit is contained in:
@@ -0,0 +1,53 @@
|
||||
# This workflow uses actions that are not certified by GitHub.
|
||||
# They are provided by a third-party and are governed by
|
||||
# separate terms of service, privacy policy, and support
|
||||
# documentation.
|
||||
|
||||
name: Create and publish a Docker image to GitHub Packages Repository
|
||||
|
||||
on: workflow_dispatch
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
jobs:
|
||||
build-and-push-image:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
with:
|
||||
platforms: 'arm64'
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to the Container registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
@@ -0,0 +1,32 @@
|
||||
FROM python:3.9
|
||||
LABEL maintainer="neurocis <neurocis@neurocis.me>"
|
||||
|
||||
RUN true && \
|
||||
\
|
||||
ARCH=`uname -m`; \
|
||||
if [ "$ARCH" = "armv7l" ]; then \
|
||||
NOBIN_OPT="--no-binary=grpcio"; \
|
||||
else \
|
||||
NOBIN_OPT=""; \
|
||||
fi; \
|
||||
# Install python prerequisites
|
||||
pip3 install --no-cache-dir $NOBIN_OPT \
|
||||
croniter==2.0.5 pytz==2024.1 six==1.16.0 \
|
||||
grpcio==1.62.2 \
|
||||
influxdb==5.3.2 certifi==2024.2.2 charset-normalizer==3.3.2 idna==3.7 \
|
||||
msgpack==1.0.8 requests==2.31.0 urllib3==2.2.1 \
|
||||
influxdb-client==1.42.0 reactivex==4.0.4 \
|
||||
paho-mqtt==2.0.0 \
|
||||
pypng==0.20220715.0 \
|
||||
python-dateutil==2.9.0 \
|
||||
typing_extensions==4.11.0 \
|
||||
yagrc==1.1.2 grpcio-reflection==1.62.2 protobuf==4.25.3
|
||||
|
||||
COPY dish_*.py loop_util.py starlink_*.py entrypoint.sh /app/
|
||||
WORKDIR /app
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "/app/entrypoint.sh"]
|
||||
CMD ["dish_grpc_influx.py status alert_detail"]
|
||||
|
||||
# docker run -d --name='starlink-grpc-tools' -e INFLUXDB_HOST=192.168.1.34 -e INFLUXDB_PORT=8086 -e INFLUXDB_DB=starlink
|
||||
# --net='br0' --ip='192.168.1.39' ghcr.io/sparky8512/starlink-grpc-tools dish_grpc_influx.py status alert_detail
|
||||
@@ -0,0 +1,24 @@
|
||||
This is free and unencumbered software released into the public domain.
|
||||
|
||||
Anyone is free to copy, modify, publish, use, compile, sell, or
|
||||
distribute this software, either in source code form or as a compiled
|
||||
binary, for any purpose, commercial or non-commercial, and by any
|
||||
means.
|
||||
|
||||
In jurisdictions that recognize copyright laws, the author or authors
|
||||
of this software dedicate any and all copyright interest in the
|
||||
software to the public domain. We make this dedication for the benefit
|
||||
of the public at large and to the detriment of our heirs and
|
||||
successors. We intend this dedication to be an overt act of
|
||||
relinquishment in perpetuity of all present and future rights to this
|
||||
software under copyright law.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
For more information, please refer to <https://unlicense.org>
|
||||
@@ -0,0 +1,818 @@
|
||||
{
|
||||
"__inputs": [
|
||||
{
|
||||
"name": "VAR_DS_INFLUXDB",
|
||||
"type": "constant",
|
||||
"label": "InfluxDB DataSource",
|
||||
"value": "InfluxDB-starlinkstats",
|
||||
"description": ""
|
||||
},
|
||||
{
|
||||
"name": "VAR_TBL_STATS",
|
||||
"type": "constant",
|
||||
"label": "Table name for Statistics",
|
||||
"value": "spacex.starlink.user_terminal.status",
|
||||
"description": ""
|
||||
}
|
||||
],
|
||||
"__requires": [
|
||||
{
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "7.3.6"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "graph",
|
||||
"name": "Graph",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "datasource",
|
||||
"id": "influxdb",
|
||||
"name": "InfluxDB",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "table",
|
||||
"name": "Table",
|
||||
"version": ""
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": null,
|
||||
"iteration": 1610413551748,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "$DS_INFLUXDB",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 11,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 4,
|
||||
"legend": {
|
||||
"alignAsTable": true,
|
||||
"avg": true,
|
||||
"current": true,
|
||||
"hideZero": false,
|
||||
"max": true,
|
||||
"min": false,
|
||||
"rightSide": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": true
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "7.3.6",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"groupBy": [],
|
||||
"measurement": "/^$TBL_STATS$/",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"queryType": "randomWalk",
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"downlink_throughput_bps"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"bps Down"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
],
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"uplink_throughput_bps"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"bps Up"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Actual Throughput",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:1099",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"$$hashKey": "object:1100",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "$DS_INFLUXDB",
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 11,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 2,
|
||||
"legend": {
|
||||
"alignAsTable": true,
|
||||
"avg": true,
|
||||
"current": true,
|
||||
"max": true,
|
||||
"min": true,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": true
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null",
|
||||
"options": {
|
||||
"alertThreshold": true
|
||||
},
|
||||
"percentage": false,
|
||||
"pluginVersion": "7.3.6",
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"groupBy": [],
|
||||
"measurement": "/^$TBL_STATS$/",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"queryType": "randomWalk",
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"pop_ping_latency_ms"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"Ping Latency"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
],
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"pop_ping_drop_rate"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"Drop Rate"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
],
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"fraction_obstructed"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"*100"
|
||||
],
|
||||
"type": "math"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"Percent Obstructed"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
],
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"snr"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"*10"
|
||||
],
|
||||
"type": "math"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"SNR"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Ping Latency, Drop Rate, Percent Obstructed & SNR",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"datasource": "$DS_INFLUXDB",
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {
|
||||
"align": null,
|
||||
"filterable": false
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Obstructed"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 105
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Wrong Location"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 114
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Thermal Throttle"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 121
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Thermal Shutdown"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 136
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Motors Stuck"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 116
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Time"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 143
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "State"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 118
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Bad Location"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 122
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Temp Throttle"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 118
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Temp Shutdown"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 134
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Software Version"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 369
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 11
|
||||
},
|
||||
"id": 6,
|
||||
"interval": null,
|
||||
"links": [],
|
||||
"options": {
|
||||
"showHeader": true,
|
||||
"sortBy": [
|
||||
{
|
||||
"desc": true,
|
||||
"displayName": "Time (last)"
|
||||
}
|
||||
]
|
||||
},
|
||||
"pluginVersion": "7.3.6",
|
||||
"targets": [
|
||||
{
|
||||
"groupBy": [],
|
||||
"hide": false,
|
||||
"measurement": "/^$TBL_STATS$/",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"query": "SELECT \"currently_obstructed\" AS \"Obstructed\", \"alert_unexpected_location\" AS \"Wrong Location\", \"alert_thermal_throttle\" AS \"Thermal Throttle\", \"alert_thermal_shutdown\" AS \"Thermal Shutdown\", \"alert_motors_stuck\" AS \"Motors Stuck\", \"state\" AS \"State\" FROM \"spacex.starlink.user_terminal.status\" WHERE $timeFilter",
|
||||
"queryType": "randomWalk",
|
||||
"rawQuery": false,
|
||||
"refId": "A",
|
||||
"resultFormat": "table",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"state"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"State"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
],
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"currently_obstructed"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"Obstructed"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
],
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"alert_unexpected_location"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"Bad Location"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
],
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"alert_thermal_throttle"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"Temp Throttled"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
],
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"alert_thermal_shutdown"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"Temp Shutdown"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
],
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"alert_motors_stuck"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"Motors Stuck"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
],
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"software_version"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"Software Version"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
],
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"hardware_version"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"Hardware Version"
|
||||
],
|
||||
"type": "alias"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Alerts & Versions",
|
||||
"transformations": [
|
||||
{
|
||||
"id": "groupBy",
|
||||
"options": {
|
||||
"fields": {
|
||||
"Bad Location": {
|
||||
"aggregations": [],
|
||||
"operation": "groupby"
|
||||
},
|
||||
"Hardware Version": {
|
||||
"aggregations": [],
|
||||
"operation": "groupby"
|
||||
},
|
||||
"Motors Stuck": {
|
||||
"aggregations": [],
|
||||
"operation": "groupby"
|
||||
},
|
||||
"Obstructed": {
|
||||
"aggregations": [],
|
||||
"operation": "groupby"
|
||||
},
|
||||
"Software Version": {
|
||||
"aggregations": [],
|
||||
"operation": "groupby"
|
||||
},
|
||||
"State": {
|
||||
"aggregations": [],
|
||||
"operation": "groupby"
|
||||
},
|
||||
"Temp Shutdown": {
|
||||
"aggregations": [],
|
||||
"operation": "groupby"
|
||||
},
|
||||
"Temp Throttle": {
|
||||
"aggregations": [],
|
||||
"operation": "groupby"
|
||||
},
|
||||
"Temp Throttled": {
|
||||
"aggregations": [],
|
||||
"operation": "groupby"
|
||||
},
|
||||
"Thermal Shutdown": {
|
||||
"aggregations": [],
|
||||
"operation": "groupby"
|
||||
},
|
||||
"Thermal Throttle": {
|
||||
"aggregations": [],
|
||||
"operation": "groupby"
|
||||
},
|
||||
"Time": {
|
||||
"aggregations": [
|
||||
"last"
|
||||
],
|
||||
"operation": "aggregate"
|
||||
},
|
||||
"Wrong Location": {
|
||||
"aggregations": [],
|
||||
"operation": "groupby"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"type": "table"
|
||||
}
|
||||
],
|
||||
"refresh": false,
|
||||
"schemaVersion": 26,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"current": {
|
||||
"value": "${VAR_DS_INFLUXDB}",
|
||||
"text": "${VAR_DS_INFLUXDB}",
|
||||
"selected": false
|
||||
},
|
||||
"error": null,
|
||||
"hide": 2,
|
||||
"label": "InfluxDB DataSource",
|
||||
"name": "DS_INFLUXDB",
|
||||
"options": [
|
||||
{
|
||||
"value": "${VAR_DS_INFLUXDB}",
|
||||
"text": "${VAR_DS_INFLUXDB}",
|
||||
"selected": false
|
||||
}
|
||||
],
|
||||
"query": "${VAR_DS_INFLUXDB}",
|
||||
"skipUrlSync": false,
|
||||
"type": "constant"
|
||||
},
|
||||
{
|
||||
"current": {
|
||||
"value": "${VAR_TBL_STATS}",
|
||||
"text": "${VAR_TBL_STATS}",
|
||||
"selected": false
|
||||
},
|
||||
"error": null,
|
||||
"hide": 2,
|
||||
"label": "Table name for Statistics",
|
||||
"name": "TBL_STATS",
|
||||
"options": [
|
||||
{
|
||||
"value": "${VAR_TBL_STATS}",
|
||||
"text": "${VAR_TBL_STATS}",
|
||||
"selected": false
|
||||
}
|
||||
],
|
||||
"query": "${VAR_TBL_STATS}",
|
||||
"skipUrlSync": false,
|
||||
"type": "constant"
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-24h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"refresh_intervals": [
|
||||
"5s",
|
||||
"10s",
|
||||
"30s",
|
||||
"1m",
|
||||
"5m",
|
||||
"15m",
|
||||
"30m",
|
||||
"1h",
|
||||
"2h",
|
||||
"1d"
|
||||
]
|
||||
},
|
||||
"timezone": "",
|
||||
"title": "Starlink Statistics",
|
||||
"uid": "ymkHwLaMz",
|
||||
"version": 36
|
||||
}
|
||||
@@ -0,0 +1,675 @@
|
||||
{
|
||||
"__inputs": [
|
||||
{
|
||||
"name": "DS_INFLUXDB",
|
||||
"label": "InfluxDB",
|
||||
"description": "",
|
||||
"type": "datasource",
|
||||
"pluginId": "influxdb",
|
||||
"pluginName": "InfluxDB"
|
||||
},
|
||||
{
|
||||
"name": "VAR_TBL_STATS",
|
||||
"label": "influx",
|
||||
"description": "",
|
||||
"type": "datasource",
|
||||
"pluginId": "influxdb",
|
||||
"pluginName": "InfluxDB"
|
||||
},
|
||||
{
|
||||
"name": "VAR_DS_INFLUXDB",
|
||||
"type": "constant",
|
||||
"label": "InfluxDB DataSource",
|
||||
"value": "InfluxDB-starlinkstats",
|
||||
"description": ""
|
||||
},
|
||||
{
|
||||
"name": "VAR_TBL_STATS",
|
||||
"type": "constant",
|
||||
"label": "Table name for Statistics",
|
||||
"value": "spacex.starlink.user_terminal.status",
|
||||
"description": ""
|
||||
}
|
||||
],
|
||||
"__requires": [
|
||||
{
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "8.2.5"
|
||||
},
|
||||
{
|
||||
"type": "datasource",
|
||||
"id": "influxdb",
|
||||
"name": "InfluxDB",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "table",
|
||||
"name": "Table",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "timeseries",
|
||||
"name": "Time series",
|
||||
"version": ""
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"target": {
|
||||
"limit": 100,
|
||||
"matchAny": false,
|
||||
"tags": [],
|
||||
"type": "dashboard"
|
||||
},
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": null,
|
||||
"iteration": 1637920561166,
|
||||
"links": [],
|
||||
"liveNow": false,
|
||||
"panels": [
|
||||
{
|
||||
"datasource": "${DS_INFLUXDB}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 10,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 1,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": true,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "normal"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "binbps"
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byRegexp",
|
||||
"options": "/(uplink)/m"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Uplink"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "downlink_throughput_bps"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Downlink"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "uplink_throughput_bps"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Uplink"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 11,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 4,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [
|
||||
"mean",
|
||||
"max",
|
||||
"lastNotNull"
|
||||
],
|
||||
"displayMode": "table",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "multi"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.2.5",
|
||||
"targets": [
|
||||
{
|
||||
"hide": false,
|
||||
"query": "from(bucket: \"starlink\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_field\"] == \"downlink_throughput_bps\" or r[\"_field\"] == \"uplink_throughput_bps\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"last\")",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Actual Throughput",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": "${DS_INFLUXDB}",
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 10,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 1,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "auto",
|
||||
"spanNulls": true,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "short"
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "fraction_obstructed"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Fraction Obstruction"
|
||||
},
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "%"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "pop_ping_drop_rate"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Pop Ping Drop Rate"
|
||||
},
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "%"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "pop_ping_latency_ms"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Pop Ping Latency Rate"
|
||||
},
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "ms"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 11,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"id": 2,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [
|
||||
"mean",
|
||||
"lastNotNull",
|
||||
"max",
|
||||
"min"
|
||||
],
|
||||
"displayMode": "table",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "multi"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.2.5",
|
||||
"targets": [
|
||||
{
|
||||
"query": "from(bucket: \"starlink\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_field\"] == \"pop_ping_latency_ms\" or r[\"_field\"] == \"pop_ping_drop_rate\" or r[\"_field\"] == \"fraction_obstructed\" or r[\"_field\"] == \"snr\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"last\")",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Ping Latency, Drop Rate, Percent Obstructed & SNR",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"datasource": "${DS_INFLUXDB}",
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"custom": {
|
||||
"align": null,
|
||||
"displayMode": "auto",
|
||||
"filterable": false
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "alerts"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Alerts"
|
||||
},
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 100
|
||||
},
|
||||
{
|
||||
"id": "custom.align",
|
||||
"value": "left"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "currently_obstructed"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Currently Obstructed"
|
||||
},
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 200
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "hardware_version"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Hardware Revision"
|
||||
},
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 200
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "software_version"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Software Revision"
|
||||
},
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 400
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "state"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "State"
|
||||
},
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "alert_motors_stuck"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Motor Stuck"
|
||||
},
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "alert_unexpected_location"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Unexpected Location"
|
||||
},
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 150
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "alert_thermal_shutdown"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Thermal Shutdown"
|
||||
},
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 140
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "alert_thermal_throttle"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Thermal Throttle"
|
||||
},
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 130
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "uptime"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Uptime"
|
||||
},
|
||||
{
|
||||
"id": "custom.align",
|
||||
"value": "left"
|
||||
},
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "s"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "Time"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 150
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 11
|
||||
},
|
||||
"id": 6,
|
||||
"interval": null,
|
||||
"links": [],
|
||||
"options": {
|
||||
"frameIndex": 0,
|
||||
"showHeader": true,
|
||||
"sortBy": [
|
||||
{
|
||||
"desc": true,
|
||||
"displayName": "Time (last)"
|
||||
}
|
||||
]
|
||||
},
|
||||
"pluginVersion": "8.2.5",
|
||||
"targets": [
|
||||
{
|
||||
"query": "from(bucket: \"starlink\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_field\"] == \"hardware_version\" or r[\"_field\"] == \"state\" or r[\"_field\"] == \"software_version\" or r[\"_field\"] == \"alerts\" or r[\"_field\"] == \"currently_obstructed\" or r[\"_field\"] == \"alert_unexpected_location\" or r[\"_field\"] == \"alert_thermal_throttle\" or r[\"_field\"] == \"alert_thermal_shutdown\" or r[\"_field\"] == \"alert_motors_stuck\" or r[\"_field\"] == \"uptime\" )\n |> yield(name: \"last\")",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Alerts & Versions",
|
||||
"transformations": [
|
||||
{
|
||||
"id": "seriesToColumns",
|
||||
"options": {
|
||||
"byField": "Time"
|
||||
}
|
||||
}
|
||||
],
|
||||
"type": "table"
|
||||
}
|
||||
],
|
||||
"refresh": false,
|
||||
"schemaVersion": 32,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"description": null,
|
||||
"error": null,
|
||||
"hide": 2,
|
||||
"label": "InfluxDB DataSource",
|
||||
"name": "DS_INFLUXDB",
|
||||
"query": "${VAR_DS_INFLUXDB}",
|
||||
"skipUrlSync": false,
|
||||
"type": "constant",
|
||||
"current": {
|
||||
"value": "${VAR_DS_INFLUXDB}",
|
||||
"text": "${VAR_DS_INFLUXDB}",
|
||||
"selected": false
|
||||
},
|
||||
"options": [
|
||||
{
|
||||
"value": "${VAR_DS_INFLUXDB}",
|
||||
"text": "${VAR_DS_INFLUXDB}",
|
||||
"selected": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"description": null,
|
||||
"error": null,
|
||||
"hide": 2,
|
||||
"label": "Table name for Statistics",
|
||||
"name": "TBL_STATS",
|
||||
"query": "${VAR_TBL_STATS}",
|
||||
"skipUrlSync": false,
|
||||
"type": "constant",
|
||||
"current": {
|
||||
"value": "${VAR_TBL_STATS}",
|
||||
"text": "${VAR_TBL_STATS}",
|
||||
"selected": false
|
||||
},
|
||||
"options": [
|
||||
{
|
||||
"value": "${VAR_TBL_STATS}",
|
||||
"text": "${VAR_TBL_STATS}",
|
||||
"selected": false
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-30m",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"refresh_intervals": [
|
||||
"5s",
|
||||
"10s",
|
||||
"30s",
|
||||
"1m",
|
||||
"5m",
|
||||
"15m",
|
||||
"30m",
|
||||
"1h",
|
||||
"2h",
|
||||
"1d"
|
||||
]
|
||||
},
|
||||
"timezone": "",
|
||||
"title": "Starlink Statistics",
|
||||
"uid": "ymkHwLaMz",
|
||||
"version": 12
|
||||
}
|
||||
@@ -0,0 +1,308 @@
|
||||
{
|
||||
"layout": {},
|
||||
"schedule": {
|
||||
"enabled": false,
|
||||
"cronSchedule": "0 0 * * *",
|
||||
"tz": "UTC",
|
||||
"keepLastN": 2
|
||||
},
|
||||
"name": "Starlink Statistics",
|
||||
"description": "This Dashboard is meant to be a clone of the starlink App's Statitics Page",
|
||||
"elements": [
|
||||
{
|
||||
"config": {
|
||||
"markdown": "# Starlink Statistics\n--- \nThis Dashboard is meant to be a clone of the starlink App's Statitics Page. Increase time python script calls API for more accurate results. (Default API Call: 60 seconds)\n",
|
||||
"axis": {}
|
||||
},
|
||||
"id": "1p7z19fum",
|
||||
"layout": {
|
||||
"x": 0,
|
||||
"y": 0,
|
||||
"w": 12,
|
||||
"h": 2
|
||||
},
|
||||
"variant": "markdown",
|
||||
"type": "markdown.default"
|
||||
},
|
||||
{
|
||||
"config": {
|
||||
"markdown": "### What is Latency?\n- Starlink and the Starlink router both send test pings to the internet many times per minute. Latency measures how long, in milliseconds, a request takes to go to the internet and back.\n\n- High latency may impact your experience with online gaming, video calls, and web browsing. It may be caused by extreme weather or periods of high network usage.\n\n",
|
||||
"axis": {}
|
||||
},
|
||||
"id": "84gt5a832",
|
||||
"layout": {
|
||||
"x": 0,
|
||||
"y": 2,
|
||||
"w": 6,
|
||||
"h": 2
|
||||
},
|
||||
"variant": "markdown",
|
||||
"type": "markdown.default"
|
||||
},
|
||||
{
|
||||
"config": {
|
||||
"markdown": "### What is power Draw?\n- Power Draw Measures the average amount of power that Starlink Uses. Starlink will use more power while heating to melt snow.\n\n",
|
||||
"axis": {}
|
||||
},
|
||||
"id": "pyoifapcf",
|
||||
"layout": {
|
||||
"x": 6,
|
||||
"y": 2,
|
||||
"w": 6,
|
||||
"h": 2
|
||||
},
|
||||
"variant": "markdown",
|
||||
"type": "markdown.default"
|
||||
},
|
||||
{
|
||||
"config": {
|
||||
"onClickAction": {
|
||||
"type": "None"
|
||||
},
|
||||
"style": true,
|
||||
"applyThreshold": false,
|
||||
"colorThresholds": {
|
||||
"thresholds": [
|
||||
{
|
||||
"color": "#45850B",
|
||||
"threshold": 30
|
||||
},
|
||||
{
|
||||
"color": "#EFDB23",
|
||||
"threshold": 70
|
||||
},
|
||||
{
|
||||
"color": "#B20000",
|
||||
"threshold": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
"axis": {
|
||||
"xAxis": "avg_mean_full_ping_latency",
|
||||
"yAxis": [
|
||||
"avg_mean_full_ping_latency"
|
||||
]
|
||||
},
|
||||
"decimals": 2,
|
||||
"suffix": " ms"
|
||||
},
|
||||
"search": {
|
||||
"type": "inline",
|
||||
"query": "dataset=\"starlink\" sourcetype in (\"starlink:ping_latency\") | extract parser=json_parser | summarize avg_mean_full_ping_latency=avg(mean_full_ping_latency) ",
|
||||
"earliest": "-15m",
|
||||
"latest": "now"
|
||||
},
|
||||
"id": "kfntldnby",
|
||||
"layout": {
|
||||
"x": 0,
|
||||
"y": 4,
|
||||
"w": 6,
|
||||
"h": 3
|
||||
},
|
||||
"type": "counter.single",
|
||||
"title": "Average Mean Full Ping Latency - Last 15 Min"
|
||||
},
|
||||
{
|
||||
"config": {
|
||||
"onClickAction": {
|
||||
"type": "None"
|
||||
},
|
||||
"style": true,
|
||||
"applyThreshold": false,
|
||||
"colorThresholds": {
|
||||
"thresholds": [
|
||||
{
|
||||
"color": "#45850B",
|
||||
"threshold": 30
|
||||
},
|
||||
{
|
||||
"color": "#EFDB23",
|
||||
"threshold": 70
|
||||
},
|
||||
{
|
||||
"color": "#B20000",
|
||||
"threshold": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
"axis": {
|
||||
"xAxis": "avg_mean_power",
|
||||
"yAxis": [
|
||||
"avg_mean_power"
|
||||
]
|
||||
},
|
||||
"decimals": 2,
|
||||
"suffix": " Watts"
|
||||
},
|
||||
"search": {
|
||||
"type": "inline",
|
||||
"query": "dataset=\"starlink\" sourcetype=\"starlink:power\" | extract parser=json_parser | summarize avg_mean_power=avg(mean_power)",
|
||||
"earliest": "-15m",
|
||||
"latest": "now"
|
||||
},
|
||||
"id": "7o73dimso",
|
||||
"layout": {
|
||||
"x": 6,
|
||||
"y": 4,
|
||||
"w": 6,
|
||||
"h": 3
|
||||
},
|
||||
"type": "counter.single",
|
||||
"title": "Power Draw Average - Last 15 Min"
|
||||
},
|
||||
{
|
||||
"config": {
|
||||
"colorPalette": 0,
|
||||
"colorPaletteReversed": false,
|
||||
"customData": {
|
||||
"trellis": false,
|
||||
"connectNulls": "Leave gaps",
|
||||
"stack": false,
|
||||
"seriesCount": 1
|
||||
},
|
||||
"xAxis": {
|
||||
"labelOrientation": 0,
|
||||
"position": "Bottom"
|
||||
},
|
||||
"yAxis": {
|
||||
"position": "Left",
|
||||
"scale": "Linear",
|
||||
"splitLine": true,
|
||||
"interval": 2,
|
||||
"min": 20,
|
||||
"max": 35
|
||||
},
|
||||
"axis": {
|
||||
"yAxis": [
|
||||
"values_ping_latency"
|
||||
],
|
||||
"yAxisExcluded": [
|
||||
"_time"
|
||||
]
|
||||
},
|
||||
"legend": {
|
||||
"position": "Right",
|
||||
"truncate": true
|
||||
},
|
||||
"onClickAction": {
|
||||
"type": "None"
|
||||
},
|
||||
"seriesInfo": {
|
||||
"values_ping_latency": {
|
||||
"type": "column"
|
||||
},
|
||||
"_time": {}
|
||||
}
|
||||
},
|
||||
"search": {
|
||||
"type": "inline",
|
||||
"query": "dataset=\"starlink\" sourcetype in (\"starlink:ping_latency\") | extract parser=json_parser | timestats values(mean_full_ping_latency) ",
|
||||
"earliest": "-15m",
|
||||
"latest": "now"
|
||||
},
|
||||
"id": "n5lu6hhw0",
|
||||
"layout": {
|
||||
"x": 0,
|
||||
"y": 7,
|
||||
"w": 6,
|
||||
"h": 5
|
||||
},
|
||||
"type": "chart.column",
|
||||
"hidePanel": false,
|
||||
"title": "Ping Latency - Last 15 Min"
|
||||
},
|
||||
{
|
||||
"config": {
|
||||
"colorPalette": 1,
|
||||
"colorPaletteReversed": false,
|
||||
"customData": {
|
||||
"trellis": false,
|
||||
"connectNulls": "Leave gaps",
|
||||
"stack": false,
|
||||
"seriesCount": 1
|
||||
},
|
||||
"xAxis": {
|
||||
"labelOrientation": 0,
|
||||
"position": "Bottom"
|
||||
},
|
||||
"yAxis": {
|
||||
"position": "Left",
|
||||
"scale": "Linear",
|
||||
"splitLine": true,
|
||||
"min": 25,
|
||||
"max": 70,
|
||||
"interval": 5
|
||||
},
|
||||
"axis": {
|
||||
"yAxis": [
|
||||
"values_latest_power"
|
||||
],
|
||||
"yAxisExcluded": [
|
||||
"_time"
|
||||
]
|
||||
},
|
||||
"legend": {
|
||||
"position": "Top",
|
||||
"truncate": true
|
||||
},
|
||||
"onClickAction": {
|
||||
"type": "None"
|
||||
},
|
||||
"seriesInfo": {
|
||||
"_time": {
|
||||
"color": "#29bd00"
|
||||
},
|
||||
"values_latest_power": {
|
||||
"color": "#369900",
|
||||
"type": "area"
|
||||
}
|
||||
}
|
||||
},
|
||||
"search": {
|
||||
"type": "inline",
|
||||
"query": "dataset=\"starlink\" sourcetype=\"starlink:power\" | extract parser=json_parser | timestats values(latest_power)",
|
||||
"earliest": "-15m",
|
||||
"latest": "now"
|
||||
},
|
||||
"id": "20ekij4vo",
|
||||
"layout": {
|
||||
"x": 6,
|
||||
"y": 7,
|
||||
"w": 6,
|
||||
"h": 5
|
||||
},
|
||||
"type": "chart.column",
|
||||
"title": "Power Draw - Last 15 Min"
|
||||
},
|
||||
{
|
||||
"config": {
|
||||
"markdown": "## What is ping success?\n- Starlink and the Starlink router both send test pings to the internet many times per minute. It is normal for some pings to be dropped, and your connection to the internet to remain unaffected.",
|
||||
"axis": {}
|
||||
},
|
||||
"id": "2o01xt5al",
|
||||
"layout": {
|
||||
"x": 0,
|
||||
"y": 12,
|
||||
"w": 6,
|
||||
"h": 2
|
||||
},
|
||||
"variant": "markdown",
|
||||
"type": "markdown.default"
|
||||
},
|
||||
{
|
||||
"config": {
|
||||
"markdown": "## What is throughput?\n- 'Download' and 'Upload' measure the amount of data that your Starlink is downloading from or uploading to the internet. Download a large file or run a speed test to watch it jump!",
|
||||
"axis": {}
|
||||
},
|
||||
"id": "hwr5nirfk",
|
||||
"layout": {
|
||||
"x": 6,
|
||||
"y": 12,
|
||||
"w": 5,
|
||||
"h": 2
|
||||
},
|
||||
"variant": "markdown",
|
||||
"type": "markdown.default"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,142 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Check whether there is a software update pending on a Starlink user terminal.
|
||||
|
||||
Optionally, reboot the dish to initiate install if there is an update pending.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
from datetime import datetime
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
|
||||
import grpc
|
||||
|
||||
import loop_util
|
||||
import starlink_grpc
|
||||
|
||||
# This is the enum value spacex_api.device.dish_pb2.SoftwareUpdateState.REBOOT_REQUIRED
|
||||
REBOOT_REQUIRED = 6
|
||||
# This is the enum value spacex_api.device.dish_pb2.SoftwareUpdateState.DISABLED
|
||||
UPDATE_DISABLED = 7
|
||||
|
||||
|
||||
def loop_body(opts, context):
    """Run one iteration of the update-check loop.

    Queries dish status, decides whether a software update install is
    pending using several redundant protocol flags, and optionally reboots
    the dish to initiate the install.

    Args:
        opts: Parsed command line options (argparse Namespace).
        context: A starlink_grpc.ChannelContext to use for dish queries.

    Returns:
        int: 0 on success, 1 on communication failure.
    """
    now = time.time()

    try:
        status = starlink_grpc.get_status(context)
    except (AttributeError, ValueError, grpc.RpcError) as e:
        logging.error("Failed getting dish status: %s", str(starlink_grpc.GrpcError(e)))
        return 1

    # There are at least 3 and maybe 4 redundant flags that indicate whether or
    # not a software update is pending. In order to be robust against future
    # changes in the protocol and/or implementation of it, this scripts checks
    # them all, while allowing for the possibility that some of them have been
    # obsoleted and thus no longer present in the reflected protocol classes.

    try:
        alert_flag = status.alerts.install_pending
    except (AttributeError, ValueError):
        alert_flag = None

    try:
        state_flag = status.software_update_state == REBOOT_REQUIRED
        state_dflag = status.software_update_state == UPDATE_DISABLED
    except (AttributeError, ValueError):
        state_flag = None
        state_dflag = None

    try:
        stats_flag = status.software_update_stats.software_update_state == REBOOT_REQUIRED
        stats_dflag = status.software_update_stats.software_update_state == UPDATE_DISABLED
    except (AttributeError, ValueError):
        stats_flag = None
        stats_dflag = None

    try:
        ready_flag = status.swupdate_reboot_ready
    except (AttributeError, ValueError):
        ready_flag = None

    try:
        sw_version = status.device_info.software_version
    except (AttributeError, ValueError):
        sw_version = "UNKNOWN"

    if opts.verbose >= 2:
        print("Pending flags:", alert_flag, state_flag, stats_flag, ready_flag)
        print("Disable flags:", state_dflag, stats_dflag)

    if state_dflag or stats_dflag:
        logging.warning("Software updates appear to be disabled")

    # The swupdate_reboot_ready field does not appear to be in use, so may
    # mean something other than what it sounds like. Only use it if none of
    # the others are available.
    if alert_flag is None and state_flag is None and stats_flag is None:
        install_pending = bool(ready_flag)
    else:
        install_pending = alert_flag or state_flag or stats_flag

    if opts.verbose:
        # NOTE(review): assumes opts may carry a "timezone" attribute; falls
        # back to local time when it is absent — confirm against loop_util.
        dtnow = datetime.fromtimestamp(now, tz=getattr(opts, "timezone", None))
        print(dtnow.replace(microsecond=0, tzinfo=None).isoformat(), "- ", end="")

    if install_pending:
        print("Install pending, current version:", sw_version)
        if opts.install:
            print("Rebooting dish to initiate install")
            try:
                starlink_grpc.reboot(context)
            except starlink_grpc.GrpcError as e:
                logging.error("Failed reboot request: %s", str(e))
                return 1
    elif opts.verbose:
        print("No install pending, current version:", sw_version)

    return 0
|
||||
|
||||
|
||||
def parse_args():
    """Build the command line parser, parse sys.argv, and return validated options."""
    arg_parser = argparse.ArgumentParser(
        description="Check for Starlink user terminal software update")
    arg_parser.add_argument(
        "-i", "--install", action="store_true",
        help="Initiate dish reboot to perform install if there is an update pending")
    arg_parser.add_argument(
        "-g", "--target",
        help="host:port of dish to query, default is the standard IP address "
        "and port (192.168.100.1:9200)")
    arg_parser.add_argument(
        "-v", "--verbose", action="count", default=0,
        help="Increase verbosity, may be used multiple times")
    # Shared looping options (interval, etc.) come from loop_util.
    loop_util.add_args(arg_parser)

    parsed = arg_parser.parse_args()
    loop_util.check_args(parsed, arg_parser)
    return parsed
|
||||
|
||||
|
||||
def main():
    """Entry point: run the update-check loop against the dish and exit with its status."""
    options = parse_args()

    logging.basicConfig(format="%(levelname)s: %(message)s")

    channel_context = starlink_grpc.ChannelContext(target=options.target)
    try:
        exit_code = loop_util.run_loop(options, loop_body, options, channel_context)
    finally:
        # Always release the gRPC channel, even if the loop raises.
        channel_context.close()

    sys.exit(exit_code)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,445 @@
|
||||
"""Shared code among the dish_grpc_* commands
|
||||
|
||||
Note:
|
||||
|
||||
This module is not intended to be generically useful or to export a stable
|
||||
interface. Rather, it should be considered an implementation detail of the
|
||||
other scripts, and will change as needed.
|
||||
|
||||
For a module that exports an interface intended for general use, see
|
||||
starlink_grpc.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
from datetime import datetime
|
||||
from datetime import timezone
|
||||
import logging
|
||||
import re
|
||||
import time
|
||||
from typing import List
|
||||
|
||||
import grpc
|
||||
|
||||
import starlink_grpc
|
||||
|
||||
BRACKETS_RE = re.compile(r"([^[]*)(\[((\d+),|)(\d*)\]|)$")
|
||||
LOOP_TIME_DEFAULT = 0
|
||||
STATUS_MODES: List[str] = ["status", "obstruction_detail", "alert_detail", "location"]
|
||||
HISTORY_STATS_MODES: List[str] = [
|
||||
"ping_drop", "ping_run_length", "ping_latency", "ping_loaded_latency", "usage", "power"
|
||||
]
|
||||
UNGROUPED_MODES: List[str] = []
|
||||
|
||||
|
||||
def create_arg_parser(output_description, bulk_history=True):
    """Create an argparse parser and add the common command line options.

    Args:
        output_description (str): Completes the sentence "Collect status
            and/or history data from a Starlink user terminal and ..." in
            the parser description.
        bulk_history (bool): Whether the calling script supports bulk
            history mode; affects the help text wording and is recorded on
            the parser for run_arg_parser.

    Returns:
        An argparse.ArgumentParser with the common option groups added.
    """
    parser = argparse.ArgumentParser(
        description="Collect status and/or history data from a Starlink user terminal and " +
        output_description,
        epilog="Additional arguments can be read from a file by including @FILENAME as an "
        "option, where FILENAME is a path to a file that contains arguments, one per line.",
        fromfile_prefix_chars="@",
        add_help=False)

    # need to remember this for later
    parser.bulk_history = bulk_history

    group = parser.add_argument_group(title="General options")
    group.add_argument("-g",
                       "--target",
                       help="host:port of dish to query, default is the standard IP address "
                       "and port (192.168.100.1:9200)")
    # add_help=False above frees the short options for reuse; help is re-added here
    group.add_argument("-h", "--help", action="help", help="Be helpful")
    group.add_argument("-N",
                       "--numeric",
                       action="store_true",
                       help="Record boolean values as 1 and 0 instead of True and False")
    group.add_argument("-t",
                       "--loop-interval",
                       type=float,
                       default=float(LOOP_TIME_DEFAULT),
                       help="Loop interval in seconds or 0 for no loop, default: " +
                       str(LOOP_TIME_DEFAULT))
    group.add_argument("-v", "--verbose", action="store_true", help="Be verbose")

    group = parser.add_argument_group(title="History mode options")
    group.add_argument("-a",
                       "--all-samples",
                       action="store_const",
                       const=-1,
                       dest="samples",
                       help="Parse all valid samples")
    group.add_argument("-o",
                       "--poll-loops",
                       type=int,
                       help="Poll history for N loops and aggregate data before computing history "
                       "stats; this allows for a smaller loop interval with less loss of data "
                       "when the dish reboots",
                       metavar="N")
    # Help text differs slightly depending on whether bulk history mode exists
    if bulk_history:
        sample_help = ("Number of data samples to parse; normally applies to first loop "
                       "iteration only, default: all in bulk mode, loop interval if loop "
                       "interval set, else all available samples")
        no_counter_help = ("Don't track sample counter across loop iterations in non-bulk "
                           "modes; keep using samples option value instead")
    else:
        sample_help = ("Number of data samples to parse; normally applies to first loop "
                       "iteration only, default: loop interval, if set, else all available " +
                       "samples")
        no_counter_help = ("Don't track sample counter across loop iterations; keep using "
                           "samples option value instead")
    group.add_argument("-s", "--samples", type=int, help=sample_help)
    group.add_argument("-j", "--no-counter", action="store_true", help=no_counter_help)

    return parser
|
||||
|
||||
|
||||
def run_arg_parser(parser, need_id=False, no_stdout_errors=False, modes=None):
    """Run parse_args on a parser previously created with create_arg_parser

    Args:
        parser: The argparse parser returned by create_arg_parser.
        need_id (bool): A flag to set in options to indicate whether or not to
            set dish_id on the global state object; see get_data for more
            detail.
        no_stdout_errors (bool): A flag set in options to protect stdout from
            error messages, in case that's where the data output is going, so
            may be being redirected to a file.
        modes (list[str]): Optionally provide the subset of data group modes
            to allow.

    Returns:
        An argparse Namespace object with the parsed options set as attributes.
    """
    if modes is None:
        modes = STATUS_MODES + HISTORY_STATS_MODES + UNGROUPED_MODES
    else:
        # Copy so the append below does not mutate a caller-supplied list
        # (previously the caller's list grew a "bulk_history" entry per call).
        modes = list(modes)
    if parser.bulk_history:
        modes.append("bulk_history")
    parser.add_argument("mode",
                        nargs="+",
                        choices=modes,
                        help="The data group to record, one or more of: " + ", ".join(modes),
                        metavar="mode")

    opts = parser.parse_args()

    # --poll-loops is only meaningful when looping; normalize to 1 otherwise
    if opts.loop_interval <= 0.0 or opts.poll_loops is None:
        opts.poll_loops = 1
    elif opts.poll_loops < 2:
        parser.error("Poll loops arg must be 2 or greater to be meaningful")

    # for convenience, set flags for whether any mode in a group is selected
    status_set = set(STATUS_MODES)
    opts.status_mode = bool(status_set.intersection(opts.mode))
    status_set.remove("location")
    # special group for any status mode other than location
    opts.pure_status_mode = bool(status_set.intersection(opts.mode))
    opts.history_stats_mode = bool(set(HISTORY_STATS_MODES).intersection(opts.mode))
    opts.bulk_mode = "bulk_history" in opts.mode

    if opts.samples is None:
        opts.samples = int(opts.loop_interval) if opts.loop_interval >= 1.0 else -1
        opts.bulk_samples = -1
    else:
        # for scripts that query starting history counter, skip it if samples
        # was explicitly set
        # NOTE(review): skip_query is only set on this branch; consumers
        # should read it with getattr(opts, "skip_query", False).
        opts.skip_query = True
        opts.bulk_samples = opts.samples

    opts.no_stdout_errors = no_stdout_errors
    opts.need_id = need_id

    return opts
|
||||
|
||||
|
||||
def conn_error(opts, msg, *args):
    """Report a connection problem through the appropriate channel."""
    # Connection errors inside an interval loop are routine rather than
    # critical, so they go to stdout — unless stdout is reserved for data
    # output, in which case (and for one-shot runs) they are logged instead.
    use_stdout = opts.loop_interval > 0.0 and not opts.no_stdout_errors
    if use_stdout:
        print(msg % args)
    else:
        logging.error(msg, *args)
|
||||
|
||||
|
||||
class GlobalState:
    """A class for keeping state across loop iterations."""
    def __init__(self, target=None):
        # counter, timestamp for bulk_history:
        self.counter = None
        self.timestamp = None
        # counter, timestamp for history stats:
        self.counter_stats = None
        self.timestamp_stats = None
        # dish ID, filled in lazily when opts.need_id is set; see get_data
        self.dish_id = None
        # reusable gRPC channel context shared by all dish queries
        self.context = starlink_grpc.ChannelContext(target=target)
        # number of --poll-loops iterations completed since last stats output
        self.poll_count = 0
        # history data accumulated across polled loops (see get_history_stats)
        self.accum_history = None
        # True until the first poll completes; used to resync the loop count
        self.first_poll = True
        # warn only once if location data is not enabled on the dish
        self.warn_once_location = True

    def shutdown(self):
        # Release the shared gRPC channel; call when done with this object.
        self.context.close()
|
||||
|
||||
|
||||
def get_data(opts, gstate, add_item, add_sequence, add_bulk=None, flush_history=False):
    """Fetch data from the dish, pull it apart and call back with the pieces.

    This function uses call backs to return the useful data. If need_id is set
    in opts, then it is guaranteed that dish_id will have been set in gstate
    prior to any of the call backs being invoked.

    Args:
        opts (object): The options object returned from run_arg_parser.
        gstate (GlobalState): An object for keeping track of state across
            multiple calls.
        add_item (function): Call back for non-sequence data, with prototype:

            add_item(name, value, category)
        add_sequence (function): Call back for sequence data, with prototype:

            add_sequence(name, value, category, start_index_label)
        add_bulk (function): Optional. Call back for bulk history data, with
            prototype:

            add_bulk(bulk_data, count, start_timestamp, start_counter)
        flush_history (bool): Optional. If true, run in a special mode that
            emits (only) history stats for already polled data, if any,
            regardless of --poll-loops state. Intended for script shutdown
            operation, in order to flush stats for polled history data which
            would otherwise be lost on script restart.

    Returns:
        Tuple with 3 values. The first value is 1 if there were any failures
        getting data from the dish, otherwise 0. The second value is an int
        timestamp for status data (data with category "status"), or None if
        no status data was reported. The third value is an int timestamp for
        history stats data (non-bulk data with category other than "status"),
        or None if no history stats data was reported.
    """
    # Nothing can have been accumulated unless polling was enabled.
    if flush_history and opts.poll_loops < 2:
        return 0, None, None

    rc = 0
    status_ts = None
    hist_ts = None

    if not flush_history:
        rc, status_ts = get_status_data(opts, gstate, add_item, add_sequence)

    # History stats still run after a status failure when polling, so
    # accumulated data is not lost to a transient outage.
    if opts.history_stats_mode and (not rc or opts.poll_loops > 1):
        hist_rc, hist_ts = get_history_stats(opts, gstate, add_item, add_sequence, flush_history)
        if not rc:
            rc = hist_rc

    if not flush_history and opts.bulk_mode and add_bulk and not rc:
        rc = get_bulk_data(opts, gstate, add_bulk)

    return rc, status_ts, hist_ts
|
||||
|
||||
|
||||
def add_data_normal(data, category, add_item, add_sequence):
    """Dispatch each entry of data to the scalar or sequence call back.

    Keys matching the bracket pattern (e.g. "name[3,]") are sequences and go
    to add_sequence with their start index; plain keys go to add_item.
    """
    for key in data:
        value = data[key]
        match = BRACKETS_RE.match(key)
        name, start, seq = match.group(1), match.group(4), match.group(5)
        if seq is not None:
            start_index = int(start) if start else 0
            add_sequence(name, value, category, start_index)
        else:
            add_item(name, value, category)
|
||||
|
||||
|
||||
def add_data_numeric(data, category, add_item, add_sequence):
    """Like add_data_normal, but coerces int-typed values through int().

    Because bool is a subclass of int, this converts True/False to 1/0 for
    both scalar items and the elements of sequence values; all other types
    pass through unchanged.
    """
    for key, value in data.items():
        match = BRACKETS_RE.match(key)
        name, start, seq = match.group(1, 4, 5)
        if seq is None:
            coerced = int(value) if isinstance(value, int) else value
            add_item(name, coerced, category)
        else:
            coerced_seq = [int(elem) if isinstance(elem, int) else elem for elem in value]
            add_sequence(name, coerced_seq, category, int(start) if start else 0)
|
||||
|
||||
|
||||
def get_status_data(opts, gstate, add_item, add_sequence):
    """Fetch status/location data and report it via the call backs.

    Also lazily fills in gstate.dish_id when opts.need_id is set, either from
    the status data or via a dedicated ID query when no status mode is active.

    Returns:
        Tuple of (rc, timestamp): rc is 1 on failure else 0; timestamp is an
        int for reported status data, or None if none was reported.
    """
    if opts.status_mode:
        timestamp = int(time.time())
        add_data = add_data_numeric if opts.numeric else add_data_normal
        if opts.pure_status_mode or opts.need_id and gstate.dish_id is None:
            try:
                groups = starlink_grpc.status_data(context=gstate.context)
                status_data, obstruct_detail, alert_detail = groups[0:3]
            except starlink_grpc.GrpcError as e:
                # An unreachable dish is still recorded as a state in plain
                # status mode, but only if the dish ID is already known.
                if "status" in opts.mode:
                    if opts.need_id and gstate.dish_id is None:
                        conn_error(opts, "Dish unreachable and ID unknown, so not recording state")
                        return 1, None
                    if opts.verbose:
                        print("Dish unreachable")
                    add_item("state", "DISH_UNREACHABLE", "status")
                    return 0, timestamp
                conn_error(opts, "Failure getting status: %s", str(e))
                return 1, None
            if opts.need_id:
                # Capture the dish ID, but keep it out of the reported data.
                gstate.dish_id = status_data["id"]
                del status_data["id"]
            if "status" in opts.mode:
                add_data(status_data, "status", add_item, add_sequence)
            if "obstruction_detail" in opts.mode:
                add_data(obstruct_detail, "status", add_item, add_sequence)
            if "alert_detail" in opts.mode:
                add_data(alert_detail, "status", add_item, add_sequence)
        if "location" in opts.mode:
            try:
                location = starlink_grpc.location_data(context=gstate.context)
            except starlink_grpc.GrpcError as e:
                conn_error(opts, "Failure getting location: %s", str(e))
                return 1, None
            # A None latitude indicates location access is disabled on the
            # dish; warn about it once only.
            if location["latitude"] is None and gstate.warn_once_location:
                logging.warning("Location data not enabled. See README for more details.")
                gstate.warn_once_location = False
            add_data(location, "status", add_item, add_sequence)
        return 0, timestamp
    elif opts.need_id and gstate.dish_id is None:
        # No status mode selected, but the dish ID is still required.
        try:
            gstate.dish_id = starlink_grpc.get_id(context=gstate.context)
        except starlink_grpc.GrpcError as e:
            conn_error(opts, "Failure getting dish ID: %s", str(e))
            return 1, None
        if opts.verbose:
            print("Using dish ID: " + gstate.dish_id)

    return 0, None
|
||||
|
||||
|
||||
def get_history_stats(opts, gstate, add_item, add_sequence, flush_history):
    """Fetch history stats. See `get_data` for details.

    Accumulates polled history across loop iterations in gstate.accum_history
    and only computes/reports stats once opts.poll_loops iterations have
    completed (or immediately when flush_history is set).

    Returns:
        Tuple of (rc, timestamp): rc is 1 on failure else 0; timestamp is an
        int for reported stats data, or None if none was reported this call.
    """
    if flush_history or (opts.need_id and gstate.dish_id is None):
        history = None
    else:
        try:
            timestamp = int(time.time())
            history = starlink_grpc.get_history(context=gstate.context)
            gstate.timestamp_stats = timestamp
        except (AttributeError, ValueError, grpc.RpcError) as e:
            conn_error(opts, "Failure getting history: %s", str(starlink_grpc.GrpcError(e)))
            history = None

    # -1 means "all samples"; only limit parsing on the very first fetch.
    parse_samples = opts.samples if gstate.counter_stats is None else -1
    start = gstate.counter_stats if gstate.counter_stats else None

    # Accumulate polled history data into gstate.accum_history, even if there
    # was a dish reboot.
    if gstate.accum_history:
        if history is not None:
            gstate.accum_history = starlink_grpc.concatenate_history(gstate.accum_history,
                                                                     history,
                                                                     samples1=parse_samples,
                                                                     start1=start,
                                                                     verbose=opts.verbose)
            # Counter tracking gets too complicated to handle across reboots
            # once the data has been accumulated, so just have concatenate
            # handle it on the first polled loop and use a value of 0 to
            # remember it was done (as opposed to None, which is used for a
            # different purpose).
            if not opts.no_counter:
                gstate.counter_stats = 0
    else:
        gstate.accum_history = history

    # When resuming from prior count with --poll-loops set, advance the loop
    # count by however many loops worth of data was caught up on. This helps
    # avoid abnormally large sample counts in the first set of output data.
    if gstate.first_poll and gstate.accum_history:
        if opts.poll_loops > 1 and gstate.counter_stats:
            new_samples = gstate.accum_history.current - gstate.counter_stats
            if new_samples < 0:
                new_samples = gstate.accum_history.current
            if new_samples > len(gstate.accum_history.pop_ping_drop_rate):
                new_samples = len(gstate.accum_history.pop_ping_drop_rate)
            gstate.poll_count = max(gstate.poll_count, int((new_samples-1) / opts.loop_interval))
        gstate.first_poll = False

    # Not yet at the last poll loop: just count this iteration and return.
    if gstate.poll_count < opts.poll_loops - 1 and not flush_history:
        gstate.poll_count += 1
        return 0, None

    gstate.poll_count = 0

    if gstate.accum_history is None:
        # Having nothing to flush is not an error.
        return (0, None) if flush_history else (1, None)

    groups = starlink_grpc.history_stats(parse_samples,
                                         start=start,
                                         verbose=opts.verbose,
                                         history=gstate.accum_history)
    general, ping, runlen, latency, loaded, usage, power = groups[0:7]
    add_data = add_data_numeric if opts.numeric else add_data_normal
    # General (sample counter) info is always reported with the stats.
    add_data(general, "ping_stats", add_item, add_sequence)
    if "ping_drop" in opts.mode:
        add_data(ping, "ping_stats", add_item, add_sequence)
    if "ping_run_length" in opts.mode:
        add_data(runlen, "ping_stats", add_item, add_sequence)
    if "ping_latency" in opts.mode:
        add_data(latency, "ping_stats", add_item, add_sequence)
    if "ping_loaded_latency" in opts.mode:
        add_data(loaded, "ping_stats", add_item, add_sequence)
    if "usage" in opts.mode:
        add_data(usage, "usage", add_item, add_sequence)
    if "power" in opts.mode:
        add_data(power, "power", add_item, add_sequence)
    if not opts.no_counter:
        gstate.counter_stats = general["end_counter"]

    # Reset accumulation state now that the stats have been emitted.
    timestamp = gstate.timestamp_stats
    gstate.timestamp_stats = None
    gstate.accum_history = None

    return 0, timestamp
|
||||
|
||||
|
||||
def get_bulk_data(opts, gstate, add_bulk):
    """Fetch bulk data. See `get_data` for details.

    Tracks the history sample counter and a wall-clock time base across calls
    in gstate, re-syncing the time base whenever the counter or clock drifts.

    Returns:
        int: 1 on failure getting history data, else 0.
    """
    before = time.time()

    start = gstate.counter
    parse_samples = opts.bulk_samples if start is None else -1
    try:
        general, bulk = starlink_grpc.history_bulk_data(parse_samples,
                                                        start=start,
                                                        verbose=opts.verbose,
                                                        context=gstate.context)
    except starlink_grpc.GrpcError as e:
        conn_error(opts, "Failure getting history: %s", str(e))
        return 1

    after = time.time()
    parsed_samples = general["samples"]
    new_counter = general["end_counter"]
    timestamp = gstate.timestamp
    # check this first, so it doesn't report as lost time sync
    if gstate.counter is not None and new_counter != gstate.counter + parsed_samples:
        timestamp = None
    # Allow up to 2 seconds of time drift before forcibly re-syncing, since
    # +/- 1 second can happen just due to scheduler timing.
    # NOTE(review): this arithmetic treats samples as 1 per second — confirm
    # against the history data spec before changing.
    if timestamp is not None and not before - 2.0 <= timestamp + parsed_samples <= after + 2.0:
        if opts.verbose:
            print("Lost sample time sync at: " +
                  str(datetime.fromtimestamp(timestamp + parsed_samples, tz=timezone.utc)))
        timestamp = None
    if timestamp is None:
        timestamp = int(before)
        if opts.verbose:
            print("Establishing new time base: {0} -> {1}".format(
                new_counter, datetime.fromtimestamp(timestamp, tz=timezone.utc)))
        timestamp -= parsed_samples

    if opts.numeric:
        # Coerce int-typed values (including bools, which subclass int)
        # through int() before handing them to the call back.
        add_bulk(
            {
                k: [int(subv) if isinstance(subv, int) else subv for subv in v]
                for k, v in bulk.items()
            }, parsed_samples, timestamp, new_counter - parsed_samples)
    else:
        add_bulk(bulk, parsed_samples, timestamp, new_counter - parsed_samples)

    gstate.counter = new_counter
    gstate.timestamp = timestamp + parsed_samples
    return 0
|
||||
135
backup-from-device/gnss-guard/tm-gnss-guard/starlink-grpc-tools/dish_control.py
Executable file
135
backup-from-device/gnss-guard/tm-gnss-guard/starlink-grpc-tools/dish_control.py
Executable file
@@ -0,0 +1,135 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Manipulate operating state of a Starlink user terminal."""
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import sys
|
||||
|
||||
import grpc
|
||||
from yagrc import reflector as yagrc_reflector
|
||||
|
||||
import loop_util
|
||||
|
||||
|
||||
def parse_args():
    """Parse command line arguments for the dish control tool.

    Builds an argparse parser with one subcommand per supported dish
    operation, validates the set_sleep schedule arguments, and runs the
    shared loop_util argument checks.

    Returns:
        The parsed argparse Namespace.
    """
    parser = argparse.ArgumentParser(description="Starlink user terminal state control")
    parser.add_argument("-e",
                        "--target",
                        default="192.168.100.1:9200",
                        help="host:port of dish to query, default is the standard IP address "
                        "and port (192.168.100.1:9200)")

    commands = parser.add_subparsers(dest="command", required=True)
    commands.add_parser("reboot", help="Reboot the user terminal")
    commands.add_parser("stow", help="Set user terminal to stow position")
    commands.add_parser("unstow", help="Restore user terminal from stow position")

    sleep_parser = commands.add_parser(
        "set_sleep",
        help="Show, set, or disable power save configuration",
        description="Run without arguments to show current configuration")
    sleep_parser.add_argument("start",
                              nargs="?",
                              type=int,
                              help="Start time in minutes past midnight UTC")
    sleep_parser.add_argument("duration",
                              nargs="?",
                              type=int,
                              help="Duration in minutes, or 0 to disable")

    gps_parser = commands.add_parser(
        "set_gps",
        help="Enable, disable, or show usage of GPS for position data",
        description="Run without arguments to show current configuration")
    gps_parser.add_argument("--enable",
                            action=argparse.BooleanOptionalAction,
                            help="Enable/disable use of GPS for position data")

    loop_util.add_args(parser)

    opts = parser.parse_args()

    # A start time without a duration is ambiguous, so reject it, then
    # range-check both schedule values (minutes within a single day).
    if opts.command == "set_sleep" and opts.start is not None:
        if opts.duration is None:
            sleep_parser.error("Must specify duration if start time is specified")
        if not 0 <= opts.start < 1440:
            sleep_parser.error("Invalid start time, must be >= 0 and < 1440")
        if not 0 <= opts.duration <= 1440:
            sleep_parser.error("Invalid duration, must be >= 0 and <= 1440")

    loop_util.check_args(opts, parser)

    return opts
|
||||
|
||||
|
||||
def loop_body(opts):
    """Execute one iteration of the requested dish control command.

    Connects to the dish over gRPC using reflection to discover the
    service, sends the request matching opts.command, and prints current
    configuration for the query-style invocations (set_sleep / set_gps
    with no arguments).

    Args:
        opts: Parsed command line options from parse_args().

    Returns:
        0 on success, 1 on communication or protocol failure.
    """

    def build_request(request_class):
        # Map the selected subcommand onto the matching protobuf request.
        if opts.command == "reboot":
            return request_class(reboot={})
        if opts.command == "stow":
            return request_class(dish_stow={})
        if opts.command == "unstow":
            return request_class(dish_stow={"unstow": True})
        if opts.command == "set_sleep":
            if opts.start is None and opts.duration is None:
                return request_class(dish_get_config={})
            if opts.duration:
                return request_class(
                    dish_power_save={
                        "power_save_start_minutes": opts.start,
                        "power_save_duration_minutes": opts.duration,
                        "enable_power_save": True
                    })
            # duration of 0 not allowed, even when disabled
            return request_class(dish_power_save={
                "power_save_duration_minutes": 1,
                "enable_power_save": False
            })
        if opts.command == "set_gps":
            if opts.enable is None:
                return request_class(get_status={})
            return request_class(dish_inhibit_gps={"inhibit_gps": not opts.enable})

    def show_response(response):
        # Only the query-style invocations print dish state.
        if opts.command == "set_sleep" and opts.start is None and opts.duration is None:
            config = response.dish_get_config.dish_config
            if config.power_save_mode:
                print("Sleep start:", config.power_save_start_minutes,
                      "minutes past midnight UTC")
                print("Sleep duration:", config.power_save_duration_minutes, "minutes")
            else:
                print("Sleep disabled")
        elif opts.command == "set_gps" and opts.enable is None:
            status = response.dish_get_status
            if status.gps_stats.inhibit_gps:
                print("GPS disabled")
            else:
                print("GPS enabled")

    reflector = yagrc_reflector.GrpcReflectionClient()
    try:
        with grpc.insecure_channel(opts.target) as channel:
            reflector.load_protocols(channel, symbols=["SpaceX.API.Device.Device"])
            stub = reflector.service_stub_class("SpaceX.API.Device.Device")(channel)
            request_class = reflector.message_class("SpaceX.API.Device.Request")
            response = stub.Handle(build_request(request_class), timeout=10)
            show_response(response)
    except (AttributeError, ValueError, grpc.RpcError) as e:
        # grpc.Call carries a server-provided detail string; reflection or
        # message-shape problems surface as AttributeError/ValueError.
        if isinstance(e, grpc.Call):
            msg = e.details()
        elif isinstance(e, (AttributeError, ValueError)):
            msg = "Protocol error"
        else:
            msg = "Unknown communication or service error"
        logging.error(msg)
        return 1

    return 0
|
||||
|
||||
|
||||
def main():
    """Program entry point: parse arguments and run the command loop."""
    opts = parse_args()

    logging.basicConfig(format="%(levelname)s: %(message)s")

    sys.exit(loop_util.run_loop(opts, loop_body, opts))


if __name__ == "__main__":
    main()
|
||||
@@ -0,0 +1,339 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Write Starlink user terminal data to an InfluxDB 1.x database.
|
||||
|
||||
This script pulls the current status info and/or metrics computed from the
|
||||
history data and writes them to the specified InfluxDB database either once
|
||||
or in a periodic loop.
|
||||
|
||||
Data will be written into the requested database with the following
|
||||
measurement / series names:
|
||||
|
||||
: spacex.starlink.user_terminal.status : Current status data
|
||||
: spacex.starlink.user_terminal.history : Bulk history data
|
||||
: spacex.starlink.user_terminal.ping_stats : Ping history statistics
|
||||
: spacex.starlink.user_terminal.usage : Usage history statistics
|
||||
: spacex.starlink.user_terminal.power : Power history statistics
|
||||
|
||||
NOTE: The Starlink user terminal does not include time values with its
|
||||
history or status data, so this script uses current system time to compute
|
||||
the timestamps it sends to InfluxDB. It is recommended to run this script on
|
||||
a host that has its system clock synced via NTP. Otherwise, the timestamps
|
||||
may get out of sync with real time.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from datetime import timezone
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
|
||||
from influxdb import InfluxDBClient
|
||||
|
||||
import dish_common
|
||||
|
||||
HOST_DEFAULT = "localhost"
|
||||
DATABASE_DEFAULT = "starlinkstats"
|
||||
BULK_MEASUREMENT = "spacex.starlink.user_terminal.history"
|
||||
FLUSH_LIMIT = 6
|
||||
MAX_BATCH = 5000
|
||||
MAX_QUEUE_LENGTH = 864000
|
||||
|
||||
|
||||
class Terminated(Exception):
    """Raised from the SIGTERM handler so the main loop can exit cleanly."""
|
||||
|
||||
|
||||
def handle_sigterm(signum, frame):
    """Signal handler converting SIGTERM into a Terminated exception.

    Raising an exception lets the main loop's cleanup code flush queued
    data before the process exits.
    """
    raise Terminated
|
||||
|
||||
|
||||
def parse_args():
    """Parse command line options for the InfluxDB 1.x writer.

    Extends the shared dish_common parser with InfluxDB connection
    options, applies environment variable defaults, and collects the
    InfluxDBClient constructor keyword arguments into opts.icargs.

    Returns:
        The parsed options Namespace.
    """
    parser = dish_common.create_arg_parser(
        output_description="write it to an InfluxDB 1.x database")

    group = parser.add_argument_group(title="InfluxDB 1.x database options")
    group.add_argument("-n",
                       "--hostname",
                       default=HOST_DEFAULT,
                       dest="host",
                       help="Hostname of InfluxDB server, default: " + HOST_DEFAULT)
    group.add_argument("-p", "--port", type=int, help="Port number to use on InfluxDB server")
    group.add_argument("-P", "--password", help="Set password for username/password authentication")
    group.add_argument("-U", "--username", help="Set username for authentication")
    group.add_argument("-D",
                       "--database",
                       default=DATABASE_DEFAULT,
                       help="Database name to use, default: " + DATABASE_DEFAULT)
    group.add_argument("-R", "--retention-policy", help="Retention policy name to use")
    group.add_argument("-k",
                       "--skip-query",
                       action="store_true",
                       help="Skip querying for prior sample write point in bulk mode")
    group.add_argument("-C",
                       "--ca-cert",
                       dest="verify_ssl",
                       help="Enable SSL/TLS using specified CA cert to verify server",
                       metavar="FILENAME")
    group.add_argument("-I",
                       "--insecure",
                       action="store_false",
                       dest="verify_ssl",
                       help="Enable SSL/TLS but disable certificate verification (INSECURE!)")
    group.add_argument("-S",
                       "--secure",
                       action="store_true",
                       dest="verify_ssl",
                       help="Enable SSL/TLS using default CA cert")

    env_map = (
        ("INFLUXDB_HOST", "host"),
        ("INFLUXDB_PORT", "port"),
        ("INFLUXDB_USER", "username"),
        ("INFLUXDB_PWD", "password"),
        ("INFLUXDB_DB", "database"),
        # BUG FIX: this previously mapped to "retention-policy", which does
        # not match the argparse dest ("retention_policy"), so
        # parser.set_defaults() stored it under an inaccessible attribute
        # name and INFLUXDB_RP was silently ignored.
        ("INFLUXDB_RP", "retention_policy"),
        ("INFLUXDB_SSL", "verify_ssl"),
    )
    env_defaults = {}
    for var, opt in env_map:
        # check both set and not empty string
        val = os.environ.get(var)
        if val:
            if var == "INFLUXDB_SSL" and val == "secure":
                env_defaults[opt] = True
            elif var == "INFLUXDB_SSL" and val == "insecure":
                env_defaults[opt] = False
            else:
                env_defaults[opt] = val
    parser.set_defaults(**env_defaults)

    opts = dish_common.run_arg_parser(parser, need_id=True)

    if opts.username is None and opts.password is not None:
        parser.error("Password authentication requires username to be set")

    # Collect only explicitly-set connection options for InfluxDBClient;
    # retention_policy is handled separately at write time.
    opts.icargs = {"timeout": 5}
    for key in ["port", "host", "password", "username", "database", "verify_ssl"]:
        val = getattr(opts, key)
        if val is not None:
            opts.icargs[key] = val

    # Any of -C/-I/-S (or INFLUXDB_SSL) leaves verify_ssl non-None, which
    # implies SSL itself should be enabled.
    if opts.verify_ssl is not None:
        opts.icargs["ssl"] = True

    return opts
|
||||
|
||||
|
||||
def flush_points(opts, gstate):
    """Write all queued data points to InfluxDB.

    Writes in chunks of at most MAX_BATCH points. On failure the
    unwritten points stay queued for the next attempt, but the queue is
    capped at MAX_QUEUE_LENGTH by discarding the oldest entries.

    Returns:
        0 on success, 1 if the write failed.
    """
    queue = gstate.points
    try:
        while len(queue) > MAX_BATCH:
            gstate.influx_client.write_points(queue[:MAX_BATCH],
                                              time_precision="s",
                                              retention_policy=opts.retention_policy)
            if opts.verbose:
                print("Data points written: " + str(MAX_BATCH))
            del queue[:MAX_BATCH]
        if queue:
            gstate.influx_client.write_points(queue,
                                              time_precision="s",
                                              retention_policy=opts.retention_policy)
            if opts.verbose:
                print("Data points written: " + str(len(queue)))
            queue.clear()
    except Exception as e:
        dish_common.conn_error(opts, "Failed writing to InfluxDB database: %s", str(e))
        # If failures persist, don't just use infinite memory. Max queue
        # is currently 10 days of bulk data, so something is very wrong
        # if it's ever exceeded.
        if len(queue) > MAX_QUEUE_LENGTH:
            logging.error("Max write queue exceeded, discarding data.")
            del queue[:-MAX_QUEUE_LENGTH]
        return 1

    return 0
|
||||
|
||||
|
||||
def query_counter(gstate, start, end):
    """Fetch the most recent recorded counter value for this dish.

    Queries the bulk history measurement between start and end (epoch
    seconds) for the latest point carrying a "counter" field.

    Returns:
        Tuple of (counter, timestamp), or (None, 0) when no usable prior
        point exists or the query could not be run.
    """
    query_text = ("SELECT counter FROM \"{0}\" "
                  "WHERE time>={1}s AND time<{2}s AND id=$id "
                  "ORDER by time DESC LIMIT 1;").format(BULK_MEASUREMENT, start, end)
    try:
        result = gstate.influx_client.query(query_text,
                                            bind_params={"id": gstate.dish_id},
                                            epoch="s")
        rows = list(result.get_points())
        if rows:
            counter = rows[0].get("counter", None)
            timestamp = rows[0].get("time", 0)
            if counter and timestamp:
                return int(counter), int(timestamp)
    except TypeError as e:
        # bind_params was added in influxdb-python v5.2.3. That would be easy
        # enough to work around, but older versions had other problems with
        # query(), so just skip this functionality.
        logging.error(
            "Failed running query, probably due to influxdb-python version too old. "
            "Skipping resumption from prior counter value. Reported error was: %s", str(e))

    return None, 0
|
||||
|
||||
|
||||
def sync_timebase(opts, gstate):
    """Align queued bulk samples with the time base already in the database.

    Looks up the last counter value previously written for this dish. If
    the deferred samples overlap it, drops the already-written ones and,
    when the recorded time base is within 2 seconds of the local one,
    rewrites the deferred timestamps to match the database exactly. The
    deferred points are then promoted to the write queue either way.
    """
    try:
        db_counter, db_timestamp = query_counter(gstate, gstate.start_timestamp, gstate.timestamp)
    except Exception as e:
        # could be temporary outage, so try again next time
        dish_common.conn_error(opts, "Failed querying InfluxDB for prior count: %s", str(e))
        return
    gstate.timebase_synced = True

    if db_counter and gstate.start_counter <= db_counter:
        # drop samples the database has already seen
        del gstate.deferred_points[:db_counter - gstate.start_counter]
        if gstate.deferred_points:
            drift = db_timestamp - (gstate.deferred_points[0]["time"] - 1)
            # to prevent +/- 1 second timestamp drift when the script restarts,
            # if time base is within 2 seconds of that of the last sample in
            # the database, correct back to that time base
            if drift == 0:
                if opts.verbose:
                    print("Exactly synced with database time base")
            elif -2 <= drift <= 2:
                if opts.verbose:
                    print("Replacing with existing time base: {0} -> {1}".format(
                        db_counter, datetime.fromtimestamp(db_timestamp, tz=timezone.utc)))
                for point in gstate.deferred_points:
                    db_timestamp += 1
                    if point["time"] + drift != db_timestamp:
                        # lost time sync when recording data, leave the rest
                        break
                    point["time"] = db_timestamp
                else:
                    # every deferred point was rewritten; adopt the DB time base
                    gstate.timestamp = db_timestamp
            elif opts.verbose:
                print("Database time base out of sync by {0} seconds".format(drift))

    gstate.points.extend(gstate.deferred_points)
    gstate.deferred_points.clear()
|
||||
|
||||
|
||||
def loop_body(opts, gstate, shutdown=False):
    """Collect one round of dish data and queue it as InfluxDB points.

    Gathers status/history fields and bulk samples via dish_common,
    converts them to InfluxDB point dicts tagged with the dish id, and
    flushes the queue once it reaches FLUSH_LIMIT.

    Args:
        opts: Parsed command line options.
        gstate: Shared state, including the point queue and time base.
        shutdown: When True, flush any remaining history before exit.

    Returns:
        0 on success, non-zero on data collection or write failure.
    """
    fields = {"status": {}, "ping_stats": {}, "usage": {}, "power": {}}

    def record_item(key, val, category):
        fields[category][key] = val

    def record_sequence(key, val, category, start):
        for offset, element in enumerate(val, start=start):
            fields[category]["{0}_{1}".format(key, offset)] = element

    def record_bulk(bulk, count, timestamp, counter):
        # Remember where this run's data begins, for later resync queries.
        if gstate.start_timestamp is None:
            gstate.start_timestamp = timestamp
            gstate.start_counter = counter
        # Until the time base is confirmed against the database, hold the
        # samples aside in deferred_points instead of the write queue.
        target = gstate.points if gstate.timebase_synced else gstate.deferred_points
        for i in range(count):
            timestamp += 1
            target.append({
                "measurement": BULK_MEASUREMENT,
                "tags": {
                    "id": gstate.dish_id
                },
                "time": timestamp,
                "fields": {key: val[i] for key, val in bulk.items() if val[i] is not None},
            })
        if target:
            # save off counter value for script restart
            target[-1]["fields"]["counter"] = counter + count

    rc, status_ts, hist_ts = dish_common.get_data(opts,
                                                  gstate,
                                                  record_item,
                                                  record_sequence,
                                                  add_bulk=record_bulk,
                                                  flush_history=shutdown)
    if rc:
        return rc

    for category, cat_fields in fields.items():
        if not cat_fields:
            continue
        gstate.points.append({
            "measurement": "spacex.starlink.user_terminal." + category,
            "tags": {
                "id": gstate.dish_id
            },
            "time": status_ts if category == "status" else hist_ts,
            "fields": cat_fields,
        })

    # This is here and not before the points being processed because if the
    # query previously failed, there will be points that were processed in
    # a prior loop. This avoids having to handle that as a special case.
    if opts.bulk_mode and not gstate.timebase_synced:
        sync_timebase(opts, gstate)

    if opts.verbose:
        print("Data points queued: " + str(len(gstate.points)))

    return flush_points(opts, gstate) if len(gstate.points) >= FLUSH_LIMIT else 0
|
||||
|
||||
|
||||
def main():
    """Entry point: set up state, connect to InfluxDB 1.x, and run the loop."""
    opts = parse_args()

    logging.basicConfig(format="%(levelname)s: %(message)s")

    gstate = dish_common.GlobalState(target=opts.target)
    gstate.points = []
    gstate.deferred_points = []
    gstate.timebase_synced = opts.skip_query
    gstate.start_timestamp = None
    gstate.start_counter = None

    if "verify_ssl" in opts.icargs and not opts.icargs["verify_ssl"]:
        # user has explicitly said be insecure, so don't warn about it
        warnings.filterwarnings("ignore", message="Unverified HTTPS request")

    signal.signal(signal.SIGTERM, handle_sigterm)
    try:
        # attempt to hack around breakage between influxdb-python client and 2.0 server:
        gstate.influx_client = InfluxDBClient(**opts.icargs, headers={"Accept": "application/json"})
    except TypeError:
        # ...unless influxdb-python package version is too old
        gstate.influx_client = InfluxDBClient(**opts.icargs)

    rc = 0
    try:
        next_loop = time.monotonic()
        while True:
            rc = loop_body(opts, gstate)
            if opts.loop_interval <= 0.0:
                break
            now = time.monotonic()
            next_loop = max(next_loop + opts.loop_interval, now)
            time.sleep(next_loop - now)
    except (KeyboardInterrupt, Terminated):
        pass
    finally:
        # flush any remaining history/points before shutting down
        loop_body(opts, gstate, shutdown=True)
        if gstate.points:
            rc = flush_points(opts, gstate)
        gstate.influx_client.close()
        gstate.shutdown()

    sys.exit(rc)


if __name__ == "__main__":
    main()
|
||||
@@ -0,0 +1,331 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Write Starlink user terminal data to an InfluxDB 2.x database.
|
||||
|
||||
This script pulls the current status info and/or metrics computed from the
|
||||
history data and writes them to the specified InfluxDB 2.x database either once
|
||||
or in a periodic loop.
|
||||
|
||||
Data will be written into the requested database with the following
|
||||
measurement / series names:
|
||||
|
||||
: spacex.starlink.user_terminal.status : Current status data
|
||||
: spacex.starlink.user_terminal.history : Bulk history data
|
||||
: spacex.starlink.user_terminal.ping_stats : Ping history statistics
|
||||
: spacex.starlink.user_terminal.usage : Usage history statistics
|
||||
: spacex.starlink.user_terminal.power : Power history statistics
|
||||
|
||||
NOTE: The Starlink user terminal does not include time values with its
|
||||
history or status data, so this script uses current system time to compute
|
||||
the timestamps it sends to InfluxDB. It is recommended to run this script on
|
||||
a host that has its system clock synced via NTP. Otherwise, the timestamps
|
||||
may get out of sync with real time.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from datetime import timezone
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
|
||||
from influxdb_client import InfluxDBClient, WriteOptions, WritePrecision
|
||||
|
||||
import dish_common
|
||||
|
||||
URL_DEFAULT = "http://localhost:8086"
|
||||
BUCKET_DEFAULT = "starlinkstats"
|
||||
BULK_MEASUREMENT = "spacex.starlink.user_terminal.history"
|
||||
FLUSH_LIMIT = 6
|
||||
MAX_BATCH = 5000
|
||||
MAX_QUEUE_LENGTH = 864000
|
||||
|
||||
|
||||
class Terminated(Exception):
    """Raised from the SIGTERM handler so the main loop can exit cleanly."""
|
||||
|
||||
|
||||
def handle_sigterm(signum, frame):
    """Signal handler converting SIGTERM into a Terminated exception.

    Raising an exception lets the main loop's cleanup code flush queued
    data before the process exits.
    """
    raise Terminated
|
||||
|
||||
|
||||
def parse_args():
    """Parse command line options for the InfluxDB 2.x writer.

    Extends the shared dish_common parser with InfluxDB 2.x connection
    options, applies environment variable defaults, and collects the
    InfluxDBClient constructor keyword arguments into opts.icargs.

    Returns:
        The parsed options Namespace.
    """
    parser = dish_common.create_arg_parser(
        output_description="write it to an InfluxDB 2.x database")

    group = parser.add_argument_group(title="InfluxDB 2.x database options")
    group.add_argument("-u",
                       "--url",
                       default=URL_DEFAULT,
                       dest="url",
                       help="URL of the InfluxDB 2.x server, default: " + URL_DEFAULT)
    group.add_argument("-T", "--token", help="Token to access the bucket")
    group.add_argument("-B",
                       "--bucket",
                       default=BUCKET_DEFAULT,
                       help="Bucket name to use, default: " + BUCKET_DEFAULT)
    group.add_argument("-O", "--org", help="Organisation name")
    group.add_argument("-k",
                       "--skip-query",
                       action="store_true",
                       help="Skip querying for prior sample write point in bulk mode")
    group.add_argument("-C",
                       "--ca-cert",
                       dest="ssl_ca_cert",
                       help="Use specified CA cert to verify HTTPS server",
                       metavar="FILENAME")
    group.add_argument("-I",
                       "--insecure",
                       action="store_false",
                       dest="verify_ssl",
                       help="Disable certificate verification of HTTPS server (INSECURE!)")

    env_map = (
        ("INFLUXDB_URL", "url"),
        ("INFLUXDB_TOKEN", "token"),
        # FIX: "INFLUXDB_Bucket" broke the all-uppercase env var convention
        # used by every other entry. Keep the legacy spelling first for
        # backward compatibility; the canonical uppercase name is listed
        # after it so it wins when both are set.
        ("INFLUXDB_Bucket", "bucket"),
        ("INFLUXDB_BUCKET", "bucket"),
        ("INFLUXDB_ORG", "org"),
        ("INFLUXDB_SSL", "verify_ssl"),
    )
    env_defaults = {}
    for var, opt in env_map:
        # check both set and not empty string
        val = os.environ.get(var)
        if val:
            if var == "INFLUXDB_SSL":
                if val == "insecure":
                    env_defaults[opt] = False
                elif val == "secure":
                    env_defaults[opt] = True
                else:
                    # any other value is treated as a CA cert file path
                    env_defaults["ssl_ca_cert"] = val
            else:
                env_defaults[opt] = val
    parser.set_defaults(**env_defaults)

    opts = dish_common.run_arg_parser(parser, need_id=True)

    # Collect only the explicitly set options for the InfluxDBClient call.
    opts.icargs = {}
    for key in ["url", "token", "bucket", "org", "verify_ssl", "ssl_ca_cert"]:
        val = getattr(opts, key)
        if val is not None:
            opts.icargs[key] = val

    if (not opts.verify_ssl
            or opts.ssl_ca_cert is not None) and not opts.url.lower().startswith("https:"):
        parser.error("SSL options only apply to HTTPS URLs")

    return opts
|
||||
|
||||
|
||||
def flush_points(opts, gstate):
    """Write all queued data points to the InfluxDB 2.x bucket.

    Writes in chunks of at most MAX_BATCH points via a batching write
    API. On failure the unwritten points stay queued for the next
    attempt, but the queue is capped at MAX_QUEUE_LENGTH by discarding
    the oldest entries.

    Returns:
        0 on success, 1 if the write failed.
    """
    queue = gstate.points
    try:
        write_options = WriteOptions(batch_size=len(queue),
                                     flush_interval=10_000,
                                     jitter_interval=2_000,
                                     retry_interval=5_000,
                                     max_retries=5,
                                     max_retry_delay=30_000,
                                     exponential_base=2)
        write_api = gstate.influx_client.write_api(write_options=write_options)
        while len(queue) > MAX_BATCH:
            write_api.write(record=queue[:MAX_BATCH],
                            write_precision=WritePrecision.S,
                            bucket=opts.bucket)
            if opts.verbose:
                print("Data points written: " + str(MAX_BATCH))
            del queue[:MAX_BATCH]

        if queue:
            write_api.write(record=queue,
                            write_precision=WritePrecision.S,
                            bucket=opts.bucket)
            if opts.verbose:
                print("Data points written: " + str(len(queue)))
            queue.clear()
        write_api.flush()
        write_api.close()
    except Exception as e:
        dish_common.conn_error(opts, "Failed writing to InfluxDB database: %s", str(e))
        # If failures persist, don't just use infinite memory. Max queue
        # is currently 10 days of bulk data, so something is very wrong
        # if it's ever exceeded.
        if len(queue) > MAX_QUEUE_LENGTH:
            logging.error("Max write queue exceeded, discarding data.")
            del queue[:-MAX_QUEUE_LENGTH]
        return 1

    return 0
|
||||
|
||||
|
||||
def query_counter(opts, gstate, start, end):
    """Fetch the most recent recorded counter value for this dish.

    Runs a Flux query against the configured bucket for the last
    "counter" field recorded in the bulk history measurement between
    start and end (epoch seconds).

    Returns:
        Tuple of (counter, timestamp), or (None, 0) when no usable prior
        point exists.
    """
    flux = '''
    from(bucket: "{0}")
    |> range(start: {1}, stop: {2})
    |> filter(fn: (r) => r["_measurement"] == "{3}")
    |> filter(fn: (r) => r["_field"] == "counter")
    |> last()
    |> yield(name: "last")
    '''.format(opts.bucket, str(start), str(end), BULK_MEASUREMENT)
    result = gstate.influx_client.query_api().query(flux)
    if result:
        record = result[0].records[0]
        counter = record["_value"]
        timestamp = record["_time"].timestamp()
        if counter and timestamp:
            return int(counter), int(timestamp)

    return None, 0
|
||||
|
||||
|
||||
def sync_timebase(opts, gstate):
    """Align queued bulk samples with the time base already in the database.

    Looks up the last counter value previously written for this dish. If
    the deferred samples overlap it, drops the already-written ones and,
    when the recorded time base is within 2 seconds of the local one,
    rewrites the deferred timestamps to match the database exactly. The
    deferred points are then promoted to the write queue either way.
    """
    try:
        db_counter, db_timestamp = query_counter(opts, gstate, gstate.start_timestamp,
                                                 gstate.timestamp)
    except Exception as e:
        # could be temporary outage, so try again next time
        dish_common.conn_error(opts, "Failed querying InfluxDB for prior count: %s", str(e))
        return
    gstate.timebase_synced = True

    if db_counter and gstate.start_counter <= db_counter:
        # drop samples the database has already seen
        del gstate.deferred_points[:db_counter - gstate.start_counter]
        if gstate.deferred_points:
            drift = db_timestamp - (gstate.deferred_points[0]["time"] - 1)
            # to prevent +/- 1 second timestamp drift when the script restarts,
            # if time base is within 2 seconds of that of the last sample in
            # the database, correct back to that time base
            if drift == 0:
                if opts.verbose:
                    print("Exactly synced with database time base")
            elif -2 <= drift <= 2:
                if opts.verbose:
                    print("Replacing with existing time base: {0} -> {1}".format(
                        db_counter, datetime.fromtimestamp(db_timestamp, tz=timezone.utc)))
                for point in gstate.deferred_points:
                    db_timestamp += 1
                    if point["time"] + drift != db_timestamp:
                        # lost time sync when recording data, leave the rest
                        break
                    point["time"] = db_timestamp
                else:
                    # every deferred point was rewritten; adopt the DB time base
                    gstate.timestamp = db_timestamp
            elif opts.verbose:
                print("Database time base out of sync by {0} seconds".format(drift))

    gstate.points.extend(gstate.deferred_points)
    gstate.deferred_points.clear()
|
||||
|
||||
|
||||
def loop_body(opts, gstate, shutdown=False):
    """Collect one round of dish data and queue it as InfluxDB points.

    Gathers status/history fields and bulk samples via dish_common,
    converts them to InfluxDB point dicts tagged with the dish id, and
    flushes the queue once it reaches FLUSH_LIMIT.

    Args:
        opts: Parsed command line options.
        gstate: Shared state, including the point queue and time base.
        shutdown: When True, flush any remaining history before exit.

    Returns:
        0 on success, non-zero on data collection or write failure.
    """
    fields = {"status": {}, "ping_stats": {}, "usage": {}, "power": {}}

    def record_item(key, val, category):
        fields[category][key] = val

    def record_sequence(key, val, category, start):
        for offset, element in enumerate(val, start=start):
            fields[category]["{0}_{1}".format(key, offset)] = element

    def record_bulk(bulk, count, timestamp, counter):
        # Remember where this run's data begins, for later resync queries.
        if gstate.start_timestamp is None:
            gstate.start_timestamp = timestamp
            gstate.start_counter = counter
        # Until the time base is confirmed against the database, hold the
        # samples aside in deferred_points instead of the write queue.
        target = gstate.points if gstate.timebase_synced else gstate.deferred_points
        for i in range(count):
            timestamp += 1
            target.append({
                "measurement": BULK_MEASUREMENT,
                "tags": {
                    "id": gstate.dish_id
                },
                "time": timestamp,
                "fields": {key: val[i] for key, val in bulk.items() if val[i] is not None},
            })
        if target:
            # save off counter value for script restart
            target[-1]["fields"]["counter"] = counter + count

    rc, status_ts, hist_ts = dish_common.get_data(opts,
                                                  gstate,
                                                  record_item,
                                                  record_sequence,
                                                  add_bulk=record_bulk,
                                                  flush_history=shutdown)
    if rc:
        return rc

    for category, cat_fields in fields.items():
        if not cat_fields:
            continue
        gstate.points.append({
            "measurement": "spacex.starlink.user_terminal." + category,
            "tags": {
                "id": gstate.dish_id
            },
            "time": status_ts if category == "status" else hist_ts,
            "fields": cat_fields,
        })

    # This is here and not before the points being processed because if the
    # query previously failed, there will be points that were processed in
    # a prior loop. This avoids having to handle that as a special case.
    if opts.bulk_mode and not gstate.timebase_synced:
        sync_timebase(opts, gstate)

    if opts.verbose:
        print("Data points queued: " + str(len(gstate.points)))

    return flush_points(opts, gstate) if len(gstate.points) >= FLUSH_LIMIT else 0
|
||||
|
||||
|
||||
def main():
    """Entry point: set up state, connect to InfluxDB 2.x, and run the loop."""
    opts = parse_args()

    logging.basicConfig(format="%(levelname)s: %(message)s")

    gstate = dish_common.GlobalState(target=opts.target)
    gstate.points = []
    gstate.deferred_points = []
    gstate.timebase_synced = opts.skip_query
    gstate.start_timestamp = None
    gstate.start_counter = None

    if "verify_ssl" in opts.icargs and not opts.icargs["verify_ssl"]:
        # user has explicitly said be insecure, so don't warn about it
        warnings.filterwarnings("ignore", message="Unverified HTTPS request")

    signal.signal(signal.SIGTERM, handle_sigterm)
    gstate.influx_client = InfluxDBClient(**opts.icargs)

    rc = 0
    try:
        next_loop = time.monotonic()
        while True:
            rc = loop_body(opts, gstate)
            if opts.loop_interval <= 0.0:
                break
            now = time.monotonic()
            next_loop = max(next_loop + opts.loop_interval, now)
            time.sleep(next_loop - now)
    except (KeyboardInterrupt, Terminated):
        pass
    finally:
        # flush any remaining history/points before shutting down
        loop_body(opts, gstate, shutdown=True)
        if gstate.points:
            rc = flush_points(opts, gstate)
        gstate.influx_client.close()
        gstate.shutdown()

    sys.exit(rc)


if __name__ == "__main__":
    main()
|
||||
@@ -0,0 +1,212 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Publish Starlink user terminal data to a MQTT broker.
|
||||
|
||||
This script pulls the current status info and/or metrics computed from the
|
||||
history data and publishes them to the specified MQTT broker either once or
|
||||
in a periodic loop.
|
||||
|
||||
Data will be published to the following topic names:
|
||||
|
||||
: starlink/dish_status/*id_value*/*field_name* : Current status data
|
||||
: starlink/dish_ping_stats/*id_value*/*field_name* : Ping history statistics
|
||||
: starlink/dish_usage/*id_value*/*field_name* : Usage history statistics
|
||||
: starlink/dish_power/*id_value*/*field_name* : Power history statistics
|
||||
|
||||
Where *id_value* is the *id* value from the dish status information.
|
||||
|
||||
Unless the --json command line option is used, in which case, JSON-formatted
|
||||
data will be published to topic name:
|
||||
|
||||
: starlink/*id_value*
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
|
||||
try:
|
||||
import ssl
|
||||
ssl_ok = True
|
||||
except ImportError:
|
||||
ssl_ok = False
|
||||
|
||||
import paho.mqtt.publish
|
||||
|
||||
import dish_common
|
||||
|
||||
HOST_DEFAULT = "localhost"
|
||||
|
||||
|
||||
class Terminated(Exception):
    """Raised from the SIGTERM handler so the main loop can exit cleanly."""
|
||||
|
||||
|
||||
def handle_sigterm(signum, frame):
    """Signal handler converting SIGTERM into a Terminated exception.

    Raising an exception lets the main loop's cleanup code finish its
    work before the process exits.
    """
    raise Terminated
|
||||
|
||||
|
||||
def parse_args():
    """Parse command line options, adding MQTT broker options to the common set.

    Environment variables (MQTT_HOST, MQTT_PORT, MQTT_USERNAME, MQTT_PASSWORD,
    MQTT_SSL) provide defaults that command line options can override.
    """
    parser = dish_common.create_arg_parser(output_description="publish it to a MQTT broker",
                                           bulk_history=False)

    group = parser.add_argument_group(title="MQTT broker options")
    group.add_argument("-n",
                       "--hostname",
                       default=HOST_DEFAULT,
                       help="Hostname of MQTT broker, default: " + HOST_DEFAULT)
    group.add_argument("-p", "--port", type=int, help="Port number to use on MQTT broker")
    group.add_argument("-P", "--password", help="Set password for username/password authentication")
    group.add_argument("-U", "--username", help="Set username for authentication")
    group.add_argument("-J", "--json", action="store_true", help="Publish data as JSON")
    if ssl_ok:

        def wrap_ca_arg(arg):
            # paho expects the tls option as a dict of ssl-related kwargs.
            return {"ca_certs": arg}

        group.add_argument("-C",
                           "--ca-cert",
                           type=wrap_ca_arg,
                           dest="tls",
                           help="Enable SSL/TLS using specified CA cert to verify broker",
                           metavar="FILENAME")
        group.add_argument("-I",
                           "--insecure",
                           action="store_const",
                           const={"cert_reqs": ssl.CERT_NONE},
                           dest="tls",
                           help="Enable SSL/TLS but disable certificate verification (INSECURE!)")
        group.add_argument("-S",
                           "--secure",
                           action="store_const",
                           const={},
                           dest="tls",
                           help="Enable SSL/TLS using default CA cert")
    else:
        parser.epilog += "\nSSL support options not available due to missing ssl module"

    def tls_from_env(value):
        """Translate an MQTT_SSL env value into a paho tls dict, or None to ignore."""
        if not ssl_ok or value == "false":
            return None
        if value == "insecure":
            return {"cert_reqs": ssl.CERT_NONE}
        if value == "secure":
            return {}
        # Any other non-empty value is treated as a CA cert path.
        return {"ca_certs": value}

    env_defaults = {}
    for env_name, opt_name in (
        ("MQTT_HOST", "hostname"),
        ("MQTT_PORT", "port"),
        ("MQTT_USERNAME", "username"),
        ("MQTT_PASSWORD", "password"),
        ("MQTT_SSL", "tls"),
    ):
        # Treat unset and empty string the same: no default.
        env_value = os.environ.get(env_name)
        if not env_value:
            continue
        if env_name == "MQTT_SSL":
            tls_value = tls_from_env(env_value)
            if tls_value is not None:
                env_defaults[opt_name] = tls_value
        else:
            env_defaults[opt_name] = env_value
    parser.set_defaults(**env_defaults)

    opts = dish_common.run_arg_parser(parser, need_id=True)

    if opts.username is None and opts.password is not None:
        parser.error("Password authentication requires username to be set")

    # Collect the keyword arguments to pass through to paho publish calls.
    opts.mqargs = {
        key: getattr(opts, key)
        for key in ("hostname", "port", "tls") if getattr(opts, key) is not None
    }

    if opts.username is not None:
        opts.mqargs["auth"] = {"username": opts.username}
        if opts.password is not None:
            opts.mqargs["auth"]["password"] = opts.password

    return opts
|
||||
|
||||
|
||||
def loop_body(opts, gstate):
    """Collect one round of dish data and publish it to the MQTT broker.

    Returns 0 on success, 1 on failure (data collection or publish error).
    """
    msgs = []

    if opts.json:

        data = {}

        def cb_add_item(key, val, category):
            bucket = data.setdefault("dish_{0}".format(category), {})
            # Skip NaN values that occur on startup because they can upset Javascript JSON parsers
            if not (isinstance(val, float) and math.isnan(val)):
                bucket[key] = val

        def cb_add_sequence(key, val, category, _):
            data.setdefault("dish_{0}".format(category), {})[key] = list(val)

    else:

        def cb_add_item(key, val, category):
            topic = "starlink/dish_{0}/{1}/{2}".format(category, gstate.dish_id, key)
            msgs.append((topic, val, 0, False))

        def cb_add_sequence(key, val, category, _):
            topic = "starlink/dish_{0}/{1}/{2}".format(category, gstate.dish_id, key)
            # Sequences are flattened to comma-separated strings; None becomes empty.
            joined = ",".join("" if x is None else str(x) for x in val)
            msgs.append((topic, joined, 0, False))

    rc = dish_common.get_data(opts, gstate, cb_add_item, cb_add_sequence)[0]

    if opts.json:
        msgs.append(("starlink/{0}".format(gstate.dish_id), json.dumps(data), 0, False))

    if msgs:
        try:
            paho.mqtt.publish.multiple(msgs, client_id=gstate.dish_id, **opts.mqargs)
            if opts.verbose:
                print("Successfully published to MQTT broker")
        except Exception as e:
            dish_common.conn_error(opts, "Failed publishing to MQTT broker: %s", str(e))
            rc = 1

    return rc
|
||||
|
||||
|
||||
def main():
    """Entry point: poll the dish and publish to MQTT once or in a loop."""
    opts = parse_args()

    logging.basicConfig(format="%(levelname)s: %(message)s")

    gstate = dish_common.GlobalState(target=opts.target)

    signal.signal(signal.SIGTERM, handle_sigterm)

    rc = 0
    try:
        next_run = time.monotonic()
        while True:
            rc = loop_body(opts, gstate)
            if opts.loop_interval <= 0.0:
                break
            # Schedule relative to the previous deadline so the loop does not
            # drift, but never schedule into the past if an iteration ran long.
            now = time.monotonic()
            next_run = max(next_run + opts.loop_interval, now)
            time.sleep(next_run - now)
    except (KeyboardInterrupt, Terminated):
        pass
    finally:
        gstate.shutdown()

    sys.exit(rc)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,298 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Prometheus exporter for Starlink user terminal data info.
|
||||
|
||||
This script pulls the current status info and/or metrics computed from the
|
||||
history data and makes it available via HTTP in the format Prometheus expects.
|
||||
"""
|
||||
|
||||
from http import HTTPStatus
|
||||
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
|
||||
import logging
|
||||
import signal
|
||||
import sys
|
||||
import threading
|
||||
|
||||
import dish_common
|
||||
|
||||
|
||||
class Terminated(Exception):
    """Raised by the SIGTERM handler so the main loop can unwind and clean up."""
|
||||
|
||||
|
||||
def handle_sigterm(signum, frame):
    """Signal handler that converts SIGTERM into a Terminated exception.

    Raising lets the main loop's except/finally blocks perform cleanup.
    """
    raise Terminated
|
||||
|
||||
|
||||
class MetricInfo:
    """Static description of one exported metric.

    Class attributes hold the defaults; the constructor overrides only the
    values actually supplied.
    """

    # unit: suffix appended to the metric name (with a leading underscore)
    unit = ""
    # kind: Prometheus metric type, e.g. "gauge" or "counter"
    kind = "gauge"
    # help: free-text help string emitted in the # HELP line
    help = ""

    def __init__(self, unit=None, kind=None, help=None) -> None:
        if unit:
            self.unit = f"_{unit}"
        if kind:
            self.kind = kind
        if help:
            self.help = help
|
||||
|
||||
|
||||
METRICS_INFO = {
|
||||
"status_uptime": MetricInfo(unit="seconds", kind="counter"),
|
||||
"status_longitude": MetricInfo(),
|
||||
"status_latitude": MetricInfo(),
|
||||
"status_altitude": MetricInfo(),
|
||||
"status_gps_enabled": MetricInfo(),
|
||||
"status_gps_ready": MetricInfo(),
|
||||
"status_gps_sats": MetricInfo(),
|
||||
"status_seconds_to_first_nonempty_slot": MetricInfo(),
|
||||
"status_pop_ping_drop_rate": MetricInfo(),
|
||||
"status_downlink_throughput_bps": MetricInfo(),
|
||||
"status_uplink_throughput_bps": MetricInfo(),
|
||||
"status_pop_ping_latency_ms": MetricInfo(),
|
||||
"status_alerts": MetricInfo(),
|
||||
"status_fraction_obstructed": MetricInfo(),
|
||||
"status_currently_obstructed": MetricInfo(),
|
||||
"status_seconds_obstructed": MetricInfo(),
|
||||
"status_obstruction_duration": MetricInfo(),
|
||||
"status_obstruction_interval": MetricInfo(),
|
||||
"status_direction_azimuth": MetricInfo(),
|
||||
"status_direction_elevation": MetricInfo(),
|
||||
"status_is_snr_above_noise_floor": MetricInfo(),
|
||||
"status_alert_motors_stuck": MetricInfo(),
|
||||
"status_alert_thermal_throttle": MetricInfo(),
|
||||
"status_alert_thermal_shutdown": MetricInfo(),
|
||||
"status_alert_mast_not_near_vertical": MetricInfo(),
|
||||
"status_alert_unexpected_location": MetricInfo(),
|
||||
"status_alert_slow_ethernet_speeds": MetricInfo(),
|
||||
"status_alert_roaming": MetricInfo(),
|
||||
"status_alert_install_pending": MetricInfo(),
|
||||
"status_alert_is_heating": MetricInfo(),
|
||||
"status_alert_power_supply_thermal_throttle": MetricInfo(),
|
||||
"status_alert_slow_ethernet_speeds_100": MetricInfo(),
|
||||
"status_alert_is_power_save_idle": MetricInfo(),
|
||||
"status_alert_moving_while_not_mobile": MetricInfo(),
|
||||
"status_alert_moving_too_fast_for_policy": MetricInfo(),
|
||||
"status_alert_dbf_telem_stale": MetricInfo(),
|
||||
"status_alert_low_motor_current": MetricInfo(),
|
||||
"status_alert_obstruction_map_reset": MetricInfo(),
|
||||
"status_alert_lower_signal_than_predicted": MetricInfo(),
|
||||
"ping_stats_samples": MetricInfo(kind="counter"),
|
||||
"ping_stats_end_counter": MetricInfo(kind="counter"),
|
||||
"usage_download_usage": MetricInfo(unit="bytes", kind="counter"),
|
||||
"usage_upload_usage": MetricInfo(unit="bytes", kind="counter"),
|
||||
"power_latest_power": MetricInfo(),
|
||||
"power_mean_power": MetricInfo(),
|
||||
"power_min_power": MetricInfo(),
|
||||
"power_max_power": MetricInfo(),
|
||||
"power_total_energy": MetricInfo(),
|
||||
}
|
||||
|
||||
STATE_VALUES = [
|
||||
"UNKNOWN",
|
||||
"CONNECTED",
|
||||
"BOOTING",
|
||||
"SEARCHING",
|
||||
"STOWED",
|
||||
"THERMAL_SHUTDOWN",
|
||||
"NO_SATS",
|
||||
"OBSTRUCTED",
|
||||
"NO_DOWNLINK",
|
||||
"NO_PINGS",
|
||||
"DISH_UNREACHABLE",
|
||||
]
|
||||
|
||||
|
||||
class Metric:
    """One Prometheus metric: name, type, help text, timestamp, and samples.

    str() renders the metric in the Prometheus text exposition format.
    """

    name = ""
    timestamp = ""
    kind = None
    help = None
    values = None

    def __init__(self, name, timestamp, kind="gauge", help="", values=None):
        self.name = name
        self.timestamp = timestamp
        self.kind = kind
        self.help = help
        # Default to a fresh list rather than a shared mutable default.
        self.values = values if values else []

    def __str__(self):
        # A metric with no samples renders as nothing at all, so no bare
        # HELP/TYPE lines are emitted.
        if not self.values:
            return ""

        out = [f"# HELP {self.name} {self.help}", f"# TYPE {self.name} {self.kind}"]
        # Prometheus expects timestamps in milliseconds.
        out.extend(f"{self.name}{value} {self.timestamp*1000}" for value in self.values)
        out.append("")
        return "\n".join(out)
|
||||
|
||||
|
||||
class MetricValue:
    """A single metric sample: numeric value plus an optional label dict.

    str() renders the `{k="v",...} value` fragment appended to a metric name.
    """

    value = 0
    labels = None

    def __init__(self, value, labels=None) -> None:
        self.value = value
        self.labels = labels

    def __str__(self):
        # With no labels the label part is empty, leaving a leading space
        # between the metric name and the value.
        if self.labels:
            rendered = ",".join(f'{k}="{v}"' for k, v in self.labels.items())
            return "{" + rendered + "} " + f"{self.value}"
        return f" {self.value}"
|
||||
|
||||
|
||||
def parse_args():
    """Parse command line options, adding HTTP server options to the common set."""
    parser = dish_common.create_arg_parser(output_description="Prometheus exporter",
                                           bulk_history=False)

    group = parser.add_argument_group(title="HTTP server options")
    group.add_argument("--address", default="0.0.0.0", help="IP address to listen on")
    group.add_argument("--port", default=8080, type=int, help="Port to listen on")

    return dish_common.run_arg_parser(parser, modes=["status", "alert_detail", "usage", "location", "power"])
|
||||
|
||||
|
||||
def prometheus_export(opts, gstate):
    """Collect one round of dish data and render it as Prometheus text format.

    Fields are accumulated into raw_data as "<category>_<name>" keys, then
    consumed (popped) as they are converted into Metric objects; anything
    left over is reported via starlink_exporter_unprocessed_metrics, and
    expected-but-absent fields via starlink_exporter_missing_metrics.
    """
    raw_data = {}

    def data_add_item(name, value, category):
        raw_data[category + "_" + name] = value
        pass

    def data_add_sequencem(name, value, category, start):
        # Only scalar modes are enabled in parse_args, so sequence data is a bug.
        raise NotImplementedError("Did not expect sequence data")

    # The lock serializes grpc polling across concurrent HTTP request threads.
    with gstate.lock:
        rc, status_ts, hist_ts = dish_common.get_data(opts, gstate, data_add_item,
                                                      data_add_sequencem)

    metrics = []

    # snr is not supported by starlink any more but still returned by the grpc
    # service for backwards compatibility
    if "status_snr" in raw_data:
        del raw_data["status_snr"]

    # Dish state is exported as one 0/1 sample per known state value.
    metrics.append(
        Metric(
            name="starlink_status_state",
            timestamp=status_ts,
            values=[
                MetricValue(
                    value=int(raw_data["status_state"] == state_value),
                    labels={"state": state_value},
                ) for state_value in STATE_VALUES
            ],
        ))
    del raw_data["status_state"]

    # Identity-like strings become labels on a constant-1 info metric.
    info_metrics = ["status_id", "status_hardware_version", "status_software_version"]
    metrics_not_found = []
    metrics_not_found.extend([x for x in info_metrics if x not in raw_data])

    if len(metrics_not_found) < len(info_metrics):
        metrics.append(
            Metric(
                name="starlink_info",
                timestamp=status_ts,
                values=[
                    MetricValue(
                        value=1,
                        labels={
                            x.replace("status_", ""): raw_data.pop(x) for x in info_metrics
                            if x in raw_data
                        },
                    )
                ],
            ))

    # NOTE(review): all numeric metrics below are stamped with status_ts even
    # though hist_ts is available for the history-derived ones — confirm intended.
    for name, metric_info in METRICS_INFO.items():
        if name in raw_data:
            metrics.append(
                Metric(
                    name=f"starlink_{name}{metric_info.unit}",
                    timestamp=status_ts,
                    kind=metric_info.kind,
                    # `or 0` maps None (and other falsy values) to 0.0.
                    values=[MetricValue(value=float(raw_data.pop(name) or 0))],
                ))
        else:
            metrics_not_found.append(name)

    metrics.append(
        Metric(
            name="starlink_exporter_unprocessed_metrics",
            timestamp=status_ts,
            values=[MetricValue(value=1, labels={"metric": name}) for name in raw_data],
        ))

    metrics.append(
        Metric(
            name="starlink_exporter_missing_metrics",
            timestamp=status_ts,
            values=[MetricValue(
                value=1,
                labels={"metric": name},
            ) for name in metrics_not_found],
        ))

    return str.join("\n", [str(metric) for metric in metrics])
|
||||
|
||||
|
||||
class MetricsRequestHandler(BaseHTTPRequestHandler):
    """Serves the Prometheus metrics text over HTTP.

    The server object carries `opts` and `gstate` attributes, set in main(),
    which this handler reads for each request.
    """

    def do_GET(self):
        # Ignore any query string; only the path determines the response.
        path = self.path.partition("?")[0]
        if path.lower() == "/favicon.ico":
            self.send_error(HTTPStatus.NOT_FOUND)
            return

        opts = self.server.opts
        gstate = self.server.gstate

        # Encode once up front: Content-Length must be the byte count of the
        # encoded body, not the character count of the str (they differ for
        # non-ASCII output).
        content = prometheus_export(opts, gstate).encode()
        self.send_response(HTTPStatus.OK)
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-Length", len(content))
        self.end_headers()
        self.wfile.write(content)
|
||||
|
||||
|
||||
def main():
    """Entry point: start the HTTP server and serve metrics until interrupted."""
    opts = parse_args()

    logging.basicConfig(format="%(levelname)s: %(message)s", stream=sys.stderr)

    gstate = dish_common.GlobalState(target=opts.target)
    # Request handler threads share gstate, so polling must be serialized.
    gstate.lock = threading.Lock()

    server = ThreadingHTTPServer((opts.address, opts.port), MetricsRequestHandler)
    server.daemon_threads = False
    # Stash state where MetricsRequestHandler can reach it via self.server.
    server.gstate = gstate
    server.opts = opts

    signal.signal(signal.SIGTERM, handle_sigterm)

    print("HTTP listening on port", opts.port)
    try:
        server.serve_forever()
    except (KeyboardInterrupt, Terminated):
        pass
    finally:
        server.server_close()
        server.gstate.shutdown()

    sys.exit()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,326 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Write Starlink user terminal data to a sqlite database.
|
||||
|
||||
This script pulls the current status info and/or metrics computed from the
|
||||
history data and writes them to the specified sqlite database either once or
|
||||
in a periodic loop.
|
||||
|
||||
Requested data will be written into the following tables:
|
||||
|
||||
: status : Current status data
|
||||
: history : Bulk history data
|
||||
: ping_stats : Ping history statistics
|
||||
: usage : Bandwidth usage history statistics
|
||||
: power : Power consumption history statistics
|
||||
|
||||
Array data is currently written to the database as text strings of comma-
|
||||
separated values, which may not be the best method for some use cases. If you
|
||||
find yourself wishing they were handled better, please open a feature request
|
||||
at https://github.com/sparky8512/starlink-grpc-tools/issues explaining the use
|
||||
case and how you would rather see it. This only affects a few fields, since
|
||||
most of the useful data is not in arrays.
|
||||
|
||||
Note that using this script to record the alert_detail group mode will tend to
|
||||
trip schema-related errors when new alert types are added to the dish
|
||||
software. The error message will include something like "table status has no
|
||||
column named alert_foo", where "foo" is the newly added alert type. To work
|
||||
around this rare occurrence, you can pass the -f option to force a schema
|
||||
update. Alternatively, instead of using the alert_detail mode, you can use the
|
||||
alerts bitmask in the status group.
|
||||
|
||||
NOTE: The Starlink user terminal does not include time values with its
|
||||
history or status data, so this script uses current system time to compute
|
||||
the timestamps it writes into the database. It is recommended to run this
|
||||
script on a host that has its system clock synced via NTP. Otherwise, the
|
||||
timestamps may get out of sync with real time.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from datetime import timezone
|
||||
from itertools import repeat
|
||||
import logging
|
||||
import signal
|
||||
import sqlite3
|
||||
import sys
|
||||
import time
|
||||
|
||||
import dish_common
|
||||
import starlink_grpc
|
||||
|
||||
SCHEMA_VERSION = 5
|
||||
|
||||
|
||||
class Terminated(Exception):
    """Raised by the SIGTERM handler so the main loop can unwind and clean up."""
|
||||
|
||||
|
||||
def handle_sigterm(signum, frame):
    """Signal handler that converts SIGTERM into a Terminated exception.

    Raising lets the main loop's except/finally blocks perform cleanup.
    """
    raise Terminated
|
||||
|
||||
|
||||
def parse_args():
    """Parse command line options, adding sqlite-specific options to the common set."""
    parser = dish_common.create_arg_parser(output_description="write it to a sqlite database")

    parser.add_argument("database", help="Database file to use")

    group = parser.add_argument_group(title="sqlite database options")
    group.add_argument("-f",
                       "--force",
                       action="store_true",
                       help="Force schema conversion, even if it results in downgrade; may "
                       "result in discarded data")
    group.add_argument("-k",
                       "--skip-query",
                       action="store_true",
                       help="Skip querying for prior sample write point in history modes")

    opts = dish_common.run_arg_parser(parser, need_id=True)

    # --no-counter implies there is no prior counter worth querying for.
    opts.skip_query |= opts.no_counter

    return opts
|
||||
|
||||
|
||||
def query_counter(opts, gstate, column, table):
    """Look up the most recent prior value of a counter column for this dish.

    Returns the matching (time, counter) row, or (0, None) when no usable
    row exists (missing row, or zero/NULL time or counter).
    """
    now = time.time()
    cur = gstate.sql_conn.cursor()
    cur.execute(
        'SELECT "time", "{0}" FROM "{1}" WHERE "time"<? AND "id"=? '
        'ORDER BY "time" DESC LIMIT 1'.format(column, table), (now, gstate.dish_id))
    row = cur.fetchone()
    cur.close()

    # Guard clause: anything falsy in either field means "no usable time base".
    if not (row and row[0] and row[1]):
        return 0, None

    if opts.verbose:
        print("Existing time base: {0} -> {1}".format(
            row[1], datetime.fromtimestamp(row[0], tz=timezone.utc)))
    return row
|
||||
|
||||
|
||||
def loop_body(opts, gstate, shutdown=False):
    """Collect one round of dish data and write it to the sqlite database.

    On shutdown=True, skips new data collection and only flushes any
    deferred history stats. Returns 0 on success, 1 on failure.
    """
    # Per-table dicts of column -> value for the single-row tables.
    tables = {"status": {}, "ping_stats": {}, "usage": {}, "power": {}}
    # Bulk history column names; filled in lazily on the first bulk callback.
    hist_cols = ["time", "id"]
    hist_rows = []

    def cb_add_item(key, val, category):
        tables[category][key] = val

    def cb_add_sequence(key, val, category, start):
        # Sequences are stored as comma-separated text; None becomes empty.
        tables[category][key] = ",".join(str(subv) if subv is not None else "" for subv in val)

    def cb_add_bulk(bulk, count, timestamp, counter):
        # First call: derive the column list from the bulk data keys.
        if len(hist_cols) == 2:
            hist_cols.extend(bulk.keys())
            hist_cols.append("counter")
        for i in range(count):
            timestamp += 1
            counter += 1
            row = [timestamp, gstate.dish_id]
            row.extend(val[i] for val in bulk.values())
            row.append(counter)
            hist_rows.append(row)

    rc = 0
    status_ts = None
    hist_ts = None

    if not shutdown:
        rc, status_ts = dish_common.get_status_data(opts, gstate, cb_add_item, cb_add_sequence)

    if opts.history_stats_mode and (not rc or opts.poll_loops > 1):
        # Resume from the counter recorded in a prior run, unless disabled.
        if gstate.counter_stats is None and not opts.skip_query and opts.samples < 0:
            _, gstate.counter_stats = query_counter(opts, gstate, "end_counter", "ping_stats")
        hist_rc, hist_ts = dish_common.get_history_stats(opts, gstate, cb_add_item, cb_add_sequence,
                                                         shutdown)
        if not rc:
            rc = hist_rc

    if not shutdown and opts.bulk_mode and not rc:
        if gstate.counter is None and not opts.skip_query and opts.bulk_samples < 0:
            gstate.timestamp, gstate.counter = query_counter(opts, gstate, "counter", "history")
        rc = dish_common.get_bulk_data(opts, gstate, cb_add_bulk)

    rows_written = 0

    try:
        cur = gstate.sql_conn.cursor()
        for category, fields in tables.items():
            if fields:
                # Status data uses its own timestamp; the rest share hist_ts.
                timestamp = status_ts if category == "status" else hist_ts
                sql = 'INSERT OR REPLACE INTO "{0}" ("time","id",{1}) VALUES ({2})'.format(
                    category, ",".join('"' + x + '"' for x in fields),
                    ",".join(repeat("?",
                                    len(fields) + 2)))
                values = [timestamp, gstate.dish_id]
                values.extend(fields.values())
                cur.execute(sql, values)
                rows_written += 1

        if hist_rows:
            sql = 'INSERT OR REPLACE INTO "history" ({0}) VALUES({1})'.format(
                ",".join('"' + x + '"' for x in hist_cols), ",".join(repeat("?", len(hist_cols))))
            cur.executemany(sql, hist_rows)
            rows_written += len(hist_rows)

        cur.close()
        gstate.sql_conn.commit()
    except sqlite3.OperationalError as e:
        # these are not necessarily fatal, but also not much can do about
        logging.error("Unexpected error from database, discarding data: %s", e)
        rc = 1
    else:
        if opts.verbose:
            print("Rows written to db:", rows_written)

    return rc
|
||||
|
||||
|
||||
def ensure_schema(opts, conn, context):
    """Create or convert the database schema as needed.

    Returns 0 on success (including no-op), 1 on failure. With opts.force,
    conversion runs even when versions match or would be a downgrade.
    """
    cur = conn.cursor()
    cur.execute("PRAGMA user_version")
    version = cur.fetchone()
    # Fast path: schema already current and no forced conversion requested.
    if version and version[0] == SCHEMA_VERSION and not opts.force:
        cur.close()
        return 0

    try:
        if not version or not version[0]:
            # user_version 0 (or missing) means a freshly created database.
            if opts.verbose:
                print("Initializing new database")
            create_tables(conn, context, "")
        elif version[0] > SCHEMA_VERSION and not opts.force:
            logging.error("Cowardly refusing to downgrade from schema version %s", version[0])
            return 1
        else:
            print("Converting from schema version:", version[0])
            convert_tables(conn, context)
        cur.execute("PRAGMA user_version={0}".format(SCHEMA_VERSION))
        conn.commit()
        return 0
    except starlink_grpc.GrpcError as e:
        # Schema reflection requires talking to the dish; that can fail.
        dish_common.conn_error(opts, "Failure reflecting status fields: %s", str(e))
        return 1
    finally:
        cur.close()
|
||||
|
||||
|
||||
def create_tables(conn, context, suffix):
    """(Re)create all data tables, with names suffixed by `suffix`.

    Column sets are reflected from starlink_grpc field name/type metadata.
    Returns a dict mapping table name -> list of its column names.
    """
    tables = {}
    name_groups = (starlink_grpc.status_field_names(context=context) +
                   (starlink_grpc.location_field_names(),))
    type_groups = (starlink_grpc.status_field_types(context=context) +
                   (starlink_grpc.location_field_types(),))
    tables["status"] = zip(name_groups, type_groups)

    name_groups = starlink_grpc.history_stats_field_names()
    type_groups = starlink_grpc.history_stats_field_types()
    # NOTE(review): group slicing assumes the field-group ordering documented
    # by starlink_grpc (first 5 groups -> ping_stats, 6th -> usage, 7th -> power).
    tables["ping_stats"] = zip(name_groups[0:5], type_groups[0:5])
    tables["usage"] = ((name_groups[5], type_groups[5]),)
    tables["power"] = ((name_groups[6], type_groups[6]),)

    name_groups = starlink_grpc.history_bulk_field_names()
    type_groups = starlink_grpc.history_bulk_field_types()
    tables["history"] = ((name_groups[1], type_groups[1]), (["counter"], [int]))

    def sql_type(type_class):
        # Map Python field types onto sqlite column types. bool is checked
        # before int because bool is a subclass of int in Python.
        if issubclass(type_class, float):
            return "REAL"
        if issubclass(type_class, bool):
            # advisory only, stores as int:
            return "BOOLEAN"
        if issubclass(type_class, int):
            return "INTEGER"
        if issubclass(type_class, str):
            return "TEXT"
        raise TypeError

    column_info = {}
    cur = conn.cursor()
    for table, group_pairs in tables.items():
        column_names = ["time", "id"]
        columns = ['"time" INTEGER NOT NULL', '"id" TEXT NOT NULL']
        for name_group, type_group in group_pairs:
            for name_item, type_item in zip(name_group, type_group):
                # Strip any bracketed sequence-size annotation from the name.
                name_item = dish_common.BRACKETS_RE.match(name_item).group(1)
                if name_item != "id":
                    columns.append('"{0}" {1}'.format(name_item, sql_type(type_item)))
                    column_names.append(name_item)
        cur.execute('DROP TABLE IF EXISTS "{0}{1}"'.format(table, suffix))
        sql = 'CREATE TABLE "{0}{1}" ({2}, PRIMARY KEY("time","id"))'.format(
            table, suffix, ", ".join(columns))
        cur.execute(sql)
        column_info[table] = column_names
    cur.close()

    return column_info
|
||||
|
||||
|
||||
def convert_tables(conn, context):
    """Migrate existing data into freshly created "<table>_new" tables.

    Copies only the columns present in both old and new schemas, then drops
    each old table and renames the new one into its place.
    """
    new_column_info = create_tables(conn, context, "_new")
    # Temporarily switch to Row so old rows can be read by column name.
    conn.row_factory = sqlite3.Row
    old_cur = conn.cursor()
    new_cur = conn.cursor()
    for table, new_columns in new_column_info.items():
        try:
            old_cur.execute('SELECT * FROM "{0}"'.format(table))
            table_ok = True
        except sqlite3.OperationalError:
            # Old table missing entirely; nothing to copy for it.
            table_ok = False
        if table_ok:
            old_columns = set(x[0] for x in old_cur.description)
            # Restrict the copy to columns the old table actually has.
            new_columns = tuple(x for x in new_columns if x in old_columns)
            sql = 'INSERT OR REPLACE INTO "{0}_new" ({1}) VALUES ({2})'.format(
                table, ",".join('"' + x + '"' for x in new_columns),
                ",".join(repeat("?", len(new_columns))))
            new_cur.executemany(sql, (tuple(row[col] for col in new_columns) for row in old_cur))
        new_cur.execute('DROP TABLE "{0}"'.format(table))
        new_cur.execute('ALTER TABLE "{0}_new" RENAME TO "{0}"'.format(table))
    old_cur.close()
    new_cur.close()
    conn.row_factory = None
|
||||
|
||||
|
||||
def main():
    """Entry point: write dish data to sqlite once or in a periodic loop."""
    opts = parse_args()

    logging.basicConfig(format="%(levelname)s: %(message)s")

    gstate = dish_common.GlobalState(target=opts.target)
    gstate.points = []
    gstate.deferred_points = []

    signal.signal(signal.SIGTERM, handle_sigterm)
    gstate.sql_conn = sqlite3.connect(opts.database)

    rc = 0
    try:
        rc = ensure_schema(opts, gstate.sql_conn, gstate.context)
        if rc:
            sys.exit(rc)
        next_run = time.monotonic()
        while True:
            rc = loop_body(opts, gstate)
            if opts.loop_interval <= 0.0:
                break
            # Schedule relative to the previous deadline so the loop does not
            # drift, but never schedule into the past if an iteration ran long.
            now = time.monotonic()
            next_run = max(next_run + opts.loop_interval, now)
            time.sleep(next_run - now)
    except sqlite3.Error as e:
        logging.error("Database error: %s", e)
        rc = 1
    except (KeyboardInterrupt, Terminated):
        pass
    finally:
        # Final call flushes any deferred history stats before closing.
        loop_body(opts, gstate, shutdown=True)
        gstate.sql_conn.close()
        gstate.shutdown()

    sys.exit(rc)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,304 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Output Starlink user terminal data info in text format.
|
||||
|
||||
This script pulls the current status info and/or metrics computed from the
|
||||
history data and prints them to a file or stdout either once or in a periodic
|
||||
loop. By default, it will print the results in CSV format.
|
||||
|
||||
Note that using this script to record the alert_detail group mode as CSV
|
||||
data is not recommended, because the number of alerts and their relative
|
||||
order in the output can change with the dish software. Instead of using
|
||||
the alert_detail mode, you can use the alerts bitmask in the status group.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
|
||||
import dish_common
|
||||
import starlink_grpc
|
||||
|
||||
COUNTER_FIELD = "end_counter"
|
||||
VERBOSE_FIELD_MAP = {
|
||||
# status fields (the remainder are either self-explanatory or I don't
|
||||
# know with confidence what they mean)
|
||||
"alerts": "Alerts bit field",
|
||||
|
||||
# ping_drop fields
|
||||
"samples": "Parsed samples",
|
||||
"end_counter": "Sample counter",
|
||||
"total_ping_drop": "Total ping drop",
|
||||
"count_full_ping_drop": "Count of drop == 1",
|
||||
"count_obstructed": "Obstructed",
|
||||
"total_obstructed_ping_drop": "Obstructed ping drop",
|
||||
"count_full_obstructed_ping_drop": "Obstructed drop == 1",
|
||||
"count_unscheduled": "Unscheduled",
|
||||
"total_unscheduled_ping_drop": "Unscheduled ping drop",
|
||||
"count_full_unscheduled_ping_drop": "Unscheduled drop == 1",
|
||||
|
||||
# ping_run_length fields
|
||||
"init_run_fragment": "Initial drop run fragment",
|
||||
"final_run_fragment": "Final drop run fragment",
|
||||
"run_seconds": "Per-second drop runs",
|
||||
"run_minutes": "Per-minute drop runs",
|
||||
|
||||
# ping_latency fields
|
||||
"mean_all_ping_latency": "Mean RTT, drop < 1",
|
||||
"deciles_all_ping_latency": "RTT deciles, drop < 1",
|
||||
"mean_full_ping_latency": "Mean RTT, drop == 0",
|
||||
"deciles_full_ping_latency": "RTT deciles, drop == 0",
|
||||
"stdev_full_ping_latency": "RTT standard deviation, drop == 0",
|
||||
|
||||
# ping_loaded_latency is still experimental, so leave those unexplained
|
||||
|
||||
# usage fields
|
||||
"download_usage": "Bytes downloaded",
|
||||
"upload_usage": "Bytes uploaded",
|
||||
}
|
||||
|
||||
|
||||
class Terminated(Exception):
    """Raised by the SIGTERM handler so the main loop can unwind and clean up."""
|
||||
|
||||
|
||||
def handle_sigterm(signum, frame):
    """Signal handler that converts SIGTERM into a Terminated exception.

    Raising lets the main loop's except/finally blocks perform cleanup.
    """
    raise Terminated
|
||||
|
||||
|
||||
def parse_args():
    """Parse command line options, adding CSV output options to the common set."""
    parser = dish_common.create_arg_parser(
        output_description="print it in text format; by default, will print in CSV format")

    group = parser.add_argument_group(title="CSV output options")
    group.add_argument("-H",
                       "--print-header",
                       action="store_true",
                       help="Print CSV header instead of parsing data")
    group.add_argument("-O",
                       "--out-file",
                       default="-",
                       help="Output file path; if set, can also be used to resume from prior "
                       "history sample counter, default: write to standard output")
    group.add_argument("-k",
                       "--skip-query",
                       action="store_true",
                       help="Skip querying for prior sample write point in history modes")

    opts = dish_common.run_arg_parser(parser)

    # Bulk history has a different row cadence, so it cannot share a CSV file
    # with the other modes (verbose text output is exempt).
    if (opts.history_stats_mode or opts.status_mode) and opts.bulk_mode and not opts.verbose:
        parser.error("bulk_history cannot be combined with other modes for CSV output")

    # Technically possible, but a pain to implement, so just disallow it. User
    # probably doesn't realize how weird it would be, anyway, given that stats
    # data reports at a different rate from status data in this case.
    if opts.history_stats_mode and opts.status_mode and not opts.verbose and opts.poll_loops > 1:
        parser.error("usage of --poll-loops with history stats modes cannot be mixed with status "
                     "modes for CSV output")

    # Counter resume is meaningless without a counter or in verbose text mode.
    opts.skip_query |= opts.no_counter | opts.verbose
    if opts.out_file == "-":
        opts.no_stdout_errors = True

    return opts
|
||||
|
||||
|
||||
def open_out_file(opts, mode):
    """Open the configured output destination in the given mode.

    For "-", wraps stdout's file descriptor in a fresh line-buffered file
    object so callers can close it without closing sys.stdout itself.
    """
    if opts.out_file != "-":
        return open(opts.out_file, mode, buffering=1)
    return os.fdopen(sys.stdout.fileno(), "w", buffering=1, closefd=False)
|
||||
|
||||
|
||||
def print_header(opts, print_file):
    """Print a CSV header line matching the enabled modes to print_file.

    Returns 0 on success, 1 when status field reflection fails.
    """
    header = ["datetimestamp_utc"]

    def header_add(names):
        # Expand bracketed sequence annotations into one column per index.
        for name in names:
            name, start, end = dish_common.BRACKETS_RE.match(name).group(1, 4, 5)
            if start:
                header.extend(name + "_" + str(x) for x in range(int(start), int(end)))
            elif end:
                header.extend(name + "_" + str(x) for x in range(int(end)))
            else:
                header.append(name)

    if opts.status_mode:
        if opts.pure_status_mode:
            # Status field names must be reflected from the dish itself.
            context = starlink_grpc.ChannelContext(target=opts.target)
            try:
                name_groups = starlink_grpc.status_field_names(context=context)
            except starlink_grpc.GrpcError as e:
                dish_common.conn_error(opts, "Failure reflecting status field names: %s", str(e))
                return 1
            if "status" in opts.mode:
                header_add(name_groups[0])
            if "obstruction_detail" in opts.mode:
                header_add(name_groups[1])
            if "alert_detail" in opts.mode:
                header_add(name_groups[2])
        if "location" in opts.mode:
            header_add(starlink_grpc.location_field_names())

    if opts.bulk_mode:
        general, bulk = starlink_grpc.history_bulk_field_names()
        header_add(bulk)

    if opts.history_stats_mode:
        groups = starlink_grpc.history_stats_field_names()
        general, ping, runlen, latency, loaded, usage, power = groups[0:7]
        header_add(general)
        if "ping_drop" in opts.mode:
            header_add(ping)
        if "ping_run_length" in opts.mode:
            header_add(runlen)
        if "ping_latency" in opts.mode:
            header_add(latency)
        if "ping_loaded_latency" in opts.mode:
            header_add(loaded)
        if "usage" in opts.mode:
            header_add(usage)
        if "power" in opts.mode:
            header_add(power)

    print(",".join(header), file=print_file)
    return 0
|
||||
|
||||
|
||||
def get_prior_counter(opts, gstate):
    """Seed gstate.counter_stats from the last row of an existing CSV file.

    This implementation is terrible in that it makes a bunch of assumptions.
    Those assumptions should be true for files generated by this script, but
    it would be better not to make them. However, it also only works if the
    CSV file has a header that correctly matches the last line of the file,
    and there's really no way to verify that, so it's garbage in, garbage
    out, anyway. It also reads the entire file line-by-line, which is not
    great.
    """
    try:
        with open_out_file(opts, "r") as prior_file:
            column_names = prior_file.readline().split(",")
            counter_column = column_names.index(COUNTER_FIELD)
            final_row = None
            for final_row in prior_file:
                pass
            if final_row is not None:
                gstate.counter_stats = int(final_row.split(",")[counter_column])
    except (IndexError, OSError, ValueError):
        # Best effort only: a missing, empty, or malformed file simply means
        # no prior counter value is recovered.
        pass
|
||||
|
||||
|
||||
def loop_body(opts, gstate, print_file, shutdown=False):
    """Collect one round of data and emit it to print_file.

    Returns the status code produced by dish_common.get_data.
    """
    row = []

    def as_text(value):
        # None becomes an empty CSV cell.
        return "" if value is None else str(value)

    def utc_iso(ts):
        # Naive ISO-8601 timestamp derived from a UTC epoch value.
        return datetime.datetime.fromtimestamp(
            ts, datetime.timezone.utc).replace(tzinfo=None).isoformat()

    def cb_data_add_item(name, val, category):
        if opts.verbose:
            label = VERBOSE_FIELD_MAP.get(name, name) + ":"
            row.append("{0:22} {1}".format(label, as_text(val)))
        elif name == "state" and val == "DISH_UNREACHABLE":
            # special case for get_status failure: this will be the lone item added
            row.extend(["", "", "", val])
        else:
            row.append(as_text(val))

    def cb_data_add_sequence(name, val, category, start):
        if opts.verbose:
            label = VERBOSE_FIELD_MAP.get(name, name) + ":"
            row.append("{0:22} {1}".format(label, ", ".join(as_text(v) for v in val)))
        else:
            row.extend(as_text(v) for v in val)

    def cb_add_bulk(bulk, count, timestamp, counter):
        if opts.verbose:
            print("Time range (UTC): {0} -> {1}".format(utc_iso(timestamp),
                                                        utc_iso(timestamp + count)),
                  file=print_file)
            for key, val in bulk.items():
                print("{0:22} {1}".format(key + ":", ", ".join(as_text(v) for v in val)),
                      file=print_file)
            if opts.loop_interval > 0.0:
                print(file=print_file)
        else:
            # One CSV line per history sample, each one second apart.
            for i in range(count):
                timestamp += 1
                fields = [utc_iso(timestamp)]
                fields.extend([as_text(val[i]) for val in bulk.values()])
                print(",".join(fields), file=print_file)

    rc, status_ts, hist_ts = dish_common.get_data(opts,
                                                  gstate,
                                                  cb_data_add_item,
                                                  cb_data_add_sequence,
                                                  add_bulk=cb_add_bulk,
                                                  flush_history=shutdown)

    if opts.verbose:
        if row:
            print("\n".join(row), file=print_file)
            if opts.loop_interval > 0.0:
                print(file=print_file)
    else:
        if row:
            stamp = status_ts if status_ts is not None else hist_ts
            row.insert(0, utc_iso(stamp))
            print(",".join(row), file=print_file)

    return rc
|
||||
|
||||
|
||||
def main():
    """Entry point: parse options, then run the collection/output loop."""
    opts = parse_args()

    logging.basicConfig(format="%(levelname)s: %(message)s")

    # Header-only mode writes the CSV header line and exits immediately.
    if opts.print_header:
        try:
            with open_out_file(opts, "a") as header_file:
                rc = print_header(opts, header_file)
        except OSError as e:
            logging.error("Failed opening output file: %s", str(e))
            rc = 1
        sys.exit(rc)

    gstate = dish_common.GlobalState(target=opts.target)
    # Recover the history counter from a prior output file, when applicable.
    if opts.out_file != "-" and not opts.skip_query and opts.history_stats_mode:
        get_prior_counter(opts, gstate)

    try:
        out_fh = open_out_file(opts, "a")
    except OSError as e:
        logging.error("Failed opening output file: %s", str(e))
        sys.exit(1)
    signal.signal(signal.SIGTERM, handle_sigterm)

    rc = 0
    try:
        deadline = time.monotonic()
        while True:
            rc = loop_body(opts, gstate, out_fh)
            if opts.loop_interval <= 0.0:
                break
            current = time.monotonic()
            # Keep a fixed cadence even if an iteration ran long.
            deadline = max(deadline + opts.loop_interval, current)
            time.sleep(deadline - current)
    except (KeyboardInterrupt, Terminated):
        pass
    finally:
        # Flush any pending history data before shutting down.
        loop_body(opts, gstate, out_fh, shutdown=True)
        out_fh.close()
        gstate.shutdown()

    sys.exit(rc)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,284 @@
|
||||
#!/usr/bin/env python3
|
||||
r"""Output Starlink user terminal data info in text format.
|
||||
|
||||
Expects input as from the following command:
|
||||
|
||||
grpcurl -plaintext -d {\"get_history\":{}} 192.168.100.1:9200 SpaceX.API.Device.Device/Handle
|
||||
|
||||
This script examines the most recent samples from the history data and
|
||||
prints several different metrics computed from them to stdout. By default,
|
||||
it will print the results in CSV format.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import datetime
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
|
||||
import starlink_json
|
||||
|
||||
BRACKETS_RE = re.compile(r"([^[]*)(\[((\d+),|)(\d*)\]|)$")
|
||||
SAMPLES_DEFAULT = 3600
|
||||
HISTORY_STATS_MODES = [
|
||||
"ping_drop", "ping_run_length", "ping_latency", "ping_loaded_latency", "usage"
|
||||
]
|
||||
VERBOSE_FIELD_MAP = {
|
||||
# ping_drop fields
|
||||
"samples": "Parsed samples",
|
||||
"end_counter": "Sample counter",
|
||||
"total_ping_drop": "Total ping drop",
|
||||
"count_full_ping_drop": "Count of drop == 1",
|
||||
"count_obstructed": "Obstructed",
|
||||
"total_obstructed_ping_drop": "Obstructed ping drop",
|
||||
"count_full_obstructed_ping_drop": "Obstructed drop == 1",
|
||||
"count_unscheduled": "Unscheduled",
|
||||
"total_unscheduled_ping_drop": "Unscheduled ping drop",
|
||||
"count_full_unscheduled_ping_drop": "Unscheduled drop == 1",
|
||||
|
||||
# ping_run_length fields
|
||||
"init_run_fragment": "Initial drop run fragment",
|
||||
"final_run_fragment": "Final drop run fragment",
|
||||
"run_seconds": "Per-second drop runs",
|
||||
"run_minutes": "Per-minute drop runs",
|
||||
|
||||
# ping_latency fields
|
||||
"mean_all_ping_latency": "Mean RTT, drop < 1",
|
||||
"deciles_all_ping_latency": "RTT deciles, drop < 1",
|
||||
"mean_full_ping_latency": "Mean RTT, drop == 0",
|
||||
"deciles_full_ping_latency": "RTT deciles, drop == 0",
|
||||
"stdev_full_ping_latency": "RTT standard deviation, drop == 0",
|
||||
|
||||
# ping_loaded_latency is still experimental, so leave those unexplained
|
||||
|
||||
# usage fields
|
||||
"download_usage": "Bytes downloaded",
|
||||
"upload_usage": "Bytes uploaded",
|
||||
}
|
||||
|
||||
|
||||
def parse_args():
    """Parse command line options for the JSON text parser script."""
    parser = argparse.ArgumentParser(
        description="Collect status and/or history data from a Starlink user terminal and "
        "print it to standard output in text format; by default, will print in CSV format",
        add_help=False)

    general = parser.add_argument_group(title="General options")
    general.add_argument("-f", "--filename", default="-", help="The file to parse, default: stdin")
    general.add_argument("-h", "--help", action="help", help="Be helpful")
    general.add_argument("-t",
                         "--timestamp",
                         help="UTC time history data was pulled, as YYYY-MM-DD_HH:MM:SS or as "
                         "seconds since Unix epoch, default: current time")
    general.add_argument("-v", "--verbose", action="store_true", help="Be verbose")

    history = parser.add_argument_group(title="History mode options")
    history.add_argument("-a",
                         "--all-samples",
                         action="store_const",
                         const=-1,
                         dest="samples",
                         help="Parse all valid samples")
    history.add_argument("-s",
                         "--samples",
                         type=int,
                         help="Number of data samples to parse, default: all in bulk mode, "
                         "else " + str(SAMPLES_DEFAULT))

    csv_group = parser.add_argument_group(title="CSV output options")
    csv_group.add_argument("-H",
                           "--print-header",
                           action="store_true",
                           help="Print CSV header instead of parsing data")

    all_modes = HISTORY_STATS_MODES + ["bulk_history"]
    parser.add_argument("mode",
                        nargs="+",
                        choices=all_modes,
                        help="The data group to record, one or more of: " + ", ".join(all_modes),
                        metavar="mode")

    opts = parser.parse_args()

    # for convenience, set flags for whether any mode in a group is selected
    opts.history_stats_mode = bool(set(HISTORY_STATS_MODES).intersection(opts.mode))
    opts.bulk_mode = "bulk_history" in opts.mode

    if opts.history_stats_mode and opts.bulk_mode:
        parser.error("bulk_history cannot be combined with other modes for CSV output")

    if opts.samples is None:
        opts.samples = -1 if opts.bulk_mode else SAMPLES_DEFAULT

    if opts.timestamp is None:
        opts.history_time = None
    else:
        # Accept either raw epoch seconds or a formatted UTC timestamp.
        try:
            opts.history_time = int(opts.timestamp)
        except ValueError:
            try:
                opts.history_time = int(
                    datetime.datetime.strptime(opts.timestamp, "%Y-%m-%d_%H:%M:%S").timestamp())
            except ValueError:
                parser.error("Could not parse timestamp")
        if opts.verbose:
            print("Using timestamp",
                  datetime.datetime.fromtimestamp(opts.history_time, tz=datetime.timezone.utc))

    return opts
|
||||
|
||||
|
||||
def print_header(opts):
    """Print the CSV header line for the selected modes to stdout.

    Returns 0 (field names for JSON parsing are static, so this cannot fail).
    """
    header = ["datetimestamp_utc"]

    def header_add(names):
        # Expand "name[start,end]" / "name[end]" sequence notation into one
        # numbered column per element; plain names pass through unchanged.
        for name in names:
            name, start, end = BRACKETS_RE.match(name).group(1, 4, 5)
            if start:
                header.extend(name + "_" + str(x) for x in range(int(start), int(end)))
            elif end:
                header.extend(name + "_" + str(x) for x in range(int(end)))
            else:
                header.append(name)

    if opts.bulk_mode:
        general, bulk = starlink_json.history_bulk_field_names()
        header_add(general)
        header_add(bulk)

    if opts.history_stats_mode:
        groups = starlink_json.history_stats_field_names()
        general, ping, runlen, latency, loaded, usage = groups[0:6]
        header_add(general)
        if "ping_drop" in opts.mode:
            header_add(ping)
        if "ping_run_length" in opts.mode:
            header_add(runlen)
        # BUG FIX: ping_latency columns must come before ping_loaded_latency
        # so the header column order matches the data order produced by
        # get_data (which dispatches latency before loaded).
        if "ping_latency" in opts.mode:
            header_add(latency)
        if "ping_loaded_latency" in opts.mode:
            header_add(loaded)
        if "usage" in opts.mode:
            header_add(usage)

    print(",".join(header))
    return 0
|
||||
|
||||
|
||||
def get_data(opts, add_item, add_sequence, add_bulk):
    """Parse the JSON input and feed the results through the callbacks.

    Returns 0 on success, 1 on a parsing failure.
    """
    def dispatch(data):
        # Scalar fields go to add_item; "name[...]" sequence fields go to
        # add_sequence with the bracket suffix stripped from the name.
        for key, val in data.items():
            base, seq = BRACKETS_RE.match(key).group(1, 5)
            if seq is None:
                add_item(base, val)
            else:
                add_sequence(base, val)

    if opts.history_stats_mode:
        try:
            groups = starlink_json.history_stats(opts.filename, opts.samples, verbose=opts.verbose)
        except starlink_json.JsonError as e:
            logging.error("Failure getting history stats: %s", str(e))
            return 1
        general, ping, runlen, latency, loaded, usage = groups[0:6]
        dispatch(general)
        # Emit each optional stats group in its fixed output order.
        for mode_name, data in (("ping_drop", ping), ("ping_run_length", runlen),
                                ("ping_latency", latency), ("ping_loaded_latency", loaded),
                                ("usage", usage)):
            if mode_name in opts.mode:
                dispatch(data)

    if opts.bulk_mode and add_bulk:
        timestamp = int(time.time()) if opts.history_time is None else opts.history_time
        try:
            general, bulk = starlink_json.history_bulk_data(opts.filename,
                                                            opts.samples,
                                                            verbose=opts.verbose)
        except starlink_json.JsonError as e:
            logging.error("Failure getting bulk history: %s", str(e))
            return 1
        parsed_samples = general["samples"]
        new_counter = general["end_counter"]
        if opts.verbose:
            print("Establishing time base: {0} -> {1}".format(
                new_counter, datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)))
        # Walk the base time back so each parsed sample maps to one second.
        timestamp -= parsed_samples

        add_bulk(bulk, parsed_samples, timestamp, new_counter - parsed_samples)

    return 0
|
||||
|
||||
|
||||
def loop_body(opts):
    """Parse the input once and print it; returns get_data's status code."""
    def utc_iso(ts):
        # Naive ISO-8601 timestamp derived from a UTC epoch value.
        return datetime.datetime.fromtimestamp(
            ts, datetime.timezone.utc).replace(tzinfo=None).isoformat()

    if opts.verbose:
        row = []
    else:
        # CSV rows start with the timestamp the history data was pulled.
        pull_time = int(time.time()) if opts.history_time is None else opts.history_time
        row = [utc_iso(pull_time)]

    def cb_data_add_item(name, val):
        if opts.verbose:
            row.append("{0:22} {1}".format(VERBOSE_FIELD_MAP.get(name, name) + ":", val))
        elif name == "state" and val == "DISH_UNREACHABLE":
            # special case for get_status failure: this will be the lone item added
            row.extend(["", "", "", val])
        else:
            row.append(str(val))

    def cb_data_add_sequence(name, val):
        if opts.verbose:
            row.append("{0:22} {1}".format(
                VERBOSE_FIELD_MAP.get(name, name) + ":", ", ".join(str(v) for v in val)))
        else:
            row.extend(str(v) for v in val)

    def cb_add_bulk(bulk, count, timestamp, counter):
        if opts.verbose:
            print("Time range (UTC): {0} -> {1}".format(utc_iso(timestamp),
                                                        utc_iso(timestamp + count)))
            for key, val in bulk.items():
                print("{0:22} {1}".format(key + ":", ", ".join(str(v) for v in val)))
        else:
            # One CSV line per history sample, each one second apart.
            for i in range(count):
                timestamp += 1
                fields = [utc_iso(timestamp)]
                fields.extend(["" if val[i] is None else str(val[i]) for val in bulk.values()])
                print(",".join(fields))

    rc = get_data(opts, cb_data_add_item, cb_data_add_sequence, cb_add_bulk)

    if opts.verbose:
        if row:
            print("\n".join(row))
    else:
        # skip if only timestamp
        if len(row) > 1:
            print(",".join(row))

    return rc
|
||||
|
||||
|
||||
def main():
    """Entry point: parse args, then either print the header or parse data."""
    opts = parse_args()

    logging.basicConfig(format="%(levelname)s: %(message)s")

    if opts.print_header:
        sys.exit(print_header(opts))

    # for consistency with dish_grpc_text, pretend there was a loop
    sys.exit(loop_body(opts))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,227 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Write a PNG image representing Starlink obstruction map data.
|
||||
|
||||
This scripts queries obstruction map data from the Starlink user terminal
|
||||
(dish) reachable on the local network and writes a PNG image based on that
|
||||
data.
|
||||
|
||||
Each pixel in the image represents the signal quality in a particular
|
||||
direction, as observed by the dish. If the dish has not communicated with
|
||||
satellites located in that direction, the pixel will be the "no data" color;
|
||||
otherwise, it will be a color in the range from the "obstructed" color (no
|
||||
signal at all) to the "unobstructed" color (sufficient signal quality for full
|
||||
signal).
|
||||
|
||||
The coordinates of the pixels are the altitude and azimuth angles from the
|
||||
horizontal coordinate system representation of the sky, converted to Cartesian
|
||||
(rectangular) coordinates. The conversion is done in a way that maps all valid
|
||||
directions into a circle that touches the edges of the image. Pixels outside
|
||||
that circle will show up as "no data".
|
||||
|
||||
Azimuth is represented as angle from a line drawn from the center of the image
|
||||
to the center of the top edge of the image, where center-top is 0 degrees
|
||||
(North), the center of the right edge is 90 degrees (East), etc.
|
||||
|
||||
Altitude (elevation) is represented as distance from the center of the image,
|
||||
where the center of the image represents vertical up from the point of view of
|
||||
an observer located at the dish (zenith, which is usually not the physical
|
||||
direction the dish is pointing) and the further away from the center a pixel
|
||||
is, the closer to the horizon it is, down to a minimum altitude angle at the
|
||||
edge of the circle.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
from datetime import datetime
|
||||
import logging
|
||||
import os
|
||||
import png
|
||||
import sys
|
||||
import time
|
||||
|
||||
import starlink_grpc
|
||||
|
||||
DEFAULT_OBSTRUCTED_COLOR = "FFFF0000"
|
||||
DEFAULT_UNOBSTRUCTED_COLOR = "FFFFFFFF"
|
||||
DEFAULT_NO_DATA_COLOR = "00000000"
|
||||
DEFAULT_OBSTRUCTED_GREYSCALE = "FF00"
|
||||
DEFAULT_UNOBSTRUCTED_GREYSCALE = "FFFF"
|
||||
DEFAULT_NO_DATA_GREYSCALE = "0000"
|
||||
LOOP_TIME_DEFAULT = 0
|
||||
|
||||
|
||||
def loop_body(opts, context):
    """Fetch one obstruction map snapshot and write it out as a PNG image.

    Returns 0 on success, 1 on failure.
    """
    try:
        snr_data = starlink_grpc.obstruction_map(context)
    except starlink_grpc.GrpcError as e:
        logging.error("Failed getting obstruction map data: %s", str(e))
        return 1

    # BUG FIX: validate the data BEFORE opening the output file, so a bad
    # snapshot cannot create/truncate a file or leak an open handle.
    if not snr_data or not snr_data[0]:
        logging.error("Invalid SNR map data: Zero-length")
        return 1

    def pixel_bytes(row):
        # Yield the channel bytes for each map point: greyscale emits one
        # luminance byte (plus optional alpha); color emits R, G, B (plus
        # optional alpha), interpolating between the obstructed (0.0) and
        # unobstructed (1.0) colors. Negative points mean "no data".
        for point in row:
            if point > 1.0:
                # shouldn't happen, but just in case...
                point = 1.0

            if point >= 0.0:
                if opts.greyscale:
                    yield round(point * opts.unobstructed_color_g +
                                (1.0-point) * opts.obstructed_color_g)
                else:
                    yield round(point * opts.unobstructed_color_r +
                                (1.0-point) * opts.obstructed_color_r)
                    yield round(point * opts.unobstructed_color_g +
                                (1.0-point) * opts.obstructed_color_g)
                    yield round(point * opts.unobstructed_color_b +
                                (1.0-point) * opts.obstructed_color_b)
                if not opts.no_alpha:
                    yield round(point * opts.unobstructed_color_a +
                                (1.0-point) * opts.obstructed_color_a)
            else:
                if opts.greyscale:
                    yield opts.no_data_color_g
                else:
                    yield opts.no_data_color_r
                    yield opts.no_data_color_g
                    yield opts.no_data_color_b
                if not opts.no_alpha:
                    yield opts.no_data_color_a

    if opts.filename == "-":
        # Open new stdout file to get binary mode
        out_file = os.fdopen(sys.stdout.fileno(), "wb", closefd=False)
    else:
        # Fill in the filename template fields for this iteration.
        now = int(time.time())
        filename = opts.filename.replace("%u", str(now))
        filename = filename.replace("%d",
                                    datetime.utcfromtimestamp(now).strftime("%Y_%m_%d_%H_%M_%S"))
        filename = filename.replace("%s", str(opts.sequence))
        out_file = open(filename, "wb")
    try:
        writer = png.Writer(len(snr_data[0]),
                            len(snr_data),
                            alpha=(not opts.no_alpha),
                            greyscale=opts.greyscale)
        writer.write(out_file, (bytes(pixel_bytes(row)) for row in snr_data))
    finally:
        # BUG FIX: close the output file even when the PNG write fails.
        out_file.close()

    opts.sequence += 1
    return 0
|
||||
|
||||
|
||||
def parse_args():
    """Parse command line options and unpack colors into channel components."""
    parser = argparse.ArgumentParser(
        description="Collect directional obstruction map data from a Starlink user terminal and "
        "emit it as a PNG image")
    parser.add_argument(
        "filename",
        nargs="?",
        help="The image file to write, or - to write to stdout; may be a template with the "
        "following to be filled in per loop iteration: %%s for sequence number, %%d for UTC date "
        "and time, %%u for seconds since Unix epoch.")
    parser.add_argument(
        "-o",
        "--obstructed-color",
        help="Color of obstructed areas, in RGB, ARGB, L, or AL hex notation, default: " +
        DEFAULT_OBSTRUCTED_COLOR + " or " + DEFAULT_OBSTRUCTED_GREYSCALE)
    parser.add_argument(
        "-u",
        "--unobstructed-color",
        help="Color of unobstructed areas, in RGB, ARGB, L, or AL hex notation, default: " +
        DEFAULT_UNOBSTRUCTED_COLOR + " or " + DEFAULT_UNOBSTRUCTED_GREYSCALE)
    parser.add_argument(
        "-n",
        "--no-data-color",
        help="Color of areas with no data, in RGB, ARGB, L, or AL hex notation, default: " +
        DEFAULT_NO_DATA_COLOR + " or " + DEFAULT_NO_DATA_GREYSCALE)
    parser.add_argument(
        "-g",
        "--greyscale",
        action="store_true",
        help="Emit a greyscale image instead of the default full color image; greyscale images "
        "use L or AL hex notation for the color options")
    parser.add_argument(
        "-z",
        "--no-alpha",
        action="store_true",
        help="Emit an image without alpha (transparency) channel instead of the default that "
        "includes alpha channel")
    parser.add_argument("-e",
                        "--target",
                        help="host:port of dish to query, default is the standard IP address "
                        "and port (192.168.100.1:9200)")
    parser.add_argument("-t",
                        "--loop-interval",
                        type=float,
                        default=float(LOOP_TIME_DEFAULT),
                        help="Loop interval in seconds or 0 for no loop, default: " +
                        str(LOOP_TIME_DEFAULT))
    parser.add_argument("-s",
                        "--sequence",
                        type=int,
                        default=1,
                        help="Starting sequence number for templatized filenames, default: 1")
    parser.add_argument("-r",
                        "--reset",
                        action="store_true",
                        help="Reset obstruction map data before starting")
    opts = parser.parse_args()

    if opts.filename is None and not opts.reset:
        parser.error("Must specify a filename unless resetting")

    # Fill in mode-appropriate defaults for any color not given explicitly.
    if opts.obstructed_color is None:
        opts.obstructed_color = DEFAULT_OBSTRUCTED_GREYSCALE if opts.greyscale else DEFAULT_OBSTRUCTED_COLOR
    if opts.unobstructed_color is None:
        opts.unobstructed_color = DEFAULT_UNOBSTRUCTED_GREYSCALE if opts.greyscale else DEFAULT_UNOBSTRUCTED_COLOR
    if opts.no_data_color is None:
        opts.no_data_color = DEFAULT_NO_DATA_GREYSCALE if opts.greyscale else DEFAULT_NO_DATA_COLOR

    # Unpack each hex color string into per-channel integer attributes,
    # e.g. obstructed_color_r/_g/_b/_a (greyscale uses only _g and _a).
    for option in ("obstructed_color", "unobstructed_color", "no_data_color"):
        try:
            packed = int(getattr(opts, option), 16)
        except ValueError:
            logging.error("Invalid hex number for %s", option)
            sys.exit(1)
        if opts.greyscale:
            setattr(opts, option + "_a", (packed >> 8) & 255)
            setattr(opts, option + "_g", packed & 255)
        else:
            setattr(opts, option + "_a", (packed >> 24) & 255)
            setattr(opts, option + "_r", (packed >> 16) & 255)
            setattr(opts, option + "_g", (packed >> 8) & 255)
            setattr(opts, option + "_b", packed & 255)

    return opts
|
||||
|
||||
|
||||
def main():
    """Entry point: parse options, optionally reset the map, then loop."""
    opts = parse_args()

    logging.basicConfig(format="%(levelname)s: %(message)s")

    context = starlink_grpc.ChannelContext(target=opts.target)

    # BUG FIX: rc must be initialized here. With --reset and no filename the
    # loop below never runs, and sys.exit(rc) would raise NameError.
    rc = 0
    try:
        if opts.reset:
            starlink_grpc.reset_obstruction_map(context)

        if opts.filename is not None:
            next_loop = time.monotonic()
            while True:
                rc = loop_body(opts, context)
                if opts.loop_interval > 0.0:
                    now = time.monotonic()
                    # Keep a fixed cadence even if an iteration ran long.
                    next_loop = max(next_loop + opts.loop_interval, now)
                    time.sleep(next_loop - now)
                else:
                    break
    finally:
        context.close()

    sys.exit(rc)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,29 @@
|
||||
#!/usr/bin/env python3
"""Simple example of get_status request using grpc call directly."""

import sys

import grpc

try:
    from spacex_api.device import device_pb2
    from spacex_api.device import device_pb2_grpc
except ModuleNotFoundError:
    print("This script requires the generated gRPC protocol modules. See README file for details.",
          file=sys.stderr)
    sys.exit(1)

# Note that if you remove the 'with' clause here, you need to separately
# call channel.close() when you're done with the gRPC connection.
with grpc.insecure_channel("192.168.100.1:9200") as channel:
    device_stub = device_pb2_grpc.DeviceStub(channel)
    reply = device_stub.Handle(device_pb2.Request(get_status={}), timeout=10)

# Dump everything
print(reply)

# Just the software version
print("Software version:", reply.dish_get_status.device_info.software_version)

# Check if connected
print("Not connected" if reply.dish_get_status.HasField("outage") else "Connected")
|
||||
@@ -0,0 +1,5 @@
|
||||
#!/bin/sh

# Export the container environment so non-login processes (e.g. cron) see it.
printenv >> /etc/environment
# Configure the system timezone from $TZ.
# BUG FIX: quote "$TZ" so values with spaces or globs cannot break the commands.
ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
# Replace this shell with python3, passing all arguments through verbatim.
# BUG FIX: "$@" (quoted) preserves arguments containing whitespace; bare $@
# would re-split and glob-expand them.
exec /usr/local/bin/python3 "$@"
|
||||
@@ -0,0 +1,142 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Poll and record service information from a gRPC reflection server
|
||||
|
||||
This script will query a gRPC reflection server for descriptor information of
|
||||
all services supported by the server, excluding the reflection service itself,
|
||||
and write a serialized FileDescriptorSet protobuf containing all returned
|
||||
descriptors to a file, either once or in a periodic loop. This file can then
|
||||
be used by any tool that accepts such data, including protoc, the protocol
|
||||
buffer compiler.
|
||||
|
||||
Output files are named with the CRC32 value and byte length of the serialized
|
||||
FileDescriptorSet data. If those match the name of a file written previously,
|
||||
the data is assumed not to have changed and no new file is written. For this
|
||||
reason, it is recommended to use an output directory specific to the server,
|
||||
to avoid mixing with files written with data from other servers.
|
||||
|
||||
Although the default target option is the local IP and port number used by the
|
||||
gRPC service on a Starlink user terminal, this script is otherwise not
|
||||
specific to Starlink and should work for any gRPC server that does not require
|
||||
SSL and that has the reflection service enabled.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import binascii
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
|
||||
import grpc
|
||||
from yagrc import dump
|
||||
from yagrc import reflector
|
||||
|
||||
TARGET_DEFAULT = "192.168.100.1:9200"
|
||||
LOOP_TIME_DEFAULT = 0
|
||||
RETRY_DELAY_DEFAULT = 0
|
||||
|
||||
|
||||
def parse_args():
    """Parse command line options for the protoset poller."""
    parser = argparse.ArgumentParser(
        description="Poll a gRPC reflection server and record a serialized "
        "FileDescriptorSet (protoset) of the reflected information")

    parser.add_argument("outdir",
                        nargs="?",
                        metavar="OUTDIR",
                        help="Directory in which to write protoset files")
    parser.add_argument("-g",
                        "--target",
                        default=TARGET_DEFAULT,
                        help="host:port of device to query, default: " + TARGET_DEFAULT)
    parser.add_argument("-n",
                        "--print-only",
                        action="store_true",
                        help="Print the protoset filename instead of writing the data")
    parser.add_argument("-r",
                        "--retry-delay",
                        type=float,
                        default=float(RETRY_DELAY_DEFAULT),
                        help="Time in seconds to wait before retrying after network "
                        "error or 0 for no retry, default: " + str(RETRY_DELAY_DEFAULT))
    parser.add_argument("-t",
                        "--loop-interval",
                        type=float,
                        default=float(LOOP_TIME_DEFAULT),
                        help="Loop interval in seconds or 0 for no loop, default: " +
                        str(LOOP_TIME_DEFAULT))
    parser.add_argument("-v", "--verbose", action="store_true", help="Be verbose")

    opts = parser.parse_args()

    # --print-only produces no files, so the output directory is optional.
    if opts.outdir is None and not opts.print_only:
        parser.error("Output dir is required unless --print-only option set")

    return opts
|
||||
|
||||
|
||||
def loop_body(opts):
    """Fetch reflection data once (with optional retries) and record it."""
    while True:
        try:
            with grpc.insecure_channel(opts.target) as channel:
                protoset = dump.dump_protocols(channel)
                break
        except reflector.ServiceError as e:
            logging.error("Problem with reflection service: %s", str(e))
            # Only retry on network-related errors, not service errors
            return
        except grpc.RpcError as e:
            # grpc.RpcError error message is not very useful, but grpc.Call has
            # something slightly better
            if isinstance(e, grpc.Call):
                msg = e.details()
            else:
                msg = "Unknown communication or service error"
            print("Problem communicating with reflection service:", msg)
            if opts.retry_delay <= 0.0:
                return
            time.sleep(opts.retry_delay)

    # Name files by checksum + length so unchanged data can be detected
    # without comparing contents.
    filename = "{0:08x}_{1}.protoset".format(binascii.crc32(protoset), len(protoset))
    if opts.print_only:
        print("Protoset:", filename)
        return
    try:
        # Exclusive create: an existing file means this data was seen before.
        with open(filename, mode="xb") as outfile:
            outfile.write(protoset)
        print("New protoset found:", filename)
    except FileExistsError:
        if opts.verbose:
            print("Existing protoset:", filename)
|
||||
|
||||
|
||||
def goto_dir(outdir):
    """Create outdir (and parents) if needed and make it the working directory.

    Exits the process with status 1 on any filesystem error.
    """
    try:
        outdir_abs = os.path.abspath(outdir)
        os.makedirs(outdir_abs, exist_ok=True)
        # BUG FIX (consistency): chdir to the same absolute path that was
        # created, rather than the possibly-relative original argument.
        os.chdir(outdir_abs)
    except OSError as e:
        logging.error("Output directory error: %s", str(e))
        sys.exit(1)
|
||||
|
||||
|
||||
def main():
    """Entry point: parse args, enter the output directory, run the loop."""
    opts = parse_args()
    logging.basicConfig(format="%(levelname)s: %(message)s")
    if not opts.print_only:
        goto_dir(opts.outdir)

    deadline = time.monotonic()
    while True:
        loop_body(opts)
        if opts.loop_interval <= 0.0:
            break
        current = time.monotonic()
        # Keep a fixed cadence even if an iteration ran long.
        deadline = max(deadline + opts.loop_interval, current)
        time.sleep(deadline - current)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,101 @@
|
||||
"""Shared logic for main loop control.
|
||||
|
||||
This module provides support for running a function from a loop at fixed
|
||||
intervals using monotonic time or on cron-like schedule using wall clock time.
|
||||
|
||||
The cron scheduler uses the same schedule format string that cron uses for
|
||||
crontab entries, and will do its best to remain on schedule despite clock
|
||||
adjustments.
|
||||
"""
|
||||
|
||||
try:
|
||||
from croniter import croniter
|
||||
import dateutil.tz
|
||||
croniter_ok = True
|
||||
except ImportError:
|
||||
croniter_ok = False
|
||||
from datetime import datetime
|
||||
import signal
|
||||
import time
|
||||
|
||||
# Max time to sleep when using non-monotonic time. This helps protect against
|
||||
# oversleeping as the result of large clock adjustments.
|
||||
MAX_SLEEP = 3600.0
|
||||
|
||||
|
||||
class Terminated(Exception):
    """Raised by the SIGTERM handler so the main loop can unwind cleanly."""
|
||||
|
||||
|
||||
def handle_sigterm(signum, frame):
    """Signal handler that converts SIGTERM into a Terminated exception."""
    # Raising lets the main loop's except/finally blocks perform cleanup.
    raise Terminated
|
||||
|
||||
|
||||
def add_args(parser):
    """Attach the shared loop-control options to an argparse parser."""
    loop_group = parser.add_argument_group(title="Loop options")
    loop_group.add_argument("-t", "--loop-interval", type=float, help="Run loop at interval, in seconds")
    loop_group.add_argument("-c",
                            "--loop-cron",
                            help="Run loop on schedule defined by cron format expression")
    loop_group.add_argument("-m",
                            "--cron-timezone",
                            help='Timezone name (IANA name or "UTC") to use for --loop-cron '
                            'schedule; default is system local time')
|
||||
|
||||
|
||||
def check_args(opts, parser):
    """Validate and normalize the loop control options.

    Calls `parser.error` (which exits the process) on any invalid option
    combination. On success, sets `opts.timezone` when cron scheduling is in
    use, and defaults `opts.loop_interval` to 0.0 when unset.

    Args:
        opts: The options object returned by `parser.parse_args`.
        parser (argparse.ArgumentParser): Parser used for error reporting.
    """
    if opts.loop_interval is not None and opts.loop_cron is not None:
        parser.error("At most one of --loop-interval and --loop-cron may be used")

    if opts.cron_timezone and not opts.loop_cron:
        parser.error("cron timezone specified, but not using cron scheduling")

    if opts.loop_cron is not None:
        # croniter and dateutil are optional dependencies, only required for
        # cron-style scheduling; croniter_ok records whether import succeeded.
        if not croniter_ok:
            parser.error("croniter is not installed, --loop-cron requires it")
        if not croniter.is_valid(opts.loop_cron):
            parser.error("Invalid cron format")
        # gettz(None) returns the system local timezone, when determinable.
        opts.timezone = dateutil.tz.gettz(opts.cron_timezone)
        if opts.timezone is None:
            if opts.cron_timezone is None:
                parser.error("Failed to get local timezone, may need to use --cron-timezone")
            else:
                parser.error("Invalid timezone name")

    if opts.loop_interval is None:
        opts.loop_interval = 0.0
|
||||
|
||||
|
||||
def run_loop(opts, loop_body, *loop_args):
    """Run `loop_body` once, at a fixed interval, or on a cron schedule.

    The scheduling mode is selected by the options prepared by `check_args`:
    cron schedule if `opts.loop_cron` is set, fixed interval if
    `opts.loop_interval` > 0, otherwise a single run.

    Args:
        opts: Options object; uses `loop_interval`, `loop_cron`, and (for
            cron mode) `timezone`.
        loop_body: Callable invoked each iteration; its return value is
            captured as the result code.
        *loop_args: Arguments passed through to `loop_body`.

    Returns:
        The return value of the most recent completed `loop_body` call, or 0
        if it never completed. KeyboardInterrupt and SIGTERM (converted to
        `Terminated`) end the loop cleanly instead of propagating.
    """
    # Convert SIGTERM into an exception so the except clause below runs and
    # callers' cleanup code gets a chance to execute.
    signal.signal(signal.SIGTERM, handle_sigterm)

    rc = 0
    try:
        if opts.loop_interval <= 0.0 and not opts.loop_cron:
            # No looping requested: run exactly once.
            rc = loop_body(*loop_args)
        elif opts.loop_cron:
            # Cron mode uses wall clock time, so it must tolerate clock
            # adjustments in either direction.
            criter = croniter(opts.loop_cron, datetime.now(tz=opts.timezone))
            now = time.time()
            next_loop = criter.get_next(start_time=now)
            while True:
                while now < next_loop:
                    # This is to protect against clock getting set backwards
                    # by a large amount. Normally, it should do nothing:
                    next_loop = criter.get_next(start_time=now)
                    # Cap the sleep so a large forward clock jump is noticed
                    # within MAX_SLEEP seconds at worst.
                    time.sleep(min(next_loop - now, MAX_SLEEP))
                    now = time.time()
                next_loop = criter.get_next(start_time=now)
                rc = loop_body(*loop_args)
                now = time.time()
        else:
            # Interval mode uses monotonic time, immune to clock adjustments.
            next_loop = time.monotonic()
            while True:
                rc = loop_body(*loop_args)
                now = time.monotonic()
                # If an iteration overran the interval, restart timing from
                # now rather than trying to catch up missed iterations.
                next_loop = max(next_loop + opts.loop_interval, now)
                time.sleep(next_loop - now)
    except (KeyboardInterrupt, Terminated):
        pass

    return rc
|
||||
@@ -0,0 +1,10 @@
|
||||
[build-system]
|
||||
requires = [
|
||||
"setuptools>=42",
|
||||
"setuptools_scm[toml]>=3.4",
|
||||
"wheel"
|
||||
]
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[tool.setuptools_scm]
|
||||
root = ".."
|
||||
@@ -0,0 +1,27 @@
|
||||
[metadata]
|
||||
name = starlink-grpc-core
|
||||
url = https://github.com/sparky8512/starlink-grpc-tools
|
||||
author_email = sparky8512-py@yahoo.com
|
||||
license_files = ../LICENSE
|
||||
classifiers =
|
||||
Development Status :: 4 - Beta
|
||||
Intended Audience :: Developers
|
||||
License :: OSI Approved :: The Unlicense (Unlicense)
|
||||
Operating System :: OS Independent
|
||||
Programming Language :: Python :: 3
|
||||
Topic :: Software Development :: Libraries :: Python Modules
|
||||
description = Core functions for Starlink gRPC communication
|
||||
long_description = file: README.md
|
||||
long_description_content_type = text/markdown
|
||||
|
||||
[options]
|
||||
install_requires =
|
||||
grpcio>=1.12.0
|
||||
protobuf>=3.6.0
|
||||
yagrc>=1.1.1
|
||||
typing-extensions>=4.3.0
|
||||
package_dir =
|
||||
=..
|
||||
py_modules =
|
||||
starlink_grpc
|
||||
python_requires = >=3.7
|
||||
@@ -0,0 +1,3 @@
|
||||
# Minimal build shim: all package metadata lives in setup.cfg / pyproject.toml.
import setuptools

setuptools.setup()
|
||||
@@ -0,0 +1,88 @@
|
||||
#!/usr/bin/env python3
|
||||
"""A simple(?) example for using the starlink_grpc module.
|
||||
|
||||
This script shows an example of how to use the starlink_grpc module to
|
||||
implement polling of status and/or history data.
|
||||
|
||||
By itself, it's not very useful unless you're trying to understand how the
|
||||
status data correlates with certain aspects of the history data because all it
|
||||
does is to dump both status and history data when it detects certain
|
||||
conditions in the history data.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from datetime import timezone
|
||||
import time
|
||||
|
||||
import starlink_grpc
|
||||
|
||||
INITIAL_SAMPLES = 20
|
||||
LOOP_SLEEP_TIME = 4
|
||||
|
||||
|
||||
def run_loop(context):
    """Poll status and history data forever, dumping both when triggered.

    Each iteration fetches current status and the history samples newer than
    those already examined, computes a trigger condition from the history
    data, and prints both data sets while the trigger is (or just was)
    active. Never returns normally; exits only via exception (e.g.
    KeyboardInterrupt).

    Args:
        context (starlink_grpc.ChannelContext): Channel context reused
            across RPC calls.
    """
    samples = INITIAL_SAMPLES
    # History counter of the newest sample already examined; None on first pass.
    counter = None
    prev_triggered = False
    while True:
        try:
            # `starlink_grpc.status_data` returns a tuple of 3 dicts, but in case
            # the API changes to add more in the future, it's best to reference
            # them by index instead of direct assignment from the function call.
            groups = starlink_grpc.status_data(context=context)
            status = groups[0]

            # On the other hand, `starlink_grpc.history_bulk_data` will always
            # return 2 dicts, because that's all the data there is.
            general, bulk = starlink_grpc.history_bulk_data(samples, start=counter, context=context)
        except starlink_grpc.GrpcError:
            # Dish rebooting maybe, or LAN connectivity error. Just ignore it
            # and hope it goes away.
            pass
        else:
            # The following is what actually does stuff with the data. It should
            # be replaced with something more useful.

            # This computes a trigger detecting any packet loss (ping drop):
            #triggered = any(x > 0 for x in bulk["pop_ping_drop_rate"])
            # This computes a trigger detecting samples marked as obstructed:
            #triggered = any(bulk["obstructed"])
            # This computes a trigger detecting samples not marked as scheduled:
            triggered = not all(bulk["scheduled"])
            if triggered or prev_triggered:
                print("Triggered" if triggered else "Continued", "at:",
                      datetime.now(tz=timezone.utc))
                print("status:", status)
                print("history:", bulk)
                if not triggered:
                    print()

            prev_triggered = triggered
            # The following makes the next loop only pull the history samples that
            # are newer than the ones already examined.
            samples = -1
            counter = general["end_counter"]

        # And this is a not-very-robust way of implementing an interval loop.
        # Note that a 4 second loop will poll the history buffer pretty
        # frequently. Even though we only ask for new samples (which should
        # only be 4 of them), the grpc layer needs to pull the entire 12 hour
        # history buffer each time, only to discard most of it.
        time.sleep(LOOP_SLEEP_TIME)
|
||||
|
||||
|
||||
def main():
    """Entry point: open a shared grpc channel and run the polling loop."""
    # This part is optional. The `starlink_grpc` functions can work without a
    # `starlink_grpc.ChannelContext` object passed in, but they will open a
    # new channel for each RPC call (so twice for each loop iteration) without
    # it.
    context = starlink_grpc.ChannelContext()

    try:
        run_loop(context)
    finally:
        # Close the channel even when run_loop exits via an exception.
        context.close()
|
||||
|
||||
|
||||
# Run the example only when executed as a script.
if __name__ == "__main__":
    main()
|
||||
@@ -0,0 +1,11 @@
|
||||
grpcio>=1.12.0
|
||||
grpcio-tools>=1.20.0
|
||||
protobuf>=3.6.0
|
||||
yagrc>=1.1.1
|
||||
paho-mqtt>=1.5.1
|
||||
influxdb>=5.3.1
|
||||
influxdb_client>=1.23.0
|
||||
pypng>=0.0.20
|
||||
typing-extensions>=4.3.0
|
||||
croniter>=1.0.1
|
||||
python-dateutil>=2.7.0
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,396 @@
|
||||
"""Parser for JSON format gRPC output from a Starlink user terminal.
|
||||
|
||||
Expects input as from grpcurl get_history request.
|
||||
|
||||
Handling output for other request responses may be added in the future, but
|
||||
the others don't really need as much interpretation as the get_history
|
||||
response does.
|
||||
|
||||
See the starlink_grpc module docstring for descriptions of the stat elements.
|
||||
"""
|
||||
|
||||
import json
|
||||
import math
|
||||
import statistics
|
||||
import sys
|
||||
|
||||
from itertools import chain
|
||||
|
||||
|
||||
class JsonError(Exception):
|
||||
"""Provides error info when something went wrong with JSON parsing."""
|
||||
|
||||
|
||||
def history_bulk_field_names():
    """Return the field names of the bulk history data.

    Note:
        See `starlink_grpc` module docs regarding brackets in field names.

    Returns:
        A tuple with 2 lists, the first with general data names, the second
        with bulk history data names.
    """
    general_names = [
        "samples",
        "end_counter",
    ]
    bulk_names = [
        "pop_ping_drop_rate[]",
        "pop_ping_latency_ms[]",
        "downlink_throughput_bps[]",
        "uplink_throughput_bps[]",
        "snr[]",
        "scheduled[]",
        "obstructed[]",
    ]
    return general_names, bulk_names
|
||||
|
||||
|
||||
def history_ping_field_names():
    """Deprecated. Use history_stats_field_names instead."""
    # Backward-compatible alias: only the first 3 stat groups existed when
    # this function was the public interface.
    all_groups = history_stats_field_names()
    return all_groups[:3]
|
||||
|
||||
|
||||
def history_stats_field_names():
    """Return the field names of the packet loss stats.

    Note:
        See `starlink_grpc` module docs regarding brackets in field names.

    Returns:
        A tuple with 6 lists, with general data names, ping drop stat names,
        ping drop run length stat names, ping latency stat names, loaded ping
        latency stat names, and bandwidth usage stat names, in that order.

    Note:
        Additional lists may be added to this tuple in the future with
        additional data groups, so it not recommended for the caller to
        assume exactly 6 elements.
    """
    general_names = [
        "samples",
        "end_counter",
    ]
    ping_drop_names = [
        "total_ping_drop",
        "count_full_ping_drop",
        "count_obstructed",
        "total_obstructed_ping_drop",
        "count_full_obstructed_ping_drop",
        "count_unscheduled",
        "total_unscheduled_ping_drop",
        "count_full_unscheduled_ping_drop",
    ]
    run_length_names = [
        "init_run_fragment",
        "final_run_fragment",
        "run_seconds[1,61]",
        "run_minutes[1,61]",
    ]
    latency_names = [
        "mean_all_ping_latency",
        "deciles_all_ping_latency[11]",
        "mean_full_ping_latency",
        "deciles_full_ping_latency[11]",
        "stdev_full_ping_latency",
    ]
    loaded_latency_names = [
        "load_bucket_samples[15]",
        "load_bucket_min_latency[15]",
        "load_bucket_median_latency[15]",
        "load_bucket_max_latency[15]",
    ]
    usage_names = [
        "download_usage",
        "upload_usage",
    ]
    return (general_names, ping_drop_names, run_length_names, latency_names,
            loaded_latency_names, usage_names)
|
||||
|
||||
|
||||
def get_history(filename):
    """Read JSON data and return the raw history in dict format.

    Args:
        filename (str): Filename from which to read JSON data, or "-" to read
            from standard input.

    Raises:
        Various exceptions depending on Python version: Failure to open or
        read input or invalid JSON read on input.
    """
    if filename == "-":
        parsed = json.load(sys.stdin)
        return parsed["dishGetHistory"]
    with open(filename) as json_file:
        parsed = json.load(json_file)
    return parsed["dishGetHistory"]
|
||||
|
||||
|
||||
def _compute_sample_range(history, parse_samples, verbose=False):
|
||||
current = int(history["current"])
|
||||
samples = len(history["popPingDropRate"])
|
||||
|
||||
if verbose:
|
||||
print("current counter: " + str(current))
|
||||
print("All samples: " + str(samples))
|
||||
|
||||
samples = min(samples, current)
|
||||
|
||||
if verbose:
|
||||
print("Valid samples: " + str(samples))
|
||||
|
||||
if parse_samples < 0 or samples < parse_samples:
|
||||
parse_samples = samples
|
||||
|
||||
start = current - parse_samples
|
||||
|
||||
if start == current:
|
||||
return range(0), 0, current
|
||||
|
||||
# This is ring buffer offset, so both index to oldest data sample and
|
||||
# index to next data sample after the newest one.
|
||||
end_offset = current % samples
|
||||
start_offset = start % samples
|
||||
|
||||
# Set the range for the requested set of samples. This will iterate
|
||||
# sample index in order from oldest to newest.
|
||||
if start_offset < end_offset:
|
||||
sample_range = range(start_offset, end_offset)
|
||||
else:
|
||||
sample_range = chain(range(start_offset, samples), range(0, end_offset))
|
||||
|
||||
return sample_range, current - start, current
|
||||
|
||||
|
||||
def history_bulk_data(filename, parse_samples, verbose=False):
    """Fetch history data for a range of samples.

    Args:
        filename (str): Filename from which to read JSON data, or "-" to read
            from standard input.
        parse_samples (int): Number of samples to process, or -1 to parse all
            available samples.
        verbose (bool): Optionally produce verbose output.

    Returns:
        A tuple with 2 dicts, the first mapping general data names to their
        values and the second mapping bulk history data names to their values.

        Note: The field names in the returned data do _not_ include brackets
            to indicate sequences, since those would just need to be parsed
            out. The general data is all single items and the bulk history
            data is all sequences.

    Raises:
        JsonError: Failure to open, read, or parse JSON on input.
    """
    try:
        history = get_history(filename)
    except ValueError as e:
        raise JsonError("Failed to parse JSON: " + str(e))
    except Exception as e:
        raise JsonError(e)

    sample_range, parsed_samples, current = _compute_sample_range(history,
                                                                  parse_samples,
                                                                  verbose=verbose)

    drop_rates = []
    latencies = []
    downlink = []
    uplink = []

    drop_source = history["popPingDropRate"]
    latency_source = history["popPingLatencyMs"]
    downlink_source = history["downlinkThroughputBps"]
    uplink_source = history["uplinkThroughputBps"]
    for i in sample_range:
        drop = drop_source[i]
        drop_rates.append(drop)
        # A fully dropped sample has no meaningful latency reading.
        latencies.append(latency_source[i] if drop < 1 else None)
        downlink.append(downlink_source[i])
        uplink.append(uplink_source[i])

    general = {
        "samples": parsed_samples,
        "end_counter": current,
    }
    bulk = {
        "pop_ping_drop_rate": drop_rates,
        "pop_ping_latency_ms": latencies,
        "downlink_throughput_bps": downlink,
        "uplink_throughput_bps": uplink,
        # The following are obsoleted in the grpc service and not present in
        # the JSON data, so fill with None placeholders of matching length.
        "snr": [None] * parsed_samples,
        "scheduled": [None] * parsed_samples,
        "obstructed": [None] * parsed_samples,
    }
    return general, bulk
|
||||
|
||||
|
||||
def history_ping_stats(filename, parse_samples, verbose=False):
    """Deprecated. Use history_stats instead."""
    # Backward-compatible alias: only the first 3 stat groups existed when
    # this function was the public interface.
    stats = history_stats(filename, parse_samples, verbose=verbose)
    return stats[:3]
|
||||
|
||||
|
||||
def history_stats(filename, parse_samples, verbose=False):
    """Fetch, parse, and compute ping and usage stats.

    Args:
        filename (str): Filename from which to read JSON data, or "-" to read
            from standard input.
        parse_samples (int): Number of samples to process, or -1 to parse all
            available samples.
        verbose (bool): Optionally produce verbose output.

    Returns:
        A tuple with 6 dicts, mapping general data names, ping drop stat
        names, ping drop run length stat names, ping latency stat names,
        loaded ping latency stat names, and bandwidth usage stat names to
        their respective values, in that order.

    Note:
        Additional dicts may be added to this tuple in the future with
        additional data groups, so it not recommended for the caller to
        assume exactly 6 elements.

    Raises:
        JsonError: Failure to open, read, or parse JSON on input.
    """
    try:
        history = get_history(filename)
    except ValueError as e:
        raise JsonError("Failed to parse JSON: " + str(e))
    except Exception as e:
        raise JsonError(e)

    sample_range, parsed_samples, current = _compute_sample_range(history,
                                                                  parse_samples,
                                                                  verbose=verbose)

    # Drop stat accumulators. The obstructed/unscheduled counters are kept
    # for interface compatibility but are never updated here, since the JSON
    # data does not carry those per-sample flags.
    tot = 0.0
    count_full_drop = 0
    count_unsched = 0
    total_unsched_drop = 0.0
    count_full_unsched = 0
    count_obstruct = 0
    total_obstruct_drop = 0.0
    count_full_obstruct = 0

    # Histograms of consecutive full-drop run lengths, bucketed by seconds
    # (runs up to 60s) and by minutes (longer runs).
    second_runs = [0] * 60
    minute_runs = [0] * 60
    run_length = 0
    init_run_length = None

    usage_down = 0.0
    usage_up = 0.0

    # Latency sample collections: fully connected samples, all partially
    # connected samples (weighted by connectedness), and per-load buckets.
    rtt_full = []
    rtt_all = []
    rtt_buckets = [[] for _ in range(15)]

    for i in sample_range:
        d = history["popPingDropRate"][i]
        if d >= 1:
            # just in case...
            d = 1
            count_full_drop += 1
            run_length += 1
        elif run_length > 0:
            if init_run_length is None:
                # First run seen is a fragment continued from the prior
                # sample range; report it separately from the histograms.
                init_run_length = run_length
            else:
                if run_length <= 60:
                    second_runs[run_length - 1] += run_length
                else:
                    minute_runs[min((run_length-1) // 60 - 1, 59)] += run_length
            run_length = 0
        elif init_run_length is None:
            init_run_length = 0
        tot += d

        down = history["downlinkThroughputBps"][i]
        usage_down += down
        up = history["uplinkThroughputBps"][i]
        usage_up += up

        rtt = history["popPingLatencyMs"][i]
        # note that "full" here means the opposite of ping drop full
        if d == 0.0:
            rtt_full.append(rtt)
            # Bucket by total throughput in power-of-2 multiples of 500kbps.
            if down + up > 500000:
                rtt_buckets[min(14, int(math.log2((down+up) / 500000)))].append(rtt)
            else:
                rtt_buckets[0].append(rtt)
        if d < 1.0:
            # Weight latency by the fraction of pings that connected.
            rtt_all.append((rtt, 1.0 - d))

    # If the entire sample set is one big drop run, it will be both initial
    # fragment (continued from prior sample range) and final one (continued
    # to next sample range), but to avoid double-reporting, just call it
    # the initial run.
    if init_run_length is None:
        init_run_length = run_length
        run_length = 0

    def weighted_mean_and_quantiles(data, n):
        # Compute the weighted mean and n+1 quantile boundary values of
        # `data`, a sequence of (value, weight) tuples sorted by value.
        if not data:
            return None, [None] * (n+1)
        total_weight = sum(x[1] for x in data)
        result = []
        items = iter(data)
        value, accum_weight = next(items)
        accum_value = value * accum_weight
        for boundary in (total_weight * x / n for x in range(n)):
            while accum_weight < boundary:
                try:
                    value, weight = next(items)
                    accum_value += value * weight
                    accum_weight += weight
                except StopIteration:
                    # shouldn't happen, but in case of float precision weirdness...
                    break
            result.append(value)
        result.append(data[-1][0])
        # BUGFIX: items remaining past the last quantile boundary must also
        # contribute value*weight (not the raw value) to the weighted mean,
        # since the result is divided by total_weight below.
        accum_value += sum(x[0] * x[1] for x in items)
        return accum_value / total_weight, result

    bucket_samples = []
    bucket_min = []
    bucket_median = []
    bucket_max = []
    for bucket in rtt_buckets:
        if bucket:
            bucket_samples.append(len(bucket))
            bucket_min.append(min(bucket))
            bucket_median.append(statistics.median(bucket))
            bucket_max.append(max(bucket))
        else:
            bucket_samples.append(0)
            bucket_min.append(None)
            bucket_median.append(None)
            bucket_max.append(None)

    rtt_all.sort(key=lambda x: x[0])
    wmean_all, wdeciles_all = weighted_mean_and_quantiles(rtt_all, 10)
    rtt_full.sort()
    mean_full, deciles_full = weighted_mean_and_quantiles(tuple((x, 1.0) for x in rtt_full), 10)

    return {
        "samples": parsed_samples,
        "end_counter": current,
    }, {
        "total_ping_drop": tot,
        "count_full_ping_drop": count_full_drop,
        "count_obstructed": count_obstruct,
        "total_obstructed_ping_drop": total_obstruct_drop,
        "count_full_obstructed_ping_drop": count_full_obstruct,
        "count_unscheduled": count_unsched,
        "total_unscheduled_ping_drop": total_unsched_drop,
        "count_full_unscheduled_ping_drop": count_full_unsched,
    }, {
        "init_run_fragment": init_run_length,
        "final_run_fragment": run_length,
        "run_seconds[1,]": second_runs,
        "run_minutes[1,]": minute_runs,
    }, {
        "mean_all_ping_latency": wmean_all,
        "deciles_all_ping_latency[]": wdeciles_all,
        "mean_full_ping_latency": mean_full,
        "deciles_full_ping_latency[]": deciles_full,
        # Usage is reported in bytes; throughput samples are bits per second
        # at 1 sample per second, hence the divide by 8.
        "stdev_full_ping_latency": statistics.pstdev(rtt_full) if rtt_full else None,
    }, {
        "load_bucket_samples[]": bucket_samples,
        "load_bucket_min_latency[]": bucket_min,
        "load_bucket_median_latency[]": bucket_median,
        "load_bucket_max_latency[]": bucket_max,
    }, {
        "download_usage": int(round(usage_down / 8)),
        "upload_usage": int(round(usage_up / 8)),
    }
|
||||
@@ -0,0 +1,12 @@
|
||||
[Unit]
|
||||
Description=Starlink GRPC to InfluxDB 2.x exporter
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/opt/starlink-grpc-tools/
|
||||
Environment=INFLUXDB_URL=http://localhost:8086 INFLUXDB_TOKEN=<changeme> INFLUXDB_Bucket=<changeme> INFLUXDB_ORG=<changeme>
|
||||
ExecStart=/opt/starlink-grpc-tools/venv/bin/python3 dish_grpc_influx2.py -t 10 status alert_detail
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -0,0 +1,12 @@
|
||||
[Unit]
|
||||
Description=Starlink GRPC to MQTT exporter
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/opt/starlink-grpc-tools/
|
||||
Environment=MQTT_HOST=localhost MQTT_PORT=1883 MQTT_USERNAME=<changeme> MQTT_PASSWORD=<changeme> MQTT_SSL=false
|
||||
ExecStart=/opt/starlink-grpc-tools/venv/bin/python3 dish_grpc_mqtt.py -t 10 status alert_detail
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -0,0 +1,12 @@
|
||||
[Unit]
|
||||
Description=Starlink GRPC to Prometheus exporter
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/opt/starlink-grpc-tools/
|
||||
ExecStart=/opt/starlink-grpc-tools/venv/bin/python3 dish_grpc_prometheus.py status alert_detail usage location power
|
||||
KillSignal=SIGINT
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
Reference in New Issue
Block a user