This commit is contained in:
2025-06-16 15:14:23 +02:00
commit 074e590073
3174 changed files with 428263 additions and 0 deletions

View File

@ -0,0 +1,13 @@
namespace Mirror
{
    // One entry in a lag compensation history: a snapshot of whatever state
    // the project decides to capture (implementations are project-specific;
    // LagCompensation.Insert/Sample are generic over 'T : Capture').
    public interface Capture
    {
        // server timestamp at time of capture.
        double timestamp { get; set; }

        // optional gizmo drawing for visual debugging.
        // history is only known on the server, which usually doesn't render.
        // showing Gizmos in the Editor is enough.
        void DrawGizmo();
    }
}

View File

@ -0,0 +1,18 @@
fileFormatVersion: 2
guid: 347e831952e943a49095cadd39a5aeb2
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
userData:
assetBundleName:
assetBundleVariant:
AssetOrigin:
serializedVersion: 1
productId: 129321
packageName: Mirror
packageVersion: 96.0.1
assetPath: Assets/Mirror/Core/LagCompensation/Capture.cs
uploadId: 736421

View File

@ -0,0 +1,139 @@
// HistoryBounds keeps a bounding box of all the object's bounds in the past N seconds.
// useful to decide which objects to rollback, instead of rolling back all of them.
// https://www.youtube.com/watch?v=zrIY0eIyqmI (37:00)
// standalone C# implementation to be engine (and language) agnostic.
using System.Collections.Generic;
using UnityEngine;
namespace Mirror
{
// FakeByte: gather bounds in smaller buckets.
// for example, bucket(t0,t1,t2), bucket(t3,t4,t5), ...
// instead of removing old bounds t0, t1, ...
// we remove a whole bucket every 3 times: bucket(t0,t1,t2)
// and when building total bounds, we encapsulate a few larger buckets
// instead of many smaller bounds.
//
// => a bucket is encapsulate(bounds0, bounds1, bounds2) so we don't
// need a custom struct, simply reuse bounds but remember that each
// entry includes N timestamps.
//
// => note that simply reducing capture interval is _not_ the same.
// we want to capture in detail in case players run in zig-zag.
// but still grow larger buckets internally.
// Keeps a sliding-window bounding box over the last N inserted bounds.
public class HistoryBounds
{
    // mischa: use MinMaxBounds to avoid Unity Bounds.Encapsulate conversions.
    // how many inserted bounds get merged into a single bucket.
    readonly int boundsPerBucket;

    // completed buckets, oldest first (Queue.Dequeue returns the oldest).
    readonly Queue<MinMaxBounds> fullBuckets;

    // full bucket limit. older ones will be removed.
    readonly int bucketLimit;

    // bucket in progress, contains 0..boundsPerBucket bounds encapsulated.
    MinMaxBounds? currentBucket;
    int currentBucketSize;

    // amount of total bounds, including bounds in full buckets + current
    public int boundsCount { get; private set; }

    // total bounds encapsulating all of the bounds history.
    // totalMinMax is used for internal calculations.
    // public total is used for Unity representation.
    MinMaxBounds totalMinMax;
    public Bounds total
    {
        get
        {
            Bounds bounds = new Bounds();
            bounds.SetMinMax(totalMinMax.min, totalMinMax.max);
            return bounds;
        }
    }

    // boundsLimit: total amount of bounds to keep in history.
    // boundsPerBucket: how many bounds are encapsulated into one bucket.
    public HistoryBounds(int boundsLimit, int boundsPerBucket)
    {
        // bucketLimit via '/' cuts off remainder.
        // that's what we want, since we always have a 'currentBucket'.
        this.boundsPerBucket = boundsPerBucket;
        this.bucketLimit = (boundsLimit / boundsPerBucket);

        // initialize queue with maximum capacity to avoid runtime resizing
        // capacity +1 because it makes the code easier if we insert first, and then remove.
        fullBuckets = new Queue<MinMaxBounds>(bucketLimit + 1);
    }

    // insert new bounds into history. calculates new total bounds.
    // Queue.Dequeue() always has the oldest bounds.
    public void Insert(Bounds bounds)
    {
        // convert to MinMax representation for faster .Encapsulate()
        MinMaxBounds minmax = new MinMaxBounds
        {
            min = bounds.min,
            max = bounds.max
        };

        // initialize 'total' if not initialized yet.
        // we don't want to call (0,0).Encapsulate(bounds).
        if (boundsCount == 0)
        {
            totalMinMax = minmax;
        }

        // add to current bucket:
        // either initialize new one, or encapsulate into existing one
        if (currentBucket == null)
        {
            currentBucket = minmax;
        }
        else
        {
            // BUGFIX: Nullable<T>.Value returns a COPY of the struct.
            // 'currentBucket.Value.Encapsulate(minmax)' would mutate that
            // temporary copy and silently discard the result, leaving each
            // full bucket with only its first bounds. copy out, mutate,
            // then write back.
            MinMaxBounds current = currentBucket.Value;
            current.Encapsulate(minmax);
            currentBucket = current;
        }

        // current bucket has one more bounds.
        // total bounds increased as well.
        currentBucketSize += 1;
        boundsCount += 1;

        // always encapsulate into total immediately.
        // this is free.
        totalMinMax.Encapsulate(minmax);

        // current bucket full?
        if (currentBucketSize == boundsPerBucket)
        {
            // move it to full buckets
            fullBuckets.Enqueue(currentBucket.Value);
            currentBucket = null;
            currentBucketSize = 0;

            // full bucket capacity reached?
            if (fullBuckets.Count > bucketLimit)
            {
                // remove oldest bucket
                fullBuckets.Dequeue();
                boundsCount -= boundsPerBucket;

                // recompute total bounds
                // instead of iterating N buckets, we iterate N / boundsPerBucket buckets.
                // TODO technically we could reuse 'currentBucket' before clearing instead of encapsulating again
                totalMinMax = minmax;
                foreach (MinMaxBounds bucket in fullBuckets)
                    totalMinMax.Encapsulate(bucket);
            }
        }
    }

    // clear all history and reset totals to an empty state.
    public void Reset()
    {
        fullBuckets.Clear();
        currentBucket = null;
        currentBucketSize = 0;
        boundsCount = 0;
        totalMinMax = new MinMaxBounds();
    }
}
}

View File

@ -0,0 +1,18 @@
fileFormatVersion: 2
guid: ca9ea58b98a34f73801b162cd5de724e
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
userData:
assetBundleName:
assetBundleVariant:
AssetOrigin:
serializedVersion: 1
productId: 129321
packageName: Mirror
packageVersion: 96.0.1
assetPath: Assets/Mirror/Core/LagCompensation/HistoryBounds.cs
uploadId: 736421

View File

@ -0,0 +1,144 @@
// standalone lag compensation algorithm
// based on the Valve Networking Model:
// https://developer.valvesoftware.com/wiki/Source_Multiplayer_Networking
using System.Collections.Generic;
namespace Mirror
{
public static class LagCompensation
{
    // history is of <timestamp, capture>.
    // Queue allows for fast 'remove first' and 'append last'.
    //
    // make sure to always insert in order.
    // inserting out of order like [1,2,4,3] would cause issues.
    // can't safeguard this because Queue doesn't have .Last access.
    public static void Insert<T>(
        Queue<KeyValuePair<double, T>> history,
        int historyLimit,
        double timestamp,
        T capture)
        where T : Capture
    {
        // drop the oldest entry before appending, so we never resize
        // past the intended capacity.
        if (history.Count >= historyLimit)
            history.Dequeue();

        // append the newest capture.
        history.Enqueue(new KeyValuePair<double, T>(timestamp, capture));
    }

    // get the two snapshots closest to a given timestamp.
    // those can be used to interpolate the exact snapshot at that time.
    // if timestamp is newer than the newest history entry, then we extrapolate.
    // 't' will be between 1 and 2, before is second last, after is last.
    // callers should Lerp(before, after, t=1.5) to extrapolate the hit.
    // see comments below for extrapolation.
    public static bool Sample<T>(
        Queue<KeyValuePair<double, T>> history,
        double timestamp, // current server time
        double interval, // capture interval
        out T before,
        out T after,
        out double t) // interpolation factor
        where T : Capture
    {
        before = default;
        after = default;
        t = 0;

        // can't sample an empty history
        // interpolation needs at least one entry.
        // extrapolation needs at least two entries.
        // can't Lerp(A, A, 1.5). dist(A, A) * 1.5 is always 0.
        if (history.Count < 2)
            return false;

        // requested time older than the oldest entry? nothing to rewind to.
        if (timestamp < history.Peek().Key)
            return false;

        // walk the history from oldest to newest.
        // TODO faster version: guess start index by how many 'intervals' we are behind.
        //      search around that area.
        //      should be O(1) most of the time, unless sampling was off.
        KeyValuePair<double, T> last = new KeyValuePair<double, T>();
        KeyValuePair<double, T> secondLast = new KeyValuePair<double, T>();
        foreach (KeyValuePair<double, T> entry in history)
        {
            // landed exactly on an entry?
            if (entry.Key == timestamp)
            {
                before = entry.Value;
                after = entry.Value;
                t = Mathd.InverseLerp(before.timestamp, after.timestamp, timestamp);
                return true;
            }

            // walked past the requested time? the previous entry and this
            // one bracket it.
            if (entry.Key > timestamp)
            {
                before = last.Value;
                after = entry.Value;
                t = Mathd.InverseLerp(before.timestamp, after.timestamp, timestamp);
                return true;
            }

            // remember the last two for extrapolation.
            // Queue doesn't have access to .Last.
            secondLast = last;
            last = entry;
        }

        // newer than newest: extrapolate up to one interval.
        // let's say we capture every 100 ms:
        //   100, 200, 300, 400
        // and the server is at 499
        // if a client sends CmdFire at time 480, then there's no history entry.
        // => adding the current entry every time would be too expensive.
        //    worst case we would capture at 401, 402, 403, 404, ... 100 times
        // => not extrapolating isn't great. low latency clients would be
        //    punished by missing their targets since no entry at 'time' was found.
        // => extrapolation is the best solution. make sure this works as
        //    expected and within limits.
        if (last.Key < timestamp && timestamp <= last.Key + interval)
        {
            // return the last two valid snapshots.
            // can't just return (after, after) because we can't extrapolate
            // if their distance is 0.
            before = secondLast.Value;
            after = last.Value;

            // InverseLerp will give [after, after+interval].
            // but we return [before, after, t].
            // so add +1 for the distance from before->after
            t = 1 + Mathd.InverseLerp(after.timestamp, after.timestamp + interval, timestamp);
            return true;
        }

        return false;
    }

    // never trust the client.
    // we estimate when a message was sent.
    // don't trust the client to tell us the time.
    //   https://developer.valvesoftware.com/wiki/Source_Multiplayer_Networking
    //   Command Execution Time = Current Server Time - Packet Latency - Client View Interpolation
    // => lag compensation demo estimation is off by only ~6ms
    public static double EstimateTime(double serverTime, double rtt, double bufferTime) =>
        // packet latency is one trip from client to server, so rtt / 2.
        // client view interpolation is the snapshot interpolation buffer time.
        serverTime - rtt / 2 - bufferTime;

    // convenience function to draw all history gizmos.
    // this should be called from OnDrawGizmos.
    public static void DrawGizmos<T>(Queue<KeyValuePair<double, T>> history)
        where T : Capture
    {
        foreach (KeyValuePair<double, T> entry in history)
            entry.Value.DrawGizmo();
    }
}

View File

@ -0,0 +1,18 @@
fileFormatVersion: 2
guid: ad53cc7d12144d0ba3a8b0a4515e5d17
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
userData:
assetBundleName:
assetBundleVariant:
AssetOrigin:
serializedVersion: 1
productId: 129321
packageName: Mirror
packageVersion: 96.0.1
assetPath: Assets/Mirror/Core/LagCompensation/LagCompensation.cs
uploadId: 736421

View File

@ -0,0 +1,19 @@
// lag compensation settings.
// can easily be exposed in Unity inspectors.
using System;
using UnityEngine;
namespace Mirror
{
// class so we can define defaults easily
[Serializable]
public class LagCompensationSettings
{
    [Header("Buffering")]
    // how many captures to keep; maximum rewind := historyLimit * captureInterval.
    [Tooltip("Keep this many past snapshots in the buffer. The larger this is, the further we can rewind into the past.\nMaximum rewind time := historyAmount * captureInterval")]
    public int historyLimit = 6;

    // seconds between two captures.
    [Tooltip("Capture state every 'captureInterval' seconds. Larger values will space out the captures more, which gives a longer history but with possible gaps inbetween.\nSmaller values will have fewer gaps, with shorter history.")]
    public float captureInterval = 0.100f; // 100 ms
}
}

View File

@ -0,0 +1,18 @@
fileFormatVersion: 2
guid: fa80bec245f94bf8a28ec78777992a1c
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
userData:
assetBundleName:
assetBundleVariant:
AssetOrigin:
serializedVersion: 1
productId: 129321
packageName: Mirror
packageVersion: 96.0.1
assetPath: Assets/Mirror/Core/LagCompensation/LagCompensationSettings.cs
uploadId: 736421

View File

@ -0,0 +1,73 @@
// Unity's Bounds struct is represented as (center, extents).
// HistoryBounds make heavy use of .Encapsulate(), which has to convert
// Unity's (center, extents) to (min, max) every time, and then convert back.
//
// It's faster to use a (min, max) representation directly instead.
using System;
using System.Runtime.CompilerServices;
using UnityEngine;
namespace Mirror
{
// (min, max) axis-aligned bounding box.
// IEquatable<MinMaxBounds> added so EqualityComparer<MinMaxBounds>.Default
// can use the strongly typed Equals below without boxing (previously only
// IEquatable<Bounds> was declared even though the matching overload existed).
public struct MinMaxBounds : IEquatable<MinMaxBounds>, IEquatable<Bounds>
{
    public Vector3 min;
    public Vector3 max;

    // encapsulate a single point
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public void Encapsulate(Vector3 point)
    {
        min = Vector3.Min(this.min, point);
        max = Vector3.Max(this.max, point);
    }

    // encapsulate another bounds.
    // inlined for consistency with the point overload: both are equally hot,
    // this one is called for every bucket merge in HistoryBounds.
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public void Encapsulate(MinMaxBounds bounds)
    {
        Encapsulate(bounds.min);
        Encapsulate(bounds.max);
    }

    // convenience comparison with Unity's bounds, for unit tests etc.
    public static bool operator ==(MinMaxBounds lhs, Bounds rhs) =>
        lhs.min == rhs.min &&
        lhs.max == rhs.max;

    public static bool operator !=(MinMaxBounds lhs, Bounds rhs) =>
        !(lhs == rhs);

    public override bool Equals(object obj) =>
        obj is MinMaxBounds other &&
        min == other.min &&
        max == other.max;

    // NOTE: uses Vector3.Equals (exact component comparison), while the
    // operators above use Vector3.== (Unity's approximate comparison).
    public bool Equals(MinMaxBounds other) =>
        min.Equals(other.min) && max.Equals(other.max);

    public bool Equals(Bounds other) =>
        min.Equals(other.min) && max.Equals(other.max);

#if UNITY_2021_3_OR_NEWER
    // Unity 2019/2020 don't have HashCode.Combine yet.
    // this is only to avoid reflection. without defining, it works too.
    // default generated by rider
    public override int GetHashCode() => HashCode.Combine(min, max);
#else
    public override int GetHashCode()
    {
        // return HashCode.Combine(min, max); without using .Combine for older Unity versions
        unchecked
        {
            int hash = 17;
            hash = hash * 23 + min.GetHashCode();
            hash = hash * 23 + max.GetHashCode();
            return hash;
        }
    }
#endif

    // tostring
    public override string ToString() => $"({min}, {max})";
}
}

View File

@ -0,0 +1,18 @@
fileFormatVersion: 2
guid: 4372b1e1a1cc4c669cc7bf0925f59d29
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
userData:
assetBundleName:
assetBundleVariant:
AssetOrigin:
serializedVersion: 1
productId: 129321
packageName: Mirror
packageVersion: 96.0.1
assetPath: Assets/Mirror/Core/LagCompensation/MinMaxBounds.cs
uploadId: 736421