alap
This commit is contained in:
86
Assets/Mirror/Core/Tools/AccurateInterval.cs
Normal file
86
Assets/Mirror/Core/Tools/AccurateInterval.cs
Normal file
@ -0,0 +1,86 @@
|
||||
// accurate interval from Mirror II.
|
||||
// for sync / send intervals where it matters.
|
||||
// does not(!) do catch-up.
|
||||
//
|
||||
// first, let's understand the problem.
|
||||
// say we need an interval of 10 Hz, so every 100ms in Update we do:
|
||||
// if (Time.time >= lastTime + interval)
|
||||
// {
|
||||
// lastTime = Time.time;
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// this seems fine, but actually Time.time will always be a few ms beyond
|
||||
// the interval. but since lastTime is reset to Time.time, the remainder
|
||||
// is always ignored away.
|
||||
// with fixed tickRate servers (say 30 Hz), the remainder is significant!
|
||||
//
|
||||
// in practice if we have a 30 Hz tickRate server with a 30 Hz sendRate,
|
||||
// the above way to measure the interval would result in a 18-19 Hz sendRate!
|
||||
// => this is not just a little off. this is _way_ off, by almost half.
|
||||
// => displaying actual + target tick/send rate will show this very easily.
|
||||
//
|
||||
// we need an accurate way to measure intervals for where it matters.
|
||||
// and it needs to be testable to guarantee results.
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace Mirror
{
    // Accurate interval measurement (from Mirror II), for sync / send
    // intervals where drift matters. Deliberately does NOT catch up.
    //
    // Naive 'if (Time.time >= lastTime + interval) lastTime = Time.time'
    // discards the few ms that Time.time overshoots the interval each
    // frame. With a fixed tickRate server (e.g. 30 Hz tick + 30 Hz send)
    // that remainder adds up to an actual ~18-19 Hz send rate — off by
    // almost half. This helper keeps the remainder so measured rates
    // match target rates, and is testable to guarantee it.
    public static class AccurateInterval
    {
        // Static function instead of an interval+lastTime struct:
        // nothing to initialize in Awake, and 'interval' may change at
        // runtime.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static bool Elapsed(double time, double interval, ref double lastTime)
        {
            // not yet due?
            if (time < lastTime + interval)
                return false;

            // Snap lastTime to the highest elapsed multiple of 'interval'
            // instead of:
            //   lastTime = time       (loses the remainder, halves rates)
            //   lastTime += interval  (after heavy load, e.g. a 1s stall
            //                          at 100ms interval, the next ~10
            //                          calls would all return true and
            //                          keep the server overloaded)
            // see also: AccurateIntervalTests.Slowdown()
            long steps = (long)(time / interval);
            lastTime = steps * interval;
            return true;
        }
    }
}
|
11
Assets/Mirror/Core/Tools/AccurateInterval.cs.meta
Normal file
11
Assets/Mirror/Core/Tools/AccurateInterval.cs.meta
Normal file
@ -0,0 +1,11 @@
|
||||
fileFormatVersion: 2
|
||||
guid: c1b18064e25046f28b88db65a4012ec1
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
556
Assets/Mirror/Core/Tools/Compression.cs
Normal file
556
Assets/Mirror/Core/Tools/Compression.cs
Normal file
@ -0,0 +1,556 @@
|
||||
// Quaternion compression from DOTSNET
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
using UnityEngine;
|
||||
|
||||
namespace Mirror
|
||||
{
|
||||
/// <summary>Functions to Compress Quaternions and Floats</summary>
|
||||
public static class Compression
|
||||
{
|
||||
// divide by precision (functions backported from Mirror II)
|
||||
// for example, 0.1 cm precision converts '5.0f' float to '50' long.
|
||||
//
|
||||
// 'long' instead of 'int' to allow for large enough worlds.
|
||||
// value / precision exceeds int.max range too easily.
|
||||
// Convert.ToInt32/64 would throw.
|
||||
// https://github.com/vis2k/DOTSNET/issues/59
|
||||
//
|
||||
// 'long' and 'int' will result in the same bandwidth though.
|
||||
// for example, ScaleToLong(10.5, 0.1) = 105.
|
||||
// int: 0x00000069
|
||||
// long: 0x0000000000000069
|
||||
// delta compression will reduce both to 1 byte.
|
||||
//
|
||||
// returns
|
||||
// 'true' if scaling was possible within 'long' bounds.
|
||||
// 'false' if clamping was necessary.
|
||||
// never throws. checking result is optional.
|
||||
// Divide by precision to quantize a float into 'long' steps.
// e.g. 0.1 precision turns 5.0f into 50.
// 'long' instead of 'int': value/precision exceeds int range too easily
// for large worlds (https://github.com/vis2k/DOTSNET/issues/59), and
// delta compression reduces both to the same bandwidth anyway.
// returns true if the value fit into 'long', false if it was clamped.
// never throws for overflow; checking the result is optional.
public static bool ScaleToLong(float value, float precision, out long result)
{
    // precision=0 would divide by zero. deliberately unsupported:
    // callers that don't want rounding shouldn't call this at all,
    // so throw to make them fix it immediately.
    if (precision == 0) throw new DivideByZeroException($"ScaleToLong: precision=0 would cause null division. If rounding isn't wanted, don't call this function.");

    try
    {
        // Convert.ToInt64 rounds, and throws on overflow instead of
        // silently wrapping like a plain cast would.
        float scaled = value / precision;
        result = Convert.ToInt64(scaled);
        return true;
    }
    catch (OverflowException)
    {
        // clamp instead of throwing: attackers must never be able to
        // trigger exceptions. clamping to max keeps far-away entities
        // stuck at the world border (easy to debug) instead of
        // resetting them to origin.
        result = value > 0 ? long.MaxValue : long.MinValue;
        return false;
    }
}
|
||||
|
||||
// returns
|
||||
// 'true' if scaling was possible within 'long' bounds.
|
||||
// 'false' if clamping was necessary.
|
||||
// never throws. checking result is optional.
|
||||
// Vector3 variant: quantizes all three components.
// returns true only if every component fit without clamping.
// always attempts all three even after a failure, because the return
// value is optional for callers.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static bool ScaleToLong(Vector3 value, float precision, out long x, out long y, out long z)
{
    bool ok = ScaleToLong(value.x, precision, out x);
    ok &= ScaleToLong(value.y, precision, out y);
    ok &= ScaleToLong(value.z, precision, out z);
    return ok;
}
|
||||
|
||||
// Vector3 variant writing into a Vector3Long.
// returns true only if every component fit without clamping.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static bool ScaleToLong(Vector3 value, float precision, out Vector3Long quantized)
{
    bool ok = ScaleToLong(value, precision, out long x, out long y, out long z);
    quantized = new Vector3Long(x, y, z);
    return ok;
}
|
||||
|
||||
// multiple by precision.
|
||||
// for example, 0.1 cm precision converts '50' long to '5.0f' float.
|
||||
// Multiply by precision to expand a quantized long back to float.
// e.g. 0.1 precision turns 50 back into 5.0f.
public static float ScaleToFloat(long value, float precision)
{
    // precision=0 is deliberately unsupported (it is also a null
    // division in the inverse ScaleToLong direction): if rounding
    // isn't wanted, this function shouldn't be called at all, so
    // throw to make the user fix it immediately.
    // fixed: the message previously said "ScaleToLong:", pointing
    // users at the wrong function.
    if (precision == 0) throw new DivideByZeroException($"ScaleToFloat: precision=0 would cause null division. If rounding isn't wanted, don't call this function.");

    return value * precision;
}
|
||||
|
||||
// expand three quantized components back into a Vector3.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector3 ScaleToFloat(long x, long y, long z, float precision)
{
    return new Vector3(
        ScaleToFloat(x, precision),
        ScaleToFloat(y, precision),
        ScaleToFloat(z, precision));
}
|
||||
|
||||
// Vector3Long convenience overload.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static Vector3 ScaleToFloat(Vector3Long value, float precision)
{
    return ScaleToFloat(value.x, value.y, value.z, precision);
}
|
||||
|
||||
// scale a float within min/max range to an ushort between min/max range
|
||||
// note: can also use this for byte range from byte.MinValue to byte.MaxValue
|
||||
// Map a float in [minValue,maxValue] onto an ushort in [minTarget,maxTarget].
// also usable for byte ranges (byte.MinValue..byte.MaxValue).
public static ushort ScaleFloatToUShort(float value, float minValue, float maxValue, ushort minTarget, ushort maxTarget)
{
    // C# promotes ushort - ushort to int, hence the casts:
    // max ushort - min ushort only fits into something bigger.
    int span = maxTarget - minTarget;
    float range = maxValue - minValue;
    float relative = value - minValue;
    // scale into [0, span], truncate to ushort, then offset by minTarget.
    ushort scaled = (ushort)(relative / range * span);
    return (ushort)(minTarget + scaled);
}
|
||||
|
||||
// scale an ushort within min/max range to a float between min/max range
|
||||
// note: can also use this for byte range from byte.MinValue to byte.MaxValue
|
||||
// Map an ushort in [minValue,maxValue] back onto a float in [minTarget,maxTarget].
// also usable for byte ranges (byte.MinValue..byte.MaxValue).
public static float ScaleUShortToFloat(ushort value, ushort minValue, ushort maxValue, float minTarget, float maxTarget)
{
    // C# promotes ushort arithmetic to int, hence the casts.
    float span = maxTarget - minTarget;
    ushort range = (ushort)(maxValue - minValue);
    ushort relative = (ushort)(value - minValue);
    // float-divide before scaling so the fraction isn't truncated.
    return minTarget + (relative / (float)range * span);
}
|
||||
|
||||
// quaternion compression //////////////////////////////////////////////
|
||||
// smallest three: https://gafferongames.com/post/snapshot_compression/
|
||||
// compresses 16 bytes quaternion into 4 bytes
|
||||
|
||||
// helper function to find largest absolute element
|
||||
// returns the index of the largest one
|
||||
// Find the index [0..3] of the component with the largest absolute
// value. Outputs that absolute value and a Vector3 of the remaining
// three components (original x,y,z,w order with the largest removed).
public static int LargestAbsoluteComponentIndex(Vector4 value, out float largestAbs, out Vector3 withoutLargest)
{
    // absolute values of all four components
    Vector4 abs = new Vector4(Mathf.Abs(value.x), Mathf.Abs(value.y), Mathf.Abs(value.z), Mathf.Abs(value.w));

    // start by assuming x is the largest
    largestAbs = abs.x;
    withoutLargest = new Vector3(value.y, value.z, value.w);
    int index = 0;

    // manually unrolled comparisons: measured faster than a for-loop
    // (22ms vs 25ms per 100k calls).
    if (abs.y > largestAbs)
    {
        index = 1;
        largestAbs = abs.y;
        withoutLargest = new Vector3(value.x, value.z, value.w);
    }
    if (abs.z > largestAbs)
    {
        index = 2;
        largestAbs = abs.z;
        withoutLargest = new Vector3(value.x, value.y, value.w);
    }
    if (abs.w > largestAbs)
    {
        index = 3;
        largestAbs = abs.w;
        withoutLargest = new Vector3(value.x, value.y, value.z);
    }

    return index;
}
|
||||
|
||||
// smallest-three: for a normalized quaternion, once the largest
// component is removed, the remaining three lie in
// [-1/sqrt(2), +1/sqrt(2)] (see derivation in CompressQuaternion).
const float QuaternionMinRange = -0.707107f;
const float QuaternionMaxRange = 0.707107f;
// each of the three stored components gets (32-2)/3 = 10 bits;
// 10 bits max value = 1023.
const ushort TenBitsMax = 0b11_1111_1111;
|
||||
|
||||
// note: assumes normalized quaternions
|
||||
// Compress a 16 byte quaternion into 4 bytes using "smallest three":
// https://gafferongames.com/post/snapshot_compression/
// note: assumes a normalized quaternion as input.
public static uint CompressQuaternion(Quaternion q)
{
    // note: assuming normalized quaternions is enough. no need to force
    //       normalize here. we already normalize when decompressing.

    // find the largest component index [0,3] + value
    int largestIndex = LargestAbsoluteComponentIndex(new Vector4(q.x, q.y, q.z, q.w), out float _, out Vector3 withoutLargest);

    // from here on, we work with the 3 components without largest!

    // "You might think you need to send a sign bit for [largest] in
    //  case it is negative, but you don't, because you can make
    //  [largest] always positive by negating the entire quaternion if
    //  [largest] is negative. in quaternion space (x,y,z,w) and
    //  (-x,-y,-z,-w) represent the same rotation."
    if (q[largestIndex] < 0)
        withoutLargest = -withoutLargest;

    // put index & three floats into one integer.
    // => index is 2 bits (4 values require 2 bits to store them)
    // => the three floats are between [-0.707107,+0.707107] because:
    //    "If v is the absolute value of the largest quaternion
    //     component, the next largest possible component value occurs
    //     when two components have the same absolute value and the
    //     other two components are zero. The length of that quaternion
    //     (v,v,0,0) is 1, therefore v^2 + v^2 = 1, 2v^2 = 1,
    //     v = 1/sqrt(2). This means you can encode the smallest three
    //     components in [-0.707107,+0.707107] instead of [-1,+1] giving
    //     you more precision with the same number of bits."
    // => the article recommends storing each float in 9 bits
    // => our uint has 32 bits, so we might as well store in (32-2)/3=10
    //    10 bits max value: 1023=0x3FF
    ushort aScaled = ScaleFloatToUShort(withoutLargest.x, QuaternionMinRange, QuaternionMaxRange, 0, TenBitsMax);
    ushort bScaled = ScaleFloatToUShort(withoutLargest.y, QuaternionMinRange, QuaternionMaxRange, 0, TenBitsMax);
    ushort cScaled = ScaleFloatToUShort(withoutLargest.z, QuaternionMinRange, QuaternionMaxRange, 0, TenBitsMax);

    // now we just need to pack them into one integer
    // -> index is 2 bit and needs to be shifted to 31..32
    // -> a is 10 bit and needs to be shifted 20..30
    // -> b is 10 bit and needs to be shifted 10..20
    // -> c is 10 bit and needs to be at 0..10
    return (uint)(largestIndex << 30 | aScaled << 20 | bScaled << 10 | cScaled);
}
|
||||
|
||||
// Quaternion normalizeSAFE from ECS math.normalizesafe()
|
||||
// => useful to produce valid quaternions even if client sends invalid
|
||||
// data
|
||||
// Normalize-safe from ECS math.normalizesafe():
// returns identity instead of a degenerate result for near-zero
// (or NaN-producing) input, so invalid client data can never yield an
// invalid quaternion.
[MethodImpl(MethodImplOptions.AggressiveInlining)]
static Quaternion QuaternionNormalizeSafe(Quaternion value)
{
    // the smallest positive normal float: anything at or below this
    // is treated as zero length.
    const float FLT_MIN_NORMAL = 1.175494351e-38F;

    Vector4 v = new Vector4(value.x, value.y, value.z, value.w);
    // Dot(v, v) is the squared length.
    float lengthSq = Vector4.Dot(v, v);
    if (lengthSq > FLT_MIN_NORMAL)
        return value.normalized;
    return Quaternion.identity;
}
|
||||
|
||||
// note: gives normalized quaternions
|
||||
// Decompress a 4 byte "smallest three" quaternion back to 16 bytes.
// note: always returns a normalized quaternion, even for invalid input.
public static Quaternion DecompressQuaternion(uint data)
{
    // get cScaled which is at 0..10 and ignore the rest
    ushort cScaled = (ushort)(data & TenBitsMax);

    // get bScaled which is at 10..20 and ignore the rest
    ushort bScaled = (ushort)((data >> 10) & TenBitsMax);

    // get aScaled which is at 20..30 and ignore the rest
    ushort aScaled = (ushort)((data >> 20) & TenBitsMax);

    // get 2 bit largest index, which is at 31..32
    int largestIndex = (int)(data >> 30);

    // scale back to floats in [-0.707107,+0.707107]
    float a = ScaleUShortToFloat(aScaled, 0, TenBitsMax, QuaternionMinRange, QuaternionMaxRange);
    float b = ScaleUShortToFloat(bScaled, 0, TenBitsMax, QuaternionMinRange, QuaternionMaxRange);
    float c = ScaleUShortToFloat(cScaled, 0, TenBitsMax, QuaternionMinRange, QuaternionMaxRange);

    // calculate the omitted component based on a²+b²+c²+d²=1.
    // may be NaN for corrupted/malicious data (sum > 1);
    // QuaternionNormalizeSafe below sanitizes that into identity.
    float d = Mathf.Sqrt(1 - a*a - b*b - c*c);

    // reinsert the largest component at its original position
    Vector4 value;
    switch (largestIndex)
    {
        case 0: value = new Vector4(d, a, b, c); break;
        case 1: value = new Vector4(a, d, b, c); break;
        case 2: value = new Vector4(a, b, d, c); break;
        default: value = new Vector4(a, b, c, d); break;
    }

    // ECS Rotation only works with normalized quaternions.
    // make sure that's always the case here to avoid ECS bugs where
    // everything stops moving if the quaternion isn't normalized.
    // => NormalizeSafe returns a normalized quaternion even if we pass
    //    in NaN from deserializing invalid values!
    return QuaternionNormalizeSafe(new Quaternion(value.x, value.y, value.z, value.w));
}
|
||||
|
||||
// varint compression //////////////////////////////////////////////////
|
||||
// helper function to predict varint size for a given number.
|
||||
// useful when checking if a message + size header will fit, etc.
|
||||
// Predict the encoded size in bytes of a varint for 'value',
// matching CompressVarUInt's thresholds.
// useful when checking if a message + size header will fit, etc.
public static int VarUIntSize(ulong value)
{
    if (value <= 240) return 1;
    if (value <= 2287) return 2;
    if (value <= 67823) return 3;
    if (value <= 16_777_215) return 4;              // < 2^24
    if (value <= 4_294_967_295) return 5;           // < 2^32
    if (value <= 1_099_511_627_775) return 6;       // < 2^40
    if (value <= 281_474_976_710_655) return 7;     // < 2^48
    if (value <= 72_057_594_037_927_935) return 8;  // < 2^56
    return 9;
}
|
||||
|
||||
// helper function to predict varint size for a given number.
|
||||
// useful when checking if a message + size header will fit, etc.
|
||||
// Predict the encoded varint size in bytes for a signed value.
// useful when checking if a message + size header will fit, etc.
public static int VarIntSize(long value)
{
    // CompressVarInt zigzags it first, so predict the size of the
    // zigzagged value: sign moves to bit 0, keeping small negative
    // numbers small on the wire.
    ulong zigzagged = (ulong)((value >> 63) ^ (value << 1));
    return VarUIntSize(zigzagged);
}
|
||||
|
||||
// compress ulong varint.
|
||||
// same result for ulong, uint, ushort and byte. only need one function.
|
||||
// NOT an extension. otherwise weaver might accidentally use it.
|
||||
// Write 'value' as a variable-size unsigned integer (1..9 bytes).
// the encoding is identical for ulong, uint, ushort and byte inputs,
// so one function covers all of them.
// NOT an extension: otherwise the weaver might accidentally use it.
//
// packs the encoding's header byte(s) and payload into as few writer
// calls as possible instead of writing byte-by-byte: varints are on
// the hot path, and per-byte Space/WriteBlittable overhead adds up.
public static void CompressVarUInt(NetworkWriter writer, ulong value)
{
    // 1 byte: value encodes itself
    if (value <= 240)
    {
        writer.WriteByte((byte)value);
        return;
    }
    // 2 bytes: header 241..248 carries the high bits of (value-240)
    if (value <= 2287)
    {
        byte header = (byte)(((value - 240) >> 8) + 241);
        byte low = (byte)((value - 240) & 0xFF);
        writer.WriteUShort((ushort)(low << 8 | header));
        return;
    }
    // 3 bytes: header 249 + two payload bytes of (value-2288)
    if (value <= 67823)
    {
        byte high = (byte)((value - 2288) >> 8);
        byte low = (byte)((value - 2288) & 0xFF);
        writer.WriteByte(249);
        writer.WriteUShort((ushort)(low << 8 | high));
        return;
    }
    // 4 bytes: header 250 + 3 little-endian payload bytes, one uint write
    if (value <= 16_777_215)
    {
        uint payload = (uint)(value << 8);
        writer.WriteUInt(payload | 250);
        return;
    }
    // 5 bytes: header 251 + 4 payload bytes
    if (value <= 4_294_967_295)
    {
        writer.WriteByte(251);
        writer.WriteUInt((uint)value);
        return;
    }
    // 6 bytes: header 252 + 5 payload bytes
    if (value <= 1_099_511_627_775)
    {
        byte low = (byte)(value & 0xFF);
        uint rest = (uint)(value >> 8);
        writer.WriteUShort((ushort)(low << 8 | 252));
        writer.WriteUInt(rest);
        return;
    }
    // 7 bytes: header 253 + 6 payload bytes
    if (value <= 281_474_976_710_655)
    {
        byte low = (byte)(value & 0xFF);
        byte mid = (byte)((value >> 8) & 0xFF);
        uint rest = (uint)(value >> 16);
        writer.WriteByte(253);
        writer.WriteUShort((ushort)(mid << 8 | low));
        writer.WriteUInt(rest);
        return;
    }
    // 8 bytes: header 254 + 7 payload bytes, one ulong write
    if (value <= 72_057_594_037_927_935)
    {
        ulong payload = value << 8;
        writer.WriteULong(payload | 254);
        return;
    }

    // 9 bytes: header 255 + the full 8 byte value
    writer.WriteByte(255);
    writer.WriteULong(value);
}
|
||||
|
||||
// zigzag encoding https://gist.github.com/mfuerstenau/ba870a29e16536fdbaba
|
||||
// Write a signed value as varint: zigzag-encode first so small
// negative numbers also stay small on the wire.
// zigzag encoding: https://gist.github.com/mfuerstenau/ba870a29e16536fdbaba
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static void CompressVarInt(NetworkWriter writer, long i)
{
    // (i >> 63) is all sign bits; (i << 1) makes room for the sign
    // in bit 0.
    CompressVarUInt(writer, (ulong)((i >> 63) ^ (i << 1)));
}
|
||||
|
||||
// NOT an extension. otherwise weaver might accidentally use it.
|
||||
// Read a variable-size unsigned integer (1..9 bytes).
// inverse of CompressVarUInt: the first byte selects the encoding,
// the remaining bytes are little-endian payload.
// NOT an extension. otherwise weaver might accidentally use it.
public static ulong DecompressVarUInt(NetworkReader reader)
{
    byte a0 = reader.ReadByte();
    // 0..240 encode themselves directly
    if (a0 < 241)
    {
        return a0;
    }

    byte a1 = reader.ReadByte();
    // 241..248: two byte encoding of 241..2287
    if (a0 <= 248)
    {
        return 240 + ((a0 - (ulong)241) << 8) + a1;
    }

    byte a2 = reader.ReadByte();
    // 249: three byte encoding of 2288..67823
    if (a0 == 249)
    {
        return 2288 + ((ulong)a1 << 8) + a2;
    }

    byte a3 = reader.ReadByte();
    // 250: 3 payload bytes
    if (a0 == 250)
    {
        return a1 + (((ulong)a2) << 8) + (((ulong)a3) << 16);
    }

    byte a4 = reader.ReadByte();
    // 251: 4 payload bytes
    if (a0 == 251)
    {
        return a1 + (((ulong)a2) << 8) + (((ulong)a3) << 16) + (((ulong)a4) << 24);
    }

    byte a5 = reader.ReadByte();
    // 252: 5 payload bytes
    if (a0 == 252)
    {
        return a1 + (((ulong)a2) << 8) + (((ulong)a3) << 16) + (((ulong)a4) << 24) + (((ulong)a5) << 32);
    }

    byte a6 = reader.ReadByte();
    // 253: 6 payload bytes
    if (a0 == 253)
    {
        return a1 + (((ulong)a2) << 8) + (((ulong)a3) << 16) + (((ulong)a4) << 24) + (((ulong)a5) << 32) + (((ulong)a6) << 40);
    }

    byte a7 = reader.ReadByte();
    // 254: 7 payload bytes
    if (a0 == 254)
    {
        return a1 + (((ulong)a2) << 8) + (((ulong)a3) << 16) + (((ulong)a4) << 24) + (((ulong)a5) << 32) + (((ulong)a6) << 40) + (((ulong)a7) << 48);
    }

    byte a8 = reader.ReadByte();
    // 255: full 8 byte payload
    if (a0 == 255)
    {
        return a1 + (((ulong)a2) << 8) + (((ulong)a3) << 16) + (((ulong)a4) << 24) + (((ulong)a5) << 32) + (((ulong)a6) << 40) + (((ulong)a7) << 48) + (((ulong)a8) << 56);
    }

    // unreachable in practice: a0 is a byte and every value 0..255 is
    // handled above. kept as a defensive guard.
    throw new IndexOutOfRangeException($"DecompressVarInt failure: {a0}");
}
|
||||
|
||||
// zigzag decoding https://gist.github.com/mfuerstenau/ba870a29e16536fdbaba
|
||||
// Read a signed varint and undo the zigzag encoding.
// zigzag decoding: https://gist.github.com/mfuerstenau/ba870a29e16536fdbaba
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static long DecompressVarInt(NetworkReader reader)
{
    ulong zigzagged = DecompressVarUInt(reader);
    // bit 0 is the sign, the remaining bits are the magnitude.
    long magnitude = (long)(zigzagged >> 1);
    long sign = -((long)zigzagged & 1); // 0 or -1 (all bits set)
    return magnitude ^ sign;
}
|
||||
}
|
||||
}
|
11
Assets/Mirror/Core/Tools/Compression.cs.meta
Normal file
11
Assets/Mirror/Core/Tools/Compression.cs.meta
Normal file
@ -0,0 +1,11 @@
|
||||
fileFormatVersion: 2
|
||||
guid: 5c28963f9c4b97e418252a55500fb91e
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
38
Assets/Mirror/Core/Tools/DeltaCompression.cs
Normal file
38
Assets/Mirror/Core/Tools/DeltaCompression.cs
Normal file
@ -0,0 +1,38 @@
|
||||
// manual delta compression for some types.
|
||||
// varint(b-a)
|
||||
// Mirror can't use Mirror II's bit-tree delta compression.
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace Mirror
{
    // Manual delta compression for a few types: encode varint(current - last).
    // (Mirror can't use Mirror II's bit-tree delta compression.)
    public static class DeltaCompression
    {
        // deltas are usually small; zigzag varint keeps +/- changes cheap.
        // parameter order (last, current) mirrors Quake 3's delta functions.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static void Compress(NetworkWriter writer, long last, long current) =>
            Compression.CompressVarInt(writer, current - last);

        // inverse: read the delta and apply it to 'last'.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static long Decompress(NetworkReader reader, long last) =>
            last + Compression.DecompressVarInt(reader);

        // component-wise delta for Vector3Long.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static void Compress(NetworkWriter writer, Vector3Long last, Vector3Long current)
        {
            Compress(writer, last.x, current.x);
            Compress(writer, last.y, current.y);
            Compress(writer, last.z, current.z);
        }

        // component-wise inverse; reads x, y, z in write order.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static Vector3Long Decompress(NetworkReader reader, Vector3Long last)
        {
            long x = Decompress(reader, last.x);
            long y = Decompress(reader, last.y);
            long z = Decompress(reader, last.z);
            return new Vector3Long(x, y, z);
        }
    }
}
|
11
Assets/Mirror/Core/Tools/DeltaCompression.cs.meta
Normal file
11
Assets/Mirror/Core/Tools/DeltaCompression.cs.meta
Normal file
@ -0,0 +1,11 @@
|
||||
fileFormatVersion: 2
|
||||
guid: 6b8f3fffcb4754c15bc5ed4c33e2497b
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
53
Assets/Mirror/Core/Tools/ExponentialMovingAverage.cs
Normal file
53
Assets/Mirror/Core/Tools/ExponentialMovingAverage.cs
Normal file
@ -0,0 +1,53 @@
|
||||
// N-day EMA implementation from Mirror with a few changes (struct etc.)
|
||||
// it calculates an exponential moving average roughly equivalent to the last n observations
|
||||
// https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
|
||||
using System;
|
||||
|
||||
namespace Mirror
{
    // N-observation exponential moving average, struct version.
    // tracks mean, exponentially weighted variance and standard deviation.
    // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
    public struct ExponentialMovingAverage
    {
        // smoothing factor, fixed per instance.
        readonly double alpha;
        // the first sample seeds Value directly instead of being smoothed.
        bool initialized;

        public double Value;
        public double Variance;
        public double StandardDeviation; // absolute value, see test

        public ExponentialMovingAverage(int n)
        {
            // standard N-day EMA alpha calculation
            alpha = 2.0 / (n + 1);
            initialized = false;
            Value = 0;
            Variance = 0;
            StandardDeviation = 0;
        }

        // fold one observation into the average.
        // https://en.wikipedia.org/wiki/Moving_average#Exponentially_weighted_moving_variance_and_standard_deviation
        public void Add(double newValue)
        {
            // very first sample: seed directly, nothing to smooth yet.
            if (!initialized)
            {
                Value = newValue;
                initialized = true;
                return;
            }

            double delta = newValue - Value;
            Value += alpha * delta;
            Variance = (1 - alpha) * (Variance + alpha * delta * delta);
            StandardDeviation = Math.Sqrt(Variance);
        }

        // back to the freshly-constructed state.
        public void Reset()
        {
            initialized = false;
            Value = 0;
            Variance = 0;
            StandardDeviation = 0;
        }
    }
}
|
11
Assets/Mirror/Core/Tools/ExponentialMovingAverage.cs.meta
Normal file
11
Assets/Mirror/Core/Tools/ExponentialMovingAverage.cs.meta
Normal file
@ -0,0 +1,11 @@
|
||||
fileFormatVersion: 2
|
||||
guid: 05e858cbaa54b4ce4a48c8c7f50c1914
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
112
Assets/Mirror/Core/Tools/Extensions.cs
Normal file
112
Assets/Mirror/Core/Tools/Extensions.cs
Normal file
@ -0,0 +1,112 @@
|
||||
using System;
|
||||
using System.Collections.Concurrent;
|
||||
using System.Collections.Generic;
|
||||
using System.Runtime.CompilerServices;
|
||||
using UnityEngine;
|
||||
|
||||
namespace Mirror
|
||||
{
|
||||
public static class Extensions
|
||||
{
|
||||
// hex dump of a byte segment, e.g. "01-02-FF" (BitConverter's format).
public static string ToHexString(this ArraySegment<byte> segment)
{
    return BitConverter.ToString(segment.Array, segment.Offset, segment.Count);
}
|
||||
|
||||
// string.GetHashCode is not guaranteed to be the same on all
|
||||
// machines, but we need one that is the same on all machines.
|
||||
// Uses fnv1a as hash function for more uniform distribution http://www.isthe.com/chongo/tech/comp/fnv/
|
||||
// Tests: https://softwareengineering.stackexchange.com/questions/49550/which-hashing-algorithm-is-best-for-uniqueness-and-speed
|
||||
// NOTE: Do not call this from hot path because it's slow O(N) for long method names.
|
||||
// - As of 2012-02-16 There are 2 design-time callers (weaver) and 1 runtime caller that caches.
|
||||
// Deterministic string hash (32 bit fnv-1a).
// string.GetHashCode is not guaranteed to match across machines or
// runtimes, but networked identifiers must hash identically everywhere.
// fnv-1a gives a uniform distribution: http://www.isthe.com/chongo/tech/comp/fnv/
// O(N) — don't call from hot paths; runtime callers cache the result.
public static int GetStableHashCode(this string text)
{
    unchecked
    {
        // fnv-1a 32 bit offset basis and prime
        uint hash = 0x811c9dc5;
        uint prime = 0x1000193;

        foreach (char c in text)
        {
            // each char is truncated to its low byte before mixing,
            // matching the original implementation (keeps existing
            // hashes stable).
            hash ^= (byte)c;
            hash *= prime;
        }

        return (int)hash;
    }
}
|
||||
|
||||
// smaller version of our GetStableHashCode.
|
||||
// careful, this significantly increases chance of collisions.
|
||||
public static ushort GetStableHashCode16(this string text)
|
||||
{
|
||||
// deterministic hash
|
||||
int hash = GetStableHashCode(text);
|
||||
|
||||
// Gets the 32bit fnv1a hash
|
||||
// To get it down to 16bit but still reduce hash collisions we cant just cast it to ushort
|
||||
// Instead we take the highest 16bits of the 32bit hash and fold them with xor into the lower 16bits
|
||||
// This will create a more uniform 16bit hash, the method is described in:
|
||||
// http://www.isthe.com/chongo/tech/comp/fnv/ in section "Changing the FNV hash size - xor-folding"
|
||||
return (ushort)((hash >> 16) ^ hash);
|
||||
}
|
||||
|
||||
// previously in DotnetCompatibility.cs
|
||||
// leftover from the UNET days. supposedly for windows store?
|
||||
internal static string GetMethodName(this Delegate func)
|
||||
{
|
||||
#if NETFX_CORE
|
||||
return func.GetMethodInfo().Name;
|
||||
#else
|
||||
return func.Method.Name;
|
||||
#endif
|
||||
}
|
||||
|
||||
// helper function to copy to List<T>
|
||||
// C# only provides CopyTo(T[])
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static void CopyTo<T>(this IEnumerable<T> source, List<T> destination)
|
||||
{
|
||||
// foreach allocates. use AddRange.
|
||||
destination.AddRange(source);
|
||||
}
|
||||
|
||||
#if !UNITY_2021_OR_NEWER
|
||||
// Unity 2020 and earlier don't have Queue.TryDequeue which we need for batching.
|
||||
public static bool TryDequeue<T>(this Queue<T> source, out T element)
|
||||
{
|
||||
if (source.Count > 0)
|
||||
{
|
||||
element = source.Dequeue();
|
||||
return true;
|
||||
}
|
||||
|
||||
element = default;
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if !UNITY_2021_OR_NEWER
|
||||
// Unity 2020 and earlier don't have ConcurrentQueue.Clear which we need for ThreadedTransport.
|
||||
public static void Clear<T>(this ConcurrentQueue<T> source)
|
||||
{
|
||||
// while count > 0 risks deadlock if other thread write at the same time.
|
||||
// our safest solution is a best-effort approach to clear 'Count' once.
|
||||
int count = source.Count; // get it only once
|
||||
for (int i = 0; i < count; ++i)
|
||||
{
|
||||
source.TryDequeue(out _);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#if !UNITY_2021_3_OR_NEWER
|
||||
// Unity 2021.2 and earlier don't have transform.GetPositionAndRotation which we use for performance in some places
|
||||
public static void GetPositionAndRotation(this Transform transform, out Vector3 position, out Quaternion rotation)
|
||||
{
|
||||
position = transform.position;
|
||||
rotation = transform.rotation;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
11
Assets/Mirror/Core/Tools/Extensions.cs.meta
Normal file
11
Assets/Mirror/Core/Tools/Extensions.cs.meta
Normal file
@ -0,0 +1,11 @@
|
||||
fileFormatVersion: 2
|
||||
guid: decf32fd053744d18f35712b7a6f5116
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
32
Assets/Mirror/Core/Tools/Mathd.cs
Normal file
32
Assets/Mirror/Core/Tools/Mathd.cs
Normal file
@ -0,0 +1,32 @@
|
||||
// 'double' precision variants for some of Unity's Mathf functions.
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace Mirror
|
||||
{
|
||||
public static class Mathd
|
||||
{
|
||||
// Unity 2020 doesn't have Math.Clamp yet.
|
||||
/// <summary>Clamps value between 0 and 1 and returns value.</summary>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static double Clamp(double value, double min, double max)
|
||||
{
|
||||
if (value < min) return min;
|
||||
if (value > max) return max;
|
||||
return value;
|
||||
}
|
||||
|
||||
/// <summary>Clamps value between 0 and 1 and returns value.</summary>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static double Clamp01(double value) => Clamp(value, 0, 1);
|
||||
|
||||
/// <summary>Calculates the linear parameter t that produces the interpolant value within the range [a, b].</summary>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static double InverseLerp(double a, double b, double value) =>
|
||||
a != b ? Clamp01((value - a) / (b - a)) : 0;
|
||||
|
||||
/// <summary>Linearly interpolates between a and b by t with no limit to t.</summary>
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static double LerpUnclamped(double a, double b, double t) =>
|
||||
a + (b - a) * t;
|
||||
}
|
||||
}
|
11
Assets/Mirror/Core/Tools/Mathd.cs.meta
Normal file
11
Assets/Mirror/Core/Tools/Mathd.cs.meta
Normal file
@ -0,0 +1,11 @@
|
||||
fileFormatVersion: 2
|
||||
guid: 5f74084b91c74df2839b426c4a381373
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
40
Assets/Mirror/Core/Tools/Pool.cs
Normal file
40
Assets/Mirror/Core/Tools/Pool.cs
Normal file
@ -0,0 +1,40 @@
|
||||
// Pool to avoid allocations (from libuv2k)
|
||||
// API consistent with Microsoft's ObjectPool<T>.
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace Mirror
|
||||
{
|
||||
public class Pool<T>
|
||||
{
|
||||
// Mirror is single threaded, no need for concurrent collections.
|
||||
// stack increases the chance that a reused writer remains in cache.
|
||||
readonly Stack<T> objects = new Stack<T>();
|
||||
|
||||
// some types might need additional parameters in their constructor, so
|
||||
// we use a Func<T> generator
|
||||
readonly Func<T> objectGenerator;
|
||||
|
||||
public Pool(Func<T> objectGenerator, int initialCapacity)
|
||||
{
|
||||
this.objectGenerator = objectGenerator;
|
||||
|
||||
// allocate an initial pool so we have fewer (if any)
|
||||
// allocations in the first few frames (or seconds).
|
||||
for (int i = 0; i < initialCapacity; ++i)
|
||||
objects.Push(objectGenerator());
|
||||
}
|
||||
|
||||
// take an element from the pool, or create a new one if empty
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public T Get() => objects.Count > 0 ? objects.Pop() : objectGenerator();
|
||||
|
||||
// return an element to the pool
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public void Return(T item) => objects.Push(item);
|
||||
|
||||
// count to see how many objects are in the pool. useful for tests.
|
||||
public int Count => objects.Count;
|
||||
}
|
||||
}
|
11
Assets/Mirror/Core/Tools/Pool.cs.meta
Normal file
11
Assets/Mirror/Core/Tools/Pool.cs.meta
Normal file
@ -0,0 +1,11 @@
|
||||
fileFormatVersion: 2
|
||||
guid: 845bb05fa349344c3811022f4f15dfbc
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
1
Assets/Mirror/Core/Tools/Readme.txt
Normal file
1
Assets/Mirror/Core/Tools/Readme.txt
Normal file
@ -0,0 +1 @@
|
||||
Standalone algorithms & structs to help build Mirror.
|
3
Assets/Mirror/Core/Tools/Readme.txt.meta
Normal file
3
Assets/Mirror/Core/Tools/Readme.txt.meta
Normal file
@ -0,0 +1,3 @@
|
||||
fileFormatVersion: 2
|
||||
guid: da033671de7d49e0838223a997c56bf1
|
||||
timeCreated: 1667486850
|
61
Assets/Mirror/Core/Tools/TimeSample.cs
Normal file
61
Assets/Mirror/Core/Tools/TimeSample.cs
Normal file
@ -0,0 +1,61 @@
|
||||
// TimeSample from Mirror II.
|
||||
// simple profiling sample, averaged for display in statistics.
|
||||
// usable in builds without unitiy profiler overhead etc.
|
||||
//
|
||||
// .average may safely be called from main thread while Begin/End is in another.
|
||||
// i.e. worker threads, transport, etc.
|
||||
using System.Diagnostics;
|
||||
using System.Threading;
|
||||
|
||||
namespace Mirror
|
||||
{
|
||||
public struct TimeSample
|
||||
{
|
||||
// UnityEngine.Time isn't thread safe. use stopwatch instead.
|
||||
readonly Stopwatch watch;
|
||||
|
||||
// remember when Begin was called
|
||||
double beginTime;
|
||||
|
||||
// keep accumulating times over the given interval.
|
||||
// (not readonly. we modify its contents.)
|
||||
ExponentialMovingAverage ema;
|
||||
|
||||
// average in seconds.
|
||||
// code often runs in sub-millisecond time. float is more precise.
|
||||
//
|
||||
// set with Interlocked for thread safety.
|
||||
// can be read from main thread while sampling happens in other thread.
|
||||
public double average; // THREAD SAFE
|
||||
|
||||
// average over N begin/end captures
|
||||
public TimeSample(int n)
|
||||
{
|
||||
watch = new Stopwatch();
|
||||
watch.Start();
|
||||
ema = new ExponentialMovingAverage(n);
|
||||
beginTime = 0;
|
||||
average = 0;
|
||||
}
|
||||
|
||||
// begin is called before the code to be sampled
|
||||
public void Begin()
|
||||
{
|
||||
// remember when Begin was called.
|
||||
// keep StopWatch running so we can average over the given interval.
|
||||
beginTime = watch.Elapsed.TotalSeconds;
|
||||
// Debug.Log($"Begin @ {beginTime:F4}");
|
||||
}
|
||||
|
||||
// end is called after the code to be sampled
|
||||
public void End()
|
||||
{
|
||||
// add duration in seconds to accumulated durations
|
||||
double elapsed = watch.Elapsed.TotalSeconds - beginTime;
|
||||
ema.Add(elapsed);
|
||||
|
||||
// expose new average thread safely
|
||||
Interlocked.Exchange(ref average, ema.Value);
|
||||
}
|
||||
}
|
||||
}
|
11
Assets/Mirror/Core/Tools/TimeSample.cs.meta
Normal file
11
Assets/Mirror/Core/Tools/TimeSample.cs.meta
Normal file
@ -0,0 +1,11 @@
|
||||
fileFormatVersion: 2
|
||||
guid: 26c32f6429554546a88d800c846c74ed
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
222
Assets/Mirror/Core/Tools/Utils.cs
Normal file
222
Assets/Mirror/Core/Tools/Utils.cs
Normal file
@ -0,0 +1,222 @@
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Security.Cryptography;
|
||||
using UnityEngine;
|
||||
using UnityEngine.Rendering;
|
||||
using UnityEngine.SceneManagement;
|
||||
|
||||
namespace Mirror
|
||||
{
|
||||
// Handles network messages on client and server
|
||||
public delegate void NetworkMessageDelegate(NetworkConnection conn, NetworkReader reader, int channelId);
|
||||
|
||||
// Handles requests to spawn objects on the client
|
||||
public delegate GameObject SpawnDelegate(Vector3 position, uint assetId);
|
||||
|
||||
public delegate GameObject SpawnHandlerDelegate(SpawnMessage msg);
|
||||
|
||||
// Handles requests to unspawn objects on the client
|
||||
public delegate void UnSpawnDelegate(GameObject spawned);
|
||||
|
||||
// channels are const ints instead of an enum so people can add their own
|
||||
// channels (can't extend an enum otherwise).
|
||||
//
|
||||
// note that Mirror is slowly moving towards quake style networking which
|
||||
// will only require reliable for handshake, and unreliable for the rest.
|
||||
// so eventually we can change this to an Enum and transports shouldn't
|
||||
// add custom channels anymore.
|
||||
public static class Channels
|
||||
{
|
||||
public const int Reliable = 0; // ordered
|
||||
public const int Unreliable = 1; // unordered
|
||||
}
|
||||
|
||||
public static class Utils
|
||||
{
|
||||
// detect headless / dedicated server mode
|
||||
// SystemInfo.graphicsDeviceType is never null in the editor.
|
||||
// UNITY_SERVER works in builds for all Unity versions 2019 LTS and later.
|
||||
// For Unity 2019 / 2020, there is no way to detect Server Build checkbox
|
||||
// state in Build Settings, so they never auto-start headless server / client.
|
||||
// UNITY_SERVER works in the editor in Unity 2021 LTS and later
|
||||
// because that's when Dedicated Server platform was added.
|
||||
// It is intentional for editor play mode to auto-start headless server / client
|
||||
// when Dedicated Server platform is selected in the editor so that editor
|
||||
// acts like a headless build to every extent possible for testing / debugging.
|
||||
public static bool IsHeadless() =>
|
||||
#if UNITY_SERVER
|
||||
true;
|
||||
#else
|
||||
SystemInfo.graphicsDeviceType == GraphicsDeviceType.Null;
|
||||
#endif
|
||||
|
||||
// detect WebGL mode
|
||||
public const bool IsWebGL =
|
||||
#if UNITY_WEBGL
|
||||
true;
|
||||
#else
|
||||
false;
|
||||
#endif
|
||||
|
||||
// detect Debug mode
|
||||
public const bool IsDebug =
|
||||
#if DEBUG
|
||||
true;
|
||||
#else
|
||||
false;
|
||||
#endif
|
||||
|
||||
public static uint GetTrueRandomUInt()
|
||||
{
|
||||
// use Crypto RNG to avoid having time based duplicates
|
||||
using (RNGCryptoServiceProvider rng = new RNGCryptoServiceProvider())
|
||||
{
|
||||
byte[] bytes = new byte[4];
|
||||
rng.GetBytes(bytes);
|
||||
return BitConverter.ToUInt32(bytes, 0);
|
||||
}
|
||||
}
|
||||
|
||||
public static bool IsPrefab(GameObject obj)
|
||||
{
|
||||
#if UNITY_EDITOR
|
||||
return UnityEditor.PrefabUtility.IsPartOfPrefabAsset(obj);
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
// simplified IsSceneObject check from Mirror II
|
||||
public static bool IsSceneObject(NetworkIdentity identity)
|
||||
{
|
||||
// original UNET / Mirror still had the IsPersistent check.
|
||||
// it never fires though. even for Prefabs dragged to the Scene.
|
||||
// (see Scene Objects example scene.)
|
||||
// #if UNITY_EDITOR
|
||||
// if (UnityEditor.EditorUtility.IsPersistent(identity.gameObject))
|
||||
// return false;
|
||||
// #endif
|
||||
|
||||
return identity.gameObject.hideFlags != HideFlags.NotEditable &&
|
||||
identity.gameObject.hideFlags != HideFlags.HideAndDontSave &&
|
||||
identity.sceneId != 0;
|
||||
}
|
||||
|
||||
public static bool IsSceneObjectWithPrefabParent(GameObject gameObject, out GameObject prefab)
|
||||
{
|
||||
prefab = null;
|
||||
|
||||
#if UNITY_EDITOR
|
||||
if (!UnityEditor.PrefabUtility.IsPartOfPrefabInstance(gameObject))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
prefab = UnityEditor.PrefabUtility.GetCorrespondingObjectFromSource(gameObject);
|
||||
#endif
|
||||
|
||||
if (prefab == null)
|
||||
{
|
||||
Debug.LogError($"Failed to find prefab parent for scene object [name:{gameObject.name}]");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// is a 2D point in screen? (from ummorpg)
|
||||
// (if width = 1024, then indices from 0..1023 are valid (=1024 indices)
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static bool IsPointInScreen(Vector2 point) =>
|
||||
0 <= point.x && point.x < Screen.width &&
|
||||
0 <= point.y && point.y < Screen.height;
|
||||
|
||||
// pretty print bytes as KB/MB/GB/etc. from DOTSNET
|
||||
// long to support > 2GB
|
||||
// divides by floats to return "2.5MB" etc.
|
||||
public static string PrettyBytes(long bytes)
|
||||
{
|
||||
// bytes
|
||||
if (bytes < 1024)
|
||||
return $"{bytes} B";
|
||||
// kilobytes
|
||||
else if (bytes < 1024L * 1024L)
|
||||
return $"{(bytes / 1024f):F2} KB";
|
||||
// megabytes
|
||||
else if (bytes < 1024 * 1024L * 1024L)
|
||||
return $"{(bytes / (1024f * 1024f)):F2} MB";
|
||||
// gigabytes
|
||||
return $"{(bytes / (1024f * 1024f * 1024f)):F2} GB";
|
||||
}
|
||||
|
||||
// pretty print seconds as hours:minutes:seconds(.milliseconds/100)s.
|
||||
// double for long running servers.
|
||||
public static string PrettySeconds(double seconds)
|
||||
{
|
||||
TimeSpan t = TimeSpan.FromSeconds(seconds);
|
||||
string res = "";
|
||||
if (t.Days > 0) res += $"{t.Days}d";
|
||||
if (t.Hours > 0) res += $"{(res.Length > 0 ? " " : "")}{t.Hours}h";
|
||||
if (t.Minutes > 0) res += $"{(res.Length > 0 ? " " : "")}{t.Minutes}m";
|
||||
// 0.5s, 1.5s etc. if any milliseconds. 1s, 2s etc. if any seconds
|
||||
if (t.Milliseconds > 0) res += $"{(res.Length > 0 ? " " : "")}{t.Seconds}.{(t.Milliseconds / 100)}s";
|
||||
else if (t.Seconds > 0) res += $"{(res.Length > 0 ? " " : "")}{t.Seconds}s";
|
||||
// if the string is still empty because the value was '0', then at least
|
||||
// return the seconds instead of returning an empty string
|
||||
return res != "" ? res : "0s";
|
||||
}
|
||||
|
||||
// universal .spawned function
|
||||
public static NetworkIdentity GetSpawnedInServerOrClient(uint netId)
|
||||
{
|
||||
// server / host mode: use the one from server.
|
||||
// host mode has access to all spawned.
|
||||
if (NetworkServer.active)
|
||||
{
|
||||
NetworkServer.spawned.TryGetValue(netId, out NetworkIdentity entry);
|
||||
return entry;
|
||||
}
|
||||
|
||||
// client
|
||||
if (NetworkClient.active)
|
||||
{
|
||||
NetworkClient.spawned.TryGetValue(netId, out NetworkIdentity entry);
|
||||
return entry;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
// keep a GUI window in screen.
|
||||
// for example. if it's at x=1000 and screen is resized to w=500,
|
||||
// it won't get lost in the invisible area etc.
|
||||
public static Rect KeepInScreen(Rect rect)
|
||||
{
|
||||
// ensure min
|
||||
rect.x = Math.Max(rect.x, 0);
|
||||
rect.y = Math.Max(rect.y, 0);
|
||||
|
||||
// ensure max
|
||||
rect.x = Math.Min(rect.x, Screen.width - rect.width);
|
||||
rect.y = Math.Min(rect.y, Screen.width - rect.height);
|
||||
|
||||
return rect;
|
||||
}
|
||||
|
||||
// create local connections pair and connect them
|
||||
public static void CreateLocalConnections(
|
||||
out LocalConnectionToClient connectionToClient,
|
||||
out LocalConnectionToServer connectionToServer)
|
||||
{
|
||||
connectionToServer = new LocalConnectionToServer();
|
||||
connectionToClient = new LocalConnectionToClient();
|
||||
connectionToServer.connectionToClient = connectionToClient;
|
||||
connectionToClient.connectionToServer = connectionToServer;
|
||||
}
|
||||
|
||||
public static bool IsSceneActive(string scene)
|
||||
{
|
||||
Scene activeScene = SceneManager.GetActiveScene();
|
||||
return activeScene.path == scene ||
|
||||
activeScene.name == scene;
|
||||
}
|
||||
}
|
||||
}
|
11
Assets/Mirror/Core/Tools/Utils.cs.meta
Normal file
11
Assets/Mirror/Core/Tools/Utils.cs.meta
Normal file
@ -0,0 +1,11 @@
|
||||
fileFormatVersion: 2
|
||||
guid: b530ce39098b54374a29ad308c8e4554
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
125
Assets/Mirror/Core/Tools/Vector3Long.cs
Normal file
125
Assets/Mirror/Core/Tools/Vector3Long.cs
Normal file
@ -0,0 +1,125 @@
|
||||
#pragma warning disable CS0659 // 'Vector3Long' overrides Object.Equals(object o) but does not override Object.GetHashCode()
|
||||
#pragma warning disable CS0661 // 'Vector3Long' defines operator == or operator != but does not override Object.GetHashCode()
|
||||
|
||||
// Vector3Long by mischa (based on game engine project)
|
||||
using System;
|
||||
using System.Runtime.CompilerServices;
|
||||
|
||||
namespace Mirror
|
||||
{
|
||||
public struct Vector3Long
|
||||
{
|
||||
public long x;
|
||||
public long y;
|
||||
public long z;
|
||||
|
||||
public static readonly Vector3Long zero = new Vector3Long(0, 0, 0);
|
||||
public static readonly Vector3Long one = new Vector3Long(1, 1, 1);
|
||||
public static readonly Vector3Long forward = new Vector3Long(0, 0, 1);
|
||||
public static readonly Vector3Long back = new Vector3Long(0, 0, -1);
|
||||
public static readonly Vector3Long left = new Vector3Long(-1, 0, 0);
|
||||
public static readonly Vector3Long right = new Vector3Long(1, 0, 0);
|
||||
public static readonly Vector3Long up = new Vector3Long(0, 1, 0);
|
||||
public static readonly Vector3Long down = new Vector3Long(0, -1, 0);
|
||||
|
||||
// constructor /////////////////////////////////////////////////////////
|
||||
public Vector3Long(long x, long y, long z)
|
||||
{
|
||||
this.x = x;
|
||||
this.y = y;
|
||||
this.z = z;
|
||||
}
|
||||
|
||||
// operators ///////////////////////////////////////////////////////////
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static Vector3Long operator +(Vector3Long a, Vector3Long b) =>
|
||||
new Vector3Long(a.x + b.x, a.y + b.y, a.z + b.z);
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static Vector3Long operator -(Vector3Long a, Vector3Long b) =>
|
||||
new Vector3Long(a.x - b.x, a.y - b.y, a.z - b.z);
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static Vector3Long operator -(Vector3Long v) =>
|
||||
new Vector3Long(-v.x, -v.y, -v.z);
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static Vector3Long operator *(Vector3Long a, long n) =>
|
||||
new Vector3Long(a.x * n, a.y * n, a.z * n);
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static Vector3Long operator *(long n, Vector3Long a) =>
|
||||
new Vector3Long(a.x * n, a.y * n, a.z * n);
|
||||
|
||||
// == returns true if approximately equal (with epsilon).
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static bool operator ==(Vector3Long a, Vector3Long b) =>
|
||||
a.x == b.x &&
|
||||
a.y == b.y &&
|
||||
a.z == b.z;
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public static bool operator !=(Vector3Long a, Vector3Long b) => !(a == b);
|
||||
|
||||
// NO IMPLICIT System.Numerics.Vector3Long conversion because double<->float
|
||||
// would silently lose precision in large worlds.
|
||||
|
||||
// [i] component index. useful for iterating all components etc.
|
||||
public long this[int index]
|
||||
{
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
get
|
||||
{
|
||||
switch (index)
|
||||
{
|
||||
case 0: return x;
|
||||
case 1: return y;
|
||||
case 2: return z;
|
||||
default: throw new IndexOutOfRangeException($"Vector3Long[{index}] out of range.");
|
||||
}
|
||||
}
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
set
|
||||
{
|
||||
switch (index)
|
||||
{
|
||||
case 0:
|
||||
x = value;
|
||||
break;
|
||||
case 1:
|
||||
y = value;
|
||||
break;
|
||||
case 2:
|
||||
z = value;
|
||||
break;
|
||||
default: throw new IndexOutOfRangeException($"Vector3Long[{index}] out of range.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// instance functions //////////////////////////////////////////////////
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public override string ToString() => $"({x} {y} {z})";
|
||||
|
||||
// equality ////////////////////////////////////////////////////////////
|
||||
// implement Equals & HashCode explicitly for performance.
|
||||
// calling .Equals (instead of "==") checks for exact equality.
|
||||
// (API compatibility)
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public bool Equals(Vector3Long other) =>
|
||||
x == other.x && y == other.y && z == other.z;
|
||||
|
||||
// Equals(object) can reuse Equals(Vector4)
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public override bool Equals(object other) =>
|
||||
other is Vector3Long vector4 && Equals(vector4);
|
||||
|
||||
#if UNITY_2021_3_OR_NEWER
|
||||
// Unity 2019/2020 don't have HashCode.Combine yet.
|
||||
// this is only to avoid reflection. without defining, it works too.
|
||||
// default generated by rider
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public override int GetHashCode() => HashCode.Combine(x, y, z);
|
||||
#endif
|
||||
}
|
||||
}
|
11
Assets/Mirror/Core/Tools/Vector3Long.cs.meta
Normal file
11
Assets/Mirror/Core/Tools/Vector3Long.cs.meta
Normal file
@ -0,0 +1,11 @@
|
||||
fileFormatVersion: 2
|
||||
guid: 18efa4e349254185ad257401dd24628b
|
||||
MonoImporter:
|
||||
externalObjects: {}
|
||||
serializedVersion: 2
|
||||
defaultReferences: []
|
||||
executionOrder: 0
|
||||
icon: {fileID: 2800000, guid: 7453abfe9e8b2c04a8a47eb536fe21eb, type: 3}
|
||||
userData:
|
||||
assetBundleName:
|
||||
assetBundleVariant:
|
Reference in New Issue
Block a user