Using Xamarin.Forms, I have created a combined PinchToZoom and Pan container that allows me to zoom and position an image within a viewport. The viewport is just the pinch-to-zoom and pan container itself, which from the user's point of view is simply the tool that lets them select part of the image within a square.
The following boilerplate code (with my additions) illustrates the process. ICameraBoothService is my service that obtained the original image bytes; it consumes the latest coordinate data each time there is a pinch or pan, so that when I dismiss this modal page I can go back to the original image byte[] and apply some process to extract the cropped region into a new image:
/// <summary>
/// A ContentView that wraps its Content in pinch-to-zoom and pan gesture
/// handling. After every gesture update the current transformation
/// (content size, translation offsets, and scale factors) is pushed to the
/// <see cref="ICameraBoothService"/> so the original image bytes can later
/// be cropped to match the on-screen viewport.
/// </summary>
public class PinchToZoomContainer : ContentView
{
    private readonly ICameraBoothService _cbs;

    // ---- Pinch state ----
    double currentScale = 1; // scale currently applied to Content
    double startScale = 1;   // Content.Scale captured when the pinch began
    double xOffset = 0;      // accumulated X translation of Content
    double yOffset = 0;      // accumulated Y translation of Content
    double xCoord;           // last target X computed during a pinch
    double yCoord;           // last target Y computed during a pinch

    // ---- Pan state ----
    bool blnDisableMove = false; // when true, suppress the next pan movement

    // NOTE(review): these two fields are never read or written anywhere in
    // this class — confirm they are dead before deleting them.
    private bool _subscribedImageSizeChanged;
    private CachedImage _cachedImage;

    /// <summary>
    /// Resolves the camera-booth service from the IoC container and wires up
    /// the pinch and pan gesture recognizers.
    /// </summary>
    public PinchToZoomContainer()
    {
        _cbs = SimpleIoc.Default.GetInstance<ICameraBoothService>("Default");

        var pinchGesture = new PinchGestureRecognizer();
        pinchGesture.PinchUpdated += OnPinchUpdated;
        GestureRecognizers.Add(pinchGesture);

        var panGesture = new PanGestureRecognizer();
        panGesture.PanUpdated += OnPanUpdated;
        GestureRecognizers.Add(panGesture);
    }

    /// <summary>
    /// Translates the zoomed Content while the user pans, clamping the
    /// translation so the image edges never pull inside the page bounds,
    /// then reports the new state via <see cref="OnUpdated"/>.
    /// </summary>
    void OnPanUpdated(object sender, PanUpdatedEventArgs e)
    {
        // Panning only makes sense once the content is zoomed in.
        if (Content.Scale == 1)
        {
            return;
        }

        switch (e.StatusType)
        {
            case GestureStatus.Running:
                if (!blnDisableMove)
                {
                    // NOTE(review): 'Scale' here is this container's own scale
                    // (normally 1), not Content.Scale — the stock Xamarin
                    // sample multiplies by Content.Scale. Confirm which is
                    // intended; with Scale == 1 the pan will lag the finger
                    // when zoomed in.
                    Content.TranslationX = Math.Max(Math.Min(0, xOffset + (e.TotalX * Scale)), -Math.Abs((Content.Width * Content.Scale) - Application.Current.MainPage.Width));
                    Content.TranslationY = Math.Max(Math.Min(0, yOffset + (e.TotalY * Scale)), -Math.Abs((Content.Height * Content.Scale) - Application.Current.MainPage.Height));
                }
                break;

            case GestureStatus.Completed:
                if (blnDisableMove)
                {
                    blnDisableMove = false;
                    return;
                }
                // Store the translation applied during the pan so the next
                // gesture continues from where this one left off.
                xOffset = Content.TranslationX;
                yOffset = Content.TranslationY;
                break;
        }

        Debug.WriteLine("Panning");
        OnUpdated();
    }

    /// <summary>
    /// Pushes the current transformation state to the camera-booth service
    /// and dumps it to the debug output for diagnosis.
    /// </summary>
    private void OnUpdated()
    {
        _cbs.EnableSelect();
        _cbs.LoadTransformation(Content.Height, Content.Width, xOffset, yOffset, currentScale, startScale);

        Debug.WriteLine("______________________________");
        Debug.WriteLine($"Content Height: {Content.Height}");
        Debug.WriteLine($"Content Width: {Content.Width}");
        Debug.WriteLine($"Image Height: {((Image)Content).Height}");
        Debug.WriteLine($"Image Width: {((Image)Content).Width}");
        Debug.WriteLine($"xOffset: {xOffset}");
        // BUG FIX: this line previously printed yOffset under the label
        // "xOffset", making the diagnostic trace misleading.
        Debug.WriteLine($"yOffset: {yOffset}");
        Debug.WriteLine($"currentScale: {currentScale}");
        Debug.WriteLine($"startScale: {startScale}");
        Debug.WriteLine("______________________________");
    }

    /// <summary>
    /// Scales the Content about the pinch origin, translating it so the point
    /// under the user's fingers stays fixed, and clamps both scale (>= 1) and
    /// translation (content never detaches from the viewport edges).
    /// </summary>
    void OnPinchUpdated(object sender, PinchGestureUpdatedEventArgs e)
    {
        if (e.Status == GestureStatus.Started)
        {
            // Store the current scale factor applied to the wrapped element,
            // and anchor the transform at its top-left corner.
            startScale = Content.Scale;
            Content.AnchorX = 0;
            Content.AnchorY = 0;
        }
        if (e.Status == GestureStatus.Running)
        {
            // Calculate the scale factor to be applied, never dropping
            // below the unzoomed size.
            currentScale += (e.Scale - 1) * startScale;
            currentScale = Math.Max(1, currentScale);

            // The ScaleOrigin is in coordinates relative to the wrapped
            // element, so convert it to an X pixel coordinate.
            double renderedX = Content.X + xOffset;
            double deltaX = renderedX / Width;
            double deltaWidth = Width / (Content.Width * startScale);
            double originX = (e.ScaleOrigin.X - deltaX) * deltaWidth;

            // Same conversion for the Y pixel coordinate.
            double renderedY = Content.Y + yOffset;
            double deltaY = renderedY / Height;
            double deltaHeight = Height / (Content.Height * startScale);
            double originY = (e.ScaleOrigin.Y - deltaY) * deltaHeight;

            // Calculate the transformed element pixel coordinates.
            double targetX = xOffset - (originX * Content.Width) * (currentScale - startScale);
            xCoord = targetX;
            double targetY = yOffset - (originY * Content.Height) * (currentScale - startScale);
            yCoord = targetY;

            // Apply translation based on the change in origin, clamped so
            // the content cannot be dragged past its own edges.
            Content.TranslationX = targetX.Clamp(-Content.Width * (currentScale - 1), 0);
            Content.TranslationY = targetY.Clamp(-Content.Height * (currentScale - 1), 0);

            // Apply scale factor.
            Content.Scale = currentScale;
        }
        if (e.Status == GestureStatus.Completed)
        {
            // Store the translation deltas of the wrapped element so the
            // next gesture continues from here.
            xOffset = Content.TranslationX;
            yOffset = Content.TranslationY;
        }

        Debug.WriteLine("Pinching");
        OnUpdated();
    }
}
The following line merely passes the latest state back to where the original image came from:
_cbs.LoadTransformation(Content.Height, Content.Width, xOffset, yOffset, currentScale, startScale);
It is inside that service that I want to apply a process to the original image bytes: crop from the x and y coordinates of the top-left position of the viewport, for the width and height of the viewport, taking into account how the image has been scaled.
The following would be easy, if I could determine the x,y coord, and width (using https://www.nuget.org/packages/Xamarin.Plugin.ImageEdit/):
var image = await CrossImageEdit.Current.CreateImageAsync(copy);
image.Crop(an x coord, a y coord, a calculated width, a calculated height);
However it seems to me that the data I have to play with is insufficient for me to apply this to the original image.
Any ideas on how to do this - even if I have to start again.