Use of frc.robot.VisionConstants.CameraPosition in the project RobotCode2022 by Mechanical-Advantage.
From the class Drive, method addVisionMeasurement:
/**
 * Adds a new timestamped vision measurement and blends it into the drive's pose estimate.
 */
public void addVisionMeasurement(TimestampedTranslation2d data) {
  Optional<Pose2d> historicalFieldToTarget = poseHistory.get(data.timestamp);
  if (historicalFieldToTarget.isPresent()) {

    // Get camera constants
    CameraPosition cameraPosition =
        VisionConstants.getCameraPosition(hoodStateSupplier.get());
    if (cameraPosition == null) {
      // Hood is moving, don't process frame
      return;
    }

    // Calculate new robot pose from the camera-to-target translation
    Rotation2d robotRotation = historicalFieldToTarget.get().getRotation();
    Rotation2d cameraRotation =
        robotRotation.rotateBy(cameraPosition.vehicleToCamera.getRotation());
    Transform2d fieldToTargetRotated =
        new Transform2d(FieldConstants.hubCenter, cameraRotation);
    Transform2d fieldToCamera = fieldToTargetRotated
        .plus(GeomUtil.transformFromTranslation(data.translation.unaryMinus()));
    Pose2d visionFieldToTarget = GeomUtil
        .transformToPose(fieldToCamera.plus(cameraPosition.vehicleToCamera.inverse()));

    // Reject poses that fall outside the field boundary
    if (visionFieldToTarget.getX() > FieldConstants.fieldLength
        || visionFieldToTarget.getX() < 0.0
        || visionFieldToTarget.getY() > FieldConstants.fieldWidth
        || visionFieldToTarget.getY() < 0.0) {
      return;
    }

    // Save vision pose for logging
    noVisionTimer.reset();
    lastVisionPose = visionFieldToTarget;

    // Calculate vision percent: trust the vision pose less while the robot is
    // rotating quickly, since angular error dominates at high yaw rates
    double angularErrorScale =
        Math.abs(inputs.gyroYawVelocityRadPerSec) / visionMaxAngularVelocity;
    angularErrorScale = MathUtil.clamp(angularErrorScale, 0, 1);
    double visionShift =
        1 - Math.pow(1 - visionShiftPerSec, 1 / visionNominalFramerate);
    visionShift *= 1 - angularErrorScale;

    // Reset pose
    Pose2d currentFieldToTarget = getPose();
    Translation2d fieldToVisionField = new Translation2d(
        visionFieldToTarget.getX() - historicalFieldToTarget.get().getX(),
        visionFieldToTarget.getY() - historicalFieldToTarget.get().getY());
    Pose2d visionLatencyCompFieldToTarget = new Pose2d(
        currentFieldToTarget.getX() + fieldToVisionField.getX(),
        currentFieldToTarget.getY() + fieldToVisionField.getY(),
        currentFieldToTarget.getRotation());
    if (resetOnVision) {
      setPose(new Pose2d(visionFieldToTarget.getX(), visionFieldToTarget.getY(),
          currentFieldToTarget.getRotation()), true);
      resetOnVision = false;
    } else {
      setPose(new Pose2d(
          currentFieldToTarget.getX() * (1 - visionShift)
              + visionLatencyCompFieldToTarget.getX() * visionShift,
          currentFieldToTarget.getY() * (1 - visionShift)
              + visionLatencyCompFieldToTarget.getY() * visionShift,
          currentFieldToTarget.getRotation()), false);
    }
  }
}
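The visionShift math above converts a per-second trust fraction into a per-frame one: if the per-frame shift k satisfies (1 - k)^framerate = 1 - visionShiftPerSec, then applying k once per frame corrects visionShiftPerSec of the error over a full second. A minimal standalone sketch of that blend, using illustrative values for the two constants (the real values are tuned elsewhere in the Drive class):

// Hypothetical demo of the per-frame vision blend; constants are illustrative only.
public class VisionShiftDemo {
  public static void main(String[] args) {
    double visionShiftPerSec = 0.85;      // assumed: fraction of error corrected per second
    double visionNominalFramerate = 22.0; // assumed: vision frames per second

    // Per-frame shift chosen so (1 - shift)^framerate == 1 - visionShiftPerSec
    double visionShift =
        1 - Math.pow(1 - visionShiftPerSec, 1 / visionNominalFramerate);

    // Blend an odometry x coordinate toward a vision-derived one (meters)
    double odometryX = 4.0;
    double visionX = 4.5;
    double blendedX = odometryX * (1 - visionShift) + visionX * visionShift;
    System.out.printf("per-frame shift = %.4f, blended x = %.4f%n", visionShift, blendedX);
  }
}

Scaling that per-frame shift down by the angular-error term then makes the filter trust vision less while the robot is spinning quickly.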
Use of frc.robot.VisionConstants.CameraPosition in the project RobotCode2022 by Mechanical-Advantage.
From the class Vision, method periodic:
@Override
public void periodic() {
  io.updateInputs(inputs);
  Logger.getInstance().processInputs("Vision", inputs);
  int targetCount = ledsOn ? inputs.cornerX.length / 4 : 0;

  // Update LED idle state: keep blinking briefly after a target was last seen
  if (targetCount > 0) {
    targetGraceTimer.reset();
  }
  boolean idleOn = targetGraceTimer.get() < targetGraceSecs
      || Timer.getFPGATimestamp() % blinkPeriodSecs < blinkLengthSecs;

  // Update LED state based on the selected mode
  switch (modeSupplier.get()) {
    case ALWAYS_OFF:
      ledsOn = false;
      break;
    case ALWAYS_ON:
      ledsOn = true;
      break;
    case AUTO:
      if (forceLeds) {
        ledsOn = true;
      } else if (DriverStation.isDisabled()) {
        ledsOn = false;
      } else if (DriverStation.isAutonomous()) {
        ledsOn = autoEnabled;
      } else if (climbModeSupplier.get()) {
        ledsOn = false;
      } else {
        ledsOn = idleOn;
      }
      break;
    default:
      ledsOn = false;
      break;
  }
  io.setLeds(ledsOn);

  // Exit if no new frame
  if (inputs.captureTimestamp == lastCaptureTimestamp) {
    return;
  }
  lastCaptureTimestamp = inputs.captureTimestamp;

  // Get camera constants
  CameraPosition cameraPosition =
      VisionConstants.getCameraPosition(hoodStateSupplier.get());
  if (cameraPosition == null) {
    // Hood is moving or unknown, don't process frame
    return;
  }

  // Calculate camera to target translation
  if (targetCount >= minTargetCount) {
    List<Translation2d> cameraToTargetTranslations = new ArrayList<>();
    for (int targetIndex = 0; targetIndex < targetCount; targetIndex++) {
      // Each target contributes four corners; average them to find the target center
      List<VisionPoint> corners = new ArrayList<>();
      double totalX = 0.0, totalY = 0.0;
      for (int i = targetIndex * 4; i < (targetIndex * 4) + 4; i++) {
        if (i < inputs.cornerX.length && i < inputs.cornerY.length) {
          corners.add(new VisionPoint(inputs.cornerX[i], inputs.cornerY[i]));
          totalX += inputs.cornerX[i];
          totalY += inputs.cornerY[i];
        }
      }
      VisionPoint targetAvg = new VisionPoint(totalX / 4, totalY / 4);
      corners = sortCorners(corners, targetAvg);

      // After sorting, the first two corners are the upper edge of the tape strip
      for (int i = 0; i < corners.size(); i++) {
        Translation2d translation = solveCameraToTargetTranslation(
            corners.get(i),
            i < 2 ? FieldConstants.visionTargetHeightUpper
                : FieldConstants.visionTargetHeightLower,
            cameraPosition);
        if (translation != null) {
          cameraToTargetTranslations.add(translation);
        }
      }
    }

    // Fit a circle of the known target radius through the corner translations
    if (cameraToTargetTranslations.size() >= minTargetCount * 4) {
      Translation2d cameraToTargetTranslation = CircleFitter.fit(
          FieldConstants.visionTargetDiameter / 2.0,
          cameraToTargetTranslations, circleFitPrecision);
      translationConsumer.accept(new TimestampedTranslation2d(
          inputs.captureTimestamp - extraLatencySecs, cameraToTargetTranslation));
    }
  }
}
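CircleFitter.fit has to recover the hub center from corner translations that should all lie on a circle of known radius (half of visionTargetDiameter). The project's actual fitter is not shown here; purely as an illustrative sketch of the idea, a known-radius fit can be done with a fixed-point iteration that averages each point's implied center until the estimate moves by less than the requested precision:

import java.util.List;

// Illustrative known-radius circle fit; NOT the project's CircleFitter implementation.
// Assumes the points lie near a circle of the given radius and precision > 0.
public final class SimpleCircleFit {
  /** Fits a center {x, y} to points assumed to lie on a circle of the given radius. */
  public static double[] fit(double radius, List<double[]> points, double precision) {
    // Start from the centroid, nudged off-center so direction vectors are well defined
    double cx = 0.0;
    double cy = 0.0;
    for (double[] p : points) {
      cx += p[0];
      cy += p[1];
    }
    cx /= points.size();
    cy /= points.size();
    cy += radius;

    while (true) {
      // Each point p implies a center at p + radius * unit(center - p); average them
      double nextX = 0.0;
      double nextY = 0.0;
      for (double[] p : points) {
        double dx = cx - p[0];
        double dy = cy - p[1];
        double dist = Math.hypot(dx, dy);
        nextX += p[0] + radius * dx / dist;
        nextY += p[1] + radius * dy / dist;
      }
      nextX /= points.size();
      nextY /= points.size();

      double step = Math.hypot(nextX - cx, nextY - cy);
      cx = nextX;
      cy = nextY;
      if (step < precision) {
        return new double[] {cx, cy};
      }
    }
  }
}

Because the radius is fixed by the field geometry, even a partial arc of corners constrains the center, which is why periodic can accept frames showing only minTargetCount of the hub's tape targets rather than the full ring.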